tests: virtio-9p: add FLUSH operation test  (commit subject shown by gitweb)
[qemu.git] / linux-user / syscall.c
blob 74378947f02a3ad5c742cb50fafff3a2eba30008
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #ifdef __ia64__
40 int __clone2(int (*fn)(void *), void *child_stack_base,
41 size_t stack_size, int flags, void *arg, ...);
42 #endif
43 #include <sys/socket.h>
44 #include <sys/un.h>
45 #include <sys/uio.h>
46 #include <poll.h>
47 #include <sys/times.h>
48 #include <sys/shm.h>
49 #include <sys/sem.h>
50 #include <sys/statfs.h>
51 #include <time.h>
52 #include <utime.h>
53 #include <sys/sysinfo.h>
54 #include <sys/signalfd.h>
55 //#include <sys/user.h>
56 #include <netinet/ip.h>
57 #include <netinet/tcp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/errqueue.h>
62 #include <linux/random.h>
63 #include "qemu-common.h"
64 #ifdef CONFIG_TIMERFD
65 #include <sys/timerfd.h>
66 #endif
67 #ifdef TARGET_GPROF
68 #include <sys/gmon.h>
69 #endif
70 #ifdef CONFIG_EVENTFD
71 #include <sys/eventfd.h>
72 #endif
73 #ifdef CONFIG_EPOLL
74 #include <sys/epoll.h>
75 #endif
76 #ifdef CONFIG_ATTR
77 #include "qemu/xattr.h"
78 #endif
79 #ifdef CONFIG_SENDFILE
80 #include <sys/sendfile.h>
81 #endif
83 #define termios host_termios
84 #define winsize host_winsize
85 #define termio host_termio
86 #define sgttyb host_sgttyb /* same as target */
87 #define tchars host_tchars /* same as target */
88 #define ltchars host_ltchars /* same as target */
90 #include <linux/termios.h>
91 #include <linux/unistd.h>
92 #include <linux/cdrom.h>
93 #include <linux/hdreg.h>
94 #include <linux/soundcard.h>
95 #include <linux/kd.h>
96 #include <linux/mtio.h>
97 #include <linux/fs.h>
98 #if defined(CONFIG_FIEMAP)
99 #include <linux/fiemap.h>
100 #endif
101 #include <linux/fb.h>
102 #include <linux/vt.h>
103 #include <linux/dm-ioctl.h>
104 #include <linux/reboot.h>
105 #include <linux/route.h>
106 #include <linux/filter.h>
107 #include <linux/blkpg.h>
108 #include <netpacket/packet.h>
109 #include <linux/netlink.h>
110 #ifdef CONFIG_RTNETLINK
111 #include <linux/rtnetlink.h>
112 #include <linux/if_bridge.h>
113 #endif
114 #include <linux/audit.h>
115 #include "linux_loop.h"
116 #include "uname.h"
118 #include "qemu.h"
120 #ifndef CLONE_IO
121 #define CLONE_IO 0x80000000 /* Clone io context */
122 #endif
124 /* We can't directly call the host clone syscall, because this will
125 * badly confuse libc (breaking mutexes, for example). So we must
126 * divide clone flags into:
127 * * flag combinations that look like pthread_create()
128 * * flag combinations that look like fork()
129 * * flags we can implement within QEMU itself
130 * * flags we can't support and will return an error for
132 /* For thread creation, all these flags must be present; for
133 * fork, none must be present.
135 #define CLONE_THREAD_FLAGS \
136 (CLONE_VM | CLONE_FS | CLONE_FILES | \
137 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
139 /* These flags are ignored:
140 * CLONE_DETACHED is now ignored by the kernel;
141 * CLONE_IO is just an optimisation hint to the I/O scheduler
143 #define CLONE_IGNORED_FLAGS \
144 (CLONE_DETACHED | CLONE_IO)
146 /* Flags for fork which we can implement within QEMU itself */
147 #define CLONE_OPTIONAL_FORK_FLAGS \
148 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
149 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
151 /* Flags for thread creation which we can implement within QEMU itself */
152 #define CLONE_OPTIONAL_THREAD_FLAGS \
153 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
154 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
156 #define CLONE_INVALID_FORK_FLAGS \
157 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
159 #define CLONE_INVALID_THREAD_FLAGS \
160 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
161 CLONE_IGNORED_FLAGS))
163 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
164 * have almost all been allocated. We cannot support any of
165 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
166 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
167 * The checks against the invalid thread masks above will catch these.
168 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
171 //#define DEBUG
172 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
173 * once. This exercises the codepaths for restart.
175 //#define DEBUG_ERESTARTSYS
177 //#include <linux/msdos_fs.h>
178 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
179 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
181 #undef _syscall0
182 #undef _syscall1
183 #undef _syscall2
184 #undef _syscall3
185 #undef _syscall4
186 #undef _syscall5
187 #undef _syscall6
189 #define _syscall0(type,name) \
190 static type name (void) \
192 return syscall(__NR_##name); \
195 #define _syscall1(type,name,type1,arg1) \
196 static type name (type1 arg1) \
198 return syscall(__NR_##name, arg1); \
201 #define _syscall2(type,name,type1,arg1,type2,arg2) \
202 static type name (type1 arg1,type2 arg2) \
204 return syscall(__NR_##name, arg1, arg2); \
207 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
208 static type name (type1 arg1,type2 arg2,type3 arg3) \
210 return syscall(__NR_##name, arg1, arg2, arg3); \
213 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
214 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
216 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
219 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
220 type5,arg5) \
221 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
223 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
227 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
228 type5,arg5,type6,arg6) \
229 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
230 type6 arg6) \
232 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
236 #define __NR_sys_uname __NR_uname
237 #define __NR_sys_getcwd1 __NR_getcwd
238 #define __NR_sys_getdents __NR_getdents
239 #define __NR_sys_getdents64 __NR_getdents64
240 #define __NR_sys_getpriority __NR_getpriority
241 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
242 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
243 #define __NR_sys_syslog __NR_syslog
244 #define __NR_sys_futex __NR_futex
245 #define __NR_sys_inotify_init __NR_inotify_init
246 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
247 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
249 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
250 defined(__s390x__)
251 #define __NR__llseek __NR_lseek
252 #endif
254 /* Newer kernel ports have llseek() instead of _llseek() */
255 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
256 #define TARGET_NR__llseek TARGET_NR_llseek
257 #endif
/* gettid(): use the raw host syscall when the libc headers expose
 * __NR_gettid; otherwise fall back to a stub.  Either way the result
 * must use *host* errno conventions, since it is translated later.
 * NOTE(review): the closing brace of the stub (original line 266) was
 * lost in extraction. */
259 #ifdef __NR_gettid
260 _syscall0(int, gettid)
261 #else
262 /* This is a replacement for the host gettid() and must return a host
263 errno. */
264 static int gettid(void) {
265 return -ENOSYS;
267 #endif
268 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
269 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
270 #endif
271 #if !defined(__NR_getdents) || \
272 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
273 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
274 #endif
275 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
276 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
277 loff_t *, res, uint, wh);
278 #endif
279 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
280 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
281 siginfo_t *, uinfo)
282 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
283 #ifdef __NR_exit_group
284 _syscall1(int,exit_group,int,error_code)
285 #endif
286 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
287 _syscall1(int,set_tid_address,int *,tidptr)
288 #endif
289 #if defined(TARGET_NR_futex) && defined(__NR_futex)
290 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
291 const struct timespec *,timeout,int *,uaddr2,int,val3)
292 #endif
293 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
294 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
295 unsigned long *, user_mask_ptr);
296 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
297 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
298 unsigned long *, user_mask_ptr);
299 #define __NR_sys_getcpu __NR_getcpu
300 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
301 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
302 void *, arg);
303 _syscall2(int, capget, struct __user_cap_header_struct *, header,
304 struct __user_cap_data_struct *, data);
305 _syscall2(int, capset, struct __user_cap_header_struct *, header,
306 struct __user_cap_data_struct *, data);
307 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
308 _syscall2(int, ioprio_get, int, which, int, who)
309 #endif
310 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
311 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
312 #endif
313 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
314 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
315 #endif
317 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
318 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
319 unsigned long, idx1, unsigned long, idx2)
320 #endif
322 static bitmask_transtbl fcntl_flags_tbl[] = {
323 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
324 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
325 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
326 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
327 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
328 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
329 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
330 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
331 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
332 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
333 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
334 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
335 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
336 #if defined(O_DIRECT)
337 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
338 #endif
339 #if defined(O_NOATIME)
340 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
341 #endif
342 #if defined(O_CLOEXEC)
343 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
344 #endif
345 #if defined(O_PATH)
346 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
347 #endif
348 #if defined(O_TMPFILE)
349 { TARGET_O_TMPFILE, TARGET_O_TMPFILE, O_TMPFILE, O_TMPFILE },
350 #endif
351 /* Don't terminate the list prematurely on 64-bit host+guest. */
352 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
353 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
354 #endif
355 { 0, 0, 0, 0 }
358 enum {
359 QEMU_IFLA_BR_UNSPEC,
360 QEMU_IFLA_BR_FORWARD_DELAY,
361 QEMU_IFLA_BR_HELLO_TIME,
362 QEMU_IFLA_BR_MAX_AGE,
363 QEMU_IFLA_BR_AGEING_TIME,
364 QEMU_IFLA_BR_STP_STATE,
365 QEMU_IFLA_BR_PRIORITY,
366 QEMU_IFLA_BR_VLAN_FILTERING,
367 QEMU_IFLA_BR_VLAN_PROTOCOL,
368 QEMU_IFLA_BR_GROUP_FWD_MASK,
369 QEMU_IFLA_BR_ROOT_ID,
370 QEMU_IFLA_BR_BRIDGE_ID,
371 QEMU_IFLA_BR_ROOT_PORT,
372 QEMU_IFLA_BR_ROOT_PATH_COST,
373 QEMU_IFLA_BR_TOPOLOGY_CHANGE,
374 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
375 QEMU_IFLA_BR_HELLO_TIMER,
376 QEMU_IFLA_BR_TCN_TIMER,
377 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
378 QEMU_IFLA_BR_GC_TIMER,
379 QEMU_IFLA_BR_GROUP_ADDR,
380 QEMU_IFLA_BR_FDB_FLUSH,
381 QEMU_IFLA_BR_MCAST_ROUTER,
382 QEMU_IFLA_BR_MCAST_SNOOPING,
383 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
384 QEMU_IFLA_BR_MCAST_QUERIER,
385 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
386 QEMU_IFLA_BR_MCAST_HASH_MAX,
387 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
388 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
389 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
390 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
391 QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
392 QEMU_IFLA_BR_MCAST_QUERY_INTVL,
393 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
394 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
395 QEMU_IFLA_BR_NF_CALL_IPTABLES,
396 QEMU_IFLA_BR_NF_CALL_IP6TABLES,
397 QEMU_IFLA_BR_NF_CALL_ARPTABLES,
398 QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
399 QEMU_IFLA_BR_PAD,
400 QEMU_IFLA_BR_VLAN_STATS_ENABLED,
401 QEMU_IFLA_BR_MCAST_STATS_ENABLED,
402 QEMU___IFLA_BR_MAX,
405 enum {
406 QEMU_IFLA_UNSPEC,
407 QEMU_IFLA_ADDRESS,
408 QEMU_IFLA_BROADCAST,
409 QEMU_IFLA_IFNAME,
410 QEMU_IFLA_MTU,
411 QEMU_IFLA_LINK,
412 QEMU_IFLA_QDISC,
413 QEMU_IFLA_STATS,
414 QEMU_IFLA_COST,
415 QEMU_IFLA_PRIORITY,
416 QEMU_IFLA_MASTER,
417 QEMU_IFLA_WIRELESS,
418 QEMU_IFLA_PROTINFO,
419 QEMU_IFLA_TXQLEN,
420 QEMU_IFLA_MAP,
421 QEMU_IFLA_WEIGHT,
422 QEMU_IFLA_OPERSTATE,
423 QEMU_IFLA_LINKMODE,
424 QEMU_IFLA_LINKINFO,
425 QEMU_IFLA_NET_NS_PID,
426 QEMU_IFLA_IFALIAS,
427 QEMU_IFLA_NUM_VF,
428 QEMU_IFLA_VFINFO_LIST,
429 QEMU_IFLA_STATS64,
430 QEMU_IFLA_VF_PORTS,
431 QEMU_IFLA_PORT_SELF,
432 QEMU_IFLA_AF_SPEC,
433 QEMU_IFLA_GROUP,
434 QEMU_IFLA_NET_NS_FD,
435 QEMU_IFLA_EXT_MASK,
436 QEMU_IFLA_PROMISCUITY,
437 QEMU_IFLA_NUM_TX_QUEUES,
438 QEMU_IFLA_NUM_RX_QUEUES,
439 QEMU_IFLA_CARRIER,
440 QEMU_IFLA_PHYS_PORT_ID,
441 QEMU_IFLA_CARRIER_CHANGES,
442 QEMU_IFLA_PHYS_SWITCH_ID,
443 QEMU_IFLA_LINK_NETNSID,
444 QEMU_IFLA_PHYS_PORT_NAME,
445 QEMU_IFLA_PROTO_DOWN,
446 QEMU_IFLA_GSO_MAX_SEGS,
447 QEMU_IFLA_GSO_MAX_SIZE,
448 QEMU_IFLA_PAD,
449 QEMU_IFLA_XDP,
450 QEMU___IFLA_MAX
453 enum {
454 QEMU_IFLA_BRPORT_UNSPEC,
455 QEMU_IFLA_BRPORT_STATE,
456 QEMU_IFLA_BRPORT_PRIORITY,
457 QEMU_IFLA_BRPORT_COST,
458 QEMU_IFLA_BRPORT_MODE,
459 QEMU_IFLA_BRPORT_GUARD,
460 QEMU_IFLA_BRPORT_PROTECT,
461 QEMU_IFLA_BRPORT_FAST_LEAVE,
462 QEMU_IFLA_BRPORT_LEARNING,
463 QEMU_IFLA_BRPORT_UNICAST_FLOOD,
464 QEMU_IFLA_BRPORT_PROXYARP,
465 QEMU_IFLA_BRPORT_LEARNING_SYNC,
466 QEMU_IFLA_BRPORT_PROXYARP_WIFI,
467 QEMU_IFLA_BRPORT_ROOT_ID,
468 QEMU_IFLA_BRPORT_BRIDGE_ID,
469 QEMU_IFLA_BRPORT_DESIGNATED_PORT,
470 QEMU_IFLA_BRPORT_DESIGNATED_COST,
471 QEMU_IFLA_BRPORT_ID,
472 QEMU_IFLA_BRPORT_NO,
473 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
474 QEMU_IFLA_BRPORT_CONFIG_PENDING,
475 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
476 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
477 QEMU_IFLA_BRPORT_HOLD_TIMER,
478 QEMU_IFLA_BRPORT_FLUSH,
479 QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
480 QEMU_IFLA_BRPORT_PAD,
481 QEMU___IFLA_BRPORT_MAX
484 enum {
485 QEMU_IFLA_INFO_UNSPEC,
486 QEMU_IFLA_INFO_KIND,
487 QEMU_IFLA_INFO_DATA,
488 QEMU_IFLA_INFO_XSTATS,
489 QEMU_IFLA_INFO_SLAVE_KIND,
490 QEMU_IFLA_INFO_SLAVE_DATA,
491 QEMU___IFLA_INFO_MAX,
494 enum {
495 QEMU_IFLA_INET_UNSPEC,
496 QEMU_IFLA_INET_CONF,
497 QEMU___IFLA_INET_MAX,
500 enum {
501 QEMU_IFLA_INET6_UNSPEC,
502 QEMU_IFLA_INET6_FLAGS,
503 QEMU_IFLA_INET6_CONF,
504 QEMU_IFLA_INET6_STATS,
505 QEMU_IFLA_INET6_MCAST,
506 QEMU_IFLA_INET6_CACHEINFO,
507 QEMU_IFLA_INET6_ICMP6STATS,
508 QEMU_IFLA_INET6_TOKEN,
509 QEMU_IFLA_INET6_ADDR_GEN_MODE,
510 QEMU___IFLA_INET6_MAX
/* Per-file-descriptor data/address translation layer.
 * Some fd types (netlink sockets, signalfd, ...) carry structured data
 * whose layout differs between target and host; a TargetFdTrans entry
 * registered for an fd supplies converters in each direction:
 *   - host_to_target_data:  fix up data read from the fd before it is
 *     copied to the guest;
 *   - target_to_host_data:  fix up guest data before writing to the fd;
 *   - target_to_host_addr:  convert a sockaddr supplied by the guest.
 * target_fd_trans is a table indexed by fd, grown on demand;
 * target_fd_max is its current allocated length (see fd_trans_register). */
513 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
514 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
515 typedef struct TargetFdTrans {
516 TargetFdDataFunc host_to_target_data;
517 TargetFdDataFunc target_to_host_data;
518 TargetFdAddrFunc target_to_host_addr;
519 } TargetFdTrans;
521 static TargetFdTrans **target_fd_trans;
523 static unsigned int target_fd_max;
525 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
527 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
528 return target_fd_trans[fd]->target_to_host_data;
530 return NULL;
533 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
535 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
536 return target_fd_trans[fd]->host_to_target_data;
538 return NULL;
541 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
543 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
544 return target_fd_trans[fd]->target_to_host_addr;
546 return NULL;
549 static void fd_trans_register(int fd, TargetFdTrans *trans)
551 unsigned int oldmax;
553 if (fd >= target_fd_max) {
554 oldmax = target_fd_max;
555 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
556 target_fd_trans = g_renew(TargetFdTrans *,
557 target_fd_trans, target_fd_max);
558 memset((void *)(target_fd_trans + oldmax), 0,
559 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
561 target_fd_trans[fd] = trans;
564 static void fd_trans_unregister(int fd)
566 if (fd >= 0 && fd < target_fd_max) {
567 target_fd_trans[fd] = NULL;
571 static void fd_trans_dup(int oldfd, int newfd)
573 fd_trans_unregister(newfd);
574 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
575 fd_trans_register(newfd, target_fd_trans[oldfd]);
static int sys_getcwd1(char *buf, size_t size)
{
    /* Wrapper giving getcwd() the kernel syscall's return convention:
     * number of bytes stored (path plus trailing NUL) on success,
     * -1 with errno set by getcwd() on failure. */
    char *cwd = getcwd(buf, size);

    if (cwd == NULL) {
        return -1; /* errno already set */
    }
    return strlen(cwd) + 1;
}
/* utimensat(): call the raw host syscall when the kernel headers define
 * __NR_utimensat; otherwise provide an ENOSYS stub so the emulated
 * syscall fails cleanly instead of not linking. */
588 #ifdef TARGET_NR_utimensat
589 #if defined(__NR_utimensat)
590 #define __NR_sys_utimensat __NR_utimensat
591 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
592 const struct timespec *,tsp,int,flags)
593 #else
594 static int sys_utimensat(int dirfd, const char *pathname,
595 const struct timespec times[2], int flags)
597 errno = ENOSYS;
598 return -1;
600 #endif
601 #endif /* TARGET_NR_utimensat */
/* renameat2(): use the raw host syscall when available.  The fallback
 * can only honour flags == 0 (plain renameat()); any RENAME_* flag is
 * reported as ENOSYS since there is no way to emulate it via libc. */
603 #ifdef TARGET_NR_renameat2
604 #if defined(__NR_renameat2)
605 #define __NR_sys_renameat2 __NR_renameat2
606 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
607 const char *, new, unsigned int, flags)
608 #else
609 static int sys_renameat2(int oldfd, const char *old,
610 int newfd, const char *new, int flags)
612 if (flags == 0) {
613 return renameat(oldfd, old, newfd, new);
615 errno = ENOSYS;
616 return -1;
618 #endif
619 #endif /* TARGET_NR_renameat2 */
/* inotify support: thin wrappers around the libc inotify functions,
 * each compiled only when both the target and the host expose the
 * corresponding syscall number.  Without CONFIG_INOTIFY the TARGET_NR_*
 * macros are undefined so the main syscall switch reports ENOSYS --
 * guests generally cope with inotify being absent. */
621 #ifdef CONFIG_INOTIFY
622 #include <sys/inotify.h>
624 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
625 static int sys_inotify_init(void)
627 return (inotify_init());
629 #endif
630 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
631 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
633 return (inotify_add_watch(fd, pathname, mask));
635 #endif
636 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
637 static int sys_inotify_rm_watch(int fd, int32_t wd)
639 return (inotify_rm_watch(fd, wd));
641 #endif
642 #ifdef CONFIG_INOTIFY1
643 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
644 static int sys_inotify_init1(int flags)
646 return (inotify_init1(flags));
648 #endif
649 #endif
650 #else
651 /* Userspace can usually survive runtime without inotify */
652 #undef TARGET_NR_inotify_init
653 #undef TARGET_NR_inotify_init1
654 #undef TARGET_NR_inotify_add_watch
655 #undef TARGET_NR_inotify_rm_watch
656 #endif /* CONFIG_INOTIFY */
658 #if defined(TARGET_NR_prlimit64)
659 #ifndef __NR_prlimit64
660 # define __NR_prlimit64 -1
661 #endif
662 #define __NR_sys_prlimit64 __NR_prlimit64
663 /* The glibc rlimit structure may not be that used by the underlying syscall */
664 struct host_rlimit64 {
665 uint64_t rlim_cur;
666 uint64_t rlim_max;
668 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
669 const struct host_rlimit64 *, new_limit,
670 struct host_rlimit64 *, old_limit)
671 #endif
/* POSIX timer slot allocator: guest timer ids are indices into
 * g_posix_timers.  A slot holding 0 is free; next_free_host_timer()
 * claims the first free slot by writing a placeholder value 1 and
 * returns its index, or -1 when all 32 slots are in use.
 * NOTE(review): the check-then-set below is not atomic -- two threads
 * creating timers concurrently could claim the same slot (the FIXME
 * acknowledges this). */
674 #if defined(TARGET_NR_timer_create)
675 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
676 static timer_t g_posix_timers[32] = { 0, } ;
678 static inline int next_free_host_timer(void)
680 int k ;
681 /* FIXME: Does finding the next free slot require a lock? */
682 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
683 if (g_posix_timers[k] == 0) {
684 g_posix_timers[k] = (timer_t) 1;
685 return k;
688 return -1;
690 #endif
/* regpairs_aligned(): report whether the target ABI passes 64-bit
 * syscall arguments in an even/odd (aligned) register pair, which
 * forces a padding argument before them.  Selected per target arch:
 * ARM depends on the EABI flag at runtime; MIPS o32 and PPC32 always
 * align; SH4 aligns only for pread64/pwrite64 (hence the num check);
 * everything else never aligns. */
692 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
693 #ifdef TARGET_ARM
694 static inline int regpairs_aligned(void *cpu_env, int num)
696 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
698 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
699 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
700 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
701 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
702 * of registers which translates to the same as ARM/MIPS, because we start with
703 * r3 as arg1 */
704 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
705 #elif defined(TARGET_SH4)
706 /* SH4 doesn't align register pairs, except for p{read,write}64 */
707 static inline int regpairs_aligned(void *cpu_env, int num)
709 switch (num) {
710 case TARGET_NR_pread64:
711 case TARGET_NR_pwrite64:
712 return 1;
714 default:
715 return 0;
718 #else
719 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
720 #endif
722 #define ERRNO_TABLE_SIZE 1200
724 /* target_to_host_errno_table[] is initialized from
725 * host_to_target_errno_table[] in syscall_init(). */
726 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
730 * This list is the union of errno values overridden in asm-<arch>/errno.h
731 * minus the errnos that are not actually generic to all archs.
733 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
734 [EAGAIN] = TARGET_EAGAIN,
735 [EIDRM] = TARGET_EIDRM,
736 [ECHRNG] = TARGET_ECHRNG,
737 [EL2NSYNC] = TARGET_EL2NSYNC,
738 [EL3HLT] = TARGET_EL3HLT,
739 [EL3RST] = TARGET_EL3RST,
740 [ELNRNG] = TARGET_ELNRNG,
741 [EUNATCH] = TARGET_EUNATCH,
742 [ENOCSI] = TARGET_ENOCSI,
743 [EL2HLT] = TARGET_EL2HLT,
744 [EDEADLK] = TARGET_EDEADLK,
745 [ENOLCK] = TARGET_ENOLCK,
746 [EBADE] = TARGET_EBADE,
747 [EBADR] = TARGET_EBADR,
748 [EXFULL] = TARGET_EXFULL,
749 [ENOANO] = TARGET_ENOANO,
750 [EBADRQC] = TARGET_EBADRQC,
751 [EBADSLT] = TARGET_EBADSLT,
752 [EBFONT] = TARGET_EBFONT,
753 [ENOSTR] = TARGET_ENOSTR,
754 [ENODATA] = TARGET_ENODATA,
755 [ETIME] = TARGET_ETIME,
756 [ENOSR] = TARGET_ENOSR,
757 [ENONET] = TARGET_ENONET,
758 [ENOPKG] = TARGET_ENOPKG,
759 [EREMOTE] = TARGET_EREMOTE,
760 [ENOLINK] = TARGET_ENOLINK,
761 [EADV] = TARGET_EADV,
762 [ESRMNT] = TARGET_ESRMNT,
763 [ECOMM] = TARGET_ECOMM,
764 [EPROTO] = TARGET_EPROTO,
765 [EDOTDOT] = TARGET_EDOTDOT,
766 [EMULTIHOP] = TARGET_EMULTIHOP,
767 [EBADMSG] = TARGET_EBADMSG,
768 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
769 [EOVERFLOW] = TARGET_EOVERFLOW,
770 [ENOTUNIQ] = TARGET_ENOTUNIQ,
771 [EBADFD] = TARGET_EBADFD,
772 [EREMCHG] = TARGET_EREMCHG,
773 [ELIBACC] = TARGET_ELIBACC,
774 [ELIBBAD] = TARGET_ELIBBAD,
775 [ELIBSCN] = TARGET_ELIBSCN,
776 [ELIBMAX] = TARGET_ELIBMAX,
777 [ELIBEXEC] = TARGET_ELIBEXEC,
778 [EILSEQ] = TARGET_EILSEQ,
779 [ENOSYS] = TARGET_ENOSYS,
780 [ELOOP] = TARGET_ELOOP,
781 [ERESTART] = TARGET_ERESTART,
782 [ESTRPIPE] = TARGET_ESTRPIPE,
783 [ENOTEMPTY] = TARGET_ENOTEMPTY,
784 [EUSERS] = TARGET_EUSERS,
785 [ENOTSOCK] = TARGET_ENOTSOCK,
786 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
787 [EMSGSIZE] = TARGET_EMSGSIZE,
788 [EPROTOTYPE] = TARGET_EPROTOTYPE,
789 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
790 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
791 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
792 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
793 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
794 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
795 [EADDRINUSE] = TARGET_EADDRINUSE,
796 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
797 [ENETDOWN] = TARGET_ENETDOWN,
798 [ENETUNREACH] = TARGET_ENETUNREACH,
799 [ENETRESET] = TARGET_ENETRESET,
800 [ECONNABORTED] = TARGET_ECONNABORTED,
801 [ECONNRESET] = TARGET_ECONNRESET,
802 [ENOBUFS] = TARGET_ENOBUFS,
803 [EISCONN] = TARGET_EISCONN,
804 [ENOTCONN] = TARGET_ENOTCONN,
805 [EUCLEAN] = TARGET_EUCLEAN,
806 [ENOTNAM] = TARGET_ENOTNAM,
807 [ENAVAIL] = TARGET_ENAVAIL,
808 [EISNAM] = TARGET_EISNAM,
809 [EREMOTEIO] = TARGET_EREMOTEIO,
810 [EDQUOT] = TARGET_EDQUOT,
811 [ESHUTDOWN] = TARGET_ESHUTDOWN,
812 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
813 [ETIMEDOUT] = TARGET_ETIMEDOUT,
814 [ECONNREFUSED] = TARGET_ECONNREFUSED,
815 [EHOSTDOWN] = TARGET_EHOSTDOWN,
816 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
817 [EALREADY] = TARGET_EALREADY,
818 [EINPROGRESS] = TARGET_EINPROGRESS,
819 [ESTALE] = TARGET_ESTALE,
820 [ECANCELED] = TARGET_ECANCELED,
821 [ENOMEDIUM] = TARGET_ENOMEDIUM,
822 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
823 #ifdef ENOKEY
824 [ENOKEY] = TARGET_ENOKEY,
825 #endif
826 #ifdef EKEYEXPIRED
827 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
828 #endif
829 #ifdef EKEYREVOKED
830 [EKEYREVOKED] = TARGET_EKEYREVOKED,
831 #endif
832 #ifdef EKEYREJECTED
833 [EKEYREJECTED] = TARGET_EKEYREJECTED,
834 #endif
835 #ifdef EOWNERDEAD
836 [EOWNERDEAD] = TARGET_EOWNERDEAD,
837 #endif
838 #ifdef ENOTRECOVERABLE
839 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
840 #endif
841 #ifdef ENOMSG
842 [ENOMSG] = TARGET_ENOMSG,
843 #endif
844 #ifdef ERKFILL
845 [ERFKILL] = TARGET_ERFKILL,
846 #endif
847 #ifdef EHWPOISON
848 [EHWPOISON] = TARGET_EHWPOISON,
849 #endif
852 static inline int host_to_target_errno(int err)
854 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
855 host_to_target_errno_table[err]) {
856 return host_to_target_errno_table[err];
858 return err;
861 static inline int target_to_host_errno(int err)
863 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
864 target_to_host_errno_table[err]) {
865 return target_to_host_errno_table[err];
867 return err;
/* Convert a host libc-style result (-1 + errno on failure) into the
 * guest syscall convention: a negative target errno on failure, the
 * value itself otherwise. */
870 static inline abi_long get_errno(abi_long ret)
872 if (ret == -1)
873 return -host_to_target_errno(errno);
874 else
875 return ret;
878 static inline int is_error(abi_long ret)
880 return (abi_ulong)ret >= (abi_ulong)(-4096);
883 const char *target_strerror(int err)
885 if (err == TARGET_ERESTARTSYS) {
886 return "To be restarted";
888 if (err == TARGET_QEMU_ESIGRETURN) {
889 return "Successful exit from sigreturn";
892 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
893 return NULL;
895 return strerror(target_to_host_errno(err));
898 #define safe_syscall0(type, name) \
899 static type safe_##name(void) \
901 return safe_syscall(__NR_##name); \
904 #define safe_syscall1(type, name, type1, arg1) \
905 static type safe_##name(type1 arg1) \
907 return safe_syscall(__NR_##name, arg1); \
910 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
911 static type safe_##name(type1 arg1, type2 arg2) \
913 return safe_syscall(__NR_##name, arg1, arg2); \
916 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
917 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
919 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
922 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
923 type4, arg4) \
924 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
926 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
929 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
930 type4, arg4, type5, arg5) \
931 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
932 type5 arg5) \
934 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
937 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
938 type4, arg4, type5, arg5, type6, arg6) \
939 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
940 type5 arg5, type6 arg6) \
942 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
945 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
946 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
947 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
948 int, flags, mode_t, mode)
949 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
950 struct rusage *, rusage)
951 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
952 int, options, struct rusage *, rusage)
953 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
954 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
955 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
956 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
957 struct timespec *, tsp, const sigset_t *, sigmask,
958 size_t, sigsetsize)
959 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
960 int, maxevents, int, timeout, const sigset_t *, sigmask,
961 size_t, sigsetsize)
962 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
963 const struct timespec *,timeout,int *,uaddr2,int,val3)
/* "Safe" syscall wrappers: the safe_syscallN() macros emit small wrapper
 * functions (safe_<name>()) that invoke the host syscall directly in a way
 * that lets QEMU's signal handling restart or interrupt them correctly.
 * Arguments are listed as (type, name) pairs.
 * NOTE(review): this chunk is a lossy web extraction — original file line
 * numbers are fused into the text and blank/brace lines were dropped;
 * code bytes are preserved as-is.
 */
964 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
965 safe_syscall2(int, kill, pid_t, pid, int, sig)
966 safe_syscall2(int, tkill, int, tid, int, sig)
967 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
968 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
969 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
970 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
971 unsigned long, pos_l, unsigned long, pos_h)
972 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
973 unsigned long, pos_l, unsigned long, pos_h)
974 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
975 socklen_t, addrlen)
976 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
977 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
978 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
979 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
980 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
981 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
982 safe_syscall2(int, flock, int, fd, int, operation)
983 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
984 const struct timespec *, uts, size_t, sigsetsize)
985 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
986 int, flags)
987 safe_syscall2(int, nanosleep, const struct timespec *, req,
988 struct timespec *, rem)
989 #ifdef TARGET_NR_clock_nanosleep
990 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
991 const struct timespec *, req, struct timespec *, rem)
992 #endif
/* Hosts with dedicated SysV IPC syscalls get direct wrappers... */
993 #ifdef __NR_msgsnd
994 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
995 int, flags)
996 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
997 long, msgtype, int, flags)
998 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
999 unsigned, nsops, const struct timespec *, timeout)
1000 #else
1001 /* This host kernel architecture uses a single ipc syscall; fake up
1002 * wrappers for the sub-operations to hide this implementation detail.
1003 * Annoyingly we can't include linux/ipc.h to get the constant definitions
1004 * for the call parameter because some structs in there conflict with the
1005 * sys/ipc.h ones. So we just define them here, and rely on them being
1006 * the same for all host architectures.
 */
1008 #define Q_SEMTIMEDOP 4
1009 #define Q_MSGSND 11
1010 #define Q_MSGRCV 12
1011 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
1013 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
1014 void *, ptr, long, fifth)
/* msgsnd via the multiplexed ipc syscall (version 0). */
1015 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
1017 return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
/* msgrcv via ipc; version 1 passes msgp and type as separate args. */
1019 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
1021 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
/* semtimedop via ipc; the timeout travels in the "fifth" slot. */
1023 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
1024 const struct timespec *timeout)
1026 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
1027 (long)timeout);
1029 #endif
1030 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1031 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
1032 size_t, len, unsigned, prio, const struct timespec *, timeout)
1033 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
1034 size_t, len, unsigned *, prio, const struct timespec *, timeout)
1035 #endif
1036 /* We do ioctl like this rather than via safe_syscall3 to preserve the
1037 * "third argument might be integer or pointer or not present" behaviour of
1038 * the libc function.
 */
1040 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
1041 /* Similarly for fcntl. Note that callers must always:
1042 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
1043 * use the flock64 struct rather than unsuffixed flock
1044 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
1046 #ifdef __NR_fcntl64
1047 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
1048 #else
1049 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
1050 #endif
/* Translate a host socket type value (base type in the low nibble plus
 * SOCK_CLOEXEC/SOCK_NONBLOCK flag bits, where the host defines them) into
 * the target's TARGET_SOCK_* encoding. Unrecognised base types are passed
 * through unchanged.
 */
1052 static inline int host_to_target_sock_type(int host_type)
1054 int target_type;
1056 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
1057 case SOCK_DGRAM:
1058 target_type = TARGET_SOCK_DGRAM;
1059 break;
1060 case SOCK_STREAM:
1061 target_type = TARGET_SOCK_STREAM;
1062 break;
1063 default:
1064 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
1065 break;
1068 #if defined(SOCK_CLOEXEC)
1069 if (host_type & SOCK_CLOEXEC) {
1070 target_type |= TARGET_SOCK_CLOEXEC;
1072 #endif
1074 #if defined(SOCK_NONBLOCK)
1075 if (host_type & SOCK_NONBLOCK) {
1076 target_type |= TARGET_SOCK_NONBLOCK;
1078 #endif
1080 return target_type;
/* Emulated program-break state for the guest:
 *   target_brk          - current break
 *   target_original_brk - break at process start (lower bound for do_brk)
 *   brk_page            - first host page boundary above target_brk
 */
1083 static abi_ulong target_brk;
1084 static abi_ulong target_original_brk;
1085 static abi_ulong brk_page;
/* Initialise the brk bookkeeping; called with the initial break address. */
1087 void target_set_brk(abi_ulong new_brk)
1089 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
1090 brk_page = HOST_PAGE_ALIGN(target_brk);
/* Debug tracing for do_brk(); uncomment the first definition to enable. */
1093 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
1094 #define DEBUGF_BRK(message, args...)
1096 /* do_brk() must return target values and target errnos. */
/* Implements the guest brk(2): returns the (possibly unchanged) break on
 * success. new_brk == 0 or a value below the original break queries the
 * current break; growth within the already-reserved page is handled by
 * zeroing; otherwise more anonymous memory is mapped after brk_page.
 */
1097 abi_long do_brk(abi_ulong new_brk)
1099 abi_long mapped_addr;
1100 abi_ulong new_alloc_size;
1102 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
1104 if (!new_brk) {
1105 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
1106 return target_brk;
1108 if (new_brk < target_original_brk) {
1109 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
1110 target_brk);
1111 return target_brk;
1114 /* If the new brk is less than the highest page reserved to the
1115 * target heap allocation, set it and we're almost done... */
1116 if (new_brk <= brk_page) {
1117 /* Heap contents are initialized to zero, as for anonymous
1118 * mapped pages. */
1119 if (new_brk > target_brk) {
1120 memset(g2h(target_brk), 0, new_brk - target_brk);
1122 target_brk = new_brk;
1123 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
1124 return target_brk;
1127 /* We need to allocate more memory after the brk... Note that
1128 * we don't use MAP_FIXED because that will map over the top of
1129 * any existing mapping (like the one with the host libc or qemu
1130 * itself); instead we treat "mapped but at wrong address" as
1131 * a failure and unmap again.
 */
1133 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
1134 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
1135 PROT_READ|PROT_WRITE,
1136 MAP_ANON|MAP_PRIVATE, 0, 0));
1138 if (mapped_addr == brk_page) {
1139 /* Heap contents are initialized to zero, as for anonymous
1140 * mapped pages. Technically the new pages are already
1141 * initialized to zero since they *are* anonymous mapped
1142 * pages, however we have to take care with the contents that
1143 * come from the remaining part of the previous page: it may
1144 * contains garbage data due to a previous heap usage (grown
1145 * then shrunken). */
1146 memset(g2h(target_brk), 0, brk_page - target_brk);
1148 target_brk = new_brk;
1149 brk_page = HOST_PAGE_ALIGN(target_brk);
1150 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
1151 target_brk);
1152 return target_brk;
1153 } else if (mapped_addr != -1) {
1154 /* Mapped but at wrong address, meaning there wasn't actually
1155 * enough space for this brk.
 */
1157 target_munmap(mapped_addr, new_alloc_size);
1158 mapped_addr = -1;
1159 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
1161 else {
1162 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
1165 #if defined(TARGET_ALPHA)
1166 /* We (partially) emulate OSF/1 on Alpha, which requires we
1167 return a proper errno, not an unchanged brk value. */
1168 return -TARGET_ENOMEM;
1169 #endif
1170 /* For everything else, return the previous break. */
1171 return target_brk;
/* Copy an fd_set of n descriptors from guest memory (an array of
 * abi_ulong words) into a host fd_set, converting bit layout/endianness
 * word by word. Returns 0 or -TARGET_EFAULT.
 */
1174 static inline abi_long copy_from_user_fdset(fd_set *fds,
1175 abi_ulong target_fds_addr,
1176 int n)
1178 int i, nw, j, k;
1179 abi_ulong b, *target_fds;
1181 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1182 if (!(target_fds = lock_user(VERIFY_READ,
1183 target_fds_addr,
1184 sizeof(abi_ulong) * nw,
1185 1)))
1186 return -TARGET_EFAULT;
1188 FD_ZERO(fds);
1189 k = 0;
1190 for (i = 0; i < nw; i++) {
1191 /* grab the abi_ulong */
1192 __get_user(b, &target_fds[i]);
1193 for (j = 0; j < TARGET_ABI_BITS; j++) {
1194 /* check the bit inside the abi_ulong */
1195 if ((b >> j) & 1)
1196 FD_SET(k, fds);
1197 k++;
1201 unlock_user(target_fds, target_fds_addr, 0);
1203 return 0;
/* Like copy_from_user_fdset(), but a guest NULL (address 0) yields a host
 * NULL pointer in *fds_ptr instead of an fd_set — matching select()'s
 * "no set supplied" convention. Returns 0 or -TARGET_EFAULT.
 */
1206 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1207 abi_ulong target_fds_addr,
1208 int n)
1210 if (target_fds_addr) {
1211 if (copy_from_user_fdset(fds, target_fds_addr, n))
1212 return -TARGET_EFAULT;
1213 *fds_ptr = fds;
1214 } else {
1215 *fds_ptr = NULL;
1217 return 0;
/* Inverse of copy_from_user_fdset(): serialise a host fd_set of n
 * descriptors back into the guest's abi_ulong bit-array representation.
 * Returns 0 or -TARGET_EFAULT.
 */
1220 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1221 const fd_set *fds,
1222 int n)
1224 int i, nw, j, k;
1225 abi_long v;
1226 abi_ulong *target_fds;
1228 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1229 if (!(target_fds = lock_user(VERIFY_WRITE,
1230 target_fds_addr,
1231 sizeof(abi_ulong) * nw,
1232 0)))
1233 return -TARGET_EFAULT;
1235 k = 0;
1236 for (i = 0; i < nw; i++) {
1237 v = 0;
1238 for (j = 0; j < TARGET_ABI_BITS; j++) {
1239 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1240 k++;
1242 __put_user(v, &target_fds[i]);
1245 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1247 return 0;
/* Host clock-tick rate: Alpha kernels use 1024 Hz, everything else 100. */
1250 #if defined(__alpha__)
1251 #define HOST_HZ 1024
1252 #else
1253 #define HOST_HZ 100
1254 #endif
/* Rescale a clock_t tick count from host HZ to the target's HZ
 * (identity when they match; 64-bit intermediate avoids overflow).
 */
1256 static inline abi_long host_to_target_clock_t(long ticks)
1258 #if HOST_HZ == TARGET_HZ
1259 return ticks;
1260 #else
1261 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1262 #endif
/* Copy a host struct rusage into guest memory at target_addr, byte-swapping
 * every field to target endianness. Returns 0 or -TARGET_EFAULT.
 */
1265 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1266 const struct rusage *rusage)
1268 struct target_rusage *target_rusage;
1270 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1271 return -TARGET_EFAULT;
1272 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1273 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1274 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1275 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1276 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1277 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1278 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1279 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1280 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1281 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1282 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1283 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1284 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1285 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1286 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1287 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1288 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1289 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1290 unlock_user_struct(target_rusage, target_addr, 1);
1292 return 0;
/* Convert a target rlimit value to the host rlim_t. TARGET_RLIM_INFINITY
 * maps to RLIM_INFINITY, as does any value that does not round-trip
 * through rlim_t (i.e. would be truncated on a narrower host type).
 */
1295 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1297 abi_ulong target_rlim_swap;
1298 rlim_t result;
1300 target_rlim_swap = tswapal(target_rlim);
1301 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1302 return RLIM_INFINITY;
1304 result = target_rlim_swap;
1305 if (target_rlim_swap != (rlim_t)result)
1306 return RLIM_INFINITY;
1308 return result;
/* Convert a host rlim_t to the target representation (byte-swapped).
 * RLIM_INFINITY, or any value not representable in the target's abi_long,
 * becomes TARGET_RLIM_INFINITY.
 */
1311 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1313 abi_ulong target_rlim_swap;
1314 abi_ulong result;
1316 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1317 target_rlim_swap = TARGET_RLIM_INFINITY;
1318 else
1319 target_rlim_swap = rlim;
1320 result = tswapal(target_rlim_swap);
1322 return result;
/* Map a target TARGET_RLIMIT_* resource code to the host RLIMIT_* value;
 * unknown codes are passed through unchanged.
 */
1325 static inline int target_to_host_resource(int code)
1327 switch (code) {
1328 case TARGET_RLIMIT_AS:
1329 return RLIMIT_AS;
1330 case TARGET_RLIMIT_CORE:
1331 return RLIMIT_CORE;
1332 case TARGET_RLIMIT_CPU:
1333 return RLIMIT_CPU;
1334 case TARGET_RLIMIT_DATA:
1335 return RLIMIT_DATA;
1336 case TARGET_RLIMIT_FSIZE:
1337 return RLIMIT_FSIZE;
1338 case TARGET_RLIMIT_LOCKS:
1339 return RLIMIT_LOCKS;
1340 case TARGET_RLIMIT_MEMLOCK:
1341 return RLIMIT_MEMLOCK;
1342 case TARGET_RLIMIT_MSGQUEUE:
1343 return RLIMIT_MSGQUEUE;
1344 case TARGET_RLIMIT_NICE:
1345 return RLIMIT_NICE;
1346 case TARGET_RLIMIT_NOFILE:
1347 return RLIMIT_NOFILE;
1348 case TARGET_RLIMIT_NPROC:
1349 return RLIMIT_NPROC;
1350 case TARGET_RLIMIT_RSS:
1351 return RLIMIT_RSS;
1352 case TARGET_RLIMIT_RTPRIO:
1353 return RLIMIT_RTPRIO;
1354 case TARGET_RLIMIT_SIGPENDING:
1355 return RLIMIT_SIGPENDING;
1356 case TARGET_RLIMIT_STACK:
1357 return RLIMIT_STACK;
1358 default:
1359 return code;
/* Read a struct timeval from guest memory into *tv (fields byte-swapped
 * by __get_user). Returns 0 or -TARGET_EFAULT.
 */
1363 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1364 abi_ulong target_tv_addr)
1366 struct target_timeval *target_tv;
1368 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1369 return -TARGET_EFAULT;
1371 __get_user(tv->tv_sec, &target_tv->tv_sec);
1372 __get_user(tv->tv_usec, &target_tv->tv_usec);
1374 unlock_user_struct(target_tv, target_tv_addr, 0);
1376 return 0;
/* Write a host struct timeval into guest memory (fields byte-swapped by
 * __put_user). Returns 0 or -TARGET_EFAULT.
 */
1379 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1380 const struct timeval *tv)
1382 struct target_timeval *target_tv;
1384 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1385 return -TARGET_EFAULT;
1387 __put_user(tv->tv_sec, &target_tv->tv_sec);
1388 __put_user(tv->tv_usec, &target_tv->tv_usec);
1390 unlock_user_struct(target_tv, target_tv_addr, 1);
1392 return 0;
/* Read a struct timezone from guest memory into *tz.
 * Returns 0 or -TARGET_EFAULT.
 */
1395 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1396 abi_ulong target_tz_addr)
1398 struct target_timezone *target_tz;
1400 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1401 return -TARGET_EFAULT;
1404 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1405 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1407 unlock_user_struct(target_tz, target_tz_addr, 0);
1409 return 0;
/* POSIX message-queue attribute marshalling, only when both target and
 * host support mq_open.
 */
1412 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1413 #include <mqueue.h>
/* Read a struct mq_attr from guest memory. Returns 0 or -TARGET_EFAULT. */
1415 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1416 abi_ulong target_mq_attr_addr)
1418 struct target_mq_attr *target_mq_attr;
1420 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1421 target_mq_attr_addr, 1))
1422 return -TARGET_EFAULT;
1424 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1425 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1426 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1427 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1429 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1431 return 0;
/* Write a struct mq_attr into guest memory. Returns 0 or -TARGET_EFAULT. */
1434 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1435 const struct mq_attr *attr)
1437 struct target_mq_attr *target_mq_attr;
1439 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1440 target_mq_attr_addr, 0))
1441 return -TARGET_EFAULT;
1443 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1444 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1445 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1446 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1448 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1450 return 0;
1452 #endif
1454 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1455 /* do_select() must return target values and target errnos. */
/* Emulate select(): copy in the three fd sets and optional timeout from
 * guest memory, run the host pselect6, then copy the (modified) sets and
 * remaining timeout back out.
 */
1456 static abi_long do_select(int n,
1457 abi_ulong rfd_addr, abi_ulong wfd_addr,
1458 abi_ulong efd_addr, abi_ulong target_tv_addr)
1460 fd_set rfds, wfds, efds;
1461 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1462 struct timeval tv;
1463 struct timespec ts, *ts_ptr;
1464 abi_long ret;
1466 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1467 if (ret) {
1468 return ret;
1470 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1471 if (ret) {
1472 return ret;
1474 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1475 if (ret) {
1476 return ret;
/* select's timeval is converted to pselect6's timespec. */
1479 if (target_tv_addr) {
1480 if (copy_from_user_timeval(&tv, target_tv_addr))
1481 return -TARGET_EFAULT;
1482 ts.tv_sec = tv.tv_sec;
1483 ts.tv_nsec = tv.tv_usec * 1000;
1484 ts_ptr = &ts;
1485 } else {
1486 ts_ptr = NULL;
1489 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1490 ts_ptr, NULL));
1492 if (!is_error(ret)) {
1493 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1494 return -TARGET_EFAULT;
1495 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1496 return -TARGET_EFAULT;
1497 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1498 return -TARGET_EFAULT;
1500 if (target_tv_addr) {
1501 tv.tv_sec = ts.tv_sec;
1502 tv.tv_usec = ts.tv_nsec / 1000;
1503 if (copy_to_user_timeval(target_tv_addr, &tv)) {
1504 return -TARGET_EFAULT;
1509 return ret;
/* Emulate the "old" single-argument select ABI, where arg1 points at a
 * struct bundling n and the four pointers; unpack it and defer to
 * do_select().
 */
1512 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1513 static abi_long do_old_select(abi_ulong arg1)
1515 struct target_sel_arg_struct *sel;
1516 abi_ulong inp, outp, exp, tvp;
1517 long nsel;
1519 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1520 return -TARGET_EFAULT;
1523 nsel = tswapal(sel->n);
1524 inp = tswapal(sel->inp);
1525 outp = tswapal(sel->outp);
1526 exp = tswapal(sel->exp);
1527 tvp = tswapal(sel->tvp);
1529 unlock_user_struct(sel, arg1, 0);
1531 return do_select(nsel, inp, outp, exp, tvp);
1533 #endif
1534 #endif
/* Thin wrapper around the host pipe2(); -ENOSYS when the host libc lacks
 * it (CONFIG_PIPE2 unset).
 */
1536 static abi_long do_pipe2(int host_pipe[], int flags)
1538 #ifdef CONFIG_PIPE2
1539 return pipe2(host_pipe, flags);
1540 #else
1541 return -ENOSYS;
1542 #endif
/* Emulate pipe()/pipe2(). For the legacy pipe syscall (!is_pipe2), some
 * targets return the second fd in a CPU register rather than writing both
 * to guest memory; otherwise the pair is stored at pipedes.
 */
1545 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1546 int flags, int is_pipe2)
1548 int host_pipe[2];
1549 abi_long ret;
1550 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1552 if (is_error(ret))
1553 return get_errno(ret);
1555 /* Several targets have special calling conventions for the original
1556 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1557 if (!is_pipe2) {
1558 #if defined(TARGET_ALPHA)
1559 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1560 return host_pipe[0];
1561 #elif defined(TARGET_MIPS)
1562 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1563 return host_pipe[0];
1564 #elif defined(TARGET_SH4)
1565 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1566 return host_pipe[0];
1567 #elif defined(TARGET_SPARC)
1568 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1569 return host_pipe[0];
1570 #endif
1573 if (put_user_s32(host_pipe[0], pipedes)
1574 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1575 return -TARGET_EFAULT;
1576 return get_errno(ret);
/* Convert a guest ip_mreq/ip_mreqn multicast request to the host struct.
 * The s_addr fields are kept in network byte order (copied verbatim);
 * imr_ifindex is only present when len indicates the mreqn variant.
 * Returns 0 or -TARGET_EFAULT.
 */
1579 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1580 abi_ulong target_addr,
1581 socklen_t len)
1583 struct target_ip_mreqn *target_smreqn;
1585 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1586 if (!target_smreqn)
1587 return -TARGET_EFAULT;
1588 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1589 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1590 if (len == sizeof(struct target_ip_mreqn))
1591 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1592 unlock_user(target_smreqn, target_addr, 0);
1594 return 0;
/* Convert a guest sockaddr at target_addr (len bytes) into a host
 * struct sockaddr. Per-fd translators (fd_trans_target_to_host_addr)
 * take precedence. AF_UNIX paths get their length fixed up, and
 * AF_NETLINK / AF_PACKET fields are byte-swapped. Returns 0 or
 * -TARGET_EFAULT.
 */
1597 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1598 abi_ulong target_addr,
1599 socklen_t len)
1601 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1602 sa_family_t sa_family;
1603 struct target_sockaddr *target_saddr;
1605 if (fd_trans_target_to_host_addr(fd)) {
1606 return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1609 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1610 if (!target_saddr)
1611 return -TARGET_EFAULT;
1613 sa_family = tswap16(target_saddr->sa_family);
1615 /* Oops. The caller might send a incomplete sun_path; sun_path
1616 * must be terminated by \0 (see the manual page), but
1617 * unfortunately it is quite common to specify sockaddr_un
1618 * length as "strlen(x->sun_path)" while it should be
1619 * "strlen(...) + 1". We'll fix that here if needed.
1620 * Linux kernel has a similar feature.
 */
1623 if (sa_family == AF_UNIX) {
1624 if (len < unix_maxlen && len > 0) {
1625 char *cp = (char*)target_saddr;
1627 if ( cp[len-1] && !cp[len] )
1628 len++;
1630 if (len > unix_maxlen)
1631 len = unix_maxlen;
1634 memcpy(addr, target_saddr, len);
1635 addr->sa_family = sa_family;
1636 if (sa_family == AF_NETLINK) {
1637 struct sockaddr_nl *nladdr;
1639 nladdr = (struct sockaddr_nl *)addr;
1640 nladdr->nl_pid = tswap32(nladdr->nl_pid);
1641 nladdr->nl_groups = tswap32(nladdr->nl_groups);
1642 } else if (sa_family == AF_PACKET) {
1643 struct target_sockaddr_ll *lladdr;
1645 lladdr = (struct target_sockaddr_ll *)addr;
1646 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1647 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1649 unlock_user(target_saddr, target_addr, 0);
1651 return 0;
/* Copy a host sockaddr into guest memory, byte-swapping sa_family (when
 * it fits in len) and the family-specific fields for AF_NETLINK,
 * AF_PACKET and AF_INET6. len == 0 is a no-op. Returns 0 or
 * -TARGET_EFAULT.
 */
1654 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1655 struct sockaddr *addr,
1656 socklen_t len)
1658 struct target_sockaddr *target_saddr;
1660 if (len == 0) {
1661 return 0;
1663 assert(addr);
1665 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1666 if (!target_saddr)
1667 return -TARGET_EFAULT;
1668 memcpy(target_saddr, addr, len);
1669 if (len >= offsetof(struct target_sockaddr, sa_family) +
1670 sizeof(target_saddr->sa_family)) {
1671 target_saddr->sa_family = tswap16(addr->sa_family);
1673 if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1674 struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1675 target_nl->nl_pid = tswap32(target_nl->nl_pid);
1676 target_nl->nl_groups = tswap32(target_nl->nl_groups);
1677 } else if (addr->sa_family == AF_PACKET) {
1678 struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1679 target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1680 target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1681 } else if (addr->sa_family == AF_INET6 &&
1682 len >= sizeof(struct target_sockaddr_in6)) {
1683 struct target_sockaddr_in6 *target_in6 =
1684 (struct target_sockaddr_in6 *)target_saddr;
1685 target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1687 unlock_user(target_saddr, target_addr, len);
1689 return 0;
/* Convert ancillary (control) data from a guest msghdr into the host
 * msghdr's control buffer, walking both cmsg chains in parallel.
 * SCM_RIGHTS fds and SCM_CREDENTIALS are converted field by field; other
 * payload types are copied verbatim with a warning. On return
 * msgh->msg_controllen reflects the space actually used.
 */
1692 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1693 struct target_msghdr *target_msgh)
1695 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1696 abi_long msg_controllen;
1697 abi_ulong target_cmsg_addr;
1698 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1699 socklen_t space = 0;
1701 msg_controllen = tswapal(target_msgh->msg_controllen);
1702 if (msg_controllen < sizeof (struct target_cmsghdr))
1703 goto the_end;
1704 target_cmsg_addr = tswapal(target_msgh->msg_control);
1705 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1706 target_cmsg_start = target_cmsg;
1707 if (!target_cmsg)
1708 return -TARGET_EFAULT;
1710 while (cmsg && target_cmsg) {
1711 void *data = CMSG_DATA(cmsg);
1712 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1714 int len = tswapal(target_cmsg->cmsg_len)
1715 - sizeof(struct target_cmsghdr);
1717 space += CMSG_SPACE(len);
1718 if (space > msgh->msg_controllen) {
1719 space -= CMSG_SPACE(len);
1720 /* This is a QEMU bug, since we allocated the payload
1721 * area ourselves (unlike overflow in host-to-target
1722 * conversion, which is just the guest giving us a buffer
1723 * that's too small). It can't happen for the payload types
1724 * we currently support; if it becomes an issue in future
1725 * we would need to improve our allocation strategy to
1726 * something more intelligent than "twice the size of the
1727 * target buffer we're reading from".
 */
1729 gemu_log("Host cmsg overflow\n");
1730 break;
1733 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1734 cmsg->cmsg_level = SOL_SOCKET;
1735 } else {
1736 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1738 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1739 cmsg->cmsg_len = CMSG_LEN(len);
1741 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1742 int *fd = (int *)data;
1743 int *target_fd = (int *)target_data;
1744 int i, numfds = len / sizeof(int);
1746 for (i = 0; i < numfds; i++) {
1747 __get_user(fd[i], target_fd + i);
1749 } else if (cmsg->cmsg_level == SOL_SOCKET
1750 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1751 struct ucred *cred = (struct ucred *)data;
1752 struct target_ucred *target_cred =
1753 (struct target_ucred *)target_data;
1755 __get_user(cred->pid, &target_cred->pid);
1756 __get_user(cred->uid, &target_cred->uid);
1757 __get_user(cred->gid, &target_cred->gid);
1758 } else {
1759 gemu_log("Unsupported ancillary data: %d/%d\n",
1760 cmsg->cmsg_level, cmsg->cmsg_type);
1761 memcpy(data, target_data, len);
1764 cmsg = CMSG_NXTHDR(msgh, cmsg);
1765 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1766 target_cmsg_start);
1768 unlock_user(target_cmsg, target_cmsg_addr, 0);
1769 the_end:
1770 msgh->msg_controllen = space;
1771 return 0;
/* Convert ancillary (control) data from a host msghdr back into the
 * guest's control buffer. Each supported level/type pair is converted
 * field by field (SCM_RIGHTS, SO_TIMESTAMP, SCM_CREDENTIALS, IP_TTL,
 * IP_RECVERR, IPV6_HOPLIMIT, IPV6_RECVERR); anything else is copied raw
 * with a warning. Truncation here is reported to the guest via
 * MSG_CTRUNC. On return target_msgh->msg_controllen is the space used.
 */
1774 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1775 struct msghdr *msgh)
1777 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1778 abi_long msg_controllen;
1779 abi_ulong target_cmsg_addr;
1780 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1781 socklen_t space = 0;
1783 msg_controllen = tswapal(target_msgh->msg_controllen);
1784 if (msg_controllen < sizeof (struct target_cmsghdr))
1785 goto the_end;
1786 target_cmsg_addr = tswapal(target_msgh->msg_control);
1787 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1788 target_cmsg_start = target_cmsg;
1789 if (!target_cmsg)
1790 return -TARGET_EFAULT;
1792 while (cmsg && target_cmsg) {
1793 void *data = CMSG_DATA(cmsg);
1794 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1796 int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1797 int tgt_len, tgt_space;
1799 /* We never copy a half-header but may copy half-data;
1800 * this is Linux's behaviour in put_cmsg(). Note that
1801 * truncation here is a guest problem (which we report
1802 * to the guest via the CTRUNC bit), unlike truncation
1803 * in target_to_host_cmsg, which is a QEMU bug.
 */
1805 if (msg_controllen < sizeof(struct target_cmsghdr)) {
1806 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1807 break;
1810 if (cmsg->cmsg_level == SOL_SOCKET) {
1811 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1812 } else {
1813 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1815 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1817 /* Payload types which need a different size of payload on
1818 * the target must adjust tgt_len here.
 */
1820 switch (cmsg->cmsg_level) {
1821 case SOL_SOCKET:
1822 switch (cmsg->cmsg_type) {
1823 case SO_TIMESTAMP:
1824 tgt_len = sizeof(struct target_timeval);
1825 break;
1826 default:
1827 break;
1829 default:
1830 tgt_len = len;
1831 break;
1834 if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1835 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1836 tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1839 /* We must now copy-and-convert len bytes of payload
1840 * into tgt_len bytes of destination space. Bear in mind
1841 * that in both source and destination we may be dealing
1842 * with a truncated value!
 */
1844 switch (cmsg->cmsg_level) {
1845 case SOL_SOCKET:
1846 switch (cmsg->cmsg_type) {
1847 case SCM_RIGHTS:
1849 int *fd = (int *)data;
1850 int *target_fd = (int *)target_data;
1851 int i, numfds = tgt_len / sizeof(int);
1853 for (i = 0; i < numfds; i++) {
1854 __put_user(fd[i], target_fd + i);
1856 break;
1858 case SO_TIMESTAMP:
1860 struct timeval *tv = (struct timeval *)data;
1861 struct target_timeval *target_tv =
1862 (struct target_timeval *)target_data;
1864 if (len != sizeof(struct timeval) ||
1865 tgt_len != sizeof(struct target_timeval)) {
1866 goto unimplemented;
1869 /* copy struct timeval to target */
1870 __put_user(tv->tv_sec, &target_tv->tv_sec);
1871 __put_user(tv->tv_usec, &target_tv->tv_usec);
1872 break;
1874 case SCM_CREDENTIALS:
1876 struct ucred *cred = (struct ucred *)data;
1877 struct target_ucred *target_cred =
1878 (struct target_ucred *)target_data;
1880 __put_user(cred->pid, &target_cred->pid);
1881 __put_user(cred->uid, &target_cred->uid);
1882 __put_user(cred->gid, &target_cred->gid);
1883 break;
1885 default:
1886 goto unimplemented;
1888 break;
1890 case SOL_IP:
1891 switch (cmsg->cmsg_type) {
1892 case IP_TTL:
1894 uint32_t *v = (uint32_t *)data;
1895 uint32_t *t_int = (uint32_t *)target_data;
1897 if (len != sizeof(uint32_t) ||
1898 tgt_len != sizeof(uint32_t)) {
1899 goto unimplemented;
1901 __put_user(*v, t_int);
1902 break;
1904 case IP_RECVERR:
1906 struct errhdr_t {
1907 struct sock_extended_err ee;
1908 struct sockaddr_in offender;
1910 struct errhdr_t *errh = (struct errhdr_t *)data;
1911 struct errhdr_t *target_errh =
1912 (struct errhdr_t *)target_data;
1914 if (len != sizeof(struct errhdr_t) ||
1915 tgt_len != sizeof(struct errhdr_t)) {
1916 goto unimplemented;
1918 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1919 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1920 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1921 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1922 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1923 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1924 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1925 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1926 (void *) &errh->offender, sizeof(errh->offender));
1927 break;
1929 default:
1930 goto unimplemented;
1932 break;
1934 case SOL_IPV6:
1935 switch (cmsg->cmsg_type) {
1936 case IPV6_HOPLIMIT:
1938 uint32_t *v = (uint32_t *)data;
1939 uint32_t *t_int = (uint32_t *)target_data;
1941 if (len != sizeof(uint32_t) ||
1942 tgt_len != sizeof(uint32_t)) {
1943 goto unimplemented;
1945 __put_user(*v, t_int);
1946 break;
1948 case IPV6_RECVERR:
1950 struct errhdr6_t {
1951 struct sock_extended_err ee;
1952 struct sockaddr_in6 offender;
1954 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1955 struct errhdr6_t *target_errh =
1956 (struct errhdr6_t *)target_data;
1958 if (len != sizeof(struct errhdr6_t) ||
1959 tgt_len != sizeof(struct errhdr6_t)) {
1960 goto unimplemented;
1962 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1963 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1964 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1965 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1966 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1967 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1968 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1969 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1970 (void *) &errh->offender, sizeof(errh->offender));
1971 break;
1973 default:
1974 goto unimplemented;
1976 break;
1978 default:
1979 unimplemented:
1980 gemu_log("Unsupported ancillary data: %d/%d\n",
1981 cmsg->cmsg_level, cmsg->cmsg_type);
1982 memcpy(target_data, data, MIN(len, tgt_len));
1983 if (tgt_len > len) {
1984 memset(target_data + len, 0, tgt_len - len);
1988 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1989 tgt_space = TARGET_CMSG_SPACE(tgt_len);
1990 if (msg_controllen < tgt_space) {
1991 tgt_space = msg_controllen;
1993 msg_controllen -= tgt_space;
1994 space += tgt_space;
1995 cmsg = CMSG_NXTHDR(msgh, cmsg);
1996 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1997 target_cmsg_start);
1999 unlock_user(target_cmsg, target_cmsg_addr, space);
2000 the_end:
2001 target_msgh->msg_controllen = tswapal(space);
2002 return 0;
/* Byte-swap every field of a netlink message header in place. */
2005 static void tswap_nlmsghdr(struct nlmsghdr *nlh)
2007 nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
2008 nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
2009 nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
2010 nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
2011 nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
/* Walk a buffer of host-order netlink messages, convert each payload via
 * the supplied callback, then byte-swap each header for the target.
 * Stops early at NLMSG_DONE/NLMSG_ERROR or on a malformed length.
 */
2014 static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
2015 size_t len,
2016 abi_long (*host_to_target_nlmsg)
2017 (struct nlmsghdr *))
2019 uint32_t nlmsg_len;
2020 abi_long ret;
2022 while (len > sizeof(struct nlmsghdr)) {
2024 nlmsg_len = nlh->nlmsg_len;
2025 if (nlmsg_len < sizeof(struct nlmsghdr) ||
2026 nlmsg_len > len) {
2027 break;
2030 switch (nlh->nlmsg_type) {
2031 case NLMSG_DONE:
2032 tswap_nlmsghdr(nlh);
2033 return 0;
2034 case NLMSG_NOOP:
2035 break;
2036 case NLMSG_ERROR:
2038 struct nlmsgerr *e = NLMSG_DATA(nlh);
2039 e->error = tswap32(e->error);
2040 tswap_nlmsghdr(&e->msg);
2041 tswap_nlmsghdr(nlh);
2042 return 0;
2044 default:
2045 ret = host_to_target_nlmsg(nlh);
2046 if (ret < 0) {
2047 tswap_nlmsghdr(nlh);
2048 return ret;
2050 break;
2052 tswap_nlmsghdr(nlh);
2053 len -= NLMSG_ALIGN(nlmsg_len);
2054 nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
2056 return 0;
/* Walk a buffer of target-order netlink messages: byte-swap each header
 * to host order, then convert each payload via the supplied callback.
 * Length fields are validated (in target order) before swapping.
 */
2059 static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
2060 size_t len,
2061 abi_long (*target_to_host_nlmsg)
2062 (struct nlmsghdr *))
2064 int ret;
2066 while (len > sizeof(struct nlmsghdr)) {
2067 if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
2068 tswap32(nlh->nlmsg_len) > len) {
2069 break;
2071 tswap_nlmsghdr(nlh);
2072 switch (nlh->nlmsg_type) {
2073 case NLMSG_DONE:
2074 return 0;
2075 case NLMSG_NOOP:
2076 break;
2077 case NLMSG_ERROR:
2079 struct nlmsgerr *e = NLMSG_DATA(nlh);
2080 e->error = tswap32(e->error);
2081 tswap_nlmsghdr(&e->msg);
2082 return 0;
2084 default:
2085 ret = target_to_host_nlmsg(nlh);
2086 if (ret < 0) {
2087 return ret;
2090 len -= NLMSG_ALIGN(nlh->nlmsg_len);
2091 nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
2093 return 0;
2096 #ifdef CONFIG_RTNETLINK
2097 static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
2098 size_t len, void *context,
2099 abi_long (*host_to_target_nlattr)
2100 (struct nlattr *,
2101 void *context))
2103 unsigned short nla_len;
2104 abi_long ret;
2106 while (len > sizeof(struct nlattr)) {
2107 nla_len = nlattr->nla_len;
2108 if (nla_len < sizeof(struct nlattr) ||
2109 nla_len > len) {
2110 break;
2112 ret = host_to_target_nlattr(nlattr, context);
2113 nlattr->nla_len = tswap16(nlattr->nla_len);
2114 nlattr->nla_type = tswap16(nlattr->nla_type);
2115 if (ret < 0) {
2116 return ret;
2118 len -= NLA_ALIGN(nla_len);
2119 nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
2121 return 0;
2124 static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
2125 size_t len,
2126 abi_long (*host_to_target_rtattr)
2127 (struct rtattr *))
2129 unsigned short rta_len;
2130 abi_long ret;
2132 while (len > sizeof(struct rtattr)) {
2133 rta_len = rtattr->rta_len;
2134 if (rta_len < sizeof(struct rtattr) ||
2135 rta_len > len) {
2136 break;
2138 ret = host_to_target_rtattr(rtattr);
2139 rtattr->rta_len = tswap16(rtattr->rta_len);
2140 rtattr->rta_type = tswap16(rtattr->rta_type);
2141 if (ret < 0) {
2142 return ret;
2144 len -= RTA_ALIGN(rta_len);
2145 rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
2147 return 0;
2150 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
2152 static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
2153 void *context)
2155 uint16_t *u16;
2156 uint32_t *u32;
2157 uint64_t *u64;
2159 switch (nlattr->nla_type) {
2160 /* no data */
2161 case QEMU_IFLA_BR_FDB_FLUSH:
2162 break;
2163 /* binary */
2164 case QEMU_IFLA_BR_GROUP_ADDR:
2165 break;
2166 /* uint8_t */
2167 case QEMU_IFLA_BR_VLAN_FILTERING:
2168 case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
2169 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
2170 case QEMU_IFLA_BR_MCAST_ROUTER:
2171 case QEMU_IFLA_BR_MCAST_SNOOPING:
2172 case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
2173 case QEMU_IFLA_BR_MCAST_QUERIER:
2174 case QEMU_IFLA_BR_NF_CALL_IPTABLES:
2175 case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
2176 case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
2177 break;
2178 /* uint16_t */
2179 case QEMU_IFLA_BR_PRIORITY:
2180 case QEMU_IFLA_BR_VLAN_PROTOCOL:
2181 case QEMU_IFLA_BR_GROUP_FWD_MASK:
2182 case QEMU_IFLA_BR_ROOT_PORT:
2183 case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
2184 u16 = NLA_DATA(nlattr);
2185 *u16 = tswap16(*u16);
2186 break;
2187 /* uint32_t */
2188 case QEMU_IFLA_BR_FORWARD_DELAY:
2189 case QEMU_IFLA_BR_HELLO_TIME:
2190 case QEMU_IFLA_BR_MAX_AGE:
2191 case QEMU_IFLA_BR_AGEING_TIME:
2192 case QEMU_IFLA_BR_STP_STATE:
2193 case QEMU_IFLA_BR_ROOT_PATH_COST:
2194 case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
2195 case QEMU_IFLA_BR_MCAST_HASH_MAX:
2196 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
2197 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
2198 u32 = NLA_DATA(nlattr);
2199 *u32 = tswap32(*u32);
2200 break;
2201 /* uint64_t */
2202 case QEMU_IFLA_BR_HELLO_TIMER:
2203 case QEMU_IFLA_BR_TCN_TIMER:
2204 case QEMU_IFLA_BR_GC_TIMER:
2205 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
2206 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
2207 case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
2208 case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
2209 case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
2210 case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
2211 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
2212 u64 = NLA_DATA(nlattr);
2213 *u64 = tswap64(*u64);
2214 break;
2215 /* ifla_bridge_id: uin8_t[] */
2216 case QEMU_IFLA_BR_ROOT_ID:
2217 case QEMU_IFLA_BR_BRIDGE_ID:
2218 break;
2219 default:
2220 gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
2221 break;
2223 return 0;
2226 static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
2227 void *context)
2229 uint16_t *u16;
2230 uint32_t *u32;
2231 uint64_t *u64;
2233 switch (nlattr->nla_type) {
2234 /* uint8_t */
2235 case QEMU_IFLA_BRPORT_STATE:
2236 case QEMU_IFLA_BRPORT_MODE:
2237 case QEMU_IFLA_BRPORT_GUARD:
2238 case QEMU_IFLA_BRPORT_PROTECT:
2239 case QEMU_IFLA_BRPORT_FAST_LEAVE:
2240 case QEMU_IFLA_BRPORT_LEARNING:
2241 case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
2242 case QEMU_IFLA_BRPORT_PROXYARP:
2243 case QEMU_IFLA_BRPORT_LEARNING_SYNC:
2244 case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
2245 case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
2246 case QEMU_IFLA_BRPORT_CONFIG_PENDING:
2247 case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
2248 break;
2249 /* uint16_t */
2250 case QEMU_IFLA_BRPORT_PRIORITY:
2251 case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
2252 case QEMU_IFLA_BRPORT_DESIGNATED_COST:
2253 case QEMU_IFLA_BRPORT_ID:
2254 case QEMU_IFLA_BRPORT_NO:
2255 u16 = NLA_DATA(nlattr);
2256 *u16 = tswap16(*u16);
2257 break;
2258 /* uin32_t */
2259 case QEMU_IFLA_BRPORT_COST:
2260 u32 = NLA_DATA(nlattr);
2261 *u32 = tswap32(*u32);
2262 break;
2263 /* uint64_t */
2264 case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
2265 case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
2266 case QEMU_IFLA_BRPORT_HOLD_TIMER:
2267 u64 = NLA_DATA(nlattr);
2268 *u64 = tswap64(*u64);
2269 break;
2270 /* ifla_bridge_id: uint8_t[] */
2271 case QEMU_IFLA_BRPORT_ROOT_ID:
2272 case QEMU_IFLA_BRPORT_BRIDGE_ID:
2273 break;
2274 default:
2275 gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
2276 break;
2278 return 0;
/*
 * Scratch state carried across the nested attributes of one
 * QEMU_IFLA_LINKINFO: remembers the IFLA_INFO_KIND /
 * IFLA_INFO_SLAVE_KIND strings so the matching *_INFO_DATA blobs can be
 * dispatched to the right per-driver converter.
 */
struct linkinfo_context {
    int len;            /* payload length of 'name' */
    char *name;         /* driver kind, e.g. "bridge" (points into the message) */
    int slave_len;      /* payload length of 'slave_name' */
    char *slave_name;   /* slave driver kind (points into the message) */
};
2288 static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
2289 void *context)
2291 struct linkinfo_context *li_context = context;
2293 switch (nlattr->nla_type) {
2294 /* string */
2295 case QEMU_IFLA_INFO_KIND:
2296 li_context->name = NLA_DATA(nlattr);
2297 li_context->len = nlattr->nla_len - NLA_HDRLEN;
2298 break;
2299 case QEMU_IFLA_INFO_SLAVE_KIND:
2300 li_context->slave_name = NLA_DATA(nlattr);
2301 li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
2302 break;
2303 /* stats */
2304 case QEMU_IFLA_INFO_XSTATS:
2305 /* FIXME: only used by CAN */
2306 break;
2307 /* nested */
2308 case QEMU_IFLA_INFO_DATA:
2309 if (strncmp(li_context->name, "bridge",
2310 li_context->len) == 0) {
2311 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2312 nlattr->nla_len,
2313 NULL,
2314 host_to_target_data_bridge_nlattr);
2315 } else {
2316 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
2318 break;
2319 case QEMU_IFLA_INFO_SLAVE_DATA:
2320 if (strncmp(li_context->slave_name, "bridge",
2321 li_context->slave_len) == 0) {
2322 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2323 nlattr->nla_len,
2324 NULL,
2325 host_to_target_slave_data_bridge_nlattr);
2326 } else {
2327 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2328 li_context->slave_name);
2330 break;
2331 default:
2332 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
2333 break;
2336 return 0;
2339 static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
2340 void *context)
2342 uint32_t *u32;
2343 int i;
2345 switch (nlattr->nla_type) {
2346 case QEMU_IFLA_INET_CONF:
2347 u32 = NLA_DATA(nlattr);
2348 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2349 i++) {
2350 u32[i] = tswap32(u32[i]);
2352 break;
2353 default:
2354 gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
2356 return 0;
2359 static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
2360 void *context)
2362 uint32_t *u32;
2363 uint64_t *u64;
2364 struct ifla_cacheinfo *ci;
2365 int i;
2367 switch (nlattr->nla_type) {
2368 /* binaries */
2369 case QEMU_IFLA_INET6_TOKEN:
2370 break;
2371 /* uint8_t */
2372 case QEMU_IFLA_INET6_ADDR_GEN_MODE:
2373 break;
2374 /* uint32_t */
2375 case QEMU_IFLA_INET6_FLAGS:
2376 u32 = NLA_DATA(nlattr);
2377 *u32 = tswap32(*u32);
2378 break;
2379 /* uint32_t[] */
2380 case QEMU_IFLA_INET6_CONF:
2381 u32 = NLA_DATA(nlattr);
2382 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2383 i++) {
2384 u32[i] = tswap32(u32[i]);
2386 break;
2387 /* ifla_cacheinfo */
2388 case QEMU_IFLA_INET6_CACHEINFO:
2389 ci = NLA_DATA(nlattr);
2390 ci->max_reasm_len = tswap32(ci->max_reasm_len);
2391 ci->tstamp = tswap32(ci->tstamp);
2392 ci->reachable_time = tswap32(ci->reachable_time);
2393 ci->retrans_time = tswap32(ci->retrans_time);
2394 break;
2395 /* uint64_t[] */
2396 case QEMU_IFLA_INET6_STATS:
2397 case QEMU_IFLA_INET6_ICMP6STATS:
2398 u64 = NLA_DATA(nlattr);
2399 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
2400 i++) {
2401 u64[i] = tswap64(u64[i]);
2403 break;
2404 default:
2405 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
2407 return 0;
2410 static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
2411 void *context)
2413 switch (nlattr->nla_type) {
2414 case AF_INET:
2415 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2416 NULL,
2417 host_to_target_data_inet_nlattr);
2418 case AF_INET6:
2419 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2420 NULL,
2421 host_to_target_data_inet6_nlattr);
2422 default:
2423 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
2424 break;
2426 return 0;
2429 static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
2431 uint32_t *u32;
2432 struct rtnl_link_stats *st;
2433 struct rtnl_link_stats64 *st64;
2434 struct rtnl_link_ifmap *map;
2435 struct linkinfo_context li_context;
2437 switch (rtattr->rta_type) {
2438 /* binary stream */
2439 case QEMU_IFLA_ADDRESS:
2440 case QEMU_IFLA_BROADCAST:
2441 /* string */
2442 case QEMU_IFLA_IFNAME:
2443 case QEMU_IFLA_QDISC:
2444 break;
2445 /* uin8_t */
2446 case QEMU_IFLA_OPERSTATE:
2447 case QEMU_IFLA_LINKMODE:
2448 case QEMU_IFLA_CARRIER:
2449 case QEMU_IFLA_PROTO_DOWN:
2450 break;
2451 /* uint32_t */
2452 case QEMU_IFLA_MTU:
2453 case QEMU_IFLA_LINK:
2454 case QEMU_IFLA_WEIGHT:
2455 case QEMU_IFLA_TXQLEN:
2456 case QEMU_IFLA_CARRIER_CHANGES:
2457 case QEMU_IFLA_NUM_RX_QUEUES:
2458 case QEMU_IFLA_NUM_TX_QUEUES:
2459 case QEMU_IFLA_PROMISCUITY:
2460 case QEMU_IFLA_EXT_MASK:
2461 case QEMU_IFLA_LINK_NETNSID:
2462 case QEMU_IFLA_GROUP:
2463 case QEMU_IFLA_MASTER:
2464 case QEMU_IFLA_NUM_VF:
2465 case QEMU_IFLA_GSO_MAX_SEGS:
2466 case QEMU_IFLA_GSO_MAX_SIZE:
2467 u32 = RTA_DATA(rtattr);
2468 *u32 = tswap32(*u32);
2469 break;
2470 /* struct rtnl_link_stats */
2471 case QEMU_IFLA_STATS:
2472 st = RTA_DATA(rtattr);
2473 st->rx_packets = tswap32(st->rx_packets);
2474 st->tx_packets = tswap32(st->tx_packets);
2475 st->rx_bytes = tswap32(st->rx_bytes);
2476 st->tx_bytes = tswap32(st->tx_bytes);
2477 st->rx_errors = tswap32(st->rx_errors);
2478 st->tx_errors = tswap32(st->tx_errors);
2479 st->rx_dropped = tswap32(st->rx_dropped);
2480 st->tx_dropped = tswap32(st->tx_dropped);
2481 st->multicast = tswap32(st->multicast);
2482 st->collisions = tswap32(st->collisions);
2484 /* detailed rx_errors: */
2485 st->rx_length_errors = tswap32(st->rx_length_errors);
2486 st->rx_over_errors = tswap32(st->rx_over_errors);
2487 st->rx_crc_errors = tswap32(st->rx_crc_errors);
2488 st->rx_frame_errors = tswap32(st->rx_frame_errors);
2489 st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
2490 st->rx_missed_errors = tswap32(st->rx_missed_errors);
2492 /* detailed tx_errors */
2493 st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
2494 st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
2495 st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
2496 st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
2497 st->tx_window_errors = tswap32(st->tx_window_errors);
2499 /* for cslip etc */
2500 st->rx_compressed = tswap32(st->rx_compressed);
2501 st->tx_compressed = tswap32(st->tx_compressed);
2502 break;
2503 /* struct rtnl_link_stats64 */
2504 case QEMU_IFLA_STATS64:
2505 st64 = RTA_DATA(rtattr);
2506 st64->rx_packets = tswap64(st64->rx_packets);
2507 st64->tx_packets = tswap64(st64->tx_packets);
2508 st64->rx_bytes = tswap64(st64->rx_bytes);
2509 st64->tx_bytes = tswap64(st64->tx_bytes);
2510 st64->rx_errors = tswap64(st64->rx_errors);
2511 st64->tx_errors = tswap64(st64->tx_errors);
2512 st64->rx_dropped = tswap64(st64->rx_dropped);
2513 st64->tx_dropped = tswap64(st64->tx_dropped);
2514 st64->multicast = tswap64(st64->multicast);
2515 st64->collisions = tswap64(st64->collisions);
2517 /* detailed rx_errors: */
2518 st64->rx_length_errors = tswap64(st64->rx_length_errors);
2519 st64->rx_over_errors = tswap64(st64->rx_over_errors);
2520 st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
2521 st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
2522 st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
2523 st64->rx_missed_errors = tswap64(st64->rx_missed_errors);
2525 /* detailed tx_errors */
2526 st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
2527 st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
2528 st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
2529 st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
2530 st64->tx_window_errors = tswap64(st64->tx_window_errors);
2532 /* for cslip etc */
2533 st64->rx_compressed = tswap64(st64->rx_compressed);
2534 st64->tx_compressed = tswap64(st64->tx_compressed);
2535 break;
2536 /* struct rtnl_link_ifmap */
2537 case QEMU_IFLA_MAP:
2538 map = RTA_DATA(rtattr);
2539 map->mem_start = tswap64(map->mem_start);
2540 map->mem_end = tswap64(map->mem_end);
2541 map->base_addr = tswap64(map->base_addr);
2542 map->irq = tswap16(map->irq);
2543 break;
2544 /* nested */
2545 case QEMU_IFLA_LINKINFO:
2546 memset(&li_context, 0, sizeof(li_context));
2547 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2548 &li_context,
2549 host_to_target_data_linkinfo_nlattr);
2550 case QEMU_IFLA_AF_SPEC:
2551 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2552 NULL,
2553 host_to_target_data_spec_nlattr);
2554 default:
2555 gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
2556 break;
2558 return 0;
2561 static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
2563 uint32_t *u32;
2564 struct ifa_cacheinfo *ci;
2566 switch (rtattr->rta_type) {
2567 /* binary: depends on family type */
2568 case IFA_ADDRESS:
2569 case IFA_LOCAL:
2570 break;
2571 /* string */
2572 case IFA_LABEL:
2573 break;
2574 /* u32 */
2575 case IFA_FLAGS:
2576 case IFA_BROADCAST:
2577 u32 = RTA_DATA(rtattr);
2578 *u32 = tswap32(*u32);
2579 break;
2580 /* struct ifa_cacheinfo */
2581 case IFA_CACHEINFO:
2582 ci = RTA_DATA(rtattr);
2583 ci->ifa_prefered = tswap32(ci->ifa_prefered);
2584 ci->ifa_valid = tswap32(ci->ifa_valid);
2585 ci->cstamp = tswap32(ci->cstamp);
2586 ci->tstamp = tswap32(ci->tstamp);
2587 break;
2588 default:
2589 gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
2590 break;
2592 return 0;
2595 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
2597 uint32_t *u32;
2598 switch (rtattr->rta_type) {
2599 /* binary: depends on family type */
2600 case RTA_GATEWAY:
2601 case RTA_DST:
2602 case RTA_PREFSRC:
2603 break;
2604 /* u32 */
2605 case RTA_PRIORITY:
2606 case RTA_TABLE:
2607 case RTA_OIF:
2608 u32 = RTA_DATA(rtattr);
2609 *u32 = tswap32(*u32);
2610 break;
2611 default:
2612 gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
2613 break;
2615 return 0;
2618 static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
2619 uint32_t rtattr_len)
2621 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2622 host_to_target_data_link_rtattr);
2625 static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
2626 uint32_t rtattr_len)
2628 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2629 host_to_target_data_addr_rtattr);
2632 static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
2633 uint32_t rtattr_len)
2635 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2636 host_to_target_data_route_rtattr);
2639 static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
2641 uint32_t nlmsg_len;
2642 struct ifinfomsg *ifi;
2643 struct ifaddrmsg *ifa;
2644 struct rtmsg *rtm;
2646 nlmsg_len = nlh->nlmsg_len;
2647 switch (nlh->nlmsg_type) {
2648 case RTM_NEWLINK:
2649 case RTM_DELLINK:
2650 case RTM_GETLINK:
2651 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2652 ifi = NLMSG_DATA(nlh);
2653 ifi->ifi_type = tswap16(ifi->ifi_type);
2654 ifi->ifi_index = tswap32(ifi->ifi_index);
2655 ifi->ifi_flags = tswap32(ifi->ifi_flags);
2656 ifi->ifi_change = tswap32(ifi->ifi_change);
2657 host_to_target_link_rtattr(IFLA_RTA(ifi),
2658 nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
2660 break;
2661 case RTM_NEWADDR:
2662 case RTM_DELADDR:
2663 case RTM_GETADDR:
2664 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2665 ifa = NLMSG_DATA(nlh);
2666 ifa->ifa_index = tswap32(ifa->ifa_index);
2667 host_to_target_addr_rtattr(IFA_RTA(ifa),
2668 nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
2670 break;
2671 case RTM_NEWROUTE:
2672 case RTM_DELROUTE:
2673 case RTM_GETROUTE:
2674 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2675 rtm = NLMSG_DATA(nlh);
2676 rtm->rtm_flags = tswap32(rtm->rtm_flags);
2677 host_to_target_route_rtattr(RTM_RTA(rtm),
2678 nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
2680 break;
2681 default:
2682 return -TARGET_EINVAL;
2684 return 0;
2687 static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
2688 size_t len)
2690 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
2693 static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
2694 size_t len,
2695 abi_long (*target_to_host_rtattr)
2696 (struct rtattr *))
2698 abi_long ret;
2700 while (len >= sizeof(struct rtattr)) {
2701 if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
2702 tswap16(rtattr->rta_len) > len) {
2703 break;
2705 rtattr->rta_len = tswap16(rtattr->rta_len);
2706 rtattr->rta_type = tswap16(rtattr->rta_type);
2707 ret = target_to_host_rtattr(rtattr);
2708 if (ret < 0) {
2709 return ret;
2711 len -= RTA_ALIGN(rtattr->rta_len);
2712 rtattr = (struct rtattr *)(((char *)rtattr) +
2713 RTA_ALIGN(rtattr->rta_len));
2715 return 0;
2718 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2720 switch (rtattr->rta_type) {
2721 default:
2722 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
2723 break;
2725 return 0;
2728 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2730 switch (rtattr->rta_type) {
2731 /* binary: depends on family type */
2732 case IFA_LOCAL:
2733 case IFA_ADDRESS:
2734 break;
2735 default:
2736 gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
2737 break;
2739 return 0;
2742 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2744 uint32_t *u32;
2745 switch (rtattr->rta_type) {
2746 /* binary: depends on family type */
2747 case RTA_DST:
2748 case RTA_SRC:
2749 case RTA_GATEWAY:
2750 break;
2751 /* u32 */
2752 case RTA_PRIORITY:
2753 case RTA_OIF:
2754 u32 = RTA_DATA(rtattr);
2755 *u32 = tswap32(*u32);
2756 break;
2757 default:
2758 gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
2759 break;
2761 return 0;
2764 static void target_to_host_link_rtattr(struct rtattr *rtattr,
2765 uint32_t rtattr_len)
2767 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2768 target_to_host_data_link_rtattr);
2771 static void target_to_host_addr_rtattr(struct rtattr *rtattr,
2772 uint32_t rtattr_len)
2774 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2775 target_to_host_data_addr_rtattr);
2778 static void target_to_host_route_rtattr(struct rtattr *rtattr,
2779 uint32_t rtattr_len)
2781 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2782 target_to_host_data_route_rtattr);
2785 static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
2787 struct ifinfomsg *ifi;
2788 struct ifaddrmsg *ifa;
2789 struct rtmsg *rtm;
2791 switch (nlh->nlmsg_type) {
2792 case RTM_GETLINK:
2793 break;
2794 case RTM_NEWLINK:
2795 case RTM_DELLINK:
2796 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2797 ifi = NLMSG_DATA(nlh);
2798 ifi->ifi_type = tswap16(ifi->ifi_type);
2799 ifi->ifi_index = tswap32(ifi->ifi_index);
2800 ifi->ifi_flags = tswap32(ifi->ifi_flags);
2801 ifi->ifi_change = tswap32(ifi->ifi_change);
2802 target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
2803 NLMSG_LENGTH(sizeof(*ifi)));
2805 break;
2806 case RTM_GETADDR:
2807 case RTM_NEWADDR:
2808 case RTM_DELADDR:
2809 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2810 ifa = NLMSG_DATA(nlh);
2811 ifa->ifa_index = tswap32(ifa->ifa_index);
2812 target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
2813 NLMSG_LENGTH(sizeof(*ifa)));
2815 break;
2816 case RTM_GETROUTE:
2817 break;
2818 case RTM_NEWROUTE:
2819 case RTM_DELROUTE:
2820 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2821 rtm = NLMSG_DATA(nlh);
2822 rtm->rtm_flags = tswap32(rtm->rtm_flags);
2823 target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
2824 NLMSG_LENGTH(sizeof(*rtm)));
2826 break;
2827 default:
2828 return -TARGET_EOPNOTSUPP;
2830 return 0;
2833 static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
2835 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
2837 #endif /* CONFIG_RTNETLINK */
2839 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2841 switch (nlh->nlmsg_type) {
2842 default:
2843 gemu_log("Unknown host audit message type %d\n",
2844 nlh->nlmsg_type);
2845 return -TARGET_EINVAL;
2847 return 0;
2850 static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
2851 size_t len)
2853 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
2856 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2858 switch (nlh->nlmsg_type) {
2859 case AUDIT_USER:
2860 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2861 case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2862 break;
2863 default:
2864 gemu_log("Unknown target audit message type %d\n",
2865 nlh->nlmsg_type);
2866 return -TARGET_EINVAL;
2869 return 0;
2872 static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
2874 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
2877 /* do_setsockopt() Must return target values and target errnos. */
2878 static abi_long do_setsockopt(int sockfd, int level, int optname,
2879 abi_ulong optval_addr, socklen_t optlen)
2881 abi_long ret;
2882 int val;
2883 struct ip_mreqn *ip_mreq;
2884 struct ip_mreq_source *ip_mreq_source;
2886 switch(level) {
2887 case SOL_TCP:
2888 /* TCP options all take an 'int' value. */
2889 if (optlen < sizeof(uint32_t))
2890 return -TARGET_EINVAL;
2892 if (get_user_u32(val, optval_addr))
2893 return -TARGET_EFAULT;
2894 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2895 break;
2896 case SOL_IP:
2897 switch(optname) {
2898 case IP_TOS:
2899 case IP_TTL:
2900 case IP_HDRINCL:
2901 case IP_ROUTER_ALERT:
2902 case IP_RECVOPTS:
2903 case IP_RETOPTS:
2904 case IP_PKTINFO:
2905 case IP_MTU_DISCOVER:
2906 case IP_RECVERR:
2907 case IP_RECVTTL:
2908 case IP_RECVTOS:
2909 #ifdef IP_FREEBIND
2910 case IP_FREEBIND:
2911 #endif
2912 case IP_MULTICAST_TTL:
2913 case IP_MULTICAST_LOOP:
2914 val = 0;
2915 if (optlen >= sizeof(uint32_t)) {
2916 if (get_user_u32(val, optval_addr))
2917 return -TARGET_EFAULT;
2918 } else if (optlen >= 1) {
2919 if (get_user_u8(val, optval_addr))
2920 return -TARGET_EFAULT;
2922 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2923 break;
2924 case IP_ADD_MEMBERSHIP:
2925 case IP_DROP_MEMBERSHIP:
2926 if (optlen < sizeof (struct target_ip_mreq) ||
2927 optlen > sizeof (struct target_ip_mreqn))
2928 return -TARGET_EINVAL;
2930 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2931 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2932 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2933 break;
2935 case IP_BLOCK_SOURCE:
2936 case IP_UNBLOCK_SOURCE:
2937 case IP_ADD_SOURCE_MEMBERSHIP:
2938 case IP_DROP_SOURCE_MEMBERSHIP:
2939 if (optlen != sizeof (struct target_ip_mreq_source))
2940 return -TARGET_EINVAL;
2942 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2943 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2944 unlock_user (ip_mreq_source, optval_addr, 0);
2945 break;
2947 default:
2948 goto unimplemented;
2950 break;
2951 case SOL_IPV6:
2952 switch (optname) {
2953 case IPV6_MTU_DISCOVER:
2954 case IPV6_MTU:
2955 case IPV6_V6ONLY:
2956 case IPV6_RECVPKTINFO:
2957 case IPV6_UNICAST_HOPS:
2958 case IPV6_RECVERR:
2959 case IPV6_RECVHOPLIMIT:
2960 case IPV6_2292HOPLIMIT:
2961 case IPV6_CHECKSUM:
2962 val = 0;
2963 if (optlen < sizeof(uint32_t)) {
2964 return -TARGET_EINVAL;
2966 if (get_user_u32(val, optval_addr)) {
2967 return -TARGET_EFAULT;
2969 ret = get_errno(setsockopt(sockfd, level, optname,
2970 &val, sizeof(val)));
2971 break;
2972 case IPV6_PKTINFO:
2974 struct in6_pktinfo pki;
2976 if (optlen < sizeof(pki)) {
2977 return -TARGET_EINVAL;
2980 if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2981 return -TARGET_EFAULT;
2984 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2986 ret = get_errno(setsockopt(sockfd, level, optname,
2987 &pki, sizeof(pki)));
2988 break;
2990 default:
2991 goto unimplemented;
2993 break;
2994 case SOL_ICMPV6:
2995 switch (optname) {
2996 case ICMPV6_FILTER:
2998 struct icmp6_filter icmp6f;
3000 if (optlen > sizeof(icmp6f)) {
3001 optlen = sizeof(icmp6f);
3004 if (copy_from_user(&icmp6f, optval_addr, optlen)) {
3005 return -TARGET_EFAULT;
3008 for (val = 0; val < 8; val++) {
3009 icmp6f.data[val] = tswap32(icmp6f.data[val]);
3012 ret = get_errno(setsockopt(sockfd, level, optname,
3013 &icmp6f, optlen));
3014 break;
3016 default:
3017 goto unimplemented;
3019 break;
3020 case SOL_RAW:
3021 switch (optname) {
3022 case ICMP_FILTER:
3023 case IPV6_CHECKSUM:
3024 /* those take an u32 value */
3025 if (optlen < sizeof(uint32_t)) {
3026 return -TARGET_EINVAL;
3029 if (get_user_u32(val, optval_addr)) {
3030 return -TARGET_EFAULT;
3032 ret = get_errno(setsockopt(sockfd, level, optname,
3033 &val, sizeof(val)));
3034 break;
3036 default:
3037 goto unimplemented;
3039 break;
3040 case TARGET_SOL_SOCKET:
3041 switch (optname) {
3042 case TARGET_SO_RCVTIMEO:
3044 struct timeval tv;
3046 optname = SO_RCVTIMEO;
3048 set_timeout:
3049 if (optlen != sizeof(struct target_timeval)) {
3050 return -TARGET_EINVAL;
3053 if (copy_from_user_timeval(&tv, optval_addr)) {
3054 return -TARGET_EFAULT;
3057 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3058 &tv, sizeof(tv)));
3059 return ret;
3061 case TARGET_SO_SNDTIMEO:
3062 optname = SO_SNDTIMEO;
3063 goto set_timeout;
3064 case TARGET_SO_ATTACH_FILTER:
3066 struct target_sock_fprog *tfprog;
3067 struct target_sock_filter *tfilter;
3068 struct sock_fprog fprog;
3069 struct sock_filter *filter;
3070 int i;
3072 if (optlen != sizeof(*tfprog)) {
3073 return -TARGET_EINVAL;
3075 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
3076 return -TARGET_EFAULT;
3078 if (!lock_user_struct(VERIFY_READ, tfilter,
3079 tswapal(tfprog->filter), 0)) {
3080 unlock_user_struct(tfprog, optval_addr, 1);
3081 return -TARGET_EFAULT;
3084 fprog.len = tswap16(tfprog->len);
3085 filter = g_try_new(struct sock_filter, fprog.len);
3086 if (filter == NULL) {
3087 unlock_user_struct(tfilter, tfprog->filter, 1);
3088 unlock_user_struct(tfprog, optval_addr, 1);
3089 return -TARGET_ENOMEM;
3091 for (i = 0; i < fprog.len; i++) {
3092 filter[i].code = tswap16(tfilter[i].code);
3093 filter[i].jt = tfilter[i].jt;
3094 filter[i].jf = tfilter[i].jf;
3095 filter[i].k = tswap32(tfilter[i].k);
3097 fprog.filter = filter;
3099 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
3100 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
3101 g_free(filter);
3103 unlock_user_struct(tfilter, tfprog->filter, 1);
3104 unlock_user_struct(tfprog, optval_addr, 1);
3105 return ret;
3107 case TARGET_SO_BINDTODEVICE:
3109 char *dev_ifname, *addr_ifname;
3111 if (optlen > IFNAMSIZ - 1) {
3112 optlen = IFNAMSIZ - 1;
3114 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
3115 if (!dev_ifname) {
3116 return -TARGET_EFAULT;
3118 optname = SO_BINDTODEVICE;
3119 addr_ifname = alloca(IFNAMSIZ);
3120 memcpy(addr_ifname, dev_ifname, optlen);
3121 addr_ifname[optlen] = 0;
3122 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3123 addr_ifname, optlen));
3124 unlock_user (dev_ifname, optval_addr, 0);
3125 return ret;
3127 /* Options with 'int' argument. */
3128 case TARGET_SO_DEBUG:
3129 optname = SO_DEBUG;
3130 break;
3131 case TARGET_SO_REUSEADDR:
3132 optname = SO_REUSEADDR;
3133 break;
3134 case TARGET_SO_TYPE:
3135 optname = SO_TYPE;
3136 break;
3137 case TARGET_SO_ERROR:
3138 optname = SO_ERROR;
3139 break;
3140 case TARGET_SO_DONTROUTE:
3141 optname = SO_DONTROUTE;
3142 break;
3143 case TARGET_SO_BROADCAST:
3144 optname = SO_BROADCAST;
3145 break;
3146 case TARGET_SO_SNDBUF:
3147 optname = SO_SNDBUF;
3148 break;
3149 case TARGET_SO_SNDBUFFORCE:
3150 optname = SO_SNDBUFFORCE;
3151 break;
3152 case TARGET_SO_RCVBUF:
3153 optname = SO_RCVBUF;
3154 break;
3155 case TARGET_SO_RCVBUFFORCE:
3156 optname = SO_RCVBUFFORCE;
3157 break;
3158 case TARGET_SO_KEEPALIVE:
3159 optname = SO_KEEPALIVE;
3160 break;
3161 case TARGET_SO_OOBINLINE:
3162 optname = SO_OOBINLINE;
3163 break;
3164 case TARGET_SO_NO_CHECK:
3165 optname = SO_NO_CHECK;
3166 break;
3167 case TARGET_SO_PRIORITY:
3168 optname = SO_PRIORITY;
3169 break;
3170 #ifdef SO_BSDCOMPAT
3171 case TARGET_SO_BSDCOMPAT:
3172 optname = SO_BSDCOMPAT;
3173 break;
3174 #endif
3175 case TARGET_SO_PASSCRED:
3176 optname = SO_PASSCRED;
3177 break;
3178 case TARGET_SO_PASSSEC:
3179 optname = SO_PASSSEC;
3180 break;
3181 case TARGET_SO_TIMESTAMP:
3182 optname = SO_TIMESTAMP;
3183 break;
3184 case TARGET_SO_RCVLOWAT:
3185 optname = SO_RCVLOWAT;
3186 break;
3187 default:
3188 goto unimplemented;
3190 if (optlen < sizeof(uint32_t))
3191 return -TARGET_EINVAL;
3193 if (get_user_u32(val, optval_addr))
3194 return -TARGET_EFAULT;
3195 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
3196 break;
3197 default:
3198 unimplemented:
3199 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
3200 ret = -TARGET_ENOPROTOOPT;
3202 return ret;
/* do_getsockopt() Must return target values and target errnos.
 *
 * Translates a guest getsockopt() into the host call: the guest
 * level/optname constants are mapped to host ones, the host result is
 * byte-swapped back into guest memory, and the guest-supplied length
 * word at 'optlen' is updated just as the kernel would.
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            /* struct ucred must be converted field by field so each
             * member is byte-swapped for the guest. */
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            /* Truncate to the smaller of the guest buffer and the
             * actual credential size, as the kernel does. */
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            /* NOTE(review): unknown guest optnames are passed through
             * unchanged to the host — only correct when the numeric
             * values coincide; confirm for new targets. */
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value. */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* lv and val have the same size here, so sizeof(lv) also
         * describes the 'val' buffer handed to the host. */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            /* Some IP options may be read back as a single byte when
             * the guest asked for less than an int and the value fits. */
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/* Lock a guest iovec array into host memory and build the matching host
 * struct iovec array.
 *
 * Returns a g_malloc'd vector the caller must release with
 * unlock_iovec(), or NULL with errno set on failure (errno is 0 for the
 * legitimate count == 0 case).  'type' is VERIFY_READ/VERIFY_WRITE and
 * 'copy' says whether the guest data must be copied in.
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support. */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored. */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            /* Once one address was bad, all following entries collapse
             * to zero-length so the syscall stops there. */
            if (bad_address) {
                len = 0;
            }
            /* Cap the running total so it cannot exceed max_len. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind: release every buffer locked so far (entries with a
     * positive guest length were the only ones locked). */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
3481 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3482 abi_ulong count, int copy)
3484 struct target_iovec *target_vec;
3485 int i;
3487 target_vec = lock_user(VERIFY_READ, target_addr,
3488 count * sizeof(struct target_iovec), 1);
3489 if (target_vec) {
3490 for (i = 0; i < count; i++) {
3491 abi_ulong base = tswapal(target_vec[i].iov_base);
3492 abi_long len = tswapal(target_vec[i].iov_len);
3493 if (len < 0) {
3494 break;
3496 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3498 unlock_user(target_vec, target_addr, 0);
3501 g_free(vec);
3504 static inline int target_to_host_sock_type(int *type)
3506 int host_type = 0;
3507 int target_type = *type;
3509 switch (target_type & TARGET_SOCK_TYPE_MASK) {
3510 case TARGET_SOCK_DGRAM:
3511 host_type = SOCK_DGRAM;
3512 break;
3513 case TARGET_SOCK_STREAM:
3514 host_type = SOCK_STREAM;
3515 break;
3516 default:
3517 host_type = target_type & TARGET_SOCK_TYPE_MASK;
3518 break;
3520 if (target_type & TARGET_SOCK_CLOEXEC) {
3521 #if defined(SOCK_CLOEXEC)
3522 host_type |= SOCK_CLOEXEC;
3523 #else
3524 return -TARGET_EINVAL;
3525 #endif
3527 if (target_type & TARGET_SOCK_NONBLOCK) {
3528 #if defined(SOCK_NONBLOCK)
3529 host_type |= SOCK_NONBLOCK;
3530 #elif !defined(O_NONBLOCK)
3531 return -TARGET_EINVAL;
3532 #endif
3534 *type = host_type;
3535 return 0;
3538 /* Try to emulate socket type flags after socket creation. */
3539 static int sock_flags_fixup(int fd, int target_type)
3541 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3542 if (target_type & TARGET_SOCK_NONBLOCK) {
3543 int flags = fcntl(fd, F_GETFL);
3544 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3545 close(fd);
3546 return -TARGET_EINVAL;
3549 #endif
3550 return fd;
3553 static abi_long packet_target_to_host_sockaddr(void *host_addr,
3554 abi_ulong target_addr,
3555 socklen_t len)
3557 struct sockaddr *addr = host_addr;
3558 struct target_sockaddr *target_saddr;
3560 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
3561 if (!target_saddr) {
3562 return -TARGET_EFAULT;
3565 memcpy(addr, target_saddr, len);
3566 addr->sa_family = tswap16(target_saddr->sa_family);
3567 /* spkt_protocol is big-endian */
3569 unlock_user(target_saddr, target_addr, 0);
3570 return 0;
/* fd translator for SOCK_PACKET sockets: only the sockaddr needs
 * conversion; payload data passes through untouched. */
static TargetFdTrans target_packet_trans = {
    .target_to_host_addr = packet_target_to_host_sockaddr,
};
#ifdef CONFIG_RTNETLINK
/* Wrap the NETLINK_ROUTE message converters into the fd-translator
 * signature: on success the original buffer length is reported back,
 * errors from the converter are propagated unchanged. */
static abi_long netlink_route_target_to_host(void *buf, size_t len)
{
    abi_long ret = target_to_host_nlmsg_route(buf, len);

    return ret < 0 ? ret : len;
}

static abi_long netlink_route_host_to_target(void *buf, size_t len)
{
    abi_long ret = host_to_target_nlmsg_route(buf, len);

    return ret < 0 ? ret : len;
}

static TargetFdTrans target_netlink_route_trans = {
    .target_to_host_data = netlink_route_target_to_host,
    .host_to_target_data = netlink_route_host_to_target,
};
#endif /* CONFIG_RTNETLINK */
3608 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
3610 abi_long ret;
3612 ret = target_to_host_nlmsg_audit(buf, len);
3613 if (ret < 0) {
3614 return ret;
3617 return len;
3620 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
3622 abi_long ret;
3624 ret = host_to_target_nlmsg_audit(buf, len);
3625 if (ret < 0) {
3626 return ret;
3629 return len;
3632 static TargetFdTrans target_netlink_audit_trans = {
3633 .target_to_host_data = netlink_audit_target_to_host,
3634 .host_to_target_data = netlink_audit_host_to_target,
/* do_socket() Must return target values and target errnos.
 *
 * Creates a host socket for the guest, converting the guest type flags,
 * rejecting netlink protocols we cannot translate, and registering a
 * message/address translator for the resulting fd where needed.
 */
static abi_long do_socket(int domain, int type, int protocol)
{
    int target_type = type;   /* keep the raw guest flags for fixup below */
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    /* Only the netlink protocols we have converters (or no-op handling)
     * for are allowed through; anything else would silently corrupt
     * messages in cross-endian setups. */
    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -EPFNOSUPPORT;
    }

    /* Packet-socket protocol numbers are carried in a fixed byte order,
     * so swap them for cross-endian guests — TODO confirm this matches
     * the kernel's expectation for AF_INET/SOCK_PACKET. */
    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case :
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                /* Unreachable: the whitelist above already filtered. */
                g_assert_not_reached();
            }
        }
    }
    return ret;
}
3691 /* do_bind() Must return target values and target errnos. */
3692 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3693 socklen_t addrlen)
3695 void *addr;
3696 abi_long ret;
3698 if ((int)addrlen < 0) {
3699 return -TARGET_EINVAL;
3702 addr = alloca(addrlen+1);
3704 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3705 if (ret)
3706 return ret;
3708 return get_errno(bind(sockfd, addr, addrlen));
3711 /* do_connect() Must return target values and target errnos. */
3712 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3713 socklen_t addrlen)
3715 void *addr;
3716 abi_long ret;
3718 if ((int)addrlen < 0) {
3719 return -TARGET_EINVAL;
3722 addr = alloca(addrlen+1);
3724 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3725 if (ret)
3726 return ret;
3728 return get_errno(safe_connect(sockfd, addr, addrlen));
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 *
 * Shared implementation of sendmsg/recvmsg given an already-locked
 * guest msghdr.  'send' selects the direction.  On success returns the
 * byte count from the host syscall.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* Over-allocate the control buffer: host cmsg encodings can be
     * larger than the guest's (e.g. 64-bit host, 32-bit guest). */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            void *host_msg;

            /* NOTE(review): only the first iovec entry is translated
             * here — confirm callers of translated fds use one entry. */
            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;   /* remember the byte count across conversions */
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                                       len);
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                /* (void *)-1 marks the deliberately-bad name above. */
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                                  msg.msg_name,
                                                  msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }
                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
3835 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3836 int flags, int send)
3838 abi_long ret;
3839 struct target_msghdr *msgp;
3841 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3842 msgp,
3843 target_msg,
3844 send ? 1 : 0)) {
3845 return -TARGET_EFAULT;
3847 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3848 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3849 return ret;
3852 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3853 * so it might not have this *mmsg-specific flag either.
3855 #ifndef MSG_WAITFORONE
3856 #define MSG_WAITFORONE 0x10000
3857 #endif
3859 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3860 unsigned int vlen, unsigned int flags,
3861 int send)
3863 struct target_mmsghdr *mmsgp;
3864 abi_long ret = 0;
3865 int i;
3867 if (vlen > UIO_MAXIOV) {
3868 vlen = UIO_MAXIOV;
3871 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3872 if (!mmsgp) {
3873 return -TARGET_EFAULT;
3876 for (i = 0; i < vlen; i++) {
3877 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3878 if (is_error(ret)) {
3879 break;
3881 mmsgp[i].msg_len = tswap32(ret);
3882 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3883 if (flags & MSG_WAITFORONE) {
3884 flags |= MSG_DONTWAIT;
3888 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3890 /* Return number of datagrams sent if we sent any at all;
3891 * otherwise return the error.
3893 if (i) {
3894 return i;
3896 return ret;
/* do_accept4() Must return target values and target errnos.
 *
 * Note the deliberate EINVAL (not EFAULT) returns below: they mirror
 * the kernel's own behaviour for bad addrlen pointers/buffers.
 */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    /* NULL peer address: no conversion needed at all. */
    if (target_addr == 0) {
        return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
3936 /* do_getpeername() Must return target values and target errnos. */
3937 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3938 abi_ulong target_addrlen_addr)
3940 socklen_t addrlen;
3941 void *addr;
3942 abi_long ret;
3944 if (get_user_u32(addrlen, target_addrlen_addr))
3945 return -TARGET_EFAULT;
3947 if ((int)addrlen < 0) {
3948 return -TARGET_EINVAL;
3951 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3952 return -TARGET_EFAULT;
3954 addr = alloca(addrlen);
3956 ret = get_errno(getpeername(fd, addr, &addrlen));
3957 if (!is_error(ret)) {
3958 host_to_target_sockaddr(target_addr, addr, addrlen);
3959 if (put_user_u32(addrlen, target_addrlen_addr))
3960 ret = -TARGET_EFAULT;
3962 return ret;
3965 /* do_getsockname() Must return target values and target errnos. */
3966 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3967 abi_ulong target_addrlen_addr)
3969 socklen_t addrlen;
3970 void *addr;
3971 abi_long ret;
3973 if (get_user_u32(addrlen, target_addrlen_addr))
3974 return -TARGET_EFAULT;
3976 if ((int)addrlen < 0) {
3977 return -TARGET_EINVAL;
3980 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3981 return -TARGET_EFAULT;
3983 addr = alloca(addrlen);
3985 ret = get_errno(getsockname(fd, addr, &addrlen));
3986 if (!is_error(ret)) {
3987 host_to_target_sockaddr(target_addr, addr, addrlen);
3988 if (put_user_u32(addrlen, target_addrlen_addr))
3989 ret = -TARGET_EFAULT;
3991 return ret;
3994 /* do_socketpair() Must return target values and target errnos. */
3995 static abi_long do_socketpair(int domain, int type, int protocol,
3996 abi_ulong target_tab_addr)
3998 int tab[2];
3999 abi_long ret;
4001 target_to_host_sock_type(&type);
4003 ret = get_errno(socketpair(domain, type, protocol, tab));
4004 if (!is_error(ret)) {
4005 if (put_user_s32(tab[0], target_tab_addr)
4006 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
4007 ret = -TARGET_EFAULT;
4009 return ret;
/* do_sendto() Must return target values and target errnos.
 *
 * Also backs plain send() (callers pass target_addr == 0).  When the fd
 * has a registered data translator, the payload is bounced through a
 * host-side copy so the guest buffer is never modified.
 */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL;   /* original locked guest buffer, if bounced */
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        /* Translate a private copy so the guest's data stays intact. */
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    /* Restore the original pointer so unlock_user() releases the guest
     * buffer, then free the bounce copy. */
    if (copy_msg) {
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
/* do_recvfrom() Must return target values and target errnos.
 *
 * Also backs plain recv() (callers pass target_addr == 0).  On success
 * the received data, the sender address (if requested) and the updated
 * address length are all copied back to the guest.
 */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet. */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            /* Translate the payload in place for the guest. */
            ret = fd_trans_host_to_target_data(fd)(host_msg, ret);
        }
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* Success: copy the received bytes back to the guest. */
        unlock_user(host_msg, msg, len);
    } else {
    fail:
        /* Error: release the guest buffer without copying back. */
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos.
 *
 * Demultiplexes the legacy socketcall(2) interface: reads nargs[num]
 * guest longs from vptr and dispatches to the per-call helper.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
#define N_SHM_REGIONS	32

/* Fixed-size table of guest SysV shared-memory attachments.
 * NOTE(review): the users of this table are outside this chunk —
 * presumably the shmat/shmdt emulation records each mapping here. */
static struct shm_region {
    abi_ulong start;    /* guest address the segment is attached at */
    abi_ulong size;     /* size of the mapping */
    bool in_use;        /* slot occupancy flag */
} shm_regions[N_SHM_REGIONS];
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
/* Guest-ABI layout of semid64_ds; the padding words exist only on
 * 32-bit ABIs where the time fields occupy two words each. */
struct target_semid64_ds
{
  struct target_ipc_perm sem_perm;
  abi_ulong sem_otime;      /* last semop time */
#if TARGET_ABI_BITS == 32
  abi_ulong __unused1;
#endif
  abi_ulong sem_ctime;      /* last change time */
#if TARGET_ABI_BITS == 32
  abi_ulong __unused2;
#endif
  abi_ulong sem_nsems;      /* number of semaphores in the set */
  abi_ulong __unused3;
  abi_ulong __unused4;
};
#endif
/* Copy a guest ipc_perm (embedded in a semid64_ds at target_addr) into
 * host form, byte-swapping each field.  The mode/__seq widths differ
 * per target ABI, hence the arch #ifdefs. */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);   /* 32-bit mode on these */
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq); /* 32-bit seq on PPC */
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/* Copy a host ipc_perm back into the guest's semid64_ds at target_addr,
 * byte-swapping each field (mirror of target_to_host_ipc_perm). */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);   /* 32-bit mode on these */
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq); /* 32-bit seq on PPC */
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
4280 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
4281 abi_ulong target_addr)
4283 struct target_semid64_ds *target_sd;
4285 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4286 return -TARGET_EFAULT;
4287 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
4288 return -TARGET_EFAULT;
4289 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
4290 host_sd->sem_otime = tswapal(target_sd->sem_otime);
4291 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
4292 unlock_user_struct(target_sd, target_addr, 0);
4293 return 0;
4296 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
4297 struct semid_ds *host_sd)
4299 struct target_semid64_ds *target_sd;
4301 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4302 return -TARGET_EFAULT;
4303 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
4304 return -TARGET_EFAULT;
4305 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
4306 target_sd->sem_otime = tswapal(host_sd->sem_otime);
4307 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
4308 unlock_user_struct(target_sd, target_addr, 1);
4309 return 0;
/* Guest-ABI layout of struct seminfo (semctl IPC_INFO/SEM_INFO);
 * field order matches the host's <sys/sem.h> seminfo. */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
4325 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
4326 struct seminfo *host_seminfo)
4328 struct target_seminfo *target_seminfo;
4329 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
4330 return -TARGET_EFAULT;
4331 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
4332 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
4333 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
4334 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
4335 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
4336 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
4337 __put_user(host_seminfo->semume, &target_seminfo->semume);
4338 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
4339 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
4340 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
4341 unlock_user_struct(target_seminfo, target_addr, 1);
4342 return 0;
/* Host-side semctl argument union (glibc does not define this). */
union semun {
	int val;
	struct semid_ds *buf;
	unsigned short *array;
	struct seminfo *__buf;
};

/* Guest-side view of the same union: all pointer members are guest
 * addresses of abi_ulong width. */
union target_semun {
	int val;
	abi_ulong buf;
	abi_ulong array;
	abi_ulong __buf;
};
4359 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
4360 abi_ulong target_addr)
4362 int nsems;
4363 unsigned short *array;
4364 union semun semun;
4365 struct semid_ds semid_ds;
4366 int i, ret;
4368 semun.buf = &semid_ds;
4370 ret = semctl(semid, 0, IPC_STAT, semun);
4371 if (ret == -1)
4372 return get_errno(ret);
4374 nsems = semid_ds.sem_nsems;
4376 *host_array = g_try_new(unsigned short, nsems);
4377 if (!*host_array) {
4378 return -TARGET_ENOMEM;
4380 array = lock_user(VERIFY_READ, target_addr,
4381 nsems*sizeof(unsigned short), 1);
4382 if (!array) {
4383 g_free(*host_array);
4384 return -TARGET_EFAULT;
4387 for(i=0; i<nsems; i++) {
4388 __get_user((*host_array)[i], &array[i]);
4390 unlock_user(array, target_addr, 0);
4392 return 0;
4395 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4396 unsigned short **host_array)
4398 int nsems;
4399 unsigned short *array;
4400 union semun semun;
4401 struct semid_ds semid_ds;
4402 int i, ret;
4404 semun.buf = &semid_ds;
4406 ret = semctl(semid, 0, IPC_STAT, semun);
4407 if (ret == -1)
4408 return get_errno(ret);
4410 nsems = semid_ds.sem_nsems;
4412 array = lock_user(VERIFY_WRITE, target_addr,
4413 nsems*sizeof(unsigned short), 0);
4414 if (!array)
4415 return -TARGET_EFAULT;
4417 for(i=0; i<nsems; i++) {
4418 __put_user((*host_array)[i], &array[i]);
4420 g_free(*host_array);
4421 unlock_user(array, target_addr, 1);
4423 return 0;
/* Emulate semctl(2): convert the guest's semun argument per command,
 * issue the host call, and convert results back.
 * NOTE(review): 'cmd &= 0xff' presumably strips the guest's IPC_64
 * flag bits — confirm against the target ipc headers. */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    cmd &= 0xff;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element.  To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        /* Bounce the whole value array through a host copy. */
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        /* Bounce semid_ds through a host copy in both directions. */
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* These commands take no semun argument. */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
/* Guest-side layout of struct sembuf (semop(2) operation descriptor). */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
4502 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4503 abi_ulong target_addr,
4504 unsigned nsops)
4506 struct target_sembuf *target_sembuf;
4507 int i;
4509 target_sembuf = lock_user(VERIFY_READ, target_addr,
4510 nsops*sizeof(struct target_sembuf), 1);
4511 if (!target_sembuf)
4512 return -TARGET_EFAULT;
4514 for(i=0; i<nsops; i++) {
4515 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4516 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4517 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4520 unlock_user(target_sembuf, target_addr, 0);
4522 return 0;
4525 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
4527 struct sembuf sops[nsops];
4529 if (target_to_host_sembuf(sops, ptr, nsops))
4530 return -TARGET_EFAULT;
4532 return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
4535 struct target_msqid_ds
4537 struct target_ipc_perm msg_perm;
4538 abi_ulong msg_stime;
4539 #if TARGET_ABI_BITS == 32
4540 abi_ulong __unused1;
4541 #endif
4542 abi_ulong msg_rtime;
4543 #if TARGET_ABI_BITS == 32
4544 abi_ulong __unused2;
4545 #endif
4546 abi_ulong msg_ctime;
4547 #if TARGET_ABI_BITS == 32
4548 abi_ulong __unused3;
4549 #endif
4550 abi_ulong __msg_cbytes;
4551 abi_ulong msg_qnum;
4552 abi_ulong msg_qbytes;
4553 abi_ulong msg_lspid;
4554 abi_ulong msg_lrpid;
4555 abi_ulong __unused4;
4556 abi_ulong __unused5;
4559 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4560 abi_ulong target_addr)
4562 struct target_msqid_ds *target_md;
4564 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4565 return -TARGET_EFAULT;
4566 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4567 return -TARGET_EFAULT;
4568 host_md->msg_stime = tswapal(target_md->msg_stime);
4569 host_md->msg_rtime = tswapal(target_md->msg_rtime);
4570 host_md->msg_ctime = tswapal(target_md->msg_ctime);
4571 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4572 host_md->msg_qnum = tswapal(target_md->msg_qnum);
4573 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4574 host_md->msg_lspid = tswapal(target_md->msg_lspid);
4575 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4576 unlock_user_struct(target_md, target_addr, 0);
4577 return 0;
4580 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4581 struct msqid_ds *host_md)
4583 struct target_msqid_ds *target_md;
4585 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4586 return -TARGET_EFAULT;
4587 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4588 return -TARGET_EFAULT;
4589 target_md->msg_stime = tswapal(host_md->msg_stime);
4590 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4591 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4592 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4593 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4594 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4595 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4596 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4597 unlock_user_struct(target_md, target_addr, 1);
4598 return 0;
/* Guest-side layout of struct msginfo (msgctl IPC_INFO/MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4612 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4613 struct msginfo *host_msginfo)
4615 struct target_msginfo *target_msginfo;
4616 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4617 return -TARGET_EFAULT;
4618 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4619 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4620 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4621 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4622 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4623 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4624 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4625 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4626 unlock_user_struct(target_msginfo, target_addr, 1);
4627 return 0;
4630 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4632 struct msqid_ds dsarg;
4633 struct msginfo msginfo;
4634 abi_long ret = -TARGET_EINVAL;
4636 cmd &= 0xff;
4638 switch (cmd) {
4639 case IPC_STAT:
4640 case IPC_SET:
4641 case MSG_STAT:
4642 if (target_to_host_msqid_ds(&dsarg,ptr))
4643 return -TARGET_EFAULT;
4644 ret = get_errno(msgctl(msgid, cmd, &dsarg));
4645 if (host_to_target_msqid_ds(ptr,&dsarg))
4646 return -TARGET_EFAULT;
4647 break;
4648 case IPC_RMID:
4649 ret = get_errno(msgctl(msgid, cmd, NULL));
4650 break;
4651 case IPC_INFO:
4652 case MSG_INFO:
4653 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4654 if (host_to_target_msginfo(ptr, &msginfo))
4655 return -TARGET_EFAULT;
4656 break;
4659 return ret;
4662 struct target_msgbuf {
4663 abi_long mtype;
4664 char mtext[1];
4667 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4668 ssize_t msgsz, int msgflg)
4670 struct target_msgbuf *target_mb;
4671 struct msgbuf *host_mb;
4672 abi_long ret = 0;
4674 if (msgsz < 0) {
4675 return -TARGET_EINVAL;
4678 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4679 return -TARGET_EFAULT;
4680 host_mb = g_try_malloc(msgsz + sizeof(long));
4681 if (!host_mb) {
4682 unlock_user_struct(target_mb, msgp, 0);
4683 return -TARGET_ENOMEM;
4685 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4686 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4687 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4688 g_free(host_mb);
4689 unlock_user_struct(target_mb, msgp, 0);
4691 return ret;
4694 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4695 ssize_t msgsz, abi_long msgtyp,
4696 int msgflg)
4698 struct target_msgbuf *target_mb;
4699 char *target_mtext;
4700 struct msgbuf *host_mb;
4701 abi_long ret = 0;
4703 if (msgsz < 0) {
4704 return -TARGET_EINVAL;
4707 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4708 return -TARGET_EFAULT;
4710 host_mb = g_try_malloc(msgsz + sizeof(long));
4711 if (!host_mb) {
4712 ret = -TARGET_ENOMEM;
4713 goto end;
4715 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4717 if (ret > 0) {
4718 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4719 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4720 if (!target_mtext) {
4721 ret = -TARGET_EFAULT;
4722 goto end;
4724 memcpy(target_mb->mtext, host_mb->mtext, ret);
4725 unlock_user(target_mtext, target_mtext_addr, ret);
4728 target_mb->mtype = tswapal(host_mb->mtype);
4730 end:
4731 if (target_mb)
4732 unlock_user_struct(target_mb, msgp, 1);
4733 g_free(host_mb);
4734 return ret;
4737 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4738 abi_ulong target_addr)
4740 struct target_shmid_ds *target_sd;
4742 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4743 return -TARGET_EFAULT;
4744 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4745 return -TARGET_EFAULT;
4746 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4747 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4748 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4749 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4750 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4751 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4752 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4753 unlock_user_struct(target_sd, target_addr, 0);
4754 return 0;
4757 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4758 struct shmid_ds *host_sd)
4760 struct target_shmid_ds *target_sd;
4762 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4763 return -TARGET_EFAULT;
4764 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4765 return -TARGET_EFAULT;
4766 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4767 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4768 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4769 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4770 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4771 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4772 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4773 unlock_user_struct(target_sd, target_addr, 1);
4774 return 0;
4777 struct target_shminfo {
4778 abi_ulong shmmax;
4779 abi_ulong shmmin;
4780 abi_ulong shmmni;
4781 abi_ulong shmseg;
4782 abi_ulong shmall;
4785 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4786 struct shminfo *host_shminfo)
4788 struct target_shminfo *target_shminfo;
4789 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4790 return -TARGET_EFAULT;
4791 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4792 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4793 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4794 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4795 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4796 unlock_user_struct(target_shminfo, target_addr, 1);
4797 return 0;
4800 struct target_shm_info {
4801 int used_ids;
4802 abi_ulong shm_tot;
4803 abi_ulong shm_rss;
4804 abi_ulong shm_swp;
4805 abi_ulong swap_attempts;
4806 abi_ulong swap_successes;
4809 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4810 struct shm_info *host_shm_info)
4812 struct target_shm_info *target_shm_info;
4813 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4814 return -TARGET_EFAULT;
4815 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4816 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4817 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4818 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4819 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4820 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4821 unlock_user_struct(target_shm_info, target_addr, 1);
4822 return 0;
4825 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4827 struct shmid_ds dsarg;
4828 struct shminfo shminfo;
4829 struct shm_info shm_info;
4830 abi_long ret = -TARGET_EINVAL;
4832 cmd &= 0xff;
4834 switch(cmd) {
4835 case IPC_STAT:
4836 case IPC_SET:
4837 case SHM_STAT:
4838 if (target_to_host_shmid_ds(&dsarg, buf))
4839 return -TARGET_EFAULT;
4840 ret = get_errno(shmctl(shmid, cmd, &dsarg));
4841 if (host_to_target_shmid_ds(buf, &dsarg))
4842 return -TARGET_EFAULT;
4843 break;
4844 case IPC_INFO:
4845 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4846 if (host_to_target_shminfo(buf, &shminfo))
4847 return -TARGET_EFAULT;
4848 break;
4849 case SHM_INFO:
4850 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4851 if (host_to_target_shm_info(buf, &shm_info))
4852 return -TARGET_EFAULT;
4853 break;
4854 case IPC_RMID:
4855 case SHM_LOCK:
4856 case SHM_UNLOCK:
4857 ret = get_errno(shmctl(shmid, cmd, NULL));
4858 break;
4861 return ret;
4864 #ifndef TARGET_FORCE_SHMLBA
4865 /* For most architectures, SHMLBA is the same as the page size;
4866 * some architectures have larger values, in which case they should
4867 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4868 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4869 * and defining its own value for SHMLBA.
4871 * The kernel also permits SHMLBA to be set by the architecture to a
4872 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4873 * this means that addresses are rounded to the large size if
4874 * SHM_RND is set but addresses not aligned to that size are not rejected
4875 * as long as they are at least page-aligned. Since the only architecture
4876 * which uses this is ia64 this code doesn't provide for that oddity.
4878 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4880 return TARGET_PAGE_SIZE;
4882 #endif
4884 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4885 int shmid, abi_ulong shmaddr, int shmflg)
4887 abi_long raddr;
4888 void *host_raddr;
4889 struct shmid_ds shm_info;
4890 int i,ret;
4891 abi_ulong shmlba;
4893 /* find out the length of the shared memory segment */
4894 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4895 if (is_error(ret)) {
4896 /* can't get length, bail out */
4897 return ret;
4900 shmlba = target_shmlba(cpu_env);
4902 if (shmaddr & (shmlba - 1)) {
4903 if (shmflg & SHM_RND) {
4904 shmaddr &= ~(shmlba - 1);
4905 } else {
4906 return -TARGET_EINVAL;
4910 mmap_lock();
4912 if (shmaddr)
4913 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4914 else {
4915 abi_ulong mmap_start;
4917 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
4919 if (mmap_start == -1) {
4920 errno = ENOMEM;
4921 host_raddr = (void *)-1;
4922 } else
4923 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4926 if (host_raddr == (void *)-1) {
4927 mmap_unlock();
4928 return get_errno((long)host_raddr);
4930 raddr=h2g((unsigned long)host_raddr);
4932 page_set_flags(raddr, raddr + shm_info.shm_segsz,
4933 PAGE_VALID | PAGE_READ |
4934 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4936 for (i = 0; i < N_SHM_REGIONS; i++) {
4937 if (!shm_regions[i].in_use) {
4938 shm_regions[i].in_use = true;
4939 shm_regions[i].start = raddr;
4940 shm_regions[i].size = shm_info.shm_segsz;
4941 break;
4945 mmap_unlock();
4946 return raddr;
4950 static inline abi_long do_shmdt(abi_ulong shmaddr)
4952 int i;
4954 for (i = 0; i < N_SHM_REGIONS; ++i) {
4955 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4956 shm_regions[i].in_use = false;
4957 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4958 break;
4962 return get_errno(shmdt(g2h(shmaddr)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos.  Dispatches the
 * multiplexed ipc(2) syscall to the individual do_sem*/
/* do_msg* / do_shm* helpers above, handling the historical "version"
 * encoding in the high 16 bits of the call number. */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-style callers pass a kludge struct holding both the
                 * message pointer and the type. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
	break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
	/* IPC_* flag values are the same on all linux platforms */
	ret = get_errno(shmget(first, second, third));
	break;

	/* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
	gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
	ret = -TARGET_ENOSYS;
	break;
    }
    return ret;
}
#endif
5072 /* kernel structure types definitions */
5074 #define STRUCT(name, ...) STRUCT_ ## name,
5075 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
5076 enum {
5077 #include "syscall_types.h"
5078 STRUCT_MAX
5080 #undef STRUCT
5081 #undef STRUCT_SPECIAL
5083 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
5084 #define STRUCT_SPECIAL(name)
5085 #include "syscall_types.h"
5086 #undef STRUCT
5087 #undef STRUCT_SPECIAL
5089 typedef struct IOCTLEntry IOCTLEntry;
5091 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
5092 int fd, int cmd, abi_long arg);
5094 struct IOCTLEntry {
5095 int target_cmd;
5096 unsigned int host_cmd;
5097 const char *name;
5098 int access;
5099 do_ioctl_fn *do_ioctl;
5100 const argtype arg_type[5];
5103 #define IOC_R 0x0001
5104 #define IOC_W 0x0002
5105 #define IOC_RW (IOC_R | IOC_W)
5107 #define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
5198 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
5199 int fd, int cmd, abi_long arg)
5201 const argtype *arg_type = ie->arg_type;
5202 int target_size;
5203 void *argptr;
5204 int ret;
5205 struct ifconf *host_ifconf;
5206 uint32_t outbufsz;
5207 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
5208 int target_ifreq_size;
5209 int nb_ifreq;
5210 int free_buf = 0;
5211 int i;
5212 int target_ifc_len;
5213 abi_long target_ifc_buf;
5214 int host_ifc_len;
5215 char *host_ifc_buf;
5217 assert(arg_type[0] == TYPE_PTR);
5218 assert(ie->access == IOC_RW);
5220 arg_type++;
5221 target_size = thunk_type_size(arg_type, 0);
5223 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5224 if (!argptr)
5225 return -TARGET_EFAULT;
5226 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5227 unlock_user(argptr, arg, 0);
5229 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
5230 target_ifc_len = host_ifconf->ifc_len;
5231 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
5233 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
5234 nb_ifreq = target_ifc_len / target_ifreq_size;
5235 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
5237 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
5238 if (outbufsz > MAX_STRUCT_SIZE) {
5239 /* We can't fit all the extents into the fixed size buffer.
5240 * Allocate one that is large enough and use it instead.
5242 host_ifconf = malloc(outbufsz);
5243 if (!host_ifconf) {
5244 return -TARGET_ENOMEM;
5246 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
5247 free_buf = 1;
5249 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
5251 host_ifconf->ifc_len = host_ifc_len;
5252 host_ifconf->ifc_buf = host_ifc_buf;
5254 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
5255 if (!is_error(ret)) {
5256 /* convert host ifc_len to target ifc_len */
5258 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
5259 target_ifc_len = nb_ifreq * target_ifreq_size;
5260 host_ifconf->ifc_len = target_ifc_len;
5262 /* restore target ifc_buf */
5264 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
5266 /* copy struct ifconf to target user */
5268 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5269 if (!argptr)
5270 return -TARGET_EFAULT;
5271 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
5272 unlock_user(argptr, arg, target_size);
5274 /* copy ifreq[] to target user */
5276 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
5277 for (i = 0; i < nb_ifreq ; i++) {
5278 thunk_convert(argptr + i * target_ifreq_size,
5279 host_ifc_buf + i * sizeof(struct ifreq),
5280 ifreq_arg_type, THUNK_TARGET);
5282 unlock_user(argptr, target_ifc_buf, target_ifc_len);
5285 if (free_buf) {
5286 free(host_ifconf);
5289 return ret;
5292 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5293 int cmd, abi_long arg)
5295 void *argptr;
5296 struct dm_ioctl *host_dm;
5297 abi_long guest_data;
5298 uint32_t guest_data_size;
5299 int target_size;
5300 const argtype *arg_type = ie->arg_type;
5301 abi_long ret;
5302 void *big_buf = NULL;
5303 char *host_data;
5305 arg_type++;
5306 target_size = thunk_type_size(arg_type, 0);
5307 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5308 if (!argptr) {
5309 ret = -TARGET_EFAULT;
5310 goto out;
5312 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5313 unlock_user(argptr, arg, 0);
5315 /* buf_temp is too small, so fetch things into a bigger buffer */
5316 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5317 memcpy(big_buf, buf_temp, target_size);
5318 buf_temp = big_buf;
5319 host_dm = big_buf;
5321 guest_data = arg + host_dm->data_start;
5322 if ((guest_data - arg) < 0) {
5323 ret = -TARGET_EINVAL;
5324 goto out;
5326 guest_data_size = host_dm->data_size - host_dm->data_start;
5327 host_data = (char*)host_dm + host_dm->data_start;
5329 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5330 if (!argptr) {
5331 ret = -TARGET_EFAULT;
5332 goto out;
5335 switch (ie->host_cmd) {
5336 case DM_REMOVE_ALL:
5337 case DM_LIST_DEVICES:
5338 case DM_DEV_CREATE:
5339 case DM_DEV_REMOVE:
5340 case DM_DEV_SUSPEND:
5341 case DM_DEV_STATUS:
5342 case DM_DEV_WAIT:
5343 case DM_TABLE_STATUS:
5344 case DM_TABLE_CLEAR:
5345 case DM_TABLE_DEPS:
5346 case DM_LIST_VERSIONS:
5347 /* no input data */
5348 break;
5349 case DM_DEV_RENAME:
5350 case DM_DEV_SET_GEOMETRY:
5351 /* data contains only strings */
5352 memcpy(host_data, argptr, guest_data_size);
5353 break;
5354 case DM_TARGET_MSG:
5355 memcpy(host_data, argptr, guest_data_size);
5356 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5357 break;
5358 case DM_TABLE_LOAD:
5360 void *gspec = argptr;
5361 void *cur_data = host_data;
5362 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5363 int spec_size = thunk_type_size(arg_type, 0);
5364 int i;
5366 for (i = 0; i < host_dm->target_count; i++) {
5367 struct dm_target_spec *spec = cur_data;
5368 uint32_t next;
5369 int slen;
5371 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5372 slen = strlen((char*)gspec + spec_size) + 1;
5373 next = spec->next;
5374 spec->next = sizeof(*spec) + slen;
5375 strcpy((char*)&spec[1], gspec + spec_size);
5376 gspec += next;
5377 cur_data += spec->next;
5379 break;
5381 default:
5382 ret = -TARGET_EINVAL;
5383 unlock_user(argptr, guest_data, 0);
5384 goto out;
5386 unlock_user(argptr, guest_data, 0);
5388 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5389 if (!is_error(ret)) {
5390 guest_data = arg + host_dm->data_start;
5391 guest_data_size = host_dm->data_size - host_dm->data_start;
5392 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5393 switch (ie->host_cmd) {
5394 case DM_REMOVE_ALL:
5395 case DM_DEV_CREATE:
5396 case DM_DEV_REMOVE:
5397 case DM_DEV_RENAME:
5398 case DM_DEV_SUSPEND:
5399 case DM_DEV_STATUS:
5400 case DM_TABLE_LOAD:
5401 case DM_TABLE_CLEAR:
5402 case DM_TARGET_MSG:
5403 case DM_DEV_SET_GEOMETRY:
5404 /* no return data */
5405 break;
5406 case DM_LIST_DEVICES:
5408 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5409 uint32_t remaining_data = guest_data_size;
5410 void *cur_data = argptr;
5411 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5412 int nl_size = 12; /* can't use thunk_size due to alignment */
5414 while (1) {
5415 uint32_t next = nl->next;
5416 if (next) {
5417 nl->next = nl_size + (strlen(nl->name) + 1);
5419 if (remaining_data < nl->next) {
5420 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5421 break;
5423 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5424 strcpy(cur_data + nl_size, nl->name);
5425 cur_data += nl->next;
5426 remaining_data -= nl->next;
5427 if (!next) {
5428 break;
5430 nl = (void*)nl + next;
5432 break;
5434 case DM_DEV_WAIT:
5435 case DM_TABLE_STATUS:
5437 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5438 void *cur_data = argptr;
5439 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5440 int spec_size = thunk_type_size(arg_type, 0);
5441 int i;
5443 for (i = 0; i < host_dm->target_count; i++) {
5444 uint32_t next = spec->next;
5445 int slen = strlen((char*)&spec[1]) + 1;
5446 spec->next = (cur_data - argptr) + spec_size + slen;
5447 if (guest_data_size < spec->next) {
5448 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5449 break;
5451 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5452 strcpy(cur_data + spec_size, (char*)&spec[1]);
5453 cur_data = argptr + spec->next;
5454 spec = (void*)host_dm + host_dm->data_start + next;
5456 break;
5458 case DM_TABLE_DEPS:
5460 void *hdata = (void*)host_dm + host_dm->data_start;
5461 int count = *(uint32_t*)hdata;
5462 uint64_t *hdev = hdata + 8;
5463 uint64_t *gdev = argptr + 8;
5464 int i;
5466 *(uint32_t*)argptr = tswap32(count);
5467 for (i = 0; i < count; i++) {
5468 *gdev = tswap64(*hdev);
5469 gdev++;
5470 hdev++;
5472 break;
5474 case DM_LIST_VERSIONS:
5476 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5477 uint32_t remaining_data = guest_data_size;
5478 void *cur_data = argptr;
5479 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5480 int vers_size = thunk_type_size(arg_type, 0);
5482 while (1) {
5483 uint32_t next = vers->next;
5484 if (next) {
5485 vers->next = vers_size + (strlen(vers->name) + 1);
5487 if (remaining_data < vers->next) {
5488 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5489 break;
5491 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5492 strcpy(cur_data + vers_size, vers->name);
5493 cur_data += vers->next;
5494 remaining_data -= vers->next;
5495 if (!next) {
5496 break;
5498 vers = (void*)vers + next;
5500 break;
5502 default:
5503 unlock_user(argptr, guest_data, 0);
5504 ret = -TARGET_EINVAL;
5505 goto out;
5507 unlock_user(argptr, guest_data, guest_data_size);
5509 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5510 if (!argptr) {
5511 ret = -TARGET_EFAULT;
5512 goto out;
5514 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5515 unlock_user(argptr, arg, target_size);
5517 out:
5518 g_free(big_buf);
5519 return ret;
/*
 * BLKPG ioctl: struct blkpg_ioctl_arg embeds a pointer to a
 * struct blkpg_partition payload, which the generic thunk machinery
 * cannot follow.  Convert the outer struct, then fetch and convert the
 * payload by hand and redirect the pointer to a host-side copy.
 * Returns 0 / host errno via get_errno(), or -TARGET_EFAULT /
 * -TARGET_EINVAL on conversion failure.
 */
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
5575 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5576 int fd, int cmd, abi_long arg)
5578 const argtype *arg_type = ie->arg_type;
5579 const StructEntry *se;
5580 const argtype *field_types;
5581 const int *dst_offsets, *src_offsets;
5582 int target_size;
5583 void *argptr;
5584 abi_ulong *target_rt_dev_ptr;
5585 unsigned long *host_rt_dev_ptr;
5586 abi_long ret;
5587 int i;
5589 assert(ie->access == IOC_W);
5590 assert(*arg_type == TYPE_PTR);
5591 arg_type++;
5592 assert(*arg_type == TYPE_STRUCT);
5593 target_size = thunk_type_size(arg_type, 0);
5594 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5595 if (!argptr) {
5596 return -TARGET_EFAULT;
5598 arg_type++;
5599 assert(*arg_type == (int)STRUCT_rtentry);
5600 se = struct_entries + *arg_type++;
5601 assert(se->convert[0] == NULL);
5602 /* convert struct here to be able to catch rt_dev string */
5603 field_types = se->field_types;
5604 dst_offsets = se->field_offsets[THUNK_HOST];
5605 src_offsets = se->field_offsets[THUNK_TARGET];
5606 for (i = 0; i < se->nb_fields; i++) {
5607 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5608 assert(*field_types == TYPE_PTRVOID);
5609 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5610 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5611 if (*target_rt_dev_ptr != 0) {
5612 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5613 tswapal(*target_rt_dev_ptr));
5614 if (!*host_rt_dev_ptr) {
5615 unlock_user(argptr, arg, 0);
5616 return -TARGET_EFAULT;
5618 } else {
5619 *host_rt_dev_ptr = 0;
5621 field_types++;
5622 continue;
5624 field_types = thunk_convert(buf_temp + dst_offsets[i],
5625 argptr + src_offsets[i],
5626 field_types, THUNK_HOST);
5628 unlock_user(argptr, arg, 0);
5630 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5631 if (*host_rt_dev_ptr != 0) {
5632 unlock_user((void *)*host_rt_dev_ptr,
5633 *target_rt_dev_ptr, 0);
5635 return ret;
5638 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5639 int fd, int cmd, abi_long arg)
5641 int sig = target_to_host_signal(arg);
5642 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
/*
 * Translation table for all supported ioctls, generated from ioctls.h.
 * IOCTL_SPECIAL entries dispatch to a custom do_ioctl_*() handler;
 * IOCTL_IGNORE entries are recognized but have no host equivalent
 * (host_cmd == 0).  The table is terminated by a zero target_cmd.
 */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, },
};
/* ??? Implement proper locking for ioctls. */
/* do_ioctl() Must return target values and target errnos. */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear search; the table ends with a zero target_cmd sentinel. */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    if (ie->do_ioctl) {
        /* IOCTL_SPECIAL entry: the handler does its own conversion. */
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux. */
        return -TARGET_ENOSYS;
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* Value passed through unchanged. */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        /* Pointer to a structure: convert through the thunk machinery in
         * the direction(s) given by ie->access. */
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* Kernel fills buf_temp; convert it back out on success. */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* Convert guest struct in, then call. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* Convert in, call, convert the result back out. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
/* termios input-mode (c_iflag) bit translation, target <-> host. */
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
        { 0, 0, 0, 0 }
};
/* termios output-mode (c_oflag) bit translation; the delay fields
 * (NLDLY/CRDLY/...) are multi-bit, hence one row per value. */
static const bitmask_transtbl oflag_tbl[] = {
	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
	{ 0, 0, 0, 0 }
};
/* termios control-mode (c_cflag) translation; CBAUD and CSIZE are
 * multi-bit fields, hence one row per baud rate / character size. */
static const bitmask_transtbl cflag_tbl[] = {
	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
	{ 0, 0, 0, 0 }
};
/* termios local-mode (c_lflag) bit translation, target <-> host. */
static const bitmask_transtbl lflag_tbl[] = {
	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
	{ 0, 0, 0, 0 }
};
5847 static void target_to_host_termios (void *dst, const void *src)
5849 struct host_termios *host = dst;
5850 const struct target_termios *target = src;
5852 host->c_iflag =
5853 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5854 host->c_oflag =
5855 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5856 host->c_cflag =
5857 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5858 host->c_lflag =
5859 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5860 host->c_line = target->c_line;
5862 memset(host->c_cc, 0, sizeof(host->c_cc));
5863 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5864 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5865 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5866 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5867 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5868 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5869 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5870 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5871 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5872 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5873 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5874 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5875 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5876 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5877 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5878 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5879 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5882 static void host_to_target_termios (void *dst, const void *src)
5884 struct target_termios *target = dst;
5885 const struct host_termios *host = src;
5887 target->c_iflag =
5888 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5889 target->c_oflag =
5890 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5891 target->c_cflag =
5892 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5893 target->c_lflag =
5894 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5895 target->c_line = host->c_line;
5897 memset(target->c_cc, 0, sizeof(target->c_cc));
5898 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5899 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5900 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5901 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5902 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5903 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5904 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5905 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5906 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5907 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5908 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5909 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5910 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5911 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5912 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5913 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5914 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
/* Struct descriptor used by the thunk machinery to convert struct
 * termios between target and host layouts via the two converters above. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};
/* mmap(2) flag translation, target <-> host. */
static bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host.  */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { 0, 0, 0, 0 }
};
#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
/* Host-side shadow of the emulated LDT; lazily allocated by write_ldt(). */
static uint8_t *ldt_table;
5951 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5953 int size;
5954 void *p;
5956 if (!ldt_table)
5957 return 0;
5958 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5959 if (size > bytecount)
5960 size = bytecount;
5961 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5962 if (!p)
5963 return -TARGET_EFAULT;
5964 /* ??? Should this by byteswapped? */
5965 memcpy(p, ldt_table, size);
5966 unlock_user(p, ptr, size);
5967 return size;
/* XXX: add locking support */
/*
 * modify_ldt(2) write path: validate the guest descriptor request and
 * install the packed 8-byte entry into the emulated LDT, allocating the
 * table on first use.  'oldmode' selects the legacy modify_ldt(1)
 * semantics.  Returns 0, -TARGET_EINVAL, -TARGET_EFAULT or
 * -TARGET_ENOMEM.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the flags word (same layout as the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0		&&
             read_exec_only == 1	&&
             seg_32bit == 0		&&
             limit_in_pages == 0	&&
             seg_not_present == 1	&&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the descriptor words (x86 segment descriptor layout). */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
6062 /* specific and weird i386 syscalls */
6063 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6064 unsigned long bytecount)
6066 abi_long ret;
6068 switch (func) {
6069 case 0:
6070 ret = read_ldt(ptr, bytecount);
6071 break;
6072 case 1:
6073 ret = write_ldt(env, ptr, bytecount, 1);
6074 break;
6075 case 0x11:
6076 ret = write_ldt(env, ptr, bytecount, 0);
6077 break;
6078 default:
6079 ret = -TARGET_ENOSYS;
6080 break;
6082 return ret;
#if defined(TARGET_I386) && defined(TARGET_ABI32)
/*
 * set_thread_area(2): install a TLS segment descriptor in the GDT.
 * An entry_number of -1 asks us to pick a free TLS slot and report it
 * back to the guest.  Returns 0, -TARGET_EFAULT or -TARGET_EINVAL.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* Find a free TLS slot and tell the guest which one we chose. */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* Unpack the flags word (same layout as the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the descriptor words (x86 segment descriptor layout). */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
/*
 * get_thread_area(2): read a TLS descriptor back out of the GDT and
 * unpack it into the guest's user_desc layout.  The guest supplies the
 * entry_number; returns 0, -TARGET_EFAULT or -TARGET_EINVAL.
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Unpack the x86 segment descriptor (inverse of do_set_thread_area). */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
6217 #endif /* TARGET_I386 && TARGET_ABI32 */
6219 #ifndef TARGET_ABI32
6220 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6222 abi_long ret = 0;
6223 abi_ulong val;
6224 int idx;
6226 switch(code) {
6227 case TARGET_ARCH_SET_GS:
6228 case TARGET_ARCH_SET_FS:
6229 if (code == TARGET_ARCH_SET_GS)
6230 idx = R_GS;
6231 else
6232 idx = R_FS;
6233 cpu_x86_load_seg(env, idx, 0);
6234 env->segs[idx].base = addr;
6235 break;
6236 case TARGET_ARCH_GET_GS:
6237 case TARGET_ARCH_GET_FS:
6238 if (code == TARGET_ARCH_GET_GS)
6239 idx = R_GS;
6240 else
6241 idx = R_FS;
6242 val = env->segs[idx].base;
6243 if (put_user(val, addr, abi_ulong))
6244 ret = -TARGET_EFAULT;
6245 break;
6246 default:
6247 ret = -TARGET_EINVAL;
6248 break;
6250 return ret;
6252 #endif
6254 #endif /* defined(TARGET_I386) */
6256 #define NEW_STACK_SIZE 0x40000
/* Serializes thread creation: the parent holds this across thread setup
 * so the child (see clone_func) waits until TLS state is initialized. */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;

/* Handshake data passed from do_fork() to the new thread. */
typedef struct {
    CPUArchState *env;        /* CPU state for the new thread */
    pthread_mutex_t mutex;    /* protects the cond handshake below */
    pthread_cond_t cond;      /* signalled once the child is ready */
    pthread_t thread;
    uint32_t tid;             /* child's kernel thread id, set by the child */
    abi_ulong child_tidptr;   /* guest address for CLONE_CHILD_SETTID */
    abi_ulong parent_tidptr;  /* guest address for CLONE_PARENT_SETTID */
    sigset_t sigmask;         /* signal mask the child restores */
} new_thread_info;
/*
 * Entry point for a thread created by do_fork(CLONE_VM): perform
 * per-thread registration, publish the tid, signal the parent that
 * setup is done, then wait for the parent to release clone_lock before
 * entering the CPU loop.  Never returns.
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = gettid();
    task_settid(ts);
    /* Publish our tid wherever CLONE_CHILD_SETTID / CLONE_PARENT_SETTID
     * asked for it. */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
/*
 * Implement clone(2)/fork(2)/vfork(2).  CLONE_VM requests become host
 * pthreads sharing this process; anything else becomes a host fork().
 * vfork is emulated as a plain fork.
 */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_cpu = ENV_GET_CPU(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        /* If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         */
        if (!parallel_cpus) {
            parallel_cpus = true;
            tb_flush(cpu);
        }

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs(env, newsp);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            fork_end(0);
        }
    }
    return ret;
}
6443 /* warning : doesn't handle linux specific flags... */
6444 static int target_to_host_fcntl_cmd(int cmd)
6446 switch(cmd) {
6447 case TARGET_F_DUPFD:
6448 case TARGET_F_GETFD:
6449 case TARGET_F_SETFD:
6450 case TARGET_F_GETFL:
6451 case TARGET_F_SETFL:
6452 return cmd;
6453 case TARGET_F_GETLK:
6454 return F_GETLK64;
6455 case TARGET_F_SETLK:
6456 return F_SETLK64;
6457 case TARGET_F_SETLKW:
6458 return F_SETLKW64;
6459 case TARGET_F_GETOWN:
6460 return F_GETOWN;
6461 case TARGET_F_SETOWN:
6462 return F_SETOWN;
6463 case TARGET_F_GETSIG:
6464 return F_GETSIG;
6465 case TARGET_F_SETSIG:
6466 return F_SETSIG;
6467 #if TARGET_ABI_BITS == 32
6468 case TARGET_F_GETLK64:
6469 return F_GETLK64;
6470 case TARGET_F_SETLK64:
6471 return F_SETLK64;
6472 case TARGET_F_SETLKW64:
6473 return F_SETLKW64;
6474 #endif
6475 case TARGET_F_SETLEASE:
6476 return F_SETLEASE;
6477 case TARGET_F_GETLEASE:
6478 return F_GETLEASE;
6479 #ifdef F_DUPFD_CLOEXEC
6480 case TARGET_F_DUPFD_CLOEXEC:
6481 return F_DUPFD_CLOEXEC;
6482 #endif
6483 case TARGET_F_NOTIFY:
6484 return F_NOTIFY;
6485 #ifdef F_GETOWN_EX
6486 case TARGET_F_GETOWN_EX:
6487 return F_GETOWN_EX;
6488 #endif
6489 #ifdef F_SETOWN_EX
6490 case TARGET_F_SETOWN_EX:
6491 return F_SETOWN_EX;
6492 #endif
6493 #ifdef F_SETPIPE_SZ
6494 case TARGET_F_SETPIPE_SZ:
6495 return F_SETPIPE_SZ;
6496 case TARGET_F_GETPIPE_SZ:
6497 return F_GETPIPE_SZ;
6498 #endif
6499 default:
6500 return -TARGET_EINVAL;
6502 return -TARGET_EINVAL;
/* flock l_type translation; a mask of -1 (all bits set) makes the
 * bitmask machinery compare the whole value rather than a single bit. */
#define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
static const bitmask_transtbl flock_tbl[] = {
    TRANSTBL_CONVERT(F_RDLCK),
    TRANSTBL_CONVERT(F_WRLCK),
    TRANSTBL_CONVERT(F_UNLCK),
    TRANSTBL_CONVERT(F_EXLCK),
    TRANSTBL_CONVERT(F_SHLCK),
    { 0, 0, 0, 0 }
};
/*
 * Copy a guest struct flock into a host struct flock64, translating the
 * lock-type constant via flock_tbl.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_flock(struct flock64 *fl,
                                            abi_ulong target_flock_addr)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}
/*
 * Copy a host struct flock64 back out to a guest struct flock,
 * translating the lock-type constant via flock_tbl.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
/* Function-pointer signatures for the flock64 copy helpers, so the fcntl
 * code can select the generic or ARM-EABI variant at run time. */
typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6558 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6559 static inline abi_long copy_from_user_eabi_flock64(struct flock64 *fl,
6560 abi_ulong target_flock_addr)
6562 struct target_eabi_flock64 *target_fl;
6563 short l_type;
6565 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6566 return -TARGET_EFAULT;
6569 __get_user(l_type, &target_fl->l_type);
6570 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6571 __get_user(fl->l_whence, &target_fl->l_whence);
6572 __get_user(fl->l_start, &target_fl->l_start);
6573 __get_user(fl->l_len, &target_fl->l_len);
6574 __get_user(fl->l_pid, &target_fl->l_pid);
6575 unlock_user_struct(target_fl, target_flock_addr, 0);
6576 return 0;
6579 static inline abi_long copy_to_user_eabi_flock64(abi_ulong target_flock_addr,
6580 const struct flock64 *fl)
6582 struct target_eabi_flock64 *target_fl;
6583 short l_type;
6585 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6586 return -TARGET_EFAULT;
6589 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6590 __put_user(l_type, &target_fl->l_type);
6591 __put_user(fl->l_whence, &target_fl->l_whence);
6592 __put_user(fl->l_start, &target_fl->l_start);
6593 __put_user(fl->l_len, &target_fl->l_len);
6594 __put_user(fl->l_pid, &target_fl->l_pid);
6595 unlock_user_struct(target_fl, target_flock_addr, 1);
6596 return 0;
6598 #endif
6600 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6601 abi_ulong target_flock_addr)
6603 struct target_flock64 *target_fl;
6604 short l_type;
6606 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6607 return -TARGET_EFAULT;
6610 __get_user(l_type, &target_fl->l_type);
6611 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6612 __get_user(fl->l_whence, &target_fl->l_whence);
6613 __get_user(fl->l_start, &target_fl->l_start);
6614 __get_user(fl->l_len, &target_fl->l_len);
6615 __get_user(fl->l_pid, &target_fl->l_pid);
6616 unlock_user_struct(target_fl, target_flock_addr, 0);
6617 return 0;
6620 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6621 const struct flock64 *fl)
6623 struct target_flock64 *target_fl;
6624 short l_type;
6626 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6627 return -TARGET_EFAULT;
6630 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6631 __put_user(l_type, &target_fl->l_type);
6632 __put_user(fl->l_whence, &target_fl->l_whence);
6633 __put_user(fl->l_start, &target_fl->l_start);
6634 __put_user(fl->l_len, &target_fl->l_len);
6635 __put_user(fl->l_pid, &target_fl->l_pid);
6636 unlock_user_struct(target_fl, target_flock_addr, 1);
6637 return 0;
6640 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6642 struct flock64 fl64;
6643 #ifdef F_GETOWN_EX
6644 struct f_owner_ex fox;
6645 struct target_f_owner_ex *target_fox;
6646 #endif
6647 abi_long ret;
6648 int host_cmd = target_to_host_fcntl_cmd(cmd);
6650 if (host_cmd == -TARGET_EINVAL)
6651 return host_cmd;
6653 switch(cmd) {
6654 case TARGET_F_GETLK:
6655 ret = copy_from_user_flock(&fl64, arg);
6656 if (ret) {
6657 return ret;
6659 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6660 if (ret == 0) {
6661 ret = copy_to_user_flock(arg, &fl64);
6663 break;
6665 case TARGET_F_SETLK:
6666 case TARGET_F_SETLKW:
6667 ret = copy_from_user_flock(&fl64, arg);
6668 if (ret) {
6669 return ret;
6671 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6672 break;
6674 case TARGET_F_GETLK64:
6675 ret = copy_from_user_flock64(&fl64, arg);
6676 if (ret) {
6677 return ret;
6679 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6680 if (ret == 0) {
6681 ret = copy_to_user_flock64(arg, &fl64);
6683 break;
6684 case TARGET_F_SETLK64:
6685 case TARGET_F_SETLKW64:
6686 ret = copy_from_user_flock64(&fl64, arg);
6687 if (ret) {
6688 return ret;
6690 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6691 break;
6693 case TARGET_F_GETFL:
6694 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6695 if (ret >= 0) {
6696 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6698 break;
6700 case TARGET_F_SETFL:
6701 ret = get_errno(safe_fcntl(fd, host_cmd,
6702 target_to_host_bitmask(arg,
6703 fcntl_flags_tbl)));
6704 break;
6706 #ifdef F_GETOWN_EX
6707 case TARGET_F_GETOWN_EX:
6708 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6709 if (ret >= 0) {
6710 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6711 return -TARGET_EFAULT;
6712 target_fox->type = tswap32(fox.type);
6713 target_fox->pid = tswap32(fox.pid);
6714 unlock_user_struct(target_fox, arg, 1);
6716 break;
6717 #endif
6719 #ifdef F_SETOWN_EX
6720 case TARGET_F_SETOWN_EX:
6721 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6722 return -TARGET_EFAULT;
6723 fox.type = tswap32(target_fox->type);
6724 fox.pid = tswap32(target_fox->pid);
6725 unlock_user_struct(target_fox, arg, 0);
6726 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6727 break;
6728 #endif
6730 case TARGET_F_SETOWN:
6731 case TARGET_F_GETOWN:
6732 case TARGET_F_SETSIG:
6733 case TARGET_F_GETSIG:
6734 case TARGET_F_SETLEASE:
6735 case TARGET_F_GETLEASE:
6736 case TARGET_F_SETPIPE_SZ:
6737 case TARGET_F_GETPIPE_SZ:
6738 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6739 break;
6741 default:
6742 ret = get_errno(safe_fcntl(fd, cmd, arg));
6743 break;
6745 return ret;
6748 #ifdef USE_UID16
6750 static inline int high2lowuid(int uid)
6752 if (uid > 65535)
6753 return 65534;
6754 else
6755 return uid;
6758 static inline int high2lowgid(int gid)
6760 if (gid > 65535)
6761 return 65534;
6762 else
6763 return gid;
6766 static inline int low2highuid(int uid)
6768 if ((int16_t)uid == -1)
6769 return -1;
6770 else
6771 return uid;
6774 static inline int low2highgid(int gid)
6776 if ((int16_t)gid == -1)
6777 return -1;
6778 else
6779 return gid;
6781 static inline int tswapid(int id)
6783 return tswap16(id);
6786 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6788 #else /* !USE_UID16 */
6789 static inline int high2lowuid(int uid)
6791 return uid;
6793 static inline int high2lowgid(int gid)
6795 return gid;
6797 static inline int low2highuid(int uid)
6799 return uid;
6801 static inline int low2highgid(int gid)
6803 return gid;
6805 static inline int tswapid(int id)
6807 return tswap32(id);
6810 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6812 #endif /* USE_UID16 */
6814 /* We must do direct syscalls for setting UID/GID, because we want to
6815 * implement the Linux system call semantics of "change only for this thread",
6816 * not the libc/POSIX semantics of "change for all threads in process".
6817 * (See http://ewontfix.com/17/ for more details.)
6818 * We use the 32-bit version of the syscalls if present; if it is not
6819 * then either the host architecture supports 32-bit UIDs natively with
6820 * the standard syscall, or the 16-bit UID is the best we can do.
6822 #ifdef __NR_setuid32
6823 #define __NR_sys_setuid __NR_setuid32
6824 #else
6825 #define __NR_sys_setuid __NR_setuid
6826 #endif
6827 #ifdef __NR_setgid32
6828 #define __NR_sys_setgid __NR_setgid32
6829 #else
6830 #define __NR_sys_setgid __NR_setgid
6831 #endif
6832 #ifdef __NR_setresuid32
6833 #define __NR_sys_setresuid __NR_setresuid32
6834 #else
6835 #define __NR_sys_setresuid __NR_setresuid
6836 #endif
6837 #ifdef __NR_setresgid32
6838 #define __NR_sys_setresgid __NR_setresgid32
6839 #else
6840 #define __NR_sys_setresgid __NR_setresgid
6841 #endif
6843 _syscall1(int, sys_setuid, uid_t, uid)
6844 _syscall1(int, sys_setgid, gid_t, gid)
6845 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6846 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6848 void syscall_init(void)
6850 IOCTLEntry *ie;
6851 const argtype *arg_type;
6852 int size;
6853 int i;
6855 thunk_init(STRUCT_MAX);
6857 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6858 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6859 #include "syscall_types.h"
6860 #undef STRUCT
6861 #undef STRUCT_SPECIAL
6863 /* Build target_to_host_errno_table[] table from
6864 * host_to_target_errno_table[]. */
6865 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6866 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6869 /* we patch the ioctl size if necessary. We rely on the fact that
6870 no ioctl has all the bits at '1' in the size field */
6871 ie = ioctl_entries;
6872 while (ie->target_cmd != 0) {
6873 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6874 TARGET_IOC_SIZEMASK) {
6875 arg_type = ie->arg_type;
6876 if (arg_type[0] != TYPE_PTR) {
6877 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6878 ie->target_cmd);
6879 exit(1);
6881 arg_type++;
6882 size = thunk_type_size(arg_type, 0);
6883 ie->target_cmd = (ie->target_cmd &
6884 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6885 (size << TARGET_IOC_SIZESHIFT);
6888 /* automatic consistency check if same arch */
6889 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6890 (defined(__x86_64__) && defined(TARGET_X86_64))
6891 if (unlikely(ie->target_cmd != ie->host_cmd)) {
6892 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6893 ie->name, ie->target_cmd, ie->host_cmd);
6895 #endif
6896 ie++;
6900 #if TARGET_ABI_BITS == 32
6901 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6903 #ifdef TARGET_WORDS_BIGENDIAN
6904 return ((uint64_t)word0 << 32) | word1;
6905 #else
6906 return ((uint64_t)word1 << 32) | word0;
6907 #endif
6909 #else /* TARGET_ABI_BITS == 32 */
6910 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6912 return word0;
6914 #endif /* TARGET_ABI_BITS != 32 */
6916 #ifdef TARGET_NR_truncate64
6917 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6918 abi_long arg2,
6919 abi_long arg3,
6920 abi_long arg4)
6922 if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6923 arg2 = arg3;
6924 arg3 = arg4;
6926 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6928 #endif
6930 #ifdef TARGET_NR_ftruncate64
6931 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6932 abi_long arg2,
6933 abi_long arg3,
6934 abi_long arg4)
6936 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6937 arg2 = arg3;
6938 arg3 = arg4;
6940 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6942 #endif
6944 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
6945 abi_ulong target_addr)
6947 struct target_timespec *target_ts;
6949 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
6950 return -TARGET_EFAULT;
6951 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
6952 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6953 unlock_user_struct(target_ts, target_addr, 0);
6954 return 0;
6957 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
6958 struct timespec *host_ts)
6960 struct target_timespec *target_ts;
6962 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
6963 return -TARGET_EFAULT;
6964 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
6965 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6966 unlock_user_struct(target_ts, target_addr, 1);
6967 return 0;
6970 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6971 abi_ulong target_addr)
6973 struct target_itimerspec *target_itspec;
6975 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6976 return -TARGET_EFAULT;
6979 host_itspec->it_interval.tv_sec =
6980 tswapal(target_itspec->it_interval.tv_sec);
6981 host_itspec->it_interval.tv_nsec =
6982 tswapal(target_itspec->it_interval.tv_nsec);
6983 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6984 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6986 unlock_user_struct(target_itspec, target_addr, 1);
6987 return 0;
6990 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6991 struct itimerspec *host_its)
6993 struct target_itimerspec *target_itspec;
6995 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6996 return -TARGET_EFAULT;
6999 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
7000 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
7002 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
7003 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
7005 unlock_user_struct(target_itspec, target_addr, 0);
7006 return 0;
7009 static inline abi_long target_to_host_timex(struct timex *host_tx,
7010 abi_long target_addr)
7012 struct target_timex *target_tx;
7014 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7015 return -TARGET_EFAULT;
7018 __get_user(host_tx->modes, &target_tx->modes);
7019 __get_user(host_tx->offset, &target_tx->offset);
7020 __get_user(host_tx->freq, &target_tx->freq);
7021 __get_user(host_tx->maxerror, &target_tx->maxerror);
7022 __get_user(host_tx->esterror, &target_tx->esterror);
7023 __get_user(host_tx->status, &target_tx->status);
7024 __get_user(host_tx->constant, &target_tx->constant);
7025 __get_user(host_tx->precision, &target_tx->precision);
7026 __get_user(host_tx->tolerance, &target_tx->tolerance);
7027 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7028 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7029 __get_user(host_tx->tick, &target_tx->tick);
7030 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7031 __get_user(host_tx->jitter, &target_tx->jitter);
7032 __get_user(host_tx->shift, &target_tx->shift);
7033 __get_user(host_tx->stabil, &target_tx->stabil);
7034 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7035 __get_user(host_tx->calcnt, &target_tx->calcnt);
7036 __get_user(host_tx->errcnt, &target_tx->errcnt);
7037 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7038 __get_user(host_tx->tai, &target_tx->tai);
7040 unlock_user_struct(target_tx, target_addr, 0);
7041 return 0;
7044 static inline abi_long host_to_target_timex(abi_long target_addr,
7045 struct timex *host_tx)
7047 struct target_timex *target_tx;
7049 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7050 return -TARGET_EFAULT;
7053 __put_user(host_tx->modes, &target_tx->modes);
7054 __put_user(host_tx->offset, &target_tx->offset);
7055 __put_user(host_tx->freq, &target_tx->freq);
7056 __put_user(host_tx->maxerror, &target_tx->maxerror);
7057 __put_user(host_tx->esterror, &target_tx->esterror);
7058 __put_user(host_tx->status, &target_tx->status);
7059 __put_user(host_tx->constant, &target_tx->constant);
7060 __put_user(host_tx->precision, &target_tx->precision);
7061 __put_user(host_tx->tolerance, &target_tx->tolerance);
7062 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7063 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7064 __put_user(host_tx->tick, &target_tx->tick);
7065 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7066 __put_user(host_tx->jitter, &target_tx->jitter);
7067 __put_user(host_tx->shift, &target_tx->shift);
7068 __put_user(host_tx->stabil, &target_tx->stabil);
7069 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7070 __put_user(host_tx->calcnt, &target_tx->calcnt);
7071 __put_user(host_tx->errcnt, &target_tx->errcnt);
7072 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7073 __put_user(host_tx->tai, &target_tx->tai);
7075 unlock_user_struct(target_tx, target_addr, 1);
7076 return 0;
7080 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7081 abi_ulong target_addr)
7083 struct target_sigevent *target_sevp;
7085 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7086 return -TARGET_EFAULT;
7089 /* This union is awkward on 64 bit systems because it has a 32 bit
7090 * integer and a pointer in it; we follow the conversion approach
7091 * used for handling sigval types in signal.c so the guest should get
7092 * the correct value back even if we did a 64 bit byteswap and it's
7093 * using the 32 bit integer.
7095 host_sevp->sigev_value.sival_ptr =
7096 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7097 host_sevp->sigev_signo =
7098 target_to_host_signal(tswap32(target_sevp->sigev_signo));
7099 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7100 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7102 unlock_user_struct(target_sevp, target_addr, 1);
7103 return 0;
7106 #if defined(TARGET_NR_mlockall)
7107 static inline int target_to_host_mlockall_arg(int arg)
7109 int result = 0;
7111 if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
7112 result |= MCL_CURRENT;
7114 if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
7115 result |= MCL_FUTURE;
7117 return result;
7119 #endif
7121 static inline abi_long host_to_target_stat64(void *cpu_env,
7122 abi_ulong target_addr,
7123 struct stat *host_st)
7125 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7126 if (((CPUARMState *)cpu_env)->eabi) {
7127 struct target_eabi_stat64 *target_st;
7129 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7130 return -TARGET_EFAULT;
7131 memset(target_st, 0, sizeof(struct target_eabi_stat64));
7132 __put_user(host_st->st_dev, &target_st->st_dev);
7133 __put_user(host_st->st_ino, &target_st->st_ino);
7134 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7135 __put_user(host_st->st_ino, &target_st->__st_ino);
7136 #endif
7137 __put_user(host_st->st_mode, &target_st->st_mode);
7138 __put_user(host_st->st_nlink, &target_st->st_nlink);
7139 __put_user(host_st->st_uid, &target_st->st_uid);
7140 __put_user(host_st->st_gid, &target_st->st_gid);
7141 __put_user(host_st->st_rdev, &target_st->st_rdev);
7142 __put_user(host_st->st_size, &target_st->st_size);
7143 __put_user(host_st->st_blksize, &target_st->st_blksize);
7144 __put_user(host_st->st_blocks, &target_st->st_blocks);
7145 __put_user(host_st->st_atime, &target_st->target_st_atime);
7146 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7147 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7148 unlock_user_struct(target_st, target_addr, 1);
7149 } else
7150 #endif
7152 #if defined(TARGET_HAS_STRUCT_STAT64)
7153 struct target_stat64 *target_st;
7154 #else
7155 struct target_stat *target_st;
7156 #endif
7158 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7159 return -TARGET_EFAULT;
7160 memset(target_st, 0, sizeof(*target_st));
7161 __put_user(host_st->st_dev, &target_st->st_dev);
7162 __put_user(host_st->st_ino, &target_st->st_ino);
7163 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7164 __put_user(host_st->st_ino, &target_st->__st_ino);
7165 #endif
7166 __put_user(host_st->st_mode, &target_st->st_mode);
7167 __put_user(host_st->st_nlink, &target_st->st_nlink);
7168 __put_user(host_st->st_uid, &target_st->st_uid);
7169 __put_user(host_st->st_gid, &target_st->st_gid);
7170 __put_user(host_st->st_rdev, &target_st->st_rdev);
7171 /* XXX: better use of kernel struct */
7172 __put_user(host_st->st_size, &target_st->st_size);
7173 __put_user(host_st->st_blksize, &target_st->st_blksize);
7174 __put_user(host_st->st_blocks, &target_st->st_blocks);
7175 __put_user(host_st->st_atime, &target_st->target_st_atime);
7176 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7177 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7178 unlock_user_struct(target_st, target_addr, 1);
7181 return 0;
7184 /* ??? Using host futex calls even when target atomic operations
7185 are not really atomic probably breaks things. However implementing
7186 futexes locally would make futexes shared between multiple processes
7187 tricky. However they're probably useless because guest atomic
7188 operations won't work either. */
7189 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7190 target_ulong uaddr2, int val3)
7192 struct timespec ts, *pts;
7193 int base_op;
7195 /* ??? We assume FUTEX_* constants are the same on both host
7196 and target. */
7197 #ifdef FUTEX_CMD_MASK
7198 base_op = op & FUTEX_CMD_MASK;
7199 #else
7200 base_op = op;
7201 #endif
7202 switch (base_op) {
7203 case FUTEX_WAIT:
7204 case FUTEX_WAIT_BITSET:
7205 if (timeout) {
7206 pts = &ts;
7207 target_to_host_timespec(pts, timeout);
7208 } else {
7209 pts = NULL;
7211 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
7212 pts, NULL, val3));
7213 case FUTEX_WAKE:
7214 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7215 case FUTEX_FD:
7216 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7217 case FUTEX_REQUEUE:
7218 case FUTEX_CMP_REQUEUE:
7219 case FUTEX_WAKE_OP:
7220 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7221 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7222 But the prototype takes a `struct timespec *'; insert casts
7223 to satisfy the compiler. We do not need to tswap TIMEOUT
7224 since it's not compared to guest memory. */
7225 pts = (struct timespec *)(uintptr_t) timeout;
7226 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
7227 g2h(uaddr2),
7228 (base_op == FUTEX_CMP_REQUEUE
7229 ? tswap32(val3)
7230 : val3)));
7231 default:
7232 return -TARGET_ENOSYS;
7235 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7236 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7237 abi_long handle, abi_long mount_id,
7238 abi_long flags)
7240 struct file_handle *target_fh;
7241 struct file_handle *fh;
7242 int mid = 0;
7243 abi_long ret;
7244 char *name;
7245 unsigned int size, total_size;
7247 if (get_user_s32(size, handle)) {
7248 return -TARGET_EFAULT;
7251 name = lock_user_string(pathname);
7252 if (!name) {
7253 return -TARGET_EFAULT;
7256 total_size = sizeof(struct file_handle) + size;
7257 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7258 if (!target_fh) {
7259 unlock_user(name, pathname, 0);
7260 return -TARGET_EFAULT;
7263 fh = g_malloc0(total_size);
7264 fh->handle_bytes = size;
7266 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7267 unlock_user(name, pathname, 0);
7269 /* man name_to_handle_at(2):
7270 * Other than the use of the handle_bytes field, the caller should treat
7271 * the file_handle structure as an opaque data type
7274 memcpy(target_fh, fh, total_size);
7275 target_fh->handle_bytes = tswap32(fh->handle_bytes);
7276 target_fh->handle_type = tswap32(fh->handle_type);
7277 g_free(fh);
7278 unlock_user(target_fh, handle, total_size);
7280 if (put_user_s32(mid, mount_id)) {
7281 return -TARGET_EFAULT;
7284 return ret;
7287 #endif
7289 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7290 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7291 abi_long flags)
7293 struct file_handle *target_fh;
7294 struct file_handle *fh;
7295 unsigned int size, total_size;
7296 abi_long ret;
7298 if (get_user_s32(size, handle)) {
7299 return -TARGET_EFAULT;
7302 total_size = sizeof(struct file_handle) + size;
7303 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7304 if (!target_fh) {
7305 return -TARGET_EFAULT;
7308 fh = g_memdup(target_fh, total_size);
7309 fh->handle_bytes = size;
7310 fh->handle_type = tswap32(target_fh->handle_type);
7312 ret = get_errno(open_by_handle_at(mount_fd, fh,
7313 target_to_host_bitmask(flags, fcntl_flags_tbl)));
7315 g_free(fh);
7317 unlock_user(target_fh, handle, total_size);
7319 return ret;
7321 #endif
7323 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7325 /* signalfd siginfo conversion */
7327 static void
7328 host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
7329 const struct signalfd_siginfo *info)
7331 int sig = host_to_target_signal(info->ssi_signo);
7333 /* linux/signalfd.h defines a ssi_addr_lsb
7334 * not defined in sys/signalfd.h but used by some kernels
7337 #ifdef BUS_MCEERR_AO
7338 if (tinfo->ssi_signo == SIGBUS &&
7339 (tinfo->ssi_code == BUS_MCEERR_AR ||
7340 tinfo->ssi_code == BUS_MCEERR_AO)) {
7341 uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
7342 uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
7343 *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
7345 #endif
7347 tinfo->ssi_signo = tswap32(sig);
7348 tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
7349 tinfo->ssi_code = tswap32(info->ssi_code);
7350 tinfo->ssi_pid = tswap32(info->ssi_pid);
7351 tinfo->ssi_uid = tswap32(info->ssi_uid);
7352 tinfo->ssi_fd = tswap32(info->ssi_fd);
7353 tinfo->ssi_tid = tswap32(info->ssi_tid);
7354 tinfo->ssi_band = tswap32(info->ssi_band);
7355 tinfo->ssi_overrun = tswap32(info->ssi_overrun);
7356 tinfo->ssi_trapno = tswap32(info->ssi_trapno);
7357 tinfo->ssi_status = tswap32(info->ssi_status);
7358 tinfo->ssi_int = tswap32(info->ssi_int);
7359 tinfo->ssi_ptr = tswap64(info->ssi_ptr);
7360 tinfo->ssi_utime = tswap64(info->ssi_utime);
7361 tinfo->ssi_stime = tswap64(info->ssi_stime);
7362 tinfo->ssi_addr = tswap64(info->ssi_addr);
7365 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
7367 int i;
7369 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
7370 host_to_target_signalfd_siginfo(buf + i, buf + i);
7373 return len;
7376 static TargetFdTrans target_signalfd_trans = {
7377 .host_to_target_data = host_to_target_data_signalfd,
7380 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7382 int host_flags;
7383 target_sigset_t *target_mask;
7384 sigset_t host_mask;
7385 abi_long ret;
7387 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7388 return -TARGET_EINVAL;
7390 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7391 return -TARGET_EFAULT;
7394 target_to_host_sigset(&host_mask, target_mask);
7396 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7398 ret = get_errno(signalfd(fd, &host_mask, host_flags));
7399 if (ret >= 0) {
7400 fd_trans_register(ret, &target_signalfd_trans);
7403 unlock_user_struct(target_mask, mask, 0);
7405 return ret;
7407 #endif
7409 /* Map host to target signal numbers for the wait family of syscalls.
7410 Assume all other status bits are the same. */
7411 int host_to_target_waitstatus(int status)
7413 if (WIFSIGNALED(status)) {
7414 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7416 if (WIFSTOPPED(status)) {
7417 return (host_to_target_signal(WSTOPSIG(status)) << 8)
7418 | (status & 0xff);
7420 return status;
7423 static int open_self_cmdline(void *cpu_env, int fd)
7425 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7426 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7427 int i;
7429 for (i = 0; i < bprm->argc; i++) {
7430 size_t len = strlen(bprm->argv[i]) + 1;
7432 if (write(fd, bprm->argv[i], len) != len) {
7433 return -1;
7437 return 0;
7440 static int open_self_maps(void *cpu_env, int fd)
7442 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7443 TaskState *ts = cpu->opaque;
7444 FILE *fp;
7445 char *line = NULL;
7446 size_t len = 0;
7447 ssize_t read;
7449 fp = fopen("/proc/self/maps", "r");
7450 if (fp == NULL) {
7451 return -1;
7454 while ((read = getline(&line, &len, fp)) != -1) {
7455 int fields, dev_maj, dev_min, inode;
7456 uint64_t min, max, offset;
7457 char flag_r, flag_w, flag_x, flag_p;
7458 char path[512] = "";
7459 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7460 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7461 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
7463 if ((fields < 10) || (fields > 11)) {
7464 continue;
7466 if (h2g_valid(min)) {
7467 int flags = page_get_flags(h2g(min));
7468 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
7469 if (page_check_range(h2g(min), max - min, flags) == -1) {
7470 continue;
7472 if (h2g(min) == ts->info->stack_limit) {
7473 pstrcpy(path, sizeof(path), " [stack]");
7475 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
7476 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7477 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7478 flag_x, flag_p, offset, dev_maj, dev_min, inode,
7479 path[0] ? " " : "", path);
7483 free(line);
7484 fclose(fp);
7486 return 0;
7489 static int open_self_stat(void *cpu_env, int fd)
7491 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7492 TaskState *ts = cpu->opaque;
7493 abi_ulong start_stack = ts->info->start_stack;
7494 int i;
7496 for (i = 0; i < 44; i++) {
7497 char buf[128];
7498 int len;
7499 uint64_t val = 0;
7501 if (i == 0) {
7502 /* pid */
7503 val = getpid();
7504 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7505 } else if (i == 1) {
7506 /* app name */
7507 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
7508 } else if (i == 27) {
7509 /* stack bottom */
7510 val = start_stack;
7511 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7512 } else {
7513 /* for the rest, there is MasterCard */
7514 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
7517 len = strlen(buf);
7518 if (write(fd, buf, len) != len) {
7519 return -1;
7523 return 0;
7526 static int open_self_auxv(void *cpu_env, int fd)
7528 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7529 TaskState *ts = cpu->opaque;
7530 abi_ulong auxv = ts->info->saved_auxv;
7531 abi_ulong len = ts->info->auxv_len;
7532 char *ptr;
7535 * Auxiliary vector is stored in target process stack.
7536 * read in whole auxv vector and copy it to file
7538 ptr = lock_user(VERIFY_READ, auxv, len, 0);
7539 if (ptr != NULL) {
7540 while (len > 0) {
7541 ssize_t r;
7542 r = write(fd, ptr, len);
7543 if (r <= 0) {
7544 break;
7546 len -= r;
7547 ptr += r;
7549 lseek(fd, 0, SEEK_SET);
7550 unlock_user(ptr, auxv, len);
7553 return 0;
7556 static int is_proc_myself(const char *filename, const char *entry)
7558 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7559 filename += strlen("/proc/");
7560 if (!strncmp(filename, "self/", strlen("self/"))) {
7561 filename += strlen("self/");
7562 } else if (*filename >= '1' && *filename <= '9') {
7563 char myself[80];
7564 snprintf(myself, sizeof(myself), "%d/", getpid());
7565 if (!strncmp(filename, myself, strlen(myself))) {
7566 filename += strlen(myself);
7567 } else {
7568 return 0;
7570 } else {
7571 return 0;
7573 if (!strcmp(filename, entry)) {
7574 return 1;
7577 return 0;
7580 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7581 static int is_proc(const char *filename, const char *entry)
7583 return strcmp(filename, entry) == 0;
7586 static int open_net_route(void *cpu_env, int fd)
7588 FILE *fp;
7589 char *line = NULL;
7590 size_t len = 0;
7591 ssize_t read;
7593 fp = fopen("/proc/net/route", "r");
7594 if (fp == NULL) {
7595 return -1;
7598 /* read header */
7600 read = getline(&line, &len, fp);
7601 dprintf(fd, "%s", line);
7603 /* read routes */
7605 while ((read = getline(&line, &len, fp)) != -1) {
7606 char iface[16];
7607 uint32_t dest, gw, mask;
7608 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7609 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7610 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7611 &mask, &mtu, &window, &irtt);
7612 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7613 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7614 metric, tswap32(mask), mtu, window, irtt);
7617 free(line);
7618 fclose(fp);
7620 return 0;
7622 #endif
/*
 * openat(2) with QEMU-specific handling: selected /proc files for our
 * own process (and /proc/net/route when host and target endianness
 * differ) are synthesized into an anonymous temporary file so the
 * guest reads target-format contents instead of the host's.
 */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    /* One faked /proc entry: name, content generator, and path matcher. */
    struct fake_open {
        const char *filename;
        int (*fill)(void *cpu_env, int fd);
        int (*cmp)(const char *s1, const char *s2);
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    if (is_proc_myself(pathname, "exe")) {
        /* /proc/self/exe: reuse the fd the kernel exec'd us from if it
         * provided one, otherwise reopen the guest binary itself. */
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    /* Scan the table; the terminating NULL entry means "no match". */
    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* unlink immediately: the open fd keeps the file alive */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            /* preserve the fill callback's errno across close() */
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        /* rewind so the guest reads the generated content from offset 0 */
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
7684 #define TIMER_MAGIC 0x0caf0000
7685 #define TIMER_MAGIC_MASK 0xffff0000
7687 /* Convert QEMU provided timer ID back to internal 16bit index format */
7688 static target_timer_t get_timer_id(abi_long arg)
7690 target_timer_t timerid = arg;
7692 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7693 return -TARGET_EINVAL;
7696 timerid &= 0xffff;
7698 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7699 return -TARGET_EINVAL;
7702 return timerid;
7705 static abi_long swap_data_eventfd(void *buf, size_t len)
7707 uint64_t *counter = buf;
7708 int i;
7710 if (len < sizeof(uint64_t)) {
7711 return -EINVAL;
7714 for (i = 0; i < len; i += sizeof(uint64_t)) {
7715 *counter = tswap64(*counter);
7716 counter++;
7719 return len;
/* fd translator for eventfd descriptors: the 8-byte counter is swapped
 * identically in both directions by swap_data_eventfd(). */
static TargetFdTrans target_eventfd_trans = {
    .host_to_target_data = swap_data_eventfd,
    .target_to_host_data = swap_data_eventfd,
};
7727 #if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
7728 (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
7729 defined(__NR_inotify_init1))
7730 static abi_long host_to_target_data_inotify(void *buf, size_t len)
7732 struct inotify_event *ev;
7733 int i;
7734 uint32_t name_len;
7736 for (i = 0; i < len; i += sizeof(struct inotify_event) + name_len) {
7737 ev = (struct inotify_event *)((char *)buf + i);
7738 name_len = ev->len;
7740 ev->wd = tswap32(ev->wd);
7741 ev->mask = tswap32(ev->mask);
7742 ev->cookie = tswap32(ev->cookie);
7743 ev->len = tswap32(name_len);
7746 return len;
/* fd translator for inotify descriptors: only host-to-target conversion
 * is needed, since the guest never writes event data to the fd. */
static TargetFdTrans target_inotify_trans = {
    .host_to_target_data = host_to_target_data_inotify,
};
7752 #endif
7754 static int target_to_host_cpu_mask(unsigned long *host_mask,
7755 size_t host_size,
7756 abi_ulong target_addr,
7757 size_t target_size)
7759 unsigned target_bits = sizeof(abi_ulong) * 8;
7760 unsigned host_bits = sizeof(*host_mask) * 8;
7761 abi_ulong *target_mask;
7762 unsigned i, j;
7764 assert(host_size >= target_size);
7766 target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7767 if (!target_mask) {
7768 return -TARGET_EFAULT;
7770 memset(host_mask, 0, host_size);
7772 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7773 unsigned bit = i * target_bits;
7774 abi_ulong val;
7776 __get_user(val, &target_mask[i]);
7777 for (j = 0; j < target_bits; j++, bit++) {
7778 if (val & (1UL << j)) {
7779 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7784 unlock_user(target_mask, target_addr, 0);
7785 return 0;
7788 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7789 size_t host_size,
7790 abi_ulong target_addr,
7791 size_t target_size)
7793 unsigned target_bits = sizeof(abi_ulong) * 8;
7794 unsigned host_bits = sizeof(*host_mask) * 8;
7795 abi_ulong *target_mask;
7796 unsigned i, j;
7798 assert(host_size >= target_size);
7800 target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7801 if (!target_mask) {
7802 return -TARGET_EFAULT;
7805 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7806 unsigned bit = i * target_bits;
7807 abi_ulong val = 0;
7809 for (j = 0; j < target_bits; j++, bit++) {
7810 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7811 val |= 1UL << j;
7814 __put_user(val, &target_mask[i]);
7817 unlock_user(target_mask, target_addr, target_size);
7818 return 0;
7821 /* do_syscall() should always have a single exit point at the end so
7822 that actions, such as logging of syscall results, can be performed.
7823 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7824 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
7825 abi_long arg2, abi_long arg3, abi_long arg4,
7826 abi_long arg5, abi_long arg6, abi_long arg7,
7827 abi_long arg8)
7829 CPUState *cpu = ENV_GET_CPU(cpu_env);
7830 abi_long ret;
7831 struct stat st;
7832 struct statfs stfs;
7833 void *p;
7835 #if defined(DEBUG_ERESTARTSYS)
7836 /* Debug-only code for exercising the syscall-restart code paths
7837 * in the per-architecture cpu main loops: restart every syscall
7838 * the guest makes once before letting it through.
7841 static int flag;
7843 flag = !flag;
7844 if (flag) {
7845 return -TARGET_ERESTARTSYS;
7848 #endif
7850 #ifdef DEBUG
7851 gemu_log("syscall %d", num);
7852 #endif
7853 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
7854 if(do_strace)
7855 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
7857 switch(num) {
7858 case TARGET_NR_exit:
7859 /* In old applications this may be used to implement _exit(2).
7860 However in threaded applictions it is used for thread termination,
7861 and _exit_group is used for application termination.
7862 Do thread termination if we have more then one thread. */
7864 if (block_signals()) {
7865 ret = -TARGET_ERESTARTSYS;
7866 break;
7869 cpu_list_lock();
7871 if (CPU_NEXT(first_cpu)) {
7872 TaskState *ts;
7874 /* Remove the CPU from the list. */
7875 QTAILQ_REMOVE(&cpus, cpu, node);
7877 cpu_list_unlock();
7879 ts = cpu->opaque;
7880 if (ts->child_tidptr) {
7881 put_user_u32(0, ts->child_tidptr);
7882 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7883 NULL, NULL, 0);
7885 thread_cpu = NULL;
7886 object_unref(OBJECT(cpu));
7887 g_free(ts);
7888 rcu_unregister_thread();
7889 pthread_exit(NULL);
7892 cpu_list_unlock();
7893 #ifdef TARGET_GPROF
7894 _mcleanup();
7895 #endif
7896 gdb_exit(cpu_env, arg1);
7897 _exit(arg1);
7898 ret = 0; /* avoid warning */
7899 break;
7900 case TARGET_NR_read:
7901 if (arg3 == 0)
7902 ret = 0;
7903 else {
7904 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7905 goto efault;
7906 ret = get_errno(safe_read(arg1, p, arg3));
7907 if (ret >= 0 &&
7908 fd_trans_host_to_target_data(arg1)) {
7909 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7911 unlock_user(p, arg2, ret);
7913 break;
7914 case TARGET_NR_write:
7915 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7916 goto efault;
7917 if (fd_trans_target_to_host_data(arg1)) {
7918 void *copy = g_malloc(arg3);
7919 memcpy(copy, p, arg3);
7920 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7921 if (ret >= 0) {
7922 ret = get_errno(safe_write(arg1, copy, ret));
7924 g_free(copy);
7925 } else {
7926 ret = get_errno(safe_write(arg1, p, arg3));
7928 unlock_user(p, arg2, 0);
7929 break;
7930 #ifdef TARGET_NR_open
7931 case TARGET_NR_open:
7932 if (!(p = lock_user_string(arg1)))
7933 goto efault;
7934 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7935 target_to_host_bitmask(arg2, fcntl_flags_tbl),
7936 arg3));
7937 fd_trans_unregister(ret);
7938 unlock_user(p, arg1, 0);
7939 break;
7940 #endif
7941 case TARGET_NR_openat:
7942 if (!(p = lock_user_string(arg2)))
7943 goto efault;
7944 ret = get_errno(do_openat(cpu_env, arg1, p,
7945 target_to_host_bitmask(arg3, fcntl_flags_tbl),
7946 arg4));
7947 fd_trans_unregister(ret);
7948 unlock_user(p, arg2, 0);
7949 break;
7950 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7951 case TARGET_NR_name_to_handle_at:
7952 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7953 break;
7954 #endif
7955 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7956 case TARGET_NR_open_by_handle_at:
7957 ret = do_open_by_handle_at(arg1, arg2, arg3);
7958 fd_trans_unregister(ret);
7959 break;
7960 #endif
7961 case TARGET_NR_close:
7962 fd_trans_unregister(arg1);
7963 ret = get_errno(close(arg1));
7964 break;
7965 case TARGET_NR_brk:
7966 ret = do_brk(arg1);
7967 break;
7968 #ifdef TARGET_NR_fork
7969 case TARGET_NR_fork:
7970 ret = get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7971 break;
7972 #endif
7973 #ifdef TARGET_NR_waitpid
7974 case TARGET_NR_waitpid:
7976 int status;
7977 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7978 if (!is_error(ret) && arg2 && ret
7979 && put_user_s32(host_to_target_waitstatus(status), arg2))
7980 goto efault;
7982 break;
7983 #endif
7984 #ifdef TARGET_NR_waitid
7985 case TARGET_NR_waitid:
7987 siginfo_t info;
7988 info.si_pid = 0;
7989 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7990 if (!is_error(ret) && arg3 && info.si_pid != 0) {
7991 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7992 goto efault;
7993 host_to_target_siginfo(p, &info);
7994 unlock_user(p, arg3, sizeof(target_siginfo_t));
7997 break;
7998 #endif
7999 #ifdef TARGET_NR_creat /* not on alpha */
8000 case TARGET_NR_creat:
8001 if (!(p = lock_user_string(arg1)))
8002 goto efault;
8003 ret = get_errno(creat(p, arg2));
8004 fd_trans_unregister(ret);
8005 unlock_user(p, arg1, 0);
8006 break;
8007 #endif
8008 #ifdef TARGET_NR_link
8009 case TARGET_NR_link:
8011 void * p2;
8012 p = lock_user_string(arg1);
8013 p2 = lock_user_string(arg2);
8014 if (!p || !p2)
8015 ret = -TARGET_EFAULT;
8016 else
8017 ret = get_errno(link(p, p2));
8018 unlock_user(p2, arg2, 0);
8019 unlock_user(p, arg1, 0);
8021 break;
8022 #endif
8023 #if defined(TARGET_NR_linkat)
8024 case TARGET_NR_linkat:
8026 void * p2 = NULL;
8027 if (!arg2 || !arg4)
8028 goto efault;
8029 p = lock_user_string(arg2);
8030 p2 = lock_user_string(arg4);
8031 if (!p || !p2)
8032 ret = -TARGET_EFAULT;
8033 else
8034 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8035 unlock_user(p, arg2, 0);
8036 unlock_user(p2, arg4, 0);
8038 break;
8039 #endif
8040 #ifdef TARGET_NR_unlink
8041 case TARGET_NR_unlink:
8042 if (!(p = lock_user_string(arg1)))
8043 goto efault;
8044 ret = get_errno(unlink(p));
8045 unlock_user(p, arg1, 0);
8046 break;
8047 #endif
8048 #if defined(TARGET_NR_unlinkat)
8049 case TARGET_NR_unlinkat:
8050 if (!(p = lock_user_string(arg2)))
8051 goto efault;
8052 ret = get_errno(unlinkat(arg1, p, arg3));
8053 unlock_user(p, arg2, 0);
8054 break;
8055 #endif
8056 case TARGET_NR_execve:
8058 char **argp, **envp;
8059 int argc, envc;
8060 abi_ulong gp;
8061 abi_ulong guest_argp;
8062 abi_ulong guest_envp;
8063 abi_ulong addr;
8064 char **q;
8065 int total_size = 0;
8067 argc = 0;
8068 guest_argp = arg2;
8069 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8070 if (get_user_ual(addr, gp))
8071 goto efault;
8072 if (!addr)
8073 break;
8074 argc++;
8076 envc = 0;
8077 guest_envp = arg3;
8078 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8079 if (get_user_ual(addr, gp))
8080 goto efault;
8081 if (!addr)
8082 break;
8083 envc++;
8086 argp = g_new0(char *, argc + 1);
8087 envp = g_new0(char *, envc + 1);
8089 for (gp = guest_argp, q = argp; gp;
8090 gp += sizeof(abi_ulong), q++) {
8091 if (get_user_ual(addr, gp))
8092 goto execve_efault;
8093 if (!addr)
8094 break;
8095 if (!(*q = lock_user_string(addr)))
8096 goto execve_efault;
8097 total_size += strlen(*q) + 1;
8099 *q = NULL;
8101 for (gp = guest_envp, q = envp; gp;
8102 gp += sizeof(abi_ulong), q++) {
8103 if (get_user_ual(addr, gp))
8104 goto execve_efault;
8105 if (!addr)
8106 break;
8107 if (!(*q = lock_user_string(addr)))
8108 goto execve_efault;
8109 total_size += strlen(*q) + 1;
8111 *q = NULL;
8113 if (!(p = lock_user_string(arg1)))
8114 goto execve_efault;
8115 /* Although execve() is not an interruptible syscall it is
8116 * a special case where we must use the safe_syscall wrapper:
8117 * if we allow a signal to happen before we make the host
8118 * syscall then we will 'lose' it, because at the point of
8119 * execve the process leaves QEMU's control. So we use the
8120 * safe syscall wrapper to ensure that we either take the
8121 * signal as a guest signal, or else it does not happen
8122 * before the execve completes and makes it the other
8123 * program's problem.
8125 ret = get_errno(safe_execve(p, argp, envp));
8126 unlock_user(p, arg1, 0);
8128 goto execve_end;
8130 execve_efault:
8131 ret = -TARGET_EFAULT;
8133 execve_end:
8134 for (gp = guest_argp, q = argp; *q;
8135 gp += sizeof(abi_ulong), q++) {
8136 if (get_user_ual(addr, gp)
8137 || !addr)
8138 break;
8139 unlock_user(*q, addr, 0);
8141 for (gp = guest_envp, q = envp; *q;
8142 gp += sizeof(abi_ulong), q++) {
8143 if (get_user_ual(addr, gp)
8144 || !addr)
8145 break;
8146 unlock_user(*q, addr, 0);
8149 g_free(argp);
8150 g_free(envp);
8152 break;
8153 case TARGET_NR_chdir:
8154 if (!(p = lock_user_string(arg1)))
8155 goto efault;
8156 ret = get_errno(chdir(p));
8157 unlock_user(p, arg1, 0);
8158 break;
8159 #ifdef TARGET_NR_time
8160 case TARGET_NR_time:
8162 time_t host_time;
8163 ret = get_errno(time(&host_time));
8164 if (!is_error(ret)
8165 && arg1
8166 && put_user_sal(host_time, arg1))
8167 goto efault;
8169 break;
8170 #endif
8171 #ifdef TARGET_NR_mknod
8172 case TARGET_NR_mknod:
8173 if (!(p = lock_user_string(arg1)))
8174 goto efault;
8175 ret = get_errno(mknod(p, arg2, arg3));
8176 unlock_user(p, arg1, 0);
8177 break;
8178 #endif
8179 #if defined(TARGET_NR_mknodat)
8180 case TARGET_NR_mknodat:
8181 if (!(p = lock_user_string(arg2)))
8182 goto efault;
8183 ret = get_errno(mknodat(arg1, p, arg3, arg4));
8184 unlock_user(p, arg2, 0);
8185 break;
8186 #endif
8187 #ifdef TARGET_NR_chmod
8188 case TARGET_NR_chmod:
8189 if (!(p = lock_user_string(arg1)))
8190 goto efault;
8191 ret = get_errno(chmod(p, arg2));
8192 unlock_user(p, arg1, 0);
8193 break;
8194 #endif
8195 #ifdef TARGET_NR_break
8196 case TARGET_NR_break:
8197 goto unimplemented;
8198 #endif
8199 #ifdef TARGET_NR_oldstat
8200 case TARGET_NR_oldstat:
8201 goto unimplemented;
8202 #endif
8203 case TARGET_NR_lseek:
8204 ret = get_errno(lseek(arg1, arg2, arg3));
8205 break;
8206 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8207 /* Alpha specific */
8208 case TARGET_NR_getxpid:
8209 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8210 ret = get_errno(getpid());
8211 break;
8212 #endif
8213 #ifdef TARGET_NR_getpid
8214 case TARGET_NR_getpid:
8215 ret = get_errno(getpid());
8216 break;
8217 #endif
8218 case TARGET_NR_mount:
8220 /* need to look at the data field */
8221 void *p2, *p3;
8223 if (arg1) {
8224 p = lock_user_string(arg1);
8225 if (!p) {
8226 goto efault;
8228 } else {
8229 p = NULL;
8232 p2 = lock_user_string(arg2);
8233 if (!p2) {
8234 if (arg1) {
8235 unlock_user(p, arg1, 0);
8237 goto efault;
8240 if (arg3) {
8241 p3 = lock_user_string(arg3);
8242 if (!p3) {
8243 if (arg1) {
8244 unlock_user(p, arg1, 0);
8246 unlock_user(p2, arg2, 0);
8247 goto efault;
8249 } else {
8250 p3 = NULL;
8253 /* FIXME - arg5 should be locked, but it isn't clear how to
8254 * do that since it's not guaranteed to be a NULL-terminated
8255 * string.
8257 if (!arg5) {
8258 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8259 } else {
8260 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8262 ret = get_errno(ret);
8264 if (arg1) {
8265 unlock_user(p, arg1, 0);
8267 unlock_user(p2, arg2, 0);
8268 if (arg3) {
8269 unlock_user(p3, arg3, 0);
8272 break;
8273 #ifdef TARGET_NR_umount
8274 case TARGET_NR_umount:
8275 if (!(p = lock_user_string(arg1)))
8276 goto efault;
8277 ret = get_errno(umount(p));
8278 unlock_user(p, arg1, 0);
8279 break;
8280 #endif
8281 #ifdef TARGET_NR_stime /* not on alpha */
8282 case TARGET_NR_stime:
8284 time_t host_time;
8285 if (get_user_sal(host_time, arg1))
8286 goto efault;
8287 ret = get_errno(stime(&host_time));
8289 break;
8290 #endif
8291 case TARGET_NR_ptrace:
8292 goto unimplemented;
8293 #ifdef TARGET_NR_alarm /* not on alpha */
8294 case TARGET_NR_alarm:
8295 ret = alarm(arg1);
8296 break;
8297 #endif
8298 #ifdef TARGET_NR_oldfstat
8299 case TARGET_NR_oldfstat:
8300 goto unimplemented;
8301 #endif
8302 #ifdef TARGET_NR_pause /* not on alpha */
8303 case TARGET_NR_pause:
8304 if (!block_signals()) {
8305 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8307 ret = -TARGET_EINTR;
8308 break;
8309 #endif
8310 #ifdef TARGET_NR_utime
8311 case TARGET_NR_utime:
8313 struct utimbuf tbuf, *host_tbuf;
8314 struct target_utimbuf *target_tbuf;
8315 if (arg2) {
8316 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8317 goto efault;
8318 tbuf.actime = tswapal(target_tbuf->actime);
8319 tbuf.modtime = tswapal(target_tbuf->modtime);
8320 unlock_user_struct(target_tbuf, arg2, 0);
8321 host_tbuf = &tbuf;
8322 } else {
8323 host_tbuf = NULL;
8325 if (!(p = lock_user_string(arg1)))
8326 goto efault;
8327 ret = get_errno(utime(p, host_tbuf));
8328 unlock_user(p, arg1, 0);
8330 break;
8331 #endif
8332 #ifdef TARGET_NR_utimes
8333 case TARGET_NR_utimes:
8335 struct timeval *tvp, tv[2];
8336 if (arg2) {
8337 if (copy_from_user_timeval(&tv[0], arg2)
8338 || copy_from_user_timeval(&tv[1],
8339 arg2 + sizeof(struct target_timeval)))
8340 goto efault;
8341 tvp = tv;
8342 } else {
8343 tvp = NULL;
8345 if (!(p = lock_user_string(arg1)))
8346 goto efault;
8347 ret = get_errno(utimes(p, tvp));
8348 unlock_user(p, arg1, 0);
8350 break;
8351 #endif
8352 #if defined(TARGET_NR_futimesat)
8353 case TARGET_NR_futimesat:
8355 struct timeval *tvp, tv[2];
8356 if (arg3) {
8357 if (copy_from_user_timeval(&tv[0], arg3)
8358 || copy_from_user_timeval(&tv[1],
8359 arg3 + sizeof(struct target_timeval)))
8360 goto efault;
8361 tvp = tv;
8362 } else {
8363 tvp = NULL;
8365 if (!(p = lock_user_string(arg2)))
8366 goto efault;
8367 ret = get_errno(futimesat(arg1, path(p), tvp));
8368 unlock_user(p, arg2, 0);
8370 break;
8371 #endif
8372 #ifdef TARGET_NR_stty
8373 case TARGET_NR_stty:
8374 goto unimplemented;
8375 #endif
8376 #ifdef TARGET_NR_gtty
8377 case TARGET_NR_gtty:
8378 goto unimplemented;
8379 #endif
8380 #ifdef TARGET_NR_access
8381 case TARGET_NR_access:
8382 if (!(p = lock_user_string(arg1)))
8383 goto efault;
8384 ret = get_errno(access(path(p), arg2));
8385 unlock_user(p, arg1, 0);
8386 break;
8387 #endif
8388 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8389 case TARGET_NR_faccessat:
8390 if (!(p = lock_user_string(arg2)))
8391 goto efault;
8392 ret = get_errno(faccessat(arg1, p, arg3, 0));
8393 unlock_user(p, arg2, 0);
8394 break;
8395 #endif
8396 #ifdef TARGET_NR_nice /* not on alpha */
8397 case TARGET_NR_nice:
8398 ret = get_errno(nice(arg1));
8399 break;
8400 #endif
8401 #ifdef TARGET_NR_ftime
8402 case TARGET_NR_ftime:
8403 goto unimplemented;
8404 #endif
8405 case TARGET_NR_sync:
8406 sync();
8407 ret = 0;
8408 break;
8409 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8410 case TARGET_NR_syncfs:
8411 ret = get_errno(syncfs(arg1));
8412 break;
8413 #endif
8414 case TARGET_NR_kill:
8415 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8416 break;
8417 #ifdef TARGET_NR_rename
8418 case TARGET_NR_rename:
8420 void *p2;
8421 p = lock_user_string(arg1);
8422 p2 = lock_user_string(arg2);
8423 if (!p || !p2)
8424 ret = -TARGET_EFAULT;
8425 else
8426 ret = get_errno(rename(p, p2));
8427 unlock_user(p2, arg2, 0);
8428 unlock_user(p, arg1, 0);
8430 break;
8431 #endif
8432 #if defined(TARGET_NR_renameat)
8433 case TARGET_NR_renameat:
8435 void *p2;
8436 p = lock_user_string(arg2);
8437 p2 = lock_user_string(arg4);
8438 if (!p || !p2)
8439 ret = -TARGET_EFAULT;
8440 else
8441 ret = get_errno(renameat(arg1, p, arg3, p2));
8442 unlock_user(p2, arg4, 0);
8443 unlock_user(p, arg2, 0);
8445 break;
8446 #endif
8447 #if defined(TARGET_NR_renameat2)
8448 case TARGET_NR_renameat2:
8450 void *p2;
8451 p = lock_user_string(arg2);
8452 p2 = lock_user_string(arg4);
8453 if (!p || !p2) {
8454 ret = -TARGET_EFAULT;
8455 } else {
8456 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8458 unlock_user(p2, arg4, 0);
8459 unlock_user(p, arg2, 0);
8461 break;
8462 #endif
8463 #ifdef TARGET_NR_mkdir
8464 case TARGET_NR_mkdir:
8465 if (!(p = lock_user_string(arg1)))
8466 goto efault;
8467 ret = get_errno(mkdir(p, arg2));
8468 unlock_user(p, arg1, 0);
8469 break;
8470 #endif
8471 #if defined(TARGET_NR_mkdirat)
8472 case TARGET_NR_mkdirat:
8473 if (!(p = lock_user_string(arg2)))
8474 goto efault;
8475 ret = get_errno(mkdirat(arg1, p, arg3));
8476 unlock_user(p, arg2, 0);
8477 break;
8478 #endif
8479 #ifdef TARGET_NR_rmdir
8480 case TARGET_NR_rmdir:
8481 if (!(p = lock_user_string(arg1)))
8482 goto efault;
8483 ret = get_errno(rmdir(p));
8484 unlock_user(p, arg1, 0);
8485 break;
8486 #endif
8487 case TARGET_NR_dup:
8488 ret = get_errno(dup(arg1));
8489 if (ret >= 0) {
8490 fd_trans_dup(arg1, ret);
8492 break;
8493 #ifdef TARGET_NR_pipe
8494 case TARGET_NR_pipe:
8495 ret = do_pipe(cpu_env, arg1, 0, 0);
8496 break;
8497 #endif
8498 #ifdef TARGET_NR_pipe2
8499 case TARGET_NR_pipe2:
8500 ret = do_pipe(cpu_env, arg1,
8501 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8502 break;
8503 #endif
8504 case TARGET_NR_times:
8506 struct target_tms *tmsp;
8507 struct tms tms;
8508 ret = get_errno(times(&tms));
8509 if (arg1) {
8510 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8511 if (!tmsp)
8512 goto efault;
8513 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8514 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8515 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8516 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8518 if (!is_error(ret))
8519 ret = host_to_target_clock_t(ret);
8521 break;
8522 #ifdef TARGET_NR_prof
8523 case TARGET_NR_prof:
8524 goto unimplemented;
8525 #endif
8526 #ifdef TARGET_NR_signal
8527 case TARGET_NR_signal:
8528 goto unimplemented;
8529 #endif
8530 case TARGET_NR_acct:
8531 if (arg1 == 0) {
8532 ret = get_errno(acct(NULL));
8533 } else {
8534 if (!(p = lock_user_string(arg1)))
8535 goto efault;
8536 ret = get_errno(acct(path(p)));
8537 unlock_user(p, arg1, 0);
8539 break;
8540 #ifdef TARGET_NR_umount2
8541 case TARGET_NR_umount2:
8542 if (!(p = lock_user_string(arg1)))
8543 goto efault;
8544 ret = get_errno(umount2(p, arg2));
8545 unlock_user(p, arg1, 0);
8546 break;
8547 #endif
8548 #ifdef TARGET_NR_lock
8549 case TARGET_NR_lock:
8550 goto unimplemented;
8551 #endif
8552 case TARGET_NR_ioctl:
8553 ret = do_ioctl(arg1, arg2, arg3);
8554 break;
8555 case TARGET_NR_fcntl:
8556 ret = do_fcntl(arg1, arg2, arg3);
8557 break;
8558 #ifdef TARGET_NR_mpx
8559 case TARGET_NR_mpx:
8560 goto unimplemented;
8561 #endif
8562 case TARGET_NR_setpgid:
8563 ret = get_errno(setpgid(arg1, arg2));
8564 break;
8565 #ifdef TARGET_NR_ulimit
8566 case TARGET_NR_ulimit:
8567 goto unimplemented;
8568 #endif
8569 #ifdef TARGET_NR_oldolduname
8570 case TARGET_NR_oldolduname:
8571 goto unimplemented;
8572 #endif
8573 case TARGET_NR_umask:
8574 ret = get_errno(umask(arg1));
8575 break;
8576 case TARGET_NR_chroot:
8577 if (!(p = lock_user_string(arg1)))
8578 goto efault;
8579 ret = get_errno(chroot(p));
8580 unlock_user(p, arg1, 0);
8581 break;
8582 #ifdef TARGET_NR_ustat
8583 case TARGET_NR_ustat:
8584 goto unimplemented;
8585 #endif
8586 #ifdef TARGET_NR_dup2
8587 case TARGET_NR_dup2:
8588 ret = get_errno(dup2(arg1, arg2));
8589 if (ret >= 0) {
8590 fd_trans_dup(arg1, arg2);
8592 break;
8593 #endif
8594 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8595 case TARGET_NR_dup3:
8597 int host_flags;
8599 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8600 return -EINVAL;
8602 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8603 ret = get_errno(dup3(arg1, arg2, host_flags));
8604 if (ret >= 0) {
8605 fd_trans_dup(arg1, arg2);
8607 break;
8609 #endif
8610 #ifdef TARGET_NR_getppid /* not on alpha */
8611 case TARGET_NR_getppid:
8612 ret = get_errno(getppid());
8613 break;
8614 #endif
8615 #ifdef TARGET_NR_getpgrp
8616 case TARGET_NR_getpgrp:
8617 ret = get_errno(getpgrp());
8618 break;
8619 #endif
8620 case TARGET_NR_setsid:
8621 ret = get_errno(setsid());
8622 break;
8623 #ifdef TARGET_NR_sigaction
8624 case TARGET_NR_sigaction:
8626 #if defined(TARGET_ALPHA)
8627 struct target_sigaction act, oact, *pact = 0;
8628 struct target_old_sigaction *old_act;
8629 if (arg2) {
8630 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8631 goto efault;
8632 act._sa_handler = old_act->_sa_handler;
8633 target_siginitset(&act.sa_mask, old_act->sa_mask);
8634 act.sa_flags = old_act->sa_flags;
8635 act.sa_restorer = 0;
8636 unlock_user_struct(old_act, arg2, 0);
8637 pact = &act;
8639 ret = get_errno(do_sigaction(arg1, pact, &oact));
8640 if (!is_error(ret) && arg3) {
8641 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8642 goto efault;
8643 old_act->_sa_handler = oact._sa_handler;
8644 old_act->sa_mask = oact.sa_mask.sig[0];
8645 old_act->sa_flags = oact.sa_flags;
8646 unlock_user_struct(old_act, arg3, 1);
8648 #elif defined(TARGET_MIPS)
8649 struct target_sigaction act, oact, *pact, *old_act;
8651 if (arg2) {
8652 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8653 goto efault;
8654 act._sa_handler = old_act->_sa_handler;
8655 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8656 act.sa_flags = old_act->sa_flags;
8657 unlock_user_struct(old_act, arg2, 0);
8658 pact = &act;
8659 } else {
8660 pact = NULL;
8663 ret = get_errno(do_sigaction(arg1, pact, &oact));
8665 if (!is_error(ret) && arg3) {
8666 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8667 goto efault;
8668 old_act->_sa_handler = oact._sa_handler;
8669 old_act->sa_flags = oact.sa_flags;
8670 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8671 old_act->sa_mask.sig[1] = 0;
8672 old_act->sa_mask.sig[2] = 0;
8673 old_act->sa_mask.sig[3] = 0;
8674 unlock_user_struct(old_act, arg3, 1);
8676 #else
8677 struct target_old_sigaction *old_act;
8678 struct target_sigaction act, oact, *pact;
8679 if (arg2) {
8680 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8681 goto efault;
8682 act._sa_handler = old_act->_sa_handler;
8683 target_siginitset(&act.sa_mask, old_act->sa_mask);
8684 act.sa_flags = old_act->sa_flags;
8685 act.sa_restorer = old_act->sa_restorer;
8686 unlock_user_struct(old_act, arg2, 0);
8687 pact = &act;
8688 } else {
8689 pact = NULL;
8691 ret = get_errno(do_sigaction(arg1, pact, &oact));
8692 if (!is_error(ret) && arg3) {
8693 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8694 goto efault;
8695 old_act->_sa_handler = oact._sa_handler;
8696 old_act->sa_mask = oact.sa_mask.sig[0];
8697 old_act->sa_flags = oact.sa_flags;
8698 old_act->sa_restorer = oact.sa_restorer;
8699 unlock_user_struct(old_act, arg3, 1);
8701 #endif
8703 break;
8704 #endif
8705 case TARGET_NR_rt_sigaction:
8707 #if defined(TARGET_ALPHA)
8708 /* For Alpha and SPARC this is a 5 argument syscall, with
8709 * a 'restorer' parameter which must be copied into the
8710 * sa_restorer field of the sigaction struct.
8711 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8712 * and arg5 is the sigsetsize.
8713 * Alpha also has a separate rt_sigaction struct that it uses
8714 * here; SPARC uses the usual sigaction struct.
8716 struct target_rt_sigaction *rt_act;
8717 struct target_sigaction act, oact, *pact = 0;
8719 if (arg4 != sizeof(target_sigset_t)) {
8720 ret = -TARGET_EINVAL;
8721 break;
8723 if (arg2) {
8724 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8725 goto efault;
8726 act._sa_handler = rt_act->_sa_handler;
8727 act.sa_mask = rt_act->sa_mask;
8728 act.sa_flags = rt_act->sa_flags;
8729 act.sa_restorer = arg5;
8730 unlock_user_struct(rt_act, arg2, 0);
8731 pact = &act;
8733 ret = get_errno(do_sigaction(arg1, pact, &oact));
8734 if (!is_error(ret) && arg3) {
8735 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8736 goto efault;
8737 rt_act->_sa_handler = oact._sa_handler;
8738 rt_act->sa_mask = oact.sa_mask;
8739 rt_act->sa_flags = oact.sa_flags;
8740 unlock_user_struct(rt_act, arg3, 1);
8742 #else
8743 #ifdef TARGET_SPARC
8744 target_ulong restorer = arg4;
8745 target_ulong sigsetsize = arg5;
8746 #else
8747 target_ulong sigsetsize = arg4;
8748 #endif
8749 struct target_sigaction *act;
8750 struct target_sigaction *oact;
8752 if (sigsetsize != sizeof(target_sigset_t)) {
8753 ret = -TARGET_EINVAL;
8754 break;
8756 if (arg2) {
8757 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8758 goto efault;
8760 #ifdef TARGET_SPARC
8761 act->sa_restorer = restorer;
8762 #endif
8763 } else {
8764 act = NULL;
8766 if (arg3) {
8767 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8768 ret = -TARGET_EFAULT;
8769 goto rt_sigaction_fail;
8771 } else
8772 oact = NULL;
8773 ret = get_errno(do_sigaction(arg1, act, oact));
8774 rt_sigaction_fail:
8775 if (act)
8776 unlock_user_struct(act, arg2, 0);
8777 if (oact)
8778 unlock_user_struct(oact, arg3, 1);
8779 #endif
8781 break;
8782 #ifdef TARGET_NR_sgetmask /* not on alpha */
8783 case TARGET_NR_sgetmask:
8785 sigset_t cur_set;
8786 abi_ulong target_set;
8787 ret = do_sigprocmask(0, NULL, &cur_set);
8788 if (!ret) {
8789 host_to_target_old_sigset(&target_set, &cur_set);
8790 ret = target_set;
8793 break;
8794 #endif
8795 #ifdef TARGET_NR_ssetmask /* not on alpha */
8796 case TARGET_NR_ssetmask:
8798 sigset_t set, oset;
8799 abi_ulong target_set = arg1;
8800 target_to_host_old_sigset(&set, &target_set);
8801 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8802 if (!ret) {
8803 host_to_target_old_sigset(&target_set, &oset);
8804 ret = target_set;
8807 break;
8808 #endif
8809 #ifdef TARGET_NR_sigprocmask
8810 case TARGET_NR_sigprocmask:
8812 #if defined(TARGET_ALPHA)
8813 sigset_t set, oldset;
8814 abi_ulong mask;
8815 int how;
8817 switch (arg1) {
8818 case TARGET_SIG_BLOCK:
8819 how = SIG_BLOCK;
8820 break;
8821 case TARGET_SIG_UNBLOCK:
8822 how = SIG_UNBLOCK;
8823 break;
8824 case TARGET_SIG_SETMASK:
8825 how = SIG_SETMASK;
8826 break;
8827 default:
8828 ret = -TARGET_EINVAL;
8829 goto fail;
8831 mask = arg2;
8832 target_to_host_old_sigset(&set, &mask);
8834 ret = do_sigprocmask(how, &set, &oldset);
8835 if (!is_error(ret)) {
8836 host_to_target_old_sigset(&mask, &oldset);
8837 ret = mask;
8838 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8840 #else
8841 sigset_t set, oldset, *set_ptr;
8842 int how;
8844 if (arg2) {
8845 switch (arg1) {
8846 case TARGET_SIG_BLOCK:
8847 how = SIG_BLOCK;
8848 break;
8849 case TARGET_SIG_UNBLOCK:
8850 how = SIG_UNBLOCK;
8851 break;
8852 case TARGET_SIG_SETMASK:
8853 how = SIG_SETMASK;
8854 break;
8855 default:
8856 ret = -TARGET_EINVAL;
8857 goto fail;
8859 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8860 goto efault;
8861 target_to_host_old_sigset(&set, p);
8862 unlock_user(p, arg2, 0);
8863 set_ptr = &set;
8864 } else {
8865 how = 0;
8866 set_ptr = NULL;
8868 ret = do_sigprocmask(how, set_ptr, &oldset);
8869 if (!is_error(ret) && arg3) {
8870 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8871 goto efault;
8872 host_to_target_old_sigset(p, &oldset);
8873 unlock_user(p, arg3, sizeof(target_sigset_t));
8875 #endif
8877 break;
8878 #endif
8879 case TARGET_NR_rt_sigprocmask:
8881 int how = arg1;
8882 sigset_t set, oldset, *set_ptr;
8884 if (arg4 != sizeof(target_sigset_t)) {
8885 ret = -TARGET_EINVAL;
8886 break;
8889 if (arg2) {
8890 switch(how) {
8891 case TARGET_SIG_BLOCK:
8892 how = SIG_BLOCK;
8893 break;
8894 case TARGET_SIG_UNBLOCK:
8895 how = SIG_UNBLOCK;
8896 break;
8897 case TARGET_SIG_SETMASK:
8898 how = SIG_SETMASK;
8899 break;
8900 default:
8901 ret = -TARGET_EINVAL;
8902 goto fail;
8904 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8905 goto efault;
8906 target_to_host_sigset(&set, p);
8907 unlock_user(p, arg2, 0);
8908 set_ptr = &set;
8909 } else {
8910 how = 0;
8911 set_ptr = NULL;
8913 ret = do_sigprocmask(how, set_ptr, &oldset);
8914 if (!is_error(ret) && arg3) {
8915 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8916 goto efault;
8917 host_to_target_sigset(p, &oldset);
8918 unlock_user(p, arg3, sizeof(target_sigset_t));
8921 break;
8922 #ifdef TARGET_NR_sigpending
8923 case TARGET_NR_sigpending:
8925 sigset_t set;
8926 ret = get_errno(sigpending(&set));
8927 if (!is_error(ret)) {
8928 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8929 goto efault;
8930 host_to_target_old_sigset(p, &set);
8931 unlock_user(p, arg1, sizeof(target_sigset_t));
8934 break;
8935 #endif
8936 case TARGET_NR_rt_sigpending:
8938 sigset_t set;
8940 /* Yes, this check is >, not != like most. We follow the kernel's
8941 * logic and it does it like this because it implements
8942 * NR_sigpending through the same code path, and in that case
8943 * the old_sigset_t is smaller in size.
8945 if (arg2 > sizeof(target_sigset_t)) {
8946 ret = -TARGET_EINVAL;
8947 break;
8950 ret = get_errno(sigpending(&set));
8951 if (!is_error(ret)) {
8952 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8953 goto efault;
8954 host_to_target_sigset(p, &set);
8955 unlock_user(p, arg1, sizeof(target_sigset_t));
8958 break;
8959 #ifdef TARGET_NR_sigsuspend
8960 case TARGET_NR_sigsuspend:
8962 TaskState *ts = cpu->opaque;
8963 #if defined(TARGET_ALPHA)
8964 abi_ulong mask = arg1;
8965 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8966 #else
8967 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8968 goto efault;
8969 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8970 unlock_user(p, arg1, 0);
8971 #endif
8972 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8973 SIGSET_T_SIZE));
8974 if (ret != -TARGET_ERESTARTSYS) {
8975 ts->in_sigsuspend = 1;
8978 break;
8979 #endif
8980 case TARGET_NR_rt_sigsuspend:
8982 TaskState *ts = cpu->opaque;
8984 if (arg2 != sizeof(target_sigset_t)) {
8985 ret = -TARGET_EINVAL;
8986 break;
8988 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8989 goto efault;
8990 target_to_host_sigset(&ts->sigsuspend_mask, p);
8991 unlock_user(p, arg1, 0);
8992 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8993 SIGSET_T_SIZE));
8994 if (ret != -TARGET_ERESTARTSYS) {
8995 ts->in_sigsuspend = 1;
8998 break;
8999 case TARGET_NR_rt_sigtimedwait:
9001 sigset_t set;
9002 struct timespec uts, *puts;
9003 siginfo_t uinfo;
9005 if (arg4 != sizeof(target_sigset_t)) {
9006 ret = -TARGET_EINVAL;
9007 break;
9010 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9011 goto efault;
9012 target_to_host_sigset(&set, p);
9013 unlock_user(p, arg1, 0);
9014 if (arg3) {
9015 puts = &uts;
9016 target_to_host_timespec(puts, arg3);
9017 } else {
9018 puts = NULL;
9020 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9021 SIGSET_T_SIZE));
9022 if (!is_error(ret)) {
9023 if (arg2) {
9024 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9026 if (!p) {
9027 goto efault;
9029 host_to_target_siginfo(p, &uinfo);
9030 unlock_user(p, arg2, sizeof(target_siginfo_t));
9032 ret = host_to_target_signal(ret);
9035 break;
9036 case TARGET_NR_rt_sigqueueinfo:
9038 siginfo_t uinfo;
9040 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9041 if (!p) {
9042 goto efault;
9044 target_to_host_siginfo(&uinfo, p);
9045 unlock_user(p, arg3, 0);
9046 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9048 break;
9049 case TARGET_NR_rt_tgsigqueueinfo:
9051 siginfo_t uinfo;
9053 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9054 if (!p) {
9055 goto efault;
9057 target_to_host_siginfo(&uinfo, p);
9058 unlock_user(p, arg4, 0);
9059 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9061 break;
9062 #ifdef TARGET_NR_sigreturn
9063 case TARGET_NR_sigreturn:
9064 if (block_signals()) {
9065 ret = -TARGET_ERESTARTSYS;
9066 } else {
9067 ret = do_sigreturn(cpu_env);
9069 break;
9070 #endif
9071 case TARGET_NR_rt_sigreturn:
9072 if (block_signals()) {
9073 ret = -TARGET_ERESTARTSYS;
9074 } else {
9075 ret = do_rt_sigreturn(cpu_env);
9077 break;
9078 case TARGET_NR_sethostname:
9079 if (!(p = lock_user_string(arg1)))
9080 goto efault;
9081 ret = get_errno(sethostname(p, arg2));
9082 unlock_user(p, arg1, 0);
9083 break;
9084 case TARGET_NR_setrlimit:
9086 int resource = target_to_host_resource(arg1);
9087 struct target_rlimit *target_rlim;
9088 struct rlimit rlim;
9089 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9090 goto efault;
9091 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9092 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9093 unlock_user_struct(target_rlim, arg2, 0);
9094 ret = get_errno(setrlimit(resource, &rlim));
9096 break;
9097 case TARGET_NR_getrlimit:
9099 int resource = target_to_host_resource(arg1);
9100 struct target_rlimit *target_rlim;
9101 struct rlimit rlim;
9103 ret = get_errno(getrlimit(resource, &rlim));
9104 if (!is_error(ret)) {
9105 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9106 goto efault;
9107 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9108 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9109 unlock_user_struct(target_rlim, arg2, 1);
9112 break;
9113 case TARGET_NR_getrusage:
9115 struct rusage rusage;
9116 ret = get_errno(getrusage(arg1, &rusage));
9117 if (!is_error(ret)) {
9118 ret = host_to_target_rusage(arg2, &rusage);
9121 break;
9122 case TARGET_NR_gettimeofday:
9124 struct timeval tv;
9125 ret = get_errno(gettimeofday(&tv, NULL));
9126 if (!is_error(ret)) {
9127 if (copy_to_user_timeval(arg1, &tv))
9128 goto efault;
9131 break;
9132 case TARGET_NR_settimeofday:
9134 struct timeval tv, *ptv = NULL;
9135 struct timezone tz, *ptz = NULL;
9137 if (arg1) {
9138 if (copy_from_user_timeval(&tv, arg1)) {
9139 goto efault;
9141 ptv = &tv;
9144 if (arg2) {
9145 if (copy_from_user_timezone(&tz, arg2)) {
9146 goto efault;
9148 ptz = &tz;
9151 ret = get_errno(settimeofday(ptv, ptz));
9153 break;
9154 #if defined(TARGET_NR_select)
9155 case TARGET_NR_select:
9156 #if defined(TARGET_WANT_NI_OLD_SELECT)
9157 /* some architectures used to have old_select here
9158 * but now ENOSYS it.
9160 ret = -TARGET_ENOSYS;
9161 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9162 ret = do_old_select(arg1);
9163 #else
9164 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9165 #endif
9166 break;
9167 #endif
9168 #ifdef TARGET_NR_pselect6
9169 case TARGET_NR_pselect6:
9171 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
9172 fd_set rfds, wfds, efds;
9173 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
9174 struct timespec ts, *ts_ptr;
9177 * The 6th arg is actually two args smashed together,
9178 * so we cannot use the C library.
9180 sigset_t set;
9181 struct {
9182 sigset_t *set;
9183 size_t size;
9184 } sig, *sig_ptr;
9186 abi_ulong arg_sigset, arg_sigsize, *arg7;
9187 target_sigset_t *target_sigset;
9189 n = arg1;
9190 rfd_addr = arg2;
9191 wfd_addr = arg3;
9192 efd_addr = arg4;
9193 ts_addr = arg5;
9195 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
9196 if (ret) {
9197 goto fail;
9199 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
9200 if (ret) {
9201 goto fail;
9203 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
9204 if (ret) {
9205 goto fail;
9209 * This takes a timespec, and not a timeval, so we cannot
9210 * use the do_select() helper ...
9212 if (ts_addr) {
9213 if (target_to_host_timespec(&ts, ts_addr)) {
9214 goto efault;
9216 ts_ptr = &ts;
9217 } else {
9218 ts_ptr = NULL;
9221 /* Extract the two packed args for the sigset */
9222 if (arg6) {
9223 sig_ptr = &sig;
9224 sig.size = SIGSET_T_SIZE;
9226 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
9227 if (!arg7) {
9228 goto efault;
9230 arg_sigset = tswapal(arg7[0]);
9231 arg_sigsize = tswapal(arg7[1]);
9232 unlock_user(arg7, arg6, 0);
9234 if (arg_sigset) {
9235 sig.set = &set;
9236 if (arg_sigsize != sizeof(*target_sigset)) {
9237 /* Like the kernel, we enforce correct size sigsets */
9238 ret = -TARGET_EINVAL;
9239 goto fail;
9241 target_sigset = lock_user(VERIFY_READ, arg_sigset,
9242 sizeof(*target_sigset), 1);
9243 if (!target_sigset) {
9244 goto efault;
9246 target_to_host_sigset(&set, target_sigset);
9247 unlock_user(target_sigset, arg_sigset, 0);
9248 } else {
9249 sig.set = NULL;
9251 } else {
9252 sig_ptr = NULL;
9255 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
9256 ts_ptr, sig_ptr));
9258 if (!is_error(ret)) {
9259 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
9260 goto efault;
9261 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
9262 goto efault;
9263 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
9264 goto efault;
9266 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
9267 goto efault;
9270 break;
9271 #endif
9272 #ifdef TARGET_NR_symlink
9273 case TARGET_NR_symlink:
9275 void *p2;
9276 p = lock_user_string(arg1);
9277 p2 = lock_user_string(arg2);
9278 if (!p || !p2)
9279 ret = -TARGET_EFAULT;
9280 else
9281 ret = get_errno(symlink(p, p2));
9282 unlock_user(p2, arg2, 0);
9283 unlock_user(p, arg1, 0);
9285 break;
9286 #endif
9287 #if defined(TARGET_NR_symlinkat)
9288 case TARGET_NR_symlinkat:
9290 void *p2;
9291 p = lock_user_string(arg1);
9292 p2 = lock_user_string(arg3);
9293 if (!p || !p2)
9294 ret = -TARGET_EFAULT;
9295 else
9296 ret = get_errno(symlinkat(p, arg2, p2));
9297 unlock_user(p2, arg3, 0);
9298 unlock_user(p, arg1, 0);
9300 break;
9301 #endif
9302 #ifdef TARGET_NR_oldlstat
9303 case TARGET_NR_oldlstat:
9304 goto unimplemented;
9305 #endif
9306 #ifdef TARGET_NR_readlink
9307 case TARGET_NR_readlink:
9309 void *p2;
9310 p = lock_user_string(arg1);
9311 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9312 if (!p || !p2) {
9313 ret = -TARGET_EFAULT;
9314 } else if (!arg3) {
9315 /* Short circuit this for the magic exe check. */
9316 ret = -TARGET_EINVAL;
9317 } else if (is_proc_myself((const char *)p, "exe")) {
9318 char real[PATH_MAX], *temp;
9319 temp = realpath(exec_path, real);
9320 /* Return value is # of bytes that we wrote to the buffer. */
9321 if (temp == NULL) {
9322 ret = get_errno(-1);
9323 } else {
9324 /* Don't worry about sign mismatch as earlier mapping
9325 * logic would have thrown a bad address error. */
9326 ret = MIN(strlen(real), arg3);
9327 /* We cannot NUL terminate the string. */
9328 memcpy(p2, real, ret);
9330 } else {
9331 ret = get_errno(readlink(path(p), p2, arg3));
9333 unlock_user(p2, arg2, ret);
9334 unlock_user(p, arg1, 0);
9336 break;
9337 #endif
9338 #if defined(TARGET_NR_readlinkat)
9339 case TARGET_NR_readlinkat:
9341 void *p2;
9342 p = lock_user_string(arg2);
9343 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9344 if (!p || !p2) {
9345 ret = -TARGET_EFAULT;
9346 } else if (is_proc_myself((const char *)p, "exe")) {
9347 char real[PATH_MAX], *temp;
9348 temp = realpath(exec_path, real);
9349 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
9350 snprintf((char *)p2, arg4, "%s", real);
9351 } else {
9352 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9354 unlock_user(p2, arg3, ret);
9355 unlock_user(p, arg2, 0);
9357 break;
9358 #endif
9359 #ifdef TARGET_NR_uselib
9360 case TARGET_NR_uselib:
9361 goto unimplemented;
9362 #endif
9363 #ifdef TARGET_NR_swapon
9364 case TARGET_NR_swapon:
9365 if (!(p = lock_user_string(arg1)))
9366 goto efault;
9367 ret = get_errno(swapon(p, arg2));
9368 unlock_user(p, arg1, 0);
9369 break;
9370 #endif
9371 case TARGET_NR_reboot:
9372 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9373 /* arg4 must be ignored in all other cases */
9374 p = lock_user_string(arg4);
9375 if (!p) {
9376 goto efault;
9378 ret = get_errno(reboot(arg1, arg2, arg3, p));
9379 unlock_user(p, arg4, 0);
9380 } else {
9381 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9383 break;
9384 #ifdef TARGET_NR_readdir
9385 case TARGET_NR_readdir:
9386 goto unimplemented;
9387 #endif
9388 #ifdef TARGET_NR_mmap
9389 case TARGET_NR_mmap:
9390 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9391 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9392 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9393 || defined(TARGET_S390X)
9395 abi_ulong *v;
9396 abi_ulong v1, v2, v3, v4, v5, v6;
9397 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9398 goto efault;
9399 v1 = tswapal(v[0]);
9400 v2 = tswapal(v[1]);
9401 v3 = tswapal(v[2]);
9402 v4 = tswapal(v[3]);
9403 v5 = tswapal(v[4]);
9404 v6 = tswapal(v[5]);
9405 unlock_user(v, arg1, 0);
9406 ret = get_errno(target_mmap(v1, v2, v3,
9407 target_to_host_bitmask(v4, mmap_flags_tbl),
9408 v5, v6));
9410 #else
9411 ret = get_errno(target_mmap(arg1, arg2, arg3,
9412 target_to_host_bitmask(arg4, mmap_flags_tbl),
9413 arg5,
9414 arg6));
9415 #endif
9416 break;
9417 #endif
9418 #ifdef TARGET_NR_mmap2
9419 case TARGET_NR_mmap2:
9420 #ifndef MMAP_SHIFT
9421 #define MMAP_SHIFT 12
9422 #endif
9423 ret = get_errno(target_mmap(arg1, arg2, arg3,
9424 target_to_host_bitmask(arg4, mmap_flags_tbl),
9425 arg5,
9426 arg6 << MMAP_SHIFT));
9427 break;
9428 #endif
9429 case TARGET_NR_munmap:
9430 ret = get_errno(target_munmap(arg1, arg2));
9431 break;
9432 case TARGET_NR_mprotect:
9434 TaskState *ts = cpu->opaque;
9435 /* Special hack to detect libc making the stack executable. */
9436 if ((arg3 & PROT_GROWSDOWN)
9437 && arg1 >= ts->info->stack_limit
9438 && arg1 <= ts->info->start_stack) {
9439 arg3 &= ~PROT_GROWSDOWN;
9440 arg2 = arg2 + arg1 - ts->info->stack_limit;
9441 arg1 = ts->info->stack_limit;
9444 ret = get_errno(target_mprotect(arg1, arg2, arg3));
9445 break;
9446 #ifdef TARGET_NR_mremap
9447 case TARGET_NR_mremap:
9448 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9449 break;
9450 #endif
9451 /* ??? msync/mlock/munlock are broken for softmmu. */
9452 #ifdef TARGET_NR_msync
9453 case TARGET_NR_msync:
9454 ret = get_errno(msync(g2h(arg1), arg2, arg3));
9455 break;
9456 #endif
9457 #ifdef TARGET_NR_mlock
9458 case TARGET_NR_mlock:
9459 ret = get_errno(mlock(g2h(arg1), arg2));
9460 break;
9461 #endif
9462 #ifdef TARGET_NR_munlock
9463 case TARGET_NR_munlock:
9464 ret = get_errno(munlock(g2h(arg1), arg2));
9465 break;
9466 #endif
9467 #ifdef TARGET_NR_mlockall
9468 case TARGET_NR_mlockall:
9469 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9470 break;
9471 #endif
9472 #ifdef TARGET_NR_munlockall
9473 case TARGET_NR_munlockall:
9474 ret = get_errno(munlockall());
9475 break;
9476 #endif
9477 case TARGET_NR_truncate:
9478 if (!(p = lock_user_string(arg1)))
9479 goto efault;
9480 ret = get_errno(truncate(p, arg2));
9481 unlock_user(p, arg1, 0);
9482 break;
9483 case TARGET_NR_ftruncate:
9484 ret = get_errno(ftruncate(arg1, arg2));
9485 break;
9486 case TARGET_NR_fchmod:
9487 ret = get_errno(fchmod(arg1, arg2));
9488 break;
9489 #if defined(TARGET_NR_fchmodat)
9490 case TARGET_NR_fchmodat:
9491 if (!(p = lock_user_string(arg2)))
9492 goto efault;
9493 ret = get_errno(fchmodat(arg1, p, arg3, 0));
9494 unlock_user(p, arg2, 0);
9495 break;
9496 #endif
9497 case TARGET_NR_getpriority:
9498 /* Note that negative values are valid for getpriority, so we must
9499 differentiate based on errno settings. */
9500 errno = 0;
9501 ret = getpriority(arg1, arg2);
9502 if (ret == -1 && errno != 0) {
9503 ret = -host_to_target_errno(errno);
9504 break;
9506 #ifdef TARGET_ALPHA
9507 /* Return value is the unbiased priority. Signal no error. */
9508 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9509 #else
9510 /* Return value is a biased priority to avoid negative numbers. */
9511 ret = 20 - ret;
9512 #endif
9513 break;
9514 case TARGET_NR_setpriority:
9515 ret = get_errno(setpriority(arg1, arg2, arg3));
9516 break;
9517 #ifdef TARGET_NR_profil
9518 case TARGET_NR_profil:
9519 goto unimplemented;
9520 #endif
9521 case TARGET_NR_statfs:
9522 if (!(p = lock_user_string(arg1)))
9523 goto efault;
9524 ret = get_errno(statfs(path(p), &stfs));
9525 unlock_user(p, arg1, 0);
9526 convert_statfs:
9527 if (!is_error(ret)) {
9528 struct target_statfs *target_stfs;
9530 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9531 goto efault;
9532 __put_user(stfs.f_type, &target_stfs->f_type);
9533 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9534 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9535 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9536 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9537 __put_user(stfs.f_files, &target_stfs->f_files);
9538 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9539 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9540 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9541 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9542 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9543 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9544 unlock_user_struct(target_stfs, arg2, 1);
9546 break;
9547 case TARGET_NR_fstatfs:
9548 ret = get_errno(fstatfs(arg1, &stfs));
9549 goto convert_statfs;
9550 #ifdef TARGET_NR_statfs64
9551 case TARGET_NR_statfs64:
9552 if (!(p = lock_user_string(arg1)))
9553 goto efault;
9554 ret = get_errno(statfs(path(p), &stfs));
9555 unlock_user(p, arg1, 0);
9556 convert_statfs64:
9557 if (!is_error(ret)) {
9558 struct target_statfs64 *target_stfs;
9560 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9561 goto efault;
9562 __put_user(stfs.f_type, &target_stfs->f_type);
9563 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9564 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9565 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9566 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9567 __put_user(stfs.f_files, &target_stfs->f_files);
9568 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9569 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9570 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9571 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9572 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9573 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9574 unlock_user_struct(target_stfs, arg3, 1);
9576 break;
9577 case TARGET_NR_fstatfs64:
9578 ret = get_errno(fstatfs(arg1, &stfs));
9579 goto convert_statfs64;
9580 #endif
9581 #ifdef TARGET_NR_ioperm
9582 case TARGET_NR_ioperm:
9583 goto unimplemented;
9584 #endif
9585 #ifdef TARGET_NR_socketcall
9586 case TARGET_NR_socketcall:
9587 ret = do_socketcall(arg1, arg2);
9588 break;
9589 #endif
9590 #ifdef TARGET_NR_accept
9591 case TARGET_NR_accept:
9592 ret = do_accept4(arg1, arg2, arg3, 0);
9593 break;
9594 #endif
9595 #ifdef TARGET_NR_accept4
9596 case TARGET_NR_accept4:
9597 ret = do_accept4(arg1, arg2, arg3, arg4);
9598 break;
9599 #endif
9600 #ifdef TARGET_NR_bind
9601 case TARGET_NR_bind:
9602 ret = do_bind(arg1, arg2, arg3);
9603 break;
9604 #endif
9605 #ifdef TARGET_NR_connect
9606 case TARGET_NR_connect:
9607 ret = do_connect(arg1, arg2, arg3);
9608 break;
9609 #endif
9610 #ifdef TARGET_NR_getpeername
9611 case TARGET_NR_getpeername:
9612 ret = do_getpeername(arg1, arg2, arg3);
9613 break;
9614 #endif
9615 #ifdef TARGET_NR_getsockname
9616 case TARGET_NR_getsockname:
9617 ret = do_getsockname(arg1, arg2, arg3);
9618 break;
9619 #endif
9620 #ifdef TARGET_NR_getsockopt
9621 case TARGET_NR_getsockopt:
9622 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9623 break;
9624 #endif
9625 #ifdef TARGET_NR_listen
9626 case TARGET_NR_listen:
9627 ret = get_errno(listen(arg1, arg2));
9628 break;
9629 #endif
9630 #ifdef TARGET_NR_recv
9631 case TARGET_NR_recv:
9632 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9633 break;
9634 #endif
9635 #ifdef TARGET_NR_recvfrom
9636 case TARGET_NR_recvfrom:
9637 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9638 break;
9639 #endif
9640 #ifdef TARGET_NR_recvmsg
9641 case TARGET_NR_recvmsg:
9642 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
9643 break;
9644 #endif
9645 #ifdef TARGET_NR_send
9646 case TARGET_NR_send:
9647 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9648 break;
9649 #endif
9650 #ifdef TARGET_NR_sendmsg
9651 case TARGET_NR_sendmsg:
9652 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
9653 break;
9654 #endif
9655 #ifdef TARGET_NR_sendmmsg
9656 case TARGET_NR_sendmmsg:
9657 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9658 break;
9659 case TARGET_NR_recvmmsg:
9660 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9661 break;
9662 #endif
9663 #ifdef TARGET_NR_sendto
9664 case TARGET_NR_sendto:
9665 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9666 break;
9667 #endif
9668 #ifdef TARGET_NR_shutdown
9669 case TARGET_NR_shutdown:
9670 ret = get_errno(shutdown(arg1, arg2));
9671 break;
9672 #endif
9673 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9674 case TARGET_NR_getrandom:
9675 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9676 if (!p) {
9677 goto efault;
9679 ret = get_errno(getrandom(p, arg2, arg3));
9680 unlock_user(p, arg1, ret);
9681 break;
9682 #endif
9683 #ifdef TARGET_NR_socket
9684 case TARGET_NR_socket:
9685 ret = do_socket(arg1, arg2, arg3);
9686 break;
9687 #endif
9688 #ifdef TARGET_NR_socketpair
9689 case TARGET_NR_socketpair:
9690 ret = do_socketpair(arg1, arg2, arg3, arg4);
9691 break;
9692 #endif
9693 #ifdef TARGET_NR_setsockopt
9694 case TARGET_NR_setsockopt:
9695 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9696 break;
9697 #endif
9698 #if defined(TARGET_NR_syslog)
9699 case TARGET_NR_syslog:
9701 int len = arg2;
9703 switch (arg1) {
9704 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
9705 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
9706 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
9707 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
9708 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
9709 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9710 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
9711 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
9713 ret = get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9715 break;
9716 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
9717 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
9718 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
9720 ret = -TARGET_EINVAL;
9721 if (len < 0) {
9722 goto fail;
9724 ret = 0;
9725 if (len == 0) {
9726 break;
9728 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9729 if (!p) {
9730 ret = -TARGET_EFAULT;
9731 goto fail;
9733 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9734 unlock_user(p, arg2, arg3);
9736 break;
9737 default:
9738 ret = -EINVAL;
9739 break;
9742 break;
9743 #endif
9744 case TARGET_NR_setitimer:
9746 struct itimerval value, ovalue, *pvalue;
9748 if (arg2) {
9749 pvalue = &value;
9750 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9751 || copy_from_user_timeval(&pvalue->it_value,
9752 arg2 + sizeof(struct target_timeval)))
9753 goto efault;
9754 } else {
9755 pvalue = NULL;
9757 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9758 if (!is_error(ret) && arg3) {
9759 if (copy_to_user_timeval(arg3,
9760 &ovalue.it_interval)
9761 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9762 &ovalue.it_value))
9763 goto efault;
9766 break;
9767 case TARGET_NR_getitimer:
9769 struct itimerval value;
9771 ret = get_errno(getitimer(arg1, &value));
9772 if (!is_error(ret) && arg2) {
9773 if (copy_to_user_timeval(arg2,
9774 &value.it_interval)
9775 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9776 &value.it_value))
9777 goto efault;
9780 break;
9781 #ifdef TARGET_NR_stat
9782 case TARGET_NR_stat:
9783 if (!(p = lock_user_string(arg1)))
9784 goto efault;
9785 ret = get_errno(stat(path(p), &st));
9786 unlock_user(p, arg1, 0);
9787 goto do_stat;
9788 #endif
9789 #ifdef TARGET_NR_lstat
9790 case TARGET_NR_lstat:
9791 if (!(p = lock_user_string(arg1)))
9792 goto efault;
9793 ret = get_errno(lstat(path(p), &st));
9794 unlock_user(p, arg1, 0);
9795 goto do_stat;
9796 #endif
9797 case TARGET_NR_fstat:
9799 ret = get_errno(fstat(arg1, &st));
9800 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9801 do_stat:
9802 #endif
9803 if (!is_error(ret)) {
9804 struct target_stat *target_st;
9806 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9807 goto efault;
9808 memset(target_st, 0, sizeof(*target_st));
9809 __put_user(st.st_dev, &target_st->st_dev);
9810 __put_user(st.st_ino, &target_st->st_ino);
9811 __put_user(st.st_mode, &target_st->st_mode);
9812 __put_user(st.st_uid, &target_st->st_uid);
9813 __put_user(st.st_gid, &target_st->st_gid);
9814 __put_user(st.st_nlink, &target_st->st_nlink);
9815 __put_user(st.st_rdev, &target_st->st_rdev);
9816 __put_user(st.st_size, &target_st->st_size);
9817 __put_user(st.st_blksize, &target_st->st_blksize);
9818 __put_user(st.st_blocks, &target_st->st_blocks);
9819 __put_user(st.st_atime, &target_st->target_st_atime);
9820 __put_user(st.st_mtime, &target_st->target_st_mtime);
9821 __put_user(st.st_ctime, &target_st->target_st_ctime);
9822 unlock_user_struct(target_st, arg2, 1);
9825 break;
9826 #ifdef TARGET_NR_olduname
9827 case TARGET_NR_olduname:
9828 goto unimplemented;
9829 #endif
9830 #ifdef TARGET_NR_iopl
9831 case TARGET_NR_iopl:
9832 goto unimplemented;
9833 #endif
9834 case TARGET_NR_vhangup:
9835 ret = get_errno(vhangup());
9836 break;
9837 #ifdef TARGET_NR_idle
9838 case TARGET_NR_idle:
9839 goto unimplemented;
9840 #endif
9841 #ifdef TARGET_NR_syscall
9842 case TARGET_NR_syscall:
9843 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9844 arg6, arg7, arg8, 0);
9845 break;
9846 #endif
9847 case TARGET_NR_wait4:
9849 int status;
9850 abi_long status_ptr = arg2;
9851 struct rusage rusage, *rusage_ptr;
9852 abi_ulong target_rusage = arg4;
9853 abi_long rusage_err;
9854 if (target_rusage)
9855 rusage_ptr = &rusage;
9856 else
9857 rusage_ptr = NULL;
9858 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9859 if (!is_error(ret)) {
9860 if (status_ptr && ret) {
9861 status = host_to_target_waitstatus(status);
9862 if (put_user_s32(status, status_ptr))
9863 goto efault;
9865 if (target_rusage) {
9866 rusage_err = host_to_target_rusage(target_rusage, &rusage);
9867 if (rusage_err) {
9868 ret = rusage_err;
9873 break;
9874 #ifdef TARGET_NR_swapoff
9875 case TARGET_NR_swapoff:
9876 if (!(p = lock_user_string(arg1)))
9877 goto efault;
9878 ret = get_errno(swapoff(p));
9879 unlock_user(p, arg1, 0);
9880 break;
9881 #endif
9882 case TARGET_NR_sysinfo:
9884 struct target_sysinfo *target_value;
9885 struct sysinfo value;
9886 ret = get_errno(sysinfo(&value));
9887 if (!is_error(ret) && arg1)
9889 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9890 goto efault;
9891 __put_user(value.uptime, &target_value->uptime);
9892 __put_user(value.loads[0], &target_value->loads[0]);
9893 __put_user(value.loads[1], &target_value->loads[1]);
9894 __put_user(value.loads[2], &target_value->loads[2]);
9895 __put_user(value.totalram, &target_value->totalram);
9896 __put_user(value.freeram, &target_value->freeram);
9897 __put_user(value.sharedram, &target_value->sharedram);
9898 __put_user(value.bufferram, &target_value->bufferram);
9899 __put_user(value.totalswap, &target_value->totalswap);
9900 __put_user(value.freeswap, &target_value->freeswap);
9901 __put_user(value.procs, &target_value->procs);
9902 __put_user(value.totalhigh, &target_value->totalhigh);
9903 __put_user(value.freehigh, &target_value->freehigh);
9904 __put_user(value.mem_unit, &target_value->mem_unit);
9905 unlock_user_struct(target_value, arg1, 1);
9908 break;
9909 #ifdef TARGET_NR_ipc
9910 case TARGET_NR_ipc:
9911 ret = do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9912 break;
9913 #endif
9914 #ifdef TARGET_NR_semget
9915 case TARGET_NR_semget:
9916 ret = get_errno(semget(arg1, arg2, arg3));
9917 break;
9918 #endif
9919 #ifdef TARGET_NR_semop
9920 case TARGET_NR_semop:
9921 ret = do_semop(arg1, arg2, arg3);
9922 break;
9923 #endif
9924 #ifdef TARGET_NR_semctl
9925 case TARGET_NR_semctl:
9926 ret = do_semctl(arg1, arg2, arg3, arg4);
9927 break;
9928 #endif
9929 #ifdef TARGET_NR_msgctl
9930 case TARGET_NR_msgctl:
9931 ret = do_msgctl(arg1, arg2, arg3);
9932 break;
9933 #endif
9934 #ifdef TARGET_NR_msgget
9935 case TARGET_NR_msgget:
9936 ret = get_errno(msgget(arg1, arg2));
9937 break;
9938 #endif
9939 #ifdef TARGET_NR_msgrcv
9940 case TARGET_NR_msgrcv:
9941 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9942 break;
9943 #endif
9944 #ifdef TARGET_NR_msgsnd
9945 case TARGET_NR_msgsnd:
9946 ret = do_msgsnd(arg1, arg2, arg3, arg4);
9947 break;
9948 #endif
9949 #ifdef TARGET_NR_shmget
9950 case TARGET_NR_shmget:
9951 ret = get_errno(shmget(arg1, arg2, arg3));
9952 break;
9953 #endif
9954 #ifdef TARGET_NR_shmctl
9955 case TARGET_NR_shmctl:
9956 ret = do_shmctl(arg1, arg2, arg3);
9957 break;
9958 #endif
9959 #ifdef TARGET_NR_shmat
9960 case TARGET_NR_shmat:
9961 ret = do_shmat(cpu_env, arg1, arg2, arg3);
9962 break;
9963 #endif
9964 #ifdef TARGET_NR_shmdt
9965 case TARGET_NR_shmdt:
9966 ret = do_shmdt(arg1);
9967 break;
9968 #endif
9969 case TARGET_NR_fsync:
9970 ret = get_errno(fsync(arg1));
9971 break;
9972 case TARGET_NR_clone:
9973 /* Linux manages to have three different orderings for its
9974 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9975 * match the kernel's CONFIG_CLONE_* settings.
9976 * Microblaze is further special in that it uses a sixth
9977 * implicit argument to clone for the TLS pointer.
9979 #if defined(TARGET_MICROBLAZE)
9980 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9981 #elif defined(TARGET_CLONE_BACKWARDS)
9982 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9983 #elif defined(TARGET_CLONE_BACKWARDS2)
9984 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9985 #else
9986 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9987 #endif
9988 break;
9989 #ifdef __NR_exit_group
9990 /* new thread calls */
9991 case TARGET_NR_exit_group:
9992 #ifdef TARGET_GPROF
9993 _mcleanup();
9994 #endif
9995 gdb_exit(cpu_env, arg1);
9996 ret = get_errno(exit_group(arg1));
9997 break;
9998 #endif
9999 case TARGET_NR_setdomainname:
10000 if (!(p = lock_user_string(arg1)))
10001 goto efault;
10002 ret = get_errno(setdomainname(p, arg2));
10003 unlock_user(p, arg1, 0);
10004 break;
10005 case TARGET_NR_uname:
10006 /* no need to transcode because we use the linux syscall */
10008 struct new_utsname * buf;
10010 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10011 goto efault;
10012 ret = get_errno(sys_uname(buf));
10013 if (!is_error(ret)) {
10014 /* Overwrite the native machine name with whatever is being
10015 emulated. */
10016 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
10017 /* Allow the user to override the reported release. */
10018 if (qemu_uname_release && *qemu_uname_release) {
10019 g_strlcpy(buf->release, qemu_uname_release,
10020 sizeof(buf->release));
10023 unlock_user_struct(buf, arg1, 1);
10025 break;
10026 #ifdef TARGET_I386
10027 case TARGET_NR_modify_ldt:
10028 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
10029 break;
10030 #if !defined(TARGET_X86_64)
10031 case TARGET_NR_vm86old:
10032 goto unimplemented;
10033 case TARGET_NR_vm86:
10034 ret = do_vm86(cpu_env, arg1, arg2);
10035 break;
10036 #endif
10037 #endif
10038 case TARGET_NR_adjtimex:
10040 struct timex host_buf;
10042 if (target_to_host_timex(&host_buf, arg1) != 0) {
10043 goto efault;
10045 ret = get_errno(adjtimex(&host_buf));
10046 if (!is_error(ret)) {
10047 if (host_to_target_timex(arg1, &host_buf) != 0) {
10048 goto efault;
10052 break;
10053 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10054 case TARGET_NR_clock_adjtime:
10056 struct timex htx, *phtx = &htx;
10058 if (target_to_host_timex(phtx, arg2) != 0) {
10059 goto efault;
10061 ret = get_errno(clock_adjtime(arg1, phtx));
10062 if (!is_error(ret) && phtx) {
10063 if (host_to_target_timex(arg2, phtx) != 0) {
10064 goto efault;
10068 break;
10069 #endif
10070 #ifdef TARGET_NR_create_module
10071 case TARGET_NR_create_module:
10072 #endif
10073 case TARGET_NR_init_module:
10074 case TARGET_NR_delete_module:
10075 #ifdef TARGET_NR_get_kernel_syms
10076 case TARGET_NR_get_kernel_syms:
10077 #endif
10078 goto unimplemented;
10079 case TARGET_NR_quotactl:
10080 goto unimplemented;
10081 case TARGET_NR_getpgid:
10082 ret = get_errno(getpgid(arg1));
10083 break;
10084 case TARGET_NR_fchdir:
10085 ret = get_errno(fchdir(arg1));
10086 break;
10087 #ifdef TARGET_NR_bdflush /* not on x86_64 */
10088 case TARGET_NR_bdflush:
10089 goto unimplemented;
10090 #endif
10091 #ifdef TARGET_NR_sysfs
10092 case TARGET_NR_sysfs:
10093 goto unimplemented;
10094 #endif
10095 case TARGET_NR_personality:
10096 ret = get_errno(personality(arg1));
10097 break;
10098 #ifdef TARGET_NR_afs_syscall
10099 case TARGET_NR_afs_syscall:
10100 goto unimplemented;
10101 #endif
10102 #ifdef TARGET_NR__llseek /* Not on alpha */
10103 case TARGET_NR__llseek:
10105 int64_t res;
10106 #if !defined(__NR_llseek)
10107 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10108 if (res == -1) {
10109 ret = get_errno(res);
10110 } else {
10111 ret = 0;
10113 #else
10114 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10115 #endif
10116 if ((ret == 0) && put_user_s64(res, arg4)) {
10117 goto efault;
10120 break;
10121 #endif
10122 #ifdef TARGET_NR_getdents
10123 case TARGET_NR_getdents:
10124 #ifdef __NR_getdents
10125 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10127 struct target_dirent *target_dirp;
10128 struct linux_dirent *dirp;
10129 abi_long count = arg3;
10131 dirp = g_try_malloc(count);
10132 if (!dirp) {
10133 ret = -TARGET_ENOMEM;
10134 goto fail;
10137 ret = get_errno(sys_getdents(arg1, dirp, count));
10138 if (!is_error(ret)) {
10139 struct linux_dirent *de;
10140 struct target_dirent *tde;
10141 int len = ret;
10142 int reclen, treclen;
10143 int count1, tnamelen;
10145 count1 = 0;
10146 de = dirp;
10147 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10148 goto efault;
10149 tde = target_dirp;
10150 while (len > 0) {
10151 reclen = de->d_reclen;
10152 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10153 assert(tnamelen >= 0);
10154 treclen = tnamelen + offsetof(struct target_dirent, d_name);
10155 assert(count1 + treclen <= count);
10156 tde->d_reclen = tswap16(treclen);
10157 tde->d_ino = tswapal(de->d_ino);
10158 tde->d_off = tswapal(de->d_off);
10159 memcpy(tde->d_name, de->d_name, tnamelen);
10160 de = (struct linux_dirent *)((char *)de + reclen);
10161 len -= reclen;
10162 tde = (struct target_dirent *)((char *)tde + treclen);
10163 count1 += treclen;
10165 ret = count1;
10166 unlock_user(target_dirp, arg2, ret);
10168 g_free(dirp);
10170 #else
10172 struct linux_dirent *dirp;
10173 abi_long count = arg3;
10175 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10176 goto efault;
10177 ret = get_errno(sys_getdents(arg1, dirp, count));
10178 if (!is_error(ret)) {
10179 struct linux_dirent *de;
10180 int len = ret;
10181 int reclen;
10182 de = dirp;
10183 while (len > 0) {
10184 reclen = de->d_reclen;
10185 if (reclen > len)
10186 break;
10187 de->d_reclen = tswap16(reclen);
10188 tswapls(&de->d_ino);
10189 tswapls(&de->d_off);
10190 de = (struct linux_dirent *)((char *)de + reclen);
10191 len -= reclen;
10194 unlock_user(dirp, arg2, ret);
10196 #endif
10197 #else
10198 /* Implement getdents in terms of getdents64 */
10200 struct linux_dirent64 *dirp;
10201 abi_long count = arg3;
10203 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10204 if (!dirp) {
10205 goto efault;
10207 ret = get_errno(sys_getdents64(arg1, dirp, count));
10208 if (!is_error(ret)) {
10209 /* Convert the dirent64 structs to target dirent. We do this
10210 * in-place, since we can guarantee that a target_dirent is no
10211 * larger than a dirent64; however this means we have to be
10212 * careful to read everything before writing in the new format.
10214 struct linux_dirent64 *de;
10215 struct target_dirent *tde;
10216 int len = ret;
10217 int tlen = 0;
10219 de = dirp;
10220 tde = (struct target_dirent *)dirp;
10221 while (len > 0) {
10222 int namelen, treclen;
10223 int reclen = de->d_reclen;
10224 uint64_t ino = de->d_ino;
10225 int64_t off = de->d_off;
10226 uint8_t type = de->d_type;
10228 namelen = strlen(de->d_name);
10229 treclen = offsetof(struct target_dirent, d_name)
10230 + namelen + 2;
10231 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10233 memmove(tde->d_name, de->d_name, namelen + 1);
10234 tde->d_ino = tswapal(ino);
10235 tde->d_off = tswapal(off);
10236 tde->d_reclen = tswap16(treclen);
10237 /* The target_dirent type is in what was formerly a padding
10238 * byte at the end of the structure:
10240 *(((char *)tde) + treclen - 1) = type;
10242 de = (struct linux_dirent64 *)((char *)de + reclen);
10243 tde = (struct target_dirent *)((char *)tde + treclen);
10244 len -= reclen;
10245 tlen += treclen;
10247 ret = tlen;
10249 unlock_user(dirp, arg2, ret);
10251 #endif
10252 break;
10253 #endif /* TARGET_NR_getdents */
10254 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10255 case TARGET_NR_getdents64:
10257 struct linux_dirent64 *dirp;
10258 abi_long count = arg3;
10259 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10260 goto efault;
10261 ret = get_errno(sys_getdents64(arg1, dirp, count));
10262 if (!is_error(ret)) {
10263 struct linux_dirent64 *de;
10264 int len = ret;
10265 int reclen;
10266 de = dirp;
10267 while (len > 0) {
10268 reclen = de->d_reclen;
10269 if (reclen > len)
10270 break;
10271 de->d_reclen = tswap16(reclen);
10272 tswap64s((uint64_t *)&de->d_ino);
10273 tswap64s((uint64_t *)&de->d_off);
10274 de = (struct linux_dirent64 *)((char *)de + reclen);
10275 len -= reclen;
10278 unlock_user(dirp, arg2, ret);
10280 break;
10281 #endif /* TARGET_NR_getdents64 */
10282 #if defined(TARGET_NR__newselect)
10283 case TARGET_NR__newselect:
10284 ret = do_select(arg1, arg2, arg3, arg4, arg5);
10285 break;
10286 #endif
10287 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10288 # ifdef TARGET_NR_poll
10289 case TARGET_NR_poll:
10290 # endif
10291 # ifdef TARGET_NR_ppoll
10292 case TARGET_NR_ppoll:
10293 # endif
10295 struct target_pollfd *target_pfd;
10296 unsigned int nfds = arg2;
10297 struct pollfd *pfd;
10298 unsigned int i;
10300 pfd = NULL;
10301 target_pfd = NULL;
10302 if (nfds) {
10303 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
10304 ret = -TARGET_EINVAL;
10305 break;
10308 target_pfd = lock_user(VERIFY_WRITE, arg1,
10309 sizeof(struct target_pollfd) * nfds, 1);
10310 if (!target_pfd) {
10311 goto efault;
10314 pfd = alloca(sizeof(struct pollfd) * nfds);
10315 for (i = 0; i < nfds; i++) {
10316 pfd[i].fd = tswap32(target_pfd[i].fd);
10317 pfd[i].events = tswap16(target_pfd[i].events);
10321 switch (num) {
10322 # ifdef TARGET_NR_ppoll
10323 case TARGET_NR_ppoll:
10325 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
10326 target_sigset_t *target_set;
10327 sigset_t _set, *set = &_set;
10329 if (arg3) {
10330 if (target_to_host_timespec(timeout_ts, arg3)) {
10331 unlock_user(target_pfd, arg1, 0);
10332 goto efault;
10334 } else {
10335 timeout_ts = NULL;
10338 if (arg4) {
10339 if (arg5 != sizeof(target_sigset_t)) {
10340 unlock_user(target_pfd, arg1, 0);
10341 ret = -TARGET_EINVAL;
10342 break;
10345 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
10346 if (!target_set) {
10347 unlock_user(target_pfd, arg1, 0);
10348 goto efault;
10350 target_to_host_sigset(set, target_set);
10351 } else {
10352 set = NULL;
10355 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
10356 set, SIGSET_T_SIZE));
10358 if (!is_error(ret) && arg3) {
10359 host_to_target_timespec(arg3, timeout_ts);
10361 if (arg4) {
10362 unlock_user(target_set, arg4, 0);
10364 break;
10366 # endif
10367 # ifdef TARGET_NR_poll
10368 case TARGET_NR_poll:
10370 struct timespec ts, *pts;
10372 if (arg3 >= 0) {
10373 /* Convert ms to secs, ns */
10374 ts.tv_sec = arg3 / 1000;
10375 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10376 pts = &ts;
10377 } else {
10378 /* -ve poll() timeout means "infinite" */
10379 pts = NULL;
10381 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10382 break;
10384 # endif
10385 default:
10386 g_assert_not_reached();
10389 if (!is_error(ret)) {
10390 for(i = 0; i < nfds; i++) {
10391 target_pfd[i].revents = tswap16(pfd[i].revents);
10394 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10396 break;
10397 #endif
10398 case TARGET_NR_flock:
10399 /* NOTE: the flock constant seems to be the same for every
10400 Linux platform */
10401 ret = get_errno(safe_flock(arg1, arg2));
10402 break;
10403 case TARGET_NR_readv:
10405 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10406 if (vec != NULL) {
10407 ret = get_errno(safe_readv(arg1, vec, arg3));
10408 unlock_iovec(vec, arg2, arg3, 1);
10409 } else {
10410 ret = -host_to_target_errno(errno);
10413 break;
10414 case TARGET_NR_writev:
10416 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10417 if (vec != NULL) {
10418 ret = get_errno(safe_writev(arg1, vec, arg3));
10419 unlock_iovec(vec, arg2, arg3, 0);
10420 } else {
10421 ret = -host_to_target_errno(errno);
10424 break;
10425 #if defined(TARGET_NR_preadv)
10426 case TARGET_NR_preadv:
10428 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10429 if (vec != NULL) {
10430 ret = get_errno(safe_preadv(arg1, vec, arg3, arg4, arg5));
10431 unlock_iovec(vec, arg2, arg3, 1);
10432 } else {
10433 ret = -host_to_target_errno(errno);
10436 break;
10437 #endif
10438 #if defined(TARGET_NR_pwritev)
10439 case TARGET_NR_pwritev:
10441 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10442 if (vec != NULL) {
10443 ret = get_errno(safe_pwritev(arg1, vec, arg3, arg4, arg5));
10444 unlock_iovec(vec, arg2, arg3, 0);
10445 } else {
10446 ret = -host_to_target_errno(errno);
10449 break;
10450 #endif
10451 case TARGET_NR_getsid:
10452 ret = get_errno(getsid(arg1));
10453 break;
10454 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10455 case TARGET_NR_fdatasync:
10456 ret = get_errno(fdatasync(arg1));
10457 break;
10458 #endif
10459 #ifdef TARGET_NR__sysctl
10460 case TARGET_NR__sysctl:
10461 /* We don't implement this, but ENOTDIR is always a safe
10462 return value. */
10463 ret = -TARGET_ENOTDIR;
10464 break;
10465 #endif
10466 case TARGET_NR_sched_getaffinity:
10468 unsigned int mask_size;
10469 unsigned long *mask;
10472 * sched_getaffinity needs multiples of ulong, so need to take
10473 * care of mismatches between target ulong and host ulong sizes.
10475 if (arg2 & (sizeof(abi_ulong) - 1)) {
10476 ret = -TARGET_EINVAL;
10477 break;
10479 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10481 mask = alloca(mask_size);
10482 memset(mask, 0, mask_size);
10483 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10485 if (!is_error(ret)) {
10486 if (ret > arg2) {
10487 /* More data returned than the caller's buffer will fit.
10488 * This only happens if sizeof(abi_long) < sizeof(long)
10489 * and the caller passed us a buffer holding an odd number
10490 * of abi_longs. If the host kernel is actually using the
10491 * extra 4 bytes then fail EINVAL; otherwise we can just
10492 * ignore them and only copy the interesting part.
10494 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10495 if (numcpus > arg2 * 8) {
10496 ret = -TARGET_EINVAL;
10497 break;
10499 ret = arg2;
10502 ret = host_to_target_cpu_mask(mask, mask_size, arg3, arg2);
10505 break;
10506 case TARGET_NR_sched_setaffinity:
10508 unsigned int mask_size;
10509 unsigned long *mask;
10512 * sched_setaffinity needs multiples of ulong, so need to take
10513 * care of mismatches between target ulong and host ulong sizes.
10515 if (arg2 & (sizeof(abi_ulong) - 1)) {
10516 ret = -TARGET_EINVAL;
10517 break;
10519 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10520 mask = alloca(mask_size);
10522 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10523 if (ret) {
10524 break;
10527 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10529 break;
10530 case TARGET_NR_getcpu:
10532 unsigned cpu, node;
10533 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10534 arg2 ? &node : NULL,
10535 NULL));
10536 if (is_error(ret)) {
10537 goto fail;
10539 if (arg1 && put_user_u32(cpu, arg1)) {
10540 goto efault;
10542 if (arg2 && put_user_u32(node, arg2)) {
10543 goto efault;
10546 break;
10547 case TARGET_NR_sched_setparam:
10549 struct sched_param *target_schp;
10550 struct sched_param schp;
10552 if (arg2 == 0) {
10553 return -TARGET_EINVAL;
10555 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10556 goto efault;
10557 schp.sched_priority = tswap32(target_schp->sched_priority);
10558 unlock_user_struct(target_schp, arg2, 0);
10559 ret = get_errno(sched_setparam(arg1, &schp));
10561 break;
10562 case TARGET_NR_sched_getparam:
10564 struct sched_param *target_schp;
10565 struct sched_param schp;
10567 if (arg2 == 0) {
10568 return -TARGET_EINVAL;
10570 ret = get_errno(sched_getparam(arg1, &schp));
10571 if (!is_error(ret)) {
10572 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10573 goto efault;
10574 target_schp->sched_priority = tswap32(schp.sched_priority);
10575 unlock_user_struct(target_schp, arg2, 1);
10578 break;
10579 case TARGET_NR_sched_setscheduler:
10581 struct sched_param *target_schp;
10582 struct sched_param schp;
10583 if (arg3 == 0) {
10584 return -TARGET_EINVAL;
10586 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10587 goto efault;
10588 schp.sched_priority = tswap32(target_schp->sched_priority);
10589 unlock_user_struct(target_schp, arg3, 0);
10590 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
10592 break;
10593 case TARGET_NR_sched_getscheduler:
10594 ret = get_errno(sched_getscheduler(arg1));
10595 break;
10596 case TARGET_NR_sched_yield:
10597 ret = get_errno(sched_yield());
10598 break;
10599 case TARGET_NR_sched_get_priority_max:
10600 ret = get_errno(sched_get_priority_max(arg1));
10601 break;
10602 case TARGET_NR_sched_get_priority_min:
10603 ret = get_errno(sched_get_priority_min(arg1));
10604 break;
10605 case TARGET_NR_sched_rr_get_interval:
10607 struct timespec ts;
10608 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10609 if (!is_error(ret)) {
10610 ret = host_to_target_timespec(arg2, &ts);
10613 break;
10614 case TARGET_NR_nanosleep:
10616 struct timespec req, rem;
10617 target_to_host_timespec(&req, arg1);
10618 ret = get_errno(safe_nanosleep(&req, &rem));
10619 if (is_error(ret) && arg2) {
10620 host_to_target_timespec(arg2, &rem);
10623 break;
10624 #ifdef TARGET_NR_query_module
10625 case TARGET_NR_query_module:
10626 goto unimplemented;
10627 #endif
10628 #ifdef TARGET_NR_nfsservctl
10629 case TARGET_NR_nfsservctl:
10630 goto unimplemented;
10631 #endif
10632 case TARGET_NR_prctl:
10633 switch (arg1) {
10634 case PR_GET_PDEATHSIG:
10636 int deathsig;
10637 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10638 if (!is_error(ret) && arg2
10639 && put_user_ual(deathsig, arg2)) {
10640 goto efault;
10642 break;
10644 #ifdef PR_GET_NAME
10645 case PR_GET_NAME:
10647 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10648 if (!name) {
10649 goto efault;
10651 ret = get_errno(prctl(arg1, (unsigned long)name,
10652 arg3, arg4, arg5));
10653 unlock_user(name, arg2, 16);
10654 break;
10656 case PR_SET_NAME:
10658 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10659 if (!name) {
10660 goto efault;
10662 ret = get_errno(prctl(arg1, (unsigned long)name,
10663 arg3, arg4, arg5));
10664 unlock_user(name, arg2, 0);
10665 break;
10667 #endif
10668 case PR_GET_SECCOMP:
10669 case PR_SET_SECCOMP:
10670 /* Disable seccomp to prevent the target disabling syscalls we
10671 * need. */
10672 ret = -TARGET_EINVAL;
10673 break;
10674 default:
10675 /* Most prctl options have no pointer arguments */
10676 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10677 break;
10679 break;
10680 #ifdef TARGET_NR_arch_prctl
10681 case TARGET_NR_arch_prctl:
10682 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10683 ret = do_arch_prctl(cpu_env, arg1, arg2);
10684 break;
10685 #else
10686 goto unimplemented;
10687 #endif
10688 #endif
10689 #ifdef TARGET_NR_pread64
10690 case TARGET_NR_pread64:
10691 if (regpairs_aligned(cpu_env, num)) {
10692 arg4 = arg5;
10693 arg5 = arg6;
10695 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
10696 goto efault;
10697 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10698 unlock_user(p, arg2, ret);
10699 break;
10700 case TARGET_NR_pwrite64:
10701 if (regpairs_aligned(cpu_env, num)) {
10702 arg4 = arg5;
10703 arg5 = arg6;
10705 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
10706 goto efault;
10707 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10708 unlock_user(p, arg2, 0);
10709 break;
10710 #endif
10711 case TARGET_NR_getcwd:
10712 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10713 goto efault;
10714 ret = get_errno(sys_getcwd1(p, arg2));
10715 unlock_user(p, arg1, ret);
10716 break;
10717 case TARGET_NR_capget:
10718 case TARGET_NR_capset:
10720 struct target_user_cap_header *target_header;
10721 struct target_user_cap_data *target_data = NULL;
10722 struct __user_cap_header_struct header;
10723 struct __user_cap_data_struct data[2];
10724 struct __user_cap_data_struct *dataptr = NULL;
10725 int i, target_datalen;
10726 int data_items = 1;
10728 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10729 goto efault;
10731 header.version = tswap32(target_header->version);
10732 header.pid = tswap32(target_header->pid);
10734 if (header.version != _LINUX_CAPABILITY_VERSION) {
10735 /* Version 2 and up takes pointer to two user_data structs */
10736 data_items = 2;
10739 target_datalen = sizeof(*target_data) * data_items;
10741 if (arg2) {
10742 if (num == TARGET_NR_capget) {
10743 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10744 } else {
10745 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10747 if (!target_data) {
10748 unlock_user_struct(target_header, arg1, 0);
10749 goto efault;
10752 if (num == TARGET_NR_capset) {
10753 for (i = 0; i < data_items; i++) {
10754 data[i].effective = tswap32(target_data[i].effective);
10755 data[i].permitted = tswap32(target_data[i].permitted);
10756 data[i].inheritable = tswap32(target_data[i].inheritable);
10760 dataptr = data;
10763 if (num == TARGET_NR_capget) {
10764 ret = get_errno(capget(&header, dataptr));
10765 } else {
10766 ret = get_errno(capset(&header, dataptr));
10769 /* The kernel always updates version for both capget and capset */
10770 target_header->version = tswap32(header.version);
10771 unlock_user_struct(target_header, arg1, 1);
10773 if (arg2) {
10774 if (num == TARGET_NR_capget) {
10775 for (i = 0; i < data_items; i++) {
10776 target_data[i].effective = tswap32(data[i].effective);
10777 target_data[i].permitted = tswap32(data[i].permitted);
10778 target_data[i].inheritable = tswap32(data[i].inheritable);
10780 unlock_user(target_data, arg2, target_datalen);
10781 } else {
10782 unlock_user(target_data, arg2, 0);
10785 break;
10787 case TARGET_NR_sigaltstack:
10788 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
10789 break;
10791 #ifdef CONFIG_SENDFILE
10792 case TARGET_NR_sendfile:
10794 off_t *offp = NULL;
10795 off_t off;
10796 if (arg3) {
10797 ret = get_user_sal(off, arg3);
10798 if (is_error(ret)) {
10799 break;
10801 offp = &off;
10803 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10804 if (!is_error(ret) && arg3) {
10805 abi_long ret2 = put_user_sal(off, arg3);
10806 if (is_error(ret2)) {
10807 ret = ret2;
10810 break;
10812 #ifdef TARGET_NR_sendfile64
10813 case TARGET_NR_sendfile64:
10815 off_t *offp = NULL;
10816 off_t off;
10817 if (arg3) {
10818 ret = get_user_s64(off, arg3);
10819 if (is_error(ret)) {
10820 break;
10822 offp = &off;
10824 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10825 if (!is_error(ret) && arg3) {
10826 abi_long ret2 = put_user_s64(off, arg3);
10827 if (is_error(ret2)) {
10828 ret = ret2;
10831 break;
10833 #endif
10834 #else
10835 case TARGET_NR_sendfile:
10836 #ifdef TARGET_NR_sendfile64
10837 case TARGET_NR_sendfile64:
10838 #endif
10839 goto unimplemented;
10840 #endif
10842 #ifdef TARGET_NR_getpmsg
10843 case TARGET_NR_getpmsg:
10844 goto unimplemented;
10845 #endif
10846 #ifdef TARGET_NR_putpmsg
10847 case TARGET_NR_putpmsg:
10848 goto unimplemented;
10849 #endif
10850 #ifdef TARGET_NR_vfork
10851 case TARGET_NR_vfork:
10852 ret = get_errno(do_fork(cpu_env,
10853 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10854 0, 0, 0, 0));
10855 break;
10856 #endif
10857 #ifdef TARGET_NR_ugetrlimit
10858 case TARGET_NR_ugetrlimit:
10860 struct rlimit rlim;
10861 int resource = target_to_host_resource(arg1);
10862 ret = get_errno(getrlimit(resource, &rlim));
10863 if (!is_error(ret)) {
10864 struct target_rlimit *target_rlim;
10865 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10866 goto efault;
10867 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10868 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10869 unlock_user_struct(target_rlim, arg2, 1);
10871 break;
10873 #endif
10874 #ifdef TARGET_NR_truncate64
10875 case TARGET_NR_truncate64:
10876 if (!(p = lock_user_string(arg1)))
10877 goto efault;
10878 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10879 unlock_user(p, arg1, 0);
10880 break;
10881 #endif
10882 #ifdef TARGET_NR_ftruncate64
10883 case TARGET_NR_ftruncate64:
10884 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10885 break;
10886 #endif
10887 #ifdef TARGET_NR_stat64
10888 case TARGET_NR_stat64:
10889 if (!(p = lock_user_string(arg1)))
10890 goto efault;
10891 ret = get_errno(stat(path(p), &st));
10892 unlock_user(p, arg1, 0);
10893 if (!is_error(ret))
10894 ret = host_to_target_stat64(cpu_env, arg2, &st);
10895 break;
10896 #endif
10897 #ifdef TARGET_NR_lstat64
10898 case TARGET_NR_lstat64:
10899 if (!(p = lock_user_string(arg1)))
10900 goto efault;
10901 ret = get_errno(lstat(path(p), &st));
10902 unlock_user(p, arg1, 0);
10903 if (!is_error(ret))
10904 ret = host_to_target_stat64(cpu_env, arg2, &st);
10905 break;
10906 #endif
10907 #ifdef TARGET_NR_fstat64
10908 case TARGET_NR_fstat64:
10909 ret = get_errno(fstat(arg1, &st));
10910 if (!is_error(ret))
10911 ret = host_to_target_stat64(cpu_env, arg2, &st);
10912 break;
10913 #endif
10914 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10915 #ifdef TARGET_NR_fstatat64
10916 case TARGET_NR_fstatat64:
10917 #endif
10918 #ifdef TARGET_NR_newfstatat
10919 case TARGET_NR_newfstatat:
10920 #endif
10921 if (!(p = lock_user_string(arg2)))
10922 goto efault;
10923 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10924 if (!is_error(ret))
10925 ret = host_to_target_stat64(cpu_env, arg3, &st);
10926 break;
10927 #endif
10928 #ifdef TARGET_NR_lchown
10929 case TARGET_NR_lchown:
10930 if (!(p = lock_user_string(arg1)))
10931 goto efault;
10932 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10933 unlock_user(p, arg1, 0);
10934 break;
10935 #endif
10936 #ifdef TARGET_NR_getuid
10937 case TARGET_NR_getuid:
10938 ret = get_errno(high2lowuid(getuid()));
10939 break;
10940 #endif
10941 #ifdef TARGET_NR_getgid
10942 case TARGET_NR_getgid:
10943 ret = get_errno(high2lowgid(getgid()));
10944 break;
10945 #endif
10946 #ifdef TARGET_NR_geteuid
10947 case TARGET_NR_geteuid:
10948 ret = get_errno(high2lowuid(geteuid()));
10949 break;
10950 #endif
10951 #ifdef TARGET_NR_getegid
10952 case TARGET_NR_getegid:
10953 ret = get_errno(high2lowgid(getegid()));
10954 break;
10955 #endif
10956 case TARGET_NR_setreuid:
10957 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10958 break;
10959 case TARGET_NR_setregid:
10960 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10961 break;
10962 case TARGET_NR_getgroups:
10964 int gidsetsize = arg1;
10965 target_id *target_grouplist;
10966 gid_t *grouplist;
10967 int i;
10969 grouplist = alloca(gidsetsize * sizeof(gid_t));
10970 ret = get_errno(getgroups(gidsetsize, grouplist));
10971 if (gidsetsize == 0)
10972 break;
10973 if (!is_error(ret)) {
10974 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10975 if (!target_grouplist)
10976 goto efault;
10977 for(i = 0;i < ret; i++)
10978 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10979 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10982 break;
10983 case TARGET_NR_setgroups:
10985 int gidsetsize = arg1;
10986 target_id *target_grouplist;
10987 gid_t *grouplist = NULL;
10988 int i;
10989 if (gidsetsize) {
10990 grouplist = alloca(gidsetsize * sizeof(gid_t));
10991 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10992 if (!target_grouplist) {
10993 ret = -TARGET_EFAULT;
10994 goto fail;
10996 for (i = 0; i < gidsetsize; i++) {
10997 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10999 unlock_user(target_grouplist, arg2, 0);
11001 ret = get_errno(setgroups(gidsetsize, grouplist));
11003 break;
11004 case TARGET_NR_fchown:
11005 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11006 break;
11007 #if defined(TARGET_NR_fchownat)
11008 case TARGET_NR_fchownat:
11009 if (!(p = lock_user_string(arg2)))
11010 goto efault;
11011 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11012 low2highgid(arg4), arg5));
11013 unlock_user(p, arg2, 0);
11014 break;
11015 #endif
11016 #ifdef TARGET_NR_setresuid
11017 case TARGET_NR_setresuid:
11018 ret = get_errno(sys_setresuid(low2highuid(arg1),
11019 low2highuid(arg2),
11020 low2highuid(arg3)));
11021 break;
11022 #endif
11023 #ifdef TARGET_NR_getresuid
11024 case TARGET_NR_getresuid:
11026 uid_t ruid, euid, suid;
11027 ret = get_errno(getresuid(&ruid, &euid, &suid));
11028 if (!is_error(ret)) {
11029 if (put_user_id(high2lowuid(ruid), arg1)
11030 || put_user_id(high2lowuid(euid), arg2)
11031 || put_user_id(high2lowuid(suid), arg3))
11032 goto efault;
11035 break;
11036 #endif
11037 #ifdef TARGET_NR_getresgid
11038 case TARGET_NR_setresgid:
11039 ret = get_errno(sys_setresgid(low2highgid(arg1),
11040 low2highgid(arg2),
11041 low2highgid(arg3)));
11042 break;
11043 #endif
11044 #ifdef TARGET_NR_getresgid
11045 case TARGET_NR_getresgid:
11047 gid_t rgid, egid, sgid;
11048 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11049 if (!is_error(ret)) {
11050 if (put_user_id(high2lowgid(rgid), arg1)
11051 || put_user_id(high2lowgid(egid), arg2)
11052 || put_user_id(high2lowgid(sgid), arg3))
11053 goto efault;
11056 break;
11057 #endif
11058 #ifdef TARGET_NR_chown
11059 case TARGET_NR_chown:
11060 if (!(p = lock_user_string(arg1)))
11061 goto efault;
11062 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11063 unlock_user(p, arg1, 0);
11064 break;
11065 #endif
11066 case TARGET_NR_setuid:
11067 ret = get_errno(sys_setuid(low2highuid(arg1)));
11068 break;
11069 case TARGET_NR_setgid:
11070 ret = get_errno(sys_setgid(low2highgid(arg1)));
11071 break;
11072 case TARGET_NR_setfsuid:
11073 ret = get_errno(setfsuid(arg1));
11074 break;
11075 case TARGET_NR_setfsgid:
11076 ret = get_errno(setfsgid(arg1));
11077 break;
11079 #ifdef TARGET_NR_lchown32
11080 case TARGET_NR_lchown32:
11081 if (!(p = lock_user_string(arg1)))
11082 goto efault;
11083 ret = get_errno(lchown(p, arg2, arg3));
11084 unlock_user(p, arg1, 0);
11085 break;
11086 #endif
11087 #ifdef TARGET_NR_getuid32
11088 case TARGET_NR_getuid32:
11089 ret = get_errno(getuid());
11090 break;
11091 #endif
11093 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11094 /* Alpha specific */
11095 case TARGET_NR_getxuid:
11097 uid_t euid;
11098 euid=geteuid();
11099 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
11101 ret = get_errno(getuid());
11102 break;
11103 #endif
11104 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11105 /* Alpha specific */
11106 case TARGET_NR_getxgid:
11108 uid_t egid;
11109 egid=getegid();
11110 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
11112 ret = get_errno(getgid());
11113 break;
11114 #endif
11115 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11116 /* Alpha specific */
11117 case TARGET_NR_osf_getsysinfo:
11118 ret = -TARGET_EOPNOTSUPP;
11119 switch (arg1) {
11120 case TARGET_GSI_IEEE_FP_CONTROL:
11122 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
11124 /* Copied from linux ieee_fpcr_to_swcr. */
11125 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
11126 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
11127 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
11128 | SWCR_TRAP_ENABLE_DZE
11129 | SWCR_TRAP_ENABLE_OVF);
11130 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
11131 | SWCR_TRAP_ENABLE_INE);
11132 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
11133 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
11135 if (put_user_u64 (swcr, arg2))
11136 goto efault;
11137 ret = 0;
11139 break;
11141 /* case GSI_IEEE_STATE_AT_SIGNAL:
11142 -- Not implemented in linux kernel.
11143 case GSI_UACPROC:
11144 -- Retrieves current unaligned access state; not much used.
11145 case GSI_PROC_TYPE:
11146 -- Retrieves implver information; surely not used.
11147 case GSI_GET_HWRPB:
11148 -- Grabs a copy of the HWRPB; surely not used.
11151 break;
11152 #endif
11153 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11154 /* Alpha specific */
11155 case TARGET_NR_osf_setsysinfo:
11156 ret = -TARGET_EOPNOTSUPP;
11157 switch (arg1) {
11158 case TARGET_SSI_IEEE_FP_CONTROL:
11160 uint64_t swcr, fpcr, orig_fpcr;
11162 if (get_user_u64 (swcr, arg2)) {
11163 goto efault;
11165 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11166 fpcr = orig_fpcr & FPCR_DYN_MASK;
11168 /* Copied from linux ieee_swcr_to_fpcr. */
11169 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
11170 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
11171 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
11172 | SWCR_TRAP_ENABLE_DZE
11173 | SWCR_TRAP_ENABLE_OVF)) << 48;
11174 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
11175 | SWCR_TRAP_ENABLE_INE)) << 57;
11176 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
11177 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
11179 cpu_alpha_store_fpcr(cpu_env, fpcr);
11180 ret = 0;
11182 break;
11184 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11186 uint64_t exc, fpcr, orig_fpcr;
11187 int si_code;
11189 if (get_user_u64(exc, arg2)) {
11190 goto efault;
11193 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11195 /* We only add to the exception status here. */
11196 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
11198 cpu_alpha_store_fpcr(cpu_env, fpcr);
11199 ret = 0;
11201 /* Old exceptions are not signaled. */
11202 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
11204 /* If any exceptions set by this call,
11205 and are unmasked, send a signal. */
11206 si_code = 0;
11207 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
11208 si_code = TARGET_FPE_FLTRES;
11210 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
11211 si_code = TARGET_FPE_FLTUND;
11213 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
11214 si_code = TARGET_FPE_FLTOVF;
11216 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
11217 si_code = TARGET_FPE_FLTDIV;
11219 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
11220 si_code = TARGET_FPE_FLTINV;
11222 if (si_code != 0) {
11223 target_siginfo_t info;
11224 info.si_signo = SIGFPE;
11225 info.si_errno = 0;
11226 info.si_code = si_code;
11227 info._sifields._sigfault._addr
11228 = ((CPUArchState *)cpu_env)->pc;
11229 queue_signal((CPUArchState *)cpu_env, info.si_signo,
11230 QEMU_SI_FAULT, &info);
11233 break;
11235 /* case SSI_NVPAIRS:
11236 -- Used with SSIN_UACPROC to enable unaligned accesses.
11237 case SSI_IEEE_STATE_AT_SIGNAL:
11238 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11239 -- Not implemented in linux kernel
11242 break;
11243 #endif
11244 #ifdef TARGET_NR_osf_sigprocmask
11245 /* Alpha specific. */
11246 case TARGET_NR_osf_sigprocmask:
11248 abi_ulong mask;
11249 int how;
11250 sigset_t set, oldset;
11252 switch(arg1) {
11253 case TARGET_SIG_BLOCK:
11254 how = SIG_BLOCK;
11255 break;
11256 case TARGET_SIG_UNBLOCK:
11257 how = SIG_UNBLOCK;
11258 break;
11259 case TARGET_SIG_SETMASK:
11260 how = SIG_SETMASK;
11261 break;
11262 default:
11263 ret = -TARGET_EINVAL;
11264 goto fail;
11266 mask = arg2;
11267 target_to_host_old_sigset(&set, &mask);
11268 ret = do_sigprocmask(how, &set, &oldset);
11269 if (!ret) {
11270 host_to_target_old_sigset(&mask, &oldset);
11271 ret = mask;
11274 break;
11275 #endif
11277 #ifdef TARGET_NR_getgid32
11278 case TARGET_NR_getgid32:
11279 ret = get_errno(getgid());
11280 break;
11281 #endif
11282 #ifdef TARGET_NR_geteuid32
11283 case TARGET_NR_geteuid32:
11284 ret = get_errno(geteuid());
11285 break;
11286 #endif
11287 #ifdef TARGET_NR_getegid32
11288 case TARGET_NR_getegid32:
11289 ret = get_errno(getegid());
11290 break;
11291 #endif
11292 #ifdef TARGET_NR_setreuid32
11293 case TARGET_NR_setreuid32:
11294 ret = get_errno(setreuid(arg1, arg2));
11295 break;
11296 #endif
11297 #ifdef TARGET_NR_setregid32
11298 case TARGET_NR_setregid32:
11299 ret = get_errno(setregid(arg1, arg2));
11300 break;
11301 #endif
11302 #ifdef TARGET_NR_getgroups32
11303 case TARGET_NR_getgroups32:
11305 int gidsetsize = arg1;
11306 uint32_t *target_grouplist;
11307 gid_t *grouplist;
11308 int i;
11310 grouplist = alloca(gidsetsize * sizeof(gid_t));
11311 ret = get_errno(getgroups(gidsetsize, grouplist));
11312 if (gidsetsize == 0)
11313 break;
11314 if (!is_error(ret)) {
11315 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11316 if (!target_grouplist) {
11317 ret = -TARGET_EFAULT;
11318 goto fail;
11320 for(i = 0;i < ret; i++)
11321 target_grouplist[i] = tswap32(grouplist[i]);
11322 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11325 break;
11326 #endif
11327 #ifdef TARGET_NR_setgroups32
11328 case TARGET_NR_setgroups32:
11330 int gidsetsize = arg1;
11331 uint32_t *target_grouplist;
11332 gid_t *grouplist;
11333 int i;
11335 grouplist = alloca(gidsetsize * sizeof(gid_t));
11336 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11337 if (!target_grouplist) {
11338 ret = -TARGET_EFAULT;
11339 goto fail;
11341 for(i = 0;i < gidsetsize; i++)
11342 grouplist[i] = tswap32(target_grouplist[i]);
11343 unlock_user(target_grouplist, arg2, 0);
11344 ret = get_errno(setgroups(gidsetsize, grouplist));
11346 break;
11347 #endif
11348 #ifdef TARGET_NR_fchown32
11349 case TARGET_NR_fchown32:
11350 ret = get_errno(fchown(arg1, arg2, arg3));
11351 break;
11352 #endif
11353 #ifdef TARGET_NR_setresuid32
11354 case TARGET_NR_setresuid32:
11355 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
11356 break;
11357 #endif
11358 #ifdef TARGET_NR_getresuid32
11359 case TARGET_NR_getresuid32:
11361 uid_t ruid, euid, suid;
11362 ret = get_errno(getresuid(&ruid, &euid, &suid));
11363 if (!is_error(ret)) {
11364 if (put_user_u32(ruid, arg1)
11365 || put_user_u32(euid, arg2)
11366 || put_user_u32(suid, arg3))
11367 goto efault;
11370 break;
11371 #endif
11372 #ifdef TARGET_NR_setresgid32
11373 case TARGET_NR_setresgid32:
11374 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
11375 break;
11376 #endif
11377 #ifdef TARGET_NR_getresgid32
11378 case TARGET_NR_getresgid32:
11380 gid_t rgid, egid, sgid;
11381 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11382 if (!is_error(ret)) {
11383 if (put_user_u32(rgid, arg1)
11384 || put_user_u32(egid, arg2)
11385 || put_user_u32(sgid, arg3))
11386 goto efault;
11389 break;
11390 #endif
11391 #ifdef TARGET_NR_chown32
11392 case TARGET_NR_chown32:
11393 if (!(p = lock_user_string(arg1)))
11394 goto efault;
11395 ret = get_errno(chown(p, arg2, arg3));
11396 unlock_user(p, arg1, 0);
11397 break;
11398 #endif
11399 #ifdef TARGET_NR_setuid32
11400 case TARGET_NR_setuid32:
11401 ret = get_errno(sys_setuid(arg1));
11402 break;
11403 #endif
11404 #ifdef TARGET_NR_setgid32
11405 case TARGET_NR_setgid32:
11406 ret = get_errno(sys_setgid(arg1));
11407 break;
11408 #endif
11409 #ifdef TARGET_NR_setfsuid32
11410 case TARGET_NR_setfsuid32:
11411 ret = get_errno(setfsuid(arg1));
11412 break;
11413 #endif
11414 #ifdef TARGET_NR_setfsgid32
11415 case TARGET_NR_setfsgid32:
11416 ret = get_errno(setfsgid(arg1));
11417 break;
11418 #endif
11420 case TARGET_NR_pivot_root:
11421 goto unimplemented;
11422 #ifdef TARGET_NR_mincore
11423 case TARGET_NR_mincore:
11425 void *a;
11426 ret = -TARGET_ENOMEM;
11427 a = lock_user(VERIFY_READ, arg1, arg2, 0);
11428 if (!a) {
11429 goto fail;
11431 ret = -TARGET_EFAULT;
11432 p = lock_user_string(arg3);
11433 if (!p) {
11434 goto mincore_fail;
11436 ret = get_errno(mincore(a, arg2, p));
11437 unlock_user(p, arg3, ret);
11438 mincore_fail:
11439 unlock_user(a, arg1, 0);
11441 break;
11442 #endif
11443 #ifdef TARGET_NR_arm_fadvise64_64
11444 case TARGET_NR_arm_fadvise64_64:
11445 /* arm_fadvise64_64 looks like fadvise64_64 but
11446 * with different argument order: fd, advice, offset, len
11447 * rather than the usual fd, offset, len, advice.
11448 * Note that offset and len are both 64-bit so appear as
11449 * pairs of 32-bit registers.
11451 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11452 target_offset64(arg5, arg6), arg2);
11453 ret = -host_to_target_errno(ret);
11454 break;
11455 #endif
11457 #if TARGET_ABI_BITS == 32
11459 #ifdef TARGET_NR_fadvise64_64
11460 case TARGET_NR_fadvise64_64:
11461 #if defined(TARGET_PPC)
11462 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11463 ret = arg2;
11464 arg2 = arg3;
11465 arg3 = arg4;
11466 arg4 = arg5;
11467 arg5 = arg6;
11468 arg6 = ret;
11469 #else
11470 /* 6 args: fd, offset (high, low), len (high, low), advice */
11471 if (regpairs_aligned(cpu_env, num)) {
11472 /* offset is in (3,4), len in (5,6) and advice in 7 */
11473 arg2 = arg3;
11474 arg3 = arg4;
11475 arg4 = arg5;
11476 arg5 = arg6;
11477 arg6 = arg7;
11479 #endif
11480 ret = -host_to_target_errno(posix_fadvise(arg1,
11481 target_offset64(arg2, arg3),
11482 target_offset64(arg4, arg5),
11483 arg6));
11484 break;
11485 #endif
11487 #ifdef TARGET_NR_fadvise64
11488 case TARGET_NR_fadvise64:
11489 /* 5 args: fd, offset (high, low), len, advice */
11490 if (regpairs_aligned(cpu_env, num)) {
11491 /* offset is in (3,4), len in 5 and advice in 6 */
11492 arg2 = arg3;
11493 arg3 = arg4;
11494 arg4 = arg5;
11495 arg5 = arg6;
11497 ret = -host_to_target_errno(posix_fadvise(arg1,
11498 target_offset64(arg2, arg3),
11499 arg4, arg5));
11500 break;
11501 #endif
11503 #else /* not a 32-bit ABI */
11504 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11505 #ifdef TARGET_NR_fadvise64_64
11506 case TARGET_NR_fadvise64_64:
11507 #endif
11508 #ifdef TARGET_NR_fadvise64
11509 case TARGET_NR_fadvise64:
11510 #endif
11511 #ifdef TARGET_S390X
11512 switch (arg4) {
11513 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11514 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11515 case 6: arg4 = POSIX_FADV_DONTNEED; break;
11516 case 7: arg4 = POSIX_FADV_NOREUSE; break;
11517 default: break;
11519 #endif
11520 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11521 break;
11522 #endif
11523 #endif /* end of 64-bit ABI fadvise handling */
11525 #ifdef TARGET_NR_madvise
11526 case TARGET_NR_madvise:
11527 /* A straight passthrough may not be safe because qemu sometimes
11528 turns private file-backed mappings into anonymous mappings.
11529 This will break MADV_DONTNEED.
11530 This is a hint, so ignoring and returning success is ok. */
11531 ret = get_errno(0);
11532 break;
11533 #endif
11534 #if TARGET_ABI_BITS == 32
11535 case TARGET_NR_fcntl64:
11537 int cmd;
11538 struct flock64 fl;
11539 from_flock64_fn *copyfrom = copy_from_user_flock64;
11540 to_flock64_fn *copyto = copy_to_user_flock64;
11542 #ifdef TARGET_ARM
11543 if (((CPUARMState *)cpu_env)->eabi) {
11544 copyfrom = copy_from_user_eabi_flock64;
11545 copyto = copy_to_user_eabi_flock64;
11547 #endif
11549 cmd = target_to_host_fcntl_cmd(arg2);
11550 if (cmd == -TARGET_EINVAL) {
11551 ret = cmd;
11552 break;
11555 switch(arg2) {
11556 case TARGET_F_GETLK64:
11557 ret = copyfrom(&fl, arg3);
11558 if (ret) {
11559 break;
11561 ret = get_errno(fcntl(arg1, cmd, &fl));
11562 if (ret == 0) {
11563 ret = copyto(arg3, &fl);
11565 break;
11567 case TARGET_F_SETLK64:
11568 case TARGET_F_SETLKW64:
11569 ret = copyfrom(&fl, arg3);
11570 if (ret) {
11571 break;
11573 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11574 break;
11575 default:
11576 ret = do_fcntl(arg1, arg2, arg3);
11577 break;
11579 break;
11581 #endif
11582 #ifdef TARGET_NR_cacheflush
11583 case TARGET_NR_cacheflush:
11584 /* self-modifying code is handled automatically, so nothing needed */
11585 ret = 0;
11586 break;
11587 #endif
11588 #ifdef TARGET_NR_security
11589 case TARGET_NR_security:
11590 goto unimplemented;
11591 #endif
11592 #ifdef TARGET_NR_getpagesize
11593 case TARGET_NR_getpagesize:
11594 ret = TARGET_PAGE_SIZE;
11595 break;
11596 #endif
11597 case TARGET_NR_gettid:
11598 ret = get_errno(gettid());
11599 break;
11600 #ifdef TARGET_NR_readahead
11601 case TARGET_NR_readahead:
11602 #if TARGET_ABI_BITS == 32
11603 if (regpairs_aligned(cpu_env, num)) {
11604 arg2 = arg3;
11605 arg3 = arg4;
11606 arg4 = arg5;
11608 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11609 #else
11610 ret = get_errno(readahead(arg1, arg2, arg3));
11611 #endif
11612 break;
11613 #endif
11614 #ifdef CONFIG_ATTR
11615 #ifdef TARGET_NR_setxattr
11616 case TARGET_NR_listxattr:
11617 case TARGET_NR_llistxattr:
11619 void *p, *b = 0;
11620 if (arg2) {
11621 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11622 if (!b) {
11623 ret = -TARGET_EFAULT;
11624 break;
11627 p = lock_user_string(arg1);
11628 if (p) {
11629 if (num == TARGET_NR_listxattr) {
11630 ret = get_errno(listxattr(p, b, arg3));
11631 } else {
11632 ret = get_errno(llistxattr(p, b, arg3));
11634 } else {
11635 ret = -TARGET_EFAULT;
11637 unlock_user(p, arg1, 0);
11638 unlock_user(b, arg2, arg3);
11639 break;
11641 case TARGET_NR_flistxattr:
11643 void *b = 0;
11644 if (arg2) {
11645 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11646 if (!b) {
11647 ret = -TARGET_EFAULT;
11648 break;
11651 ret = get_errno(flistxattr(arg1, b, arg3));
11652 unlock_user(b, arg2, arg3);
11653 break;
11655 case TARGET_NR_setxattr:
11656 case TARGET_NR_lsetxattr:
11658 void *p, *n, *v = 0;
11659 if (arg3) {
11660 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11661 if (!v) {
11662 ret = -TARGET_EFAULT;
11663 break;
11666 p = lock_user_string(arg1);
11667 n = lock_user_string(arg2);
11668 if (p && n) {
11669 if (num == TARGET_NR_setxattr) {
11670 ret = get_errno(setxattr(p, n, v, arg4, arg5));
11671 } else {
11672 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11674 } else {
11675 ret = -TARGET_EFAULT;
11677 unlock_user(p, arg1, 0);
11678 unlock_user(n, arg2, 0);
11679 unlock_user(v, arg3, 0);
11681 break;
11682 case TARGET_NR_fsetxattr:
11684 void *n, *v = 0;
11685 if (arg3) {
11686 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11687 if (!v) {
11688 ret = -TARGET_EFAULT;
11689 break;
11692 n = lock_user_string(arg2);
11693 if (n) {
11694 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11695 } else {
11696 ret = -TARGET_EFAULT;
11698 unlock_user(n, arg2, 0);
11699 unlock_user(v, arg3, 0);
11701 break;
11702 case TARGET_NR_getxattr:
11703 case TARGET_NR_lgetxattr:
11705 void *p, *n, *v = 0;
11706 if (arg3) {
11707 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11708 if (!v) {
11709 ret = -TARGET_EFAULT;
11710 break;
11713 p = lock_user_string(arg1);
11714 n = lock_user_string(arg2);
11715 if (p && n) {
11716 if (num == TARGET_NR_getxattr) {
11717 ret = get_errno(getxattr(p, n, v, arg4));
11718 } else {
11719 ret = get_errno(lgetxattr(p, n, v, arg4));
11721 } else {
11722 ret = -TARGET_EFAULT;
11724 unlock_user(p, arg1, 0);
11725 unlock_user(n, arg2, 0);
11726 unlock_user(v, arg3, arg4);
11728 break;
11729 case TARGET_NR_fgetxattr:
11731 void *n, *v = 0;
11732 if (arg3) {
11733 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11734 if (!v) {
11735 ret = -TARGET_EFAULT;
11736 break;
11739 n = lock_user_string(arg2);
11740 if (n) {
11741 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11742 } else {
11743 ret = -TARGET_EFAULT;
11745 unlock_user(n, arg2, 0);
11746 unlock_user(v, arg3, arg4);
11748 break;
11749 case TARGET_NR_removexattr:
11750 case TARGET_NR_lremovexattr:
11752 void *p, *n;
11753 p = lock_user_string(arg1);
11754 n = lock_user_string(arg2);
11755 if (p && n) {
11756 if (num == TARGET_NR_removexattr) {
11757 ret = get_errno(removexattr(p, n));
11758 } else {
11759 ret = get_errno(lremovexattr(p, n));
11761 } else {
11762 ret = -TARGET_EFAULT;
11764 unlock_user(p, arg1, 0);
11765 unlock_user(n, arg2, 0);
11767 break;
11768 case TARGET_NR_fremovexattr:
11770 void *n;
11771 n = lock_user_string(arg2);
11772 if (n) {
11773 ret = get_errno(fremovexattr(arg1, n));
11774 } else {
11775 ret = -TARGET_EFAULT;
11777 unlock_user(n, arg2, 0);
11779 break;
11780 #endif
11781 #endif /* CONFIG_ATTR */
11782 #ifdef TARGET_NR_set_thread_area
11783 case TARGET_NR_set_thread_area:
11784 #if defined(TARGET_MIPS)
11785 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11786 ret = 0;
11787 break;
11788 #elif defined(TARGET_CRIS)
11789 if (arg1 & 0xff)
11790 ret = -TARGET_EINVAL;
11791 else {
11792 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11793 ret = 0;
11795 break;
11796 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11797 ret = do_set_thread_area(cpu_env, arg1);
11798 break;
11799 #elif defined(TARGET_M68K)
11801 TaskState *ts = cpu->opaque;
11802 ts->tp_value = arg1;
11803 ret = 0;
11804 break;
11806 #else
11807 goto unimplemented_nowarn;
11808 #endif
11809 #endif
11810 #ifdef TARGET_NR_get_thread_area
11811 case TARGET_NR_get_thread_area:
11812 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11813 ret = do_get_thread_area(cpu_env, arg1);
11814 break;
11815 #elif defined(TARGET_M68K)
11817 TaskState *ts = cpu->opaque;
11818 ret = ts->tp_value;
11819 break;
11821 #else
11822 goto unimplemented_nowarn;
11823 #endif
11824 #endif
11825 #ifdef TARGET_NR_getdomainname
11826 case TARGET_NR_getdomainname:
11827 goto unimplemented_nowarn;
11828 #endif
11830 #ifdef TARGET_NR_clock_gettime
11831 case TARGET_NR_clock_gettime:
11833 struct timespec ts;
11834 ret = get_errno(clock_gettime(arg1, &ts));
11835 if (!is_error(ret)) {
11836 host_to_target_timespec(arg2, &ts);
11838 break;
11840 #endif
11841 #ifdef TARGET_NR_clock_getres
11842 case TARGET_NR_clock_getres:
11844 struct timespec ts;
11845 ret = get_errno(clock_getres(arg1, &ts));
11846 if (!is_error(ret)) {
11847 host_to_target_timespec(arg2, &ts);
11849 break;
11851 #endif
11852 #ifdef TARGET_NR_clock_nanosleep
11853 case TARGET_NR_clock_nanosleep:
11855 struct timespec ts;
11856 target_to_host_timespec(&ts, arg3);
11857 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11858 &ts, arg4 ? &ts : NULL));
11859 if (arg4)
11860 host_to_target_timespec(arg4, &ts);
11862 #if defined(TARGET_PPC)
11863 /* clock_nanosleep is odd in that it returns positive errno values.
11864 * On PPC, CR0 bit 3 should be set in such a situation. */
11865 if (ret && ret != -TARGET_ERESTARTSYS) {
11866 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11868 #endif
11869 break;
11871 #endif
11873 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11874 case TARGET_NR_set_tid_address:
11875 ret = get_errno(set_tid_address((int *)g2h(arg1)));
11876 break;
11877 #endif
11879 case TARGET_NR_tkill:
11880 ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11881 break;
11883 case TARGET_NR_tgkill:
11884 ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
11885 target_to_host_signal(arg3)));
11886 break;
11888 #ifdef TARGET_NR_set_robust_list
11889 case TARGET_NR_set_robust_list:
11890 case TARGET_NR_get_robust_list:
11891 /* The ABI for supporting robust futexes has userspace pass
11892 * the kernel a pointer to a linked list which is updated by
11893 * userspace after the syscall; the list is walked by the kernel
11894 * when the thread exits. Since the linked list in QEMU guest
11895 * memory isn't a valid linked list for the host and we have
11896 * no way to reliably intercept the thread-death event, we can't
11897 * support these. Silently return ENOSYS so that guest userspace
11898 * falls back to a non-robust futex implementation (which should
11899 * be OK except in the corner case of the guest crashing while
11900 * holding a mutex that is shared with another process via
11901 * shared memory).
11903 goto unimplemented_nowarn;
11904 #endif
11906 #if defined(TARGET_NR_utimensat)
11907 case TARGET_NR_utimensat:
11909 struct timespec *tsp, ts[2];
11910 if (!arg3) {
11911 tsp = NULL;
11912 } else {
11913 target_to_host_timespec(ts, arg3);
11914 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11915 tsp = ts;
11917 if (!arg2)
11918 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11919 else {
11920 if (!(p = lock_user_string(arg2))) {
11921 ret = -TARGET_EFAULT;
11922 goto fail;
11924 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11925 unlock_user(p, arg2, 0);
11928 break;
11929 #endif
11930 case TARGET_NR_futex:
11931 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11932 break;
11933 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11934 case TARGET_NR_inotify_init:
11935 ret = get_errno(sys_inotify_init());
11936 if (ret >= 0) {
11937 fd_trans_register(ret, &target_inotify_trans);
11939 break;
11940 #endif
11941 #ifdef CONFIG_INOTIFY1
11942 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11943 case TARGET_NR_inotify_init1:
11944 ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11945 fcntl_flags_tbl)));
11946 if (ret >= 0) {
11947 fd_trans_register(ret, &target_inotify_trans);
11949 break;
11950 #endif
11951 #endif
11952 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11953 case TARGET_NR_inotify_add_watch:
11954 p = lock_user_string(arg2);
11955 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11956 unlock_user(p, arg2, 0);
11957 break;
11958 #endif
11959 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11960 case TARGET_NR_inotify_rm_watch:
11961 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
11962 break;
11963 #endif
11965 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11966 case TARGET_NR_mq_open:
11968 struct mq_attr posix_mq_attr;
11969 struct mq_attr *pposix_mq_attr;
11970 int host_flags;
11972 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11973 pposix_mq_attr = NULL;
11974 if (arg4) {
11975 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11976 goto efault;
11978 pposix_mq_attr = &posix_mq_attr;
11980 p = lock_user_string(arg1 - 1);
11981 if (!p) {
11982 goto efault;
11984 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11985 unlock_user (p, arg1, 0);
11987 break;
11989 case TARGET_NR_mq_unlink:
11990 p = lock_user_string(arg1 - 1);
11991 if (!p) {
11992 ret = -TARGET_EFAULT;
11993 break;
11995 ret = get_errno(mq_unlink(p));
11996 unlock_user (p, arg1, 0);
11997 break;
11999 case TARGET_NR_mq_timedsend:
12001 struct timespec ts;
12003 p = lock_user (VERIFY_READ, arg2, arg3, 1);
12004 if (arg5 != 0) {
12005 target_to_host_timespec(&ts, arg5);
12006 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12007 host_to_target_timespec(arg5, &ts);
12008 } else {
12009 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12011 unlock_user (p, arg2, arg3);
12013 break;
12015 case TARGET_NR_mq_timedreceive:
12017 struct timespec ts;
12018 unsigned int prio;
12020 p = lock_user (VERIFY_READ, arg2, arg3, 1);
12021 if (arg5 != 0) {
12022 target_to_host_timespec(&ts, arg5);
12023 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12024 &prio, &ts));
12025 host_to_target_timespec(arg5, &ts);
12026 } else {
12027 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12028 &prio, NULL));
12030 unlock_user (p, arg2, arg3);
12031 if (arg4 != 0)
12032 put_user_u32(prio, arg4);
12034 break;
12036 /* Not implemented for now... */
12037 /* case TARGET_NR_mq_notify: */
12038 /* break; */
12040 case TARGET_NR_mq_getsetattr:
12042 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12043 ret = 0;
12044 if (arg3 != 0) {
12045 ret = mq_getattr(arg1, &posix_mq_attr_out);
12046 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12048 if (arg2 != 0) {
12049 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12050 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
12054 break;
12055 #endif
12057 #ifdef CONFIG_SPLICE
12058 #ifdef TARGET_NR_tee
12059 case TARGET_NR_tee:
12061 ret = get_errno(tee(arg1,arg2,arg3,arg4));
12063 break;
12064 #endif
12065 #ifdef TARGET_NR_splice
12066 case TARGET_NR_splice:
12068 loff_t loff_in, loff_out;
12069 loff_t *ploff_in = NULL, *ploff_out = NULL;
12070 if (arg2) {
12071 if (get_user_u64(loff_in, arg2)) {
12072 goto efault;
12074 ploff_in = &loff_in;
12076 if (arg4) {
12077 if (get_user_u64(loff_out, arg4)) {
12078 goto efault;
12080 ploff_out = &loff_out;
12082 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12083 if (arg2) {
12084 if (put_user_u64(loff_in, arg2)) {
12085 goto efault;
12088 if (arg4) {
12089 if (put_user_u64(loff_out, arg4)) {
12090 goto efault;
12094 break;
12095 #endif
12096 #ifdef TARGET_NR_vmsplice
12097 case TARGET_NR_vmsplice:
12099 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12100 if (vec != NULL) {
12101 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12102 unlock_iovec(vec, arg2, arg3, 0);
12103 } else {
12104 ret = -host_to_target_errno(errno);
12107 break;
12108 #endif
12109 #endif /* CONFIG_SPLICE */
12110 #ifdef CONFIG_EVENTFD
12111 #if defined(TARGET_NR_eventfd)
12112 case TARGET_NR_eventfd:
12113 ret = get_errno(eventfd(arg1, 0));
12114 if (ret >= 0) {
12115 fd_trans_register(ret, &target_eventfd_trans);
12117 break;
12118 #endif
12119 #if defined(TARGET_NR_eventfd2)
12120 case TARGET_NR_eventfd2:
12122 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
12123 if (arg2 & TARGET_O_NONBLOCK) {
12124 host_flags |= O_NONBLOCK;
12126 if (arg2 & TARGET_O_CLOEXEC) {
12127 host_flags |= O_CLOEXEC;
12129 ret = get_errno(eventfd(arg1, host_flags));
12130 if (ret >= 0) {
12131 fd_trans_register(ret, &target_eventfd_trans);
12133 break;
12135 #endif
12136 #endif /* CONFIG_EVENTFD */
12137 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12138 case TARGET_NR_fallocate:
12139 #if TARGET_ABI_BITS == 32
12140 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12141 target_offset64(arg5, arg6)));
12142 #else
12143 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12144 #endif
12145 break;
12146 #endif
12147 #if defined(CONFIG_SYNC_FILE_RANGE)
12148 #if defined(TARGET_NR_sync_file_range)
12149 case TARGET_NR_sync_file_range:
12150 #if TARGET_ABI_BITS == 32
12151 #if defined(TARGET_MIPS)
12152 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12153 target_offset64(arg5, arg6), arg7));
12154 #else
12155 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12156 target_offset64(arg4, arg5), arg6));
12157 #endif /* !TARGET_MIPS */
12158 #else
12159 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12160 #endif
12161 break;
12162 #endif
12163 #if defined(TARGET_NR_sync_file_range2)
12164 case TARGET_NR_sync_file_range2:
12165 /* This is like sync_file_range but the arguments are reordered */
12166 #if TARGET_ABI_BITS == 32
12167 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12168 target_offset64(arg5, arg6), arg2));
12169 #else
12170 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12171 #endif
12172 break;
12173 #endif
12174 #endif
12175 #if defined(TARGET_NR_signalfd4)
12176 case TARGET_NR_signalfd4:
12177 ret = do_signalfd4(arg1, arg2, arg4);
12178 break;
12179 #endif
12180 #if defined(TARGET_NR_signalfd)
12181 case TARGET_NR_signalfd:
12182 ret = do_signalfd4(arg1, arg2, 0);
12183 break;
12184 #endif
12185 #if defined(CONFIG_EPOLL)
12186 #if defined(TARGET_NR_epoll_create)
12187 case TARGET_NR_epoll_create:
12188 ret = get_errno(epoll_create(arg1));
12189 break;
12190 #endif
12191 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12192 case TARGET_NR_epoll_create1:
12193 ret = get_errno(epoll_create1(arg1));
12194 break;
12195 #endif
12196 #if defined(TARGET_NR_epoll_ctl)
12197 case TARGET_NR_epoll_ctl:
12199 struct epoll_event ep;
12200 struct epoll_event *epp = 0;
12201 if (arg4) {
12202 struct target_epoll_event *target_ep;
12203 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12204 goto efault;
12206 ep.events = tswap32(target_ep->events);
12207 /* The epoll_data_t union is just opaque data to the kernel,
12208 * so we transfer all 64 bits across and need not worry what
12209 * actual data type it is.
12211 ep.data.u64 = tswap64(target_ep->data.u64);
12212 unlock_user_struct(target_ep, arg4, 0);
12213 epp = &ep;
12215 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12216 break;
12218 #endif
12220 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12221 #if defined(TARGET_NR_epoll_wait)
12222 case TARGET_NR_epoll_wait:
12223 #endif
12224 #if defined(TARGET_NR_epoll_pwait)
12225 case TARGET_NR_epoll_pwait:
12226 #endif
12228 struct target_epoll_event *target_ep;
12229 struct epoll_event *ep;
12230 int epfd = arg1;
12231 int maxevents = arg3;
12232 int timeout = arg4;
12234 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12235 ret = -TARGET_EINVAL;
12236 break;
12239 target_ep = lock_user(VERIFY_WRITE, arg2,
12240 maxevents * sizeof(struct target_epoll_event), 1);
12241 if (!target_ep) {
12242 goto efault;
12245 ep = g_try_new(struct epoll_event, maxevents);
12246 if (!ep) {
12247 unlock_user(target_ep, arg2, 0);
12248 ret = -TARGET_ENOMEM;
12249 break;
12252 switch (num) {
12253 #if defined(TARGET_NR_epoll_pwait)
12254 case TARGET_NR_epoll_pwait:
12256 target_sigset_t *target_set;
12257 sigset_t _set, *set = &_set;
12259 if (arg5) {
12260 if (arg6 != sizeof(target_sigset_t)) {
12261 ret = -TARGET_EINVAL;
12262 break;
12265 target_set = lock_user(VERIFY_READ, arg5,
12266 sizeof(target_sigset_t), 1);
12267 if (!target_set) {
12268 ret = -TARGET_EFAULT;
12269 break;
12271 target_to_host_sigset(set, target_set);
12272 unlock_user(target_set, arg5, 0);
12273 } else {
12274 set = NULL;
12277 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12278 set, SIGSET_T_SIZE));
12279 break;
12281 #endif
12282 #if defined(TARGET_NR_epoll_wait)
12283 case TARGET_NR_epoll_wait:
12284 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12285 NULL, 0));
12286 break;
12287 #endif
12288 default:
12289 ret = -TARGET_ENOSYS;
12291 if (!is_error(ret)) {
12292 int i;
12293 for (i = 0; i < ret; i++) {
12294 target_ep[i].events = tswap32(ep[i].events);
12295 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12297 unlock_user(target_ep, arg2,
12298 ret * sizeof(struct target_epoll_event));
12299 } else {
12300 unlock_user(target_ep, arg2, 0);
12302 g_free(ep);
12303 break;
12305 #endif
12306 #endif
12307 #ifdef TARGET_NR_prlimit64
12308 case TARGET_NR_prlimit64:
12310 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12311 struct target_rlimit64 *target_rnew, *target_rold;
12312 struct host_rlimit64 rnew, rold, *rnewp = 0;
12313 int resource = target_to_host_resource(arg2);
12314 if (arg3) {
12315 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12316 goto efault;
12318 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12319 rnew.rlim_max = tswap64(target_rnew->rlim_max);
12320 unlock_user_struct(target_rnew, arg3, 0);
12321 rnewp = &rnew;
12324 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12325 if (!is_error(ret) && arg4) {
12326 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12327 goto efault;
12329 target_rold->rlim_cur = tswap64(rold.rlim_cur);
12330 target_rold->rlim_max = tswap64(rold.rlim_max);
12331 unlock_user_struct(target_rold, arg4, 1);
12333 break;
12335 #endif
12336 #ifdef TARGET_NR_gethostname
12337 case TARGET_NR_gethostname:
12339 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12340 if (name) {
12341 ret = get_errno(gethostname(name, arg2));
12342 unlock_user(name, arg1, arg2);
12343 } else {
12344 ret = -TARGET_EFAULT;
12346 break;
12348 #endif
12349 #ifdef TARGET_NR_atomic_cmpxchg_32
12350 case TARGET_NR_atomic_cmpxchg_32:
12352 /* should use start_exclusive from main.c */
12353 abi_ulong mem_value;
12354 if (get_user_u32(mem_value, arg6)) {
12355 target_siginfo_t info;
12356 info.si_signo = SIGSEGV;
12357 info.si_errno = 0;
12358 info.si_code = TARGET_SEGV_MAPERR;
12359 info._sifields._sigfault._addr = arg6;
12360 queue_signal((CPUArchState *)cpu_env, info.si_signo,
12361 QEMU_SI_FAULT, &info);
12362 ret = 0xdeadbeef;
12365 if (mem_value == arg2)
12366 put_user_u32(arg1, arg6);
12367 ret = mem_value;
12368 break;
12370 #endif
12371 #ifdef TARGET_NR_atomic_barrier
12372 case TARGET_NR_atomic_barrier:
12374 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
12375 ret = 0;
12376 break;
12378 #endif
12380 #ifdef TARGET_NR_timer_create
12381 case TARGET_NR_timer_create:
12383 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12385 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12387 int clkid = arg1;
12388 int timer_index = next_free_host_timer();
12390 if (timer_index < 0) {
12391 ret = -TARGET_EAGAIN;
12392 } else {
12393 timer_t *phtimer = g_posix_timers + timer_index;
12395 if (arg2) {
12396 phost_sevp = &host_sevp;
12397 ret = target_to_host_sigevent(phost_sevp, arg2);
12398 if (ret != 0) {
12399 break;
12403 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12404 if (ret) {
12405 phtimer = NULL;
12406 } else {
12407 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12408 goto efault;
12412 break;
12414 #endif
12416 #ifdef TARGET_NR_timer_settime
12417 case TARGET_NR_timer_settime:
12419 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12420 * struct itimerspec * old_value */
12421 target_timer_t timerid = get_timer_id(arg1);
12423 if (timerid < 0) {
12424 ret = timerid;
12425 } else if (arg3 == 0) {
12426 ret = -TARGET_EINVAL;
12427 } else {
12428 timer_t htimer = g_posix_timers[timerid];
12429 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12431 if (target_to_host_itimerspec(&hspec_new, arg3)) {
12432 goto efault;
12434 ret = get_errno(
12435 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12436 if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12437 goto efault;
12440 break;
12442 #endif
12444 #ifdef TARGET_NR_timer_gettime
12445 case TARGET_NR_timer_gettime:
12447 /* args: timer_t timerid, struct itimerspec *curr_value */
12448 target_timer_t timerid = get_timer_id(arg1);
12450 if (timerid < 0) {
12451 ret = timerid;
12452 } else if (!arg2) {
12453 ret = -TARGET_EFAULT;
12454 } else {
12455 timer_t htimer = g_posix_timers[timerid];
12456 struct itimerspec hspec;
12457 ret = get_errno(timer_gettime(htimer, &hspec));
12459 if (host_to_target_itimerspec(arg2, &hspec)) {
12460 ret = -TARGET_EFAULT;
12463 break;
12465 #endif
12467 #ifdef TARGET_NR_timer_getoverrun
12468 case TARGET_NR_timer_getoverrun:
12470 /* args: timer_t timerid */
12471 target_timer_t timerid = get_timer_id(arg1);
12473 if (timerid < 0) {
12474 ret = timerid;
12475 } else {
12476 timer_t htimer = g_posix_timers[timerid];
12477 ret = get_errno(timer_getoverrun(htimer));
12479 fd_trans_unregister(ret);
12480 break;
12482 #endif
12484 #ifdef TARGET_NR_timer_delete
12485 case TARGET_NR_timer_delete:
12487 /* args: timer_t timerid */
12488 target_timer_t timerid = get_timer_id(arg1);
12490 if (timerid < 0) {
12491 ret = timerid;
12492 } else {
12493 timer_t htimer = g_posix_timers[timerid];
12494 ret = get_errno(timer_delete(htimer));
12495 g_posix_timers[timerid] = 0;
12497 break;
12499 #endif
12501 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12502 case TARGET_NR_timerfd_create:
12503 ret = get_errno(timerfd_create(arg1,
12504 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12505 break;
12506 #endif
12508 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12509 case TARGET_NR_timerfd_gettime:
12511 struct itimerspec its_curr;
12513 ret = get_errno(timerfd_gettime(arg1, &its_curr));
12515 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12516 goto efault;
12519 break;
12520 #endif
12522 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12523 case TARGET_NR_timerfd_settime:
12525 struct itimerspec its_new, its_old, *p_new;
12527 if (arg3) {
12528 if (target_to_host_itimerspec(&its_new, arg3)) {
12529 goto efault;
12531 p_new = &its_new;
12532 } else {
12533 p_new = NULL;
12536 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12538 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12539 goto efault;
12542 break;
12543 #endif
12545 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12546 case TARGET_NR_ioprio_get:
12547 ret = get_errno(ioprio_get(arg1, arg2));
12548 break;
12549 #endif
12551 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12552 case TARGET_NR_ioprio_set:
12553 ret = get_errno(ioprio_set(arg1, arg2, arg3));
12554 break;
12555 #endif
12557 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12558 case TARGET_NR_setns:
12559 ret = get_errno(setns(arg1, arg2));
12560 break;
12561 #endif
12562 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12563 case TARGET_NR_unshare:
12564 ret = get_errno(unshare(arg1));
12565 break;
12566 #endif
12567 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12568 case TARGET_NR_kcmp:
12569 ret = get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12570 break;
12571 #endif
12573 default:
12574 unimplemented:
12575 gemu_log("qemu: Unsupported syscall: %d\n", num);
12576 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
12577 unimplemented_nowarn:
12578 #endif
12579 ret = -TARGET_ENOSYS;
12580 break;
12582 fail:
12583 #ifdef DEBUG
12584 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
12585 #endif
12586 if(do_strace)
12587 print_syscall_ret(num, ret);
12588 trace_guest_user_syscall_ret(cpu, num, ret);
12589 return ret;
12590 efault:
12591 ret = -TARGET_EFAULT;
12592 goto fail;