osdep: add wait.h compat macros
[qemu/ar7.git] / linux-user / syscall.c
blobaf8603f1b775671a0eddd88a4e5501a0758068a9
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/uio.h>
42 #include <poll.h>
43 #include <sys/times.h>
44 #include <sys/shm.h>
45 #include <sys/sem.h>
46 #include <sys/statfs.h>
47 #include <utime.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef TARGET_GPROF
63 #include <sys/gmon.h>
64 #endif
65 #ifdef CONFIG_EVENTFD
66 #include <sys/eventfd.h>
67 #endif
68 #ifdef CONFIG_EPOLL
69 #include <sys/epoll.h>
70 #endif
71 #ifdef CONFIG_ATTR
72 #include "qemu/xattr.h"
73 #endif
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
76 #endif
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #include <linux/vt.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #include <linux/if_bridge.h>
108 #endif
109 #include <linux/audit.h>
110 #include "linux_loop.h"
111 #include "uname.h"
113 #include "qemu.h"
115 #ifndef CLONE_IO
116 #define CLONE_IO 0x80000000 /* Clone io context */
117 #endif
/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */
//#define DEBUG
/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/* Drop any host-header definitions and define direct syscall wrappers
 * for 0..6 argument syscalls; each expands to a static function that
 * forwards to syscall(2).
 */
#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

#define _syscall0(type,name)            \
static type name (void)                 \
{                                       \
    return syscall(__NR_##name);        \
}

#define _syscall1(type,name,type1,arg1)         \
static type name (type1 arg1)                   \
{                                               \
    return syscall(__NR_##name, arg1);          \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)      \
static type name (type1 arg1,type2 arg2)                \
{                                                       \
    return syscall(__NR_##name, arg1, arg2);            \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
static type name (type1 arg1,type2 arg2,type3 arg3)             \
{                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3);              \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)    \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)              \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                    \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,    \
                  type5,arg5)                                               \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)   \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);              \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,    \
                  type5,arg5,type6,arg6)                                    \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,   \
                  type6 arg6)                                               \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);        \
}
231 #define __NR_sys_uname __NR_uname
232 #define __NR_sys_getcwd1 __NR_getcwd
233 #define __NR_sys_getdents __NR_getdents
234 #define __NR_sys_getdents64 __NR_getdents64
235 #define __NR_sys_getpriority __NR_getpriority
236 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
237 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
238 #define __NR_sys_syslog __NR_syslog
239 #define __NR_sys_futex __NR_futex
240 #define __NR_sys_inotify_init __NR_inotify_init
241 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
242 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
244 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
245 #define __NR__llseek __NR_lseek
246 #endif
248 /* Newer kernel ports have llseek() instead of _llseek() */
249 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
250 #define TARGET_NR__llseek TARGET_NR_llseek
251 #endif
253 #ifdef __NR_gettid
254 _syscall0(int, gettid)
255 #else
256 /* This is a replacement for the host gettid() and must return a host
257 errno. */
258 static int gettid(void) {
259 return -ENOSYS;
261 #endif
263 /* For the 64-bit guest on 32-bit host case we must emulate
264 * getdents using getdents64, because otherwise the host
265 * might hand us back more dirent records than we can fit
266 * into the guest buffer after structure format conversion.
267 * Otherwise we emulate getdents with getdents if the host has it.
269 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
270 #define EMULATE_GETDENTS_WITH_GETDENTS
271 #endif
273 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
274 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
275 #endif
276 #if (defined(TARGET_NR_getdents) && \
277 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
278 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
279 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
280 #endif
281 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
282 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
283 loff_t *, res, uint, wh);
284 #endif
285 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
286 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
287 siginfo_t *, uinfo)
288 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
289 #ifdef __NR_exit_group
290 _syscall1(int,exit_group,int,error_code)
291 #endif
292 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
293 _syscall1(int,set_tid_address,int *,tidptr)
294 #endif
295 #if defined(TARGET_NR_futex) && defined(__NR_futex)
296 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
297 const struct timespec *,timeout,int *,uaddr2,int,val3)
298 #endif
299 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
300 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
301 unsigned long *, user_mask_ptr);
302 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
303 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
304 unsigned long *, user_mask_ptr);
305 #define __NR_sys_getcpu __NR_getcpu
306 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
307 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
308 void *, arg);
309 _syscall2(int, capget, struct __user_cap_header_struct *, header,
310 struct __user_cap_data_struct *, data);
311 _syscall2(int, capset, struct __user_cap_header_struct *, header,
312 struct __user_cap_data_struct *, data);
313 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
314 _syscall2(int, ioprio_get, int, which, int, who)
315 #endif
316 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
317 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
318 #endif
319 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
320 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
321 #endif
323 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
324 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
325 unsigned long, idx1, unsigned long, idx2)
326 #endif
328 static bitmask_transtbl fcntl_flags_tbl[] = {
329 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
330 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
331 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
332 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
333 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
334 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
335 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
336 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
337 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
338 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
339 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
340 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
341 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
342 #if defined(O_DIRECT)
343 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
344 #endif
345 #if defined(O_NOATIME)
346 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
347 #endif
348 #if defined(O_CLOEXEC)
349 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
350 #endif
351 #if defined(O_PATH)
352 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
353 #endif
354 #if defined(O_TMPFILE)
355 { TARGET_O_TMPFILE, TARGET_O_TMPFILE, O_TMPFILE, O_TMPFILE },
356 #endif
357 /* Don't terminate the list prematurely on 64-bit host+guest. */
358 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
359 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
360 #endif
361 { 0, 0, 0, 0 }
/* Local copy of the kernel's IFLA_BR_* bridge attribute numbering, so
 * the netlink translation code does not depend on how new the host's
 * kernel headers are. Must match linux/if_link.h ordering. */
enum {
    QEMU_IFLA_BR_UNSPEC,
    QEMU_IFLA_BR_FORWARD_DELAY,
    QEMU_IFLA_BR_HELLO_TIME,
    QEMU_IFLA_BR_MAX_AGE,
    QEMU_IFLA_BR_AGEING_TIME,
    QEMU_IFLA_BR_STP_STATE,
    QEMU_IFLA_BR_PRIORITY,
    QEMU_IFLA_BR_VLAN_FILTERING,
    QEMU_IFLA_BR_VLAN_PROTOCOL,
    QEMU_IFLA_BR_GROUP_FWD_MASK,
    QEMU_IFLA_BR_ROOT_ID,
    QEMU_IFLA_BR_BRIDGE_ID,
    QEMU_IFLA_BR_ROOT_PORT,
    QEMU_IFLA_BR_ROOT_PATH_COST,
    QEMU_IFLA_BR_TOPOLOGY_CHANGE,
    QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
    QEMU_IFLA_BR_HELLO_TIMER,
    QEMU_IFLA_BR_TCN_TIMER,
    QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
    QEMU_IFLA_BR_GC_TIMER,
    QEMU_IFLA_BR_GROUP_ADDR,
    QEMU_IFLA_BR_FDB_FLUSH,
    QEMU_IFLA_BR_MCAST_ROUTER,
    QEMU_IFLA_BR_MCAST_SNOOPING,
    QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
    QEMU_IFLA_BR_MCAST_QUERIER,
    QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
    QEMU_IFLA_BR_MCAST_HASH_MAX,
    QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
    QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
    QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
    QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
    QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
    QEMU_IFLA_BR_MCAST_QUERY_INTVL,
    QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
    QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
    QEMU_IFLA_BR_NF_CALL_IPTABLES,
    QEMU_IFLA_BR_NF_CALL_IP6TABLES,
    QEMU_IFLA_BR_NF_CALL_ARPTABLES,
    QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
    QEMU_IFLA_BR_PAD,
    QEMU_IFLA_BR_VLAN_STATS_ENABLED,
    QEMU_IFLA_BR_MCAST_STATS_ENABLED,
    QEMU___IFLA_BR_MAX,
};
/* Local copy of the kernel's IFLA_* link attribute numbering (see
 * linux/if_link.h); kept here so translation works regardless of the
 * host headers' age. */
enum {
    QEMU_IFLA_UNSPEC,
    QEMU_IFLA_ADDRESS,
    QEMU_IFLA_BROADCAST,
    QEMU_IFLA_IFNAME,
    QEMU_IFLA_MTU,
    QEMU_IFLA_LINK,
    QEMU_IFLA_QDISC,
    QEMU_IFLA_STATS,
    QEMU_IFLA_COST,
    QEMU_IFLA_PRIORITY,
    QEMU_IFLA_MASTER,
    QEMU_IFLA_WIRELESS,
    QEMU_IFLA_PROTINFO,
    QEMU_IFLA_TXQLEN,
    QEMU_IFLA_MAP,
    QEMU_IFLA_WEIGHT,
    QEMU_IFLA_OPERSTATE,
    QEMU_IFLA_LINKMODE,
    QEMU_IFLA_LINKINFO,
    QEMU_IFLA_NET_NS_PID,
    QEMU_IFLA_IFALIAS,
    QEMU_IFLA_NUM_VF,
    QEMU_IFLA_VFINFO_LIST,
    QEMU_IFLA_STATS64,
    QEMU_IFLA_VF_PORTS,
    QEMU_IFLA_PORT_SELF,
    QEMU_IFLA_AF_SPEC,
    QEMU_IFLA_GROUP,
    QEMU_IFLA_NET_NS_FD,
    QEMU_IFLA_EXT_MASK,
    QEMU_IFLA_PROMISCUITY,
    QEMU_IFLA_NUM_TX_QUEUES,
    QEMU_IFLA_NUM_RX_QUEUES,
    QEMU_IFLA_CARRIER,
    QEMU_IFLA_PHYS_PORT_ID,
    QEMU_IFLA_CARRIER_CHANGES,
    QEMU_IFLA_PHYS_SWITCH_ID,
    QEMU_IFLA_LINK_NETNSID,
    QEMU_IFLA_PHYS_PORT_NAME,
    QEMU_IFLA_PROTO_DOWN,
    QEMU_IFLA_GSO_MAX_SEGS,
    QEMU_IFLA_GSO_MAX_SIZE,
    QEMU_IFLA_PAD,
    QEMU_IFLA_XDP,
    QEMU___IFLA_MAX
};
/* Local copy of the kernel's IFLA_BRPORT_* bridge-port attribute
 * numbering (linux/if_link.h). */
enum {
    QEMU_IFLA_BRPORT_UNSPEC,
    QEMU_IFLA_BRPORT_STATE,
    QEMU_IFLA_BRPORT_PRIORITY,
    QEMU_IFLA_BRPORT_COST,
    QEMU_IFLA_BRPORT_MODE,
    QEMU_IFLA_BRPORT_GUARD,
    QEMU_IFLA_BRPORT_PROTECT,
    QEMU_IFLA_BRPORT_FAST_LEAVE,
    QEMU_IFLA_BRPORT_LEARNING,
    QEMU_IFLA_BRPORT_UNICAST_FLOOD,
    QEMU_IFLA_BRPORT_PROXYARP,
    QEMU_IFLA_BRPORT_LEARNING_SYNC,
    QEMU_IFLA_BRPORT_PROXYARP_WIFI,
    QEMU_IFLA_BRPORT_ROOT_ID,
    QEMU_IFLA_BRPORT_BRIDGE_ID,
    QEMU_IFLA_BRPORT_DESIGNATED_PORT,
    QEMU_IFLA_BRPORT_DESIGNATED_COST,
    QEMU_IFLA_BRPORT_ID,
    QEMU_IFLA_BRPORT_NO,
    QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
    QEMU_IFLA_BRPORT_CONFIG_PENDING,
    QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
    QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
    QEMU_IFLA_BRPORT_HOLD_TIMER,
    QEMU_IFLA_BRPORT_FLUSH,
    QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
    QEMU_IFLA_BRPORT_PAD,
    QEMU___IFLA_BRPORT_MAX
};
/* Local copy of the kernel's IFLA_INFO_* link-info attribute numbering. */
enum {
    QEMU_IFLA_INFO_UNSPEC,
    QEMU_IFLA_INFO_KIND,
    QEMU_IFLA_INFO_DATA,
    QEMU_IFLA_INFO_XSTATS,
    QEMU_IFLA_INFO_SLAVE_KIND,
    QEMU_IFLA_INFO_SLAVE_DATA,
    QEMU___IFLA_INFO_MAX,
};
/* Local copy of the kernel's IFLA_INET_* attribute numbering. */
enum {
    QEMU_IFLA_INET_UNSPEC,
    QEMU_IFLA_INET_CONF,
    QEMU___IFLA_INET_MAX,
};
/* Local copy of the kernel's IFLA_INET6_* attribute numbering. */
enum {
    QEMU_IFLA_INET6_UNSPEC,
    QEMU_IFLA_INET6_FLAGS,
    QEMU_IFLA_INET6_CONF,
    QEMU_IFLA_INET6_STATS,
    QEMU_IFLA_INET6_MCAST,
    QEMU_IFLA_INET6_CACHEINFO,
    QEMU_IFLA_INET6_ICMP6STATS,
    QEMU_IFLA_INET6_TOKEN,
    QEMU_IFLA_INET6_ADDR_GEN_MODE,
    QEMU___IFLA_INET6_MAX
};
519 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
520 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
521 typedef struct TargetFdTrans {
522 TargetFdDataFunc host_to_target_data;
523 TargetFdDataFunc target_to_host_data;
524 TargetFdAddrFunc target_to_host_addr;
525 } TargetFdTrans;
527 static TargetFdTrans **target_fd_trans;
529 static unsigned int target_fd_max;
531 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
533 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
534 return target_fd_trans[fd]->target_to_host_data;
536 return NULL;
539 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
541 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
542 return target_fd_trans[fd]->host_to_target_data;
544 return NULL;
547 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
549 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
550 return target_fd_trans[fd]->target_to_host_addr;
552 return NULL;
555 static void fd_trans_register(int fd, TargetFdTrans *trans)
557 unsigned int oldmax;
559 if (fd >= target_fd_max) {
560 oldmax = target_fd_max;
561 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
562 target_fd_trans = g_renew(TargetFdTrans *,
563 target_fd_trans, target_fd_max);
564 memset((void *)(target_fd_trans + oldmax), 0,
565 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
567 target_fd_trans[fd] = trans;
570 static void fd_trans_unregister(int fd)
572 if (fd >= 0 && fd < target_fd_max) {
573 target_fd_trans[fd] = NULL;
577 static void fd_trans_dup(int oldfd, int newfd)
579 fd_trans_unregister(newfd);
580 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
581 fd_trans_register(newfd, target_fd_trans[oldfd]);
/* getcwd() wrapper following the kernel syscall convention: returns
 * the string length including the NUL terminator, or -1 with errno
 * set on failure. */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return (-1);
    }
    return strlen(buf) + 1;
}
#ifdef TARGET_NR_utimensat
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Host has no utimensat syscall: fail with ENOSYS. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
/* Host has no renameat2 syscall: fall back to renameat() when no flags
 * are requested, otherwise fail with ENOSYS. */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};

_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

/* Find and claim a free slot in g_posix_timers[]; returns its index,
 * or -1 if all slots are in use.
 * FIXME: Does finding the next free slot require a lock? */
static inline int next_free_host_timer(void)
{
    int k;

    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env, int num)
{
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_SH4)
/* SH4 doesn't align register pairs, except for p{read,write}64 */
static inline int regpairs_aligned(void *cpu_env, int num)
{
    switch (num) {
    case TARGET_NR_pread64:
    case TARGET_NR_pwrite64:
        return 1;

    default:
        return 0;
    }
}
#elif defined(TARGET_XTENSA)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
#endif
730 #define ERRNO_TABLE_SIZE 1200
732 /* target_to_host_errno_table[] is initialized from
733 * host_to_target_errno_table[] in syscall_init(). */
734 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
738 * This list is the union of errno values overridden in asm-<arch>/errno.h
739 * minus the errnos that are not actually generic to all archs.
741 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
742 [EAGAIN] = TARGET_EAGAIN,
743 [EIDRM] = TARGET_EIDRM,
744 [ECHRNG] = TARGET_ECHRNG,
745 [EL2NSYNC] = TARGET_EL2NSYNC,
746 [EL3HLT] = TARGET_EL3HLT,
747 [EL3RST] = TARGET_EL3RST,
748 [ELNRNG] = TARGET_ELNRNG,
749 [EUNATCH] = TARGET_EUNATCH,
750 [ENOCSI] = TARGET_ENOCSI,
751 [EL2HLT] = TARGET_EL2HLT,
752 [EDEADLK] = TARGET_EDEADLK,
753 [ENOLCK] = TARGET_ENOLCK,
754 [EBADE] = TARGET_EBADE,
755 [EBADR] = TARGET_EBADR,
756 [EXFULL] = TARGET_EXFULL,
757 [ENOANO] = TARGET_ENOANO,
758 [EBADRQC] = TARGET_EBADRQC,
759 [EBADSLT] = TARGET_EBADSLT,
760 [EBFONT] = TARGET_EBFONT,
761 [ENOSTR] = TARGET_ENOSTR,
762 [ENODATA] = TARGET_ENODATA,
763 [ETIME] = TARGET_ETIME,
764 [ENOSR] = TARGET_ENOSR,
765 [ENONET] = TARGET_ENONET,
766 [ENOPKG] = TARGET_ENOPKG,
767 [EREMOTE] = TARGET_EREMOTE,
768 [ENOLINK] = TARGET_ENOLINK,
769 [EADV] = TARGET_EADV,
770 [ESRMNT] = TARGET_ESRMNT,
771 [ECOMM] = TARGET_ECOMM,
772 [EPROTO] = TARGET_EPROTO,
773 [EDOTDOT] = TARGET_EDOTDOT,
774 [EMULTIHOP] = TARGET_EMULTIHOP,
775 [EBADMSG] = TARGET_EBADMSG,
776 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
777 [EOVERFLOW] = TARGET_EOVERFLOW,
778 [ENOTUNIQ] = TARGET_ENOTUNIQ,
779 [EBADFD] = TARGET_EBADFD,
780 [EREMCHG] = TARGET_EREMCHG,
781 [ELIBACC] = TARGET_ELIBACC,
782 [ELIBBAD] = TARGET_ELIBBAD,
783 [ELIBSCN] = TARGET_ELIBSCN,
784 [ELIBMAX] = TARGET_ELIBMAX,
785 [ELIBEXEC] = TARGET_ELIBEXEC,
786 [EILSEQ] = TARGET_EILSEQ,
787 [ENOSYS] = TARGET_ENOSYS,
788 [ELOOP] = TARGET_ELOOP,
789 [ERESTART] = TARGET_ERESTART,
790 [ESTRPIPE] = TARGET_ESTRPIPE,
791 [ENOTEMPTY] = TARGET_ENOTEMPTY,
792 [EUSERS] = TARGET_EUSERS,
793 [ENOTSOCK] = TARGET_ENOTSOCK,
794 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
795 [EMSGSIZE] = TARGET_EMSGSIZE,
796 [EPROTOTYPE] = TARGET_EPROTOTYPE,
797 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
798 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
799 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
800 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
801 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
802 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
803 [EADDRINUSE] = TARGET_EADDRINUSE,
804 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
805 [ENETDOWN] = TARGET_ENETDOWN,
806 [ENETUNREACH] = TARGET_ENETUNREACH,
807 [ENETRESET] = TARGET_ENETRESET,
808 [ECONNABORTED] = TARGET_ECONNABORTED,
809 [ECONNRESET] = TARGET_ECONNRESET,
810 [ENOBUFS] = TARGET_ENOBUFS,
811 [EISCONN] = TARGET_EISCONN,
812 [ENOTCONN] = TARGET_ENOTCONN,
813 [EUCLEAN] = TARGET_EUCLEAN,
814 [ENOTNAM] = TARGET_ENOTNAM,
815 [ENAVAIL] = TARGET_ENAVAIL,
816 [EISNAM] = TARGET_EISNAM,
817 [EREMOTEIO] = TARGET_EREMOTEIO,
818 [EDQUOT] = TARGET_EDQUOT,
819 [ESHUTDOWN] = TARGET_ESHUTDOWN,
820 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
821 [ETIMEDOUT] = TARGET_ETIMEDOUT,
822 [ECONNREFUSED] = TARGET_ECONNREFUSED,
823 [EHOSTDOWN] = TARGET_EHOSTDOWN,
824 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
825 [EALREADY] = TARGET_EALREADY,
826 [EINPROGRESS] = TARGET_EINPROGRESS,
827 [ESTALE] = TARGET_ESTALE,
828 [ECANCELED] = TARGET_ECANCELED,
829 [ENOMEDIUM] = TARGET_ENOMEDIUM,
830 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
831 #ifdef ENOKEY
832 [ENOKEY] = TARGET_ENOKEY,
833 #endif
834 #ifdef EKEYEXPIRED
835 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
836 #endif
837 #ifdef EKEYREVOKED
838 [EKEYREVOKED] = TARGET_EKEYREVOKED,
839 #endif
840 #ifdef EKEYREJECTED
841 [EKEYREJECTED] = TARGET_EKEYREJECTED,
842 #endif
843 #ifdef EOWNERDEAD
844 [EOWNERDEAD] = TARGET_EOWNERDEAD,
845 #endif
846 #ifdef ENOTRECOVERABLE
847 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
848 #endif
849 #ifdef ENOMSG
850 [ENOMSG] = TARGET_ENOMSG,
851 #endif
852 #ifdef ERKFILL
853 [ERFKILL] = TARGET_ERFKILL,
854 #endif
855 #ifdef EHWPOISON
856 [EHWPOISON] = TARGET_EHWPOISON,
857 #endif
860 static inline int host_to_target_errno(int err)
862 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
863 host_to_target_errno_table[err]) {
864 return host_to_target_errno_table[err];
866 return err;
869 static inline int target_to_host_errno(int err)
871 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
872 target_to_host_errno_table[err]) {
873 return target_to_host_errno_table[err];
875 return err;
878 static inline abi_long get_errno(abi_long ret)
880 if (ret == -1)
881 return -host_to_target_errno(errno);
882 else
883 return ret;
886 static inline int is_error(abi_long ret)
888 return (abi_ulong)ret >= (abi_ulong)(-4096);
891 const char *target_strerror(int err)
893 if (err == TARGET_ERESTARTSYS) {
894 return "To be restarted";
896 if (err == TARGET_QEMU_ESIGRETURN) {
897 return "Successful exit from sigreturn";
900 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
901 return NULL;
903 return strerror(target_to_host_errno(err));
/* Wrappers for 0..6 argument syscalls; each expands to a static
 * safe_<name>() function that forwards to safe_syscall(). */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
953 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
954 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
955 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
956 int, flags, mode_t, mode)
957 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
958 struct rusage *, rusage)
959 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
960 int, options, struct rusage *, rusage)
961 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
962 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
963 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
964 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
965 struct timespec *, tsp, const sigset_t *, sigmask,
966 size_t, sigsetsize)
967 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
968 int, maxevents, int, timeout, const sigset_t *, sigmask,
969 size_t, sigsetsize)
970 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
971 const struct timespec *,timeout,int *,uaddr2,int,val3)
972 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
973 safe_syscall2(int, kill, pid_t, pid, int, sig)
974 safe_syscall2(int, tkill, int, tid, int, sig)
975 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
976 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
977 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
978 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
979 unsigned long, pos_l, unsigned long, pos_h)
980 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
981 unsigned long, pos_l, unsigned long, pos_h)
982 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
983 socklen_t, addrlen)
984 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
985 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
986 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
987 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
988 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
989 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
990 safe_syscall2(int, flock, int, fd, int, operation)
991 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
992 const struct timespec *, uts, size_t, sigsetsize)
993 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
994 int, flags)
995 safe_syscall2(int, nanosleep, const struct timespec *, req,
996 struct timespec *, rem)
997 #ifdef TARGET_NR_clock_nanosleep
998 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
999 const struct timespec *, req, struct timespec *, rem)
1000 #endif
1001 #ifdef __NR_msgsnd
1002 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
1003 int, flags)
1004 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
1005 long, msgtype, int, flags)
1006 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
1007 unsigned, nsops, const struct timespec *, timeout)
1008 #else
1009 /* This host kernel architecture uses a single ipc syscall; fake up
1010 * wrappers for the sub-operations to hide this implementation detail.
1011 * Annoyingly we can't include linux/ipc.h to get the constant definitions
1012 * for the call parameter because some structs in there conflict with the
1013 * sys/ipc.h ones. So we just define them here, and rely on them being
1014 * the same for all host architectures.
1016 #define Q_SEMTIMEDOP 4
1017 #define Q_MSGSND 11
1018 #define Q_MSGRCV 12
1019 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
1021 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
1022 void *, ptr, long, fifth)
1023 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
1025 return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
1027 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
1029 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
1031 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
1032 const struct timespec *timeout)
1034 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
1035 (long)timeout);
1037 #endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif

/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
1060 static inline int host_to_target_sock_type(int host_type)
1062 int target_type;
1064 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
1065 case SOCK_DGRAM:
1066 target_type = TARGET_SOCK_DGRAM;
1067 break;
1068 case SOCK_STREAM:
1069 target_type = TARGET_SOCK_STREAM;
1070 break;
1071 default:
1072 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
1073 break;
1076 #if defined(SOCK_CLOEXEC)
1077 if (host_type & SOCK_CLOEXEC) {
1078 target_type |= TARGET_SOCK_CLOEXEC;
1080 #endif
1082 #if defined(SOCK_NONBLOCK)
1083 if (host_type & SOCK_NONBLOCK) {
1084 target_type |= TARGET_SOCK_NONBLOCK;
1086 #endif
1088 return target_type;
1091 static abi_ulong target_brk;
1092 static abi_ulong target_original_brk;
1093 static abi_ulong brk_page;
1095 void target_set_brk(abi_ulong new_brk)
1097 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
1098 brk_page = HOST_PAGE_ALIGN(target_brk);
1101 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
1102 #define DEBUGF_BRK(message, args...)
1104 /* do_brk() must return target values and target errnos. */
1105 abi_long do_brk(abi_ulong new_brk)
1107 abi_long mapped_addr;
1108 abi_ulong new_alloc_size;
1110 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
1112 if (!new_brk) {
1113 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
1114 return target_brk;
1116 if (new_brk < target_original_brk) {
1117 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
1118 target_brk);
1119 return target_brk;
1122 /* If the new brk is less than the highest page reserved to the
1123 * target heap allocation, set it and we're almost done... */
1124 if (new_brk <= brk_page) {
1125 /* Heap contents are initialized to zero, as for anonymous
1126 * mapped pages. */
1127 if (new_brk > target_brk) {
1128 memset(g2h(target_brk), 0, new_brk - target_brk);
1130 target_brk = new_brk;
1131 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
1132 return target_brk;
1135 /* We need to allocate more memory after the brk... Note that
1136 * we don't use MAP_FIXED because that will map over the top of
1137 * any existing mapping (like the one with the host libc or qemu
1138 * itself); instead we treat "mapped but at wrong address" as
1139 * a failure and unmap again.
1141 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
1142 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
1143 PROT_READ|PROT_WRITE,
1144 MAP_ANON|MAP_PRIVATE, 0, 0));
1146 if (mapped_addr == brk_page) {
1147 /* Heap contents are initialized to zero, as for anonymous
1148 * mapped pages. Technically the new pages are already
1149 * initialized to zero since they *are* anonymous mapped
1150 * pages, however we have to take care with the contents that
1151 * come from the remaining part of the previous page: it may
1152 * contains garbage data due to a previous heap usage (grown
1153 * then shrunken). */
1154 memset(g2h(target_brk), 0, brk_page - target_brk);
1156 target_brk = new_brk;
1157 brk_page = HOST_PAGE_ALIGN(target_brk);
1158 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
1159 target_brk);
1160 return target_brk;
1161 } else if (mapped_addr != -1) {
1162 /* Mapped but at wrong address, meaning there wasn't actually
1163 * enough space for this brk.
1165 target_munmap(mapped_addr, new_alloc_size);
1166 mapped_addr = -1;
1167 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
1169 else {
1170 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
1173 #if defined(TARGET_ALPHA)
1174 /* We (partially) emulate OSF/1 on Alpha, which requires we
1175 return a proper errno, not an unchanged brk value. */
1176 return -TARGET_ENOMEM;
1177 #endif
1178 /* For everything else, return the previous break. */
1179 return target_brk;
1182 static inline abi_long copy_from_user_fdset(fd_set *fds,
1183 abi_ulong target_fds_addr,
1184 int n)
1186 int i, nw, j, k;
1187 abi_ulong b, *target_fds;
1189 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1190 if (!(target_fds = lock_user(VERIFY_READ,
1191 target_fds_addr,
1192 sizeof(abi_ulong) * nw,
1193 1)))
1194 return -TARGET_EFAULT;
1196 FD_ZERO(fds);
1197 k = 0;
1198 for (i = 0; i < nw; i++) {
1199 /* grab the abi_ulong */
1200 __get_user(b, &target_fds[i]);
1201 for (j = 0; j < TARGET_ABI_BITS; j++) {
1202 /* check the bit inside the abi_ulong */
1203 if ((b >> j) & 1)
1204 FD_SET(k, fds);
1205 k++;
1209 unlock_user(target_fds, target_fds_addr, 0);
1211 return 0;
1214 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1215 abi_ulong target_fds_addr,
1216 int n)
1218 if (target_fds_addr) {
1219 if (copy_from_user_fdset(fds, target_fds_addr, n))
1220 return -TARGET_EFAULT;
1221 *fds_ptr = fds;
1222 } else {
1223 *fds_ptr = NULL;
1225 return 0;
1228 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1229 const fd_set *fds,
1230 int n)
1232 int i, nw, j, k;
1233 abi_long v;
1234 abi_ulong *target_fds;
1236 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1237 if (!(target_fds = lock_user(VERIFY_WRITE,
1238 target_fds_addr,
1239 sizeof(abi_ulong) * nw,
1240 0)))
1241 return -TARGET_EFAULT;
1243 k = 0;
1244 for (i = 0; i < nw; i++) {
1245 v = 0;
1246 for (j = 0; j < TARGET_ABI_BITS; j++) {
1247 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1248 k++;
1250 __put_user(v, &target_fds[i]);
1253 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1255 return 0;
1258 #if defined(__alpha__)
1259 #define HOST_HZ 1024
1260 #else
1261 #define HOST_HZ 100
1262 #endif
1264 static inline abi_long host_to_target_clock_t(long ticks)
1266 #if HOST_HZ == TARGET_HZ
1267 return ticks;
1268 #else
1269 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1270 #endif
1273 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1274 const struct rusage *rusage)
1276 struct target_rusage *target_rusage;
1278 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1279 return -TARGET_EFAULT;
1280 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1281 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1282 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1283 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1284 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1285 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1286 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1287 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1288 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1289 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1290 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1291 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1292 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1293 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1294 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1295 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1296 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1297 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1298 unlock_user_struct(target_rusage, target_addr, 1);
1300 return 0;
1303 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1305 abi_ulong target_rlim_swap;
1306 rlim_t result;
1308 target_rlim_swap = tswapal(target_rlim);
1309 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1310 return RLIM_INFINITY;
1312 result = target_rlim_swap;
1313 if (target_rlim_swap != (rlim_t)result)
1314 return RLIM_INFINITY;
1316 return result;
1319 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1321 abi_ulong target_rlim_swap;
1322 abi_ulong result;
1324 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1325 target_rlim_swap = TARGET_RLIM_INFINITY;
1326 else
1327 target_rlim_swap = rlim;
1328 result = tswapal(target_rlim_swap);
1330 return result;
1333 static inline int target_to_host_resource(int code)
1335 switch (code) {
1336 case TARGET_RLIMIT_AS:
1337 return RLIMIT_AS;
1338 case TARGET_RLIMIT_CORE:
1339 return RLIMIT_CORE;
1340 case TARGET_RLIMIT_CPU:
1341 return RLIMIT_CPU;
1342 case TARGET_RLIMIT_DATA:
1343 return RLIMIT_DATA;
1344 case TARGET_RLIMIT_FSIZE:
1345 return RLIMIT_FSIZE;
1346 case TARGET_RLIMIT_LOCKS:
1347 return RLIMIT_LOCKS;
1348 case TARGET_RLIMIT_MEMLOCK:
1349 return RLIMIT_MEMLOCK;
1350 case TARGET_RLIMIT_MSGQUEUE:
1351 return RLIMIT_MSGQUEUE;
1352 case TARGET_RLIMIT_NICE:
1353 return RLIMIT_NICE;
1354 case TARGET_RLIMIT_NOFILE:
1355 return RLIMIT_NOFILE;
1356 case TARGET_RLIMIT_NPROC:
1357 return RLIMIT_NPROC;
1358 case TARGET_RLIMIT_RSS:
1359 return RLIMIT_RSS;
1360 case TARGET_RLIMIT_RTPRIO:
1361 return RLIMIT_RTPRIO;
1362 case TARGET_RLIMIT_SIGPENDING:
1363 return RLIMIT_SIGPENDING;
1364 case TARGET_RLIMIT_STACK:
1365 return RLIMIT_STACK;
1366 default:
1367 return code;
1371 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1372 abi_ulong target_tv_addr)
1374 struct target_timeval *target_tv;
1376 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1377 return -TARGET_EFAULT;
1379 __get_user(tv->tv_sec, &target_tv->tv_sec);
1380 __get_user(tv->tv_usec, &target_tv->tv_usec);
1382 unlock_user_struct(target_tv, target_tv_addr, 0);
1384 return 0;
1387 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1388 const struct timeval *tv)
1390 struct target_timeval *target_tv;
1392 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1393 return -TARGET_EFAULT;
1395 __put_user(tv->tv_sec, &target_tv->tv_sec);
1396 __put_user(tv->tv_usec, &target_tv->tv_usec);
1398 unlock_user_struct(target_tv, target_tv_addr, 1);
1400 return 0;
1403 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1404 abi_ulong target_tz_addr)
1406 struct target_timezone *target_tz;
1408 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1409 return -TARGET_EFAULT;
1412 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1413 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1415 unlock_user_struct(target_tz, target_tz_addr, 0);
1417 return 0;
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

/* Read a guest struct mq_attr into a host one.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

/* Write a host struct mq_attr back to guest memory.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos.
 *
 * Implemented on top of safe_pselect6() (with a NULL sigmask) so the
 * wait can be interrupted by guest signals; the timeout is converted
 * timeval -> timespec on the way in and back again on the way out.
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr)) {
            return -TARGET_EFAULT;
        }
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }

        /* Linux updates the timeout with the time left; mirror that. */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}

#if defined(TARGET_WANT_OLD_SYS_SELECT)
/* Old-style select: all five arguments are packed into a struct in
 * guest memory whose address is the single syscall argument.
 */
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#endif
1544 static abi_long do_pipe2(int host_pipe[], int flags)
1546 #ifdef CONFIG_PIPE2
1547 return pipe2(host_pipe, flags);
1548 #else
1549 return -ENOSYS;
1550 #endif
1553 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1554 int flags, int is_pipe2)
1556 int host_pipe[2];
1557 abi_long ret;
1558 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1560 if (is_error(ret))
1561 return get_errno(ret);
1563 /* Several targets have special calling conventions for the original
1564 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1565 if (!is_pipe2) {
1566 #if defined(TARGET_ALPHA)
1567 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1568 return host_pipe[0];
1569 #elif defined(TARGET_MIPS)
1570 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1571 return host_pipe[0];
1572 #elif defined(TARGET_SH4)
1573 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1574 return host_pipe[0];
1575 #elif defined(TARGET_SPARC)
1576 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1577 return host_pipe[0];
1578 #endif
1581 if (put_user_s32(host_pipe[0], pipedes)
1582 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1583 return -TARGET_EFAULT;
1584 return get_errno(ret);
1587 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1588 abi_ulong target_addr,
1589 socklen_t len)
1591 struct target_ip_mreqn *target_smreqn;
1593 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1594 if (!target_smreqn)
1595 return -TARGET_EFAULT;
1596 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1597 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1598 if (len == sizeof(struct target_ip_mreqn))
1599 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1600 unlock_user(target_smreqn, target_addr, 0);
1602 return 0;
1605 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1606 abi_ulong target_addr,
1607 socklen_t len)
1609 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1610 sa_family_t sa_family;
1611 struct target_sockaddr *target_saddr;
1613 if (fd_trans_target_to_host_addr(fd)) {
1614 return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1617 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1618 if (!target_saddr)
1619 return -TARGET_EFAULT;
1621 sa_family = tswap16(target_saddr->sa_family);
1623 /* Oops. The caller might send a incomplete sun_path; sun_path
1624 * must be terminated by \0 (see the manual page), but
1625 * unfortunately it is quite common to specify sockaddr_un
1626 * length as "strlen(x->sun_path)" while it should be
1627 * "strlen(...) + 1". We'll fix that here if needed.
1628 * Linux kernel has a similar feature.
1631 if (sa_family == AF_UNIX) {
1632 if (len < unix_maxlen && len > 0) {
1633 char *cp = (char*)target_saddr;
1635 if ( cp[len-1] && !cp[len] )
1636 len++;
1638 if (len > unix_maxlen)
1639 len = unix_maxlen;
1642 memcpy(addr, target_saddr, len);
1643 addr->sa_family = sa_family;
1644 if (sa_family == AF_NETLINK) {
1645 struct sockaddr_nl *nladdr;
1647 nladdr = (struct sockaddr_nl *)addr;
1648 nladdr->nl_pid = tswap32(nladdr->nl_pid);
1649 nladdr->nl_groups = tswap32(nladdr->nl_groups);
1650 } else if (sa_family == AF_PACKET) {
1651 struct target_sockaddr_ll *lladdr;
1653 lladdr = (struct target_sockaddr_ll *)addr;
1654 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1655 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1657 unlock_user(target_saddr, target_addr, 0);
1659 return 0;
1662 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1663 struct sockaddr *addr,
1664 socklen_t len)
1666 struct target_sockaddr *target_saddr;
1668 if (len == 0) {
1669 return 0;
1671 assert(addr);
1673 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1674 if (!target_saddr)
1675 return -TARGET_EFAULT;
1676 memcpy(target_saddr, addr, len);
1677 if (len >= offsetof(struct target_sockaddr, sa_family) +
1678 sizeof(target_saddr->sa_family)) {
1679 target_saddr->sa_family = tswap16(addr->sa_family);
1681 if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1682 struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1683 target_nl->nl_pid = tswap32(target_nl->nl_pid);
1684 target_nl->nl_groups = tswap32(target_nl->nl_groups);
1685 } else if (addr->sa_family == AF_PACKET) {
1686 struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1687 target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1688 target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1689 } else if (addr->sa_family == AF_INET6 &&
1690 len >= sizeof(struct target_sockaddr_in6)) {
1691 struct target_sockaddr_in6 *target_in6 =
1692 (struct target_sockaddr_in6 *)target_saddr;
1693 target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1695 unlock_user(target_saddr, target_addr, len);
1697 return 0;
1700 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1701 struct target_msghdr *target_msgh)
1703 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1704 abi_long msg_controllen;
1705 abi_ulong target_cmsg_addr;
1706 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1707 socklen_t space = 0;
1709 msg_controllen = tswapal(target_msgh->msg_controllen);
1710 if (msg_controllen < sizeof (struct target_cmsghdr))
1711 goto the_end;
1712 target_cmsg_addr = tswapal(target_msgh->msg_control);
1713 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1714 target_cmsg_start = target_cmsg;
1715 if (!target_cmsg)
1716 return -TARGET_EFAULT;
1718 while (cmsg && target_cmsg) {
1719 void *data = CMSG_DATA(cmsg);
1720 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1722 int len = tswapal(target_cmsg->cmsg_len)
1723 - sizeof(struct target_cmsghdr);
1725 space += CMSG_SPACE(len);
1726 if (space > msgh->msg_controllen) {
1727 space -= CMSG_SPACE(len);
1728 /* This is a QEMU bug, since we allocated the payload
1729 * area ourselves (unlike overflow in host-to-target
1730 * conversion, which is just the guest giving us a buffer
1731 * that's too small). It can't happen for the payload types
1732 * we currently support; if it becomes an issue in future
1733 * we would need to improve our allocation strategy to
1734 * something more intelligent than "twice the size of the
1735 * target buffer we're reading from".
1737 gemu_log("Host cmsg overflow\n");
1738 break;
1741 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1742 cmsg->cmsg_level = SOL_SOCKET;
1743 } else {
1744 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1746 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1747 cmsg->cmsg_len = CMSG_LEN(len);
1749 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1750 int *fd = (int *)data;
1751 int *target_fd = (int *)target_data;
1752 int i, numfds = len / sizeof(int);
1754 for (i = 0; i < numfds; i++) {
1755 __get_user(fd[i], target_fd + i);
1757 } else if (cmsg->cmsg_level == SOL_SOCKET
1758 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1759 struct ucred *cred = (struct ucred *)data;
1760 struct target_ucred *target_cred =
1761 (struct target_ucred *)target_data;
1763 __get_user(cred->pid, &target_cred->pid);
1764 __get_user(cred->uid, &target_cred->uid);
1765 __get_user(cred->gid, &target_cred->gid);
1766 } else {
1767 gemu_log("Unsupported ancillary data: %d/%d\n",
1768 cmsg->cmsg_level, cmsg->cmsg_type);
1769 memcpy(data, target_data, len);
1772 cmsg = CMSG_NXTHDR(msgh, cmsg);
1773 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1774 target_cmsg_start);
1776 unlock_user(target_cmsg, target_cmsg_addr, 0);
1777 the_end:
1778 msgh->msg_controllen = space;
1779 return 0;
1782 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1783 struct msghdr *msgh)
1785 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1786 abi_long msg_controllen;
1787 abi_ulong target_cmsg_addr;
1788 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1789 socklen_t space = 0;
1791 msg_controllen = tswapal(target_msgh->msg_controllen);
1792 if (msg_controllen < sizeof (struct target_cmsghdr))
1793 goto the_end;
1794 target_cmsg_addr = tswapal(target_msgh->msg_control);
1795 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1796 target_cmsg_start = target_cmsg;
1797 if (!target_cmsg)
1798 return -TARGET_EFAULT;
1800 while (cmsg && target_cmsg) {
1801 void *data = CMSG_DATA(cmsg);
1802 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1804 int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1805 int tgt_len, tgt_space;
1807 /* We never copy a half-header but may copy half-data;
1808 * this is Linux's behaviour in put_cmsg(). Note that
1809 * truncation here is a guest problem (which we report
1810 * to the guest via the CTRUNC bit), unlike truncation
1811 * in target_to_host_cmsg, which is a QEMU bug.
1813 if (msg_controllen < sizeof(struct target_cmsghdr)) {
1814 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1815 break;
1818 if (cmsg->cmsg_level == SOL_SOCKET) {
1819 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1820 } else {
1821 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1823 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1825 /* Payload types which need a different size of payload on
1826 * the target must adjust tgt_len here.
1828 switch (cmsg->cmsg_level) {
1829 case SOL_SOCKET:
1830 switch (cmsg->cmsg_type) {
1831 case SO_TIMESTAMP:
1832 tgt_len = sizeof(struct target_timeval);
1833 break;
1834 default:
1835 break;
1837 default:
1838 tgt_len = len;
1839 break;
1842 if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1843 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1844 tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1847 /* We must now copy-and-convert len bytes of payload
1848 * into tgt_len bytes of destination space. Bear in mind
1849 * that in both source and destination we may be dealing
1850 * with a truncated value!
1852 switch (cmsg->cmsg_level) {
1853 case SOL_SOCKET:
1854 switch (cmsg->cmsg_type) {
1855 case SCM_RIGHTS:
1857 int *fd = (int *)data;
1858 int *target_fd = (int *)target_data;
1859 int i, numfds = tgt_len / sizeof(int);
1861 for (i = 0; i < numfds; i++) {
1862 __put_user(fd[i], target_fd + i);
1864 break;
1866 case SO_TIMESTAMP:
1868 struct timeval *tv = (struct timeval *)data;
1869 struct target_timeval *target_tv =
1870 (struct target_timeval *)target_data;
1872 if (len != sizeof(struct timeval) ||
1873 tgt_len != sizeof(struct target_timeval)) {
1874 goto unimplemented;
1877 /* copy struct timeval to target */
1878 __put_user(tv->tv_sec, &target_tv->tv_sec);
1879 __put_user(tv->tv_usec, &target_tv->tv_usec);
1880 break;
1882 case SCM_CREDENTIALS:
1884 struct ucred *cred = (struct ucred *)data;
1885 struct target_ucred *target_cred =
1886 (struct target_ucred *)target_data;
1888 __put_user(cred->pid, &target_cred->pid);
1889 __put_user(cred->uid, &target_cred->uid);
1890 __put_user(cred->gid, &target_cred->gid);
1891 break;
1893 default:
1894 goto unimplemented;
1896 break;
1898 case SOL_IP:
1899 switch (cmsg->cmsg_type) {
1900 case IP_TTL:
1902 uint32_t *v = (uint32_t *)data;
1903 uint32_t *t_int = (uint32_t *)target_data;
1905 if (len != sizeof(uint32_t) ||
1906 tgt_len != sizeof(uint32_t)) {
1907 goto unimplemented;
1909 __put_user(*v, t_int);
1910 break;
1912 case IP_RECVERR:
1914 struct errhdr_t {
1915 struct sock_extended_err ee;
1916 struct sockaddr_in offender;
1918 struct errhdr_t *errh = (struct errhdr_t *)data;
1919 struct errhdr_t *target_errh =
1920 (struct errhdr_t *)target_data;
1922 if (len != sizeof(struct errhdr_t) ||
1923 tgt_len != sizeof(struct errhdr_t)) {
1924 goto unimplemented;
1926 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1927 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1928 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1929 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1930 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1931 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1932 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1933 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1934 (void *) &errh->offender, sizeof(errh->offender));
1935 break;
1937 default:
1938 goto unimplemented;
1940 break;
1942 case SOL_IPV6:
1943 switch (cmsg->cmsg_type) {
1944 case IPV6_HOPLIMIT:
1946 uint32_t *v = (uint32_t *)data;
1947 uint32_t *t_int = (uint32_t *)target_data;
1949 if (len != sizeof(uint32_t) ||
1950 tgt_len != sizeof(uint32_t)) {
1951 goto unimplemented;
1953 __put_user(*v, t_int);
1954 break;
1956 case IPV6_RECVERR:
1958 struct errhdr6_t {
1959 struct sock_extended_err ee;
1960 struct sockaddr_in6 offender;
1962 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1963 struct errhdr6_t *target_errh =
1964 (struct errhdr6_t *)target_data;
1966 if (len != sizeof(struct errhdr6_t) ||
1967 tgt_len != sizeof(struct errhdr6_t)) {
1968 goto unimplemented;
1970 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1971 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1972 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1973 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1974 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1975 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1976 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1977 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1978 (void *) &errh->offender, sizeof(errh->offender));
1979 break;
1981 default:
1982 goto unimplemented;
1984 break;
1986 default:
1987 unimplemented:
1988 gemu_log("Unsupported ancillary data: %d/%d\n",
1989 cmsg->cmsg_level, cmsg->cmsg_type);
1990 memcpy(target_data, data, MIN(len, tgt_len));
1991 if (tgt_len > len) {
1992 memset(target_data + len, 0, tgt_len - len);
1996 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1997 tgt_space = TARGET_CMSG_SPACE(tgt_len);
1998 if (msg_controllen < tgt_space) {
1999 tgt_space = msg_controllen;
2001 msg_controllen -= tgt_space;
2002 space += tgt_space;
2003 cmsg = CMSG_NXTHDR(msgh, cmsg);
2004 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2005 target_cmsg_start);
2007 unlock_user(target_cmsg, target_cmsg_addr, space);
2008 the_end:
2009 target_msgh->msg_controllen = tswapal(space);
2010 return 0;
2013 static void tswap_nlmsghdr(struct nlmsghdr *nlh)
2015 nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
2016 nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
2017 nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
2018 nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
2019 nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
2022 static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
2023 size_t len,
2024 abi_long (*host_to_target_nlmsg)
2025 (struct nlmsghdr *))
2027 uint32_t nlmsg_len;
2028 abi_long ret;
2030 while (len > sizeof(struct nlmsghdr)) {
2032 nlmsg_len = nlh->nlmsg_len;
2033 if (nlmsg_len < sizeof(struct nlmsghdr) ||
2034 nlmsg_len > len) {
2035 break;
2038 switch (nlh->nlmsg_type) {
2039 case NLMSG_DONE:
2040 tswap_nlmsghdr(nlh);
2041 return 0;
2042 case NLMSG_NOOP:
2043 break;
2044 case NLMSG_ERROR:
2046 struct nlmsgerr *e = NLMSG_DATA(nlh);
2047 e->error = tswap32(e->error);
2048 tswap_nlmsghdr(&e->msg);
2049 tswap_nlmsghdr(nlh);
2050 return 0;
2052 default:
2053 ret = host_to_target_nlmsg(nlh);
2054 if (ret < 0) {
2055 tswap_nlmsghdr(nlh);
2056 return ret;
2058 break;
2060 tswap_nlmsghdr(nlh);
2061 len -= NLMSG_ALIGN(nlmsg_len);
2062 nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
2064 return 0;
2067 static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
2068 size_t len,
2069 abi_long (*target_to_host_nlmsg)
2070 (struct nlmsghdr *))
2072 int ret;
2074 while (len > sizeof(struct nlmsghdr)) {
2075 if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
2076 tswap32(nlh->nlmsg_len) > len) {
2077 break;
2079 tswap_nlmsghdr(nlh);
2080 switch (nlh->nlmsg_type) {
2081 case NLMSG_DONE:
2082 return 0;
2083 case NLMSG_NOOP:
2084 break;
2085 case NLMSG_ERROR:
2087 struct nlmsgerr *e = NLMSG_DATA(nlh);
2088 e->error = tswap32(e->error);
2089 tswap_nlmsghdr(&e->msg);
2090 return 0;
2092 default:
2093 ret = target_to_host_nlmsg(nlh);
2094 if (ret < 0) {
2095 return ret;
2098 len -= NLMSG_ALIGN(nlh->nlmsg_len);
2099 nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
2101 return 0;
2104 #ifdef CONFIG_RTNETLINK
2105 static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
2106 size_t len, void *context,
2107 abi_long (*host_to_target_nlattr)
2108 (struct nlattr *,
2109 void *context))
2111 unsigned short nla_len;
2112 abi_long ret;
2114 while (len > sizeof(struct nlattr)) {
2115 nla_len = nlattr->nla_len;
2116 if (nla_len < sizeof(struct nlattr) ||
2117 nla_len > len) {
2118 break;
2120 ret = host_to_target_nlattr(nlattr, context);
2121 nlattr->nla_len = tswap16(nlattr->nla_len);
2122 nlattr->nla_type = tswap16(nlattr->nla_type);
2123 if (ret < 0) {
2124 return ret;
2126 len -= NLA_ALIGN(nla_len);
2127 nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
2129 return 0;
2132 static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
2133 size_t len,
2134 abi_long (*host_to_target_rtattr)
2135 (struct rtattr *))
2137 unsigned short rta_len;
2138 abi_long ret;
2140 while (len > sizeof(struct rtattr)) {
2141 rta_len = rtattr->rta_len;
2142 if (rta_len < sizeof(struct rtattr) ||
2143 rta_len > len) {
2144 break;
2146 ret = host_to_target_rtattr(rtattr);
2147 rtattr->rta_len = tswap16(rtattr->rta_len);
2148 rtattr->rta_type = tswap16(rtattr->rta_type);
2149 if (ret < 0) {
2150 return ret;
2152 len -= RTA_ALIGN(rta_len);
2153 rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
2155 return 0;
/* Pointer to the payload of a netlink attribute (skip the nla header).
 * Do the arithmetic on char * and cast the *result* to void *: the
 * original form added NLA_HDRLEN to a void * value, which relies on the
 * GNU extension of void * arithmetic and is invalid ISO C. */
#define NLA_DATA(nla) ((void *)((char *)(nla) + NLA_HDRLEN))
2160 static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
2161 void *context)
2163 uint16_t *u16;
2164 uint32_t *u32;
2165 uint64_t *u64;
2167 switch (nlattr->nla_type) {
2168 /* no data */
2169 case QEMU_IFLA_BR_FDB_FLUSH:
2170 break;
2171 /* binary */
2172 case QEMU_IFLA_BR_GROUP_ADDR:
2173 break;
2174 /* uint8_t */
2175 case QEMU_IFLA_BR_VLAN_FILTERING:
2176 case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
2177 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
2178 case QEMU_IFLA_BR_MCAST_ROUTER:
2179 case QEMU_IFLA_BR_MCAST_SNOOPING:
2180 case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
2181 case QEMU_IFLA_BR_MCAST_QUERIER:
2182 case QEMU_IFLA_BR_NF_CALL_IPTABLES:
2183 case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
2184 case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
2185 break;
2186 /* uint16_t */
2187 case QEMU_IFLA_BR_PRIORITY:
2188 case QEMU_IFLA_BR_VLAN_PROTOCOL:
2189 case QEMU_IFLA_BR_GROUP_FWD_MASK:
2190 case QEMU_IFLA_BR_ROOT_PORT:
2191 case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
2192 u16 = NLA_DATA(nlattr);
2193 *u16 = tswap16(*u16);
2194 break;
2195 /* uint32_t */
2196 case QEMU_IFLA_BR_FORWARD_DELAY:
2197 case QEMU_IFLA_BR_HELLO_TIME:
2198 case QEMU_IFLA_BR_MAX_AGE:
2199 case QEMU_IFLA_BR_AGEING_TIME:
2200 case QEMU_IFLA_BR_STP_STATE:
2201 case QEMU_IFLA_BR_ROOT_PATH_COST:
2202 case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
2203 case QEMU_IFLA_BR_MCAST_HASH_MAX:
2204 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
2205 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
2206 u32 = NLA_DATA(nlattr);
2207 *u32 = tswap32(*u32);
2208 break;
2209 /* uint64_t */
2210 case QEMU_IFLA_BR_HELLO_TIMER:
2211 case QEMU_IFLA_BR_TCN_TIMER:
2212 case QEMU_IFLA_BR_GC_TIMER:
2213 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
2214 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
2215 case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
2216 case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
2217 case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
2218 case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
2219 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
2220 u64 = NLA_DATA(nlattr);
2221 *u64 = tswap64(*u64);
2222 break;
2223 /* ifla_bridge_id: uin8_t[] */
2224 case QEMU_IFLA_BR_ROOT_ID:
2225 case QEMU_IFLA_BR_BRIDGE_ID:
2226 break;
2227 default:
2228 gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
2229 break;
2231 return 0;
2234 static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
2235 void *context)
2237 uint16_t *u16;
2238 uint32_t *u32;
2239 uint64_t *u64;
2241 switch (nlattr->nla_type) {
2242 /* uint8_t */
2243 case QEMU_IFLA_BRPORT_STATE:
2244 case QEMU_IFLA_BRPORT_MODE:
2245 case QEMU_IFLA_BRPORT_GUARD:
2246 case QEMU_IFLA_BRPORT_PROTECT:
2247 case QEMU_IFLA_BRPORT_FAST_LEAVE:
2248 case QEMU_IFLA_BRPORT_LEARNING:
2249 case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
2250 case QEMU_IFLA_BRPORT_PROXYARP:
2251 case QEMU_IFLA_BRPORT_LEARNING_SYNC:
2252 case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
2253 case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
2254 case QEMU_IFLA_BRPORT_CONFIG_PENDING:
2255 case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
2256 break;
2257 /* uint16_t */
2258 case QEMU_IFLA_BRPORT_PRIORITY:
2259 case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
2260 case QEMU_IFLA_BRPORT_DESIGNATED_COST:
2261 case QEMU_IFLA_BRPORT_ID:
2262 case QEMU_IFLA_BRPORT_NO:
2263 u16 = NLA_DATA(nlattr);
2264 *u16 = tswap16(*u16);
2265 break;
2266 /* uin32_t */
2267 case QEMU_IFLA_BRPORT_COST:
2268 u32 = NLA_DATA(nlattr);
2269 *u32 = tswap32(*u32);
2270 break;
2271 /* uint64_t */
2272 case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
2273 case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
2274 case QEMU_IFLA_BRPORT_HOLD_TIMER:
2275 u64 = NLA_DATA(nlattr);
2276 *u64 = tswap64(*u64);
2277 break;
2278 /* ifla_bridge_id: uint8_t[] */
2279 case QEMU_IFLA_BRPORT_ROOT_ID:
2280 case QEMU_IFLA_BRPORT_BRIDGE_ID:
2281 break;
2282 default:
2283 gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
2284 break;
2286 return 0;
/* Context threaded through IFLA_LINKINFO attribute walks: records the
 * driver "kind" strings seen in IFLA_INFO_KIND / IFLA_INFO_SLAVE_KIND so
 * the nested IFLA_INFO_DATA payloads can be interpreted. */
struct linkinfo_context {
    int len;          /* length of 'name' payload */
    char *name;       /* IFLA_INFO_KIND string, points into the buffer */
    int slave_len;    /* length of 'slave_name' payload */
    char *slave_name; /* IFLA_INFO_SLAVE_KIND string */
};
2296 static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
2297 void *context)
2299 struct linkinfo_context *li_context = context;
2301 switch (nlattr->nla_type) {
2302 /* string */
2303 case QEMU_IFLA_INFO_KIND:
2304 li_context->name = NLA_DATA(nlattr);
2305 li_context->len = nlattr->nla_len - NLA_HDRLEN;
2306 break;
2307 case QEMU_IFLA_INFO_SLAVE_KIND:
2308 li_context->slave_name = NLA_DATA(nlattr);
2309 li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
2310 break;
2311 /* stats */
2312 case QEMU_IFLA_INFO_XSTATS:
2313 /* FIXME: only used by CAN */
2314 break;
2315 /* nested */
2316 case QEMU_IFLA_INFO_DATA:
2317 if (strncmp(li_context->name, "bridge",
2318 li_context->len) == 0) {
2319 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2320 nlattr->nla_len,
2321 NULL,
2322 host_to_target_data_bridge_nlattr);
2323 } else {
2324 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
2326 break;
2327 case QEMU_IFLA_INFO_SLAVE_DATA:
2328 if (strncmp(li_context->slave_name, "bridge",
2329 li_context->slave_len) == 0) {
2330 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2331 nlattr->nla_len,
2332 NULL,
2333 host_to_target_slave_data_bridge_nlattr);
2334 } else {
2335 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2336 li_context->slave_name);
2338 break;
2339 default:
2340 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
2341 break;
2344 return 0;
2347 static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
2348 void *context)
2350 uint32_t *u32;
2351 int i;
2353 switch (nlattr->nla_type) {
2354 case QEMU_IFLA_INET_CONF:
2355 u32 = NLA_DATA(nlattr);
2356 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2357 i++) {
2358 u32[i] = tswap32(u32[i]);
2360 break;
2361 default:
2362 gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
2364 return 0;
2367 static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
2368 void *context)
2370 uint32_t *u32;
2371 uint64_t *u64;
2372 struct ifla_cacheinfo *ci;
2373 int i;
2375 switch (nlattr->nla_type) {
2376 /* binaries */
2377 case QEMU_IFLA_INET6_TOKEN:
2378 break;
2379 /* uint8_t */
2380 case QEMU_IFLA_INET6_ADDR_GEN_MODE:
2381 break;
2382 /* uint32_t */
2383 case QEMU_IFLA_INET6_FLAGS:
2384 u32 = NLA_DATA(nlattr);
2385 *u32 = tswap32(*u32);
2386 break;
2387 /* uint32_t[] */
2388 case QEMU_IFLA_INET6_CONF:
2389 u32 = NLA_DATA(nlattr);
2390 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2391 i++) {
2392 u32[i] = tswap32(u32[i]);
2394 break;
2395 /* ifla_cacheinfo */
2396 case QEMU_IFLA_INET6_CACHEINFO:
2397 ci = NLA_DATA(nlattr);
2398 ci->max_reasm_len = tswap32(ci->max_reasm_len);
2399 ci->tstamp = tswap32(ci->tstamp);
2400 ci->reachable_time = tswap32(ci->reachable_time);
2401 ci->retrans_time = tswap32(ci->retrans_time);
2402 break;
2403 /* uint64_t[] */
2404 case QEMU_IFLA_INET6_STATS:
2405 case QEMU_IFLA_INET6_ICMP6STATS:
2406 u64 = NLA_DATA(nlattr);
2407 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
2408 i++) {
2409 u64[i] = tswap64(u64[i]);
2411 break;
2412 default:
2413 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
2415 return 0;
2418 static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
2419 void *context)
2421 switch (nlattr->nla_type) {
2422 case AF_INET:
2423 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2424 NULL,
2425 host_to_target_data_inet_nlattr);
2426 case AF_INET6:
2427 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2428 NULL,
2429 host_to_target_data_inet6_nlattr);
2430 default:
2431 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
2432 break;
2434 return 0;
2437 static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
2439 uint32_t *u32;
2440 struct rtnl_link_stats *st;
2441 struct rtnl_link_stats64 *st64;
2442 struct rtnl_link_ifmap *map;
2443 struct linkinfo_context li_context;
2445 switch (rtattr->rta_type) {
2446 /* binary stream */
2447 case QEMU_IFLA_ADDRESS:
2448 case QEMU_IFLA_BROADCAST:
2449 /* string */
2450 case QEMU_IFLA_IFNAME:
2451 case QEMU_IFLA_QDISC:
2452 break;
2453 /* uin8_t */
2454 case QEMU_IFLA_OPERSTATE:
2455 case QEMU_IFLA_LINKMODE:
2456 case QEMU_IFLA_CARRIER:
2457 case QEMU_IFLA_PROTO_DOWN:
2458 break;
2459 /* uint32_t */
2460 case QEMU_IFLA_MTU:
2461 case QEMU_IFLA_LINK:
2462 case QEMU_IFLA_WEIGHT:
2463 case QEMU_IFLA_TXQLEN:
2464 case QEMU_IFLA_CARRIER_CHANGES:
2465 case QEMU_IFLA_NUM_RX_QUEUES:
2466 case QEMU_IFLA_NUM_TX_QUEUES:
2467 case QEMU_IFLA_PROMISCUITY:
2468 case QEMU_IFLA_EXT_MASK:
2469 case QEMU_IFLA_LINK_NETNSID:
2470 case QEMU_IFLA_GROUP:
2471 case QEMU_IFLA_MASTER:
2472 case QEMU_IFLA_NUM_VF:
2473 case QEMU_IFLA_GSO_MAX_SEGS:
2474 case QEMU_IFLA_GSO_MAX_SIZE:
2475 u32 = RTA_DATA(rtattr);
2476 *u32 = tswap32(*u32);
2477 break;
2478 /* struct rtnl_link_stats */
2479 case QEMU_IFLA_STATS:
2480 st = RTA_DATA(rtattr);
2481 st->rx_packets = tswap32(st->rx_packets);
2482 st->tx_packets = tswap32(st->tx_packets);
2483 st->rx_bytes = tswap32(st->rx_bytes);
2484 st->tx_bytes = tswap32(st->tx_bytes);
2485 st->rx_errors = tswap32(st->rx_errors);
2486 st->tx_errors = tswap32(st->tx_errors);
2487 st->rx_dropped = tswap32(st->rx_dropped);
2488 st->tx_dropped = tswap32(st->tx_dropped);
2489 st->multicast = tswap32(st->multicast);
2490 st->collisions = tswap32(st->collisions);
2492 /* detailed rx_errors: */
2493 st->rx_length_errors = tswap32(st->rx_length_errors);
2494 st->rx_over_errors = tswap32(st->rx_over_errors);
2495 st->rx_crc_errors = tswap32(st->rx_crc_errors);
2496 st->rx_frame_errors = tswap32(st->rx_frame_errors);
2497 st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
2498 st->rx_missed_errors = tswap32(st->rx_missed_errors);
2500 /* detailed tx_errors */
2501 st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
2502 st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
2503 st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
2504 st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
2505 st->tx_window_errors = tswap32(st->tx_window_errors);
2507 /* for cslip etc */
2508 st->rx_compressed = tswap32(st->rx_compressed);
2509 st->tx_compressed = tswap32(st->tx_compressed);
2510 break;
2511 /* struct rtnl_link_stats64 */
2512 case QEMU_IFLA_STATS64:
2513 st64 = RTA_DATA(rtattr);
2514 st64->rx_packets = tswap64(st64->rx_packets);
2515 st64->tx_packets = tswap64(st64->tx_packets);
2516 st64->rx_bytes = tswap64(st64->rx_bytes);
2517 st64->tx_bytes = tswap64(st64->tx_bytes);
2518 st64->rx_errors = tswap64(st64->rx_errors);
2519 st64->tx_errors = tswap64(st64->tx_errors);
2520 st64->rx_dropped = tswap64(st64->rx_dropped);
2521 st64->tx_dropped = tswap64(st64->tx_dropped);
2522 st64->multicast = tswap64(st64->multicast);
2523 st64->collisions = tswap64(st64->collisions);
2525 /* detailed rx_errors: */
2526 st64->rx_length_errors = tswap64(st64->rx_length_errors);
2527 st64->rx_over_errors = tswap64(st64->rx_over_errors);
2528 st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
2529 st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
2530 st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
2531 st64->rx_missed_errors = tswap64(st64->rx_missed_errors);
2533 /* detailed tx_errors */
2534 st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
2535 st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
2536 st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
2537 st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
2538 st64->tx_window_errors = tswap64(st64->tx_window_errors);
2540 /* for cslip etc */
2541 st64->rx_compressed = tswap64(st64->rx_compressed);
2542 st64->tx_compressed = tswap64(st64->tx_compressed);
2543 break;
2544 /* struct rtnl_link_ifmap */
2545 case QEMU_IFLA_MAP:
2546 map = RTA_DATA(rtattr);
2547 map->mem_start = tswap64(map->mem_start);
2548 map->mem_end = tswap64(map->mem_end);
2549 map->base_addr = tswap64(map->base_addr);
2550 map->irq = tswap16(map->irq);
2551 break;
2552 /* nested */
2553 case QEMU_IFLA_LINKINFO:
2554 memset(&li_context, 0, sizeof(li_context));
2555 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2556 &li_context,
2557 host_to_target_data_linkinfo_nlattr);
2558 case QEMU_IFLA_AF_SPEC:
2559 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2560 NULL,
2561 host_to_target_data_spec_nlattr);
2562 default:
2563 gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
2564 break;
2566 return 0;
2569 static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
2571 uint32_t *u32;
2572 struct ifa_cacheinfo *ci;
2574 switch (rtattr->rta_type) {
2575 /* binary: depends on family type */
2576 case IFA_ADDRESS:
2577 case IFA_LOCAL:
2578 break;
2579 /* string */
2580 case IFA_LABEL:
2581 break;
2582 /* u32 */
2583 case IFA_FLAGS:
2584 case IFA_BROADCAST:
2585 u32 = RTA_DATA(rtattr);
2586 *u32 = tswap32(*u32);
2587 break;
2588 /* struct ifa_cacheinfo */
2589 case IFA_CACHEINFO:
2590 ci = RTA_DATA(rtattr);
2591 ci->ifa_prefered = tswap32(ci->ifa_prefered);
2592 ci->ifa_valid = tswap32(ci->ifa_valid);
2593 ci->cstamp = tswap32(ci->cstamp);
2594 ci->tstamp = tswap32(ci->tstamp);
2595 break;
2596 default:
2597 gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
2598 break;
2600 return 0;
2603 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
2605 uint32_t *u32;
2606 switch (rtattr->rta_type) {
2607 /* binary: depends on family type */
2608 case RTA_GATEWAY:
2609 case RTA_DST:
2610 case RTA_PREFSRC:
2611 break;
2612 /* u32 */
2613 case RTA_PRIORITY:
2614 case RTA_TABLE:
2615 case RTA_OIF:
2616 u32 = RTA_DATA(rtattr);
2617 *u32 = tswap32(*u32);
2618 break;
2619 default:
2620 gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
2621 break;
2623 return 0;
2626 static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
2627 uint32_t rtattr_len)
2629 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2630 host_to_target_data_link_rtattr);
2633 static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
2634 uint32_t rtattr_len)
2636 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2637 host_to_target_data_addr_rtattr);
2640 static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
2641 uint32_t rtattr_len)
2643 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2644 host_to_target_data_route_rtattr);
2647 static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
2649 uint32_t nlmsg_len;
2650 struct ifinfomsg *ifi;
2651 struct ifaddrmsg *ifa;
2652 struct rtmsg *rtm;
2654 nlmsg_len = nlh->nlmsg_len;
2655 switch (nlh->nlmsg_type) {
2656 case RTM_NEWLINK:
2657 case RTM_DELLINK:
2658 case RTM_GETLINK:
2659 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2660 ifi = NLMSG_DATA(nlh);
2661 ifi->ifi_type = tswap16(ifi->ifi_type);
2662 ifi->ifi_index = tswap32(ifi->ifi_index);
2663 ifi->ifi_flags = tswap32(ifi->ifi_flags);
2664 ifi->ifi_change = tswap32(ifi->ifi_change);
2665 host_to_target_link_rtattr(IFLA_RTA(ifi),
2666 nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
2668 break;
2669 case RTM_NEWADDR:
2670 case RTM_DELADDR:
2671 case RTM_GETADDR:
2672 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2673 ifa = NLMSG_DATA(nlh);
2674 ifa->ifa_index = tswap32(ifa->ifa_index);
2675 host_to_target_addr_rtattr(IFA_RTA(ifa),
2676 nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
2678 break;
2679 case RTM_NEWROUTE:
2680 case RTM_DELROUTE:
2681 case RTM_GETROUTE:
2682 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2683 rtm = NLMSG_DATA(nlh);
2684 rtm->rtm_flags = tswap32(rtm->rtm_flags);
2685 host_to_target_route_rtattr(RTM_RTA(rtm),
2686 nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
2688 break;
2689 default:
2690 return -TARGET_EINVAL;
2692 return 0;
2695 static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
2696 size_t len)
2698 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
2701 static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
2702 size_t len,
2703 abi_long (*target_to_host_rtattr)
2704 (struct rtattr *))
2706 abi_long ret;
2708 while (len >= sizeof(struct rtattr)) {
2709 if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
2710 tswap16(rtattr->rta_len) > len) {
2711 break;
2713 rtattr->rta_len = tswap16(rtattr->rta_len);
2714 rtattr->rta_type = tswap16(rtattr->rta_type);
2715 ret = target_to_host_rtattr(rtattr);
2716 if (ret < 0) {
2717 return ret;
2719 len -= RTA_ALIGN(rtattr->rta_len);
2720 rtattr = (struct rtattr *)(((char *)rtattr) +
2721 RTA_ALIGN(rtattr->rta_len));
2723 return 0;
2726 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2728 switch (rtattr->rta_type) {
2729 default:
2730 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
2731 break;
2733 return 0;
2736 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2738 switch (rtattr->rta_type) {
2739 /* binary: depends on family type */
2740 case IFA_LOCAL:
2741 case IFA_ADDRESS:
2742 break;
2743 default:
2744 gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
2745 break;
2747 return 0;
2750 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2752 uint32_t *u32;
2753 switch (rtattr->rta_type) {
2754 /* binary: depends on family type */
2755 case RTA_DST:
2756 case RTA_SRC:
2757 case RTA_GATEWAY:
2758 break;
2759 /* u32 */
2760 case RTA_PRIORITY:
2761 case RTA_OIF:
2762 u32 = RTA_DATA(rtattr);
2763 *u32 = tswap32(*u32);
2764 break;
2765 default:
2766 gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
2767 break;
2769 return 0;
2772 static void target_to_host_link_rtattr(struct rtattr *rtattr,
2773 uint32_t rtattr_len)
2775 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2776 target_to_host_data_link_rtattr);
2779 static void target_to_host_addr_rtattr(struct rtattr *rtattr,
2780 uint32_t rtattr_len)
2782 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2783 target_to_host_data_addr_rtattr);
2786 static void target_to_host_route_rtattr(struct rtattr *rtattr,
2787 uint32_t rtattr_len)
2789 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2790 target_to_host_data_route_rtattr);
2793 static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
2795 struct ifinfomsg *ifi;
2796 struct ifaddrmsg *ifa;
2797 struct rtmsg *rtm;
2799 switch (nlh->nlmsg_type) {
2800 case RTM_GETLINK:
2801 break;
2802 case RTM_NEWLINK:
2803 case RTM_DELLINK:
2804 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2805 ifi = NLMSG_DATA(nlh);
2806 ifi->ifi_type = tswap16(ifi->ifi_type);
2807 ifi->ifi_index = tswap32(ifi->ifi_index);
2808 ifi->ifi_flags = tswap32(ifi->ifi_flags);
2809 ifi->ifi_change = tswap32(ifi->ifi_change);
2810 target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
2811 NLMSG_LENGTH(sizeof(*ifi)));
2813 break;
2814 case RTM_GETADDR:
2815 case RTM_NEWADDR:
2816 case RTM_DELADDR:
2817 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2818 ifa = NLMSG_DATA(nlh);
2819 ifa->ifa_index = tswap32(ifa->ifa_index);
2820 target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
2821 NLMSG_LENGTH(sizeof(*ifa)));
2823 break;
2824 case RTM_GETROUTE:
2825 break;
2826 case RTM_NEWROUTE:
2827 case RTM_DELROUTE:
2828 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2829 rtm = NLMSG_DATA(nlh);
2830 rtm->rtm_flags = tswap32(rtm->rtm_flags);
2831 target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
2832 NLMSG_LENGTH(sizeof(*rtm)));
2834 break;
2835 default:
2836 return -TARGET_EOPNOTSUPP;
2838 return 0;
2841 static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
2843 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
2845 #endif /* CONFIG_RTNETLINK */
2847 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2849 switch (nlh->nlmsg_type) {
2850 default:
2851 gemu_log("Unknown host audit message type %d\n",
2852 nlh->nlmsg_type);
2853 return -TARGET_EINVAL;
2855 return 0;
2858 static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
2859 size_t len)
2861 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
2864 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2866 switch (nlh->nlmsg_type) {
2867 case AUDIT_USER:
2868 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2869 case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2870 break;
2871 default:
2872 gemu_log("Unknown target audit message type %d\n",
2873 nlh->nlmsg_type);
2874 return -TARGET_EINVAL;
2877 return 0;
2880 static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
2882 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
2885 /* do_setsockopt() Must return target values and target errnos. */
2886 static abi_long do_setsockopt(int sockfd, int level, int optname,
2887 abi_ulong optval_addr, socklen_t optlen)
2889 abi_long ret;
2890 int val;
2891 struct ip_mreqn *ip_mreq;
2892 struct ip_mreq_source *ip_mreq_source;
2894 switch(level) {
2895 case SOL_TCP:
2896 /* TCP options all take an 'int' value. */
2897 if (optlen < sizeof(uint32_t))
2898 return -TARGET_EINVAL;
2900 if (get_user_u32(val, optval_addr))
2901 return -TARGET_EFAULT;
2902 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2903 break;
2904 case SOL_IP:
2905 switch(optname) {
2906 case IP_TOS:
2907 case IP_TTL:
2908 case IP_HDRINCL:
2909 case IP_ROUTER_ALERT:
2910 case IP_RECVOPTS:
2911 case IP_RETOPTS:
2912 case IP_PKTINFO:
2913 case IP_MTU_DISCOVER:
2914 case IP_RECVERR:
2915 case IP_RECVTTL:
2916 case IP_RECVTOS:
2917 #ifdef IP_FREEBIND
2918 case IP_FREEBIND:
2919 #endif
2920 case IP_MULTICAST_TTL:
2921 case IP_MULTICAST_LOOP:
2922 val = 0;
2923 if (optlen >= sizeof(uint32_t)) {
2924 if (get_user_u32(val, optval_addr))
2925 return -TARGET_EFAULT;
2926 } else if (optlen >= 1) {
2927 if (get_user_u8(val, optval_addr))
2928 return -TARGET_EFAULT;
2930 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2931 break;
2932 case IP_ADD_MEMBERSHIP:
2933 case IP_DROP_MEMBERSHIP:
2934 if (optlen < sizeof (struct target_ip_mreq) ||
2935 optlen > sizeof (struct target_ip_mreqn))
2936 return -TARGET_EINVAL;
2938 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2939 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2940 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2941 break;
2943 case IP_BLOCK_SOURCE:
2944 case IP_UNBLOCK_SOURCE:
2945 case IP_ADD_SOURCE_MEMBERSHIP:
2946 case IP_DROP_SOURCE_MEMBERSHIP:
2947 if (optlen != sizeof (struct target_ip_mreq_source))
2948 return -TARGET_EINVAL;
2950 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2951 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2952 unlock_user (ip_mreq_source, optval_addr, 0);
2953 break;
2955 default:
2956 goto unimplemented;
2958 break;
2959 case SOL_IPV6:
2960 switch (optname) {
2961 case IPV6_MTU_DISCOVER:
2962 case IPV6_MTU:
2963 case IPV6_V6ONLY:
2964 case IPV6_RECVPKTINFO:
2965 case IPV6_UNICAST_HOPS:
2966 case IPV6_RECVERR:
2967 case IPV6_RECVHOPLIMIT:
2968 case IPV6_2292HOPLIMIT:
2969 case IPV6_CHECKSUM:
2970 val = 0;
2971 if (optlen < sizeof(uint32_t)) {
2972 return -TARGET_EINVAL;
2974 if (get_user_u32(val, optval_addr)) {
2975 return -TARGET_EFAULT;
2977 ret = get_errno(setsockopt(sockfd, level, optname,
2978 &val, sizeof(val)));
2979 break;
2980 case IPV6_PKTINFO:
2982 struct in6_pktinfo pki;
2984 if (optlen < sizeof(pki)) {
2985 return -TARGET_EINVAL;
2988 if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2989 return -TARGET_EFAULT;
2992 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2994 ret = get_errno(setsockopt(sockfd, level, optname,
2995 &pki, sizeof(pki)));
2996 break;
2998 default:
2999 goto unimplemented;
3001 break;
3002 case SOL_ICMPV6:
3003 switch (optname) {
3004 case ICMPV6_FILTER:
3006 struct icmp6_filter icmp6f;
3008 if (optlen > sizeof(icmp6f)) {
3009 optlen = sizeof(icmp6f);
3012 if (copy_from_user(&icmp6f, optval_addr, optlen)) {
3013 return -TARGET_EFAULT;
3016 for (val = 0; val < 8; val++) {
3017 icmp6f.data[val] = tswap32(icmp6f.data[val]);
3020 ret = get_errno(setsockopt(sockfd, level, optname,
3021 &icmp6f, optlen));
3022 break;
3024 default:
3025 goto unimplemented;
3027 break;
3028 case SOL_RAW:
3029 switch (optname) {
3030 case ICMP_FILTER:
3031 case IPV6_CHECKSUM:
3032 /* those take an u32 value */
3033 if (optlen < sizeof(uint32_t)) {
3034 return -TARGET_EINVAL;
3037 if (get_user_u32(val, optval_addr)) {
3038 return -TARGET_EFAULT;
3040 ret = get_errno(setsockopt(sockfd, level, optname,
3041 &val, sizeof(val)));
3042 break;
3044 default:
3045 goto unimplemented;
3047 break;
3048 case TARGET_SOL_SOCKET:
3049 switch (optname) {
3050 case TARGET_SO_RCVTIMEO:
3052 struct timeval tv;
3054 optname = SO_RCVTIMEO;
3056 set_timeout:
3057 if (optlen != sizeof(struct target_timeval)) {
3058 return -TARGET_EINVAL;
3061 if (copy_from_user_timeval(&tv, optval_addr)) {
3062 return -TARGET_EFAULT;
3065 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3066 &tv, sizeof(tv)));
3067 return ret;
3069 case TARGET_SO_SNDTIMEO:
3070 optname = SO_SNDTIMEO;
3071 goto set_timeout;
3072 case TARGET_SO_ATTACH_FILTER:
3074 struct target_sock_fprog *tfprog;
3075 struct target_sock_filter *tfilter;
3076 struct sock_fprog fprog;
3077 struct sock_filter *filter;
3078 int i;
3080 if (optlen != sizeof(*tfprog)) {
3081 return -TARGET_EINVAL;
3083 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
3084 return -TARGET_EFAULT;
3086 if (!lock_user_struct(VERIFY_READ, tfilter,
3087 tswapal(tfprog->filter), 0)) {
3088 unlock_user_struct(tfprog, optval_addr, 1);
3089 return -TARGET_EFAULT;
3092 fprog.len = tswap16(tfprog->len);
3093 filter = g_try_new(struct sock_filter, fprog.len);
3094 if (filter == NULL) {
3095 unlock_user_struct(tfilter, tfprog->filter, 1);
3096 unlock_user_struct(tfprog, optval_addr, 1);
3097 return -TARGET_ENOMEM;
3099 for (i = 0; i < fprog.len; i++) {
3100 filter[i].code = tswap16(tfilter[i].code);
3101 filter[i].jt = tfilter[i].jt;
3102 filter[i].jf = tfilter[i].jf;
3103 filter[i].k = tswap32(tfilter[i].k);
3105 fprog.filter = filter;
3107 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
3108 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
3109 g_free(filter);
3111 unlock_user_struct(tfilter, tfprog->filter, 1);
3112 unlock_user_struct(tfprog, optval_addr, 1);
3113 return ret;
3115 case TARGET_SO_BINDTODEVICE:
3117 char *dev_ifname, *addr_ifname;
3119 if (optlen > IFNAMSIZ - 1) {
3120 optlen = IFNAMSIZ - 1;
3122 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
3123 if (!dev_ifname) {
3124 return -TARGET_EFAULT;
3126 optname = SO_BINDTODEVICE;
3127 addr_ifname = alloca(IFNAMSIZ);
3128 memcpy(addr_ifname, dev_ifname, optlen);
3129 addr_ifname[optlen] = 0;
3130 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3131 addr_ifname, optlen));
3132 unlock_user (dev_ifname, optval_addr, 0);
3133 return ret;
3135 /* Options with 'int' argument. */
3136 case TARGET_SO_DEBUG:
3137 optname = SO_DEBUG;
3138 break;
3139 case TARGET_SO_REUSEADDR:
3140 optname = SO_REUSEADDR;
3141 break;
3142 case TARGET_SO_TYPE:
3143 optname = SO_TYPE;
3144 break;
3145 case TARGET_SO_ERROR:
3146 optname = SO_ERROR;
3147 break;
3148 case TARGET_SO_DONTROUTE:
3149 optname = SO_DONTROUTE;
3150 break;
3151 case TARGET_SO_BROADCAST:
3152 optname = SO_BROADCAST;
3153 break;
3154 case TARGET_SO_SNDBUF:
3155 optname = SO_SNDBUF;
3156 break;
3157 case TARGET_SO_SNDBUFFORCE:
3158 optname = SO_SNDBUFFORCE;
3159 break;
3160 case TARGET_SO_RCVBUF:
3161 optname = SO_RCVBUF;
3162 break;
3163 case TARGET_SO_RCVBUFFORCE:
3164 optname = SO_RCVBUFFORCE;
3165 break;
3166 case TARGET_SO_KEEPALIVE:
3167 optname = SO_KEEPALIVE;
3168 break;
3169 case TARGET_SO_OOBINLINE:
3170 optname = SO_OOBINLINE;
3171 break;
3172 case TARGET_SO_NO_CHECK:
3173 optname = SO_NO_CHECK;
3174 break;
3175 case TARGET_SO_PRIORITY:
3176 optname = SO_PRIORITY;
3177 break;
3178 #ifdef SO_BSDCOMPAT
3179 case TARGET_SO_BSDCOMPAT:
3180 optname = SO_BSDCOMPAT;
3181 break;
3182 #endif
3183 case TARGET_SO_PASSCRED:
3184 optname = SO_PASSCRED;
3185 break;
3186 case TARGET_SO_PASSSEC:
3187 optname = SO_PASSSEC;
3188 break;
3189 case TARGET_SO_TIMESTAMP:
3190 optname = SO_TIMESTAMP;
3191 break;
3192 case TARGET_SO_RCVLOWAT:
3193 optname = SO_RCVLOWAT;
3194 break;
3195 default:
3196 goto unimplemented;
3198 if (optlen < sizeof(uint32_t))
3199 return -TARGET_EINVAL;
3201 if (get_user_u32(val, optval_addr))
3202 return -TARGET_EFAULT;
3203 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
3204 break;
3205 default:
3206 unimplemented:
3207 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
3208 ret = -TARGET_ENOPROTOOPT;
3210 return ret;
/* do_getsockopt() Must return target values and target errnos. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            /* Returns a struct ucred; convert it field by field into the
             * target's struct target_ucred layout. */
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            /* Copy back at most as many bytes as the caller asked for. */
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            /* Unknown socket-level options are passed through unchanged
             * and treated as plain int options. */
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* NOTE(review): sizeof(lv) (socklen_t) is used as the host buffer
         * size here; sizeof(val) would state the intent more directly,
         * though both are 4 bytes — confirm before changing. */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            /* SO_TYPE reports a host socket type; map it back to the
             * target's numbering. */
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            /* Caller supplied a buffer smaller than 4 bytes: write a
             * single byte, like the kernel does. */
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                /* Small buffer and a value that fits in one byte:
                 * return it as a single byte. */
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/* Convert target low/high pair representing file offset into the host
 * low/high pair. This function doesn't handle offsets bigger than 64 bits
 * as the kernel doesn't handle them either.
 */
static void target_to_host_low_high(abi_ulong tlow,
                                    abi_ulong thigh,
                                    unsigned long *hlow,
                                    unsigned long *hhigh)
{
    /* Shift twice by half the width instead of once by the full width:
     * when TARGET_LONG_BITS is 64 a single shift by 64 would be undefined
     * behavior in C, while two 32-bit shifts are well defined (and still
     * discard thigh, which a 64-bit target cannot supply anyway). */
    uint64_t off = tlow |
        ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
        TARGET_LONG_BITS / 2;

    /* Same trick on the host side: with 64-bit unsigned long the two
     * half-width right shifts make *hhigh zero without UB. */
    *hlow = off;
    *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
}
/* Lock a guest iovec array into host memory, returning a host struct iovec
 * array of 'count' entries, or NULL with errno set on failure.  The caller
 * must release it with unlock_iovec().  Bad buffers after the first are
 * turned into zero-length entries so the syscall performs a partial
 * transfer, mirroring kernel behavior.
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support. */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored. */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    /* All remaining entries become zero-length too. */
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* Clamp the running total so it never exceeds max_len. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind: unlock only the entries locked so far (i is the index of
     * the entry that failed). */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
/* Undo lock_iovec(): release each guest buffer (copying data back to the
 * guest when 'copy' is set, i.e. after a receive-style operation) and free
 * the host iovec array.
 */
static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            if (len < 0) {
                /* lock_iovec() stopped locking at the first negative
                 * length, so stop unlocking there too. */
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    g_free(vec);
}
/* Translate a target socket type value (base type plus TARGET_SOCK_CLOEXEC
 * and TARGET_SOCK_NONBLOCK flag bits) into the host encoding, in place.
 * Returns 0 on success, -TARGET_EINVAL when a requested flag cannot be
 * represented or emulated on this host.
 */
static inline int target_to_host_sock_type(int *type)
{
    int host_type = 0;
    int target_type = *type;

    switch (target_type & TARGET_SOCK_TYPE_MASK) {
    case TARGET_SOCK_DGRAM:
        host_type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        host_type = SOCK_STREAM;
        break;
    default:
        /* Other base types are passed through unchanged — presumably
         * target and host share the numbering for them; confirm. */
        host_type = target_type & TARGET_SOCK_TYPE_MASK;
        break;
    }
    if (target_type & TARGET_SOCK_CLOEXEC) {
#if defined(SOCK_CLOEXEC)
        host_type |= SOCK_CLOEXEC;
#else
        return -TARGET_EINVAL;
#endif
    }
    if (target_type & TARGET_SOCK_NONBLOCK) {
#if defined(SOCK_NONBLOCK)
        host_type |= SOCK_NONBLOCK;
#elif !defined(O_NONBLOCK)
        /* Without SOCK_NONBLOCK, sock_flags_fixup() emulates the flag via
         * O_NONBLOCK; if even that is unavailable, fail. */
        return -TARGET_EINVAL;
#endif
    }
    *type = host_type;
    return 0;
}
3563 /* Try to emulate socket type flags after socket creation. */
3564 static int sock_flags_fixup(int fd, int target_type)
3566 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3567 if (target_type & TARGET_SOCK_NONBLOCK) {
3568 int flags = fcntl(fd, F_GETFL);
3569 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3570 close(fd);
3571 return -TARGET_EINVAL;
3574 #endif
3575 return fd;
/* Translate a guest sockaddr for an (obsolete) SOCK_PACKET socket: the
 * structure is copied verbatim and only the family field is byte-swapped.
 */
static abi_long packet_target_to_host_sockaddr(void *host_addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    struct sockaddr *addr = host_addr;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr) {
        return -TARGET_EFAULT;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = tswap16(target_saddr->sa_family);
    /* spkt_protocol is big-endian */

    unlock_user(target_saddr, target_addr, 0);
    return 0;
}
/* fd translator for SOCK_PACKET sockets: only the sockaddr needs
 * conversion, the payload is passed through untouched. */
static TargetFdTrans target_packet_trans = {
    .target_to_host_addr = packet_target_to_host_sockaddr,
};
3602 #ifdef CONFIG_RTNETLINK
3603 static abi_long netlink_route_target_to_host(void *buf, size_t len)
3605 abi_long ret;
3607 ret = target_to_host_nlmsg_route(buf, len);
3608 if (ret < 0) {
3609 return ret;
3612 return len;
3615 static abi_long netlink_route_host_to_target(void *buf, size_t len)
3617 abi_long ret;
3619 ret = host_to_target_nlmsg_route(buf, len);
3620 if (ret < 0) {
3621 return ret;
3624 return len;
/* fd translator for NETLINK_ROUTE sockets: message payloads need
 * conversion in both directions. */
static TargetFdTrans target_netlink_route_trans = {
    .target_to_host_data = netlink_route_target_to_host,
    .host_to_target_data = netlink_route_host_to_target,
};
3631 #endif /* CONFIG_RTNETLINK */
3633 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
3635 abi_long ret;
3637 ret = target_to_host_nlmsg_audit(buf, len);
3638 if (ret < 0) {
3639 return ret;
3642 return len;
3645 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
3647 abi_long ret;
3649 ret = host_to_target_nlmsg_audit(buf, len);
3650 if (ret < 0) {
3651 return ret;
3654 return len;
/* fd translator for NETLINK_AUDIT sockets: payloads need conversion in
 * both directions. */
static TargetFdTrans target_netlink_audit_trans = {
    .target_to_host_data = netlink_audit_target_to_host,
    .host_to_target_data = netlink_audit_host_to_target,
};
/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
    int target_type = type;
    int ret;

    /* Translate the base type and TARGET_SOCK_* flag bits. */
    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    /* Whitelist netlink protocols: only those with a payload translator
     * (or that need none, like KOBJECT_UEVENT strings) are supported;
     * anything else would leak untranslated messages to the host. */
    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -EPFNOSUPPORT;
    }

    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        /* Packet socket protocol numbers are 16-bit values kept in
         * network byte order (see packet(7)); swap from target order. */
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case :
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                /* Unreachable: filtered by the whitelist above. */
                g_assert_not_reached();
            }
        }
    }
    return ret;
}
3716 /* do_bind() Must return target values and target errnos. */
3717 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3718 socklen_t addrlen)
3720 void *addr;
3721 abi_long ret;
3723 if ((int)addrlen < 0) {
3724 return -TARGET_EINVAL;
3727 addr = alloca(addrlen+1);
3729 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3730 if (ret)
3731 return ret;
3733 return get_errno(bind(sockfd, addr, addrlen));
3736 /* do_connect() Must return target values and target errnos. */
3737 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3738 socklen_t addrlen)
3740 void *addr;
3741 abi_long ret;
3743 if ((int)addrlen < 0) {
3744 return -TARGET_EINVAL;
3747 addr = alloca(addrlen+1);
3749 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3750 if (ret)
3751 return ret;
3753 return get_errno(safe_connect(sockfd, addr, addrlen));
/* do_sendrecvmsg_locked() Must return target values and target errnos. */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* NOTE(review): twice the guest control length is reserved —
     * presumably because host cmsg headers/padding can be larger than the
     * target's; confirm the factor is a sufficient bound. */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            void *host_msg;

            /* Translate a heap copy of the payload so the guest's locked
             * buffer stays untouched.
             * NOTE(review): only msg_iov[0] is translated on this path —
             * confirm multi-element iovecs are not expected here. */
            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            /* Preserve the byte count; ret is reused for the translation
             * steps below and restored at the end. */
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                                       len);
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }
                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
3860 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3861 int flags, int send)
3863 abi_long ret;
3864 struct target_msghdr *msgp;
3866 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3867 msgp,
3868 target_msg,
3869 send ? 1 : 0)) {
3870 return -TARGET_EFAULT;
3872 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3873 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3874 return ret;
3877 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3878 * so it might not have this *mmsg-specific flag either.
3880 #ifndef MSG_WAITFORONE
3881 #define MSG_WAITFORONE 0x10000
3882 #endif
/* Emulate sendmmsg()/recvmmsg(): process up to vlen messages with
 * do_sendrecvmsg_locked(), storing each per-message byte count back into
 * the guest's mmsghdr array. */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    /* Cap the vector length like the kernel does. */
    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    /* Copy back only the headers actually processed. */
    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
/* do_accept4() Must return target values and target errnos. */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    /* Translate the accept4 flag bits via the fcntl flag table. */
    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    if (target_addr == 0) {
        return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    /* Note: EINVAL (not EFAULT) here as well, matching the kernel. */
    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
3961 /* do_getpeername() Must return target values and target errnos. */
3962 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3963 abi_ulong target_addrlen_addr)
3965 socklen_t addrlen;
3966 void *addr;
3967 abi_long ret;
3969 if (get_user_u32(addrlen, target_addrlen_addr))
3970 return -TARGET_EFAULT;
3972 if ((int)addrlen < 0) {
3973 return -TARGET_EINVAL;
3976 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3977 return -TARGET_EFAULT;
3979 addr = alloca(addrlen);
3981 ret = get_errno(getpeername(fd, addr, &addrlen));
3982 if (!is_error(ret)) {
3983 host_to_target_sockaddr(target_addr, addr, addrlen);
3984 if (put_user_u32(addrlen, target_addrlen_addr))
3985 ret = -TARGET_EFAULT;
3987 return ret;
3990 /* do_getsockname() Must return target values and target errnos. */
3991 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3992 abi_ulong target_addrlen_addr)
3994 socklen_t addrlen;
3995 void *addr;
3996 abi_long ret;
3998 if (get_user_u32(addrlen, target_addrlen_addr))
3999 return -TARGET_EFAULT;
4001 if ((int)addrlen < 0) {
4002 return -TARGET_EINVAL;
4005 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
4006 return -TARGET_EFAULT;
4008 addr = alloca(addrlen);
4010 ret = get_errno(getsockname(fd, addr, &addrlen));
4011 if (!is_error(ret)) {
4012 host_to_target_sockaddr(target_addr, addr, addrlen);
4013 if (put_user_u32(addrlen, target_addrlen_addr))
4014 ret = -TARGET_EFAULT;
4016 return ret;
4019 /* do_socketpair() Must return target values and target errnos. */
4020 static abi_long do_socketpair(int domain, int type, int protocol,
4021 abi_ulong target_tab_addr)
4023 int tab[2];
4024 abi_long ret;
4026 target_to_host_sock_type(&type);
4028 ret = get_errno(socketpair(domain, type, protocol, tab));
4029 if (!is_error(ret)) {
4030 if (put_user_s32(tab[0], target_tab_addr)
4031 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
4032 ret = -TARGET_EFAULT;
4034 return ret;
/* do_sendto() Must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL;  /* holds the locked guest buffer while host_msg
                             * points at a translated heap copy */
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        /* The fd's translator may rewrite the payload; translate a heap
         * copy so the guest's buffer is left untouched. */
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    /* Free the translated copy (if any) and swap the original buffer back
     * so unlock_user() releases the guest mapping. */
    if (copy_msg) {
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            /* Translate the received payload in place; ret becomes the
             * translator's result.
             * NOTE(review): a translator failure does not skip the
             * sockaddr copy-back below — confirm that is intended. */
            ret = fd_trans_host_to_target_data(fd)(host_msg, ret);
        }
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* Success path: copy the buffer back to the guest. */
        unlock_user(host_msg, msg, len);
    } else {
fail:
        /* Error path (also reached via goto): release without copy-back. */
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
4129 #ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    /* Argument counts indexed by socketcall operation number; used to know
     * how many abi_longs to fetch from the guest argument vector. */
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
4220 #endif
#define N_SHM_REGIONS 32

/* Bookkeeping for guest shared-memory attachments, so the size of a
 * mapping can be looked up by its guest start address later. */
static struct shm_region {
    abi_ulong start;  /* guest address the segment is attached at */
    abi_ulong size;   /* size of the mapping in bytes */
    bool in_use;      /* whether this slot tracks a live attachment */
} shm_regions[N_SHM_REGIONS];
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
struct target_semid64_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;  /* pads 32-bit layout to match the 64-bit one */
#endif
    abi_ulong sem_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;
};
#endif
/* Read the ipc_perm embedded in a guest struct target_semid64_ds at
 * target_addr into the host's struct ipc_perm, byte-swapping each field. */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
    /* mode and __seq are 32 bits wide on some target ABIs, 16 on others. */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/* Write a host struct ipc_perm into the ipc_perm embedded in the guest's
 * struct target_semid64_ds at target_addr, byte-swapping each field. */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
    /* mode and __seq widths vary per target ABI, mirroring the
     * target_to_host direction. */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
/* Convert a guest struct target_semid64_ds into the host's struct
 * semid_ds (permissions plus the otime/ctime/nsems fields). */
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    /* Delegates the embedded ipc_perm (re-locks target_addr itself). */
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/* Convert a host struct semid_ds into the guest's struct
 * target_semid64_ds at target_addr. */
static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    /* Delegates the embedded ipc_perm (re-locks target_addr itself). */
    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
/* Target layout of struct seminfo (semctl IPC_INFO/SEM_INFO); every
 * field is a plain int, mirroring the host structure. */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
/* Copy a host struct seminfo into guest memory at target_addr, swapping
 * each int field. */
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
/* Host-side semctl() argument union; glibc requires the caller to define
 * this themselves (see semctl(2)). */
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};
/* Target view of union semun: the pointer members are guest addresses. */
union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
/* Read the guest's semaphore value array (for SETALL) into a freshly
 * allocated host array.  On success the caller owns *host_array; the
 * companion host_to_target_semarray() frees it. */
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    /* IPC_STAT tells us how many semaphores are in the set. */
    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = g_try_new(unsigned short, nsems);
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array) {
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}
4420 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4421 unsigned short **host_array)
4423 int nsems;
4424 unsigned short *array;
4425 union semun semun;
4426 struct semid_ds semid_ds;
4427 int i, ret;
4429 semun.buf = &semid_ds;
4431 ret = semctl(semid, 0, IPC_STAT, semun);
4432 if (ret == -1)
4433 return get_errno(ret);
4435 nsems = semid_ds.sem_nsems;
4437 array = lock_user(VERIFY_WRITE, target_addr,
4438 nsems*sizeof(unsigned short), 0);
4439 if (!array)
4440 return -TARGET_EFAULT;
4442 for(i=0; i<nsems; i++) {
4443 __put_user((*host_array)[i], &array[i]);
4445 g_free(*host_array);
4446 unlock_user(array, target_addr, 1);
4448 return 0;
/* Emulate semctl(2): convert the command-dependent fourth argument
 * between guest and host representations and forward to the host.
 * target_arg is the raw guest word holding the target_semun union.
 * Returns the host result or a negative target errno; unrecognized
 * commands yield -TARGET_EINVAL. */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    cmd &= 0xff;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element.  To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        /* array ownership passes through the two helpers; on error they
         * have already cleaned up, so a plain return is safe. */
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* These take no fourth argument worth converting. */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
/* Guest-ABI layout of struct sembuf (one semop operation). */
struct target_sembuf {
    unsigned short sem_num;   /* semaphore index in the set */
    short sem_op;             /* operation (+n / -n / 0) */
    short sem_flg;            /* IPC_NOWAIT, SEM_UNDO */
};
4527 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4528 abi_ulong target_addr,
4529 unsigned nsops)
4531 struct target_sembuf *target_sembuf;
4532 int i;
4534 target_sembuf = lock_user(VERIFY_READ, target_addr,
4535 nsops*sizeof(struct target_sembuf), 1);
4536 if (!target_sembuf)
4537 return -TARGET_EFAULT;
4539 for(i=0; i<nsops; i++) {
4540 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4541 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4542 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4545 unlock_user(target_sembuf, target_addr, 0);
4547 return 0;
/* Emulate semop(2): convert the guest sembuf array and issue the
 * signal-safe host semtimedop with no timeout.
 * NOTE(review): sops is a VLA sized by the guest-controlled nsops, so a
 * pathologically large value could overflow the host stack before the
 * kernel gets a chance to reject it (E2BIG/SEMOPM) -- consider bounding
 * nsops here. */
static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
{
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
}
/* Guest-ABI layout of struct msqid_ds.  On 32-bit targets the kernel
 * ABI reserves a padding word after each time field, hence the
 * conditional __unusedN members. */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;        /* time of last msgsnd() */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;        /* time of last msgrcv() */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;        /* time of last change */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;     /* current number of bytes on queue */
    abi_ulong msg_qnum;         /* number of messages on queue */
    abi_ulong msg_qbytes;       /* max number of bytes on queue */
    abi_ulong msg_lspid;        /* pid of last msgsnd() */
    abi_ulong msg_lrpid;        /* pid of last msgrcv() */
    abi_ulong __unused4;
    abi_ulong __unused5;
};
4584 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4585 abi_ulong target_addr)
4587 struct target_msqid_ds *target_md;
4589 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4590 return -TARGET_EFAULT;
4591 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4592 return -TARGET_EFAULT;
4593 host_md->msg_stime = tswapal(target_md->msg_stime);
4594 host_md->msg_rtime = tswapal(target_md->msg_rtime);
4595 host_md->msg_ctime = tswapal(target_md->msg_ctime);
4596 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4597 host_md->msg_qnum = tswapal(target_md->msg_qnum);
4598 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4599 host_md->msg_lspid = tswapal(target_md->msg_lspid);
4600 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4601 unlock_user_struct(target_md, target_addr, 0);
4602 return 0;
4605 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4606 struct msqid_ds *host_md)
4608 struct target_msqid_ds *target_md;
4610 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4611 return -TARGET_EFAULT;
4612 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4613 return -TARGET_EFAULT;
4614 target_md->msg_stime = tswapal(host_md->msg_stime);
4615 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4616 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4617 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4618 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4619 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4620 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4621 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4622 unlock_user_struct(target_md, target_addr, 1);
4623 return 0;
/* Guest-ABI layout of struct msginfo (IPC_INFO / MSG_INFO result). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4637 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4638 struct msginfo *host_msginfo)
4640 struct target_msginfo *target_msginfo;
4641 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4642 return -TARGET_EFAULT;
4643 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4644 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4645 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4646 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4647 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4648 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4649 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4650 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4651 unlock_user_struct(target_msginfo, target_addr, 1);
4652 return 0;
/* Emulate msgctl(2): convert the buffer argument (when any) between
 * guest and host layouts around the host call.  ptr is the guest
 * address of the command-specific buffer.  Unrecognized commands yield
 * -TARGET_EINVAL. */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* The kernel takes a msqid_ds pointer but writes a msginfo for
         * these commands, hence the cast. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
/* Guest-ABI layout of struct msgbuf; mtext is really variable length,
 * the [1] is the conventional trailing-array placeholder. */
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};
/* Emulate msgsnd(2): copy the guest message (mtype + msgsz bytes of
 * mtext) into a temporary host msgbuf and send it with the signal-safe
 * wrapper.  Returns the host result or a negative target errno. */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* sizeof(long) covers the host mtype preceding the text. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
/* Emulate msgrcv(2): receive into a temporary host msgbuf, then copy
 * the received text and the (byteswapped) mtype back to the guest
 * msgbuf at msgp.  Returns the number of bytes received or a negative
 * target errno. */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));

    if (ret > 0) {
        /* The text follows the abi_long mtype in the guest struct. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    /* NOTE(review): on msgrcv failure host_mb->mtype is uninitialized
     * here; the swapped garbage is written to the guest but the error
     * return still dominates -- confirm this is intentional. */
    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
4762 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4763 abi_ulong target_addr)
4765 struct target_shmid_ds *target_sd;
4767 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4768 return -TARGET_EFAULT;
4769 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4770 return -TARGET_EFAULT;
4771 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4772 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4773 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4774 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4775 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4776 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4777 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4778 unlock_user_struct(target_sd, target_addr, 0);
4779 return 0;
4782 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4783 struct shmid_ds *host_sd)
4785 struct target_shmid_ds *target_sd;
4787 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4788 return -TARGET_EFAULT;
4789 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4790 return -TARGET_EFAULT;
4791 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4792 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4793 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4794 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4795 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4796 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4797 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4798 unlock_user_struct(target_sd, target_addr, 1);
4799 return 0;
/* Guest-ABI layout of struct shminfo (IPC_INFO result for shmctl). */
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};
4810 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4811 struct shminfo *host_shminfo)
4813 struct target_shminfo *target_shminfo;
4814 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4815 return -TARGET_EFAULT;
4816 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4817 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4818 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4819 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4820 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4821 unlock_user_struct(target_shminfo, target_addr, 1);
4822 return 0;
/* Guest-ABI layout of struct shm_info (SHM_INFO result for shmctl). */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
4834 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4835 struct shm_info *host_shm_info)
4837 struct target_shm_info *target_shm_info;
4838 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4839 return -TARGET_EFAULT;
4840 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4841 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4842 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4843 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4844 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4845 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4846 unlock_user_struct(target_shm_info, target_addr, 1);
4847 return 0;
/* Emulate shmctl(2): convert the buffer argument between guest and host
 * layouts around the host call.  buf is the guest address of the
 * command-specific structure.  Unrecognized commands yield
 * -TARGET_EINVAL. */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* Kernel writes a shminfo through the shmid_ds pointer here. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        /* Likewise, SHM_INFO fills a struct shm_info. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
/* Default SHMLBA for the guest: one target page. */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif
/* Emulate shmat(2): attach a SysV shared memory segment into the guest
 * address space.  Handles SHMLBA alignment/rounding, picks a free guest
 * range when shmaddr is 0, marks the attached pages valid, and records
 * the region so do_shmdt() can later clear the page flags.
 * Returns the guest attach address or a negative target errno. */
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;
    abi_ulong shmlba;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    /* Misaligned addresses are rounded down with SHM_RND, rejected
     * otherwise -- mirroring kernel behaviour. */
    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* No address requested: find a free guest range ourselves. */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP: we own the range mmap_find_vma returned. */
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* Remember the attachment so do_shmdt() can undo the page flags. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
/* Emulate shmdt(2): detach the segment previously attached at shmaddr,
 * clearing the page flags recorded by do_shmat() for that region.
 * Returns 0 or a negative target errno. */
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;
    abi_long rv;

    mmap_lock();

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
            shm_regions[i].in_use = false;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }
    rv = get_errno(shmdt(g2h(shmaddr)));

    mmap_unlock();

    return rv;
}
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos.
 *
 * Demultiplexer for the legacy ipc(2) syscall: 'call' selects the SysV
 * IPC operation (the high 16 bits carry an ABI version) and the
 * remaining arguments are forwarded to the per-operation helpers. */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old ABI: ptr points at a {msgp, msgtyp} pair. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* The attach address is returned via *third. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* iBCS2 emulator entry point: not supported. */
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
/* kernel structure types definitions */
/* X-macro expansion of syscall_types.h: first pass builds the
 * STRUCT_<name> enum tags, second pass builds the per-struct argtype
 * descriptor arrays used by the thunk marshalling layer. */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

typedef struct IOCTLEntry IOCTLEntry;

/* Custom marshaller signature for ioctls that need more than generic
 * thunk conversion. */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

/* One entry per emulated ioctl: target/host command numbers, data
 * direction, optional custom handler, and the argument descriptor. */
struct IOCTLEntry {
    int target_cmd;
    unsigned int host_cmd;
    const char *name;
    int access;                 /* IOC_R / IOC_W / IOC_RW */
    do_ioctl_fn *do_ioctl;      /* NULL for generic thunk handling */
    const argtype arg_type[5];
};

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

/* Upper bound for the fixed-size ioctl argument staging buffer. */
#define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

/* Custom handler for FS_IOC_FIEMAP. */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* Reject extent counts whose buffer size would overflow 32 bits. */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
5232 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
5233 int fd, int cmd, abi_long arg)
5235 const argtype *arg_type = ie->arg_type;
5236 int target_size;
5237 void *argptr;
5238 int ret;
5239 struct ifconf *host_ifconf;
5240 uint32_t outbufsz;
5241 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
5242 int target_ifreq_size;
5243 int nb_ifreq;
5244 int free_buf = 0;
5245 int i;
5246 int target_ifc_len;
5247 abi_long target_ifc_buf;
5248 int host_ifc_len;
5249 char *host_ifc_buf;
5251 assert(arg_type[0] == TYPE_PTR);
5252 assert(ie->access == IOC_RW);
5254 arg_type++;
5255 target_size = thunk_type_size(arg_type, 0);
5257 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5258 if (!argptr)
5259 return -TARGET_EFAULT;
5260 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5261 unlock_user(argptr, arg, 0);
5263 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
5264 target_ifc_len = host_ifconf->ifc_len;
5265 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
5267 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
5268 nb_ifreq = target_ifc_len / target_ifreq_size;
5269 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
5271 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
5272 if (outbufsz > MAX_STRUCT_SIZE) {
5273 /* We can't fit all the extents into the fixed size buffer.
5274 * Allocate one that is large enough and use it instead.
5276 host_ifconf = malloc(outbufsz);
5277 if (!host_ifconf) {
5278 return -TARGET_ENOMEM;
5280 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
5281 free_buf = 1;
5283 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
5285 host_ifconf->ifc_len = host_ifc_len;
5286 host_ifconf->ifc_buf = host_ifc_buf;
5288 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
5289 if (!is_error(ret)) {
5290 /* convert host ifc_len to target ifc_len */
5292 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
5293 target_ifc_len = nb_ifreq * target_ifreq_size;
5294 host_ifconf->ifc_len = target_ifc_len;
5296 /* restore target ifc_buf */
5298 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
5300 /* copy struct ifconf to target user */
5302 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5303 if (!argptr)
5304 return -TARGET_EFAULT;
5305 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
5306 unlock_user(argptr, arg, target_size);
5308 /* copy ifreq[] to target user */
5310 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
5311 for (i = 0; i < nb_ifreq ; i++) {
5312 thunk_convert(argptr + i * target_ifreq_size,
5313 host_ifc_buf + i * sizeof(struct ifreq),
5314 ifreq_arg_type, THUNK_TARGET);
5316 unlock_user(argptr, target_ifc_buf, target_ifc_len);
5319 if (free_buf) {
5320 free(host_ifconf);
5323 return ret;
5326 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5327 int cmd, abi_long arg)
5329 void *argptr;
5330 struct dm_ioctl *host_dm;
5331 abi_long guest_data;
5332 uint32_t guest_data_size;
5333 int target_size;
5334 const argtype *arg_type = ie->arg_type;
5335 abi_long ret;
5336 void *big_buf = NULL;
5337 char *host_data;
5339 arg_type++;
5340 target_size = thunk_type_size(arg_type, 0);
5341 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5342 if (!argptr) {
5343 ret = -TARGET_EFAULT;
5344 goto out;
5346 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5347 unlock_user(argptr, arg, 0);
5349 /* buf_temp is too small, so fetch things into a bigger buffer */
5350 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5351 memcpy(big_buf, buf_temp, target_size);
5352 buf_temp = big_buf;
5353 host_dm = big_buf;
5355 guest_data = arg + host_dm->data_start;
5356 if ((guest_data - arg) < 0) {
5357 ret = -TARGET_EINVAL;
5358 goto out;
5360 guest_data_size = host_dm->data_size - host_dm->data_start;
5361 host_data = (char*)host_dm + host_dm->data_start;
5363 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5364 if (!argptr) {
5365 ret = -TARGET_EFAULT;
5366 goto out;
5369 switch (ie->host_cmd) {
5370 case DM_REMOVE_ALL:
5371 case DM_LIST_DEVICES:
5372 case DM_DEV_CREATE:
5373 case DM_DEV_REMOVE:
5374 case DM_DEV_SUSPEND:
5375 case DM_DEV_STATUS:
5376 case DM_DEV_WAIT:
5377 case DM_TABLE_STATUS:
5378 case DM_TABLE_CLEAR:
5379 case DM_TABLE_DEPS:
5380 case DM_LIST_VERSIONS:
5381 /* no input data */
5382 break;
5383 case DM_DEV_RENAME:
5384 case DM_DEV_SET_GEOMETRY:
5385 /* data contains only strings */
5386 memcpy(host_data, argptr, guest_data_size);
5387 break;
5388 case DM_TARGET_MSG:
5389 memcpy(host_data, argptr, guest_data_size);
5390 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5391 break;
5392 case DM_TABLE_LOAD:
5394 void *gspec = argptr;
5395 void *cur_data = host_data;
5396 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5397 int spec_size = thunk_type_size(arg_type, 0);
5398 int i;
5400 for (i = 0; i < host_dm->target_count; i++) {
5401 struct dm_target_spec *spec = cur_data;
5402 uint32_t next;
5403 int slen;
5405 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5406 slen = strlen((char*)gspec + spec_size) + 1;
5407 next = spec->next;
5408 spec->next = sizeof(*spec) + slen;
5409 strcpy((char*)&spec[1], gspec + spec_size);
5410 gspec += next;
5411 cur_data += spec->next;
5413 break;
5415 default:
5416 ret = -TARGET_EINVAL;
5417 unlock_user(argptr, guest_data, 0);
5418 goto out;
5420 unlock_user(argptr, guest_data, 0);
5422 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5423 if (!is_error(ret)) {
5424 guest_data = arg + host_dm->data_start;
5425 guest_data_size = host_dm->data_size - host_dm->data_start;
5426 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5427 switch (ie->host_cmd) {
5428 case DM_REMOVE_ALL:
5429 case DM_DEV_CREATE:
5430 case DM_DEV_REMOVE:
5431 case DM_DEV_RENAME:
5432 case DM_DEV_SUSPEND:
5433 case DM_DEV_STATUS:
5434 case DM_TABLE_LOAD:
5435 case DM_TABLE_CLEAR:
5436 case DM_TARGET_MSG:
5437 case DM_DEV_SET_GEOMETRY:
5438 /* no return data */
5439 break;
5440 case DM_LIST_DEVICES:
5442 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5443 uint32_t remaining_data = guest_data_size;
5444 void *cur_data = argptr;
5445 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5446 int nl_size = 12; /* can't use thunk_size due to alignment */
5448 while (1) {
5449 uint32_t next = nl->next;
5450 if (next) {
5451 nl->next = nl_size + (strlen(nl->name) + 1);
5453 if (remaining_data < nl->next) {
5454 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5455 break;
5457 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5458 strcpy(cur_data + nl_size, nl->name);
5459 cur_data += nl->next;
5460 remaining_data -= nl->next;
5461 if (!next) {
5462 break;
5464 nl = (void*)nl + next;
5466 break;
5468 case DM_DEV_WAIT:
5469 case DM_TABLE_STATUS:
5471 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5472 void *cur_data = argptr;
5473 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5474 int spec_size = thunk_type_size(arg_type, 0);
5475 int i;
5477 for (i = 0; i < host_dm->target_count; i++) {
5478 uint32_t next = spec->next;
5479 int slen = strlen((char*)&spec[1]) + 1;
5480 spec->next = (cur_data - argptr) + spec_size + slen;
5481 if (guest_data_size < spec->next) {
5482 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5483 break;
5485 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5486 strcpy(cur_data + spec_size, (char*)&spec[1]);
5487 cur_data = argptr + spec->next;
5488 spec = (void*)host_dm + host_dm->data_start + next;
5490 break;
5492 case DM_TABLE_DEPS:
5494 void *hdata = (void*)host_dm + host_dm->data_start;
5495 int count = *(uint32_t*)hdata;
5496 uint64_t *hdev = hdata + 8;
5497 uint64_t *gdev = argptr + 8;
5498 int i;
5500 *(uint32_t*)argptr = tswap32(count);
5501 for (i = 0; i < count; i++) {
5502 *gdev = tswap64(*hdev);
5503 gdev++;
5504 hdev++;
5506 break;
5508 case DM_LIST_VERSIONS:
5510 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5511 uint32_t remaining_data = guest_data_size;
5512 void *cur_data = argptr;
5513 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5514 int vers_size = thunk_type_size(arg_type, 0);
5516 while (1) {
5517 uint32_t next = vers->next;
5518 if (next) {
5519 vers->next = vers_size + (strlen(vers->name) + 1);
5521 if (remaining_data < vers->next) {
5522 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5523 break;
5525 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5526 strcpy(cur_data + vers_size, vers->name);
5527 cur_data += vers->next;
5528 remaining_data -= vers->next;
5529 if (!next) {
5530 break;
5532 vers = (void*)vers + next;
5534 break;
5536 default:
5537 unlock_user(argptr, guest_data, 0);
5538 ret = -TARGET_EINVAL;
5539 goto out;
5541 unlock_user(argptr, guest_data, guest_data_size);
5543 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5544 if (!argptr) {
5545 ret = -TARGET_EFAULT;
5546 goto out;
5548 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5549 unlock_user(argptr, arg, target_size);
5551 out:
5552 g_free(big_buf);
5553 return ret;
/* Custom handler for BLKPG: the blkpg_ioctl_arg carries an embedded
 * pointer to a struct blkpg_partition payload, which must be fetched
 * and converted separately before the host call. */
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
5609 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5610 int fd, int cmd, abi_long arg)
5612 const argtype *arg_type = ie->arg_type;
5613 const StructEntry *se;
5614 const argtype *field_types;
5615 const int *dst_offsets, *src_offsets;
5616 int target_size;
5617 void *argptr;
5618 abi_ulong *target_rt_dev_ptr;
5619 unsigned long *host_rt_dev_ptr;
5620 abi_long ret;
5621 int i;
5623 assert(ie->access == IOC_W);
5624 assert(*arg_type == TYPE_PTR);
5625 arg_type++;
5626 assert(*arg_type == TYPE_STRUCT);
5627 target_size = thunk_type_size(arg_type, 0);
5628 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5629 if (!argptr) {
5630 return -TARGET_EFAULT;
5632 arg_type++;
5633 assert(*arg_type == (int)STRUCT_rtentry);
5634 se = struct_entries + *arg_type++;
5635 assert(se->convert[0] == NULL);
5636 /* convert struct here to be able to catch rt_dev string */
5637 field_types = se->field_types;
5638 dst_offsets = se->field_offsets[THUNK_HOST];
5639 src_offsets = se->field_offsets[THUNK_TARGET];
5640 for (i = 0; i < se->nb_fields; i++) {
5641 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5642 assert(*field_types == TYPE_PTRVOID);
5643 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5644 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5645 if (*target_rt_dev_ptr != 0) {
5646 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5647 tswapal(*target_rt_dev_ptr));
5648 if (!*host_rt_dev_ptr) {
5649 unlock_user(argptr, arg, 0);
5650 return -TARGET_EFAULT;
5652 } else {
5653 *host_rt_dev_ptr = 0;
5655 field_types++;
5656 continue;
5658 field_types = thunk_convert(buf_temp + dst_offsets[i],
5659 argptr + src_offsets[i],
5660 field_types, THUNK_HOST);
5662 unlock_user(argptr, arg, 0);
5664 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5665 if (*host_rt_dev_ptr != 0) {
5666 unlock_user((void *)*host_rt_dev_ptr,
5667 *target_rt_dev_ptr, 0);
5669 return ret;
5672 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5673 int fd, int cmd, abi_long arg)
5675 int sig = target_to_host_signal(arg);
5676 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
#ifdef TIOCGPTPEER
/* TIOCGPTPEER: the argument is a set of open() flags; translate the
 * target flag bits into the host's encoding before the ioctl. */
static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int host_flags = target_to_host_bitmask(arg, fcntl_flags_tbl);

    return get_errno(safe_ioctl(fd, ie->host_cmd, host_flags));
}
#endif
5688 static IOCTLEntry ioctl_entries[] = {
5689 #define IOCTL(cmd, access, ...) \
5690 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5691 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5692 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5693 #define IOCTL_IGNORE(cmd) \
5694 { TARGET_ ## cmd, 0, #cmd },
5695 #include "ioctls.h"
5696 { 0, 0, },
5699 /* ??? Implement proper locking for ioctls. */
5700 /* do_ioctl() Must return target values and target errnos. */
5701 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5703 const IOCTLEntry *ie;
5704 const argtype *arg_type;
5705 abi_long ret;
5706 uint8_t buf_temp[MAX_STRUCT_SIZE];
5707 int target_size;
5708 void *argptr;
5710 ie = ioctl_entries;
5711 for(;;) {
5712 if (ie->target_cmd == 0) {
5713 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5714 return -TARGET_ENOSYS;
5716 if (ie->target_cmd == cmd)
5717 break;
5718 ie++;
5720 arg_type = ie->arg_type;
5721 #if defined(DEBUG)
5722 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
5723 #endif
5724 if (ie->do_ioctl) {
5725 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5726 } else if (!ie->host_cmd) {
5727 /* Some architectures define BSD ioctls in their headers
5728 that are not implemented in Linux. */
5729 return -TARGET_ENOSYS;
5732 switch(arg_type[0]) {
5733 case TYPE_NULL:
5734 /* no argument */
5735 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5736 break;
5737 case TYPE_PTRVOID:
5738 case TYPE_INT:
5739 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5740 break;
5741 case TYPE_PTR:
5742 arg_type++;
5743 target_size = thunk_type_size(arg_type, 0);
5744 switch(ie->access) {
5745 case IOC_R:
5746 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5747 if (!is_error(ret)) {
5748 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5749 if (!argptr)
5750 return -TARGET_EFAULT;
5751 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5752 unlock_user(argptr, arg, target_size);
5754 break;
5755 case IOC_W:
5756 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5757 if (!argptr)
5758 return -TARGET_EFAULT;
5759 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5760 unlock_user(argptr, arg, 0);
5761 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5762 break;
5763 default:
5764 case IOC_RW:
5765 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5766 if (!argptr)
5767 return -TARGET_EFAULT;
5768 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5769 unlock_user(argptr, arg, 0);
5770 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5771 if (!is_error(ret)) {
5772 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5773 if (!argptr)
5774 return -TARGET_EFAULT;
5775 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5776 unlock_user(argptr, arg, target_size);
5778 break;
5780 break;
5781 default:
5782 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5783 (long)cmd, arg_type[0]);
5784 ret = -TARGET_ENOSYS;
5785 break;
5787 return ret;
5790 static const bitmask_transtbl iflag_tbl[] = {
5791 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5792 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5793 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5794 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5795 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5796 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5797 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5798 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5799 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5800 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5801 { TARGET_IXON, TARGET_IXON, IXON, IXON },
5802 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5803 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5804 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5805 { 0, 0, 0, 0 }
5808 static const bitmask_transtbl oflag_tbl[] = {
5809 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5810 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5811 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5812 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5813 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5814 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5815 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5816 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5817 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5818 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5819 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5820 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5821 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5822 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5823 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5824 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5825 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5826 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5827 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5828 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5829 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5830 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5831 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5832 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5833 { 0, 0, 0, 0 }
5836 static const bitmask_transtbl cflag_tbl[] = {
5837 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5838 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5839 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5840 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5841 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5842 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5843 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5844 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5845 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5846 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5847 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5848 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5849 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5850 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5851 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5852 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5853 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5854 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5855 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5856 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5857 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5858 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5859 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5860 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5861 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5862 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5863 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5864 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5865 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5866 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5867 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5868 { 0, 0, 0, 0 }
5871 static const bitmask_transtbl lflag_tbl[] = {
5872 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5873 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5874 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5875 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5876 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5877 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5878 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5879 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5880 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5881 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5882 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5883 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5884 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5885 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5886 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5887 { 0, 0, 0, 0 }
5890 static void target_to_host_termios (void *dst, const void *src)
5892 struct host_termios *host = dst;
5893 const struct target_termios *target = src;
5895 host->c_iflag =
5896 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5897 host->c_oflag =
5898 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5899 host->c_cflag =
5900 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5901 host->c_lflag =
5902 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5903 host->c_line = target->c_line;
5905 memset(host->c_cc, 0, sizeof(host->c_cc));
5906 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5907 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5908 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5909 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5910 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5911 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5912 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5913 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5914 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5915 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5916 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5917 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5918 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5919 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5920 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5921 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5922 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5925 static void host_to_target_termios (void *dst, const void *src)
5927 struct target_termios *target = dst;
5928 const struct host_termios *host = src;
5930 target->c_iflag =
5931 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5932 target->c_oflag =
5933 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5934 target->c_cflag =
5935 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5936 target->c_lflag =
5937 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5938 target->c_line = host->c_line;
5940 memset(target->c_cc, 0, sizeof(target->c_cc));
5941 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5942 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5943 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5944 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5945 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5946 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5947 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5948 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5949 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5950 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5951 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5952 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5953 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5954 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5955 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5956 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5957 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5960 static const StructEntry struct_termios_def = {
5961 .convert = { host_to_target_termios, target_to_host_termios },
5962 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5963 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5966 static bitmask_transtbl mmap_flags_tbl[] = {
5967 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5968 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5969 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5970 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5971 MAP_ANONYMOUS, MAP_ANONYMOUS },
5972 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5973 MAP_GROWSDOWN, MAP_GROWSDOWN },
5974 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5975 MAP_DENYWRITE, MAP_DENYWRITE },
5976 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5977 MAP_EXECUTABLE, MAP_EXECUTABLE },
5978 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5979 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5980 MAP_NORESERVE, MAP_NORESERVE },
5981 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5982 /* MAP_STACK had been ignored by the kernel for quite some time.
5983 Recognize it for the target insofar as we do not want to pass
5984 it through to the host. */
5985 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5986 { 0, 0, 0, 0 }
5989 #if defined(TARGET_I386)
/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;
5994 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5996 int size;
5997 void *p;
5999 if (!ldt_table)
6000 return 0;
6001 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6002 if (size > bytecount)
6003 size = bytecount;
6004 p = lock_user(VERIFY_WRITE, ptr, size, 0);
6005 if (!p)
6006 return -TARGET_EFAULT;
6007 /* ??? Should this by byteswapped? */
6008 memcpy(p, ldt_table, size);
6009 unlock_user(p, ptr, size);
6010 return size;
6013 /* XXX: add locking support */
6014 static abi_long write_ldt(CPUX86State *env,
6015 abi_ulong ptr, unsigned long bytecount, int oldmode)
6017 struct target_modify_ldt_ldt_s ldt_info;
6018 struct target_modify_ldt_ldt_s *target_ldt_info;
6019 int seg_32bit, contents, read_exec_only, limit_in_pages;
6020 int seg_not_present, useable, lm;
6021 uint32_t *lp, entry_1, entry_2;
6023 if (bytecount != sizeof(ldt_info))
6024 return -TARGET_EINVAL;
6025 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6026 return -TARGET_EFAULT;
6027 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6028 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6029 ldt_info.limit = tswap32(target_ldt_info->limit);
6030 ldt_info.flags = tswap32(target_ldt_info->flags);
6031 unlock_user_struct(target_ldt_info, ptr, 0);
6033 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6034 return -TARGET_EINVAL;
6035 seg_32bit = ldt_info.flags & 1;
6036 contents = (ldt_info.flags >> 1) & 3;
6037 read_exec_only = (ldt_info.flags >> 3) & 1;
6038 limit_in_pages = (ldt_info.flags >> 4) & 1;
6039 seg_not_present = (ldt_info.flags >> 5) & 1;
6040 useable = (ldt_info.flags >> 6) & 1;
6041 #ifdef TARGET_ABI32
6042 lm = 0;
6043 #else
6044 lm = (ldt_info.flags >> 7) & 1;
6045 #endif
6046 if (contents == 3) {
6047 if (oldmode)
6048 return -TARGET_EINVAL;
6049 if (seg_not_present == 0)
6050 return -TARGET_EINVAL;
6052 /* allocate the LDT */
6053 if (!ldt_table) {
6054 env->ldt.base = target_mmap(0,
6055 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6056 PROT_READ|PROT_WRITE,
6057 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6058 if (env->ldt.base == -1)
6059 return -TARGET_ENOMEM;
6060 memset(g2h(env->ldt.base), 0,
6061 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6062 env->ldt.limit = 0xffff;
6063 ldt_table = g2h(env->ldt.base);
6066 /* NOTE: same code as Linux kernel */
6067 /* Allow LDTs to be cleared by the user. */
6068 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6069 if (oldmode ||
6070 (contents == 0 &&
6071 read_exec_only == 1 &&
6072 seg_32bit == 0 &&
6073 limit_in_pages == 0 &&
6074 seg_not_present == 1 &&
6075 useable == 0 )) {
6076 entry_1 = 0;
6077 entry_2 = 0;
6078 goto install;
6082 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6083 (ldt_info.limit & 0x0ffff);
6084 entry_2 = (ldt_info.base_addr & 0xff000000) |
6085 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6086 (ldt_info.limit & 0xf0000) |
6087 ((read_exec_only ^ 1) << 9) |
6088 (contents << 10) |
6089 ((seg_not_present ^ 1) << 15) |
6090 (seg_32bit << 22) |
6091 (limit_in_pages << 23) |
6092 (lm << 21) |
6093 0x7000;
6094 if (!oldmode)
6095 entry_2 |= (useable << 20);
6097 /* Install the new entry ... */
6098 install:
6099 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6100 lp[0] = tswap32(entry_1);
6101 lp[1] = tswap32(entry_2);
6102 return 0;
6105 /* specific and weird i386 syscalls */
6106 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6107 unsigned long bytecount)
6109 abi_long ret;
6111 switch (func) {
6112 case 0:
6113 ret = read_ldt(ptr, bytecount);
6114 break;
6115 case 1:
6116 ret = write_ldt(env, ptr, bytecount, 1);
6117 break;
6118 case 0x11:
6119 ret = write_ldt(env, ptr, bytecount, 0);
6120 break;
6121 default:
6122 ret = -TARGET_ENOSYS;
6123 break;
6125 return ret;
6128 #if defined(TARGET_I386) && defined(TARGET_ABI32)
6129 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6131 uint64_t *gdt_table = g2h(env->gdt.base);
6132 struct target_modify_ldt_ldt_s ldt_info;
6133 struct target_modify_ldt_ldt_s *target_ldt_info;
6134 int seg_32bit, contents, read_exec_only, limit_in_pages;
6135 int seg_not_present, useable, lm;
6136 uint32_t *lp, entry_1, entry_2;
6137 int i;
6139 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6140 if (!target_ldt_info)
6141 return -TARGET_EFAULT;
6142 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6143 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6144 ldt_info.limit = tswap32(target_ldt_info->limit);
6145 ldt_info.flags = tswap32(target_ldt_info->flags);
6146 if (ldt_info.entry_number == -1) {
6147 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6148 if (gdt_table[i] == 0) {
6149 ldt_info.entry_number = i;
6150 target_ldt_info->entry_number = tswap32(i);
6151 break;
6155 unlock_user_struct(target_ldt_info, ptr, 1);
6157 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6158 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6159 return -TARGET_EINVAL;
6160 seg_32bit = ldt_info.flags & 1;
6161 contents = (ldt_info.flags >> 1) & 3;
6162 read_exec_only = (ldt_info.flags >> 3) & 1;
6163 limit_in_pages = (ldt_info.flags >> 4) & 1;
6164 seg_not_present = (ldt_info.flags >> 5) & 1;
6165 useable = (ldt_info.flags >> 6) & 1;
6166 #ifdef TARGET_ABI32
6167 lm = 0;
6168 #else
6169 lm = (ldt_info.flags >> 7) & 1;
6170 #endif
6172 if (contents == 3) {
6173 if (seg_not_present == 0)
6174 return -TARGET_EINVAL;
6177 /* NOTE: same code as Linux kernel */
6178 /* Allow LDTs to be cleared by the user. */
6179 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6180 if ((contents == 0 &&
6181 read_exec_only == 1 &&
6182 seg_32bit == 0 &&
6183 limit_in_pages == 0 &&
6184 seg_not_present == 1 &&
6185 useable == 0 )) {
6186 entry_1 = 0;
6187 entry_2 = 0;
6188 goto install;
6192 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6193 (ldt_info.limit & 0x0ffff);
6194 entry_2 = (ldt_info.base_addr & 0xff000000) |
6195 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6196 (ldt_info.limit & 0xf0000) |
6197 ((read_exec_only ^ 1) << 9) |
6198 (contents << 10) |
6199 ((seg_not_present ^ 1) << 15) |
6200 (seg_32bit << 22) |
6201 (limit_in_pages << 23) |
6202 (useable << 20) |
6203 (lm << 21) |
6204 0x7000;
6206 /* Install the new entry ... */
6207 install:
6208 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6209 lp[0] = tswap32(entry_1);
6210 lp[1] = tswap32(entry_2);
6211 return 0;
6214 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6216 struct target_modify_ldt_ldt_s *target_ldt_info;
6217 uint64_t *gdt_table = g2h(env->gdt.base);
6218 uint32_t base_addr, limit, flags;
6219 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6220 int seg_not_present, useable, lm;
6221 uint32_t *lp, entry_1, entry_2;
6223 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6224 if (!target_ldt_info)
6225 return -TARGET_EFAULT;
6226 idx = tswap32(target_ldt_info->entry_number);
6227 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6228 idx > TARGET_GDT_ENTRY_TLS_MAX) {
6229 unlock_user_struct(target_ldt_info, ptr, 1);
6230 return -TARGET_EINVAL;
6232 lp = (uint32_t *)(gdt_table + idx);
6233 entry_1 = tswap32(lp[0]);
6234 entry_2 = tswap32(lp[1]);
6236 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6237 contents = (entry_2 >> 10) & 3;
6238 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6239 seg_32bit = (entry_2 >> 22) & 1;
6240 limit_in_pages = (entry_2 >> 23) & 1;
6241 useable = (entry_2 >> 20) & 1;
6242 #ifdef TARGET_ABI32
6243 lm = 0;
6244 #else
6245 lm = (entry_2 >> 21) & 1;
6246 #endif
6247 flags = (seg_32bit << 0) | (contents << 1) |
6248 (read_exec_only << 3) | (limit_in_pages << 4) |
6249 (seg_not_present << 5) | (useable << 6) | (lm << 7);
6250 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
6251 base_addr = (entry_1 >> 16) |
6252 (entry_2 & 0xff000000) |
6253 ((entry_2 & 0xff) << 16);
6254 target_ldt_info->base_addr = tswapal(base_addr);
6255 target_ldt_info->limit = tswap32(limit);
6256 target_ldt_info->flags = tswap32(flags);
6257 unlock_user_struct(target_ldt_info, ptr, 1);
6258 return 0;
6260 #endif /* TARGET_I386 && TARGET_ABI32 */
6262 #ifndef TARGET_ABI32
6263 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6265 abi_long ret = 0;
6266 abi_ulong val;
6267 int idx;
6269 switch(code) {
6270 case TARGET_ARCH_SET_GS:
6271 case TARGET_ARCH_SET_FS:
6272 if (code == TARGET_ARCH_SET_GS)
6273 idx = R_GS;
6274 else
6275 idx = R_FS;
6276 cpu_x86_load_seg(env, idx, 0);
6277 env->segs[idx].base = addr;
6278 break;
6279 case TARGET_ARCH_GET_GS:
6280 case TARGET_ARCH_GET_FS:
6281 if (code == TARGET_ARCH_GET_GS)
6282 idx = R_GS;
6283 else
6284 idx = R_FS;
6285 val = env->segs[idx].base;
6286 if (put_user(val, addr, abi_ulong))
6287 ret = -TARGET_EFAULT;
6288 break;
6289 default:
6290 ret = -TARGET_EINVAL;
6291 break;
6293 return ret;
6295 #endif
6297 #endif /* defined(TARGET_I386) */
6299 #define NEW_STACK_SIZE 0x40000
6302 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6303 typedef struct {
6304 CPUArchState *env;
6305 pthread_mutex_t mutex;
6306 pthread_cond_t cond;
6307 pthread_t thread;
6308 uint32_t tid;
6309 abi_ulong child_tidptr;
6310 abi_ulong parent_tidptr;
6311 sigset_t sigmask;
6312 } new_thread_info;
6314 static void *clone_func(void *arg)
6316 new_thread_info *info = arg;
6317 CPUArchState *env;
6318 CPUState *cpu;
6319 TaskState *ts;
6321 rcu_register_thread();
6322 tcg_register_thread();
6323 env = info->env;
6324 cpu = ENV_GET_CPU(env);
6325 thread_cpu = cpu;
6326 ts = (TaskState *)cpu->opaque;
6327 info->tid = gettid();
6328 task_settid(ts);
6329 if (info->child_tidptr)
6330 put_user_u32(info->tid, info->child_tidptr);
6331 if (info->parent_tidptr)
6332 put_user_u32(info->tid, info->parent_tidptr);
6333 /* Enable signals. */
6334 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6335 /* Signal to the parent that we're ready. */
6336 pthread_mutex_lock(&info->mutex);
6337 pthread_cond_broadcast(&info->cond);
6338 pthread_mutex_unlock(&info->mutex);
6339 /* Wait until the parent has finished initializing the tls state. */
6340 pthread_mutex_lock(&clone_lock);
6341 pthread_mutex_unlock(&clone_lock);
6342 cpu_loop(env);
6343 /* never exits */
6344 return NULL;
6347 /* do_fork() Must return host values and target errnos (unlike most
6348 do_*() functions). */
6349 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6350 abi_ulong parent_tidptr, target_ulong newtls,
6351 abi_ulong child_tidptr)
6353 CPUState *cpu = ENV_GET_CPU(env);
6354 int ret;
6355 TaskState *ts;
6356 CPUState *new_cpu;
6357 CPUArchState *new_env;
6358 sigset_t sigmask;
6360 flags &= ~CLONE_IGNORED_FLAGS;
6362 /* Emulate vfork() with fork() */
6363 if (flags & CLONE_VFORK)
6364 flags &= ~(CLONE_VFORK | CLONE_VM);
6366 if (flags & CLONE_VM) {
6367 TaskState *parent_ts = (TaskState *)cpu->opaque;
6368 new_thread_info info;
6369 pthread_attr_t attr;
6371 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6372 (flags & CLONE_INVALID_THREAD_FLAGS)) {
6373 return -TARGET_EINVAL;
6376 ts = g_new0(TaskState, 1);
6377 init_task_state(ts);
6379 /* Grab a mutex so that thread setup appears atomic. */
6380 pthread_mutex_lock(&clone_lock);
6382 /* we create a new CPU instance. */
6383 new_env = cpu_copy(env);
6384 /* Init regs that differ from the parent. */
6385 cpu_clone_regs(new_env, newsp);
6386 new_cpu = ENV_GET_CPU(new_env);
6387 new_cpu->opaque = ts;
6388 ts->bprm = parent_ts->bprm;
6389 ts->info = parent_ts->info;
6390 ts->signal_mask = parent_ts->signal_mask;
6392 if (flags & CLONE_CHILD_CLEARTID) {
6393 ts->child_tidptr = child_tidptr;
6396 if (flags & CLONE_SETTLS) {
6397 cpu_set_tls (new_env, newtls);
6400 memset(&info, 0, sizeof(info));
6401 pthread_mutex_init(&info.mutex, NULL);
6402 pthread_mutex_lock(&info.mutex);
6403 pthread_cond_init(&info.cond, NULL);
6404 info.env = new_env;
6405 if (flags & CLONE_CHILD_SETTID) {
6406 info.child_tidptr = child_tidptr;
6408 if (flags & CLONE_PARENT_SETTID) {
6409 info.parent_tidptr = parent_tidptr;
6412 ret = pthread_attr_init(&attr);
6413 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6414 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6415 /* It is not safe to deliver signals until the child has finished
6416 initializing, so temporarily block all signals. */
6417 sigfillset(&sigmask);
6418 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6420 /* If this is our first additional thread, we need to ensure we
6421 * generate code for parallel execution and flush old translations.
6423 if (!parallel_cpus) {
6424 parallel_cpus = true;
6425 tb_flush(cpu);
6428 ret = pthread_create(&info.thread, &attr, clone_func, &info);
6429 /* TODO: Free new CPU state if thread creation failed. */
6431 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6432 pthread_attr_destroy(&attr);
6433 if (ret == 0) {
6434 /* Wait for the child to initialize. */
6435 pthread_cond_wait(&info.cond, &info.mutex);
6436 ret = info.tid;
6437 } else {
6438 ret = -1;
6440 pthread_mutex_unlock(&info.mutex);
6441 pthread_cond_destroy(&info.cond);
6442 pthread_mutex_destroy(&info.mutex);
6443 pthread_mutex_unlock(&clone_lock);
6444 } else {
6445 /* if no CLONE_VM, we consider it is a fork */
6446 if (flags & CLONE_INVALID_FORK_FLAGS) {
6447 return -TARGET_EINVAL;
6450 /* We can't support custom termination signals */
6451 if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6452 return -TARGET_EINVAL;
6455 if (block_signals()) {
6456 return -TARGET_ERESTARTSYS;
6459 fork_start();
6460 ret = fork();
6461 if (ret == 0) {
6462 /* Child Process. */
6463 cpu_clone_regs(env, newsp);
6464 fork_end(1);
6465 /* There is a race condition here. The parent process could
6466 theoretically read the TID in the child process before the child
6467 tid is set. This would require using either ptrace
6468 (not implemented) or having *_tidptr to point at a shared memory
6469 mapping. We can't repeat the spinlock hack used above because
6470 the child process gets its own copy of the lock. */
6471 if (flags & CLONE_CHILD_SETTID)
6472 put_user_u32(gettid(), child_tidptr);
6473 if (flags & CLONE_PARENT_SETTID)
6474 put_user_u32(gettid(), parent_tidptr);
6475 ts = (TaskState *)cpu->opaque;
6476 if (flags & CLONE_SETTLS)
6477 cpu_set_tls (env, newtls);
6478 if (flags & CLONE_CHILD_CLEARTID)
6479 ts->child_tidptr = child_tidptr;
6480 } else {
6481 fork_end(0);
6484 return ret;
6487 /* warning : doesn't handle linux specific flags... */
6488 static int target_to_host_fcntl_cmd(int cmd)
6490 switch(cmd) {
6491 case TARGET_F_DUPFD:
6492 case TARGET_F_GETFD:
6493 case TARGET_F_SETFD:
6494 case TARGET_F_GETFL:
6495 case TARGET_F_SETFL:
6496 return cmd;
6497 case TARGET_F_GETLK:
6498 return F_GETLK64;
6499 case TARGET_F_SETLK:
6500 return F_SETLK64;
6501 case TARGET_F_SETLKW:
6502 return F_SETLKW64;
6503 case TARGET_F_GETOWN:
6504 return F_GETOWN;
6505 case TARGET_F_SETOWN:
6506 return F_SETOWN;
6507 case TARGET_F_GETSIG:
6508 return F_GETSIG;
6509 case TARGET_F_SETSIG:
6510 return F_SETSIG;
6511 #if TARGET_ABI_BITS == 32
6512 case TARGET_F_GETLK64:
6513 return F_GETLK64;
6514 case TARGET_F_SETLK64:
6515 return F_SETLK64;
6516 case TARGET_F_SETLKW64:
6517 return F_SETLKW64;
6518 #endif
6519 case TARGET_F_SETLEASE:
6520 return F_SETLEASE;
6521 case TARGET_F_GETLEASE:
6522 return F_GETLEASE;
6523 #ifdef F_DUPFD_CLOEXEC
6524 case TARGET_F_DUPFD_CLOEXEC:
6525 return F_DUPFD_CLOEXEC;
6526 #endif
6527 case TARGET_F_NOTIFY:
6528 return F_NOTIFY;
6529 #ifdef F_GETOWN_EX
6530 case TARGET_F_GETOWN_EX:
6531 return F_GETOWN_EX;
6532 #endif
6533 #ifdef F_SETOWN_EX
6534 case TARGET_F_SETOWN_EX:
6535 return F_SETOWN_EX;
6536 #endif
6537 #ifdef F_SETPIPE_SZ
6538 case TARGET_F_SETPIPE_SZ:
6539 return F_SETPIPE_SZ;
6540 case TARGET_F_GETPIPE_SZ:
6541 return F_GETPIPE_SZ;
6542 #endif
6543 default:
6544 return -TARGET_EINVAL;
6546 return -TARGET_EINVAL;
/* Shared switch body for the two flock-type converters below; each
 * instantiation supplies its own TRANSTBL_CONVERT direction. */
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    TRANSTBL_CONVERT(F_EXLCK); \
    TRANSTBL_CONVERT(F_SHLCK); \
    }
6558 static int target_to_host_flock(int type)
6560 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6561 FLOCK_TRANSTBL
6562 #undef TRANSTBL_CONVERT
6563 return -TARGET_EINVAL;
6566 static int host_to_target_flock(int type)
6568 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6569 FLOCK_TRANSTBL
6570 #undef TRANSTBL_CONVERT
6571 /* if we don't know how to convert the value coming
6572 * from the host we copy to the target field as-is
6574 return type;
6577 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6578 abi_ulong target_flock_addr)
6580 struct target_flock *target_fl;
6581 int l_type;
6583 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6584 return -TARGET_EFAULT;
6587 __get_user(l_type, &target_fl->l_type);
6588 l_type = target_to_host_flock(l_type);
6589 if (l_type < 0) {
6590 return l_type;
6592 fl->l_type = l_type;
6593 __get_user(fl->l_whence, &target_fl->l_whence);
6594 __get_user(fl->l_start, &target_fl->l_start);
6595 __get_user(fl->l_len, &target_fl->l_len);
6596 __get_user(fl->l_pid, &target_fl->l_pid);
6597 unlock_user_struct(target_fl, target_flock_addr, 0);
6598 return 0;
6601 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6602 const struct flock64 *fl)
6604 struct target_flock *target_fl;
6605 short l_type;
6607 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6608 return -TARGET_EFAULT;
6611 l_type = host_to_target_flock(fl->l_type);
6612 __put_user(l_type, &target_fl->l_type);
6613 __put_user(fl->l_whence, &target_fl->l_whence);
6614 __put_user(fl->l_start, &target_fl->l_start);
6615 __put_user(fl->l_len, &target_fl->l_len);
6616 __put_user(fl->l_pid, &target_fl->l_pid);
6617 unlock_user_struct(target_fl, target_flock_addr, 1);
6618 return 0;
6621 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6622 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
#if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/* ARM OABI lays out struct flock64 differently from EABI; these two
 * helpers mirror copy_{from,to}_user_flock64 for that layout. */
static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_oabi_flock64 *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        /* Fix: release the locked guest struct on this error path too. */
        unlock_user_struct(target_fl, target_flock_addr, 0);
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_oabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
#endif
6670 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6671 abi_ulong target_flock_addr)
6673 struct target_flock64 *target_fl;
6674 int l_type;
6676 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6677 return -TARGET_EFAULT;
6680 __get_user(l_type, &target_fl->l_type);
6681 l_type = target_to_host_flock(l_type);
6682 if (l_type < 0) {
6683 return l_type;
6685 fl->l_type = l_type;
6686 __get_user(fl->l_whence, &target_fl->l_whence);
6687 __get_user(fl->l_start, &target_fl->l_start);
6688 __get_user(fl->l_len, &target_fl->l_len);
6689 __get_user(fl->l_pid, &target_fl->l_pid);
6690 unlock_user_struct(target_fl, target_flock_addr, 0);
6691 return 0;
6694 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6695 const struct flock64 *fl)
6697 struct target_flock64 *target_fl;
6698 short l_type;
6700 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6701 return -TARGET_EFAULT;
6704 l_type = host_to_target_flock(fl->l_type);
6705 __put_user(l_type, &target_fl->l_type);
6706 __put_user(fl->l_whence, &target_fl->l_whence);
6707 __put_user(fl->l_start, &target_fl->l_start);
6708 __put_user(fl->l_len, &target_fl->l_len);
6709 __put_user(fl->l_pid, &target_fl->l_pid);
6710 unlock_user_struct(target_fl, target_flock_addr, 1);
6711 return 0;
6714 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6716 struct flock64 fl64;
6717 #ifdef F_GETOWN_EX
6718 struct f_owner_ex fox;
6719 struct target_f_owner_ex *target_fox;
6720 #endif
6721 abi_long ret;
6722 int host_cmd = target_to_host_fcntl_cmd(cmd);
6724 if (host_cmd == -TARGET_EINVAL)
6725 return host_cmd;
6727 switch(cmd) {
6728 case TARGET_F_GETLK:
6729 ret = copy_from_user_flock(&fl64, arg);
6730 if (ret) {
6731 return ret;
6733 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6734 if (ret == 0) {
6735 ret = copy_to_user_flock(arg, &fl64);
6737 break;
6739 case TARGET_F_SETLK:
6740 case TARGET_F_SETLKW:
6741 ret = copy_from_user_flock(&fl64, arg);
6742 if (ret) {
6743 return ret;
6745 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6746 break;
6748 case TARGET_F_GETLK64:
6749 ret = copy_from_user_flock64(&fl64, arg);
6750 if (ret) {
6751 return ret;
6753 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6754 if (ret == 0) {
6755 ret = copy_to_user_flock64(arg, &fl64);
6757 break;
6758 case TARGET_F_SETLK64:
6759 case TARGET_F_SETLKW64:
6760 ret = copy_from_user_flock64(&fl64, arg);
6761 if (ret) {
6762 return ret;
6764 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6765 break;
6767 case TARGET_F_GETFL:
6768 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6769 if (ret >= 0) {
6770 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6772 break;
6774 case TARGET_F_SETFL:
6775 ret = get_errno(safe_fcntl(fd, host_cmd,
6776 target_to_host_bitmask(arg,
6777 fcntl_flags_tbl)));
6778 break;
6780 #ifdef F_GETOWN_EX
6781 case TARGET_F_GETOWN_EX:
6782 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6783 if (ret >= 0) {
6784 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6785 return -TARGET_EFAULT;
6786 target_fox->type = tswap32(fox.type);
6787 target_fox->pid = tswap32(fox.pid);
6788 unlock_user_struct(target_fox, arg, 1);
6790 break;
6791 #endif
6793 #ifdef F_SETOWN_EX
6794 case TARGET_F_SETOWN_EX:
6795 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6796 return -TARGET_EFAULT;
6797 fox.type = tswap32(target_fox->type);
6798 fox.pid = tswap32(target_fox->pid);
6799 unlock_user_struct(target_fox, arg, 0);
6800 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6801 break;
6802 #endif
6804 case TARGET_F_SETOWN:
6805 case TARGET_F_GETOWN:
6806 case TARGET_F_SETSIG:
6807 case TARGET_F_GETSIG:
6808 case TARGET_F_SETLEASE:
6809 case TARGET_F_GETLEASE:
6810 case TARGET_F_SETPIPE_SZ:
6811 case TARGET_F_GETPIPE_SZ:
6812 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6813 break;
6815 default:
6816 ret = get_errno(safe_fcntl(fd, cmd, arg));
6817 break;
6819 return ret;
#ifdef USE_UID16

/* Targets with 16-bit uid_t: clamp 32-bit host IDs into the 16-bit range
 * (the kernel uses 65534, "nobody", as the overflow value) and widen
 * guest IDs back, treating 16-bit -1 as the "unchanged" sentinel. */
static inline int high2lowuid(int uid)
{
    return uid > 65535 ? 65534 : uid;
}

static inline int high2lowgid(int gid)
{
    return gid > 65535 ? 65534 : gid;
}

static inline int low2highuid(int uid)
{
    return (int16_t)uid == -1 ? -1 : uid;
}

static inline int low2highgid(int gid)
{
    return (int16_t)gid == -1 ? -1 : gid;
}

static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */

/* 32-bit uid_t targets: identity conversions. */
static inline int high2lowuid(int uid)
{
    return uid;
}

static inline int high2lowgid(int gid)
{
    return gid;
}

static inline int low2highuid(int uid)
{
    return uid;
}

static inline int low2highgid(int gid)
{
    return gid;
}

static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
6888 /* We must do direct syscalls for setting UID/GID, because we want to
6889 * implement the Linux system call semantics of "change only for this thread",
6890 * not the libc/POSIX semantics of "change for all threads in process".
6891 * (See http://ewontfix.com/17/ for more details.)
6892 * We use the 32-bit version of the syscalls if present; if it is not
6893 * then either the host architecture supports 32-bit UIDs natively with
6894 * the standard syscall, or the 16-bit UID is the best we can do.
6896 #ifdef __NR_setuid32
6897 #define __NR_sys_setuid __NR_setuid32
6898 #else
6899 #define __NR_sys_setuid __NR_setuid
6900 #endif
6901 #ifdef __NR_setgid32
6902 #define __NR_sys_setgid __NR_setgid32
6903 #else
6904 #define __NR_sys_setgid __NR_setgid
6905 #endif
6906 #ifdef __NR_setresuid32
6907 #define __NR_sys_setresuid __NR_setresuid32
6908 #else
6909 #define __NR_sys_setresuid __NR_setresuid
6910 #endif
6911 #ifdef __NR_setresgid32
6912 #define __NR_sys_setresgid __NR_setresgid32
6913 #else
6914 #define __NR_sys_setresgid __NR_setresgid
6915 #endif
6917 _syscall1(int, sys_setuid, uid_t, uid)
6918 _syscall1(int, sys_setgid, gid_t, gid)
6919 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6920 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6922 void syscall_init(void)
6924 IOCTLEntry *ie;
6925 const argtype *arg_type;
6926 int size;
6927 int i;
6929 thunk_init(STRUCT_MAX);
6931 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6932 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6933 #include "syscall_types.h"
6934 #undef STRUCT
6935 #undef STRUCT_SPECIAL
6937 /* Build target_to_host_errno_table[] table from
6938 * host_to_target_errno_table[]. */
6939 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6940 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6943 /* we patch the ioctl size if necessary. We rely on the fact that
6944 no ioctl has all the bits at '1' in the size field */
6945 ie = ioctl_entries;
6946 while (ie->target_cmd != 0) {
6947 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6948 TARGET_IOC_SIZEMASK) {
6949 arg_type = ie->arg_type;
6950 if (arg_type[0] != TYPE_PTR) {
6951 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6952 ie->target_cmd);
6953 exit(1);
6955 arg_type++;
6956 size = thunk_type_size(arg_type, 0);
6957 ie->target_cmd = (ie->target_cmd &
6958 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6959 (size << TARGET_IOC_SIZESHIFT);
6962 /* automatic consistency check if same arch */
6963 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6964 (defined(__x86_64__) && defined(TARGET_X86_64))
6965 if (unlikely(ie->target_cmd != ie->host_cmd)) {
6966 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6967 ie->name, ie->target_cmd, ie->host_cmd);
6969 #endif
6970 ie++;
#if TARGET_ABI_BITS == 32
/* Reassemble a 64-bit file offset passed as two 32-bit registers,
 * respecting the guest's register-pair endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABIs pass the whole offset in the first register. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64: some ABIs require 64-bit register pairs to be aligned,
 * inserting a pad register; skip it before reassembling the offset. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair alignment handling as truncate64. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
7018 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
7019 abi_ulong target_addr)
7021 struct target_timespec *target_ts;
7023 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
7024 return -TARGET_EFAULT;
7025 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
7026 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
7027 unlock_user_struct(target_ts, target_addr, 0);
7028 return 0;
7031 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
7032 struct timespec *host_ts)
7034 struct target_timespec *target_ts;
7036 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
7037 return -TARGET_EFAULT;
7038 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
7039 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
7040 unlock_user_struct(target_ts, target_addr, 1);
7041 return 0;
7044 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
7045 abi_ulong target_addr)
7047 struct target_itimerspec *target_itspec;
7049 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
7050 return -TARGET_EFAULT;
7053 host_itspec->it_interval.tv_sec =
7054 tswapal(target_itspec->it_interval.tv_sec);
7055 host_itspec->it_interval.tv_nsec =
7056 tswapal(target_itspec->it_interval.tv_nsec);
7057 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
7058 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
7060 unlock_user_struct(target_itspec, target_addr, 1);
7061 return 0;
7064 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7065 struct itimerspec *host_its)
7067 struct target_itimerspec *target_itspec;
7069 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
7070 return -TARGET_EFAULT;
7073 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
7074 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
7076 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
7077 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
7079 unlock_user_struct(target_itspec, target_addr, 0);
7080 return 0;
7083 static inline abi_long target_to_host_timex(struct timex *host_tx,
7084 abi_long target_addr)
7086 struct target_timex *target_tx;
7088 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7089 return -TARGET_EFAULT;
7092 __get_user(host_tx->modes, &target_tx->modes);
7093 __get_user(host_tx->offset, &target_tx->offset);
7094 __get_user(host_tx->freq, &target_tx->freq);
7095 __get_user(host_tx->maxerror, &target_tx->maxerror);
7096 __get_user(host_tx->esterror, &target_tx->esterror);
7097 __get_user(host_tx->status, &target_tx->status);
7098 __get_user(host_tx->constant, &target_tx->constant);
7099 __get_user(host_tx->precision, &target_tx->precision);
7100 __get_user(host_tx->tolerance, &target_tx->tolerance);
7101 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7102 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7103 __get_user(host_tx->tick, &target_tx->tick);
7104 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7105 __get_user(host_tx->jitter, &target_tx->jitter);
7106 __get_user(host_tx->shift, &target_tx->shift);
7107 __get_user(host_tx->stabil, &target_tx->stabil);
7108 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7109 __get_user(host_tx->calcnt, &target_tx->calcnt);
7110 __get_user(host_tx->errcnt, &target_tx->errcnt);
7111 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7112 __get_user(host_tx->tai, &target_tx->tai);
7114 unlock_user_struct(target_tx, target_addr, 0);
7115 return 0;
7118 static inline abi_long host_to_target_timex(abi_long target_addr,
7119 struct timex *host_tx)
7121 struct target_timex *target_tx;
7123 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7124 return -TARGET_EFAULT;
7127 __put_user(host_tx->modes, &target_tx->modes);
7128 __put_user(host_tx->offset, &target_tx->offset);
7129 __put_user(host_tx->freq, &target_tx->freq);
7130 __put_user(host_tx->maxerror, &target_tx->maxerror);
7131 __put_user(host_tx->esterror, &target_tx->esterror);
7132 __put_user(host_tx->status, &target_tx->status);
7133 __put_user(host_tx->constant, &target_tx->constant);
7134 __put_user(host_tx->precision, &target_tx->precision);
7135 __put_user(host_tx->tolerance, &target_tx->tolerance);
7136 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7137 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7138 __put_user(host_tx->tick, &target_tx->tick);
7139 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7140 __put_user(host_tx->jitter, &target_tx->jitter);
7141 __put_user(host_tx->shift, &target_tx->shift);
7142 __put_user(host_tx->stabil, &target_tx->stabil);
7143 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7144 __put_user(host_tx->calcnt, &target_tx->calcnt);
7145 __put_user(host_tx->errcnt, &target_tx->errcnt);
7146 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7147 __put_user(host_tx->tai, &target_tx->tai);
7149 unlock_user_struct(target_tx, target_addr, 1);
7150 return 0;
7154 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7155 abi_ulong target_addr)
7157 struct target_sigevent *target_sevp;
7159 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7160 return -TARGET_EFAULT;
7163 /* This union is awkward on 64 bit systems because it has a 32 bit
7164 * integer and a pointer in it; we follow the conversion approach
7165 * used for handling sigval types in signal.c so the guest should get
7166 * the correct value back even if we did a 64 bit byteswap and it's
7167 * using the 32 bit integer.
7169 host_sevp->sigev_value.sival_ptr =
7170 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7171 host_sevp->sigev_signo =
7172 target_to_host_signal(tswap32(target_sevp->sigev_signo));
7173 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7174 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7176 unlock_user_struct(target_sevp, target_addr, 1);
7177 return 0;
#if defined(TARGET_NR_mlockall)
/* Translate the guest mlockall() flag bits into host MCL_* bits. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int host_flags = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        host_flags |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        host_flags |= MCL_FUTURE;
    }
    return host_flags;
}
#endif
7195 static inline abi_long host_to_target_stat64(void *cpu_env,
7196 abi_ulong target_addr,
7197 struct stat *host_st)
7199 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7200 if (((CPUARMState *)cpu_env)->eabi) {
7201 struct target_eabi_stat64 *target_st;
7203 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7204 return -TARGET_EFAULT;
7205 memset(target_st, 0, sizeof(struct target_eabi_stat64));
7206 __put_user(host_st->st_dev, &target_st->st_dev);
7207 __put_user(host_st->st_ino, &target_st->st_ino);
7208 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7209 __put_user(host_st->st_ino, &target_st->__st_ino);
7210 #endif
7211 __put_user(host_st->st_mode, &target_st->st_mode);
7212 __put_user(host_st->st_nlink, &target_st->st_nlink);
7213 __put_user(host_st->st_uid, &target_st->st_uid);
7214 __put_user(host_st->st_gid, &target_st->st_gid);
7215 __put_user(host_st->st_rdev, &target_st->st_rdev);
7216 __put_user(host_st->st_size, &target_st->st_size);
7217 __put_user(host_st->st_blksize, &target_st->st_blksize);
7218 __put_user(host_st->st_blocks, &target_st->st_blocks);
7219 __put_user(host_st->st_atime, &target_st->target_st_atime);
7220 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7221 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7222 unlock_user_struct(target_st, target_addr, 1);
7223 } else
7224 #endif
7226 #if defined(TARGET_HAS_STRUCT_STAT64)
7227 struct target_stat64 *target_st;
7228 #else
7229 struct target_stat *target_st;
7230 #endif
7232 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7233 return -TARGET_EFAULT;
7234 memset(target_st, 0, sizeof(*target_st));
7235 __put_user(host_st->st_dev, &target_st->st_dev);
7236 __put_user(host_st->st_ino, &target_st->st_ino);
7237 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7238 __put_user(host_st->st_ino, &target_st->__st_ino);
7239 #endif
7240 __put_user(host_st->st_mode, &target_st->st_mode);
7241 __put_user(host_st->st_nlink, &target_st->st_nlink);
7242 __put_user(host_st->st_uid, &target_st->st_uid);
7243 __put_user(host_st->st_gid, &target_st->st_gid);
7244 __put_user(host_st->st_rdev, &target_st->st_rdev);
7245 /* XXX: better use of kernel struct */
7246 __put_user(host_st->st_size, &target_st->st_size);
7247 __put_user(host_st->st_blksize, &target_st->st_blksize);
7248 __put_user(host_st->st_blocks, &target_st->st_blocks);
7249 __put_user(host_st->st_atime, &target_st->target_st_atime);
7250 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7251 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7252 unlock_user_struct(target_st, target_addr, 1);
7255 return 0;
7258 /* ??? Using host futex calls even when target atomic operations
7259 are not really atomic probably breaks things. However implementing
7260 futexes locally would make futexes shared between multiple processes
7261 tricky. However they're probably useless because guest atomic
7262 operations won't work either. */
7263 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7264 target_ulong uaddr2, int val3)
7266 struct timespec ts, *pts;
7267 int base_op;
7269 /* ??? We assume FUTEX_* constants are the same on both host
7270 and target. */
7271 #ifdef FUTEX_CMD_MASK
7272 base_op = op & FUTEX_CMD_MASK;
7273 #else
7274 base_op = op;
7275 #endif
7276 switch (base_op) {
7277 case FUTEX_WAIT:
7278 case FUTEX_WAIT_BITSET:
7279 if (timeout) {
7280 pts = &ts;
7281 target_to_host_timespec(pts, timeout);
7282 } else {
7283 pts = NULL;
7285 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
7286 pts, NULL, val3));
7287 case FUTEX_WAKE:
7288 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7289 case FUTEX_FD:
7290 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7291 case FUTEX_REQUEUE:
7292 case FUTEX_CMP_REQUEUE:
7293 case FUTEX_WAKE_OP:
7294 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7295 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7296 But the prototype takes a `struct timespec *'; insert casts
7297 to satisfy the compiler. We do not need to tswap TIMEOUT
7298 since it's not compared to guest memory. */
7299 pts = (struct timespec *)(uintptr_t) timeout;
7300 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
7301 g2h(uaddr2),
7302 (base_op == FUTEX_CMP_REQUEUE
7303 ? tswap32(val3)
7304 : val3)));
7305 default:
7306 return -TARGET_ENOSYS;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): size the handle from the guest's
 * handle_bytes field, run the host call, and copy handle + mount id back.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* First field of the guest struct is handle_bytes. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): copy in the guest file handle (byte
 * swapping the header fields) and pass translated open flags through.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* First field of the guest struct is handle_bytes. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
7397 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
/* signalfd siginfo conversion */

/* Convert one host signalfd_siginfo record to guest byte order and
 * signal numbering.  In this file it is always called with tinfo and
 * info aliasing the same buffer (see host_to_target_data_signalfd), so
 * the conversion is done carefully in-place.
 *
 * Fix: all *reads* now go through the source pointer `info` — the old
 * code read tinfo->ssi_errno / tinfo->ssi_signo / tinfo->ssi_code, which
 * only worked because of the aliasing and would return uninitialized
 * data for distinct buffers.
 */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */
#ifdef BUS_MCEERR_AO
    /* Check the *host* values before any swapped field is stored. */
    if (info->ssi_signo == SIGBUS &&
        (info->ssi_code == BUS_MCEERR_AR ||
         info->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    tinfo->ssi_errno = tswap32(info->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
7439 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
7441 int i;
7443 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
7444 host_to_target_signalfd_siginfo(buf + i, buf + i);
7447 return len;
7450 static TargetFdTrans target_signalfd_trans = {
7451 .host_to_target_data = host_to_target_data_signalfd,
7454 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7456 int host_flags;
7457 target_sigset_t *target_mask;
7458 sigset_t host_mask;
7459 abi_long ret;
7461 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7462 return -TARGET_EINVAL;
7464 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7465 return -TARGET_EFAULT;
7468 target_to_host_sigset(&host_mask, target_mask);
7470 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7472 ret = get_errno(signalfd(fd, &host_mask, host_flags));
7473 if (ret >= 0) {
7474 fd_trans_register(ret, &target_signalfd_trans);
7477 unlock_user_struct(target_mask, mask, 0);
7479 return ret;
7481 #endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Low 7 bits hold the terminating signal. */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8..15. */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
7497 static int open_self_cmdline(void *cpu_env, int fd)
7499 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7500 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7501 int i;
7503 for (i = 0; i < bprm->argc; i++) {
7504 size_t len = strlen(bprm->argv[i]) + 1;
7506 if (write(fd, bprm->argv[i], len) != len) {
7507 return -1;
7511 return 0;
7514 static int open_self_maps(void *cpu_env, int fd)
7516 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7517 TaskState *ts = cpu->opaque;
7518 FILE *fp;
7519 char *line = NULL;
7520 size_t len = 0;
7521 ssize_t read;
7523 fp = fopen("/proc/self/maps", "r");
7524 if (fp == NULL) {
7525 return -1;
7528 while ((read = getline(&line, &len, fp)) != -1) {
7529 int fields, dev_maj, dev_min, inode;
7530 uint64_t min, max, offset;
7531 char flag_r, flag_w, flag_x, flag_p;
7532 char path[512] = "";
7533 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7534 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7535 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
7537 if ((fields < 10) || (fields > 11)) {
7538 continue;
7540 if (h2g_valid(min)) {
7541 int flags = page_get_flags(h2g(min));
7542 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
7543 if (page_check_range(h2g(min), max - min, flags) == -1) {
7544 continue;
7546 if (h2g(min) == ts->info->stack_limit) {
7547 pstrcpy(path, sizeof(path), " [stack]");
7549 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
7550 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7551 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7552 flag_x, flag_p, offset, dev_maj, dev_min, inode,
7553 path[0] ? " " : "", path);
7557 free(line);
7558 fclose(fp);
7560 return 0;
7563 static int open_self_stat(void *cpu_env, int fd)
7565 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7566 TaskState *ts = cpu->opaque;
7567 abi_ulong start_stack = ts->info->start_stack;
7568 int i;
7570 for (i = 0; i < 44; i++) {
7571 char buf[128];
7572 int len;
7573 uint64_t val = 0;
7575 if (i == 0) {
7576 /* pid */
7577 val = getpid();
7578 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7579 } else if (i == 1) {
7580 /* app name */
7581 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
7582 } else if (i == 27) {
7583 /* stack bottom */
7584 val = start_stack;
7585 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7586 } else {
7587 /* for the rest, there is MasterCard */
7588 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
7591 len = strlen(buf);
7592 if (write(fd, buf, len) != len) {
7593 return -1;
7597 return 0;
7600 static int open_self_auxv(void *cpu_env, int fd)
7602 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7603 TaskState *ts = cpu->opaque;
7604 abi_ulong auxv = ts->info->saved_auxv;
7605 abi_ulong len = ts->info->auxv_len;
7606 char *ptr;
7609 * Auxiliary vector is stored in target process stack.
7610 * read in whole auxv vector and copy it to file
7612 ptr = lock_user(VERIFY_READ, auxv, len, 0);
7613 if (ptr != NULL) {
7614 while (len > 0) {
7615 ssize_t r;
7616 r = write(fd, ptr, len);
7617 if (r <= 0) {
7618 break;
7620 len -= r;
7621 ptr += r;
7623 lseek(fd, 0, SEEK_SET);
7624 unlock_user(ptr, auxv, len);
7627 return 0;
/* Return 1 if filename refers to this process's own proc entry `entry`,
 * i.e. "/proc/self/<entry>" or "/proc/<own-pid>/<entry>"; 0 otherwise. */
static int is_proc_myself(const char *filename, const char *entry)
{
    static const char proc_prefix[] = "/proc/";
    const char *rest = filename;

    if (strncmp(rest, proc_prefix, sizeof(proc_prefix) - 1) != 0) {
        return 0;
    }
    rest += sizeof(proc_prefix) - 1;

    if (strncmp(rest, "self/", 5) == 0) {
        rest += 5;
    } else if (*rest >= '1' && *rest <= '9') {
        /* Numeric pid: accept only our own, followed by a slash. */
        char own[80];
        size_t own_len;

        snprintf(own, sizeof(own), "%d/", getpid());
        own_len = strlen(own);
        if (strncmp(rest, own, own_len) != 0) {
            return 0;
        }
        rest += own_len;
    } else {
        return 0;
    }

    return strcmp(rest, entry) == 0;
}
7654 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Match an absolute /proc path exactly; used for files whose contents
 * must be byte-swapped when host and target endianness differ. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
7660 static int open_net_route(void *cpu_env, int fd)
7662 FILE *fp;
7663 char *line = NULL;
7664 size_t len = 0;
7665 ssize_t read;
7667 fp = fopen("/proc/net/route", "r");
7668 if (fp == NULL) {
7669 return -1;
7672 /* read header */
7674 read = getline(&line, &len, fp);
7675 dprintf(fd, "%s", line);
7677 /* read routes */
7679 while ((read = getline(&line, &len, fp)) != -1) {
7680 char iface[16];
7681 uint32_t dest, gw, mask;
7682 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7683 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7684 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7685 &mask, &mtu, &window, &irtt);
7686 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7687 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7688 metric, tswap32(mask), mtu, window, irtt);
7691 free(line);
7692 fclose(fp);
7694 return 0;
7696 #endif
7698 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7700 struct fake_open {
7701 const char *filename;
7702 int (*fill)(void *cpu_env, int fd);
7703 int (*cmp)(const char *s1, const char *s2);
7705 const struct fake_open *fake_open;
7706 static const struct fake_open fakes[] = {
7707 { "maps", open_self_maps, is_proc_myself },
7708 { "stat", open_self_stat, is_proc_myself },
7709 { "auxv", open_self_auxv, is_proc_myself },
7710 { "cmdline", open_self_cmdline, is_proc_myself },
7711 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7712 { "/proc/net/route", open_net_route, is_proc },
7713 #endif
7714 { NULL, NULL, NULL }
7717 if (is_proc_myself(pathname, "exe")) {
7718 int execfd = qemu_getauxval(AT_EXECFD);
7719 return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7722 for (fake_open = fakes; fake_open->filename; fake_open++) {
7723 if (fake_open->cmp(pathname, fake_open->filename)) {
7724 break;
7728 if (fake_open->filename) {
7729 const char *tmpdir;
7730 char filename[PATH_MAX];
7731 int fd, r;
7733 /* create temporary file to map stat to */
7734 tmpdir = getenv("TMPDIR");
7735 if (!tmpdir)
7736 tmpdir = "/tmp";
7737 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7738 fd = mkstemp(filename);
7739 if (fd < 0) {
7740 return fd;
7742 unlink(filename);
7744 if ((r = fake_open->fill(cpu_env, fd))) {
7745 int e = errno;
7746 close(fd);
7747 errno = e;
7748 return r;
7750 lseek(fd, 0, SEEK_SET);
7752 return fd;
7755 return safe_openat(dirfd, path(pathname), flags, mode);
7758 #define TIMER_MAGIC 0x0caf0000
7759 #define TIMER_MAGIC_MASK 0xffff0000
7761 /* Convert QEMU provided timer ID back to internal 16bit index format */
7762 static target_timer_t get_timer_id(abi_long arg)
7764 target_timer_t timerid = arg;
7766 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7767 return -TARGET_EINVAL;
7770 timerid &= 0xffff;
7772 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7773 return -TARGET_EINVAL;
7776 return timerid;
7779 static abi_long swap_data_eventfd(void *buf, size_t len)
7781 uint64_t *counter = buf;
7782 int i;
7784 if (len < sizeof(uint64_t)) {
7785 return -EINVAL;
7788 for (i = 0; i < len; i += sizeof(uint64_t)) {
7789 *counter = tswap64(*counter);
7790 counter++;
7793 return len;
/* fd_trans hooks for eventfd: counters are 64-bit values in the fd's
 * native byte order, so both directions use the same swap routine. */
static TargetFdTrans target_eventfd_trans = {
    .host_to_target_data = swap_data_eventfd,
    .target_to_host_data = swap_data_eventfd,
};
7801 #if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
7802 (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
7803 defined(__NR_inotify_init1))
7804 static abi_long host_to_target_data_inotify(void *buf, size_t len)
7806 struct inotify_event *ev;
7807 int i;
7808 uint32_t name_len;
7810 for (i = 0; i < len; i += sizeof(struct inotify_event) + name_len) {
7811 ev = (struct inotify_event *)((char *)buf + i);
7812 name_len = ev->len;
7814 ev->wd = tswap32(ev->wd);
7815 ev->mask = tswap32(ev->mask);
7816 ev->cookie = tswap32(ev->cookie);
7817 ev->len = tswap32(name_len);
7820 return len;
/* fd_trans hooks for inotify fds: only host->guest data needs fixing
 * (the guest never writes structured data to an inotify fd). */
static TargetFdTrans target_inotify_trans = {
    .host_to_target_data = host_to_target_data_inotify,
};
7826 #endif
/*
 * Copy a guest CPU affinity mask into a host bitmap.
 *
 * host_mask/host_size: destination bitmap, zeroed here; must be at least
 * target_size bytes (asserted).  target_addr/target_size: guest memory
 * holding an array of abi_ulong mask words.  Bit numbering is preserved
 * even when the host and guest word widths differ.
 * Returns 0 on success, -TARGET_EFAULT if the guest buffer cannot be
 * locked for reading.
 */
static int target_to_host_cpu_mask(unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }
    memset(host_mask, 0, host_size);

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        /* global bit index of the first bit in this guest word */
        unsigned bit = i * target_bits;
        abi_ulong val;

        __get_user(val, &target_mask[i]);
        for (j = 0; j < target_bits; j++, bit++) {
            /* set the corresponding host bit for every guest bit set */
            if (val & (1UL << j)) {
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
            }
        }
    }

    unlock_user(target_mask, target_addr, 0);
    return 0;
}
/*
 * Copy a host CPU affinity bitmap out to a guest abi_ulong mask array;
 * mirror of target_to_host_cpu_mask().  host_size must be at least
 * target_size bytes (asserted), so only the guest-visible prefix of the
 * host mask is copied.  Returns 0 on success, -TARGET_EFAULT if the
 * guest buffer cannot be locked for writing.
 */
static int host_to_target_cpu_mask(const unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        /* global bit index of the first bit in this guest word */
        unsigned bit = i * target_bits;
        abi_ulong val = 0;

        for (j = 0; j < target_bits; j++, bit++) {
            /* gather each host bit into the guest-sized word */
            if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
                val |= 1UL << j;
            }
        }
        __put_user(val, &target_mask[i]);
    }

    unlock_user(target_mask, target_addr, target_size);
    return 0;
}
7895 /* do_syscall() should always have a single exit point at the end so
7896 that actions, such as logging of syscall results, can be performed.
7897 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7898 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
7899 abi_long arg2, abi_long arg3, abi_long arg4,
7900 abi_long arg5, abi_long arg6, abi_long arg7,
7901 abi_long arg8)
7903 CPUState *cpu = ENV_GET_CPU(cpu_env);
7904 abi_long ret;
7905 struct stat st;
7906 struct statfs stfs;
7907 void *p;
7909 #if defined(DEBUG_ERESTARTSYS)
7910 /* Debug-only code for exercising the syscall-restart code paths
7911 * in the per-architecture cpu main loops: restart every syscall
7912 * the guest makes once before letting it through.
7915 static int flag;
7917 flag = !flag;
7918 if (flag) {
7919 return -TARGET_ERESTARTSYS;
7922 #endif
7924 #ifdef DEBUG
7925 gemu_log("syscall %d", num);
7926 #endif
7927 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
7928 if(do_strace)
7929 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
7931 switch(num) {
7932 case TARGET_NR_exit:
7933 /* In old applications this may be used to implement _exit(2).
7934 However in threaded applictions it is used for thread termination,
7935 and _exit_group is used for application termination.
7936 Do thread termination if we have more then one thread. */
7938 if (block_signals()) {
7939 ret = -TARGET_ERESTARTSYS;
7940 break;
7943 cpu_list_lock();
7945 if (CPU_NEXT(first_cpu)) {
7946 TaskState *ts;
7948 /* Remove the CPU from the list. */
7949 QTAILQ_REMOVE(&cpus, cpu, node);
7951 cpu_list_unlock();
7953 ts = cpu->opaque;
7954 if (ts->child_tidptr) {
7955 put_user_u32(0, ts->child_tidptr);
7956 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7957 NULL, NULL, 0);
7959 thread_cpu = NULL;
7960 object_unref(OBJECT(cpu));
7961 g_free(ts);
7962 rcu_unregister_thread();
7963 pthread_exit(NULL);
7966 cpu_list_unlock();
7967 #ifdef TARGET_GPROF
7968 _mcleanup();
7969 #endif
7970 gdb_exit(cpu_env, arg1);
7971 _exit(arg1);
7972 ret = 0; /* avoid warning */
7973 break;
7974 case TARGET_NR_read:
7975 if (arg3 == 0)
7976 ret = 0;
7977 else {
7978 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7979 goto efault;
7980 ret = get_errno(safe_read(arg1, p, arg3));
7981 if (ret >= 0 &&
7982 fd_trans_host_to_target_data(arg1)) {
7983 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7985 unlock_user(p, arg2, ret);
7987 break;
7988 case TARGET_NR_write:
7989 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7990 goto efault;
7991 if (fd_trans_target_to_host_data(arg1)) {
7992 void *copy = g_malloc(arg3);
7993 memcpy(copy, p, arg3);
7994 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7995 if (ret >= 0) {
7996 ret = get_errno(safe_write(arg1, copy, ret));
7998 g_free(copy);
7999 } else {
8000 ret = get_errno(safe_write(arg1, p, arg3));
8002 unlock_user(p, arg2, 0);
8003 break;
8004 #ifdef TARGET_NR_open
8005 case TARGET_NR_open:
8006 if (!(p = lock_user_string(arg1)))
8007 goto efault;
8008 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8009 target_to_host_bitmask(arg2, fcntl_flags_tbl),
8010 arg3));
8011 fd_trans_unregister(ret);
8012 unlock_user(p, arg1, 0);
8013 break;
8014 #endif
8015 case TARGET_NR_openat:
8016 if (!(p = lock_user_string(arg2)))
8017 goto efault;
8018 ret = get_errno(do_openat(cpu_env, arg1, p,
8019 target_to_host_bitmask(arg3, fcntl_flags_tbl),
8020 arg4));
8021 fd_trans_unregister(ret);
8022 unlock_user(p, arg2, 0);
8023 break;
8024 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8025 case TARGET_NR_name_to_handle_at:
8026 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8027 break;
8028 #endif
8029 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8030 case TARGET_NR_open_by_handle_at:
8031 ret = do_open_by_handle_at(arg1, arg2, arg3);
8032 fd_trans_unregister(ret);
8033 break;
8034 #endif
8035 case TARGET_NR_close:
8036 fd_trans_unregister(arg1);
8037 ret = get_errno(close(arg1));
8038 break;
8039 case TARGET_NR_brk:
8040 ret = do_brk(arg1);
8041 break;
8042 #ifdef TARGET_NR_fork
8043 case TARGET_NR_fork:
8044 ret = get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8045 break;
8046 #endif
8047 #ifdef TARGET_NR_waitpid
8048 case TARGET_NR_waitpid:
8050 int status;
8051 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8052 if (!is_error(ret) && arg2 && ret
8053 && put_user_s32(host_to_target_waitstatus(status), arg2))
8054 goto efault;
8056 break;
8057 #endif
8058 #ifdef TARGET_NR_waitid
8059 case TARGET_NR_waitid:
8061 siginfo_t info;
8062 info.si_pid = 0;
8063 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8064 if (!is_error(ret) && arg3 && info.si_pid != 0) {
8065 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8066 goto efault;
8067 host_to_target_siginfo(p, &info);
8068 unlock_user(p, arg3, sizeof(target_siginfo_t));
8071 break;
8072 #endif
8073 #ifdef TARGET_NR_creat /* not on alpha */
8074 case TARGET_NR_creat:
8075 if (!(p = lock_user_string(arg1)))
8076 goto efault;
8077 ret = get_errno(creat(p, arg2));
8078 fd_trans_unregister(ret);
8079 unlock_user(p, arg1, 0);
8080 break;
8081 #endif
8082 #ifdef TARGET_NR_link
8083 case TARGET_NR_link:
8085 void * p2;
8086 p = lock_user_string(arg1);
8087 p2 = lock_user_string(arg2);
8088 if (!p || !p2)
8089 ret = -TARGET_EFAULT;
8090 else
8091 ret = get_errno(link(p, p2));
8092 unlock_user(p2, arg2, 0);
8093 unlock_user(p, arg1, 0);
8095 break;
8096 #endif
8097 #if defined(TARGET_NR_linkat)
8098 case TARGET_NR_linkat:
8100 void * p2 = NULL;
8101 if (!arg2 || !arg4)
8102 goto efault;
8103 p = lock_user_string(arg2);
8104 p2 = lock_user_string(arg4);
8105 if (!p || !p2)
8106 ret = -TARGET_EFAULT;
8107 else
8108 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8109 unlock_user(p, arg2, 0);
8110 unlock_user(p2, arg4, 0);
8112 break;
8113 #endif
8114 #ifdef TARGET_NR_unlink
8115 case TARGET_NR_unlink:
8116 if (!(p = lock_user_string(arg1)))
8117 goto efault;
8118 ret = get_errno(unlink(p));
8119 unlock_user(p, arg1, 0);
8120 break;
8121 #endif
8122 #if defined(TARGET_NR_unlinkat)
8123 case TARGET_NR_unlinkat:
8124 if (!(p = lock_user_string(arg2)))
8125 goto efault;
8126 ret = get_errno(unlinkat(arg1, p, arg3));
8127 unlock_user(p, arg2, 0);
8128 break;
8129 #endif
8130 case TARGET_NR_execve:
8132 char **argp, **envp;
8133 int argc, envc;
8134 abi_ulong gp;
8135 abi_ulong guest_argp;
8136 abi_ulong guest_envp;
8137 abi_ulong addr;
8138 char **q;
8139 int total_size = 0;
8141 argc = 0;
8142 guest_argp = arg2;
8143 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8144 if (get_user_ual(addr, gp))
8145 goto efault;
8146 if (!addr)
8147 break;
8148 argc++;
8150 envc = 0;
8151 guest_envp = arg3;
8152 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8153 if (get_user_ual(addr, gp))
8154 goto efault;
8155 if (!addr)
8156 break;
8157 envc++;
8160 argp = g_new0(char *, argc + 1);
8161 envp = g_new0(char *, envc + 1);
8163 for (gp = guest_argp, q = argp; gp;
8164 gp += sizeof(abi_ulong), q++) {
8165 if (get_user_ual(addr, gp))
8166 goto execve_efault;
8167 if (!addr)
8168 break;
8169 if (!(*q = lock_user_string(addr)))
8170 goto execve_efault;
8171 total_size += strlen(*q) + 1;
8173 *q = NULL;
8175 for (gp = guest_envp, q = envp; gp;
8176 gp += sizeof(abi_ulong), q++) {
8177 if (get_user_ual(addr, gp))
8178 goto execve_efault;
8179 if (!addr)
8180 break;
8181 if (!(*q = lock_user_string(addr)))
8182 goto execve_efault;
8183 total_size += strlen(*q) + 1;
8185 *q = NULL;
8187 if (!(p = lock_user_string(arg1)))
8188 goto execve_efault;
8189 /* Although execve() is not an interruptible syscall it is
8190 * a special case where we must use the safe_syscall wrapper:
8191 * if we allow a signal to happen before we make the host
8192 * syscall then we will 'lose' it, because at the point of
8193 * execve the process leaves QEMU's control. So we use the
8194 * safe syscall wrapper to ensure that we either take the
8195 * signal as a guest signal, or else it does not happen
8196 * before the execve completes and makes it the other
8197 * program's problem.
8199 ret = get_errno(safe_execve(p, argp, envp));
8200 unlock_user(p, arg1, 0);
8202 goto execve_end;
8204 execve_efault:
8205 ret = -TARGET_EFAULT;
8207 execve_end:
8208 for (gp = guest_argp, q = argp; *q;
8209 gp += sizeof(abi_ulong), q++) {
8210 if (get_user_ual(addr, gp)
8211 || !addr)
8212 break;
8213 unlock_user(*q, addr, 0);
8215 for (gp = guest_envp, q = envp; *q;
8216 gp += sizeof(abi_ulong), q++) {
8217 if (get_user_ual(addr, gp)
8218 || !addr)
8219 break;
8220 unlock_user(*q, addr, 0);
8223 g_free(argp);
8224 g_free(envp);
8226 break;
8227 case TARGET_NR_chdir:
8228 if (!(p = lock_user_string(arg1)))
8229 goto efault;
8230 ret = get_errno(chdir(p));
8231 unlock_user(p, arg1, 0);
8232 break;
8233 #ifdef TARGET_NR_time
8234 case TARGET_NR_time:
8236 time_t host_time;
8237 ret = get_errno(time(&host_time));
8238 if (!is_error(ret)
8239 && arg1
8240 && put_user_sal(host_time, arg1))
8241 goto efault;
8243 break;
8244 #endif
8245 #ifdef TARGET_NR_mknod
8246 case TARGET_NR_mknod:
8247 if (!(p = lock_user_string(arg1)))
8248 goto efault;
8249 ret = get_errno(mknod(p, arg2, arg3));
8250 unlock_user(p, arg1, 0);
8251 break;
8252 #endif
8253 #if defined(TARGET_NR_mknodat)
8254 case TARGET_NR_mknodat:
8255 if (!(p = lock_user_string(arg2)))
8256 goto efault;
8257 ret = get_errno(mknodat(arg1, p, arg3, arg4));
8258 unlock_user(p, arg2, 0);
8259 break;
8260 #endif
8261 #ifdef TARGET_NR_chmod
8262 case TARGET_NR_chmod:
8263 if (!(p = lock_user_string(arg1)))
8264 goto efault;
8265 ret = get_errno(chmod(p, arg2));
8266 unlock_user(p, arg1, 0);
8267 break;
8268 #endif
8269 #ifdef TARGET_NR_break
8270 case TARGET_NR_break:
8271 goto unimplemented;
8272 #endif
8273 #ifdef TARGET_NR_oldstat
8274 case TARGET_NR_oldstat:
8275 goto unimplemented;
8276 #endif
8277 case TARGET_NR_lseek:
8278 ret = get_errno(lseek(arg1, arg2, arg3));
8279 break;
8280 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8281 /* Alpha specific */
8282 case TARGET_NR_getxpid:
8283 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8284 ret = get_errno(getpid());
8285 break;
8286 #endif
8287 #ifdef TARGET_NR_getpid
8288 case TARGET_NR_getpid:
8289 ret = get_errno(getpid());
8290 break;
8291 #endif
8292 case TARGET_NR_mount:
8294 /* need to look at the data field */
8295 void *p2, *p3;
8297 if (arg1) {
8298 p = lock_user_string(arg1);
8299 if (!p) {
8300 goto efault;
8302 } else {
8303 p = NULL;
8306 p2 = lock_user_string(arg2);
8307 if (!p2) {
8308 if (arg1) {
8309 unlock_user(p, arg1, 0);
8311 goto efault;
8314 if (arg3) {
8315 p3 = lock_user_string(arg3);
8316 if (!p3) {
8317 if (arg1) {
8318 unlock_user(p, arg1, 0);
8320 unlock_user(p2, arg2, 0);
8321 goto efault;
8323 } else {
8324 p3 = NULL;
8327 /* FIXME - arg5 should be locked, but it isn't clear how to
8328 * do that since it's not guaranteed to be a NULL-terminated
8329 * string.
8331 if (!arg5) {
8332 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8333 } else {
8334 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8336 ret = get_errno(ret);
8338 if (arg1) {
8339 unlock_user(p, arg1, 0);
8341 unlock_user(p2, arg2, 0);
8342 if (arg3) {
8343 unlock_user(p3, arg3, 0);
8346 break;
8347 #ifdef TARGET_NR_umount
8348 case TARGET_NR_umount:
8349 if (!(p = lock_user_string(arg1)))
8350 goto efault;
8351 ret = get_errno(umount(p));
8352 unlock_user(p, arg1, 0);
8353 break;
8354 #endif
8355 #ifdef TARGET_NR_stime /* not on alpha */
8356 case TARGET_NR_stime:
8358 time_t host_time;
8359 if (get_user_sal(host_time, arg1))
8360 goto efault;
8361 ret = get_errno(stime(&host_time));
8363 break;
8364 #endif
8365 case TARGET_NR_ptrace:
8366 goto unimplemented;
8367 #ifdef TARGET_NR_alarm /* not on alpha */
8368 case TARGET_NR_alarm:
8369 ret = alarm(arg1);
8370 break;
8371 #endif
8372 #ifdef TARGET_NR_oldfstat
8373 case TARGET_NR_oldfstat:
8374 goto unimplemented;
8375 #endif
8376 #ifdef TARGET_NR_pause /* not on alpha */
8377 case TARGET_NR_pause:
8378 if (!block_signals()) {
8379 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8381 ret = -TARGET_EINTR;
8382 break;
8383 #endif
8384 #ifdef TARGET_NR_utime
8385 case TARGET_NR_utime:
8387 struct utimbuf tbuf, *host_tbuf;
8388 struct target_utimbuf *target_tbuf;
8389 if (arg2) {
8390 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8391 goto efault;
8392 tbuf.actime = tswapal(target_tbuf->actime);
8393 tbuf.modtime = tswapal(target_tbuf->modtime);
8394 unlock_user_struct(target_tbuf, arg2, 0);
8395 host_tbuf = &tbuf;
8396 } else {
8397 host_tbuf = NULL;
8399 if (!(p = lock_user_string(arg1)))
8400 goto efault;
8401 ret = get_errno(utime(p, host_tbuf));
8402 unlock_user(p, arg1, 0);
8404 break;
8405 #endif
8406 #ifdef TARGET_NR_utimes
8407 case TARGET_NR_utimes:
8409 struct timeval *tvp, tv[2];
8410 if (arg2) {
8411 if (copy_from_user_timeval(&tv[0], arg2)
8412 || copy_from_user_timeval(&tv[1],
8413 arg2 + sizeof(struct target_timeval)))
8414 goto efault;
8415 tvp = tv;
8416 } else {
8417 tvp = NULL;
8419 if (!(p = lock_user_string(arg1)))
8420 goto efault;
8421 ret = get_errno(utimes(p, tvp));
8422 unlock_user(p, arg1, 0);
8424 break;
8425 #endif
8426 #if defined(TARGET_NR_futimesat)
8427 case TARGET_NR_futimesat:
8429 struct timeval *tvp, tv[2];
8430 if (arg3) {
8431 if (copy_from_user_timeval(&tv[0], arg3)
8432 || copy_from_user_timeval(&tv[1],
8433 arg3 + sizeof(struct target_timeval)))
8434 goto efault;
8435 tvp = tv;
8436 } else {
8437 tvp = NULL;
8439 if (!(p = lock_user_string(arg2)))
8440 goto efault;
8441 ret = get_errno(futimesat(arg1, path(p), tvp));
8442 unlock_user(p, arg2, 0);
8444 break;
8445 #endif
8446 #ifdef TARGET_NR_stty
8447 case TARGET_NR_stty:
8448 goto unimplemented;
8449 #endif
8450 #ifdef TARGET_NR_gtty
8451 case TARGET_NR_gtty:
8452 goto unimplemented;
8453 #endif
8454 #ifdef TARGET_NR_access
8455 case TARGET_NR_access:
8456 if (!(p = lock_user_string(arg1)))
8457 goto efault;
8458 ret = get_errno(access(path(p), arg2));
8459 unlock_user(p, arg1, 0);
8460 break;
8461 #endif
8462 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8463 case TARGET_NR_faccessat:
8464 if (!(p = lock_user_string(arg2)))
8465 goto efault;
8466 ret = get_errno(faccessat(arg1, p, arg3, 0));
8467 unlock_user(p, arg2, 0);
8468 break;
8469 #endif
8470 #ifdef TARGET_NR_nice /* not on alpha */
8471 case TARGET_NR_nice:
8472 ret = get_errno(nice(arg1));
8473 break;
8474 #endif
8475 #ifdef TARGET_NR_ftime
8476 case TARGET_NR_ftime:
8477 goto unimplemented;
8478 #endif
8479 case TARGET_NR_sync:
8480 sync();
8481 ret = 0;
8482 break;
8483 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8484 case TARGET_NR_syncfs:
8485 ret = get_errno(syncfs(arg1));
8486 break;
8487 #endif
8488 case TARGET_NR_kill:
8489 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8490 break;
8491 #ifdef TARGET_NR_rename
8492 case TARGET_NR_rename:
8494 void *p2;
8495 p = lock_user_string(arg1);
8496 p2 = lock_user_string(arg2);
8497 if (!p || !p2)
8498 ret = -TARGET_EFAULT;
8499 else
8500 ret = get_errno(rename(p, p2));
8501 unlock_user(p2, arg2, 0);
8502 unlock_user(p, arg1, 0);
8504 break;
8505 #endif
8506 #if defined(TARGET_NR_renameat)
8507 case TARGET_NR_renameat:
8509 void *p2;
8510 p = lock_user_string(arg2);
8511 p2 = lock_user_string(arg4);
8512 if (!p || !p2)
8513 ret = -TARGET_EFAULT;
8514 else
8515 ret = get_errno(renameat(arg1, p, arg3, p2));
8516 unlock_user(p2, arg4, 0);
8517 unlock_user(p, arg2, 0);
8519 break;
8520 #endif
8521 #if defined(TARGET_NR_renameat2)
8522 case TARGET_NR_renameat2:
8524 void *p2;
8525 p = lock_user_string(arg2);
8526 p2 = lock_user_string(arg4);
8527 if (!p || !p2) {
8528 ret = -TARGET_EFAULT;
8529 } else {
8530 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8532 unlock_user(p2, arg4, 0);
8533 unlock_user(p, arg2, 0);
8535 break;
8536 #endif
8537 #ifdef TARGET_NR_mkdir
8538 case TARGET_NR_mkdir:
8539 if (!(p = lock_user_string(arg1)))
8540 goto efault;
8541 ret = get_errno(mkdir(p, arg2));
8542 unlock_user(p, arg1, 0);
8543 break;
8544 #endif
8545 #if defined(TARGET_NR_mkdirat)
8546 case TARGET_NR_mkdirat:
8547 if (!(p = lock_user_string(arg2)))
8548 goto efault;
8549 ret = get_errno(mkdirat(arg1, p, arg3));
8550 unlock_user(p, arg2, 0);
8551 break;
8552 #endif
8553 #ifdef TARGET_NR_rmdir
8554 case TARGET_NR_rmdir:
8555 if (!(p = lock_user_string(arg1)))
8556 goto efault;
8557 ret = get_errno(rmdir(p));
8558 unlock_user(p, arg1, 0);
8559 break;
8560 #endif
8561 case TARGET_NR_dup:
8562 ret = get_errno(dup(arg1));
8563 if (ret >= 0) {
8564 fd_trans_dup(arg1, ret);
8566 break;
8567 #ifdef TARGET_NR_pipe
8568 case TARGET_NR_pipe:
8569 ret = do_pipe(cpu_env, arg1, 0, 0);
8570 break;
8571 #endif
8572 #ifdef TARGET_NR_pipe2
8573 case TARGET_NR_pipe2:
8574 ret = do_pipe(cpu_env, arg1,
8575 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8576 break;
8577 #endif
8578 case TARGET_NR_times:
8580 struct target_tms *tmsp;
8581 struct tms tms;
8582 ret = get_errno(times(&tms));
8583 if (arg1) {
8584 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8585 if (!tmsp)
8586 goto efault;
8587 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8588 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8589 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8590 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8592 if (!is_error(ret))
8593 ret = host_to_target_clock_t(ret);
8595 break;
8596 #ifdef TARGET_NR_prof
8597 case TARGET_NR_prof:
8598 goto unimplemented;
8599 #endif
8600 #ifdef TARGET_NR_signal
8601 case TARGET_NR_signal:
8602 goto unimplemented;
8603 #endif
8604 case TARGET_NR_acct:
8605 if (arg1 == 0) {
8606 ret = get_errno(acct(NULL));
8607 } else {
8608 if (!(p = lock_user_string(arg1)))
8609 goto efault;
8610 ret = get_errno(acct(path(p)));
8611 unlock_user(p, arg1, 0);
8613 break;
8614 #ifdef TARGET_NR_umount2
8615 case TARGET_NR_umount2:
8616 if (!(p = lock_user_string(arg1)))
8617 goto efault;
8618 ret = get_errno(umount2(p, arg2));
8619 unlock_user(p, arg1, 0);
8620 break;
8621 #endif
8622 #ifdef TARGET_NR_lock
8623 case TARGET_NR_lock:
8624 goto unimplemented;
8625 #endif
8626 case TARGET_NR_ioctl:
8627 ret = do_ioctl(arg1, arg2, arg3);
8628 break;
8629 #ifdef TARGET_NR_fcntl
8630 case TARGET_NR_fcntl:
8631 ret = do_fcntl(arg1, arg2, arg3);
8632 break;
8633 #endif
8634 #ifdef TARGET_NR_mpx
8635 case TARGET_NR_mpx:
8636 goto unimplemented;
8637 #endif
8638 case TARGET_NR_setpgid:
8639 ret = get_errno(setpgid(arg1, arg2));
8640 break;
8641 #ifdef TARGET_NR_ulimit
8642 case TARGET_NR_ulimit:
8643 goto unimplemented;
8644 #endif
8645 #ifdef TARGET_NR_oldolduname
8646 case TARGET_NR_oldolduname:
8647 goto unimplemented;
8648 #endif
8649 case TARGET_NR_umask:
8650 ret = get_errno(umask(arg1));
8651 break;
8652 case TARGET_NR_chroot:
8653 if (!(p = lock_user_string(arg1)))
8654 goto efault;
8655 ret = get_errno(chroot(p));
8656 unlock_user(p, arg1, 0);
8657 break;
8658 #ifdef TARGET_NR_ustat
8659 case TARGET_NR_ustat:
8660 goto unimplemented;
8661 #endif
8662 #ifdef TARGET_NR_dup2
8663 case TARGET_NR_dup2:
8664 ret = get_errno(dup2(arg1, arg2));
8665 if (ret >= 0) {
8666 fd_trans_dup(arg1, arg2);
8668 break;
8669 #endif
8670 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8671 case TARGET_NR_dup3:
8673 int host_flags;
8675 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8676 return -EINVAL;
8678 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8679 ret = get_errno(dup3(arg1, arg2, host_flags));
8680 if (ret >= 0) {
8681 fd_trans_dup(arg1, arg2);
8683 break;
8685 #endif
8686 #ifdef TARGET_NR_getppid /* not on alpha */
8687 case TARGET_NR_getppid:
8688 ret = get_errno(getppid());
8689 break;
8690 #endif
8691 #ifdef TARGET_NR_getpgrp
8692 case TARGET_NR_getpgrp:
8693 ret = get_errno(getpgrp());
8694 break;
8695 #endif
8696 case TARGET_NR_setsid:
8697 ret = get_errno(setsid());
8698 break;
8699 #ifdef TARGET_NR_sigaction
8700 case TARGET_NR_sigaction:
8702 #if defined(TARGET_ALPHA)
8703 struct target_sigaction act, oact, *pact = 0;
8704 struct target_old_sigaction *old_act;
8705 if (arg2) {
8706 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8707 goto efault;
8708 act._sa_handler = old_act->_sa_handler;
8709 target_siginitset(&act.sa_mask, old_act->sa_mask);
8710 act.sa_flags = old_act->sa_flags;
8711 act.sa_restorer = 0;
8712 unlock_user_struct(old_act, arg2, 0);
8713 pact = &act;
8715 ret = get_errno(do_sigaction(arg1, pact, &oact));
8716 if (!is_error(ret) && arg3) {
8717 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8718 goto efault;
8719 old_act->_sa_handler = oact._sa_handler;
8720 old_act->sa_mask = oact.sa_mask.sig[0];
8721 old_act->sa_flags = oact.sa_flags;
8722 unlock_user_struct(old_act, arg3, 1);
8724 #elif defined(TARGET_MIPS)
8725 struct target_sigaction act, oact, *pact, *old_act;
8727 if (arg2) {
8728 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8729 goto efault;
8730 act._sa_handler = old_act->_sa_handler;
8731 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8732 act.sa_flags = old_act->sa_flags;
8733 unlock_user_struct(old_act, arg2, 0);
8734 pact = &act;
8735 } else {
8736 pact = NULL;
8739 ret = get_errno(do_sigaction(arg1, pact, &oact));
8741 if (!is_error(ret) && arg3) {
8742 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8743 goto efault;
8744 old_act->_sa_handler = oact._sa_handler;
8745 old_act->sa_flags = oact.sa_flags;
8746 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8747 old_act->sa_mask.sig[1] = 0;
8748 old_act->sa_mask.sig[2] = 0;
8749 old_act->sa_mask.sig[3] = 0;
8750 unlock_user_struct(old_act, arg3, 1);
8752 #else
8753 struct target_old_sigaction *old_act;
8754 struct target_sigaction act, oact, *pact;
8755 if (arg2) {
8756 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8757 goto efault;
8758 act._sa_handler = old_act->_sa_handler;
8759 target_siginitset(&act.sa_mask, old_act->sa_mask);
8760 act.sa_flags = old_act->sa_flags;
8761 act.sa_restorer = old_act->sa_restorer;
8762 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8763 act.ka_restorer = 0;
8764 #endif
8765 unlock_user_struct(old_act, arg2, 0);
8766 pact = &act;
8767 } else {
8768 pact = NULL;
8770 ret = get_errno(do_sigaction(arg1, pact, &oact));
8771 if (!is_error(ret) && arg3) {
8772 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8773 goto efault;
8774 old_act->_sa_handler = oact._sa_handler;
8775 old_act->sa_mask = oact.sa_mask.sig[0];
8776 old_act->sa_flags = oact.sa_flags;
8777 old_act->sa_restorer = oact.sa_restorer;
8778 unlock_user_struct(old_act, arg3, 1);
8780 #endif
8782 break;
8783 #endif
8784 case TARGET_NR_rt_sigaction:
8786 #if defined(TARGET_ALPHA)
8787 /* For Alpha and SPARC this is a 5 argument syscall, with
8788 * a 'restorer' parameter which must be copied into the
8789 * sa_restorer field of the sigaction struct.
8790 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8791 * and arg5 is the sigsetsize.
8792 * Alpha also has a separate rt_sigaction struct that it uses
8793 * here; SPARC uses the usual sigaction struct.
8795 struct target_rt_sigaction *rt_act;
8796 struct target_sigaction act, oact, *pact = 0;
8798 if (arg4 != sizeof(target_sigset_t)) {
8799 ret = -TARGET_EINVAL;
8800 break;
8802 if (arg2) {
8803 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8804 goto efault;
8805 act._sa_handler = rt_act->_sa_handler;
8806 act.sa_mask = rt_act->sa_mask;
8807 act.sa_flags = rt_act->sa_flags;
8808 act.sa_restorer = arg5;
8809 unlock_user_struct(rt_act, arg2, 0);
8810 pact = &act;
8812 ret = get_errno(do_sigaction(arg1, pact, &oact));
8813 if (!is_error(ret) && arg3) {
8814 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8815 goto efault;
8816 rt_act->_sa_handler = oact._sa_handler;
8817 rt_act->sa_mask = oact.sa_mask;
8818 rt_act->sa_flags = oact.sa_flags;
8819 unlock_user_struct(rt_act, arg3, 1);
8821 #else
8822 #ifdef TARGET_SPARC
8823 target_ulong restorer = arg4;
8824 target_ulong sigsetsize = arg5;
8825 #else
8826 target_ulong sigsetsize = arg4;
8827 #endif
8828 struct target_sigaction *act;
8829 struct target_sigaction *oact;
8831 if (sigsetsize != sizeof(target_sigset_t)) {
8832 ret = -TARGET_EINVAL;
8833 break;
8835 if (arg2) {
8836 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8837 goto efault;
8839 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8840 act->ka_restorer = restorer;
8841 #endif
8842 } else {
8843 act = NULL;
8845 if (arg3) {
8846 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8847 ret = -TARGET_EFAULT;
8848 goto rt_sigaction_fail;
8850 } else
8851 oact = NULL;
8852 ret = get_errno(do_sigaction(arg1, act, oact));
8853 rt_sigaction_fail:
8854 if (act)
8855 unlock_user_struct(act, arg2, 0);
8856 if (oact)
8857 unlock_user_struct(oact, arg3, 1);
8858 #endif
8860 break;
8861 #ifdef TARGET_NR_sgetmask /* not on alpha */
8862 case TARGET_NR_sgetmask:
8864 sigset_t cur_set;
8865 abi_ulong target_set;
8866 ret = do_sigprocmask(0, NULL, &cur_set);
8867 if (!ret) {
8868 host_to_target_old_sigset(&target_set, &cur_set);
8869 ret = target_set;
8872 break;
8873 #endif
8874 #ifdef TARGET_NR_ssetmask /* not on alpha */
8875 case TARGET_NR_ssetmask:
8877 sigset_t set, oset;
8878 abi_ulong target_set = arg1;
8879 target_to_host_old_sigset(&set, &target_set);
8880 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8881 if (!ret) {
8882 host_to_target_old_sigset(&target_set, &oset);
8883 ret = target_set;
8886 break;
8887 #endif
8888 #ifdef TARGET_NR_sigprocmask
8889 case TARGET_NR_sigprocmask:
8891 #if defined(TARGET_ALPHA)
8892 sigset_t set, oldset;
8893 abi_ulong mask;
8894 int how;
8896 switch (arg1) {
8897 case TARGET_SIG_BLOCK:
8898 how = SIG_BLOCK;
8899 break;
8900 case TARGET_SIG_UNBLOCK:
8901 how = SIG_UNBLOCK;
8902 break;
8903 case TARGET_SIG_SETMASK:
8904 how = SIG_SETMASK;
8905 break;
8906 default:
8907 ret = -TARGET_EINVAL;
8908 goto fail;
8910 mask = arg2;
8911 target_to_host_old_sigset(&set, &mask);
8913 ret = do_sigprocmask(how, &set, &oldset);
8914 if (!is_error(ret)) {
8915 host_to_target_old_sigset(&mask, &oldset);
8916 ret = mask;
8917 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8919 #else
8920 sigset_t set, oldset, *set_ptr;
8921 int how;
8923 if (arg2) {
8924 switch (arg1) {
8925 case TARGET_SIG_BLOCK:
8926 how = SIG_BLOCK;
8927 break;
8928 case TARGET_SIG_UNBLOCK:
8929 how = SIG_UNBLOCK;
8930 break;
8931 case TARGET_SIG_SETMASK:
8932 how = SIG_SETMASK;
8933 break;
8934 default:
8935 ret = -TARGET_EINVAL;
8936 goto fail;
8938 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8939 goto efault;
8940 target_to_host_old_sigset(&set, p);
8941 unlock_user(p, arg2, 0);
8942 set_ptr = &set;
8943 } else {
8944 how = 0;
8945 set_ptr = NULL;
8947 ret = do_sigprocmask(how, set_ptr, &oldset);
8948 if (!is_error(ret) && arg3) {
8949 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8950 goto efault;
8951 host_to_target_old_sigset(p, &oldset);
8952 unlock_user(p, arg3, sizeof(target_sigset_t));
8954 #endif
8956 break;
8957 #endif
8958 case TARGET_NR_rt_sigprocmask:
8960 int how = arg1;
8961 sigset_t set, oldset, *set_ptr;
8963 if (arg4 != sizeof(target_sigset_t)) {
8964 ret = -TARGET_EINVAL;
8965 break;
8968 if (arg2) {
8969 switch(how) {
8970 case TARGET_SIG_BLOCK:
8971 how = SIG_BLOCK;
8972 break;
8973 case TARGET_SIG_UNBLOCK:
8974 how = SIG_UNBLOCK;
8975 break;
8976 case TARGET_SIG_SETMASK:
8977 how = SIG_SETMASK;
8978 break;
8979 default:
8980 ret = -TARGET_EINVAL;
8981 goto fail;
8983 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8984 goto efault;
8985 target_to_host_sigset(&set, p);
8986 unlock_user(p, arg2, 0);
8987 set_ptr = &set;
8988 } else {
8989 how = 0;
8990 set_ptr = NULL;
8992 ret = do_sigprocmask(how, set_ptr, &oldset);
8993 if (!is_error(ret) && arg3) {
8994 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8995 goto efault;
8996 host_to_target_sigset(p, &oldset);
8997 unlock_user(p, arg3, sizeof(target_sigset_t));
9000 break;
9001 #ifdef TARGET_NR_sigpending
9002 case TARGET_NR_sigpending:
9004 sigset_t set;
9005 ret = get_errno(sigpending(&set));
9006 if (!is_error(ret)) {
9007 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9008 goto efault;
9009 host_to_target_old_sigset(p, &set);
9010 unlock_user(p, arg1, sizeof(target_sigset_t));
9013 break;
9014 #endif
9015 case TARGET_NR_rt_sigpending:
9017 sigset_t set;
9019 /* Yes, this check is >, not != like most. We follow the kernel's
9020 * logic and it does it like this because it implements
9021 * NR_sigpending through the same code path, and in that case
9022 * the old_sigset_t is smaller in size.
9024 if (arg2 > sizeof(target_sigset_t)) {
9025 ret = -TARGET_EINVAL;
9026 break;
9029 ret = get_errno(sigpending(&set));
9030 if (!is_error(ret)) {
9031 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9032 goto efault;
9033 host_to_target_sigset(p, &set);
9034 unlock_user(p, arg1, sizeof(target_sigset_t));
9037 break;
9038 #ifdef TARGET_NR_sigsuspend
9039 case TARGET_NR_sigsuspend:
9041 TaskState *ts = cpu->opaque;
9042 #if defined(TARGET_ALPHA)
9043 abi_ulong mask = arg1;
9044 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9045 #else
9046 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9047 goto efault;
9048 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9049 unlock_user(p, arg1, 0);
9050 #endif
9051 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9052 SIGSET_T_SIZE));
9053 if (ret != -TARGET_ERESTARTSYS) {
9054 ts->in_sigsuspend = 1;
9057 break;
9058 #endif
9059 case TARGET_NR_rt_sigsuspend:
9061 TaskState *ts = cpu->opaque;
9063 if (arg2 != sizeof(target_sigset_t)) {
9064 ret = -TARGET_EINVAL;
9065 break;
9067 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9068 goto efault;
9069 target_to_host_sigset(&ts->sigsuspend_mask, p);
9070 unlock_user(p, arg1, 0);
9071 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9072 SIGSET_T_SIZE));
9073 if (ret != -TARGET_ERESTARTSYS) {
9074 ts->in_sigsuspend = 1;
9077 break;
9078 case TARGET_NR_rt_sigtimedwait:
9080 sigset_t set;
9081 struct timespec uts, *puts;
9082 siginfo_t uinfo;
9084 if (arg4 != sizeof(target_sigset_t)) {
9085 ret = -TARGET_EINVAL;
9086 break;
9089 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9090 goto efault;
9091 target_to_host_sigset(&set, p);
9092 unlock_user(p, arg1, 0);
9093 if (arg3) {
9094 puts = &uts;
9095 target_to_host_timespec(puts, arg3);
9096 } else {
9097 puts = NULL;
9099 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9100 SIGSET_T_SIZE));
9101 if (!is_error(ret)) {
9102 if (arg2) {
9103 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9105 if (!p) {
9106 goto efault;
9108 host_to_target_siginfo(p, &uinfo);
9109 unlock_user(p, arg2, sizeof(target_siginfo_t));
9111 ret = host_to_target_signal(ret);
9114 break;
9115 case TARGET_NR_rt_sigqueueinfo:
9117 siginfo_t uinfo;
9119 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9120 if (!p) {
9121 goto efault;
9123 target_to_host_siginfo(&uinfo, p);
9124 unlock_user(p, arg3, 0);
9125 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9127 break;
9128 case TARGET_NR_rt_tgsigqueueinfo:
9130 siginfo_t uinfo;
9132 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9133 if (!p) {
9134 goto efault;
9136 target_to_host_siginfo(&uinfo, p);
9137 unlock_user(p, arg4, 0);
9138 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9140 break;
9141 #ifdef TARGET_NR_sigreturn
9142 case TARGET_NR_sigreturn:
9143 if (block_signals()) {
9144 ret = -TARGET_ERESTARTSYS;
9145 } else {
9146 ret = do_sigreturn(cpu_env);
9148 break;
9149 #endif
9150 case TARGET_NR_rt_sigreturn:
9151 if (block_signals()) {
9152 ret = -TARGET_ERESTARTSYS;
9153 } else {
9154 ret = do_rt_sigreturn(cpu_env);
9156 break;
9157 case TARGET_NR_sethostname:
9158 if (!(p = lock_user_string(arg1)))
9159 goto efault;
9160 ret = get_errno(sethostname(p, arg2));
9161 unlock_user(p, arg1, 0);
9162 break;
9163 case TARGET_NR_setrlimit:
9165 int resource = target_to_host_resource(arg1);
9166 struct target_rlimit *target_rlim;
9167 struct rlimit rlim;
9168 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9169 goto efault;
9170 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9171 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9172 unlock_user_struct(target_rlim, arg2, 0);
9173 ret = get_errno(setrlimit(resource, &rlim));
9175 break;
9176 case TARGET_NR_getrlimit:
9178 int resource = target_to_host_resource(arg1);
9179 struct target_rlimit *target_rlim;
9180 struct rlimit rlim;
9182 ret = get_errno(getrlimit(resource, &rlim));
9183 if (!is_error(ret)) {
9184 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9185 goto efault;
9186 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9187 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9188 unlock_user_struct(target_rlim, arg2, 1);
9191 break;
9192 case TARGET_NR_getrusage:
9194 struct rusage rusage;
9195 ret = get_errno(getrusage(arg1, &rusage));
9196 if (!is_error(ret)) {
9197 ret = host_to_target_rusage(arg2, &rusage);
9200 break;
9201 case TARGET_NR_gettimeofday:
9203 struct timeval tv;
9204 ret = get_errno(gettimeofday(&tv, NULL));
9205 if (!is_error(ret)) {
9206 if (copy_to_user_timeval(arg1, &tv))
9207 goto efault;
9210 break;
9211 case TARGET_NR_settimeofday:
9213 struct timeval tv, *ptv = NULL;
9214 struct timezone tz, *ptz = NULL;
9216 if (arg1) {
9217 if (copy_from_user_timeval(&tv, arg1)) {
9218 goto efault;
9220 ptv = &tv;
9223 if (arg2) {
9224 if (copy_from_user_timezone(&tz, arg2)) {
9225 goto efault;
9227 ptz = &tz;
9230 ret = get_errno(settimeofday(ptv, ptz));
9232 break;
9233 #if defined(TARGET_NR_select)
9234 case TARGET_NR_select:
9235 #if defined(TARGET_WANT_NI_OLD_SELECT)
9236 /* some architectures used to have old_select here
9237 * but now ENOSYS it.
9239 ret = -TARGET_ENOSYS;
9240 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9241 ret = do_old_select(arg1);
9242 #else
9243 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9244 #endif
9245 break;
9246 #endif
9247 #ifdef TARGET_NR_pselect6
9248 case TARGET_NR_pselect6:
9250 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
9251 fd_set rfds, wfds, efds;
9252 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
9253 struct timespec ts, *ts_ptr;
9256 * The 6th arg is actually two args smashed together,
9257 * so we cannot use the C library.
9259 sigset_t set;
9260 struct {
9261 sigset_t *set;
9262 size_t size;
9263 } sig, *sig_ptr;
9265 abi_ulong arg_sigset, arg_sigsize, *arg7;
9266 target_sigset_t *target_sigset;
9268 n = arg1;
9269 rfd_addr = arg2;
9270 wfd_addr = arg3;
9271 efd_addr = arg4;
9272 ts_addr = arg5;
9274 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
9275 if (ret) {
9276 goto fail;
9278 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
9279 if (ret) {
9280 goto fail;
9282 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
9283 if (ret) {
9284 goto fail;
9288 * This takes a timespec, and not a timeval, so we cannot
9289 * use the do_select() helper ...
9291 if (ts_addr) {
9292 if (target_to_host_timespec(&ts, ts_addr)) {
9293 goto efault;
9295 ts_ptr = &ts;
9296 } else {
9297 ts_ptr = NULL;
9300 /* Extract the two packed args for the sigset */
9301 if (arg6) {
9302 sig_ptr = &sig;
9303 sig.size = SIGSET_T_SIZE;
9305 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
9306 if (!arg7) {
9307 goto efault;
9309 arg_sigset = tswapal(arg7[0]);
9310 arg_sigsize = tswapal(arg7[1]);
9311 unlock_user(arg7, arg6, 0);
9313 if (arg_sigset) {
9314 sig.set = &set;
9315 if (arg_sigsize != sizeof(*target_sigset)) {
9316 /* Like the kernel, we enforce correct size sigsets */
9317 ret = -TARGET_EINVAL;
9318 goto fail;
9320 target_sigset = lock_user(VERIFY_READ, arg_sigset,
9321 sizeof(*target_sigset), 1);
9322 if (!target_sigset) {
9323 goto efault;
9325 target_to_host_sigset(&set, target_sigset);
9326 unlock_user(target_sigset, arg_sigset, 0);
9327 } else {
9328 sig.set = NULL;
9330 } else {
9331 sig_ptr = NULL;
9334 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
9335 ts_ptr, sig_ptr));
9337 if (!is_error(ret)) {
9338 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
9339 goto efault;
9340 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
9341 goto efault;
9342 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
9343 goto efault;
9345 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
9346 goto efault;
9349 break;
9350 #endif
9351 #ifdef TARGET_NR_symlink
9352 case TARGET_NR_symlink:
9354 void *p2;
9355 p = lock_user_string(arg1);
9356 p2 = lock_user_string(arg2);
9357 if (!p || !p2)
9358 ret = -TARGET_EFAULT;
9359 else
9360 ret = get_errno(symlink(p, p2));
9361 unlock_user(p2, arg2, 0);
9362 unlock_user(p, arg1, 0);
9364 break;
9365 #endif
9366 #if defined(TARGET_NR_symlinkat)
9367 case TARGET_NR_symlinkat:
9369 void *p2;
9370 p = lock_user_string(arg1);
9371 p2 = lock_user_string(arg3);
9372 if (!p || !p2)
9373 ret = -TARGET_EFAULT;
9374 else
9375 ret = get_errno(symlinkat(p, arg2, p2));
9376 unlock_user(p2, arg3, 0);
9377 unlock_user(p, arg1, 0);
9379 break;
9380 #endif
9381 #ifdef TARGET_NR_oldlstat
9382 case TARGET_NR_oldlstat:
9383 goto unimplemented;
9384 #endif
9385 #ifdef TARGET_NR_readlink
9386 case TARGET_NR_readlink:
9388 void *p2;
9389 p = lock_user_string(arg1);
9390 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9391 if (!p || !p2) {
9392 ret = -TARGET_EFAULT;
9393 } else if (!arg3) {
9394 /* Short circuit this for the magic exe check. */
9395 ret = -TARGET_EINVAL;
9396 } else if (is_proc_myself((const char *)p, "exe")) {
9397 char real[PATH_MAX], *temp;
9398 temp = realpath(exec_path, real);
9399 /* Return value is # of bytes that we wrote to the buffer. */
9400 if (temp == NULL) {
9401 ret = get_errno(-1);
9402 } else {
9403 /* Don't worry about sign mismatch as earlier mapping
9404 * logic would have thrown a bad address error. */
9405 ret = MIN(strlen(real), arg3);
9406 /* We cannot NUL terminate the string. */
9407 memcpy(p2, real, ret);
9409 } else {
9410 ret = get_errno(readlink(path(p), p2, arg3));
9412 unlock_user(p2, arg2, ret);
9413 unlock_user(p, arg1, 0);
9415 break;
9416 #endif
9417 #if defined(TARGET_NR_readlinkat)
9418 case TARGET_NR_readlinkat:
9420 void *p2;
9421 p = lock_user_string(arg2);
9422 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9423 if (!p || !p2) {
9424 ret = -TARGET_EFAULT;
9425 } else if (is_proc_myself((const char *)p, "exe")) {
9426 char real[PATH_MAX], *temp;
9427 temp = realpath(exec_path, real);
9428 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
9429 snprintf((char *)p2, arg4, "%s", real);
9430 } else {
9431 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9433 unlock_user(p2, arg3, ret);
9434 unlock_user(p, arg2, 0);
9436 break;
9437 #endif
9438 #ifdef TARGET_NR_uselib
9439 case TARGET_NR_uselib:
9440 goto unimplemented;
9441 #endif
9442 #ifdef TARGET_NR_swapon
9443 case TARGET_NR_swapon:
9444 if (!(p = lock_user_string(arg1)))
9445 goto efault;
9446 ret = get_errno(swapon(p, arg2));
9447 unlock_user(p, arg1, 0);
9448 break;
9449 #endif
9450 case TARGET_NR_reboot:
9451 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9452 /* arg4 must be ignored in all other cases */
9453 p = lock_user_string(arg4);
9454 if (!p) {
9455 goto efault;
9457 ret = get_errno(reboot(arg1, arg2, arg3, p));
9458 unlock_user(p, arg4, 0);
9459 } else {
9460 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9462 break;
9463 #ifdef TARGET_NR_readdir
9464 case TARGET_NR_readdir:
9465 goto unimplemented;
9466 #endif
9467 #ifdef TARGET_NR_mmap
9468 case TARGET_NR_mmap:
9469 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9470 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9471 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9472 || defined(TARGET_S390X)
9474 abi_ulong *v;
9475 abi_ulong v1, v2, v3, v4, v5, v6;
9476 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9477 goto efault;
9478 v1 = tswapal(v[0]);
9479 v2 = tswapal(v[1]);
9480 v3 = tswapal(v[2]);
9481 v4 = tswapal(v[3]);
9482 v5 = tswapal(v[4]);
9483 v6 = tswapal(v[5]);
9484 unlock_user(v, arg1, 0);
9485 ret = get_errno(target_mmap(v1, v2, v3,
9486 target_to_host_bitmask(v4, mmap_flags_tbl),
9487 v5, v6));
9489 #else
9490 ret = get_errno(target_mmap(arg1, arg2, arg3,
9491 target_to_host_bitmask(arg4, mmap_flags_tbl),
9492 arg5,
9493 arg6));
9494 #endif
9495 break;
9496 #endif
9497 #ifdef TARGET_NR_mmap2
9498 case TARGET_NR_mmap2:
9499 #ifndef MMAP_SHIFT
9500 #define MMAP_SHIFT 12
9501 #endif
9502 ret = get_errno(target_mmap(arg1, arg2, arg3,
9503 target_to_host_bitmask(arg4, mmap_flags_tbl),
9504 arg5,
9505 arg6 << MMAP_SHIFT));
9506 break;
9507 #endif
9508 case TARGET_NR_munmap:
9509 ret = get_errno(target_munmap(arg1, arg2));
9510 break;
9511 case TARGET_NR_mprotect:
9513 TaskState *ts = cpu->opaque;
9514 /* Special hack to detect libc making the stack executable. */
9515 if ((arg3 & PROT_GROWSDOWN)
9516 && arg1 >= ts->info->stack_limit
9517 && arg1 <= ts->info->start_stack) {
9518 arg3 &= ~PROT_GROWSDOWN;
9519 arg2 = arg2 + arg1 - ts->info->stack_limit;
9520 arg1 = ts->info->stack_limit;
9523 ret = get_errno(target_mprotect(arg1, arg2, arg3));
9524 break;
9525 #ifdef TARGET_NR_mremap
9526 case TARGET_NR_mremap:
9527 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9528 break;
9529 #endif
9530 /* ??? msync/mlock/munlock are broken for softmmu. */
9531 #ifdef TARGET_NR_msync
9532 case TARGET_NR_msync:
9533 ret = get_errno(msync(g2h(arg1), arg2, arg3));
9534 break;
9535 #endif
9536 #ifdef TARGET_NR_mlock
9537 case TARGET_NR_mlock:
9538 ret = get_errno(mlock(g2h(arg1), arg2));
9539 break;
9540 #endif
9541 #ifdef TARGET_NR_munlock
9542 case TARGET_NR_munlock:
9543 ret = get_errno(munlock(g2h(arg1), arg2));
9544 break;
9545 #endif
9546 #ifdef TARGET_NR_mlockall
9547 case TARGET_NR_mlockall:
9548 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9549 break;
9550 #endif
9551 #ifdef TARGET_NR_munlockall
9552 case TARGET_NR_munlockall:
9553 ret = get_errno(munlockall());
9554 break;
9555 #endif
9556 case TARGET_NR_truncate:
9557 if (!(p = lock_user_string(arg1)))
9558 goto efault;
9559 ret = get_errno(truncate(p, arg2));
9560 unlock_user(p, arg1, 0);
9561 break;
9562 case TARGET_NR_ftruncate:
9563 ret = get_errno(ftruncate(arg1, arg2));
9564 break;
9565 case TARGET_NR_fchmod:
9566 ret = get_errno(fchmod(arg1, arg2));
9567 break;
9568 #if defined(TARGET_NR_fchmodat)
9569 case TARGET_NR_fchmodat:
9570 if (!(p = lock_user_string(arg2)))
9571 goto efault;
9572 ret = get_errno(fchmodat(arg1, p, arg3, 0));
9573 unlock_user(p, arg2, 0);
9574 break;
9575 #endif
9576 case TARGET_NR_getpriority:
9577 /* Note that negative values are valid for getpriority, so we must
9578 differentiate based on errno settings. */
9579 errno = 0;
9580 ret = getpriority(arg1, arg2);
9581 if (ret == -1 && errno != 0) {
9582 ret = -host_to_target_errno(errno);
9583 break;
9585 #ifdef TARGET_ALPHA
9586 /* Return value is the unbiased priority. Signal no error. */
9587 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9588 #else
9589 /* Return value is a biased priority to avoid negative numbers. */
9590 ret = 20 - ret;
9591 #endif
9592 break;
9593 case TARGET_NR_setpriority:
9594 ret = get_errno(setpriority(arg1, arg2, arg3));
9595 break;
9596 #ifdef TARGET_NR_profil
9597 case TARGET_NR_profil:
9598 goto unimplemented;
9599 #endif
9600 case TARGET_NR_statfs:
9601 if (!(p = lock_user_string(arg1)))
9602 goto efault;
9603 ret = get_errno(statfs(path(p), &stfs));
9604 unlock_user(p, arg1, 0);
9605 convert_statfs:
9606 if (!is_error(ret)) {
9607 struct target_statfs *target_stfs;
9609 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9610 goto efault;
9611 __put_user(stfs.f_type, &target_stfs->f_type);
9612 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9613 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9614 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9615 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9616 __put_user(stfs.f_files, &target_stfs->f_files);
9617 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9618 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9619 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9620 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9621 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9622 #ifdef _STATFS_F_FLAGS
9623 __put_user(stfs.f_flags, &target_stfs->f_flags);
9624 #else
9625 __put_user(0, &target_stfs->f_flags);
9626 #endif
9627 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9628 unlock_user_struct(target_stfs, arg2, 1);
9630 break;
9631 case TARGET_NR_fstatfs:
9632 ret = get_errno(fstatfs(arg1, &stfs));
9633 goto convert_statfs;
9634 #ifdef TARGET_NR_statfs64
9635 case TARGET_NR_statfs64:
9636 if (!(p = lock_user_string(arg1)))
9637 goto efault;
9638 ret = get_errno(statfs(path(p), &stfs));
9639 unlock_user(p, arg1, 0);
9640 convert_statfs64:
9641 if (!is_error(ret)) {
9642 struct target_statfs64 *target_stfs;
9644 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9645 goto efault;
9646 __put_user(stfs.f_type, &target_stfs->f_type);
9647 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9648 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9649 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9650 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9651 __put_user(stfs.f_files, &target_stfs->f_files);
9652 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9653 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9654 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9655 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9656 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9657 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9658 unlock_user_struct(target_stfs, arg3, 1);
9660 break;
9661 case TARGET_NR_fstatfs64:
9662 ret = get_errno(fstatfs(arg1, &stfs));
9663 goto convert_statfs64;
9664 #endif
9665 #ifdef TARGET_NR_ioperm
9666 case TARGET_NR_ioperm:
9667 goto unimplemented;
9668 #endif
9669 #ifdef TARGET_NR_socketcall
9670 case TARGET_NR_socketcall:
9671 ret = do_socketcall(arg1, arg2);
9672 break;
9673 #endif
9674 #ifdef TARGET_NR_accept
9675 case TARGET_NR_accept:
9676 ret = do_accept4(arg1, arg2, arg3, 0);
9677 break;
9678 #endif
9679 #ifdef TARGET_NR_accept4
9680 case TARGET_NR_accept4:
9681 ret = do_accept4(arg1, arg2, arg3, arg4);
9682 break;
9683 #endif
9684 #ifdef TARGET_NR_bind
9685 case TARGET_NR_bind:
9686 ret = do_bind(arg1, arg2, arg3);
9687 break;
9688 #endif
9689 #ifdef TARGET_NR_connect
9690 case TARGET_NR_connect:
9691 ret = do_connect(arg1, arg2, arg3);
9692 break;
9693 #endif
9694 #ifdef TARGET_NR_getpeername
9695 case TARGET_NR_getpeername:
9696 ret = do_getpeername(arg1, arg2, arg3);
9697 break;
9698 #endif
9699 #ifdef TARGET_NR_getsockname
9700 case TARGET_NR_getsockname:
9701 ret = do_getsockname(arg1, arg2, arg3);
9702 break;
9703 #endif
9704 #ifdef TARGET_NR_getsockopt
9705 case TARGET_NR_getsockopt:
9706 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9707 break;
9708 #endif
9709 #ifdef TARGET_NR_listen
9710 case TARGET_NR_listen:
9711 ret = get_errno(listen(arg1, arg2));
9712 break;
9713 #endif
9714 #ifdef TARGET_NR_recv
9715 case TARGET_NR_recv:
9716 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9717 break;
9718 #endif
9719 #ifdef TARGET_NR_recvfrom
9720 case TARGET_NR_recvfrom:
9721 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9722 break;
9723 #endif
9724 #ifdef TARGET_NR_recvmsg
9725 case TARGET_NR_recvmsg:
9726 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
9727 break;
9728 #endif
9729 #ifdef TARGET_NR_send
9730 case TARGET_NR_send:
9731 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9732 break;
9733 #endif
9734 #ifdef TARGET_NR_sendmsg
9735 case TARGET_NR_sendmsg:
9736 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
9737 break;
9738 #endif
9739 #ifdef TARGET_NR_sendmmsg
9740 case TARGET_NR_sendmmsg:
9741 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9742 break;
9743 case TARGET_NR_recvmmsg:
9744 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9745 break;
9746 #endif
9747 #ifdef TARGET_NR_sendto
9748 case TARGET_NR_sendto:
9749 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9750 break;
9751 #endif
9752 #ifdef TARGET_NR_shutdown
9753 case TARGET_NR_shutdown:
9754 ret = get_errno(shutdown(arg1, arg2));
9755 break;
9756 #endif
9757 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9758 case TARGET_NR_getrandom:
9759 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9760 if (!p) {
9761 goto efault;
9763 ret = get_errno(getrandom(p, arg2, arg3));
9764 unlock_user(p, arg1, ret);
9765 break;
9766 #endif
9767 #ifdef TARGET_NR_socket
9768 case TARGET_NR_socket:
9769 ret = do_socket(arg1, arg2, arg3);
9770 break;
9771 #endif
9772 #ifdef TARGET_NR_socketpair
9773 case TARGET_NR_socketpair:
9774 ret = do_socketpair(arg1, arg2, arg3, arg4);
9775 break;
9776 #endif
9777 #ifdef TARGET_NR_setsockopt
9778 case TARGET_NR_setsockopt:
9779 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9780 break;
9781 #endif
9782 #if defined(TARGET_NR_syslog)
9783 case TARGET_NR_syslog:
9785 int len = arg2;
9787 switch (arg1) {
9788 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
9789 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
9790 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
9791 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
9792 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
9793 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9794 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
9795 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
9797 ret = get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9799 break;
9800 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
9801 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
9802 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
9804 ret = -TARGET_EINVAL;
9805 if (len < 0) {
9806 goto fail;
9808 ret = 0;
9809 if (len == 0) {
9810 break;
9812 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9813 if (!p) {
9814 ret = -TARGET_EFAULT;
9815 goto fail;
9817 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9818 unlock_user(p, arg2, arg3);
9820 break;
9821 default:
9822 ret = -EINVAL;
9823 break;
9826 break;
9827 #endif
9828 case TARGET_NR_setitimer:
9830 struct itimerval value, ovalue, *pvalue;
9832 if (arg2) {
9833 pvalue = &value;
9834 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9835 || copy_from_user_timeval(&pvalue->it_value,
9836 arg2 + sizeof(struct target_timeval)))
9837 goto efault;
9838 } else {
9839 pvalue = NULL;
9841 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9842 if (!is_error(ret) && arg3) {
9843 if (copy_to_user_timeval(arg3,
9844 &ovalue.it_interval)
9845 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9846 &ovalue.it_value))
9847 goto efault;
9850 break;
9851 case TARGET_NR_getitimer:
9853 struct itimerval value;
9855 ret = get_errno(getitimer(arg1, &value));
9856 if (!is_error(ret) && arg2) {
9857 if (copy_to_user_timeval(arg2,
9858 &value.it_interval)
9859 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9860 &value.it_value))
9861 goto efault;
9864 break;
9865 #ifdef TARGET_NR_stat
9866 case TARGET_NR_stat:
9867 if (!(p = lock_user_string(arg1)))
9868 goto efault;
9869 ret = get_errno(stat(path(p), &st));
9870 unlock_user(p, arg1, 0);
9871 goto do_stat;
9872 #endif
9873 #ifdef TARGET_NR_lstat
9874 case TARGET_NR_lstat:
9875 if (!(p = lock_user_string(arg1)))
9876 goto efault;
9877 ret = get_errno(lstat(path(p), &st));
9878 unlock_user(p, arg1, 0);
9879 goto do_stat;
9880 #endif
9881 case TARGET_NR_fstat:
9883 ret = get_errno(fstat(arg1, &st));
9884 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9885 do_stat:
9886 #endif
9887 if (!is_error(ret)) {
9888 struct target_stat *target_st;
9890 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9891 goto efault;
9892 memset(target_st, 0, sizeof(*target_st));
9893 __put_user(st.st_dev, &target_st->st_dev);
9894 __put_user(st.st_ino, &target_st->st_ino);
9895 __put_user(st.st_mode, &target_st->st_mode);
9896 __put_user(st.st_uid, &target_st->st_uid);
9897 __put_user(st.st_gid, &target_st->st_gid);
9898 __put_user(st.st_nlink, &target_st->st_nlink);
9899 __put_user(st.st_rdev, &target_st->st_rdev);
9900 __put_user(st.st_size, &target_st->st_size);
9901 __put_user(st.st_blksize, &target_st->st_blksize);
9902 __put_user(st.st_blocks, &target_st->st_blocks);
9903 __put_user(st.st_atime, &target_st->target_st_atime);
9904 __put_user(st.st_mtime, &target_st->target_st_mtime);
9905 __put_user(st.st_ctime, &target_st->target_st_ctime);
9906 unlock_user_struct(target_st, arg2, 1);
9909 break;
9910 #ifdef TARGET_NR_olduname
9911 case TARGET_NR_olduname:
9912 goto unimplemented;
9913 #endif
9914 #ifdef TARGET_NR_iopl
9915 case TARGET_NR_iopl:
9916 goto unimplemented;
9917 #endif
9918 case TARGET_NR_vhangup:
9919 ret = get_errno(vhangup());
9920 break;
9921 #ifdef TARGET_NR_idle
9922 case TARGET_NR_idle:
9923 goto unimplemented;
9924 #endif
9925 #ifdef TARGET_NR_syscall
9926 case TARGET_NR_syscall:
9927 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9928 arg6, arg7, arg8, 0);
9929 break;
9930 #endif
9931 case TARGET_NR_wait4:
9933 int status;
9934 abi_long status_ptr = arg2;
9935 struct rusage rusage, *rusage_ptr;
9936 abi_ulong target_rusage = arg4;
9937 abi_long rusage_err;
9938 if (target_rusage)
9939 rusage_ptr = &rusage;
9940 else
9941 rusage_ptr = NULL;
9942 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9943 if (!is_error(ret)) {
9944 if (status_ptr && ret) {
9945 status = host_to_target_waitstatus(status);
9946 if (put_user_s32(status, status_ptr))
9947 goto efault;
9949 if (target_rusage) {
9950 rusage_err = host_to_target_rusage(target_rusage, &rusage);
9951 if (rusage_err) {
9952 ret = rusage_err;
9957 break;
9958 #ifdef TARGET_NR_swapoff
9959 case TARGET_NR_swapoff:
9960 if (!(p = lock_user_string(arg1)))
9961 goto efault;
9962 ret = get_errno(swapoff(p));
9963 unlock_user(p, arg1, 0);
9964 break;
9965 #endif
9966 case TARGET_NR_sysinfo:
9968 struct target_sysinfo *target_value;
9969 struct sysinfo value;
9970 ret = get_errno(sysinfo(&value));
9971 if (!is_error(ret) && arg1)
9973 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9974 goto efault;
9975 __put_user(value.uptime, &target_value->uptime);
9976 __put_user(value.loads[0], &target_value->loads[0]);
9977 __put_user(value.loads[1], &target_value->loads[1]);
9978 __put_user(value.loads[2], &target_value->loads[2]);
9979 __put_user(value.totalram, &target_value->totalram);
9980 __put_user(value.freeram, &target_value->freeram);
9981 __put_user(value.sharedram, &target_value->sharedram);
9982 __put_user(value.bufferram, &target_value->bufferram);
9983 __put_user(value.totalswap, &target_value->totalswap);
9984 __put_user(value.freeswap, &target_value->freeswap);
9985 __put_user(value.procs, &target_value->procs);
9986 __put_user(value.totalhigh, &target_value->totalhigh);
9987 __put_user(value.freehigh, &target_value->freehigh);
9988 __put_user(value.mem_unit, &target_value->mem_unit);
9989 unlock_user_struct(target_value, arg1, 1);
9992 break;
9993 #ifdef TARGET_NR_ipc
9994 case TARGET_NR_ipc:
9995 ret = do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9996 break;
9997 #endif
9998 #ifdef TARGET_NR_semget
9999 case TARGET_NR_semget:
10000 ret = get_errno(semget(arg1, arg2, arg3));
10001 break;
10002 #endif
10003 #ifdef TARGET_NR_semop
10004 case TARGET_NR_semop:
10005 ret = do_semop(arg1, arg2, arg3);
10006 break;
10007 #endif
10008 #ifdef TARGET_NR_semctl
10009 case TARGET_NR_semctl:
10010 ret = do_semctl(arg1, arg2, arg3, arg4);
10011 break;
10012 #endif
10013 #ifdef TARGET_NR_msgctl
10014 case TARGET_NR_msgctl:
10015 ret = do_msgctl(arg1, arg2, arg3);
10016 break;
10017 #endif
10018 #ifdef TARGET_NR_msgget
10019 case TARGET_NR_msgget:
10020 ret = get_errno(msgget(arg1, arg2));
10021 break;
10022 #endif
10023 #ifdef TARGET_NR_msgrcv
10024 case TARGET_NR_msgrcv:
10025 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10026 break;
10027 #endif
10028 #ifdef TARGET_NR_msgsnd
10029 case TARGET_NR_msgsnd:
10030 ret = do_msgsnd(arg1, arg2, arg3, arg4);
10031 break;
10032 #endif
10033 #ifdef TARGET_NR_shmget
10034 case TARGET_NR_shmget:
10035 ret = get_errno(shmget(arg1, arg2, arg3));
10036 break;
10037 #endif
10038 #ifdef TARGET_NR_shmctl
10039 case TARGET_NR_shmctl:
10040 ret = do_shmctl(arg1, arg2, arg3);
10041 break;
10042 #endif
10043 #ifdef TARGET_NR_shmat
10044 case TARGET_NR_shmat:
10045 ret = do_shmat(cpu_env, arg1, arg2, arg3);
10046 break;
10047 #endif
10048 #ifdef TARGET_NR_shmdt
10049 case TARGET_NR_shmdt:
10050 ret = do_shmdt(arg1);
10051 break;
10052 #endif
10053 case TARGET_NR_fsync:
10054 ret = get_errno(fsync(arg1));
10055 break;
10056 case TARGET_NR_clone:
10057 /* Linux manages to have three different orderings for its
10058 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10059 * match the kernel's CONFIG_CLONE_* settings.
10060 * Microblaze is further special in that it uses a sixth
10061 * implicit argument to clone for the TLS pointer.
10063 #if defined(TARGET_MICROBLAZE)
10064 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10065 #elif defined(TARGET_CLONE_BACKWARDS)
10066 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10067 #elif defined(TARGET_CLONE_BACKWARDS2)
10068 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10069 #else
10070 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10071 #endif
10072 break;
10073 #ifdef __NR_exit_group
10074 /* new thread calls */
10075 case TARGET_NR_exit_group:
10076 #ifdef TARGET_GPROF
10077 _mcleanup();
10078 #endif
10079 gdb_exit(cpu_env, arg1);
10080 ret = get_errno(exit_group(arg1));
10081 break;
10082 #endif
10083 case TARGET_NR_setdomainname:
10084 if (!(p = lock_user_string(arg1)))
10085 goto efault;
10086 ret = get_errno(setdomainname(p, arg2));
10087 unlock_user(p, arg1, 0);
10088 break;
10089 case TARGET_NR_uname:
10090 /* no need to transcode because we use the linux syscall */
10092 struct new_utsname * buf;
10094 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10095 goto efault;
10096 ret = get_errno(sys_uname(buf));
10097 if (!is_error(ret)) {
10098 /* Overwrite the native machine name with whatever is being
10099 emulated. */
10100 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
10101 /* Allow the user to override the reported release. */
10102 if (qemu_uname_release && *qemu_uname_release) {
10103 g_strlcpy(buf->release, qemu_uname_release,
10104 sizeof(buf->release));
10107 unlock_user_struct(buf, arg1, 1);
10109 break;
10110 #ifdef TARGET_I386
10111 case TARGET_NR_modify_ldt:
10112 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
10113 break;
10114 #if !defined(TARGET_X86_64)
10115 case TARGET_NR_vm86old:
10116 goto unimplemented;
10117 case TARGET_NR_vm86:
10118 ret = do_vm86(cpu_env, arg1, arg2);
10119 break;
10120 #endif
10121 #endif
10122 case TARGET_NR_adjtimex:
10124 struct timex host_buf;
10126 if (target_to_host_timex(&host_buf, arg1) != 0) {
10127 goto efault;
10129 ret = get_errno(adjtimex(&host_buf));
10130 if (!is_error(ret)) {
10131 if (host_to_target_timex(arg1, &host_buf) != 0) {
10132 goto efault;
10136 break;
10137 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10138 case TARGET_NR_clock_adjtime:
10140 struct timex htx, *phtx = &htx;
10142 if (target_to_host_timex(phtx, arg2) != 0) {
10143 goto efault;
10145 ret = get_errno(clock_adjtime(arg1, phtx));
10146 if (!is_error(ret) && phtx) {
10147 if (host_to_target_timex(arg2, phtx) != 0) {
10148 goto efault;
10152 break;
10153 #endif
10154 #ifdef TARGET_NR_create_module
10155 case TARGET_NR_create_module:
10156 #endif
10157 case TARGET_NR_init_module:
10158 case TARGET_NR_delete_module:
10159 #ifdef TARGET_NR_get_kernel_syms
10160 case TARGET_NR_get_kernel_syms:
10161 #endif
10162 goto unimplemented;
10163 case TARGET_NR_quotactl:
10164 goto unimplemented;
10165 case TARGET_NR_getpgid:
10166 ret = get_errno(getpgid(arg1));
10167 break;
10168 case TARGET_NR_fchdir:
10169 ret = get_errno(fchdir(arg1));
10170 break;
10171 #ifdef TARGET_NR_bdflush /* not on x86_64 */
10172 case TARGET_NR_bdflush:
10173 goto unimplemented;
10174 #endif
10175 #ifdef TARGET_NR_sysfs
10176 case TARGET_NR_sysfs:
10177 goto unimplemented;
10178 #endif
10179 case TARGET_NR_personality:
10180 ret = get_errno(personality(arg1));
10181 break;
10182 #ifdef TARGET_NR_afs_syscall
10183 case TARGET_NR_afs_syscall:
10184 goto unimplemented;
10185 #endif
10186 #ifdef TARGET_NR__llseek /* Not on alpha */
10187 case TARGET_NR__llseek:
10189 int64_t res;
10190 #if !defined(__NR_llseek)
10191 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10192 if (res == -1) {
10193 ret = get_errno(res);
10194 } else {
10195 ret = 0;
10197 #else
10198 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10199 #endif
10200 if ((ret == 0) && put_user_s64(res, arg4)) {
10201 goto efault;
10204 break;
10205 #endif
10206 #ifdef TARGET_NR_getdents
10207 case TARGET_NR_getdents:
10208 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10209 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10211 struct target_dirent *target_dirp;
10212 struct linux_dirent *dirp;
10213 abi_long count = arg3;
10215 dirp = g_try_malloc(count);
10216 if (!dirp) {
10217 ret = -TARGET_ENOMEM;
10218 goto fail;
10221 ret = get_errno(sys_getdents(arg1, dirp, count));
10222 if (!is_error(ret)) {
10223 struct linux_dirent *de;
10224 struct target_dirent *tde;
10225 int len = ret;
10226 int reclen, treclen;
10227 int count1, tnamelen;
10229 count1 = 0;
10230 de = dirp;
10231 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10232 goto efault;
10233 tde = target_dirp;
10234 while (len > 0) {
10235 reclen = de->d_reclen;
10236 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10237 assert(tnamelen >= 0);
10238 treclen = tnamelen + offsetof(struct target_dirent, d_name);
10239 assert(count1 + treclen <= count);
10240 tde->d_reclen = tswap16(treclen);
10241 tde->d_ino = tswapal(de->d_ino);
10242 tde->d_off = tswapal(de->d_off);
10243 memcpy(tde->d_name, de->d_name, tnamelen);
10244 de = (struct linux_dirent *)((char *)de + reclen);
10245 len -= reclen;
10246 tde = (struct target_dirent *)((char *)tde + treclen);
10247 count1 += treclen;
10249 ret = count1;
10250 unlock_user(target_dirp, arg2, ret);
10252 g_free(dirp);
10254 #else
10256 struct linux_dirent *dirp;
10257 abi_long count = arg3;
10259 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10260 goto efault;
10261 ret = get_errno(sys_getdents(arg1, dirp, count));
10262 if (!is_error(ret)) {
10263 struct linux_dirent *de;
10264 int len = ret;
10265 int reclen;
10266 de = dirp;
10267 while (len > 0) {
10268 reclen = de->d_reclen;
10269 if (reclen > len)
10270 break;
10271 de->d_reclen = tswap16(reclen);
10272 tswapls(&de->d_ino);
10273 tswapls(&de->d_off);
10274 de = (struct linux_dirent *)((char *)de + reclen);
10275 len -= reclen;
10278 unlock_user(dirp, arg2, ret);
10280 #endif
10281 #else
10282 /* Implement getdents in terms of getdents64 */
10284 struct linux_dirent64 *dirp;
10285 abi_long count = arg3;
10287 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10288 if (!dirp) {
10289 goto efault;
10291 ret = get_errno(sys_getdents64(arg1, dirp, count));
10292 if (!is_error(ret)) {
10293 /* Convert the dirent64 structs to target dirent. We do this
10294 * in-place, since we can guarantee that a target_dirent is no
10295 * larger than a dirent64; however this means we have to be
10296 * careful to read everything before writing in the new format.
10298 struct linux_dirent64 *de;
10299 struct target_dirent *tde;
10300 int len = ret;
10301 int tlen = 0;
10303 de = dirp;
10304 tde = (struct target_dirent *)dirp;
10305 while (len > 0) {
10306 int namelen, treclen;
10307 int reclen = de->d_reclen;
10308 uint64_t ino = de->d_ino;
10309 int64_t off = de->d_off;
10310 uint8_t type = de->d_type;
10312 namelen = strlen(de->d_name);
10313 treclen = offsetof(struct target_dirent, d_name)
10314 + namelen + 2;
10315 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10317 memmove(tde->d_name, de->d_name, namelen + 1);
10318 tde->d_ino = tswapal(ino);
10319 tde->d_off = tswapal(off);
10320 tde->d_reclen = tswap16(treclen);
10321 /* The target_dirent type is in what was formerly a padding
10322 * byte at the end of the structure:
10324 *(((char *)tde) + treclen - 1) = type;
10326 de = (struct linux_dirent64 *)((char *)de + reclen);
10327 tde = (struct target_dirent *)((char *)tde + treclen);
10328 len -= reclen;
10329 tlen += treclen;
10331 ret = tlen;
10333 unlock_user(dirp, arg2, ret);
10335 #endif
10336 break;
10337 #endif /* TARGET_NR_getdents */
10338 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10339 case TARGET_NR_getdents64:
10341 struct linux_dirent64 *dirp;
10342 abi_long count = arg3;
10343 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10344 goto efault;
10345 ret = get_errno(sys_getdents64(arg1, dirp, count));
10346 if (!is_error(ret)) {
10347 struct linux_dirent64 *de;
10348 int len = ret;
10349 int reclen;
10350 de = dirp;
10351 while (len > 0) {
10352 reclen = de->d_reclen;
10353 if (reclen > len)
10354 break;
10355 de->d_reclen = tswap16(reclen);
10356 tswap64s((uint64_t *)&de->d_ino);
10357 tswap64s((uint64_t *)&de->d_off);
10358 de = (struct linux_dirent64 *)((char *)de + reclen);
10359 len -= reclen;
10362 unlock_user(dirp, arg2, ret);
10364 break;
10365 #endif /* TARGET_NR_getdents64 */
10366 #if defined(TARGET_NR__newselect)
10367 case TARGET_NR__newselect:
10368 ret = do_select(arg1, arg2, arg3, arg4, arg5);
10369 break;
10370 #endif
10371 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10372 # ifdef TARGET_NR_poll
10373 case TARGET_NR_poll:
10374 # endif
10375 # ifdef TARGET_NR_ppoll
10376 case TARGET_NR_ppoll:
10377 # endif
10379 struct target_pollfd *target_pfd;
10380 unsigned int nfds = arg2;
10381 struct pollfd *pfd;
10382 unsigned int i;
10384 pfd = NULL;
10385 target_pfd = NULL;
10386 if (nfds) {
10387 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
10388 ret = -TARGET_EINVAL;
10389 break;
10392 target_pfd = lock_user(VERIFY_WRITE, arg1,
10393 sizeof(struct target_pollfd) * nfds, 1);
10394 if (!target_pfd) {
10395 goto efault;
10398 pfd = alloca(sizeof(struct pollfd) * nfds);
10399 for (i = 0; i < nfds; i++) {
10400 pfd[i].fd = tswap32(target_pfd[i].fd);
10401 pfd[i].events = tswap16(target_pfd[i].events);
10405 switch (num) {
10406 # ifdef TARGET_NR_ppoll
10407 case TARGET_NR_ppoll:
10409 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
10410 target_sigset_t *target_set;
10411 sigset_t _set, *set = &_set;
10413 if (arg3) {
10414 if (target_to_host_timespec(timeout_ts, arg3)) {
10415 unlock_user(target_pfd, arg1, 0);
10416 goto efault;
10418 } else {
10419 timeout_ts = NULL;
10422 if (arg4) {
10423 if (arg5 != sizeof(target_sigset_t)) {
10424 unlock_user(target_pfd, arg1, 0);
10425 ret = -TARGET_EINVAL;
10426 break;
10429 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
10430 if (!target_set) {
10431 unlock_user(target_pfd, arg1, 0);
10432 goto efault;
10434 target_to_host_sigset(set, target_set);
10435 } else {
10436 set = NULL;
10439 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
10440 set, SIGSET_T_SIZE));
10442 if (!is_error(ret) && arg3) {
10443 host_to_target_timespec(arg3, timeout_ts);
10445 if (arg4) {
10446 unlock_user(target_set, arg4, 0);
10448 break;
10450 # endif
10451 # ifdef TARGET_NR_poll
10452 case TARGET_NR_poll:
10454 struct timespec ts, *pts;
10456 if (arg3 >= 0) {
10457 /* Convert ms to secs, ns */
10458 ts.tv_sec = arg3 / 1000;
10459 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10460 pts = &ts;
10461 } else {
10462 /* -ve poll() timeout means "infinite" */
10463 pts = NULL;
10465 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10466 break;
10468 # endif
10469 default:
10470 g_assert_not_reached();
10473 if (!is_error(ret)) {
10474 for(i = 0; i < nfds; i++) {
10475 target_pfd[i].revents = tswap16(pfd[i].revents);
10478 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10480 break;
10481 #endif
10482 case TARGET_NR_flock:
10483 /* NOTE: the flock constant seems to be the same for every
10484 Linux platform */
10485 ret = get_errno(safe_flock(arg1, arg2));
10486 break;
10487 case TARGET_NR_readv:
10489 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10490 if (vec != NULL) {
10491 ret = get_errno(safe_readv(arg1, vec, arg3));
10492 unlock_iovec(vec, arg2, arg3, 1);
10493 } else {
10494 ret = -host_to_target_errno(errno);
10497 break;
10498 case TARGET_NR_writev:
10500 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10501 if (vec != NULL) {
10502 ret = get_errno(safe_writev(arg1, vec, arg3));
10503 unlock_iovec(vec, arg2, arg3, 0);
10504 } else {
10505 ret = -host_to_target_errno(errno);
10508 break;
10509 #if defined(TARGET_NR_preadv)
10510 case TARGET_NR_preadv:
10512 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10513 if (vec != NULL) {
10514 unsigned long low, high;
10516 target_to_host_low_high(arg4, arg5, &low, &high);
10517 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10518 unlock_iovec(vec, arg2, arg3, 1);
10519 } else {
10520 ret = -host_to_target_errno(errno);
10523 break;
10524 #endif
10525 #if defined(TARGET_NR_pwritev)
10526 case TARGET_NR_pwritev:
10528 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10529 if (vec != NULL) {
10530 unsigned long low, high;
10532 target_to_host_low_high(arg4, arg5, &low, &high);
10533 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10534 unlock_iovec(vec, arg2, arg3, 0);
10535 } else {
10536 ret = -host_to_target_errno(errno);
10539 break;
10540 #endif
10541 case TARGET_NR_getsid:
10542 ret = get_errno(getsid(arg1));
10543 break;
10544 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10545 case TARGET_NR_fdatasync:
10546 ret = get_errno(fdatasync(arg1));
10547 break;
10548 #endif
10549 #ifdef TARGET_NR__sysctl
10550 case TARGET_NR__sysctl:
10551 /* We don't implement this, but ENOTDIR is always a safe
10552 return value. */
10553 ret = -TARGET_ENOTDIR;
10554 break;
10555 #endif
10556 case TARGET_NR_sched_getaffinity:
10558 unsigned int mask_size;
10559 unsigned long *mask;
10562 * sched_getaffinity needs multiples of ulong, so need to take
10563 * care of mismatches between target ulong and host ulong sizes.
10565 if (arg2 & (sizeof(abi_ulong) - 1)) {
10566 ret = -TARGET_EINVAL;
10567 break;
10569 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10571 mask = alloca(mask_size);
10572 memset(mask, 0, mask_size);
10573 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10575 if (!is_error(ret)) {
10576 if (ret > arg2) {
10577 /* More data returned than the caller's buffer will fit.
10578 * This only happens if sizeof(abi_long) < sizeof(long)
10579 * and the caller passed us a buffer holding an odd number
10580 * of abi_longs. If the host kernel is actually using the
10581 * extra 4 bytes then fail EINVAL; otherwise we can just
10582 * ignore them and only copy the interesting part.
10584 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10585 if (numcpus > arg2 * 8) {
10586 ret = -TARGET_EINVAL;
10587 break;
10589 ret = arg2;
10592 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10593 goto efault;
10597 break;
10598 case TARGET_NR_sched_setaffinity:
10600 unsigned int mask_size;
10601 unsigned long *mask;
10604 * sched_setaffinity needs multiples of ulong, so need to take
10605 * care of mismatches between target ulong and host ulong sizes.
10607 if (arg2 & (sizeof(abi_ulong) - 1)) {
10608 ret = -TARGET_EINVAL;
10609 break;
10611 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10612 mask = alloca(mask_size);
10614 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10615 if (ret) {
10616 break;
10619 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10621 break;
10622 case TARGET_NR_getcpu:
10624 unsigned cpu, node;
10625 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10626 arg2 ? &node : NULL,
10627 NULL));
10628 if (is_error(ret)) {
10629 goto fail;
10631 if (arg1 && put_user_u32(cpu, arg1)) {
10632 goto efault;
10634 if (arg2 && put_user_u32(node, arg2)) {
10635 goto efault;
10638 break;
10639 case TARGET_NR_sched_setparam:
10641 struct sched_param *target_schp;
10642 struct sched_param schp;
10644 if (arg2 == 0) {
10645 return -TARGET_EINVAL;
10647 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10648 goto efault;
10649 schp.sched_priority = tswap32(target_schp->sched_priority);
10650 unlock_user_struct(target_schp, arg2, 0);
10651 ret = get_errno(sched_setparam(arg1, &schp));
10653 break;
10654 case TARGET_NR_sched_getparam:
10656 struct sched_param *target_schp;
10657 struct sched_param schp;
10659 if (arg2 == 0) {
10660 return -TARGET_EINVAL;
10662 ret = get_errno(sched_getparam(arg1, &schp));
10663 if (!is_error(ret)) {
10664 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10665 goto efault;
10666 target_schp->sched_priority = tswap32(schp.sched_priority);
10667 unlock_user_struct(target_schp, arg2, 1);
10670 break;
10671 case TARGET_NR_sched_setscheduler:
10673 struct sched_param *target_schp;
10674 struct sched_param schp;
10675 if (arg3 == 0) {
10676 return -TARGET_EINVAL;
10678 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10679 goto efault;
10680 schp.sched_priority = tswap32(target_schp->sched_priority);
10681 unlock_user_struct(target_schp, arg3, 0);
10682 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
10684 break;
10685 case TARGET_NR_sched_getscheduler:
10686 ret = get_errno(sched_getscheduler(arg1));
10687 break;
10688 case TARGET_NR_sched_yield:
10689 ret = get_errno(sched_yield());
10690 break;
10691 case TARGET_NR_sched_get_priority_max:
10692 ret = get_errno(sched_get_priority_max(arg1));
10693 break;
10694 case TARGET_NR_sched_get_priority_min:
10695 ret = get_errno(sched_get_priority_min(arg1));
10696 break;
10697 case TARGET_NR_sched_rr_get_interval:
10699 struct timespec ts;
10700 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10701 if (!is_error(ret)) {
10702 ret = host_to_target_timespec(arg2, &ts);
10705 break;
10706 case TARGET_NR_nanosleep:
10708 struct timespec req, rem;
10709 target_to_host_timespec(&req, arg1);
10710 ret = get_errno(safe_nanosleep(&req, &rem));
10711 if (is_error(ret) && arg2) {
10712 host_to_target_timespec(arg2, &rem);
10715 break;
10716 #ifdef TARGET_NR_query_module
10717 case TARGET_NR_query_module:
10718 goto unimplemented;
10719 #endif
10720 #ifdef TARGET_NR_nfsservctl
10721 case TARGET_NR_nfsservctl:
10722 goto unimplemented;
10723 #endif
10724 case TARGET_NR_prctl:
10725 switch (arg1) {
10726 case PR_GET_PDEATHSIG:
10728 int deathsig;
10729 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10730 if (!is_error(ret) && arg2
10731 && put_user_ual(deathsig, arg2)) {
10732 goto efault;
10734 break;
10736 #ifdef PR_GET_NAME
10737 case PR_GET_NAME:
10739 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10740 if (!name) {
10741 goto efault;
10743 ret = get_errno(prctl(arg1, (unsigned long)name,
10744 arg3, arg4, arg5));
10745 unlock_user(name, arg2, 16);
10746 break;
10748 case PR_SET_NAME:
10750 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10751 if (!name) {
10752 goto efault;
10754 ret = get_errno(prctl(arg1, (unsigned long)name,
10755 arg3, arg4, arg5));
10756 unlock_user(name, arg2, 0);
10757 break;
10759 #endif
10760 #ifdef TARGET_AARCH64
10761 case TARGET_PR_SVE_SET_VL:
10762 /* We cannot support either PR_SVE_SET_VL_ONEXEC
10763 or PR_SVE_VL_INHERIT. Therefore, anything above
10764 ARM_MAX_VQ results in EINVAL. */
10765 ret = -TARGET_EINVAL;
10766 if (arm_feature(cpu_env, ARM_FEATURE_SVE)
10767 && arg2 >= 0 && arg2 <= ARM_MAX_VQ * 16 && !(arg2 & 15)) {
10768 CPUARMState *env = cpu_env;
10769 int old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10770 int vq = MAX(arg2 / 16, 1);
10772 if (vq < old_vq) {
10773 aarch64_sve_narrow_vq(env, vq);
10775 env->vfp.zcr_el[1] = vq - 1;
10776 ret = vq * 16;
10778 break;
10779 case TARGET_PR_SVE_GET_VL:
10780 ret = -TARGET_EINVAL;
10781 if (arm_feature(cpu_env, ARM_FEATURE_SVE)) {
10782 CPUARMState *env = cpu_env;
10783 ret = ((env->vfp.zcr_el[1] & 0xf) + 1) * 16;
10785 break;
10786 #endif /* AARCH64 */
10787 case PR_GET_SECCOMP:
10788 case PR_SET_SECCOMP:
10789 /* Disable seccomp to prevent the target disabling syscalls we
10790 * need. */
10791 ret = -TARGET_EINVAL;
10792 break;
10793 default:
10794 /* Most prctl options have no pointer arguments */
10795 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10796 break;
10798 break;
10799 #ifdef TARGET_NR_arch_prctl
10800 case TARGET_NR_arch_prctl:
10801 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10802 ret = do_arch_prctl(cpu_env, arg1, arg2);
10803 break;
10804 #else
10805 goto unimplemented;
10806 #endif
10807 #endif
10808 #ifdef TARGET_NR_pread64
10809 case TARGET_NR_pread64:
10810 if (regpairs_aligned(cpu_env, num)) {
10811 arg4 = arg5;
10812 arg5 = arg6;
10814 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
10815 goto efault;
10816 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10817 unlock_user(p, arg2, ret);
10818 break;
10819 case TARGET_NR_pwrite64:
10820 if (regpairs_aligned(cpu_env, num)) {
10821 arg4 = arg5;
10822 arg5 = arg6;
10824 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
10825 goto efault;
10826 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10827 unlock_user(p, arg2, 0);
10828 break;
10829 #endif
10830 case TARGET_NR_getcwd:
10831 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10832 goto efault;
10833 ret = get_errno(sys_getcwd1(p, arg2));
10834 unlock_user(p, arg1, ret);
10835 break;
10836 case TARGET_NR_capget:
10837 case TARGET_NR_capset:
10839 struct target_user_cap_header *target_header;
10840 struct target_user_cap_data *target_data = NULL;
10841 struct __user_cap_header_struct header;
10842 struct __user_cap_data_struct data[2];
10843 struct __user_cap_data_struct *dataptr = NULL;
10844 int i, target_datalen;
10845 int data_items = 1;
10847 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10848 goto efault;
10850 header.version = tswap32(target_header->version);
10851 header.pid = tswap32(target_header->pid);
10853 if (header.version != _LINUX_CAPABILITY_VERSION) {
10854 /* Version 2 and up takes pointer to two user_data structs */
10855 data_items = 2;
10858 target_datalen = sizeof(*target_data) * data_items;
10860 if (arg2) {
10861 if (num == TARGET_NR_capget) {
10862 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10863 } else {
10864 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10866 if (!target_data) {
10867 unlock_user_struct(target_header, arg1, 0);
10868 goto efault;
10871 if (num == TARGET_NR_capset) {
10872 for (i = 0; i < data_items; i++) {
10873 data[i].effective = tswap32(target_data[i].effective);
10874 data[i].permitted = tswap32(target_data[i].permitted);
10875 data[i].inheritable = tswap32(target_data[i].inheritable);
10879 dataptr = data;
10882 if (num == TARGET_NR_capget) {
10883 ret = get_errno(capget(&header, dataptr));
10884 } else {
10885 ret = get_errno(capset(&header, dataptr));
10888 /* The kernel always updates version for both capget and capset */
10889 target_header->version = tswap32(header.version);
10890 unlock_user_struct(target_header, arg1, 1);
10892 if (arg2) {
10893 if (num == TARGET_NR_capget) {
10894 for (i = 0; i < data_items; i++) {
10895 target_data[i].effective = tswap32(data[i].effective);
10896 target_data[i].permitted = tswap32(data[i].permitted);
10897 target_data[i].inheritable = tswap32(data[i].inheritable);
10899 unlock_user(target_data, arg2, target_datalen);
10900 } else {
10901 unlock_user(target_data, arg2, 0);
10904 break;
10906 case TARGET_NR_sigaltstack:
10907 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
10908 break;
10910 #ifdef CONFIG_SENDFILE
10911 case TARGET_NR_sendfile:
10913 off_t *offp = NULL;
10914 off_t off;
10915 if (arg3) {
10916 ret = get_user_sal(off, arg3);
10917 if (is_error(ret)) {
10918 break;
10920 offp = &off;
10922 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10923 if (!is_error(ret) && arg3) {
10924 abi_long ret2 = put_user_sal(off, arg3);
10925 if (is_error(ret2)) {
10926 ret = ret2;
10929 break;
10931 #ifdef TARGET_NR_sendfile64
10932 case TARGET_NR_sendfile64:
10934 off_t *offp = NULL;
10935 off_t off;
10936 if (arg3) {
10937 ret = get_user_s64(off, arg3);
10938 if (is_error(ret)) {
10939 break;
10941 offp = &off;
10943 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10944 if (!is_error(ret) && arg3) {
10945 abi_long ret2 = put_user_s64(off, arg3);
10946 if (is_error(ret2)) {
10947 ret = ret2;
10950 break;
10952 #endif
10953 #else
10954 case TARGET_NR_sendfile:
10955 #ifdef TARGET_NR_sendfile64
10956 case TARGET_NR_sendfile64:
10957 #endif
10958 goto unimplemented;
10959 #endif
10961 #ifdef TARGET_NR_getpmsg
10962 case TARGET_NR_getpmsg:
10963 goto unimplemented;
10964 #endif
10965 #ifdef TARGET_NR_putpmsg
10966 case TARGET_NR_putpmsg:
10967 goto unimplemented;
10968 #endif
10969 #ifdef TARGET_NR_vfork
10970 case TARGET_NR_vfork:
10971 ret = get_errno(do_fork(cpu_env,
10972 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10973 0, 0, 0, 0));
10974 break;
10975 #endif
10976 #ifdef TARGET_NR_ugetrlimit
10977 case TARGET_NR_ugetrlimit:
10979 struct rlimit rlim;
10980 int resource = target_to_host_resource(arg1);
10981 ret = get_errno(getrlimit(resource, &rlim));
10982 if (!is_error(ret)) {
10983 struct target_rlimit *target_rlim;
10984 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10985 goto efault;
10986 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10987 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10988 unlock_user_struct(target_rlim, arg2, 1);
10990 break;
10992 #endif
10993 #ifdef TARGET_NR_truncate64
10994 case TARGET_NR_truncate64:
10995 if (!(p = lock_user_string(arg1)))
10996 goto efault;
10997 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10998 unlock_user(p, arg1, 0);
10999 break;
11000 #endif
11001 #ifdef TARGET_NR_ftruncate64
11002 case TARGET_NR_ftruncate64:
11003 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11004 break;
11005 #endif
11006 #ifdef TARGET_NR_stat64
11007 case TARGET_NR_stat64:
11008 if (!(p = lock_user_string(arg1)))
11009 goto efault;
11010 ret = get_errno(stat(path(p), &st));
11011 unlock_user(p, arg1, 0);
11012 if (!is_error(ret))
11013 ret = host_to_target_stat64(cpu_env, arg2, &st);
11014 break;
11015 #endif
11016 #ifdef TARGET_NR_lstat64
11017 case TARGET_NR_lstat64:
11018 if (!(p = lock_user_string(arg1)))
11019 goto efault;
11020 ret = get_errno(lstat(path(p), &st));
11021 unlock_user(p, arg1, 0);
11022 if (!is_error(ret))
11023 ret = host_to_target_stat64(cpu_env, arg2, &st);
11024 break;
11025 #endif
11026 #ifdef TARGET_NR_fstat64
11027 case TARGET_NR_fstat64:
11028 ret = get_errno(fstat(arg1, &st));
11029 if (!is_error(ret))
11030 ret = host_to_target_stat64(cpu_env, arg2, &st);
11031 break;
11032 #endif
11033 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11034 #ifdef TARGET_NR_fstatat64
11035 case TARGET_NR_fstatat64:
11036 #endif
11037 #ifdef TARGET_NR_newfstatat
11038 case TARGET_NR_newfstatat:
11039 #endif
11040 if (!(p = lock_user_string(arg2)))
11041 goto efault;
11042 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11043 if (!is_error(ret))
11044 ret = host_to_target_stat64(cpu_env, arg3, &st);
11045 break;
11046 #endif
11047 #ifdef TARGET_NR_lchown
11048 case TARGET_NR_lchown:
11049 if (!(p = lock_user_string(arg1)))
11050 goto efault;
11051 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11052 unlock_user(p, arg1, 0);
11053 break;
11054 #endif
11055 #ifdef TARGET_NR_getuid
11056 case TARGET_NR_getuid:
11057 ret = get_errno(high2lowuid(getuid()));
11058 break;
11059 #endif
11060 #ifdef TARGET_NR_getgid
11061 case TARGET_NR_getgid:
11062 ret = get_errno(high2lowgid(getgid()));
11063 break;
11064 #endif
11065 #ifdef TARGET_NR_geteuid
11066 case TARGET_NR_geteuid:
11067 ret = get_errno(high2lowuid(geteuid()));
11068 break;
11069 #endif
11070 #ifdef TARGET_NR_getegid
11071 case TARGET_NR_getegid:
11072 ret = get_errno(high2lowgid(getegid()));
11073 break;
11074 #endif
11075 case TARGET_NR_setreuid:
11076 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11077 break;
11078 case TARGET_NR_setregid:
11079 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11080 break;
11081 case TARGET_NR_getgroups:
11083 int gidsetsize = arg1;
11084 target_id *target_grouplist;
11085 gid_t *grouplist;
11086 int i;
11088 grouplist = alloca(gidsetsize * sizeof(gid_t));
11089 ret = get_errno(getgroups(gidsetsize, grouplist));
11090 if (gidsetsize == 0)
11091 break;
11092 if (!is_error(ret)) {
11093 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11094 if (!target_grouplist)
11095 goto efault;
11096 for(i = 0;i < ret; i++)
11097 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11098 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11101 break;
11102 case TARGET_NR_setgroups:
11104 int gidsetsize = arg1;
11105 target_id *target_grouplist;
11106 gid_t *grouplist = NULL;
11107 int i;
11108 if (gidsetsize) {
11109 grouplist = alloca(gidsetsize * sizeof(gid_t));
11110 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11111 if (!target_grouplist) {
11112 ret = -TARGET_EFAULT;
11113 goto fail;
11115 for (i = 0; i < gidsetsize; i++) {
11116 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11118 unlock_user(target_grouplist, arg2, 0);
11120 ret = get_errno(setgroups(gidsetsize, grouplist));
11122 break;
11123 case TARGET_NR_fchown:
11124 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11125 break;
11126 #if defined(TARGET_NR_fchownat)
11127 case TARGET_NR_fchownat:
11128 if (!(p = lock_user_string(arg2)))
11129 goto efault;
11130 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11131 low2highgid(arg4), arg5));
11132 unlock_user(p, arg2, 0);
11133 break;
11134 #endif
11135 #ifdef TARGET_NR_setresuid
11136 case TARGET_NR_setresuid:
11137 ret = get_errno(sys_setresuid(low2highuid(arg1),
11138 low2highuid(arg2),
11139 low2highuid(arg3)));
11140 break;
11141 #endif
11142 #ifdef TARGET_NR_getresuid
11143 case TARGET_NR_getresuid:
11145 uid_t ruid, euid, suid;
11146 ret = get_errno(getresuid(&ruid, &euid, &suid));
11147 if (!is_error(ret)) {
11148 if (put_user_id(high2lowuid(ruid), arg1)
11149 || put_user_id(high2lowuid(euid), arg2)
11150 || put_user_id(high2lowuid(suid), arg3))
11151 goto efault;
11154 break;
11155 #endif
11156 #ifdef TARGET_NR_getresgid
11157 case TARGET_NR_setresgid:
11158 ret = get_errno(sys_setresgid(low2highgid(arg1),
11159 low2highgid(arg2),
11160 low2highgid(arg3)));
11161 break;
11162 #endif
11163 #ifdef TARGET_NR_getresgid
11164 case TARGET_NR_getresgid:
11166 gid_t rgid, egid, sgid;
11167 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11168 if (!is_error(ret)) {
11169 if (put_user_id(high2lowgid(rgid), arg1)
11170 || put_user_id(high2lowgid(egid), arg2)
11171 || put_user_id(high2lowgid(sgid), arg3))
11172 goto efault;
11175 break;
11176 #endif
11177 #ifdef TARGET_NR_chown
11178 case TARGET_NR_chown:
11179 if (!(p = lock_user_string(arg1)))
11180 goto efault;
11181 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11182 unlock_user(p, arg1, 0);
11183 break;
11184 #endif
11185 case TARGET_NR_setuid:
11186 ret = get_errno(sys_setuid(low2highuid(arg1)));
11187 break;
11188 case TARGET_NR_setgid:
11189 ret = get_errno(sys_setgid(low2highgid(arg1)));
11190 break;
11191 case TARGET_NR_setfsuid:
11192 ret = get_errno(setfsuid(arg1));
11193 break;
11194 case TARGET_NR_setfsgid:
11195 ret = get_errno(setfsgid(arg1));
11196 break;
11198 #ifdef TARGET_NR_lchown32
11199 case TARGET_NR_lchown32:
11200 if (!(p = lock_user_string(arg1)))
11201 goto efault;
11202 ret = get_errno(lchown(p, arg2, arg3));
11203 unlock_user(p, arg1, 0);
11204 break;
11205 #endif
11206 #ifdef TARGET_NR_getuid32
11207 case TARGET_NR_getuid32:
11208 ret = get_errno(getuid());
11209 break;
11210 #endif
11212 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11213 /* Alpha specific */
11214 case TARGET_NR_getxuid:
11216 uid_t euid;
11217 euid=geteuid();
11218 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
11220 ret = get_errno(getuid());
11221 break;
11222 #endif
11223 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11224 /* Alpha specific */
11225 case TARGET_NR_getxgid:
11227 uid_t egid;
11228 egid=getegid();
11229 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
11231 ret = get_errno(getgid());
11232 break;
11233 #endif
11234 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11235 /* Alpha specific */
11236 case TARGET_NR_osf_getsysinfo:
11237 ret = -TARGET_EOPNOTSUPP;
11238 switch (arg1) {
11239 case TARGET_GSI_IEEE_FP_CONTROL:
11241 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
11243 /* Copied from linux ieee_fpcr_to_swcr. */
11244 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
11245 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
11246 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
11247 | SWCR_TRAP_ENABLE_DZE
11248 | SWCR_TRAP_ENABLE_OVF);
11249 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
11250 | SWCR_TRAP_ENABLE_INE);
11251 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
11252 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
11254 if (put_user_u64 (swcr, arg2))
11255 goto efault;
11256 ret = 0;
11258 break;
11260 /* case GSI_IEEE_STATE_AT_SIGNAL:
11261 -- Not implemented in linux kernel.
11262 case GSI_UACPROC:
11263 -- Retrieves current unaligned access state; not much used.
11264 case GSI_PROC_TYPE:
11265 -- Retrieves implver information; surely not used.
11266 case GSI_GET_HWRPB:
11267 -- Grabs a copy of the HWRPB; surely not used.
11270 break;
11271 #endif
11272 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11273 /* Alpha specific */
11274 case TARGET_NR_osf_setsysinfo:
11275 ret = -TARGET_EOPNOTSUPP;
11276 switch (arg1) {
11277 case TARGET_SSI_IEEE_FP_CONTROL:
11279 uint64_t swcr, fpcr, orig_fpcr;
11281 if (get_user_u64 (swcr, arg2)) {
11282 goto efault;
11284 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11285 fpcr = orig_fpcr & FPCR_DYN_MASK;
11287 /* Copied from linux ieee_swcr_to_fpcr. */
11288 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
11289 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
11290 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
11291 | SWCR_TRAP_ENABLE_DZE
11292 | SWCR_TRAP_ENABLE_OVF)) << 48;
11293 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
11294 | SWCR_TRAP_ENABLE_INE)) << 57;
11295 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
11296 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
11298 cpu_alpha_store_fpcr(cpu_env, fpcr);
11299 ret = 0;
11301 break;
11303 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11305 uint64_t exc, fpcr, orig_fpcr;
11306 int si_code;
11308 if (get_user_u64(exc, arg2)) {
11309 goto efault;
11312 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11314 /* We only add to the exception status here. */
11315 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
11317 cpu_alpha_store_fpcr(cpu_env, fpcr);
11318 ret = 0;
11320 /* Old exceptions are not signaled. */
11321 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
11323 /* If any exceptions set by this call,
11324 and are unmasked, send a signal. */
11325 si_code = 0;
11326 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
11327 si_code = TARGET_FPE_FLTRES;
11329 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
11330 si_code = TARGET_FPE_FLTUND;
11332 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
11333 si_code = TARGET_FPE_FLTOVF;
11335 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
11336 si_code = TARGET_FPE_FLTDIV;
11338 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
11339 si_code = TARGET_FPE_FLTINV;
11341 if (si_code != 0) {
11342 target_siginfo_t info;
11343 info.si_signo = SIGFPE;
11344 info.si_errno = 0;
11345 info.si_code = si_code;
11346 info._sifields._sigfault._addr
11347 = ((CPUArchState *)cpu_env)->pc;
11348 queue_signal((CPUArchState *)cpu_env, info.si_signo,
11349 QEMU_SI_FAULT, &info);
11352 break;
11354 /* case SSI_NVPAIRS:
11355 -- Used with SSIN_UACPROC to enable unaligned accesses.
11356 case SSI_IEEE_STATE_AT_SIGNAL:
11357 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11358 -- Not implemented in linux kernel
11361 break;
11362 #endif
11363 #ifdef TARGET_NR_osf_sigprocmask
11364 /* Alpha specific. */
11365 case TARGET_NR_osf_sigprocmask:
11367 abi_ulong mask;
11368 int how;
11369 sigset_t set, oldset;
11371 switch(arg1) {
11372 case TARGET_SIG_BLOCK:
11373 how = SIG_BLOCK;
11374 break;
11375 case TARGET_SIG_UNBLOCK:
11376 how = SIG_UNBLOCK;
11377 break;
11378 case TARGET_SIG_SETMASK:
11379 how = SIG_SETMASK;
11380 break;
11381 default:
11382 ret = -TARGET_EINVAL;
11383 goto fail;
11385 mask = arg2;
11386 target_to_host_old_sigset(&set, &mask);
11387 ret = do_sigprocmask(how, &set, &oldset);
11388 if (!ret) {
11389 host_to_target_old_sigset(&mask, &oldset);
11390 ret = mask;
11393 break;
11394 #endif
11396 #ifdef TARGET_NR_getgid32
11397 case TARGET_NR_getgid32:
11398 ret = get_errno(getgid());
11399 break;
11400 #endif
11401 #ifdef TARGET_NR_geteuid32
11402 case TARGET_NR_geteuid32:
11403 ret = get_errno(geteuid());
11404 break;
11405 #endif
11406 #ifdef TARGET_NR_getegid32
11407 case TARGET_NR_getegid32:
11408 ret = get_errno(getegid());
11409 break;
11410 #endif
11411 #ifdef TARGET_NR_setreuid32
11412 case TARGET_NR_setreuid32:
11413 ret = get_errno(setreuid(arg1, arg2));
11414 break;
11415 #endif
11416 #ifdef TARGET_NR_setregid32
11417 case TARGET_NR_setregid32:
11418 ret = get_errno(setregid(arg1, arg2));
11419 break;
11420 #endif
11421 #ifdef TARGET_NR_getgroups32
11422 case TARGET_NR_getgroups32:
11424 int gidsetsize = arg1;
11425 uint32_t *target_grouplist;
11426 gid_t *grouplist;
11427 int i;
11429 grouplist = alloca(gidsetsize * sizeof(gid_t));
11430 ret = get_errno(getgroups(gidsetsize, grouplist));
11431 if (gidsetsize == 0)
11432 break;
11433 if (!is_error(ret)) {
11434 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11435 if (!target_grouplist) {
11436 ret = -TARGET_EFAULT;
11437 goto fail;
11439 for(i = 0;i < ret; i++)
11440 target_grouplist[i] = tswap32(grouplist[i]);
11441 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11444 break;
11445 #endif
11446 #ifdef TARGET_NR_setgroups32
11447 case TARGET_NR_setgroups32:
11449 int gidsetsize = arg1;
11450 uint32_t *target_grouplist;
11451 gid_t *grouplist;
11452 int i;
11454 grouplist = alloca(gidsetsize * sizeof(gid_t));
11455 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11456 if (!target_grouplist) {
11457 ret = -TARGET_EFAULT;
11458 goto fail;
11460 for(i = 0;i < gidsetsize; i++)
11461 grouplist[i] = tswap32(target_grouplist[i]);
11462 unlock_user(target_grouplist, arg2, 0);
11463 ret = get_errno(setgroups(gidsetsize, grouplist));
11465 break;
11466 #endif
11467 #ifdef TARGET_NR_fchown32
11468 case TARGET_NR_fchown32:
11469 ret = get_errno(fchown(arg1, arg2, arg3));
11470 break;
11471 #endif
11472 #ifdef TARGET_NR_setresuid32
11473 case TARGET_NR_setresuid32:
11474 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
11475 break;
11476 #endif
11477 #ifdef TARGET_NR_getresuid32
11478 case TARGET_NR_getresuid32:
11480 uid_t ruid, euid, suid;
11481 ret = get_errno(getresuid(&ruid, &euid, &suid));
11482 if (!is_error(ret)) {
11483 if (put_user_u32(ruid, arg1)
11484 || put_user_u32(euid, arg2)
11485 || put_user_u32(suid, arg3))
11486 goto efault;
11489 break;
11490 #endif
11491 #ifdef TARGET_NR_setresgid32
11492 case TARGET_NR_setresgid32:
11493 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
11494 break;
11495 #endif
11496 #ifdef TARGET_NR_getresgid32
11497 case TARGET_NR_getresgid32:
11499 gid_t rgid, egid, sgid;
11500 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11501 if (!is_error(ret)) {
11502 if (put_user_u32(rgid, arg1)
11503 || put_user_u32(egid, arg2)
11504 || put_user_u32(sgid, arg3))
11505 goto efault;
11508 break;
11509 #endif
11510 #ifdef TARGET_NR_chown32
11511 case TARGET_NR_chown32:
11512 if (!(p = lock_user_string(arg1)))
11513 goto efault;
11514 ret = get_errno(chown(p, arg2, arg3));
11515 unlock_user(p, arg1, 0);
11516 break;
11517 #endif
11518 #ifdef TARGET_NR_setuid32
11519 case TARGET_NR_setuid32:
11520 ret = get_errno(sys_setuid(arg1));
11521 break;
11522 #endif
11523 #ifdef TARGET_NR_setgid32
11524 case TARGET_NR_setgid32:
11525 ret = get_errno(sys_setgid(arg1));
11526 break;
11527 #endif
11528 #ifdef TARGET_NR_setfsuid32
11529 case TARGET_NR_setfsuid32:
11530 ret = get_errno(setfsuid(arg1));
11531 break;
11532 #endif
11533 #ifdef TARGET_NR_setfsgid32
11534 case TARGET_NR_setfsgid32:
11535 ret = get_errno(setfsgid(arg1));
11536 break;
11537 #endif
11539 case TARGET_NR_pivot_root:
11540 goto unimplemented;
11541 #ifdef TARGET_NR_mincore
11542 case TARGET_NR_mincore:
11544 void *a;
11545 ret = -TARGET_ENOMEM;
11546 a = lock_user(VERIFY_READ, arg1, arg2, 0);
11547 if (!a) {
11548 goto fail;
11550 ret = -TARGET_EFAULT;
11551 p = lock_user_string(arg3);
11552 if (!p) {
11553 goto mincore_fail;
11555 ret = get_errno(mincore(a, arg2, p));
11556 unlock_user(p, arg3, ret);
11557 mincore_fail:
11558 unlock_user(a, arg1, 0);
11560 break;
11561 #endif
11562 #ifdef TARGET_NR_arm_fadvise64_64
11563 case TARGET_NR_arm_fadvise64_64:
11564 /* arm_fadvise64_64 looks like fadvise64_64 but
11565 * with different argument order: fd, advice, offset, len
11566 * rather than the usual fd, offset, len, advice.
11567 * Note that offset and len are both 64-bit so appear as
11568 * pairs of 32-bit registers.
11570 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11571 target_offset64(arg5, arg6), arg2);
11572 ret = -host_to_target_errno(ret);
11573 break;
11574 #endif
11576 #if TARGET_ABI_BITS == 32
11578 #ifdef TARGET_NR_fadvise64_64
11579 case TARGET_NR_fadvise64_64:
11580 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11581 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11582 ret = arg2;
11583 arg2 = arg3;
11584 arg3 = arg4;
11585 arg4 = arg5;
11586 arg5 = arg6;
11587 arg6 = ret;
11588 #else
11589 /* 6 args: fd, offset (high, low), len (high, low), advice */
11590 if (regpairs_aligned(cpu_env, num)) {
11591 /* offset is in (3,4), len in (5,6) and advice in 7 */
11592 arg2 = arg3;
11593 arg3 = arg4;
11594 arg4 = arg5;
11595 arg5 = arg6;
11596 arg6 = arg7;
11598 #endif
11599 ret = -host_to_target_errno(posix_fadvise(arg1,
11600 target_offset64(arg2, arg3),
11601 target_offset64(arg4, arg5),
11602 arg6));
11603 break;
11604 #endif
11606 #ifdef TARGET_NR_fadvise64
11607 case TARGET_NR_fadvise64:
11608 /* 5 args: fd, offset (high, low), len, advice */
11609 if (regpairs_aligned(cpu_env, num)) {
11610 /* offset is in (3,4), len in 5 and advice in 6 */
11611 arg2 = arg3;
11612 arg3 = arg4;
11613 arg4 = arg5;
11614 arg5 = arg6;
11616 ret = -host_to_target_errno(posix_fadvise(arg1,
11617 target_offset64(arg2, arg3),
11618 arg4, arg5));
11619 break;
11620 #endif
11622 #else /* not a 32-bit ABI */
11623 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11624 #ifdef TARGET_NR_fadvise64_64
11625 case TARGET_NR_fadvise64_64:
11626 #endif
11627 #ifdef TARGET_NR_fadvise64
11628 case TARGET_NR_fadvise64:
11629 #endif
11630 #ifdef TARGET_S390X
11631 switch (arg4) {
11632 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11633 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11634 case 6: arg4 = POSIX_FADV_DONTNEED; break;
11635 case 7: arg4 = POSIX_FADV_NOREUSE; break;
11636 default: break;
11638 #endif
11639 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11640 break;
11641 #endif
11642 #endif /* end of 64-bit ABI fadvise handling */
11644 #ifdef TARGET_NR_madvise
11645 case TARGET_NR_madvise:
11646 /* A straight passthrough may not be safe because qemu sometimes
11647 turns private file-backed mappings into anonymous mappings.
11648 This will break MADV_DONTNEED.
11649 This is a hint, so ignoring and returning success is ok. */
11650 ret = get_errno(0);
11651 break;
11652 #endif
11653 #if TARGET_ABI_BITS == 32
11654 case TARGET_NR_fcntl64:
11656 int cmd;
11657 struct flock64 fl;
11658 from_flock64_fn *copyfrom = copy_from_user_flock64;
11659 to_flock64_fn *copyto = copy_to_user_flock64;
11661 #ifdef TARGET_ARM
11662 if (!((CPUARMState *)cpu_env)->eabi) {
11663 copyfrom = copy_from_user_oabi_flock64;
11664 copyto = copy_to_user_oabi_flock64;
11666 #endif
11668 cmd = target_to_host_fcntl_cmd(arg2);
11669 if (cmd == -TARGET_EINVAL) {
11670 ret = cmd;
11671 break;
11674 switch(arg2) {
11675 case TARGET_F_GETLK64:
11676 ret = copyfrom(&fl, arg3);
11677 if (ret) {
11678 break;
11680 ret = get_errno(fcntl(arg1, cmd, &fl));
11681 if (ret == 0) {
11682 ret = copyto(arg3, &fl);
11684 break;
11686 case TARGET_F_SETLK64:
11687 case TARGET_F_SETLKW64:
11688 ret = copyfrom(&fl, arg3);
11689 if (ret) {
11690 break;
11692 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11693 break;
11694 default:
11695 ret = do_fcntl(arg1, arg2, arg3);
11696 break;
11698 break;
11700 #endif
11701 #ifdef TARGET_NR_cacheflush
11702 case TARGET_NR_cacheflush:
11703 /* self-modifying code is handled automatically, so nothing needed */
11704 ret = 0;
11705 break;
11706 #endif
11707 #ifdef TARGET_NR_security
11708 case TARGET_NR_security:
11709 goto unimplemented;
11710 #endif
11711 #ifdef TARGET_NR_getpagesize
11712 case TARGET_NR_getpagesize:
11713 ret = TARGET_PAGE_SIZE;
11714 break;
11715 #endif
11716 case TARGET_NR_gettid:
11717 ret = get_errno(gettid());
11718 break;
11719 #ifdef TARGET_NR_readahead
11720 case TARGET_NR_readahead:
11721 #if TARGET_ABI_BITS == 32
11722 if (regpairs_aligned(cpu_env, num)) {
11723 arg2 = arg3;
11724 arg3 = arg4;
11725 arg4 = arg5;
11727 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11728 #else
11729 ret = get_errno(readahead(arg1, arg2, arg3));
11730 #endif
11731 break;
11732 #endif
11733 #ifdef CONFIG_ATTR
11734 #ifdef TARGET_NR_setxattr
11735 case TARGET_NR_listxattr:
11736 case TARGET_NR_llistxattr:
11738 void *p, *b = 0;
11739 if (arg2) {
11740 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11741 if (!b) {
11742 ret = -TARGET_EFAULT;
11743 break;
11746 p = lock_user_string(arg1);
11747 if (p) {
11748 if (num == TARGET_NR_listxattr) {
11749 ret = get_errno(listxattr(p, b, arg3));
11750 } else {
11751 ret = get_errno(llistxattr(p, b, arg3));
11753 } else {
11754 ret = -TARGET_EFAULT;
11756 unlock_user(p, arg1, 0);
11757 unlock_user(b, arg2, arg3);
11758 break;
11760 case TARGET_NR_flistxattr:
11762 void *b = 0;
11763 if (arg2) {
11764 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11765 if (!b) {
11766 ret = -TARGET_EFAULT;
11767 break;
11770 ret = get_errno(flistxattr(arg1, b, arg3));
11771 unlock_user(b, arg2, arg3);
11772 break;
11774 case TARGET_NR_setxattr:
11775 case TARGET_NR_lsetxattr:
11777 void *p, *n, *v = 0;
11778 if (arg3) {
11779 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11780 if (!v) {
11781 ret = -TARGET_EFAULT;
11782 break;
11785 p = lock_user_string(arg1);
11786 n = lock_user_string(arg2);
11787 if (p && n) {
11788 if (num == TARGET_NR_setxattr) {
11789 ret = get_errno(setxattr(p, n, v, arg4, arg5));
11790 } else {
11791 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11793 } else {
11794 ret = -TARGET_EFAULT;
11796 unlock_user(p, arg1, 0);
11797 unlock_user(n, arg2, 0);
11798 unlock_user(v, arg3, 0);
11800 break;
11801 case TARGET_NR_fsetxattr:
11803 void *n, *v = 0;
11804 if (arg3) {
11805 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11806 if (!v) {
11807 ret = -TARGET_EFAULT;
11808 break;
11811 n = lock_user_string(arg2);
11812 if (n) {
11813 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11814 } else {
11815 ret = -TARGET_EFAULT;
11817 unlock_user(n, arg2, 0);
11818 unlock_user(v, arg3, 0);
11820 break;
11821 case TARGET_NR_getxattr:
11822 case TARGET_NR_lgetxattr:
11824 void *p, *n, *v = 0;
11825 if (arg3) {
11826 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11827 if (!v) {
11828 ret = -TARGET_EFAULT;
11829 break;
11832 p = lock_user_string(arg1);
11833 n = lock_user_string(arg2);
11834 if (p && n) {
11835 if (num == TARGET_NR_getxattr) {
11836 ret = get_errno(getxattr(p, n, v, arg4));
11837 } else {
11838 ret = get_errno(lgetxattr(p, n, v, arg4));
11840 } else {
11841 ret = -TARGET_EFAULT;
11843 unlock_user(p, arg1, 0);
11844 unlock_user(n, arg2, 0);
11845 unlock_user(v, arg3, arg4);
11847 break;
11848 case TARGET_NR_fgetxattr:
11850 void *n, *v = 0;
11851 if (arg3) {
11852 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11853 if (!v) {
11854 ret = -TARGET_EFAULT;
11855 break;
11858 n = lock_user_string(arg2);
11859 if (n) {
11860 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11861 } else {
11862 ret = -TARGET_EFAULT;
11864 unlock_user(n, arg2, 0);
11865 unlock_user(v, arg3, arg4);
11867 break;
11868 case TARGET_NR_removexattr:
11869 case TARGET_NR_lremovexattr:
11871 void *p, *n;
11872 p = lock_user_string(arg1);
11873 n = lock_user_string(arg2);
11874 if (p && n) {
11875 if (num == TARGET_NR_removexattr) {
11876 ret = get_errno(removexattr(p, n));
11877 } else {
11878 ret = get_errno(lremovexattr(p, n));
11880 } else {
11881 ret = -TARGET_EFAULT;
11883 unlock_user(p, arg1, 0);
11884 unlock_user(n, arg2, 0);
11886 break;
11887 case TARGET_NR_fremovexattr:
11889 void *n;
11890 n = lock_user_string(arg2);
11891 if (n) {
11892 ret = get_errno(fremovexattr(arg1, n));
11893 } else {
11894 ret = -TARGET_EFAULT;
11896 unlock_user(n, arg2, 0);
11898 break;
11899 #endif
11900 #endif /* CONFIG_ATTR */
11901 #ifdef TARGET_NR_set_thread_area
11902 case TARGET_NR_set_thread_area:
11903 #if defined(TARGET_MIPS)
11904 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11905 ret = 0;
11906 break;
11907 #elif defined(TARGET_CRIS)
11908 if (arg1 & 0xff)
11909 ret = -TARGET_EINVAL;
11910 else {
11911 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11912 ret = 0;
11914 break;
11915 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11916 ret = do_set_thread_area(cpu_env, arg1);
11917 break;
11918 #elif defined(TARGET_M68K)
11920 TaskState *ts = cpu->opaque;
11921 ts->tp_value = arg1;
11922 ret = 0;
11923 break;
11925 #else
11926 goto unimplemented_nowarn;
11927 #endif
11928 #endif
11929 #ifdef TARGET_NR_get_thread_area
11930 case TARGET_NR_get_thread_area:
11931 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11932 ret = do_get_thread_area(cpu_env, arg1);
11933 break;
11934 #elif defined(TARGET_M68K)
11936 TaskState *ts = cpu->opaque;
11937 ret = ts->tp_value;
11938 break;
11940 #else
11941 goto unimplemented_nowarn;
11942 #endif
11943 #endif
11944 #ifdef TARGET_NR_getdomainname
11945 case TARGET_NR_getdomainname:
11946 goto unimplemented_nowarn;
11947 #endif
11949 #ifdef TARGET_NR_clock_settime
11950 case TARGET_NR_clock_settime:
11952 struct timespec ts;
11954 ret = target_to_host_timespec(&ts, arg2);
11955 if (!is_error(ret)) {
11956 ret = get_errno(clock_settime(arg1, &ts));
11958 break;
11960 #endif
11961 #ifdef TARGET_NR_clock_gettime
11962 case TARGET_NR_clock_gettime:
11964 struct timespec ts;
11965 ret = get_errno(clock_gettime(arg1, &ts));
11966 if (!is_error(ret)) {
11967 ret = host_to_target_timespec(arg2, &ts);
11969 break;
11971 #endif
11972 #ifdef TARGET_NR_clock_getres
11973 case TARGET_NR_clock_getres:
11975 struct timespec ts;
11976 ret = get_errno(clock_getres(arg1, &ts));
11977 if (!is_error(ret)) {
11978 host_to_target_timespec(arg2, &ts);
11980 break;
11982 #endif
11983 #ifdef TARGET_NR_clock_nanosleep
11984 case TARGET_NR_clock_nanosleep:
11986 struct timespec ts;
11987 target_to_host_timespec(&ts, arg3);
11988 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11989 &ts, arg4 ? &ts : NULL));
11990 if (arg4)
11991 host_to_target_timespec(arg4, &ts);
11993 #if defined(TARGET_PPC)
11994 /* clock_nanosleep is odd in that it returns positive errno values.
11995 * On PPC, CR0 bit 3 should be set in such a situation. */
11996 if (ret && ret != -TARGET_ERESTARTSYS) {
11997 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11999 #endif
12000 break;
12002 #endif
12004 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12005 case TARGET_NR_set_tid_address:
12006 ret = get_errno(set_tid_address((int *)g2h(arg1)));
12007 break;
12008 #endif
12010 case TARGET_NR_tkill:
12011 ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12012 break;
12014 case TARGET_NR_tgkill:
12015 ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
12016 target_to_host_signal(arg3)));
12017 break;
12019 #ifdef TARGET_NR_set_robust_list
12020 case TARGET_NR_set_robust_list:
12021 case TARGET_NR_get_robust_list:
12022 /* The ABI for supporting robust futexes has userspace pass
12023 * the kernel a pointer to a linked list which is updated by
12024 * userspace after the syscall; the list is walked by the kernel
12025 * when the thread exits. Since the linked list in QEMU guest
12026 * memory isn't a valid linked list for the host and we have
12027 * no way to reliably intercept the thread-death event, we can't
12028 * support these. Silently return ENOSYS so that guest userspace
12029 * falls back to a non-robust futex implementation (which should
12030 * be OK except in the corner case of the guest crashing while
12031 * holding a mutex that is shared with another process via
12032 * shared memory).
12034 goto unimplemented_nowarn;
12035 #endif
12037 #if defined(TARGET_NR_utimensat)
12038 case TARGET_NR_utimensat:
12040 struct timespec *tsp, ts[2];
12041 if (!arg3) {
12042 tsp = NULL;
12043 } else {
12044 target_to_host_timespec(ts, arg3);
12045 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
12046 tsp = ts;
12048 if (!arg2)
12049 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12050 else {
12051 if (!(p = lock_user_string(arg2))) {
12052 ret = -TARGET_EFAULT;
12053 goto fail;
12055 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12056 unlock_user(p, arg2, 0);
12059 break;
12060 #endif
12061 case TARGET_NR_futex:
12062 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
12063 break;
12064 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12065 case TARGET_NR_inotify_init:
12066 ret = get_errno(sys_inotify_init());
12067 if (ret >= 0) {
12068 fd_trans_register(ret, &target_inotify_trans);
12070 break;
12071 #endif
12072 #ifdef CONFIG_INOTIFY1
12073 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12074 case TARGET_NR_inotify_init1:
12075 ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12076 fcntl_flags_tbl)));
12077 if (ret >= 0) {
12078 fd_trans_register(ret, &target_inotify_trans);
12080 break;
12081 #endif
12082 #endif
12083 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12084 case TARGET_NR_inotify_add_watch:
12085 p = lock_user_string(arg2);
12086 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12087 unlock_user(p, arg2, 0);
12088 break;
12089 #endif
12090 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12091 case TARGET_NR_inotify_rm_watch:
12092 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
12093 break;
12094 #endif
12096 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12097 case TARGET_NR_mq_open:
12099 struct mq_attr posix_mq_attr;
12100 struct mq_attr *pposix_mq_attr;
12101 int host_flags;
12103 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12104 pposix_mq_attr = NULL;
12105 if (arg4) {
12106 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12107 goto efault;
12109 pposix_mq_attr = &posix_mq_attr;
12111 p = lock_user_string(arg1 - 1);
12112 if (!p) {
12113 goto efault;
12115 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12116 unlock_user (p, arg1, 0);
12118 break;
12120 case TARGET_NR_mq_unlink:
12121 p = lock_user_string(arg1 - 1);
12122 if (!p) {
12123 ret = -TARGET_EFAULT;
12124 break;
12126 ret = get_errno(mq_unlink(p));
12127 unlock_user (p, arg1, 0);
12128 break;
12130 case TARGET_NR_mq_timedsend:
12132 struct timespec ts;
12134 p = lock_user (VERIFY_READ, arg2, arg3, 1);
12135 if (arg5 != 0) {
12136 target_to_host_timespec(&ts, arg5);
12137 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12138 host_to_target_timespec(arg5, &ts);
12139 } else {
12140 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12142 unlock_user (p, arg2, arg3);
12144 break;
12146 case TARGET_NR_mq_timedreceive:
12148 struct timespec ts;
12149 unsigned int prio;
12151 p = lock_user (VERIFY_READ, arg2, arg3, 1);
12152 if (arg5 != 0) {
12153 target_to_host_timespec(&ts, arg5);
12154 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12155 &prio, &ts));
12156 host_to_target_timespec(arg5, &ts);
12157 } else {
12158 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12159 &prio, NULL));
12161 unlock_user (p, arg2, arg3);
12162 if (arg4 != 0)
12163 put_user_u32(prio, arg4);
12165 break;
12167 /* Not implemented for now... */
12168 /* case TARGET_NR_mq_notify: */
12169 /* break; */
12171 case TARGET_NR_mq_getsetattr:
12173 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12174 ret = 0;
12175 if (arg2 != 0) {
12176 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12177 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12178 &posix_mq_attr_out));
12179 } else if (arg3 != 0) {
12180 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12182 if (ret == 0 && arg3 != 0) {
12183 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12186 break;
12187 #endif
12189 #ifdef CONFIG_SPLICE
12190 #ifdef TARGET_NR_tee
12191 case TARGET_NR_tee:
12193 ret = get_errno(tee(arg1,arg2,arg3,arg4));
12195 break;
12196 #endif
12197 #ifdef TARGET_NR_splice
12198 case TARGET_NR_splice:
12200 loff_t loff_in, loff_out;
12201 loff_t *ploff_in = NULL, *ploff_out = NULL;
12202 if (arg2) {
12203 if (get_user_u64(loff_in, arg2)) {
12204 goto efault;
12206 ploff_in = &loff_in;
12208 if (arg4) {
12209 if (get_user_u64(loff_out, arg4)) {
12210 goto efault;
12212 ploff_out = &loff_out;
12214 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12215 if (arg2) {
12216 if (put_user_u64(loff_in, arg2)) {
12217 goto efault;
12220 if (arg4) {
12221 if (put_user_u64(loff_out, arg4)) {
12222 goto efault;
12226 break;
12227 #endif
12228 #ifdef TARGET_NR_vmsplice
12229 case TARGET_NR_vmsplice:
12231 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12232 if (vec != NULL) {
12233 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12234 unlock_iovec(vec, arg2, arg3, 0);
12235 } else {
12236 ret = -host_to_target_errno(errno);
12239 break;
12240 #endif
12241 #endif /* CONFIG_SPLICE */
12242 #ifdef CONFIG_EVENTFD
12243 #if defined(TARGET_NR_eventfd)
12244 case TARGET_NR_eventfd:
12245 ret = get_errno(eventfd(arg1, 0));
12246 if (ret >= 0) {
12247 fd_trans_register(ret, &target_eventfd_trans);
12249 break;
12250 #endif
12251 #if defined(TARGET_NR_eventfd2)
12252 case TARGET_NR_eventfd2:
12254 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
12255 if (arg2 & TARGET_O_NONBLOCK) {
12256 host_flags |= O_NONBLOCK;
12258 if (arg2 & TARGET_O_CLOEXEC) {
12259 host_flags |= O_CLOEXEC;
12261 ret = get_errno(eventfd(arg1, host_flags));
12262 if (ret >= 0) {
12263 fd_trans_register(ret, &target_eventfd_trans);
12265 break;
12267 #endif
12268 #endif /* CONFIG_EVENTFD */
12269 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12270 case TARGET_NR_fallocate:
12271 #if TARGET_ABI_BITS == 32
12272 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12273 target_offset64(arg5, arg6)));
12274 #else
12275 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12276 #endif
12277 break;
12278 #endif
12279 #if defined(CONFIG_SYNC_FILE_RANGE)
12280 #if defined(TARGET_NR_sync_file_range)
12281 case TARGET_NR_sync_file_range:
12282 #if TARGET_ABI_BITS == 32
12283 #if defined(TARGET_MIPS)
12284 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12285 target_offset64(arg5, arg6), arg7));
12286 #else
12287 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12288 target_offset64(arg4, arg5), arg6));
12289 #endif /* !TARGET_MIPS */
12290 #else
12291 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12292 #endif
12293 break;
12294 #endif
12295 #if defined(TARGET_NR_sync_file_range2)
12296 case TARGET_NR_sync_file_range2:
12297 /* This is like sync_file_range but the arguments are reordered */
12298 #if TARGET_ABI_BITS == 32
12299 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12300 target_offset64(arg5, arg6), arg2));
12301 #else
12302 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12303 #endif
12304 break;
12305 #endif
12306 #endif
12307 #if defined(TARGET_NR_signalfd4)
12308 case TARGET_NR_signalfd4:
12309 ret = do_signalfd4(arg1, arg2, arg4);
12310 break;
12311 #endif
12312 #if defined(TARGET_NR_signalfd)
12313 case TARGET_NR_signalfd:
12314 ret = do_signalfd4(arg1, arg2, 0);
12315 break;
12316 #endif
12317 #if defined(CONFIG_EPOLL)
12318 #if defined(TARGET_NR_epoll_create)
12319 case TARGET_NR_epoll_create:
12320 ret = get_errno(epoll_create(arg1));
12321 break;
12322 #endif
12323 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12324 case TARGET_NR_epoll_create1:
12325 ret = get_errno(epoll_create1(arg1));
12326 break;
12327 #endif
12328 #if defined(TARGET_NR_epoll_ctl)
12329 case TARGET_NR_epoll_ctl:
12331 struct epoll_event ep;
12332 struct epoll_event *epp = 0;
12333 if (arg4) {
12334 struct target_epoll_event *target_ep;
12335 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12336 goto efault;
12338 ep.events = tswap32(target_ep->events);
12339 /* The epoll_data_t union is just opaque data to the kernel,
12340 * so we transfer all 64 bits across and need not worry what
12341 * actual data type it is.
12343 ep.data.u64 = tswap64(target_ep->data.u64);
12344 unlock_user_struct(target_ep, arg4, 0);
12345 epp = &ep;
12347 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12348 break;
12350 #endif
12352 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12353 #if defined(TARGET_NR_epoll_wait)
12354 case TARGET_NR_epoll_wait:
12355 #endif
12356 #if defined(TARGET_NR_epoll_pwait)
12357 case TARGET_NR_epoll_pwait:
12358 #endif
12360 struct target_epoll_event *target_ep;
12361 struct epoll_event *ep;
12362 int epfd = arg1;
12363 int maxevents = arg3;
12364 int timeout = arg4;
12366 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12367 ret = -TARGET_EINVAL;
12368 break;
12371 target_ep = lock_user(VERIFY_WRITE, arg2,
12372 maxevents * sizeof(struct target_epoll_event), 1);
12373 if (!target_ep) {
12374 goto efault;
12377 ep = g_try_new(struct epoll_event, maxevents);
12378 if (!ep) {
12379 unlock_user(target_ep, arg2, 0);
12380 ret = -TARGET_ENOMEM;
12381 break;
12384 switch (num) {
12385 #if defined(TARGET_NR_epoll_pwait)
12386 case TARGET_NR_epoll_pwait:
12388 target_sigset_t *target_set;
12389 sigset_t _set, *set = &_set;
12391 if (arg5) {
12392 if (arg6 != sizeof(target_sigset_t)) {
12393 ret = -TARGET_EINVAL;
12394 break;
12397 target_set = lock_user(VERIFY_READ, arg5,
12398 sizeof(target_sigset_t), 1);
12399 if (!target_set) {
12400 ret = -TARGET_EFAULT;
12401 break;
12403 target_to_host_sigset(set, target_set);
12404 unlock_user(target_set, arg5, 0);
12405 } else {
12406 set = NULL;
12409 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12410 set, SIGSET_T_SIZE));
12411 break;
12413 #endif
12414 #if defined(TARGET_NR_epoll_wait)
12415 case TARGET_NR_epoll_wait:
12416 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12417 NULL, 0));
12418 break;
12419 #endif
12420 default:
12421 ret = -TARGET_ENOSYS;
12423 if (!is_error(ret)) {
12424 int i;
12425 for (i = 0; i < ret; i++) {
12426 target_ep[i].events = tswap32(ep[i].events);
12427 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12429 unlock_user(target_ep, arg2,
12430 ret * sizeof(struct target_epoll_event));
12431 } else {
12432 unlock_user(target_ep, arg2, 0);
12434 g_free(ep);
12435 break;
12437 #endif
12438 #endif
12439 #ifdef TARGET_NR_prlimit64
12440 case TARGET_NR_prlimit64:
12442 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12443 struct target_rlimit64 *target_rnew, *target_rold;
12444 struct host_rlimit64 rnew, rold, *rnewp = 0;
12445 int resource = target_to_host_resource(arg2);
/* If a new limit is supplied (arg3), convert it from guest byte order. */
12446 if (arg3) {
12447 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12448 goto efault;
12450 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12451 rnew.rlim_max = tswap64(target_rnew->rlim_max);
12452 unlock_user_struct(target_rnew, arg3, 0);
12453 rnewp = &rnew;
/* Old limits are only fetched when the guest passed arg4. */
12456 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12457 if (!is_error(ret) && arg4) {
12458 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12459 goto efault;
/* Copy the previous limits back to the guest in its byte order. */
12461 target_rold->rlim_cur = tswap64(rold.rlim_cur);
12462 target_rold->rlim_max = tswap64(rold.rlim_max);
12463 unlock_user_struct(target_rold, arg4, 1);
12465 break;
12467 #endif
12468 #ifdef TARGET_NR_gethostname
12469 case TARGET_NR_gethostname:
/* gethostname(arg1=buf, arg2=len): write the host name straight into
 * the guest buffer. */
12471 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12472 if (name) {
12473 ret = get_errno(gethostname(name, arg2));
12474 unlock_user(name, arg1, arg2);
12475 } else {
12476 ret = -TARGET_EFAULT;
12478 break;
12480 #endif
12481 #ifdef TARGET_NR_atomic_cmpxchg_32
12482 case TARGET_NR_atomic_cmpxchg_32:
12484 /* should use start_exclusive from main.c */
/* NOTE(review): as the comment above says, this compare-and-exchange is
 * NOT actually atomic with respect to other guest threads. */
12485 abi_ulong mem_value;
/* A faulting guest address delivers SIGSEGV/SEGV_MAPERR to the guest
 * rather than failing the syscall. */
12486 if (get_user_u32(mem_value, arg6)) {
12487 target_siginfo_t info;
12488 info.si_signo = SIGSEGV;
12489 info.si_errno = 0;
12490 info.si_code = TARGET_SEGV_MAPERR;
12491 info._sifields._sigfault._addr = arg6;
12492 queue_signal((CPUArchState *)cpu_env, info.si_signo,
12493 QEMU_SI_FAULT, &info);
12494 ret = 0xdeadbeef;
/* Classic cmpxchg: store arg1 only if *arg6 == arg2; always return the
 * value that was observed in memory. */
12497 if (mem_value == arg2)
12498 put_user_u32(arg1, arg6);
12499 ret = mem_value;
12500 break;
12502 #endif
12503 #ifdef TARGET_NR_atomic_barrier
12504 case TARGET_NR_atomic_barrier:
12506 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
12507 ret = 0;
12508 break;
12510 #endif
/* POSIX timer emulation: guest timer ids are indices into the
 * g_posix_timers table, tagged with TIMER_MAGIC and decoded by
 * get_timer_id() (both defined elsewhere in this file). */
12512 #ifdef TARGET_NR_timer_create
12513 case TARGET_NR_timer_create:
12515 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12517 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12519 int clkid = arg1;
12520 int timer_index = next_free_host_timer();
/* Table full: report EAGAIN, matching timer_create(2)'s overflow error. */
12522 if (timer_index < 0) {
12523 ret = -TARGET_EAGAIN;
12524 } else {
12525 timer_t *phtimer = g_posix_timers + timer_index;
/* Optional guest sigevent is converted to host format before use. */
12527 if (arg2) {
12528 phost_sevp = &host_sevp;
12529 ret = target_to_host_sigevent(phost_sevp, arg2);
12530 if (ret != 0) {
12531 break;
12535 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12536 if (ret) {
12537 phtimer = NULL;
12538 } else {
/* Hand the guest the tagged id; writing it may itself fault. */
12539 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12540 goto efault;
12544 break;
12546 #endif
12548 #ifdef TARGET_NR_timer_settime
12549 case TARGET_NR_timer_settime:
12551 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12552 * struct itimerspec * old_value */
12553 target_timer_t timerid = get_timer_id(arg1);
/* get_timer_id() returns a negative target errno for a bad id. */
12555 if (timerid < 0) {
12556 ret = timerid;
12557 } else if (arg3 == 0) {
12558 ret = -TARGET_EINVAL;
12559 } else {
12560 timer_t htimer = g_posix_timers[timerid];
12561 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12563 if (target_to_host_itimerspec(&hspec_new, arg3)) {
12564 goto efault;
12566 ret = get_errno(
12567 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
/* Previous timer value is copied back only if the guest asked for it. */
12568 if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12569 goto efault;
12572 break;
12574 #endif
12576 #ifdef TARGET_NR_timer_gettime
12577 case TARGET_NR_timer_gettime:
12579 /* args: timer_t timerid, struct itimerspec *curr_value */
12580 target_timer_t timerid = get_timer_id(arg1);
12582 if (timerid < 0) {
12583 ret = timerid;
12584 } else if (!arg2) {
12585 ret = -TARGET_EFAULT;
12586 } else {
12587 timer_t htimer = g_posix_timers[timerid];
12588 struct itimerspec hspec;
12589 ret = get_errno(timer_gettime(htimer, &hspec));
12591 if (host_to_target_itimerspec(arg2, &hspec)) {
12592 ret = -TARGET_EFAULT;
12595 break;
12597 #endif
12599 #ifdef TARGET_NR_timer_getoverrun
12600 case TARGET_NR_timer_getoverrun:
12602 /* args: timer_t timerid */
12603 target_timer_t timerid = get_timer_id(arg1);
12605 if (timerid < 0) {
12606 ret = timerid;
12607 } else {
12608 timer_t htimer = g_posix_timers[timerid];
12609 ret = get_errno(timer_getoverrun(htimer));
/* NOTE(review): ret here is an overrun count, not a file descriptor;
 * the fd_trans_unregister(ret) call below looks suspect -- confirm
 * against upstream before changing. */
12611 fd_trans_unregister(ret);
12612 break;
12614 #endif
12616 #ifdef TARGET_NR_timer_delete
12617 case TARGET_NR_timer_delete:
12619 /* args: timer_t timerid */
12620 target_timer_t timerid = get_timer_id(arg1);
12622 if (timerid < 0) {
12623 ret = timerid;
12624 } else {
12625 timer_t htimer = g_posix_timers[timerid];
12626 ret = get_errno(timer_delete(htimer));
/* Free the table slot so next_free_host_timer() can reuse it. */
12627 g_posix_timers[timerid] = 0;
12629 break;
12631 #endif
/* timerfd family: thin passthroughs to the host, with itimerspec
 * structures converted between guest and host representations. */
12633 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12634 case TARGET_NR_timerfd_create:
/* arg2 flags (TFD_CLOEXEC/TFD_NONBLOCK) are remapped via the same
 * fcntl flag table used for open(2)-style flags. */
12635 ret = get_errno(timerfd_create(arg1,
12636 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12637 break;
12638 #endif
12640 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12641 case TARGET_NR_timerfd_gettime:
12643 struct itimerspec its_curr;
12645 ret = get_errno(timerfd_gettime(arg1, &its_curr));
/* Copy the current value back only when the guest supplied a buffer. */
12647 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12648 goto efault;
12651 break;
12652 #endif
12654 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12655 case TARGET_NR_timerfd_settime:
12657 struct itimerspec its_new, its_old, *p_new;
12659 if (arg3) {
12660 if (target_to_host_itimerspec(&its_new, arg3)) {
12661 goto efault;
12663 p_new = &its_new;
12664 } else {
12665 p_new = NULL;
12668 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
/* Previous setting is returned to the guest only if arg4 is non-NULL. */
12670 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12671 goto efault;
12674 break;
12675 #endif
/* Direct passthrough syscalls: arguments need no conversion, so each is
 * forwarded to the host and the errno translated by get_errno(). Each is
 * gated on both the target defining the syscall number and the host
 * providing the call. */
12677 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12678 case TARGET_NR_ioprio_get:
12679 ret = get_errno(ioprio_get(arg1, arg2));
12680 break;
12681 #endif
12683 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12684 case TARGET_NR_ioprio_set:
12685 ret = get_errno(ioprio_set(arg1, arg2, arg3));
12686 break;
12687 #endif
12689 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12690 case TARGET_NR_setns:
12691 ret = get_errno(setns(arg1, arg2));
12692 break;
12693 #endif
12694 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12695 case TARGET_NR_unshare:
12696 ret = get_errno(unshare(arg1));
12697 break;
12698 #endif
12699 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12700 case TARGET_NR_kcmp:
12701 ret = get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12702 break;
12703 #endif
/* Fallback for syscalls with no emulation, plus the shared exit labels
 * (fail:/efault:) that earlier cases jump to. */
12705 default:
12706 unimplemented:
12707 gemu_log("qemu: Unsupported syscall: %d\n", num);
/* A few syscalls are expected to be missing on some targets; they jump
 * here to return ENOSYS without logging a warning. */
12708 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
12709 unimplemented_nowarn:
12710 #endif
12711 ret = -TARGET_ENOSYS;
12712 break;
/* Common exit path: optional strace-style logging, then return the
 * (possibly negative target-errno) result to the caller. */
12714 fail:
12715 #ifdef DEBUG
12716 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
12717 #endif
12718 if(do_strace)
12719 print_syscall_ret(num, ret);
12720 trace_guest_user_syscall_ret(cpu, num, ret);
12721 return ret;
/* Shared bad-guest-address exit: cases goto efault on lock_user/put_user
 * failures. */
12722 efault:
12723 ret = -TARGET_EFAULT;
12724 goto fail;