linux-user: ppc64: use the correct values for F_*LK64s
[qemu/ar7.git] / linux-user / syscall.c
blobb8b7bced9f1a90aea3ac2762df7518977595ee05
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/uio.h>
42 #include <poll.h>
43 #include <sys/times.h>
44 #include <sys/shm.h>
45 #include <sys/sem.h>
46 #include <sys/statfs.h>
47 #include <utime.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef TARGET_GPROF
63 #include <sys/gmon.h>
64 #endif
65 #ifdef CONFIG_EVENTFD
66 #include <sys/eventfd.h>
67 #endif
68 #ifdef CONFIG_EPOLL
69 #include <sys/epoll.h>
70 #endif
71 #ifdef CONFIG_ATTR
72 #include "qemu/xattr.h"
73 #endif
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
76 #endif
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #include <linux/vt.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #include <linux/if_bridge.h>
108 #endif
109 #include <linux/audit.h>
110 #include "linux_loop.h"
111 #include "uname.h"
113 #include "qemu.h"
115 #ifndef CLONE_IO
116 #define CLONE_IO 0x80000000 /* Clone io context */
117 #endif
119 /* We can't directly call the host clone syscall, because this will
120 * badly confuse libc (breaking mutexes, for example). So we must
121 * divide clone flags into:
122 * * flag combinations that look like pthread_create()
123 * * flag combinations that look like fork()
124 * * flags we can implement within QEMU itself
125 * * flags we can't support and will return an error for
127 /* For thread creation, all these flags must be present; for
128 * fork, none must be present.
130 #define CLONE_THREAD_FLAGS \
131 (CLONE_VM | CLONE_FS | CLONE_FILES | \
132 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
134 /* These flags are ignored:
135 * CLONE_DETACHED is now ignored by the kernel;
136 * CLONE_IO is just an optimisation hint to the I/O scheduler
138 #define CLONE_IGNORED_FLAGS \
139 (CLONE_DETACHED | CLONE_IO)
141 /* Flags for fork which we can implement within QEMU itself */
142 #define CLONE_OPTIONAL_FORK_FLAGS \
143 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
144 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
146 /* Flags for thread creation which we can implement within QEMU itself */
147 #define CLONE_OPTIONAL_THREAD_FLAGS \
148 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
149 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
151 #define CLONE_INVALID_FORK_FLAGS \
152 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
154 #define CLONE_INVALID_THREAD_FLAGS \
155 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
156 CLONE_IGNORED_FLAGS))
158 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
159 * have almost all been allocated. We cannot support any of
160 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
161 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
162 * The checks against the invalid thread masks above will catch these.
163 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
166 //#define DEBUG
167 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
168 * once. This exercises the codepaths for restart.
170 //#define DEBUG_ERESTARTSYS
172 //#include <linux/msdos_fs.h>
173 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
174 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
176 #undef _syscall0
177 #undef _syscall1
178 #undef _syscall2
179 #undef _syscall3
180 #undef _syscall4
181 #undef _syscall5
182 #undef _syscall6
184 #define _syscall0(type,name) \
185 static type name (void) \
187 return syscall(__NR_##name); \
190 #define _syscall1(type,name,type1,arg1) \
191 static type name (type1 arg1) \
193 return syscall(__NR_##name, arg1); \
196 #define _syscall2(type,name,type1,arg1,type2,arg2) \
197 static type name (type1 arg1,type2 arg2) \
199 return syscall(__NR_##name, arg1, arg2); \
202 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
203 static type name (type1 arg1,type2 arg2,type3 arg3) \
205 return syscall(__NR_##name, arg1, arg2, arg3); \
208 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
209 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
211 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
214 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
215 type5,arg5) \
216 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
218 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
222 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
223 type5,arg5,type6,arg6) \
224 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
225 type6 arg6) \
227 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
231 #define __NR_sys_uname __NR_uname
232 #define __NR_sys_getcwd1 __NR_getcwd
233 #define __NR_sys_getdents __NR_getdents
234 #define __NR_sys_getdents64 __NR_getdents64
235 #define __NR_sys_getpriority __NR_getpriority
236 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
237 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
238 #define __NR_sys_syslog __NR_syslog
239 #define __NR_sys_futex __NR_futex
240 #define __NR_sys_inotify_init __NR_inotify_init
241 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
242 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
244 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
245 #define __NR__llseek __NR_lseek
246 #endif
248 /* Newer kernel ports have llseek() instead of _llseek() */
249 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
250 #define TARGET_NR__llseek TARGET_NR_llseek
251 #endif
253 #ifdef __NR_gettid
254 _syscall0(int, gettid)
255 #else
256 /* This is a replacement for the host gettid() and must return a host
257 errno. */
/* Host fallback when __NR_gettid is unavailable: report "not implemented".
 * Note this must return a *host* errno value, as documented above. */
static int gettid(void)
{
    return -ENOSYS;
}
261 #endif
263 /* For the 64-bit guest on 32-bit host case we must emulate
264 * getdents using getdents64, because otherwise the host
265 * might hand us back more dirent records than we can fit
266 * into the guest buffer after structure format conversion.
267 * Otherwise we emulate getdents with getdents if the host has it.
269 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
270 #define EMULATE_GETDENTS_WITH_GETDENTS
271 #endif
273 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
274 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
275 #endif
276 #if (defined(TARGET_NR_getdents) && \
277 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
278 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
279 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
280 #endif
281 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
282 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
283 loff_t *, res, uint, wh);
284 #endif
285 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
286 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
287 siginfo_t *, uinfo)
288 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
289 #ifdef __NR_exit_group
290 _syscall1(int,exit_group,int,error_code)
291 #endif
292 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
293 _syscall1(int,set_tid_address,int *,tidptr)
294 #endif
295 #if defined(TARGET_NR_futex) && defined(__NR_futex)
296 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
297 const struct timespec *,timeout,int *,uaddr2,int,val3)
298 #endif
299 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
300 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
301 unsigned long *, user_mask_ptr);
302 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
303 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
304 unsigned long *, user_mask_ptr);
305 #define __NR_sys_getcpu __NR_getcpu
306 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
307 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
308 void *, arg);
309 _syscall2(int, capget, struct __user_cap_header_struct *, header,
310 struct __user_cap_data_struct *, data);
311 _syscall2(int, capset, struct __user_cap_header_struct *, header,
312 struct __user_cap_data_struct *, data);
313 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
314 _syscall2(int, ioprio_get, int, which, int, who)
315 #endif
316 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
317 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
318 #endif
319 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
320 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
321 #endif
323 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
324 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
325 unsigned long, idx1, unsigned long, idx2)
326 #endif
328 static bitmask_transtbl fcntl_flags_tbl[] = {
329 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
330 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
331 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
332 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
333 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
334 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
335 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
336 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
337 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
338 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
339 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
340 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
341 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
342 #if defined(O_DIRECT)
343 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
344 #endif
345 #if defined(O_NOATIME)
346 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
347 #endif
348 #if defined(O_CLOEXEC)
349 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
350 #endif
351 #if defined(O_PATH)
352 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
353 #endif
354 #if defined(O_TMPFILE)
355 { TARGET_O_TMPFILE, TARGET_O_TMPFILE, O_TMPFILE, O_TMPFILE },
356 #endif
357 /* Don't terminate the list prematurely on 64-bit host+guest. */
358 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
359 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
360 #endif
361 { 0, 0, 0, 0 }
364 enum {
365 QEMU_IFLA_BR_UNSPEC,
366 QEMU_IFLA_BR_FORWARD_DELAY,
367 QEMU_IFLA_BR_HELLO_TIME,
368 QEMU_IFLA_BR_MAX_AGE,
369 QEMU_IFLA_BR_AGEING_TIME,
370 QEMU_IFLA_BR_STP_STATE,
371 QEMU_IFLA_BR_PRIORITY,
372 QEMU_IFLA_BR_VLAN_FILTERING,
373 QEMU_IFLA_BR_VLAN_PROTOCOL,
374 QEMU_IFLA_BR_GROUP_FWD_MASK,
375 QEMU_IFLA_BR_ROOT_ID,
376 QEMU_IFLA_BR_BRIDGE_ID,
377 QEMU_IFLA_BR_ROOT_PORT,
378 QEMU_IFLA_BR_ROOT_PATH_COST,
379 QEMU_IFLA_BR_TOPOLOGY_CHANGE,
380 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
381 QEMU_IFLA_BR_HELLO_TIMER,
382 QEMU_IFLA_BR_TCN_TIMER,
383 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
384 QEMU_IFLA_BR_GC_TIMER,
385 QEMU_IFLA_BR_GROUP_ADDR,
386 QEMU_IFLA_BR_FDB_FLUSH,
387 QEMU_IFLA_BR_MCAST_ROUTER,
388 QEMU_IFLA_BR_MCAST_SNOOPING,
389 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
390 QEMU_IFLA_BR_MCAST_QUERIER,
391 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
392 QEMU_IFLA_BR_MCAST_HASH_MAX,
393 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
394 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
395 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
396 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
397 QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
398 QEMU_IFLA_BR_MCAST_QUERY_INTVL,
399 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
400 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
401 QEMU_IFLA_BR_NF_CALL_IPTABLES,
402 QEMU_IFLA_BR_NF_CALL_IP6TABLES,
403 QEMU_IFLA_BR_NF_CALL_ARPTABLES,
404 QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
405 QEMU_IFLA_BR_PAD,
406 QEMU_IFLA_BR_VLAN_STATS_ENABLED,
407 QEMU_IFLA_BR_MCAST_STATS_ENABLED,
408 QEMU_IFLA_BR_MCAST_IGMP_VERSION,
409 QEMU_IFLA_BR_MCAST_MLD_VERSION,
410 QEMU___IFLA_BR_MAX,
413 enum {
414 QEMU_IFLA_UNSPEC,
415 QEMU_IFLA_ADDRESS,
416 QEMU_IFLA_BROADCAST,
417 QEMU_IFLA_IFNAME,
418 QEMU_IFLA_MTU,
419 QEMU_IFLA_LINK,
420 QEMU_IFLA_QDISC,
421 QEMU_IFLA_STATS,
422 QEMU_IFLA_COST,
423 QEMU_IFLA_PRIORITY,
424 QEMU_IFLA_MASTER,
425 QEMU_IFLA_WIRELESS,
426 QEMU_IFLA_PROTINFO,
427 QEMU_IFLA_TXQLEN,
428 QEMU_IFLA_MAP,
429 QEMU_IFLA_WEIGHT,
430 QEMU_IFLA_OPERSTATE,
431 QEMU_IFLA_LINKMODE,
432 QEMU_IFLA_LINKINFO,
433 QEMU_IFLA_NET_NS_PID,
434 QEMU_IFLA_IFALIAS,
435 QEMU_IFLA_NUM_VF,
436 QEMU_IFLA_VFINFO_LIST,
437 QEMU_IFLA_STATS64,
438 QEMU_IFLA_VF_PORTS,
439 QEMU_IFLA_PORT_SELF,
440 QEMU_IFLA_AF_SPEC,
441 QEMU_IFLA_GROUP,
442 QEMU_IFLA_NET_NS_FD,
443 QEMU_IFLA_EXT_MASK,
444 QEMU_IFLA_PROMISCUITY,
445 QEMU_IFLA_NUM_TX_QUEUES,
446 QEMU_IFLA_NUM_RX_QUEUES,
447 QEMU_IFLA_CARRIER,
448 QEMU_IFLA_PHYS_PORT_ID,
449 QEMU_IFLA_CARRIER_CHANGES,
450 QEMU_IFLA_PHYS_SWITCH_ID,
451 QEMU_IFLA_LINK_NETNSID,
452 QEMU_IFLA_PHYS_PORT_NAME,
453 QEMU_IFLA_PROTO_DOWN,
454 QEMU_IFLA_GSO_MAX_SEGS,
455 QEMU_IFLA_GSO_MAX_SIZE,
456 QEMU_IFLA_PAD,
457 QEMU_IFLA_XDP,
458 QEMU_IFLA_EVENT,
459 QEMU_IFLA_NEW_NETNSID,
460 QEMU_IFLA_IF_NETNSID,
461 QEMU_IFLA_CARRIER_UP_COUNT,
462 QEMU_IFLA_CARRIER_DOWN_COUNT,
463 QEMU_IFLA_NEW_IFINDEX,
464 QEMU___IFLA_MAX
467 enum {
468 QEMU_IFLA_BRPORT_UNSPEC,
469 QEMU_IFLA_BRPORT_STATE,
470 QEMU_IFLA_BRPORT_PRIORITY,
471 QEMU_IFLA_BRPORT_COST,
472 QEMU_IFLA_BRPORT_MODE,
473 QEMU_IFLA_BRPORT_GUARD,
474 QEMU_IFLA_BRPORT_PROTECT,
475 QEMU_IFLA_BRPORT_FAST_LEAVE,
476 QEMU_IFLA_BRPORT_LEARNING,
477 QEMU_IFLA_BRPORT_UNICAST_FLOOD,
478 QEMU_IFLA_BRPORT_PROXYARP,
479 QEMU_IFLA_BRPORT_LEARNING_SYNC,
480 QEMU_IFLA_BRPORT_PROXYARP_WIFI,
481 QEMU_IFLA_BRPORT_ROOT_ID,
482 QEMU_IFLA_BRPORT_BRIDGE_ID,
483 QEMU_IFLA_BRPORT_DESIGNATED_PORT,
484 QEMU_IFLA_BRPORT_DESIGNATED_COST,
485 QEMU_IFLA_BRPORT_ID,
486 QEMU_IFLA_BRPORT_NO,
487 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
488 QEMU_IFLA_BRPORT_CONFIG_PENDING,
489 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
490 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
491 QEMU_IFLA_BRPORT_HOLD_TIMER,
492 QEMU_IFLA_BRPORT_FLUSH,
493 QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
494 QEMU_IFLA_BRPORT_PAD,
495 QEMU_IFLA_BRPORT_MCAST_FLOOD,
496 QEMU_IFLA_BRPORT_MCAST_TO_UCAST,
497 QEMU_IFLA_BRPORT_VLAN_TUNNEL,
498 QEMU_IFLA_BRPORT_BCAST_FLOOD,
499 QEMU_IFLA_BRPORT_GROUP_FWD_MASK,
500 QEMU_IFLA_BRPORT_NEIGH_SUPPRESS,
501 QEMU___IFLA_BRPORT_MAX
504 enum {
505 QEMU_IFLA_INFO_UNSPEC,
506 QEMU_IFLA_INFO_KIND,
507 QEMU_IFLA_INFO_DATA,
508 QEMU_IFLA_INFO_XSTATS,
509 QEMU_IFLA_INFO_SLAVE_KIND,
510 QEMU_IFLA_INFO_SLAVE_DATA,
511 QEMU___IFLA_INFO_MAX,
514 enum {
515 QEMU_IFLA_INET_UNSPEC,
516 QEMU_IFLA_INET_CONF,
517 QEMU___IFLA_INET_MAX,
520 enum {
521 QEMU_IFLA_INET6_UNSPEC,
522 QEMU_IFLA_INET6_FLAGS,
523 QEMU_IFLA_INET6_CONF,
524 QEMU_IFLA_INET6_STATS,
525 QEMU_IFLA_INET6_MCAST,
526 QEMU_IFLA_INET6_CACHEINFO,
527 QEMU_IFLA_INET6_ICMP6STATS,
528 QEMU_IFLA_INET6_TOKEN,
529 QEMU_IFLA_INET6_ADDR_GEN_MODE,
530 QEMU___IFLA_INET6_MAX
533 enum {
534 QEMU_IFLA_XDP_UNSPEC,
535 QEMU_IFLA_XDP_FD,
536 QEMU_IFLA_XDP_ATTACHED,
537 QEMU_IFLA_XDP_FLAGS,
538 QEMU_IFLA_XDP_PROG_ID,
539 QEMU___IFLA_XDP_MAX,
542 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
543 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
544 typedef struct TargetFdTrans {
545 TargetFdDataFunc host_to_target_data;
546 TargetFdDataFunc target_to_host_data;
547 TargetFdAddrFunc target_to_host_addr;
548 } TargetFdTrans;
550 static TargetFdTrans **target_fd_trans;
552 static unsigned int target_fd_max;
554 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
556 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
557 return target_fd_trans[fd]->target_to_host_data;
559 return NULL;
562 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
564 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
565 return target_fd_trans[fd]->host_to_target_data;
567 return NULL;
570 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
572 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
573 return target_fd_trans[fd]->target_to_host_addr;
575 return NULL;
578 static void fd_trans_register(int fd, TargetFdTrans *trans)
580 unsigned int oldmax;
582 if (fd >= target_fd_max) {
583 oldmax = target_fd_max;
584 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
585 target_fd_trans = g_renew(TargetFdTrans *,
586 target_fd_trans, target_fd_max);
587 memset((void *)(target_fd_trans + oldmax), 0,
588 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
590 target_fd_trans[fd] = trans;
593 static void fd_trans_unregister(int fd)
595 if (fd >= 0 && fd < target_fd_max) {
596 target_fd_trans[fd] = NULL;
600 static void fd_trans_dup(int oldfd, int newfd)
602 fd_trans_unregister(newfd);
603 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
604 fd_trans_register(newfd, target_fd_trans[oldfd]);
/* getcwd() wrapper matching the kernel syscall convention: on success it
 * returns the length of the path including the trailing NUL; on failure
 * it returns -1 with errno already set by getcwd(). */
static int sys_getcwd1(char *buf, size_t size)
{
    if (!getcwd(buf, size)) {
        return -1;
    }
    return strlen(buf) + 1;
}
617 #ifdef TARGET_NR_utimensat
618 #if defined(__NR_utimensat)
619 #define __NR_sys_utimensat __NR_utimensat
620 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
621 const struct timespec *,tsp,int,flags)
622 #else
/* Host lacks utimensat(): always fail with ENOSYS, exactly as the
 * missing syscall would. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    (void)dirfd;
    (void)pathname;
    (void)times;
    (void)flags;
    errno = ENOSYS;
    return -1;
}
629 #endif
630 #endif /* TARGET_NR_utimensat */
632 #ifdef TARGET_NR_renameat2
633 #if defined(__NR_renameat2)
634 #define __NR_sys_renameat2 __NR_renameat2
635 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
636 const char *, new, unsigned int, flags)
637 #else
/* Host lacks renameat2(): the flag-less case is exactly plain renameat();
 * any flags require the real syscall, so report ENOSYS for them. */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags != 0) {
        errno = ENOSYS;
        return -1;
    }
    return renameat(oldfd, old, newfd, new);
}
647 #endif
648 #endif /* TARGET_NR_renameat2 */
650 #ifdef CONFIG_INOTIFY
651 #include <sys/inotify.h>
653 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper so the syscall dispatcher can call inotify_init() uniformly. */
static int sys_inotify_init(void)
{
    return inotify_init();
}
658 #endif
659 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin wrapper over inotify_add_watch() for the syscall dispatcher. */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
664 #endif
665 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin wrapper over inotify_rm_watch() for the syscall dispatcher. */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
670 #endif
671 #ifdef CONFIG_INOTIFY1
672 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin wrapper over inotify_init1() for the syscall dispatcher. */
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
677 #endif
678 #endif
679 #else
680 /* Userspace can usually survive runtime without inotify */
681 #undef TARGET_NR_inotify_init
682 #undef TARGET_NR_inotify_init1
683 #undef TARGET_NR_inotify_add_watch
684 #undef TARGET_NR_inotify_rm_watch
685 #endif /* CONFIG_INOTIFY */
687 #if defined(TARGET_NR_prlimit64)
688 #ifndef __NR_prlimit64
689 # define __NR_prlimit64 -1
690 #endif
691 #define __NR_sys_prlimit64 __NR_prlimit64
692 /* The glibc rlimit structure may not be that used by the underlying syscall */
693 struct host_rlimit64 {
694 uint64_t rlim_cur;
695 uint64_t rlim_max;
697 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
698 const struct host_rlimit64 *, new_limit,
699 struct host_rlimit64 *, old_limit)
700 #endif
703 #if defined(TARGET_NR_timer_create)
704 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
705 static timer_t g_posix_timers[32] = { 0, } ;
707 static inline int next_free_host_timer(void)
709 int k ;
710 /* FIXME: Does finding the next free slot require a lock? */
711 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
712 if (g_posix_timers[k] == 0) {
713 g_posix_timers[k] = (timer_t) 1;
714 return k;
717 return -1;
719 #endif
721 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
722 #ifdef TARGET_ARM
723 static inline int regpairs_aligned(void *cpu_env, int num)
725 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
727 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
728 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
729 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
730 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
731 * of registers which translates to the same as ARM/MIPS, because we start with
732 * r3 as arg1 */
733 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
734 #elif defined(TARGET_SH4)
735 /* SH4 doesn't align register pairs, except for p{read,write}64 */
736 static inline int regpairs_aligned(void *cpu_env, int num)
738 switch (num) {
739 case TARGET_NR_pread64:
740 case TARGET_NR_pwrite64:
741 return 1;
743 default:
744 return 0;
747 #elif defined(TARGET_XTENSA)
748 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
749 #else
750 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
751 #endif
753 #define ERRNO_TABLE_SIZE 1200
755 /* target_to_host_errno_table[] is initialized from
756 * host_to_target_errno_table[] in syscall_init(). */
757 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
761 * This list is the union of errno values overridden in asm-<arch>/errno.h
762 * minus the errnos that are not actually generic to all archs.
764 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
765 [EAGAIN] = TARGET_EAGAIN,
766 [EIDRM] = TARGET_EIDRM,
767 [ECHRNG] = TARGET_ECHRNG,
768 [EL2NSYNC] = TARGET_EL2NSYNC,
769 [EL3HLT] = TARGET_EL3HLT,
770 [EL3RST] = TARGET_EL3RST,
771 [ELNRNG] = TARGET_ELNRNG,
772 [EUNATCH] = TARGET_EUNATCH,
773 [ENOCSI] = TARGET_ENOCSI,
774 [EL2HLT] = TARGET_EL2HLT,
775 [EDEADLK] = TARGET_EDEADLK,
776 [ENOLCK] = TARGET_ENOLCK,
777 [EBADE] = TARGET_EBADE,
778 [EBADR] = TARGET_EBADR,
779 [EXFULL] = TARGET_EXFULL,
780 [ENOANO] = TARGET_ENOANO,
781 [EBADRQC] = TARGET_EBADRQC,
782 [EBADSLT] = TARGET_EBADSLT,
783 [EBFONT] = TARGET_EBFONT,
784 [ENOSTR] = TARGET_ENOSTR,
785 [ENODATA] = TARGET_ENODATA,
786 [ETIME] = TARGET_ETIME,
787 [ENOSR] = TARGET_ENOSR,
788 [ENONET] = TARGET_ENONET,
789 [ENOPKG] = TARGET_ENOPKG,
790 [EREMOTE] = TARGET_EREMOTE,
791 [ENOLINK] = TARGET_ENOLINK,
792 [EADV] = TARGET_EADV,
793 [ESRMNT] = TARGET_ESRMNT,
794 [ECOMM] = TARGET_ECOMM,
795 [EPROTO] = TARGET_EPROTO,
796 [EDOTDOT] = TARGET_EDOTDOT,
797 [EMULTIHOP] = TARGET_EMULTIHOP,
798 [EBADMSG] = TARGET_EBADMSG,
799 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
800 [EOVERFLOW] = TARGET_EOVERFLOW,
801 [ENOTUNIQ] = TARGET_ENOTUNIQ,
802 [EBADFD] = TARGET_EBADFD,
803 [EREMCHG] = TARGET_EREMCHG,
804 [ELIBACC] = TARGET_ELIBACC,
805 [ELIBBAD] = TARGET_ELIBBAD,
806 [ELIBSCN] = TARGET_ELIBSCN,
807 [ELIBMAX] = TARGET_ELIBMAX,
808 [ELIBEXEC] = TARGET_ELIBEXEC,
809 [EILSEQ] = TARGET_EILSEQ,
810 [ENOSYS] = TARGET_ENOSYS,
811 [ELOOP] = TARGET_ELOOP,
812 [ERESTART] = TARGET_ERESTART,
813 [ESTRPIPE] = TARGET_ESTRPIPE,
814 [ENOTEMPTY] = TARGET_ENOTEMPTY,
815 [EUSERS] = TARGET_EUSERS,
816 [ENOTSOCK] = TARGET_ENOTSOCK,
817 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
818 [EMSGSIZE] = TARGET_EMSGSIZE,
819 [EPROTOTYPE] = TARGET_EPROTOTYPE,
820 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
821 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
822 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
823 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
824 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
825 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
826 [EADDRINUSE] = TARGET_EADDRINUSE,
827 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
828 [ENETDOWN] = TARGET_ENETDOWN,
829 [ENETUNREACH] = TARGET_ENETUNREACH,
830 [ENETRESET] = TARGET_ENETRESET,
831 [ECONNABORTED] = TARGET_ECONNABORTED,
832 [ECONNRESET] = TARGET_ECONNRESET,
833 [ENOBUFS] = TARGET_ENOBUFS,
834 [EISCONN] = TARGET_EISCONN,
835 [ENOTCONN] = TARGET_ENOTCONN,
836 [EUCLEAN] = TARGET_EUCLEAN,
837 [ENOTNAM] = TARGET_ENOTNAM,
838 [ENAVAIL] = TARGET_ENAVAIL,
839 [EISNAM] = TARGET_EISNAM,
840 [EREMOTEIO] = TARGET_EREMOTEIO,
841 [EDQUOT] = TARGET_EDQUOT,
842 [ESHUTDOWN] = TARGET_ESHUTDOWN,
843 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
844 [ETIMEDOUT] = TARGET_ETIMEDOUT,
845 [ECONNREFUSED] = TARGET_ECONNREFUSED,
846 [EHOSTDOWN] = TARGET_EHOSTDOWN,
847 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
848 [EALREADY] = TARGET_EALREADY,
849 [EINPROGRESS] = TARGET_EINPROGRESS,
850 [ESTALE] = TARGET_ESTALE,
851 [ECANCELED] = TARGET_ECANCELED,
852 [ENOMEDIUM] = TARGET_ENOMEDIUM,
853 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
854 #ifdef ENOKEY
855 [ENOKEY] = TARGET_ENOKEY,
856 #endif
857 #ifdef EKEYEXPIRED
858 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
859 #endif
860 #ifdef EKEYREVOKED
861 [EKEYREVOKED] = TARGET_EKEYREVOKED,
862 #endif
863 #ifdef EKEYREJECTED
864 [EKEYREJECTED] = TARGET_EKEYREJECTED,
865 #endif
866 #ifdef EOWNERDEAD
867 [EOWNERDEAD] = TARGET_EOWNERDEAD,
868 #endif
869 #ifdef ENOTRECOVERABLE
870 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
871 #endif
872 #ifdef ENOMSG
873 [ENOMSG] = TARGET_ENOMSG,
874 #endif
/* Fix guard typo: the macro is ERFKILL, not ERKFILL.  With the misspelled
 * guard the mapping was always compiled out, so ERFKILL leaked through to
 * the guest untranslated. */
#ifdef ERFKILL
    [ERFKILL] = TARGET_ERFKILL,
#endif
878 #ifdef EHWPOISON
879 [EHWPOISON] = TARGET_EHWPOISON,
880 #endif
883 static inline int host_to_target_errno(int err)
885 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
886 host_to_target_errno_table[err]) {
887 return host_to_target_errno_table[err];
889 return err;
892 static inline int target_to_host_errno(int err)
894 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
895 target_to_host_errno_table[err]) {
896 return target_to_host_errno_table[err];
898 return err;
901 static inline abi_long get_errno(abi_long ret)
903 if (ret == -1)
904 return -host_to_target_errno(errno);
905 else
906 return ret;
909 const char *target_strerror(int err)
911 if (err == TARGET_ERESTARTSYS) {
912 return "To be restarted";
914 if (err == TARGET_QEMU_ESIGRETURN) {
915 return "Successful exit from sigreturn";
918 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
919 return NULL;
921 return strerror(target_to_host_errno(err));
924 #define safe_syscall0(type, name) \
925 static type safe_##name(void) \
927 return safe_syscall(__NR_##name); \
930 #define safe_syscall1(type, name, type1, arg1) \
931 static type safe_##name(type1 arg1) \
933 return safe_syscall(__NR_##name, arg1); \
936 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
937 static type safe_##name(type1 arg1, type2 arg2) \
939 return safe_syscall(__NR_##name, arg1, arg2); \
942 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
943 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
945 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
948 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
949 type4, arg4) \
950 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
952 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
955 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
956 type4, arg4, type5, arg5) \
957 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
958 type5 arg5) \
960 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
963 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
964 type4, arg4, type5, arg5, type6, arg6) \
965 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
966 type5 arg5, type6 arg6) \
968 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
971 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
972 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
/* Interruption-safe wrappers for the host syscalls used by this file.
 * Each safe_syscallN() invocation expands to a safe_xxx() function with
 * the listed signature (the safe_syscall* machinery is declared earlier
 * in this file).
 */
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#ifdef TARGET_NR_clock_nanosleep
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#else
/* This host kernel architecture uses a single ipc syscall; fake up
 * wrappers for the sub-operations to hide this implementation detail.
 * Annoyingly we can't include linux/ipc.h to get the constant definitions
 * for the call parameter because some structs in there conflict with the
 * sys/ipc.h ones. So we just define them here, and rely on them being
 * the same for all host architectures.
 */
#define Q_SEMTIMEDOP 4
#define Q_MSGSND 11
#define Q_MSGRCV 12
/* Pack a sub-op version and opcode into the ipc() "call" argument. */
#define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))

safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)

/* msgsnd via the multiplexed ipc syscall. */
static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
{
    return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
}

/* msgrcv via the multiplexed ipc syscall (version 1 calling convention). */
static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
{
    return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
}

/* semtimedop via the multiplexed ipc syscall; the timeout rides in "fifth". */
static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
                           const struct timespec *timeout)
{
    return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
                    (long)timeout);
}
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
1078 static inline int host_to_target_sock_type(int host_type)
1080 int target_type;
1082 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
1083 case SOCK_DGRAM:
1084 target_type = TARGET_SOCK_DGRAM;
1085 break;
1086 case SOCK_STREAM:
1087 target_type = TARGET_SOCK_STREAM;
1088 break;
1089 default:
1090 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
1091 break;
1094 #if defined(SOCK_CLOEXEC)
1095 if (host_type & SOCK_CLOEXEC) {
1096 target_type |= TARGET_SOCK_CLOEXEC;
1098 #endif
1100 #if defined(SOCK_NONBLOCK)
1101 if (host_type & SOCK_NONBLOCK) {
1102 target_type |= TARGET_SOCK_NONBLOCK;
1104 #endif
1106 return target_type;
/* Guest heap state for the brk emulation in do_brk() below.
 * All three values are guest addresses, page-aligned via HOST_PAGE_ALIGN.
 */
static abi_ulong target_brk;          /* current guest program break */
static abi_ulong target_original_brk; /* initial break; brk never goes below */
static abi_ulong brk_page;            /* end of pages reserved for the heap */

/* Record the initial program break (called by the loader at startup). */
void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

/* Swap in the first definition to trace brk handling on stderr. */
//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
/* do_brk() must return target values and target errnos.
 *
 * Emulates the brk(2) syscall for the guest: grows or shrinks the guest
 * heap, mapping new host memory when the request crosses brk_page.
 * Returns the (possibly unchanged) break address; on Alpha a failure
 * returns -TARGET_ENOMEM instead (OSF/1 semantics).
 */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* brk(0) is the query form: report the current break. */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    /* Refuse to shrink below the initial break. */
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken). */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value. */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
/* Read a guest fd_set covering n descriptors into a host fd_set.
 * The guest representation is an array of abi_ulong bitmap words in
 * guest byte order (TARGET_ABI_BITS bits per word).
 * Returns 0 on success, -TARGET_EFAULT if guest memory is unreadable.
 */
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}
1232 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1233 abi_ulong target_fds_addr,
1234 int n)
1236 if (target_fds_addr) {
1237 if (copy_from_user_fdset(fds, target_fds_addr, n))
1238 return -TARGET_EFAULT;
1239 *fds_ptr = fds;
1240 } else {
1241 *fds_ptr = NULL;
1243 return 0;
/* Write a host fd_set covering n descriptors back to guest memory in
 * the guest's abi_ulong-bitmap representation (guest byte order).
 * Returns 0 on success, -TARGET_EFAULT if guest memory is unwritable.
 */
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
/* Tick rate (HZ) underlying the host kernel's clock_t values.
 * NOTE(review): hard-coded here rather than queried; alpha hosts use
 * 1024, everything else 100 — confirm if adding a new host arch.
 */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

/* Rescale a host clock_t tick count to the guest's TARGET_HZ.
 * Widens to int64_t first so the multiply cannot overflow a long.
 */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
/* Copy a host struct rusage to guest memory at target_addr, swapping
 * each field to guest byte order.  Returns 0 on success, -TARGET_EFAULT
 * if the guest memory cannot be written.
 */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
1321 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1323 abi_ulong target_rlim_swap;
1324 rlim_t result;
1326 target_rlim_swap = tswapal(target_rlim);
1327 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1328 return RLIM_INFINITY;
1330 result = target_rlim_swap;
1331 if (target_rlim_swap != (rlim_t)result)
1332 return RLIM_INFINITY;
1334 return result;
/* Convert a host rlim_t to a guest rlimit value (guest byte order).
 * RLIM_INFINITY, and any value that doesn't survive a round trip through
 * *signed* abi_long (so values with the ABI's top bit set, too), become
 * TARGET_RLIM_INFINITY.
 */
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
1351 static inline int target_to_host_resource(int code)
1353 switch (code) {
1354 case TARGET_RLIMIT_AS:
1355 return RLIMIT_AS;
1356 case TARGET_RLIMIT_CORE:
1357 return RLIMIT_CORE;
1358 case TARGET_RLIMIT_CPU:
1359 return RLIMIT_CPU;
1360 case TARGET_RLIMIT_DATA:
1361 return RLIMIT_DATA;
1362 case TARGET_RLIMIT_FSIZE:
1363 return RLIMIT_FSIZE;
1364 case TARGET_RLIMIT_LOCKS:
1365 return RLIMIT_LOCKS;
1366 case TARGET_RLIMIT_MEMLOCK:
1367 return RLIMIT_MEMLOCK;
1368 case TARGET_RLIMIT_MSGQUEUE:
1369 return RLIMIT_MSGQUEUE;
1370 case TARGET_RLIMIT_NICE:
1371 return RLIMIT_NICE;
1372 case TARGET_RLIMIT_NOFILE:
1373 return RLIMIT_NOFILE;
1374 case TARGET_RLIMIT_NPROC:
1375 return RLIMIT_NPROC;
1376 case TARGET_RLIMIT_RSS:
1377 return RLIMIT_RSS;
1378 case TARGET_RLIMIT_RTPRIO:
1379 return RLIMIT_RTPRIO;
1380 case TARGET_RLIMIT_SIGPENDING:
1381 return RLIMIT_SIGPENDING;
1382 case TARGET_RLIMIT_STACK:
1383 return RLIMIT_STACK;
1384 default:
1385 return code;
1389 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1390 abi_ulong target_tv_addr)
1392 struct target_timeval *target_tv;
1394 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1395 return -TARGET_EFAULT;
1397 __get_user(tv->tv_sec, &target_tv->tv_sec);
1398 __get_user(tv->tv_usec, &target_tv->tv_usec);
1400 unlock_user_struct(target_tv, target_tv_addr, 0);
1402 return 0;
1405 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1406 const struct timeval *tv)
1408 struct target_timeval *target_tv;
1410 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1411 return -TARGET_EFAULT;
1413 __put_user(tv->tv_sec, &target_tv->tv_sec);
1414 __put_user(tv->tv_usec, &target_tv->tv_usec);
1416 unlock_user_struct(target_tv, target_tv_addr, 1);
1418 return 0;
1421 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1422 abi_ulong target_tz_addr)
1424 struct target_timezone *target_tz;
1426 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1427 return -TARGET_EFAULT;
1430 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1431 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1433 unlock_user_struct(target_tz, target_tz_addr, 0);
1435 return 0;
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

/* Read a guest struct mq_attr into the host structure *attr.
 * Returns 0 on success, -TARGET_EFAULT if guest memory is unreadable.
 */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

/* Write the host struct mq_attr *attr back to guest memory.
 * Returns 0 on success, -TARGET_EFAULT if guest memory is unwritable.
 */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos.
 *
 * Emulates select(2) for the guest: converts the three guest fd_sets and
 * the optional timeval, runs the host pselect, then writes the surviving
 * fd_sets and remaining timeout back to the guest on success.
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* select takes a timeval but the host call is pselect: convert. */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* Report the remaining timeout back to the guest. */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}

#if defined(TARGET_WANT_OLD_SYS_SELECT)
/* Old-style select: the five arguments arrive packed in a single
 * guest struct whose address is arg1.  Unpack and defer to do_select().
 */
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#endif
/* Host pipe2() wrapper; returns -ENOSYS when the build environment
 * has no pipe2 (CONFIG_PIPE2 unset).
 */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
/* Emulate pipe()/pipe2() for the guest.  Creates the host pipe, then
 * delivers the two descriptors either via target-specific registers
 * (the historical pipe calling convention on some targets) or by
 * writing them to the guest array at pipedes.
 */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        /* Second fd returned in a register; first as the return value. */
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Default convention: write both fds into the guest int array. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
/* Read a guest ip_mreq/ip_mreqn (multicast membership request) into the
 * host struct.  The interface index is only present when the guest
 * passed the full ip_mreqn-sized structure.
 * Returns 0 on success, -TARGET_EFAULT if guest memory is unreadable.
 */
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    /* IP addresses stay in network byte order: no swap needed. */
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
/* Convert a guest sockaddr at target_addr into the host struct *addr.
 * If the fd has a registered per-fd address translator, defer to it.
 * Handles the AF_UNIX short-sun_path quirk and byte-swaps the
 * AF_NETLINK/AF_PACKET-specific fields.
 * Returns 0 on success, -TARGET_EFAULT if guest memory is unreadable.
 */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
/* Copy a host sockaddr of the given length to guest memory, swapping
 * sa_family and the AF_NETLINK/AF_PACKET/AF_INET6-specific fields to
 * guest byte order.  A zero len is a successful no-op.
 * Returns 0 on success, -TARGET_EFAULT if guest memory is unwritable.
 */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    /* Only swap sa_family if the copy actually covered it. */
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
        struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
            (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
/* Convert the ancillary-data (cmsg) chain attached to a guest msghdr
 * into the host msghdr's control buffer, converting SCM_RIGHTS fd
 * arrays and SCM_CREDENTIALS; any other payload is copied verbatim
 * with a warning.  On return msgh->msg_controllen holds the space
 * actually used.  Returns 0 on success, -TARGET_EFAULT if the guest
 * control buffer is unreadable.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length as declared by the guest header. */
        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* Passed file descriptors: byte-swap each int. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
/* Convert the ancillary-data (cmsg) chain of a host msghdr back into a
 * guest msghdr's control buffer.  Supported payloads (SCM_RIGHTS,
 * SO_TIMESTAMP, SCM_CREDENTIALS, IP_TTL, IP_RECVERR, IPV6_HOPLIMIT,
 * IPV6_RECVERR) are converted field by field; anything else is copied
 * raw with a warning.  Truncation due to a too-small guest buffer sets
 * MSG_CTRUNC in the guest flags (mirroring the kernel's put_cmsg()).
 * Returns 0 on success, -TARGET_EFAULT if guest memory is unwritable.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                /* Passed file descriptors: byte-swap each int. */
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                /* Extended error report plus the offending peer addr. */
                struct errhdr_t {
                   struct sock_extended_err ee;
                   struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                struct errhdr6_t {
                   struct sock_extended_err ee;
                   struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
/* Byte-swap every field of a netlink message header in place
 * (the fields are independent, so ordering is immaterial).
 */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
}
/* Walk a buffer of host-byte-order netlink messages, applying the
 * given per-message converter to each payload and then byte-swapping
 * each header for the guest.  Stops at NLMSG_DONE/NLMSG_ERROR (both
 * are terminal) or a malformed length.  Returns 0, or the converter's
 * negative error.
 */
static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*host_to_target_nlmsg)
                                                       (struct nlmsghdr *))
{
    uint32_t nlmsg_len;
    abi_long ret;

    while (len > sizeof(struct nlmsghdr)) {

        /* Header is still host order here; remember the length before
         * any swapping. */
        nlmsg_len = nlh->nlmsg_len;
        if (nlmsg_len < sizeof(struct nlmsghdr) ||
            nlmsg_len > len) {
            break;
        }

        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            tswap_nlmsghdr(nlh);
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            tswap_nlmsghdr(nlh);
            return 0;
        }
        default:
            ret = host_to_target_nlmsg(nlh);
            if (ret < 0) {
                tswap_nlmsghdr(nlh);
                return ret;
            }
            break;
        }
        tswap_nlmsghdr(nlh);
        len -= NLMSG_ALIGN(nlmsg_len);
        nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
    }
    return 0;
}
2086 static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
2087 size_t len,
2088 abi_long (*target_to_host_nlmsg)
2089 (struct nlmsghdr *))
2091 int ret;
2093 while (len > sizeof(struct nlmsghdr)) {
2094 if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
2095 tswap32(nlh->nlmsg_len) > len) {
2096 break;
2098 tswap_nlmsghdr(nlh);
2099 switch (nlh->nlmsg_type) {
2100 case NLMSG_DONE:
2101 return 0;
2102 case NLMSG_NOOP:
2103 break;
2104 case NLMSG_ERROR:
2106 struct nlmsgerr *e = NLMSG_DATA(nlh);
2107 e->error = tswap32(e->error);
2108 tswap_nlmsghdr(&e->msg);
2109 return 0;
2111 default:
2112 ret = target_to_host_nlmsg(nlh);
2113 if (ret < 0) {
2114 return ret;
2117 len -= NLMSG_ALIGN(nlh->nlmsg_len);
2118 nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
2120 return 0;
2123 #ifdef CONFIG_RTNETLINK
2124 static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
2125 size_t len, void *context,
2126 abi_long (*host_to_target_nlattr)
2127 (struct nlattr *,
2128 void *context))
2130 unsigned short nla_len;
2131 abi_long ret;
2133 while (len > sizeof(struct nlattr)) {
2134 nla_len = nlattr->nla_len;
2135 if (nla_len < sizeof(struct nlattr) ||
2136 nla_len > len) {
2137 break;
2139 ret = host_to_target_nlattr(nlattr, context);
2140 nlattr->nla_len = tswap16(nlattr->nla_len);
2141 nlattr->nla_type = tswap16(nlattr->nla_type);
2142 if (ret < 0) {
2143 return ret;
2145 len -= NLA_ALIGN(nla_len);
2146 nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
2148 return 0;
2151 static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
2152 size_t len,
2153 abi_long (*host_to_target_rtattr)
2154 (struct rtattr *))
2156 unsigned short rta_len;
2157 abi_long ret;
2159 while (len > sizeof(struct rtattr)) {
2160 rta_len = rtattr->rta_len;
2161 if (rta_len < sizeof(struct rtattr) ||
2162 rta_len > len) {
2163 break;
2165 ret = host_to_target_rtattr(rtattr);
2166 rtattr->rta_len = tswap16(rtattr->rta_len);
2167 rtattr->rta_type = tswap16(rtattr->rta_type);
2168 if (ret < 0) {
2169 return ret;
2171 len -= RTA_ALIGN(rta_len);
2172 rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
2174 return 0;
/* Payload of a netlink attribute: skip the (aligned) nlattr header.
 * Do the addition on the char * rather than the void * so the macro does
 * not rely on the GNU extension of arithmetic on void pointers. */
#define NLA_DATA(nla) ((void *)((char *)(nla) + NLA_HDRLEN))
/*
 * Byte-swap the payload of one bridge (IFLA_BR_*) attribute in place,
 * host -> target order.  Cases are grouped by payload width; payload-less,
 * raw-binary and single-byte attributes need no conversion.  Unknown
 * attribute types are only logged, never fatal.
 * @context is unused (required by the for_each_nlattr callback signature).
 * Always returns 0.
 */
static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
                                                  void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* no data */
    case QEMU_IFLA_BR_FDB_FLUSH:
        break;
    /* binary */
    case QEMU_IFLA_BR_GROUP_ADDR:
        break;
    /* uint8_t: single byte, endian-neutral */
    case QEMU_IFLA_BR_VLAN_FILTERING:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
    case QEMU_IFLA_BR_MCAST_ROUTER:
    case QEMU_IFLA_BR_MCAST_SNOOPING:
    case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
    case QEMU_IFLA_BR_MCAST_QUERIER:
    case QEMU_IFLA_BR_NF_CALL_IPTABLES:
    case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
    case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
    case QEMU_IFLA_BR_VLAN_STATS_ENABLED:
    case QEMU_IFLA_BR_MCAST_STATS_ENABLED:
    case QEMU_IFLA_BR_MCAST_IGMP_VERSION:
    case QEMU_IFLA_BR_MCAST_MLD_VERSION:
        break;
    /* uint16_t */
    case QEMU_IFLA_BR_PRIORITY:
    case QEMU_IFLA_BR_VLAN_PROTOCOL:
    case QEMU_IFLA_BR_GROUP_FWD_MASK:
    case QEMU_IFLA_BR_ROOT_PORT:
    case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uint32_t */
    case QEMU_IFLA_BR_FORWARD_DELAY:
    case QEMU_IFLA_BR_HELLO_TIME:
    case QEMU_IFLA_BR_MAX_AGE:
    case QEMU_IFLA_BR_AGEING_TIME:
    case QEMU_IFLA_BR_STP_STATE:
    case QEMU_IFLA_BR_ROOT_PATH_COST:
    case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
    case QEMU_IFLA_BR_MCAST_HASH_MAX:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BR_HELLO_TIMER:
    case QEMU_IFLA_BR_TCN_TIMER:
    case QEMU_IFLA_BR_GC_TIMER:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
    case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uint8_t[], no swapping needed */
    case QEMU_IFLA_BR_ROOT_ID:
    case QEMU_IFLA_BR_BRIDGE_ID:
        break;
    default:
        gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
/*
 * Byte-swap the payload of one bridge-port (IFLA_BRPORT_*) attribute in
 * place, host -> target order.  Grouped by payload width, mirroring
 * host_to_target_data_bridge_nlattr() above.
 * @context is unused.  Always returns 0; unknown types are only logged.
 */
static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
                                                        void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* uint8_t: single byte, endian-neutral */
    case QEMU_IFLA_BRPORT_STATE:
    case QEMU_IFLA_BRPORT_MODE:
    case QEMU_IFLA_BRPORT_GUARD:
    case QEMU_IFLA_BRPORT_PROTECT:
    case QEMU_IFLA_BRPORT_FAST_LEAVE:
    case QEMU_IFLA_BRPORT_LEARNING:
    case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
    case QEMU_IFLA_BRPORT_PROXYARP:
    case QEMU_IFLA_BRPORT_LEARNING_SYNC:
    case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
    case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
    case QEMU_IFLA_BRPORT_CONFIG_PENDING:
    case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
    case QEMU_IFLA_BRPORT_MCAST_FLOOD:
    case QEMU_IFLA_BRPORT_MCAST_TO_UCAST:
    case QEMU_IFLA_BRPORT_VLAN_TUNNEL:
    case QEMU_IFLA_BRPORT_BCAST_FLOOD:
    case QEMU_IFLA_BRPORT_NEIGH_SUPPRESS:
        break;
    /* uint16_t */
    case QEMU_IFLA_BRPORT_PRIORITY:
    case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
    case QEMU_IFLA_BRPORT_DESIGNATED_COST:
    case QEMU_IFLA_BRPORT_ID:
    case QEMU_IFLA_BRPORT_NO:
    case QEMU_IFLA_BRPORT_GROUP_FWD_MASK:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uint32_t */
    case QEMU_IFLA_BRPORT_COST:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
    case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
    case QEMU_IFLA_BRPORT_HOLD_TIMER:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uint8_t[], no swapping needed */
    case QEMU_IFLA_BRPORT_ROOT_ID:
    case QEMU_IFLA_BRPORT_BRIDGE_ID:
        break;
    default:
        gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
/* Carries the link "kind" strings across nested-attribute callbacks, so
 * that an IFLA_INFO_DATA / IFLA_INFO_SLAVE_DATA payload can be converted
 * according to the driver type announced by the preceding
 * IFLA_INFO_KIND / IFLA_INFO_SLAVE_KIND attribute. */
struct linkinfo_context {
    int len;          /* length of name (nla payload size) */
    char *name;       /* points into the IFLA_INFO_KIND payload */
    int slave_len;    /* length of slave_name */
    char *slave_name; /* points into the IFLA_INFO_SLAVE_KIND payload */
};
2325 static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
2326 void *context)
2328 struct linkinfo_context *li_context = context;
2330 switch (nlattr->nla_type) {
2331 /* string */
2332 case QEMU_IFLA_INFO_KIND:
2333 li_context->name = NLA_DATA(nlattr);
2334 li_context->len = nlattr->nla_len - NLA_HDRLEN;
2335 break;
2336 case QEMU_IFLA_INFO_SLAVE_KIND:
2337 li_context->slave_name = NLA_DATA(nlattr);
2338 li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
2339 break;
2340 /* stats */
2341 case QEMU_IFLA_INFO_XSTATS:
2342 /* FIXME: only used by CAN */
2343 break;
2344 /* nested */
2345 case QEMU_IFLA_INFO_DATA:
2346 if (strncmp(li_context->name, "bridge",
2347 li_context->len) == 0) {
2348 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2349 nlattr->nla_len,
2350 NULL,
2351 host_to_target_data_bridge_nlattr);
2352 } else {
2353 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
2355 break;
2356 case QEMU_IFLA_INFO_SLAVE_DATA:
2357 if (strncmp(li_context->slave_name, "bridge",
2358 li_context->slave_len) == 0) {
2359 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2360 nlattr->nla_len,
2361 NULL,
2362 host_to_target_slave_data_bridge_nlattr);
2363 } else {
2364 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2365 li_context->slave_name);
2367 break;
2368 default:
2369 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
2370 break;
2373 return 0;
2376 static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
2377 void *context)
2379 uint32_t *u32;
2380 int i;
2382 switch (nlattr->nla_type) {
2383 case QEMU_IFLA_INET_CONF:
2384 u32 = NLA_DATA(nlattr);
2385 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2386 i++) {
2387 u32[i] = tswap32(u32[i]);
2389 break;
2390 default:
2391 gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
2393 return 0;
/*
 * Convert one AF_INET6 sub-attribute of IFLA_AF_SPEC, host -> target.
 * Cases are grouped by payload layout.  @context is unused.
 * Always returns 0; unknown attribute types are only logged.
 */
static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
                                                 void *context)
{
    uint32_t *u32;
    uint64_t *u64;
    struct ifla_cacheinfo *ci;
    int i;

    switch (nlattr->nla_type) {
    /* binaries */
    case QEMU_IFLA_INET6_TOKEN:
        break;
    /* uint8_t */
    case QEMU_IFLA_INET6_ADDR_GEN_MODE:
        break;
    /* uint32_t */
    case QEMU_IFLA_INET6_FLAGS:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint32_t[] */
    case QEMU_IFLA_INET6_CONF:
        u32 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
             i++) {
            u32[i] = tswap32(u32[i]);
        }
        break;
    /* ifla_cacheinfo: four 32-bit fields */
    case QEMU_IFLA_INET6_CACHEINFO:
        ci = NLA_DATA(nlattr);
        ci->max_reasm_len = tswap32(ci->max_reasm_len);
        ci->tstamp = tswap32(ci->tstamp);
        ci->reachable_time = tswap32(ci->reachable_time);
        ci->retrans_time = tswap32(ci->retrans_time);
        break;
    /* uint64_t[] */
    case QEMU_IFLA_INET6_STATS:
    case QEMU_IFLA_INET6_ICMP6STATS:
        u64 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
             i++) {
            u64[i] = tswap64(u64[i]);
        }
        break;
    default:
        gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
    }
    return 0;
}
2447 static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
2448 void *context)
2450 switch (nlattr->nla_type) {
2451 case AF_INET:
2452 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2453 NULL,
2454 host_to_target_data_inet_nlattr);
2455 case AF_INET6:
2456 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2457 NULL,
2458 host_to_target_data_inet6_nlattr);
2459 default:
2460 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
2461 break;
2463 return 0;
2466 static abi_long host_to_target_data_xdp_nlattr(struct nlattr *nlattr,
2467 void *context)
2469 uint32_t *u32;
2471 switch (nlattr->nla_type) {
2472 /* uint8_t */
2473 case QEMU_IFLA_XDP_ATTACHED:
2474 break;
2475 /* uint32_t */
2476 case QEMU_IFLA_XDP_PROG_ID:
2477 u32 = NLA_DATA(nlattr);
2478 *u32 = tswap32(*u32);
2479 break;
2480 default:
2481 gemu_log("Unknown host XDP type: %d\n", nlattr->nla_type);
2482 break;
2484 return 0;
/*
 * Byte-swap the payload of one link-level (IFLA_*) routing attribute in
 * place, host -> target order.  Cases are grouped by payload type; binary
 * blobs, strings and single-byte values need no conversion, and the
 * nested attribute lists (LINKINFO / AF_SPEC / XDP) are handed to their
 * per-type walkers.  Unknown attribute types are only logged.
 * Always returns 0, or the result of a nested walk.
 */
static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct rtnl_link_stats *st;
    struct rtnl_link_stats64 *st64;
    struct rtnl_link_ifmap *map;
    struct linkinfo_context li_context;

    switch (rtattr->rta_type) {
    /* binary stream */
    case QEMU_IFLA_ADDRESS:
    case QEMU_IFLA_BROADCAST:
    /* string */
    case QEMU_IFLA_IFNAME:
    case QEMU_IFLA_QDISC:
        break;
    /* uint8_t: single byte, endian-neutral */
    case QEMU_IFLA_OPERSTATE:
    case QEMU_IFLA_LINKMODE:
    case QEMU_IFLA_CARRIER:
    case QEMU_IFLA_PROTO_DOWN:
        break;
    /* uint32_t */
    case QEMU_IFLA_MTU:
    case QEMU_IFLA_LINK:
    case QEMU_IFLA_WEIGHT:
    case QEMU_IFLA_TXQLEN:
    case QEMU_IFLA_CARRIER_CHANGES:
    case QEMU_IFLA_NUM_RX_QUEUES:
    case QEMU_IFLA_NUM_TX_QUEUES:
    case QEMU_IFLA_PROMISCUITY:
    case QEMU_IFLA_EXT_MASK:
    case QEMU_IFLA_LINK_NETNSID:
    case QEMU_IFLA_GROUP:
    case QEMU_IFLA_MASTER:
    case QEMU_IFLA_NUM_VF:
    case QEMU_IFLA_GSO_MAX_SEGS:
    case QEMU_IFLA_GSO_MAX_SIZE:
    case QEMU_IFLA_CARRIER_UP_COUNT:
    case QEMU_IFLA_CARRIER_DOWN_COUNT:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct rtnl_link_stats: all fields are 32-bit */
    case QEMU_IFLA_STATS:
        st = RTA_DATA(rtattr);
        st->rx_packets = tswap32(st->rx_packets);
        st->tx_packets = tswap32(st->tx_packets);
        st->rx_bytes = tswap32(st->rx_bytes);
        st->tx_bytes = tswap32(st->tx_bytes);
        st->rx_errors = tswap32(st->rx_errors);
        st->tx_errors = tswap32(st->tx_errors);
        st->rx_dropped = tswap32(st->rx_dropped);
        st->tx_dropped = tswap32(st->tx_dropped);
        st->multicast = tswap32(st->multicast);
        st->collisions = tswap32(st->collisions);

        /* detailed rx_errors: */
        st->rx_length_errors = tswap32(st->rx_length_errors);
        st->rx_over_errors = tswap32(st->rx_over_errors);
        st->rx_crc_errors = tswap32(st->rx_crc_errors);
        st->rx_frame_errors = tswap32(st->rx_frame_errors);
        st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
        st->rx_missed_errors = tswap32(st->rx_missed_errors);

        /* detailed tx_errors */
        st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
        st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
        st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
        st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
        st->tx_window_errors = tswap32(st->tx_window_errors);

        /* for cslip etc */
        st->rx_compressed = tswap32(st->rx_compressed);
        st->tx_compressed = tswap32(st->tx_compressed);
        break;
    /* struct rtnl_link_stats64: same layout, 64-bit fields */
    case QEMU_IFLA_STATS64:
        st64 = RTA_DATA(rtattr);
        st64->rx_packets = tswap64(st64->rx_packets);
        st64->tx_packets = tswap64(st64->tx_packets);
        st64->rx_bytes = tswap64(st64->rx_bytes);
        st64->tx_bytes = tswap64(st64->tx_bytes);
        st64->rx_errors = tswap64(st64->rx_errors);
        st64->tx_errors = tswap64(st64->tx_errors);
        st64->rx_dropped = tswap64(st64->rx_dropped);
        st64->tx_dropped = tswap64(st64->tx_dropped);
        st64->multicast = tswap64(st64->multicast);
        st64->collisions = tswap64(st64->collisions);

        /* detailed rx_errors: */
        st64->rx_length_errors = tswap64(st64->rx_length_errors);
        st64->rx_over_errors = tswap64(st64->rx_over_errors);
        st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
        st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
        st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
        st64->rx_missed_errors = tswap64(st64->rx_missed_errors);

        /* detailed tx_errors */
        st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
        st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
        st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
        st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
        st64->tx_window_errors = tswap64(st64->tx_window_errors);

        /* for cslip etc */
        st64->rx_compressed = tswap64(st64->rx_compressed);
        st64->tx_compressed = tswap64(st64->tx_compressed);
        break;
    /* struct rtnl_link_ifmap */
    case QEMU_IFLA_MAP:
        map = RTA_DATA(rtattr);
        map->mem_start = tswap64(map->mem_start);
        map->mem_end = tswap64(map->mem_end);
        map->base_addr = tswap64(map->base_addr);
        map->irq = tswap16(map->irq);
        break;
    /* nested */
    case QEMU_IFLA_LINKINFO:
        memset(&li_context, 0, sizeof(li_context));
        /* NOTE(review): rta_len includes the rtattr header, so the nested
         * walk is handed a length slightly past the payload; the nested
         * per-attribute length checks bound the walk — confirm. */
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              &li_context,
                                           host_to_target_data_linkinfo_nlattr);
    case QEMU_IFLA_AF_SPEC:
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              NULL,
                                              host_to_target_data_spec_nlattr);
    case QEMU_IFLA_XDP:
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              NULL,
                                              host_to_target_data_xdp_nlattr);
    default:
        gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/*
 * Byte-swap the payload of one address-level (IFA_*) routing attribute
 * in place, host -> target order.  Always returns 0; unknown attribute
 * types are only logged.
 */
static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct ifa_cacheinfo *ci;

    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case IFA_ADDRESS:
    case IFA_LOCAL:
        break;
    /* string */
    case IFA_LABEL:
        break;
    /* u32 */
    case IFA_FLAGS:
    case IFA_BROADCAST:
        /* NOTE(review): IFA_BROADCAST carries a raw, family-dependent
         * address like IFA_ADDRESS; swapping it as a u32 looks suspect —
         * confirm against the kernel rtnetlink layout. */
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct ifa_cacheinfo: four 32-bit fields */
    case IFA_CACHEINFO:
        ci = RTA_DATA(rtattr);
        ci->ifa_prefered = tswap32(ci->ifa_prefered);
        ci->ifa_valid = tswap32(ci->ifa_valid);
        ci->cstamp = tswap32(ci->cstamp);
        ci->tstamp = tswap32(ci->tstamp);
        break;
    default:
        gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
2659 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
2661 uint32_t *u32;
2662 switch (rtattr->rta_type) {
2663 /* binary: depends on family type */
2664 case RTA_GATEWAY:
2665 case RTA_DST:
2666 case RTA_PREFSRC:
2667 break;
2668 /* u32 */
2669 case RTA_PRIORITY:
2670 case RTA_TABLE:
2671 case RTA_OIF:
2672 u32 = RTA_DATA(rtattr);
2673 *u32 = tswap32(*u32);
2674 break;
2675 default:
2676 gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
2677 break;
2679 return 0;
2682 static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
2683 uint32_t rtattr_len)
2685 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2686 host_to_target_data_link_rtattr);
2689 static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
2690 uint32_t rtattr_len)
2692 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2693 host_to_target_data_addr_rtattr);
2696 static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
2697 uint32_t rtattr_len)
2699 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2700 host_to_target_data_route_rtattr);
/*
 * Convert the fixed payload of one rtnetlink message (link / addr /
 * route) host -> target and dispatch the trailing attribute list.
 * Called from host_to_target_for_each_nlmsg() BEFORE the nlmsghdr is
 * swapped, so nlh fields are still in host byte order here.
 * Returns 0, or -TARGET_EINVAL for an unhandled message type.
 */
static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
{
    uint32_t nlmsg_len;
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    nlmsg_len = nlh->nlmsg_len;
    switch (nlh->nlmsg_type) {
    case RTM_NEWLINK:
    case RTM_DELLINK:
    case RTM_GETLINK:
        /* only convert the payload if it is actually present */
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            host_to_target_link_rtattr(IFLA_RTA(ifi),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_NEWADDR:
    case RTM_DELADDR:
    case RTM_GETADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            host_to_target_addr_rtattr(IFA_RTA(ifa),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
    case RTM_GETROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            host_to_target_route_rtattr(RTM_RTA(rtm),
                                        nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EINVAL;
    }
    return 0;
}
2751 static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
2752 size_t len)
2754 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
2757 static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
2758 size_t len,
2759 abi_long (*target_to_host_rtattr)
2760 (struct rtattr *))
2762 abi_long ret;
2764 while (len >= sizeof(struct rtattr)) {
2765 if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
2766 tswap16(rtattr->rta_len) > len) {
2767 break;
2769 rtattr->rta_len = tswap16(rtattr->rta_len);
2770 rtattr->rta_type = tswap16(rtattr->rta_type);
2771 ret = target_to_host_rtattr(rtattr);
2772 if (ret < 0) {
2773 return ret;
2775 len -= RTA_ALIGN(rtattr->rta_len);
2776 rtattr = (struct rtattr *)(((char *)rtattr) +
2777 RTA_ALIGN(rtattr->rta_len));
2779 return 0;
2782 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2784 switch (rtattr->rta_type) {
2785 default:
2786 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
2787 break;
2789 return 0;
2792 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2794 switch (rtattr->rta_type) {
2795 /* binary: depends on family type */
2796 case IFA_LOCAL:
2797 case IFA_ADDRESS:
2798 break;
2799 default:
2800 gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
2801 break;
2803 return 0;
2806 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2808 uint32_t *u32;
2809 switch (rtattr->rta_type) {
2810 /* binary: depends on family type */
2811 case RTA_DST:
2812 case RTA_SRC:
2813 case RTA_GATEWAY:
2814 break;
2815 /* u32 */
2816 case RTA_PRIORITY:
2817 case RTA_OIF:
2818 u32 = RTA_DATA(rtattr);
2819 *u32 = tswap32(*u32);
2820 break;
2821 default:
2822 gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
2823 break;
2825 return 0;
2828 static void target_to_host_link_rtattr(struct rtattr *rtattr,
2829 uint32_t rtattr_len)
2831 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2832 target_to_host_data_link_rtattr);
2835 static void target_to_host_addr_rtattr(struct rtattr *rtattr,
2836 uint32_t rtattr_len)
2838 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2839 target_to_host_data_addr_rtattr);
2842 static void target_to_host_route_rtattr(struct rtattr *rtattr,
2843 uint32_t rtattr_len)
2845 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2846 target_to_host_data_route_rtattr);
/*
 * Convert the fixed payload of one rtnetlink request (link / addr /
 * route) target -> host and dispatch the trailing attribute list.
 * Called from target_to_host_for_each_nlmsg() AFTER the nlmsghdr has
 * been swapped, so nlh fields are already in host byte order here.
 * GETLINK/GETROUTE dump requests carry no payload to convert.
 * Returns 0, or -TARGET_EOPNOTSUPP for an unhandled message type.
 */
static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
{
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    switch (nlh->nlmsg_type) {
    case RTM_GETLINK:
        break;
    case RTM_NEWLINK:
    case RTM_DELLINK:
        /* only convert the payload if it is actually present */
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_GETADDR:
    case RTM_NEWADDR:
    case RTM_DELADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_GETROUTE:
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
                                        NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EOPNOTSUPP;
    }
    return 0;
}
2897 static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
2899 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
2901 #endif /* CONFIG_RTNETLINK */
2903 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2905 switch (nlh->nlmsg_type) {
2906 default:
2907 gemu_log("Unknown host audit message type %d\n",
2908 nlh->nlmsg_type);
2909 return -TARGET_EINVAL;
2911 return 0;
2914 static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
2915 size_t len)
2917 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
2920 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2922 switch (nlh->nlmsg_type) {
2923 case AUDIT_USER:
2924 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2925 case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2926 break;
2927 default:
2928 gemu_log("Unknown target audit message type %d\n",
2929 nlh->nlmsg_type);
2930 return -TARGET_EINVAL;
2933 return 0;
2936 static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
2938 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
/* do_setsockopt() Must return target values and target errnos.
 *
 * Translates a target setsockopt(2) call: target option levels/names and
 * option payloads are converted to their host equivalents before the
 * host setsockopt() is issued.  IP/IPv6/TCP/RAW levels share numeric
 * values with the host and are passed through; SOL_SOCKET option names
 * differ per architecture and are remapped from TARGET_SO_* to SO_*.
 */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTTL:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            /* The kernel accepts these as either an int or a single
             * byte; mirror that by reading whichever size the target
             * supplied and always passing an int to the host. */
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            /* accepts either the short ip_mreq or the full ip_mreqn form */
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            /* struct ip_mreq_source is all network-order addresses, so
             * the bytes can be handed to the host unmodified */
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
            /* all take an 'int' value */
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        case IPV6_PKTINFO:
        {
            struct in6_pktinfo pki;

            if (optlen < sizeof(pki)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
                return -TARGET_EFAULT;
            }

            /* ipi6_addr is in network order; only the ifindex is a
             * host-endian integer that needs swapping */
            pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &pki, sizeof(pki)));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_ICMPV6:
        switch (optname) {
        case ICMPV6_FILTER:
        {
            struct icmp6_filter icmp6f;

            /* silently truncate over-long filters, like the kernel */
            if (optlen > sizeof(icmp6f)) {
                optlen = sizeof(icmp6f);
            }

            if (copy_from_user(&icmp6f, optval_addr, optlen)) {
                return -TARGET_EFAULT;
            }

            /* the filter is a 256-bit map stored as eight 32-bit words */
            for (val = 0; val < 8; val++) {
                icmp6f.data[val] = tswap32(icmp6f.data[val]);
            }

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &icmp6f, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
        case IPV6_CHECKSUM:
            /* those take an u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;

        default:
            goto unimplemented;
        }
        break;
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
                struct timeval tv;

                optname = SO_RCVTIMEO;

                /* TARGET_SO_SNDTIMEO jumps here after remapping optname;
                 * tv is assigned below the label, so the cross-scope goto
                 * never reads it uninitialized. */
set_timeout:
                if (optlen != sizeof(struct target_timeval)) {
                    return -TARGET_EINVAL;
                }

                if (copy_from_user_timeval(&tv, optval_addr)) {
                    return -TARGET_EFAULT;
                }

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                &tv, sizeof(tv)));
                return ret;
        }
        case TARGET_SO_SNDTIMEO:
                optname = SO_SNDTIMEO;
                goto set_timeout;
        case TARGET_SO_ATTACH_FILTER:
        {
                struct target_sock_fprog *tfprog;
                struct target_sock_filter *tfilter;
                struct sock_fprog fprog;
                struct sock_filter *filter;
                int i;

                if (optlen != sizeof(*tfprog)) {
                    return -TARGET_EINVAL;
                }
                if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                    return -TARGET_EFAULT;
                }
                if (!lock_user_struct(VERIFY_READ, tfilter,
                                      tswapal(tfprog->filter), 0)) {
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_EFAULT;
                }

                /* deep-copy the BPF program, swapping each instruction's
                 * multi-byte fields (jt/jf are single bytes) */
                fprog.len = tswap16(tfprog->len);
                filter = g_try_new(struct sock_filter, fprog.len);
                if (filter == NULL) {
                    unlock_user_struct(tfilter, tfprog->filter, 1);
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_ENOMEM;
                }
                for (i = 0; i < fprog.len; i++) {
                    filter[i].code = tswap16(tfilter[i].code);
                    filter[i].jt = tfilter[i].jt;
                    filter[i].jf = tfilter[i].jf;
                    filter[i].k = tswap32(tfilter[i].k);
                }
                fprog.filter = filter;

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                                SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
                g_free(filter);

                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return ret;
        }
        case TARGET_SO_BINDTODEVICE:
        {
                char *dev_ifname, *addr_ifname;

                /* truncate to the maximum interface-name length */
                if (optlen > IFNAMSIZ - 1) {
                    optlen = IFNAMSIZ - 1;
                }
                dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
                if (!dev_ifname) {
                    return -TARGET_EFAULT;
                }
                optname = SO_BINDTODEVICE;
                /* re-copy into a NUL-terminated local buffer: the guest
                 * string is not guaranteed to be terminated */
                addr_ifname = alloca(IFNAMSIZ);
                memcpy(addr_ifname, dev_ifname, optlen);
                addr_ifname[optlen] = 0;
                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                           addr_ifname, optlen));
                unlock_user (dev_ifname, optval_addr, 0);
                return ret;
        }
        /* Options with 'int' argument.  Only the option name needs
         * remapping; the shared int-read code after the switch issues
         * the host call. */
        case TARGET_SO_DEBUG:
                optname = SO_DEBUG;
                break;
        case TARGET_SO_REUSEADDR:
                optname = SO_REUSEADDR;
                break;
        case TARGET_SO_TYPE:
                optname = SO_TYPE;
                break;
        case TARGET_SO_ERROR:
                optname = SO_ERROR;
                break;
        case TARGET_SO_DONTROUTE:
                optname = SO_DONTROUTE;
                break;
        case TARGET_SO_BROADCAST:
                optname = SO_BROADCAST;
                break;
        case TARGET_SO_SNDBUF:
                optname = SO_SNDBUF;
                break;
        case TARGET_SO_SNDBUFFORCE:
                optname = SO_SNDBUFFORCE;
                break;
        case TARGET_SO_RCVBUF:
                optname = SO_RCVBUF;
                break;
        case TARGET_SO_RCVBUFFORCE:
                optname = SO_RCVBUFFORCE;
                break;
        case TARGET_SO_KEEPALIVE:
                optname = SO_KEEPALIVE;
                break;
        case TARGET_SO_OOBINLINE:
                optname = SO_OOBINLINE;
                break;
        case TARGET_SO_NO_CHECK:
                optname = SO_NO_CHECK;
                break;
        case TARGET_SO_PRIORITY:
                optname = SO_PRIORITY;
                break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
                optname = SO_BSDCOMPAT;
                break;
#endif
        case TARGET_SO_PASSCRED:
                optname = SO_PASSCRED;
                break;
        case TARGET_SO_PASSSEC:
                optname = SO_PASSSEC;
                break;
        case TARGET_SO_TIMESTAMP:
                optname = SO_TIMESTAMP;
                break;
        case TARGET_SO_RCVLOWAT:
                optname = SO_RCVLOWAT;
                break;
        default:
            goto unimplemented;
        }
        /* shared path for the 'int'-argument SO_* options above */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
3271 /* do_getsockopt() Must return target values and target errnos. */
3272 static abi_long do_getsockopt(int sockfd, int level, int optname,
3273 abi_ulong optval_addr, abi_ulong optlen)
3275 abi_long ret;
3276 int len, val;
3277 socklen_t lv;
3279 switch(level) {
3280 case TARGET_SOL_SOCKET:
3281 level = SOL_SOCKET;
3282 switch (optname) {
3283 /* These don't just return a single integer */
3284 case TARGET_SO_LINGER:
3285 case TARGET_SO_RCVTIMEO:
3286 case TARGET_SO_SNDTIMEO:
3287 case TARGET_SO_PEERNAME:
3288 goto unimplemented;
3289 case TARGET_SO_PEERCRED: {
3290 struct ucred cr;
3291 socklen_t crlen;
3292 struct target_ucred *tcr;
3294 if (get_user_u32(len, optlen)) {
3295 return -TARGET_EFAULT;
3297 if (len < 0) {
3298 return -TARGET_EINVAL;
3301 crlen = sizeof(cr);
3302 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
3303 &cr, &crlen));
3304 if (ret < 0) {
3305 return ret;
3307 if (len > crlen) {
3308 len = crlen;
3310 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
3311 return -TARGET_EFAULT;
3313 __put_user(cr.pid, &tcr->pid);
3314 __put_user(cr.uid, &tcr->uid);
3315 __put_user(cr.gid, &tcr->gid);
3316 unlock_user_struct(tcr, optval_addr, 1);
3317 if (put_user_u32(len, optlen)) {
3318 return -TARGET_EFAULT;
3320 break;
3322 /* Options with 'int' argument. */
3323 case TARGET_SO_DEBUG:
3324 optname = SO_DEBUG;
3325 goto int_case;
3326 case TARGET_SO_REUSEADDR:
3327 optname = SO_REUSEADDR;
3328 goto int_case;
3329 case TARGET_SO_TYPE:
3330 optname = SO_TYPE;
3331 goto int_case;
3332 case TARGET_SO_ERROR:
3333 optname = SO_ERROR;
3334 goto int_case;
3335 case TARGET_SO_DONTROUTE:
3336 optname = SO_DONTROUTE;
3337 goto int_case;
3338 case TARGET_SO_BROADCAST:
3339 optname = SO_BROADCAST;
3340 goto int_case;
3341 case TARGET_SO_SNDBUF:
3342 optname = SO_SNDBUF;
3343 goto int_case;
3344 case TARGET_SO_RCVBUF:
3345 optname = SO_RCVBUF;
3346 goto int_case;
3347 case TARGET_SO_KEEPALIVE:
3348 optname = SO_KEEPALIVE;
3349 goto int_case;
3350 case TARGET_SO_OOBINLINE:
3351 optname = SO_OOBINLINE;
3352 goto int_case;
3353 case TARGET_SO_NO_CHECK:
3354 optname = SO_NO_CHECK;
3355 goto int_case;
3356 case TARGET_SO_PRIORITY:
3357 optname = SO_PRIORITY;
3358 goto int_case;
3359 #ifdef SO_BSDCOMPAT
3360 case TARGET_SO_BSDCOMPAT:
3361 optname = SO_BSDCOMPAT;
3362 goto int_case;
3363 #endif
3364 case TARGET_SO_PASSCRED:
3365 optname = SO_PASSCRED;
3366 goto int_case;
3367 case TARGET_SO_TIMESTAMP:
3368 optname = SO_TIMESTAMP;
3369 goto int_case;
3370 case TARGET_SO_RCVLOWAT:
3371 optname = SO_RCVLOWAT;
3372 goto int_case;
3373 case TARGET_SO_ACCEPTCONN:
3374 optname = SO_ACCEPTCONN;
3375 goto int_case;
3376 default:
3377 goto int_case;
3379 break;
3380 case SOL_TCP:
3381 /* TCP options all take an 'int' value. */
3382 int_case:
3383 if (get_user_u32(len, optlen))
3384 return -TARGET_EFAULT;
3385 if (len < 0)
3386 return -TARGET_EINVAL;
3387 lv = sizeof(lv);
3388 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3389 if (ret < 0)
3390 return ret;
3391 if (optname == SO_TYPE) {
3392 val = host_to_target_sock_type(val);
3394 if (len > lv)
3395 len = lv;
3396 if (len == 4) {
3397 if (put_user_u32(val, optval_addr))
3398 return -TARGET_EFAULT;
3399 } else {
3400 if (put_user_u8(val, optval_addr))
3401 return -TARGET_EFAULT;
3403 if (put_user_u32(len, optlen))
3404 return -TARGET_EFAULT;
3405 break;
3406 case SOL_IP:
3407 switch(optname) {
3408 case IP_TOS:
3409 case IP_TTL:
3410 case IP_HDRINCL:
3411 case IP_ROUTER_ALERT:
3412 case IP_RECVOPTS:
3413 case IP_RETOPTS:
3414 case IP_PKTINFO:
3415 case IP_MTU_DISCOVER:
3416 case IP_RECVERR:
3417 case IP_RECVTOS:
3418 #ifdef IP_FREEBIND
3419 case IP_FREEBIND:
3420 #endif
3421 case IP_MULTICAST_TTL:
3422 case IP_MULTICAST_LOOP:
3423 if (get_user_u32(len, optlen))
3424 return -TARGET_EFAULT;
3425 if (len < 0)
3426 return -TARGET_EINVAL;
3427 lv = sizeof(lv);
3428 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3429 if (ret < 0)
3430 return ret;
3431 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
3432 len = 1;
3433 if (put_user_u32(len, optlen)
3434 || put_user_u8(val, optval_addr))
3435 return -TARGET_EFAULT;
3436 } else {
3437 if (len > sizeof(int))
3438 len = sizeof(int);
3439 if (put_user_u32(len, optlen)
3440 || put_user_u32(val, optval_addr))
3441 return -TARGET_EFAULT;
3443 break;
3444 default:
3445 ret = -TARGET_ENOPROTOOPT;
3446 break;
3448 break;
3449 default:
3450 unimplemented:
3451 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3452 level, optname);
3453 ret = -TARGET_EOPNOTSUPP;
3454 break;
3456 return ret;
3459 /* Convert target low/high pair representing file offset into the host
3460 * low/high pair. This function doesn't handle offsets bigger than 64 bits
3461 * as the kernel doesn't handle them either.
3463 static void target_to_host_low_high(abi_ulong tlow,
3464 abi_ulong thigh,
3465 unsigned long *hlow,
3466 unsigned long *hhigh)
3468 uint64_t off = tlow |
3469 ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3470 TARGET_LONG_BITS / 2;
3472 *hlow = off;
3473 *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3476 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3477 abi_ulong count, int copy)
3479 struct target_iovec *target_vec;
3480 struct iovec *vec;
3481 abi_ulong total_len, max_len;
3482 int i;
3483 int err = 0;
3484 bool bad_address = false;
3486 if (count == 0) {
3487 errno = 0;
3488 return NULL;
3490 if (count > IOV_MAX) {
3491 errno = EINVAL;
3492 return NULL;
3495 vec = g_try_new0(struct iovec, count);
3496 if (vec == NULL) {
3497 errno = ENOMEM;
3498 return NULL;
3501 target_vec = lock_user(VERIFY_READ, target_addr,
3502 count * sizeof(struct target_iovec), 1);
3503 if (target_vec == NULL) {
3504 err = EFAULT;
3505 goto fail2;
3508 /* ??? If host page size > target page size, this will result in a
3509 value larger than what we can actually support. */
3510 max_len = 0x7fffffff & TARGET_PAGE_MASK;
3511 total_len = 0;
3513 for (i = 0; i < count; i++) {
3514 abi_ulong base = tswapal(target_vec[i].iov_base);
3515 abi_long len = tswapal(target_vec[i].iov_len);
3517 if (len < 0) {
3518 err = EINVAL;
3519 goto fail;
3520 } else if (len == 0) {
3521 /* Zero length pointer is ignored. */
3522 vec[i].iov_base = 0;
3523 } else {
3524 vec[i].iov_base = lock_user(type, base, len, copy);
3525 /* If the first buffer pointer is bad, this is a fault. But
3526 * subsequent bad buffers will result in a partial write; this
3527 * is realized by filling the vector with null pointers and
3528 * zero lengths. */
3529 if (!vec[i].iov_base) {
3530 if (i == 0) {
3531 err = EFAULT;
3532 goto fail;
3533 } else {
3534 bad_address = true;
3537 if (bad_address) {
3538 len = 0;
3540 if (len > max_len - total_len) {
3541 len = max_len - total_len;
3544 vec[i].iov_len = len;
3545 total_len += len;
3548 unlock_user(target_vec, target_addr, 0);
3549 return vec;
3551 fail:
3552 while (--i >= 0) {
3553 if (tswapal(target_vec[i].iov_len) > 0) {
3554 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3557 unlock_user(target_vec, target_addr, 0);
3558 fail2:
3559 g_free(vec);
3560 errno = err;
3561 return NULL;
3564 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3565 abi_ulong count, int copy)
3567 struct target_iovec *target_vec;
3568 int i;
3570 target_vec = lock_user(VERIFY_READ, target_addr,
3571 count * sizeof(struct target_iovec), 1);
3572 if (target_vec) {
3573 for (i = 0; i < count; i++) {
3574 abi_ulong base = tswapal(target_vec[i].iov_base);
3575 abi_long len = tswapal(target_vec[i].iov_len);
3576 if (len < 0) {
3577 break;
3579 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3581 unlock_user(target_vec, target_addr, 0);
3584 g_free(vec);
3587 static inline int target_to_host_sock_type(int *type)
3589 int host_type = 0;
3590 int target_type = *type;
3592 switch (target_type & TARGET_SOCK_TYPE_MASK) {
3593 case TARGET_SOCK_DGRAM:
3594 host_type = SOCK_DGRAM;
3595 break;
3596 case TARGET_SOCK_STREAM:
3597 host_type = SOCK_STREAM;
3598 break;
3599 default:
3600 host_type = target_type & TARGET_SOCK_TYPE_MASK;
3601 break;
3603 if (target_type & TARGET_SOCK_CLOEXEC) {
3604 #if defined(SOCK_CLOEXEC)
3605 host_type |= SOCK_CLOEXEC;
3606 #else
3607 return -TARGET_EINVAL;
3608 #endif
3610 if (target_type & TARGET_SOCK_NONBLOCK) {
3611 #if defined(SOCK_NONBLOCK)
3612 host_type |= SOCK_NONBLOCK;
3613 #elif !defined(O_NONBLOCK)
3614 return -TARGET_EINVAL;
3615 #endif
3617 *type = host_type;
3618 return 0;
3621 /* Try to emulate socket type flags after socket creation. */
3622 static int sock_flags_fixup(int fd, int target_type)
3624 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3625 if (target_type & TARGET_SOCK_NONBLOCK) {
3626 int flags = fcntl(fd, F_GETFL);
3627 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3628 close(fd);
3629 return -TARGET_EINVAL;
3632 #endif
3633 return fd;
3636 static abi_long packet_target_to_host_sockaddr(void *host_addr,
3637 abi_ulong target_addr,
3638 socklen_t len)
3640 struct sockaddr *addr = host_addr;
3641 struct target_sockaddr *target_saddr;
3643 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
3644 if (!target_saddr) {
3645 return -TARGET_EFAULT;
3648 memcpy(addr, target_saddr, len);
3649 addr->sa_family = tswap16(target_saddr->sa_family);
3650 /* spkt_protocol is big-endian */
3652 unlock_user(target_saddr, target_addr, 0);
3653 return 0;
3656 static TargetFdTrans target_packet_trans = {
3657 .target_to_host_addr = packet_target_to_host_sockaddr,
#ifdef CONFIG_RTNETLINK
/* NETLINK_ROUTE payload converters: the nlmsg helpers rewrite the buffer
 * in place and return a negative target errno on failure; on success the
 * original length is reported back to the caller. */
static abi_long netlink_route_target_to_host(void *buf, size_t len)
{
    abi_long ret = target_to_host_nlmsg_route(buf, len);

    return (ret < 0) ? ret : len;
}

static abi_long netlink_route_host_to_target(void *buf, size_t len)
{
    abi_long ret = host_to_target_nlmsg_route(buf, len);

    return (ret < 0) ? ret : len;
}

static TargetFdTrans target_netlink_route_trans = {
    .target_to_host_data = netlink_route_target_to_host,
    .host_to_target_data = netlink_route_host_to_target,
};
#endif /* CONFIG_RTNETLINK */
3691 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
3693 abi_long ret;
3695 ret = target_to_host_nlmsg_audit(buf, len);
3696 if (ret < 0) {
3697 return ret;
3700 return len;
3703 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
3705 abi_long ret;
3707 ret = host_to_target_nlmsg_audit(buf, len);
3708 if (ret < 0) {
3709 return ret;
3712 return len;
3715 static TargetFdTrans target_netlink_audit_trans = {
3716 .target_to_host_data = netlink_audit_target_to_host,
3717 .host_to_target_data = netlink_audit_host_to_target,
3720 /* do_socket() Must return target values and target errnos. */
3721 static abi_long do_socket(int domain, int type, int protocol)
3723 int target_type = type;
3724 int ret;
3726 ret = target_to_host_sock_type(&type);
3727 if (ret) {
3728 return ret;
3731 if (domain == PF_NETLINK && !(
3732 #ifdef CONFIG_RTNETLINK
3733 protocol == NETLINK_ROUTE ||
3734 #endif
3735 protocol == NETLINK_KOBJECT_UEVENT ||
3736 protocol == NETLINK_AUDIT)) {
3737 return -EPFNOSUPPORT;
3740 if (domain == AF_PACKET ||
3741 (domain == AF_INET && type == SOCK_PACKET)) {
3742 protocol = tswap16(protocol);
3745 ret = get_errno(socket(domain, type, protocol));
3746 if (ret >= 0) {
3747 ret = sock_flags_fixup(ret, target_type);
3748 if (type == SOCK_PACKET) {
3749 /* Manage an obsolete case :
3750 * if socket type is SOCK_PACKET, bind by name
3752 fd_trans_register(ret, &target_packet_trans);
3753 } else if (domain == PF_NETLINK) {
3754 switch (protocol) {
3755 #ifdef CONFIG_RTNETLINK
3756 case NETLINK_ROUTE:
3757 fd_trans_register(ret, &target_netlink_route_trans);
3758 break;
3759 #endif
3760 case NETLINK_KOBJECT_UEVENT:
3761 /* nothing to do: messages are strings */
3762 break;
3763 case NETLINK_AUDIT:
3764 fd_trans_register(ret, &target_netlink_audit_trans);
3765 break;
3766 default:
3767 g_assert_not_reached();
3771 return ret;
3774 /* do_bind() Must return target values and target errnos. */
3775 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3776 socklen_t addrlen)
3778 void *addr;
3779 abi_long ret;
3781 if ((int)addrlen < 0) {
3782 return -TARGET_EINVAL;
3785 addr = alloca(addrlen+1);
3787 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3788 if (ret)
3789 return ret;
3791 return get_errno(bind(sockfd, addr, addrlen));
3794 /* do_connect() Must return target values and target errnos. */
3795 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3796 socklen_t addrlen)
3798 void *addr;
3799 abi_long ret;
3801 if ((int)addrlen < 0) {
3802 return -TARGET_EINVAL;
3805 addr = alloca(addrlen+1);
3807 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3808 if (ret)
3809 return ret;
3811 return get_errno(safe_connect(sockfd, addr, addrlen));
3814 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3815 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3816 int flags, int send)
3818 abi_long ret, len;
3819 struct msghdr msg;
3820 abi_ulong count;
3821 struct iovec *vec;
3822 abi_ulong target_vec;
3824 if (msgp->msg_name) {
3825 msg.msg_namelen = tswap32(msgp->msg_namelen);
3826 msg.msg_name = alloca(msg.msg_namelen+1);
3827 ret = target_to_host_sockaddr(fd, msg.msg_name,
3828 tswapal(msgp->msg_name),
3829 msg.msg_namelen);
3830 if (ret == -TARGET_EFAULT) {
3831 /* For connected sockets msg_name and msg_namelen must
3832 * be ignored, so returning EFAULT immediately is wrong.
3833 * Instead, pass a bad msg_name to the host kernel, and
3834 * let it decide whether to return EFAULT or not.
3836 msg.msg_name = (void *)-1;
3837 } else if (ret) {
3838 goto out2;
3840 } else {
3841 msg.msg_name = NULL;
3842 msg.msg_namelen = 0;
3844 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3845 msg.msg_control = alloca(msg.msg_controllen);
3846 msg.msg_flags = tswap32(msgp->msg_flags);
3848 count = tswapal(msgp->msg_iovlen);
3849 target_vec = tswapal(msgp->msg_iov);
3851 if (count > IOV_MAX) {
3852 /* sendrcvmsg returns a different errno for this condition than
3853 * readv/writev, so we must catch it here before lock_iovec() does.
3855 ret = -TARGET_EMSGSIZE;
3856 goto out2;
3859 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3860 target_vec, count, send);
3861 if (vec == NULL) {
3862 ret = -host_to_target_errno(errno);
3863 goto out2;
3865 msg.msg_iovlen = count;
3866 msg.msg_iov = vec;
3868 if (send) {
3869 if (fd_trans_target_to_host_data(fd)) {
3870 void *host_msg;
3872 host_msg = g_malloc(msg.msg_iov->iov_len);
3873 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3874 ret = fd_trans_target_to_host_data(fd)(host_msg,
3875 msg.msg_iov->iov_len);
3876 if (ret >= 0) {
3877 msg.msg_iov->iov_base = host_msg;
3878 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3880 g_free(host_msg);
3881 } else {
3882 ret = target_to_host_cmsg(&msg, msgp);
3883 if (ret == 0) {
3884 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3887 } else {
3888 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3889 if (!is_error(ret)) {
3890 len = ret;
3891 if (fd_trans_host_to_target_data(fd)) {
3892 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3893 len);
3894 } else {
3895 ret = host_to_target_cmsg(msgp, &msg);
3897 if (!is_error(ret)) {
3898 msgp->msg_namelen = tswap32(msg.msg_namelen);
3899 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3900 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3901 msg.msg_name, msg.msg_namelen);
3902 if (ret) {
3903 goto out;
3907 ret = len;
3912 out:
3913 unlock_iovec(vec, target_vec, count, !send);
3914 out2:
3915 return ret;
3918 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3919 int flags, int send)
3921 abi_long ret;
3922 struct target_msghdr *msgp;
3924 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3925 msgp,
3926 target_msg,
3927 send ? 1 : 0)) {
3928 return -TARGET_EFAULT;
3930 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3931 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3932 return ret;
3935 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3936 * so it might not have this *mmsg-specific flag either.
3938 #ifndef MSG_WAITFORONE
3939 #define MSG_WAITFORONE 0x10000
3940 #endif
3942 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3943 unsigned int vlen, unsigned int flags,
3944 int send)
3946 struct target_mmsghdr *mmsgp;
3947 abi_long ret = 0;
3948 int i;
3950 if (vlen > UIO_MAXIOV) {
3951 vlen = UIO_MAXIOV;
3954 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3955 if (!mmsgp) {
3956 return -TARGET_EFAULT;
3959 for (i = 0; i < vlen; i++) {
3960 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3961 if (is_error(ret)) {
3962 break;
3964 mmsgp[i].msg_len = tswap32(ret);
3965 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3966 if (flags & MSG_WAITFORONE) {
3967 flags |= MSG_DONTWAIT;
3971 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3973 /* Return number of datagrams sent if we sent any at all;
3974 * otherwise return the error.
3976 if (i) {
3977 return i;
3979 return ret;
3982 /* do_accept4() Must return target values and target errnos. */
3983 static abi_long do_accept4(int fd, abi_ulong target_addr,
3984 abi_ulong target_addrlen_addr, int flags)
3986 socklen_t addrlen;
3987 void *addr;
3988 abi_long ret;
3989 int host_flags;
3991 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3993 if (target_addr == 0) {
3994 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3997 /* linux returns EINVAL if addrlen pointer is invalid */
3998 if (get_user_u32(addrlen, target_addrlen_addr))
3999 return -TARGET_EINVAL;
4001 if ((int)addrlen < 0) {
4002 return -TARGET_EINVAL;
4005 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
4006 return -TARGET_EINVAL;
4008 addr = alloca(addrlen);
4010 ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
4011 if (!is_error(ret)) {
4012 host_to_target_sockaddr(target_addr, addr, addrlen);
4013 if (put_user_u32(addrlen, target_addrlen_addr))
4014 ret = -TARGET_EFAULT;
4016 return ret;
4019 /* do_getpeername() Must return target values and target errnos. */
4020 static abi_long do_getpeername(int fd, abi_ulong target_addr,
4021 abi_ulong target_addrlen_addr)
4023 socklen_t addrlen;
4024 void *addr;
4025 abi_long ret;
4027 if (get_user_u32(addrlen, target_addrlen_addr))
4028 return -TARGET_EFAULT;
4030 if ((int)addrlen < 0) {
4031 return -TARGET_EINVAL;
4034 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
4035 return -TARGET_EFAULT;
4037 addr = alloca(addrlen);
4039 ret = get_errno(getpeername(fd, addr, &addrlen));
4040 if (!is_error(ret)) {
4041 host_to_target_sockaddr(target_addr, addr, addrlen);
4042 if (put_user_u32(addrlen, target_addrlen_addr))
4043 ret = -TARGET_EFAULT;
4045 return ret;
4048 /* do_getsockname() Must return target values and target errnos. */
4049 static abi_long do_getsockname(int fd, abi_ulong target_addr,
4050 abi_ulong target_addrlen_addr)
4052 socklen_t addrlen;
4053 void *addr;
4054 abi_long ret;
4056 if (get_user_u32(addrlen, target_addrlen_addr))
4057 return -TARGET_EFAULT;
4059 if ((int)addrlen < 0) {
4060 return -TARGET_EINVAL;
4063 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
4064 return -TARGET_EFAULT;
4066 addr = alloca(addrlen);
4068 ret = get_errno(getsockname(fd, addr, &addrlen));
4069 if (!is_error(ret)) {
4070 host_to_target_sockaddr(target_addr, addr, addrlen);
4071 if (put_user_u32(addrlen, target_addrlen_addr))
4072 ret = -TARGET_EFAULT;
4074 return ret;
4077 /* do_socketpair() Must return target values and target errnos. */
4078 static abi_long do_socketpair(int domain, int type, int protocol,
4079 abi_ulong target_tab_addr)
4081 int tab[2];
4082 abi_long ret;
4084 target_to_host_sock_type(&type);
4086 ret = get_errno(socketpair(domain, type, protocol, tab));
4087 if (!is_error(ret)) {
4088 if (put_user_s32(tab[0], target_tab_addr)
4089 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
4090 ret = -TARGET_EFAULT;
4092 return ret;
4095 /* do_sendto() Must return target values and target errnos. */
4096 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
4097 abi_ulong target_addr, socklen_t addrlen)
4099 void *addr;
4100 void *host_msg;
4101 void *copy_msg = NULL;
4102 abi_long ret;
4104 if ((int)addrlen < 0) {
4105 return -TARGET_EINVAL;
4108 host_msg = lock_user(VERIFY_READ, msg, len, 1);
4109 if (!host_msg)
4110 return -TARGET_EFAULT;
4111 if (fd_trans_target_to_host_data(fd)) {
4112 copy_msg = host_msg;
4113 host_msg = g_malloc(len);
4114 memcpy(host_msg, copy_msg, len);
4115 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
4116 if (ret < 0) {
4117 goto fail;
4120 if (target_addr) {
4121 addr = alloca(addrlen+1);
4122 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
4123 if (ret) {
4124 goto fail;
4126 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
4127 } else {
4128 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
4130 fail:
4131 if (copy_msg) {
4132 g_free(host_msg);
4133 host_msg = copy_msg;
4135 unlock_user(host_msg, msg, 0);
4136 return ret;
4139 /* do_recvfrom() Must return target values and target errnos. */
4140 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
4141 abi_ulong target_addr,
4142 abi_ulong target_addrlen)
4144 socklen_t addrlen;
4145 void *addr;
4146 void *host_msg;
4147 abi_long ret;
4149 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
4150 if (!host_msg)
4151 return -TARGET_EFAULT;
4152 if (target_addr) {
4153 if (get_user_u32(addrlen, target_addrlen)) {
4154 ret = -TARGET_EFAULT;
4155 goto fail;
4157 if ((int)addrlen < 0) {
4158 ret = -TARGET_EINVAL;
4159 goto fail;
4161 addr = alloca(addrlen);
4162 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
4163 addr, &addrlen));
4164 } else {
4165 addr = NULL; /* To keep compiler quiet. */
4166 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
4168 if (!is_error(ret)) {
4169 if (fd_trans_host_to_target_data(fd)) {
4170 ret = fd_trans_host_to_target_data(fd)(host_msg, ret);
4172 if (target_addr) {
4173 host_to_target_sockaddr(target_addr, addr, addrlen);
4174 if (put_user_u32(addrlen, target_addrlen)) {
4175 ret = -TARGET_EFAULT;
4176 goto fail;
4179 unlock_user(host_msg, msg, len);
4180 } else {
4181 fail:
4182 unlock_user(host_msg, msg, 0);
4184 return ret;
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long args[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(args)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in args[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(args[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(args[0], args[1], args[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(args[0], args[1], args[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(args[0], args[1], args[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(args[0], args[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(args[0], args[1], args[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(args[0], args[1], args[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(args[0], args[1], args[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(args[0], args[1], args[2], args[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(args[0], args[1], args[2], args[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(args[0], args[1], args[2], args[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(args[0], args[1], args[2], args[3], args[4], args[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(args[0], args[1], args[2], args[3],
                           args[4], args[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(args[0], args[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(args[0], args[1], args[2], args[3], args[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(args[0], args[1], args[2], args[3], args[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(args[0], args[1], args[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(args[0], args[1], args[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(args[0], args[1], args[2], args[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(args[0], args[1], args[2], args[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(args[0], args[1], args[2], args[3], 1);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
4280 #define N_SHM_REGIONS 32
4282 static struct shm_region {
4283 abi_ulong start;
4284 abi_ulong size;
4285 bool in_use;
4286 } shm_regions[N_SHM_REGIONS];
4288 #ifndef TARGET_SEMID64_DS
4289 /* asm-generic version of this struct */
4290 struct target_semid64_ds
4292 struct target_ipc_perm sem_perm;
4293 abi_ulong sem_otime;
4294 #if TARGET_ABI_BITS == 32
4295 abi_ulong __unused1;
4296 #endif
4297 abi_ulong sem_ctime;
4298 #if TARGET_ABI_BITS == 32
4299 abi_ulong __unused2;
4300 #endif
4301 abi_ulong sem_nsems;
4302 abi_ulong __unused3;
4303 abi_ulong __unused4;
4305 #endif
4307 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
4308 abi_ulong target_addr)
4310 struct target_ipc_perm *target_ip;
4311 struct target_semid64_ds *target_sd;
4313 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4314 return -TARGET_EFAULT;
4315 target_ip = &(target_sd->sem_perm);
4316 host_ip->__key = tswap32(target_ip->__key);
4317 host_ip->uid = tswap32(target_ip->uid);
4318 host_ip->gid = tswap32(target_ip->gid);
4319 host_ip->cuid = tswap32(target_ip->cuid);
4320 host_ip->cgid = tswap32(target_ip->cgid);
4321 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4322 host_ip->mode = tswap32(target_ip->mode);
4323 #else
4324 host_ip->mode = tswap16(target_ip->mode);
4325 #endif
4326 #if defined(TARGET_PPC)
4327 host_ip->__seq = tswap32(target_ip->__seq);
4328 #else
4329 host_ip->__seq = tswap16(target_ip->__seq);
4330 #endif
4331 unlock_user_struct(target_sd, target_addr, 0);
4332 return 0;
4335 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
4336 struct ipc_perm *host_ip)
4338 struct target_ipc_perm *target_ip;
4339 struct target_semid64_ds *target_sd;
4341 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4342 return -TARGET_EFAULT;
4343 target_ip = &(target_sd->sem_perm);
4344 target_ip->__key = tswap32(host_ip->__key);
4345 target_ip->uid = tswap32(host_ip->uid);
4346 target_ip->gid = tswap32(host_ip->gid);
4347 target_ip->cuid = tswap32(host_ip->cuid);
4348 target_ip->cgid = tswap32(host_ip->cgid);
4349 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4350 target_ip->mode = tswap32(host_ip->mode);
4351 #else
4352 target_ip->mode = tswap16(host_ip->mode);
4353 #endif
4354 #if defined(TARGET_PPC)
4355 target_ip->__seq = tswap32(host_ip->__seq);
4356 #else
4357 target_ip->__seq = tswap16(host_ip->__seq);
4358 #endif
4359 unlock_user_struct(target_sd, target_addr, 1);
4360 return 0;
4363 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
4364 abi_ulong target_addr)
4366 struct target_semid64_ds *target_sd;
4368 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4369 return -TARGET_EFAULT;
4370 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
4371 return -TARGET_EFAULT;
4372 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
4373 host_sd->sem_otime = tswapal(target_sd->sem_otime);
4374 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
4375 unlock_user_struct(target_sd, target_addr, 0);
4376 return 0;
4379 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
4380 struct semid_ds *host_sd)
4382 struct target_semid64_ds *target_sd;
4384 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4385 return -TARGET_EFAULT;
4386 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
4387 return -TARGET_EFAULT;
4388 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
4389 target_sd->sem_otime = tswapal(host_sd->sem_otime);
4390 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
4391 unlock_user_struct(target_sd, target_addr, 1);
4392 return 0;
/* Guest-layout counterpart of the host's struct seminfo (IPC_INFO /
 * SEM_INFO results); all fields are plain ints in both layouts. */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
4408 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
4409 struct seminfo *host_seminfo)
4411 struct target_seminfo *target_seminfo;
4412 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
4413 return -TARGET_EFAULT;
4414 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
4415 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
4416 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
4417 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
4418 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
4419 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
4420 __put_user(host_seminfo->semume, &target_seminfo->semume);
4421 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
4422 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
4423 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
4424 unlock_user_struct(target_seminfo, target_addr, 1);
4425 return 0;
/* Host-side fourth argument for semctl(2); glibc requires the caller
 * to define this union themselves. */
4428 union semun {
4429 int val;
4430 struct semid_ds *buf;
4431 unsigned short *array;
4432 struct seminfo *__buf;
/* Guest view of union semun: the pointer members are guest addresses
 * (abi_ulong), not host pointers. */
4435 union target_semun {
4436 int val;
4437 abi_ulong buf;
4438 abi_ulong array;
4439 abi_ulong __buf;
4442 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
4443 abi_ulong target_addr)
4445 int nsems;
4446 unsigned short *array;
4447 union semun semun;
4448 struct semid_ds semid_ds;
4449 int i, ret;
4451 semun.buf = &semid_ds;
4453 ret = semctl(semid, 0, IPC_STAT, semun);
4454 if (ret == -1)
4455 return get_errno(ret);
4457 nsems = semid_ds.sem_nsems;
4459 *host_array = g_try_new(unsigned short, nsems);
4460 if (!*host_array) {
4461 return -TARGET_ENOMEM;
4463 array = lock_user(VERIFY_READ, target_addr,
4464 nsems*sizeof(unsigned short), 1);
4465 if (!array) {
4466 g_free(*host_array);
4467 return -TARGET_EFAULT;
4470 for(i=0; i<nsems; i++) {
4471 __get_user((*host_array)[i], &array[i]);
4473 unlock_user(array, target_addr, 0);
4475 return 0;
4478 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4479 unsigned short **host_array)
4481 int nsems;
4482 unsigned short *array;
4483 union semun semun;
4484 struct semid_ds semid_ds;
4485 int i, ret;
4487 semun.buf = &semid_ds;
4489 ret = semctl(semid, 0, IPC_STAT, semun);
4490 if (ret == -1)
4491 return get_errno(ret);
4493 nsems = semid_ds.sem_nsems;
4495 array = lock_user(VERIFY_WRITE, target_addr,
4496 nsems*sizeof(unsigned short), 0);
4497 if (!array)
4498 return -TARGET_EFAULT;
4500 for(i=0; i<nsems; i++) {
4501 __put_user((*host_array)[i], &array[i]);
4503 g_free(*host_array);
4504 unlock_user(array, target_addr, 1);
4506 return 0;
/*
 * Emulate semctl(2) for the guest.  target_arg is the guest's
 * union semun passed by value; each command class marshals its own
 * argument kind (int value, value array, semid_ds, or seminfo).
 * Returns the host result or a target errno.
 */
4509 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4510 abi_ulong target_arg)
4512 union target_semun target_su = { .buf = target_arg };
4513 union semun arg;
4514 struct semid_ds dsarg;
4515 unsigned short *array = NULL;
4516 struct seminfo seminfo;
4517 abi_long ret = -TARGET_EINVAL;
4518 abi_long err;
/* Strip IPC_64 and similar flag bits; host semctl gets the bare command. */
4519 cmd &= 0xff;
4521 switch( cmd ) {
4522 case GETVAL:
4523 case SETVAL:
4524 /* In 64 bit cross-endian situations, we will erroneously pick up
4525 * the wrong half of the union for the "val" element. To rectify
4526 * this, the entire 8-byte structure is byteswapped, followed by
4527 * a swap of the 4 byte val field. In other cases, the data is
4528 * already in proper host byte order. */
4529 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4530 target_su.buf = tswapal(target_su.buf);
4531 arg.val = tswap32(target_su.val);
4532 } else {
4533 arg.val = target_su.val;
4535 ret = get_errno(semctl(semid, semnum, cmd, arg));
4536 break;
4537 case GETALL:
4538 case SETALL:
/* Round-trip the whole value array through a host-side copy. */
4539 err = target_to_host_semarray(semid, &array, target_su.array);
4540 if (err)
4541 return err;
4542 arg.array = array;
4543 ret = get_errno(semctl(semid, semnum, cmd, arg));
4544 err = host_to_target_semarray(semid, target_su.array, &array);
4545 if (err)
4546 return err;
4547 break;
4548 case IPC_STAT:
4549 case IPC_SET:
4550 case SEM_STAT:
4551 err = target_to_host_semid_ds(&dsarg, target_su.buf);
4552 if (err)
4553 return err;
4554 arg.buf = &dsarg;
4555 ret = get_errno(semctl(semid, semnum, cmd, arg));
4556 err = host_to_target_semid_ds(target_su.buf, &dsarg);
4557 if (err)
4558 return err;
4559 break;
4560 case IPC_INFO:
4561 case SEM_INFO:
4562 arg.__buf = &seminfo;
4563 ret = get_errno(semctl(semid, semnum, cmd, arg));
4564 err = host_to_target_seminfo(target_su.__buf, &seminfo);
4565 if (err)
4566 return err;
4567 break;
4568 case IPC_RMID:
4569 case GETPID:
4570 case GETNCNT:
4571 case GETZCNT:
/* These commands ignore the semun argument entirely. */
4572 ret = get_errno(semctl(semid, semnum, cmd, NULL));
4573 break;
4576 return ret;
/* Guest-layout mirror of struct sembuf (one semop(2) operation). */
4579 struct target_sembuf {
4580 unsigned short sem_num;
4581 short sem_op;
4582 short sem_flg;
4585 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4586 abi_ulong target_addr,
4587 unsigned nsops)
4589 struct target_sembuf *target_sembuf;
4590 int i;
4592 target_sembuf = lock_user(VERIFY_READ, target_addr,
4593 nsops*sizeof(struct target_sembuf), 1);
4594 if (!target_sembuf)
4595 return -TARGET_EFAULT;
4597 for(i=0; i<nsops; i++) {
4598 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4599 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4600 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4603 unlock_user(target_sembuf, target_addr, 0);
4605 return 0;
/*
 * Emulate semop(2): marshal nsops guest sembuf records and perform the
 * operation on the host (via the signal-safe semtimedop wrapper with no
 * timeout).
 * NOTE(review): sops is a VLA sized by the guest-supplied nsops with no
 * upper bound checked here — presumably the host kernel's SEMOPM limit
 * rejects huge values before this matters, but a hostile nsops could
 * overflow the stack first; verify against callers.
 */
4608 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
4610 struct sembuf sops[nsops];
4612 if (target_to_host_sembuf(sops, ptr, nsops))
4613 return -TARGET_EFAULT;
4615 return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
/* Guest-layout mirror of the kernel's msqid64_ds.  On 32-bit ABIs each
 * time field is followed by a padding word, matching the kernel's
 * 32-bit msqid64_ds layout. */
4618 struct target_msqid_ds
4620 struct target_ipc_perm msg_perm;
4621 abi_ulong msg_stime;
4622 #if TARGET_ABI_BITS == 32
4623 abi_ulong __unused1;
4624 #endif
4625 abi_ulong msg_rtime;
4626 #if TARGET_ABI_BITS == 32
4627 abi_ulong __unused2;
4628 #endif
4629 abi_ulong msg_ctime;
4630 #if TARGET_ABI_BITS == 32
4631 abi_ulong __unused3;
4632 #endif
4633 abi_ulong __msg_cbytes;
4634 abi_ulong msg_qnum;
4635 abi_ulong msg_qbytes;
4636 abi_ulong msg_lspid;
4637 abi_ulong msg_lrpid;
4638 abi_ulong __unused4;
4639 abi_ulong __unused5;
4642 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4643 abi_ulong target_addr)
4645 struct target_msqid_ds *target_md;
4647 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4648 return -TARGET_EFAULT;
4649 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4650 return -TARGET_EFAULT;
4651 host_md->msg_stime = tswapal(target_md->msg_stime);
4652 host_md->msg_rtime = tswapal(target_md->msg_rtime);
4653 host_md->msg_ctime = tswapal(target_md->msg_ctime);
4654 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4655 host_md->msg_qnum = tswapal(target_md->msg_qnum);
4656 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4657 host_md->msg_lspid = tswapal(target_md->msg_lspid);
4658 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4659 unlock_user_struct(target_md, target_addr, 0);
4660 return 0;
4663 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4664 struct msqid_ds *host_md)
4666 struct target_msqid_ds *target_md;
4668 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4669 return -TARGET_EFAULT;
4670 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4671 return -TARGET_EFAULT;
4672 target_md->msg_stime = tswapal(host_md->msg_stime);
4673 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4674 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4675 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4676 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4677 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4678 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4679 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4680 unlock_user_struct(target_md, target_addr, 1);
4681 return 0;
/* Guest-layout mirror of the kernel's struct msginfo, written back by
 * host_to_target_msginfo() for msgctl(IPC_INFO / MSG_INFO). */
4684 struct target_msginfo {
4685 int msgpool;
4686 int msgmap;
4687 int msgmax;
4688 int msgmnb;
4689 int msgmni;
4690 int msgssz;
4691 int msgtql;
4692 unsigned short int msgseg;
4695 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4696 struct msginfo *host_msginfo)
4698 struct target_msginfo *target_msginfo;
4699 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4700 return -TARGET_EFAULT;
4701 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4702 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4703 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4704 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4705 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4706 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4707 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4708 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4709 unlock_user_struct(target_msginfo, target_addr, 1);
4710 return 0;
/*
 * Emulate msgctl(2) for the guest: marshal the msqid_ds (or msginfo)
 * argument in each direction around the host call.  Returns the host
 * result or a target errno.
 */
4713 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4715 struct msqid_ds dsarg;
4716 struct msginfo msginfo;
4717 abi_long ret = -TARGET_EINVAL;
/* Strip IPC_64 and similar flag bits before calling the host. */
4719 cmd &= 0xff;
4721 switch (cmd) {
4722 case IPC_STAT:
4723 case IPC_SET:
4724 case MSG_STAT:
4725 if (target_to_host_msqid_ds(&dsarg,ptr))
4726 return -TARGET_EFAULT;
4727 ret = get_errno(msgctl(msgid, cmd, &dsarg));
4728 if (host_to_target_msqid_ds(ptr,&dsarg))
4729 return -TARGET_EFAULT;
4730 break;
4731 case IPC_RMID:
4732 ret = get_errno(msgctl(msgid, cmd, NULL));
4733 break;
4734 case IPC_INFO:
4735 case MSG_INFO:
/* The kernel API types this as msqid_ds* but actually fills a msginfo. */
4736 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4737 if (host_to_target_msginfo(ptr, &msginfo))
4738 return -TARGET_EFAULT;
4739 break;
4742 return ret;
/* Guest-layout msgbuf header: the type word followed by the variable
 * length message text. */
4745 struct target_msgbuf {
4746 abi_long mtype;
4747 char mtext[1];
/*
 * Emulate msgsnd(2): copy the guest message (type + msgsz bytes of
 * text) into a host msgbuf and send it via the signal-safe wrapper.
 * Returns 0/positive on success or a target errno.
 */
4750 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4751 ssize_t msgsz, int msgflg)
4753 struct target_msgbuf *target_mb;
4754 struct msgbuf *host_mb;
4755 abi_long ret = 0;
4757 if (msgsz < 0) {
4758 return -TARGET_EINVAL;
4761 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4762 return -TARGET_EFAULT;
/* sizeof(long) covers the host mtype field preceding the text. */
4763 host_mb = g_try_malloc(msgsz + sizeof(long));
4764 if (!host_mb) {
4765 unlock_user_struct(target_mb, msgp, 0);
4766 return -TARGET_ENOMEM;
4768 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4769 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4770 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4771 g_free(host_mb);
4772 unlock_user_struct(target_mb, msgp, 0);
4774 return ret;
/*
 * Emulate msgrcv(2): receive into a host msgbuf, then copy the type
 * word and the received text back to the guest's msgbuf at msgp.
 * Returns the number of bytes received or a target errno.
 */
4777 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4778 ssize_t msgsz, abi_long msgtyp,
4779 int msgflg)
4781 struct target_msgbuf *target_mb;
4782 char *target_mtext;
4783 struct msgbuf *host_mb;
4784 abi_long ret = 0;
4786 if (msgsz < 0) {
4787 return -TARGET_EINVAL;
4790 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4791 return -TARGET_EFAULT;
4793 host_mb = g_try_malloc(msgsz + sizeof(long));
4794 if (!host_mb) {
4795 ret = -TARGET_ENOMEM;
4796 goto end;
4798 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4800 if (ret > 0) {
/* ret bytes of text follow the guest's mtype word. */
4801 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
/* Lock the text region so the whole received payload is writable
 * (lock_user_struct above only covered sizeof(*target_mb)).
 * NOTE(review): the copy below goes through target_mb->mtext rather
 * than the locked target_mtext pointer — presumably equivalent under
 * the direct-mapped lock_user implementation; verify for the
 * bounce-buffer (DEBUG_REMAP) case. */
4802 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4803 if (!target_mtext) {
4804 ret = -TARGET_EFAULT;
4805 goto end;
4807 memcpy(target_mb->mtext, host_mb->mtext, ret);
4808 unlock_user(target_mtext, target_mtext_addr, ret);
4811 target_mb->mtype = tswapal(host_mb->mtype);
4813 end:
4814 if (target_mb)
4815 unlock_user_struct(target_mb, msgp, 1);
4816 g_free(host_mb);
4817 return ret;
4820 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4821 abi_ulong target_addr)
4823 struct target_shmid_ds *target_sd;
4825 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4826 return -TARGET_EFAULT;
4827 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4828 return -TARGET_EFAULT;
4829 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4830 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4831 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4832 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4833 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4834 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4835 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4836 unlock_user_struct(target_sd, target_addr, 0);
4837 return 0;
4840 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4841 struct shmid_ds *host_sd)
4843 struct target_shmid_ds *target_sd;
4845 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4846 return -TARGET_EFAULT;
4847 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4848 return -TARGET_EFAULT;
4849 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4850 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4851 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4852 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4853 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4854 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4855 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4856 unlock_user_struct(target_sd, target_addr, 1);
4857 return 0;
/* Guest-layout mirror of struct shminfo for shmctl(IPC_INFO). */
4860 struct target_shminfo {
4861 abi_ulong shmmax;
4862 abi_ulong shmmin;
4863 abi_ulong shmmni;
4864 abi_ulong shmseg;
4865 abi_ulong shmall;
4868 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4869 struct shminfo *host_shminfo)
4871 struct target_shminfo *target_shminfo;
4872 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4873 return -TARGET_EFAULT;
4874 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4875 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4876 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4877 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4878 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4879 unlock_user_struct(target_shminfo, target_addr, 1);
4880 return 0;
/* Guest-layout mirror of struct shm_info for shmctl(SHM_INFO). */
4883 struct target_shm_info {
4884 int used_ids;
4885 abi_ulong shm_tot;
4886 abi_ulong shm_rss;
4887 abi_ulong shm_swp;
4888 abi_ulong swap_attempts;
4889 abi_ulong swap_successes;
4892 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4893 struct shm_info *host_shm_info)
4895 struct target_shm_info *target_shm_info;
4896 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4897 return -TARGET_EFAULT;
4898 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4899 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4900 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4901 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4902 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4903 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4904 unlock_user_struct(target_shm_info, target_addr, 1);
4905 return 0;
/*
 * Emulate shmctl(2) for the guest: marshal the shmid_ds / shminfo /
 * shm_info argument in the direction each command requires.  Returns
 * the host result or a target errno.
 */
4908 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4910 struct shmid_ds dsarg;
4911 struct shminfo shminfo;
4912 struct shm_info shm_info;
4913 abi_long ret = -TARGET_EINVAL;
/* Strip IPC_64 and similar flag bits before calling the host. */
4915 cmd &= 0xff;
4917 switch(cmd) {
4918 case IPC_STAT:
4919 case IPC_SET:
4920 case SHM_STAT:
4921 if (target_to_host_shmid_ds(&dsarg, buf))
4922 return -TARGET_EFAULT;
4923 ret = get_errno(shmctl(shmid, cmd, &dsarg));
4924 if (host_to_target_shmid_ds(buf, &dsarg))
4925 return -TARGET_EFAULT;
4926 break;
4927 case IPC_INFO:
/* Kernel API types these as shmid_ds* but fills different structs. */
4928 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4929 if (host_to_target_shminfo(buf, &shminfo))
4930 return -TARGET_EFAULT;
4931 break;
4932 case SHM_INFO:
4933 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4934 if (host_to_target_shm_info(buf, &shm_info))
4935 return -TARGET_EFAULT;
4936 break;
4937 case IPC_RMID:
4938 case SHM_LOCK:
4939 case SHM_UNLOCK:
4940 ret = get_errno(shmctl(shmid, cmd, NULL));
4941 break;
4944 return ret;
4947 #ifndef TARGET_FORCE_SHMLBA
4948 /* For most architectures, SHMLBA is the same as the page size;
4949 * some architectures have larger values, in which case they should
4950 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4951 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4952 * and defining its own value for SHMLBA.
4954 * The kernel also permits SHMLBA to be set by the architecture to a
4955 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4956 * this means that addresses are rounded to the large size if
4957 * SHM_RND is set but addresses not aligned to that size are not rejected
4958 * as long as they are at least page-aligned. Since the only architecture
4959 * which uses this is ia64 this code doesn't provide for that oddity.
/* Default: the guest's shmat() alignment requirement is one page. */
4961 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4963 return TARGET_PAGE_SIZE;
4965 #endif
/*
 * Emulate shmat(2): validate/round shmaddr against the target's SHMLBA,
 * map the segment (choosing a free guest range when shmaddr == 0), mark
 * the guest pages valid, and record the mapping in shm_regions[] so
 * do_shmdt() can later clear the page flags.  Returns the guest attach
 * address or a target errno.  Runs under mmap_lock.
 */
4967 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4968 int shmid, abi_ulong shmaddr, int shmflg)
4970 abi_long raddr;
4971 void *host_raddr;
4972 struct shmid_ds shm_info;
4973 int i,ret;
4974 abi_ulong shmlba;
4976 /* find out the length of the shared memory segment */
4977 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4978 if (is_error(ret)) {
4979 /* can't get length, bail out */
4980 return ret;
4983 shmlba = target_shmlba(cpu_env);
4985 if (shmaddr & (shmlba - 1)) {
4986 if (shmflg & SHM_RND) {
/* SHM_RND: round a misaligned address down to the SHMLBA boundary. */
4987 shmaddr &= ~(shmlba - 1);
4988 } else {
4989 return -TARGET_EINVAL;
/* Reject mappings that would fall outside the guest address space. */
4992 if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4993 return -TARGET_EINVAL;
4996 mmap_lock();
4998 if (shmaddr)
4999 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
5000 else {
5001 abi_ulong mmap_start;
/* No address requested: pick a free guest range ourselves. */
5003 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
5005 if (mmap_start == -1) {
5006 errno = ENOMEM;
5007 host_raddr = (void *)-1;
5008 } else
/* SHM_REMAP: mmap_find_vma already reserved this range. */
5009 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
5012 if (host_raddr == (void *)-1) {
5013 mmap_unlock();
5014 return get_errno((long)host_raddr);
5016 raddr=h2g((unsigned long)host_raddr);
5018 page_set_flags(raddr, raddr + shm_info.shm_segsz,
5019 PAGE_VALID | PAGE_READ |
5020 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
/* Remember the region so do_shmdt() can clear the page flags again.
 * NOTE(review): if all N_SHM_REGIONS slots are in use the mapping
 * silently goes untracked. */
5022 for (i = 0; i < N_SHM_REGIONS; i++) {
5023 if (!shm_regions[i].in_use) {
5024 shm_regions[i].in_use = true;
5025 shm_regions[i].start = raddr;
5026 shm_regions[i].size = shm_info.shm_segsz;
5027 break;
5031 mmap_unlock();
5032 return raddr;
5036 static inline abi_long do_shmdt(abi_ulong shmaddr)
5038 int i;
5039 abi_long rv;
5041 mmap_lock();
5043 for (i = 0; i < N_SHM_REGIONS; ++i) {
5044 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
5045 shm_regions[i].in_use = false;
5046 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
5047 break;
5050 rv = get_errno(shmdt(g2h(shmaddr)));
5052 mmap_unlock();
5054 return rv;
5057 #ifdef TARGET_NR_ipc
5058 /* ??? This only works with linear mappings. */
5059 /* do_ipc() must return target values and target errnos. */
/*
 * Demultiplexer for the legacy ipc(2) syscall: the low 16 bits of
 * 'call' select the IPC operation and the high 16 bits carry the
 * version kludge used by old libcs for msgrcv/shmat argument passing.
 */
5060 static abi_long do_ipc(CPUArchState *cpu_env,
5061 unsigned int call, abi_long first,
5062 abi_long second, abi_long third,
5063 abi_long ptr, abi_long fifth)
5065 int version;
5066 abi_long ret = 0;
5068 version = call >> 16;
5069 call &= 0xffff;
5071 switch (call) {
5072 case IPCOP_semop:
5073 ret = do_semop(first, ptr, second);
5074 break;
5076 case IPCOP_semget:
5077 ret = get_errno(semget(first, second, third));
5078 break;
5080 case IPCOP_semctl: {
5081 /* The semun argument to semctl is passed by value, so dereference the
5082 * ptr argument. */
5083 abi_ulong atptr;
5084 get_user_ual(atptr, ptr);
5085 ret = do_semctl(first, second, third, atptr);
5086 break;
5089 case IPCOP_msgget:
5090 ret = get_errno(msgget(first, second));
5091 break;
5093 case IPCOP_msgsnd:
5094 ret = do_msgsnd(first, ptr, second, third);
5095 break;
5097 case IPCOP_msgctl:
5098 ret = do_msgctl(first, second, ptr);
5099 break;
5101 case IPCOP_msgrcv:
5102 switch (version) {
5103 case 0:
/* Old-style msgrcv: ptr points at a {msgp, msgtyp} pair in guest
 * memory rather than at the message buffer itself. */
5105 struct target_ipc_kludge {
5106 abi_long msgp;
5107 abi_long msgtyp;
5108 } *tmp;
5110 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
5111 ret = -TARGET_EFAULT;
5112 break;
5115 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
5117 unlock_user_struct(tmp, ptr, 0);
5118 break;
5120 default:
5121 ret = do_msgrcv(first, ptr, second, fifth, third);
5123 break;
5125 case IPCOP_shmat:
5126 switch (version) {
5127 default:
5129 abi_ulong raddr;
5130 raddr = do_shmat(cpu_env, first, ptr, second);
5131 if (is_error(raddr))
5132 return get_errno(raddr);
/* On success the attach address is stored through 'third'. */
5133 if (put_user_ual(raddr, third))
5134 return -TARGET_EFAULT;
5135 break;
5137 case 1:
/* Version 1 shmat (iBCS2 emulation) is not supported. */
5138 ret = -TARGET_EINVAL;
5139 break;
5141 break;
5142 case IPCOP_shmdt:
5143 ret = do_shmdt(ptr);
5144 break;
5146 case IPCOP_shmget:
5147 /* IPC_* flag values are the same on all linux platforms */
5148 ret = get_errno(shmget(first, second, third));
5149 break;
5151 /* IPC_* and SHM_* command values are the same on all linux platforms */
5152 case IPCOP_shmctl:
5153 ret = do_shmctl(first, second, ptr);
5154 break;
5155 default:
5156 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
5157 ret = -TARGET_ENOSYS;
5158 break;
5160 return ret;
5162 #endif
5164 /* kernel structure types definitions */
/* First expansion of syscall_types.h: build an enum of STRUCT_<name>
 * identifiers, one per kernel structure description. */
5166 #define STRUCT(name, ...) STRUCT_ ## name,
5167 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
5168 enum {
5169 #include "syscall_types.h"
5170 STRUCT_MAX
5172 #undef STRUCT
5173 #undef STRUCT_SPECIAL
/* Second expansion: emit a struct_<name>_def[] argtype descriptor array
 * for each regular structure; SPECIAL structures are converted by
 * hand-written code and get no table entry. */
5175 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
5176 #define STRUCT_SPECIAL(name)
5177 #include "syscall_types.h"
5178 #undef STRUCT
5179 #undef STRUCT_SPECIAL
5181 typedef struct IOCTLEntry IOCTLEntry;
/* Signature for hand-written ioctl converters (used when the generic
 * thunk-based argument conversion is not sufficient). */
5183 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
5184 int fd, int cmd, abi_long arg);
/* One row of the ioctl translation table: target/host command numbers,
 * the data-flow direction, an optional custom converter, and the
 * argument type description for the generic path. */
5186 struct IOCTLEntry {
5187 int target_cmd;
5188 unsigned int host_cmd;
5189 const char *name;
5190 int access;
5191 do_ioctl_fn *do_ioctl;
5192 const argtype arg_type[5];
/* Data-flow direction flags for IOCTLEntry.access. */
5195 #define IOC_R 0x0001
5196 #define IOC_W 0x0002
5197 #define IOC_RW (IOC_R | IOC_W)
/* Size of the fixed on-stack conversion buffer (buf_temp). */
5199 #define MAX_STRUCT_SIZE 4096
5201 #ifdef CONFIG_FIEMAP
5202 /* So fiemap access checks don't overflow on 32 bit systems.
5203 * This is very slightly smaller than the limit imposed by
5204 * the underlying kernel.
5206 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
5207 / sizeof(struct fiemap_extent))
/*
 * Custom converter for FS_IOC_FIEMAP: a struct fiemap header followed
 * by fm_extent_count struct fiemap_extent records filled in by the
 * kernel.  Oversized requests are bounced through a heap buffer.
 */
5209 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
5210 int fd, int cmd, abi_long arg)
5212 /* The parameter for this ioctl is a struct fiemap followed
5213 * by an array of struct fiemap_extent whose size is set
5214 * in fiemap->fm_extent_count. The array is filled in by the
5215 * ioctl.
5217 int target_size_in, target_size_out;
5218 struct fiemap *fm;
5219 const argtype *arg_type = ie->arg_type;
5220 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
5221 void *argptr, *p;
5222 abi_long ret;
5223 int i, extent_size = thunk_type_size(extent_arg_type, 0);
5224 uint32_t outbufsz;
5225 int free_fm = 0;
5227 assert(arg_type[0] == TYPE_PTR);
5228 assert(ie->access == IOC_RW);
5229 arg_type++;
5230 target_size_in = thunk_type_size(arg_type, 0);
5231 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
5232 if (!argptr) {
5233 return -TARGET_EFAULT;
5235 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5236 unlock_user(argptr, arg, 0);
5237 fm = (struct fiemap *)buf_temp;
/* Bound the guest-supplied extent count before sizing the buffer. */
5238 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
5239 return -TARGET_EINVAL;
5242 outbufsz = sizeof (*fm) +
5243 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
5245 if (outbufsz > MAX_STRUCT_SIZE) {
5246 /* We can't fit all the extents into the fixed size buffer.
5247 * Allocate one that is large enough and use it instead.
5249 fm = g_try_malloc(outbufsz);
5250 if (!fm) {
5251 return -TARGET_ENOMEM;
5253 memcpy(fm, buf_temp, sizeof(struct fiemap));
5254 free_fm = 1;
5256 ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
5257 if (!is_error(ret)) {
5258 target_size_out = target_size_in;
5259 /* An extent_count of 0 means we were only counting the extents
5260 * so there are no structs to copy
5262 if (fm->fm_extent_count != 0) {
5263 target_size_out += fm->fm_mapped_extents * extent_size;
5265 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
5266 if (!argptr) {
5267 ret = -TARGET_EFAULT;
5268 } else {
5269 /* Convert the struct fiemap */
5270 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
5271 if (fm->fm_extent_count != 0) {
5272 p = argptr + target_size_in;
5273 /* ...and then all the struct fiemap_extents */
5274 for (i = 0; i < fm->fm_mapped_extents; i++) {
5275 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
5276 THUNK_TARGET);
5277 p += extent_size;
5280 unlock_user(argptr, arg, target_size_out);
5283 if (free_fm) {
5284 g_free(fm);
5286 return ret;
5288 #endif
5290 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
5291 int fd, int cmd, abi_long arg)
5293 const argtype *arg_type = ie->arg_type;
5294 int target_size;
5295 void *argptr;
5296 int ret;
5297 struct ifconf *host_ifconf;
5298 uint32_t outbufsz;
5299 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
5300 int target_ifreq_size;
5301 int nb_ifreq;
5302 int free_buf = 0;
5303 int i;
5304 int target_ifc_len;
5305 abi_long target_ifc_buf;
5306 int host_ifc_len;
5307 char *host_ifc_buf;
5309 assert(arg_type[0] == TYPE_PTR);
5310 assert(ie->access == IOC_RW);
5312 arg_type++;
5313 target_size = thunk_type_size(arg_type, 0);
5315 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5316 if (!argptr)
5317 return -TARGET_EFAULT;
5318 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5319 unlock_user(argptr, arg, 0);
5321 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
5322 target_ifc_len = host_ifconf->ifc_len;
5323 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
5325 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
5326 nb_ifreq = target_ifc_len / target_ifreq_size;
5327 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
5329 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
5330 if (outbufsz > MAX_STRUCT_SIZE) {
5331 /* We can't fit all the extents into the fixed size buffer.
5332 * Allocate one that is large enough and use it instead.
5334 host_ifconf = malloc(outbufsz);
5335 if (!host_ifconf) {
5336 return -TARGET_ENOMEM;
5338 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
5339 free_buf = 1;
5341 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
5343 host_ifconf->ifc_len = host_ifc_len;
5344 host_ifconf->ifc_buf = host_ifc_buf;
5346 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
5347 if (!is_error(ret)) {
5348 /* convert host ifc_len to target ifc_len */
5350 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
5351 target_ifc_len = nb_ifreq * target_ifreq_size;
5352 host_ifconf->ifc_len = target_ifc_len;
5354 /* restore target ifc_buf */
5356 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
5358 /* copy struct ifconf to target user */
5360 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5361 if (!argptr)
5362 return -TARGET_EFAULT;
5363 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
5364 unlock_user(argptr, arg, target_size);
5366 /* copy ifreq[] to target user */
5368 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
5369 for (i = 0; i < nb_ifreq ; i++) {
5370 thunk_convert(argptr + i * target_ifreq_size,
5371 host_ifc_buf + i * sizeof(struct ifreq),
5372 ifreq_arg_type, THUNK_TARGET);
5374 unlock_user(argptr, target_ifc_buf, target_ifc_len);
5377 if (free_buf) {
5378 free(host_ifconf);
5381 return ret;
/*
 * Custom converter for device-mapper ioctls: a struct dm_ioctl header
 * followed by a command-specific variable-length payload at
 * data_start.  The payload is converted by hand in both directions
 * because its layout depends on the specific DM command.
 */
5384 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5385 int cmd, abi_long arg)
5387 void *argptr;
5388 struct dm_ioctl *host_dm;
5389 abi_long guest_data;
5390 uint32_t guest_data_size;
5391 int target_size;
5392 const argtype *arg_type = ie->arg_type;
5393 abi_long ret;
5394 void *big_buf = NULL;
5395 char *host_data;
5397 arg_type++;
5398 target_size = thunk_type_size(arg_type, 0);
5399 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5400 if (!argptr) {
5401 ret = -TARGET_EFAULT;
5402 goto out;
5404 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5405 unlock_user(argptr, arg, 0);
5407 /* buf_temp is too small, so fetch things into a bigger buffer */
/* data_size * 2 leaves room for the payload to grow during target->host
 * conversion.  NOTE(review): data_size is guest-controlled and unbounded
 * here — verify an upper limit is enforced elsewhere. */
5408 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5409 memcpy(big_buf, buf_temp, target_size);
5410 buf_temp = big_buf;
5411 host_dm = big_buf;
5413 guest_data = arg + host_dm->data_start;
5414 if ((guest_data - arg) < 0) {
5415 ret = -TARGET_EINVAL;
5416 goto out;
5418 guest_data_size = host_dm->data_size - host_dm->data_start;
5419 host_data = (char*)host_dm + host_dm->data_start;
5421 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5422 if (!argptr) {
5423 ret = -TARGET_EFAULT;
5424 goto out;
/* Convert the guest payload into host_data, per command. */
5427 switch (ie->host_cmd) {
5428 case DM_REMOVE_ALL:
5429 case DM_LIST_DEVICES:
5430 case DM_DEV_CREATE:
5431 case DM_DEV_REMOVE:
5432 case DM_DEV_SUSPEND:
5433 case DM_DEV_STATUS:
5434 case DM_DEV_WAIT:
5435 case DM_TABLE_STATUS:
5436 case DM_TABLE_CLEAR:
5437 case DM_TABLE_DEPS:
5438 case DM_LIST_VERSIONS:
5439 /* no input data */
5440 break;
5441 case DM_DEV_RENAME:
5442 case DM_DEV_SET_GEOMETRY:
5443 /* data contains only strings */
5444 memcpy(host_data, argptr, guest_data_size);
5445 break;
5446 case DM_TARGET_MSG:
5447 memcpy(host_data, argptr, guest_data_size);
/* First 8 bytes are the message sector number and need swapping. */
5448 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5449 break;
5450 case DM_TABLE_LOAD:
/* Payload is a chain of dm_target_spec records, each followed by a
 * parameter string; convert each and repack with host offsets. */
5452 void *gspec = argptr;
5453 void *cur_data = host_data;
5454 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5455 int spec_size = thunk_type_size(arg_type, 0);
5456 int i;
5458 for (i = 0; i < host_dm->target_count; i++) {
5459 struct dm_target_spec *spec = cur_data;
5460 uint32_t next;
5461 int slen;
5463 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5464 slen = strlen((char*)gspec + spec_size) + 1;
5465 next = spec->next;
5466 spec->next = sizeof(*spec) + slen;
5467 strcpy((char*)&spec[1], gspec + spec_size);
5468 gspec += next;
5469 cur_data += spec->next;
5471 break;
5473 default:
5474 ret = -TARGET_EINVAL;
5475 unlock_user(argptr, guest_data, 0);
5476 goto out;
5478 unlock_user(argptr, guest_data, 0);
5480 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5481 if (!is_error(ret)) {
/* Convert the result payload back to the guest, per command. */
5482 guest_data = arg + host_dm->data_start;
5483 guest_data_size = host_dm->data_size - host_dm->data_start;
/* NOTE(review): this lock_user() result is used unchecked below; a bad
 * guest pointer here would dereference NULL — confirm and fix upstream. */
5484 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5485 switch (ie->host_cmd) {
5486 case DM_REMOVE_ALL:
5487 case DM_DEV_CREATE:
5488 case DM_DEV_REMOVE:
5489 case DM_DEV_RENAME:
5490 case DM_DEV_SUSPEND:
5491 case DM_DEV_STATUS:
5492 case DM_TABLE_LOAD:
5493 case DM_TABLE_CLEAR:
5494 case DM_TARGET_MSG:
5495 case DM_DEV_SET_GEOMETRY:
5496 /* no return data */
5497 break;
5498 case DM_LIST_DEVICES:
/* Walk the variable-length dm_name_list chain, converting each node
 * and recomputing the 'next' offsets for the guest layout. */
5500 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5501 uint32_t remaining_data = guest_data_size;
5502 void *cur_data = argptr;
5503 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5504 int nl_size = 12; /* can't use thunk_size due to alignment */
5506 while (1) {
5507 uint32_t next = nl->next;
5508 if (next) {
5509 nl->next = nl_size + (strlen(nl->name) + 1);
5511 if (remaining_data < nl->next) {
5512 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5513 break;
5515 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5516 strcpy(cur_data + nl_size, nl->name);
5517 cur_data += nl->next;
5518 remaining_data -= nl->next;
5519 if (!next) {
5520 break;
5522 nl = (void*)nl + next;
5524 break;
5526 case DM_DEV_WAIT:
5527 case DM_TABLE_STATUS:
/* Same repacking walk over a chain of dm_target_spec records. */
5529 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5530 void *cur_data = argptr;
5531 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5532 int spec_size = thunk_type_size(arg_type, 0);
5533 int i;
5535 for (i = 0; i < host_dm->target_count; i++) {
5536 uint32_t next = spec->next;
5537 int slen = strlen((char*)&spec[1]) + 1;
5538 spec->next = (cur_data - argptr) + spec_size + slen;
5539 if (guest_data_size < spec->next) {
5540 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5541 break;
5543 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5544 strcpy(cur_data + spec_size, (char*)&spec[1]);
5545 cur_data = argptr + spec->next;
5546 spec = (void*)host_dm + host_dm->data_start + next;
5548 break;
5550 case DM_TABLE_DEPS:
/* Payload: a 32-bit device count at offset 0, then 64-bit dev_ts
 * starting at offset 8. */
5552 void *hdata = (void*)host_dm + host_dm->data_start;
5553 int count = *(uint32_t*)hdata;
5554 uint64_t *hdev = hdata + 8;
5555 uint64_t *gdev = argptr + 8;
5556 int i;
5558 *(uint32_t*)argptr = tswap32(count);
5559 for (i = 0; i < count; i++) {
5560 *gdev = tswap64(*hdev);
5561 gdev++;
5562 hdev++;
5564 break;
5566 case DM_LIST_VERSIONS:
/* Same repacking walk over a chain of dm_target_versions records. */
5568 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5569 uint32_t remaining_data = guest_data_size;
5570 void *cur_data = argptr;
5571 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5572 int vers_size = thunk_type_size(arg_type, 0);
5574 while (1) {
5575 uint32_t next = vers->next;
5576 if (next) {
5577 vers->next = vers_size + (strlen(vers->name) + 1);
5579 if (remaining_data < vers->next) {
5580 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5581 break;
5583 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5584 strcpy(cur_data + vers_size, vers->name);
5585 cur_data += vers->next;
5586 remaining_data -= vers->next;
5587 if (!next) {
5588 break;
5590 vers = (void*)vers + next;
5592 break;
5594 default:
5595 unlock_user(argptr, guest_data, 0);
5596 ret = -TARGET_EINVAL;
5597 goto out;
5599 unlock_user(argptr, guest_data, guest_data_size);
/* Finally write the (possibly updated) dm_ioctl header back. */
5601 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5602 if (!argptr) {
5603 ret = -TARGET_EFAULT;
5604 goto out;
5606 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5607 unlock_user(argptr, arg, target_size);
5609 out:
5610 g_free(big_buf);
5611 return ret;
5614 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5615 int cmd, abi_long arg)
5617 void *argptr;
5618 int target_size;
5619 const argtype *arg_type = ie->arg_type;
5620 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5621 abi_long ret;
5623 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5624 struct blkpg_partition host_part;
5626 /* Read and convert blkpg */
5627 arg_type++;
5628 target_size = thunk_type_size(arg_type, 0);
5629 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5630 if (!argptr) {
5631 ret = -TARGET_EFAULT;
5632 goto out;
5634 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5635 unlock_user(argptr, arg, 0);
5637 switch (host_blkpg->op) {
5638 case BLKPG_ADD_PARTITION:
5639 case BLKPG_DEL_PARTITION:
5640 /* payload is struct blkpg_partition */
5641 break;
5642 default:
5643 /* Unknown opcode */
5644 ret = -TARGET_EINVAL;
5645 goto out;
5648 /* Read and convert blkpg->data */
5649 arg = (abi_long)(uintptr_t)host_blkpg->data;
5650 target_size = thunk_type_size(part_arg_type, 0);
5651 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5652 if (!argptr) {
5653 ret = -TARGET_EFAULT;
5654 goto out;
5656 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5657 unlock_user(argptr, arg, 0);
5659 /* Swizzle the data pointer to our local copy and call! */
5660 host_blkpg->data = &host_part;
5661 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5663 out:
5664 return ret;
5667 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5668 int fd, int cmd, abi_long arg)
5670 const argtype *arg_type = ie->arg_type;
5671 const StructEntry *se;
5672 const argtype *field_types;
5673 const int *dst_offsets, *src_offsets;
5674 int target_size;
5675 void *argptr;
5676 abi_ulong *target_rt_dev_ptr;
5677 unsigned long *host_rt_dev_ptr;
5678 abi_long ret;
5679 int i;
5681 assert(ie->access == IOC_W);
5682 assert(*arg_type == TYPE_PTR);
5683 arg_type++;
5684 assert(*arg_type == TYPE_STRUCT);
5685 target_size = thunk_type_size(arg_type, 0);
5686 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5687 if (!argptr) {
5688 return -TARGET_EFAULT;
5690 arg_type++;
5691 assert(*arg_type == (int)STRUCT_rtentry);
5692 se = struct_entries + *arg_type++;
5693 assert(se->convert[0] == NULL);
5694 /* convert struct here to be able to catch rt_dev string */
5695 field_types = se->field_types;
5696 dst_offsets = se->field_offsets[THUNK_HOST];
5697 src_offsets = se->field_offsets[THUNK_TARGET];
5698 for (i = 0; i < se->nb_fields; i++) {
5699 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5700 assert(*field_types == TYPE_PTRVOID);
5701 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5702 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5703 if (*target_rt_dev_ptr != 0) {
5704 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5705 tswapal(*target_rt_dev_ptr));
5706 if (!*host_rt_dev_ptr) {
5707 unlock_user(argptr, arg, 0);
5708 return -TARGET_EFAULT;
5710 } else {
5711 *host_rt_dev_ptr = 0;
5713 field_types++;
5714 continue;
5716 field_types = thunk_convert(buf_temp + dst_offsets[i],
5717 argptr + src_offsets[i],
5718 field_types, THUNK_HOST);
5720 unlock_user(argptr, arg, 0);
5722 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5723 if (*host_rt_dev_ptr != 0) {
5724 unlock_user((void *)*host_rt_dev_ptr,
5725 *target_rt_dev_ptr, 0);
5727 return ret;
5730 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5731 int fd, int cmd, abi_long arg)
5733 int sig = target_to_host_signal(arg);
5734 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
#ifdef TIOCGPTPEER
/*
 * TIOCGPTPEER takes open(2)-style flags; translate the target's flag bits
 * to host values before issuing the ioctl.
 */
static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int host_flags = target_to_host_bitmask(arg, fcntl_flags_tbl);

    return get_errno(safe_ioctl(fd, ie->host_cmd, host_flags));
}
#endif
5746 static IOCTLEntry ioctl_entries[] = {
5747 #define IOCTL(cmd, access, ...) \
5748 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5749 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5750 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5751 #define IOCTL_IGNORE(cmd) \
5752 { TARGET_ ## cmd, 0, #cmd },
5753 #include "ioctls.h"
5754 { 0, 0, },
5757 /* ??? Implement proper locking for ioctls. */
5758 /* do_ioctl() Must return target values and target errnos. */
5759 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5761 const IOCTLEntry *ie;
5762 const argtype *arg_type;
5763 abi_long ret;
5764 uint8_t buf_temp[MAX_STRUCT_SIZE];
5765 int target_size;
5766 void *argptr;
5768 ie = ioctl_entries;
5769 for(;;) {
5770 if (ie->target_cmd == 0) {
5771 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5772 return -TARGET_ENOSYS;
5774 if (ie->target_cmd == cmd)
5775 break;
5776 ie++;
5778 arg_type = ie->arg_type;
5779 #if defined(DEBUG)
5780 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
5781 #endif
5782 if (ie->do_ioctl) {
5783 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5784 } else if (!ie->host_cmd) {
5785 /* Some architectures define BSD ioctls in their headers
5786 that are not implemented in Linux. */
5787 return -TARGET_ENOSYS;
5790 switch(arg_type[0]) {
5791 case TYPE_NULL:
5792 /* no argument */
5793 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5794 break;
5795 case TYPE_PTRVOID:
5796 case TYPE_INT:
5797 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5798 break;
5799 case TYPE_PTR:
5800 arg_type++;
5801 target_size = thunk_type_size(arg_type, 0);
5802 switch(ie->access) {
5803 case IOC_R:
5804 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5805 if (!is_error(ret)) {
5806 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5807 if (!argptr)
5808 return -TARGET_EFAULT;
5809 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5810 unlock_user(argptr, arg, target_size);
5812 break;
5813 case IOC_W:
5814 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5815 if (!argptr)
5816 return -TARGET_EFAULT;
5817 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5818 unlock_user(argptr, arg, 0);
5819 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5820 break;
5821 default:
5822 case IOC_RW:
5823 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5824 if (!argptr)
5825 return -TARGET_EFAULT;
5826 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5827 unlock_user(argptr, arg, 0);
5828 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5829 if (!is_error(ret)) {
5830 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5831 if (!argptr)
5832 return -TARGET_EFAULT;
5833 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5834 unlock_user(argptr, arg, target_size);
5836 break;
5838 break;
5839 default:
5840 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5841 (long)cmd, arg_type[0]);
5842 ret = -TARGET_ENOSYS;
5843 break;
5845 return ret;
5848 static const bitmask_transtbl iflag_tbl[] = {
5849 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5850 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5851 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5852 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5853 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5854 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5855 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5856 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5857 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5858 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5859 { TARGET_IXON, TARGET_IXON, IXON, IXON },
5860 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5861 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5862 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5863 { 0, 0, 0, 0 }
5866 static const bitmask_transtbl oflag_tbl[] = {
5867 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5868 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5869 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5870 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5871 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5872 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5873 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5874 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5875 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5876 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5877 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5878 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5879 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5880 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5881 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5882 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5883 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5884 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5885 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5886 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5887 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5888 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5889 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5890 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5891 { 0, 0, 0, 0 }
5894 static const bitmask_transtbl cflag_tbl[] = {
5895 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5896 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5897 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5898 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5899 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5900 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5901 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5902 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5903 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5904 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5905 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5906 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5907 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5908 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5909 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5910 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5911 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5912 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5913 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5914 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5915 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5916 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5917 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5918 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5919 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5920 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5921 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5922 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5923 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5924 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5925 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5926 { 0, 0, 0, 0 }
5929 static const bitmask_transtbl lflag_tbl[] = {
5930 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5931 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5932 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5933 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5934 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5935 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5936 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5937 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5938 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5939 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5940 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5941 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5942 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5943 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5944 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5945 { 0, 0, 0, 0 }
5948 static void target_to_host_termios (void *dst, const void *src)
5950 struct host_termios *host = dst;
5951 const struct target_termios *target = src;
5953 host->c_iflag =
5954 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5955 host->c_oflag =
5956 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5957 host->c_cflag =
5958 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5959 host->c_lflag =
5960 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5961 host->c_line = target->c_line;
5963 memset(host->c_cc, 0, sizeof(host->c_cc));
5964 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5965 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5966 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5967 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5968 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5969 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5970 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5971 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5972 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5973 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5974 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5975 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5976 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5977 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5978 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5979 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5980 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5983 static void host_to_target_termios (void *dst, const void *src)
5985 struct target_termios *target = dst;
5986 const struct host_termios *host = src;
5988 target->c_iflag =
5989 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5990 target->c_oflag =
5991 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5992 target->c_cflag =
5993 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5994 target->c_lflag =
5995 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5996 target->c_line = host->c_line;
5998 memset(target->c_cc, 0, sizeof(target->c_cc));
5999 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
6000 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
6001 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
6002 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
6003 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
6004 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
6005 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
6006 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
6007 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
6008 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
6009 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
6010 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
6011 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
6012 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
6013 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
6014 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
6015 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
6018 static const StructEntry struct_termios_def = {
6019 .convert = { host_to_target_termios, target_to_host_termios },
6020 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
6021 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
6024 static bitmask_transtbl mmap_flags_tbl[] = {
6025 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
6026 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
6027 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
6028 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
6029 MAP_ANONYMOUS, MAP_ANONYMOUS },
6030 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6031 MAP_GROWSDOWN, MAP_GROWSDOWN },
6032 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6033 MAP_DENYWRITE, MAP_DENYWRITE },
6034 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6035 MAP_EXECUTABLE, MAP_EXECUTABLE },
6036 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6037 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6038 MAP_NORESERVE, MAP_NORESERVE },
6039 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6040 /* MAP_STACK had been ignored by the kernel for quite some time.
6041 Recognize it for the target insofar as we do not want to pass
6042 it through to the host. */
6043 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6044 { 0, 0, 0, 0 }
6047 #if defined(TARGET_I386)
6049 /* NOTE: there is really one LDT for all the threads */
6050 static uint8_t *ldt_table;
6052 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6054 int size;
6055 void *p;
6057 if (!ldt_table)
6058 return 0;
6059 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6060 if (size > bytecount)
6061 size = bytecount;
6062 p = lock_user(VERIFY_WRITE, ptr, size, 0);
6063 if (!p)
6064 return -TARGET_EFAULT;
6065 /* ??? Should this by byteswapped? */
6066 memcpy(p, ldt_table, size);
6067 unlock_user(p, ptr, size);
6068 return size;
6071 /* XXX: add locking support */
6072 static abi_long write_ldt(CPUX86State *env,
6073 abi_ulong ptr, unsigned long bytecount, int oldmode)
6075 struct target_modify_ldt_ldt_s ldt_info;
6076 struct target_modify_ldt_ldt_s *target_ldt_info;
6077 int seg_32bit, contents, read_exec_only, limit_in_pages;
6078 int seg_not_present, useable, lm;
6079 uint32_t *lp, entry_1, entry_2;
6081 if (bytecount != sizeof(ldt_info))
6082 return -TARGET_EINVAL;
6083 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6084 return -TARGET_EFAULT;
6085 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6086 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6087 ldt_info.limit = tswap32(target_ldt_info->limit);
6088 ldt_info.flags = tswap32(target_ldt_info->flags);
6089 unlock_user_struct(target_ldt_info, ptr, 0);
6091 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6092 return -TARGET_EINVAL;
6093 seg_32bit = ldt_info.flags & 1;
6094 contents = (ldt_info.flags >> 1) & 3;
6095 read_exec_only = (ldt_info.flags >> 3) & 1;
6096 limit_in_pages = (ldt_info.flags >> 4) & 1;
6097 seg_not_present = (ldt_info.flags >> 5) & 1;
6098 useable = (ldt_info.flags >> 6) & 1;
6099 #ifdef TARGET_ABI32
6100 lm = 0;
6101 #else
6102 lm = (ldt_info.flags >> 7) & 1;
6103 #endif
6104 if (contents == 3) {
6105 if (oldmode)
6106 return -TARGET_EINVAL;
6107 if (seg_not_present == 0)
6108 return -TARGET_EINVAL;
6110 /* allocate the LDT */
6111 if (!ldt_table) {
6112 env->ldt.base = target_mmap(0,
6113 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6114 PROT_READ|PROT_WRITE,
6115 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6116 if (env->ldt.base == -1)
6117 return -TARGET_ENOMEM;
6118 memset(g2h(env->ldt.base), 0,
6119 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6120 env->ldt.limit = 0xffff;
6121 ldt_table = g2h(env->ldt.base);
6124 /* NOTE: same code as Linux kernel */
6125 /* Allow LDTs to be cleared by the user. */
6126 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6127 if (oldmode ||
6128 (contents == 0 &&
6129 read_exec_only == 1 &&
6130 seg_32bit == 0 &&
6131 limit_in_pages == 0 &&
6132 seg_not_present == 1 &&
6133 useable == 0 )) {
6134 entry_1 = 0;
6135 entry_2 = 0;
6136 goto install;
6140 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6141 (ldt_info.limit & 0x0ffff);
6142 entry_2 = (ldt_info.base_addr & 0xff000000) |
6143 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6144 (ldt_info.limit & 0xf0000) |
6145 ((read_exec_only ^ 1) << 9) |
6146 (contents << 10) |
6147 ((seg_not_present ^ 1) << 15) |
6148 (seg_32bit << 22) |
6149 (limit_in_pages << 23) |
6150 (lm << 21) |
6151 0x7000;
6152 if (!oldmode)
6153 entry_2 |= (useable << 20);
6155 /* Install the new entry ... */
6156 install:
6157 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6158 lp[0] = tswap32(entry_1);
6159 lp[1] = tswap32(entry_2);
6160 return 0;
6163 /* specific and weird i386 syscalls */
6164 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6165 unsigned long bytecount)
6167 abi_long ret;
6169 switch (func) {
6170 case 0:
6171 ret = read_ldt(ptr, bytecount);
6172 break;
6173 case 1:
6174 ret = write_ldt(env, ptr, bytecount, 1);
6175 break;
6176 case 0x11:
6177 ret = write_ldt(env, ptr, bytecount, 0);
6178 break;
6179 default:
6180 ret = -TARGET_ENOSYS;
6181 break;
6183 return ret;
6186 #if defined(TARGET_I386) && defined(TARGET_ABI32)
6187 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6189 uint64_t *gdt_table = g2h(env->gdt.base);
6190 struct target_modify_ldt_ldt_s ldt_info;
6191 struct target_modify_ldt_ldt_s *target_ldt_info;
6192 int seg_32bit, contents, read_exec_only, limit_in_pages;
6193 int seg_not_present, useable, lm;
6194 uint32_t *lp, entry_1, entry_2;
6195 int i;
6197 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6198 if (!target_ldt_info)
6199 return -TARGET_EFAULT;
6200 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6201 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6202 ldt_info.limit = tswap32(target_ldt_info->limit);
6203 ldt_info.flags = tswap32(target_ldt_info->flags);
6204 if (ldt_info.entry_number == -1) {
6205 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6206 if (gdt_table[i] == 0) {
6207 ldt_info.entry_number = i;
6208 target_ldt_info->entry_number = tswap32(i);
6209 break;
6213 unlock_user_struct(target_ldt_info, ptr, 1);
6215 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6216 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6217 return -TARGET_EINVAL;
6218 seg_32bit = ldt_info.flags & 1;
6219 contents = (ldt_info.flags >> 1) & 3;
6220 read_exec_only = (ldt_info.flags >> 3) & 1;
6221 limit_in_pages = (ldt_info.flags >> 4) & 1;
6222 seg_not_present = (ldt_info.flags >> 5) & 1;
6223 useable = (ldt_info.flags >> 6) & 1;
6224 #ifdef TARGET_ABI32
6225 lm = 0;
6226 #else
6227 lm = (ldt_info.flags >> 7) & 1;
6228 #endif
6230 if (contents == 3) {
6231 if (seg_not_present == 0)
6232 return -TARGET_EINVAL;
6235 /* NOTE: same code as Linux kernel */
6236 /* Allow LDTs to be cleared by the user. */
6237 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6238 if ((contents == 0 &&
6239 read_exec_only == 1 &&
6240 seg_32bit == 0 &&
6241 limit_in_pages == 0 &&
6242 seg_not_present == 1 &&
6243 useable == 0 )) {
6244 entry_1 = 0;
6245 entry_2 = 0;
6246 goto install;
6250 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6251 (ldt_info.limit & 0x0ffff);
6252 entry_2 = (ldt_info.base_addr & 0xff000000) |
6253 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6254 (ldt_info.limit & 0xf0000) |
6255 ((read_exec_only ^ 1) << 9) |
6256 (contents << 10) |
6257 ((seg_not_present ^ 1) << 15) |
6258 (seg_32bit << 22) |
6259 (limit_in_pages << 23) |
6260 (useable << 20) |
6261 (lm << 21) |
6262 0x7000;
6264 /* Install the new entry ... */
6265 install:
6266 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6267 lp[0] = tswap32(entry_1);
6268 lp[1] = tswap32(entry_2);
6269 return 0;
6272 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6274 struct target_modify_ldt_ldt_s *target_ldt_info;
6275 uint64_t *gdt_table = g2h(env->gdt.base);
6276 uint32_t base_addr, limit, flags;
6277 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6278 int seg_not_present, useable, lm;
6279 uint32_t *lp, entry_1, entry_2;
6281 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6282 if (!target_ldt_info)
6283 return -TARGET_EFAULT;
6284 idx = tswap32(target_ldt_info->entry_number);
6285 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6286 idx > TARGET_GDT_ENTRY_TLS_MAX) {
6287 unlock_user_struct(target_ldt_info, ptr, 1);
6288 return -TARGET_EINVAL;
6290 lp = (uint32_t *)(gdt_table + idx);
6291 entry_1 = tswap32(lp[0]);
6292 entry_2 = tswap32(lp[1]);
6294 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6295 contents = (entry_2 >> 10) & 3;
6296 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6297 seg_32bit = (entry_2 >> 22) & 1;
6298 limit_in_pages = (entry_2 >> 23) & 1;
6299 useable = (entry_2 >> 20) & 1;
6300 #ifdef TARGET_ABI32
6301 lm = 0;
6302 #else
6303 lm = (entry_2 >> 21) & 1;
6304 #endif
6305 flags = (seg_32bit << 0) | (contents << 1) |
6306 (read_exec_only << 3) | (limit_in_pages << 4) |
6307 (seg_not_present << 5) | (useable << 6) | (lm << 7);
6308 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
6309 base_addr = (entry_1 >> 16) |
6310 (entry_2 & 0xff000000) |
6311 ((entry_2 & 0xff) << 16);
6312 target_ldt_info->base_addr = tswapal(base_addr);
6313 target_ldt_info->limit = tswap32(limit);
6314 target_ldt_info->flags = tswap32(flags);
6315 unlock_user_struct(target_ldt_info, ptr, 1);
6316 return 0;
6318 #endif /* TARGET_I386 && TARGET_ABI32 */
6320 #ifndef TARGET_ABI32
6321 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6323 abi_long ret = 0;
6324 abi_ulong val;
6325 int idx;
6327 switch(code) {
6328 case TARGET_ARCH_SET_GS:
6329 case TARGET_ARCH_SET_FS:
6330 if (code == TARGET_ARCH_SET_GS)
6331 idx = R_GS;
6332 else
6333 idx = R_FS;
6334 cpu_x86_load_seg(env, idx, 0);
6335 env->segs[idx].base = addr;
6336 break;
6337 case TARGET_ARCH_GET_GS:
6338 case TARGET_ARCH_GET_FS:
6339 if (code == TARGET_ARCH_GET_GS)
6340 idx = R_GS;
6341 else
6342 idx = R_FS;
6343 val = env->segs[idx].base;
6344 if (put_user(val, addr, abi_ulong))
6345 ret = -TARGET_EFAULT;
6346 break;
6347 default:
6348 ret = -TARGET_EINVAL;
6349 break;
6351 return ret;
6353 #endif
6355 #endif /* defined(TARGET_I386) */
6357 #define NEW_STACK_SIZE 0x40000
6360 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6361 typedef struct {
6362 CPUArchState *env;
6363 pthread_mutex_t mutex;
6364 pthread_cond_t cond;
6365 pthread_t thread;
6366 uint32_t tid;
6367 abi_ulong child_tidptr;
6368 abi_ulong parent_tidptr;
6369 sigset_t sigmask;
6370 } new_thread_info;
6372 static void *clone_func(void *arg)
6374 new_thread_info *info = arg;
6375 CPUArchState *env;
6376 CPUState *cpu;
6377 TaskState *ts;
6379 rcu_register_thread();
6380 tcg_register_thread();
6381 env = info->env;
6382 cpu = ENV_GET_CPU(env);
6383 thread_cpu = cpu;
6384 ts = (TaskState *)cpu->opaque;
6385 info->tid = gettid();
6386 task_settid(ts);
6387 if (info->child_tidptr)
6388 put_user_u32(info->tid, info->child_tidptr);
6389 if (info->parent_tidptr)
6390 put_user_u32(info->tid, info->parent_tidptr);
6391 /* Enable signals. */
6392 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6393 /* Signal to the parent that we're ready. */
6394 pthread_mutex_lock(&info->mutex);
6395 pthread_cond_broadcast(&info->cond);
6396 pthread_mutex_unlock(&info->mutex);
6397 /* Wait until the parent has finished initializing the tls state. */
6398 pthread_mutex_lock(&clone_lock);
6399 pthread_mutex_unlock(&clone_lock);
6400 cpu_loop(env);
6401 /* never exits */
6402 return NULL;
6405 /* do_fork() Must return host values and target errnos (unlike most
6406 do_*() functions). */
6407 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6408 abi_ulong parent_tidptr, target_ulong newtls,
6409 abi_ulong child_tidptr)
6411 CPUState *cpu = ENV_GET_CPU(env);
6412 int ret;
6413 TaskState *ts;
6414 CPUState *new_cpu;
6415 CPUArchState *new_env;
6416 sigset_t sigmask;
6418 flags &= ~CLONE_IGNORED_FLAGS;
6420 /* Emulate vfork() with fork() */
6421 if (flags & CLONE_VFORK)
6422 flags &= ~(CLONE_VFORK | CLONE_VM);
6424 if (flags & CLONE_VM) {
6425 TaskState *parent_ts = (TaskState *)cpu->opaque;
6426 new_thread_info info;
6427 pthread_attr_t attr;
6429 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6430 (flags & CLONE_INVALID_THREAD_FLAGS)) {
6431 return -TARGET_EINVAL;
6434 ts = g_new0(TaskState, 1);
6435 init_task_state(ts);
6437 /* Grab a mutex so that thread setup appears atomic. */
6438 pthread_mutex_lock(&clone_lock);
6440 /* we create a new CPU instance. */
6441 new_env = cpu_copy(env);
6442 /* Init regs that differ from the parent. */
6443 cpu_clone_regs(new_env, newsp);
6444 new_cpu = ENV_GET_CPU(new_env);
6445 new_cpu->opaque = ts;
6446 ts->bprm = parent_ts->bprm;
6447 ts->info = parent_ts->info;
6448 ts->signal_mask = parent_ts->signal_mask;
6450 if (flags & CLONE_CHILD_CLEARTID) {
6451 ts->child_tidptr = child_tidptr;
6454 if (flags & CLONE_SETTLS) {
6455 cpu_set_tls (new_env, newtls);
6458 memset(&info, 0, sizeof(info));
6459 pthread_mutex_init(&info.mutex, NULL);
6460 pthread_mutex_lock(&info.mutex);
6461 pthread_cond_init(&info.cond, NULL);
6462 info.env = new_env;
6463 if (flags & CLONE_CHILD_SETTID) {
6464 info.child_tidptr = child_tidptr;
6466 if (flags & CLONE_PARENT_SETTID) {
6467 info.parent_tidptr = parent_tidptr;
6470 ret = pthread_attr_init(&attr);
6471 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6472 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6473 /* It is not safe to deliver signals until the child has finished
6474 initializing, so temporarily block all signals. */
6475 sigfillset(&sigmask);
6476 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6478 /* If this is our first additional thread, we need to ensure we
6479 * generate code for parallel execution and flush old translations.
6481 if (!parallel_cpus) {
6482 parallel_cpus = true;
6483 tb_flush(cpu);
6486 ret = pthread_create(&info.thread, &attr, clone_func, &info);
6487 /* TODO: Free new CPU state if thread creation failed. */
6489 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6490 pthread_attr_destroy(&attr);
6491 if (ret == 0) {
6492 /* Wait for the child to initialize. */
6493 pthread_cond_wait(&info.cond, &info.mutex);
6494 ret = info.tid;
6495 } else {
6496 ret = -1;
6498 pthread_mutex_unlock(&info.mutex);
6499 pthread_cond_destroy(&info.cond);
6500 pthread_mutex_destroy(&info.mutex);
6501 pthread_mutex_unlock(&clone_lock);
6502 } else {
6503 /* if no CLONE_VM, we consider it is a fork */
6504 if (flags & CLONE_INVALID_FORK_FLAGS) {
6505 return -TARGET_EINVAL;
6508 /* We can't support custom termination signals */
6509 if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6510 return -TARGET_EINVAL;
6513 if (block_signals()) {
6514 return -TARGET_ERESTARTSYS;
6517 fork_start();
6518 ret = fork();
6519 if (ret == 0) {
6520 /* Child Process. */
6521 cpu_clone_regs(env, newsp);
6522 fork_end(1);
6523 /* There is a race condition here. The parent process could
6524 theoretically read the TID in the child process before the child
6525 tid is set. This would require using either ptrace
6526 (not implemented) or having *_tidptr to point at a shared memory
6527 mapping. We can't repeat the spinlock hack used above because
6528 the child process gets its own copy of the lock. */
6529 if (flags & CLONE_CHILD_SETTID)
6530 put_user_u32(gettid(), child_tidptr);
6531 if (flags & CLONE_PARENT_SETTID)
6532 put_user_u32(gettid(), parent_tidptr);
6533 ts = (TaskState *)cpu->opaque;
6534 if (flags & CLONE_SETTLS)
6535 cpu_set_tls (env, newtls);
6536 if (flags & CLONE_CHILD_CLEARTID)
6537 ts->child_tidptr = child_tidptr;
6538 } else {
6539 fork_end(0);
6542 return ret;
6545 /* warning : doesn't handle linux specific flags... */
6546 static int target_to_host_fcntl_cmd(int cmd)
6548 int ret;
6550 switch(cmd) {
6551 case TARGET_F_DUPFD:
6552 case TARGET_F_GETFD:
6553 case TARGET_F_SETFD:
6554 case TARGET_F_GETFL:
6555 case TARGET_F_SETFL:
6556 ret = cmd;
6557 break;
6558 case TARGET_F_GETLK:
6559 ret = F_GETLK64;
6560 break;
6561 case TARGET_F_SETLK:
6562 ret = F_SETLK64;
6563 break;
6564 case TARGET_F_SETLKW:
6565 ret = F_SETLKW64;
6566 break;
6567 case TARGET_F_GETOWN:
6568 ret = F_GETOWN;
6569 break;
6570 case TARGET_F_SETOWN:
6571 ret = F_SETOWN;
6572 break;
6573 case TARGET_F_GETSIG:
6574 ret = F_GETSIG;
6575 break;
6576 case TARGET_F_SETSIG:
6577 ret = F_SETSIG;
6578 break;
6579 #if TARGET_ABI_BITS == 32
6580 case TARGET_F_GETLK64:
6581 ret = F_GETLK64;
6582 break;
6583 case TARGET_F_SETLK64:
6584 ret = F_SETLK64;
6585 break;
6586 case TARGET_F_SETLKW64:
6587 ret = F_SETLKW64;
6588 break;
6589 #endif
6590 case TARGET_F_SETLEASE:
6591 ret = F_SETLEASE;
6592 break;
6593 case TARGET_F_GETLEASE:
6594 ret = F_GETLEASE;
6595 break;
6596 #ifdef F_DUPFD_CLOEXEC
6597 case TARGET_F_DUPFD_CLOEXEC:
6598 ret = F_DUPFD_CLOEXEC;
6599 break;
6600 #endif
6601 case TARGET_F_NOTIFY:
6602 ret = F_NOTIFY;
6603 break;
6604 #ifdef F_GETOWN_EX
6605 case TARGET_F_GETOWN_EX:
6606 ret = F_GETOWN_EX;
6607 break;
6608 #endif
6609 #ifdef F_SETOWN_EX
6610 case TARGET_F_SETOWN_EX:
6611 ret = F_SETOWN_EX;
6612 break;
6613 #endif
6614 #ifdef F_SETPIPE_SZ
6615 case TARGET_F_SETPIPE_SZ:
6616 ret = F_SETPIPE_SZ;
6617 break;
6618 case TARGET_F_GETPIPE_SZ:
6619 ret = F_GETPIPE_SZ;
6620 break;
6621 #endif
6622 default:
6623 ret = -TARGET_EINVAL;
6624 break;
6627 #if defined(__powerpc64__)
6628 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6629 * is not supported by kernel. The glibc fcntl call actually adjusts
6630 * them to 5, 6 and 7 before making the syscall(). Since we make the
6631 * syscall directly, adjust to what is supported by the kernel.
6633 if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6634 ret -= F_GETLK64 - 5;
6636 #endif
6638 return ret;
6641 #define FLOCK_TRANSTBL \
6642 switch (type) { \
6643 TRANSTBL_CONVERT(F_RDLCK); \
6644 TRANSTBL_CONVERT(F_WRLCK); \
6645 TRANSTBL_CONVERT(F_UNLCK); \
6646 TRANSTBL_CONVERT(F_EXLCK); \
6647 TRANSTBL_CONVERT(F_SHLCK); \
6650 static int target_to_host_flock(int type)
6652 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6653 FLOCK_TRANSTBL
6654 #undef TRANSTBL_CONVERT
6655 return -TARGET_EINVAL;
6658 static int host_to_target_flock(int type)
6660 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6661 FLOCK_TRANSTBL
6662 #undef TRANSTBL_CONVERT
6663 /* if we don't know how to convert the value coming
6664 * from the host we copy to the target field as-is
6666 return type;
6669 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6670 abi_ulong target_flock_addr)
6672 struct target_flock *target_fl;
6673 int l_type;
6675 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6676 return -TARGET_EFAULT;
6679 __get_user(l_type, &target_fl->l_type);
6680 l_type = target_to_host_flock(l_type);
6681 if (l_type < 0) {
6682 return l_type;
6684 fl->l_type = l_type;
6685 __get_user(fl->l_whence, &target_fl->l_whence);
6686 __get_user(fl->l_start, &target_fl->l_start);
6687 __get_user(fl->l_len, &target_fl->l_len);
6688 __get_user(fl->l_pid, &target_fl->l_pid);
6689 unlock_user_struct(target_fl, target_flock_addr, 0);
6690 return 0;
6693 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6694 const struct flock64 *fl)
6696 struct target_flock *target_fl;
6697 short l_type;
6699 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6700 return -TARGET_EFAULT;
6703 l_type = host_to_target_flock(fl->l_type);
6704 __put_user(l_type, &target_fl->l_type);
6705 __put_user(fl->l_whence, &target_fl->l_whence);
6706 __put_user(fl->l_start, &target_fl->l_start);
6707 __put_user(fl->l_len, &target_fl->l_len);
6708 __put_user(fl->l_pid, &target_fl->l_pid);
6709 unlock_user_struct(target_fl, target_flock_addr, 1);
6710 return 0;
6713 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6714 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6716 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6717 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6718 abi_ulong target_flock_addr)
6720 struct target_oabi_flock64 *target_fl;
6721 int l_type;
6723 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6724 return -TARGET_EFAULT;
6727 __get_user(l_type, &target_fl->l_type);
6728 l_type = target_to_host_flock(l_type);
6729 if (l_type < 0) {
6730 return l_type;
6732 fl->l_type = l_type;
6733 __get_user(fl->l_whence, &target_fl->l_whence);
6734 __get_user(fl->l_start, &target_fl->l_start);
6735 __get_user(fl->l_len, &target_fl->l_len);
6736 __get_user(fl->l_pid, &target_fl->l_pid);
6737 unlock_user_struct(target_fl, target_flock_addr, 0);
6738 return 0;
6741 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6742 const struct flock64 *fl)
6744 struct target_oabi_flock64 *target_fl;
6745 short l_type;
6747 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6748 return -TARGET_EFAULT;
6751 l_type = host_to_target_flock(fl->l_type);
6752 __put_user(l_type, &target_fl->l_type);
6753 __put_user(fl->l_whence, &target_fl->l_whence);
6754 __put_user(fl->l_start, &target_fl->l_start);
6755 __put_user(fl->l_len, &target_fl->l_len);
6756 __put_user(fl->l_pid, &target_fl->l_pid);
6757 unlock_user_struct(target_fl, target_flock_addr, 1);
6758 return 0;
6760 #endif
6762 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6763 abi_ulong target_flock_addr)
6765 struct target_flock64 *target_fl;
6766 int l_type;
6768 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6769 return -TARGET_EFAULT;
6772 __get_user(l_type, &target_fl->l_type);
6773 l_type = target_to_host_flock(l_type);
6774 if (l_type < 0) {
6775 return l_type;
6777 fl->l_type = l_type;
6778 __get_user(fl->l_whence, &target_fl->l_whence);
6779 __get_user(fl->l_start, &target_fl->l_start);
6780 __get_user(fl->l_len, &target_fl->l_len);
6781 __get_user(fl->l_pid, &target_fl->l_pid);
6782 unlock_user_struct(target_fl, target_flock_addr, 0);
6783 return 0;
6786 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6787 const struct flock64 *fl)
6789 struct target_flock64 *target_fl;
6790 short l_type;
6792 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6793 return -TARGET_EFAULT;
6796 l_type = host_to_target_flock(fl->l_type);
6797 __put_user(l_type, &target_fl->l_type);
6798 __put_user(fl->l_whence, &target_fl->l_whence);
6799 __put_user(fl->l_start, &target_fl->l_start);
6800 __put_user(fl->l_len, &target_fl->l_len);
6801 __put_user(fl->l_pid, &target_fl->l_pid);
6802 unlock_user_struct(target_fl, target_flock_addr, 1);
6803 return 0;
6806 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6808 struct flock64 fl64;
6809 #ifdef F_GETOWN_EX
6810 struct f_owner_ex fox;
6811 struct target_f_owner_ex *target_fox;
6812 #endif
6813 abi_long ret;
6814 int host_cmd = target_to_host_fcntl_cmd(cmd);
6816 if (host_cmd == -TARGET_EINVAL)
6817 return host_cmd;
6819 switch(cmd) {
6820 case TARGET_F_GETLK:
6821 ret = copy_from_user_flock(&fl64, arg);
6822 if (ret) {
6823 return ret;
6825 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6826 if (ret == 0) {
6827 ret = copy_to_user_flock(arg, &fl64);
6829 break;
6831 case TARGET_F_SETLK:
6832 case TARGET_F_SETLKW:
6833 ret = copy_from_user_flock(&fl64, arg);
6834 if (ret) {
6835 return ret;
6837 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6838 break;
6840 case TARGET_F_GETLK64:
6841 ret = copy_from_user_flock64(&fl64, arg);
6842 if (ret) {
6843 return ret;
6845 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6846 if (ret == 0) {
6847 ret = copy_to_user_flock64(arg, &fl64);
6849 break;
6850 case TARGET_F_SETLK64:
6851 case TARGET_F_SETLKW64:
6852 ret = copy_from_user_flock64(&fl64, arg);
6853 if (ret) {
6854 return ret;
6856 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6857 break;
6859 case TARGET_F_GETFL:
6860 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6861 if (ret >= 0) {
6862 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6864 break;
6866 case TARGET_F_SETFL:
6867 ret = get_errno(safe_fcntl(fd, host_cmd,
6868 target_to_host_bitmask(arg,
6869 fcntl_flags_tbl)));
6870 break;
6872 #ifdef F_GETOWN_EX
6873 case TARGET_F_GETOWN_EX:
6874 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6875 if (ret >= 0) {
6876 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6877 return -TARGET_EFAULT;
6878 target_fox->type = tswap32(fox.type);
6879 target_fox->pid = tswap32(fox.pid);
6880 unlock_user_struct(target_fox, arg, 1);
6882 break;
6883 #endif
6885 #ifdef F_SETOWN_EX
6886 case TARGET_F_SETOWN_EX:
6887 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6888 return -TARGET_EFAULT;
6889 fox.type = tswap32(target_fox->type);
6890 fox.pid = tswap32(target_fox->pid);
6891 unlock_user_struct(target_fox, arg, 0);
6892 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6893 break;
6894 #endif
6896 case TARGET_F_SETOWN:
6897 case TARGET_F_GETOWN:
6898 case TARGET_F_SETSIG:
6899 case TARGET_F_GETSIG:
6900 case TARGET_F_SETLEASE:
6901 case TARGET_F_GETLEASE:
6902 case TARGET_F_SETPIPE_SZ:
6903 case TARGET_F_GETPIPE_SZ:
6904 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6905 break;
6907 default:
6908 ret = get_errno(safe_fcntl(fd, cmd, arg));
6909 break;
6911 return ret;
6914 #ifdef USE_UID16
/* Clamp a 32-bit uid into the legacy 16-bit range; any id that does not
 * fit becomes 65534, the kernel's overflow ("nobody") id. */
static inline int high2lowuid(int uid)
{
    return (uid > 65535) ? 65534 : uid;
}

/* Same clamping for group ids. */
static inline int high2lowgid(int gid)
{
    return (gid > 65535) ? 65534 : gid;
}

/* Widen a 16-bit uid; 0xffff is the "unchanged" sentinel and maps to -1. */
static inline int low2highuid(int uid)
{
    return ((int16_t)uid == -1) ? -1 : uid;
}

/* Widen a 16-bit gid with the same -1 sentinel handling. */
static inline int low2highgid(int gid)
{
    return ((int16_t)gid == -1) ? -1 : gid;
}
6947 static inline int tswapid(int id)
6949 return tswap16(id);
6952 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6954 #else /* !USE_UID16 */
6955 static inline int high2lowuid(int uid)
6957 return uid;
6959 static inline int high2lowgid(int gid)
6961 return gid;
6963 static inline int low2highuid(int uid)
6965 return uid;
6967 static inline int low2highgid(int gid)
6969 return gid;
6971 static inline int tswapid(int id)
6973 return tswap32(id);
6976 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6978 #endif /* USE_UID16 */
6980 /* We must do direct syscalls for setting UID/GID, because we want to
6981 * implement the Linux system call semantics of "change only for this thread",
6982 * not the libc/POSIX semantics of "change for all threads in process".
6983 * (See http://ewontfix.com/17/ for more details.)
6984 * We use the 32-bit version of the syscalls if present; if it is not
6985 * then either the host architecture supports 32-bit UIDs natively with
6986 * the standard syscall, or the 16-bit UID is the best we can do.
6988 #ifdef __NR_setuid32
6989 #define __NR_sys_setuid __NR_setuid32
6990 #else
6991 #define __NR_sys_setuid __NR_setuid
6992 #endif
6993 #ifdef __NR_setgid32
6994 #define __NR_sys_setgid __NR_setgid32
6995 #else
6996 #define __NR_sys_setgid __NR_setgid
6997 #endif
6998 #ifdef __NR_setresuid32
6999 #define __NR_sys_setresuid __NR_setresuid32
7000 #else
7001 #define __NR_sys_setresuid __NR_setresuid
7002 #endif
7003 #ifdef __NR_setresgid32
7004 #define __NR_sys_setresgid __NR_setresgid32
7005 #else
7006 #define __NR_sys_setresgid __NR_setresgid
7007 #endif
7009 _syscall1(int, sys_setuid, uid_t, uid)
7010 _syscall1(int, sys_setgid, gid_t, gid)
7011 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7012 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7014 void syscall_init(void)
7016 IOCTLEntry *ie;
7017 const argtype *arg_type;
7018 int size;
7019 int i;
7021 thunk_init(STRUCT_MAX);
7023 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7024 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7025 #include "syscall_types.h"
7026 #undef STRUCT
7027 #undef STRUCT_SPECIAL
7029 /* Build target_to_host_errno_table[] table from
7030 * host_to_target_errno_table[]. */
7031 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
7032 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
7035 /* we patch the ioctl size if necessary. We rely on the fact that
7036 no ioctl has all the bits at '1' in the size field */
7037 ie = ioctl_entries;
7038 while (ie->target_cmd != 0) {
7039 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7040 TARGET_IOC_SIZEMASK) {
7041 arg_type = ie->arg_type;
7042 if (arg_type[0] != TYPE_PTR) {
7043 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7044 ie->target_cmd);
7045 exit(1);
7047 arg_type++;
7048 size = thunk_type_size(arg_type, 0);
7049 ie->target_cmd = (ie->target_cmd &
7050 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7051 (size << TARGET_IOC_SIZESHIFT);
7054 /* automatic consistency check if same arch */
7055 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7056 (defined(__x86_64__) && defined(TARGET_X86_64))
7057 if (unlikely(ie->target_cmd != ie->host_cmd)) {
7058 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7059 ie->name, ie->target_cmd, ie->host_cmd);
7061 #endif
7062 ie++;
#if TARGET_ABI_BITS == 32
/* Combine the two registers of a 32-bit ABI register pair into a 64-bit
 * file offset, honouring the target's word order. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABIs pass the offset in a single register; the second is unused. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64: on ABIs that align 64-bit register pairs, the pair is
 * shifted up by one argument slot, so skip the padding register. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair realignment as target_truncate64. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
7110 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
7111 abi_ulong target_addr)
7113 struct target_timespec *target_ts;
7115 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
7116 return -TARGET_EFAULT;
7117 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
7118 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
7119 unlock_user_struct(target_ts, target_addr, 0);
7120 return 0;
7123 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
7124 struct timespec *host_ts)
7126 struct target_timespec *target_ts;
7128 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
7129 return -TARGET_EFAULT;
7130 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
7131 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
7132 unlock_user_struct(target_ts, target_addr, 1);
7133 return 0;
7136 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
7137 abi_ulong target_addr)
7139 struct target_itimerspec *target_itspec;
7141 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
7142 return -TARGET_EFAULT;
7145 host_itspec->it_interval.tv_sec =
7146 tswapal(target_itspec->it_interval.tv_sec);
7147 host_itspec->it_interval.tv_nsec =
7148 tswapal(target_itspec->it_interval.tv_nsec);
7149 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
7150 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
7152 unlock_user_struct(target_itspec, target_addr, 1);
7153 return 0;
7156 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7157 struct itimerspec *host_its)
7159 struct target_itimerspec *target_itspec;
7161 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
7162 return -TARGET_EFAULT;
7165 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
7166 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
7168 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
7169 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
7171 unlock_user_struct(target_itspec, target_addr, 0);
7172 return 0;
7175 static inline abi_long target_to_host_timex(struct timex *host_tx,
7176 abi_long target_addr)
7178 struct target_timex *target_tx;
7180 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7181 return -TARGET_EFAULT;
7184 __get_user(host_tx->modes, &target_tx->modes);
7185 __get_user(host_tx->offset, &target_tx->offset);
7186 __get_user(host_tx->freq, &target_tx->freq);
7187 __get_user(host_tx->maxerror, &target_tx->maxerror);
7188 __get_user(host_tx->esterror, &target_tx->esterror);
7189 __get_user(host_tx->status, &target_tx->status);
7190 __get_user(host_tx->constant, &target_tx->constant);
7191 __get_user(host_tx->precision, &target_tx->precision);
7192 __get_user(host_tx->tolerance, &target_tx->tolerance);
7193 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7194 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7195 __get_user(host_tx->tick, &target_tx->tick);
7196 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7197 __get_user(host_tx->jitter, &target_tx->jitter);
7198 __get_user(host_tx->shift, &target_tx->shift);
7199 __get_user(host_tx->stabil, &target_tx->stabil);
7200 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7201 __get_user(host_tx->calcnt, &target_tx->calcnt);
7202 __get_user(host_tx->errcnt, &target_tx->errcnt);
7203 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7204 __get_user(host_tx->tai, &target_tx->tai);
7206 unlock_user_struct(target_tx, target_addr, 0);
7207 return 0;
7210 static inline abi_long host_to_target_timex(abi_long target_addr,
7211 struct timex *host_tx)
7213 struct target_timex *target_tx;
7215 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7216 return -TARGET_EFAULT;
7219 __put_user(host_tx->modes, &target_tx->modes);
7220 __put_user(host_tx->offset, &target_tx->offset);
7221 __put_user(host_tx->freq, &target_tx->freq);
7222 __put_user(host_tx->maxerror, &target_tx->maxerror);
7223 __put_user(host_tx->esterror, &target_tx->esterror);
7224 __put_user(host_tx->status, &target_tx->status);
7225 __put_user(host_tx->constant, &target_tx->constant);
7226 __put_user(host_tx->precision, &target_tx->precision);
7227 __put_user(host_tx->tolerance, &target_tx->tolerance);
7228 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7229 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7230 __put_user(host_tx->tick, &target_tx->tick);
7231 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7232 __put_user(host_tx->jitter, &target_tx->jitter);
7233 __put_user(host_tx->shift, &target_tx->shift);
7234 __put_user(host_tx->stabil, &target_tx->stabil);
7235 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7236 __put_user(host_tx->calcnt, &target_tx->calcnt);
7237 __put_user(host_tx->errcnt, &target_tx->errcnt);
7238 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7239 __put_user(host_tx->tai, &target_tx->tai);
7241 unlock_user_struct(target_tx, target_addr, 1);
7242 return 0;
7246 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7247 abi_ulong target_addr)
7249 struct target_sigevent *target_sevp;
7251 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7252 return -TARGET_EFAULT;
7255 /* This union is awkward on 64 bit systems because it has a 32 bit
7256 * integer and a pointer in it; we follow the conversion approach
7257 * used for handling sigval types in signal.c so the guest should get
7258 * the correct value back even if we did a 64 bit byteswap and it's
7259 * using the 32 bit integer.
7261 host_sevp->sigev_value.sival_ptr =
7262 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7263 host_sevp->sigev_signo =
7264 target_to_host_signal(tswap32(target_sevp->sigev_signo));
7265 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7266 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7268 unlock_user_struct(target_sevp, target_addr, 1);
7269 return 0;
#if defined(TARGET_NR_mlockall)
/* Translate the target's mlockall() flag bits to host MCL_* bits. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }
    return result;
}
#endif
7287 static inline abi_long host_to_target_stat64(void *cpu_env,
7288 abi_ulong target_addr,
7289 struct stat *host_st)
7291 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7292 if (((CPUARMState *)cpu_env)->eabi) {
7293 struct target_eabi_stat64 *target_st;
7295 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7296 return -TARGET_EFAULT;
7297 memset(target_st, 0, sizeof(struct target_eabi_stat64));
7298 __put_user(host_st->st_dev, &target_st->st_dev);
7299 __put_user(host_st->st_ino, &target_st->st_ino);
7300 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7301 __put_user(host_st->st_ino, &target_st->__st_ino);
7302 #endif
7303 __put_user(host_st->st_mode, &target_st->st_mode);
7304 __put_user(host_st->st_nlink, &target_st->st_nlink);
7305 __put_user(host_st->st_uid, &target_st->st_uid);
7306 __put_user(host_st->st_gid, &target_st->st_gid);
7307 __put_user(host_st->st_rdev, &target_st->st_rdev);
7308 __put_user(host_st->st_size, &target_st->st_size);
7309 __put_user(host_st->st_blksize, &target_st->st_blksize);
7310 __put_user(host_st->st_blocks, &target_st->st_blocks);
7311 __put_user(host_st->st_atime, &target_st->target_st_atime);
7312 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7313 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7314 unlock_user_struct(target_st, target_addr, 1);
7315 } else
7316 #endif
7318 #if defined(TARGET_HAS_STRUCT_STAT64)
7319 struct target_stat64 *target_st;
7320 #else
7321 struct target_stat *target_st;
7322 #endif
7324 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7325 return -TARGET_EFAULT;
7326 memset(target_st, 0, sizeof(*target_st));
7327 __put_user(host_st->st_dev, &target_st->st_dev);
7328 __put_user(host_st->st_ino, &target_st->st_ino);
7329 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7330 __put_user(host_st->st_ino, &target_st->__st_ino);
7331 #endif
7332 __put_user(host_st->st_mode, &target_st->st_mode);
7333 __put_user(host_st->st_nlink, &target_st->st_nlink);
7334 __put_user(host_st->st_uid, &target_st->st_uid);
7335 __put_user(host_st->st_gid, &target_st->st_gid);
7336 __put_user(host_st->st_rdev, &target_st->st_rdev);
7337 /* XXX: better use of kernel struct */
7338 __put_user(host_st->st_size, &target_st->st_size);
7339 __put_user(host_st->st_blksize, &target_st->st_blksize);
7340 __put_user(host_st->st_blocks, &target_st->st_blocks);
7341 __put_user(host_st->st_atime, &target_st->target_st_atime);
7342 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7343 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7344 unlock_user_struct(target_st, target_addr, 1);
7347 return 0;
7350 /* ??? Using host futex calls even when target atomic operations
7351 are not really atomic probably breaks things. However implementing
7352 futexes locally would make futexes shared between multiple processes
7353 tricky. However they're probably useless because guest atomic
7354 operations won't work either. */
7355 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7356 target_ulong uaddr2, int val3)
7358 struct timespec ts, *pts;
7359 int base_op;
7361 /* ??? We assume FUTEX_* constants are the same on both host
7362 and target. */
7363 #ifdef FUTEX_CMD_MASK
7364 base_op = op & FUTEX_CMD_MASK;
7365 #else
7366 base_op = op;
7367 #endif
7368 switch (base_op) {
7369 case FUTEX_WAIT:
7370 case FUTEX_WAIT_BITSET:
7371 if (timeout) {
7372 pts = &ts;
7373 target_to_host_timespec(pts, timeout);
7374 } else {
7375 pts = NULL;
7377 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
7378 pts, NULL, val3));
7379 case FUTEX_WAKE:
7380 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7381 case FUTEX_FD:
7382 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7383 case FUTEX_REQUEUE:
7384 case FUTEX_CMP_REQUEUE:
7385 case FUTEX_WAKE_OP:
7386 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7387 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7388 But the prototype takes a `struct timespec *'; insert casts
7389 to satisfy the compiler. We do not need to tswap TIMEOUT
7390 since it's not compared to guest memory. */
7391 pts = (struct timespec *)(uintptr_t) timeout;
7392 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
7393 g2h(uaddr2),
7394 (base_op == FUTEX_CMP_REQUEUE
7395 ? tswap32(val3)
7396 : val3)));
7397 default:
7398 return -TARGET_ENOSYS;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): build a host file_handle sized from the
 * guest's handle_bytes, forward the call, and copy the opaque handle and
 * mount id back to guest memory.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */
    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): copy the guest's opaque file handle to a
 * host buffer, fix up byte order of the header fields, and forward.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
7489 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7491 /* signalfd siginfo conversion */
7493 static void
7494 host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
7495 const struct signalfd_siginfo *info)
7497 int sig = host_to_target_signal(info->ssi_signo);
7499 /* linux/signalfd.h defines a ssi_addr_lsb
7500 * not defined in sys/signalfd.h but used by some kernels
7503 #ifdef BUS_MCEERR_AO
7504 if (tinfo->ssi_signo == SIGBUS &&
7505 (tinfo->ssi_code == BUS_MCEERR_AR ||
7506 tinfo->ssi_code == BUS_MCEERR_AO)) {
7507 uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
7508 uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
7509 *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
7511 #endif
7513 tinfo->ssi_signo = tswap32(sig);
7514 tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
7515 tinfo->ssi_code = tswap32(info->ssi_code);
7516 tinfo->ssi_pid = tswap32(info->ssi_pid);
7517 tinfo->ssi_uid = tswap32(info->ssi_uid);
7518 tinfo->ssi_fd = tswap32(info->ssi_fd);
7519 tinfo->ssi_tid = tswap32(info->ssi_tid);
7520 tinfo->ssi_band = tswap32(info->ssi_band);
7521 tinfo->ssi_overrun = tswap32(info->ssi_overrun);
7522 tinfo->ssi_trapno = tswap32(info->ssi_trapno);
7523 tinfo->ssi_status = tswap32(info->ssi_status);
7524 tinfo->ssi_int = tswap32(info->ssi_int);
7525 tinfo->ssi_ptr = tswap64(info->ssi_ptr);
7526 tinfo->ssi_utime = tswap64(info->ssi_utime);
7527 tinfo->ssi_stime = tswap64(info->ssi_stime);
7528 tinfo->ssi_addr = tswap64(info->ssi_addr);
7531 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
7533 int i;
7535 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
7536 host_to_target_signalfd_siginfo(buf + i, buf + i);
7539 return len;
7542 static TargetFdTrans target_signalfd_trans = {
7543 .host_to_target_data = host_to_target_data_signalfd,
7546 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7548 int host_flags;
7549 target_sigset_t *target_mask;
7550 sigset_t host_mask;
7551 abi_long ret;
7553 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7554 return -TARGET_EINVAL;
7556 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7557 return -TARGET_EFAULT;
7560 target_to_host_sigset(&host_mask, target_mask);
7562 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7564 ret = get_errno(signalfd(fd, &host_mask, host_flags));
7565 if (ret >= 0) {
7566 fd_trans_register(ret, &target_signalfd_trans);
7569 unlock_user_struct(target_mask, mask, 0);
7571 return ret;
7573 #endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same. */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Terminating signal lives in the low 7 bits. */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8..15. */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
7589 static int open_self_cmdline(void *cpu_env, int fd)
7591 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7592 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7593 int i;
7595 for (i = 0; i < bprm->argc; i++) {
7596 size_t len = strlen(bprm->argv[i]) + 1;
7598 if (write(fd, bprm->argv[i], len) != len) {
7599 return -1;
7603 return 0;
7606 static int open_self_maps(void *cpu_env, int fd)
7608 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7609 TaskState *ts = cpu->opaque;
7610 FILE *fp;
7611 char *line = NULL;
7612 size_t len = 0;
7613 ssize_t read;
7615 fp = fopen("/proc/self/maps", "r");
7616 if (fp == NULL) {
7617 return -1;
7620 while ((read = getline(&line, &len, fp)) != -1) {
7621 int fields, dev_maj, dev_min, inode;
7622 uint64_t min, max, offset;
7623 char flag_r, flag_w, flag_x, flag_p;
7624 char path[512] = "";
7625 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7626 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7627 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
7629 if ((fields < 10) || (fields > 11)) {
7630 continue;
7632 if (h2g_valid(min)) {
7633 int flags = page_get_flags(h2g(min));
7634 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
7635 if (page_check_range(h2g(min), max - min, flags) == -1) {
7636 continue;
7638 if (h2g(min) == ts->info->stack_limit) {
7639 pstrcpy(path, sizeof(path), " [stack]");
7641 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
7642 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7643 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7644 flag_x, flag_p, offset, dev_maj, dev_min, inode,
7645 path[0] ? " " : "", path);
7649 free(line);
7650 fclose(fp);
7652 return 0;
7655 static int open_self_stat(void *cpu_env, int fd)
7657 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7658 TaskState *ts = cpu->opaque;
7659 abi_ulong start_stack = ts->info->start_stack;
7660 int i;
7662 for (i = 0; i < 44; i++) {
7663 char buf[128];
7664 int len;
7665 uint64_t val = 0;
7667 if (i == 0) {
7668 /* pid */
7669 val = getpid();
7670 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7671 } else if (i == 1) {
7672 /* app name */
7673 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
7674 } else if (i == 27) {
7675 /* stack bottom */
7676 val = start_stack;
7677 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7678 } else {
7679 /* for the rest, there is MasterCard */
7680 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
7683 len = strlen(buf);
7684 if (write(fd, buf, len) != len) {
7685 return -1;
7689 return 0;
7692 static int open_self_auxv(void *cpu_env, int fd)
7694 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7695 TaskState *ts = cpu->opaque;
7696 abi_ulong auxv = ts->info->saved_auxv;
7697 abi_ulong len = ts->info->auxv_len;
7698 char *ptr;
7701 * Auxiliary vector is stored in target process stack.
7702 * read in whole auxv vector and copy it to file
7704 ptr = lock_user(VERIFY_READ, auxv, len, 0);
7705 if (ptr != NULL) {
7706 while (len > 0) {
7707 ssize_t r;
7708 r = write(fd, ptr, len);
7709 if (r <= 0) {
7710 break;
7712 len -= r;
7713 ptr += r;
7715 lseek(fd, 0, SEEK_SET);
7716 unlock_user(ptr, auxv, len);
7719 return 0;
/*
 * Return 1 if 'filename' names the given /proc entry of the current
 * process, reached either as "/proc/self/<entry>" or as
 * "/proc/<our-pid>/<entry>".  Return 0 for anything else, including
 * the same entry of a different process.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (strncmp(filename, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    filename += strlen("/proc/");

    if (strncmp(filename, "self/", strlen("self/")) == 0) {
        filename += strlen("self/");
    } else if (*filename >= '1' && *filename <= '9') {
        /* A numeric directory only matches when it is our own pid. */
        char myself[80];

        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(filename, myself, strlen(myself)) != 0) {
            return 0;
        }
        filename += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(filename, entry) == 0;
}
7746 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-match comparator for whole /proc paths (used by the fake-open
 * table for entries like /proc/net/route). */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
7752 static int open_net_route(void *cpu_env, int fd)
7754 FILE *fp;
7755 char *line = NULL;
7756 size_t len = 0;
7757 ssize_t read;
7759 fp = fopen("/proc/net/route", "r");
7760 if (fp == NULL) {
7761 return -1;
7764 /* read header */
7766 read = getline(&line, &len, fp);
7767 dprintf(fd, "%s", line);
7769 /* read routes */
7771 while ((read = getline(&line, &len, fp)) != -1) {
7772 char iface[16];
7773 uint32_t dest, gw, mask;
7774 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7775 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7776 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7777 &mask, &mtu, &window, &irtt);
7778 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7779 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7780 metric, tswap32(mask), mtu, window, irtt);
7783 free(line);
7784 fclose(fp);
7786 return 0;
7788 #endif
7790 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7792 struct fake_open {
7793 const char *filename;
7794 int (*fill)(void *cpu_env, int fd);
7795 int (*cmp)(const char *s1, const char *s2);
7797 const struct fake_open *fake_open;
7798 static const struct fake_open fakes[] = {
7799 { "maps", open_self_maps, is_proc_myself },
7800 { "stat", open_self_stat, is_proc_myself },
7801 { "auxv", open_self_auxv, is_proc_myself },
7802 { "cmdline", open_self_cmdline, is_proc_myself },
7803 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7804 { "/proc/net/route", open_net_route, is_proc },
7805 #endif
7806 { NULL, NULL, NULL }
7809 if (is_proc_myself(pathname, "exe")) {
7810 int execfd = qemu_getauxval(AT_EXECFD);
7811 return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7814 for (fake_open = fakes; fake_open->filename; fake_open++) {
7815 if (fake_open->cmp(pathname, fake_open->filename)) {
7816 break;
7820 if (fake_open->filename) {
7821 const char *tmpdir;
7822 char filename[PATH_MAX];
7823 int fd, r;
7825 /* create temporary file to map stat to */
7826 tmpdir = getenv("TMPDIR");
7827 if (!tmpdir)
7828 tmpdir = "/tmp";
7829 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7830 fd = mkstemp(filename);
7831 if (fd < 0) {
7832 return fd;
7834 unlink(filename);
7836 if ((r = fake_open->fill(cpu_env, fd))) {
7837 int e = errno;
7838 close(fd);
7839 errno = e;
7840 return r;
7842 lseek(fd, 0, SEEK_SET);
7844 return fd;
7847 return safe_openat(dirfd, path(pathname), flags, mode);
7850 #define TIMER_MAGIC 0x0caf0000
7851 #define TIMER_MAGIC_MASK 0xffff0000
7853 /* Convert QEMU provided timer ID back to internal 16bit index format */
7854 static target_timer_t get_timer_id(abi_long arg)
7856 target_timer_t timerid = arg;
7858 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7859 return -TARGET_EINVAL;
7862 timerid &= 0xffff;
7864 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7865 return -TARGET_EINVAL;
7868 return timerid;
7871 static abi_long swap_data_eventfd(void *buf, size_t len)
7873 uint64_t *counter = buf;
7874 int i;
7876 if (len < sizeof(uint64_t)) {
7877 return -EINVAL;
7880 for (i = 0; i < len; i += sizeof(uint64_t)) {
7881 *counter = tswap64(*counter);
7882 counter++;
7885 return len;
7888 static TargetFdTrans target_eventfd_trans = {
7889 .host_to_target_data = swap_data_eventfd,
7890 .target_to_host_data = swap_data_eventfd,
7893 #if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
7894 (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
7895 defined(__NR_inotify_init1))
7896 static abi_long host_to_target_data_inotify(void *buf, size_t len)
7898 struct inotify_event *ev;
7899 int i;
7900 uint32_t name_len;
7902 for (i = 0; i < len; i += sizeof(struct inotify_event) + name_len) {
7903 ev = (struct inotify_event *)((char *)buf + i);
7904 name_len = ev->len;
7906 ev->wd = tswap32(ev->wd);
7907 ev->mask = tswap32(ev->mask);
7908 ev->cookie = tswap32(ev->cookie);
7909 ev->len = tswap32(name_len);
7912 return len;
7915 static TargetFdTrans target_inotify_trans = {
7916 .host_to_target_data = host_to_target_data_inotify,
7918 #endif
7920 static int target_to_host_cpu_mask(unsigned long *host_mask,
7921 size_t host_size,
7922 abi_ulong target_addr,
7923 size_t target_size)
7925 unsigned target_bits = sizeof(abi_ulong) * 8;
7926 unsigned host_bits = sizeof(*host_mask) * 8;
7927 abi_ulong *target_mask;
7928 unsigned i, j;
7930 assert(host_size >= target_size);
7932 target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7933 if (!target_mask) {
7934 return -TARGET_EFAULT;
7936 memset(host_mask, 0, host_size);
7938 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7939 unsigned bit = i * target_bits;
7940 abi_ulong val;
7942 __get_user(val, &target_mask[i]);
7943 for (j = 0; j < target_bits; j++, bit++) {
7944 if (val & (1UL << j)) {
7945 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7950 unlock_user(target_mask, target_addr, 0);
7951 return 0;
7954 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7955 size_t host_size,
7956 abi_ulong target_addr,
7957 size_t target_size)
7959 unsigned target_bits = sizeof(abi_ulong) * 8;
7960 unsigned host_bits = sizeof(*host_mask) * 8;
7961 abi_ulong *target_mask;
7962 unsigned i, j;
7964 assert(host_size >= target_size);
7966 target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7967 if (!target_mask) {
7968 return -TARGET_EFAULT;
7971 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7972 unsigned bit = i * target_bits;
7973 abi_ulong val = 0;
7975 for (j = 0; j < target_bits; j++, bit++) {
7976 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7977 val |= 1UL << j;
7980 __put_user(val, &target_mask[i]);
7983 unlock_user(target_mask, target_addr, target_size);
7984 return 0;
7987 /* do_syscall() should always have a single exit point at the end so
7988 that actions, such as logging of syscall results, can be performed.
7989 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7990 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
7991 abi_long arg2, abi_long arg3, abi_long arg4,
7992 abi_long arg5, abi_long arg6, abi_long arg7,
7993 abi_long arg8)
7995 CPUState *cpu = ENV_GET_CPU(cpu_env);
7996 abi_long ret;
7997 struct stat st;
7998 struct statfs stfs;
7999 void *p;
8001 #if defined(DEBUG_ERESTARTSYS)
8002 /* Debug-only code for exercising the syscall-restart code paths
8003 * in the per-architecture cpu main loops: restart every syscall
8004 * the guest makes once before letting it through.
8007 static int flag;
8009 flag = !flag;
8010 if (flag) {
8011 return -TARGET_ERESTARTSYS;
8014 #endif
8016 #ifdef DEBUG
8017 gemu_log("syscall %d", num);
8018 #endif
8019 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
8020 if(do_strace)
8021 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
8023 switch(num) {
8024 case TARGET_NR_exit:
8025 /* In old applications this may be used to implement _exit(2).
8026 However in threaded applictions it is used for thread termination,
8027 and _exit_group is used for application termination.
8028 Do thread termination if we have more then one thread. */
8030 if (block_signals()) {
8031 ret = -TARGET_ERESTARTSYS;
8032 break;
8035 cpu_list_lock();
8037 if (CPU_NEXT(first_cpu)) {
8038 TaskState *ts;
8040 /* Remove the CPU from the list. */
8041 QTAILQ_REMOVE(&cpus, cpu, node);
8043 cpu_list_unlock();
8045 ts = cpu->opaque;
8046 if (ts->child_tidptr) {
8047 put_user_u32(0, ts->child_tidptr);
8048 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
8049 NULL, NULL, 0);
8051 thread_cpu = NULL;
8052 object_unref(OBJECT(cpu));
8053 g_free(ts);
8054 rcu_unregister_thread();
8055 pthread_exit(NULL);
8058 cpu_list_unlock();
8059 preexit_cleanup(cpu_env, arg1);
8060 _exit(arg1);
8061 ret = 0; /* avoid warning */
8062 break;
8063 case TARGET_NR_read:
8064 if (arg3 == 0)
8065 ret = 0;
8066 else {
8067 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8068 goto efault;
8069 ret = get_errno(safe_read(arg1, p, arg3));
8070 if (ret >= 0 &&
8071 fd_trans_host_to_target_data(arg1)) {
8072 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8074 unlock_user(p, arg2, ret);
8076 break;
8077 case TARGET_NR_write:
8078 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8079 goto efault;
8080 if (fd_trans_target_to_host_data(arg1)) {
8081 void *copy = g_malloc(arg3);
8082 memcpy(copy, p, arg3);
8083 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8084 if (ret >= 0) {
8085 ret = get_errno(safe_write(arg1, copy, ret));
8087 g_free(copy);
8088 } else {
8089 ret = get_errno(safe_write(arg1, p, arg3));
8091 unlock_user(p, arg2, 0);
8092 break;
8093 #ifdef TARGET_NR_open
8094 case TARGET_NR_open:
8095 if (!(p = lock_user_string(arg1)))
8096 goto efault;
8097 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8098 target_to_host_bitmask(arg2, fcntl_flags_tbl),
8099 arg3));
8100 fd_trans_unregister(ret);
8101 unlock_user(p, arg1, 0);
8102 break;
8103 #endif
8104 case TARGET_NR_openat:
8105 if (!(p = lock_user_string(arg2)))
8106 goto efault;
8107 ret = get_errno(do_openat(cpu_env, arg1, p,
8108 target_to_host_bitmask(arg3, fcntl_flags_tbl),
8109 arg4));
8110 fd_trans_unregister(ret);
8111 unlock_user(p, arg2, 0);
8112 break;
8113 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8114 case TARGET_NR_name_to_handle_at:
8115 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8116 break;
8117 #endif
8118 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8119 case TARGET_NR_open_by_handle_at:
8120 ret = do_open_by_handle_at(arg1, arg2, arg3);
8121 fd_trans_unregister(ret);
8122 break;
8123 #endif
8124 case TARGET_NR_close:
8125 fd_trans_unregister(arg1);
8126 ret = get_errno(close(arg1));
8127 break;
8128 case TARGET_NR_brk:
8129 ret = do_brk(arg1);
8130 break;
8131 #ifdef TARGET_NR_fork
8132 case TARGET_NR_fork:
8133 ret = get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8134 break;
8135 #endif
8136 #ifdef TARGET_NR_waitpid
8137 case TARGET_NR_waitpid:
8139 int status;
8140 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8141 if (!is_error(ret) && arg2 && ret
8142 && put_user_s32(host_to_target_waitstatus(status), arg2))
8143 goto efault;
8145 break;
8146 #endif
8147 #ifdef TARGET_NR_waitid
8148 case TARGET_NR_waitid:
8150 siginfo_t info;
8151 info.si_pid = 0;
8152 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8153 if (!is_error(ret) && arg3 && info.si_pid != 0) {
8154 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8155 goto efault;
8156 host_to_target_siginfo(p, &info);
8157 unlock_user(p, arg3, sizeof(target_siginfo_t));
8160 break;
8161 #endif
8162 #ifdef TARGET_NR_creat /* not on alpha */
8163 case TARGET_NR_creat:
8164 if (!(p = lock_user_string(arg1)))
8165 goto efault;
8166 ret = get_errno(creat(p, arg2));
8167 fd_trans_unregister(ret);
8168 unlock_user(p, arg1, 0);
8169 break;
8170 #endif
8171 #ifdef TARGET_NR_link
8172 case TARGET_NR_link:
8174 void * p2;
8175 p = lock_user_string(arg1);
8176 p2 = lock_user_string(arg2);
8177 if (!p || !p2)
8178 ret = -TARGET_EFAULT;
8179 else
8180 ret = get_errno(link(p, p2));
8181 unlock_user(p2, arg2, 0);
8182 unlock_user(p, arg1, 0);
8184 break;
8185 #endif
8186 #if defined(TARGET_NR_linkat)
8187 case TARGET_NR_linkat:
8189 void * p2 = NULL;
8190 if (!arg2 || !arg4)
8191 goto efault;
8192 p = lock_user_string(arg2);
8193 p2 = lock_user_string(arg4);
8194 if (!p || !p2)
8195 ret = -TARGET_EFAULT;
8196 else
8197 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8198 unlock_user(p, arg2, 0);
8199 unlock_user(p2, arg4, 0);
8201 break;
8202 #endif
8203 #ifdef TARGET_NR_unlink
8204 case TARGET_NR_unlink:
8205 if (!(p = lock_user_string(arg1)))
8206 goto efault;
8207 ret = get_errno(unlink(p));
8208 unlock_user(p, arg1, 0);
8209 break;
8210 #endif
8211 #if defined(TARGET_NR_unlinkat)
8212 case TARGET_NR_unlinkat:
8213 if (!(p = lock_user_string(arg2)))
8214 goto efault;
8215 ret = get_errno(unlinkat(arg1, p, arg3));
8216 unlock_user(p, arg2, 0);
8217 break;
8218 #endif
8219 case TARGET_NR_execve:
8221 char **argp, **envp;
8222 int argc, envc;
8223 abi_ulong gp;
8224 abi_ulong guest_argp;
8225 abi_ulong guest_envp;
8226 abi_ulong addr;
8227 char **q;
8228 int total_size = 0;
8230 argc = 0;
8231 guest_argp = arg2;
8232 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8233 if (get_user_ual(addr, gp))
8234 goto efault;
8235 if (!addr)
8236 break;
8237 argc++;
8239 envc = 0;
8240 guest_envp = arg3;
8241 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8242 if (get_user_ual(addr, gp))
8243 goto efault;
8244 if (!addr)
8245 break;
8246 envc++;
8249 argp = g_new0(char *, argc + 1);
8250 envp = g_new0(char *, envc + 1);
8252 for (gp = guest_argp, q = argp; gp;
8253 gp += sizeof(abi_ulong), q++) {
8254 if (get_user_ual(addr, gp))
8255 goto execve_efault;
8256 if (!addr)
8257 break;
8258 if (!(*q = lock_user_string(addr)))
8259 goto execve_efault;
8260 total_size += strlen(*q) + 1;
8262 *q = NULL;
8264 for (gp = guest_envp, q = envp; gp;
8265 gp += sizeof(abi_ulong), q++) {
8266 if (get_user_ual(addr, gp))
8267 goto execve_efault;
8268 if (!addr)
8269 break;
8270 if (!(*q = lock_user_string(addr)))
8271 goto execve_efault;
8272 total_size += strlen(*q) + 1;
8274 *q = NULL;
8276 if (!(p = lock_user_string(arg1)))
8277 goto execve_efault;
8278 /* Although execve() is not an interruptible syscall it is
8279 * a special case where we must use the safe_syscall wrapper:
8280 * if we allow a signal to happen before we make the host
8281 * syscall then we will 'lose' it, because at the point of
8282 * execve the process leaves QEMU's control. So we use the
8283 * safe syscall wrapper to ensure that we either take the
8284 * signal as a guest signal, or else it does not happen
8285 * before the execve completes and makes it the other
8286 * program's problem.
8288 ret = get_errno(safe_execve(p, argp, envp));
8289 unlock_user(p, arg1, 0);
8291 goto execve_end;
8293 execve_efault:
8294 ret = -TARGET_EFAULT;
8296 execve_end:
8297 for (gp = guest_argp, q = argp; *q;
8298 gp += sizeof(abi_ulong), q++) {
8299 if (get_user_ual(addr, gp)
8300 || !addr)
8301 break;
8302 unlock_user(*q, addr, 0);
8304 for (gp = guest_envp, q = envp; *q;
8305 gp += sizeof(abi_ulong), q++) {
8306 if (get_user_ual(addr, gp)
8307 || !addr)
8308 break;
8309 unlock_user(*q, addr, 0);
8312 g_free(argp);
8313 g_free(envp);
8315 break;
8316 case TARGET_NR_chdir:
8317 if (!(p = lock_user_string(arg1)))
8318 goto efault;
8319 ret = get_errno(chdir(p));
8320 unlock_user(p, arg1, 0);
8321 break;
8322 #ifdef TARGET_NR_time
8323 case TARGET_NR_time:
8325 time_t host_time;
8326 ret = get_errno(time(&host_time));
8327 if (!is_error(ret)
8328 && arg1
8329 && put_user_sal(host_time, arg1))
8330 goto efault;
8332 break;
8333 #endif
8334 #ifdef TARGET_NR_mknod
8335 case TARGET_NR_mknod:
8336 if (!(p = lock_user_string(arg1)))
8337 goto efault;
8338 ret = get_errno(mknod(p, arg2, arg3));
8339 unlock_user(p, arg1, 0);
8340 break;
8341 #endif
8342 #if defined(TARGET_NR_mknodat)
8343 case TARGET_NR_mknodat:
8344 if (!(p = lock_user_string(arg2)))
8345 goto efault;
8346 ret = get_errno(mknodat(arg1, p, arg3, arg4));
8347 unlock_user(p, arg2, 0);
8348 break;
8349 #endif
8350 #ifdef TARGET_NR_chmod
8351 case TARGET_NR_chmod:
8352 if (!(p = lock_user_string(arg1)))
8353 goto efault;
8354 ret = get_errno(chmod(p, arg2));
8355 unlock_user(p, arg1, 0);
8356 break;
8357 #endif
8358 #ifdef TARGET_NR_break
8359 case TARGET_NR_break:
8360 goto unimplemented;
8361 #endif
8362 #ifdef TARGET_NR_oldstat
8363 case TARGET_NR_oldstat:
8364 goto unimplemented;
8365 #endif
8366 case TARGET_NR_lseek:
8367 ret = get_errno(lseek(arg1, arg2, arg3));
8368 break;
8369 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8370 /* Alpha specific */
8371 case TARGET_NR_getxpid:
8372 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8373 ret = get_errno(getpid());
8374 break;
8375 #endif
8376 #ifdef TARGET_NR_getpid
8377 case TARGET_NR_getpid:
8378 ret = get_errno(getpid());
8379 break;
8380 #endif
8381 case TARGET_NR_mount:
8383 /* need to look at the data field */
8384 void *p2, *p3;
8386 if (arg1) {
8387 p = lock_user_string(arg1);
8388 if (!p) {
8389 goto efault;
8391 } else {
8392 p = NULL;
8395 p2 = lock_user_string(arg2);
8396 if (!p2) {
8397 if (arg1) {
8398 unlock_user(p, arg1, 0);
8400 goto efault;
8403 if (arg3) {
8404 p3 = lock_user_string(arg3);
8405 if (!p3) {
8406 if (arg1) {
8407 unlock_user(p, arg1, 0);
8409 unlock_user(p2, arg2, 0);
8410 goto efault;
8412 } else {
8413 p3 = NULL;
8416 /* FIXME - arg5 should be locked, but it isn't clear how to
8417 * do that since it's not guaranteed to be a NULL-terminated
8418 * string.
8420 if (!arg5) {
8421 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8422 } else {
8423 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8425 ret = get_errno(ret);
8427 if (arg1) {
8428 unlock_user(p, arg1, 0);
8430 unlock_user(p2, arg2, 0);
8431 if (arg3) {
8432 unlock_user(p3, arg3, 0);
8435 break;
8436 #ifdef TARGET_NR_umount
8437 case TARGET_NR_umount:
8438 if (!(p = lock_user_string(arg1)))
8439 goto efault;
8440 ret = get_errno(umount(p));
8441 unlock_user(p, arg1, 0);
8442 break;
8443 #endif
8444 #ifdef TARGET_NR_stime /* not on alpha */
8445 case TARGET_NR_stime:
8447 time_t host_time;
8448 if (get_user_sal(host_time, arg1))
8449 goto efault;
8450 ret = get_errno(stime(&host_time));
8452 break;
8453 #endif
8454 case TARGET_NR_ptrace:
8455 goto unimplemented;
8456 #ifdef TARGET_NR_alarm /* not on alpha */
8457 case TARGET_NR_alarm:
8458 ret = alarm(arg1);
8459 break;
8460 #endif
8461 #ifdef TARGET_NR_oldfstat
8462 case TARGET_NR_oldfstat:
8463 goto unimplemented;
8464 #endif
8465 #ifdef TARGET_NR_pause /* not on alpha */
8466 case TARGET_NR_pause:
8467 if (!block_signals()) {
8468 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8470 ret = -TARGET_EINTR;
8471 break;
8472 #endif
8473 #ifdef TARGET_NR_utime
8474 case TARGET_NR_utime:
8476 struct utimbuf tbuf, *host_tbuf;
8477 struct target_utimbuf *target_tbuf;
8478 if (arg2) {
8479 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8480 goto efault;
8481 tbuf.actime = tswapal(target_tbuf->actime);
8482 tbuf.modtime = tswapal(target_tbuf->modtime);
8483 unlock_user_struct(target_tbuf, arg2, 0);
8484 host_tbuf = &tbuf;
8485 } else {
8486 host_tbuf = NULL;
8488 if (!(p = lock_user_string(arg1)))
8489 goto efault;
8490 ret = get_errno(utime(p, host_tbuf));
8491 unlock_user(p, arg1, 0);
8493 break;
8494 #endif
8495 #ifdef TARGET_NR_utimes
8496 case TARGET_NR_utimes:
8498 struct timeval *tvp, tv[2];
8499 if (arg2) {
8500 if (copy_from_user_timeval(&tv[0], arg2)
8501 || copy_from_user_timeval(&tv[1],
8502 arg2 + sizeof(struct target_timeval)))
8503 goto efault;
8504 tvp = tv;
8505 } else {
8506 tvp = NULL;
8508 if (!(p = lock_user_string(arg1)))
8509 goto efault;
8510 ret = get_errno(utimes(p, tvp));
8511 unlock_user(p, arg1, 0);
8513 break;
8514 #endif
8515 #if defined(TARGET_NR_futimesat)
8516 case TARGET_NR_futimesat:
8518 struct timeval *tvp, tv[2];
8519 if (arg3) {
8520 if (copy_from_user_timeval(&tv[0], arg3)
8521 || copy_from_user_timeval(&tv[1],
8522 arg3 + sizeof(struct target_timeval)))
8523 goto efault;
8524 tvp = tv;
8525 } else {
8526 tvp = NULL;
8528 if (!(p = lock_user_string(arg2)))
8529 goto efault;
8530 ret = get_errno(futimesat(arg1, path(p), tvp));
8531 unlock_user(p, arg2, 0);
8533 break;
8534 #endif
8535 #ifdef TARGET_NR_stty
8536 case TARGET_NR_stty:
8537 goto unimplemented;
8538 #endif
8539 #ifdef TARGET_NR_gtty
8540 case TARGET_NR_gtty:
8541 goto unimplemented;
8542 #endif
8543 #ifdef TARGET_NR_access
8544 case TARGET_NR_access:
8545 if (!(p = lock_user_string(arg1)))
8546 goto efault;
8547 ret = get_errno(access(path(p), arg2));
8548 unlock_user(p, arg1, 0);
8549 break;
8550 #endif
8551 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8552 case TARGET_NR_faccessat:
8553 if (!(p = lock_user_string(arg2)))
8554 goto efault;
8555 ret = get_errno(faccessat(arg1, p, arg3, 0));
8556 unlock_user(p, arg2, 0);
8557 break;
8558 #endif
8559 #ifdef TARGET_NR_nice /* not on alpha */
8560 case TARGET_NR_nice:
8561 ret = get_errno(nice(arg1));
8562 break;
8563 #endif
8564 #ifdef TARGET_NR_ftime
8565 case TARGET_NR_ftime:
8566 goto unimplemented;
8567 #endif
8568 case TARGET_NR_sync:
8569 sync();
8570 ret = 0;
8571 break;
8572 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8573 case TARGET_NR_syncfs:
8574 ret = get_errno(syncfs(arg1));
8575 break;
8576 #endif
8577 case TARGET_NR_kill:
8578 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8579 break;
8580 #ifdef TARGET_NR_rename
8581 case TARGET_NR_rename:
8583 void *p2;
8584 p = lock_user_string(arg1);
8585 p2 = lock_user_string(arg2);
8586 if (!p || !p2)
8587 ret = -TARGET_EFAULT;
8588 else
8589 ret = get_errno(rename(p, p2));
8590 unlock_user(p2, arg2, 0);
8591 unlock_user(p, arg1, 0);
8593 break;
8594 #endif
8595 #if defined(TARGET_NR_renameat)
8596 case TARGET_NR_renameat:
8598 void *p2;
8599 p = lock_user_string(arg2);
8600 p2 = lock_user_string(arg4);
8601 if (!p || !p2)
8602 ret = -TARGET_EFAULT;
8603 else
8604 ret = get_errno(renameat(arg1, p, arg3, p2));
8605 unlock_user(p2, arg4, 0);
8606 unlock_user(p, arg2, 0);
8608 break;
8609 #endif
8610 #if defined(TARGET_NR_renameat2)
8611 case TARGET_NR_renameat2:
8613 void *p2;
8614 p = lock_user_string(arg2);
8615 p2 = lock_user_string(arg4);
8616 if (!p || !p2) {
8617 ret = -TARGET_EFAULT;
8618 } else {
8619 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8621 unlock_user(p2, arg4, 0);
8622 unlock_user(p, arg2, 0);
8624 break;
8625 #endif
8626 #ifdef TARGET_NR_mkdir
8627 case TARGET_NR_mkdir:
8628 if (!(p = lock_user_string(arg1)))
8629 goto efault;
8630 ret = get_errno(mkdir(p, arg2));
8631 unlock_user(p, arg1, 0);
8632 break;
8633 #endif
8634 #if defined(TARGET_NR_mkdirat)
8635 case TARGET_NR_mkdirat:
8636 if (!(p = lock_user_string(arg2)))
8637 goto efault;
8638 ret = get_errno(mkdirat(arg1, p, arg3));
8639 unlock_user(p, arg2, 0);
8640 break;
8641 #endif
8642 #ifdef TARGET_NR_rmdir
8643 case TARGET_NR_rmdir:
8644 if (!(p = lock_user_string(arg1)))
8645 goto efault;
8646 ret = get_errno(rmdir(p));
8647 unlock_user(p, arg1, 0);
8648 break;
8649 #endif
8650 case TARGET_NR_dup:
8651 ret = get_errno(dup(arg1));
8652 if (ret >= 0) {
8653 fd_trans_dup(arg1, ret);
8655 break;
8656 #ifdef TARGET_NR_pipe
8657 case TARGET_NR_pipe:
8658 ret = do_pipe(cpu_env, arg1, 0, 0);
8659 break;
8660 #endif
8661 #ifdef TARGET_NR_pipe2
8662 case TARGET_NR_pipe2:
8663 ret = do_pipe(cpu_env, arg1,
8664 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8665 break;
8666 #endif
8667 case TARGET_NR_times:
8669 struct target_tms *tmsp;
8670 struct tms tms;
8671 ret = get_errno(times(&tms));
8672 if (arg1) {
8673 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8674 if (!tmsp)
8675 goto efault;
8676 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8677 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8678 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8679 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8681 if (!is_error(ret))
8682 ret = host_to_target_clock_t(ret);
8684 break;
8685 #ifdef TARGET_NR_prof
8686 case TARGET_NR_prof:
8687 goto unimplemented;
8688 #endif
8689 #ifdef TARGET_NR_signal
8690 case TARGET_NR_signal:
8691 goto unimplemented;
8692 #endif
8693 case TARGET_NR_acct:
8694 if (arg1 == 0) {
8695 ret = get_errno(acct(NULL));
8696 } else {
8697 if (!(p = lock_user_string(arg1)))
8698 goto efault;
8699 ret = get_errno(acct(path(p)));
8700 unlock_user(p, arg1, 0);
8702 break;
8703 #ifdef TARGET_NR_umount2
8704 case TARGET_NR_umount2:
8705 if (!(p = lock_user_string(arg1)))
8706 goto efault;
8707 ret = get_errno(umount2(p, arg2));
8708 unlock_user(p, arg1, 0);
8709 break;
8710 #endif
8711 #ifdef TARGET_NR_lock
8712 case TARGET_NR_lock:
8713 goto unimplemented;
8714 #endif
8715 case TARGET_NR_ioctl:
8716 ret = do_ioctl(arg1, arg2, arg3);
8717 break;
8718 #ifdef TARGET_NR_fcntl
8719 case TARGET_NR_fcntl:
8720 ret = do_fcntl(arg1, arg2, arg3);
8721 break;
8722 #endif
8723 #ifdef TARGET_NR_mpx
8724 case TARGET_NR_mpx:
8725 goto unimplemented;
8726 #endif
8727 case TARGET_NR_setpgid:
8728 ret = get_errno(setpgid(arg1, arg2));
8729 break;
8730 #ifdef TARGET_NR_ulimit
8731 case TARGET_NR_ulimit:
8732 goto unimplemented;
8733 #endif
8734 #ifdef TARGET_NR_oldolduname
8735 case TARGET_NR_oldolduname:
8736 goto unimplemented;
8737 #endif
8738 case TARGET_NR_umask:
8739 ret = get_errno(umask(arg1));
8740 break;
8741 case TARGET_NR_chroot:
8742 if (!(p = lock_user_string(arg1)))
8743 goto efault;
8744 ret = get_errno(chroot(p));
8745 unlock_user(p, arg1, 0);
8746 break;
8747 #ifdef TARGET_NR_ustat
8748 case TARGET_NR_ustat:
8749 goto unimplemented;
8750 #endif
8751 #ifdef TARGET_NR_dup2
8752 case TARGET_NR_dup2:
8753 ret = get_errno(dup2(arg1, arg2));
8754 if (ret >= 0) {
8755 fd_trans_dup(arg1, arg2);
8757 break;
8758 #endif
8759 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8760 case TARGET_NR_dup3:
8762 int host_flags;
8764 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8765 return -EINVAL;
8767 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8768 ret = get_errno(dup3(arg1, arg2, host_flags));
8769 if (ret >= 0) {
8770 fd_trans_dup(arg1, arg2);
8772 break;
8774 #endif
8775 #ifdef TARGET_NR_getppid /* not on alpha */
8776 case TARGET_NR_getppid:
8777 ret = get_errno(getppid());
8778 break;
8779 #endif
8780 #ifdef TARGET_NR_getpgrp
8781 case TARGET_NR_getpgrp:
8782 ret = get_errno(getpgrp());
8783 break;
8784 #endif
8785 case TARGET_NR_setsid:
8786 ret = get_errno(setsid());
8787 break;
8788 #ifdef TARGET_NR_sigaction
8789 case TARGET_NR_sigaction:
8791 #if defined(TARGET_ALPHA)
8792 struct target_sigaction act, oact, *pact = 0;
8793 struct target_old_sigaction *old_act;
8794 if (arg2) {
8795 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8796 goto efault;
8797 act._sa_handler = old_act->_sa_handler;
8798 target_siginitset(&act.sa_mask, old_act->sa_mask);
8799 act.sa_flags = old_act->sa_flags;
8800 act.sa_restorer = 0;
8801 unlock_user_struct(old_act, arg2, 0);
8802 pact = &act;
8804 ret = get_errno(do_sigaction(arg1, pact, &oact));
8805 if (!is_error(ret) && arg3) {
8806 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8807 goto efault;
8808 old_act->_sa_handler = oact._sa_handler;
8809 old_act->sa_mask = oact.sa_mask.sig[0];
8810 old_act->sa_flags = oact.sa_flags;
8811 unlock_user_struct(old_act, arg3, 1);
8813 #elif defined(TARGET_MIPS)
8814 struct target_sigaction act, oact, *pact, *old_act;
8816 if (arg2) {
8817 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8818 goto efault;
8819 act._sa_handler = old_act->_sa_handler;
8820 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8821 act.sa_flags = old_act->sa_flags;
8822 unlock_user_struct(old_act, arg2, 0);
8823 pact = &act;
8824 } else {
8825 pact = NULL;
8828 ret = get_errno(do_sigaction(arg1, pact, &oact));
8830 if (!is_error(ret) && arg3) {
8831 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8832 goto efault;
8833 old_act->_sa_handler = oact._sa_handler;
8834 old_act->sa_flags = oact.sa_flags;
8835 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8836 old_act->sa_mask.sig[1] = 0;
8837 old_act->sa_mask.sig[2] = 0;
8838 old_act->sa_mask.sig[3] = 0;
8839 unlock_user_struct(old_act, arg3, 1);
8841 #else
8842 struct target_old_sigaction *old_act;
8843 struct target_sigaction act, oact, *pact;
8844 if (arg2) {
8845 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8846 goto efault;
8847 act._sa_handler = old_act->_sa_handler;
8848 target_siginitset(&act.sa_mask, old_act->sa_mask);
8849 act.sa_flags = old_act->sa_flags;
8850 act.sa_restorer = old_act->sa_restorer;
8851 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8852 act.ka_restorer = 0;
8853 #endif
8854 unlock_user_struct(old_act, arg2, 0);
8855 pact = &act;
8856 } else {
8857 pact = NULL;
8859 ret = get_errno(do_sigaction(arg1, pact, &oact));
8860 if (!is_error(ret) && arg3) {
8861 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8862 goto efault;
8863 old_act->_sa_handler = oact._sa_handler;
8864 old_act->sa_mask = oact.sa_mask.sig[0];
8865 old_act->sa_flags = oact.sa_flags;
8866 old_act->sa_restorer = oact.sa_restorer;
8867 unlock_user_struct(old_act, arg3, 1);
8869 #endif
8871 break;
8872 #endif
8873 case TARGET_NR_rt_sigaction:
8875 #if defined(TARGET_ALPHA)
8876 /* For Alpha and SPARC this is a 5 argument syscall, with
8877 * a 'restorer' parameter which must be copied into the
8878 * sa_restorer field of the sigaction struct.
8879 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8880 * and arg5 is the sigsetsize.
8881 * Alpha also has a separate rt_sigaction struct that it uses
8882 * here; SPARC uses the usual sigaction struct.
8884 struct target_rt_sigaction *rt_act;
8885 struct target_sigaction act, oact, *pact = 0;
8887 if (arg4 != sizeof(target_sigset_t)) {
8888 ret = -TARGET_EINVAL;
8889 break;
8891 if (arg2) {
8892 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8893 goto efault;
8894 act._sa_handler = rt_act->_sa_handler;
8895 act.sa_mask = rt_act->sa_mask;
8896 act.sa_flags = rt_act->sa_flags;
8897 act.sa_restorer = arg5;
8898 unlock_user_struct(rt_act, arg2, 0);
8899 pact = &act;
8901 ret = get_errno(do_sigaction(arg1, pact, &oact));
8902 if (!is_error(ret) && arg3) {
8903 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8904 goto efault;
8905 rt_act->_sa_handler = oact._sa_handler;
8906 rt_act->sa_mask = oact.sa_mask;
8907 rt_act->sa_flags = oact.sa_flags;
8908 unlock_user_struct(rt_act, arg3, 1);
8910 #else
8911 #ifdef TARGET_SPARC
8912 target_ulong restorer = arg4;
8913 target_ulong sigsetsize = arg5;
8914 #else
8915 target_ulong sigsetsize = arg4;
8916 #endif
8917 struct target_sigaction *act;
8918 struct target_sigaction *oact;
8920 if (sigsetsize != sizeof(target_sigset_t)) {
8921 ret = -TARGET_EINVAL;
8922 break;
8924 if (arg2) {
8925 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8926 goto efault;
8928 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8929 act->ka_restorer = restorer;
8930 #endif
8931 } else {
8932 act = NULL;
8934 if (arg3) {
8935 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8936 ret = -TARGET_EFAULT;
8937 goto rt_sigaction_fail;
8939 } else
8940 oact = NULL;
8941 ret = get_errno(do_sigaction(arg1, act, oact));
8942 rt_sigaction_fail:
8943 if (act)
8944 unlock_user_struct(act, arg2, 0);
8945 if (oact)
8946 unlock_user_struct(oact, arg3, 1);
8947 #endif
8949 break;
8950 #ifdef TARGET_NR_sgetmask /* not on alpha */
8951 case TARGET_NR_sgetmask:
8953 sigset_t cur_set;
8954 abi_ulong target_set;
8955 ret = do_sigprocmask(0, NULL, &cur_set);
8956 if (!ret) {
8957 host_to_target_old_sigset(&target_set, &cur_set);
8958 ret = target_set;
8961 break;
8962 #endif
8963 #ifdef TARGET_NR_ssetmask /* not on alpha */
8964 case TARGET_NR_ssetmask:
8966 sigset_t set, oset;
8967 abi_ulong target_set = arg1;
8968 target_to_host_old_sigset(&set, &target_set);
8969 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8970 if (!ret) {
8971 host_to_target_old_sigset(&target_set, &oset);
8972 ret = target_set;
8975 break;
8976 #endif
8977 #ifdef TARGET_NR_sigprocmask
8978 case TARGET_NR_sigprocmask:
8980 #if defined(TARGET_ALPHA)
8981 sigset_t set, oldset;
8982 abi_ulong mask;
8983 int how;
8985 switch (arg1) {
8986 case TARGET_SIG_BLOCK:
8987 how = SIG_BLOCK;
8988 break;
8989 case TARGET_SIG_UNBLOCK:
8990 how = SIG_UNBLOCK;
8991 break;
8992 case TARGET_SIG_SETMASK:
8993 how = SIG_SETMASK;
8994 break;
8995 default:
8996 ret = -TARGET_EINVAL;
8997 goto fail;
8999 mask = arg2;
9000 target_to_host_old_sigset(&set, &mask);
9002 ret = do_sigprocmask(how, &set, &oldset);
9003 if (!is_error(ret)) {
9004 host_to_target_old_sigset(&mask, &oldset);
9005 ret = mask;
9006 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9008 #else
9009 sigset_t set, oldset, *set_ptr;
9010 int how;
9012 if (arg2) {
9013 switch (arg1) {
9014 case TARGET_SIG_BLOCK:
9015 how = SIG_BLOCK;
9016 break;
9017 case TARGET_SIG_UNBLOCK:
9018 how = SIG_UNBLOCK;
9019 break;
9020 case TARGET_SIG_SETMASK:
9021 how = SIG_SETMASK;
9022 break;
9023 default:
9024 ret = -TARGET_EINVAL;
9025 goto fail;
9027 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9028 goto efault;
9029 target_to_host_old_sigset(&set, p);
9030 unlock_user(p, arg2, 0);
9031 set_ptr = &set;
9032 } else {
9033 how = 0;
9034 set_ptr = NULL;
9036 ret = do_sigprocmask(how, set_ptr, &oldset);
9037 if (!is_error(ret) && arg3) {
9038 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9039 goto efault;
9040 host_to_target_old_sigset(p, &oldset);
9041 unlock_user(p, arg3, sizeof(target_sigset_t));
9043 #endif
9045 break;
9046 #endif
9047 case TARGET_NR_rt_sigprocmask:
9049 int how = arg1;
9050 sigset_t set, oldset, *set_ptr;
9052 if (arg4 != sizeof(target_sigset_t)) {
9053 ret = -TARGET_EINVAL;
9054 break;
9057 if (arg2) {
9058 switch(how) {
9059 case TARGET_SIG_BLOCK:
9060 how = SIG_BLOCK;
9061 break;
9062 case TARGET_SIG_UNBLOCK:
9063 how = SIG_UNBLOCK;
9064 break;
9065 case TARGET_SIG_SETMASK:
9066 how = SIG_SETMASK;
9067 break;
9068 default:
9069 ret = -TARGET_EINVAL;
9070 goto fail;
9072 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9073 goto efault;
9074 target_to_host_sigset(&set, p);
9075 unlock_user(p, arg2, 0);
9076 set_ptr = &set;
9077 } else {
9078 how = 0;
9079 set_ptr = NULL;
9081 ret = do_sigprocmask(how, set_ptr, &oldset);
9082 if (!is_error(ret) && arg3) {
9083 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9084 goto efault;
9085 host_to_target_sigset(p, &oldset);
9086 unlock_user(p, arg3, sizeof(target_sigset_t));
9089 break;
9090 #ifdef TARGET_NR_sigpending
9091 case TARGET_NR_sigpending:
9093 sigset_t set;
9094 ret = get_errno(sigpending(&set));
9095 if (!is_error(ret)) {
9096 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9097 goto efault;
9098 host_to_target_old_sigset(p, &set);
9099 unlock_user(p, arg1, sizeof(target_sigset_t));
9102 break;
9103 #endif
9104 case TARGET_NR_rt_sigpending:
9106 sigset_t set;
9108 /* Yes, this check is >, not != like most. We follow the kernel's
9109 * logic and it does it like this because it implements
9110 * NR_sigpending through the same code path, and in that case
9111 * the old_sigset_t is smaller in size.
9113 if (arg2 > sizeof(target_sigset_t)) {
9114 ret = -TARGET_EINVAL;
9115 break;
9118 ret = get_errno(sigpending(&set));
9119 if (!is_error(ret)) {
9120 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9121 goto efault;
9122 host_to_target_sigset(p, &set);
9123 unlock_user(p, arg1, sizeof(target_sigset_t));
9126 break;
9127 #ifdef TARGET_NR_sigsuspend
9128 case TARGET_NR_sigsuspend:
9130 TaskState *ts = cpu->opaque;
9131 #if defined(TARGET_ALPHA)
9132 abi_ulong mask = arg1;
9133 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9134 #else
9135 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9136 goto efault;
9137 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9138 unlock_user(p, arg1, 0);
9139 #endif
9140 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9141 SIGSET_T_SIZE));
9142 if (ret != -TARGET_ERESTARTSYS) {
9143 ts->in_sigsuspend = 1;
9146 break;
9147 #endif
9148 case TARGET_NR_rt_sigsuspend:
9150 TaskState *ts = cpu->opaque;
9152 if (arg2 != sizeof(target_sigset_t)) {
9153 ret = -TARGET_EINVAL;
9154 break;
9156 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9157 goto efault;
9158 target_to_host_sigset(&ts->sigsuspend_mask, p);
9159 unlock_user(p, arg1, 0);
9160 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9161 SIGSET_T_SIZE));
9162 if (ret != -TARGET_ERESTARTSYS) {
9163 ts->in_sigsuspend = 1;
9166 break;
9167 case TARGET_NR_rt_sigtimedwait:
9169 sigset_t set;
9170 struct timespec uts, *puts;
9171 siginfo_t uinfo;
9173 if (arg4 != sizeof(target_sigset_t)) {
9174 ret = -TARGET_EINVAL;
9175 break;
9178 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9179 goto efault;
9180 target_to_host_sigset(&set, p);
9181 unlock_user(p, arg1, 0);
9182 if (arg3) {
9183 puts = &uts;
9184 target_to_host_timespec(puts, arg3);
9185 } else {
9186 puts = NULL;
9188 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9189 SIGSET_T_SIZE));
9190 if (!is_error(ret)) {
9191 if (arg2) {
9192 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9194 if (!p) {
9195 goto efault;
9197 host_to_target_siginfo(p, &uinfo);
9198 unlock_user(p, arg2, sizeof(target_siginfo_t));
9200 ret = host_to_target_signal(ret);
9203 break;
9204 case TARGET_NR_rt_sigqueueinfo:
9206 siginfo_t uinfo;
9208 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9209 if (!p) {
9210 goto efault;
9212 target_to_host_siginfo(&uinfo, p);
9213 unlock_user(p, arg3, 0);
9214 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9216 break;
9217 case TARGET_NR_rt_tgsigqueueinfo:
9219 siginfo_t uinfo;
9221 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9222 if (!p) {
9223 goto efault;
9225 target_to_host_siginfo(&uinfo, p);
9226 unlock_user(p, arg4, 0);
9227 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9229 break;
9230 #ifdef TARGET_NR_sigreturn
9231 case TARGET_NR_sigreturn:
9232 if (block_signals()) {
9233 ret = -TARGET_ERESTARTSYS;
9234 } else {
9235 ret = do_sigreturn(cpu_env);
9237 break;
9238 #endif
9239 case TARGET_NR_rt_sigreturn:
9240 if (block_signals()) {
9241 ret = -TARGET_ERESTARTSYS;
9242 } else {
9243 ret = do_rt_sigreturn(cpu_env);
9245 break;
9246 case TARGET_NR_sethostname:
9247 if (!(p = lock_user_string(arg1)))
9248 goto efault;
9249 ret = get_errno(sethostname(p, arg2));
9250 unlock_user(p, arg1, 0);
9251 break;
9252 case TARGET_NR_setrlimit:
9254 int resource = target_to_host_resource(arg1);
9255 struct target_rlimit *target_rlim;
9256 struct rlimit rlim;
9257 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9258 goto efault;
9259 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9260 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9261 unlock_user_struct(target_rlim, arg2, 0);
9262 ret = get_errno(setrlimit(resource, &rlim));
9264 break;
9265 case TARGET_NR_getrlimit:
9267 int resource = target_to_host_resource(arg1);
9268 struct target_rlimit *target_rlim;
9269 struct rlimit rlim;
9271 ret = get_errno(getrlimit(resource, &rlim));
9272 if (!is_error(ret)) {
9273 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9274 goto efault;
9275 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9276 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9277 unlock_user_struct(target_rlim, arg2, 1);
9280 break;
9281 case TARGET_NR_getrusage:
9283 struct rusage rusage;
9284 ret = get_errno(getrusage(arg1, &rusage));
9285 if (!is_error(ret)) {
9286 ret = host_to_target_rusage(arg2, &rusage);
9289 break;
9290 case TARGET_NR_gettimeofday:
9292 struct timeval tv;
9293 ret = get_errno(gettimeofday(&tv, NULL));
9294 if (!is_error(ret)) {
9295 if (copy_to_user_timeval(arg1, &tv))
9296 goto efault;
9299 break;
9300 case TARGET_NR_settimeofday:
9302 struct timeval tv, *ptv = NULL;
9303 struct timezone tz, *ptz = NULL;
9305 if (arg1) {
9306 if (copy_from_user_timeval(&tv, arg1)) {
9307 goto efault;
9309 ptv = &tv;
9312 if (arg2) {
9313 if (copy_from_user_timezone(&tz, arg2)) {
9314 goto efault;
9316 ptz = &tz;
9319 ret = get_errno(settimeofday(ptv, ptz));
9321 break;
9322 #if defined(TARGET_NR_select)
9323 case TARGET_NR_select:
9324 #if defined(TARGET_WANT_NI_OLD_SELECT)
9325 /* some architectures used to have old_select here
9326 * but now ENOSYS it.
9328 ret = -TARGET_ENOSYS;
9329 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9330 ret = do_old_select(arg1);
9331 #else
9332 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9333 #endif
9334 break;
9335 #endif
9336 #ifdef TARGET_NR_pselect6
9337 case TARGET_NR_pselect6:
9339 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
9340 fd_set rfds, wfds, efds;
9341 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
9342 struct timespec ts, *ts_ptr;
9345 * The 6th arg is actually two args smashed together,
9346 * so we cannot use the C library.
9348 sigset_t set;
9349 struct {
9350 sigset_t *set;
9351 size_t size;
9352 } sig, *sig_ptr;
9354 abi_ulong arg_sigset, arg_sigsize, *arg7;
9355 target_sigset_t *target_sigset;
9357 n = arg1;
9358 rfd_addr = arg2;
9359 wfd_addr = arg3;
9360 efd_addr = arg4;
9361 ts_addr = arg5;
9363 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
9364 if (ret) {
9365 goto fail;
9367 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
9368 if (ret) {
9369 goto fail;
9371 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
9372 if (ret) {
9373 goto fail;
9377 * This takes a timespec, and not a timeval, so we cannot
9378 * use the do_select() helper ...
9380 if (ts_addr) {
9381 if (target_to_host_timespec(&ts, ts_addr)) {
9382 goto efault;
9384 ts_ptr = &ts;
9385 } else {
9386 ts_ptr = NULL;
9389 /* Extract the two packed args for the sigset */
9390 if (arg6) {
9391 sig_ptr = &sig;
9392 sig.size = SIGSET_T_SIZE;
9394 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
9395 if (!arg7) {
9396 goto efault;
9398 arg_sigset = tswapal(arg7[0]);
9399 arg_sigsize = tswapal(arg7[1]);
9400 unlock_user(arg7, arg6, 0);
9402 if (arg_sigset) {
9403 sig.set = &set;
9404 if (arg_sigsize != sizeof(*target_sigset)) {
9405 /* Like the kernel, we enforce correct size sigsets */
9406 ret = -TARGET_EINVAL;
9407 goto fail;
9409 target_sigset = lock_user(VERIFY_READ, arg_sigset,
9410 sizeof(*target_sigset), 1);
9411 if (!target_sigset) {
9412 goto efault;
9414 target_to_host_sigset(&set, target_sigset);
9415 unlock_user(target_sigset, arg_sigset, 0);
9416 } else {
9417 sig.set = NULL;
9419 } else {
9420 sig_ptr = NULL;
9423 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
9424 ts_ptr, sig_ptr));
9426 if (!is_error(ret)) {
9427 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
9428 goto efault;
9429 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
9430 goto efault;
9431 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
9432 goto efault;
9434 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
9435 goto efault;
9438 break;
9439 #endif
9440 #ifdef TARGET_NR_symlink
9441 case TARGET_NR_symlink:
9443 void *p2;
9444 p = lock_user_string(arg1);
9445 p2 = lock_user_string(arg2);
9446 if (!p || !p2)
9447 ret = -TARGET_EFAULT;
9448 else
9449 ret = get_errno(symlink(p, p2));
9450 unlock_user(p2, arg2, 0);
9451 unlock_user(p, arg1, 0);
9453 break;
9454 #endif
9455 #if defined(TARGET_NR_symlinkat)
9456 case TARGET_NR_symlinkat:
9458 void *p2;
9459 p = lock_user_string(arg1);
9460 p2 = lock_user_string(arg3);
9461 if (!p || !p2)
9462 ret = -TARGET_EFAULT;
9463 else
9464 ret = get_errno(symlinkat(p, arg2, p2));
9465 unlock_user(p2, arg3, 0);
9466 unlock_user(p, arg1, 0);
9468 break;
9469 #endif
9470 #ifdef TARGET_NR_oldlstat
9471 case TARGET_NR_oldlstat:
9472 goto unimplemented;
9473 #endif
9474 #ifdef TARGET_NR_readlink
9475 case TARGET_NR_readlink:
9477 void *p2;
9478 p = lock_user_string(arg1);
9479 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9480 if (!p || !p2) {
9481 ret = -TARGET_EFAULT;
9482 } else if (!arg3) {
9483 /* Short circuit this for the magic exe check. */
9484 ret = -TARGET_EINVAL;
9485 } else if (is_proc_myself((const char *)p, "exe")) {
9486 char real[PATH_MAX], *temp;
9487 temp = realpath(exec_path, real);
9488 /* Return value is # of bytes that we wrote to the buffer. */
9489 if (temp == NULL) {
9490 ret = get_errno(-1);
9491 } else {
9492 /* Don't worry about sign mismatch as earlier mapping
9493 * logic would have thrown a bad address error. */
9494 ret = MIN(strlen(real), arg3);
9495 /* We cannot NUL terminate the string. */
9496 memcpy(p2, real, ret);
9498 } else {
9499 ret = get_errno(readlink(path(p), p2, arg3));
9501 unlock_user(p2, arg2, ret);
9502 unlock_user(p, arg1, 0);
9504 break;
9505 #endif
9506 #if defined(TARGET_NR_readlinkat)
9507 case TARGET_NR_readlinkat:
9509 void *p2;
9510 p = lock_user_string(arg2);
9511 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9512 if (!p || !p2) {
9513 ret = -TARGET_EFAULT;
9514 } else if (is_proc_myself((const char *)p, "exe")) {
9515 char real[PATH_MAX], *temp;
9516 temp = realpath(exec_path, real);
9517 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
9518 snprintf((char *)p2, arg4, "%s", real);
9519 } else {
9520 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9522 unlock_user(p2, arg3, ret);
9523 unlock_user(p, arg2, 0);
9525 break;
9526 #endif
9527 #ifdef TARGET_NR_uselib
9528 case TARGET_NR_uselib:
9529 goto unimplemented;
9530 #endif
9531 #ifdef TARGET_NR_swapon
9532 case TARGET_NR_swapon:
9533 if (!(p = lock_user_string(arg1)))
9534 goto efault;
9535 ret = get_errno(swapon(p, arg2));
9536 unlock_user(p, arg1, 0);
9537 break;
9538 #endif
9539 case TARGET_NR_reboot:
9540 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9541 /* arg4 must be ignored in all other cases */
9542 p = lock_user_string(arg4);
9543 if (!p) {
9544 goto efault;
9546 ret = get_errno(reboot(arg1, arg2, arg3, p));
9547 unlock_user(p, arg4, 0);
9548 } else {
9549 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9551 break;
9552 #ifdef TARGET_NR_readdir
9553 case TARGET_NR_readdir:
9554 goto unimplemented;
9555 #endif
9556 #ifdef TARGET_NR_mmap
9557 case TARGET_NR_mmap:
9558 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9559 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9560 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9561 || defined(TARGET_S390X)
9563 abi_ulong *v;
9564 abi_ulong v1, v2, v3, v4, v5, v6;
9565 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9566 goto efault;
9567 v1 = tswapal(v[0]);
9568 v2 = tswapal(v[1]);
9569 v3 = tswapal(v[2]);
9570 v4 = tswapal(v[3]);
9571 v5 = tswapal(v[4]);
9572 v6 = tswapal(v[5]);
9573 unlock_user(v, arg1, 0);
9574 ret = get_errno(target_mmap(v1, v2, v3,
9575 target_to_host_bitmask(v4, mmap_flags_tbl),
9576 v5, v6));
9578 #else
9579 ret = get_errno(target_mmap(arg1, arg2, arg3,
9580 target_to_host_bitmask(arg4, mmap_flags_tbl),
9581 arg5,
9582 arg6));
9583 #endif
9584 break;
9585 #endif
9586 #ifdef TARGET_NR_mmap2
9587 case TARGET_NR_mmap2:
9588 #ifndef MMAP_SHIFT
9589 #define MMAP_SHIFT 12
9590 #endif
9591 ret = get_errno(target_mmap(arg1, arg2, arg3,
9592 target_to_host_bitmask(arg4, mmap_flags_tbl),
9593 arg5,
9594 arg6 << MMAP_SHIFT));
9595 break;
9596 #endif
9597 case TARGET_NR_munmap:
9598 ret = get_errno(target_munmap(arg1, arg2));
9599 break;
9600 case TARGET_NR_mprotect:
9602 TaskState *ts = cpu->opaque;
9603 /* Special hack to detect libc making the stack executable. */
9604 if ((arg3 & PROT_GROWSDOWN)
9605 && arg1 >= ts->info->stack_limit
9606 && arg1 <= ts->info->start_stack) {
9607 arg3 &= ~PROT_GROWSDOWN;
9608 arg2 = arg2 + arg1 - ts->info->stack_limit;
9609 arg1 = ts->info->stack_limit;
9612 ret = get_errno(target_mprotect(arg1, arg2, arg3));
9613 break;
9614 #ifdef TARGET_NR_mremap
9615 case TARGET_NR_mremap:
9616 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9617 break;
9618 #endif
9619 /* ??? msync/mlock/munlock are broken for softmmu. */
9620 #ifdef TARGET_NR_msync
9621 case TARGET_NR_msync:
9622 ret = get_errno(msync(g2h(arg1), arg2, arg3));
9623 break;
9624 #endif
9625 #ifdef TARGET_NR_mlock
9626 case TARGET_NR_mlock:
9627 ret = get_errno(mlock(g2h(arg1), arg2));
9628 break;
9629 #endif
9630 #ifdef TARGET_NR_munlock
9631 case TARGET_NR_munlock:
9632 ret = get_errno(munlock(g2h(arg1), arg2));
9633 break;
9634 #endif
9635 #ifdef TARGET_NR_mlockall
9636 case TARGET_NR_mlockall:
9637 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9638 break;
9639 #endif
9640 #ifdef TARGET_NR_munlockall
9641 case TARGET_NR_munlockall:
9642 ret = get_errno(munlockall());
9643 break;
9644 #endif
9645 case TARGET_NR_truncate:
9646 if (!(p = lock_user_string(arg1)))
9647 goto efault;
9648 ret = get_errno(truncate(p, arg2));
9649 unlock_user(p, arg1, 0);
9650 break;
9651 case TARGET_NR_ftruncate:
9652 ret = get_errno(ftruncate(arg1, arg2));
9653 break;
9654 case TARGET_NR_fchmod:
9655 ret = get_errno(fchmod(arg1, arg2));
9656 break;
9657 #if defined(TARGET_NR_fchmodat)
9658 case TARGET_NR_fchmodat:
9659 if (!(p = lock_user_string(arg2)))
9660 goto efault;
9661 ret = get_errno(fchmodat(arg1, p, arg3, 0));
9662 unlock_user(p, arg2, 0);
9663 break;
9664 #endif
9665 case TARGET_NR_getpriority:
9666 /* Note that negative values are valid for getpriority, so we must
9667 differentiate based on errno settings. */
9668 errno = 0;
9669 ret = getpriority(arg1, arg2);
9670 if (ret == -1 && errno != 0) {
9671 ret = -host_to_target_errno(errno);
9672 break;
9674 #ifdef TARGET_ALPHA
9675 /* Return value is the unbiased priority. Signal no error. */
9676 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9677 #else
9678 /* Return value is a biased priority to avoid negative numbers. */
9679 ret = 20 - ret;
9680 #endif
9681 break;
9682 case TARGET_NR_setpriority:
9683 ret = get_errno(setpriority(arg1, arg2, arg3));
9684 break;
9685 #ifdef TARGET_NR_profil
9686 case TARGET_NR_profil:
9687 goto unimplemented;
9688 #endif
9689 case TARGET_NR_statfs:
9690 if (!(p = lock_user_string(arg1)))
9691 goto efault;
9692 ret = get_errno(statfs(path(p), &stfs));
9693 unlock_user(p, arg1, 0);
9694 convert_statfs:
9695 if (!is_error(ret)) {
9696 struct target_statfs *target_stfs;
9698 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9699 goto efault;
9700 __put_user(stfs.f_type, &target_stfs->f_type);
9701 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9702 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9703 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9704 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9705 __put_user(stfs.f_files, &target_stfs->f_files);
9706 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9707 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9708 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9709 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9710 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9711 #ifdef _STATFS_F_FLAGS
9712 __put_user(stfs.f_flags, &target_stfs->f_flags);
9713 #else
9714 __put_user(0, &target_stfs->f_flags);
9715 #endif
9716 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9717 unlock_user_struct(target_stfs, arg2, 1);
9719 break;
9720 case TARGET_NR_fstatfs:
9721 ret = get_errno(fstatfs(arg1, &stfs));
9722 goto convert_statfs;
9723 #ifdef TARGET_NR_statfs64
9724 case TARGET_NR_statfs64:
9725 if (!(p = lock_user_string(arg1)))
9726 goto efault;
9727 ret = get_errno(statfs(path(p), &stfs));
9728 unlock_user(p, arg1, 0);
9729 convert_statfs64:
9730 if (!is_error(ret)) {
9731 struct target_statfs64 *target_stfs;
9733 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9734 goto efault;
9735 __put_user(stfs.f_type, &target_stfs->f_type);
9736 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9737 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9738 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9739 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9740 __put_user(stfs.f_files, &target_stfs->f_files);
9741 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9742 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9743 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9744 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9745 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9746 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9747 unlock_user_struct(target_stfs, arg3, 1);
9749 break;
9750 case TARGET_NR_fstatfs64:
9751 ret = get_errno(fstatfs(arg1, &stfs));
9752 goto convert_statfs64;
9753 #endif
9754 #ifdef TARGET_NR_ioperm
9755 case TARGET_NR_ioperm:
9756 goto unimplemented;
9757 #endif
9758 #ifdef TARGET_NR_socketcall
9759 case TARGET_NR_socketcall:
9760 ret = do_socketcall(arg1, arg2);
9761 break;
9762 #endif
9763 #ifdef TARGET_NR_accept
9764 case TARGET_NR_accept:
9765 ret = do_accept4(arg1, arg2, arg3, 0);
9766 break;
9767 #endif
9768 #ifdef TARGET_NR_accept4
9769 case TARGET_NR_accept4:
9770 ret = do_accept4(arg1, arg2, arg3, arg4);
9771 break;
9772 #endif
9773 #ifdef TARGET_NR_bind
9774 case TARGET_NR_bind:
9775 ret = do_bind(arg1, arg2, arg3);
9776 break;
9777 #endif
9778 #ifdef TARGET_NR_connect
9779 case TARGET_NR_connect:
9780 ret = do_connect(arg1, arg2, arg3);
9781 break;
9782 #endif
9783 #ifdef TARGET_NR_getpeername
9784 case TARGET_NR_getpeername:
9785 ret = do_getpeername(arg1, arg2, arg3);
9786 break;
9787 #endif
9788 #ifdef TARGET_NR_getsockname
9789 case TARGET_NR_getsockname:
9790 ret = do_getsockname(arg1, arg2, arg3);
9791 break;
9792 #endif
9793 #ifdef TARGET_NR_getsockopt
9794 case TARGET_NR_getsockopt:
9795 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9796 break;
9797 #endif
9798 #ifdef TARGET_NR_listen
9799 case TARGET_NR_listen:
9800 ret = get_errno(listen(arg1, arg2));
9801 break;
9802 #endif
9803 #ifdef TARGET_NR_recv
9804 case TARGET_NR_recv:
9805 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9806 break;
9807 #endif
9808 #ifdef TARGET_NR_recvfrom
9809 case TARGET_NR_recvfrom:
9810 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9811 break;
9812 #endif
9813 #ifdef TARGET_NR_recvmsg
9814 case TARGET_NR_recvmsg:
9815 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
9816 break;
9817 #endif
9818 #ifdef TARGET_NR_send
9819 case TARGET_NR_send:
9820 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9821 break;
9822 #endif
9823 #ifdef TARGET_NR_sendmsg
9824 case TARGET_NR_sendmsg:
9825 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
9826 break;
9827 #endif
9828 #ifdef TARGET_NR_sendmmsg
9829 case TARGET_NR_sendmmsg:
9830 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9831 break;
9832 case TARGET_NR_recvmmsg:
9833 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9834 break;
9835 #endif
9836 #ifdef TARGET_NR_sendto
9837 case TARGET_NR_sendto:
9838 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9839 break;
9840 #endif
9841 #ifdef TARGET_NR_shutdown
9842 case TARGET_NR_shutdown:
9843 ret = get_errno(shutdown(arg1, arg2));
9844 break;
9845 #endif
9846 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9847 case TARGET_NR_getrandom:
9848 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9849 if (!p) {
9850 goto efault;
9852 ret = get_errno(getrandom(p, arg2, arg3));
9853 unlock_user(p, arg1, ret);
9854 break;
9855 #endif
9856 #ifdef TARGET_NR_socket
9857 case TARGET_NR_socket:
9858 ret = do_socket(arg1, arg2, arg3);
9859 break;
9860 #endif
9861 #ifdef TARGET_NR_socketpair
9862 case TARGET_NR_socketpair:
9863 ret = do_socketpair(arg1, arg2, arg3, arg4);
9864 break;
9865 #endif
9866 #ifdef TARGET_NR_setsockopt
9867 case TARGET_NR_setsockopt:
9868 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9869 break;
9870 #endif
9871 #if defined(TARGET_NR_syslog)
9872 case TARGET_NR_syslog:
9874 int len = arg2;
9876 switch (arg1) {
9877 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
9878 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
9879 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
9880 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
9881 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
9882 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9883 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
9884 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
9886 ret = get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9888 break;
9889 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
9890 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
9891 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
9893 ret = -TARGET_EINVAL;
9894 if (len < 0) {
9895 goto fail;
9897 ret = 0;
9898 if (len == 0) {
9899 break;
9901 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9902 if (!p) {
9903 ret = -TARGET_EFAULT;
9904 goto fail;
9906 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9907 unlock_user(p, arg2, arg3);
9909 break;
9910 default:
9911 ret = -EINVAL;
9912 break;
9915 break;
9916 #endif
9917 case TARGET_NR_setitimer:
9919 struct itimerval value, ovalue, *pvalue;
9921 if (arg2) {
9922 pvalue = &value;
9923 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9924 || copy_from_user_timeval(&pvalue->it_value,
9925 arg2 + sizeof(struct target_timeval)))
9926 goto efault;
9927 } else {
9928 pvalue = NULL;
9930 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9931 if (!is_error(ret) && arg3) {
9932 if (copy_to_user_timeval(arg3,
9933 &ovalue.it_interval)
9934 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9935 &ovalue.it_value))
9936 goto efault;
9939 break;
9940 case TARGET_NR_getitimer:
9942 struct itimerval value;
9944 ret = get_errno(getitimer(arg1, &value));
9945 if (!is_error(ret) && arg2) {
9946 if (copy_to_user_timeval(arg2,
9947 &value.it_interval)
9948 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9949 &value.it_value))
9950 goto efault;
9953 break;
9954 #ifdef TARGET_NR_stat
9955 case TARGET_NR_stat:
9956 if (!(p = lock_user_string(arg1)))
9957 goto efault;
9958 ret = get_errno(stat(path(p), &st));
9959 unlock_user(p, arg1, 0);
9960 goto do_stat;
9961 #endif
9962 #ifdef TARGET_NR_lstat
9963 case TARGET_NR_lstat:
9964 if (!(p = lock_user_string(arg1)))
9965 goto efault;
9966 ret = get_errno(lstat(path(p), &st));
9967 unlock_user(p, arg1, 0);
9968 goto do_stat;
9969 #endif
9970 case TARGET_NR_fstat:
9972 ret = get_errno(fstat(arg1, &st));
9973 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9974 do_stat:
9975 #endif
9976 if (!is_error(ret)) {
9977 struct target_stat *target_st;
9979 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9980 goto efault;
9981 memset(target_st, 0, sizeof(*target_st));
9982 __put_user(st.st_dev, &target_st->st_dev);
9983 __put_user(st.st_ino, &target_st->st_ino);
9984 __put_user(st.st_mode, &target_st->st_mode);
9985 __put_user(st.st_uid, &target_st->st_uid);
9986 __put_user(st.st_gid, &target_st->st_gid);
9987 __put_user(st.st_nlink, &target_st->st_nlink);
9988 __put_user(st.st_rdev, &target_st->st_rdev);
9989 __put_user(st.st_size, &target_st->st_size);
9990 __put_user(st.st_blksize, &target_st->st_blksize);
9991 __put_user(st.st_blocks, &target_st->st_blocks);
9992 __put_user(st.st_atime, &target_st->target_st_atime);
9993 __put_user(st.st_mtime, &target_st->target_st_mtime);
9994 __put_user(st.st_ctime, &target_st->target_st_ctime);
9995 unlock_user_struct(target_st, arg2, 1);
9998 break;
9999 #ifdef TARGET_NR_olduname
10000 case TARGET_NR_olduname:
10001 goto unimplemented;
10002 #endif
10003 #ifdef TARGET_NR_iopl
10004 case TARGET_NR_iopl:
10005 goto unimplemented;
10006 #endif
10007 case TARGET_NR_vhangup:
10008 ret = get_errno(vhangup());
10009 break;
10010 #ifdef TARGET_NR_idle
10011 case TARGET_NR_idle:
10012 goto unimplemented;
10013 #endif
10014 #ifdef TARGET_NR_syscall
10015 case TARGET_NR_syscall:
10016 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10017 arg6, arg7, arg8, 0);
10018 break;
10019 #endif
10020 case TARGET_NR_wait4:
10022 int status;
10023 abi_long status_ptr = arg2;
10024 struct rusage rusage, *rusage_ptr;
10025 abi_ulong target_rusage = arg4;
10026 abi_long rusage_err;
10027 if (target_rusage)
10028 rusage_ptr = &rusage;
10029 else
10030 rusage_ptr = NULL;
10031 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10032 if (!is_error(ret)) {
10033 if (status_ptr && ret) {
10034 status = host_to_target_waitstatus(status);
10035 if (put_user_s32(status, status_ptr))
10036 goto efault;
10038 if (target_rusage) {
10039 rusage_err = host_to_target_rusage(target_rusage, &rusage);
10040 if (rusage_err) {
10041 ret = rusage_err;
10046 break;
10047 #ifdef TARGET_NR_swapoff
10048 case TARGET_NR_swapoff:
10049 if (!(p = lock_user_string(arg1)))
10050 goto efault;
10051 ret = get_errno(swapoff(p));
10052 unlock_user(p, arg1, 0);
10053 break;
10054 #endif
10055 case TARGET_NR_sysinfo:
10057 struct target_sysinfo *target_value;
10058 struct sysinfo value;
10059 ret = get_errno(sysinfo(&value));
10060 if (!is_error(ret) && arg1)
10062 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10063 goto efault;
10064 __put_user(value.uptime, &target_value->uptime);
10065 __put_user(value.loads[0], &target_value->loads[0]);
10066 __put_user(value.loads[1], &target_value->loads[1]);
10067 __put_user(value.loads[2], &target_value->loads[2]);
10068 __put_user(value.totalram, &target_value->totalram);
10069 __put_user(value.freeram, &target_value->freeram);
10070 __put_user(value.sharedram, &target_value->sharedram);
10071 __put_user(value.bufferram, &target_value->bufferram);
10072 __put_user(value.totalswap, &target_value->totalswap);
10073 __put_user(value.freeswap, &target_value->freeswap);
10074 __put_user(value.procs, &target_value->procs);
10075 __put_user(value.totalhigh, &target_value->totalhigh);
10076 __put_user(value.freehigh, &target_value->freehigh);
10077 __put_user(value.mem_unit, &target_value->mem_unit);
10078 unlock_user_struct(target_value, arg1, 1);
10081 break;
10082 #ifdef TARGET_NR_ipc
10083 case TARGET_NR_ipc:
10084 ret = do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10085 break;
10086 #endif
10087 #ifdef TARGET_NR_semget
10088 case TARGET_NR_semget:
10089 ret = get_errno(semget(arg1, arg2, arg3));
10090 break;
10091 #endif
10092 #ifdef TARGET_NR_semop
10093 case TARGET_NR_semop:
10094 ret = do_semop(arg1, arg2, arg3);
10095 break;
10096 #endif
10097 #ifdef TARGET_NR_semctl
10098 case TARGET_NR_semctl:
10099 ret = do_semctl(arg1, arg2, arg3, arg4);
10100 break;
10101 #endif
10102 #ifdef TARGET_NR_msgctl
10103 case TARGET_NR_msgctl:
10104 ret = do_msgctl(arg1, arg2, arg3);
10105 break;
10106 #endif
10107 #ifdef TARGET_NR_msgget
10108 case TARGET_NR_msgget:
10109 ret = get_errno(msgget(arg1, arg2));
10110 break;
10111 #endif
10112 #ifdef TARGET_NR_msgrcv
10113 case TARGET_NR_msgrcv:
10114 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10115 break;
10116 #endif
10117 #ifdef TARGET_NR_msgsnd
10118 case TARGET_NR_msgsnd:
10119 ret = do_msgsnd(arg1, arg2, arg3, arg4);
10120 break;
10121 #endif
10122 #ifdef TARGET_NR_shmget
10123 case TARGET_NR_shmget:
10124 ret = get_errno(shmget(arg1, arg2, arg3));
10125 break;
10126 #endif
10127 #ifdef TARGET_NR_shmctl
10128 case TARGET_NR_shmctl:
10129 ret = do_shmctl(arg1, arg2, arg3);
10130 break;
10131 #endif
10132 #ifdef TARGET_NR_shmat
10133 case TARGET_NR_shmat:
10134 ret = do_shmat(cpu_env, arg1, arg2, arg3);
10135 break;
10136 #endif
10137 #ifdef TARGET_NR_shmdt
10138 case TARGET_NR_shmdt:
10139 ret = do_shmdt(arg1);
10140 break;
10141 #endif
10142 case TARGET_NR_fsync:
10143 ret = get_errno(fsync(arg1));
10144 break;
10145 case TARGET_NR_clone:
10146 /* Linux manages to have three different orderings for its
10147 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10148 * match the kernel's CONFIG_CLONE_* settings.
10149 * Microblaze is further special in that it uses a sixth
10150 * implicit argument to clone for the TLS pointer.
10152 #if defined(TARGET_MICROBLAZE)
10153 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10154 #elif defined(TARGET_CLONE_BACKWARDS)
10155 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10156 #elif defined(TARGET_CLONE_BACKWARDS2)
10157 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10158 #else
10159 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10160 #endif
10161 break;
10162 #ifdef __NR_exit_group
10163 /* new thread calls */
10164 case TARGET_NR_exit_group:
10165 preexit_cleanup(cpu_env, arg1);
10166 ret = get_errno(exit_group(arg1));
10167 break;
10168 #endif
10169 case TARGET_NR_setdomainname:
10170 if (!(p = lock_user_string(arg1)))
10171 goto efault;
10172 ret = get_errno(setdomainname(p, arg2));
10173 unlock_user(p, arg1, 0);
10174 break;
10175 case TARGET_NR_uname:
10176 /* no need to transcode because we use the linux syscall */
10178 struct new_utsname * buf;
10180 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10181 goto efault;
10182 ret = get_errno(sys_uname(buf));
10183 if (!is_error(ret)) {
10184 /* Overwrite the native machine name with whatever is being
10185 emulated. */
10186 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10187 sizeof(buf->machine));
10188 /* Allow the user to override the reported release. */
10189 if (qemu_uname_release && *qemu_uname_release) {
10190 g_strlcpy(buf->release, qemu_uname_release,
10191 sizeof(buf->release));
10194 unlock_user_struct(buf, arg1, 1);
10196 break;
10197 #ifdef TARGET_I386
10198 case TARGET_NR_modify_ldt:
10199 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
10200 break;
10201 #if !defined(TARGET_X86_64)
10202 case TARGET_NR_vm86old:
10203 goto unimplemented;
10204 case TARGET_NR_vm86:
10205 ret = do_vm86(cpu_env, arg1, arg2);
10206 break;
10207 #endif
10208 #endif
10209 case TARGET_NR_adjtimex:
10211 struct timex host_buf;
10213 if (target_to_host_timex(&host_buf, arg1) != 0) {
10214 goto efault;
10216 ret = get_errno(adjtimex(&host_buf));
10217 if (!is_error(ret)) {
10218 if (host_to_target_timex(arg1, &host_buf) != 0) {
10219 goto efault;
10223 break;
10224 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10225 case TARGET_NR_clock_adjtime:
10227 struct timex htx, *phtx = &htx;
10229 if (target_to_host_timex(phtx, arg2) != 0) {
10230 goto efault;
10232 ret = get_errno(clock_adjtime(arg1, phtx));
10233 if (!is_error(ret) && phtx) {
10234 if (host_to_target_timex(arg2, phtx) != 0) {
10235 goto efault;
10239 break;
10240 #endif
10241 #ifdef TARGET_NR_create_module
10242 case TARGET_NR_create_module:
10243 #endif
10244 case TARGET_NR_init_module:
10245 case TARGET_NR_delete_module:
10246 #ifdef TARGET_NR_get_kernel_syms
10247 case TARGET_NR_get_kernel_syms:
10248 #endif
10249 goto unimplemented;
10250 case TARGET_NR_quotactl:
10251 goto unimplemented;
10252 case TARGET_NR_getpgid:
10253 ret = get_errno(getpgid(arg1));
10254 break;
10255 case TARGET_NR_fchdir:
10256 ret = get_errno(fchdir(arg1));
10257 break;
10258 #ifdef TARGET_NR_bdflush /* not on x86_64 */
10259 case TARGET_NR_bdflush:
10260 goto unimplemented;
10261 #endif
10262 #ifdef TARGET_NR_sysfs
10263 case TARGET_NR_sysfs:
10264 goto unimplemented;
10265 #endif
10266 case TARGET_NR_personality:
10267 ret = get_errno(personality(arg1));
10268 break;
10269 #ifdef TARGET_NR_afs_syscall
10270 case TARGET_NR_afs_syscall:
10271 goto unimplemented;
10272 #endif
10273 #ifdef TARGET_NR__llseek /* Not on alpha */
10274 case TARGET_NR__llseek:
10276 int64_t res;
10277 #if !defined(__NR_llseek)
10278 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10279 if (res == -1) {
10280 ret = get_errno(res);
10281 } else {
10282 ret = 0;
10284 #else
10285 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10286 #endif
10287 if ((ret == 0) && put_user_s64(res, arg4)) {
10288 goto efault;
10291 break;
10292 #endif
10293 #ifdef TARGET_NR_getdents
10294 case TARGET_NR_getdents:
10295 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10296 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10298 struct target_dirent *target_dirp;
10299 struct linux_dirent *dirp;
10300 abi_long count = arg3;
10302 dirp = g_try_malloc(count);
10303 if (!dirp) {
10304 ret = -TARGET_ENOMEM;
10305 goto fail;
10308 ret = get_errno(sys_getdents(arg1, dirp, count));
10309 if (!is_error(ret)) {
10310 struct linux_dirent *de;
10311 struct target_dirent *tde;
10312 int len = ret;
10313 int reclen, treclen;
10314 int count1, tnamelen;
10316 count1 = 0;
10317 de = dirp;
10318 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10319 goto efault;
10320 tde = target_dirp;
10321 while (len > 0) {
10322 reclen = de->d_reclen;
10323 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10324 assert(tnamelen >= 0);
10325 treclen = tnamelen + offsetof(struct target_dirent, d_name);
10326 assert(count1 + treclen <= count);
10327 tde->d_reclen = tswap16(treclen);
10328 tde->d_ino = tswapal(de->d_ino);
10329 tde->d_off = tswapal(de->d_off);
10330 memcpy(tde->d_name, de->d_name, tnamelen);
10331 de = (struct linux_dirent *)((char *)de + reclen);
10332 len -= reclen;
10333 tde = (struct target_dirent *)((char *)tde + treclen);
10334 count1 += treclen;
10336 ret = count1;
10337 unlock_user(target_dirp, arg2, ret);
10339 g_free(dirp);
10341 #else
10343 struct linux_dirent *dirp;
10344 abi_long count = arg3;
10346 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10347 goto efault;
10348 ret = get_errno(sys_getdents(arg1, dirp, count));
10349 if (!is_error(ret)) {
10350 struct linux_dirent *de;
10351 int len = ret;
10352 int reclen;
10353 de = dirp;
10354 while (len > 0) {
10355 reclen = de->d_reclen;
10356 if (reclen > len)
10357 break;
10358 de->d_reclen = tswap16(reclen);
10359 tswapls(&de->d_ino);
10360 tswapls(&de->d_off);
10361 de = (struct linux_dirent *)((char *)de + reclen);
10362 len -= reclen;
10365 unlock_user(dirp, arg2, ret);
10367 #endif
10368 #else
10369 /* Implement getdents in terms of getdents64 */
10371 struct linux_dirent64 *dirp;
10372 abi_long count = arg3;
10374 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10375 if (!dirp) {
10376 goto efault;
10378 ret = get_errno(sys_getdents64(arg1, dirp, count));
10379 if (!is_error(ret)) {
10380 /* Convert the dirent64 structs to target dirent. We do this
10381 * in-place, since we can guarantee that a target_dirent is no
10382 * larger than a dirent64; however this means we have to be
10383 * careful to read everything before writing in the new format.
10385 struct linux_dirent64 *de;
10386 struct target_dirent *tde;
10387 int len = ret;
10388 int tlen = 0;
10390 de = dirp;
10391 tde = (struct target_dirent *)dirp;
10392 while (len > 0) {
10393 int namelen, treclen;
10394 int reclen = de->d_reclen;
10395 uint64_t ino = de->d_ino;
10396 int64_t off = de->d_off;
10397 uint8_t type = de->d_type;
10399 namelen = strlen(de->d_name);
10400 treclen = offsetof(struct target_dirent, d_name)
10401 + namelen + 2;
10402 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10404 memmove(tde->d_name, de->d_name, namelen + 1);
10405 tde->d_ino = tswapal(ino);
10406 tde->d_off = tswapal(off);
10407 tde->d_reclen = tswap16(treclen);
10408 /* The target_dirent type is in what was formerly a padding
10409 * byte at the end of the structure:
10411 *(((char *)tde) + treclen - 1) = type;
10413 de = (struct linux_dirent64 *)((char *)de + reclen);
10414 tde = (struct target_dirent *)((char *)tde + treclen);
10415 len -= reclen;
10416 tlen += treclen;
10418 ret = tlen;
10420 unlock_user(dirp, arg2, ret);
10422 #endif
10423 break;
10424 #endif /* TARGET_NR_getdents */
10425 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10426 case TARGET_NR_getdents64:
10428 struct linux_dirent64 *dirp;
10429 abi_long count = arg3;
10430 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10431 goto efault;
10432 ret = get_errno(sys_getdents64(arg1, dirp, count));
10433 if (!is_error(ret)) {
10434 struct linux_dirent64 *de;
10435 int len = ret;
10436 int reclen;
10437 de = dirp;
10438 while (len > 0) {
10439 reclen = de->d_reclen;
10440 if (reclen > len)
10441 break;
10442 de->d_reclen = tswap16(reclen);
10443 tswap64s((uint64_t *)&de->d_ino);
10444 tswap64s((uint64_t *)&de->d_off);
10445 de = (struct linux_dirent64 *)((char *)de + reclen);
10446 len -= reclen;
10449 unlock_user(dirp, arg2, ret);
10451 break;
10452 #endif /* TARGET_NR_getdents64 */
10453 #if defined(TARGET_NR__newselect)
10454 case TARGET_NR__newselect:
10455 ret = do_select(arg1, arg2, arg3, arg4, arg5);
10456 break;
10457 #endif
10458 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10459 # ifdef TARGET_NR_poll
10460 case TARGET_NR_poll:
10461 # endif
10462 # ifdef TARGET_NR_ppoll
10463 case TARGET_NR_ppoll:
10464 # endif
10466 struct target_pollfd *target_pfd;
10467 unsigned int nfds = arg2;
10468 struct pollfd *pfd;
10469 unsigned int i;
10471 pfd = NULL;
10472 target_pfd = NULL;
10473 if (nfds) {
10474 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
10475 ret = -TARGET_EINVAL;
10476 break;
10479 target_pfd = lock_user(VERIFY_WRITE, arg1,
10480 sizeof(struct target_pollfd) * nfds, 1);
10481 if (!target_pfd) {
10482 goto efault;
10485 pfd = alloca(sizeof(struct pollfd) * nfds);
10486 for (i = 0; i < nfds; i++) {
10487 pfd[i].fd = tswap32(target_pfd[i].fd);
10488 pfd[i].events = tswap16(target_pfd[i].events);
10492 switch (num) {
10493 # ifdef TARGET_NR_ppoll
10494 case TARGET_NR_ppoll:
10496 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
10497 target_sigset_t *target_set;
10498 sigset_t _set, *set = &_set;
10500 if (arg3) {
10501 if (target_to_host_timespec(timeout_ts, arg3)) {
10502 unlock_user(target_pfd, arg1, 0);
10503 goto efault;
10505 } else {
10506 timeout_ts = NULL;
10509 if (arg4) {
10510 if (arg5 != sizeof(target_sigset_t)) {
10511 unlock_user(target_pfd, arg1, 0);
10512 ret = -TARGET_EINVAL;
10513 break;
10516 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
10517 if (!target_set) {
10518 unlock_user(target_pfd, arg1, 0);
10519 goto efault;
10521 target_to_host_sigset(set, target_set);
10522 } else {
10523 set = NULL;
10526 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
10527 set, SIGSET_T_SIZE));
10529 if (!is_error(ret) && arg3) {
10530 host_to_target_timespec(arg3, timeout_ts);
10532 if (arg4) {
10533 unlock_user(target_set, arg4, 0);
10535 break;
10537 # endif
10538 # ifdef TARGET_NR_poll
10539 case TARGET_NR_poll:
10541 struct timespec ts, *pts;
10543 if (arg3 >= 0) {
10544 /* Convert ms to secs, ns */
10545 ts.tv_sec = arg3 / 1000;
10546 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10547 pts = &ts;
10548 } else {
10549 /* -ve poll() timeout means "infinite" */
10550 pts = NULL;
10552 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10553 break;
10555 # endif
10556 default:
10557 g_assert_not_reached();
10560 if (!is_error(ret)) {
10561 for(i = 0; i < nfds; i++) {
10562 target_pfd[i].revents = tswap16(pfd[i].revents);
10565 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10567 break;
10568 #endif
10569 case TARGET_NR_flock:
10570 /* NOTE: the flock constant seems to be the same for every
10571 Linux platform */
10572 ret = get_errno(safe_flock(arg1, arg2));
10573 break;
10574 case TARGET_NR_readv:
10576 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10577 if (vec != NULL) {
10578 ret = get_errno(safe_readv(arg1, vec, arg3));
10579 unlock_iovec(vec, arg2, arg3, 1);
10580 } else {
10581 ret = -host_to_target_errno(errno);
10584 break;
10585 case TARGET_NR_writev:
10587 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10588 if (vec != NULL) {
10589 ret = get_errno(safe_writev(arg1, vec, arg3));
10590 unlock_iovec(vec, arg2, arg3, 0);
10591 } else {
10592 ret = -host_to_target_errno(errno);
10595 break;
10596 #if defined(TARGET_NR_preadv)
10597 case TARGET_NR_preadv:
10599 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10600 if (vec != NULL) {
10601 unsigned long low, high;
10603 target_to_host_low_high(arg4, arg5, &low, &high);
10604 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10605 unlock_iovec(vec, arg2, arg3, 1);
10606 } else {
10607 ret = -host_to_target_errno(errno);
10610 break;
10611 #endif
10612 #if defined(TARGET_NR_pwritev)
10613 case TARGET_NR_pwritev:
10615 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10616 if (vec != NULL) {
10617 unsigned long low, high;
10619 target_to_host_low_high(arg4, arg5, &low, &high);
10620 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10621 unlock_iovec(vec, arg2, arg3, 0);
10622 } else {
10623 ret = -host_to_target_errno(errno);
10626 break;
10627 #endif
10628 case TARGET_NR_getsid:
10629 ret = get_errno(getsid(arg1));
10630 break;
10631 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10632 case TARGET_NR_fdatasync:
10633 ret = get_errno(fdatasync(arg1));
10634 break;
10635 #endif
10636 #ifdef TARGET_NR__sysctl
10637 case TARGET_NR__sysctl:
10638 /* We don't implement this, but ENOTDIR is always a safe
10639 return value. */
10640 ret = -TARGET_ENOTDIR;
10641 break;
10642 #endif
10643 case TARGET_NR_sched_getaffinity:
10645 unsigned int mask_size;
10646 unsigned long *mask;
10649 * sched_getaffinity needs multiples of ulong, so need to take
10650 * care of mismatches between target ulong and host ulong sizes.
10652 if (arg2 & (sizeof(abi_ulong) - 1)) {
10653 ret = -TARGET_EINVAL;
10654 break;
10656 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10658 mask = alloca(mask_size);
10659 memset(mask, 0, mask_size);
10660 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10662 if (!is_error(ret)) {
10663 if (ret > arg2) {
10664 /* More data returned than the caller's buffer will fit.
10665 * This only happens if sizeof(abi_long) < sizeof(long)
10666 * and the caller passed us a buffer holding an odd number
10667 * of abi_longs. If the host kernel is actually using the
10668 * extra 4 bytes then fail EINVAL; otherwise we can just
10669 * ignore them and only copy the interesting part.
10671 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10672 if (numcpus > arg2 * 8) {
10673 ret = -TARGET_EINVAL;
10674 break;
10676 ret = arg2;
10679 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10680 goto efault;
10684 break;
10685 case TARGET_NR_sched_setaffinity:
10687 unsigned int mask_size;
10688 unsigned long *mask;
10691 * sched_setaffinity needs multiples of ulong, so need to take
10692 * care of mismatches between target ulong and host ulong sizes.
10694 if (arg2 & (sizeof(abi_ulong) - 1)) {
10695 ret = -TARGET_EINVAL;
10696 break;
10698 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10699 mask = alloca(mask_size);
10701 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10702 if (ret) {
10703 break;
10706 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10708 break;
10709 case TARGET_NR_getcpu:
10711 unsigned cpu, node;
10712 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10713 arg2 ? &node : NULL,
10714 NULL));
10715 if (is_error(ret)) {
10716 goto fail;
10718 if (arg1 && put_user_u32(cpu, arg1)) {
10719 goto efault;
10721 if (arg2 && put_user_u32(node, arg2)) {
10722 goto efault;
10725 break;
10726 case TARGET_NR_sched_setparam:
10728 struct sched_param *target_schp;
10729 struct sched_param schp;
10731 if (arg2 == 0) {
10732 return -TARGET_EINVAL;
10734 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10735 goto efault;
10736 schp.sched_priority = tswap32(target_schp->sched_priority);
10737 unlock_user_struct(target_schp, arg2, 0);
10738 ret = get_errno(sched_setparam(arg1, &schp));
10740 break;
10741 case TARGET_NR_sched_getparam:
10743 struct sched_param *target_schp;
10744 struct sched_param schp;
10746 if (arg2 == 0) {
10747 return -TARGET_EINVAL;
10749 ret = get_errno(sched_getparam(arg1, &schp));
10750 if (!is_error(ret)) {
10751 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10752 goto efault;
10753 target_schp->sched_priority = tswap32(schp.sched_priority);
10754 unlock_user_struct(target_schp, arg2, 1);
10757 break;
10758 case TARGET_NR_sched_setscheduler:
10760 struct sched_param *target_schp;
10761 struct sched_param schp;
10762 if (arg3 == 0) {
10763 return -TARGET_EINVAL;
10765 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10766 goto efault;
10767 schp.sched_priority = tswap32(target_schp->sched_priority);
10768 unlock_user_struct(target_schp, arg3, 0);
10769 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
10771 break;
10772 case TARGET_NR_sched_getscheduler:
10773 ret = get_errno(sched_getscheduler(arg1));
10774 break;
10775 case TARGET_NR_sched_yield:
10776 ret = get_errno(sched_yield());
10777 break;
10778 case TARGET_NR_sched_get_priority_max:
10779 ret = get_errno(sched_get_priority_max(arg1));
10780 break;
10781 case TARGET_NR_sched_get_priority_min:
10782 ret = get_errno(sched_get_priority_min(arg1));
10783 break;
10784 case TARGET_NR_sched_rr_get_interval:
10786 struct timespec ts;
10787 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10788 if (!is_error(ret)) {
10789 ret = host_to_target_timespec(arg2, &ts);
10792 break;
10793 case TARGET_NR_nanosleep:
10795 struct timespec req, rem;
10796 target_to_host_timespec(&req, arg1);
10797 ret = get_errno(safe_nanosleep(&req, &rem));
10798 if (is_error(ret) && arg2) {
10799 host_to_target_timespec(arg2, &rem);
10802 break;
10803 #ifdef TARGET_NR_query_module
10804 case TARGET_NR_query_module:
10805 goto unimplemented;
10806 #endif
10807 #ifdef TARGET_NR_nfsservctl
10808 case TARGET_NR_nfsservctl:
10809 goto unimplemented;
10810 #endif
10811 case TARGET_NR_prctl:
10812 switch (arg1) {
10813 case PR_GET_PDEATHSIG:
10815 int deathsig;
10816 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10817 if (!is_error(ret) && arg2
10818 && put_user_ual(deathsig, arg2)) {
10819 goto efault;
10821 break;
10823 #ifdef PR_GET_NAME
10824 case PR_GET_NAME:
10826 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10827 if (!name) {
10828 goto efault;
10830 ret = get_errno(prctl(arg1, (unsigned long)name,
10831 arg3, arg4, arg5));
10832 unlock_user(name, arg2, 16);
10833 break;
10835 case PR_SET_NAME:
10837 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10838 if (!name) {
10839 goto efault;
10841 ret = get_errno(prctl(arg1, (unsigned long)name,
10842 arg3, arg4, arg5));
10843 unlock_user(name, arg2, 0);
10844 break;
10846 #endif
10847 #ifdef TARGET_AARCH64
10848 case TARGET_PR_SVE_SET_VL:
10849 /* We cannot support either PR_SVE_SET_VL_ONEXEC
10850 or PR_SVE_VL_INHERIT. Therefore, anything above
10851 ARM_MAX_VQ results in EINVAL. */
10852 ret = -TARGET_EINVAL;
10853 if (arm_feature(cpu_env, ARM_FEATURE_SVE)
10854 && arg2 >= 0 && arg2 <= ARM_MAX_VQ * 16 && !(arg2 & 15)) {
10855 CPUARMState *env = cpu_env;
10856 int old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10857 int vq = MAX(arg2 / 16, 1);
10859 if (vq < old_vq) {
10860 aarch64_sve_narrow_vq(env, vq);
10862 env->vfp.zcr_el[1] = vq - 1;
10863 ret = vq * 16;
10865 break;
10866 case TARGET_PR_SVE_GET_VL:
10867 ret = -TARGET_EINVAL;
10868 if (arm_feature(cpu_env, ARM_FEATURE_SVE)) {
10869 CPUARMState *env = cpu_env;
10870 ret = ((env->vfp.zcr_el[1] & 0xf) + 1) * 16;
10872 break;
10873 #endif /* AARCH64 */
10874 case PR_GET_SECCOMP:
10875 case PR_SET_SECCOMP:
10876 /* Disable seccomp to prevent the target disabling syscalls we
10877 * need. */
10878 ret = -TARGET_EINVAL;
10879 break;
10880 default:
10881 /* Most prctl options have no pointer arguments */
10882 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10883 break;
10885 break;
10886 #ifdef TARGET_NR_arch_prctl
10887 case TARGET_NR_arch_prctl:
10888 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10889 ret = do_arch_prctl(cpu_env, arg1, arg2);
10890 break;
10891 #else
10892 goto unimplemented;
10893 #endif
10894 #endif
10895 #ifdef TARGET_NR_pread64
10896 case TARGET_NR_pread64:
10897 if (regpairs_aligned(cpu_env, num)) {
10898 arg4 = arg5;
10899 arg5 = arg6;
10901 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
10902 goto efault;
10903 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10904 unlock_user(p, arg2, ret);
10905 break;
10906 case TARGET_NR_pwrite64:
10907 if (regpairs_aligned(cpu_env, num)) {
10908 arg4 = arg5;
10909 arg5 = arg6;
10911 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
10912 goto efault;
10913 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10914 unlock_user(p, arg2, 0);
10915 break;
10916 #endif
10917 case TARGET_NR_getcwd:
10918 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10919 goto efault;
10920 ret = get_errno(sys_getcwd1(p, arg2));
10921 unlock_user(p, arg1, ret);
10922 break;
10923 case TARGET_NR_capget:
10924 case TARGET_NR_capset:
10926 struct target_user_cap_header *target_header;
10927 struct target_user_cap_data *target_data = NULL;
10928 struct __user_cap_header_struct header;
10929 struct __user_cap_data_struct data[2];
10930 struct __user_cap_data_struct *dataptr = NULL;
10931 int i, target_datalen;
10932 int data_items = 1;
10934 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10935 goto efault;
10937 header.version = tswap32(target_header->version);
10938 header.pid = tswap32(target_header->pid);
10940 if (header.version != _LINUX_CAPABILITY_VERSION) {
10941 /* Version 2 and up takes pointer to two user_data structs */
10942 data_items = 2;
10945 target_datalen = sizeof(*target_data) * data_items;
10947 if (arg2) {
10948 if (num == TARGET_NR_capget) {
10949 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10950 } else {
10951 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10953 if (!target_data) {
10954 unlock_user_struct(target_header, arg1, 0);
10955 goto efault;
10958 if (num == TARGET_NR_capset) {
10959 for (i = 0; i < data_items; i++) {
10960 data[i].effective = tswap32(target_data[i].effective);
10961 data[i].permitted = tswap32(target_data[i].permitted);
10962 data[i].inheritable = tswap32(target_data[i].inheritable);
10966 dataptr = data;
10969 if (num == TARGET_NR_capget) {
10970 ret = get_errno(capget(&header, dataptr));
10971 } else {
10972 ret = get_errno(capset(&header, dataptr));
10975 /* The kernel always updates version for both capget and capset */
10976 target_header->version = tswap32(header.version);
10977 unlock_user_struct(target_header, arg1, 1);
10979 if (arg2) {
10980 if (num == TARGET_NR_capget) {
10981 for (i = 0; i < data_items; i++) {
10982 target_data[i].effective = tswap32(data[i].effective);
10983 target_data[i].permitted = tswap32(data[i].permitted);
10984 target_data[i].inheritable = tswap32(data[i].inheritable);
10986 unlock_user(target_data, arg2, target_datalen);
10987 } else {
10988 unlock_user(target_data, arg2, 0);
10991 break;
10993 case TARGET_NR_sigaltstack:
10994 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
10995 break;
10997 #ifdef CONFIG_SENDFILE
10998 case TARGET_NR_sendfile:
11000 off_t *offp = NULL;
11001 off_t off;
11002 if (arg3) {
11003 ret = get_user_sal(off, arg3);
11004 if (is_error(ret)) {
11005 break;
11007 offp = &off;
11009 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11010 if (!is_error(ret) && arg3) {
11011 abi_long ret2 = put_user_sal(off, arg3);
11012 if (is_error(ret2)) {
11013 ret = ret2;
11016 break;
11018 #ifdef TARGET_NR_sendfile64
11019 case TARGET_NR_sendfile64:
11021 off_t *offp = NULL;
11022 off_t off;
11023 if (arg3) {
11024 ret = get_user_s64(off, arg3);
11025 if (is_error(ret)) {
11026 break;
11028 offp = &off;
11030 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11031 if (!is_error(ret) && arg3) {
11032 abi_long ret2 = put_user_s64(off, arg3);
11033 if (is_error(ret2)) {
11034 ret = ret2;
11037 break;
11039 #endif
11040 #else
11041 case TARGET_NR_sendfile:
11042 #ifdef TARGET_NR_sendfile64
11043 case TARGET_NR_sendfile64:
11044 #endif
11045 goto unimplemented;
11046 #endif
11048 #ifdef TARGET_NR_getpmsg
11049 case TARGET_NR_getpmsg:
11050 goto unimplemented;
11051 #endif
11052 #ifdef TARGET_NR_putpmsg
11053 case TARGET_NR_putpmsg:
11054 goto unimplemented;
11055 #endif
11056 #ifdef TARGET_NR_vfork
11057 case TARGET_NR_vfork:
11058 ret = get_errno(do_fork(cpu_env,
11059 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11060 0, 0, 0, 0));
11061 break;
11062 #endif
11063 #ifdef TARGET_NR_ugetrlimit
11064 case TARGET_NR_ugetrlimit:
11066 struct rlimit rlim;
11067 int resource = target_to_host_resource(arg1);
11068 ret = get_errno(getrlimit(resource, &rlim));
11069 if (!is_error(ret)) {
11070 struct target_rlimit *target_rlim;
11071 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11072 goto efault;
11073 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11074 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11075 unlock_user_struct(target_rlim, arg2, 1);
11077 break;
11079 #endif
11080 #ifdef TARGET_NR_truncate64
11081 case TARGET_NR_truncate64:
11082 if (!(p = lock_user_string(arg1)))
11083 goto efault;
11084 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11085 unlock_user(p, arg1, 0);
11086 break;
11087 #endif
11088 #ifdef TARGET_NR_ftruncate64
11089 case TARGET_NR_ftruncate64:
11090 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11091 break;
11092 #endif
11093 #ifdef TARGET_NR_stat64
11094 case TARGET_NR_stat64:
11095 if (!(p = lock_user_string(arg1)))
11096 goto efault;
11097 ret = get_errno(stat(path(p), &st));
11098 unlock_user(p, arg1, 0);
11099 if (!is_error(ret))
11100 ret = host_to_target_stat64(cpu_env, arg2, &st);
11101 break;
11102 #endif
11103 #ifdef TARGET_NR_lstat64
11104 case TARGET_NR_lstat64:
11105 if (!(p = lock_user_string(arg1)))
11106 goto efault;
11107 ret = get_errno(lstat(path(p), &st));
11108 unlock_user(p, arg1, 0);
11109 if (!is_error(ret))
11110 ret = host_to_target_stat64(cpu_env, arg2, &st);
11111 break;
11112 #endif
11113 #ifdef TARGET_NR_fstat64
11114 case TARGET_NR_fstat64:
11115 ret = get_errno(fstat(arg1, &st));
11116 if (!is_error(ret))
11117 ret = host_to_target_stat64(cpu_env, arg2, &st);
11118 break;
11119 #endif
11120 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11121 #ifdef TARGET_NR_fstatat64
11122 case TARGET_NR_fstatat64:
11123 #endif
11124 #ifdef TARGET_NR_newfstatat
11125 case TARGET_NR_newfstatat:
11126 #endif
11127 if (!(p = lock_user_string(arg2)))
11128 goto efault;
11129 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11130 if (!is_error(ret))
11131 ret = host_to_target_stat64(cpu_env, arg3, &st);
11132 break;
11133 #endif
11134 #ifdef TARGET_NR_lchown
11135 case TARGET_NR_lchown:
11136 if (!(p = lock_user_string(arg1)))
11137 goto efault;
11138 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11139 unlock_user(p, arg1, 0);
11140 break;
11141 #endif
11142 #ifdef TARGET_NR_getuid
11143 case TARGET_NR_getuid:
11144 ret = get_errno(high2lowuid(getuid()));
11145 break;
11146 #endif
11147 #ifdef TARGET_NR_getgid
11148 case TARGET_NR_getgid:
11149 ret = get_errno(high2lowgid(getgid()));
11150 break;
11151 #endif
11152 #ifdef TARGET_NR_geteuid
11153 case TARGET_NR_geteuid:
11154 ret = get_errno(high2lowuid(geteuid()));
11155 break;
11156 #endif
11157 #ifdef TARGET_NR_getegid
11158 case TARGET_NR_getegid:
11159 ret = get_errno(high2lowgid(getegid()));
11160 break;
11161 #endif
11162 case TARGET_NR_setreuid:
11163 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11164 break;
11165 case TARGET_NR_setregid:
11166 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11167 break;
11168 case TARGET_NR_getgroups:
11170 int gidsetsize = arg1;
11171 target_id *target_grouplist;
11172 gid_t *grouplist;
11173 int i;
11175 grouplist = alloca(gidsetsize * sizeof(gid_t));
11176 ret = get_errno(getgroups(gidsetsize, grouplist));
11177 if (gidsetsize == 0)
11178 break;
11179 if (!is_error(ret)) {
11180 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11181 if (!target_grouplist)
11182 goto efault;
11183 for(i = 0;i < ret; i++)
11184 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11185 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11188 break;
11189 case TARGET_NR_setgroups:
11191 int gidsetsize = arg1;
11192 target_id *target_grouplist;
11193 gid_t *grouplist = NULL;
11194 int i;
11195 if (gidsetsize) {
11196 grouplist = alloca(gidsetsize * sizeof(gid_t));
11197 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11198 if (!target_grouplist) {
11199 ret = -TARGET_EFAULT;
11200 goto fail;
11202 for (i = 0; i < gidsetsize; i++) {
11203 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11205 unlock_user(target_grouplist, arg2, 0);
11207 ret = get_errno(setgroups(gidsetsize, grouplist));
11209 break;
11210 case TARGET_NR_fchown:
11211 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11212 break;
11213 #if defined(TARGET_NR_fchownat)
11214 case TARGET_NR_fchownat:
11215 if (!(p = lock_user_string(arg2)))
11216 goto efault;
11217 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11218 low2highgid(arg4), arg5));
11219 unlock_user(p, arg2, 0);
11220 break;
11221 #endif
11222 #ifdef TARGET_NR_setresuid
11223 case TARGET_NR_setresuid:
11224 ret = get_errno(sys_setresuid(low2highuid(arg1),
11225 low2highuid(arg2),
11226 low2highuid(arg3)));
11227 break;
11228 #endif
11229 #ifdef TARGET_NR_getresuid
11230 case TARGET_NR_getresuid:
11232 uid_t ruid, euid, suid;
11233 ret = get_errno(getresuid(&ruid, &euid, &suid));
11234 if (!is_error(ret)) {
11235 if (put_user_id(high2lowuid(ruid), arg1)
11236 || put_user_id(high2lowuid(euid), arg2)
11237 || put_user_id(high2lowuid(suid), arg3))
11238 goto efault;
11241 break;
11242 #endif
11243 #ifdef TARGET_NR_getresgid
11244 case TARGET_NR_setresgid:
11245 ret = get_errno(sys_setresgid(low2highgid(arg1),
11246 low2highgid(arg2),
11247 low2highgid(arg3)));
11248 break;
11249 #endif
11250 #ifdef TARGET_NR_getresgid
11251 case TARGET_NR_getresgid:
11253 gid_t rgid, egid, sgid;
11254 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11255 if (!is_error(ret)) {
11256 if (put_user_id(high2lowgid(rgid), arg1)
11257 || put_user_id(high2lowgid(egid), arg2)
11258 || put_user_id(high2lowgid(sgid), arg3))
11259 goto efault;
11262 break;
11263 #endif
11264 #ifdef TARGET_NR_chown
11265 case TARGET_NR_chown:
11266 if (!(p = lock_user_string(arg1)))
11267 goto efault;
11268 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11269 unlock_user(p, arg1, 0);
11270 break;
11271 #endif
11272 case TARGET_NR_setuid:
11273 ret = get_errno(sys_setuid(low2highuid(arg1)));
11274 break;
11275 case TARGET_NR_setgid:
11276 ret = get_errno(sys_setgid(low2highgid(arg1)));
11277 break;
11278 case TARGET_NR_setfsuid:
11279 ret = get_errno(setfsuid(arg1));
11280 break;
11281 case TARGET_NR_setfsgid:
11282 ret = get_errno(setfsgid(arg1));
11283 break;
11285 #ifdef TARGET_NR_lchown32
11286 case TARGET_NR_lchown32:
11287 if (!(p = lock_user_string(arg1)))
11288 goto efault;
11289 ret = get_errno(lchown(p, arg2, arg3));
11290 unlock_user(p, arg1, 0);
11291 break;
11292 #endif
11293 #ifdef TARGET_NR_getuid32
11294 case TARGET_NR_getuid32:
11295 ret = get_errno(getuid());
11296 break;
11297 #endif
11299 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11300 /* Alpha specific */
11301 case TARGET_NR_getxuid:
11303 uid_t euid;
11304 euid=geteuid();
11305 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
11307 ret = get_errno(getuid());
11308 break;
11309 #endif
11310 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11311 /* Alpha specific */
11312 case TARGET_NR_getxgid:
11314 uid_t egid;
11315 egid=getegid();
11316 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
11318 ret = get_errno(getgid());
11319 break;
11320 #endif
11321 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11322 /* Alpha specific */
11323 case TARGET_NR_osf_getsysinfo:
11324 ret = -TARGET_EOPNOTSUPP;
11325 switch (arg1) {
11326 case TARGET_GSI_IEEE_FP_CONTROL:
11328 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
11330 /* Copied from linux ieee_fpcr_to_swcr. */
11331 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
11332 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
11333 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
11334 | SWCR_TRAP_ENABLE_DZE
11335 | SWCR_TRAP_ENABLE_OVF);
11336 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
11337 | SWCR_TRAP_ENABLE_INE);
11338 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
11339 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
11341 if (put_user_u64 (swcr, arg2))
11342 goto efault;
11343 ret = 0;
11345 break;
11347 /* case GSI_IEEE_STATE_AT_SIGNAL:
11348 -- Not implemented in linux kernel.
11349 case GSI_UACPROC:
11350 -- Retrieves current unaligned access state; not much used.
11351 case GSI_PROC_TYPE:
11352 -- Retrieves implver information; surely not used.
11353 case GSI_GET_HWRPB:
11354 -- Grabs a copy of the HWRPB; surely not used.
11357 break;
11358 #endif
11359 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11360 /* Alpha specific */
11361 case TARGET_NR_osf_setsysinfo:
11362 ret = -TARGET_EOPNOTSUPP;
11363 switch (arg1) {
11364 case TARGET_SSI_IEEE_FP_CONTROL:
11366 uint64_t swcr, fpcr, orig_fpcr;
11368 if (get_user_u64 (swcr, arg2)) {
11369 goto efault;
11371 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11372 fpcr = orig_fpcr & FPCR_DYN_MASK;
11374 /* Copied from linux ieee_swcr_to_fpcr. */
11375 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
11376 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
11377 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
11378 | SWCR_TRAP_ENABLE_DZE
11379 | SWCR_TRAP_ENABLE_OVF)) << 48;
11380 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
11381 | SWCR_TRAP_ENABLE_INE)) << 57;
11382 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
11383 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
11385 cpu_alpha_store_fpcr(cpu_env, fpcr);
11386 ret = 0;
11388 break;
11390 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11392 uint64_t exc, fpcr, orig_fpcr;
11393 int si_code;
11395 if (get_user_u64(exc, arg2)) {
11396 goto efault;
11399 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11401 /* We only add to the exception status here. */
11402 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
11404 cpu_alpha_store_fpcr(cpu_env, fpcr);
11405 ret = 0;
11407 /* Old exceptions are not signaled. */
11408 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
11410 /* If any exceptions set by this call,
11411 and are unmasked, send a signal. */
11412 si_code = 0;
11413 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
11414 si_code = TARGET_FPE_FLTRES;
11416 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
11417 si_code = TARGET_FPE_FLTUND;
11419 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
11420 si_code = TARGET_FPE_FLTOVF;
11422 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
11423 si_code = TARGET_FPE_FLTDIV;
11425 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
11426 si_code = TARGET_FPE_FLTINV;
11428 if (si_code != 0) {
11429 target_siginfo_t info;
11430 info.si_signo = SIGFPE;
11431 info.si_errno = 0;
11432 info.si_code = si_code;
11433 info._sifields._sigfault._addr
11434 = ((CPUArchState *)cpu_env)->pc;
11435 queue_signal((CPUArchState *)cpu_env, info.si_signo,
11436 QEMU_SI_FAULT, &info);
11439 break;
11441 /* case SSI_NVPAIRS:
11442 -- Used with SSIN_UACPROC to enable unaligned accesses.
11443 case SSI_IEEE_STATE_AT_SIGNAL:
11444 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11445 -- Not implemented in linux kernel
11448 break;
11449 #endif
11450 #ifdef TARGET_NR_osf_sigprocmask
11451 /* Alpha specific. */
11452 case TARGET_NR_osf_sigprocmask:
11454 abi_ulong mask;
11455 int how;
11456 sigset_t set, oldset;
11458 switch(arg1) {
11459 case TARGET_SIG_BLOCK:
11460 how = SIG_BLOCK;
11461 break;
11462 case TARGET_SIG_UNBLOCK:
11463 how = SIG_UNBLOCK;
11464 break;
11465 case TARGET_SIG_SETMASK:
11466 how = SIG_SETMASK;
11467 break;
11468 default:
11469 ret = -TARGET_EINVAL;
11470 goto fail;
11472 mask = arg2;
11473 target_to_host_old_sigset(&set, &mask);
11474 ret = do_sigprocmask(how, &set, &oldset);
11475 if (!ret) {
11476 host_to_target_old_sigset(&mask, &oldset);
11477 ret = mask;
11480 break;
11481 #endif
11483 #ifdef TARGET_NR_getgid32
11484 case TARGET_NR_getgid32:
11485 ret = get_errno(getgid());
11486 break;
11487 #endif
11488 #ifdef TARGET_NR_geteuid32
11489 case TARGET_NR_geteuid32:
11490 ret = get_errno(geteuid());
11491 break;
11492 #endif
11493 #ifdef TARGET_NR_getegid32
11494 case TARGET_NR_getegid32:
11495 ret = get_errno(getegid());
11496 break;
11497 #endif
11498 #ifdef TARGET_NR_setreuid32
11499 case TARGET_NR_setreuid32:
11500 ret = get_errno(setreuid(arg1, arg2));
11501 break;
11502 #endif
11503 #ifdef TARGET_NR_setregid32
11504 case TARGET_NR_setregid32:
11505 ret = get_errno(setregid(arg1, arg2));
11506 break;
11507 #endif
11508 #ifdef TARGET_NR_getgroups32
11509 case TARGET_NR_getgroups32:
11511 int gidsetsize = arg1;
11512 uint32_t *target_grouplist;
11513 gid_t *grouplist;
11514 int i;
11516 grouplist = alloca(gidsetsize * sizeof(gid_t));
11517 ret = get_errno(getgroups(gidsetsize, grouplist));
11518 if (gidsetsize == 0)
11519 break;
11520 if (!is_error(ret)) {
11521 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11522 if (!target_grouplist) {
11523 ret = -TARGET_EFAULT;
11524 goto fail;
11526 for(i = 0;i < ret; i++)
11527 target_grouplist[i] = tswap32(grouplist[i]);
11528 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11531 break;
11532 #endif
11533 #ifdef TARGET_NR_setgroups32
11534 case TARGET_NR_setgroups32:
11536 int gidsetsize = arg1;
11537 uint32_t *target_grouplist;
11538 gid_t *grouplist;
11539 int i;
11541 grouplist = alloca(gidsetsize * sizeof(gid_t));
11542 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11543 if (!target_grouplist) {
11544 ret = -TARGET_EFAULT;
11545 goto fail;
11547 for(i = 0;i < gidsetsize; i++)
11548 grouplist[i] = tswap32(target_grouplist[i]);
11549 unlock_user(target_grouplist, arg2, 0);
11550 ret = get_errno(setgroups(gidsetsize, grouplist));
11552 break;
11553 #endif
11554 #ifdef TARGET_NR_fchown32
11555 case TARGET_NR_fchown32:
11556 ret = get_errno(fchown(arg1, arg2, arg3));
11557 break;
11558 #endif
11559 #ifdef TARGET_NR_setresuid32
11560 case TARGET_NR_setresuid32:
11561 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
11562 break;
11563 #endif
11564 #ifdef TARGET_NR_getresuid32
11565 case TARGET_NR_getresuid32:
11567 uid_t ruid, euid, suid;
11568 ret = get_errno(getresuid(&ruid, &euid, &suid));
11569 if (!is_error(ret)) {
11570 if (put_user_u32(ruid, arg1)
11571 || put_user_u32(euid, arg2)
11572 || put_user_u32(suid, arg3))
11573 goto efault;
11576 break;
11577 #endif
11578 #ifdef TARGET_NR_setresgid32
11579 case TARGET_NR_setresgid32:
11580 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
11581 break;
11582 #endif
11583 #ifdef TARGET_NR_getresgid32
11584 case TARGET_NR_getresgid32:
11586 gid_t rgid, egid, sgid;
11587 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11588 if (!is_error(ret)) {
11589 if (put_user_u32(rgid, arg1)
11590 || put_user_u32(egid, arg2)
11591 || put_user_u32(sgid, arg3))
11592 goto efault;
11595 break;
11596 #endif
11597 #ifdef TARGET_NR_chown32
11598 case TARGET_NR_chown32:
11599 if (!(p = lock_user_string(arg1)))
11600 goto efault;
11601 ret = get_errno(chown(p, arg2, arg3));
11602 unlock_user(p, arg1, 0);
11603 break;
11604 #endif
11605 #ifdef TARGET_NR_setuid32
11606 case TARGET_NR_setuid32:
11607 ret = get_errno(sys_setuid(arg1));
11608 break;
11609 #endif
11610 #ifdef TARGET_NR_setgid32
11611 case TARGET_NR_setgid32:
11612 ret = get_errno(sys_setgid(arg1));
11613 break;
11614 #endif
11615 #ifdef TARGET_NR_setfsuid32
11616 case TARGET_NR_setfsuid32:
11617 ret = get_errno(setfsuid(arg1));
11618 break;
11619 #endif
11620 #ifdef TARGET_NR_setfsgid32
11621 case TARGET_NR_setfsgid32:
11622 ret = get_errno(setfsgid(arg1));
11623 break;
11624 #endif
11626 case TARGET_NR_pivot_root:
11627 goto unimplemented;
11628 #ifdef TARGET_NR_mincore
11629 case TARGET_NR_mincore:
11631 void *a;
11632 ret = -TARGET_ENOMEM;
11633 a = lock_user(VERIFY_READ, arg1, arg2, 0);
11634 if (!a) {
11635 goto fail;
11637 ret = -TARGET_EFAULT;
11638 p = lock_user_string(arg3);
11639 if (!p) {
11640 goto mincore_fail;
11642 ret = get_errno(mincore(a, arg2, p));
11643 unlock_user(p, arg3, ret);
11644 mincore_fail:
11645 unlock_user(a, arg1, 0);
11647 break;
11648 #endif
11649 #ifdef TARGET_NR_arm_fadvise64_64
11650 case TARGET_NR_arm_fadvise64_64:
11651 /* arm_fadvise64_64 looks like fadvise64_64 but
11652 * with different argument order: fd, advice, offset, len
11653 * rather than the usual fd, offset, len, advice.
11654 * Note that offset and len are both 64-bit so appear as
11655 * pairs of 32-bit registers.
11657 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11658 target_offset64(arg5, arg6), arg2);
11659 ret = -host_to_target_errno(ret);
11660 break;
11661 #endif
11663 #if TARGET_ABI_BITS == 32
11665 #ifdef TARGET_NR_fadvise64_64
11666 case TARGET_NR_fadvise64_64:
11667 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11668 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11669 ret = arg2;
11670 arg2 = arg3;
11671 arg3 = arg4;
11672 arg4 = arg5;
11673 arg5 = arg6;
11674 arg6 = ret;
11675 #else
11676 /* 6 args: fd, offset (high, low), len (high, low), advice */
11677 if (regpairs_aligned(cpu_env, num)) {
11678 /* offset is in (3,4), len in (5,6) and advice in 7 */
11679 arg2 = arg3;
11680 arg3 = arg4;
11681 arg4 = arg5;
11682 arg5 = arg6;
11683 arg6 = arg7;
11685 #endif
11686 ret = -host_to_target_errno(posix_fadvise(arg1,
11687 target_offset64(arg2, arg3),
11688 target_offset64(arg4, arg5),
11689 arg6));
11690 break;
11691 #endif
11693 #ifdef TARGET_NR_fadvise64
11694 case TARGET_NR_fadvise64:
11695 /* 5 args: fd, offset (high, low), len, advice */
11696 if (regpairs_aligned(cpu_env, num)) {
11697 /* offset is in (3,4), len in 5 and advice in 6 */
11698 arg2 = arg3;
11699 arg3 = arg4;
11700 arg4 = arg5;
11701 arg5 = arg6;
11703 ret = -host_to_target_errno(posix_fadvise(arg1,
11704 target_offset64(arg2, arg3),
11705 arg4, arg5));
11706 break;
11707 #endif
11709 #else /* not a 32-bit ABI */
11710 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11711 #ifdef TARGET_NR_fadvise64_64
11712 case TARGET_NR_fadvise64_64:
11713 #endif
11714 #ifdef TARGET_NR_fadvise64
11715 case TARGET_NR_fadvise64:
11716 #endif
11717 #ifdef TARGET_S390X
11718 switch (arg4) {
11719 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11720 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11721 case 6: arg4 = POSIX_FADV_DONTNEED; break;
11722 case 7: arg4 = POSIX_FADV_NOREUSE; break;
11723 default: break;
11725 #endif
11726 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11727 break;
11728 #endif
11729 #endif /* end of 64-bit ABI fadvise handling */
11731 #ifdef TARGET_NR_madvise
11732 case TARGET_NR_madvise:
11733 /* A straight passthrough may not be safe because qemu sometimes
11734 turns private file-backed mappings into anonymous mappings.
11735 This will break MADV_DONTNEED.
11736 This is a hint, so ignoring and returning success is ok. */
11737 ret = get_errno(0);
11738 break;
11739 #endif
11740 #if TARGET_ABI_BITS == 32
11741 case TARGET_NR_fcntl64:
11743 int cmd;
11744 struct flock64 fl;
11745 from_flock64_fn *copyfrom = copy_from_user_flock64;
11746 to_flock64_fn *copyto = copy_to_user_flock64;
11748 #ifdef TARGET_ARM
11749 if (!((CPUARMState *)cpu_env)->eabi) {
11750 copyfrom = copy_from_user_oabi_flock64;
11751 copyto = copy_to_user_oabi_flock64;
11753 #endif
11755 cmd = target_to_host_fcntl_cmd(arg2);
11756 if (cmd == -TARGET_EINVAL) {
11757 ret = cmd;
11758 break;
11761 switch(arg2) {
11762 case TARGET_F_GETLK64:
11763 ret = copyfrom(&fl, arg3);
11764 if (ret) {
11765 break;
11767 ret = get_errno(fcntl(arg1, cmd, &fl));
11768 if (ret == 0) {
11769 ret = copyto(arg3, &fl);
11771 break;
11773 case TARGET_F_SETLK64:
11774 case TARGET_F_SETLKW64:
11775 ret = copyfrom(&fl, arg3);
11776 if (ret) {
11777 break;
11779 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11780 break;
11781 default:
11782 ret = do_fcntl(arg1, arg2, arg3);
11783 break;
11785 break;
11787 #endif
11788 #ifdef TARGET_NR_cacheflush
11789 case TARGET_NR_cacheflush:
11790 /* self-modifying code is handled automatically, so nothing needed */
11791 ret = 0;
11792 break;
11793 #endif
11794 #ifdef TARGET_NR_security
11795 case TARGET_NR_security:
11796 goto unimplemented;
11797 #endif
11798 #ifdef TARGET_NR_getpagesize
11799 case TARGET_NR_getpagesize:
11800 ret = TARGET_PAGE_SIZE;
11801 break;
11802 #endif
11803 case TARGET_NR_gettid:
11804 ret = get_errno(gettid());
11805 break;
11806 #ifdef TARGET_NR_readahead
11807 case TARGET_NR_readahead:
11808 #if TARGET_ABI_BITS == 32
11809 if (regpairs_aligned(cpu_env, num)) {
11810 arg2 = arg3;
11811 arg3 = arg4;
11812 arg4 = arg5;
11814 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11815 #else
11816 ret = get_errno(readahead(arg1, arg2, arg3));
11817 #endif
11818 break;
11819 #endif
11820 #ifdef CONFIG_ATTR
11821 #ifdef TARGET_NR_setxattr
11822 case TARGET_NR_listxattr:
11823 case TARGET_NR_llistxattr:
11825 void *p, *b = 0;
11826 if (arg2) {
11827 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11828 if (!b) {
11829 ret = -TARGET_EFAULT;
11830 break;
11833 p = lock_user_string(arg1);
11834 if (p) {
11835 if (num == TARGET_NR_listxattr) {
11836 ret = get_errno(listxattr(p, b, arg3));
11837 } else {
11838 ret = get_errno(llistxattr(p, b, arg3));
11840 } else {
11841 ret = -TARGET_EFAULT;
11843 unlock_user(p, arg1, 0);
11844 unlock_user(b, arg2, arg3);
11845 break;
11847 case TARGET_NR_flistxattr:
11849 void *b = 0;
11850 if (arg2) {
11851 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11852 if (!b) {
11853 ret = -TARGET_EFAULT;
11854 break;
11857 ret = get_errno(flistxattr(arg1, b, arg3));
11858 unlock_user(b, arg2, arg3);
11859 break;
11861 case TARGET_NR_setxattr:
11862 case TARGET_NR_lsetxattr:
11864 void *p, *n, *v = 0;
11865 if (arg3) {
11866 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11867 if (!v) {
11868 ret = -TARGET_EFAULT;
11869 break;
11872 p = lock_user_string(arg1);
11873 n = lock_user_string(arg2);
11874 if (p && n) {
11875 if (num == TARGET_NR_setxattr) {
11876 ret = get_errno(setxattr(p, n, v, arg4, arg5));
11877 } else {
11878 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11880 } else {
11881 ret = -TARGET_EFAULT;
11883 unlock_user(p, arg1, 0);
11884 unlock_user(n, arg2, 0);
11885 unlock_user(v, arg3, 0);
11887 break;
11888 case TARGET_NR_fsetxattr:
11890 void *n, *v = 0;
11891 if (arg3) {
11892 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11893 if (!v) {
11894 ret = -TARGET_EFAULT;
11895 break;
11898 n = lock_user_string(arg2);
11899 if (n) {
11900 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11901 } else {
11902 ret = -TARGET_EFAULT;
11904 unlock_user(n, arg2, 0);
11905 unlock_user(v, arg3, 0);
11907 break;
11908 case TARGET_NR_getxattr:
11909 case TARGET_NR_lgetxattr:
11911 void *p, *n, *v = 0;
11912 if (arg3) {
11913 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11914 if (!v) {
11915 ret = -TARGET_EFAULT;
11916 break;
11919 p = lock_user_string(arg1);
11920 n = lock_user_string(arg2);
11921 if (p && n) {
11922 if (num == TARGET_NR_getxattr) {
11923 ret = get_errno(getxattr(p, n, v, arg4));
11924 } else {
11925 ret = get_errno(lgetxattr(p, n, v, arg4));
11927 } else {
11928 ret = -TARGET_EFAULT;
11930 unlock_user(p, arg1, 0);
11931 unlock_user(n, arg2, 0);
11932 unlock_user(v, arg3, arg4);
11934 break;
11935 case TARGET_NR_fgetxattr:
11937 void *n, *v = 0;
11938 if (arg3) {
11939 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11940 if (!v) {
11941 ret = -TARGET_EFAULT;
11942 break;
11945 n = lock_user_string(arg2);
11946 if (n) {
11947 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11948 } else {
11949 ret = -TARGET_EFAULT;
11951 unlock_user(n, arg2, 0);
11952 unlock_user(v, arg3, arg4);
11954 break;
11955 case TARGET_NR_removexattr:
11956 case TARGET_NR_lremovexattr:
11958 void *p, *n;
11959 p = lock_user_string(arg1);
11960 n = lock_user_string(arg2);
11961 if (p && n) {
11962 if (num == TARGET_NR_removexattr) {
11963 ret = get_errno(removexattr(p, n));
11964 } else {
11965 ret = get_errno(lremovexattr(p, n));
11967 } else {
11968 ret = -TARGET_EFAULT;
11970 unlock_user(p, arg1, 0);
11971 unlock_user(n, arg2, 0);
11973 break;
11974 case TARGET_NR_fremovexattr:
11976 void *n;
11977 n = lock_user_string(arg2);
11978 if (n) {
11979 ret = get_errno(fremovexattr(arg1, n));
11980 } else {
11981 ret = -TARGET_EFAULT;
11983 unlock_user(n, arg2, 0);
11985 break;
11986 #endif
11987 #endif /* CONFIG_ATTR */
11988 #ifdef TARGET_NR_set_thread_area
11989 case TARGET_NR_set_thread_area:
11990 #if defined(TARGET_MIPS)
11991 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11992 ret = 0;
11993 break;
11994 #elif defined(TARGET_CRIS)
11995 if (arg1 & 0xff)
11996 ret = -TARGET_EINVAL;
11997 else {
11998 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11999 ret = 0;
12001 break;
12002 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12003 ret = do_set_thread_area(cpu_env, arg1);
12004 break;
12005 #elif defined(TARGET_M68K)
12007 TaskState *ts = cpu->opaque;
12008 ts->tp_value = arg1;
12009 ret = 0;
12010 break;
12012 #else
12013 goto unimplemented_nowarn;
12014 #endif
12015 #endif
12016 #ifdef TARGET_NR_get_thread_area
12017 case TARGET_NR_get_thread_area:
12018 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12019 ret = do_get_thread_area(cpu_env, arg1);
12020 break;
12021 #elif defined(TARGET_M68K)
12023 TaskState *ts = cpu->opaque;
12024 ret = ts->tp_value;
12025 break;
12027 #else
12028 goto unimplemented_nowarn;
12029 #endif
12030 #endif
12031 #ifdef TARGET_NR_getdomainname
12032 case TARGET_NR_getdomainname:
12033 goto unimplemented_nowarn;
12034 #endif
12036 #ifdef TARGET_NR_clock_settime
12037 case TARGET_NR_clock_settime:
12039 struct timespec ts;
12041 ret = target_to_host_timespec(&ts, arg2);
12042 if (!is_error(ret)) {
12043 ret = get_errno(clock_settime(arg1, &ts));
12045 break;
12047 #endif
12048 #ifdef TARGET_NR_clock_gettime
12049 case TARGET_NR_clock_gettime:
12051 struct timespec ts;
12052 ret = get_errno(clock_gettime(arg1, &ts));
12053 if (!is_error(ret)) {
12054 ret = host_to_target_timespec(arg2, &ts);
12056 break;
12058 #endif
12059 #ifdef TARGET_NR_clock_getres
12060 case TARGET_NR_clock_getres:
12062 struct timespec ts;
12063 ret = get_errno(clock_getres(arg1, &ts));
12064 if (!is_error(ret)) {
12065 host_to_target_timespec(arg2, &ts);
12067 break;
12069 #endif
12070 #ifdef TARGET_NR_clock_nanosleep
12071 case TARGET_NR_clock_nanosleep:
12073 struct timespec ts;
12074 target_to_host_timespec(&ts, arg3);
12075 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12076 &ts, arg4 ? &ts : NULL));
12077 if (arg4)
12078 host_to_target_timespec(arg4, &ts);
12080 #if defined(TARGET_PPC)
12081 /* clock_nanosleep is odd in that it returns positive errno values.
12082 * On PPC, CR0 bit 3 should be set in such a situation. */
12083 if (ret && ret != -TARGET_ERESTARTSYS) {
12084 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
12086 #endif
12087 break;
12089 #endif
12091 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12092 case TARGET_NR_set_tid_address:
12093 ret = get_errno(set_tid_address((int *)g2h(arg1)));
12094 break;
12095 #endif
12097 case TARGET_NR_tkill:
12098 ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12099 break;
12101 case TARGET_NR_tgkill:
12102 ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
12103 target_to_host_signal(arg3)));
12104 break;
12106 #ifdef TARGET_NR_set_robust_list
12107 case TARGET_NR_set_robust_list:
12108 case TARGET_NR_get_robust_list:
12109 /* The ABI for supporting robust futexes has userspace pass
12110 * the kernel a pointer to a linked list which is updated by
12111 * userspace after the syscall; the list is walked by the kernel
12112 * when the thread exits. Since the linked list in QEMU guest
12113 * memory isn't a valid linked list for the host and we have
12114 * no way to reliably intercept the thread-death event, we can't
12115 * support these. Silently return ENOSYS so that guest userspace
12116 * falls back to a non-robust futex implementation (which should
12117 * be OK except in the corner case of the guest crashing while
12118 * holding a mutex that is shared with another process via
12119 * shared memory).
12121 goto unimplemented_nowarn;
12122 #endif
12124 #if defined(TARGET_NR_utimensat)
12125 case TARGET_NR_utimensat:
12127 struct timespec *tsp, ts[2];
12128 if (!arg3) {
12129 tsp = NULL;
12130 } else {
12131 target_to_host_timespec(ts, arg3);
12132 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
12133 tsp = ts;
12135 if (!arg2)
12136 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12137 else {
12138 if (!(p = lock_user_string(arg2))) {
12139 ret = -TARGET_EFAULT;
12140 goto fail;
12142 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12143 unlock_user(p, arg2, 0);
12146 break;
12147 #endif
12148 case TARGET_NR_futex:
12149 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
12150 break;
12151 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12152 case TARGET_NR_inotify_init:
12153 ret = get_errno(sys_inotify_init());
12154 if (ret >= 0) {
12155 fd_trans_register(ret, &target_inotify_trans);
12157 break;
12158 #endif
12159 #ifdef CONFIG_INOTIFY1
12160 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12161 case TARGET_NR_inotify_init1:
12162 ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12163 fcntl_flags_tbl)));
12164 if (ret >= 0) {
12165 fd_trans_register(ret, &target_inotify_trans);
12167 break;
12168 #endif
12169 #endif
12170 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12171 case TARGET_NR_inotify_add_watch:
12172 p = lock_user_string(arg2);
12173 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12174 unlock_user(p, arg2, 0);
12175 break;
12176 #endif
12177 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12178 case TARGET_NR_inotify_rm_watch:
12179 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
12180 break;
12181 #endif
12183 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12184 case TARGET_NR_mq_open:
12186 struct mq_attr posix_mq_attr;
12187 struct mq_attr *pposix_mq_attr;
12188 int host_flags;
12190 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12191 pposix_mq_attr = NULL;
12192 if (arg4) {
12193 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12194 goto efault;
12196 pposix_mq_attr = &posix_mq_attr;
12198 p = lock_user_string(arg1 - 1);
12199 if (!p) {
12200 goto efault;
12202 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12203 unlock_user (p, arg1, 0);
12205 break;
12207 case TARGET_NR_mq_unlink:
12208 p = lock_user_string(arg1 - 1);
12209 if (!p) {
12210 ret = -TARGET_EFAULT;
12211 break;
12213 ret = get_errno(mq_unlink(p));
12214 unlock_user (p, arg1, 0);
12215 break;
12217 case TARGET_NR_mq_timedsend:
12219 struct timespec ts;
12221 p = lock_user (VERIFY_READ, arg2, arg3, 1);
12222 if (arg5 != 0) {
12223 target_to_host_timespec(&ts, arg5);
12224 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12225 host_to_target_timespec(arg5, &ts);
12226 } else {
12227 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12229 unlock_user (p, arg2, arg3);
12231 break;
12233 case TARGET_NR_mq_timedreceive:
12235 struct timespec ts;
12236 unsigned int prio;
12238 p = lock_user (VERIFY_READ, arg2, arg3, 1);
12239 if (arg5 != 0) {
12240 target_to_host_timespec(&ts, arg5);
12241 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12242 &prio, &ts));
12243 host_to_target_timespec(arg5, &ts);
12244 } else {
12245 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12246 &prio, NULL));
12248 unlock_user (p, arg2, arg3);
12249 if (arg4 != 0)
12250 put_user_u32(prio, arg4);
12252 break;
12254 /* Not implemented for now... */
12255 /* case TARGET_NR_mq_notify: */
12256 /* break; */
12258 case TARGET_NR_mq_getsetattr:
12260 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12261 ret = 0;
12262 if (arg2 != 0) {
12263 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12264 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12265 &posix_mq_attr_out));
12266 } else if (arg3 != 0) {
12267 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12269 if (ret == 0 && arg3 != 0) {
12270 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12273 break;
12274 #endif
12276 #ifdef CONFIG_SPLICE
12277 #ifdef TARGET_NR_tee
12278 case TARGET_NR_tee:
12280 ret = get_errno(tee(arg1,arg2,arg3,arg4));
12282 break;
12283 #endif
12284 #ifdef TARGET_NR_splice
12285 case TARGET_NR_splice:
12287 loff_t loff_in, loff_out;
12288 loff_t *ploff_in = NULL, *ploff_out = NULL;
12289 if (arg2) {
12290 if (get_user_u64(loff_in, arg2)) {
12291 goto efault;
12293 ploff_in = &loff_in;
12295 if (arg4) {
12296 if (get_user_u64(loff_out, arg4)) {
12297 goto efault;
12299 ploff_out = &loff_out;
12301 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12302 if (arg2) {
12303 if (put_user_u64(loff_in, arg2)) {
12304 goto efault;
12307 if (arg4) {
12308 if (put_user_u64(loff_out, arg4)) {
12309 goto efault;
12313 break;
12314 #endif
12315 #ifdef TARGET_NR_vmsplice
12316 case TARGET_NR_vmsplice:
12318 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12319 if (vec != NULL) {
12320 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12321 unlock_iovec(vec, arg2, arg3, 0);
12322 } else {
12323 ret = -host_to_target_errno(errno);
12326 break;
12327 #endif
12328 #endif /* CONFIG_SPLICE */
12329 #ifdef CONFIG_EVENTFD
12330 #if defined(TARGET_NR_eventfd)
12331 case TARGET_NR_eventfd:
12332 ret = get_errno(eventfd(arg1, 0));
12333 if (ret >= 0) {
12334 fd_trans_register(ret, &target_eventfd_trans);
12336 break;
12337 #endif
12338 #if defined(TARGET_NR_eventfd2)
12339 case TARGET_NR_eventfd2:
12341 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
12342 if (arg2 & TARGET_O_NONBLOCK) {
12343 host_flags |= O_NONBLOCK;
12345 if (arg2 & TARGET_O_CLOEXEC) {
12346 host_flags |= O_CLOEXEC;
12348 ret = get_errno(eventfd(arg1, host_flags));
12349 if (ret >= 0) {
12350 fd_trans_register(ret, &target_eventfd_trans);
12352 break;
12354 #endif
12355 #endif /* CONFIG_EVENTFD */
12356 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12357 case TARGET_NR_fallocate:
12358 #if TARGET_ABI_BITS == 32
12359 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12360 target_offset64(arg5, arg6)));
12361 #else
12362 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12363 #endif
12364 break;
12365 #endif
12366 #if defined(CONFIG_SYNC_FILE_RANGE)
12367 #if defined(TARGET_NR_sync_file_range)
12368 case TARGET_NR_sync_file_range:
12369 #if TARGET_ABI_BITS == 32
12370 #if defined(TARGET_MIPS)
12371 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12372 target_offset64(arg5, arg6), arg7));
12373 #else
12374 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12375 target_offset64(arg4, arg5), arg6));
12376 #endif /* !TARGET_MIPS */
12377 #else
12378 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12379 #endif
12380 break;
12381 #endif
12382 #if defined(TARGET_NR_sync_file_range2)
12383 case TARGET_NR_sync_file_range2:
12384 /* This is like sync_file_range but the arguments are reordered */
12385 #if TARGET_ABI_BITS == 32
12386 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12387 target_offset64(arg5, arg6), arg2));
12388 #else
12389 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12390 #endif
12391 break;
12392 #endif
12393 #endif
12394 #if defined(TARGET_NR_signalfd4)
12395 case TARGET_NR_signalfd4:
12396 ret = do_signalfd4(arg1, arg2, arg4);
12397 break;
12398 #endif
12399 #if defined(TARGET_NR_signalfd)
12400 case TARGET_NR_signalfd:
12401 ret = do_signalfd4(arg1, arg2, 0);
12402 break;
12403 #endif
12404 #if defined(CONFIG_EPOLL)
12405 #if defined(TARGET_NR_epoll_create)
12406 case TARGET_NR_epoll_create:
12407 ret = get_errno(epoll_create(arg1));
12408 break;
12409 #endif
12410 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12411 case TARGET_NR_epoll_create1:
12412 ret = get_errno(epoll_create1(arg1));
12413 break;
12414 #endif
12415 #if defined(TARGET_NR_epoll_ctl)
12416 case TARGET_NR_epoll_ctl:
12418 struct epoll_event ep;
12419 struct epoll_event *epp = 0;
12420 if (arg4) {
12421 struct target_epoll_event *target_ep;
12422 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12423 goto efault;
12425 ep.events = tswap32(target_ep->events);
12426 /* The epoll_data_t union is just opaque data to the kernel,
12427 * so we transfer all 64 bits across and need not worry what
12428 * actual data type it is.
12430 ep.data.u64 = tswap64(target_ep->data.u64);
12431 unlock_user_struct(target_ep, arg4, 0);
12432 epp = &ep;
12434 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12435 break;
12437 #endif
#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        /* Bound maxevents so the host-side event buffer allocation below
         * cannot be driven arbitrarily large by the guest. */
        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            ret = -TARGET_EINVAL;
            break;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            goto efault;
        }

        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            ret = -TARGET_ENOMEM;
            break;
        }

        /* Both syscalls share this code path; dispatch on the real syscall
         * number to decide whether a sigmask is involved. */
        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                if (arg6 != sizeof(target_sigset_t)) {
                    ret = -TARGET_EINVAL;
                    break;
                }

                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    ret = -TARGET_EFAULT;
                    break;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            /* epoll_wait == epoll_pwait with no signal mask. */
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            /* On success ret is the number of ready events; byteswap each
             * back into the guest's buffer. */
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        break;
    }
#endif
12525 #endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);
        /* arg3 (new limit) is optional: NULL means "query only". */
        if (arg3) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                goto efault;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        /* arg4 (old limit) is also optional; copy it back on success only. */
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                goto efault;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        break;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        /* arg1 = guest buffer, arg2 = its length; the host call writes
         * straight into the locked guest memory. */
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        break;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            /* Guest address arg6 is unreadable: raise SEGV at that address,
             * mirroring what the kernel would deliver. */
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo,
                         QEMU_SI_FAULT, &info);
            ret = 0xdeadbeef;
        }
        /* Compare-and-swap: store arg1 only if *arg6 still equals arg2;
         * always return the value that was observed. */
        if (mem_value == arg2)
            put_user_u32(arg1, arg6);
        ret = mem_value;
        break;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
    {
        /* Like the kernel implementation and the qemu arm barrier, no-op this? */
        ret = 0;
        break;
    }
#endif
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        /* Host timers are tracked in the fixed g_posix_timers table; a slot
         * index doubles as the guest-visible timer id (tagged below). */
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers  + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    break;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                /* Return TIMER_MAGIC | index so get_timer_id() can later
                 * validate guest-supplied timer ids. */
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    goto efault;
                }
            }
        }
        break;
    }
#endif
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            /* get_timer_id() already returned a target errno. */
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                goto efault;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            /* old_value is optional; copy it out only when requested. */
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                goto efault;
            }
        }
        break;
    }
#endif
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            /* get_timer_id() already returned a target errno. */
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        break;
    }
#endif
#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            /* get_timer_id() already returned a target errno. */
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        fd_trans_unregister(ret);
        break;
    }
#endif
#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            /* get_timer_id() already returned a target errno. */
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            /* Free the slot so next_free_host_timer() can reuse it. */
            g_posix_timers[timerid] = 0;
        }
        break;
    }
#endif
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        /* arg2 carries TFD_* flags; translate them via the fcntl flag table. */
        ret = get_errno(timerfd_create(arg1,
                        target_to_host_bitmask(arg2, fcntl_flags_tbl)));
        break;
#endif

#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
    {
        struct itimerspec its_curr;

        ret = get_errno(timerfd_gettime(arg1, &its_curr));

        if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
            goto efault;
        }
        break;
    }
#endif

#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
    {
        struct itimerspec its_new, its_old, *p_new;

        /* new_value (arg3) may be NULL; convert only when supplied. */
        if (arg3) {
            if (target_to_host_itimerspec(&its_new, arg3)) {
                goto efault;
            }
            p_new = &its_new;
        } else {
            p_new = NULL;
        }

        ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

        /* old_value (arg4) is optional as well. */
        if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
            goto efault;
        }
        break;
    }
#endif
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        /* Arguments are plain integers (which, who); pass straight through. */
        ret = get_errno(ioprio_get(arg1, arg2));
        break;
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        ret = get_errno(ioprio_set(arg1, arg2, arg3));
        break;
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        ret = get_errno(setns(arg1, arg2));
        break;
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        ret = get_errno(unshare(arg1));
        break;
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    case TARGET_NR_kcmp:
        ret = get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
        break;
#endif
12792 default:
12793 unimplemented:
12794 qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
12795 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
12796 unimplemented_nowarn:
12797 #endif
12798 ret = -TARGET_ENOSYS;
12799 break;
12801 fail:
12802 #ifdef DEBUG
12803 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
12804 #endif
12805 if(do_strace)
12806 print_syscall_ret(num, ret);
12807 trace_guest_user_syscall_ret(cpu, num, ret);
12808 return ret;
12809 efault:
12810 ret = -TARGET_EFAULT;
12811 goto fail;