vfio/quirks: Enable ioeventfd quirks to be handled by vfio directly
[qemu/ar7.git] / linux-user / syscall.c
blob7b9ac3b408605dcd76ead4cad35341f90f223f28
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/uio.h>
42 #include <poll.h>
43 #include <sys/times.h>
44 #include <sys/shm.h>
45 #include <sys/sem.h>
46 #include <sys/statfs.h>
47 #include <utime.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef TARGET_GPROF
63 #include <sys/gmon.h>
64 #endif
65 #ifdef CONFIG_EVENTFD
66 #include <sys/eventfd.h>
67 #endif
68 #ifdef CONFIG_EPOLL
69 #include <sys/epoll.h>
70 #endif
71 #ifdef CONFIG_ATTR
72 #include "qemu/xattr.h"
73 #endif
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
76 #endif
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #include <linux/vt.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #include <linux/if_bridge.h>
108 #endif
109 #include <linux/audit.h>
110 #include "linux_loop.h"
111 #include "uname.h"
113 #include "qemu.h"
115 #ifndef CLONE_IO
116 #define CLONE_IO 0x80000000 /* Clone io context */
117 #endif
119 /* We can't directly call the host clone syscall, because this will
120 * badly confuse libc (breaking mutexes, for example). So we must
121 * divide clone flags into:
122 * * flag combinations that look like pthread_create()
123 * * flag combinations that look like fork()
124 * * flags we can implement within QEMU itself
125 * * flags we can't support and will return an error for
127 /* For thread creation, all these flags must be present; for
128 * fork, none must be present.
130 #define CLONE_THREAD_FLAGS \
131 (CLONE_VM | CLONE_FS | CLONE_FILES | \
132 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
134 /* These flags are ignored:
135 * CLONE_DETACHED is now ignored by the kernel;
136 * CLONE_IO is just an optimisation hint to the I/O scheduler
138 #define CLONE_IGNORED_FLAGS \
139 (CLONE_DETACHED | CLONE_IO)
141 /* Flags for fork which we can implement within QEMU itself */
142 #define CLONE_OPTIONAL_FORK_FLAGS \
143 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
144 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
146 /* Flags for thread creation which we can implement within QEMU itself */
147 #define CLONE_OPTIONAL_THREAD_FLAGS \
148 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
149 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
151 #define CLONE_INVALID_FORK_FLAGS \
152 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
154 #define CLONE_INVALID_THREAD_FLAGS \
155 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
156 CLONE_IGNORED_FLAGS))
158 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
159 * have almost all been allocated. We cannot support any of
160 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
161 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
162 * The checks against the invalid thread masks above will catch these.
163 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
166 //#define DEBUG
167 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
168 * once. This exercises the codepaths for restart.
170 //#define DEBUG_ERESTARTSYS
172 //#include <linux/msdos_fs.h>
173 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
174 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
176 #undef _syscall0
177 #undef _syscall1
178 #undef _syscall2
179 #undef _syscall3
180 #undef _syscall4
181 #undef _syscall5
182 #undef _syscall6
184 #define _syscall0(type,name) \
185 static type name (void) \
187 return syscall(__NR_##name); \
190 #define _syscall1(type,name,type1,arg1) \
191 static type name (type1 arg1) \
193 return syscall(__NR_##name, arg1); \
196 #define _syscall2(type,name,type1,arg1,type2,arg2) \
197 static type name (type1 arg1,type2 arg2) \
199 return syscall(__NR_##name, arg1, arg2); \
202 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
203 static type name (type1 arg1,type2 arg2,type3 arg3) \
205 return syscall(__NR_##name, arg1, arg2, arg3); \
208 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
209 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
211 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
214 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
215 type5,arg5) \
216 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
218 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
222 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
223 type5,arg5,type6,arg6) \
224 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
225 type6 arg6) \
227 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
231 #define __NR_sys_uname __NR_uname
232 #define __NR_sys_getcwd1 __NR_getcwd
233 #define __NR_sys_getdents __NR_getdents
234 #define __NR_sys_getdents64 __NR_getdents64
235 #define __NR_sys_getpriority __NR_getpriority
236 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
237 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
238 #define __NR_sys_syslog __NR_syslog
239 #define __NR_sys_futex __NR_futex
240 #define __NR_sys_inotify_init __NR_inotify_init
241 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
242 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
244 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
245 #define __NR__llseek __NR_lseek
246 #endif
248 /* Newer kernel ports have llseek() instead of _llseek() */
249 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
250 #define TARGET_NR__llseek TARGET_NR_llseek
251 #endif
#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/*
 * Fallback for hosts whose libc/kernel headers lack __NR_gettid.
 * This replaces the host gettid() and therefore must return a host
 * errno value.
 */
static int gettid(void)
{
    return -ENOSYS;
}
#endif
263 /* For the 64-bit guest on 32-bit host case we must emulate
264 * getdents using getdents64, because otherwise the host
265 * might hand us back more dirent records than we can fit
266 * into the guest buffer after structure format conversion.
267 * Otherwise we emulate getdents with getdents if the host has it.
269 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
270 #define EMULATE_GETDENTS_WITH_GETDENTS
271 #endif
273 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
274 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
275 #endif
276 #if (defined(TARGET_NR_getdents) && \
277 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
278 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
279 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
280 #endif
281 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
282 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
283 loff_t *, res, uint, wh);
284 #endif
285 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
286 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
287 siginfo_t *, uinfo)
288 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
289 #ifdef __NR_exit_group
290 _syscall1(int,exit_group,int,error_code)
291 #endif
292 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
293 _syscall1(int,set_tid_address,int *,tidptr)
294 #endif
295 #if defined(TARGET_NR_futex) && defined(__NR_futex)
296 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
297 const struct timespec *,timeout,int *,uaddr2,int,val3)
298 #endif
299 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
300 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
301 unsigned long *, user_mask_ptr);
302 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
303 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
304 unsigned long *, user_mask_ptr);
305 #define __NR_sys_getcpu __NR_getcpu
306 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
307 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
308 void *, arg);
309 _syscall2(int, capget, struct __user_cap_header_struct *, header,
310 struct __user_cap_data_struct *, data);
311 _syscall2(int, capset, struct __user_cap_header_struct *, header,
312 struct __user_cap_data_struct *, data);
313 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
314 _syscall2(int, ioprio_get, int, which, int, who)
315 #endif
316 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
317 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
318 #endif
319 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
320 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
321 #endif
323 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
324 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
325 unsigned long, idx1, unsigned long, idx2)
326 #endif
328 static bitmask_transtbl fcntl_flags_tbl[] = {
329 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
330 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
331 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
332 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
333 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
334 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
335 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
336 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
337 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
338 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
339 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
340 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
341 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
342 #if defined(O_DIRECT)
343 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
344 #endif
345 #if defined(O_NOATIME)
346 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
347 #endif
348 #if defined(O_CLOEXEC)
349 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
350 #endif
351 #if defined(O_PATH)
352 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
353 #endif
354 #if defined(O_TMPFILE)
355 { TARGET_O_TMPFILE, TARGET_O_TMPFILE, O_TMPFILE, O_TMPFILE },
356 #endif
357 /* Don't terminate the list prematurely on 64-bit host+guest. */
358 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
359 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
360 #endif
361 { 0, 0, 0, 0 }
364 enum {
365 QEMU_IFLA_BR_UNSPEC,
366 QEMU_IFLA_BR_FORWARD_DELAY,
367 QEMU_IFLA_BR_HELLO_TIME,
368 QEMU_IFLA_BR_MAX_AGE,
369 QEMU_IFLA_BR_AGEING_TIME,
370 QEMU_IFLA_BR_STP_STATE,
371 QEMU_IFLA_BR_PRIORITY,
372 QEMU_IFLA_BR_VLAN_FILTERING,
373 QEMU_IFLA_BR_VLAN_PROTOCOL,
374 QEMU_IFLA_BR_GROUP_FWD_MASK,
375 QEMU_IFLA_BR_ROOT_ID,
376 QEMU_IFLA_BR_BRIDGE_ID,
377 QEMU_IFLA_BR_ROOT_PORT,
378 QEMU_IFLA_BR_ROOT_PATH_COST,
379 QEMU_IFLA_BR_TOPOLOGY_CHANGE,
380 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
381 QEMU_IFLA_BR_HELLO_TIMER,
382 QEMU_IFLA_BR_TCN_TIMER,
383 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
384 QEMU_IFLA_BR_GC_TIMER,
385 QEMU_IFLA_BR_GROUP_ADDR,
386 QEMU_IFLA_BR_FDB_FLUSH,
387 QEMU_IFLA_BR_MCAST_ROUTER,
388 QEMU_IFLA_BR_MCAST_SNOOPING,
389 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
390 QEMU_IFLA_BR_MCAST_QUERIER,
391 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
392 QEMU_IFLA_BR_MCAST_HASH_MAX,
393 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
394 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
395 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
396 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
397 QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
398 QEMU_IFLA_BR_MCAST_QUERY_INTVL,
399 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
400 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
401 QEMU_IFLA_BR_NF_CALL_IPTABLES,
402 QEMU_IFLA_BR_NF_CALL_IP6TABLES,
403 QEMU_IFLA_BR_NF_CALL_ARPTABLES,
404 QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
405 QEMU_IFLA_BR_PAD,
406 QEMU_IFLA_BR_VLAN_STATS_ENABLED,
407 QEMU_IFLA_BR_MCAST_STATS_ENABLED,
408 QEMU_IFLA_BR_MCAST_IGMP_VERSION,
409 QEMU_IFLA_BR_MCAST_MLD_VERSION,
410 QEMU___IFLA_BR_MAX,
413 enum {
414 QEMU_IFLA_UNSPEC,
415 QEMU_IFLA_ADDRESS,
416 QEMU_IFLA_BROADCAST,
417 QEMU_IFLA_IFNAME,
418 QEMU_IFLA_MTU,
419 QEMU_IFLA_LINK,
420 QEMU_IFLA_QDISC,
421 QEMU_IFLA_STATS,
422 QEMU_IFLA_COST,
423 QEMU_IFLA_PRIORITY,
424 QEMU_IFLA_MASTER,
425 QEMU_IFLA_WIRELESS,
426 QEMU_IFLA_PROTINFO,
427 QEMU_IFLA_TXQLEN,
428 QEMU_IFLA_MAP,
429 QEMU_IFLA_WEIGHT,
430 QEMU_IFLA_OPERSTATE,
431 QEMU_IFLA_LINKMODE,
432 QEMU_IFLA_LINKINFO,
433 QEMU_IFLA_NET_NS_PID,
434 QEMU_IFLA_IFALIAS,
435 QEMU_IFLA_NUM_VF,
436 QEMU_IFLA_VFINFO_LIST,
437 QEMU_IFLA_STATS64,
438 QEMU_IFLA_VF_PORTS,
439 QEMU_IFLA_PORT_SELF,
440 QEMU_IFLA_AF_SPEC,
441 QEMU_IFLA_GROUP,
442 QEMU_IFLA_NET_NS_FD,
443 QEMU_IFLA_EXT_MASK,
444 QEMU_IFLA_PROMISCUITY,
445 QEMU_IFLA_NUM_TX_QUEUES,
446 QEMU_IFLA_NUM_RX_QUEUES,
447 QEMU_IFLA_CARRIER,
448 QEMU_IFLA_PHYS_PORT_ID,
449 QEMU_IFLA_CARRIER_CHANGES,
450 QEMU_IFLA_PHYS_SWITCH_ID,
451 QEMU_IFLA_LINK_NETNSID,
452 QEMU_IFLA_PHYS_PORT_NAME,
453 QEMU_IFLA_PROTO_DOWN,
454 QEMU_IFLA_GSO_MAX_SEGS,
455 QEMU_IFLA_GSO_MAX_SIZE,
456 QEMU_IFLA_PAD,
457 QEMU_IFLA_XDP,
458 QEMU_IFLA_EVENT,
459 QEMU_IFLA_NEW_NETNSID,
460 QEMU_IFLA_IF_NETNSID,
461 QEMU_IFLA_CARRIER_UP_COUNT,
462 QEMU_IFLA_CARRIER_DOWN_COUNT,
463 QEMU_IFLA_NEW_IFINDEX,
464 QEMU___IFLA_MAX
467 enum {
468 QEMU_IFLA_BRPORT_UNSPEC,
469 QEMU_IFLA_BRPORT_STATE,
470 QEMU_IFLA_BRPORT_PRIORITY,
471 QEMU_IFLA_BRPORT_COST,
472 QEMU_IFLA_BRPORT_MODE,
473 QEMU_IFLA_BRPORT_GUARD,
474 QEMU_IFLA_BRPORT_PROTECT,
475 QEMU_IFLA_BRPORT_FAST_LEAVE,
476 QEMU_IFLA_BRPORT_LEARNING,
477 QEMU_IFLA_BRPORT_UNICAST_FLOOD,
478 QEMU_IFLA_BRPORT_PROXYARP,
479 QEMU_IFLA_BRPORT_LEARNING_SYNC,
480 QEMU_IFLA_BRPORT_PROXYARP_WIFI,
481 QEMU_IFLA_BRPORT_ROOT_ID,
482 QEMU_IFLA_BRPORT_BRIDGE_ID,
483 QEMU_IFLA_BRPORT_DESIGNATED_PORT,
484 QEMU_IFLA_BRPORT_DESIGNATED_COST,
485 QEMU_IFLA_BRPORT_ID,
486 QEMU_IFLA_BRPORT_NO,
487 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
488 QEMU_IFLA_BRPORT_CONFIG_PENDING,
489 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
490 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
491 QEMU_IFLA_BRPORT_HOLD_TIMER,
492 QEMU_IFLA_BRPORT_FLUSH,
493 QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
494 QEMU_IFLA_BRPORT_PAD,
495 QEMU_IFLA_BRPORT_MCAST_FLOOD,
496 QEMU_IFLA_BRPORT_MCAST_TO_UCAST,
497 QEMU_IFLA_BRPORT_VLAN_TUNNEL,
498 QEMU_IFLA_BRPORT_BCAST_FLOOD,
499 QEMU_IFLA_BRPORT_GROUP_FWD_MASK,
500 QEMU_IFLA_BRPORT_NEIGH_SUPPRESS,
501 QEMU___IFLA_BRPORT_MAX
504 enum {
505 QEMU_IFLA_INFO_UNSPEC,
506 QEMU_IFLA_INFO_KIND,
507 QEMU_IFLA_INFO_DATA,
508 QEMU_IFLA_INFO_XSTATS,
509 QEMU_IFLA_INFO_SLAVE_KIND,
510 QEMU_IFLA_INFO_SLAVE_DATA,
511 QEMU___IFLA_INFO_MAX,
514 enum {
515 QEMU_IFLA_INET_UNSPEC,
516 QEMU_IFLA_INET_CONF,
517 QEMU___IFLA_INET_MAX,
520 enum {
521 QEMU_IFLA_INET6_UNSPEC,
522 QEMU_IFLA_INET6_FLAGS,
523 QEMU_IFLA_INET6_CONF,
524 QEMU_IFLA_INET6_STATS,
525 QEMU_IFLA_INET6_MCAST,
526 QEMU_IFLA_INET6_CACHEINFO,
527 QEMU_IFLA_INET6_ICMP6STATS,
528 QEMU_IFLA_INET6_TOKEN,
529 QEMU_IFLA_INET6_ADDR_GEN_MODE,
530 QEMU___IFLA_INET6_MAX
533 enum {
534 QEMU_IFLA_XDP_UNSPEC,
535 QEMU_IFLA_XDP_FD,
536 QEMU_IFLA_XDP_ATTACHED,
537 QEMU_IFLA_XDP_FLAGS,
538 QEMU_IFLA_XDP_PROG_ID,
539 QEMU___IFLA_XDP_MAX,
542 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
543 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
544 typedef struct TargetFdTrans {
545 TargetFdDataFunc host_to_target_data;
546 TargetFdDataFunc target_to_host_data;
547 TargetFdAddrFunc target_to_host_addr;
548 } TargetFdTrans;
550 static TargetFdTrans **target_fd_trans;
552 static unsigned int target_fd_max;
554 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
556 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
557 return target_fd_trans[fd]->target_to_host_data;
559 return NULL;
562 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
564 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
565 return target_fd_trans[fd]->host_to_target_data;
567 return NULL;
570 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
572 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
573 return target_fd_trans[fd]->target_to_host_addr;
575 return NULL;
578 static void fd_trans_register(int fd, TargetFdTrans *trans)
580 unsigned int oldmax;
582 if (fd >= target_fd_max) {
583 oldmax = target_fd_max;
584 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
585 target_fd_trans = g_renew(TargetFdTrans *,
586 target_fd_trans, target_fd_max);
587 memset((void *)(target_fd_trans + oldmax), 0,
588 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
590 target_fd_trans[fd] = trans;
593 static void fd_trans_unregister(int fd)
595 if (fd >= 0 && fd < target_fd_max) {
596 target_fd_trans[fd] = NULL;
600 static void fd_trans_dup(int oldfd, int newfd)
602 fd_trans_unregister(newfd);
603 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
604 fd_trans_register(newfd, target_fd_trans[oldfd]);
/*
 * getcwd() wrapper matching the kernel syscall convention: on success
 * return the number of bytes stored (path plus NUL terminator), on
 * failure return -1 with errno set by getcwd().
 */
static int sys_getcwd1(char *buf, size_t size)
{
    if (!getcwd(buf, size)) {
        /* getcwd() sets errno */
        return -1;
    }
    return strlen(buf) + 1;
}
#ifdef TARGET_NR_utimensat
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int, sys_utimensat, int, dirfd, const char *, pathname,
          const struct timespec *, tsp, int, flags)
#else
/* Host kernel lacks utimensat: report the operation as unsupported. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
/*
 * Host kernel lacks renameat2: fall back to plain renameat() when no
 * flags are requested, otherwise report the operation as unsupported.
 */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags != 0) {
        errno = ENOSYS;
        return -1;
    }
    return renameat(oldfd, old, newfd, new);
}
#endif
#endif /* TARGET_NR_renameat2 */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper so the syscall table can reference a uniform sys_* name. */
static int sys_inotify_init(void)
{
    return inotify_init();
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
687 #if defined(TARGET_NR_prlimit64)
688 #ifndef __NR_prlimit64
689 # define __NR_prlimit64 -1
690 #endif
691 #define __NR_sys_prlimit64 __NR_prlimit64
692 /* The glibc rlimit structure may not be that used by the underlying syscall */
693 struct host_rlimit64 {
694 uint64_t rlim_cur;
695 uint64_t rlim_max;
697 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
698 const struct host_rlimit64 *, new_limit,
699 struct host_rlimit64 *, old_limit)
700 #endif
703 #if defined(TARGET_NR_timer_create)
704 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
705 static timer_t g_posix_timers[32] = { 0, } ;
707 static inline int next_free_host_timer(void)
709 int k ;
710 /* FIXME: Does finding the next free slot require a lock? */
711 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
712 if (g_posix_timers[k] == 0) {
713 g_posix_timers[k] = (timer_t) 1;
714 return k;
717 return -1;
719 #endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env, int num)
{
    return ((CPUARMState *)cpu_env)->eabi == 1;
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_SH4)
/* SH4 doesn't align register pairs, except for p{read,write}64 */
static inline int regpairs_aligned(void *cpu_env, int num)
{
    return num == TARGET_NR_pread64 || num == TARGET_NR_pwrite64;
}
#elif defined(TARGET_XTENSA)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
#endif
753 #define ERRNO_TABLE_SIZE 1200
755 /* target_to_host_errno_table[] is initialized from
756 * host_to_target_errno_table[] in syscall_init(). */
757 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
761 * This list is the union of errno values overridden in asm-<arch>/errno.h
762 * minus the errnos that are not actually generic to all archs.
764 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
765 [EAGAIN] = TARGET_EAGAIN,
766 [EIDRM] = TARGET_EIDRM,
767 [ECHRNG] = TARGET_ECHRNG,
768 [EL2NSYNC] = TARGET_EL2NSYNC,
769 [EL3HLT] = TARGET_EL3HLT,
770 [EL3RST] = TARGET_EL3RST,
771 [ELNRNG] = TARGET_ELNRNG,
772 [EUNATCH] = TARGET_EUNATCH,
773 [ENOCSI] = TARGET_ENOCSI,
774 [EL2HLT] = TARGET_EL2HLT,
775 [EDEADLK] = TARGET_EDEADLK,
776 [ENOLCK] = TARGET_ENOLCK,
777 [EBADE] = TARGET_EBADE,
778 [EBADR] = TARGET_EBADR,
779 [EXFULL] = TARGET_EXFULL,
780 [ENOANO] = TARGET_ENOANO,
781 [EBADRQC] = TARGET_EBADRQC,
782 [EBADSLT] = TARGET_EBADSLT,
783 [EBFONT] = TARGET_EBFONT,
784 [ENOSTR] = TARGET_ENOSTR,
785 [ENODATA] = TARGET_ENODATA,
786 [ETIME] = TARGET_ETIME,
787 [ENOSR] = TARGET_ENOSR,
788 [ENONET] = TARGET_ENONET,
789 [ENOPKG] = TARGET_ENOPKG,
790 [EREMOTE] = TARGET_EREMOTE,
791 [ENOLINK] = TARGET_ENOLINK,
792 [EADV] = TARGET_EADV,
793 [ESRMNT] = TARGET_ESRMNT,
794 [ECOMM] = TARGET_ECOMM,
795 [EPROTO] = TARGET_EPROTO,
796 [EDOTDOT] = TARGET_EDOTDOT,
797 [EMULTIHOP] = TARGET_EMULTIHOP,
798 [EBADMSG] = TARGET_EBADMSG,
799 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
800 [EOVERFLOW] = TARGET_EOVERFLOW,
801 [ENOTUNIQ] = TARGET_ENOTUNIQ,
802 [EBADFD] = TARGET_EBADFD,
803 [EREMCHG] = TARGET_EREMCHG,
804 [ELIBACC] = TARGET_ELIBACC,
805 [ELIBBAD] = TARGET_ELIBBAD,
806 [ELIBSCN] = TARGET_ELIBSCN,
807 [ELIBMAX] = TARGET_ELIBMAX,
808 [ELIBEXEC] = TARGET_ELIBEXEC,
809 [EILSEQ] = TARGET_EILSEQ,
810 [ENOSYS] = TARGET_ENOSYS,
811 [ELOOP] = TARGET_ELOOP,
812 [ERESTART] = TARGET_ERESTART,
813 [ESTRPIPE] = TARGET_ESTRPIPE,
814 [ENOTEMPTY] = TARGET_ENOTEMPTY,
815 [EUSERS] = TARGET_EUSERS,
816 [ENOTSOCK] = TARGET_ENOTSOCK,
817 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
818 [EMSGSIZE] = TARGET_EMSGSIZE,
819 [EPROTOTYPE] = TARGET_EPROTOTYPE,
820 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
821 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
822 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
823 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
824 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
825 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
826 [EADDRINUSE] = TARGET_EADDRINUSE,
827 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
828 [ENETDOWN] = TARGET_ENETDOWN,
829 [ENETUNREACH] = TARGET_ENETUNREACH,
830 [ENETRESET] = TARGET_ENETRESET,
831 [ECONNABORTED] = TARGET_ECONNABORTED,
832 [ECONNRESET] = TARGET_ECONNRESET,
833 [ENOBUFS] = TARGET_ENOBUFS,
834 [EISCONN] = TARGET_EISCONN,
835 [ENOTCONN] = TARGET_ENOTCONN,
836 [EUCLEAN] = TARGET_EUCLEAN,
837 [ENOTNAM] = TARGET_ENOTNAM,
838 [ENAVAIL] = TARGET_ENAVAIL,
839 [EISNAM] = TARGET_EISNAM,
840 [EREMOTEIO] = TARGET_EREMOTEIO,
841 [EDQUOT] = TARGET_EDQUOT,
842 [ESHUTDOWN] = TARGET_ESHUTDOWN,
843 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
844 [ETIMEDOUT] = TARGET_ETIMEDOUT,
845 [ECONNREFUSED] = TARGET_ECONNREFUSED,
846 [EHOSTDOWN] = TARGET_EHOSTDOWN,
847 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
848 [EALREADY] = TARGET_EALREADY,
849 [EINPROGRESS] = TARGET_EINPROGRESS,
850 [ESTALE] = TARGET_ESTALE,
851 [ECANCELED] = TARGET_ECANCELED,
852 [ENOMEDIUM] = TARGET_ENOMEDIUM,
853 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
854 #ifdef ENOKEY
855 [ENOKEY] = TARGET_ENOKEY,
856 #endif
857 #ifdef EKEYEXPIRED
858 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
859 #endif
860 #ifdef EKEYREVOKED
861 [EKEYREVOKED] = TARGET_EKEYREVOKED,
862 #endif
863 #ifdef EKEYREJECTED
864 [EKEYREJECTED] = TARGET_EKEYREJECTED,
865 #endif
866 #ifdef EOWNERDEAD
867 [EOWNERDEAD] = TARGET_EOWNERDEAD,
868 #endif
869 #ifdef ENOTRECOVERABLE
870 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
871 #endif
872 #ifdef ENOMSG
873 [ENOMSG] = TARGET_ENOMSG,
874 #endif
875 #ifdef ERKFILL
876 [ERFKILL] = TARGET_ERFKILL,
877 #endif
878 #ifdef EHWPOISON
879 [EHWPOISON] = TARGET_EHWPOISON,
880 #endif
883 static inline int host_to_target_errno(int err)
885 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
886 host_to_target_errno_table[err]) {
887 return host_to_target_errno_table[err];
889 return err;
892 static inline int target_to_host_errno(int err)
894 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
895 target_to_host_errno_table[err]) {
896 return target_to_host_errno_table[err];
898 return err;
901 static inline abi_long get_errno(abi_long ret)
903 if (ret == -1)
904 return -host_to_target_errno(errno);
905 else
906 return ret;
909 static inline int is_error(abi_long ret)
911 return (abi_ulong)ret >= (abi_ulong)(-4096);
914 const char *target_strerror(int err)
916 if (err == TARGET_ERESTARTSYS) {
917 return "To be restarted";
919 if (err == TARGET_QEMU_ESIGRETURN) {
920 return "Successful exit from sigreturn";
923 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
924 return NULL;
926 return strerror(target_to_host_errno(err));
929 #define safe_syscall0(type, name) \
930 static type safe_##name(void) \
932 return safe_syscall(__NR_##name); \
935 #define safe_syscall1(type, name, type1, arg1) \
936 static type safe_##name(type1 arg1) \
938 return safe_syscall(__NR_##name, arg1); \
941 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
942 static type safe_##name(type1 arg1, type2 arg2) \
944 return safe_syscall(__NR_##name, arg1, arg2); \
947 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
948 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
950 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
953 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
954 type4, arg4) \
955 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
957 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
960 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
961 type4, arg4, type5, arg5) \
962 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
963 type5 arg5) \
965 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
968 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
969 type4, arg4, type5, arg5, type6, arg6) \
970 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
971 type5 arg5, type6 arg6) \
973 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
/* "Safe" syscall wrappers.  The safe_syscallN() generator macros (defined
 * outside this view) emit small functions that invoke the host syscall in
 * a way that lets a guest signal arriving mid-call be handled correctly
 * (presumably returning -TARGET_ERESTARTSYS semantics — see the
 * safe-syscall support code for the exact contract).
 */
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#ifdef TARGET_NR_clock_nanosleep
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#else
/* This host kernel architecture uses a single ipc syscall; fake up
 * wrappers for the sub-operations to hide this implementation detail.
 * Annoyingly we can't include linux/ipc.h to get the constant definitions
 * for the call parameter because some structs in there conflict with the
 * sys/ipc.h ones. So we just define them here, and rely on them being
 * the same for all host architectures.
 */
#define Q_SEMTIMEDOP 4
#define Q_MSGSND 11
#define Q_MSGRCV 12
#define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
    return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
/* NOTE(review): version 1 of the IPCCALL encoding presumably selects the
 * msgrcv argument layout where msgtype travels as a separate parameter —
 * confirm against the host ipc(2) ABI. */
static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
    return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
                           const struct timespec *timeout)
    return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
                    (long)timeout);
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 * use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
1083 static inline int host_to_target_sock_type(int host_type)
1085 int target_type;
1087 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
1088 case SOCK_DGRAM:
1089 target_type = TARGET_SOCK_DGRAM;
1090 break;
1091 case SOCK_STREAM:
1092 target_type = TARGET_SOCK_STREAM;
1093 break;
1094 default:
1095 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
1096 break;
1099 #if defined(SOCK_CLOEXEC)
1100 if (host_type & SOCK_CLOEXEC) {
1101 target_type |= TARGET_SOCK_CLOEXEC;
1103 #endif
1105 #if defined(SOCK_NONBLOCK)
1106 if (host_type & SOCK_NONBLOCK) {
1107 target_type |= TARGET_SOCK_NONBLOCK;
1109 #endif
1111 return target_type;
/* Guest heap ("brk") state: target_original_brk is the initial break,
 * target_brk the current break, and brk_page the host-page-aligned top
 * of the pages actually reserved for the heap. */
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

/* Record the initial program break (presumably called during exec
 * setup by the loader — confirm against callers). */
void target_set_brk(abi_ulong new_brk)
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);

/* Debug tracing for do_brk(); swap in the fprintf variant to enable. */
//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
/* do_brk() must return target values and target errnos.
 *
 * Emulates the brk syscall: grows (or queries) the guest heap, mapping
 * additional anonymous memory when the request goes past the pages
 * already reserved.  Shrinking below the original break is refused. */
abi_long do_brk(abi_ulong new_brk)
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* brk(0) is a query: just report the current break. */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    /* Refuse to shrink below the initial break. */
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
/* Convert a guest fd_set — an array of abi_ulong bitmask words at
 * target_fds_addr covering n descriptors — into the host fd_set *fds.
 * Returns 0 on success or -TARGET_EFAULT if guest memory is unreadable. */
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;              /* running fd number across all words */
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
1237 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1238 abi_ulong target_fds_addr,
1239 int n)
1241 if (target_fds_addr) {
1242 if (copy_from_user_fdset(fds, target_fds_addr, n))
1243 return -TARGET_EFAULT;
1244 *fds_ptr = fds;
1245 } else {
1246 *fds_ptr = NULL;
1248 return 0;
/* Convert the host fd_set *fds back into the guest's abi_ulong bitmask
 * representation at target_fds_addr, covering n descriptors. */
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;              /* running fd number across all words */
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        __put_user(v, &target_fds[i]);

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
/* Tick rate (HZ) of the host kernel's clock_t values. */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

/* Rescale a host clock_t tick count to the target's HZ; widened to
 * 64 bits to avoid overflow in the intermediate product. */
static inline abi_long host_to_target_clock_t(long ticks)
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
/* Copy a host struct rusage out to guest memory at target_addr,
 * byte-swapping every field via tswapal().  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
1326 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1328 abi_ulong target_rlim_swap;
1329 rlim_t result;
1331 target_rlim_swap = tswapal(target_rlim);
1332 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1333 return RLIM_INFINITY;
1335 result = target_rlim_swap;
1336 if (target_rlim_swap != (rlim_t)result)
1337 return RLIM_INFINITY;
1339 return result;
1342 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1344 abi_ulong target_rlim_swap;
1345 abi_ulong result;
1347 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1348 target_rlim_swap = TARGET_RLIM_INFINITY;
1349 else
1350 target_rlim_swap = rlim;
1351 result = tswapal(target_rlim_swap);
1353 return result;
1356 static inline int target_to_host_resource(int code)
1358 switch (code) {
1359 case TARGET_RLIMIT_AS:
1360 return RLIMIT_AS;
1361 case TARGET_RLIMIT_CORE:
1362 return RLIMIT_CORE;
1363 case TARGET_RLIMIT_CPU:
1364 return RLIMIT_CPU;
1365 case TARGET_RLIMIT_DATA:
1366 return RLIMIT_DATA;
1367 case TARGET_RLIMIT_FSIZE:
1368 return RLIMIT_FSIZE;
1369 case TARGET_RLIMIT_LOCKS:
1370 return RLIMIT_LOCKS;
1371 case TARGET_RLIMIT_MEMLOCK:
1372 return RLIMIT_MEMLOCK;
1373 case TARGET_RLIMIT_MSGQUEUE:
1374 return RLIMIT_MSGQUEUE;
1375 case TARGET_RLIMIT_NICE:
1376 return RLIMIT_NICE;
1377 case TARGET_RLIMIT_NOFILE:
1378 return RLIMIT_NOFILE;
1379 case TARGET_RLIMIT_NPROC:
1380 return RLIMIT_NPROC;
1381 case TARGET_RLIMIT_RSS:
1382 return RLIMIT_RSS;
1383 case TARGET_RLIMIT_RTPRIO:
1384 return RLIMIT_RTPRIO;
1385 case TARGET_RLIMIT_SIGPENDING:
1386 return RLIMIT_SIGPENDING;
1387 case TARGET_RLIMIT_STACK:
1388 return RLIMIT_STACK;
1389 default:
1390 return code;
1394 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1395 abi_ulong target_tv_addr)
1397 struct target_timeval *target_tv;
1399 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1400 return -TARGET_EFAULT;
1402 __get_user(tv->tv_sec, &target_tv->tv_sec);
1403 __get_user(tv->tv_usec, &target_tv->tv_usec);
1405 unlock_user_struct(target_tv, target_tv_addr, 0);
1407 return 0;
1410 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1411 const struct timeval *tv)
1413 struct target_timeval *target_tv;
1415 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1416 return -TARGET_EFAULT;
1418 __put_user(tv->tv_sec, &target_tv->tv_sec);
1419 __put_user(tv->tv_usec, &target_tv->tv_usec);
1421 unlock_user_struct(target_tv, target_tv_addr, 1);
1423 return 0;
1426 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1427 abi_ulong target_tz_addr)
1429 struct target_timezone *target_tz;
1431 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1432 return -TARGET_EFAULT;
1435 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1436 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1438 unlock_user_struct(target_tz, target_tz_addr, 0);
1440 return 0;
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

/* Copy a guest struct mq_attr into the host structure, field by field
 * via __get_user.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;

/* Copy a host struct mq_attr out to the guest, field by field via
 * __put_user.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos.
 *
 * Implements the guest's select() on top of the host pselect6: the three
 * guest fd_sets are converted in, the timeval is converted to a timespec,
 * and on success the (possibly modified) sets and remaining timeout are
 * written back to guest memory. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;

    /* select's timeval becomes a timespec for pselect6. */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* Linux select() updates the timeout; mirror that to the guest. */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;

    return ret;

#if defined(TARGET_WANT_OLD_SYS_SELECT)
/* Old-style select: all five arguments packed in a struct in guest
 * memory; unpack and forward to do_select(). */
static abi_long do_old_select(abi_ulong arg1)
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
#endif
#endif
/* Create a pipe with flags via host pipe2(); returns the raw host
 * result, or -ENOSYS when the host lacks pipe2(). */
static abi_long do_pipe2(int host_pipe[], int flags)
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
/* Common implementation for the guest pipe and pipe2 syscalls.  Writes
 * the two new fds to guest memory at pipedes; for the original pipe
 * syscall some targets instead return the second fd in a CPU register. */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
1610 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1611 abi_ulong target_addr,
1612 socklen_t len)
1614 struct target_ip_mreqn *target_smreqn;
1616 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1617 if (!target_smreqn)
1618 return -TARGET_EFAULT;
1619 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1620 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1621 if (len == sizeof(struct target_ip_mreqn))
1622 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1623 unlock_user(target_smreqn, target_addr, 0);
1625 return 0;
/* Convert a guest sockaddr at target_addr into the host *addr buffer,
 * fixing up the family field and family-specific quirks (AF_UNIX path
 * termination, AF_NETLINK/AF_PACKET byte order).  Returns 0 or
 * -TARGET_EFAULT. */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    /* Some fd types install their own address translator; defer to it. */
    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        if (len > unix_maxlen)
            len = unix_maxlen;

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    /* Netlink and packet addresses carry multi-byte fields that need
     * byte-swapping in place after the bulk copy. */
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    unlock_user(target_saddr, target_addr, 0);

    return 0;
/* Convert a host sockaddr out to guest memory at target_addr, swapping
 * the family field and family-specific sub-fields (AF_NETLINK,
 * AF_PACKET, AF_INET6 scope id).  A zero len is a no-op.  Returns 0 or
 * -TARGET_EFAULT. */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    /* Only touch sa_family if the buffer is long enough to contain it
     * (the guest may have supplied a truncated buffer). */
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
        struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
            (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    unlock_user(target_saddr, target_addr, len);

    return 0;
/* Convert the ancillary data (control messages) of a guest msghdr into
 * the host msgh, walking both cmsg chains in parallel.  Supported
 * payloads (SCM_RIGHTS, SCM_CREDENTIALS) are converted field by field;
 * anything else is copied raw with a warning.  On exit
 * msgh->msg_controllen holds the space actually used. */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length excluding the header. */
        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* Passed file descriptors: swap each int in the array. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            /* Unknown payload: best-effort raw copy. */
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
/* Convert the ancillary data of a host msghdr back into the guest's
 * control buffer, walking both cmsg chains in parallel.  Handles the
 * payload types with target-dependent layout (SCM_RIGHTS, SO_TIMESTAMP,
 * SCM_CREDENTIALS, IP/IPV6 TTL and RECVERR); truncation because the
 * guest buffer is too small sets MSG_CTRUNC, matching the kernel. */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            break;
        default:
            break;

        /* Not enough room left for the whole payload: truncate and
         * tell the guest via MSG_CTRUNC. */
        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                break;
            case SO_TIMESTAMP:
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            case SCM_CREDENTIALS:
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            default:
                goto unimplemented;
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                __put_user(*v, t_int);
                break;
            case IP_RECVERR:
                struct errhdr_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in offender;
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                                        (void *) &errh->offender, sizeof(errh->offender));
                break;
            default:
                goto unimplemented;
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                __put_user(*v, t_int);
                break;
            case IPV6_RECVERR:
                struct errhdr6_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in6 offender;
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                                        (void *) &errh->offender, sizeof(errh->offender));
                break;
            default:
                goto unimplemented;
            break;

        default:
        unimplemented:
            /* Unknown payload: raw copy, zero-padding any extra target
             * space. */
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);

        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
/* Byte-swap every field of a netlink message header in place
 * (conversion is symmetric, so this serves both directions). */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_len   = tswap32(nlh->nlmsg_len);
    nlh->nlmsg_type  = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_seq   = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_pid   = tswap32(nlh->nlmsg_pid);
}
/* Walk a buffer of host-byte-order netlink messages, invoking the
 * supplied per-type converter on each and then swapping the header for
 * the guest.  Stops at NLMSG_DONE/NLMSG_ERROR or a malformed length. */
static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*host_to_target_nlmsg)
                                              (struct nlmsghdr *))
    uint32_t nlmsg_len;
    abi_long ret;

    while (len > sizeof(struct nlmsghdr)) {

        nlmsg_len = nlh->nlmsg_len;
        /* Reject truncated or oversized message lengths. */
        if (nlmsg_len < sizeof(struct nlmsghdr) ||
            nlmsg_len > len) {
            break;

        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            tswap_nlmsghdr(nlh);
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            tswap_nlmsghdr(nlh);
            return 0;
        default:
            ret = host_to_target_nlmsg(nlh);
            if (ret < 0) {
                tswap_nlmsghdr(nlh);
                return ret;
            break;
        tswap_nlmsghdr(nlh);
        /* Messages are aligned to NLMSG_ALIGNTO boundaries. */
        len -= NLMSG_ALIGN(nlmsg_len);
        nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
    return 0;
/*
 * Walk a buffer of target-format netlink messages and convert each one to
 * host byte order in place (the mirror of host_to_target_for_each_nlmsg).
 * Here the header is swapped *first*, so the callback and the
 * nlmsg_type/nlmsg_len uses below see host-order values; the length
 * sanity check is therefore done on the still-swapped value via tswap32().
 */
static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*target_to_host_nlmsg)
                                                       (struct nlmsghdr *))
{
    int ret;

    while (len > sizeof(struct nlmsghdr)) {
        if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
            tswap32(nlh->nlmsg_len) > len) {
            /* truncated or corrupt message: stop */
            break;
        }
        tswap_nlmsghdr(nlh);
        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            /* the payload echoes the header of the failing request */
            tswap_nlmsghdr(&e->msg);
            return 0;
        }
        default:
            ret = target_to_host_nlmsg(nlh);
            if (ret < 0) {
                return ret;
            }
        }
        len -= NLMSG_ALIGN(nlh->nlmsg_len);
        nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
    }
    return 0;
}
2128 #ifdef CONFIG_RTNETLINK
/*
 * Iterate over a run of host-format netlink attributes (struct nlattr),
 * converting each one to target byte order in place.  The payload is
 * converted first by the callback (while nla_type/nla_len are still in
 * host order); the attribute header is swapped afterwards.  'context' is
 * passed through to the callback untouched.
 *
 * Returns 0 on success or the first negative error from the callback
 * (the header of the failing attribute has already been swapped by then).
 */
static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
                                               size_t len, void *context,
                                               abi_long (*host_to_target_nlattr)
                                                        (struct nlattr *,
                                                         void *context))
{
    unsigned short nla_len;
    abi_long ret;

    while (len > sizeof(struct nlattr)) {
        nla_len = nlattr->nla_len;
        if (nla_len < sizeof(struct nlattr) ||
            nla_len > len) {
            /* truncated or corrupt attribute: stop */
            break;
        }
        ret = host_to_target_nlattr(nlattr, context);
        /* swap the header only after the callback has consumed it */
        nlattr->nla_len = tswap16(nlattr->nla_len);
        nlattr->nla_type = tswap16(nlattr->nla_type);
        if (ret < 0) {
            return ret;
        }
        len -= NLA_ALIGN(nla_len);
        nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
    }
    return 0;
}
/*
 * Iterate over a run of host-format routing attributes (struct rtattr),
 * converting each one to target byte order in place.  Same structure as
 * host_to_target_for_each_nlattr: payload first (header still host
 * order), then the header fields, then advance by the aligned length.
 */
static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*host_to_target_rtattr)
                                                        (struct rtattr *))
{
    unsigned short rta_len;
    abi_long ret;

    while (len > sizeof(struct rtattr)) {
        rta_len = rtattr->rta_len;
        if (rta_len < sizeof(struct rtattr) ||
            rta_len > len) {
            /* truncated or corrupt attribute: stop */
            break;
        }
        ret = host_to_target_rtattr(rtattr);
        /* swap the header only after the callback has consumed it */
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        if (ret < 0) {
            return ret;
        }
        len -= RTA_ALIGN(rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
    }
    return 0;
}
/* Pointer to the payload that follows a struct nlattr header.
 * The offset is added to a char * *before* casting to void *, so the
 * macro no longer relies on the GCC void-pointer-arithmetic extension
 * (the original parenthesization added NLA_HDRLEN to a void *).
 * The resulting address is identical under GCC/Clang. */
#define NLA_DATA(nla) ((void *)((char *)(nla) + NLA_HDRLEN))
/*
 * Convert the payload of one IFLA_BR_* (bridge) nested attribute from
 * host to target byte order in place.  Cases are grouped by payload
 * width; single-byte and opaque binary payloads need no conversion.
 * Unknown attribute types are logged and passed through unchanged.
 * Always returns 0.  'context' is unused here.
 */
static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
                                                  void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* no data */
    case QEMU_IFLA_BR_FDB_FLUSH:
        break;
    /* binary */
    case QEMU_IFLA_BR_GROUP_ADDR:
        break;
    /* uint8_t */
    case QEMU_IFLA_BR_VLAN_FILTERING:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
    case QEMU_IFLA_BR_MCAST_ROUTER:
    case QEMU_IFLA_BR_MCAST_SNOOPING:
    case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
    case QEMU_IFLA_BR_MCAST_QUERIER:
    case QEMU_IFLA_BR_NF_CALL_IPTABLES:
    case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
    case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
    case QEMU_IFLA_BR_VLAN_STATS_ENABLED:
    case QEMU_IFLA_BR_MCAST_STATS_ENABLED:
    case QEMU_IFLA_BR_MCAST_IGMP_VERSION:
    case QEMU_IFLA_BR_MCAST_MLD_VERSION:
        break;
    /* uint16_t */
    case QEMU_IFLA_BR_PRIORITY:
    case QEMU_IFLA_BR_VLAN_PROTOCOL:
    case QEMU_IFLA_BR_GROUP_FWD_MASK:
    case QEMU_IFLA_BR_ROOT_PORT:
    case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uint32_t */
    case QEMU_IFLA_BR_FORWARD_DELAY:
    case QEMU_IFLA_BR_HELLO_TIME:
    case QEMU_IFLA_BR_MAX_AGE:
    case QEMU_IFLA_BR_AGEING_TIME:
    case QEMU_IFLA_BR_STP_STATE:
    case QEMU_IFLA_BR_ROOT_PATH_COST:
    case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
    case QEMU_IFLA_BR_MCAST_HASH_MAX:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BR_HELLO_TIMER:
    case QEMU_IFLA_BR_TCN_TIMER:
    case QEMU_IFLA_BR_GC_TIMER:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
    case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uint8_t[] */
    case QEMU_IFLA_BR_ROOT_ID:
    case QEMU_IFLA_BR_BRIDGE_ID:
        break;
    default:
        gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
/*
 * Convert the payload of one IFLA_BRPORT_* (bridge slave/port) nested
 * attribute from host to target byte order in place.  Cases are grouped
 * by payload width; uint8_t and opaque binary payloads need no
 * conversion.  Unknown types are logged and passed through.  Always
 * returns 0.  'context' is unused here.
 */
static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
                                                        void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* uint8_t */
    case QEMU_IFLA_BRPORT_STATE:
    case QEMU_IFLA_BRPORT_MODE:
    case QEMU_IFLA_BRPORT_GUARD:
    case QEMU_IFLA_BRPORT_PROTECT:
    case QEMU_IFLA_BRPORT_FAST_LEAVE:
    case QEMU_IFLA_BRPORT_LEARNING:
    case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
    case QEMU_IFLA_BRPORT_PROXYARP:
    case QEMU_IFLA_BRPORT_LEARNING_SYNC:
    case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
    case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
    case QEMU_IFLA_BRPORT_CONFIG_PENDING:
    case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
    case QEMU_IFLA_BRPORT_MCAST_FLOOD:
    case QEMU_IFLA_BRPORT_MCAST_TO_UCAST:
    case QEMU_IFLA_BRPORT_VLAN_TUNNEL:
    case QEMU_IFLA_BRPORT_BCAST_FLOOD:
    case QEMU_IFLA_BRPORT_NEIGH_SUPPRESS:
        break;
    /* uint16_t */
    case QEMU_IFLA_BRPORT_PRIORITY:
    case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
    case QEMU_IFLA_BRPORT_DESIGNATED_COST:
    case QEMU_IFLA_BRPORT_ID:
    case QEMU_IFLA_BRPORT_NO:
    case QEMU_IFLA_BRPORT_GROUP_FWD_MASK:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uint32_t */
    case QEMU_IFLA_BRPORT_COST:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
    case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
    case QEMU_IFLA_BRPORT_HOLD_TIMER:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uint8_t[] */
    case QEMU_IFLA_BRPORT_ROOT_ID:
    case QEMU_IFLA_BRPORT_BRIDGE_ID:
        break;
    default:
        gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
/* Parsing state shared across the nested attributes of one IFLA_LINKINFO:
 * records the interface "kind" strings seen so far so that
 * IFLA_INFO_DATA / IFLA_INFO_SLAVE_DATA can be dispatched to the
 * converter matching that kind (currently only "bridge"). */
struct linkinfo_context {
    int len;          /* length of name payload, excluding nlattr header */
    char *name;       /* IFLA_INFO_KIND payload (e.g. "bridge"), or NULL */
    int slave_len;    /* length of slave_name payload */
    char *slave_name; /* IFLA_INFO_SLAVE_KIND payload, or NULL */
};
2330 static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
2331 void *context)
2333 struct linkinfo_context *li_context = context;
2335 switch (nlattr->nla_type) {
2336 /* string */
2337 case QEMU_IFLA_INFO_KIND:
2338 li_context->name = NLA_DATA(nlattr);
2339 li_context->len = nlattr->nla_len - NLA_HDRLEN;
2340 break;
2341 case QEMU_IFLA_INFO_SLAVE_KIND:
2342 li_context->slave_name = NLA_DATA(nlattr);
2343 li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
2344 break;
2345 /* stats */
2346 case QEMU_IFLA_INFO_XSTATS:
2347 /* FIXME: only used by CAN */
2348 break;
2349 /* nested */
2350 case QEMU_IFLA_INFO_DATA:
2351 if (strncmp(li_context->name, "bridge",
2352 li_context->len) == 0) {
2353 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2354 nlattr->nla_len,
2355 NULL,
2356 host_to_target_data_bridge_nlattr);
2357 } else {
2358 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
2360 break;
2361 case QEMU_IFLA_INFO_SLAVE_DATA:
2362 if (strncmp(li_context->slave_name, "bridge",
2363 li_context->slave_len) == 0) {
2364 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2365 nlattr->nla_len,
2366 NULL,
2367 host_to_target_slave_data_bridge_nlattr);
2368 } else {
2369 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2370 li_context->slave_name);
2372 break;
2373 default:
2374 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
2375 break;
2378 return 0;
2381 static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
2382 void *context)
2384 uint32_t *u32;
2385 int i;
2387 switch (nlattr->nla_type) {
2388 case QEMU_IFLA_INET_CONF:
2389 u32 = NLA_DATA(nlattr);
2390 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2391 i++) {
2392 u32[i] = tswap32(u32[i]);
2394 break;
2395 default:
2396 gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
2398 return 0;
/*
 * Convert one AF_INET6 (IFLA_INET6_*) nested attribute from host to
 * target byte order in place: scalar flags, uint32_t/uint64_t arrays and
 * the ifla_cacheinfo structure.  Binary tokens and single bytes need no
 * conversion.  Unknown types are logged and passed through.  Always
 * returns 0.  'context' is unused here.
 */
static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
                                                 void *context)
{
    uint32_t *u32;
    uint64_t *u64;
    struct ifla_cacheinfo *ci;
    int i;

    switch (nlattr->nla_type) {
    /* binaries */
    case QEMU_IFLA_INET6_TOKEN:
        break;
    /* uint8_t */
    case QEMU_IFLA_INET6_ADDR_GEN_MODE:
        break;
    /* uint32_t */
    case QEMU_IFLA_INET6_FLAGS:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint32_t[] */
    case QEMU_IFLA_INET6_CONF:
        u32 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
             i++) {
            u32[i] = tswap32(u32[i]);
        }
        break;
    /* ifla_cacheinfo */
    case QEMU_IFLA_INET6_CACHEINFO:
        ci = NLA_DATA(nlattr);
        ci->max_reasm_len = tswap32(ci->max_reasm_len);
        ci->tstamp = tswap32(ci->tstamp);
        ci->reachable_time = tswap32(ci->reachable_time);
        ci->retrans_time = tswap32(ci->retrans_time);
        break;
    /* uint64_t[] */
    case QEMU_IFLA_INET6_STATS:
    case QEMU_IFLA_INET6_ICMP6STATS:
        u64 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
             i++) {
            u64[i] = tswap64(u64[i]);
        }
        break;
    default:
        gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
    }
    return 0;
}
2452 static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
2453 void *context)
2455 switch (nlattr->nla_type) {
2456 case AF_INET:
2457 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2458 NULL,
2459 host_to_target_data_inet_nlattr);
2460 case AF_INET6:
2461 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2462 NULL,
2463 host_to_target_data_inet6_nlattr);
2464 default:
2465 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
2466 break;
2468 return 0;
2471 static abi_long host_to_target_data_xdp_nlattr(struct nlattr *nlattr,
2472 void *context)
2474 uint32_t *u32;
2476 switch (nlattr->nla_type) {
2477 /* uint8_t */
2478 case QEMU_IFLA_XDP_ATTACHED:
2479 break;
2480 /* uint32_t */
2481 case QEMU_IFLA_XDP_PROG_ID:
2482 u32 = NLA_DATA(nlattr);
2483 *u32 = tswap32(*u32);
2484 break;
2485 default:
2486 gemu_log("Unknown host XDP type: %d\n", nlattr->nla_type);
2487 break;
2489 return 0;
/*
 * Convert the payload of one IFLA_* attribute of an RTM_*LINK reply from
 * host to target byte order in place.  Scalars and the stats/map
 * structures are swapped field by field; nested attribute sets
 * (LINKINFO, AF_SPEC, XDP) recurse via host_to_target_for_each_nlattr.
 * Strings, MAC addresses and single bytes need no conversion.  Unknown
 * types are logged and passed through.
 */
static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct rtnl_link_stats *st;
    struct rtnl_link_stats64 *st64;
    struct rtnl_link_ifmap *map;
    struct linkinfo_context li_context;

    switch (rtattr->rta_type) {
    /* binary stream */
    case QEMU_IFLA_ADDRESS:
    case QEMU_IFLA_BROADCAST:
    /* string */
    case QEMU_IFLA_IFNAME:
    case QEMU_IFLA_QDISC:
        break;
    /* uint8_t */
    case QEMU_IFLA_OPERSTATE:
    case QEMU_IFLA_LINKMODE:
    case QEMU_IFLA_CARRIER:
    case QEMU_IFLA_PROTO_DOWN:
        break;
    /* uint32_t */
    case QEMU_IFLA_MTU:
    case QEMU_IFLA_LINK:
    case QEMU_IFLA_WEIGHT:
    case QEMU_IFLA_TXQLEN:
    case QEMU_IFLA_CARRIER_CHANGES:
    case QEMU_IFLA_NUM_RX_QUEUES:
    case QEMU_IFLA_NUM_TX_QUEUES:
    case QEMU_IFLA_PROMISCUITY:
    case QEMU_IFLA_EXT_MASK:
    case QEMU_IFLA_LINK_NETNSID:
    case QEMU_IFLA_GROUP:
    case QEMU_IFLA_MASTER:
    case QEMU_IFLA_NUM_VF:
    case QEMU_IFLA_GSO_MAX_SEGS:
    case QEMU_IFLA_GSO_MAX_SIZE:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct rtnl_link_stats */
    case QEMU_IFLA_STATS:
        st = RTA_DATA(rtattr);
        st->rx_packets = tswap32(st->rx_packets);
        st->tx_packets = tswap32(st->tx_packets);
        st->rx_bytes = tswap32(st->rx_bytes);
        st->tx_bytes = tswap32(st->tx_bytes);
        st->rx_errors = tswap32(st->rx_errors);
        st->tx_errors = tswap32(st->tx_errors);
        st->rx_dropped = tswap32(st->rx_dropped);
        st->tx_dropped = tswap32(st->tx_dropped);
        st->multicast = tswap32(st->multicast);
        st->collisions = tswap32(st->collisions);

        /* detailed rx_errors: */
        st->rx_length_errors = tswap32(st->rx_length_errors);
        st->rx_over_errors = tswap32(st->rx_over_errors);
        st->rx_crc_errors = tswap32(st->rx_crc_errors);
        st->rx_frame_errors = tswap32(st->rx_frame_errors);
        st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
        st->rx_missed_errors = tswap32(st->rx_missed_errors);

        /* detailed tx_errors */
        st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
        st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
        st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
        st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
        st->tx_window_errors = tswap32(st->tx_window_errors);

        /* for cslip etc */
        st->rx_compressed = tswap32(st->rx_compressed);
        st->tx_compressed = tswap32(st->tx_compressed);
        break;
    /* struct rtnl_link_stats64 */
    case QEMU_IFLA_STATS64:
        st64 = RTA_DATA(rtattr);
        st64->rx_packets = tswap64(st64->rx_packets);
        st64->tx_packets = tswap64(st64->tx_packets);
        st64->rx_bytes = tswap64(st64->rx_bytes);
        st64->tx_bytes = tswap64(st64->tx_bytes);
        st64->rx_errors = tswap64(st64->rx_errors);
        st64->tx_errors = tswap64(st64->tx_errors);
        st64->rx_dropped = tswap64(st64->rx_dropped);
        st64->tx_dropped = tswap64(st64->tx_dropped);
        st64->multicast = tswap64(st64->multicast);
        st64->collisions = tswap64(st64->collisions);

        /* detailed rx_errors: */
        st64->rx_length_errors = tswap64(st64->rx_length_errors);
        st64->rx_over_errors = tswap64(st64->rx_over_errors);
        st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
        st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
        st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
        st64->rx_missed_errors = tswap64(st64->rx_missed_errors);

        /* detailed tx_errors */
        st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
        st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
        st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
        st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
        st64->tx_window_errors = tswap64(st64->tx_window_errors);

        /* for cslip etc */
        st64->rx_compressed = tswap64(st64->rx_compressed);
        st64->tx_compressed = tswap64(st64->tx_compressed);
        break;
    /* struct rtnl_link_ifmap */
    case QEMU_IFLA_MAP:
        map = RTA_DATA(rtattr);
        map->mem_start = tswap64(map->mem_start);
        map->mem_end = tswap64(map->mem_end);
        map->base_addr = tswap64(map->base_addr);
        map->irq = tswap16(map->irq);
        break;
    /* nested */
    case QEMU_IFLA_LINKINFO:
        memset(&li_context, 0, sizeof(li_context));
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              &li_context,
                                           host_to_target_data_linkinfo_nlattr);
    case QEMU_IFLA_AF_SPEC:
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              NULL,
                                             host_to_target_data_spec_nlattr);
    case QEMU_IFLA_XDP:
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              NULL,
                                              host_to_target_data_xdp_nlattr);
    default:
        gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/*
 * Convert the payload of one IFA_* attribute of an RTM_*ADDR reply from
 * host to target byte order in place.  Unknown types are logged and
 * passed through.  Always returns 0.
 */
static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct ifa_cacheinfo *ci;

    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case IFA_ADDRESS:
    case IFA_LOCAL:
        break;
    /* string */
    case IFA_LABEL:
        break;
    /* u32 */
    case IFA_FLAGS:
    case IFA_BROADCAST:
        /* NOTE(review): IFA_BROADCAST carries an address which, like
         * IFA_ADDRESS/IFA_LOCAL above, is kept in network byte order by
         * the kernel; byte-swapping it here looks suspicious — confirm
         * against rtnetlink(7) before relying on cross-endian results. */
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct ifa_cacheinfo */
    case IFA_CACHEINFO:
        ci = RTA_DATA(rtattr);
        ci->ifa_prefered = tswap32(ci->ifa_prefered);
        ci->ifa_valid = tswap32(ci->ifa_valid);
        ci->cstamp = tswap32(ci->cstamp);
        ci->tstamp = tswap32(ci->tstamp);
        break;
    default:
        gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
2662 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
2664 uint32_t *u32;
2665 switch (rtattr->rta_type) {
2666 /* binary: depends on family type */
2667 case RTA_GATEWAY:
2668 case RTA_DST:
2669 case RTA_PREFSRC:
2670 break;
2671 /* u32 */
2672 case RTA_PRIORITY:
2673 case RTA_TABLE:
2674 case RTA_OIF:
2675 u32 = RTA_DATA(rtattr);
2676 *u32 = tswap32(*u32);
2677 break;
2678 default:
2679 gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
2680 break;
2682 return 0;
/* Convert all IFLA_* attributes of an RTM_*LINK reply (host -> target). */
static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_link_rtattr);
}

/* Convert all IFA_* attributes of an RTM_*ADDR reply (host -> target). */
static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_addr_rtattr);
}

/* Convert all RTA_* attributes of an RTM_*ROUTE reply (host -> target). */
static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
                                            uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_route_rtattr);
}
/*
 * Convert the payload of one host-format NETLINK_ROUTE reply to target
 * byte order.  The nlmsghdr itself is swapped by the caller
 * (host_to_target_for_each_nlmsg); this handles the family-specific
 * fixed struct and its trailing attributes.  nlmsg_len is captured while
 * still in host order so the attribute length is computed correctly.
 * Returns -TARGET_EINVAL for message types we do not know how to convert.
 */
static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
{
    uint32_t nlmsg_len;
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    nlmsg_len = nlh->nlmsg_len;
    switch (nlh->nlmsg_type) {
    case RTM_NEWLINK:
    case RTM_DELLINK:
    case RTM_GETLINK:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            host_to_target_link_rtattr(IFLA_RTA(ifi),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_NEWADDR:
    case RTM_DELADDR:
    case RTM_GETADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            host_to_target_addr_rtattr(IFA_RTA(ifa),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
    case RTM_GETROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            host_to_target_route_rtattr(RTM_RTA(rtm),
                                        nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EINVAL;
    }
    return 0;
}
/* Convert a whole buffer of NETLINK_ROUTE replies from host to target
 * byte order (headers and payloads). */
static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
}
/*
 * Iterate over target-format routing attributes, converting each one to
 * host byte order in place.  Mirror of host_to_target_for_each_rtattr,
 * but the header is swapped *before* invoking the callback (which
 * therefore sees host-order values), and the length check uses the
 * still-swapped rta_len via tswap16().  Note this loop uses 'len >='
 * where the host-to-target variant uses 'len >'.
 */
static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*target_to_host_rtattr)
                                                        (struct rtattr *))
{
    abi_long ret;

    while (len >= sizeof(struct rtattr)) {
        if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
            tswap16(rtattr->rta_len) > len) {
            /* truncated or corrupt attribute: stop */
            break;
        }
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        ret = target_to_host_rtattr(rtattr);
        if (ret < 0) {
            return ret;
        }
        len -= RTA_ALIGN(rtattr->rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) +
                 RTA_ALIGN(rtattr->rta_len));
    }
    return 0;
}
2785 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2787 switch (rtattr->rta_type) {
2788 default:
2789 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
2790 break;
2792 return 0;
2795 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2797 switch (rtattr->rta_type) {
2798 /* binary: depends on family type */
2799 case IFA_LOCAL:
2800 case IFA_ADDRESS:
2801 break;
2802 default:
2803 gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
2804 break;
2806 return 0;
2809 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2811 uint32_t *u32;
2812 switch (rtattr->rta_type) {
2813 /* binary: depends on family type */
2814 case RTA_DST:
2815 case RTA_SRC:
2816 case RTA_GATEWAY:
2817 break;
2818 /* u32 */
2819 case RTA_PRIORITY:
2820 case RTA_OIF:
2821 u32 = RTA_DATA(rtattr);
2822 *u32 = tswap32(*u32);
2823 break;
2824 default:
2825 gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
2826 break;
2828 return 0;
/* Convert all IFLA_* attributes of an outgoing RTM_*LINK request
 * (target -> host). */
static void target_to_host_link_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_link_rtattr);
}

/* Convert all IFA_* attributes of an outgoing RTM_*ADDR request
 * (target -> host). */
static void target_to_host_addr_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_addr_rtattr);
}

/* Convert all RTA_* attributes of an outgoing RTM_*ROUTE request
 * (target -> host). */
static void target_to_host_route_rtattr(struct rtattr *rtattr,
                                        uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_route_rtattr);
}
/*
 * Convert the payload of one target-format NETLINK_ROUTE request to host
 * byte order.  The nlmsghdr has already been swapped by the caller
 * (target_to_host_for_each_nlmsg); this handles the family-specific
 * fixed struct and its trailing attributes.  The bare GET requests are
 * accepted without conversion (their payload is a single-byte family in
 * struct rtgenmsg, or the size check below fails harmlessly).
 * Returns -TARGET_EOPNOTSUPP for message types we do not support.
 */
static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
{
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    switch (nlh->nlmsg_type) {
    case RTM_GETLINK:
        break;
    case RTM_NEWLINK:
    case RTM_DELLINK:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_GETADDR:
    case RTM_NEWADDR:
    case RTM_DELADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_GETROUTE:
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
                                        NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EOPNOTSUPP;
    }
    return 0;
}
/* Convert a whole buffer of outgoing NETLINK_ROUTE requests from target
 * to host byte order (headers and payloads). */
static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
}
2904 #endif /* CONFIG_RTNETLINK */
2906 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2908 switch (nlh->nlmsg_type) {
2909 default:
2910 gemu_log("Unknown host audit message type %d\n",
2911 nlh->nlmsg_type);
2912 return -TARGET_EINVAL;
2914 return 0;
/* Convert a whole buffer of NETLINK_AUDIT replies from host to target
 * byte order (headers and payloads). */
static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
}
2923 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2925 switch (nlh->nlmsg_type) {
2926 case AUDIT_USER:
2927 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2928 case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2929 break;
2930 default:
2931 gemu_log("Unknown target audit message type %d\n",
2932 nlh->nlmsg_type);
2933 return -TARGET_EINVAL;
2936 return 0;
/* Convert a whole buffer of outgoing NETLINK_AUDIT requests from target
 * to host byte order (headers and payloads). */
static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
}
2944 /* do_setsockopt() Must return target values and target errnos. */
2945 static abi_long do_setsockopt(int sockfd, int level, int optname,
2946 abi_ulong optval_addr, socklen_t optlen)
2948 abi_long ret;
2949 int val;
2950 struct ip_mreqn *ip_mreq;
2951 struct ip_mreq_source *ip_mreq_source;
2953 switch(level) {
2954 case SOL_TCP:
2955 /* TCP options all take an 'int' value. */
2956 if (optlen < sizeof(uint32_t))
2957 return -TARGET_EINVAL;
2959 if (get_user_u32(val, optval_addr))
2960 return -TARGET_EFAULT;
2961 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2962 break;
2963 case SOL_IP:
2964 switch(optname) {
2965 case IP_TOS:
2966 case IP_TTL:
2967 case IP_HDRINCL:
2968 case IP_ROUTER_ALERT:
2969 case IP_RECVOPTS:
2970 case IP_RETOPTS:
2971 case IP_PKTINFO:
2972 case IP_MTU_DISCOVER:
2973 case IP_RECVERR:
2974 case IP_RECVTTL:
2975 case IP_RECVTOS:
2976 #ifdef IP_FREEBIND
2977 case IP_FREEBIND:
2978 #endif
2979 case IP_MULTICAST_TTL:
2980 case IP_MULTICAST_LOOP:
2981 val = 0;
2982 if (optlen >= sizeof(uint32_t)) {
2983 if (get_user_u32(val, optval_addr))
2984 return -TARGET_EFAULT;
2985 } else if (optlen >= 1) {
2986 if (get_user_u8(val, optval_addr))
2987 return -TARGET_EFAULT;
2989 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2990 break;
2991 case IP_ADD_MEMBERSHIP:
2992 case IP_DROP_MEMBERSHIP:
2993 if (optlen < sizeof (struct target_ip_mreq) ||
2994 optlen > sizeof (struct target_ip_mreqn))
2995 return -TARGET_EINVAL;
2997 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2998 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2999 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
3000 break;
3002 case IP_BLOCK_SOURCE:
3003 case IP_UNBLOCK_SOURCE:
3004 case IP_ADD_SOURCE_MEMBERSHIP:
3005 case IP_DROP_SOURCE_MEMBERSHIP:
3006 if (optlen != sizeof (struct target_ip_mreq_source))
3007 return -TARGET_EINVAL;
3009 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
3010 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
3011 unlock_user (ip_mreq_source, optval_addr, 0);
3012 break;
3014 default:
3015 goto unimplemented;
3017 break;
3018 case SOL_IPV6:
3019 switch (optname) {
3020 case IPV6_MTU_DISCOVER:
3021 case IPV6_MTU:
3022 case IPV6_V6ONLY:
3023 case IPV6_RECVPKTINFO:
3024 case IPV6_UNICAST_HOPS:
3025 case IPV6_RECVERR:
3026 case IPV6_RECVHOPLIMIT:
3027 case IPV6_2292HOPLIMIT:
3028 case IPV6_CHECKSUM:
3029 val = 0;
3030 if (optlen < sizeof(uint32_t)) {
3031 return -TARGET_EINVAL;
3033 if (get_user_u32(val, optval_addr)) {
3034 return -TARGET_EFAULT;
3036 ret = get_errno(setsockopt(sockfd, level, optname,
3037 &val, sizeof(val)));
3038 break;
3039 case IPV6_PKTINFO:
3041 struct in6_pktinfo pki;
3043 if (optlen < sizeof(pki)) {
3044 return -TARGET_EINVAL;
3047 if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
3048 return -TARGET_EFAULT;
3051 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
3053 ret = get_errno(setsockopt(sockfd, level, optname,
3054 &pki, sizeof(pki)));
3055 break;
3057 default:
3058 goto unimplemented;
3060 break;
3061 case SOL_ICMPV6:
3062 switch (optname) {
3063 case ICMPV6_FILTER:
3065 struct icmp6_filter icmp6f;
3067 if (optlen > sizeof(icmp6f)) {
3068 optlen = sizeof(icmp6f);
3071 if (copy_from_user(&icmp6f, optval_addr, optlen)) {
3072 return -TARGET_EFAULT;
3075 for (val = 0; val < 8; val++) {
3076 icmp6f.data[val] = tswap32(icmp6f.data[val]);
3079 ret = get_errno(setsockopt(sockfd, level, optname,
3080 &icmp6f, optlen));
3081 break;
3083 default:
3084 goto unimplemented;
3086 break;
3087 case SOL_RAW:
3088 switch (optname) {
3089 case ICMP_FILTER:
3090 case IPV6_CHECKSUM:
3091 /* those take an u32 value */
3092 if (optlen < sizeof(uint32_t)) {
3093 return -TARGET_EINVAL;
3096 if (get_user_u32(val, optval_addr)) {
3097 return -TARGET_EFAULT;
3099 ret = get_errno(setsockopt(sockfd, level, optname,
3100 &val, sizeof(val)));
3101 break;
3103 default:
3104 goto unimplemented;
3106 break;
3107 case TARGET_SOL_SOCKET:
3108 switch (optname) {
3109 case TARGET_SO_RCVTIMEO:
3111 struct timeval tv;
3113 optname = SO_RCVTIMEO;
3115 set_timeout:
3116 if (optlen != sizeof(struct target_timeval)) {
3117 return -TARGET_EINVAL;
3120 if (copy_from_user_timeval(&tv, optval_addr)) {
3121 return -TARGET_EFAULT;
3124 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3125 &tv, sizeof(tv)));
3126 return ret;
3128 case TARGET_SO_SNDTIMEO:
3129 optname = SO_SNDTIMEO;
3130 goto set_timeout;
3131 case TARGET_SO_ATTACH_FILTER:
3133 struct target_sock_fprog *tfprog;
3134 struct target_sock_filter *tfilter;
3135 struct sock_fprog fprog;
3136 struct sock_filter *filter;
3137 int i;
3139 if (optlen != sizeof(*tfprog)) {
3140 return -TARGET_EINVAL;
3142 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
3143 return -TARGET_EFAULT;
3145 if (!lock_user_struct(VERIFY_READ, tfilter,
3146 tswapal(tfprog->filter), 0)) {
3147 unlock_user_struct(tfprog, optval_addr, 1);
3148 return -TARGET_EFAULT;
3151 fprog.len = tswap16(tfprog->len);
3152 filter = g_try_new(struct sock_filter, fprog.len);
3153 if (filter == NULL) {
3154 unlock_user_struct(tfilter, tfprog->filter, 1);
3155 unlock_user_struct(tfprog, optval_addr, 1);
3156 return -TARGET_ENOMEM;
3158 for (i = 0; i < fprog.len; i++) {
3159 filter[i].code = tswap16(tfilter[i].code);
3160 filter[i].jt = tfilter[i].jt;
3161 filter[i].jf = tfilter[i].jf;
3162 filter[i].k = tswap32(tfilter[i].k);
3164 fprog.filter = filter;
3166 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
3167 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
3168 g_free(filter);
3170 unlock_user_struct(tfilter, tfprog->filter, 1);
3171 unlock_user_struct(tfprog, optval_addr, 1);
3172 return ret;
3174 case TARGET_SO_BINDTODEVICE:
3176 char *dev_ifname, *addr_ifname;
3178 if (optlen > IFNAMSIZ - 1) {
3179 optlen = IFNAMSIZ - 1;
3181 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
3182 if (!dev_ifname) {
3183 return -TARGET_EFAULT;
3185 optname = SO_BINDTODEVICE;
3186 addr_ifname = alloca(IFNAMSIZ);
3187 memcpy(addr_ifname, dev_ifname, optlen);
3188 addr_ifname[optlen] = 0;
3189 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3190 addr_ifname, optlen));
3191 unlock_user (dev_ifname, optval_addr, 0);
3192 return ret;
3194 /* Options with 'int' argument. */
3195 case TARGET_SO_DEBUG:
3196 optname = SO_DEBUG;
3197 break;
3198 case TARGET_SO_REUSEADDR:
3199 optname = SO_REUSEADDR;
3200 break;
3201 case TARGET_SO_TYPE:
3202 optname = SO_TYPE;
3203 break;
3204 case TARGET_SO_ERROR:
3205 optname = SO_ERROR;
3206 break;
3207 case TARGET_SO_DONTROUTE:
3208 optname = SO_DONTROUTE;
3209 break;
3210 case TARGET_SO_BROADCAST:
3211 optname = SO_BROADCAST;
3212 break;
3213 case TARGET_SO_SNDBUF:
3214 optname = SO_SNDBUF;
3215 break;
3216 case TARGET_SO_SNDBUFFORCE:
3217 optname = SO_SNDBUFFORCE;
3218 break;
3219 case TARGET_SO_RCVBUF:
3220 optname = SO_RCVBUF;
3221 break;
3222 case TARGET_SO_RCVBUFFORCE:
3223 optname = SO_RCVBUFFORCE;
3224 break;
3225 case TARGET_SO_KEEPALIVE:
3226 optname = SO_KEEPALIVE;
3227 break;
3228 case TARGET_SO_OOBINLINE:
3229 optname = SO_OOBINLINE;
3230 break;
3231 case TARGET_SO_NO_CHECK:
3232 optname = SO_NO_CHECK;
3233 break;
3234 case TARGET_SO_PRIORITY:
3235 optname = SO_PRIORITY;
3236 break;
3237 #ifdef SO_BSDCOMPAT
3238 case TARGET_SO_BSDCOMPAT:
3239 optname = SO_BSDCOMPAT;
3240 break;
3241 #endif
3242 case TARGET_SO_PASSCRED:
3243 optname = SO_PASSCRED;
3244 break;
3245 case TARGET_SO_PASSSEC:
3246 optname = SO_PASSSEC;
3247 break;
3248 case TARGET_SO_TIMESTAMP:
3249 optname = SO_TIMESTAMP;
3250 break;
3251 case TARGET_SO_RCVLOWAT:
3252 optname = SO_RCVLOWAT;
3253 break;
3254 default:
3255 goto unimplemented;
3257 if (optlen < sizeof(uint32_t))
3258 return -TARGET_EINVAL;
3260 if (get_user_u32(val, optval_addr))
3261 return -TARGET_EFAULT;
3262 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
3263 break;
3264 default:
3265 unimplemented:
3266 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
3267 ret = -TARGET_ENOPROTOOPT;
3269 return ret;
3272 /* do_getsockopt() Must return target values and target errnos. */
3273 static abi_long do_getsockopt(int sockfd, int level, int optname,
3274 abi_ulong optval_addr, abi_ulong optlen)
3276 abi_long ret;
3277 int len, val;
3278 socklen_t lv;
3280 switch(level) {
3281 case TARGET_SOL_SOCKET:
3282 level = SOL_SOCKET;
3283 switch (optname) {
3284 /* These don't just return a single integer */
3285 case TARGET_SO_LINGER:
3286 case TARGET_SO_RCVTIMEO:
3287 case TARGET_SO_SNDTIMEO:
3288 case TARGET_SO_PEERNAME:
3289 goto unimplemented;
3290 case TARGET_SO_PEERCRED: {
3291 struct ucred cr;
3292 socklen_t crlen;
3293 struct target_ucred *tcr;
3295 if (get_user_u32(len, optlen)) {
3296 return -TARGET_EFAULT;
3298 if (len < 0) {
3299 return -TARGET_EINVAL;
3302 crlen = sizeof(cr);
3303 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
3304 &cr, &crlen));
3305 if (ret < 0) {
3306 return ret;
3308 if (len > crlen) {
3309 len = crlen;
3311 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
3312 return -TARGET_EFAULT;
3314 __put_user(cr.pid, &tcr->pid);
3315 __put_user(cr.uid, &tcr->uid);
3316 __put_user(cr.gid, &tcr->gid);
3317 unlock_user_struct(tcr, optval_addr, 1);
3318 if (put_user_u32(len, optlen)) {
3319 return -TARGET_EFAULT;
3321 break;
3323 /* Options with 'int' argument. */
3324 case TARGET_SO_DEBUG:
3325 optname = SO_DEBUG;
3326 goto int_case;
3327 case TARGET_SO_REUSEADDR:
3328 optname = SO_REUSEADDR;
3329 goto int_case;
3330 case TARGET_SO_TYPE:
3331 optname = SO_TYPE;
3332 goto int_case;
3333 case TARGET_SO_ERROR:
3334 optname = SO_ERROR;
3335 goto int_case;
3336 case TARGET_SO_DONTROUTE:
3337 optname = SO_DONTROUTE;
3338 goto int_case;
3339 case TARGET_SO_BROADCAST:
3340 optname = SO_BROADCAST;
3341 goto int_case;
3342 case TARGET_SO_SNDBUF:
3343 optname = SO_SNDBUF;
3344 goto int_case;
3345 case TARGET_SO_RCVBUF:
3346 optname = SO_RCVBUF;
3347 goto int_case;
3348 case TARGET_SO_KEEPALIVE:
3349 optname = SO_KEEPALIVE;
3350 goto int_case;
3351 case TARGET_SO_OOBINLINE:
3352 optname = SO_OOBINLINE;
3353 goto int_case;
3354 case TARGET_SO_NO_CHECK:
3355 optname = SO_NO_CHECK;
3356 goto int_case;
3357 case TARGET_SO_PRIORITY:
3358 optname = SO_PRIORITY;
3359 goto int_case;
3360 #ifdef SO_BSDCOMPAT
3361 case TARGET_SO_BSDCOMPAT:
3362 optname = SO_BSDCOMPAT;
3363 goto int_case;
3364 #endif
3365 case TARGET_SO_PASSCRED:
3366 optname = SO_PASSCRED;
3367 goto int_case;
3368 case TARGET_SO_TIMESTAMP:
3369 optname = SO_TIMESTAMP;
3370 goto int_case;
3371 case TARGET_SO_RCVLOWAT:
3372 optname = SO_RCVLOWAT;
3373 goto int_case;
3374 case TARGET_SO_ACCEPTCONN:
3375 optname = SO_ACCEPTCONN;
3376 goto int_case;
3377 default:
3378 goto int_case;
3380 break;
3381 case SOL_TCP:
3382 /* TCP options all take an 'int' value. */
3383 int_case:
3384 if (get_user_u32(len, optlen))
3385 return -TARGET_EFAULT;
3386 if (len < 0)
3387 return -TARGET_EINVAL;
3388 lv = sizeof(lv);
3389 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3390 if (ret < 0)
3391 return ret;
3392 if (optname == SO_TYPE) {
3393 val = host_to_target_sock_type(val);
3395 if (len > lv)
3396 len = lv;
3397 if (len == 4) {
3398 if (put_user_u32(val, optval_addr))
3399 return -TARGET_EFAULT;
3400 } else {
3401 if (put_user_u8(val, optval_addr))
3402 return -TARGET_EFAULT;
3404 if (put_user_u32(len, optlen))
3405 return -TARGET_EFAULT;
3406 break;
3407 case SOL_IP:
3408 switch(optname) {
3409 case IP_TOS:
3410 case IP_TTL:
3411 case IP_HDRINCL:
3412 case IP_ROUTER_ALERT:
3413 case IP_RECVOPTS:
3414 case IP_RETOPTS:
3415 case IP_PKTINFO:
3416 case IP_MTU_DISCOVER:
3417 case IP_RECVERR:
3418 case IP_RECVTOS:
3419 #ifdef IP_FREEBIND
3420 case IP_FREEBIND:
3421 #endif
3422 case IP_MULTICAST_TTL:
3423 case IP_MULTICAST_LOOP:
3424 if (get_user_u32(len, optlen))
3425 return -TARGET_EFAULT;
3426 if (len < 0)
3427 return -TARGET_EINVAL;
3428 lv = sizeof(lv);
3429 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3430 if (ret < 0)
3431 return ret;
3432 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
3433 len = 1;
3434 if (put_user_u32(len, optlen)
3435 || put_user_u8(val, optval_addr))
3436 return -TARGET_EFAULT;
3437 } else {
3438 if (len > sizeof(int))
3439 len = sizeof(int);
3440 if (put_user_u32(len, optlen)
3441 || put_user_u32(val, optval_addr))
3442 return -TARGET_EFAULT;
3444 break;
3445 default:
3446 ret = -TARGET_ENOPROTOOPT;
3447 break;
3449 break;
3450 default:
3451 unimplemented:
3452 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3453 level, optname);
3454 ret = -TARGET_EOPNOTSUPP;
3455 break;
3457 return ret;
3460 /* Convert target low/high pair representing file offset into the host
3461 * low/high pair. This function doesn't handle offsets bigger than 64 bits
3462 * as the kernel doesn't handle them either.
3464 static void target_to_host_low_high(abi_ulong tlow,
3465 abi_ulong thigh,
3466 unsigned long *hlow,
3467 unsigned long *hhigh)
3469 uint64_t off = tlow |
3470 ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3471 TARGET_LONG_BITS / 2;
3473 *hlow = off;
3474 *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3477 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3478 abi_ulong count, int copy)
3480 struct target_iovec *target_vec;
3481 struct iovec *vec;
3482 abi_ulong total_len, max_len;
3483 int i;
3484 int err = 0;
3485 bool bad_address = false;
3487 if (count == 0) {
3488 errno = 0;
3489 return NULL;
3491 if (count > IOV_MAX) {
3492 errno = EINVAL;
3493 return NULL;
3496 vec = g_try_new0(struct iovec, count);
3497 if (vec == NULL) {
3498 errno = ENOMEM;
3499 return NULL;
3502 target_vec = lock_user(VERIFY_READ, target_addr,
3503 count * sizeof(struct target_iovec), 1);
3504 if (target_vec == NULL) {
3505 err = EFAULT;
3506 goto fail2;
3509 /* ??? If host page size > target page size, this will result in a
3510 value larger than what we can actually support. */
3511 max_len = 0x7fffffff & TARGET_PAGE_MASK;
3512 total_len = 0;
3514 for (i = 0; i < count; i++) {
3515 abi_ulong base = tswapal(target_vec[i].iov_base);
3516 abi_long len = tswapal(target_vec[i].iov_len);
3518 if (len < 0) {
3519 err = EINVAL;
3520 goto fail;
3521 } else if (len == 0) {
3522 /* Zero length pointer is ignored. */
3523 vec[i].iov_base = 0;
3524 } else {
3525 vec[i].iov_base = lock_user(type, base, len, copy);
3526 /* If the first buffer pointer is bad, this is a fault. But
3527 * subsequent bad buffers will result in a partial write; this
3528 * is realized by filling the vector with null pointers and
3529 * zero lengths. */
3530 if (!vec[i].iov_base) {
3531 if (i == 0) {
3532 err = EFAULT;
3533 goto fail;
3534 } else {
3535 bad_address = true;
3538 if (bad_address) {
3539 len = 0;
3541 if (len > max_len - total_len) {
3542 len = max_len - total_len;
3545 vec[i].iov_len = len;
3546 total_len += len;
3549 unlock_user(target_vec, target_addr, 0);
3550 return vec;
3552 fail:
3553 while (--i >= 0) {
3554 if (tswapal(target_vec[i].iov_len) > 0) {
3555 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3558 unlock_user(target_vec, target_addr, 0);
3559 fail2:
3560 g_free(vec);
3561 errno = err;
3562 return NULL;
3565 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3566 abi_ulong count, int copy)
3568 struct target_iovec *target_vec;
3569 int i;
3571 target_vec = lock_user(VERIFY_READ, target_addr,
3572 count * sizeof(struct target_iovec), 1);
3573 if (target_vec) {
3574 for (i = 0; i < count; i++) {
3575 abi_ulong base = tswapal(target_vec[i].iov_base);
3576 abi_long len = tswapal(target_vec[i].iov_len);
3577 if (len < 0) {
3578 break;
3580 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3582 unlock_user(target_vec, target_addr, 0);
3585 g_free(vec);
3588 static inline int target_to_host_sock_type(int *type)
3590 int host_type = 0;
3591 int target_type = *type;
3593 switch (target_type & TARGET_SOCK_TYPE_MASK) {
3594 case TARGET_SOCK_DGRAM:
3595 host_type = SOCK_DGRAM;
3596 break;
3597 case TARGET_SOCK_STREAM:
3598 host_type = SOCK_STREAM;
3599 break;
3600 default:
3601 host_type = target_type & TARGET_SOCK_TYPE_MASK;
3602 break;
3604 if (target_type & TARGET_SOCK_CLOEXEC) {
3605 #if defined(SOCK_CLOEXEC)
3606 host_type |= SOCK_CLOEXEC;
3607 #else
3608 return -TARGET_EINVAL;
3609 #endif
3611 if (target_type & TARGET_SOCK_NONBLOCK) {
3612 #if defined(SOCK_NONBLOCK)
3613 host_type |= SOCK_NONBLOCK;
3614 #elif !defined(O_NONBLOCK)
3615 return -TARGET_EINVAL;
3616 #endif
3618 *type = host_type;
3619 return 0;
3622 /* Try to emulate socket type flags after socket creation. */
3623 static int sock_flags_fixup(int fd, int target_type)
3625 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3626 if (target_type & TARGET_SOCK_NONBLOCK) {
3627 int flags = fcntl(fd, F_GETFL);
3628 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3629 close(fd);
3630 return -TARGET_EINVAL;
3633 #endif
3634 return fd;
3637 static abi_long packet_target_to_host_sockaddr(void *host_addr,
3638 abi_ulong target_addr,
3639 socklen_t len)
3641 struct sockaddr *addr = host_addr;
3642 struct target_sockaddr *target_saddr;
3644 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
3645 if (!target_saddr) {
3646 return -TARGET_EFAULT;
3649 memcpy(addr, target_saddr, len);
3650 addr->sa_family = tswap16(target_saddr->sa_family);
3651 /* spkt_protocol is big-endian */
3653 unlock_user(target_saddr, target_addr, 0);
3654 return 0;
3657 static TargetFdTrans target_packet_trans = {
3658 .target_to_host_addr = packet_target_to_host_sockaddr,
#ifdef CONFIG_RTNETLINK
/* fd translators for NETLINK_ROUTE sockets: byte-swap the nlmsg
 * payloads in both directions, preserving the buffer length on success.
 */
static abi_long netlink_route_target_to_host(void *buf, size_t len)
{
    abi_long ret = target_to_host_nlmsg_route(buf, len);

    return (ret < 0) ? ret : (abi_long)len;
}

static abi_long netlink_route_host_to_target(void *buf, size_t len)
{
    abi_long ret = host_to_target_nlmsg_route(buf, len);

    return (ret < 0) ? ret : (abi_long)len;
}

static TargetFdTrans target_netlink_route_trans = {
    .target_to_host_data = netlink_route_target_to_host,
    .host_to_target_data = netlink_route_host_to_target,
};
#endif /* CONFIG_RTNETLINK */
3692 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
3694 abi_long ret;
3696 ret = target_to_host_nlmsg_audit(buf, len);
3697 if (ret < 0) {
3698 return ret;
3701 return len;
3704 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
3706 abi_long ret;
3708 ret = host_to_target_nlmsg_audit(buf, len);
3709 if (ret < 0) {
3710 return ret;
3713 return len;
3716 static TargetFdTrans target_netlink_audit_trans = {
3717 .target_to_host_data = netlink_audit_target_to_host,
3718 .host_to_target_data = netlink_audit_host_to_target,
3721 /* do_socket() Must return target values and target errnos. */
3722 static abi_long do_socket(int domain, int type, int protocol)
3724 int target_type = type;
3725 int ret;
3727 ret = target_to_host_sock_type(&type);
3728 if (ret) {
3729 return ret;
3732 if (domain == PF_NETLINK && !(
3733 #ifdef CONFIG_RTNETLINK
3734 protocol == NETLINK_ROUTE ||
3735 #endif
3736 protocol == NETLINK_KOBJECT_UEVENT ||
3737 protocol == NETLINK_AUDIT)) {
3738 return -EPFNOSUPPORT;
3741 if (domain == AF_PACKET ||
3742 (domain == AF_INET && type == SOCK_PACKET)) {
3743 protocol = tswap16(protocol);
3746 ret = get_errno(socket(domain, type, protocol));
3747 if (ret >= 0) {
3748 ret = sock_flags_fixup(ret, target_type);
3749 if (type == SOCK_PACKET) {
3750 /* Manage an obsolete case :
3751 * if socket type is SOCK_PACKET, bind by name
3753 fd_trans_register(ret, &target_packet_trans);
3754 } else if (domain == PF_NETLINK) {
3755 switch (protocol) {
3756 #ifdef CONFIG_RTNETLINK
3757 case NETLINK_ROUTE:
3758 fd_trans_register(ret, &target_netlink_route_trans);
3759 break;
3760 #endif
3761 case NETLINK_KOBJECT_UEVENT:
3762 /* nothing to do: messages are strings */
3763 break;
3764 case NETLINK_AUDIT:
3765 fd_trans_register(ret, &target_netlink_audit_trans);
3766 break;
3767 default:
3768 g_assert_not_reached();
3772 return ret;
3775 /* do_bind() Must return target values and target errnos. */
3776 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3777 socklen_t addrlen)
3779 void *addr;
3780 abi_long ret;
3782 if ((int)addrlen < 0) {
3783 return -TARGET_EINVAL;
3786 addr = alloca(addrlen+1);
3788 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3789 if (ret)
3790 return ret;
3792 return get_errno(bind(sockfd, addr, addrlen));
3795 /* do_connect() Must return target values and target errnos. */
3796 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3797 socklen_t addrlen)
3799 void *addr;
3800 abi_long ret;
3802 if ((int)addrlen < 0) {
3803 return -TARGET_EINVAL;
3806 addr = alloca(addrlen+1);
3808 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3809 if (ret)
3810 return ret;
3812 return get_errno(safe_connect(sockfd, addr, addrlen));
3815 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3816 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3817 int flags, int send)
3819 abi_long ret, len;
3820 struct msghdr msg;
3821 abi_ulong count;
3822 struct iovec *vec;
3823 abi_ulong target_vec;
3825 if (msgp->msg_name) {
3826 msg.msg_namelen = tswap32(msgp->msg_namelen);
3827 msg.msg_name = alloca(msg.msg_namelen+1);
3828 ret = target_to_host_sockaddr(fd, msg.msg_name,
3829 tswapal(msgp->msg_name),
3830 msg.msg_namelen);
3831 if (ret == -TARGET_EFAULT) {
3832 /* For connected sockets msg_name and msg_namelen must
3833 * be ignored, so returning EFAULT immediately is wrong.
3834 * Instead, pass a bad msg_name to the host kernel, and
3835 * let it decide whether to return EFAULT or not.
3837 msg.msg_name = (void *)-1;
3838 } else if (ret) {
3839 goto out2;
3841 } else {
3842 msg.msg_name = NULL;
3843 msg.msg_namelen = 0;
3845 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3846 msg.msg_control = alloca(msg.msg_controllen);
3847 msg.msg_flags = tswap32(msgp->msg_flags);
3849 count = tswapal(msgp->msg_iovlen);
3850 target_vec = tswapal(msgp->msg_iov);
3852 if (count > IOV_MAX) {
3853 /* sendrcvmsg returns a different errno for this condition than
3854 * readv/writev, so we must catch it here before lock_iovec() does.
3856 ret = -TARGET_EMSGSIZE;
3857 goto out2;
3860 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3861 target_vec, count, send);
3862 if (vec == NULL) {
3863 ret = -host_to_target_errno(errno);
3864 goto out2;
3866 msg.msg_iovlen = count;
3867 msg.msg_iov = vec;
3869 if (send) {
3870 if (fd_trans_target_to_host_data(fd)) {
3871 void *host_msg;
3873 host_msg = g_malloc(msg.msg_iov->iov_len);
3874 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3875 ret = fd_trans_target_to_host_data(fd)(host_msg,
3876 msg.msg_iov->iov_len);
3877 if (ret >= 0) {
3878 msg.msg_iov->iov_base = host_msg;
3879 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3881 g_free(host_msg);
3882 } else {
3883 ret = target_to_host_cmsg(&msg, msgp);
3884 if (ret == 0) {
3885 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3888 } else {
3889 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3890 if (!is_error(ret)) {
3891 len = ret;
3892 if (fd_trans_host_to_target_data(fd)) {
3893 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3894 len);
3895 } else {
3896 ret = host_to_target_cmsg(msgp, &msg);
3898 if (!is_error(ret)) {
3899 msgp->msg_namelen = tswap32(msg.msg_namelen);
3900 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3901 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3902 msg.msg_name, msg.msg_namelen);
3903 if (ret) {
3904 goto out;
3908 ret = len;
3913 out:
3914 unlock_iovec(vec, target_vec, count, !send);
3915 out2:
3916 return ret;
3919 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3920 int flags, int send)
3922 abi_long ret;
3923 struct target_msghdr *msgp;
3925 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3926 msgp,
3927 target_msg,
3928 send ? 1 : 0)) {
3929 return -TARGET_EFAULT;
3931 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3932 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3933 return ret;
3936 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3937 * so it might not have this *mmsg-specific flag either.
3939 #ifndef MSG_WAITFORONE
3940 #define MSG_WAITFORONE 0x10000
3941 #endif
3943 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3944 unsigned int vlen, unsigned int flags,
3945 int send)
3947 struct target_mmsghdr *mmsgp;
3948 abi_long ret = 0;
3949 int i;
3951 if (vlen > UIO_MAXIOV) {
3952 vlen = UIO_MAXIOV;
3955 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3956 if (!mmsgp) {
3957 return -TARGET_EFAULT;
3960 for (i = 0; i < vlen; i++) {
3961 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3962 if (is_error(ret)) {
3963 break;
3965 mmsgp[i].msg_len = tswap32(ret);
3966 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3967 if (flags & MSG_WAITFORONE) {
3968 flags |= MSG_DONTWAIT;
3972 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3974 /* Return number of datagrams sent if we sent any at all;
3975 * otherwise return the error.
3977 if (i) {
3978 return i;
3980 return ret;
3983 /* do_accept4() Must return target values and target errnos. */
3984 static abi_long do_accept4(int fd, abi_ulong target_addr,
3985 abi_ulong target_addrlen_addr, int flags)
3987 socklen_t addrlen;
3988 void *addr;
3989 abi_long ret;
3990 int host_flags;
3992 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3994 if (target_addr == 0) {
3995 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3998 /* linux returns EINVAL if addrlen pointer is invalid */
3999 if (get_user_u32(addrlen, target_addrlen_addr))
4000 return -TARGET_EINVAL;
4002 if ((int)addrlen < 0) {
4003 return -TARGET_EINVAL;
4006 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
4007 return -TARGET_EINVAL;
4009 addr = alloca(addrlen);
4011 ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
4012 if (!is_error(ret)) {
4013 host_to_target_sockaddr(target_addr, addr, addrlen);
4014 if (put_user_u32(addrlen, target_addrlen_addr))
4015 ret = -TARGET_EFAULT;
4017 return ret;
4020 /* do_getpeername() Must return target values and target errnos. */
4021 static abi_long do_getpeername(int fd, abi_ulong target_addr,
4022 abi_ulong target_addrlen_addr)
4024 socklen_t addrlen;
4025 void *addr;
4026 abi_long ret;
4028 if (get_user_u32(addrlen, target_addrlen_addr))
4029 return -TARGET_EFAULT;
4031 if ((int)addrlen < 0) {
4032 return -TARGET_EINVAL;
4035 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
4036 return -TARGET_EFAULT;
4038 addr = alloca(addrlen);
4040 ret = get_errno(getpeername(fd, addr, &addrlen));
4041 if (!is_error(ret)) {
4042 host_to_target_sockaddr(target_addr, addr, addrlen);
4043 if (put_user_u32(addrlen, target_addrlen_addr))
4044 ret = -TARGET_EFAULT;
4046 return ret;
4049 /* do_getsockname() Must return target values and target errnos. */
4050 static abi_long do_getsockname(int fd, abi_ulong target_addr,
4051 abi_ulong target_addrlen_addr)
4053 socklen_t addrlen;
4054 void *addr;
4055 abi_long ret;
4057 if (get_user_u32(addrlen, target_addrlen_addr))
4058 return -TARGET_EFAULT;
4060 if ((int)addrlen < 0) {
4061 return -TARGET_EINVAL;
4064 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
4065 return -TARGET_EFAULT;
4067 addr = alloca(addrlen);
4069 ret = get_errno(getsockname(fd, addr, &addrlen));
4070 if (!is_error(ret)) {
4071 host_to_target_sockaddr(target_addr, addr, addrlen);
4072 if (put_user_u32(addrlen, target_addrlen_addr))
4073 ret = -TARGET_EFAULT;
4075 return ret;
4078 /* do_socketpair() Must return target values and target errnos. */
4079 static abi_long do_socketpair(int domain, int type, int protocol,
4080 abi_ulong target_tab_addr)
4082 int tab[2];
4083 abi_long ret;
4085 target_to_host_sock_type(&type);
4087 ret = get_errno(socketpair(domain, type, protocol, tab));
4088 if (!is_error(ret)) {
4089 if (put_user_s32(tab[0], target_tab_addr)
4090 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
4091 ret = -TARGET_EFAULT;
4093 return ret;
4096 /* do_sendto() Must return target values and target errnos. */
4097 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
4098 abi_ulong target_addr, socklen_t addrlen)
4100 void *addr;
4101 void *host_msg;
4102 void *copy_msg = NULL;
4103 abi_long ret;
4105 if ((int)addrlen < 0) {
4106 return -TARGET_EINVAL;
4109 host_msg = lock_user(VERIFY_READ, msg, len, 1);
4110 if (!host_msg)
4111 return -TARGET_EFAULT;
4112 if (fd_trans_target_to_host_data(fd)) {
4113 copy_msg = host_msg;
4114 host_msg = g_malloc(len);
4115 memcpy(host_msg, copy_msg, len);
4116 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
4117 if (ret < 0) {
4118 goto fail;
4121 if (target_addr) {
4122 addr = alloca(addrlen+1);
4123 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
4124 if (ret) {
4125 goto fail;
4127 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
4128 } else {
4129 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
4131 fail:
4132 if (copy_msg) {
4133 g_free(host_msg);
4134 host_msg = copy_msg;
4136 unlock_user(host_msg, msg, 0);
4137 return ret;
4140 /* do_recvfrom() Must return target values and target errnos. */
4141 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
4142 abi_ulong target_addr,
4143 abi_ulong target_addrlen)
4145 socklen_t addrlen;
4146 void *addr;
4147 void *host_msg;
4148 abi_long ret;
4150 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
4151 if (!host_msg)
4152 return -TARGET_EFAULT;
4153 if (target_addr) {
4154 if (get_user_u32(addrlen, target_addrlen)) {
4155 ret = -TARGET_EFAULT;
4156 goto fail;
4158 if ((int)addrlen < 0) {
4159 ret = -TARGET_EINVAL;
4160 goto fail;
4162 addr = alloca(addrlen);
4163 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
4164 addr, &addrlen));
4165 } else {
4166 addr = NULL; /* To keep compiler quiet. */
4167 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
4169 if (!is_error(ret)) {
4170 if (fd_trans_host_to_target_data(fd)) {
4171 ret = fd_trans_host_to_target_data(fd)(host_msg, ret);
4173 if (target_addr) {
4174 host_to_target_sockaddr(target_addr, addr, addrlen);
4175 if (put_user_u32(addrlen, target_addrlen)) {
4176 ret = -TARGET_EFAULT;
4177 goto fail;
4180 unlock_user(host_msg, msg, len);
4181 } else {
4182 fail:
4183 unlock_user(host_msg, msg, 0);
4185 return ret;
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
4281 #define N_SHM_REGIONS 32
4283 static struct shm_region {
4284 abi_ulong start;
4285 abi_ulong size;
4286 bool in_use;
4287 } shm_regions[N_SHM_REGIONS];
4289 #ifndef TARGET_SEMID64_DS
4290 /* asm-generic version of this struct */
4291 struct target_semid64_ds
4293 struct target_ipc_perm sem_perm;
4294 abi_ulong sem_otime;
4295 #if TARGET_ABI_BITS == 32
4296 abi_ulong __unused1;
4297 #endif
4298 abi_ulong sem_ctime;
4299 #if TARGET_ABI_BITS == 32
4300 abi_ulong __unused2;
4301 #endif
4302 abi_ulong sem_nsems;
4303 abi_ulong __unused3;
4304 abi_ulong __unused4;
4306 #endif
4308 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
4309 abi_ulong target_addr)
4311 struct target_ipc_perm *target_ip;
4312 struct target_semid64_ds *target_sd;
4314 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4315 return -TARGET_EFAULT;
4316 target_ip = &(target_sd->sem_perm);
4317 host_ip->__key = tswap32(target_ip->__key);
4318 host_ip->uid = tswap32(target_ip->uid);
4319 host_ip->gid = tswap32(target_ip->gid);
4320 host_ip->cuid = tswap32(target_ip->cuid);
4321 host_ip->cgid = tswap32(target_ip->cgid);
4322 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4323 host_ip->mode = tswap32(target_ip->mode);
4324 #else
4325 host_ip->mode = tswap16(target_ip->mode);
4326 #endif
4327 #if defined(TARGET_PPC)
4328 host_ip->__seq = tswap32(target_ip->__seq);
4329 #else
4330 host_ip->__seq = tswap16(target_ip->__seq);
4331 #endif
4332 unlock_user_struct(target_sd, target_addr, 0);
4333 return 0;
4336 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
4337 struct ipc_perm *host_ip)
4339 struct target_ipc_perm *target_ip;
4340 struct target_semid64_ds *target_sd;
4342 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4343 return -TARGET_EFAULT;
4344 target_ip = &(target_sd->sem_perm);
4345 target_ip->__key = tswap32(host_ip->__key);
4346 target_ip->uid = tswap32(host_ip->uid);
4347 target_ip->gid = tswap32(host_ip->gid);
4348 target_ip->cuid = tswap32(host_ip->cuid);
4349 target_ip->cgid = tswap32(host_ip->cgid);
4350 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4351 target_ip->mode = tswap32(host_ip->mode);
4352 #else
4353 target_ip->mode = tswap16(host_ip->mode);
4354 #endif
4355 #if defined(TARGET_PPC)
4356 target_ip->__seq = tswap32(host_ip->__seq);
4357 #else
4358 target_ip->__seq = tswap16(host_ip->__seq);
4359 #endif
4360 unlock_user_struct(target_sd, target_addr, 1);
4361 return 0;
4364 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
4365 abi_ulong target_addr)
4367 struct target_semid64_ds *target_sd;
4369 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4370 return -TARGET_EFAULT;
4371 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
4372 return -TARGET_EFAULT;
4373 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
4374 host_sd->sem_otime = tswapal(target_sd->sem_otime);
4375 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
4376 unlock_user_struct(target_sd, target_addr, 0);
4377 return 0;
4380 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
4381 struct semid_ds *host_sd)
4383 struct target_semid64_ds *target_sd;
4385 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4386 return -TARGET_EFAULT;
4387 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
4388 return -TARGET_EFAULT;
4389 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
4390 target_sd->sem_otime = tswapal(host_sd->sem_otime);
4391 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
4392 unlock_user_struct(target_sd, target_addr, 1);
4393 return 0;
/* Guest-layout mirror of the host struct seminfo (semctl IPC_INFO). */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
4409 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
4410 struct seminfo *host_seminfo)
4412 struct target_seminfo *target_seminfo;
4413 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
4414 return -TARGET_EFAULT;
4415 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
4416 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
4417 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
4418 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
4419 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
4420 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
4421 __put_user(host_seminfo->semume, &target_seminfo->semume);
4422 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
4423 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
4424 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
4425 unlock_user_struct(target_seminfo, target_addr, 1);
4426 return 0;
/* Host-side fourth argument of semctl(2); which member is live
 * depends on the command (SETVAL, IPC_STAT, GETALL, IPC_INFO...). */
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

/* Guest view of semun: pointer members are guest addresses (abi_ulong). */
union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
/* Allocate *host_array and fill it from the guest's unsigned short
 * array at target_addr (for semctl SETALL).  The semaphore count is
 * queried from the host with IPC_STAT.  On success the caller owns
 * the g_-allocated *host_array.  Returns 0 or a -TARGET_* errno.
 */
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = g_try_new(unsigned short, nsems);
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array) {
        /* Allocation is released here, so the caller need not clean up. */
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}
4479 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4480 unsigned short **host_array)
4482 int nsems;
4483 unsigned short *array;
4484 union semun semun;
4485 struct semid_ds semid_ds;
4486 int i, ret;
4488 semun.buf = &semid_ds;
4490 ret = semctl(semid, 0, IPC_STAT, semun);
4491 if (ret == -1)
4492 return get_errno(ret);
4494 nsems = semid_ds.sem_nsems;
4496 array = lock_user(VERIFY_WRITE, target_addr,
4497 nsems*sizeof(unsigned short), 0);
4498 if (!array)
4499 return -TARGET_EFAULT;
4501 for(i=0; i<nsems; i++) {
4502 __put_user((*host_array)[i], &array[i]);
4504 g_free(*host_array);
4505 unlock_user(array, target_addr, 1);
4507 return 0;
/* Emulate semctl(2).  The low byte of cmd selects the operation;
 * target_arg is the guest's semun, passed by value.  Returns the
 * host result or a -TARGET_* errno.
 */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    /* Some ABIs pass IPC_64 etc. in the high bits; only the low byte
     * is the actual command. */
    cmd &= 0xff;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element.  To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        /* Round-trip the whole semaphore-value array through a host copy. */
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* Commands that take no semun argument. */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
/* Guest-layout mirror of struct sembuf (one semop(2) operation). */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
4586 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4587 abi_ulong target_addr,
4588 unsigned nsops)
4590 struct target_sembuf *target_sembuf;
4591 int i;
4593 target_sembuf = lock_user(VERIFY_READ, target_addr,
4594 nsops*sizeof(struct target_sembuf), 1);
4595 if (!target_sembuf)
4596 return -TARGET_EFAULT;
4598 for(i=0; i<nsops; i++) {
4599 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4600 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4601 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4604 unlock_user(target_sembuf, target_addr, 0);
4606 return 0;
/* Emulate semop(2): convert the guest op array then perform the
 * (restartable) host call with no timeout.
 */
static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
{
    /* VLA sized by the guest-supplied count.
     * NOTE(review): nsops is not bounded here; a huge value could
     * overflow the stack — presumably callers/kernel limit it, verify. */
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
}
/* Guest-layout mirror of struct msqid64_ds.  On 32-bit ABIs the time
 * fields are followed by explicit padding words, matching the kernel's
 * 32-bit msqid64_ds layout. */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};
/* Copy a guest msqid_ds at target_addr into host *host_md,
 * byteswapping as needed.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    /* NOTE(review): target_md stays locked on this error path — confirm
     * whether an unlock_user_struct is missing. */
    if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}
4664 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4665 struct msqid_ds *host_md)
4667 struct target_msqid_ds *target_md;
4669 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4670 return -TARGET_EFAULT;
4671 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4672 return -TARGET_EFAULT;
4673 target_md->msg_stime = tswapal(host_md->msg_stime);
4674 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4675 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4676 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4677 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4678 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4679 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4680 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4681 unlock_user_struct(target_md, target_addr, 1);
4682 return 0;
/* Guest-layout mirror of struct msginfo (msgctl IPC_INFO/MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4696 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4697 struct msginfo *host_msginfo)
4699 struct target_msginfo *target_msginfo;
4700 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4701 return -TARGET_EFAULT;
4702 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4703 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4704 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4705 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4706 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4707 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4708 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4709 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4710 unlock_user_struct(target_msginfo, target_addr, 1);
4711 return 0;
/* Emulate msgctl(2).  The low byte of cmd selects the operation;
 * ptr is the guest buffer whose meaning depends on cmd.
 */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        /* Round-trip the msqid_ds through a host copy (the STAT case
         * reads the guest buffer too, which is harmless). */
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* The kernel takes a msqid_ds* here but fills a msginfo. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
/* Guest-layout mirror of struct msgbuf; mtext is variable length
 * (declared [1], actual size is the msgsz passed to msgsnd/msgrcv). */
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};
/* Emulate msgsnd(2): copy the guest message (mtype + msgsz payload
 * bytes) into a host msgbuf and send it.  Returns 0 or -TARGET_* errno.
 */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* Host buffer: long mtype header plus msgsz payload bytes. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    /* NOTE(review): this reads msgsz bytes through the locked struct
     * (whose declared mtext is 1 byte); relies on lock_user_struct
     * mapping covering the full message — confirm. */
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
/* Emulate msgrcv(2): receive into a host msgbuf, then copy mtype and
 * the received payload back to the guest.  Returns the byte count
 * received or a -TARGET_* errno.
 */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));

    if (ret > 0) {
        /* Lock the payload area separately so its pages are validated
         * before copying ret bytes of message text. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
/* Copy a guest shmid_ds at target_addr into host *host_sd,
 * byteswapping as needed.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    /* NOTE(review): target_sd stays locked on this error path — confirm
     * whether an unlock_user_struct is missing. */
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
4841 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4842 struct shmid_ds *host_sd)
4844 struct target_shmid_ds *target_sd;
4846 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4847 return -TARGET_EFAULT;
4848 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4849 return -TARGET_EFAULT;
4850 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4851 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4852 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4853 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4854 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4855 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4856 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4857 unlock_user_struct(target_sd, target_addr, 1);
4858 return 0;
/* Guest-layout mirror of struct shminfo (shmctl IPC_INFO). */
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};
4869 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4870 struct shminfo *host_shminfo)
4872 struct target_shminfo *target_shminfo;
4873 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4874 return -TARGET_EFAULT;
4875 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4876 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4877 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4878 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4879 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4880 unlock_user_struct(target_shminfo, target_addr, 1);
4881 return 0;
/* Guest-layout mirror of struct shm_info (shmctl SHM_INFO). */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
4893 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4894 struct shm_info *host_shm_info)
4896 struct target_shm_info *target_shm_info;
4897 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4898 return -TARGET_EFAULT;
4899 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4900 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4901 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4902 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4903 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4904 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4905 unlock_user_struct(target_shm_info, target_addr, 1);
4906 return 0;
/* Emulate shmctl(2).  The low byte of cmd selects the operation;
 * buf is the guest buffer whose meaning depends on cmd.
 */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        /* Round-trip the shmid_ds through a host copy. */
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* The kernel takes a shmid_ds* here but fills a shminfo. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        /* Commands that take no buffer. */
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif
/* Emulate shmat(2): attach a SysV shared memory segment into the guest
 * address space, honouring the target's SHMLBA alignment, and record
 * the mapping in shm_regions[] so do_shmdt() can undo it.
 * Returns the guest attach address or a -TARGET_* errno.
 */
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;
    abi_ulong shmlba;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    /* Enforce target SHMLBA alignment: round down with SHM_RND,
     * otherwise reject the address. */
    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* No address requested: find a free guest range ourselves and
         * force the attach there (SHM_REMAP overrides the placeholder). */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* Remember the mapping so shmdt can clear the page flags later.
     * NOTE(review): if shm_regions[] is full the attach succeeds but is
     * silently not recorded — confirm that's acceptable. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
/* Emulate shmdt(2): detach the segment previously attached at shmaddr,
 * clearing its guest page flags and releasing the shm_regions[] slot.
 */
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;
    abi_long rv;

    mmap_lock();

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
            shm_regions[i].in_use = false;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }

    /* Host detach happens regardless of whether we had the region
     * recorded (shmat may not have been routed through do_shmat). */
    rv = get_errno(shmdt(g2h(shmaddr)));

    mmap_unlock();

    return rv;
}
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos.
 *
 * Multiplexed sys_ipc entry point used by targets without the split
 * SysV IPC syscalls.  The low 16 bits of 'call' select the operation,
 * the high 16 bits carry a version used by msgrcv/shmat.
 */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-style msgrcv: ptr points at a {msgp, msgtyp} pair. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* The attach address is returned via the pointer in 'third'. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* iBCS2 emulator entry point: not supported. */
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
/* kernel structure types definitions */

/* First expansion of syscall_types.h: build an enum of STRUCT_<name>
 * ids for every kernel struct the ioctl thunking layer knows about. */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Second expansion: emit a thunk type-description array per struct.
 * STRUCT_SPECIAL entries get hand-written converters elsewhere. */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

typedef struct IOCTLEntry IOCTLEntry;

/* Custom handler for ioctls that the generic thunk converter can't do. */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

/* One row of the ioctl translation table. */
struct IOCTLEntry {
    int target_cmd;            /* guest ioctl number */
    unsigned int host_cmd;     /* corresponding host ioctl number */
    const char *name;
    int access;                /* IOC_R / IOC_W / IOC_RW */
    do_ioctl_fn *do_ioctl;     /* non-NULL for special-cased ioctls */
    const argtype arg_type[5]; /* thunk description of the argument */
};

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

/* Size of the on-stack scratch buffer used for ioctl argument conversion. */
#define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))

/* Special-case handler for FS_IOC_FIEMAP: the argument is a struct
 * fiemap followed by a variable-length fiemap_extent array, so the
 * generic thunk converter can't handle it.
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
5291 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
5292 int fd, int cmd, abi_long arg)
5294 const argtype *arg_type = ie->arg_type;
5295 int target_size;
5296 void *argptr;
5297 int ret;
5298 struct ifconf *host_ifconf;
5299 uint32_t outbufsz;
5300 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
5301 int target_ifreq_size;
5302 int nb_ifreq;
5303 int free_buf = 0;
5304 int i;
5305 int target_ifc_len;
5306 abi_long target_ifc_buf;
5307 int host_ifc_len;
5308 char *host_ifc_buf;
5310 assert(arg_type[0] == TYPE_PTR);
5311 assert(ie->access == IOC_RW);
5313 arg_type++;
5314 target_size = thunk_type_size(arg_type, 0);
5316 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5317 if (!argptr)
5318 return -TARGET_EFAULT;
5319 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5320 unlock_user(argptr, arg, 0);
5322 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
5323 target_ifc_len = host_ifconf->ifc_len;
5324 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
5326 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
5327 nb_ifreq = target_ifc_len / target_ifreq_size;
5328 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
5330 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
5331 if (outbufsz > MAX_STRUCT_SIZE) {
5332 /* We can't fit all the extents into the fixed size buffer.
5333 * Allocate one that is large enough and use it instead.
5335 host_ifconf = malloc(outbufsz);
5336 if (!host_ifconf) {
5337 return -TARGET_ENOMEM;
5339 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
5340 free_buf = 1;
5342 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
5344 host_ifconf->ifc_len = host_ifc_len;
5345 host_ifconf->ifc_buf = host_ifc_buf;
5347 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
5348 if (!is_error(ret)) {
5349 /* convert host ifc_len to target ifc_len */
5351 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
5352 target_ifc_len = nb_ifreq * target_ifreq_size;
5353 host_ifconf->ifc_len = target_ifc_len;
5355 /* restore target ifc_buf */
5357 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
5359 /* copy struct ifconf to target user */
5361 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5362 if (!argptr)
5363 return -TARGET_EFAULT;
5364 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
5365 unlock_user(argptr, arg, target_size);
5367 /* copy ifreq[] to target user */
5369 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
5370 for (i = 0; i < nb_ifreq ; i++) {
5371 thunk_convert(argptr + i * target_ifreq_size,
5372 host_ifc_buf + i * sizeof(struct ifreq),
5373 ifreq_arg_type, THUNK_TARGET);
5375 unlock_user(argptr, target_ifc_buf, target_ifc_len);
5378 if (free_buf) {
5379 free(host_ifconf);
5382 return ret;
/* Special-case handler for device-mapper ioctls: struct dm_ioctl has a
 * variable-size payload (described by data_start/data_size) whose
 * layout depends on the specific DM command, so input and output
 * payloads are converted command-by-command.
 */
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    /* Doubling data_size leaves headroom for host-side expansion of the
     * payload during conversion. */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        ret = -TARGET_EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }

    /* Convert the command-specific input payload to host layout. */
    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        memcpy(host_data, argptr, guest_data_size);
        /* The payload starts with a 64-bit sector number to swap. */
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        /* Payload is target_count dm_target_spec records, each followed
         * by a NUL-terminated parameter string; 'next' chains them. */
        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        /* NOTE(review): this lock_user result is used unchecked below;
         * a bad guest pointer would make thunk_convert/strcpy write
         * through NULL — looks like a missing EFAULT check, confirm. */
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        /* Convert the command-specific output payload back to guest layout. */
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void*)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char*)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char*)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void*)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            void *hdata = (void*)host_dm + host_dm->data_start;
            int count = *(uint32_t*)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t*)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void*)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        /* Finally copy the (possibly updated) dm_ioctl header back. */
        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}
5615 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5616 int cmd, abi_long arg)
5618 void *argptr;
5619 int target_size;
5620 const argtype *arg_type = ie->arg_type;
5621 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5622 abi_long ret;
5624 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5625 struct blkpg_partition host_part;
5627 /* Read and convert blkpg */
5628 arg_type++;
5629 target_size = thunk_type_size(arg_type, 0);
5630 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5631 if (!argptr) {
5632 ret = -TARGET_EFAULT;
5633 goto out;
5635 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5636 unlock_user(argptr, arg, 0);
5638 switch (host_blkpg->op) {
5639 case BLKPG_ADD_PARTITION:
5640 case BLKPG_DEL_PARTITION:
5641 /* payload is struct blkpg_partition */
5642 break;
5643 default:
5644 /* Unknown opcode */
5645 ret = -TARGET_EINVAL;
5646 goto out;
5649 /* Read and convert blkpg->data */
5650 arg = (abi_long)(uintptr_t)host_blkpg->data;
5651 target_size = thunk_type_size(part_arg_type, 0);
5652 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5653 if (!argptr) {
5654 ret = -TARGET_EFAULT;
5655 goto out;
5657 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5658 unlock_user(argptr, arg, 0);
5660 /* Swizzle the data pointer to our local copy and call! */
5661 host_blkpg->data = &host_part;
5662 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5664 out:
5665 return ret;
5668 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5669 int fd, int cmd, abi_long arg)
5671 const argtype *arg_type = ie->arg_type;
5672 const StructEntry *se;
5673 const argtype *field_types;
5674 const int *dst_offsets, *src_offsets;
5675 int target_size;
5676 void *argptr;
5677 abi_ulong *target_rt_dev_ptr;
5678 unsigned long *host_rt_dev_ptr;
5679 abi_long ret;
5680 int i;
5682 assert(ie->access == IOC_W);
5683 assert(*arg_type == TYPE_PTR);
5684 arg_type++;
5685 assert(*arg_type == TYPE_STRUCT);
5686 target_size = thunk_type_size(arg_type, 0);
5687 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5688 if (!argptr) {
5689 return -TARGET_EFAULT;
5691 arg_type++;
5692 assert(*arg_type == (int)STRUCT_rtentry);
5693 se = struct_entries + *arg_type++;
5694 assert(se->convert[0] == NULL);
5695 /* convert struct here to be able to catch rt_dev string */
5696 field_types = se->field_types;
5697 dst_offsets = se->field_offsets[THUNK_HOST];
5698 src_offsets = se->field_offsets[THUNK_TARGET];
5699 for (i = 0; i < se->nb_fields; i++) {
5700 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5701 assert(*field_types == TYPE_PTRVOID);
5702 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5703 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5704 if (*target_rt_dev_ptr != 0) {
5705 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5706 tswapal(*target_rt_dev_ptr));
5707 if (!*host_rt_dev_ptr) {
5708 unlock_user(argptr, arg, 0);
5709 return -TARGET_EFAULT;
5711 } else {
5712 *host_rt_dev_ptr = 0;
5714 field_types++;
5715 continue;
5717 field_types = thunk_convert(buf_temp + dst_offsets[i],
5718 argptr + src_offsets[i],
5719 field_types, THUNK_HOST);
5721 unlock_user(argptr, arg, 0);
5723 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5724 if (*host_rt_dev_ptr != 0) {
5725 unlock_user((void *)*host_rt_dev_ptr,
5726 *target_rt_dev_ptr, 0);
5728 return ret;
5731 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5732 int fd, int cmd, abi_long arg)
5734 int sig = target_to_host_signal(arg);
5735 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
#ifdef TIOCGPTPEER
/* TIOCGPTPEER: the argument is a set of open(2)-style flags, which must
   be converted to the host flag bitmask before the host ioctl. */
static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int host_flags = target_to_host_bitmask(arg, fcntl_flags_tbl);

    return get_errno(safe_ioctl(fd, ie->host_cmd, host_flags));
}
#endif
/*
 * Table of all emulated ioctls, generated by expanding ioctls.h.
 *  - IOCTL maps a target command to the same-named host command;
 *  - IOCTL_SPECIAL additionally attaches a custom do_ioctl_*() handler;
 *  - IOCTL_IGNORE accepts the target command but leaves host_cmd 0,
 *    so do_ioctl() rejects it with -TARGET_ENOSYS.
 * The table is terminated by an entry with target_cmd == 0.
 */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, },
};
/* ??? Implement proper locking for ioctls.  */
/* do_ioctl() Must return target values and target errnos. */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear search of the table for the target command.  */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    if (ie->do_ioctl) {
        /* Entry has a dedicated handler (IOCTL_SPECIAL).  */
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux.  */
        return -TARGET_ENOSYS;
    }

    /* Generic handling driven by the thunk argument descriptor.  */
    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* Pass the value (or opaque pointer) through unchanged.  */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        /* Pointer to a struct: convert via buf_temp in the direction(s)
           implied by the entry's access mode.  */
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* Kernel writes: run the ioctl first, then copy out.  */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* Kernel reads: copy in first, then run the ioctl.  */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* Copy in, run the ioctl, copy back out on success.  */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
/* Guest <-> host translation of termios input-mode (c_iflag) bits. */
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
        { 0, 0, 0, 0 }
};
/* Guest <-> host translation of termios output-mode (c_oflag) bits.
   Multi-bit delay fields (NLDLY, CRDLY, ...) list one entry per value,
   with the field mask in the first/third columns. */
static const bitmask_transtbl oflag_tbl[] = {
        { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
        { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
        { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
        { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
        { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
        { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
        { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
        { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
        { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
        { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
        { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
        { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
        { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
        { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
        { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
        { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
        { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
        { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
        { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
        { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
        { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
        { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
        { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
        { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
        { 0, 0, 0, 0 }
};
/* Guest <-> host translation of termios control-mode (c_cflag) bits.
   Baud rates (CBAUD field) and character size (CSIZE field) are
   multi-bit fields with one entry per value. */
static const bitmask_transtbl cflag_tbl[] = {
        { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
        { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
        { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
        { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
        { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
        { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
        { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
        { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
        { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
        { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
        { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
        { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
        { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
        { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
        { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
        { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
        { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
        { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
        { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
        { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
        { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
        { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
        { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
        { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
        { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
        { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
        { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
        { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
        { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
        { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
        { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
        { 0, 0, 0, 0 }
};
/* Guest <-> host translation of termios local-mode (c_lflag) bits. */
static const bitmask_transtbl lflag_tbl[] = {
        { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
        { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
        { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
        { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
        { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
        { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
        { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
        { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
        { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
        { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
        { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
        { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
        { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
        { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
        { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
        { 0, 0, 0, 0 }
};
/* Convert a guest struct target_termios (src) to a host struct
   host_termios (dst): translate each flag word through its bitmask
   table and remap the control-character array index by index.  Control
   characters with no TARGET_V* mapping are left zeroed. */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
/* Convert a host struct host_termios (src) to a guest struct
   target_termios (dst) — the inverse of target_to_host_termios():
   flag words go through the same bitmask tables in the opposite
   direction and control characters are remapped index by index. */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
/* Thunk descriptor for struct termios: uses the custom converters above
   instead of field-by-field conversion. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};
/* Guest <-> host translation of mmap(2) flag bits. */
static bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host.  */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { 0, 0, 0, 0 }
};
6048 #if defined(TARGET_I386)
/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;

/* Copy up to bytecount bytes of the emulated LDT out to guest memory
   at ptr.  Returns the number of bytes copied (0 if no LDT has been
   allocated yet), or -TARGET_EFAULT if the guest buffer is unwritable. */
static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this by byteswapped?  */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}
/* XXX: add locking support */
/* Install one LDT entry from the guest's struct modify_ldt_ldt_s at
   ptr.  oldmode selects the legacy modify_ldt(1) semantics (no
   'useable' bit, no 64-bit segments).  Lazily allocates the shared
   LDT backing store on first use.  The descriptor encoding below
   mirrors the Linux kernel's write_ldt(). */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the flags word (same layout as the kernel's user_desc).  */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the two 32-bit halves of the segment descriptor.  */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
6164 /* specific and weird i386 syscalls */
6165 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6166 unsigned long bytecount)
6168 abi_long ret;
6170 switch (func) {
6171 case 0:
6172 ret = read_ldt(ptr, bytecount);
6173 break;
6174 case 1:
6175 ret = write_ldt(env, ptr, bytecount, 1);
6176 break;
6177 case 0x11:
6178 ret = write_ldt(env, ptr, bytecount, 0);
6179 break;
6180 default:
6181 ret = -TARGET_ENOSYS;
6182 break;
6184 return ret;
6187 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/* Emulate set_thread_area(2): install a TLS descriptor into the guest
   GDT.  An entry_number of -1 asks us to pick the first free TLS slot
   and report it back to the guest.  Descriptor packing mirrors the
   Linux kernel's set_thread_area(). */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* Find a free TLS slot and tell the guest which one we chose.  */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* Unpack the flags word (same layout as the kernel's user_desc).  */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the two 32-bit halves of the segment descriptor.  */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
/* Emulate get_thread_area(2): read the TLS descriptor selected by the
   guest's entry_number back out of the guest GDT, unpack the two
   descriptor words into base/limit/flags, and store them into the
   guest's struct modify_ldt_ldt_s. */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Unpack descriptor bits (inverse of do_set_thread_area's packing).  */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
6319 #endif /* TARGET_I386 && TARGET_ABI32 */
6321 #ifndef TARGET_ABI32
6322 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6324 abi_long ret = 0;
6325 abi_ulong val;
6326 int idx;
6328 switch(code) {
6329 case TARGET_ARCH_SET_GS:
6330 case TARGET_ARCH_SET_FS:
6331 if (code == TARGET_ARCH_SET_GS)
6332 idx = R_GS;
6333 else
6334 idx = R_FS;
6335 cpu_x86_load_seg(env, idx, 0);
6336 env->segs[idx].base = addr;
6337 break;
6338 case TARGET_ARCH_GET_GS:
6339 case TARGET_ARCH_GET_FS:
6340 if (code == TARGET_ARCH_GET_GS)
6341 idx = R_GS;
6342 else
6343 idx = R_FS;
6344 val = env->segs[idx].base;
6345 if (put_user(val, addr, abi_ulong))
6346 ret = -TARGET_EFAULT;
6347 break;
6348 default:
6349 ret = -TARGET_EINVAL;
6350 break;
6352 return ret;
6354 #endif
6356 #endif /* defined(TARGET_I386) */
#define NEW_STACK_SIZE 0x40000

/* Serialises thread creation so the parent's TLS setup appears atomic
   to the new thread (see clone_func / do_fork).  */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
typedef struct {
    CPUArchState *env;       /* CPU state for the new thread */
    pthread_mutex_t mutex;   /* with cond: parent/child startup handshake */
    pthread_cond_t cond;
    pthread_t thread;
    uint32_t tid;            /* child's kernel tid, filled in by the child */
    abi_ulong child_tidptr;  /* guest address for CLONE_CHILD_SETTID, or 0 */
    abi_ulong parent_tidptr; /* guest address for CLONE_PARENT_SETTID, or 0 */
    sigset_t sigmask;        /* parent's signal mask, restored in the child */
} new_thread_info;
/* Entry point of a new guest thread created by do_fork(CLONE_VM).
   Registers the thread with RCU/TCG, publishes its tid, restores the
   signal mask, signals the waiting parent, then waits for the parent
   to release clone_lock before entering the CPU loop.  Never returns. */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = gettid();
    task_settid(ts);
    /* Store the tid at the guest addresses requested via clone flags.  */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
/* Emulate clone(2)/fork(2)/vfork(2).  CLONE_VM requests are mapped to a
   host pthread sharing this process's guest memory; anything else is
   emulated with a host fork().  vfork() is downgraded to fork(). */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        /* Threads must request the full standard thread flag set and
           nothing we can't honour.  */
        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_cpu = ENV_GET_CPU(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        /* Set up the parent/child handshake state.  */
        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        /* If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         */
        if (!parallel_cpus) {
            parallel_cpus = true;
            tb_flush(cpu);
        }

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs(env, newsp);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            fork_end(0);
        }
    }
    return ret;
}
6546 /* warning : doesn't handle linux specific flags... */
6547 static int target_to_host_fcntl_cmd(int cmd)
6549 switch(cmd) {
6550 case TARGET_F_DUPFD:
6551 case TARGET_F_GETFD:
6552 case TARGET_F_SETFD:
6553 case TARGET_F_GETFL:
6554 case TARGET_F_SETFL:
6555 return cmd;
6556 case TARGET_F_GETLK:
6557 return F_GETLK64;
6558 case TARGET_F_SETLK:
6559 return F_SETLK64;
6560 case TARGET_F_SETLKW:
6561 return F_SETLKW64;
6562 case TARGET_F_GETOWN:
6563 return F_GETOWN;
6564 case TARGET_F_SETOWN:
6565 return F_SETOWN;
6566 case TARGET_F_GETSIG:
6567 return F_GETSIG;
6568 case TARGET_F_SETSIG:
6569 return F_SETSIG;
6570 #if TARGET_ABI_BITS == 32
6571 case TARGET_F_GETLK64:
6572 return F_GETLK64;
6573 case TARGET_F_SETLK64:
6574 return F_SETLK64;
6575 case TARGET_F_SETLKW64:
6576 return F_SETLKW64;
6577 #endif
6578 case TARGET_F_SETLEASE:
6579 return F_SETLEASE;
6580 case TARGET_F_GETLEASE:
6581 return F_GETLEASE;
6582 #ifdef F_DUPFD_CLOEXEC
6583 case TARGET_F_DUPFD_CLOEXEC:
6584 return F_DUPFD_CLOEXEC;
6585 #endif
6586 case TARGET_F_NOTIFY:
6587 return F_NOTIFY;
6588 #ifdef F_GETOWN_EX
6589 case TARGET_F_GETOWN_EX:
6590 return F_GETOWN_EX;
6591 #endif
6592 #ifdef F_SETOWN_EX
6593 case TARGET_F_SETOWN_EX:
6594 return F_SETOWN_EX;
6595 #endif
6596 #ifdef F_SETPIPE_SZ
6597 case TARGET_F_SETPIPE_SZ:
6598 return F_SETPIPE_SZ;
6599 case TARGET_F_GETPIPE_SZ:
6600 return F_GETPIPE_SZ;
6601 #endif
6602 default:
6603 return -TARGET_EINVAL;
6605 return -TARGET_EINVAL;
/* Shared l_type translation table for the two direction-specific
 * converters below.  Each user #defines TRANSTBL_CONVERT to expand a
 * lock type into the appropriate "case X: return Y" arm, then expands
 * this switch over all known lock types.
 */
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    TRANSTBL_CONVERT(F_EXLCK); \
    TRANSTBL_CONVERT(F_SHLCK); \
    }

/* Convert a guest flock l_type to the host value.
 * Returns -TARGET_EINVAL for guest values we do not recognise.
 */
static int target_to_host_flock(int type)
{
#define TRANSTBL_CONVERT(a) case TARGET_##a: return a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    return -TARGET_EINVAL;
}

/* Convert a host flock l_type to the guest value. */
static int host_to_target_flock(int type)
{
#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    /* if we don't know how to convert the value coming
     * from the host we copy to the target field as-is
     */
    return type;
}
6636 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6637 abi_ulong target_flock_addr)
6639 struct target_flock *target_fl;
6640 int l_type;
6642 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6643 return -TARGET_EFAULT;
6646 __get_user(l_type, &target_fl->l_type);
6647 l_type = target_to_host_flock(l_type);
6648 if (l_type < 0) {
6649 return l_type;
6651 fl->l_type = l_type;
6652 __get_user(fl->l_whence, &target_fl->l_whence);
6653 __get_user(fl->l_start, &target_fl->l_start);
6654 __get_user(fl->l_len, &target_fl->l_len);
6655 __get_user(fl->l_pid, &target_fl->l_pid);
6656 unlock_user_struct(target_fl, target_flock_addr, 0);
6657 return 0;
6660 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6661 const struct flock64 *fl)
6663 struct target_flock *target_fl;
6664 short l_type;
6666 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6667 return -TARGET_EFAULT;
6670 l_type = host_to_target_flock(fl->l_type);
6671 __put_user(l_type, &target_fl->l_type);
6672 __put_user(fl->l_whence, &target_fl->l_whence);
6673 __put_user(fl->l_start, &target_fl->l_start);
6674 __put_user(fl->l_len, &target_fl->l_len);
6675 __put_user(fl->l_pid, &target_fl->l_pid);
6676 unlock_user_struct(target_fl, target_flock_addr, 1);
6677 return 0;
6680 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6681 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6683 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6684 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6685 abi_ulong target_flock_addr)
6687 struct target_oabi_flock64 *target_fl;
6688 int l_type;
6690 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6691 return -TARGET_EFAULT;
6694 __get_user(l_type, &target_fl->l_type);
6695 l_type = target_to_host_flock(l_type);
6696 if (l_type < 0) {
6697 return l_type;
6699 fl->l_type = l_type;
6700 __get_user(fl->l_whence, &target_fl->l_whence);
6701 __get_user(fl->l_start, &target_fl->l_start);
6702 __get_user(fl->l_len, &target_fl->l_len);
6703 __get_user(fl->l_pid, &target_fl->l_pid);
6704 unlock_user_struct(target_fl, target_flock_addr, 0);
6705 return 0;
6708 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6709 const struct flock64 *fl)
6711 struct target_oabi_flock64 *target_fl;
6712 short l_type;
6714 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6715 return -TARGET_EFAULT;
6718 l_type = host_to_target_flock(fl->l_type);
6719 __put_user(l_type, &target_fl->l_type);
6720 __put_user(fl->l_whence, &target_fl->l_whence);
6721 __put_user(fl->l_start, &target_fl->l_start);
6722 __put_user(fl->l_len, &target_fl->l_len);
6723 __put_user(fl->l_pid, &target_fl->l_pid);
6724 unlock_user_struct(target_fl, target_flock_addr, 1);
6725 return 0;
6727 #endif
6729 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6730 abi_ulong target_flock_addr)
6732 struct target_flock64 *target_fl;
6733 int l_type;
6735 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6736 return -TARGET_EFAULT;
6739 __get_user(l_type, &target_fl->l_type);
6740 l_type = target_to_host_flock(l_type);
6741 if (l_type < 0) {
6742 return l_type;
6744 fl->l_type = l_type;
6745 __get_user(fl->l_whence, &target_fl->l_whence);
6746 __get_user(fl->l_start, &target_fl->l_start);
6747 __get_user(fl->l_len, &target_fl->l_len);
6748 __get_user(fl->l_pid, &target_fl->l_pid);
6749 unlock_user_struct(target_fl, target_flock_addr, 0);
6750 return 0;
6753 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6754 const struct flock64 *fl)
6756 struct target_flock64 *target_fl;
6757 short l_type;
6759 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6760 return -TARGET_EFAULT;
6763 l_type = host_to_target_flock(fl->l_type);
6764 __put_user(l_type, &target_fl->l_type);
6765 __put_user(fl->l_whence, &target_fl->l_whence);
6766 __put_user(fl->l_start, &target_fl->l_start);
6767 __put_user(fl->l_len, &target_fl->l_len);
6768 __put_user(fl->l_pid, &target_fl->l_pid);
6769 unlock_user_struct(target_fl, target_flock_addr, 1);
6770 return 0;
/* Emulate fcntl(2) for the guest: translate the command and any
 * pointed-to structures both ways around the host call.
 * Returns the host result or a -TARGET_* errno.
 */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        /* Copy the guest lock request in, query, then copy the (possibly
         * updated) lock description back out. */
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        /* Set operations only read the guest lock description. */
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        /* O_* flag bits differ between targets; map host -> guest. */
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        /* ...and guest -> host on the way in. */
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
        /* Plain integer argument; no translation needed. */
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        /* Unknown command: pass through untranslated and hope for
         * the best. */
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
#ifdef USE_UID16

/* 16-bit UID ABI: host IDs above 65535 cannot be represented, so they
 * are reported as the overflow ID 65534 (matching the kernel's
 * overflowuid behaviour). */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

/* Widening direction: 0xffff means -1 ("no change") in the 16-bit ABI
 * and must stay -1 rather than being zero-extended to 65535. */
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}

/* Byte-swap an ID at the width used by this ABI. */
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */

/* 32-bit UID ABI: all conversions are the identity. */
static inline int high2lowuid(int uid)
{
    return uid;
}

static inline int high2lowgid(int gid)
{
    return gid;
}

static inline int low2highuid(int uid)
{
    return uid;
}

static inline int low2highgid(int gid)
{
    return gid;
}

static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

/* Raw per-thread syscall wrappers, bypassing the libc set*id()
 * process-wide wrappers. */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
/* One-time initialisation of the syscall layer: register thunk struct
 * descriptions, build the errno reverse-map, and fix up ioctl table
 * entries whose size field must be computed at runtime.
 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                /* The all-ones marker only makes sense for pointer
                 * arguments whose pointee size the thunk layer knows. */
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            /* Splice the computed size into the command's size field. */
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
#if TARGET_ABI_BITS == 32
/* Assemble a 64-bit offset from the two 32-bit register words a 32-bit
 * ABI passes it in; which word holds the high half is endian-dependent. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    uint64_t hi = word0, lo = word1;
#else
    uint64_t hi = word1, lo = word0;
#endif
    return (hi << 32) | lo;
}
#else /* TARGET_ABI_BITS == 32 */
/* On 64-bit ABIs the offset already arrives whole in the first word. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64: on ABIs requiring aligned register pairs the 64-bit
 * offset pair is shifted up by one argument slot. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    abi_long ofs_first = arg2;
    abi_long ofs_second = arg3;

    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        ofs_first = arg3;
        ofs_second = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(ofs_first, ofs_second)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair alignment handling as truncate64. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    abi_long ofs_first = arg2;
    abi_long ofs_second = arg3;

    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        ofs_first = arg3;
        ofs_second = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(ofs_first, ofs_second)));
}
#endif
7077 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
7078 abi_ulong target_addr)
7080 struct target_timespec *target_ts;
7082 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
7083 return -TARGET_EFAULT;
7084 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
7085 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
7086 unlock_user_struct(target_ts, target_addr, 0);
7087 return 0;
7090 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
7091 struct timespec *host_ts)
7093 struct target_timespec *target_ts;
7095 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
7096 return -TARGET_EFAULT;
7097 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
7098 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
7099 unlock_user_struct(target_ts, target_addr, 1);
7100 return 0;
7103 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
7104 abi_ulong target_addr)
7106 struct target_itimerspec *target_itspec;
7108 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
7109 return -TARGET_EFAULT;
7112 host_itspec->it_interval.tv_sec =
7113 tswapal(target_itspec->it_interval.tv_sec);
7114 host_itspec->it_interval.tv_nsec =
7115 tswapal(target_itspec->it_interval.tv_nsec);
7116 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
7117 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
7119 unlock_user_struct(target_itspec, target_addr, 1);
7120 return 0;
7123 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7124 struct itimerspec *host_its)
7126 struct target_itimerspec *target_itspec;
7128 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
7129 return -TARGET_EFAULT;
7132 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
7133 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
7135 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
7136 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
7138 unlock_user_struct(target_itspec, target_addr, 0);
7139 return 0;
/* Read a guest struct timex (adjtimex parameter block) into host form,
 * field by field.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
/* Write a host struct timex back out to guest memory, field by field.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
7213 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7214 abi_ulong target_addr)
7216 struct target_sigevent *target_sevp;
7218 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7219 return -TARGET_EFAULT;
7222 /* This union is awkward on 64 bit systems because it has a 32 bit
7223 * integer and a pointer in it; we follow the conversion approach
7224 * used for handling sigval types in signal.c so the guest should get
7225 * the correct value back even if we did a 64 bit byteswap and it's
7226 * using the 32 bit integer.
7228 host_sevp->sigev_value.sival_ptr =
7229 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7230 host_sevp->sigev_signo =
7231 target_to_host_signal(tswap32(target_sevp->sigev_signo));
7232 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7233 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7235 unlock_user_struct(target_sevp, target_addr, 1);
7236 return 0;
#if defined(TARGET_NR_mlockall)
/* Map guest MCL_* flag bits for mlockall(2) to host values. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int host_arg = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        host_arg |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        host_arg |= MCL_FUTURE;
    }
    return host_arg;
}
#endif
/* Write a host struct stat out to the guest's stat64-family layout.
 * On 32-bit ARM the EABI and OABI layouts differ, hence the runtime
 * eabi check; all other targets use the single compile-time layout.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Some layouts carry the inode twice; fill both copies. */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
7317 /* ??? Using host futex calls even when target atomic operations
7318 are not really atomic probably breaks things. However implementing
7319 futexes locally would make futexes shared between multiple processes
7320 tricky. However they're probably useless because guest atomic
7321 operations won't work either. */
7322 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7323 target_ulong uaddr2, int val3)
7325 struct timespec ts, *pts;
7326 int base_op;
7328 /* ??? We assume FUTEX_* constants are the same on both host
7329 and target. */
7330 #ifdef FUTEX_CMD_MASK
7331 base_op = op & FUTEX_CMD_MASK;
7332 #else
7333 base_op = op;
7334 #endif
7335 switch (base_op) {
7336 case FUTEX_WAIT:
7337 case FUTEX_WAIT_BITSET:
7338 if (timeout) {
7339 pts = &ts;
7340 target_to_host_timespec(pts, timeout);
7341 } else {
7342 pts = NULL;
7344 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
7345 pts, NULL, val3));
7346 case FUTEX_WAKE:
7347 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7348 case FUTEX_FD:
7349 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7350 case FUTEX_REQUEUE:
7351 case FUTEX_CMP_REQUEUE:
7352 case FUTEX_WAKE_OP:
7353 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7354 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7355 But the prototype takes a `struct timespec *'; insert casts
7356 to satisfy the compiler. We do not need to tswap TIMEOUT
7357 since it's not compared to guest memory. */
7358 pts = (struct timespec *)(uintptr_t) timeout;
7359 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
7360 g2h(uaddr2),
7361 (base_op == FUTEX_CMP_REQUEUE
7362 ? tswap32(val3)
7363 : val3)));
7364 default:
7365 return -TARGET_ENOSYS;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): run the host syscall into a bounce
 * buffer sized from the guest's handle_bytes, then copy the opaque
 * handle back with its two header fields byteswapped.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* handle_bytes is the first field of file_handle; it tells us how
     * much trailing handle space the guest provided. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    /* Host-side bounce buffer; zeroed so untouched bytes don't leak. */
    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): duplicate the guest's opaque handle
 * into a host buffer, fixing up the byteswapped header fields, and
 * translate the open flags.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes (first field) gives the size of the trailing
     * opaque handle data. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    /* Copy the opaque bytes as-is, then overwrite the two header
     * fields with host byte order (size was already swapped by
     * get_user_s32 above). */
    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
7456 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7458 /* signalfd siginfo conversion */
/* Convert one signalfd_siginfo record from host to guest byte order
 * and signal numbering.  The only caller passes tinfo == info (in-place
 * conversion), but the source of every read is now consistently INFO
 * so the function is also correct for distinct buffers.
 */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    /* Bug fix: test the host-order source fields (info), not the
     * destination (tinfo), which is only valid when the two alias. */
    if (info->ssi_signo == SIGBUS &&
        (info->ssi_code == BUS_MCEERR_AR ||
         info->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    /* Bug fix: read ssi_errno from the source record, not from the
     * (possibly not-yet-written) destination. */
    tinfo->ssi_errno = tswap32(info->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
7498 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
7500 int i;
7502 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
7503 host_to_target_signalfd_siginfo(buf + i, buf + i);
7506 return len;
7509 static TargetFdTrans target_signalfd_trans = {
7510 .host_to_target_data = host_to_target_data_signalfd,
7513 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7515 int host_flags;
7516 target_sigset_t *target_mask;
7517 sigset_t host_mask;
7518 abi_long ret;
7520 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7521 return -TARGET_EINVAL;
7523 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7524 return -TARGET_EFAULT;
7527 target_to_host_sigset(&host_mask, target_mask);
7529 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7531 ret = get_errno(signalfd(fd, &host_mask, host_flags));
7532 if (ret >= 0) {
7533 fd_trans_register(ret, &target_signalfd_trans);
7536 unlock_user_struct(target_mask, mask, 0);
7538 return ret;
7540 #endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same. */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Termination signal lives in the low 7 bits. */
        int tsig = host_to_target_signal(WTERMSIG(status));
        return tsig | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8-15. */
        int tsig = host_to_target_signal(WSTOPSIG(status));
        return (tsig << 8) | (status & 0xff);
    }
    return status;
}
7556 static int open_self_cmdline(void *cpu_env, int fd)
7558 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7559 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7560 int i;
7562 for (i = 0; i < bprm->argc; i++) {
7563 size_t len = strlen(bprm->argv[i]) + 1;
7565 if (write(fd, bprm->argv[i], len) != len) {
7566 return -1;
7570 return 0;
7573 static int open_self_maps(void *cpu_env, int fd)
7575 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7576 TaskState *ts = cpu->opaque;
7577 FILE *fp;
7578 char *line = NULL;
7579 size_t len = 0;
7580 ssize_t read;
7582 fp = fopen("/proc/self/maps", "r");
7583 if (fp == NULL) {
7584 return -1;
7587 while ((read = getline(&line, &len, fp)) != -1) {
7588 int fields, dev_maj, dev_min, inode;
7589 uint64_t min, max, offset;
7590 char flag_r, flag_w, flag_x, flag_p;
7591 char path[512] = "";
7592 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7593 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7594 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
7596 if ((fields < 10) || (fields > 11)) {
7597 continue;
7599 if (h2g_valid(min)) {
7600 int flags = page_get_flags(h2g(min));
7601 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
7602 if (page_check_range(h2g(min), max - min, flags) == -1) {
7603 continue;
7605 if (h2g(min) == ts->info->stack_limit) {
7606 pstrcpy(path, sizeof(path), " [stack]");
7608 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
7609 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7610 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7611 flag_x, flag_p, offset, dev_maj, dev_min, inode,
7612 path[0] ? " " : "", path);
7616 free(line);
7617 fclose(fp);
7619 return 0;
7622 static int open_self_stat(void *cpu_env, int fd)
7624 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7625 TaskState *ts = cpu->opaque;
7626 abi_ulong start_stack = ts->info->start_stack;
7627 int i;
7629 for (i = 0; i < 44; i++) {
7630 char buf[128];
7631 int len;
7632 uint64_t val = 0;
7634 if (i == 0) {
7635 /* pid */
7636 val = getpid();
7637 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7638 } else if (i == 1) {
7639 /* app name */
7640 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
7641 } else if (i == 27) {
7642 /* stack bottom */
7643 val = start_stack;
7644 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7645 } else {
7646 /* for the rest, there is MasterCard */
7647 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
7650 len = strlen(buf);
7651 if (write(fd, buf, len) != len) {
7652 return -1;
7656 return 0;
/*
 * Emulate /proc/self/auxv: copy the target's ELF auxiliary vector,
 * which the ELF loader saved on the target stack, into the temporary
 * file.  Always returns 0; a failed write merely truncates the copy.
 */
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;   /* guest address of auxv */
    abi_ulong len = ts->info->auxv_len;      /* its size in bytes */
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        /*
         * NOTE(review): ptr and len were advanced by the copy loop, so
         * this unlock uses the post-copy values rather than the ones
         * passed to lock_user().  Harmless for a VERIFY_READ mapping
         * (nothing is written back), but worth confirming.
         */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
/*
 * Return 1 if @filename refers to /proc/<current process>/@entry,
 * accepting both the "/proc/self/" spelling and an explicit
 * "/proc/<our pid>/" prefix; return 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *rest = filename;

    if (strncmp(rest, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    rest += strlen("/proc/");

    if (strncmp(rest, "self/", strlen("self/")) == 0) {
        rest += strlen("self/");
    } else if (*rest >= '1' && *rest <= '9') {
        /* a literal pid: only match if it is our own */
        char pid_prefix[80];

        snprintf(pid_prefix, sizeof(pid_prefix), "%d/", getpid());
        if (strncmp(rest, pid_prefix, strlen(pid_prefix)) != 0) {
            return 0;
        }
        rest += strlen(pid_prefix);
    } else {
        return 0;
    }

    return strcmp(rest, entry) == 0;
}
7713 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Return 1 when @filename exactly matches the emulated /proc @entry path. */
static int is_proc(const char *filename, const char *entry)
{
    if (strcmp(filename, entry)) {
        return 0;
    }
    return 1;
}
7719 static int open_net_route(void *cpu_env, int fd)
7721 FILE *fp;
7722 char *line = NULL;
7723 size_t len = 0;
7724 ssize_t read;
7726 fp = fopen("/proc/net/route", "r");
7727 if (fp == NULL) {
7728 return -1;
7731 /* read header */
7733 read = getline(&line, &len, fp);
7734 dprintf(fd, "%s", line);
7736 /* read routes */
7738 while ((read = getline(&line, &len, fp)) != -1) {
7739 char iface[16];
7740 uint32_t dest, gw, mask;
7741 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7742 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7743 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7744 &mask, &mtu, &window, &irtt);
7745 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7746 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7747 metric, tswap32(mask), mtu, window, irtt);
7750 free(line);
7751 fclose(fp);
7753 return 0;
7755 #endif
/*
 * open()/openat() handler: intercepts a handful of /proc paths whose
 * real contents would describe QEMU itself rather than the guest, and
 * substitutes synthesized files; everything else is passed through to
 * the host via safe_openat().
 */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    /* one entry per emulated file: match function plus content filler */
    struct fake_open {
        const char *filename;
        int (*fill)(void *cpu_env, int fd);
        int (*cmp)(const char *s1, const char *s2);
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    /* /proc/self/exe: hand back the guest binary, not the QEMU binary */
    if (is_proc_myself(pathname, "exe")) {
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* unlink immediately: the fd keeps the anonymous file alive */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            /* preserve the fill function's errno across close() */
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        /* rewind so the caller reads the synthesized content from 0 */
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
7817 #define TIMER_MAGIC 0x0caf0000
7818 #define TIMER_MAGIC_MASK 0xffff0000
7820 /* Convert QEMU provided timer ID back to internal 16bit index format */
static target_timer_t get_timer_id(abi_long arg)
{
    target_timer_t timerid = arg;

    /* a valid id carries the TIMER_MAGIC tag in its upper 16 bits */
    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;
    }

    /* the low 16 bits index the g_posix_timers[] table */
    timerid &= 0xffff;

    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
        return -TARGET_EINVAL;
    }

    return timerid;
}
7838 static abi_long swap_data_eventfd(void *buf, size_t len)
7840 uint64_t *counter = buf;
7841 int i;
7843 if (len < sizeof(uint64_t)) {
7844 return -EINVAL;
7847 for (i = 0; i < len; i += sizeof(uint64_t)) {
7848 *counter = tswap64(*counter);
7849 counter++;
7852 return len;
/*
 * fd translator for eventfds: the 8-byte counter moved by read() and
 * write() must be byte-swapped in both directions.
 */
static TargetFdTrans target_eventfd_trans = {
    .host_to_target_data = swap_data_eventfd,
    .target_to_host_data = swap_data_eventfd,
};
7860 #if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
7861 (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
7862 defined(__NR_inotify_init1))
/*
 * Convert a buffer of struct inotify_event records read from the host
 * into target byte order.  Each record is followed by ev->len bytes of
 * name, so name_len is captured before ev->len itself is swapped and
 * is then used to step to the next record.
 */
static abi_long host_to_target_data_inotify(void *buf, size_t len)
{
    struct inotify_event *ev;
    int i;
    uint32_t name_len;

    for (i = 0; i < len; i += sizeof(struct inotify_event) + name_len) {
        ev = (struct inotify_event *)((char *)buf + i);
        name_len = ev->len;

        ev->wd = tswap32(ev->wd);
        ev->mask = tswap32(ev->mask);
        ev->cookie = tswap32(ev->cookie);
        ev->len = tswap32(name_len);
    }

    /* NOTE(review): assumes the kernel only delivers whole records; no
     * bounds check guards a truncated final record — confirm callers
     * always pass a complete read() result. */
    return len;
}
/*
 * fd translator for inotify descriptors: only data read from the fd
 * needs byte-order conversion.
 */
static TargetFdTrans target_inotify_trans = {
    .host_to_target_data = host_to_target_data_inotify,
};
7885 #endif
/*
 * Copy a CPU affinity mask from guest memory at @target_addr
 * (@target_size bytes of abi_ulong words) into the host-format
 * bitmap @host_mask, converting between the two word widths bit by
 * bit.  Returns 0 on success or -TARGET_EFAULT if the guest buffer
 * cannot be locked.
 */
static int target_to_host_cpu_mask(unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    /* caller guarantees the host buffer can hold every target bit */
    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }
    memset(host_mask, 0, host_size);

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;   /* global bit index of word i */
        abi_ulong val;

        __get_user(val, &target_mask[i]);
        for (j = 0; j < target_bits; j++, bit++) {
            if (val & (1UL << j)) {
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
            }
        }
    }

    unlock_user(target_mask, target_addr, 0);
    return 0;
}
/*
 * Inverse of target_to_host_cpu_mask(): copy the host-format bitmap
 * @host_mask into guest memory at @target_addr as abi_ulong words,
 * converting between the two word widths bit by bit.  Returns 0 on
 * success or -TARGET_EFAULT if the guest buffer cannot be locked.
 */
static int host_to_target_cpu_mask(const unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    /* caller guarantees the host buffer covers every target bit */
    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;   /* global bit index of word i */
        abi_ulong val = 0;

        for (j = 0; j < target_bits; j++, bit++) {
            if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
                val |= 1UL << j;
            }
        }
        __put_user(val, &target_mask[i]);
    }

    unlock_user(target_mask, target_addr, target_size);
    return 0;
}
7954 /* do_syscall() should always have a single exit point at the end so
7955 that actions, such as logging of syscall results, can be performed.
7956 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7957 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
7958 abi_long arg2, abi_long arg3, abi_long arg4,
7959 abi_long arg5, abi_long arg6, abi_long arg7,
7960 abi_long arg8)
7962 CPUState *cpu = ENV_GET_CPU(cpu_env);
7963 abi_long ret;
7964 struct stat st;
7965 struct statfs stfs;
7966 void *p;
7968 #if defined(DEBUG_ERESTARTSYS)
7969 /* Debug-only code for exercising the syscall-restart code paths
7970 * in the per-architecture cpu main loops: restart every syscall
7971 * the guest makes once before letting it through.
7974 static int flag;
7976 flag = !flag;
7977 if (flag) {
7978 return -TARGET_ERESTARTSYS;
7981 #endif
7983 #ifdef DEBUG
7984 gemu_log("syscall %d", num);
7985 #endif
7986 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
7987 if(do_strace)
7988 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
7990 switch(num) {
7991 case TARGET_NR_exit:
7992 /* In old applications this may be used to implement _exit(2).
7993 However in threaded applictions it is used for thread termination,
7994 and _exit_group is used for application termination.
7995 Do thread termination if we have more then one thread. */
7997 if (block_signals()) {
7998 ret = -TARGET_ERESTARTSYS;
7999 break;
8002 cpu_list_lock();
8004 if (CPU_NEXT(first_cpu)) {
8005 TaskState *ts;
8007 /* Remove the CPU from the list. */
8008 QTAILQ_REMOVE(&cpus, cpu, node);
8010 cpu_list_unlock();
8012 ts = cpu->opaque;
8013 if (ts->child_tidptr) {
8014 put_user_u32(0, ts->child_tidptr);
8015 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
8016 NULL, NULL, 0);
8018 thread_cpu = NULL;
8019 object_unref(OBJECT(cpu));
8020 g_free(ts);
8021 rcu_unregister_thread();
8022 pthread_exit(NULL);
8025 cpu_list_unlock();
8026 #ifdef TARGET_GPROF
8027 _mcleanup();
8028 #endif
8029 gdb_exit(cpu_env, arg1);
8030 _exit(arg1);
8031 ret = 0; /* avoid warning */
8032 break;
8033 case TARGET_NR_read:
8034 if (arg3 == 0)
8035 ret = 0;
8036 else {
8037 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8038 goto efault;
8039 ret = get_errno(safe_read(arg1, p, arg3));
8040 if (ret >= 0 &&
8041 fd_trans_host_to_target_data(arg1)) {
8042 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8044 unlock_user(p, arg2, ret);
8046 break;
8047 case TARGET_NR_write:
8048 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8049 goto efault;
8050 if (fd_trans_target_to_host_data(arg1)) {
8051 void *copy = g_malloc(arg3);
8052 memcpy(copy, p, arg3);
8053 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8054 if (ret >= 0) {
8055 ret = get_errno(safe_write(arg1, copy, ret));
8057 g_free(copy);
8058 } else {
8059 ret = get_errno(safe_write(arg1, p, arg3));
8061 unlock_user(p, arg2, 0);
8062 break;
8063 #ifdef TARGET_NR_open
8064 case TARGET_NR_open:
8065 if (!(p = lock_user_string(arg1)))
8066 goto efault;
8067 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8068 target_to_host_bitmask(arg2, fcntl_flags_tbl),
8069 arg3));
8070 fd_trans_unregister(ret);
8071 unlock_user(p, arg1, 0);
8072 break;
8073 #endif
8074 case TARGET_NR_openat:
8075 if (!(p = lock_user_string(arg2)))
8076 goto efault;
8077 ret = get_errno(do_openat(cpu_env, arg1, p,
8078 target_to_host_bitmask(arg3, fcntl_flags_tbl),
8079 arg4));
8080 fd_trans_unregister(ret);
8081 unlock_user(p, arg2, 0);
8082 break;
8083 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8084 case TARGET_NR_name_to_handle_at:
8085 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8086 break;
8087 #endif
8088 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8089 case TARGET_NR_open_by_handle_at:
8090 ret = do_open_by_handle_at(arg1, arg2, arg3);
8091 fd_trans_unregister(ret);
8092 break;
8093 #endif
8094 case TARGET_NR_close:
8095 fd_trans_unregister(arg1);
8096 ret = get_errno(close(arg1));
8097 break;
8098 case TARGET_NR_brk:
8099 ret = do_brk(arg1);
8100 break;
8101 #ifdef TARGET_NR_fork
8102 case TARGET_NR_fork:
8103 ret = get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8104 break;
8105 #endif
8106 #ifdef TARGET_NR_waitpid
8107 case TARGET_NR_waitpid:
8109 int status;
8110 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8111 if (!is_error(ret) && arg2 && ret
8112 && put_user_s32(host_to_target_waitstatus(status), arg2))
8113 goto efault;
8115 break;
8116 #endif
8117 #ifdef TARGET_NR_waitid
8118 case TARGET_NR_waitid:
8120 siginfo_t info;
8121 info.si_pid = 0;
8122 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8123 if (!is_error(ret) && arg3 && info.si_pid != 0) {
8124 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8125 goto efault;
8126 host_to_target_siginfo(p, &info);
8127 unlock_user(p, arg3, sizeof(target_siginfo_t));
8130 break;
8131 #endif
8132 #ifdef TARGET_NR_creat /* not on alpha */
8133 case TARGET_NR_creat:
8134 if (!(p = lock_user_string(arg1)))
8135 goto efault;
8136 ret = get_errno(creat(p, arg2));
8137 fd_trans_unregister(ret);
8138 unlock_user(p, arg1, 0);
8139 break;
8140 #endif
8141 #ifdef TARGET_NR_link
8142 case TARGET_NR_link:
8144 void * p2;
8145 p = lock_user_string(arg1);
8146 p2 = lock_user_string(arg2);
8147 if (!p || !p2)
8148 ret = -TARGET_EFAULT;
8149 else
8150 ret = get_errno(link(p, p2));
8151 unlock_user(p2, arg2, 0);
8152 unlock_user(p, arg1, 0);
8154 break;
8155 #endif
8156 #if defined(TARGET_NR_linkat)
8157 case TARGET_NR_linkat:
8159 void * p2 = NULL;
8160 if (!arg2 || !arg4)
8161 goto efault;
8162 p = lock_user_string(arg2);
8163 p2 = lock_user_string(arg4);
8164 if (!p || !p2)
8165 ret = -TARGET_EFAULT;
8166 else
8167 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8168 unlock_user(p, arg2, 0);
8169 unlock_user(p2, arg4, 0);
8171 break;
8172 #endif
8173 #ifdef TARGET_NR_unlink
8174 case TARGET_NR_unlink:
8175 if (!(p = lock_user_string(arg1)))
8176 goto efault;
8177 ret = get_errno(unlink(p));
8178 unlock_user(p, arg1, 0);
8179 break;
8180 #endif
8181 #if defined(TARGET_NR_unlinkat)
8182 case TARGET_NR_unlinkat:
8183 if (!(p = lock_user_string(arg2)))
8184 goto efault;
8185 ret = get_errno(unlinkat(arg1, p, arg3));
8186 unlock_user(p, arg2, 0);
8187 break;
8188 #endif
8189 case TARGET_NR_execve:
8191 char **argp, **envp;
8192 int argc, envc;
8193 abi_ulong gp;
8194 abi_ulong guest_argp;
8195 abi_ulong guest_envp;
8196 abi_ulong addr;
8197 char **q;
8198 int total_size = 0;
8200 argc = 0;
8201 guest_argp = arg2;
8202 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8203 if (get_user_ual(addr, gp))
8204 goto efault;
8205 if (!addr)
8206 break;
8207 argc++;
8209 envc = 0;
8210 guest_envp = arg3;
8211 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8212 if (get_user_ual(addr, gp))
8213 goto efault;
8214 if (!addr)
8215 break;
8216 envc++;
8219 argp = g_new0(char *, argc + 1);
8220 envp = g_new0(char *, envc + 1);
8222 for (gp = guest_argp, q = argp; gp;
8223 gp += sizeof(abi_ulong), q++) {
8224 if (get_user_ual(addr, gp))
8225 goto execve_efault;
8226 if (!addr)
8227 break;
8228 if (!(*q = lock_user_string(addr)))
8229 goto execve_efault;
8230 total_size += strlen(*q) + 1;
8232 *q = NULL;
8234 for (gp = guest_envp, q = envp; gp;
8235 gp += sizeof(abi_ulong), q++) {
8236 if (get_user_ual(addr, gp))
8237 goto execve_efault;
8238 if (!addr)
8239 break;
8240 if (!(*q = lock_user_string(addr)))
8241 goto execve_efault;
8242 total_size += strlen(*q) + 1;
8244 *q = NULL;
8246 if (!(p = lock_user_string(arg1)))
8247 goto execve_efault;
8248 /* Although execve() is not an interruptible syscall it is
8249 * a special case where we must use the safe_syscall wrapper:
8250 * if we allow a signal to happen before we make the host
8251 * syscall then we will 'lose' it, because at the point of
8252 * execve the process leaves QEMU's control. So we use the
8253 * safe syscall wrapper to ensure that we either take the
8254 * signal as a guest signal, or else it does not happen
8255 * before the execve completes and makes it the other
8256 * program's problem.
8258 ret = get_errno(safe_execve(p, argp, envp));
8259 unlock_user(p, arg1, 0);
8261 goto execve_end;
8263 execve_efault:
8264 ret = -TARGET_EFAULT;
8266 execve_end:
8267 for (gp = guest_argp, q = argp; *q;
8268 gp += sizeof(abi_ulong), q++) {
8269 if (get_user_ual(addr, gp)
8270 || !addr)
8271 break;
8272 unlock_user(*q, addr, 0);
8274 for (gp = guest_envp, q = envp; *q;
8275 gp += sizeof(abi_ulong), q++) {
8276 if (get_user_ual(addr, gp)
8277 || !addr)
8278 break;
8279 unlock_user(*q, addr, 0);
8282 g_free(argp);
8283 g_free(envp);
8285 break;
8286 case TARGET_NR_chdir:
8287 if (!(p = lock_user_string(arg1)))
8288 goto efault;
8289 ret = get_errno(chdir(p));
8290 unlock_user(p, arg1, 0);
8291 break;
8292 #ifdef TARGET_NR_time
8293 case TARGET_NR_time:
8295 time_t host_time;
8296 ret = get_errno(time(&host_time));
8297 if (!is_error(ret)
8298 && arg1
8299 && put_user_sal(host_time, arg1))
8300 goto efault;
8302 break;
8303 #endif
8304 #ifdef TARGET_NR_mknod
8305 case TARGET_NR_mknod:
8306 if (!(p = lock_user_string(arg1)))
8307 goto efault;
8308 ret = get_errno(mknod(p, arg2, arg3));
8309 unlock_user(p, arg1, 0);
8310 break;
8311 #endif
8312 #if defined(TARGET_NR_mknodat)
8313 case TARGET_NR_mknodat:
8314 if (!(p = lock_user_string(arg2)))
8315 goto efault;
8316 ret = get_errno(mknodat(arg1, p, arg3, arg4));
8317 unlock_user(p, arg2, 0);
8318 break;
8319 #endif
8320 #ifdef TARGET_NR_chmod
8321 case TARGET_NR_chmod:
8322 if (!(p = lock_user_string(arg1)))
8323 goto efault;
8324 ret = get_errno(chmod(p, arg2));
8325 unlock_user(p, arg1, 0);
8326 break;
8327 #endif
8328 #ifdef TARGET_NR_break
8329 case TARGET_NR_break:
8330 goto unimplemented;
8331 #endif
8332 #ifdef TARGET_NR_oldstat
8333 case TARGET_NR_oldstat:
8334 goto unimplemented;
8335 #endif
8336 case TARGET_NR_lseek:
8337 ret = get_errno(lseek(arg1, arg2, arg3));
8338 break;
8339 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8340 /* Alpha specific */
8341 case TARGET_NR_getxpid:
8342 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8343 ret = get_errno(getpid());
8344 break;
8345 #endif
8346 #ifdef TARGET_NR_getpid
8347 case TARGET_NR_getpid:
8348 ret = get_errno(getpid());
8349 break;
8350 #endif
8351 case TARGET_NR_mount:
8353 /* need to look at the data field */
8354 void *p2, *p3;
8356 if (arg1) {
8357 p = lock_user_string(arg1);
8358 if (!p) {
8359 goto efault;
8361 } else {
8362 p = NULL;
8365 p2 = lock_user_string(arg2);
8366 if (!p2) {
8367 if (arg1) {
8368 unlock_user(p, arg1, 0);
8370 goto efault;
8373 if (arg3) {
8374 p3 = lock_user_string(arg3);
8375 if (!p3) {
8376 if (arg1) {
8377 unlock_user(p, arg1, 0);
8379 unlock_user(p2, arg2, 0);
8380 goto efault;
8382 } else {
8383 p3 = NULL;
8386 /* FIXME - arg5 should be locked, but it isn't clear how to
8387 * do that since it's not guaranteed to be a NULL-terminated
8388 * string.
8390 if (!arg5) {
8391 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8392 } else {
8393 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8395 ret = get_errno(ret);
8397 if (arg1) {
8398 unlock_user(p, arg1, 0);
8400 unlock_user(p2, arg2, 0);
8401 if (arg3) {
8402 unlock_user(p3, arg3, 0);
8405 break;
8406 #ifdef TARGET_NR_umount
8407 case TARGET_NR_umount:
8408 if (!(p = lock_user_string(arg1)))
8409 goto efault;
8410 ret = get_errno(umount(p));
8411 unlock_user(p, arg1, 0);
8412 break;
8413 #endif
8414 #ifdef TARGET_NR_stime /* not on alpha */
8415 case TARGET_NR_stime:
8417 time_t host_time;
8418 if (get_user_sal(host_time, arg1))
8419 goto efault;
8420 ret = get_errno(stime(&host_time));
8422 break;
8423 #endif
8424 case TARGET_NR_ptrace:
8425 goto unimplemented;
8426 #ifdef TARGET_NR_alarm /* not on alpha */
8427 case TARGET_NR_alarm:
8428 ret = alarm(arg1);
8429 break;
8430 #endif
8431 #ifdef TARGET_NR_oldfstat
8432 case TARGET_NR_oldfstat:
8433 goto unimplemented;
8434 #endif
8435 #ifdef TARGET_NR_pause /* not on alpha */
8436 case TARGET_NR_pause:
8437 if (!block_signals()) {
8438 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8440 ret = -TARGET_EINTR;
8441 break;
8442 #endif
8443 #ifdef TARGET_NR_utime
8444 case TARGET_NR_utime:
8446 struct utimbuf tbuf, *host_tbuf;
8447 struct target_utimbuf *target_tbuf;
8448 if (arg2) {
8449 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8450 goto efault;
8451 tbuf.actime = tswapal(target_tbuf->actime);
8452 tbuf.modtime = tswapal(target_tbuf->modtime);
8453 unlock_user_struct(target_tbuf, arg2, 0);
8454 host_tbuf = &tbuf;
8455 } else {
8456 host_tbuf = NULL;
8458 if (!(p = lock_user_string(arg1)))
8459 goto efault;
8460 ret = get_errno(utime(p, host_tbuf));
8461 unlock_user(p, arg1, 0);
8463 break;
8464 #endif
8465 #ifdef TARGET_NR_utimes
8466 case TARGET_NR_utimes:
8468 struct timeval *tvp, tv[2];
8469 if (arg2) {
8470 if (copy_from_user_timeval(&tv[0], arg2)
8471 || copy_from_user_timeval(&tv[1],
8472 arg2 + sizeof(struct target_timeval)))
8473 goto efault;
8474 tvp = tv;
8475 } else {
8476 tvp = NULL;
8478 if (!(p = lock_user_string(arg1)))
8479 goto efault;
8480 ret = get_errno(utimes(p, tvp));
8481 unlock_user(p, arg1, 0);
8483 break;
8484 #endif
8485 #if defined(TARGET_NR_futimesat)
8486 case TARGET_NR_futimesat:
8488 struct timeval *tvp, tv[2];
8489 if (arg3) {
8490 if (copy_from_user_timeval(&tv[0], arg3)
8491 || copy_from_user_timeval(&tv[1],
8492 arg3 + sizeof(struct target_timeval)))
8493 goto efault;
8494 tvp = tv;
8495 } else {
8496 tvp = NULL;
8498 if (!(p = lock_user_string(arg2)))
8499 goto efault;
8500 ret = get_errno(futimesat(arg1, path(p), tvp));
8501 unlock_user(p, arg2, 0);
8503 break;
8504 #endif
8505 #ifdef TARGET_NR_stty
8506 case TARGET_NR_stty:
8507 goto unimplemented;
8508 #endif
8509 #ifdef TARGET_NR_gtty
8510 case TARGET_NR_gtty:
8511 goto unimplemented;
8512 #endif
8513 #ifdef TARGET_NR_access
8514 case TARGET_NR_access:
8515 if (!(p = lock_user_string(arg1)))
8516 goto efault;
8517 ret = get_errno(access(path(p), arg2));
8518 unlock_user(p, arg1, 0);
8519 break;
8520 #endif
8521 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8522 case TARGET_NR_faccessat:
8523 if (!(p = lock_user_string(arg2)))
8524 goto efault;
8525 ret = get_errno(faccessat(arg1, p, arg3, 0));
8526 unlock_user(p, arg2, 0);
8527 break;
8528 #endif
8529 #ifdef TARGET_NR_nice /* not on alpha */
8530 case TARGET_NR_nice:
8531 ret = get_errno(nice(arg1));
8532 break;
8533 #endif
8534 #ifdef TARGET_NR_ftime
8535 case TARGET_NR_ftime:
8536 goto unimplemented;
8537 #endif
8538 case TARGET_NR_sync:
8539 sync();
8540 ret = 0;
8541 break;
8542 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8543 case TARGET_NR_syncfs:
8544 ret = get_errno(syncfs(arg1));
8545 break;
8546 #endif
8547 case TARGET_NR_kill:
8548 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8549 break;
8550 #ifdef TARGET_NR_rename
8551 case TARGET_NR_rename:
8553 void *p2;
8554 p = lock_user_string(arg1);
8555 p2 = lock_user_string(arg2);
8556 if (!p || !p2)
8557 ret = -TARGET_EFAULT;
8558 else
8559 ret = get_errno(rename(p, p2));
8560 unlock_user(p2, arg2, 0);
8561 unlock_user(p, arg1, 0);
8563 break;
8564 #endif
8565 #if defined(TARGET_NR_renameat)
8566 case TARGET_NR_renameat:
8568 void *p2;
8569 p = lock_user_string(arg2);
8570 p2 = lock_user_string(arg4);
8571 if (!p || !p2)
8572 ret = -TARGET_EFAULT;
8573 else
8574 ret = get_errno(renameat(arg1, p, arg3, p2));
8575 unlock_user(p2, arg4, 0);
8576 unlock_user(p, arg2, 0);
8578 break;
8579 #endif
8580 #if defined(TARGET_NR_renameat2)
8581 case TARGET_NR_renameat2:
8583 void *p2;
8584 p = lock_user_string(arg2);
8585 p2 = lock_user_string(arg4);
8586 if (!p || !p2) {
8587 ret = -TARGET_EFAULT;
8588 } else {
8589 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8591 unlock_user(p2, arg4, 0);
8592 unlock_user(p, arg2, 0);
8594 break;
8595 #endif
8596 #ifdef TARGET_NR_mkdir
8597 case TARGET_NR_mkdir:
8598 if (!(p = lock_user_string(arg1)))
8599 goto efault;
8600 ret = get_errno(mkdir(p, arg2));
8601 unlock_user(p, arg1, 0);
8602 break;
8603 #endif
8604 #if defined(TARGET_NR_mkdirat)
8605 case TARGET_NR_mkdirat:
8606 if (!(p = lock_user_string(arg2)))
8607 goto efault;
8608 ret = get_errno(mkdirat(arg1, p, arg3));
8609 unlock_user(p, arg2, 0);
8610 break;
8611 #endif
8612 #ifdef TARGET_NR_rmdir
8613 case TARGET_NR_rmdir:
8614 if (!(p = lock_user_string(arg1)))
8615 goto efault;
8616 ret = get_errno(rmdir(p));
8617 unlock_user(p, arg1, 0);
8618 break;
8619 #endif
8620 case TARGET_NR_dup:
8621 ret = get_errno(dup(arg1));
8622 if (ret >= 0) {
8623 fd_trans_dup(arg1, ret);
8625 break;
8626 #ifdef TARGET_NR_pipe
8627 case TARGET_NR_pipe:
8628 ret = do_pipe(cpu_env, arg1, 0, 0);
8629 break;
8630 #endif
8631 #ifdef TARGET_NR_pipe2
8632 case TARGET_NR_pipe2:
8633 ret = do_pipe(cpu_env, arg1,
8634 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8635 break;
8636 #endif
8637 case TARGET_NR_times:
8639 struct target_tms *tmsp;
8640 struct tms tms;
8641 ret = get_errno(times(&tms));
8642 if (arg1) {
8643 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8644 if (!tmsp)
8645 goto efault;
8646 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8647 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8648 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8649 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8651 if (!is_error(ret))
8652 ret = host_to_target_clock_t(ret);
8654 break;
8655 #ifdef TARGET_NR_prof
8656 case TARGET_NR_prof:
8657 goto unimplemented;
8658 #endif
8659 #ifdef TARGET_NR_signal
8660 case TARGET_NR_signal:
8661 goto unimplemented;
8662 #endif
8663 case TARGET_NR_acct:
8664 if (arg1 == 0) {
8665 ret = get_errno(acct(NULL));
8666 } else {
8667 if (!(p = lock_user_string(arg1)))
8668 goto efault;
8669 ret = get_errno(acct(path(p)));
8670 unlock_user(p, arg1, 0);
8672 break;
8673 #ifdef TARGET_NR_umount2
8674 case TARGET_NR_umount2:
8675 if (!(p = lock_user_string(arg1)))
8676 goto efault;
8677 ret = get_errno(umount2(p, arg2));
8678 unlock_user(p, arg1, 0);
8679 break;
8680 #endif
8681 #ifdef TARGET_NR_lock
8682 case TARGET_NR_lock:
8683 goto unimplemented;
8684 #endif
8685 case TARGET_NR_ioctl:
8686 ret = do_ioctl(arg1, arg2, arg3);
8687 break;
8688 #ifdef TARGET_NR_fcntl
8689 case TARGET_NR_fcntl:
8690 ret = do_fcntl(arg1, arg2, arg3);
8691 break;
8692 #endif
8693 #ifdef TARGET_NR_mpx
8694 case TARGET_NR_mpx:
8695 goto unimplemented;
8696 #endif
8697 case TARGET_NR_setpgid:
8698 ret = get_errno(setpgid(arg1, arg2));
8699 break;
8700 #ifdef TARGET_NR_ulimit
8701 case TARGET_NR_ulimit:
8702 goto unimplemented;
8703 #endif
8704 #ifdef TARGET_NR_oldolduname
8705 case TARGET_NR_oldolduname:
8706 goto unimplemented;
8707 #endif
8708 case TARGET_NR_umask:
8709 ret = get_errno(umask(arg1));
8710 break;
8711 case TARGET_NR_chroot:
8712 if (!(p = lock_user_string(arg1)))
8713 goto efault;
8714 ret = get_errno(chroot(p));
8715 unlock_user(p, arg1, 0);
8716 break;
8717 #ifdef TARGET_NR_ustat
8718 case TARGET_NR_ustat:
8719 goto unimplemented;
8720 #endif
8721 #ifdef TARGET_NR_dup2
8722 case TARGET_NR_dup2:
8723 ret = get_errno(dup2(arg1, arg2));
8724 if (ret >= 0) {
8725 fd_trans_dup(arg1, arg2);
8727 break;
8728 #endif
8729 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8730 case TARGET_NR_dup3:
8732 int host_flags;
8734 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8735 return -EINVAL;
8737 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8738 ret = get_errno(dup3(arg1, arg2, host_flags));
8739 if (ret >= 0) {
8740 fd_trans_dup(arg1, arg2);
8742 break;
8744 #endif
8745 #ifdef TARGET_NR_getppid /* not on alpha */
8746 case TARGET_NR_getppid:
8747 ret = get_errno(getppid());
8748 break;
8749 #endif
8750 #ifdef TARGET_NR_getpgrp
8751 case TARGET_NR_getpgrp:
8752 ret = get_errno(getpgrp());
8753 break;
8754 #endif
8755 case TARGET_NR_setsid:
8756 ret = get_errno(setsid());
8757 break;
8758 #ifdef TARGET_NR_sigaction
8759 case TARGET_NR_sigaction:
8761 #if defined(TARGET_ALPHA)
8762 struct target_sigaction act, oact, *pact = 0;
8763 struct target_old_sigaction *old_act;
8764 if (arg2) {
8765 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8766 goto efault;
8767 act._sa_handler = old_act->_sa_handler;
8768 target_siginitset(&act.sa_mask, old_act->sa_mask);
8769 act.sa_flags = old_act->sa_flags;
8770 act.sa_restorer = 0;
8771 unlock_user_struct(old_act, arg2, 0);
8772 pact = &act;
8774 ret = get_errno(do_sigaction(arg1, pact, &oact));
8775 if (!is_error(ret) && arg3) {
8776 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8777 goto efault;
8778 old_act->_sa_handler = oact._sa_handler;
8779 old_act->sa_mask = oact.sa_mask.sig[0];
8780 old_act->sa_flags = oact.sa_flags;
8781 unlock_user_struct(old_act, arg3, 1);
8783 #elif defined(TARGET_MIPS)
8784 struct target_sigaction act, oact, *pact, *old_act;
8786 if (arg2) {
8787 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8788 goto efault;
8789 act._sa_handler = old_act->_sa_handler;
8790 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8791 act.sa_flags = old_act->sa_flags;
8792 unlock_user_struct(old_act, arg2, 0);
8793 pact = &act;
8794 } else {
8795 pact = NULL;
8798 ret = get_errno(do_sigaction(arg1, pact, &oact));
8800 if (!is_error(ret) && arg3) {
8801 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8802 goto efault;
8803 old_act->_sa_handler = oact._sa_handler;
8804 old_act->sa_flags = oact.sa_flags;
8805 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8806 old_act->sa_mask.sig[1] = 0;
8807 old_act->sa_mask.sig[2] = 0;
8808 old_act->sa_mask.sig[3] = 0;
8809 unlock_user_struct(old_act, arg3, 1);
8811 #else
8812 struct target_old_sigaction *old_act;
8813 struct target_sigaction act, oact, *pact;
8814 if (arg2) {
8815 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8816 goto efault;
8817 act._sa_handler = old_act->_sa_handler;
8818 target_siginitset(&act.sa_mask, old_act->sa_mask);
8819 act.sa_flags = old_act->sa_flags;
8820 act.sa_restorer = old_act->sa_restorer;
8821 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8822 act.ka_restorer = 0;
8823 #endif
8824 unlock_user_struct(old_act, arg2, 0);
8825 pact = &act;
8826 } else {
8827 pact = NULL;
8829 ret = get_errno(do_sigaction(arg1, pact, &oact));
8830 if (!is_error(ret) && arg3) {
8831 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8832 goto efault;
8833 old_act->_sa_handler = oact._sa_handler;
8834 old_act->sa_mask = oact.sa_mask.sig[0];
8835 old_act->sa_flags = oact.sa_flags;
8836 old_act->sa_restorer = oact.sa_restorer;
8837 unlock_user_struct(old_act, arg3, 1);
8839 #endif
8841 break;
8842 #endif
8843 case TARGET_NR_rt_sigaction:
8845 #if defined(TARGET_ALPHA)
8846 /* For Alpha and SPARC this is a 5 argument syscall, with
8847 * a 'restorer' parameter which must be copied into the
8848 * sa_restorer field of the sigaction struct.
8849 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8850 * and arg5 is the sigsetsize.
8851 * Alpha also has a separate rt_sigaction struct that it uses
8852 * here; SPARC uses the usual sigaction struct.
8854 struct target_rt_sigaction *rt_act;
8855 struct target_sigaction act, oact, *pact = 0;
8857 if (arg4 != sizeof(target_sigset_t)) {
8858 ret = -TARGET_EINVAL;
8859 break;
8861 if (arg2) {
8862 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8863 goto efault;
8864 act._sa_handler = rt_act->_sa_handler;
8865 act.sa_mask = rt_act->sa_mask;
8866 act.sa_flags = rt_act->sa_flags;
8867 act.sa_restorer = arg5;
8868 unlock_user_struct(rt_act, arg2, 0);
8869 pact = &act;
8871 ret = get_errno(do_sigaction(arg1, pact, &oact));
8872 if (!is_error(ret) && arg3) {
8873 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8874 goto efault;
8875 rt_act->_sa_handler = oact._sa_handler;
8876 rt_act->sa_mask = oact.sa_mask;
8877 rt_act->sa_flags = oact.sa_flags;
8878 unlock_user_struct(rt_act, arg3, 1);
8880 #else
8881 #ifdef TARGET_SPARC
8882 target_ulong restorer = arg4;
8883 target_ulong sigsetsize = arg5;
8884 #else
8885 target_ulong sigsetsize = arg4;
8886 #endif
8887 struct target_sigaction *act;
8888 struct target_sigaction *oact;
8890 if (sigsetsize != sizeof(target_sigset_t)) {
8891 ret = -TARGET_EINVAL;
8892 break;
8894 if (arg2) {
8895 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8896 goto efault;
8898 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8899 act->ka_restorer = restorer;
8900 #endif
8901 } else {
8902 act = NULL;
8904 if (arg3) {
8905 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8906 ret = -TARGET_EFAULT;
8907 goto rt_sigaction_fail;
8909 } else
8910 oact = NULL;
8911 ret = get_errno(do_sigaction(arg1, act, oact));
8912 rt_sigaction_fail:
8913 if (act)
8914 unlock_user_struct(act, arg2, 0);
8915 if (oact)
8916 unlock_user_struct(oact, arg3, 1);
8917 #endif
8919 break;
8920 #ifdef TARGET_NR_sgetmask /* not on alpha */
8921 case TARGET_NR_sgetmask:
8923 sigset_t cur_set;
8924 abi_ulong target_set;
8925 ret = do_sigprocmask(0, NULL, &cur_set);
8926 if (!ret) {
8927 host_to_target_old_sigset(&target_set, &cur_set);
8928 ret = target_set;
8931 break;
8932 #endif
8933 #ifdef TARGET_NR_ssetmask /* not on alpha */
8934 case TARGET_NR_ssetmask:
8936 sigset_t set, oset;
8937 abi_ulong target_set = arg1;
8938 target_to_host_old_sigset(&set, &target_set);
8939 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8940 if (!ret) {
8941 host_to_target_old_sigset(&target_set, &oset);
8942 ret = target_set;
8945 break;
8946 #endif
8947 #ifdef TARGET_NR_sigprocmask
8948 case TARGET_NR_sigprocmask:
8950 #if defined(TARGET_ALPHA)
8951 sigset_t set, oldset;
8952 abi_ulong mask;
8953 int how;
8955 switch (arg1) {
8956 case TARGET_SIG_BLOCK:
8957 how = SIG_BLOCK;
8958 break;
8959 case TARGET_SIG_UNBLOCK:
8960 how = SIG_UNBLOCK;
8961 break;
8962 case TARGET_SIG_SETMASK:
8963 how = SIG_SETMASK;
8964 break;
8965 default:
8966 ret = -TARGET_EINVAL;
8967 goto fail;
8969 mask = arg2;
8970 target_to_host_old_sigset(&set, &mask);
8972 ret = do_sigprocmask(how, &set, &oldset);
8973 if (!is_error(ret)) {
8974 host_to_target_old_sigset(&mask, &oldset);
8975 ret = mask;
8976 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8978 #else
8979 sigset_t set, oldset, *set_ptr;
8980 int how;
8982 if (arg2) {
8983 switch (arg1) {
8984 case TARGET_SIG_BLOCK:
8985 how = SIG_BLOCK;
8986 break;
8987 case TARGET_SIG_UNBLOCK:
8988 how = SIG_UNBLOCK;
8989 break;
8990 case TARGET_SIG_SETMASK:
8991 how = SIG_SETMASK;
8992 break;
8993 default:
8994 ret = -TARGET_EINVAL;
8995 goto fail;
8997 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8998 goto efault;
8999 target_to_host_old_sigset(&set, p);
9000 unlock_user(p, arg2, 0);
9001 set_ptr = &set;
9002 } else {
9003 how = 0;
9004 set_ptr = NULL;
9006 ret = do_sigprocmask(how, set_ptr, &oldset);
9007 if (!is_error(ret) && arg3) {
9008 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9009 goto efault;
9010 host_to_target_old_sigset(p, &oldset);
9011 unlock_user(p, arg3, sizeof(target_sigset_t));
9013 #endif
9015 break;
9016 #endif
9017 case TARGET_NR_rt_sigprocmask:
9019 int how = arg1;
9020 sigset_t set, oldset, *set_ptr;
9022 if (arg4 != sizeof(target_sigset_t)) {
9023 ret = -TARGET_EINVAL;
9024 break;
9027 if (arg2) {
9028 switch(how) {
9029 case TARGET_SIG_BLOCK:
9030 how = SIG_BLOCK;
9031 break;
9032 case TARGET_SIG_UNBLOCK:
9033 how = SIG_UNBLOCK;
9034 break;
9035 case TARGET_SIG_SETMASK:
9036 how = SIG_SETMASK;
9037 break;
9038 default:
9039 ret = -TARGET_EINVAL;
9040 goto fail;
9042 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9043 goto efault;
9044 target_to_host_sigset(&set, p);
9045 unlock_user(p, arg2, 0);
9046 set_ptr = &set;
9047 } else {
9048 how = 0;
9049 set_ptr = NULL;
9051 ret = do_sigprocmask(how, set_ptr, &oldset);
9052 if (!is_error(ret) && arg3) {
9053 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9054 goto efault;
9055 host_to_target_sigset(p, &oldset);
9056 unlock_user(p, arg3, sizeof(target_sigset_t));
9059 break;
9060 #ifdef TARGET_NR_sigpending
9061 case TARGET_NR_sigpending:
9063 sigset_t set;
9064 ret = get_errno(sigpending(&set));
9065 if (!is_error(ret)) {
9066 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9067 goto efault;
9068 host_to_target_old_sigset(p, &set);
9069 unlock_user(p, arg1, sizeof(target_sigset_t));
9072 break;
9073 #endif
9074 case TARGET_NR_rt_sigpending:
9076 sigset_t set;
9078 /* Yes, this check is >, not != like most. We follow the kernel's
9079 * logic and it does it like this because it implements
9080 * NR_sigpending through the same code path, and in that case
9081 * the old_sigset_t is smaller in size.
9083 if (arg2 > sizeof(target_sigset_t)) {
9084 ret = -TARGET_EINVAL;
9085 break;
9088 ret = get_errno(sigpending(&set));
9089 if (!is_error(ret)) {
9090 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9091 goto efault;
9092 host_to_target_sigset(p, &set);
9093 unlock_user(p, arg1, sizeof(target_sigset_t));
9096 break;
9097 #ifdef TARGET_NR_sigsuspend
9098 case TARGET_NR_sigsuspend:
9100 TaskState *ts = cpu->opaque;
9101 #if defined(TARGET_ALPHA)
9102 abi_ulong mask = arg1;
9103 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9104 #else
9105 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9106 goto efault;
9107 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9108 unlock_user(p, arg1, 0);
9109 #endif
9110 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9111 SIGSET_T_SIZE));
9112 if (ret != -TARGET_ERESTARTSYS) {
9113 ts->in_sigsuspend = 1;
9116 break;
9117 #endif
9118 case TARGET_NR_rt_sigsuspend:
9120 TaskState *ts = cpu->opaque;
9122 if (arg2 != sizeof(target_sigset_t)) {
9123 ret = -TARGET_EINVAL;
9124 break;
9126 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9127 goto efault;
9128 target_to_host_sigset(&ts->sigsuspend_mask, p);
9129 unlock_user(p, arg1, 0);
9130 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9131 SIGSET_T_SIZE));
9132 if (ret != -TARGET_ERESTARTSYS) {
9133 ts->in_sigsuspend = 1;
9136 break;
9137 case TARGET_NR_rt_sigtimedwait:
9139 sigset_t set;
9140 struct timespec uts, *puts;
9141 siginfo_t uinfo;
9143 if (arg4 != sizeof(target_sigset_t)) {
9144 ret = -TARGET_EINVAL;
9145 break;
9148 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9149 goto efault;
9150 target_to_host_sigset(&set, p);
9151 unlock_user(p, arg1, 0);
9152 if (arg3) {
9153 puts = &uts;
9154 target_to_host_timespec(puts, arg3);
9155 } else {
9156 puts = NULL;
9158 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9159 SIGSET_T_SIZE));
9160 if (!is_error(ret)) {
9161 if (arg2) {
9162 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9164 if (!p) {
9165 goto efault;
9167 host_to_target_siginfo(p, &uinfo);
9168 unlock_user(p, arg2, sizeof(target_siginfo_t));
9170 ret = host_to_target_signal(ret);
9173 break;
9174 case TARGET_NR_rt_sigqueueinfo:
9176 siginfo_t uinfo;
9178 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9179 if (!p) {
9180 goto efault;
9182 target_to_host_siginfo(&uinfo, p);
9183 unlock_user(p, arg3, 0);
9184 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9186 break;
9187 case TARGET_NR_rt_tgsigqueueinfo:
9189 siginfo_t uinfo;
9191 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9192 if (!p) {
9193 goto efault;
9195 target_to_host_siginfo(&uinfo, p);
9196 unlock_user(p, arg4, 0);
9197 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9199 break;
9200 #ifdef TARGET_NR_sigreturn
9201 case TARGET_NR_sigreturn:
9202 if (block_signals()) {
9203 ret = -TARGET_ERESTARTSYS;
9204 } else {
9205 ret = do_sigreturn(cpu_env);
9207 break;
9208 #endif
9209 case TARGET_NR_rt_sigreturn:
9210 if (block_signals()) {
9211 ret = -TARGET_ERESTARTSYS;
9212 } else {
9213 ret = do_rt_sigreturn(cpu_env);
9215 break;
9216 case TARGET_NR_sethostname:
9217 if (!(p = lock_user_string(arg1)))
9218 goto efault;
9219 ret = get_errno(sethostname(p, arg2));
9220 unlock_user(p, arg1, 0);
9221 break;
9222 case TARGET_NR_setrlimit:
9224 int resource = target_to_host_resource(arg1);
9225 struct target_rlimit *target_rlim;
9226 struct rlimit rlim;
9227 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9228 goto efault;
9229 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9230 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9231 unlock_user_struct(target_rlim, arg2, 0);
9232 ret = get_errno(setrlimit(resource, &rlim));
9234 break;
9235 case TARGET_NR_getrlimit:
9237 int resource = target_to_host_resource(arg1);
9238 struct target_rlimit *target_rlim;
9239 struct rlimit rlim;
9241 ret = get_errno(getrlimit(resource, &rlim));
9242 if (!is_error(ret)) {
9243 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9244 goto efault;
9245 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9246 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9247 unlock_user_struct(target_rlim, arg2, 1);
9250 break;
9251 case TARGET_NR_getrusage:
9253 struct rusage rusage;
9254 ret = get_errno(getrusage(arg1, &rusage));
9255 if (!is_error(ret)) {
9256 ret = host_to_target_rusage(arg2, &rusage);
9259 break;
9260 case TARGET_NR_gettimeofday:
9262 struct timeval tv;
9263 ret = get_errno(gettimeofday(&tv, NULL));
9264 if (!is_error(ret)) {
9265 if (copy_to_user_timeval(arg1, &tv))
9266 goto efault;
9269 break;
9270 case TARGET_NR_settimeofday:
9272 struct timeval tv, *ptv = NULL;
9273 struct timezone tz, *ptz = NULL;
9275 if (arg1) {
9276 if (copy_from_user_timeval(&tv, arg1)) {
9277 goto efault;
9279 ptv = &tv;
9282 if (arg2) {
9283 if (copy_from_user_timezone(&tz, arg2)) {
9284 goto efault;
9286 ptz = &tz;
9289 ret = get_errno(settimeofday(ptv, ptz));
9291 break;
9292 #if defined(TARGET_NR_select)
9293 case TARGET_NR_select:
9294 #if defined(TARGET_WANT_NI_OLD_SELECT)
9295 /* some architectures used to have old_select here
9296 * but now ENOSYS it.
9298 ret = -TARGET_ENOSYS;
9299 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9300 ret = do_old_select(arg1);
9301 #else
9302 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9303 #endif
9304 break;
9305 #endif
9306 #ifdef TARGET_NR_pselect6
9307 case TARGET_NR_pselect6:
9309 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
9310 fd_set rfds, wfds, efds;
9311 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
9312 struct timespec ts, *ts_ptr;
9315 * The 6th arg is actually two args smashed together,
9316 * so we cannot use the C library.
9318 sigset_t set;
9319 struct {
9320 sigset_t *set;
9321 size_t size;
9322 } sig, *sig_ptr;
9324 abi_ulong arg_sigset, arg_sigsize, *arg7;
9325 target_sigset_t *target_sigset;
9327 n = arg1;
9328 rfd_addr = arg2;
9329 wfd_addr = arg3;
9330 efd_addr = arg4;
9331 ts_addr = arg5;
9333 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
9334 if (ret) {
9335 goto fail;
9337 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
9338 if (ret) {
9339 goto fail;
9341 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
9342 if (ret) {
9343 goto fail;
9347 * This takes a timespec, and not a timeval, so we cannot
9348 * use the do_select() helper ...
9350 if (ts_addr) {
9351 if (target_to_host_timespec(&ts, ts_addr)) {
9352 goto efault;
9354 ts_ptr = &ts;
9355 } else {
9356 ts_ptr = NULL;
9359 /* Extract the two packed args for the sigset */
9360 if (arg6) {
9361 sig_ptr = &sig;
9362 sig.size = SIGSET_T_SIZE;
9364 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
9365 if (!arg7) {
9366 goto efault;
9368 arg_sigset = tswapal(arg7[0]);
9369 arg_sigsize = tswapal(arg7[1]);
9370 unlock_user(arg7, arg6, 0);
9372 if (arg_sigset) {
9373 sig.set = &set;
9374 if (arg_sigsize != sizeof(*target_sigset)) {
9375 /* Like the kernel, we enforce correct size sigsets */
9376 ret = -TARGET_EINVAL;
9377 goto fail;
9379 target_sigset = lock_user(VERIFY_READ, arg_sigset,
9380 sizeof(*target_sigset), 1);
9381 if (!target_sigset) {
9382 goto efault;
9384 target_to_host_sigset(&set, target_sigset);
9385 unlock_user(target_sigset, arg_sigset, 0);
9386 } else {
9387 sig.set = NULL;
9389 } else {
9390 sig_ptr = NULL;
9393 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
9394 ts_ptr, sig_ptr));
9396 if (!is_error(ret)) {
9397 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
9398 goto efault;
9399 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
9400 goto efault;
9401 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
9402 goto efault;
9404 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
9405 goto efault;
9408 break;
9409 #endif
9410 #ifdef TARGET_NR_symlink
9411 case TARGET_NR_symlink:
9413 void *p2;
9414 p = lock_user_string(arg1);
9415 p2 = lock_user_string(arg2);
9416 if (!p || !p2)
9417 ret = -TARGET_EFAULT;
9418 else
9419 ret = get_errno(symlink(p, p2));
9420 unlock_user(p2, arg2, 0);
9421 unlock_user(p, arg1, 0);
9423 break;
9424 #endif
9425 #if defined(TARGET_NR_symlinkat)
9426 case TARGET_NR_symlinkat:
9428 void *p2;
9429 p = lock_user_string(arg1);
9430 p2 = lock_user_string(arg3);
9431 if (!p || !p2)
9432 ret = -TARGET_EFAULT;
9433 else
9434 ret = get_errno(symlinkat(p, arg2, p2));
9435 unlock_user(p2, arg3, 0);
9436 unlock_user(p, arg1, 0);
9438 break;
9439 #endif
9440 #ifdef TARGET_NR_oldlstat
9441 case TARGET_NR_oldlstat:
9442 goto unimplemented;
9443 #endif
9444 #ifdef TARGET_NR_readlink
9445 case TARGET_NR_readlink:
9447 void *p2;
9448 p = lock_user_string(arg1);
9449 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9450 if (!p || !p2) {
9451 ret = -TARGET_EFAULT;
9452 } else if (!arg3) {
9453 /* Short circuit this for the magic exe check. */
9454 ret = -TARGET_EINVAL;
9455 } else if (is_proc_myself((const char *)p, "exe")) {
9456 char real[PATH_MAX], *temp;
9457 temp = realpath(exec_path, real);
9458 /* Return value is # of bytes that we wrote to the buffer. */
9459 if (temp == NULL) {
9460 ret = get_errno(-1);
9461 } else {
9462 /* Don't worry about sign mismatch as earlier mapping
9463 * logic would have thrown a bad address error. */
9464 ret = MIN(strlen(real), arg3);
9465 /* We cannot NUL terminate the string. */
9466 memcpy(p2, real, ret);
9468 } else {
9469 ret = get_errno(readlink(path(p), p2, arg3));
9471 unlock_user(p2, arg2, ret);
9472 unlock_user(p, arg1, 0);
9474 break;
9475 #endif
9476 #if defined(TARGET_NR_readlinkat)
9477 case TARGET_NR_readlinkat:
9479 void *p2;
9480 p = lock_user_string(arg2);
9481 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9482 if (!p || !p2) {
9483 ret = -TARGET_EFAULT;
9484 } else if (is_proc_myself((const char *)p, "exe")) {
9485 char real[PATH_MAX], *temp;
9486 temp = realpath(exec_path, real);
9487 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
9488 snprintf((char *)p2, arg4, "%s", real);
9489 } else {
9490 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9492 unlock_user(p2, arg3, ret);
9493 unlock_user(p, arg2, 0);
9495 break;
9496 #endif
9497 #ifdef TARGET_NR_uselib
9498 case TARGET_NR_uselib:
9499 goto unimplemented;
9500 #endif
9501 #ifdef TARGET_NR_swapon
9502 case TARGET_NR_swapon:
9503 if (!(p = lock_user_string(arg1)))
9504 goto efault;
9505 ret = get_errno(swapon(p, arg2));
9506 unlock_user(p, arg1, 0);
9507 break;
9508 #endif
9509 case TARGET_NR_reboot:
9510 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9511 /* arg4 must be ignored in all other cases */
9512 p = lock_user_string(arg4);
9513 if (!p) {
9514 goto efault;
9516 ret = get_errno(reboot(arg1, arg2, arg3, p));
9517 unlock_user(p, arg4, 0);
9518 } else {
9519 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9521 break;
9522 #ifdef TARGET_NR_readdir
9523 case TARGET_NR_readdir:
9524 goto unimplemented;
9525 #endif
9526 #ifdef TARGET_NR_mmap
9527 case TARGET_NR_mmap:
9528 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9529 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9530 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9531 || defined(TARGET_S390X)
9533 abi_ulong *v;
9534 abi_ulong v1, v2, v3, v4, v5, v6;
9535 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9536 goto efault;
9537 v1 = tswapal(v[0]);
9538 v2 = tswapal(v[1]);
9539 v3 = tswapal(v[2]);
9540 v4 = tswapal(v[3]);
9541 v5 = tswapal(v[4]);
9542 v6 = tswapal(v[5]);
9543 unlock_user(v, arg1, 0);
9544 ret = get_errno(target_mmap(v1, v2, v3,
9545 target_to_host_bitmask(v4, mmap_flags_tbl),
9546 v5, v6));
9548 #else
9549 ret = get_errno(target_mmap(arg1, arg2, arg3,
9550 target_to_host_bitmask(arg4, mmap_flags_tbl),
9551 arg5,
9552 arg6));
9553 #endif
9554 break;
9555 #endif
9556 #ifdef TARGET_NR_mmap2
9557 case TARGET_NR_mmap2:
9558 #ifndef MMAP_SHIFT
9559 #define MMAP_SHIFT 12
9560 #endif
9561 ret = get_errno(target_mmap(arg1, arg2, arg3,
9562 target_to_host_bitmask(arg4, mmap_flags_tbl),
9563 arg5,
9564 arg6 << MMAP_SHIFT));
9565 break;
9566 #endif
9567 case TARGET_NR_munmap:
9568 ret = get_errno(target_munmap(arg1, arg2));
9569 break;
9570 case TARGET_NR_mprotect:
9572 TaskState *ts = cpu->opaque;
9573 /* Special hack to detect libc making the stack executable. */
9574 if ((arg3 & PROT_GROWSDOWN)
9575 && arg1 >= ts->info->stack_limit
9576 && arg1 <= ts->info->start_stack) {
9577 arg3 &= ~PROT_GROWSDOWN;
9578 arg2 = arg2 + arg1 - ts->info->stack_limit;
9579 arg1 = ts->info->stack_limit;
9582 ret = get_errno(target_mprotect(arg1, arg2, arg3));
9583 break;
9584 #ifdef TARGET_NR_mremap
9585 case TARGET_NR_mremap:
9586 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9587 break;
9588 #endif
9589 /* ??? msync/mlock/munlock are broken for softmmu. */
9590 #ifdef TARGET_NR_msync
9591 case TARGET_NR_msync:
9592 ret = get_errno(msync(g2h(arg1), arg2, arg3));
9593 break;
9594 #endif
9595 #ifdef TARGET_NR_mlock
9596 case TARGET_NR_mlock:
9597 ret = get_errno(mlock(g2h(arg1), arg2));
9598 break;
9599 #endif
9600 #ifdef TARGET_NR_munlock
9601 case TARGET_NR_munlock:
9602 ret = get_errno(munlock(g2h(arg1), arg2));
9603 break;
9604 #endif
9605 #ifdef TARGET_NR_mlockall
9606 case TARGET_NR_mlockall:
9607 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9608 break;
9609 #endif
9610 #ifdef TARGET_NR_munlockall
9611 case TARGET_NR_munlockall:
9612 ret = get_errno(munlockall());
9613 break;
9614 #endif
9615 case TARGET_NR_truncate:
9616 if (!(p = lock_user_string(arg1)))
9617 goto efault;
9618 ret = get_errno(truncate(p, arg2));
9619 unlock_user(p, arg1, 0);
9620 break;
9621 case TARGET_NR_ftruncate:
9622 ret = get_errno(ftruncate(arg1, arg2));
9623 break;
9624 case TARGET_NR_fchmod:
9625 ret = get_errno(fchmod(arg1, arg2));
9626 break;
9627 #if defined(TARGET_NR_fchmodat)
9628 case TARGET_NR_fchmodat:
9629 if (!(p = lock_user_string(arg2)))
9630 goto efault;
9631 ret = get_errno(fchmodat(arg1, p, arg3, 0));
9632 unlock_user(p, arg2, 0);
9633 break;
9634 #endif
9635 case TARGET_NR_getpriority:
9636 /* Note that negative values are valid for getpriority, so we must
9637 differentiate based on errno settings. */
9638 errno = 0;
9639 ret = getpriority(arg1, arg2);
9640 if (ret == -1 && errno != 0) {
9641 ret = -host_to_target_errno(errno);
9642 break;
9644 #ifdef TARGET_ALPHA
9645 /* Return value is the unbiased priority. Signal no error. */
9646 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9647 #else
9648 /* Return value is a biased priority to avoid negative numbers. */
9649 ret = 20 - ret;
9650 #endif
9651 break;
9652 case TARGET_NR_setpriority:
9653 ret = get_errno(setpriority(arg1, arg2, arg3));
9654 break;
9655 #ifdef TARGET_NR_profil
9656 case TARGET_NR_profil:
9657 goto unimplemented;
9658 #endif
9659 case TARGET_NR_statfs:
9660 if (!(p = lock_user_string(arg1)))
9661 goto efault;
9662 ret = get_errno(statfs(path(p), &stfs));
9663 unlock_user(p, arg1, 0);
9664 convert_statfs:
9665 if (!is_error(ret)) {
9666 struct target_statfs *target_stfs;
9668 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9669 goto efault;
9670 __put_user(stfs.f_type, &target_stfs->f_type);
9671 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9672 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9673 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9674 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9675 __put_user(stfs.f_files, &target_stfs->f_files);
9676 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9677 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9678 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9679 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9680 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9681 #ifdef _STATFS_F_FLAGS
9682 __put_user(stfs.f_flags, &target_stfs->f_flags);
9683 #else
9684 __put_user(0, &target_stfs->f_flags);
9685 #endif
9686 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9687 unlock_user_struct(target_stfs, arg2, 1);
9689 break;
9690 case TARGET_NR_fstatfs:
9691 ret = get_errno(fstatfs(arg1, &stfs));
9692 goto convert_statfs;
9693 #ifdef TARGET_NR_statfs64
9694 case TARGET_NR_statfs64:
9695 if (!(p = lock_user_string(arg1)))
9696 goto efault;
9697 ret = get_errno(statfs(path(p), &stfs));
9698 unlock_user(p, arg1, 0);
9699 convert_statfs64:
9700 if (!is_error(ret)) {
9701 struct target_statfs64 *target_stfs;
9703 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9704 goto efault;
9705 __put_user(stfs.f_type, &target_stfs->f_type);
9706 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9707 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9708 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9709 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9710 __put_user(stfs.f_files, &target_stfs->f_files);
9711 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9712 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9713 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9714 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9715 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9716 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9717 unlock_user_struct(target_stfs, arg3, 1);
9719 break;
9720 case TARGET_NR_fstatfs64:
9721 ret = get_errno(fstatfs(arg1, &stfs));
9722 goto convert_statfs64;
9723 #endif
9724 #ifdef TARGET_NR_ioperm
9725 case TARGET_NR_ioperm:
9726 goto unimplemented;
9727 #endif
9728 #ifdef TARGET_NR_socketcall
9729 case TARGET_NR_socketcall:
9730 ret = do_socketcall(arg1, arg2);
9731 break;
9732 #endif
9733 #ifdef TARGET_NR_accept
9734 case TARGET_NR_accept:
9735 ret = do_accept4(arg1, arg2, arg3, 0);
9736 break;
9737 #endif
9738 #ifdef TARGET_NR_accept4
9739 case TARGET_NR_accept4:
9740 ret = do_accept4(arg1, arg2, arg3, arg4);
9741 break;
9742 #endif
9743 #ifdef TARGET_NR_bind
9744 case TARGET_NR_bind:
9745 ret = do_bind(arg1, arg2, arg3);
9746 break;
9747 #endif
9748 #ifdef TARGET_NR_connect
9749 case TARGET_NR_connect:
9750 ret = do_connect(arg1, arg2, arg3);
9751 break;
9752 #endif
9753 #ifdef TARGET_NR_getpeername
9754 case TARGET_NR_getpeername:
9755 ret = do_getpeername(arg1, arg2, arg3);
9756 break;
9757 #endif
9758 #ifdef TARGET_NR_getsockname
9759 case TARGET_NR_getsockname:
9760 ret = do_getsockname(arg1, arg2, arg3);
9761 break;
9762 #endif
9763 #ifdef TARGET_NR_getsockopt
9764 case TARGET_NR_getsockopt:
9765 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9766 break;
9767 #endif
9768 #ifdef TARGET_NR_listen
9769 case TARGET_NR_listen:
9770 ret = get_errno(listen(arg1, arg2));
9771 break;
9772 #endif
9773 #ifdef TARGET_NR_recv
9774 case TARGET_NR_recv:
9775 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9776 break;
9777 #endif
9778 #ifdef TARGET_NR_recvfrom
9779 case TARGET_NR_recvfrom:
9780 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9781 break;
9782 #endif
9783 #ifdef TARGET_NR_recvmsg
9784 case TARGET_NR_recvmsg:
9785 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
9786 break;
9787 #endif
9788 #ifdef TARGET_NR_send
9789 case TARGET_NR_send:
9790 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9791 break;
9792 #endif
9793 #ifdef TARGET_NR_sendmsg
9794 case TARGET_NR_sendmsg:
9795 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
9796 break;
9797 #endif
9798 #ifdef TARGET_NR_sendmmsg
9799 case TARGET_NR_sendmmsg:
9800 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9801 break;
9802 case TARGET_NR_recvmmsg:
9803 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9804 break;
9805 #endif
9806 #ifdef TARGET_NR_sendto
9807 case TARGET_NR_sendto:
9808 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9809 break;
9810 #endif
9811 #ifdef TARGET_NR_shutdown
9812 case TARGET_NR_shutdown:
9813 ret = get_errno(shutdown(arg1, arg2));
9814 break;
9815 #endif
9816 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9817 case TARGET_NR_getrandom:
9818 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9819 if (!p) {
9820 goto efault;
9822 ret = get_errno(getrandom(p, arg2, arg3));
9823 unlock_user(p, arg1, ret);
9824 break;
9825 #endif
9826 #ifdef TARGET_NR_socket
9827 case TARGET_NR_socket:
9828 ret = do_socket(arg1, arg2, arg3);
9829 break;
9830 #endif
9831 #ifdef TARGET_NR_socketpair
9832 case TARGET_NR_socketpair:
9833 ret = do_socketpair(arg1, arg2, arg3, arg4);
9834 break;
9835 #endif
9836 #ifdef TARGET_NR_setsockopt
9837 case TARGET_NR_setsockopt:
9838 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9839 break;
9840 #endif
9841 #if defined(TARGET_NR_syslog)
9842 case TARGET_NR_syslog:
9844 int len = arg2;
9846 switch (arg1) {
9847 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
9848 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
9849 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
9850 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
9851 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
9852 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9853 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
9854 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
9856 ret = get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9858 break;
9859 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
9860 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
9861 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
9863 ret = -TARGET_EINVAL;
9864 if (len < 0) {
9865 goto fail;
9867 ret = 0;
9868 if (len == 0) {
9869 break;
9871 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9872 if (!p) {
9873 ret = -TARGET_EFAULT;
9874 goto fail;
9876 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9877 unlock_user(p, arg2, arg3);
9879 break;
9880 default:
9881 ret = -EINVAL;
9882 break;
9885 break;
9886 #endif
9887 case TARGET_NR_setitimer:
9889 struct itimerval value, ovalue, *pvalue;
9891 if (arg2) {
9892 pvalue = &value;
9893 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9894 || copy_from_user_timeval(&pvalue->it_value,
9895 arg2 + sizeof(struct target_timeval)))
9896 goto efault;
9897 } else {
9898 pvalue = NULL;
9900 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9901 if (!is_error(ret) && arg3) {
9902 if (copy_to_user_timeval(arg3,
9903 &ovalue.it_interval)
9904 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9905 &ovalue.it_value))
9906 goto efault;
9909 break;
9910 case TARGET_NR_getitimer:
9912 struct itimerval value;
9914 ret = get_errno(getitimer(arg1, &value));
9915 if (!is_error(ret) && arg2) {
9916 if (copy_to_user_timeval(arg2,
9917 &value.it_interval)
9918 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9919 &value.it_value))
9920 goto efault;
9923 break;
9924 #ifdef TARGET_NR_stat
9925 case TARGET_NR_stat:
9926 if (!(p = lock_user_string(arg1)))
9927 goto efault;
9928 ret = get_errno(stat(path(p), &st));
9929 unlock_user(p, arg1, 0);
9930 goto do_stat;
9931 #endif
9932 #ifdef TARGET_NR_lstat
9933 case TARGET_NR_lstat:
9934 if (!(p = lock_user_string(arg1)))
9935 goto efault;
9936 ret = get_errno(lstat(path(p), &st));
9937 unlock_user(p, arg1, 0);
9938 goto do_stat;
9939 #endif
9940 case TARGET_NR_fstat:
9942 ret = get_errno(fstat(arg1, &st));
9943 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9944 do_stat:
9945 #endif
9946 if (!is_error(ret)) {
9947 struct target_stat *target_st;
9949 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9950 goto efault;
9951 memset(target_st, 0, sizeof(*target_st));
9952 __put_user(st.st_dev, &target_st->st_dev);
9953 __put_user(st.st_ino, &target_st->st_ino);
9954 __put_user(st.st_mode, &target_st->st_mode);
9955 __put_user(st.st_uid, &target_st->st_uid);
9956 __put_user(st.st_gid, &target_st->st_gid);
9957 __put_user(st.st_nlink, &target_st->st_nlink);
9958 __put_user(st.st_rdev, &target_st->st_rdev);
9959 __put_user(st.st_size, &target_st->st_size);
9960 __put_user(st.st_blksize, &target_st->st_blksize);
9961 __put_user(st.st_blocks, &target_st->st_blocks);
9962 __put_user(st.st_atime, &target_st->target_st_atime);
9963 __put_user(st.st_mtime, &target_st->target_st_mtime);
9964 __put_user(st.st_ctime, &target_st->target_st_ctime);
9965 unlock_user_struct(target_st, arg2, 1);
9968 break;
9969 #ifdef TARGET_NR_olduname
9970 case TARGET_NR_olduname:
9971 goto unimplemented;
9972 #endif
9973 #ifdef TARGET_NR_iopl
9974 case TARGET_NR_iopl:
9975 goto unimplemented;
9976 #endif
9977 case TARGET_NR_vhangup:
9978 ret = get_errno(vhangup());
9979 break;
9980 #ifdef TARGET_NR_idle
9981 case TARGET_NR_idle:
9982 goto unimplemented;
9983 #endif
9984 #ifdef TARGET_NR_syscall
9985 case TARGET_NR_syscall:
9986 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9987 arg6, arg7, arg8, 0);
9988 break;
9989 #endif
9990 case TARGET_NR_wait4:
9992 int status;
9993 abi_long status_ptr = arg2;
9994 struct rusage rusage, *rusage_ptr;
9995 abi_ulong target_rusage = arg4;
9996 abi_long rusage_err;
9997 if (target_rusage)
9998 rusage_ptr = &rusage;
9999 else
10000 rusage_ptr = NULL;
10001 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10002 if (!is_error(ret)) {
10003 if (status_ptr && ret) {
10004 status = host_to_target_waitstatus(status);
10005 if (put_user_s32(status, status_ptr))
10006 goto efault;
10008 if (target_rusage) {
10009 rusage_err = host_to_target_rusage(target_rusage, &rusage);
10010 if (rusage_err) {
10011 ret = rusage_err;
10016 break;
10017 #ifdef TARGET_NR_swapoff
10018 case TARGET_NR_swapoff:
10019 if (!(p = lock_user_string(arg1)))
10020 goto efault;
10021 ret = get_errno(swapoff(p));
10022 unlock_user(p, arg1, 0);
10023 break;
10024 #endif
10025 case TARGET_NR_sysinfo:
10027 struct target_sysinfo *target_value;
10028 struct sysinfo value;
10029 ret = get_errno(sysinfo(&value));
10030 if (!is_error(ret) && arg1)
10032 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10033 goto efault;
10034 __put_user(value.uptime, &target_value->uptime);
10035 __put_user(value.loads[0], &target_value->loads[0]);
10036 __put_user(value.loads[1], &target_value->loads[1]);
10037 __put_user(value.loads[2], &target_value->loads[2]);
10038 __put_user(value.totalram, &target_value->totalram);
10039 __put_user(value.freeram, &target_value->freeram);
10040 __put_user(value.sharedram, &target_value->sharedram);
10041 __put_user(value.bufferram, &target_value->bufferram);
10042 __put_user(value.totalswap, &target_value->totalswap);
10043 __put_user(value.freeswap, &target_value->freeswap);
10044 __put_user(value.procs, &target_value->procs);
10045 __put_user(value.totalhigh, &target_value->totalhigh);
10046 __put_user(value.freehigh, &target_value->freehigh);
10047 __put_user(value.mem_unit, &target_value->mem_unit);
10048 unlock_user_struct(target_value, arg1, 1);
10051 break;
10052 #ifdef TARGET_NR_ipc
10053 case TARGET_NR_ipc:
10054 ret = do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10055 break;
10056 #endif
10057 #ifdef TARGET_NR_semget
10058 case TARGET_NR_semget:
10059 ret = get_errno(semget(arg1, arg2, arg3));
10060 break;
10061 #endif
10062 #ifdef TARGET_NR_semop
10063 case TARGET_NR_semop:
10064 ret = do_semop(arg1, arg2, arg3);
10065 break;
10066 #endif
10067 #ifdef TARGET_NR_semctl
10068 case TARGET_NR_semctl:
10069 ret = do_semctl(arg1, arg2, arg3, arg4);
10070 break;
10071 #endif
10072 #ifdef TARGET_NR_msgctl
10073 case TARGET_NR_msgctl:
10074 ret = do_msgctl(arg1, arg2, arg3);
10075 break;
10076 #endif
10077 #ifdef TARGET_NR_msgget
10078 case TARGET_NR_msgget:
10079 ret = get_errno(msgget(arg1, arg2));
10080 break;
10081 #endif
10082 #ifdef TARGET_NR_msgrcv
10083 case TARGET_NR_msgrcv:
10084 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10085 break;
10086 #endif
10087 #ifdef TARGET_NR_msgsnd
10088 case TARGET_NR_msgsnd:
10089 ret = do_msgsnd(arg1, arg2, arg3, arg4);
10090 break;
10091 #endif
10092 #ifdef TARGET_NR_shmget
10093 case TARGET_NR_shmget:
10094 ret = get_errno(shmget(arg1, arg2, arg3));
10095 break;
10096 #endif
10097 #ifdef TARGET_NR_shmctl
10098 case TARGET_NR_shmctl:
10099 ret = do_shmctl(arg1, arg2, arg3);
10100 break;
10101 #endif
10102 #ifdef TARGET_NR_shmat
10103 case TARGET_NR_shmat:
10104 ret = do_shmat(cpu_env, arg1, arg2, arg3);
10105 break;
10106 #endif
10107 #ifdef TARGET_NR_shmdt
10108 case TARGET_NR_shmdt:
10109 ret = do_shmdt(arg1);
10110 break;
10111 #endif
10112 case TARGET_NR_fsync:
10113 ret = get_errno(fsync(arg1));
10114 break;
10115 case TARGET_NR_clone:
10116 /* Linux manages to have three different orderings for its
10117 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10118 * match the kernel's CONFIG_CLONE_* settings.
10119 * Microblaze is further special in that it uses a sixth
10120 * implicit argument to clone for the TLS pointer.
10122 #if defined(TARGET_MICROBLAZE)
10123 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10124 #elif defined(TARGET_CLONE_BACKWARDS)
10125 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10126 #elif defined(TARGET_CLONE_BACKWARDS2)
10127 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10128 #else
10129 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10130 #endif
10131 break;
10132 #ifdef __NR_exit_group
10133 /* new thread calls */
10134 case TARGET_NR_exit_group:
10135 #ifdef TARGET_GPROF
10136 _mcleanup();
10137 #endif
10138 gdb_exit(cpu_env, arg1);
10139 ret = get_errno(exit_group(arg1));
10140 break;
10141 #endif
10142 case TARGET_NR_setdomainname:
10143 if (!(p = lock_user_string(arg1)))
10144 goto efault;
10145 ret = get_errno(setdomainname(p, arg2));
10146 unlock_user(p, arg1, 0);
10147 break;
10148 case TARGET_NR_uname:
10149 /* no need to transcode because we use the linux syscall */
10151 struct new_utsname * buf;
10153 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10154 goto efault;
10155 ret = get_errno(sys_uname(buf));
10156 if (!is_error(ret)) {
10157 /* Overwrite the native machine name with whatever is being
10158 emulated. */
10159 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10160 sizeof(buf->machine));
10161 /* Allow the user to override the reported release. */
10162 if (qemu_uname_release && *qemu_uname_release) {
10163 g_strlcpy(buf->release, qemu_uname_release,
10164 sizeof(buf->release));
10167 unlock_user_struct(buf, arg1, 1);
10169 break;
10170 #ifdef TARGET_I386
10171 case TARGET_NR_modify_ldt:
10172 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
10173 break;
10174 #if !defined(TARGET_X86_64)
10175 case TARGET_NR_vm86old:
10176 goto unimplemented;
10177 case TARGET_NR_vm86:
10178 ret = do_vm86(cpu_env, arg1, arg2);
10179 break;
10180 #endif
10181 #endif
10182 case TARGET_NR_adjtimex:
10184 struct timex host_buf;
10186 if (target_to_host_timex(&host_buf, arg1) != 0) {
10187 goto efault;
10189 ret = get_errno(adjtimex(&host_buf));
10190 if (!is_error(ret)) {
10191 if (host_to_target_timex(arg1, &host_buf) != 0) {
10192 goto efault;
10196 break;
10197 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10198 case TARGET_NR_clock_adjtime:
10200 struct timex htx, *phtx = &htx;
10202 if (target_to_host_timex(phtx, arg2) != 0) {
10203 goto efault;
10205 ret = get_errno(clock_adjtime(arg1, phtx));
10206 if (!is_error(ret) && phtx) {
10207 if (host_to_target_timex(arg2, phtx) != 0) {
10208 goto efault;
10212 break;
10213 #endif
10214 #ifdef TARGET_NR_create_module
10215 case TARGET_NR_create_module:
10216 #endif
10217 case TARGET_NR_init_module:
10218 case TARGET_NR_delete_module:
10219 #ifdef TARGET_NR_get_kernel_syms
10220 case TARGET_NR_get_kernel_syms:
10221 #endif
10222 goto unimplemented;
10223 case TARGET_NR_quotactl:
10224 goto unimplemented;
10225 case TARGET_NR_getpgid:
10226 ret = get_errno(getpgid(arg1));
10227 break;
10228 case TARGET_NR_fchdir:
10229 ret = get_errno(fchdir(arg1));
10230 break;
10231 #ifdef TARGET_NR_bdflush /* not on x86_64 */
10232 case TARGET_NR_bdflush:
10233 goto unimplemented;
10234 #endif
10235 #ifdef TARGET_NR_sysfs
10236 case TARGET_NR_sysfs:
10237 goto unimplemented;
10238 #endif
10239 case TARGET_NR_personality:
10240 ret = get_errno(personality(arg1));
10241 break;
10242 #ifdef TARGET_NR_afs_syscall
10243 case TARGET_NR_afs_syscall:
10244 goto unimplemented;
10245 #endif
10246 #ifdef TARGET_NR__llseek /* Not on alpha */
10247 case TARGET_NR__llseek:
10249 int64_t res;
10250 #if !defined(__NR_llseek)
10251 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10252 if (res == -1) {
10253 ret = get_errno(res);
10254 } else {
10255 ret = 0;
10257 #else
10258 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10259 #endif
10260 if ((ret == 0) && put_user_s64(res, arg4)) {
10261 goto efault;
10264 break;
10265 #endif
10266 #ifdef TARGET_NR_getdents
10267 case TARGET_NR_getdents:
10268 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10269 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10271 struct target_dirent *target_dirp;
10272 struct linux_dirent *dirp;
10273 abi_long count = arg3;
10275 dirp = g_try_malloc(count);
10276 if (!dirp) {
10277 ret = -TARGET_ENOMEM;
10278 goto fail;
10281 ret = get_errno(sys_getdents(arg1, dirp, count));
10282 if (!is_error(ret)) {
10283 struct linux_dirent *de;
10284 struct target_dirent *tde;
10285 int len = ret;
10286 int reclen, treclen;
10287 int count1, tnamelen;
10289 count1 = 0;
10290 de = dirp;
10291 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10292 goto efault;
10293 tde = target_dirp;
10294 while (len > 0) {
10295 reclen = de->d_reclen;
10296 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10297 assert(tnamelen >= 0);
10298 treclen = tnamelen + offsetof(struct target_dirent, d_name);
10299 assert(count1 + treclen <= count);
10300 tde->d_reclen = tswap16(treclen);
10301 tde->d_ino = tswapal(de->d_ino);
10302 tde->d_off = tswapal(de->d_off);
10303 memcpy(tde->d_name, de->d_name, tnamelen);
10304 de = (struct linux_dirent *)((char *)de + reclen);
10305 len -= reclen;
10306 tde = (struct target_dirent *)((char *)tde + treclen);
10307 count1 += treclen;
10309 ret = count1;
10310 unlock_user(target_dirp, arg2, ret);
10312 g_free(dirp);
10314 #else
10316 struct linux_dirent *dirp;
10317 abi_long count = arg3;
10319 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10320 goto efault;
10321 ret = get_errno(sys_getdents(arg1, dirp, count));
10322 if (!is_error(ret)) {
10323 struct linux_dirent *de;
10324 int len = ret;
10325 int reclen;
10326 de = dirp;
10327 while (len > 0) {
10328 reclen = de->d_reclen;
10329 if (reclen > len)
10330 break;
10331 de->d_reclen = tswap16(reclen);
10332 tswapls(&de->d_ino);
10333 tswapls(&de->d_off);
10334 de = (struct linux_dirent *)((char *)de + reclen);
10335 len -= reclen;
10338 unlock_user(dirp, arg2, ret);
10340 #endif
10341 #else
10342 /* Implement getdents in terms of getdents64 */
10344 struct linux_dirent64 *dirp;
10345 abi_long count = arg3;
10347 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10348 if (!dirp) {
10349 goto efault;
10351 ret = get_errno(sys_getdents64(arg1, dirp, count));
10352 if (!is_error(ret)) {
10353 /* Convert the dirent64 structs to target dirent. We do this
10354 * in-place, since we can guarantee that a target_dirent is no
10355 * larger than a dirent64; however this means we have to be
10356 * careful to read everything before writing in the new format.
10358 struct linux_dirent64 *de;
10359 struct target_dirent *tde;
10360 int len = ret;
10361 int tlen = 0;
10363 de = dirp;
10364 tde = (struct target_dirent *)dirp;
10365 while (len > 0) {
10366 int namelen, treclen;
10367 int reclen = de->d_reclen;
10368 uint64_t ino = de->d_ino;
10369 int64_t off = de->d_off;
10370 uint8_t type = de->d_type;
10372 namelen = strlen(de->d_name);
10373 treclen = offsetof(struct target_dirent, d_name)
10374 + namelen + 2;
10375 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10377 memmove(tde->d_name, de->d_name, namelen + 1);
10378 tde->d_ino = tswapal(ino);
10379 tde->d_off = tswapal(off);
10380 tde->d_reclen = tswap16(treclen);
10381 /* The target_dirent type is in what was formerly a padding
10382 * byte at the end of the structure:
10384 *(((char *)tde) + treclen - 1) = type;
10386 de = (struct linux_dirent64 *)((char *)de + reclen);
10387 tde = (struct target_dirent *)((char *)tde + treclen);
10388 len -= reclen;
10389 tlen += treclen;
10391 ret = tlen;
10393 unlock_user(dirp, arg2, ret);
10395 #endif
10396 break;
10397 #endif /* TARGET_NR_getdents */
10398 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10399 case TARGET_NR_getdents64:
10401 struct linux_dirent64 *dirp;
10402 abi_long count = arg3;
10403 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10404 goto efault;
10405 ret = get_errno(sys_getdents64(arg1, dirp, count));
10406 if (!is_error(ret)) {
10407 struct linux_dirent64 *de;
10408 int len = ret;
10409 int reclen;
10410 de = dirp;
10411 while (len > 0) {
10412 reclen = de->d_reclen;
10413 if (reclen > len)
10414 break;
10415 de->d_reclen = tswap16(reclen);
10416 tswap64s((uint64_t *)&de->d_ino);
10417 tswap64s((uint64_t *)&de->d_off);
10418 de = (struct linux_dirent64 *)((char *)de + reclen);
10419 len -= reclen;
10422 unlock_user(dirp, arg2, ret);
10424 break;
10425 #endif /* TARGET_NR_getdents64 */
10426 #if defined(TARGET_NR__newselect)
10427 case TARGET_NR__newselect:
10428 ret = do_select(arg1, arg2, arg3, arg4, arg5);
10429 break;
10430 #endif
10431 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10432 # ifdef TARGET_NR_poll
10433 case TARGET_NR_poll:
10434 # endif
10435 # ifdef TARGET_NR_ppoll
10436 case TARGET_NR_ppoll:
10437 # endif
10439 struct target_pollfd *target_pfd;
10440 unsigned int nfds = arg2;
10441 struct pollfd *pfd;
10442 unsigned int i;
10444 pfd = NULL;
10445 target_pfd = NULL;
10446 if (nfds) {
10447 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
10448 ret = -TARGET_EINVAL;
10449 break;
10452 target_pfd = lock_user(VERIFY_WRITE, arg1,
10453 sizeof(struct target_pollfd) * nfds, 1);
10454 if (!target_pfd) {
10455 goto efault;
10458 pfd = alloca(sizeof(struct pollfd) * nfds);
10459 for (i = 0; i < nfds; i++) {
10460 pfd[i].fd = tswap32(target_pfd[i].fd);
10461 pfd[i].events = tswap16(target_pfd[i].events);
10465 switch (num) {
10466 # ifdef TARGET_NR_ppoll
10467 case TARGET_NR_ppoll:
10469 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
10470 target_sigset_t *target_set;
10471 sigset_t _set, *set = &_set;
10473 if (arg3) {
10474 if (target_to_host_timespec(timeout_ts, arg3)) {
10475 unlock_user(target_pfd, arg1, 0);
10476 goto efault;
10478 } else {
10479 timeout_ts = NULL;
10482 if (arg4) {
10483 if (arg5 != sizeof(target_sigset_t)) {
10484 unlock_user(target_pfd, arg1, 0);
10485 ret = -TARGET_EINVAL;
10486 break;
10489 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
10490 if (!target_set) {
10491 unlock_user(target_pfd, arg1, 0);
10492 goto efault;
10494 target_to_host_sigset(set, target_set);
10495 } else {
10496 set = NULL;
10499 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
10500 set, SIGSET_T_SIZE));
10502 if (!is_error(ret) && arg3) {
10503 host_to_target_timespec(arg3, timeout_ts);
10505 if (arg4) {
10506 unlock_user(target_set, arg4, 0);
10508 break;
10510 # endif
10511 # ifdef TARGET_NR_poll
10512 case TARGET_NR_poll:
10514 struct timespec ts, *pts;
10516 if (arg3 >= 0) {
10517 /* Convert ms to secs, ns */
10518 ts.tv_sec = arg3 / 1000;
10519 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10520 pts = &ts;
10521 } else {
10522 /* -ve poll() timeout means "infinite" */
10523 pts = NULL;
10525 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10526 break;
10528 # endif
10529 default:
10530 g_assert_not_reached();
10533 if (!is_error(ret)) {
10534 for(i = 0; i < nfds; i++) {
10535 target_pfd[i].revents = tswap16(pfd[i].revents);
10538 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10540 break;
10541 #endif
10542 case TARGET_NR_flock:
10543 /* NOTE: the flock constant seems to be the same for every
10544 Linux platform */
10545 ret = get_errno(safe_flock(arg1, arg2));
10546 break;
10547 case TARGET_NR_readv:
10549 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10550 if (vec != NULL) {
10551 ret = get_errno(safe_readv(arg1, vec, arg3));
10552 unlock_iovec(vec, arg2, arg3, 1);
10553 } else {
10554 ret = -host_to_target_errno(errno);
10557 break;
10558 case TARGET_NR_writev:
10560 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10561 if (vec != NULL) {
10562 ret = get_errno(safe_writev(arg1, vec, arg3));
10563 unlock_iovec(vec, arg2, arg3, 0);
10564 } else {
10565 ret = -host_to_target_errno(errno);
10568 break;
10569 #if defined(TARGET_NR_preadv)
10570 case TARGET_NR_preadv:
10572 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10573 if (vec != NULL) {
10574 unsigned long low, high;
10576 target_to_host_low_high(arg4, arg5, &low, &high);
10577 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10578 unlock_iovec(vec, arg2, arg3, 1);
10579 } else {
10580 ret = -host_to_target_errno(errno);
10583 break;
10584 #endif
10585 #if defined(TARGET_NR_pwritev)
10586 case TARGET_NR_pwritev:
10588 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10589 if (vec != NULL) {
10590 unsigned long low, high;
10592 target_to_host_low_high(arg4, arg5, &low, &high);
10593 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10594 unlock_iovec(vec, arg2, arg3, 0);
10595 } else {
10596 ret = -host_to_target_errno(errno);
10599 break;
10600 #endif
10601 case TARGET_NR_getsid:
10602 ret = get_errno(getsid(arg1));
10603 break;
10604 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10605 case TARGET_NR_fdatasync:
10606 ret = get_errno(fdatasync(arg1));
10607 break;
10608 #endif
10609 #ifdef TARGET_NR__sysctl
10610 case TARGET_NR__sysctl:
10611 /* We don't implement this, but ENOTDIR is always a safe
10612 return value. */
10613 ret = -TARGET_ENOTDIR;
10614 break;
10615 #endif
10616 case TARGET_NR_sched_getaffinity:
10618 unsigned int mask_size;
10619 unsigned long *mask;
10622 * sched_getaffinity needs multiples of ulong, so need to take
10623 * care of mismatches between target ulong and host ulong sizes.
10625 if (arg2 & (sizeof(abi_ulong) - 1)) {
10626 ret = -TARGET_EINVAL;
10627 break;
10629 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10631 mask = alloca(mask_size);
10632 memset(mask, 0, mask_size);
10633 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10635 if (!is_error(ret)) {
10636 if (ret > arg2) {
10637 /* More data returned than the caller's buffer will fit.
10638 * This only happens if sizeof(abi_long) < sizeof(long)
10639 * and the caller passed us a buffer holding an odd number
10640 * of abi_longs. If the host kernel is actually using the
10641 * extra 4 bytes then fail EINVAL; otherwise we can just
10642 * ignore them and only copy the interesting part.
10644 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10645 if (numcpus > arg2 * 8) {
10646 ret = -TARGET_EINVAL;
10647 break;
10649 ret = arg2;
10652 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10653 goto efault;
10657 break;
10658 case TARGET_NR_sched_setaffinity:
10660 unsigned int mask_size;
10661 unsigned long *mask;
10664 * sched_setaffinity needs multiples of ulong, so need to take
10665 * care of mismatches between target ulong and host ulong sizes.
10667 if (arg2 & (sizeof(abi_ulong) - 1)) {
10668 ret = -TARGET_EINVAL;
10669 break;
10671 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10672 mask = alloca(mask_size);
10674 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10675 if (ret) {
10676 break;
10679 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10681 break;
10682 case TARGET_NR_getcpu:
10684 unsigned cpu, node;
10685 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10686 arg2 ? &node : NULL,
10687 NULL));
10688 if (is_error(ret)) {
10689 goto fail;
10691 if (arg1 && put_user_u32(cpu, arg1)) {
10692 goto efault;
10694 if (arg2 && put_user_u32(node, arg2)) {
10695 goto efault;
10698 break;
10699 case TARGET_NR_sched_setparam:
10701 struct sched_param *target_schp;
10702 struct sched_param schp;
10704 if (arg2 == 0) {
10705 return -TARGET_EINVAL;
10707 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10708 goto efault;
10709 schp.sched_priority = tswap32(target_schp->sched_priority);
10710 unlock_user_struct(target_schp, arg2, 0);
10711 ret = get_errno(sched_setparam(arg1, &schp));
10713 break;
10714 case TARGET_NR_sched_getparam:
10716 struct sched_param *target_schp;
10717 struct sched_param schp;
10719 if (arg2 == 0) {
10720 return -TARGET_EINVAL;
10722 ret = get_errno(sched_getparam(arg1, &schp));
10723 if (!is_error(ret)) {
10724 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10725 goto efault;
10726 target_schp->sched_priority = tswap32(schp.sched_priority);
10727 unlock_user_struct(target_schp, arg2, 1);
10730 break;
10731 case TARGET_NR_sched_setscheduler:
10733 struct sched_param *target_schp;
10734 struct sched_param schp;
10735 if (arg3 == 0) {
10736 return -TARGET_EINVAL;
10738 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10739 goto efault;
10740 schp.sched_priority = tswap32(target_schp->sched_priority);
10741 unlock_user_struct(target_schp, arg3, 0);
10742 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
10744 break;
10745 case TARGET_NR_sched_getscheduler:
10746 ret = get_errno(sched_getscheduler(arg1));
10747 break;
10748 case TARGET_NR_sched_yield:
10749 ret = get_errno(sched_yield());
10750 break;
10751 case TARGET_NR_sched_get_priority_max:
10752 ret = get_errno(sched_get_priority_max(arg1));
10753 break;
10754 case TARGET_NR_sched_get_priority_min:
10755 ret = get_errno(sched_get_priority_min(arg1));
10756 break;
10757 case TARGET_NR_sched_rr_get_interval:
10759 struct timespec ts;
10760 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10761 if (!is_error(ret)) {
10762 ret = host_to_target_timespec(arg2, &ts);
10765 break;
10766 case TARGET_NR_nanosleep:
10768 struct timespec req, rem;
10769 target_to_host_timespec(&req, arg1);
10770 ret = get_errno(safe_nanosleep(&req, &rem));
10771 if (is_error(ret) && arg2) {
10772 host_to_target_timespec(arg2, &rem);
10775 break;
10776 #ifdef TARGET_NR_query_module
10777 case TARGET_NR_query_module:
10778 goto unimplemented;
10779 #endif
10780 #ifdef TARGET_NR_nfsservctl
10781 case TARGET_NR_nfsservctl:
10782 goto unimplemented;
10783 #endif
10784 case TARGET_NR_prctl:
10785 switch (arg1) {
10786 case PR_GET_PDEATHSIG:
10788 int deathsig;
10789 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10790 if (!is_error(ret) && arg2
10791 && put_user_ual(deathsig, arg2)) {
10792 goto efault;
10794 break;
10796 #ifdef PR_GET_NAME
10797 case PR_GET_NAME:
10799 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10800 if (!name) {
10801 goto efault;
10803 ret = get_errno(prctl(arg1, (unsigned long)name,
10804 arg3, arg4, arg5));
10805 unlock_user(name, arg2, 16);
10806 break;
10808 case PR_SET_NAME:
10810 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10811 if (!name) {
10812 goto efault;
10814 ret = get_errno(prctl(arg1, (unsigned long)name,
10815 arg3, arg4, arg5));
10816 unlock_user(name, arg2, 0);
10817 break;
10819 #endif
10820 #ifdef TARGET_AARCH64
10821 case TARGET_PR_SVE_SET_VL:
10822 /* We cannot support either PR_SVE_SET_VL_ONEXEC
10823 or PR_SVE_VL_INHERIT. Therefore, anything above
10824 ARM_MAX_VQ results in EINVAL. */
10825 ret = -TARGET_EINVAL;
10826 if (arm_feature(cpu_env, ARM_FEATURE_SVE)
10827 && arg2 >= 0 && arg2 <= ARM_MAX_VQ * 16 && !(arg2 & 15)) {
10828 CPUARMState *env = cpu_env;
10829 int old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10830 int vq = MAX(arg2 / 16, 1);
10832 if (vq < old_vq) {
10833 aarch64_sve_narrow_vq(env, vq);
10835 env->vfp.zcr_el[1] = vq - 1;
10836 ret = vq * 16;
10838 break;
10839 case TARGET_PR_SVE_GET_VL:
10840 ret = -TARGET_EINVAL;
10841 if (arm_feature(cpu_env, ARM_FEATURE_SVE)) {
10842 CPUARMState *env = cpu_env;
10843 ret = ((env->vfp.zcr_el[1] & 0xf) + 1) * 16;
10845 break;
10846 #endif /* AARCH64 */
10847 case PR_GET_SECCOMP:
10848 case PR_SET_SECCOMP:
10849 /* Disable seccomp to prevent the target disabling syscalls we
10850 * need. */
10851 ret = -TARGET_EINVAL;
10852 break;
10853 default:
10854 /* Most prctl options have no pointer arguments */
10855 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10856 break;
10858 break;
10859 #ifdef TARGET_NR_arch_prctl
10860 case TARGET_NR_arch_prctl:
10861 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10862 ret = do_arch_prctl(cpu_env, arg1, arg2);
10863 break;
10864 #else
10865 goto unimplemented;
10866 #endif
10867 #endif
10868 #ifdef TARGET_NR_pread64
10869 case TARGET_NR_pread64:
10870 if (regpairs_aligned(cpu_env, num)) {
10871 arg4 = arg5;
10872 arg5 = arg6;
10874 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
10875 goto efault;
10876 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10877 unlock_user(p, arg2, ret);
10878 break;
10879 case TARGET_NR_pwrite64:
10880 if (regpairs_aligned(cpu_env, num)) {
10881 arg4 = arg5;
10882 arg5 = arg6;
10884 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
10885 goto efault;
10886 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10887 unlock_user(p, arg2, 0);
10888 break;
10889 #endif
10890 case TARGET_NR_getcwd:
10891 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10892 goto efault;
10893 ret = get_errno(sys_getcwd1(p, arg2));
10894 unlock_user(p, arg1, ret);
10895 break;
10896 case TARGET_NR_capget:
10897 case TARGET_NR_capset:
10899 struct target_user_cap_header *target_header;
10900 struct target_user_cap_data *target_data = NULL;
10901 struct __user_cap_header_struct header;
10902 struct __user_cap_data_struct data[2];
10903 struct __user_cap_data_struct *dataptr = NULL;
10904 int i, target_datalen;
10905 int data_items = 1;
10907 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10908 goto efault;
10910 header.version = tswap32(target_header->version);
10911 header.pid = tswap32(target_header->pid);
10913 if (header.version != _LINUX_CAPABILITY_VERSION) {
10914 /* Version 2 and up takes pointer to two user_data structs */
10915 data_items = 2;
10918 target_datalen = sizeof(*target_data) * data_items;
10920 if (arg2) {
10921 if (num == TARGET_NR_capget) {
10922 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10923 } else {
10924 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10926 if (!target_data) {
10927 unlock_user_struct(target_header, arg1, 0);
10928 goto efault;
10931 if (num == TARGET_NR_capset) {
10932 for (i = 0; i < data_items; i++) {
10933 data[i].effective = tswap32(target_data[i].effective);
10934 data[i].permitted = tswap32(target_data[i].permitted);
10935 data[i].inheritable = tswap32(target_data[i].inheritable);
10939 dataptr = data;
10942 if (num == TARGET_NR_capget) {
10943 ret = get_errno(capget(&header, dataptr));
10944 } else {
10945 ret = get_errno(capset(&header, dataptr));
10948 /* The kernel always updates version for both capget and capset */
10949 target_header->version = tswap32(header.version);
10950 unlock_user_struct(target_header, arg1, 1);
10952 if (arg2) {
10953 if (num == TARGET_NR_capget) {
10954 for (i = 0; i < data_items; i++) {
10955 target_data[i].effective = tswap32(data[i].effective);
10956 target_data[i].permitted = tswap32(data[i].permitted);
10957 target_data[i].inheritable = tswap32(data[i].inheritable);
10959 unlock_user(target_data, arg2, target_datalen);
10960 } else {
10961 unlock_user(target_data, arg2, 0);
10964 break;
10966 case TARGET_NR_sigaltstack:
10967 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
10968 break;
10970 #ifdef CONFIG_SENDFILE
10971 case TARGET_NR_sendfile:
10973 off_t *offp = NULL;
10974 off_t off;
10975 if (arg3) {
10976 ret = get_user_sal(off, arg3);
10977 if (is_error(ret)) {
10978 break;
10980 offp = &off;
10982 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10983 if (!is_error(ret) && arg3) {
10984 abi_long ret2 = put_user_sal(off, arg3);
10985 if (is_error(ret2)) {
10986 ret = ret2;
10989 break;
10991 #ifdef TARGET_NR_sendfile64
10992 case TARGET_NR_sendfile64:
10994 off_t *offp = NULL;
10995 off_t off;
10996 if (arg3) {
10997 ret = get_user_s64(off, arg3);
10998 if (is_error(ret)) {
10999 break;
11001 offp = &off;
11003 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11004 if (!is_error(ret) && arg3) {
11005 abi_long ret2 = put_user_s64(off, arg3);
11006 if (is_error(ret2)) {
11007 ret = ret2;
11010 break;
11012 #endif
11013 #else
11014 case TARGET_NR_sendfile:
11015 #ifdef TARGET_NR_sendfile64
11016 case TARGET_NR_sendfile64:
11017 #endif
11018 goto unimplemented;
11019 #endif
11021 #ifdef TARGET_NR_getpmsg
11022 case TARGET_NR_getpmsg:
11023 goto unimplemented;
11024 #endif
11025 #ifdef TARGET_NR_putpmsg
11026 case TARGET_NR_putpmsg:
11027 goto unimplemented;
11028 #endif
11029 #ifdef TARGET_NR_vfork
11030 case TARGET_NR_vfork:
11031 ret = get_errno(do_fork(cpu_env,
11032 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11033 0, 0, 0, 0));
11034 break;
11035 #endif
11036 #ifdef TARGET_NR_ugetrlimit
11037 case TARGET_NR_ugetrlimit:
11039 struct rlimit rlim;
11040 int resource = target_to_host_resource(arg1);
11041 ret = get_errno(getrlimit(resource, &rlim));
11042 if (!is_error(ret)) {
11043 struct target_rlimit *target_rlim;
11044 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11045 goto efault;
11046 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11047 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11048 unlock_user_struct(target_rlim, arg2, 1);
11050 break;
11052 #endif
11053 #ifdef TARGET_NR_truncate64
11054 case TARGET_NR_truncate64:
11055 if (!(p = lock_user_string(arg1)))
11056 goto efault;
11057 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11058 unlock_user(p, arg1, 0);
11059 break;
11060 #endif
11061 #ifdef TARGET_NR_ftruncate64
11062 case TARGET_NR_ftruncate64:
11063 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11064 break;
11065 #endif
11066 #ifdef TARGET_NR_stat64
11067 case TARGET_NR_stat64:
11068 if (!(p = lock_user_string(arg1)))
11069 goto efault;
11070 ret = get_errno(stat(path(p), &st));
11071 unlock_user(p, arg1, 0);
11072 if (!is_error(ret))
11073 ret = host_to_target_stat64(cpu_env, arg2, &st);
11074 break;
11075 #endif
11076 #ifdef TARGET_NR_lstat64
11077 case TARGET_NR_lstat64:
11078 if (!(p = lock_user_string(arg1)))
11079 goto efault;
11080 ret = get_errno(lstat(path(p), &st));
11081 unlock_user(p, arg1, 0);
11082 if (!is_error(ret))
11083 ret = host_to_target_stat64(cpu_env, arg2, &st);
11084 break;
11085 #endif
11086 #ifdef TARGET_NR_fstat64
11087 case TARGET_NR_fstat64:
11088 ret = get_errno(fstat(arg1, &st));
11089 if (!is_error(ret))
11090 ret = host_to_target_stat64(cpu_env, arg2, &st);
11091 break;
11092 #endif
11093 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11094 #ifdef TARGET_NR_fstatat64
11095 case TARGET_NR_fstatat64:
11096 #endif
11097 #ifdef TARGET_NR_newfstatat
11098 case TARGET_NR_newfstatat:
11099 #endif
11100 if (!(p = lock_user_string(arg2)))
11101 goto efault;
11102 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11103 if (!is_error(ret))
11104 ret = host_to_target_stat64(cpu_env, arg3, &st);
11105 break;
11106 #endif
11107 #ifdef TARGET_NR_lchown
11108 case TARGET_NR_lchown:
11109 if (!(p = lock_user_string(arg1)))
11110 goto efault;
11111 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11112 unlock_user(p, arg1, 0);
11113 break;
11114 #endif
11115 #ifdef TARGET_NR_getuid
11116 case TARGET_NR_getuid:
11117 ret = get_errno(high2lowuid(getuid()));
11118 break;
11119 #endif
11120 #ifdef TARGET_NR_getgid
11121 case TARGET_NR_getgid:
11122 ret = get_errno(high2lowgid(getgid()));
11123 break;
11124 #endif
11125 #ifdef TARGET_NR_geteuid
11126 case TARGET_NR_geteuid:
11127 ret = get_errno(high2lowuid(geteuid()));
11128 break;
11129 #endif
11130 #ifdef TARGET_NR_getegid
11131 case TARGET_NR_getegid:
11132 ret = get_errno(high2lowgid(getegid()));
11133 break;
11134 #endif
11135 case TARGET_NR_setreuid:
11136 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11137 break;
11138 case TARGET_NR_setregid:
11139 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11140 break;
11141 case TARGET_NR_getgroups:
11143 int gidsetsize = arg1;
11144 target_id *target_grouplist;
11145 gid_t *grouplist;
11146 int i;
11148 grouplist = alloca(gidsetsize * sizeof(gid_t));
11149 ret = get_errno(getgroups(gidsetsize, grouplist));
11150 if (gidsetsize == 0)
11151 break;
11152 if (!is_error(ret)) {
11153 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11154 if (!target_grouplist)
11155 goto efault;
11156 for(i = 0;i < ret; i++)
11157 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11158 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11161 break;
11162 case TARGET_NR_setgroups:
11164 int gidsetsize = arg1;
11165 target_id *target_grouplist;
11166 gid_t *grouplist = NULL;
11167 int i;
11168 if (gidsetsize) {
11169 grouplist = alloca(gidsetsize * sizeof(gid_t));
11170 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11171 if (!target_grouplist) {
11172 ret = -TARGET_EFAULT;
11173 goto fail;
11175 for (i = 0; i < gidsetsize; i++) {
11176 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11178 unlock_user(target_grouplist, arg2, 0);
11180 ret = get_errno(setgroups(gidsetsize, grouplist));
11182 break;
11183 case TARGET_NR_fchown:
11184 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11185 break;
11186 #if defined(TARGET_NR_fchownat)
11187 case TARGET_NR_fchownat:
11188 if (!(p = lock_user_string(arg2)))
11189 goto efault;
11190 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11191 low2highgid(arg4), arg5));
11192 unlock_user(p, arg2, 0);
11193 break;
11194 #endif
11195 #ifdef TARGET_NR_setresuid
11196 case TARGET_NR_setresuid:
11197 ret = get_errno(sys_setresuid(low2highuid(arg1),
11198 low2highuid(arg2),
11199 low2highuid(arg3)));
11200 break;
11201 #endif
11202 #ifdef TARGET_NR_getresuid
11203 case TARGET_NR_getresuid:
11205 uid_t ruid, euid, suid;
11206 ret = get_errno(getresuid(&ruid, &euid, &suid));
11207 if (!is_error(ret)) {
11208 if (put_user_id(high2lowuid(ruid), arg1)
11209 || put_user_id(high2lowuid(euid), arg2)
11210 || put_user_id(high2lowuid(suid), arg3))
11211 goto efault;
11214 break;
11215 #endif
11216 #ifdef TARGET_NR_getresgid
11217 case TARGET_NR_setresgid:
11218 ret = get_errno(sys_setresgid(low2highgid(arg1),
11219 low2highgid(arg2),
11220 low2highgid(arg3)));
11221 break;
11222 #endif
11223 #ifdef TARGET_NR_getresgid
11224 case TARGET_NR_getresgid:
11226 gid_t rgid, egid, sgid;
11227 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11228 if (!is_error(ret)) {
11229 if (put_user_id(high2lowgid(rgid), arg1)
11230 || put_user_id(high2lowgid(egid), arg2)
11231 || put_user_id(high2lowgid(sgid), arg3))
11232 goto efault;
11235 break;
11236 #endif
#ifdef TARGET_NR_chown
    case TARGET_NR_chown:
        /* chown(2) on a guest-supplied path; target ids widened. */
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        break;
#endif
    case TARGET_NR_setuid:
        /* setuid/setgid go through local sys_setuid/sys_setgid wrappers
         * (defined earlier in this file) rather than the libc calls. */
        ret = get_errno(sys_setuid(low2highuid(arg1)));
        break;
    case TARGET_NR_setgid:
        ret = get_errno(sys_setgid(low2highgid(arg1)));
        break;
    case TARGET_NR_setfsuid:
        /* setfsuid/setfsgid: the id is passed through unconverted. */
        ret = get_errno(setfsuid(arg1));
        break;
    case TARGET_NR_setfsgid:
        ret = get_errno(setfsgid(arg1));
        break;
11258 #ifdef TARGET_NR_lchown32
11259 case TARGET_NR_lchown32:
11260 if (!(p = lock_user_string(arg1)))
11261 goto efault;
11262 ret = get_errno(lchown(p, arg2, arg3));
11263 unlock_user(p, arg1, 0);
11264 break;
11265 #endif
11266 #ifdef TARGET_NR_getuid32
11267 case TARGET_NR_getuid32:
11268 ret = get_errno(getuid());
11269 break;
11270 #endif
11272 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11273 /* Alpha specific */
11274 case TARGET_NR_getxuid:
11276 uid_t euid;
11277 euid=geteuid();
11278 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
11280 ret = get_errno(getuid());
11281 break;
11282 #endif
11283 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11284 /* Alpha specific */
11285 case TARGET_NR_getxgid:
11287 uid_t egid;
11288 egid=getegid();
11289 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
11291 ret = get_errno(getgid());
11292 break;
11293 #endif
11294 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11295 /* Alpha specific */
11296 case TARGET_NR_osf_getsysinfo:
11297 ret = -TARGET_EOPNOTSUPP;
11298 switch (arg1) {
11299 case TARGET_GSI_IEEE_FP_CONTROL:
11301 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
11303 /* Copied from linux ieee_fpcr_to_swcr. */
11304 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
11305 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
11306 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
11307 | SWCR_TRAP_ENABLE_DZE
11308 | SWCR_TRAP_ENABLE_OVF);
11309 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
11310 | SWCR_TRAP_ENABLE_INE);
11311 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
11312 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
11314 if (put_user_u64 (swcr, arg2))
11315 goto efault;
11316 ret = 0;
11318 break;
11320 /* case GSI_IEEE_STATE_AT_SIGNAL:
11321 -- Not implemented in linux kernel.
11322 case GSI_UACPROC:
11323 -- Retrieves current unaligned access state; not much used.
11324 case GSI_PROC_TYPE:
11325 -- Retrieves implver information; surely not used.
11326 case GSI_GET_HWRPB:
11327 -- Grabs a copy of the HWRPB; surely not used.
11330 break;
11331 #endif
11332 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11333 /* Alpha specific */
11334 case TARGET_NR_osf_setsysinfo:
11335 ret = -TARGET_EOPNOTSUPP;
11336 switch (arg1) {
11337 case TARGET_SSI_IEEE_FP_CONTROL:
11339 uint64_t swcr, fpcr, orig_fpcr;
11341 if (get_user_u64 (swcr, arg2)) {
11342 goto efault;
11344 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11345 fpcr = orig_fpcr & FPCR_DYN_MASK;
11347 /* Copied from linux ieee_swcr_to_fpcr. */
11348 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
11349 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
11350 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
11351 | SWCR_TRAP_ENABLE_DZE
11352 | SWCR_TRAP_ENABLE_OVF)) << 48;
11353 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
11354 | SWCR_TRAP_ENABLE_INE)) << 57;
11355 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
11356 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
11358 cpu_alpha_store_fpcr(cpu_env, fpcr);
11359 ret = 0;
11361 break;
11363 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11365 uint64_t exc, fpcr, orig_fpcr;
11366 int si_code;
11368 if (get_user_u64(exc, arg2)) {
11369 goto efault;
11372 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11374 /* We only add to the exception status here. */
11375 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
11377 cpu_alpha_store_fpcr(cpu_env, fpcr);
11378 ret = 0;
11380 /* Old exceptions are not signaled. */
11381 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
11383 /* If any exceptions set by this call,
11384 and are unmasked, send a signal. */
11385 si_code = 0;
11386 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
11387 si_code = TARGET_FPE_FLTRES;
11389 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
11390 si_code = TARGET_FPE_FLTUND;
11392 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
11393 si_code = TARGET_FPE_FLTOVF;
11395 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
11396 si_code = TARGET_FPE_FLTDIV;
11398 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
11399 si_code = TARGET_FPE_FLTINV;
11401 if (si_code != 0) {
11402 target_siginfo_t info;
11403 info.si_signo = SIGFPE;
11404 info.si_errno = 0;
11405 info.si_code = si_code;
11406 info._sifields._sigfault._addr
11407 = ((CPUArchState *)cpu_env)->pc;
11408 queue_signal((CPUArchState *)cpu_env, info.si_signo,
11409 QEMU_SI_FAULT, &info);
11412 break;
11414 /* case SSI_NVPAIRS:
11415 -- Used with SSIN_UACPROC to enable unaligned accesses.
11416 case SSI_IEEE_STATE_AT_SIGNAL:
11417 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11418 -- Not implemented in linux kernel
11421 break;
11422 #endif
11423 #ifdef TARGET_NR_osf_sigprocmask
11424 /* Alpha specific. */
11425 case TARGET_NR_osf_sigprocmask:
11427 abi_ulong mask;
11428 int how;
11429 sigset_t set, oldset;
11431 switch(arg1) {
11432 case TARGET_SIG_BLOCK:
11433 how = SIG_BLOCK;
11434 break;
11435 case TARGET_SIG_UNBLOCK:
11436 how = SIG_UNBLOCK;
11437 break;
11438 case TARGET_SIG_SETMASK:
11439 how = SIG_SETMASK;
11440 break;
11441 default:
11442 ret = -TARGET_EINVAL;
11443 goto fail;
11445 mask = arg2;
11446 target_to_host_old_sigset(&set, &mask);
11447 ret = do_sigprocmask(how, &set, &oldset);
11448 if (!ret) {
11449 host_to_target_old_sigset(&mask, &oldset);
11450 ret = mask;
11453 break;
11454 #endif
#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        /* The *32 syscall variants take and return full-width ids, so
         * none of the low2high/high2low 16-bit conversions are applied
         * in this group. */
        ret = get_errno(getgid());
        break;
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        ret = get_errno(geteuid());
        break;
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        ret = get_errno(getegid());
        break;
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        ret = get_errno(setreuid(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        ret = get_errno(setregid(arg1, arg2));
        break;
#endif
11481 #ifdef TARGET_NR_getgroups32
11482 case TARGET_NR_getgroups32:
11484 int gidsetsize = arg1;
11485 uint32_t *target_grouplist;
11486 gid_t *grouplist;
11487 int i;
11489 grouplist = alloca(gidsetsize * sizeof(gid_t));
11490 ret = get_errno(getgroups(gidsetsize, grouplist));
11491 if (gidsetsize == 0)
11492 break;
11493 if (!is_error(ret)) {
11494 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11495 if (!target_grouplist) {
11496 ret = -TARGET_EFAULT;
11497 goto fail;
11499 for(i = 0;i < ret; i++)
11500 target_grouplist[i] = tswap32(grouplist[i]);
11501 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11504 break;
11505 #endif
11506 #ifdef TARGET_NR_setgroups32
11507 case TARGET_NR_setgroups32:
11509 int gidsetsize = arg1;
11510 uint32_t *target_grouplist;
11511 gid_t *grouplist;
11512 int i;
11514 grouplist = alloca(gidsetsize * sizeof(gid_t));
11515 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11516 if (!target_grouplist) {
11517 ret = -TARGET_EFAULT;
11518 goto fail;
11520 for(i = 0;i < gidsetsize; i++)
11521 grouplist[i] = tswap32(target_grouplist[i]);
11522 unlock_user(target_grouplist, arg2, 0);
11523 ret = get_errno(setgroups(gidsetsize, grouplist));
11525 break;
11526 #endif
11527 #ifdef TARGET_NR_fchown32
11528 case TARGET_NR_fchown32:
11529 ret = get_errno(fchown(arg1, arg2, arg3));
11530 break;
11531 #endif
11532 #ifdef TARGET_NR_setresuid32
11533 case TARGET_NR_setresuid32:
11534 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
11535 break;
11536 #endif
11537 #ifdef TARGET_NR_getresuid32
11538 case TARGET_NR_getresuid32:
11540 uid_t ruid, euid, suid;
11541 ret = get_errno(getresuid(&ruid, &euid, &suid));
11542 if (!is_error(ret)) {
11543 if (put_user_u32(ruid, arg1)
11544 || put_user_u32(euid, arg2)
11545 || put_user_u32(suid, arg3))
11546 goto efault;
11549 break;
11550 #endif
11551 #ifdef TARGET_NR_setresgid32
11552 case TARGET_NR_setresgid32:
11553 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
11554 break;
11555 #endif
11556 #ifdef TARGET_NR_getresgid32
11557 case TARGET_NR_getresgid32:
11559 gid_t rgid, egid, sgid;
11560 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11561 if (!is_error(ret)) {
11562 if (put_user_u32(rgid, arg1)
11563 || put_user_u32(egid, arg2)
11564 || put_user_u32(sgid, arg3))
11565 goto efault;
11568 break;
11569 #endif
11570 #ifdef TARGET_NR_chown32
11571 case TARGET_NR_chown32:
11572 if (!(p = lock_user_string(arg1)))
11573 goto efault;
11574 ret = get_errno(chown(p, arg2, arg3));
11575 unlock_user(p, arg1, 0);
11576 break;
11577 #endif
11578 #ifdef TARGET_NR_setuid32
11579 case TARGET_NR_setuid32:
11580 ret = get_errno(sys_setuid(arg1));
11581 break;
11582 #endif
11583 #ifdef TARGET_NR_setgid32
11584 case TARGET_NR_setgid32:
11585 ret = get_errno(sys_setgid(arg1));
11586 break;
11587 #endif
11588 #ifdef TARGET_NR_setfsuid32
11589 case TARGET_NR_setfsuid32:
11590 ret = get_errno(setfsuid(arg1));
11591 break;
11592 #endif
11593 #ifdef TARGET_NR_setfsgid32
11594 case TARGET_NR_setfsgid32:
11595 ret = get_errno(setfsgid(arg1));
11596 break;
11597 #endif
    case TARGET_NR_pivot_root:
        /* Not emulated; falls through to the generic "unimplemented"
         * handler, which returns -TARGET_ENOSYS. */
        goto unimplemented;
11601 #ifdef TARGET_NR_mincore
11602 case TARGET_NR_mincore:
11604 void *a;
11605 ret = -TARGET_ENOMEM;
11606 a = lock_user(VERIFY_READ, arg1, arg2, 0);
11607 if (!a) {
11608 goto fail;
11610 ret = -TARGET_EFAULT;
11611 p = lock_user_string(arg3);
11612 if (!p) {
11613 goto mincore_fail;
11615 ret = get_errno(mincore(a, arg2, p));
11616 unlock_user(p, arg3, ret);
11617 mincore_fail:
11618 unlock_user(a, arg1, 0);
11620 break;
11621 #endif
11622 #ifdef TARGET_NR_arm_fadvise64_64
11623 case TARGET_NR_arm_fadvise64_64:
11624 /* arm_fadvise64_64 looks like fadvise64_64 but
11625 * with different argument order: fd, advice, offset, len
11626 * rather than the usual fd, offset, len, advice.
11627 * Note that offset and len are both 64-bit so appear as
11628 * pairs of 32-bit registers.
11630 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11631 target_offset64(arg5, arg6), arg2);
11632 ret = -host_to_target_errno(ret);
11633 break;
11634 #endif
11636 #if TARGET_ABI_BITS == 32
11638 #ifdef TARGET_NR_fadvise64_64
11639 case TARGET_NR_fadvise64_64:
11640 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11641 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11642 ret = arg2;
11643 arg2 = arg3;
11644 arg3 = arg4;
11645 arg4 = arg5;
11646 arg5 = arg6;
11647 arg6 = ret;
11648 #else
11649 /* 6 args: fd, offset (high, low), len (high, low), advice */
11650 if (regpairs_aligned(cpu_env, num)) {
11651 /* offset is in (3,4), len in (5,6) and advice in 7 */
11652 arg2 = arg3;
11653 arg3 = arg4;
11654 arg4 = arg5;
11655 arg5 = arg6;
11656 arg6 = arg7;
11658 #endif
11659 ret = -host_to_target_errno(posix_fadvise(arg1,
11660 target_offset64(arg2, arg3),
11661 target_offset64(arg4, arg5),
11662 arg6));
11663 break;
11664 #endif
11666 #ifdef TARGET_NR_fadvise64
11667 case TARGET_NR_fadvise64:
11668 /* 5 args: fd, offset (high, low), len, advice */
11669 if (regpairs_aligned(cpu_env, num)) {
11670 /* offset is in (3,4), len in 5 and advice in 6 */
11671 arg2 = arg3;
11672 arg3 = arg4;
11673 arg4 = arg5;
11674 arg5 = arg6;
11676 ret = -host_to_target_errno(posix_fadvise(arg1,
11677 target_offset64(arg2, arg3),
11678 arg4, arg5));
11679 break;
11680 #endif
11682 #else /* not a 32-bit ABI */
11683 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11684 #ifdef TARGET_NR_fadvise64_64
11685 case TARGET_NR_fadvise64_64:
11686 #endif
11687 #ifdef TARGET_NR_fadvise64
11688 case TARGET_NR_fadvise64:
11689 #endif
11690 #ifdef TARGET_S390X
11691 switch (arg4) {
11692 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11693 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11694 case 6: arg4 = POSIX_FADV_DONTNEED; break;
11695 case 7: arg4 = POSIX_FADV_NOREUSE; break;
11696 default: break;
11698 #endif
11699 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11700 break;
11701 #endif
11702 #endif /* end of 64-bit ABI fadvise handling */
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok.
           (get_errno(0) simply yields 0 here.) */
        ret = get_errno(0);
        break;
#endif
11713 #if TARGET_ABI_BITS == 32
11714 case TARGET_NR_fcntl64:
11716 int cmd;
11717 struct flock64 fl;
11718 from_flock64_fn *copyfrom = copy_from_user_flock64;
11719 to_flock64_fn *copyto = copy_to_user_flock64;
11721 #ifdef TARGET_ARM
11722 if (!((CPUARMState *)cpu_env)->eabi) {
11723 copyfrom = copy_from_user_oabi_flock64;
11724 copyto = copy_to_user_oabi_flock64;
11726 #endif
11728 cmd = target_to_host_fcntl_cmd(arg2);
11729 if (cmd == -TARGET_EINVAL) {
11730 ret = cmd;
11731 break;
11734 switch(arg2) {
11735 case TARGET_F_GETLK64:
11736 ret = copyfrom(&fl, arg3);
11737 if (ret) {
11738 break;
11740 ret = get_errno(fcntl(arg1, cmd, &fl));
11741 if (ret == 0) {
11742 ret = copyto(arg3, &fl);
11744 break;
11746 case TARGET_F_SETLK64:
11747 case TARGET_F_SETLKW64:
11748 ret = copyfrom(&fl, arg3);
11749 if (ret) {
11750 break;
11752 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11753 break;
11754 default:
11755 ret = do_fcntl(arg1, arg2, arg3);
11756 break;
11758 break;
11760 #endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        ret = 0;
        break;
#endif
#ifdef TARGET_NR_security
    case TARGET_NR_security:
        /* Placeholder syscall number; reported as unimplemented. */
        goto unimplemented;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        /* Report the *target* page size, which may differ from the
         * host's page size. */
        ret = TARGET_PAGE_SIZE;
        break;
#endif
    case TARGET_NR_gettid:
        /* gettid() here is presumably a local wrapper declared earlier
         * in this file — TODO confirm (older glibc lacks gettid()). */
        ret = get_errno(gettid());
        break;
11779 #ifdef TARGET_NR_readahead
11780 case TARGET_NR_readahead:
11781 #if TARGET_ABI_BITS == 32
11782 if (regpairs_aligned(cpu_env, num)) {
11783 arg2 = arg3;
11784 arg3 = arg4;
11785 arg4 = arg5;
11787 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11788 #else
11789 ret = get_errno(readahead(arg1, arg2, arg3));
11790 #endif
11791 break;
11792 #endif
11793 #ifdef CONFIG_ATTR
11794 #ifdef TARGET_NR_setxattr
11795 case TARGET_NR_listxattr:
11796 case TARGET_NR_llistxattr:
11798 void *p, *b = 0;
11799 if (arg2) {
11800 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11801 if (!b) {
11802 ret = -TARGET_EFAULT;
11803 break;
11806 p = lock_user_string(arg1);
11807 if (p) {
11808 if (num == TARGET_NR_listxattr) {
11809 ret = get_errno(listxattr(p, b, arg3));
11810 } else {
11811 ret = get_errno(llistxattr(p, b, arg3));
11813 } else {
11814 ret = -TARGET_EFAULT;
11816 unlock_user(p, arg1, 0);
11817 unlock_user(b, arg2, arg3);
11818 break;
11820 case TARGET_NR_flistxattr:
11822 void *b = 0;
11823 if (arg2) {
11824 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11825 if (!b) {
11826 ret = -TARGET_EFAULT;
11827 break;
11830 ret = get_errno(flistxattr(arg1, b, arg3));
11831 unlock_user(b, arg2, arg3);
11832 break;
11834 case TARGET_NR_setxattr:
11835 case TARGET_NR_lsetxattr:
11837 void *p, *n, *v = 0;
11838 if (arg3) {
11839 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11840 if (!v) {
11841 ret = -TARGET_EFAULT;
11842 break;
11845 p = lock_user_string(arg1);
11846 n = lock_user_string(arg2);
11847 if (p && n) {
11848 if (num == TARGET_NR_setxattr) {
11849 ret = get_errno(setxattr(p, n, v, arg4, arg5));
11850 } else {
11851 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11853 } else {
11854 ret = -TARGET_EFAULT;
11856 unlock_user(p, arg1, 0);
11857 unlock_user(n, arg2, 0);
11858 unlock_user(v, arg3, 0);
11860 break;
11861 case TARGET_NR_fsetxattr:
11863 void *n, *v = 0;
11864 if (arg3) {
11865 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11866 if (!v) {
11867 ret = -TARGET_EFAULT;
11868 break;
11871 n = lock_user_string(arg2);
11872 if (n) {
11873 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11874 } else {
11875 ret = -TARGET_EFAULT;
11877 unlock_user(n, arg2, 0);
11878 unlock_user(v, arg3, 0);
11880 break;
11881 case TARGET_NR_getxattr:
11882 case TARGET_NR_lgetxattr:
11884 void *p, *n, *v = 0;
11885 if (arg3) {
11886 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11887 if (!v) {
11888 ret = -TARGET_EFAULT;
11889 break;
11892 p = lock_user_string(arg1);
11893 n = lock_user_string(arg2);
11894 if (p && n) {
11895 if (num == TARGET_NR_getxattr) {
11896 ret = get_errno(getxattr(p, n, v, arg4));
11897 } else {
11898 ret = get_errno(lgetxattr(p, n, v, arg4));
11900 } else {
11901 ret = -TARGET_EFAULT;
11903 unlock_user(p, arg1, 0);
11904 unlock_user(n, arg2, 0);
11905 unlock_user(v, arg3, arg4);
11907 break;
11908 case TARGET_NR_fgetxattr:
11910 void *n, *v = 0;
11911 if (arg3) {
11912 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11913 if (!v) {
11914 ret = -TARGET_EFAULT;
11915 break;
11918 n = lock_user_string(arg2);
11919 if (n) {
11920 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11921 } else {
11922 ret = -TARGET_EFAULT;
11924 unlock_user(n, arg2, 0);
11925 unlock_user(v, arg3, arg4);
11927 break;
11928 case TARGET_NR_removexattr:
11929 case TARGET_NR_lremovexattr:
11931 void *p, *n;
11932 p = lock_user_string(arg1);
11933 n = lock_user_string(arg2);
11934 if (p && n) {
11935 if (num == TARGET_NR_removexattr) {
11936 ret = get_errno(removexattr(p, n));
11937 } else {
11938 ret = get_errno(lremovexattr(p, n));
11940 } else {
11941 ret = -TARGET_EFAULT;
11943 unlock_user(p, arg1, 0);
11944 unlock_user(n, arg2, 0);
11946 break;
11947 case TARGET_NR_fremovexattr:
11949 void *n;
11950 n = lock_user_string(arg2);
11951 if (n) {
11952 ret = get_errno(fremovexattr(arg1, n));
11953 } else {
11954 ret = -TARGET_EFAULT;
11956 unlock_user(n, arg2, 0);
11958 break;
11959 #endif
11960 #endif /* CONFIG_ATTR */
11961 #ifdef TARGET_NR_set_thread_area
11962 case TARGET_NR_set_thread_area:
11963 #if defined(TARGET_MIPS)
11964 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11965 ret = 0;
11966 break;
11967 #elif defined(TARGET_CRIS)
11968 if (arg1 & 0xff)
11969 ret = -TARGET_EINVAL;
11970 else {
11971 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11972 ret = 0;
11974 break;
11975 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11976 ret = do_set_thread_area(cpu_env, arg1);
11977 break;
11978 #elif defined(TARGET_M68K)
11980 TaskState *ts = cpu->opaque;
11981 ts->tp_value = arg1;
11982 ret = 0;
11983 break;
11985 #else
11986 goto unimplemented_nowarn;
11987 #endif
11988 #endif
11989 #ifdef TARGET_NR_get_thread_area
11990 case TARGET_NR_get_thread_area:
11991 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11992 ret = do_get_thread_area(cpu_env, arg1);
11993 break;
11994 #elif defined(TARGET_M68K)
11996 TaskState *ts = cpu->opaque;
11997 ret = ts->tp_value;
11998 break;
12000 #else
12001 goto unimplemented_nowarn;
12002 #endif
12003 #endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        /* Not implemented; the "_nowarn" path returns ENOSYS without
         * logging an "unimplemented syscall" warning. */
        goto unimplemented_nowarn;
#endif
12009 #ifdef TARGET_NR_clock_settime
12010 case TARGET_NR_clock_settime:
12012 struct timespec ts;
12014 ret = target_to_host_timespec(&ts, arg2);
12015 if (!is_error(ret)) {
12016 ret = get_errno(clock_settime(arg1, &ts));
12018 break;
12020 #endif
12021 #ifdef TARGET_NR_clock_gettime
12022 case TARGET_NR_clock_gettime:
12024 struct timespec ts;
12025 ret = get_errno(clock_gettime(arg1, &ts));
12026 if (!is_error(ret)) {
12027 ret = host_to_target_timespec(arg2, &ts);
12029 break;
12031 #endif
12032 #ifdef TARGET_NR_clock_getres
12033 case TARGET_NR_clock_getres:
12035 struct timespec ts;
12036 ret = get_errno(clock_getres(arg1, &ts));
12037 if (!is_error(ret)) {
12038 host_to_target_timespec(arg2, &ts);
12040 break;
12042 #endif
12043 #ifdef TARGET_NR_clock_nanosleep
12044 case TARGET_NR_clock_nanosleep:
12046 struct timespec ts;
12047 target_to_host_timespec(&ts, arg3);
12048 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12049 &ts, arg4 ? &ts : NULL));
12050 if (arg4)
12051 host_to_target_timespec(arg4, &ts);
12053 #if defined(TARGET_PPC)
12054 /* clock_nanosleep is odd in that it returns positive errno values.
12055 * On PPC, CR0 bit 3 should be set in such a situation. */
12056 if (ret && ret != -TARGET_ERESTARTSYS) {
12057 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
12059 #endif
12060 break;
12062 #endif
12064 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12065 case TARGET_NR_set_tid_address:
12066 ret = get_errno(set_tid_address((int *)g2h(arg1)));
12067 break;
12068 #endif
    case TARGET_NR_tkill:
        /* Signal numbers must be translated from target to host before
         * delivery; the safe_* wrappers restart correctly on signals. */
        ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
        break;

    case TARGET_NR_tgkill:
        ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
                        target_to_host_signal(arg3)));
        break;
12079 #ifdef TARGET_NR_set_robust_list
12080 case TARGET_NR_set_robust_list:
12081 case TARGET_NR_get_robust_list:
12082 /* The ABI for supporting robust futexes has userspace pass
12083 * the kernel a pointer to a linked list which is updated by
12084 * userspace after the syscall; the list is walked by the kernel
12085 * when the thread exits. Since the linked list in QEMU guest
12086 * memory isn't a valid linked list for the host and we have
12087 * no way to reliably intercept the thread-death event, we can't
12088 * support these. Silently return ENOSYS so that guest userspace
12089 * falls back to a non-robust futex implementation (which should
12090 * be OK except in the corner case of the guest crashing while
12091 * holding a mutex that is shared with another process via
12092 * shared memory).
12094 goto unimplemented_nowarn;
12095 #endif
12097 #if defined(TARGET_NR_utimensat)
12098 case TARGET_NR_utimensat:
12100 struct timespec *tsp, ts[2];
12101 if (!arg3) {
12102 tsp = NULL;
12103 } else {
12104 target_to_host_timespec(ts, arg3);
12105 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
12106 tsp = ts;
12108 if (!arg2)
12109 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12110 else {
12111 if (!(p = lock_user_string(arg2))) {
12112 ret = -TARGET_EFAULT;
12113 goto fail;
12115 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12116 unlock_user(p, arg2, 0);
12119 break;
12120 #endif
    case TARGET_NR_futex:
        /* All futex operation decoding is delegated to do_futex(). */
        ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
12124 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12125 case TARGET_NR_inotify_init:
12126 ret = get_errno(sys_inotify_init());
12127 if (ret >= 0) {
12128 fd_trans_register(ret, &target_inotify_trans);
12130 break;
12131 #endif
12132 #ifdef CONFIG_INOTIFY1
12133 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12134 case TARGET_NR_inotify_init1:
12135 ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12136 fcntl_flags_tbl)));
12137 if (ret >= 0) {
12138 fd_trans_register(ret, &target_inotify_trans);
12140 break;
12141 #endif
12142 #endif
12143 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12144 case TARGET_NR_inotify_add_watch:
12145 p = lock_user_string(arg2);
12146 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12147 unlock_user(p, arg2, 0);
12148 break;
12149 #endif
12150 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12151 case TARGET_NR_inotify_rm_watch:
12152 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
12153 break;
12154 #endif
12156 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12157 case TARGET_NR_mq_open:
12159 struct mq_attr posix_mq_attr;
12160 struct mq_attr *pposix_mq_attr;
12161 int host_flags;
12163 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12164 pposix_mq_attr = NULL;
12165 if (arg4) {
12166 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12167 goto efault;
12169 pposix_mq_attr = &posix_mq_attr;
12171 p = lock_user_string(arg1 - 1);
12172 if (!p) {
12173 goto efault;
12175 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12176 unlock_user (p, arg1, 0);
12178 break;
12180 case TARGET_NR_mq_unlink:
12181 p = lock_user_string(arg1 - 1);
12182 if (!p) {
12183 ret = -TARGET_EFAULT;
12184 break;
12186 ret = get_errno(mq_unlink(p));
12187 unlock_user (p, arg1, 0);
12188 break;
12190 case TARGET_NR_mq_timedsend:
12192 struct timespec ts;
12194 p = lock_user (VERIFY_READ, arg2, arg3, 1);
12195 if (arg5 != 0) {
12196 target_to_host_timespec(&ts, arg5);
12197 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12198 host_to_target_timespec(arg5, &ts);
12199 } else {
12200 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12202 unlock_user (p, arg2, arg3);
12204 break;
12206 case TARGET_NR_mq_timedreceive:
12208 struct timespec ts;
12209 unsigned int prio;
12211 p = lock_user (VERIFY_READ, arg2, arg3, 1);
12212 if (arg5 != 0) {
12213 target_to_host_timespec(&ts, arg5);
12214 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12215 &prio, &ts));
12216 host_to_target_timespec(arg5, &ts);
12217 } else {
12218 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12219 &prio, NULL));
12221 unlock_user (p, arg2, arg3);
12222 if (arg4 != 0)
12223 put_user_u32(prio, arg4);
12225 break;
12227 /* Not implemented for now... */
12228 /* case TARGET_NR_mq_notify: */
12229 /* break; */
12231 case TARGET_NR_mq_getsetattr:
12233 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12234 ret = 0;
12235 if (arg2 != 0) {
12236 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12237 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12238 &posix_mq_attr_out));
12239 } else if (arg3 != 0) {
12240 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12242 if (ret == 0 && arg3 != 0) {
12243 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12246 break;
12247 #endif
12249 #ifdef CONFIG_SPLICE
12250 #ifdef TARGET_NR_tee
12251 case TARGET_NR_tee:
12253 ret = get_errno(tee(arg1,arg2,arg3,arg4));
12255 break;
12256 #endif
12257 #ifdef TARGET_NR_splice
12258 case TARGET_NR_splice:
12260 loff_t loff_in, loff_out;
12261 loff_t *ploff_in = NULL, *ploff_out = NULL;
12262 if (arg2) {
12263 if (get_user_u64(loff_in, arg2)) {
12264 goto efault;
12266 ploff_in = &loff_in;
12268 if (arg4) {
12269 if (get_user_u64(loff_out, arg4)) {
12270 goto efault;
12272 ploff_out = &loff_out;
12274 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12275 if (arg2) {
12276 if (put_user_u64(loff_in, arg2)) {
12277 goto efault;
12280 if (arg4) {
12281 if (put_user_u64(loff_out, arg4)) {
12282 goto efault;
12286 break;
12287 #endif
12288 #ifdef TARGET_NR_vmsplice
12289 case TARGET_NR_vmsplice:
12291 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12292 if (vec != NULL) {
12293 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12294 unlock_iovec(vec, arg2, arg3, 0);
12295 } else {
12296 ret = -host_to_target_errno(errno);
12299 break;
12300 #endif
12301 #endif /* CONFIG_SPLICE */
12302 #ifdef CONFIG_EVENTFD
12303 #if defined(TARGET_NR_eventfd)
12304 case TARGET_NR_eventfd:
12305 ret = get_errno(eventfd(arg1, 0));
12306 if (ret >= 0) {
12307 fd_trans_register(ret, &target_eventfd_trans);
12309 break;
12310 #endif
12311 #if defined(TARGET_NR_eventfd2)
12312 case TARGET_NR_eventfd2:
12314 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
12315 if (arg2 & TARGET_O_NONBLOCK) {
12316 host_flags |= O_NONBLOCK;
12318 if (arg2 & TARGET_O_CLOEXEC) {
12319 host_flags |= O_CLOEXEC;
12321 ret = get_errno(eventfd(arg1, host_flags));
12322 if (ret >= 0) {
12323 fd_trans_register(ret, &target_eventfd_trans);
12325 break;
12327 #endif
12328 #endif /* CONFIG_EVENTFD */
12329 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12330 case TARGET_NR_fallocate:
12331 #if TARGET_ABI_BITS == 32
12332 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12333 target_offset64(arg5, arg6)));
12334 #else
12335 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12336 #endif
12337 break;
12338 #endif
12339 #if defined(CONFIG_SYNC_FILE_RANGE)
12340 #if defined(TARGET_NR_sync_file_range)
12341 case TARGET_NR_sync_file_range:
12342 #if TARGET_ABI_BITS == 32
12343 #if defined(TARGET_MIPS)
12344 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12345 target_offset64(arg5, arg6), arg7));
12346 #else
12347 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12348 target_offset64(arg4, arg5), arg6));
12349 #endif /* !TARGET_MIPS */
12350 #else
12351 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12352 #endif
12353 break;
12354 #endif
12355 #if defined(TARGET_NR_sync_file_range2)
12356 case TARGET_NR_sync_file_range2:
12357 /* This is like sync_file_range but the arguments are reordered */
12358 #if TARGET_ABI_BITS == 32
12359 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12360 target_offset64(arg5, arg6), arg2));
12361 #else
12362 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12363 #endif
12364 break;
12365 #endif
12366 #endif
12367 #if defined(TARGET_NR_signalfd4)
12368 case TARGET_NR_signalfd4:
12369 ret = do_signalfd4(arg1, arg2, arg4);
12370 break;
12371 #endif
12372 #if defined(TARGET_NR_signalfd)
12373 case TARGET_NR_signalfd:
12374 ret = do_signalfd4(arg1, arg2, 0);
12375 break;
12376 #endif
12377 #if defined(CONFIG_EPOLL)
12378 #if defined(TARGET_NR_epoll_create)
12379 case TARGET_NR_epoll_create:
12380 ret = get_errno(epoll_create(arg1));
12381 break;
12382 #endif
12383 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12384 case TARGET_NR_epoll_create1:
12385 ret = get_errno(epoll_create1(arg1));
12386 break;
12387 #endif
12388 #if defined(TARGET_NR_epoll_ctl)
12389 case TARGET_NR_epoll_ctl:
12391 struct epoll_event ep;
12392 struct epoll_event *epp = 0;
12393 if (arg4) {
12394 struct target_epoll_event *target_ep;
12395 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12396 goto efault;
12398 ep.events = tswap32(target_ep->events);
12399 /* The epoll_data_t union is just opaque data to the kernel,
12400 * so we transfer all 64 bits across and need not worry what
12401 * actual data type it is.
12403 ep.data.u64 = tswap64(target_ep->data.u64);
12404 unlock_user_struct(target_ep, arg4, 0);
12405 epp = &ep;
12407 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12408 break;
12410 #endif
12412 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12413 #if defined(TARGET_NR_epoll_wait)
12414 case TARGET_NR_epoll_wait:
12415 #endif
12416 #if defined(TARGET_NR_epoll_pwait)
12417 case TARGET_NR_epoll_pwait:
12418 #endif
/* Shared implementation for epoll_wait/epoll_pwait: both funnel into
 * safe_epoll_pwait(), with epoll_wait passing a NULL sigset. Events are
 * collected into a host-side buffer, then byte-swapped back into the
 * guest's locked buffer only on success. */
12420 struct target_epoll_event *target_ep;
12421 struct epoll_event *ep;
12422 int epfd = arg1;
12423 int maxevents = arg3;
12424 int timeout = arg4;
/* Bound maxevents before sizing any buffers (also rejects <= 0). */
12426 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12427 ret = -TARGET_EINVAL;
12428 break;
12431 target_ep = lock_user(VERIFY_WRITE, arg2,
12432 maxevents * sizeof(struct target_epoll_event), 1);
12433 if (!target_ep) {
12434 goto efault;
12437 ep = g_try_new(struct epoll_event, maxevents);
12438 if (!ep) {
12439 unlock_user(target_ep, arg2, 0);
12440 ret = -TARGET_ENOMEM;
12441 break;
12444 switch (num) {
12445 #if defined(TARGET_NR_epoll_pwait)
12446 case TARGET_NR_epoll_pwait:
/* epoll_pwait only: convert the optional guest sigset (arg5/arg6)
 * to a host sigset_t; arg6 must match the target sigset size. */
12448 target_sigset_t *target_set;
12449 sigset_t _set, *set = &_set;
12451 if (arg5) {
12452 if (arg6 != sizeof(target_sigset_t)) {
12453 ret = -TARGET_EINVAL;
12454 break;
12457 target_set = lock_user(VERIFY_READ, arg5,
12458 sizeof(target_sigset_t), 1);
12459 if (!target_set) {
12460 ret = -TARGET_EFAULT;
12461 break;
12463 target_to_host_sigset(set, target_set);
12464 unlock_user(target_set, arg5, 0);
12465 } else {
12466 set = NULL;
12469 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12470 set, SIGSET_T_SIZE));
12471 break;
12473 #endif
12474 #if defined(TARGET_NR_epoll_wait)
12475 case TARGET_NR_epoll_wait:
12476 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12477 NULL, 0));
12478 break;
12479 #endif
12480 default:
12481 ret = -TARGET_ENOSYS;
/* On success ret is the event count: swap each event back to the
 * guest buffer; on failure unlock without writing anything back. */
12483 if (!is_error(ret)) {
12484 int i;
12485 for (i = 0; i < ret; i++) {
12486 target_ep[i].events = tswap32(ep[i].events);
12487 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12489 unlock_user(target_ep, arg2,
12490 ret * sizeof(struct target_epoll_event));
12491 } else {
12492 unlock_user(target_ep, arg2, 0);
12494 g_free(ep);
12495 break;
12497 #endif
12498 #endif
12499 #ifdef TARGET_NR_prlimit64
12500 case TARGET_NR_prlimit64:
12502 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
/* New limit (arg3) is swapped in from the guest if present; the old
 * limit is swapped back out to arg4 only when the host call succeeded. */
12503 struct target_rlimit64 *target_rnew, *target_rold;
12504 struct host_rlimit64 rnew, rold, *rnewp = 0;
12505 int resource = target_to_host_resource(arg2);
12506 if (arg3) {
12507 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12508 goto efault;
12510 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12511 rnew.rlim_max = tswap64(target_rnew->rlim_max);
12512 unlock_user_struct(target_rnew, arg3, 0);
12513 rnewp = &rnew;
12516 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12517 if (!is_error(ret) && arg4) {
12518 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12519 goto efault;
12521 target_rold->rlim_cur = tswap64(rold.rlim_cur);
12522 target_rold->rlim_max = tswap64(rold.rlim_max);
12523 unlock_user_struct(target_rold, arg4, 1);
12525 break;
12527 #endif
12528 #ifdef TARGET_NR_gethostname
12529 case TARGET_NR_gethostname:
/* gethostname(2): lock arg2 bytes of guest memory for writing, let the
 * host fill it, then write the (possibly updated) buffer back. */
12531 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12532 if (name) {
12533 ret = get_errno(gethostname(name, arg2));
12534 unlock_user(name, arg1, arg2);
12535 } else {
12536 ret = -TARGET_EFAULT;
12538 break;
12540 #endif
12541 #ifdef TARGET_NR_atomic_cmpxchg_32
12542 case TARGET_NR_atomic_cmpxchg_32:
12544 /* should use start_exclusive from main.c */
/* NOTE(review): as the pre-existing comment says, this read-compare-write
 * sequence is NOT atomic with respect to other guest CPUs/threads.
 * A fault on the read raises SIGSEGV on the guest and returns a
 * sentinel value. The put_user_u32() result is not checked here —
 * presumably the earlier get_user_u32 already validated the address;
 * TODO confirm against the surrounding code. */
12545 abi_ulong mem_value;
12546 if (get_user_u32(mem_value, arg6)) {
12547 target_siginfo_t info;
12548 info.si_signo = SIGSEGV;
12549 info.si_errno = 0;
12550 info.si_code = TARGET_SEGV_MAPERR;
12551 info._sifields._sigfault._addr = arg6;
12552 queue_signal((CPUArchState *)cpu_env, info.si_signo,
12553 QEMU_SI_FAULT, &info);
12554 ret = 0xdeadbeef;
/* Compare-and-swap: store arg1 only if the current value equals arg2;
 * always return the value that was observed. */
12557 if (mem_value == arg2)
12558 put_user_u32(arg1, arg6);
12559 ret = mem_value;
12560 break;
12562 #endif
12563 #ifdef TARGET_NR_atomic_barrier
12564 case TARGET_NR_atomic_barrier:
12566 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
12567 ret = 0;
12568 break;
12570 #endif
12572 #ifdef TARGET_NR_timer_create
12573 case TARGET_NR_timer_create:
12575 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
/* Allocates a slot in the g_posix_timers table and hands the guest an
 * encoded handle (TIMER_MAGIC | index) rather than the raw host timer_t. */
12577 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12579 int clkid = arg1;
12580 int timer_index = next_free_host_timer();
12582 if (timer_index < 0) {
12583 ret = -TARGET_EAGAIN;
12584 } else {
12585 timer_t *phtimer = g_posix_timers + timer_index;
12587 if (arg2) {
12588 phost_sevp = &host_sevp;
12589 ret = target_to_host_sigevent(phost_sevp, arg2);
12590 if (ret != 0) {
12591 break;
12595 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12596 if (ret) {
12597 phtimer = NULL;
12598 } else {
/* Success: publish the encoded handle to the guest's timerid ptr. */
12599 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12600 goto efault;
12604 break;
12606 #endif
12608 #ifdef TARGET_NR_timer_settime
12609 case TARGET_NR_timer_settime:
12611 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12612 * struct itimerspec * old_value */
/* get_timer_id() decodes/validates the guest handle; negative means a
 * target errno to return directly. A NULL new_value is EINVAL. */
12613 target_timer_t timerid = get_timer_id(arg1);
12615 if (timerid < 0) {
12616 ret = timerid;
12617 } else if (arg3 == 0) {
12618 ret = -TARGET_EINVAL;
12619 } else {
12620 timer_t htimer = g_posix_timers[timerid];
12621 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12623 if (target_to_host_itimerspec(&hspec_new, arg3)) {
12624 goto efault;
12626 ret = get_errno(
12627 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
/* Old value is copied back only when the guest asked for it (arg4). */
12628 if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12629 goto efault;
12632 break;
12634 #endif
12636 #ifdef TARGET_NR_timer_gettime
12637 case TARGET_NR_timer_gettime:
12639 /* args: timer_t timerid, struct itimerspec *curr_value */
/* Decode the guest timer handle, query the host timer, and swap the
 * resulting itimerspec back into guest memory. */
12640 target_timer_t timerid = get_timer_id(arg1);
12642 if (timerid < 0) {
12643 ret = timerid;
12644 } else if (!arg2) {
12645 ret = -TARGET_EFAULT;
12646 } else {
12647 timer_t htimer = g_posix_timers[timerid];
12648 struct itimerspec hspec;
12649 ret = get_errno(timer_gettime(htimer, &hspec));
12651 if (host_to_target_itimerspec(arg2, &hspec)) {
12652 ret = -TARGET_EFAULT;
12655 break;
12657 #endif
12659 #ifdef TARGET_NR_timer_getoverrun
12660 case TARGET_NR_timer_getoverrun:
12662 /* args: timer_t timerid */
12663 target_timer_t timerid = get_timer_id(arg1);
12665 if (timerid < 0) {
12666 ret = timerid;
12667 } else {
12668 timer_t htimer = g_posix_timers[timerid];
12669 ret = get_errno(timer_getoverrun(htimer));
/* NOTE(review): fd_trans_unregister() on an overrun count (not an fd)
 * looks odd — confirm against the fd_trans table semantics elsewhere
 * in this file before changing it. */
12671 fd_trans_unregister(ret);
12672 break;
12674 #endif
12676 #ifdef TARGET_NR_timer_delete
12677 case TARGET_NR_timer_delete:
12679 /* args: timer_t timerid */
/* Delete the host timer and free the slot in g_posix_timers so the
 * index can be reused by a later timer_create. */
12680 target_timer_t timerid = get_timer_id(arg1);
12682 if (timerid < 0) {
12683 ret = timerid;
12684 } else {
12685 timer_t htimer = g_posix_timers[timerid];
12686 ret = get_errno(timer_delete(htimer));
12687 g_posix_timers[timerid] = 0;
12689 break;
12691 #endif
12693 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12694 case TARGET_NR_timerfd_create:
/* timerfd_create(2): only the flags need translation (target -> host
 * fcntl flag bitmask); the clockid is shared between ABIs. */
12695 ret = get_errno(timerfd_create(arg1,
12696 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12697 break;
12698 #endif
12700 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12701 case TARGET_NR_timerfd_gettime:
12703 struct itimerspec its_curr;
12705 ret = get_errno(timerfd_gettime(arg1, &its_curr));
/* Copy the current timer value back if the guest passed a buffer. */
12707 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12708 goto efault;
12711 break;
12712 #endif
12714 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12715 case TARGET_NR_timerfd_settime:
/* timerfd_settime(2): swap in the new value (optional), call the host,
 * then swap the old value back out if the guest asked for it. */
12717 struct itimerspec its_new, its_old, *p_new;
12719 if (arg3) {
12720 if (target_to_host_itimerspec(&its_new, arg3)) {
12721 goto efault;
12723 p_new = &its_new;
12724 } else {
12725 p_new = NULL;
12728 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12730 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12731 goto efault;
12734 break;
12735 #endif
/* The following syscalls take only integer arguments with identical
 * host/target encodings, so they are passed straight through. */
12737 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12738 case TARGET_NR_ioprio_get:
12739 ret = get_errno(ioprio_get(arg1, arg2));
12740 break;
12741 #endif
12743 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12744 case TARGET_NR_ioprio_set:
12745 ret = get_errno(ioprio_set(arg1, arg2, arg3));
12746 break;
12747 #endif
12749 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12750 case TARGET_NR_setns:
12751 ret = get_errno(setns(arg1, arg2));
12752 break;
12753 #endif
12754 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12755 case TARGET_NR_unshare:
12756 ret = get_errno(unshare(arg1));
12757 break;
12758 #endif
12759 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12760 case TARGET_NR_kcmp:
12761 ret = get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12762 break;
12763 #endif
/* Fallback for syscalls with no emulation: log once and return ENOSYS.
 * Some NRs jump to unimplemented_nowarn to skip the log message. */
12765 default:
12766 unimplemented:
12767 gemu_log("qemu: Unsupported syscall: %d\n", num);
12768 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
12769 unimplemented_nowarn:
12770 #endif
12771 ret = -TARGET_ENOSYS;
12772 break;
/* Common exit: every case arm reaches here (directly or via break);
 * efault is the shared goto target for guest-memory access failures. */
12774 fail:
12775 #ifdef DEBUG
12776 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
12777 #endif
12778 if(do_strace)
12779 print_syscall_ret(num, ret);
12780 trace_guest_user_syscall_ret(cpu, num, ret);
12781 return ret;
12782 efault:
12783 ret = -TARGET_EFAULT;
12784 goto fail;