linux-user: add netlink CARRIER_UP_COUNT/CARRIER_DOWN_COUNT
[qemu/kevin.git] / linux-user / syscall.c
blob4460f1e39a07f4a6253d1b0969fcb062a33989b6
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/uio.h>
42 #include <poll.h>
43 #include <sys/times.h>
44 #include <sys/shm.h>
45 #include <sys/sem.h>
46 #include <sys/statfs.h>
47 #include <utime.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef TARGET_GPROF
63 #include <sys/gmon.h>
64 #endif
65 #ifdef CONFIG_EVENTFD
66 #include <sys/eventfd.h>
67 #endif
68 #ifdef CONFIG_EPOLL
69 #include <sys/epoll.h>
70 #endif
71 #ifdef CONFIG_ATTR
72 #include "qemu/xattr.h"
73 #endif
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
76 #endif
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #include <linux/vt.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #include <linux/if_bridge.h>
108 #endif
109 #include <linux/audit.h>
110 #include "linux_loop.h"
111 #include "uname.h"
113 #include "qemu.h"
115 #ifndef CLONE_IO
116 #define CLONE_IO 0x80000000 /* Clone io context */
117 #endif
/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */

//#define DEBUG
/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])

/* Undefine any host-header versions so the local raw-syscall wrapper
 * macros below take effect. */
#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

/* _syscallN(type, name, ...) defines a static function "name" that invokes
 * the raw host syscall __NR_name with N arguments via syscall(2). */
#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}

/* Aliases mapping the local sys_* wrapper names onto the host syscall
 * numbers they invoke. */
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch

#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif

#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif

/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif

#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if (defined(TARGET_NR_getdents) && \
      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif

#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif
/* Translation table between TARGET_O_* and host O_* open(2)/fcntl(2) flag
 * bits; each row is { target_mask, target_bits, host_mask, host_bits }. */
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest. */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
/* Local mirror of the kernel's IFLA_BR_* bridge netlink attribute numbering
 * (linux/if_link.h), kept here so translation works even when the host
 * headers are older than the guest's. Order must match the kernel exactly —
 * values are positional. */
enum {
    QEMU_IFLA_BR_UNSPEC,
    QEMU_IFLA_BR_FORWARD_DELAY,
    QEMU_IFLA_BR_HELLO_TIME,
    QEMU_IFLA_BR_MAX_AGE,
    QEMU_IFLA_BR_AGEING_TIME,
    QEMU_IFLA_BR_STP_STATE,
    QEMU_IFLA_BR_PRIORITY,
    QEMU_IFLA_BR_VLAN_FILTERING,
    QEMU_IFLA_BR_VLAN_PROTOCOL,
    QEMU_IFLA_BR_GROUP_FWD_MASK,
    QEMU_IFLA_BR_ROOT_ID,
    QEMU_IFLA_BR_BRIDGE_ID,
    QEMU_IFLA_BR_ROOT_PORT,
    QEMU_IFLA_BR_ROOT_PATH_COST,
    QEMU_IFLA_BR_TOPOLOGY_CHANGE,
    QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
    QEMU_IFLA_BR_HELLO_TIMER,
    QEMU_IFLA_BR_TCN_TIMER,
    QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
    QEMU_IFLA_BR_GC_TIMER,
    QEMU_IFLA_BR_GROUP_ADDR,
    QEMU_IFLA_BR_FDB_FLUSH,
    QEMU_IFLA_BR_MCAST_ROUTER,
    QEMU_IFLA_BR_MCAST_SNOOPING,
    QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
    QEMU_IFLA_BR_MCAST_QUERIER,
    QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
    QEMU_IFLA_BR_MCAST_HASH_MAX,
    QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
    QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
    QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
    QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
    QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
    QEMU_IFLA_BR_MCAST_QUERY_INTVL,
    QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
    QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
    QEMU_IFLA_BR_NF_CALL_IPTABLES,
    QEMU_IFLA_BR_NF_CALL_IP6TABLES,
    QEMU_IFLA_BR_NF_CALL_ARPTABLES,
    QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
    QEMU_IFLA_BR_PAD,
    QEMU_IFLA_BR_VLAN_STATS_ENABLED,
    QEMU_IFLA_BR_MCAST_STATS_ENABLED,
    QEMU_IFLA_BR_MCAST_IGMP_VERSION,
    QEMU_IFLA_BR_MCAST_MLD_VERSION,
    QEMU___IFLA_BR_MAX,
};
/* Local mirror of the kernel's IFLA_* link attribute numbering
 * (linux/if_link.h). The trailing CARRIER_UP_COUNT/CARRIER_DOWN_COUNT/
 * NEW_IFINDEX entries track Linux 4.16. Order must match the kernel —
 * values are positional. */
enum {
    QEMU_IFLA_UNSPEC,
    QEMU_IFLA_ADDRESS,
    QEMU_IFLA_BROADCAST,
    QEMU_IFLA_IFNAME,
    QEMU_IFLA_MTU,
    QEMU_IFLA_LINK,
    QEMU_IFLA_QDISC,
    QEMU_IFLA_STATS,
    QEMU_IFLA_COST,
    QEMU_IFLA_PRIORITY,
    QEMU_IFLA_MASTER,
    QEMU_IFLA_WIRELESS,
    QEMU_IFLA_PROTINFO,
    QEMU_IFLA_TXQLEN,
    QEMU_IFLA_MAP,
    QEMU_IFLA_WEIGHT,
    QEMU_IFLA_OPERSTATE,
    QEMU_IFLA_LINKMODE,
    QEMU_IFLA_LINKINFO,
    QEMU_IFLA_NET_NS_PID,
    QEMU_IFLA_IFALIAS,
    QEMU_IFLA_NUM_VF,
    QEMU_IFLA_VFINFO_LIST,
    QEMU_IFLA_STATS64,
    QEMU_IFLA_VF_PORTS,
    QEMU_IFLA_PORT_SELF,
    QEMU_IFLA_AF_SPEC,
    QEMU_IFLA_GROUP,
    QEMU_IFLA_NET_NS_FD,
    QEMU_IFLA_EXT_MASK,
    QEMU_IFLA_PROMISCUITY,
    QEMU_IFLA_NUM_TX_QUEUES,
    QEMU_IFLA_NUM_RX_QUEUES,
    QEMU_IFLA_CARRIER,
    QEMU_IFLA_PHYS_PORT_ID,
    QEMU_IFLA_CARRIER_CHANGES,
    QEMU_IFLA_PHYS_SWITCH_ID,
    QEMU_IFLA_LINK_NETNSID,
    QEMU_IFLA_PHYS_PORT_NAME,
    QEMU_IFLA_PROTO_DOWN,
    QEMU_IFLA_GSO_MAX_SEGS,
    QEMU_IFLA_GSO_MAX_SIZE,
    QEMU_IFLA_PAD,
    QEMU_IFLA_XDP,
    QEMU_IFLA_EVENT,
    QEMU_IFLA_NEW_NETNSID,
    QEMU_IFLA_IF_NETNSID,
    QEMU_IFLA_CARRIER_UP_COUNT,
    QEMU_IFLA_CARRIER_DOWN_COUNT,
    QEMU_IFLA_NEW_IFINDEX,
    QEMU___IFLA_MAX
};
/* Local mirror of the kernel's IFLA_BRPORT_* bridge-port attribute
 * numbering (linux/if_link.h). Order must match the kernel — values are
 * positional. */
enum {
    QEMU_IFLA_BRPORT_UNSPEC,
    QEMU_IFLA_BRPORT_STATE,
    QEMU_IFLA_BRPORT_PRIORITY,
    QEMU_IFLA_BRPORT_COST,
    QEMU_IFLA_BRPORT_MODE,
    QEMU_IFLA_BRPORT_GUARD,
    QEMU_IFLA_BRPORT_PROTECT,
    QEMU_IFLA_BRPORT_FAST_LEAVE,
    QEMU_IFLA_BRPORT_LEARNING,
    QEMU_IFLA_BRPORT_UNICAST_FLOOD,
    QEMU_IFLA_BRPORT_PROXYARP,
    QEMU_IFLA_BRPORT_LEARNING_SYNC,
    QEMU_IFLA_BRPORT_PROXYARP_WIFI,
    QEMU_IFLA_BRPORT_ROOT_ID,
    QEMU_IFLA_BRPORT_BRIDGE_ID,
    QEMU_IFLA_BRPORT_DESIGNATED_PORT,
    QEMU_IFLA_BRPORT_DESIGNATED_COST,
    QEMU_IFLA_BRPORT_ID,
    QEMU_IFLA_BRPORT_NO,
    QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
    QEMU_IFLA_BRPORT_CONFIG_PENDING,
    QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
    QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
    QEMU_IFLA_BRPORT_HOLD_TIMER,
    QEMU_IFLA_BRPORT_FLUSH,
    QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
    QEMU_IFLA_BRPORT_PAD,
    QEMU_IFLA_BRPORT_MCAST_FLOOD,
    QEMU_IFLA_BRPORT_MCAST_TO_UCAST,
    QEMU_IFLA_BRPORT_VLAN_TUNNEL,
    QEMU_IFLA_BRPORT_BCAST_FLOOD,
    QEMU_IFLA_BRPORT_GROUP_FWD_MASK,
    QEMU_IFLA_BRPORT_NEIGH_SUPPRESS,
    QEMU___IFLA_BRPORT_MAX
};
/* Local mirror of the kernel's IFLA_INFO_* link-info attribute numbering. */
enum {
    QEMU_IFLA_INFO_UNSPEC,
    QEMU_IFLA_INFO_KIND,
    QEMU_IFLA_INFO_DATA,
    QEMU_IFLA_INFO_XSTATS,
    QEMU_IFLA_INFO_SLAVE_KIND,
    QEMU_IFLA_INFO_SLAVE_DATA,
    QEMU___IFLA_INFO_MAX,
};

/* Local mirror of the kernel's IFLA_INET_* (IPv4 AF_SPEC) numbering. */
enum {
    QEMU_IFLA_INET_UNSPEC,
    QEMU_IFLA_INET_CONF,
    QEMU___IFLA_INET_MAX,
};

/* Local mirror of the kernel's IFLA_INET6_* (IPv6 AF_SPEC) numbering. */
enum {
    QEMU_IFLA_INET6_UNSPEC,
    QEMU_IFLA_INET6_FLAGS,
    QEMU_IFLA_INET6_CONF,
    QEMU_IFLA_INET6_STATS,
    QEMU_IFLA_INET6_MCAST,
    QEMU_IFLA_INET6_CACHEINFO,
    QEMU_IFLA_INET6_ICMP6STATS,
    QEMU_IFLA_INET6_TOKEN,
    QEMU_IFLA_INET6_ADDR_GEN_MODE,
    QEMU___IFLA_INET6_MAX
};

/* Local mirror of the kernel's IFLA_XDP_* attribute numbering. */
enum {
    QEMU_IFLA_XDP_UNSPEC,
    QEMU_IFLA_XDP_FD,
    QEMU_IFLA_XDP_ATTACHED,
    QEMU_IFLA_XDP_FLAGS,
    QEMU_IFLA_XDP_PROG_ID,
    QEMU___IFLA_XDP_MAX,
};
/* Per-fd translation hooks: convert data (and, for sendto-style calls,
 * socket addresses) flowing through a host fd between target and host
 * representations. */
typedef abi_long (*TargetFdDataFunc)(void *, size_t);
typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
typedef struct TargetFdTrans {
    TargetFdDataFunc host_to_target_data;
    TargetFdDataFunc target_to_host_data;
    TargetFdAddrFunc target_to_host_addr;
} TargetFdTrans;

/* Translator table indexed by fd; grown on demand by fd_trans_register(). */
static TargetFdTrans **target_fd_trans;

/* Number of slots currently allocated in target_fd_trans[]. */
static unsigned int target_fd_max;
554 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
556 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
557 return target_fd_trans[fd]->target_to_host_data;
559 return NULL;
562 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
564 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
565 return target_fd_trans[fd]->host_to_target_data;
567 return NULL;
570 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
572 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
573 return target_fd_trans[fd]->target_to_host_addr;
575 return NULL;
578 static void fd_trans_register(int fd, TargetFdTrans *trans)
580 unsigned int oldmax;
582 if (fd >= target_fd_max) {
583 oldmax = target_fd_max;
584 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
585 target_fd_trans = g_renew(TargetFdTrans *,
586 target_fd_trans, target_fd_max);
587 memset((void *)(target_fd_trans + oldmax), 0,
588 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
590 target_fd_trans[fd] = trans;
593 static void fd_trans_unregister(int fd)
595 if (fd >= 0 && fd < target_fd_max) {
596 target_fd_trans[fd] = NULL;
600 static void fd_trans_dup(int oldfd, int newfd)
602 fd_trans_unregister(newfd);
603 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
604 fd_trans_register(newfd, target_fd_trans[oldfd]);
/*
 * getcwd() wrapper following the kernel syscall convention: on success
 * return the path length including the trailing NUL; on failure return
 * -1 with errno already set by getcwd().
 */
static int sys_getcwd1(char *buf, size_t size)
{
    if (!getcwd(buf, size)) {
        return -1;
    }
    return strlen(buf) + 1;
}
#ifdef TARGET_NR_utimensat
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Host kernel headers predate utimensat: report ENOSYS like a missing
 * syscall would. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
/* Host lacks renameat2: flags==0 is equivalent to plain renameat();
 * any non-zero flags cannot be emulated, so report ENOSYS. */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

/* Thin wrappers so the syscall dispatch code has a uniform sys_* name for
 * the host's inotify entry points. */
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
    return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
    return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

/* Find a free slot in g_posix_timers[], mark it in use with a placeholder
 * value, and return its index; return -1 if all 32 slots are taken.
 * NOTE(review): the scan-then-claim is not atomic — two threads could
 * claim the same slot; see the FIXME below. */
static inline int next_free_host_timer(void)
{
    int k ;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            /* Reserve the slot until the real timer_t is stored. */
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
/* regpairs_aligned() returns non-zero when the target ABI passes 64-bit
 * syscall arguments in aligned (even/odd) register pairs, so the dispatcher
 * must skip a register before reading them. */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env, int num)
{
    /* Only the EABI variant of the ARM ABI aligns register pairs. */
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_SH4)
/* SH4 doesn't align register pairs, except for p{read,write}64 */
static inline int regpairs_aligned(void *cpu_env, int num)
{
    switch (num) {
    case TARGET_NR_pread64:
    case TARGET_NR_pwrite64:
        return 1;

    default:
        return 0;
    }
}
#elif defined(TARGET_XTENSA)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
#endif
753 #define ERRNO_TABLE_SIZE 1200
755 /* target_to_host_errno_table[] is initialized from
756 * host_to_target_errno_table[] in syscall_init(). */
757 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
761 * This list is the union of errno values overridden in asm-<arch>/errno.h
762 * minus the errnos that are not actually generic to all archs.
764 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
765 [EAGAIN] = TARGET_EAGAIN,
766 [EIDRM] = TARGET_EIDRM,
767 [ECHRNG] = TARGET_ECHRNG,
768 [EL2NSYNC] = TARGET_EL2NSYNC,
769 [EL3HLT] = TARGET_EL3HLT,
770 [EL3RST] = TARGET_EL3RST,
771 [ELNRNG] = TARGET_ELNRNG,
772 [EUNATCH] = TARGET_EUNATCH,
773 [ENOCSI] = TARGET_ENOCSI,
774 [EL2HLT] = TARGET_EL2HLT,
775 [EDEADLK] = TARGET_EDEADLK,
776 [ENOLCK] = TARGET_ENOLCK,
777 [EBADE] = TARGET_EBADE,
778 [EBADR] = TARGET_EBADR,
779 [EXFULL] = TARGET_EXFULL,
780 [ENOANO] = TARGET_ENOANO,
781 [EBADRQC] = TARGET_EBADRQC,
782 [EBADSLT] = TARGET_EBADSLT,
783 [EBFONT] = TARGET_EBFONT,
784 [ENOSTR] = TARGET_ENOSTR,
785 [ENODATA] = TARGET_ENODATA,
786 [ETIME] = TARGET_ETIME,
787 [ENOSR] = TARGET_ENOSR,
788 [ENONET] = TARGET_ENONET,
789 [ENOPKG] = TARGET_ENOPKG,
790 [EREMOTE] = TARGET_EREMOTE,
791 [ENOLINK] = TARGET_ENOLINK,
792 [EADV] = TARGET_EADV,
793 [ESRMNT] = TARGET_ESRMNT,
794 [ECOMM] = TARGET_ECOMM,
795 [EPROTO] = TARGET_EPROTO,
796 [EDOTDOT] = TARGET_EDOTDOT,
797 [EMULTIHOP] = TARGET_EMULTIHOP,
798 [EBADMSG] = TARGET_EBADMSG,
799 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
800 [EOVERFLOW] = TARGET_EOVERFLOW,
801 [ENOTUNIQ] = TARGET_ENOTUNIQ,
802 [EBADFD] = TARGET_EBADFD,
803 [EREMCHG] = TARGET_EREMCHG,
804 [ELIBACC] = TARGET_ELIBACC,
805 [ELIBBAD] = TARGET_ELIBBAD,
806 [ELIBSCN] = TARGET_ELIBSCN,
807 [ELIBMAX] = TARGET_ELIBMAX,
808 [ELIBEXEC] = TARGET_ELIBEXEC,
809 [EILSEQ] = TARGET_EILSEQ,
810 [ENOSYS] = TARGET_ENOSYS,
811 [ELOOP] = TARGET_ELOOP,
812 [ERESTART] = TARGET_ERESTART,
813 [ESTRPIPE] = TARGET_ESTRPIPE,
814 [ENOTEMPTY] = TARGET_ENOTEMPTY,
815 [EUSERS] = TARGET_EUSERS,
816 [ENOTSOCK] = TARGET_ENOTSOCK,
817 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
818 [EMSGSIZE] = TARGET_EMSGSIZE,
819 [EPROTOTYPE] = TARGET_EPROTOTYPE,
820 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
821 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
822 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
823 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
824 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
825 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
826 [EADDRINUSE] = TARGET_EADDRINUSE,
827 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
828 [ENETDOWN] = TARGET_ENETDOWN,
829 [ENETUNREACH] = TARGET_ENETUNREACH,
830 [ENETRESET] = TARGET_ENETRESET,
831 [ECONNABORTED] = TARGET_ECONNABORTED,
832 [ECONNRESET] = TARGET_ECONNRESET,
833 [ENOBUFS] = TARGET_ENOBUFS,
834 [EISCONN] = TARGET_EISCONN,
835 [ENOTCONN] = TARGET_ENOTCONN,
836 [EUCLEAN] = TARGET_EUCLEAN,
837 [ENOTNAM] = TARGET_ENOTNAM,
838 [ENAVAIL] = TARGET_ENAVAIL,
839 [EISNAM] = TARGET_EISNAM,
840 [EREMOTEIO] = TARGET_EREMOTEIO,
841 [EDQUOT] = TARGET_EDQUOT,
842 [ESHUTDOWN] = TARGET_ESHUTDOWN,
843 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
844 [ETIMEDOUT] = TARGET_ETIMEDOUT,
845 [ECONNREFUSED] = TARGET_ECONNREFUSED,
846 [EHOSTDOWN] = TARGET_EHOSTDOWN,
847 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
848 [EALREADY] = TARGET_EALREADY,
849 [EINPROGRESS] = TARGET_EINPROGRESS,
850 [ESTALE] = TARGET_ESTALE,
851 [ECANCELED] = TARGET_ECANCELED,
852 [ENOMEDIUM] = TARGET_ENOMEDIUM,
853 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
854 #ifdef ENOKEY
855 [ENOKEY] = TARGET_ENOKEY,
856 #endif
857 #ifdef EKEYEXPIRED
858 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
859 #endif
860 #ifdef EKEYREVOKED
861 [EKEYREVOKED] = TARGET_EKEYREVOKED,
862 #endif
863 #ifdef EKEYREJECTED
864 [EKEYREJECTED] = TARGET_EKEYREJECTED,
865 #endif
866 #ifdef EOWNERDEAD
867 [EOWNERDEAD] = TARGET_EOWNERDEAD,
868 #endif
869 #ifdef ENOTRECOVERABLE
870 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
871 #endif
872 #ifdef ENOMSG
873 [ENOMSG] = TARGET_ENOMSG,
874 #endif
875 #ifdef ERKFILL
876 [ERFKILL] = TARGET_ERFKILL,
877 #endif
878 #ifdef EHWPOISON
879 [EHWPOISON] = TARGET_EHWPOISON,
880 #endif
883 static inline int host_to_target_errno(int err)
885 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
886 host_to_target_errno_table[err]) {
887 return host_to_target_errno_table[err];
889 return err;
892 static inline int target_to_host_errno(int err)
894 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
895 target_to_host_errno_table[err]) {
896 return target_to_host_errno_table[err];
898 return err;
901 static inline abi_long get_errno(abi_long ret)
903 if (ret == -1)
904 return -host_to_target_errno(errno);
905 else
906 return ret;
909 const char *target_strerror(int err)
911 if (err == TARGET_ERESTARTSYS) {
912 return "To be restarted";
914 if (err == TARGET_QEMU_ESIGRETURN) {
915 return "Successful exit from sigreturn";
918 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
919 return NULL;
921 return strerror(target_to_host_errno(err));
/* safe_syscallN(type, name, ...) defines safe_<name>(): like the _syscallN
 * wrappers above, but the call goes through safe_syscall() instead of
 * syscall(). */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
/* Instantiate safe_* wrappers for the blocking syscalls used below. */
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#ifdef TARGET_NR_clock_nanosleep
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
/* SysV IPC: hosts either expose the individual msgsnd/msgrcv/semtimedop
 * syscalls, or multiplex them through the single ipc() syscall. */
1019 #ifdef __NR_msgsnd
1020 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
1021              int, flags)
1022 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
1023              long, msgtype, int, flags)
1024 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
1025              unsigned, nsops, const struct timespec *, timeout)
1026 #else
1027 /* This host kernel architecture uses a single ipc syscall; fake up
1028 * wrappers for the sub-operations to hide this implementation detail.
1029 * Annoyingly we can't include linux/ipc.h to get the constant definitions
1030 * for the call parameter because some structs in there conflict with the
1031 * sys/ipc.h ones. So we just define them here, and rely on them being
1032 * the same for all host architectures.
1034 #define Q_SEMTIMEDOP 4
1035 #define Q_MSGSND 11
1036 #define Q_MSGRCV 12
/* Encode an ipc() "call" argument: version number in the top 16 bits */
1037 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
1039 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
1040              void *, ptr, long, fifth)
/* Same signatures as the direct-syscall wrappers above, routed via ipc() */
1041 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
1043     return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
1045 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
1047     return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
1049 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
1050                            const struct timespec *timeout)
1052     return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
1053                     (long)timeout);
1055 #endif
/* POSIX message queue wrappers, only when both guest and host support them */
1056 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1057 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
1058              size_t, len, unsigned, prio, const struct timespec *, timeout)
1059 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
1060              size_t, len, unsigned *, prio, const struct timespec *, timeout)
1061 #endif
1062 /* We do ioctl like this rather than via safe_syscall3 to preserve the
1063 * "third argument might be integer or pointer or not present" behaviour of
1064 * the libc function.
1066 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
1067 /* Similarly for fcntl. Note that callers must always:
1068 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
1069 * use the flock64 struct rather than unsuffixed flock
1070 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
1072 #ifdef __NR_fcntl64
1073 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
1074 #else
1075 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
1076 #endif
1078 static inline int host_to_target_sock_type(int host_type)
1080 int target_type;
1082 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
1083 case SOCK_DGRAM:
1084 target_type = TARGET_SOCK_DGRAM;
1085 break;
1086 case SOCK_STREAM:
1087 target_type = TARGET_SOCK_STREAM;
1088 break;
1089 default:
1090 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
1091 break;
1094 #if defined(SOCK_CLOEXEC)
1095 if (host_type & SOCK_CLOEXEC) {
1096 target_type |= TARGET_SOCK_CLOEXEC;
1098 #endif
1100 #if defined(SOCK_NONBLOCK)
1101 if (host_type & SOCK_NONBLOCK) {
1102 target_type |= TARGET_SOCK_NONBLOCK;
1104 #endif
1106 return target_type;
/* Guest program-break bookkeeping:
 *   target_brk          - current guest brk value
 *   target_original_brk - brk at process start (lower bound for shrinking)
 *   brk_page            - highest host page already reserved for the heap
 */
1109 static abi_ulong target_brk;
1110 static abi_ulong target_original_brk;
1111 static abi_ulong brk_page;
/* Record the initial program break, page-aligned; called at load time. */
1113 void target_set_brk(abi_ulong new_brk)
1115     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
1116     brk_page = HOST_PAGE_ALIGN(target_brk);
/* Debug tracing for do_brk(); enable the fprintf variant when debugging. */
1119 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
1120 #define DEBUGF_BRK(message, args...)
1122 /* do_brk() must return target values and target errnos. */
1123 abi_long do_brk(abi_ulong new_brk)
1125     abi_long mapped_addr;
1126     abi_ulong new_alloc_size;
1128     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
/* brk(0) is a query: return the current break unchanged. */
1130     if (!new_brk) {
1131         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
1132         return target_brk;
/* Refuse to shrink below the initial break (matches kernel behaviour). */
1134     if (new_brk < target_original_brk) {
1135         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
1136                    target_brk);
1137         return target_brk;
1140     /* If the new brk is less than the highest page reserved to the
1141      * target heap allocation, set it and we're almost done... */
1142     if (new_brk <= brk_page) {
1143         /* Heap contents are initialized to zero, as for anonymous
1144          * mapped pages. */
1145         if (new_brk > target_brk) {
1146             memset(g2h(target_brk), 0, new_brk - target_brk);
1148         target_brk = new_brk;
1149         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
1150         return target_brk;
1153     /* We need to allocate more memory after the brk... Note that
1154      * we don't use MAP_FIXED because that will map over the top of
1155      * any existing mapping (like the one with the host libc or qemu
1156      * itself); instead we treat "mapped but at wrong address" as
1157      * a failure and unmap again.
1159     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
1160     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
1161                                         PROT_READ|PROT_WRITE,
1162                                         MAP_ANON|MAP_PRIVATE, 0, 0));
1164     if (mapped_addr == brk_page) {
1165         /* Heap contents are initialized to zero, as for anonymous
1166          * mapped pages. Technically the new pages are already
1167          * initialized to zero since they *are* anonymous mapped
1168          * pages, however we have to take care with the contents that
1169          * come from the remaining part of the previous page: it may
1170          * contains garbage data due to a previous heap usage (grown
1171          * then shrunken). */
1172         memset(g2h(target_brk), 0, brk_page - target_brk);
1174         target_brk = new_brk;
1175         brk_page = HOST_PAGE_ALIGN(target_brk);
1176         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
1177                    target_brk);
1178         return target_brk;
1179     } else if (mapped_addr != -1) {
1180         /* Mapped but at wrong address, meaning there wasn't actually
1181          * enough space for this brk.
1183         target_munmap(mapped_addr, new_alloc_size);
1184         mapped_addr = -1;
1185         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
1187     else {
1188         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
1191 #if defined(TARGET_ALPHA)
1192     /* We (partially) emulate OSF/1 on Alpha, which requires we
1193        return a proper errno, not an unchanged brk value. */
1194     return -TARGET_ENOMEM;
1195 #endif
1196     /* For everything else, return the previous break. */
1197     return target_brk;
/* Unmarshal a guest fd_set (bitmask of n descriptors, stored as an array
 * of abi_ulong words in guest byte order) into a host fd_set.
 * Returns 0 on success, -TARGET_EFAULT if the guest address is bad.
 */
1200 static inline abi_long copy_from_user_fdset(fd_set *fds,
1201                                             abi_ulong target_fds_addr,
1202                                             int n)
1204     int i, nw, j, k;
1205     abi_ulong b, *target_fds;
/* number of abi_ulong words needed for n bits */
1207     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1208     if (!(target_fds = lock_user(VERIFY_READ,
1209                                  target_fds_addr,
1210                                  sizeof(abi_ulong) * nw,
1211                                  1)))
1212         return -TARGET_EFAULT;
1214     FD_ZERO(fds);
1215     k = 0;
1216     for (i = 0; i < nw; i++) {
1217         /* grab the abi_ulong */
1218         __get_user(b, &target_fds[i]);
1219         for (j = 0; j < TARGET_ABI_BITS; j++) {
1220             /* check the bit inside the abi_ulong */
1221             if ((b >> j) & 1)
1222                 FD_SET(k, fds);
1223             k++;
1227     unlock_user(target_fds, target_fds_addr, 0);
1229     return 0;
1232 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1233 abi_ulong target_fds_addr,
1234 int n)
1236 if (target_fds_addr) {
1237 if (copy_from_user_fdset(fds, target_fds_addr, n))
1238 return -TARGET_EFAULT;
1239 *fds_ptr = fds;
1240 } else {
1241 *fds_ptr = NULL;
1243 return 0;
/* Marshal a host fd_set back into the guest's abi_ulong-array encoding.
 * Returns 0 on success, -TARGET_EFAULT if the guest address is bad.
 */
1246 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1247                                           const fd_set *fds,
1248                                           int n)
1250     int i, nw, j, k;
1251     abi_long v;
1252     abi_ulong *target_fds;
1254     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1255     if (!(target_fds = lock_user(VERIFY_WRITE,
1256                                  target_fds_addr,
1257                                  sizeof(abi_ulong) * nw,
1258                                  0)))
1259         return -TARGET_EFAULT;
1261     k = 0;
1262     for (i = 0; i < nw; i++) {
/* rebuild one abi_ulong word bit by bit */
1263         v = 0;
1264         for (j = 0; j < TARGET_ABI_BITS; j++) {
1265             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1266             k++;
1268         __put_user(v, &target_fds[i]);
1271     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1273     return 0;
/* Host scheduler tick rate (clock_t granularity); Alpha uses 1024 Hz. */
1276 #if defined(__alpha__)
1277 #define HOST_HZ 1024
1278 #else
1279 #define HOST_HZ 100
1280 #endif
/* Rescale a host clock_t tick count to the guest's HZ.  The 64-bit
 * intermediate avoids overflow in the multiplication. */
1282 static inline abi_long host_to_target_clock_t(long ticks)
1284 #if HOST_HZ == TARGET_HZ
1285     return ticks;
1286 #else
1287     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1288 #endif
/* Copy a host struct rusage out to guest memory, byte-swapping every
 * field.  Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
1291 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1292                                              const struct rusage *rusage)
1294     struct target_rusage *target_rusage;
1296     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1297         return -TARGET_EFAULT;
1298     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1299     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1300     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1301     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1302     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1303     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1304     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1305     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1306     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1307     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1308     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1309     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1310     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1311     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1312     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1313     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1314     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1315     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1316     unlock_user_struct(target_rusage, target_addr, 1);
1318     return 0;
/* Convert a guest rlimit value to the host rlim_t.  Both the guest's
 * RLIM_INFINITY encoding and any value that does not survive the
 * round-trip into rlim_t (i.e. would be truncated) map to host
 * RLIM_INFINITY.
 */
1321 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1323     abi_ulong target_rlim_swap;
1324     rlim_t result;
1326     target_rlim_swap = tswapal(target_rlim);
1327     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1328         return RLIM_INFINITY;
1330     result = target_rlim_swap;
/* detect truncation when rlim_t is narrower than abi_ulong */
1331     if (target_rlim_swap != (rlim_t)result)
1332         return RLIM_INFINITY;
1334     return result;
/* Convert a host rlim_t to the guest's encoding (byte-swapped).
 * Host RLIM_INFINITY, and any value too large to represent in the
 * guest's signed range, become TARGET_RLIM_INFINITY.
 */
1337 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1339     abi_ulong target_rlim_swap;
1340     abi_ulong result;
1342     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1343         target_rlim_swap = TARGET_RLIM_INFINITY;
1344     else
1345         target_rlim_swap = rlim;
1346     result = tswapal(target_rlim_swap);
1348     return result;
/* Map a guest RLIMIT_* resource code to the host's value.  Unknown
 * codes are passed through unchanged so the host syscall can reject
 * them with its own error.
 */
1351 static inline int target_to_host_resource(int code)
1353     switch (code) {
1354     case TARGET_RLIMIT_AS:
1355         return RLIMIT_AS;
1356     case TARGET_RLIMIT_CORE:
1357         return RLIMIT_CORE;
1358     case TARGET_RLIMIT_CPU:
1359         return RLIMIT_CPU;
1360     case TARGET_RLIMIT_DATA:
1361         return RLIMIT_DATA;
1362     case TARGET_RLIMIT_FSIZE:
1363         return RLIMIT_FSIZE;
1364     case TARGET_RLIMIT_LOCKS:
1365         return RLIMIT_LOCKS;
1366     case TARGET_RLIMIT_MEMLOCK:
1367         return RLIMIT_MEMLOCK;
1368     case TARGET_RLIMIT_MSGQUEUE:
1369         return RLIMIT_MSGQUEUE;
1370     case TARGET_RLIMIT_NICE:
1371         return RLIMIT_NICE;
1372     case TARGET_RLIMIT_NOFILE:
1373         return RLIMIT_NOFILE;
1374     case TARGET_RLIMIT_NPROC:
1375         return RLIMIT_NPROC;
1376     case TARGET_RLIMIT_RSS:
1377         return RLIMIT_RSS;
1378     case TARGET_RLIMIT_RTPRIO:
1379         return RLIMIT_RTPRIO;
1380     case TARGET_RLIMIT_SIGPENDING:
1381         return RLIMIT_SIGPENDING;
1382     case TARGET_RLIMIT_STACK:
1383         return RLIMIT_STACK;
1384     default:
1385         return code;
/* Read a struct timeval from guest memory (byte-swapped via __get_user).
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
1389 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1390                                               abi_ulong target_tv_addr)
1392     struct target_timeval *target_tv;
1394     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1395         return -TARGET_EFAULT;
1397     __get_user(tv->tv_sec, &target_tv->tv_sec);
1398     __get_user(tv->tv_usec, &target_tv->tv_usec);
1400     unlock_user_struct(target_tv, target_tv_addr, 0);
1402     return 0;
/* Write a struct timeval to guest memory (byte-swapped via __put_user).
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
1405 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1406                                             const struct timeval *tv)
1408     struct target_timeval *target_tv;
1410     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1411         return -TARGET_EFAULT;
1413     __put_user(tv->tv_sec, &target_tv->tv_sec);
1414     __put_user(tv->tv_usec, &target_tv->tv_usec);
1416     unlock_user_struct(target_tv, target_tv_addr, 1);
1418     return 0;
/* Read a struct timezone from guest memory.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
1421 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1422                                                abi_ulong target_tz_addr)
1424     struct target_timezone *target_tz;
1426     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1427         return -TARGET_EFAULT;
1430     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1431     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1433     unlock_user_struct(target_tz, target_tz_addr, 0);
1435     return 0;
/* POSIX message queue attribute marshalling (guest <-> host mq_attr). */
1438 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1439 #include <mqueue.h>
/* Read a struct mq_attr from guest memory; 0 or -TARGET_EFAULT. */
1441 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1442                                               abi_ulong target_mq_attr_addr)
1444     struct target_mq_attr *target_mq_attr;
1446     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1447                           target_mq_attr_addr, 1))
1448         return -TARGET_EFAULT;
1450     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1451     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1452     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1453     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1455     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1457     return 0;
/* Write a struct mq_attr back to guest memory; 0 or -TARGET_EFAULT. */
1460 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1461                                             const struct mq_attr *attr)
1463     struct target_mq_attr *target_mq_attr;
1465     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1466                           target_mq_attr_addr, 0))
1467         return -TARGET_EFAULT;
1469     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1470     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1471     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1472     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1474     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1476     return 0;
1478 #endif
1480 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1481 /* do_select() must return target values and target errnos. */
/* Emulates select(2) on top of the host's pselect6: unmarshal the three
 * fd sets and the timeout from guest memory, run the syscall, then copy
 * the (possibly modified) sets and remaining timeout back out.
 */
1482 static abi_long do_select(int n,
1483                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1484                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1486     fd_set rfds, wfds, efds;
1487     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1488     struct timeval tv;
1489     struct timespec ts, *ts_ptr;
1490     abi_long ret;
1492     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1493     if (ret) {
1494         return ret;
1496     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1497     if (ret) {
1498         return ret;
1500     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1501     if (ret) {
1502         return ret;
/* select takes a timeval; pselect6 wants a timespec */
1505     if (target_tv_addr) {
1506         if (copy_from_user_timeval(&tv, target_tv_addr))
1507             return -TARGET_EFAULT;
1508         ts.tv_sec = tv.tv_sec;
1509         ts.tv_nsec = tv.tv_usec * 1000;
1510         ts_ptr = &ts;
1511     } else {
1512         ts_ptr = NULL;
1515     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1516                                   ts_ptr, NULL));
1518     if (!is_error(ret)) {
1519         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1520             return -TARGET_EFAULT;
1521         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1522             return -TARGET_EFAULT;
1523         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1524             return -TARGET_EFAULT;
/* Linux select() updates the timeout with the time not slept */
1526         if (target_tv_addr) {
1527             tv.tv_sec = ts.tv_sec;
1528             tv.tv_usec = ts.tv_nsec / 1000;
1529             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1530                 return -TARGET_EFAULT;
1535     return ret;
/* Old-style select: a single guest pointer to a struct carrying the five
 * select arguments.  Unpack it and forward to do_select(). */
1538 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1539 static abi_long do_old_select(abi_ulong arg1)
1541     struct target_sel_arg_struct *sel;
1542     abi_ulong inp, outp, exp, tvp;
1543     long nsel;
1545     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1546         return -TARGET_EFAULT;
1549     nsel = tswapal(sel->n);
1550     inp = tswapal(sel->inp);
1551     outp = tswapal(sel->outp);
1552     exp = tswapal(sel->exp);
1553     tvp = tswapal(sel->tvp);
1555     unlock_user_struct(sel, arg1, 0);
1557     return do_select(nsel, inp, outp, exp, tvp);
1559 #endif
1560 #endif
/* Host pipe2() wrapper; reports ENOSYS when the host libc lacks it. */
1562 static abi_long do_pipe2(int host_pipe[], int flags)
1564 #ifdef CONFIG_PIPE2
1565     return pipe2(host_pipe, flags);
1566 #else
1567     return -ENOSYS;
1568 #endif
/* Implement pipe()/pipe2() for the guest.  Some targets return the two
 * descriptors in registers for the original pipe syscall instead of
 * writing them through the user pointer; those conventions do NOT apply
 * to pipe2, hence the is_pipe2 flag.
 */
1571 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1572                         int flags, int is_pipe2)
1574     int host_pipe[2];
1575     abi_long ret;
1576     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1578     if (is_error(ret))
1579         return get_errno(ret);
1581     /* Several targets have special calling conventions for the original
1582        pipe syscall, but didn't replicate this into the pipe2 syscall. */
1583     if (!is_pipe2) {
1584 #if defined(TARGET_ALPHA)
/* second fd goes in a register; first fd is the return value */
1585         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1586         return host_pipe[0];
1587 #elif defined(TARGET_MIPS)
1588         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1589         return host_pipe[0];
1590 #elif defined(TARGET_SH4)
1591         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1592         return host_pipe[0];
1593 #elif defined(TARGET_SPARC)
1594         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1595         return host_pipe[0];
1596 #endif
/* generic path: write both descriptors through the guest pointer */
1599     if (put_user_s32(host_pipe[0], pipedes)
1600         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1601         return -TARGET_EFAULT;
1602     return get_errno(ret);
/* Convert a guest ip_mreq/ip_mreqn (IP multicast membership request)
 * into the host struct.  The in_addr fields are already network byte
 * order, so only the optional ifindex (present when len indicates the
 * larger ip_mreqn form) needs swapping.
 */
1605 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1606                                               abi_ulong target_addr,
1607                                               socklen_t len)
1609     struct target_ip_mreqn *target_smreqn;
1611     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1612     if (!target_smreqn)
1613         return -TARGET_EFAULT;
1614     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1615     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1616     if (len == sizeof(struct target_ip_mreqn))
1617         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1618     unlock_user(target_smreqn, target_addr, 0);
1620     return 0;
/* Convert a guest sockaddr into a host one.  Handles the per-family
 * quirks: fd-specific translators, AF_UNIX path-termination fixup, and
 * byte-swapping of AF_NETLINK / AF_PACKET fields.
 * Returns 0 on success or -TARGET_EFAULT.
 */
1623 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1624                                                abi_ulong target_addr,
1625                                                socklen_t len)
1627     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1628     sa_family_t sa_family;
1629     struct target_sockaddr *target_saddr;
/* some fds (e.g. netlink sockets) install their own address translator */
1631     if (fd_trans_target_to_host_addr(fd)) {
1632         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1635     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1636     if (!target_saddr)
1637         return -TARGET_EFAULT;
1639     sa_family = tswap16(target_saddr->sa_family);
1641     /* Oops. The caller might send a incomplete sun_path; sun_path
1642      * must be terminated by \0 (see the manual page), but
1643      * unfortunately it is quite common to specify sockaddr_un
1644      * length as "strlen(x->sun_path)" while it should be
1645      * "strlen(...) + 1". We'll fix that here if needed.
1646      * Linux kernel has a similar feature.
1649     if (sa_family == AF_UNIX) {
1650         if (len < unix_maxlen && len > 0) {
1651             char *cp = (char*)target_saddr;
/* extend len by one if the last byte is non-NUL but the next is NUL */
1653             if ( cp[len-1] && !cp[len] )
1654                 len++;
1656         if (len > unix_maxlen)
1657             len = unix_maxlen;
1660     memcpy(addr, target_saddr, len);
1661     addr->sa_family = sa_family;
1662     if (sa_family == AF_NETLINK) {
1663         struct sockaddr_nl *nladdr;
1665         nladdr = (struct sockaddr_nl *)addr;
1666         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1667         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1668     } else if (sa_family == AF_PACKET) {
1669         struct target_sockaddr_ll *lladdr;
1671         lladdr = (struct target_sockaddr_ll *)addr;
1672         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1673         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1675     unlock_user(target_saddr, target_addr, 0);
1677     return 0;
1680 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1681 struct sockaddr *addr,
1682 socklen_t len)
1684 struct target_sockaddr *target_saddr;
1686 if (len == 0) {
1687 return 0;
1689 assert(addr);
1691 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1692 if (!target_saddr)
1693 return -TARGET_EFAULT;
1694 memcpy(target_saddr, addr, len);
1695 if (len >= offsetof(struct target_sockaddr, sa_family) +
1696 sizeof(target_saddr->sa_family)) {
1697 target_saddr->sa_family = tswap16(addr->sa_family);
1699 if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1700 struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1701 target_nl->nl_pid = tswap32(target_nl->nl_pid);
1702 target_nl->nl_groups = tswap32(target_nl->nl_groups);
1703 } else if (addr->sa_family == AF_PACKET) {
1704 struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1705 target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1706 target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1707 } else if (addr->sa_family == AF_INET6 &&
1708 len >= sizeof(struct target_sockaddr_in6)) {
1709 struct target_sockaddr_in6 *target_in6 =
1710 (struct target_sockaddr_in6 *)target_saddr;
1711 target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1713 unlock_user(target_saddr, target_addr, len);
1715 return 0;
/* Convert the ancillary-data (cmsg) chain of a guest msghdr into the
 * host msghdr's control buffer, translating SCM_RIGHTS fds and
 * SCM_CREDENTIALS; unknown types are memcpy'd with a warning.
 * On return msgh->msg_controllen holds the host space actually used.
 */
1718 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1719                                            struct target_msghdr *target_msgh)
1721     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1722     abi_long msg_controllen;
1723     abi_ulong target_cmsg_addr;
1724     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1725     socklen_t space = 0;
1727     msg_controllen = tswapal(target_msgh->msg_controllen);
1728     if (msg_controllen < sizeof (struct target_cmsghdr))
1729         goto the_end;
1730     target_cmsg_addr = tswapal(target_msgh->msg_control);
1731     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1732     target_cmsg_start = target_cmsg;
1733     if (!target_cmsg)
1734         return -TARGET_EFAULT;
1736     while (cmsg && target_cmsg) {
1737         void *data = CMSG_DATA(cmsg);
1738         void *target_data = TARGET_CMSG_DATA(target_cmsg);
/* payload length excludes the header */
1740         int len = tswapal(target_cmsg->cmsg_len)
1741             - sizeof(struct target_cmsghdr);
1743         space += CMSG_SPACE(len);
1744         if (space > msgh->msg_controllen) {
1745             space -= CMSG_SPACE(len);
1746             /* This is a QEMU bug, since we allocated the payload
1747              * area ourselves (unlike overflow in host-to-target
1748              * conversion, which is just the guest giving us a buffer
1749              * that's too small). It can't happen for the payload types
1750              * we currently support; if it becomes an issue in future
1751              * we would need to improve our allocation strategy to
1752              * something more intelligent than "twice the size of the
1753              * target buffer we're reading from".
1755             gemu_log("Host cmsg overflow\n");
1756             break;
1759         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1760             cmsg->cmsg_level = SOL_SOCKET;
1761         } else {
1762             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1764         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1765         cmsg->cmsg_len = CMSG_LEN(len);
/* SCM_RIGHTS: fds need individual byte-swapping */
1767         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1768             int *fd = (int *)data;
1769             int *target_fd = (int *)target_data;
1770             int i, numfds = len / sizeof(int);
1772             for (i = 0; i < numfds; i++) {
1773                 __get_user(fd[i], target_fd + i);
1775         } else if (cmsg->cmsg_level == SOL_SOCKET
1776                    && cmsg->cmsg_type == SCM_CREDENTIALS) {
1777             struct ucred *cred = (struct ucred *)data;
1778             struct target_ucred *target_cred =
1779                 (struct target_ucred *)target_data;
1781             __get_user(cred->pid, &target_cred->pid);
1782             __get_user(cred->uid, &target_cred->uid);
1783             __get_user(cred->gid, &target_cred->gid);
1784         } else {
1785             gemu_log("Unsupported ancillary data: %d/%d\n",
1786                      cmsg->cmsg_level, cmsg->cmsg_type);
1787             memcpy(data, target_data, len);
1790         cmsg = CMSG_NXTHDR(msgh, cmsg);
1791         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1792                                          target_cmsg_start);
1794     unlock_user(target_cmsg, target_cmsg_addr, 0);
1795  the_end:
1796     msgh->msg_controllen = space;
1797     return 0;
/* Convert the ancillary-data (cmsg) chain of a host msghdr back into the
 * guest msghdr's control buffer.  Mirrors Linux's put_cmsg() truncation
 * behaviour: partial payloads may be copied (with MSG_CTRUNC set), but
 * never a partial header.  Handles SCM_RIGHTS, SO_TIMESTAMP,
 * SCM_CREDENTIALS, IP_TTL/IP_RECVERR and IPV6_HOPLIMIT/IPV6_RECVERR;
 * anything else is memcpy'd with a warning.
 */
1800 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1801                                            struct msghdr *msgh)
1803     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1804     abi_long msg_controllen;
1805     abi_ulong target_cmsg_addr;
1806     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1807     socklen_t space = 0;
1809     msg_controllen = tswapal(target_msgh->msg_controllen);
1810     if (msg_controllen < sizeof (struct target_cmsghdr))
1811         goto the_end;
1812     target_cmsg_addr = tswapal(target_msgh->msg_control);
1813     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1814     target_cmsg_start = target_cmsg;
1815     if (!target_cmsg)
1816         return -TARGET_EFAULT;
1818     while (cmsg && target_cmsg) {
1819         void *data = CMSG_DATA(cmsg);
1820         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1822         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1823         int tgt_len, tgt_space;
1825         /* We never copy a half-header but may copy half-data;
1826          * this is Linux's behaviour in put_cmsg(). Note that
1827          * truncation here is a guest problem (which we report
1828          * to the guest via the CTRUNC bit), unlike truncation
1829          * in target_to_host_cmsg, which is a QEMU bug.
1831         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1832             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1833             break;
1836         if (cmsg->cmsg_level == SOL_SOCKET) {
1837             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1838         } else {
1839             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1841         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1843         /* Payload types which need a different size of payload on
1844          * the target must adjust tgt_len here.
1846         tgt_len = len;
1847         switch (cmsg->cmsg_level) {
1848         case SOL_SOCKET:
1849             switch (cmsg->cmsg_type) {
1850             case SO_TIMESTAMP:
1851                 tgt_len = sizeof(struct target_timeval);
1852                 break;
1853             default:
1854                 break;
1856             break;
1857         default:
1858             break;
/* truncate tgt_len if the guest buffer can't hold the full payload */
1861         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1862             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1863             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1866         /* We must now copy-and-convert len bytes of payload
1867          * into tgt_len bytes of destination space. Bear in mind
1868          * that in both source and destination we may be dealing
1869          * with a truncated value!
1871         switch (cmsg->cmsg_level) {
1872         case SOL_SOCKET:
1873             switch (cmsg->cmsg_type) {
1874             case SCM_RIGHTS:
1876                 int *fd = (int *)data;
1877                 int *target_fd = (int *)target_data;
1878                 int i, numfds = tgt_len / sizeof(int);
1880                 for (i = 0; i < numfds; i++) {
1881                     __put_user(fd[i], target_fd + i);
1883                 break;
1885             case SO_TIMESTAMP:
1887                 struct timeval *tv = (struct timeval *)data;
1888                 struct target_timeval *target_tv =
1889                     (struct target_timeval *)target_data;
1891                 if (len != sizeof(struct timeval) ||
1892                     tgt_len != sizeof(struct target_timeval)) {
1893                     goto unimplemented;
1896                 /* copy struct timeval to target */
1897                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1898                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1899                 break;
1901             case SCM_CREDENTIALS:
1903                 struct ucred *cred = (struct ucred *)data;
1904                 struct target_ucred *target_cred =
1905                     (struct target_ucred *)target_data;
1907                 __put_user(cred->pid, &target_cred->pid);
1908                 __put_user(cred->uid, &target_cred->uid);
1909                 __put_user(cred->gid, &target_cred->gid);
1910                 break;
1912             default:
1913                 goto unimplemented;
1915             break;
1917         case SOL_IP:
1918             switch (cmsg->cmsg_type) {
1919             case IP_TTL:
1921                 uint32_t *v = (uint32_t *)data;
1922                 uint32_t *t_int = (uint32_t *)target_data;
1924                 if (len != sizeof(uint32_t) ||
1925                     tgt_len != sizeof(uint32_t)) {
1926                     goto unimplemented;
1928                 __put_user(*v, t_int);
1929                 break;
1931             case IP_RECVERR:
1933                 struct errhdr_t {
1934                     struct sock_extended_err ee;
1935                     struct sockaddr_in offender;
1937                 struct errhdr_t *errh = (struct errhdr_t *)data;
1938                 struct errhdr_t *target_errh =
1939                     (struct errhdr_t *)target_data;
1941                 if (len != sizeof(struct errhdr_t) ||
1942                     tgt_len != sizeof(struct errhdr_t)) {
1943                     goto unimplemented;
1945                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1946                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1947                 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1948                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1949                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1950                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1951                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1952                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1953                     (void *) &errh->offender, sizeof(errh->offender));
1954                 break;
1956             default:
1957                 goto unimplemented;
1959             break;
1961         case SOL_IPV6:
1962             switch (cmsg->cmsg_type) {
1963             case IPV6_HOPLIMIT:
1965                 uint32_t *v = (uint32_t *)data;
1966                 uint32_t *t_int = (uint32_t *)target_data;
1968                 if (len != sizeof(uint32_t) ||
1969                     tgt_len != sizeof(uint32_t)) {
1970                     goto unimplemented;
1972                 __put_user(*v, t_int);
1973                 break;
1975             case IPV6_RECVERR:
1977                 struct errhdr6_t {
1978                     struct sock_extended_err ee;
1979                     struct sockaddr_in6 offender;
1981                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1982                 struct errhdr6_t *target_errh =
1983                     (struct errhdr6_t *)target_data;
1985                 if (len != sizeof(struct errhdr6_t) ||
1986                     tgt_len != sizeof(struct errhdr6_t)) {
1987                     goto unimplemented;
1989                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1990                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1991                 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1992                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1993                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1994                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1995                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1996                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1997                     (void *) &errh->offender, sizeof(errh->offender));
1998                 break;
2000             default:
2001                 goto unimplemented;
2003             break;
2005         default:
2006         unimplemented:
2007             gemu_log("Unsupported ancillary data: %d/%d\n",
2008                      cmsg->cmsg_level, cmsg->cmsg_type);
2009             memcpy(target_data, data, MIN(len, tgt_len));
2010             if (tgt_len > len) {
2011                 memset(target_data + len, 0, tgt_len - len);
/* write the (possibly truncated) length and advance both chains */
2015         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2016         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2017         if (msg_controllen < tgt_space) {
2018             tgt_space = msg_controllen;
2020         msg_controllen -= tgt_space;
2021         space += tgt_space;
2022         cmsg = CMSG_NXTHDR(msgh, cmsg);
2023         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2024                                          target_cmsg_start);
2026     unlock_user(target_cmsg, target_cmsg_addr, space);
2027  the_end:
2028     target_msgh->msg_controllen = tswapal(space);
2029     return 0;
/* Byte-swap every field of a netlink message header in place.
 * The conversion is symmetric, so the same helper serves both the
 * host-to-target and target-to-host directions.
 */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
}
/* Walk a buffer of host-order netlink messages, invoking the supplied
 * payload converter on each, then byte-swapping the header for the
 * guest.  NLMSG_DONE/NLMSG_ERROR terminate the walk.  Note the header
 * must be swapped AFTER the length is read, since the converter and
 * loop arithmetic need host-order values.
 */
2041 static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
2042                                               size_t len,
2043                                               abi_long (*host_to_target_nlmsg)
2044                                               (struct nlmsghdr *))
2046     uint32_t nlmsg_len;
2047     abi_long ret;
2049     while (len > sizeof(struct nlmsghdr)) {
2051         nlmsg_len = nlh->nlmsg_len;
/* reject malformed lengths (shorter than a header or past the buffer) */
2052         if (nlmsg_len < sizeof(struct nlmsghdr) ||
2053             nlmsg_len > len) {
2054             break;
2057         switch (nlh->nlmsg_type) {
2058         case NLMSG_DONE:
2059             tswap_nlmsghdr(nlh);
2060             return 0;
2061         case NLMSG_NOOP:
2062             break;
2063         case NLMSG_ERROR:
2065             struct nlmsgerr *e = NLMSG_DATA(nlh);
2066             e->error = tswap32(e->error);
2067             tswap_nlmsghdr(&e->msg);
2068             tswap_nlmsghdr(nlh);
2069             return 0;
2071         default:
2072             ret = host_to_target_nlmsg(nlh);
2073             if (ret < 0) {
2074                 tswap_nlmsghdr(nlh);
2075                 return ret;
2077             break;
2079         tswap_nlmsghdr(nlh);
2080         len -= NLMSG_ALIGN(nlmsg_len);
2081         nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
2083     return 0;
/*
 * Walk a buffer of target-order netlink messages: swap each header to
 * host order first, then let the callback convert the payload.
 * NLMSG_DONE/NLMSG_ERROR end the walk; callback errors are propagated.
 */
static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*target_to_host_nlmsg)
                                                       (struct nlmsghdr *))
{
    int ret;

    while (len > sizeof(struct nlmsghdr)) {
        /* Validate the length before trusting the (still target-order)
         * header; tswap32() undoes the target byte order for the check. */
        if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
            tswap32(nlh->nlmsg_len) > len) {
            break;
        }
        tswap_nlmsghdr(nlh);
        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            /* The embedded copy of the original request header also
             * needs converting to host order. */
            tswap_nlmsghdr(&e->msg);
            return 0;
        }
        default:
            ret = target_to_host_nlmsg(nlh);
            if (ret < 0) {
                return ret;
            }
        }
        /* nlmsg_len is in host order at this point. */
        len -= NLMSG_ALIGN(nlh->nlmsg_len);
        nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
    }
    return 0;
}
2123 #ifdef CONFIG_RTNETLINK
/*
 * Iterate over host-order netlink attributes (struct nlattr), calling
 * the conversion callback on each payload and then byte-swapping the
 * attribute header for the target.  'context' is passed through to the
 * callback (used by the linkinfo converter to remember the device kind).
 */
static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
                                               size_t len, void *context,
                                               abi_long (*host_to_target_nlattr)
                                                        (struct nlattr *,
                                                         void *context))
{
    unsigned short nla_len;
    abi_long ret;

    while (len > sizeof(struct nlattr)) {
        /* Snapshot the host-order length before the header is swapped. */
        nla_len = nlattr->nla_len;
        if (nla_len < sizeof(struct nlattr) ||
            nla_len > len) {
            break;
        }
        ret = host_to_target_nlattr(nlattr, context);
        /* Swap the header only after conversion: the callback needs it
         * in host order to size and dispatch on the payload. */
        nlattr->nla_len = tswap16(nlattr->nla_len);
        nlattr->nla_type = tswap16(nlattr->nla_type);
        if (ret < 0) {
            return ret;
        }
        len -= NLA_ALIGN(nla_len);
        nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
    }
    return 0;
}
/*
 * Iterate over host-order routing attributes (struct rtattr), calling
 * the conversion callback on each payload and then byte-swapping the
 * attribute header for the target.  Mirrors
 * host_to_target_for_each_nlattr() but without a context pointer.
 */
static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*host_to_target_rtattr)
                                                        (struct rtattr *))
{
    unsigned short rta_len;
    abi_long ret;

    while (len > sizeof(struct rtattr)) {
        /* Snapshot the host-order length before the header is swapped. */
        rta_len = rtattr->rta_len;
        if (rta_len < sizeof(struct rtattr) ||
            rta_len > len) {
            break;
        }
        ret = host_to_target_rtattr(rtattr);
        /* The callback needs the header in host order, so swap after. */
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        if (ret < 0) {
            return ret;
        }
        len -= RTA_ALIGN(rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
    }
    return 0;
}
2177 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
/*
 * Convert one IFLA_BR_* (bridge device) attribute payload from host to
 * target byte order.  Attribute types are grouped by payload width;
 * single bytes and raw binary payloads need no swapping.  Unknown types
 * are logged and passed through unchanged.
 */
static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
                                                  void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* no data */
    case QEMU_IFLA_BR_FDB_FLUSH:
        break;
    /* binary */
    case QEMU_IFLA_BR_GROUP_ADDR:
        break;
    /* uint8_t */
    case QEMU_IFLA_BR_VLAN_FILTERING:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
    case QEMU_IFLA_BR_MCAST_ROUTER:
    case QEMU_IFLA_BR_MCAST_SNOOPING:
    case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
    case QEMU_IFLA_BR_MCAST_QUERIER:
    case QEMU_IFLA_BR_NF_CALL_IPTABLES:
    case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
    case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
    case QEMU_IFLA_BR_VLAN_STATS_ENABLED:
    case QEMU_IFLA_BR_MCAST_STATS_ENABLED:
    case QEMU_IFLA_BR_MCAST_IGMP_VERSION:
    case QEMU_IFLA_BR_MCAST_MLD_VERSION:
        break;
    /* uint16_t */
    case QEMU_IFLA_BR_PRIORITY:
    case QEMU_IFLA_BR_VLAN_PROTOCOL:
    case QEMU_IFLA_BR_GROUP_FWD_MASK:
    case QEMU_IFLA_BR_ROOT_PORT:
    case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uint32_t */
    case QEMU_IFLA_BR_FORWARD_DELAY:
    case QEMU_IFLA_BR_HELLO_TIME:
    case QEMU_IFLA_BR_MAX_AGE:
    case QEMU_IFLA_BR_AGEING_TIME:
    case QEMU_IFLA_BR_STP_STATE:
    case QEMU_IFLA_BR_ROOT_PATH_COST:
    case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
    case QEMU_IFLA_BR_MCAST_HASH_MAX:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BR_HELLO_TIMER:
    case QEMU_IFLA_BR_TCN_TIMER:
    case QEMU_IFLA_BR_GC_TIMER:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
    case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uint8_t[] */
    case QEMU_IFLA_BR_ROOT_ID:
    case QEMU_IFLA_BR_BRIDGE_ID:
        break;
    default:
        gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
/*
 * Convert one IFLA_BRPORT_* (bridge port / slave) attribute payload
 * from host to target byte order, grouped by payload width.  Unknown
 * types are logged and passed through unchanged.
 */
static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
                                                        void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* uint8_t */
    case QEMU_IFLA_BRPORT_STATE:
    case QEMU_IFLA_BRPORT_MODE:
    case QEMU_IFLA_BRPORT_GUARD:
    case QEMU_IFLA_BRPORT_PROTECT:
    case QEMU_IFLA_BRPORT_FAST_LEAVE:
    case QEMU_IFLA_BRPORT_LEARNING:
    case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
    case QEMU_IFLA_BRPORT_PROXYARP:
    case QEMU_IFLA_BRPORT_LEARNING_SYNC:
    case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
    case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
    case QEMU_IFLA_BRPORT_CONFIG_PENDING:
    case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
    case QEMU_IFLA_BRPORT_MCAST_FLOOD:
    case QEMU_IFLA_BRPORT_MCAST_TO_UCAST:
    case QEMU_IFLA_BRPORT_VLAN_TUNNEL:
    case QEMU_IFLA_BRPORT_BCAST_FLOOD:
    case QEMU_IFLA_BRPORT_NEIGH_SUPPRESS:
        break;
    /* uint16_t */
    case QEMU_IFLA_BRPORT_PRIORITY:
    case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
    case QEMU_IFLA_BRPORT_DESIGNATED_COST:
    case QEMU_IFLA_BRPORT_ID:
    case QEMU_IFLA_BRPORT_NO:
    case QEMU_IFLA_BRPORT_GROUP_FWD_MASK:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uint32_t */
    case QEMU_IFLA_BRPORT_COST:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
    case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
    case QEMU_IFLA_BRPORT_HOLD_TIMER:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uint8_t[] */
    case QEMU_IFLA_BRPORT_ROOT_ID:
    case QEMU_IFLA_BRPORT_BRIDGE_ID:
        break;
    default:
        gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
/*
 * Remembers the IFLA_INFO_KIND / IFLA_INFO_SLAVE_KIND strings seen while
 * walking an IFLA_LINKINFO nest, so that the subsequent INFO_DATA /
 * INFO_SLAVE_DATA payloads can be decoded according to the device kind.
 * The name pointers alias into the attribute buffer being converted and
 * are NOT NUL-terminated; len/slave_len bound them.
 */
struct linkinfo_context {
    int len;            /* length of 'name' in bytes */
    char *name;         /* device kind, e.g. "bridge" */
    int slave_len;      /* length of 'slave_name' in bytes */
    char *slave_name;   /* slave device kind */
};
/*
 * Convert IFLA_LINKINFO sub-attributes.  The KIND/SLAVE_KIND strings are
 * stashed in the shared linkinfo_context so the nested DATA/SLAVE_DATA
 * payloads can be decoded by device kind (only "bridge" is understood).
 */
static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
                                                    void *context)
{
    struct linkinfo_context *li_context = context;

    switch (nlattr->nla_type) {
    /* string */
    case QEMU_IFLA_INFO_KIND:
        li_context->name = NLA_DATA(nlattr);
        li_context->len = nlattr->nla_len - NLA_HDRLEN;
        break;
    case QEMU_IFLA_INFO_SLAVE_KIND:
        li_context->slave_name = NLA_DATA(nlattr);
        li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
        break;
    /* stats */
    case QEMU_IFLA_INFO_XSTATS:
        /* FIXME: only used by CAN */
        break;
    /* nested */
    case QEMU_IFLA_INFO_DATA:
        if (strncmp(li_context->name, "bridge",
                    li_context->len) == 0) {
            /* NOTE(review): nla_len passed here still includes
             * NLA_HDRLEN, so the nested walk is given a few bytes more
             * than the payload; the per-attribute length check bounds
             * it in practice — confirm. */
            return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
                                                  nlattr->nla_len,
                                                  NULL,
                                             host_to_target_data_bridge_nlattr);
        } else {
            gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
        }
        break;
    case QEMU_IFLA_INFO_SLAVE_DATA:
        if (strncmp(li_context->slave_name, "bridge",
                    li_context->slave_len) == 0) {
            return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
                                                  nlattr->nla_len,
                                                  NULL,
                                       host_to_target_slave_data_bridge_nlattr);
        } else {
            gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
                     li_context->slave_name);
        }
        break;
    default:
        gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
        break;
    }

    return 0;
}
/*
 * Convert AF_INET per-device configuration attributes (nested under
 * IFLA_AF_SPEC) from host to target byte order.
 */
static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
                                                void *context)
{
    uint32_t *u32;
    int i;

    switch (nlattr->nla_type) {
    case QEMU_IFLA_INET_CONF:
        /* Array of 32-bit sysctl-style values; swap each element. */
        u32 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
             i++) {
            u32[i] = tswap32(u32[i]);
        }
        break;
    default:
        gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
    }
    return 0;
}
/*
 * Convert AF_INET6 per-device attributes (nested under IFLA_AF_SPEC)
 * from host to target byte order, dispatching on payload layout.
 */
static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
                                                 void *context)
{
    uint32_t *u32;
    uint64_t *u64;
    struct ifla_cacheinfo *ci;
    int i;

    switch (nlattr->nla_type) {
    /* binaries */
    case QEMU_IFLA_INET6_TOKEN:
        break;
    /* uint8_t */
    case QEMU_IFLA_INET6_ADDR_GEN_MODE:
        break;
    /* uint32_t */
    case QEMU_IFLA_INET6_FLAGS:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint32_t[] */
    case QEMU_IFLA_INET6_CONF:
        u32 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
             i++) {
            u32[i] = tswap32(u32[i]);
        }
        break;
    /* ifla_cacheinfo */
    case QEMU_IFLA_INET6_CACHEINFO:
        ci = NLA_DATA(nlattr);
        ci->max_reasm_len = tswap32(ci->max_reasm_len);
        ci->tstamp = tswap32(ci->tstamp);
        ci->reachable_time = tswap32(ci->reachable_time);
        ci->retrans_time = tswap32(ci->retrans_time);
        break;
    /* uint64_t[] */
    case QEMU_IFLA_INET6_STATS:
    case QEMU_IFLA_INET6_ICMP6STATS:
        u64 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
             i++) {
            u64[i] = tswap64(u64[i]);
        }
        break;
    default:
        gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
    }
    return 0;
}
/*
 * Dispatch an IFLA_AF_SPEC sub-attribute to the per-address-family
 * converter; the attribute type here is the address family itself.
 */
static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
                                                void *context)
{
    switch (nlattr->nla_type) {
    case AF_INET:
        return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
                                              NULL,
                                             host_to_target_data_inet_nlattr);
    case AF_INET6:
        return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
                                              NULL,
                                             host_to_target_data_inet6_nlattr);
    default:
        gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
/*
 * Convert IFLA_XDP sub-attributes from host to target byte order.
 */
static abi_long host_to_target_data_xdp_nlattr(struct nlattr *nlattr,
                                               void *context)
{
    uint32_t *u32;

    switch (nlattr->nla_type) {
    /* uint8_t */
    case QEMU_IFLA_XDP_ATTACHED:
        break;
    /* uint32_t */
    case QEMU_IFLA_XDP_PROG_ID:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    default:
        gemu_log("Unknown host XDP type: %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
/*
 * Convert one IFLA_* attribute of an RTM_*LINK message from host to
 * target byte order.  Scalar payloads are swapped in place; the
 * rtnl_link_stats/stats64/ifmap structs are swapped field by field;
 * nested attribute sets (LINKINFO, AF_SPEC, XDP) recurse into their
 * own converters.  Unknown types are logged and left unmodified.
 */
static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct rtnl_link_stats *st;
    struct rtnl_link_stats64 *st64;
    struct rtnl_link_ifmap *map;
    struct linkinfo_context li_context;

    switch (rtattr->rta_type) {
    /* binary stream */
    case QEMU_IFLA_ADDRESS:
    case QEMU_IFLA_BROADCAST:
    /* string */
    case QEMU_IFLA_IFNAME:
    case QEMU_IFLA_QDISC:
        break;
    /* uint8_t */
    case QEMU_IFLA_OPERSTATE:
    case QEMU_IFLA_LINKMODE:
    case QEMU_IFLA_CARRIER:
    case QEMU_IFLA_PROTO_DOWN:
        break;
    /* uint32_t */
    case QEMU_IFLA_MTU:
    case QEMU_IFLA_LINK:
    case QEMU_IFLA_WEIGHT:
    case QEMU_IFLA_TXQLEN:
    case QEMU_IFLA_CARRIER_CHANGES:
    case QEMU_IFLA_NUM_RX_QUEUES:
    case QEMU_IFLA_NUM_TX_QUEUES:
    case QEMU_IFLA_PROMISCUITY:
    case QEMU_IFLA_EXT_MASK:
    case QEMU_IFLA_LINK_NETNSID:
    case QEMU_IFLA_GROUP:
    case QEMU_IFLA_MASTER:
    case QEMU_IFLA_NUM_VF:
    case QEMU_IFLA_GSO_MAX_SEGS:
    case QEMU_IFLA_GSO_MAX_SIZE:
    case QEMU_IFLA_CARRIER_UP_COUNT:
    case QEMU_IFLA_CARRIER_DOWN_COUNT:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct rtnl_link_stats */
    case QEMU_IFLA_STATS:
        st = RTA_DATA(rtattr);
        st->rx_packets = tswap32(st->rx_packets);
        st->tx_packets = tswap32(st->tx_packets);
        st->rx_bytes = tswap32(st->rx_bytes);
        st->tx_bytes = tswap32(st->tx_bytes);
        st->rx_errors = tswap32(st->rx_errors);
        st->tx_errors = tswap32(st->tx_errors);
        st->rx_dropped = tswap32(st->rx_dropped);
        st->tx_dropped = tswap32(st->tx_dropped);
        st->multicast = tswap32(st->multicast);
        st->collisions = tswap32(st->collisions);

        /* detailed rx_errors: */
        st->rx_length_errors = tswap32(st->rx_length_errors);
        st->rx_over_errors = tswap32(st->rx_over_errors);
        st->rx_crc_errors = tswap32(st->rx_crc_errors);
        st->rx_frame_errors = tswap32(st->rx_frame_errors);
        st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
        st->rx_missed_errors = tswap32(st->rx_missed_errors);

        /* detailed tx_errors */
        st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
        st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
        st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
        st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
        st->tx_window_errors = tswap32(st->tx_window_errors);

        /* for cslip etc */
        st->rx_compressed = tswap32(st->rx_compressed);
        st->tx_compressed = tswap32(st->tx_compressed);
        break;
    /* struct rtnl_link_stats64 */
    case QEMU_IFLA_STATS64:
        st64 = RTA_DATA(rtattr);
        st64->rx_packets = tswap64(st64->rx_packets);
        st64->tx_packets = tswap64(st64->tx_packets);
        st64->rx_bytes = tswap64(st64->rx_bytes);
        st64->tx_bytes = tswap64(st64->tx_bytes);
        st64->rx_errors = tswap64(st64->rx_errors);
        st64->tx_errors = tswap64(st64->tx_errors);
        st64->rx_dropped = tswap64(st64->rx_dropped);
        st64->tx_dropped = tswap64(st64->tx_dropped);
        st64->multicast = tswap64(st64->multicast);
        st64->collisions = tswap64(st64->collisions);

        /* detailed rx_errors: */
        st64->rx_length_errors = tswap64(st64->rx_length_errors);
        st64->rx_over_errors = tswap64(st64->rx_over_errors);
        st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
        st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
        st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
        st64->rx_missed_errors = tswap64(st64->rx_missed_errors);

        /* detailed tx_errors */
        st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
        st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
        st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
        st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
        st64->tx_window_errors = tswap64(st64->tx_window_errors);

        /* for cslip etc */
        st64->rx_compressed = tswap64(st64->rx_compressed);
        st64->tx_compressed = tswap64(st64->tx_compressed);
        break;
    /* struct rtnl_link_ifmap */
    case QEMU_IFLA_MAP:
        map = RTA_DATA(rtattr);
        map->mem_start = tswap64(map->mem_start);
        map->mem_end = tswap64(map->mem_end);
        map->base_addr = tswap64(map->base_addr);
        map->irq = tswap16(map->irq);
        break;
    /* nested */
    case QEMU_IFLA_LINKINFO:
        memset(&li_context, 0, sizeof(li_context));
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              &li_context,
                                           host_to_target_data_linkinfo_nlattr);
    case QEMU_IFLA_AF_SPEC:
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              NULL,
                                             host_to_target_data_spec_nlattr);
    case QEMU_IFLA_XDP:
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              NULL,
                                                host_to_target_data_xdp_nlattr);
    default:
        gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/*
 * Convert one IFA_* attribute of an RTM_*ADDR message from host to
 * target byte order.
 */
static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct ifa_cacheinfo *ci;

    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case IFA_ADDRESS:
    case IFA_LOCAL:
        break;
    /* string */
    case IFA_LABEL:
        break;
    /* u32 */
    case IFA_FLAGS:
    case IFA_BROADCAST:
        /* NOTE(review): IFA_BROADCAST carries an address (network byte
         * order), so swapping it like a host-order u32 looks
         * questionable; this matches existing behavior — confirm. */
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct ifa_cacheinfo */
    case IFA_CACHEINFO:
        ci = RTA_DATA(rtattr);
        ci->ifa_prefered = tswap32(ci->ifa_prefered);
        ci->ifa_valid = tswap32(ci->ifa_valid);
        ci->cstamp = tswap32(ci->cstamp);
        ci->tstamp = tswap32(ci->tstamp);
        break;
    default:
        gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/*
 * Convert one RTA_* attribute of an RTM_*ROUTE message from host to
 * target byte order.  Address payloads stay in network byte order.
 */
static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case RTA_GATEWAY:
    case RTA_DST:
    case RTA_PREFSRC:
        break;
    /* u32 */
    case RTA_PRIORITY:
    case RTA_TABLE:
    case RTA_OIF:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    default:
        gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
2682 static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
2683 uint32_t rtattr_len)
2685 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2686 host_to_target_data_link_rtattr);
2689 static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
2690 uint32_t rtattr_len)
2692 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2693 host_to_target_data_addr_rtattr);
2696 static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
2697 uint32_t rtattr_len)
2699 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2700 host_to_target_data_route_rtattr);
/*
 * Convert one rtnetlink (NETLINK_ROUTE) message payload from host to
 * target conventions; the nlmsghdr itself is swapped by the caller
 * (host_to_target_for_each_nlmsg).  Returns -TARGET_EINVAL for message
 * types that are not handled.
 */
static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
{
    uint32_t nlmsg_len;
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    nlmsg_len = nlh->nlmsg_len;
    switch (nlh->nlmsg_type) {
    case RTM_NEWLINK:
    case RTM_DELLINK:
    case RTM_GETLINK:
        /* Only touch the payload if one is actually present. */
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            host_to_target_link_rtattr(IFLA_RTA(ifi),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_NEWADDR:
    case RTM_DELADDR:
    case RTM_GETADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            host_to_target_addr_rtattr(IFA_RTA(ifa),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
    case RTM_GETROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            host_to_target_route_rtattr(RTM_RTA(rtm),
                                        nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EINVAL;
    }
    return 0;
}
2751 static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
2752 size_t len)
2754 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
/*
 * Iterate over target-order routing attributes: byte-swap each header
 * to host order, then let the callback convert the payload in place.
 */
static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*target_to_host_rtattr)
                                                        (struct rtattr *))
{
    abi_long ret;

    while (len >= sizeof(struct rtattr)) {
        /* Validate the target-order length before converting. */
        if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
            tswap16(rtattr->rta_len) > len) {
            break;
        }
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        ret = target_to_host_rtattr(rtattr);
        if (ret < 0) {
            return ret;
        }
        /* rta_len is in host order now. */
        len -= RTA_ALIGN(rtattr->rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) +
                 RTA_ALIGN(rtattr->rta_len));
    }
    return 0;
}
2782 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2784 switch (rtattr->rta_type) {
2785 default:
2786 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
2787 break;
2789 return 0;
2792 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2794 switch (rtattr->rta_type) {
2795 /* binary: depends on family type */
2796 case IFA_LOCAL:
2797 case IFA_ADDRESS:
2798 break;
2799 default:
2800 gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
2801 break;
2803 return 0;
2806 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2808 uint32_t *u32;
2809 switch (rtattr->rta_type) {
2810 /* binary: depends on family type */
2811 case RTA_DST:
2812 case RTA_SRC:
2813 case RTA_GATEWAY:
2814 break;
2815 /* u32 */
2816 case RTA_PRIORITY:
2817 case RTA_OIF:
2818 u32 = RTA_DATA(rtattr);
2819 *u32 = tswap32(*u32);
2820 break;
2821 default:
2822 gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
2823 break;
2825 return 0;
2828 static void target_to_host_link_rtattr(struct rtattr *rtattr,
2829 uint32_t rtattr_len)
2831 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2832 target_to_host_data_link_rtattr);
2835 static void target_to_host_addr_rtattr(struct rtattr *rtattr,
2836 uint32_t rtattr_len)
2838 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2839 target_to_host_data_addr_rtattr);
2842 static void target_to_host_route_rtattr(struct rtattr *rtattr,
2843 uint32_t rtattr_len)
2845 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2846 target_to_host_data_route_rtattr);
/*
 * Convert one rtnetlink request payload from target to host
 * conventions; the nlmsghdr itself was already swapped by the caller
 * (target_to_host_for_each_nlmsg).  GETLINK/GETROUTE dumps carry no
 * payload that needs converting.  Returns -TARGET_EOPNOTSUPP for
 * unhandled message types.
 */
static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
{
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    switch (nlh->nlmsg_type) {
    case RTM_GETLINK:
        break;
    case RTM_NEWLINK:
    case RTM_DELLINK:
        /* Only touch the payload if one is actually present. */
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_GETADDR:
    case RTM_NEWADDR:
    case RTM_DELADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_GETROUTE:
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
                                        NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EOPNOTSUPP;
    }
    return 0;
}
2897 static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
2899 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
2901 #endif /* CONFIG_RTNETLINK */
2903 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2905 switch (nlh->nlmsg_type) {
2906 default:
2907 gemu_log("Unknown host audit message type %d\n",
2908 nlh->nlmsg_type);
2909 return -TARGET_EINVAL;
2911 return 0;
2914 static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
2915 size_t len)
2917 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
2920 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2922 switch (nlh->nlmsg_type) {
2923 case AUDIT_USER:
2924 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2925 case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2926 break;
2927 default:
2928 gemu_log("Unknown target audit message type %d\n",
2929 nlh->nlmsg_type);
2930 return -TARGET_EINVAL;
2933 return 0;
2936 static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
2938 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
/* do_setsockopt() Must return target values and target errnos.
 *
 * Translates a guest setsockopt(2) into the host call: option values
 * are copied in from guest memory and byte-swapped where needed, and
 * TARGET_SOL_SOCKET / TARGET_SO_* constants are mapped to the host's.
 * Unhandled level/optname pairs return -TARGET_ENOPROTOOPT.
 */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTTL:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            /* The guest may pass either a full int or a single byte;
             * anything shorter leaves val at 0. */
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            /* The guest may pass either ip_mreq or the larger ip_mreqn. */
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            /* The struct contains only in_addr fields (network byte
             * order), so it can be passed through unmodified. */
            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
            /* All of these take an 'int' value. */
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        case IPV6_PKTINFO:
        {
            struct in6_pktinfo pki;

            if (optlen < sizeof(pki)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
                return -TARGET_EFAULT;
            }

            /* Only the interface index needs byte-swapping; the
             * address itself is already in network byte order. */
            pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &pki, sizeof(pki)));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_ICMPV6:
        switch (optname) {
        case ICMPV6_FILTER:
        {
            struct icmp6_filter icmp6f;

            /* Clamp rather than reject over-long filters. */
            if (optlen > sizeof(icmp6f)) {
                optlen = sizeof(icmp6f);
            }

            if (copy_from_user(&icmp6f, optval_addr, optlen)) {
                return -TARGET_EFAULT;
            }

            /* The filter is an array of eight 32-bit words. */
            for (val = 0; val < 8; val++) {
                icmp6f.data[val] = tswap32(icmp6f.data[val]);
            }

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &icmp6f, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
        case IPV6_CHECKSUM:
            /* those take an u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;

        default:
            goto unimplemented;
        }
        break;
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
                struct timeval tv;

                optname = SO_RCVTIMEO;

/* Shared with TARGET_SO_SNDTIMEO below; 'optname' selects which. */
set_timeout:
                if (optlen != sizeof(struct target_timeval)) {
                    return -TARGET_EINVAL;
                }

                if (copy_from_user_timeval(&tv, optval_addr)) {
                    return -TARGET_EFAULT;
                }

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                &tv, sizeof(tv)));
                return ret;
        }
        case TARGET_SO_SNDTIMEO:
                optname = SO_SNDTIMEO;
                goto set_timeout;
        case TARGET_SO_ATTACH_FILTER:
        {
                struct target_sock_fprog *tfprog;
                struct target_sock_filter *tfilter;
                struct sock_fprog fprog;
                struct sock_filter *filter;
                int i;

                if (optlen != sizeof(*tfprog)) {
                    return -TARGET_EINVAL;
                }
                if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                    return -TARGET_EFAULT;
                }
                if (!lock_user_struct(VERIFY_READ, tfilter,
                                      tswapal(tfprog->filter), 0)) {
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_EFAULT;
                }

                /* Rebuild the BPF program with host-order fields;
                 * jt/jf are single bytes and need no swapping. */
                fprog.len = tswap16(tfprog->len);
                filter = g_try_new(struct sock_filter, fprog.len);
                if (filter == NULL) {
                    unlock_user_struct(tfilter, tfprog->filter, 1);
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_ENOMEM;
                }
                for (i = 0; i < fprog.len; i++) {
                    filter[i].code = tswap16(tfilter[i].code);
                    filter[i].jt = tfilter[i].jt;
                    filter[i].jf = tfilter[i].jf;
                    filter[i].k = tswap32(tfilter[i].k);
                }
                fprog.filter = filter;

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                                SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
                g_free(filter);

                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return ret;
        }
        case TARGET_SO_BINDTODEVICE:
        {
                char *dev_ifname, *addr_ifname;

                /* Silently truncate over-long interface names. */
                if (optlen > IFNAMSIZ - 1) {
                    optlen = IFNAMSIZ - 1;
                }
                dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
                if (!dev_ifname) {
                    return -TARGET_EFAULT;
                }
                optname = SO_BINDTODEVICE;
                addr_ifname = alloca(IFNAMSIZ);
                memcpy(addr_ifname, dev_ifname, optlen);
                addr_ifname[optlen] = 0;
                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                           addr_ifname, optlen));
                unlock_user (dev_ifname, optval_addr, 0);
                return ret;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
                optname = SO_DEBUG;
                break;
        case TARGET_SO_REUSEADDR:
                optname = SO_REUSEADDR;
                break;
        case TARGET_SO_TYPE:
                optname = SO_TYPE;
                break;
        case TARGET_SO_ERROR:
                optname = SO_ERROR;
                break;
        case TARGET_SO_DONTROUTE:
                optname = SO_DONTROUTE;
                break;
        case TARGET_SO_BROADCAST:
                optname = SO_BROADCAST;
                break;
        case TARGET_SO_SNDBUF:
                optname = SO_SNDBUF;
                break;
        case TARGET_SO_SNDBUFFORCE:
                optname = SO_SNDBUFFORCE;
                break;
        case TARGET_SO_RCVBUF:
                optname = SO_RCVBUF;
                break;
        case TARGET_SO_RCVBUFFORCE:
                optname = SO_RCVBUFFORCE;
                break;
        case TARGET_SO_KEEPALIVE:
                optname = SO_KEEPALIVE;
                break;
        case TARGET_SO_OOBINLINE:
                optname = SO_OOBINLINE;
                break;
        case TARGET_SO_NO_CHECK:
                optname = SO_NO_CHECK;
                break;
        case TARGET_SO_PRIORITY:
                optname = SO_PRIORITY;
                break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
                optname = SO_BSDCOMPAT;
                break;
#endif
        case TARGET_SO_PASSCRED:
                optname = SO_PASSCRED;
                break;
        case TARGET_SO_PASSSEC:
                optname = SO_PASSSEC;
                break;
        case TARGET_SO_TIMESTAMP:
                optname = SO_TIMESTAMP;
                break;
        case TARGET_SO_RCVLOWAT:
                optname = SO_RCVLOWAT;
                break;
        default:
            goto unimplemented;
        }
        /* Common tail for the 'int'-valued SO_* options above. */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
/* do_getsockopt() Must return target values and target errnos.
 *
 * Emulates getsockopt(2) for the guest: converts the target-level
 * option identifiers to host ones, performs the host getsockopt(),
 * and writes the (byte-swapped) result plus updated length back to
 * guest memory at optval_addr/optlen.
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            /* struct ucred must be converted field by field because the
             * target layout (target_ucred) may differ in size/endianness. */
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            /* Clamp to what the host actually produced. */
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        /* Shared tail for all integer-valued options (reached via
         * "goto int_case" from the SOL_SOCKET cases above). */
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(lv);   /* 4-byte buffer length for the host call */
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            /* SO_TYPE values (SOCK_STREAM etc.) differ per target ABI. */
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        /* The guest may ask for fewer than 4 bytes; honour a 1-byte read. */
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            /* Some IP options are historically read as a single byte when
             * the caller supplied a short buffer and the value fits. */
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
3457 /* Convert target low/high pair representing file offset into the host
3458 * low/high pair. This function doesn't handle offsets bigger than 64 bits
3459 * as the kernel doesn't handle them either.
3461 static void target_to_host_low_high(abi_ulong tlow,
3462 abi_ulong thigh,
3463 unsigned long *hlow,
3464 unsigned long *hhigh)
3466 uint64_t off = tlow |
3467 ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3468 TARGET_LONG_BITS / 2;
3470 *hlow = off;
3471 *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
/* Translate a guest iovec array at target_addr (count entries) into a
 * host struct iovec array with every non-empty buffer locked into host
 * memory.  Returns NULL with errno set on failure; count == 0 legally
 * returns NULL with errno == 0.  Release the result with unlock_iovec().
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support. */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* Clamp so the accumulated length never exceeds max_len. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind only the entries already locked (length > 0). */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
3562 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3563 abi_ulong count, int copy)
3565 struct target_iovec *target_vec;
3566 int i;
3568 target_vec = lock_user(VERIFY_READ, target_addr,
3569 count * sizeof(struct target_iovec), 1);
3570 if (target_vec) {
3571 for (i = 0; i < count; i++) {
3572 abi_ulong base = tswapal(target_vec[i].iov_base);
3573 abi_long len = tswapal(target_vec[i].iov_len);
3574 if (len < 0) {
3575 break;
3577 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3579 unlock_user(target_vec, target_addr, 0);
3582 g_free(vec);
3585 static inline int target_to_host_sock_type(int *type)
3587 int host_type = 0;
3588 int target_type = *type;
3590 switch (target_type & TARGET_SOCK_TYPE_MASK) {
3591 case TARGET_SOCK_DGRAM:
3592 host_type = SOCK_DGRAM;
3593 break;
3594 case TARGET_SOCK_STREAM:
3595 host_type = SOCK_STREAM;
3596 break;
3597 default:
3598 host_type = target_type & TARGET_SOCK_TYPE_MASK;
3599 break;
3601 if (target_type & TARGET_SOCK_CLOEXEC) {
3602 #if defined(SOCK_CLOEXEC)
3603 host_type |= SOCK_CLOEXEC;
3604 #else
3605 return -TARGET_EINVAL;
3606 #endif
3608 if (target_type & TARGET_SOCK_NONBLOCK) {
3609 #if defined(SOCK_NONBLOCK)
3610 host_type |= SOCK_NONBLOCK;
3611 #elif !defined(O_NONBLOCK)
3612 return -TARGET_EINVAL;
3613 #endif
3615 *type = host_type;
3616 return 0;
3619 /* Try to emulate socket type flags after socket creation. */
3620 static int sock_flags_fixup(int fd, int target_type)
3622 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3623 if (target_type & TARGET_SOCK_NONBLOCK) {
3624 int flags = fcntl(fd, F_GETFL);
3625 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3626 close(fd);
3627 return -TARGET_EINVAL;
3630 #endif
3631 return fd;
3634 static abi_long packet_target_to_host_sockaddr(void *host_addr,
3635 abi_ulong target_addr,
3636 socklen_t len)
3638 struct sockaddr *addr = host_addr;
3639 struct target_sockaddr *target_saddr;
3641 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
3642 if (!target_saddr) {
3643 return -TARGET_EFAULT;
3646 memcpy(addr, target_saddr, len);
3647 addr->sa_family = tswap16(target_saddr->sa_family);
3648 /* spkt_protocol is big-endian */
3650 unlock_user(target_saddr, target_addr, 0);
3651 return 0;
/* fd translator for obsolete SOCK_PACKET sockets: only the sockaddr
 * needs converting on the way in; payload passes through unchanged. */
static TargetFdTrans target_packet_trans = {
    .target_to_host_addr = packet_target_to_host_sockaddr,
};
3658 #ifdef CONFIG_RTNETLINK
3659 static abi_long netlink_route_target_to_host(void *buf, size_t len)
3661 abi_long ret;
3663 ret = target_to_host_nlmsg_route(buf, len);
3664 if (ret < 0) {
3665 return ret;
3668 return len;
3671 static abi_long netlink_route_host_to_target(void *buf, size_t len)
3673 abi_long ret;
3675 ret = host_to_target_nlmsg_route(buf, len);
3676 if (ret < 0) {
3677 return ret;
3680 return len;
/* fd translator for NETLINK_ROUTE sockets: messages need endianness /
 * layout conversion in both directions. */
static TargetFdTrans target_netlink_route_trans = {
    .target_to_host_data = netlink_route_target_to_host,
    .host_to_target_data = netlink_route_host_to_target,
};
3687 #endif /* CONFIG_RTNETLINK */
3689 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
3691 abi_long ret;
3693 ret = target_to_host_nlmsg_audit(buf, len);
3694 if (ret < 0) {
3695 return ret;
3698 return len;
3701 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
3703 abi_long ret;
3705 ret = host_to_target_nlmsg_audit(buf, len);
3706 if (ret < 0) {
3707 return ret;
3710 return len;
/* fd translator for NETLINK_AUDIT sockets, converting messages in both
 * directions. */
static TargetFdTrans target_netlink_audit_trans = {
    .target_to_host_data = netlink_audit_target_to_host,
    .host_to_target_data = netlink_audit_host_to_target,
};
/* do_socket() Must return target values and target errnos.
 *
 * Creates the host socket and, for domains whose traffic needs
 * translation (SOCK_PACKET, netlink), registers the appropriate fd
 * translator on the new descriptor.
 */
static abi_long do_socket(int domain, int type, int protocol)
{
    int target_type = type;
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    /* Only the netlink protocols we know how to translate are allowed;
     * anything else would hand the guest untranslated kernel messages. */
    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -EPFNOSUPPORT;
    }

    /* Packet sockets carry a big-endian protocol number. */
    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        /* Emulate SOCK_NONBLOCK via fcntl on hosts that lack the flag. */
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case :
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                /* Unreachable: filtered by the protocol check above. */
                g_assert_not_reached();
            }
        }
    }
    return ret;
}
3772 /* do_bind() Must return target values and target errnos. */
3773 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3774 socklen_t addrlen)
3776 void *addr;
3777 abi_long ret;
3779 if ((int)addrlen < 0) {
3780 return -TARGET_EINVAL;
3783 addr = alloca(addrlen+1);
3785 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3786 if (ret)
3787 return ret;
3789 return get_errno(bind(sockfd, addr, addrlen));
3792 /* do_connect() Must return target values and target errnos. */
3793 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3794 socklen_t addrlen)
3796 void *addr;
3797 abi_long ret;
3799 if ((int)addrlen < 0) {
3800 return -TARGET_EINVAL;
3803 addr = alloca(addrlen+1);
3805 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3806 if (ret)
3807 return ret;
3809 return get_errno(safe_connect(sockfd, addr, addrlen));
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 *
 * Shared implementation of sendmsg/recvmsg on an already-locked guest
 * msghdr.  'send' selects direction; on receive, the converted address,
 * control data, and lengths are written back into *msgp.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* Control buffer is doubled — presumably headroom for host cmsg
     * layouts being larger than the guest's; TODO confirm against
     * target_to_host_cmsg(). */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            void *host_msg;

            /* Translate only the first iovec entry's payload, in a
             * private copy so the locked guest buffer stays untouched. */
            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                                       len);
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                /* (void *)-1 is the deliberately-bad name set above. */
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }
                /* On success report the number of bytes received. */
                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
3916 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3917 int flags, int send)
3919 abi_long ret;
3920 struct target_msghdr *msgp;
3922 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3923 msgp,
3924 target_msg,
3925 send ? 1 : 0)) {
3926 return -TARGET_EFAULT;
3928 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3929 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3930 return ret;
3933 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3934 * so it might not have this *mmsg-specific flag either.
3936 #ifndef MSG_WAITFORONE
3937 #define MSG_WAITFORONE 0x10000
3938 #endif
/* Emulate sendmmsg/recvmmsg by looping do_sendrecvmsg_locked() over the
 * guest mmsghdr vector.  Must return target values and target errnos.
 */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    /* Copy back only the entries actually processed. */
    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
/* do_accept4() Must return target values and target errnos. */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    /* NULL address: the caller doesn't want the peer address back. */
    if (target_addr == 0) {
        return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_getpeername() Must return target values and target errnos.
 * Unlike accept4, a bad addrlen pointer here yields EFAULT, not EINVAL.
 */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getpeername(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
4046 /* do_getsockname() Must return target values and target errnos. */
4047 static abi_long do_getsockname(int fd, abi_ulong target_addr,
4048 abi_ulong target_addrlen_addr)
4050 socklen_t addrlen;
4051 void *addr;
4052 abi_long ret;
4054 if (get_user_u32(addrlen, target_addrlen_addr))
4055 return -TARGET_EFAULT;
4057 if ((int)addrlen < 0) {
4058 return -TARGET_EINVAL;
4061 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
4062 return -TARGET_EFAULT;
4064 addr = alloca(addrlen);
4066 ret = get_errno(getsockname(fd, addr, &addrlen));
4067 if (!is_error(ret)) {
4068 host_to_target_sockaddr(target_addr, addr, addrlen);
4069 if (put_user_u32(addrlen, target_addrlen_addr))
4070 ret = -TARGET_EFAULT;
4072 return ret;
4075 /* do_socketpair() Must return target values and target errnos. */
4076 static abi_long do_socketpair(int domain, int type, int protocol,
4077 abi_ulong target_tab_addr)
4079 int tab[2];
4080 abi_long ret;
4082 target_to_host_sock_type(&type);
4084 ret = get_errno(socketpair(domain, type, protocol, tab));
4085 if (!is_error(ret)) {
4086 if (put_user_s32(tab[0], target_tab_addr)
4087 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
4088 ret = -TARGET_EFAULT;
4090 return ret;
/* do_sendto() Must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL;  /* original locked buffer, if we swapped it out */
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        /* Translate the payload in a private copy so the locked guest
         * buffer is never modified. */
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    /* Free the translated copy (if any) and restore the original
     * pointer so the unlock below releases the guest buffer. */
    if (copy_msg) {
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            /* Convert the received payload in place for the guest. */
            ret = fd_trans_host_to_target_data(fd)(host_msg, ret);
        }
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* Success: copy the received bytes back to the guest. */
        unlock_user(host_msg, msg, len);
    } else {
fail:
        /* Error path: unlock without copying anything back. */
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
4185 #ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos.
 *
 * Demultiplexes the legacy socketcall(2) syscall: fetches nargs[num]
 * guest words from vptr and dispatches to the matching do_* helper.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
4276 #endif
#define N_SHM_REGIONS	32

/* Fixed-size table tracking guest shmat() mappings so that shmdt()/exec
 * can find and tear them down. */
static struct shm_region {
    abi_ulong start;    /* guest start address of the mapping */
    abi_ulong size;     /* mapping size in bytes */
    bool in_use;        /* slot occupied */
} shm_regions[N_SHM_REGIONS];
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
struct target_semid64_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;    /* pads 32-bit time fields to 64 bits */
#endif
    abi_ulong sem_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;
};
#endif
/* Copy the sem_perm member of a guest semid64_ds at target_addr into a
 * host struct ipc_perm, byte-swapping each field.  Returns 0 or
 * -TARGET_EFAULT.
 */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
    /* mode/__seq widths differ per target ABI, hence the 16/32-bit swaps. */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/* Inverse of target_to_host_ipc_perm(): write a host struct ipc_perm
 * into the sem_perm member of the guest semid64_ds at target_addr.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
    /* mode/__seq widths differ per target ABI, hence the 16/32-bit swaps. */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
/* Convert a guest semid64_ds at target_addr into a host struct semid_ds
 * (permissions plus the otime/ctime/nsems fields).  Returns 0 or
 * -TARGET_EFAULT.
 */
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/* Inverse of target_to_host_semid_ds(): write a host struct semid_ds
 * back into the guest semid64_ds at target_addr.  Returns 0 or
 * -TARGET_EFAULT.
 */
static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
/* Guest-layout mirror of the host struct seminfo (semctl IPC_INFO/SEM_INFO). */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
/* Copy a host struct seminfo into the guest target_seminfo at
 * target_addr, field by field with byte-swapping.  Returns 0 or
 * -TARGET_EFAULT.
 */
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
/* Host-side semun as documented in semctl(2); the kernel/libc take this
 * by value as the fourth semctl argument. */
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

/* Guest-side semun: pointer members are guest addresses (abi_ulong),
 * so the layout differs from the host union above. */
union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
4440 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
4441 abi_ulong target_addr)
4443 int nsems;
4444 unsigned short *array;
4445 union semun semun;
4446 struct semid_ds semid_ds;
4447 int i, ret;
4449 semun.buf = &semid_ds;
4451 ret = semctl(semid, 0, IPC_STAT, semun);
4452 if (ret == -1)
4453 return get_errno(ret);
4455 nsems = semid_ds.sem_nsems;
4457 *host_array = g_try_new(unsigned short, nsems);
4458 if (!*host_array) {
4459 return -TARGET_ENOMEM;
4461 array = lock_user(VERIFY_READ, target_addr,
4462 nsems*sizeof(unsigned short), 1);
4463 if (!array) {
4464 g_free(*host_array);
4465 return -TARGET_EFAULT;
4468 for(i=0; i<nsems; i++) {
4469 __get_user((*host_array)[i], &array[i]);
4471 unlock_user(array, target_addr, 0);
4473 return 0;
4476 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4477 unsigned short **host_array)
4479 int nsems;
4480 unsigned short *array;
4481 union semun semun;
4482 struct semid_ds semid_ds;
4483 int i, ret;
4485 semun.buf = &semid_ds;
4487 ret = semctl(semid, 0, IPC_STAT, semun);
4488 if (ret == -1)
4489 return get_errno(ret);
4491 nsems = semid_ds.sem_nsems;
4493 array = lock_user(VERIFY_WRITE, target_addr,
4494 nsems*sizeof(unsigned short), 0);
4495 if (!array)
4496 return -TARGET_EFAULT;
4498 for(i=0; i<nsems; i++) {
4499 __put_user((*host_array)[i], &array[i]);
4501 g_free(*host_array);
4502 unlock_user(array, target_addr, 1);
4504 return 0;
/*
 * Emulate semctl(2) for the guest.  semid/semnum/cmd are passed through;
 * target_arg is the guest-side semun value (an address or an int,
 * depending on cmd).  Returns the host result or a target errno.
 */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    /* Mask off IPC_64 and similar version bits set by some guest ABIs. */
    cmd &= 0xff;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element.  To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        /* target_to_host_semarray allocates 'array'; the matching
         * host_to_target_semarray call below consumes it. */
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* These commands ignore the fourth argument. */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    /* No default: an unknown cmd falls through with the initial
     * -TARGET_EINVAL in ret. */
    }

    return ret;
}
/* Target-ABI layout of struct sembuf (one semop(2) operation); field
 * sizes match the host layout, only byte order differs. */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
4583 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4584 abi_ulong target_addr,
4585 unsigned nsops)
4587 struct target_sembuf *target_sembuf;
4588 int i;
4590 target_sembuf = lock_user(VERIFY_READ, target_addr,
4591 nsops*sizeof(struct target_sembuf), 1);
4592 if (!target_sembuf)
4593 return -TARGET_EFAULT;
4595 for(i=0; i<nsops; i++) {
4596 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4597 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4598 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4601 unlock_user(target_sembuf, target_addr, 0);
4603 return 0;
/*
 * Emulate semop(2): fetch nsops operations from guest memory at ptr and
 * issue them via semtimedop with no timeout.
 * NOTE(review): sops is a VLA sized by guest-controlled nsops — a huge
 * value could overflow the host stack before the kernel rejects it;
 * consider bounding nsops (e.g. against SEMOPM) — TODO confirm.
 */
static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
{
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
}
/* Target-ABI layout of msqid_ds (msgctl IPC_STAT/IPC_SET).  On 32-bit
 * ABIs each time field is followed by a padding word, mirroring the
 * kernel's 64-bit-time layout of the structure. */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};
4640 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4641 abi_ulong target_addr)
4643 struct target_msqid_ds *target_md;
4645 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4646 return -TARGET_EFAULT;
4647 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4648 return -TARGET_EFAULT;
4649 host_md->msg_stime = tswapal(target_md->msg_stime);
4650 host_md->msg_rtime = tswapal(target_md->msg_rtime);
4651 host_md->msg_ctime = tswapal(target_md->msg_ctime);
4652 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4653 host_md->msg_qnum = tswapal(target_md->msg_qnum);
4654 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4655 host_md->msg_lspid = tswapal(target_md->msg_lspid);
4656 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4657 unlock_user_struct(target_md, target_addr, 0);
4658 return 0;
4661 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4662 struct msqid_ds *host_md)
4664 struct target_msqid_ds *target_md;
4666 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4667 return -TARGET_EFAULT;
4668 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4669 return -TARGET_EFAULT;
4670 target_md->msg_stime = tswapal(host_md->msg_stime);
4671 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4672 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4673 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4674 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4675 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4676 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4677 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4678 unlock_user_struct(target_md, target_addr, 1);
4679 return 0;
/* Target-ABI layout of msginfo (msgctl IPC_INFO/MSG_INFO); all ints
 * except the trailing unsigned short, matching the host layout. */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4693 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4694 struct msginfo *host_msginfo)
4696 struct target_msginfo *target_msginfo;
4697 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4698 return -TARGET_EFAULT;
4699 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4700 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4701 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4702 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4703 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4704 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4705 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4706 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4707 unlock_user_struct(target_msginfo, target_addr, 1);
4708 return 0;
/*
 * Emulate msgctl(2): convert the guest argument at ptr to the host
 * layout for the given cmd, run the host msgctl, and convert the result
 * back.  Returns the host result or a target errno; unknown cmds yield
 * the initial -TARGET_EINVAL.
 */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    /* Mask off IPC_64 and similar version bits set by some guest ABIs. */
    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* The kernel returns a struct msginfo through the msqid_ds
         * pointer for these commands, hence the cast. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
/* Target-ABI layout of struct msgbuf; mtext is a flexible payload whose
 * real length is the msgsz argument of msgsnd/msgrcv. */
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};
/*
 * Emulate msgsnd(2): copy the guest message at msgp (mtype + msgsz
 * bytes of mtext) into a host msgbuf and send it.
 * Returns 0, or a negative target errno.
 */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    /* copy=0: only the fixed header is mapped here; mtext is read
     * directly through the mapping below. */
    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* Host buffer holds the host 'long mtype' plus msgsz payload bytes. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
/*
 * Emulate msgrcv(2): receive into a host msgbuf, then copy the payload
 * and byteswapped mtype back to the guest buffer at msgp.
 * Returns the number of bytes received, or a negative target errno.
 */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));

    if (ret > 0) {
        /* mtext starts right after the abi_ulong mtype in the guest
         * layout; lock exactly the received byte count. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    /* target_mb is always non-NULL here (EFAULT returned early above);
     * the check is defensive only. */
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
4818 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4819 abi_ulong target_addr)
4821 struct target_shmid_ds *target_sd;
4823 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4824 return -TARGET_EFAULT;
4825 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4826 return -TARGET_EFAULT;
4827 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4828 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4829 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4830 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4831 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4832 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4833 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4834 unlock_user_struct(target_sd, target_addr, 0);
4835 return 0;
4838 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4839 struct shmid_ds *host_sd)
4841 struct target_shmid_ds *target_sd;
4843 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4844 return -TARGET_EFAULT;
4845 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4846 return -TARGET_EFAULT;
4847 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4848 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4849 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4850 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4851 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4852 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4853 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4854 unlock_user_struct(target_sd, target_addr, 1);
4855 return 0;
/* Target-ABI layout of shminfo (shmctl IPC_INFO); all fields are
 * abi_ulong, so only byte order needs converting. */
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};
4866 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4867 struct shminfo *host_shminfo)
4869 struct target_shminfo *target_shminfo;
4870 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4871 return -TARGET_EFAULT;
4872 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4873 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4874 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4875 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4876 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4877 unlock_user_struct(target_shminfo, target_addr, 1);
4878 return 0;
/* Target-ABI layout of shm_info (shmctl SHM_INFO). */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
4890 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4891 struct shm_info *host_shm_info)
4893 struct target_shm_info *target_shm_info;
4894 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4895 return -TARGET_EFAULT;
4896 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4897 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4898 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4899 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4900 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4901 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4902 unlock_user_struct(target_shm_info, target_addr, 1);
4903 return 0;
/*
 * Emulate shmctl(2): convert the guest argument at buf to the host
 * layout for the given cmd, run the host shmctl, and convert the result
 * back.  Returns the host result or a target errno; unknown cmds yield
 * the initial -TARGET_EINVAL.
 */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    /* Mask off IPC_64 and similar version bits set by some guest ABIs. */
    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* The kernel returns struct shminfo/shm_info through the
         * shmid_ds pointer for the INFO commands, hence the casts. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
/* Default: the guest's shmat alignment requirement is one target page. */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif
/*
 * Emulate shmat(2): attach the segment at a guest address satisfying
 * the target's SHMLBA alignment, record the mapping in shm_regions[]
 * (so do_shmdt can undo the page flags), and return the guest address
 * or a negative target errno.
 */
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;
    abi_ulong shmlba;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    /* Enforce the target's alignment rule: round down with SHM_RND,
     * otherwise reject a misaligned address. */
    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* No address requested: pick a free guest range ourselves and
         * force the attach there (SHM_REMAP replaces the placeholder). */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    /* Mark the guest pages valid; writable only unless SHM_RDONLY. */
    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* Remember start/size so do_shmdt can clear the page flags later.
     * If the table is full the attach still succeeds, silently. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
/*
 * Emulate shmdt(2): detach the segment attached at guest address
 * shmaddr, clearing the page flags recorded by do_shmat.
 * Returns 0 or a negative target errno.
 */
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;
    abi_long rv;

    mmap_lock();

    /* Find the bookkeeping entry created by do_shmat (if any) and
     * invalidate the guest pages it covered. */
    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
            shm_regions[i].in_use = false;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }
    rv = get_errno(shmdt(g2h(shmaddr)));

    mmap_unlock();

    return rv;
}
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos. */
/*
 * Demultiplex the legacy ipc(2) syscall onto the individual SysV IPC
 * helpers.  The high 16 bits of 'call' carry the ABI version, the low
 * 16 bits select the operation.
 */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Version 0 packs msgp and msgtyp into a kludge struct
                 * that ptr points at (historic kernel ABI). */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* The attach address is returned through *third. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* iBCS2 emulator entry point: not supported. */
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

    /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
/* kernel structure types definitions */

/* First expansion of syscall_types.h: generate one STRUCT_<name> enum
 * constant per described kernel structure. */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Second expansion: emit a struct_<name>_def[] argtype table describing
 * each structure's layout for the thunk conversion code.  SPECIAL
 * entries expand to nothing here (presumably converted by hand-written
 * code elsewhere — see the thunk machinery). */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL
typedef struct IOCTLEntry IOCTLEntry;

/* Custom conversion hook for ioctls whose argument cannot be handled by
 * the generic thunk tables alone. */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

/* One row of the ioctl translation table. */
struct IOCTLEntry {
    int target_cmd;          /* guest ioctl request number */
    unsigned int host_cmd;   /* corresponding host request number */
    const char *name;        /* for strace-style logging */
    int access;              /* IOC_R / IOC_W / IOC_RW data direction */
    do_ioctl_fn *do_ioctl;   /* non-NULL: custom handler overrides thunk */
    const argtype arg_type[5];  /* thunk description of the argument */
};

/* Data direction of the ioctl argument, from the guest's viewpoint. */
#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

/* Size of the fixed scratch buffer used for argument conversion. */
#define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

/*
 * Custom handler for FS_IOC_FIEMAP.
 * Converts the guest struct fiemap plus its trailing variable-length
 * fiemap_extent array, runs the host ioctl, and converts the result
 * back.  Returns the ioctl result or a negative target errno.
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* Bounded above so the size computation below cannot overflow. */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
5288 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
5289 int fd, int cmd, abi_long arg)
5291 const argtype *arg_type = ie->arg_type;
5292 int target_size;
5293 void *argptr;
5294 int ret;
5295 struct ifconf *host_ifconf;
5296 uint32_t outbufsz;
5297 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
5298 int target_ifreq_size;
5299 int nb_ifreq;
5300 int free_buf = 0;
5301 int i;
5302 int target_ifc_len;
5303 abi_long target_ifc_buf;
5304 int host_ifc_len;
5305 char *host_ifc_buf;
5307 assert(arg_type[0] == TYPE_PTR);
5308 assert(ie->access == IOC_RW);
5310 arg_type++;
5311 target_size = thunk_type_size(arg_type, 0);
5313 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5314 if (!argptr)
5315 return -TARGET_EFAULT;
5316 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5317 unlock_user(argptr, arg, 0);
5319 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
5320 target_ifc_len = host_ifconf->ifc_len;
5321 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
5323 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
5324 nb_ifreq = target_ifc_len / target_ifreq_size;
5325 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
5327 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
5328 if (outbufsz > MAX_STRUCT_SIZE) {
5329 /* We can't fit all the extents into the fixed size buffer.
5330 * Allocate one that is large enough and use it instead.
5332 host_ifconf = malloc(outbufsz);
5333 if (!host_ifconf) {
5334 return -TARGET_ENOMEM;
5336 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
5337 free_buf = 1;
5339 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
5341 host_ifconf->ifc_len = host_ifc_len;
5342 host_ifconf->ifc_buf = host_ifc_buf;
5344 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
5345 if (!is_error(ret)) {
5346 /* convert host ifc_len to target ifc_len */
5348 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
5349 target_ifc_len = nb_ifreq * target_ifreq_size;
5350 host_ifconf->ifc_len = target_ifc_len;
5352 /* restore target ifc_buf */
5354 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
5356 /* copy struct ifconf to target user */
5358 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5359 if (!argptr)
5360 return -TARGET_EFAULT;
5361 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
5362 unlock_user(argptr, arg, target_size);
5364 /* copy ifreq[] to target user */
5366 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
5367 for (i = 0; i < nb_ifreq ; i++) {
5368 thunk_convert(argptr + i * target_ifreq_size,
5369 host_ifc_buf + i * sizeof(struct ifreq),
5370 ifreq_arg_type, THUNK_TARGET);
5372 unlock_user(argptr, target_ifc_buf, target_ifc_len);
5375 if (free_buf) {
5376 free(host_ifconf);
5379 return ret;
/*
 * Custom handler for device-mapper ioctls (DM_*).
 * struct dm_ioctl carries a variable-size payload at data_start whose
 * format depends on the command, so each command's payload is converted
 * by hand in both directions.  Returns the ioctl result or a negative
 * target errno.
 */
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        ret = -TARGET_EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }

    /* Convert the guest payload into host_data, per command. */
    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        memcpy(host_data, argptr, guest_data_size);
        /* The payload starts with a 64-bit sector number needing a swap. */
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        /* Payload: a chain of dm_target_spec structs, each followed by
         * a parameter string; 'next' links to the following spec. */
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        /* NOTE(review): this lock_user result is not checked for NULL
         * before use below — TODO confirm and add an EFAULT path. */
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        /* Convert the host result payload back to the guest, per command. */
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            /* Chain of dm_name_list entries, each followed by a name. */
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void*)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            /* Chain of dm_target_spec entries with trailing strings. */
            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char*)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char*)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void*)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            /* Payload: a u32 count (at offset 0, device array at +8)
             * followed by 64-bit device numbers. */
            void *hdata = (void*)host_dm + host_dm->data_start;
            int count = *(uint32_t*)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t*)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            /* Chain of dm_target_versions entries with trailing names. */
            struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void*)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        /* Finally write the (possibly flag-updated) dm_ioctl header back. */
        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}
/*
 * Handle the BLKPG ioctl: the guest passes a struct blkpg_ioctl_arg whose
 * 'data' member is a guest pointer to a struct blkpg_partition.  Both levels
 * must be converted to host layout before calling the host ioctl.
 * Returns 0 or a negative target errno.
 */
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert the outer blkpg_ioctl_arg from guest memory. */
    arg_type++;  /* skip the TYPE_PTR marker to reach the struct type */
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert the nested blkpg->data (struct blkpg_partition). */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
/*
 * Handle routing-table ioctls (SIOCADDRT/SIOCDELRT) that take a
 * struct rtentry.  The rt_dev member is a pointer to a device-name
 * string in guest memory; it is locked into host memory and passed
 * through, while every other field is thunk-converted normally.
 */
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr;
    unsigned long *host_rt_dev_ptr;
    abi_long ret;
    int i;

    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                /* Lock the guest device-name string; its host address is
                 * stored directly into the host rtentry. */
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    /* NOTE(review): this relies on STRUCT_rtentry always containing an
     * rt_dev field so host_rt_dev_ptr is set in the loop above — looks
     * guaranteed by the struct definition, but worth confirming. */
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
/*
 * KDSIGACCEPT takes a signal number as its argument; translate the
 * guest signal number to the host numbering before forwarding.
 */
static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int sig = target_to_host_signal(arg);
    return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
}
#ifdef TIOCGPTPEER
/*
 * TIOCGPTPEER takes open(2)-style flags; translate the guest flag bits
 * to host values before forwarding.
 */
static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
    return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
}
#endif
/*
 * Table of all supported ioctls, generated from ioctls.h.  Each entry
 * maps a target command to a host command plus an access mode and
 * argument-type description; IOCTL_SPECIAL entries supply a custom
 * do_ioctl callback, IOCTL_IGNORE entries have host_cmd == 0.
 * The table is terminated by a zero target_cmd sentinel.
 */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, },
};
/* ??? Implement proper locking for ioctls. */
/*
 * Generic ioctl dispatcher.  Looks the command up in ioctl_entries,
 * delegates to a custom handler if one is registered, and otherwise
 * converts the argument between guest and host layout according to the
 * entry's access mode (IOC_R / IOC_W / IOC_RW).
 * do_ioctl() Must return target values and target errnos.
 */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear scan; table ends with a zero target_cmd sentinel. */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux. */
        return -TARGET_ENOSYS;
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* Pass the value through unchanged. */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* Host fills buf_temp; copy back to the guest on success. */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* Guest supplies the data; convert in before the call. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* Convert in, call, and convert the result back out. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
/* Target <-> host translation for termios input-mode (c_iflag) bits. */
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
        { 0, 0, 0, 0 }
};
/* Target <-> host translation for termios output-mode (c_oflag) bits;
 * the delay fields (NLDLY etc.) are multi-bit masks with per-value rows. */
static const bitmask_transtbl oflag_tbl[] = {
	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
	{ 0, 0, 0, 0 }
};
/* Target <-> host translation for termios control-mode (c_cflag) bits;
 * CBAUD and CSIZE are multi-bit fields enumerated value by value. */
static const bitmask_transtbl cflag_tbl[] = {
	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
	{ 0, 0, 0, 0 }
};
/* Target <-> host translation for termios local-mode (c_lflag) bits. */
static const bitmask_transtbl lflag_tbl[] = {
	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
	{ 0, 0, 0, 0 }
};
/*
 * Convert a guest struct target_termios to a host struct host_termios:
 * the four flag words are byte-swapped and bit-translated via the tables
 * above, and each control character is copied to its host slot.
 */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    /* Zero first: host slots with no target counterpart stay cleared. */
    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
/*
 * Convert a host struct host_termios to a guest struct target_termios:
 * inverse of target_to_host_termios() above.
 */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    /* Zero first: target slots with no host counterpart stay cleared. */
    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
/* Thunk descriptor for termios: uses the custom convert callbacks above
 * instead of generic field-by-field conversion. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios),
               __alignof__(struct host_termios) },
};
/* Target <-> host translation for mmap(2) flag bits. */
static bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host.  */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { 0, 0, 0, 0 }
};
6045 #if defined(TARGET_I386)
/* NOTE: there is really one LDT for all the threads */
/* Backing store for the emulated LDT; allocated lazily by write_ldt(). */
static uint8_t *ldt_table;

/*
 * modify_ldt(func=0): copy up to bytecount bytes of the emulated LDT
 * into guest memory at ptr.  Returns the number of bytes copied, 0 if
 * no LDT has been created yet, or -TARGET_EFAULT.
 */
static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this by byteswapped? */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}
/* XXX: add locking support */
/*
 * modify_ldt(func=1/0x11): install one LDT descriptor from the guest's
 * struct modify_ldt_ldt_s.  Mirrors the Linux kernel's descriptor
 * encoding; 'oldmode' selects the legacy func==1 semantics.
 * Returns 0 or a negative target errno.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the descriptor attribute bits from 'flags'. */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0		&&
             read_exec_only == 1	&&
             seg_32bit == 0		&&
             limit_in_pages == 0	&&
             seg_not_present == 1	&&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the two 32-bit words of an x86 segment descriptor. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
/* specific and weird i386 syscalls */
/*
 * modify_ldt(2) dispatcher: func 0 reads the LDT, func 1 writes in
 * legacy mode, func 0x11 writes in new mode.  Other funcs -> ENOSYS.
 */
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
                              unsigned long bytecount)
{
    abi_long ret;

    switch (func) {
    case 0:
        ret = read_ldt(ptr, bytecount);
        break;
    case 1:
        ret = write_ldt(env, ptr, bytecount, 1);
        break;
    case 0x11:
        ret = write_ldt(env, ptr, bytecount, 0);
        break;
    default:
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
6184 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/*
 * set_thread_area(2): install a TLS descriptor into the emulated GDT.
 * If entry_number is -1 a free TLS slot is picked and written back to
 * the guest struct.  Same descriptor encoding as write_ldt() above.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* Find a free TLS slot and report the choice back to the guest. */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
           return -TARGET_EINVAL;
    /* Unpack the descriptor attribute bits from 'flags'. */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the two 32-bit words of an x86 segment descriptor. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
/*
 * get_thread_area(2): decode the GDT TLS descriptor selected by the
 * guest struct's entry_number back into base/limit/flags and write the
 * result to guest memory.  Inverse of do_set_thread_area().
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);
    
    /* Extract the attribute bits from the descriptor's second word. */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
    base_addr = (entry_1 >> 16) | 
        (entry_2 & 0xff000000) | 
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
6316 #endif /* TARGET_I386 && TARGET_ABI32 */
6318 #ifndef TARGET_ABI32
/*
 * arch_prctl(2) for x86-64: set or query the FS/GS segment base of the
 * emulated CPU.  Returns 0, -TARGET_EFAULT, or -TARGET_EINVAL.
 */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    abi_long ret = 0;
    abi_ulong val;
    int idx;

    switch(code) {
    case TARGET_ARCH_SET_GS:
    case TARGET_ARCH_SET_FS:
        if (code == TARGET_ARCH_SET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        cpu_x86_load_seg(env, idx, 0);
        env->segs[idx].base = addr;
        break;
    case TARGET_ARCH_GET_GS:
    case TARGET_ARCH_GET_FS:
        if (code == TARGET_ARCH_GET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        val = env->segs[idx].base;
        if (put_user(val, addr, abi_ulong))
            ret = -TARGET_EFAULT;
        break;
    default:
        ret = -TARGET_EINVAL;
        break;
    }
    return ret;
}
6351 #endif
6353 #endif /* defined(TARGET_I386) */
/* Stack size for threads created via clone(CLONE_VM). */
#define NEW_STACK_SIZE 0x40000


/* Serializes thread creation so parent TLS setup appears atomic. */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Handshake data passed from do_fork() to the new thread. */
typedef struct {
    CPUArchState *env;      /* CPU state for the child thread */
    pthread_mutex_t mutex;  /* protects 'cond' handshake */
    pthread_cond_t cond;    /* child signals readiness on this */
    pthread_t thread;
    uint32_t tid;           /* child's kernel tid, filled by the child */
    abi_ulong child_tidptr;  /* guest addr for CLONE_CHILD_SETTID */
    abi_ulong parent_tidptr; /* guest addr for CLONE_PARENT_SETTID */
    sigset_t sigmask;       /* signal mask to restore in the child */
} new_thread_info;
/*
 * Entry point of a thread created by do_fork(CLONE_VM): registers the
 * thread with RCU/TCG, publishes its tid, unblocks signals, signals the
 * parent it is ready, waits for parent TLS setup, then enters cpu_loop.
 * Never returns.
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = gettid();
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
/*
 * Implement clone(2)/fork(2)/vfork(2).  CLONE_VM requests become a new
 * host pthread running clone_func(); everything else is emulated with a
 * plain fork() (vfork is downgraded to fork).
 */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_cpu = ENV_GET_CPU(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        /* If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         */
        if (!parallel_cpus) {
            parallel_cpus = true;
            tb_flush(cpu);
        }

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs(env, newsp);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            fork_end(0);
        }
    }
    return ret;
}
/* warning : doesn't handle linux specific flags... */
/*
 * Map a guest fcntl(2) command to the host command.  Record-lock
 * commands are mapped to their 64-bit host variants; unknown commands
 * yield -TARGET_EINVAL.
 */
static int target_to_host_fcntl_cmd(int cmd)
{
    switch(cmd) {
	case TARGET_F_DUPFD:
	case TARGET_F_GETFD:
	case TARGET_F_SETFD:
	case TARGET_F_GETFL:
	case TARGET_F_SETFL:
            return cmd;
        case TARGET_F_GETLK:
            return F_GETLK64;
        case TARGET_F_SETLK:
            return F_SETLK64;
        case TARGET_F_SETLKW:
            return F_SETLKW64;
	case TARGET_F_GETOWN:
	    return F_GETOWN;
	case TARGET_F_SETOWN:
	    return F_SETOWN;
	case TARGET_F_GETSIG:
	    return F_GETSIG;
	case TARGET_F_SETSIG:
	    return F_SETSIG;
#if TARGET_ABI_BITS == 32
        case TARGET_F_GETLK64:
	    return F_GETLK64;
	case TARGET_F_SETLK64:
	    return F_SETLK64;
	case TARGET_F_SETLKW64:
	    return F_SETLKW64;
#endif
        case TARGET_F_SETLEASE:
            return F_SETLEASE;
        case TARGET_F_GETLEASE:
            return F_GETLEASE;
#ifdef F_DUPFD_CLOEXEC
        case TARGET_F_DUPFD_CLOEXEC:
            return F_DUPFD_CLOEXEC;
#endif
        case TARGET_F_NOTIFY:
            return F_NOTIFY;
#ifdef F_GETOWN_EX
        case TARGET_F_GETOWN_EX:
            return F_GETOWN_EX;
#endif
#ifdef F_SETOWN_EX
        case TARGET_F_SETOWN_EX:
            return F_SETOWN_EX;
#endif
#ifdef F_SETPIPE_SZ
        case TARGET_F_SETPIPE_SZ:
            return F_SETPIPE_SZ;
        case TARGET_F_GETPIPE_SZ:
            return F_GETPIPE_SZ;
#endif
	default:
            return -TARGET_EINVAL;
    }
    return -TARGET_EINVAL;
}
/* Conversion table for flock lock types.  Each user of the table
 * defines TRANSTBL_CONVERT to map one case label in the desired
 * direction (target->host or host->target).
 */
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    TRANSTBL_CONVERT(F_EXLCK); \
    TRANSTBL_CONVERT(F_SHLCK); \
    }

/* Map a guest flock type to the host's value, or -TARGET_EINVAL for an
 * unknown type. */
static int target_to_host_flock(int type)
{
#define TRANSTBL_CONVERT(a) case TARGET_##a: return a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    return -TARGET_EINVAL;
}

/* Map a host flock type back to the guest's value. */
static int host_to_target_flock(int type)
{
#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    /* if we don't know how to convert the value coming
     * from the host we copy to the target field as-is
     */
    return type;
}
6633 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6634 abi_ulong target_flock_addr)
6636 struct target_flock *target_fl;
6637 int l_type;
6639 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6640 return -TARGET_EFAULT;
6643 __get_user(l_type, &target_fl->l_type);
6644 l_type = target_to_host_flock(l_type);
6645 if (l_type < 0) {
6646 return l_type;
6648 fl->l_type = l_type;
6649 __get_user(fl->l_whence, &target_fl->l_whence);
6650 __get_user(fl->l_start, &target_fl->l_start);
6651 __get_user(fl->l_len, &target_fl->l_len);
6652 __get_user(fl->l_pid, &target_fl->l_pid);
6653 unlock_user_struct(target_fl, target_flock_addr, 0);
6654 return 0;
6657 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6658 const struct flock64 *fl)
6660 struct target_flock *target_fl;
6661 short l_type;
6663 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6664 return -TARGET_EFAULT;
6667 l_type = host_to_target_flock(fl->l_type);
6668 __put_user(l_type, &target_fl->l_type);
6669 __put_user(fl->l_whence, &target_fl->l_whence);
6670 __put_user(fl->l_start, &target_fl->l_start);
6671 __put_user(fl->l_len, &target_fl->l_len);
6672 __put_user(fl->l_pid, &target_fl->l_pid);
6673 unlock_user_struct(target_fl, target_flock_addr, 1);
6674 return 0;
6677 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6678 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
#if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/* ARM OABI lays struct flock64 out differently from EABI; convert the
 * guest's OABI layout into a host flock64.  Returns 0, -TARGET_EFAULT,
 * or a negative error for an unknown lock type.
 */
static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_oabi_flock64 *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        /* Unlock before bailing out; the old code leaked the lock here. */
        unlock_user_struct(target_fl, target_flock_addr, 0);
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

/* Convert a host flock64 back into the guest's OABI layout. */
static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_oabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
#endif
6726 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6727 abi_ulong target_flock_addr)
6729 struct target_flock64 *target_fl;
6730 int l_type;
6732 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6733 return -TARGET_EFAULT;
6736 __get_user(l_type, &target_fl->l_type);
6737 l_type = target_to_host_flock(l_type);
6738 if (l_type < 0) {
6739 return l_type;
6741 fl->l_type = l_type;
6742 __get_user(fl->l_whence, &target_fl->l_whence);
6743 __get_user(fl->l_start, &target_fl->l_start);
6744 __get_user(fl->l_len, &target_fl->l_len);
6745 __get_user(fl->l_pid, &target_fl->l_pid);
6746 unlock_user_struct(target_fl, target_flock_addr, 0);
6747 return 0;
6750 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6751 const struct flock64 *fl)
6753 struct target_flock64 *target_fl;
6754 short l_type;
6756 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6757 return -TARGET_EFAULT;
6760 l_type = host_to_target_flock(fl->l_type);
6761 __put_user(l_type, &target_fl->l_type);
6762 __put_user(fl->l_whence, &target_fl->l_whence);
6763 __put_user(fl->l_start, &target_fl->l_start);
6764 __put_user(fl->l_len, &target_fl->l_len);
6765 __put_user(fl->l_pid, &target_fl->l_pid);
6766 unlock_user_struct(target_fl, target_flock_addr, 1);
6767 return 0;
6770 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6772 struct flock64 fl64;
6773 #ifdef F_GETOWN_EX
6774 struct f_owner_ex fox;
6775 struct target_f_owner_ex *target_fox;
6776 #endif
6777 abi_long ret;
6778 int host_cmd = target_to_host_fcntl_cmd(cmd);
6780 if (host_cmd == -TARGET_EINVAL)
6781 return host_cmd;
6783 switch(cmd) {
6784 case TARGET_F_GETLK:
6785 ret = copy_from_user_flock(&fl64, arg);
6786 if (ret) {
6787 return ret;
6789 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6790 if (ret == 0) {
6791 ret = copy_to_user_flock(arg, &fl64);
6793 break;
6795 case TARGET_F_SETLK:
6796 case TARGET_F_SETLKW:
6797 ret = copy_from_user_flock(&fl64, arg);
6798 if (ret) {
6799 return ret;
6801 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6802 break;
6804 case TARGET_F_GETLK64:
6805 ret = copy_from_user_flock64(&fl64, arg);
6806 if (ret) {
6807 return ret;
6809 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6810 if (ret == 0) {
6811 ret = copy_to_user_flock64(arg, &fl64);
6813 break;
6814 case TARGET_F_SETLK64:
6815 case TARGET_F_SETLKW64:
6816 ret = copy_from_user_flock64(&fl64, arg);
6817 if (ret) {
6818 return ret;
6820 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6821 break;
6823 case TARGET_F_GETFL:
6824 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6825 if (ret >= 0) {
6826 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6828 break;
6830 case TARGET_F_SETFL:
6831 ret = get_errno(safe_fcntl(fd, host_cmd,
6832 target_to_host_bitmask(arg,
6833 fcntl_flags_tbl)));
6834 break;
6836 #ifdef F_GETOWN_EX
6837 case TARGET_F_GETOWN_EX:
6838 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6839 if (ret >= 0) {
6840 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6841 return -TARGET_EFAULT;
6842 target_fox->type = tswap32(fox.type);
6843 target_fox->pid = tswap32(fox.pid);
6844 unlock_user_struct(target_fox, arg, 1);
6846 break;
6847 #endif
6849 #ifdef F_SETOWN_EX
6850 case TARGET_F_SETOWN_EX:
6851 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6852 return -TARGET_EFAULT;
6853 fox.type = tswap32(target_fox->type);
6854 fox.pid = tswap32(target_fox->pid);
6855 unlock_user_struct(target_fox, arg, 0);
6856 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6857 break;
6858 #endif
6860 case TARGET_F_SETOWN:
6861 case TARGET_F_GETOWN:
6862 case TARGET_F_SETSIG:
6863 case TARGET_F_GETSIG:
6864 case TARGET_F_SETLEASE:
6865 case TARGET_F_GETLEASE:
6866 case TARGET_F_SETPIPE_SZ:
6867 case TARGET_F_GETPIPE_SZ:
6868 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6869 break;
6871 default:
6872 ret = get_errno(safe_fcntl(fd, cmd, arg));
6873 break;
6875 return ret;
#ifdef USE_UID16

/* Squash a 32-bit uid into the 16-bit range, like the kernel's
 * high2lowuid(): anything that doesn't fit reads as the overflow
 * id 65534. */
static inline int high2lowuid(int uid)
{
    return uid > 65535 ? 65534 : uid;
}

static inline int high2lowgid(int gid)
{
    return gid > 65535 ? 65534 : gid;
}

/* Widen a 16-bit id; the 16-bit "unchanged" sentinel -1 must stay -1. */
static inline int low2highuid(int uid)
{
    return (int16_t)uid == -1 ? -1 : uid;
}

static inline int low2highgid(int gid)
{
    return (int16_t)gid == -1 ? -1 : gid;
}

static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */

/* 32-bit uid ABIs: all of these are identity mappings. */
static inline int high2lowuid(int uid)
{
    return uid;
}

static inline int high2lowgid(int gid)
{
    return gid;
}

static inline int low2highuid(int uid)
{
    return uid;
}

static inline int low2highgid(int gid)
{
    return gid;
}

static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

/* Raw per-thread wrappers generated by the _syscall macros. */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
/* One-time initialization for the syscall layer: registers the thunk
 * struct descriptions, builds the errno reverse-mapping table, and
 * patches the computed size field into each ioctl table entry.
 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);
/* Register every structure listed in syscall_types.h with the thunk
 * marshalling machinery. */
#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            /* An all-ones size field marks "fill in at runtime"; the real
             * size comes from the thunk description of the pointed-to type. */
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
#if TARGET_ABI_BITS == 32
/* Assemble a 64-bit file offset from a guest register pair, honouring
 * the guest's word order. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
    uint64_t hi, lo;

#ifdef TARGET_WORDS_BIGENDIAN
    hi = word0;
    lo = word1;
#else
    hi = word1;
    lo = word0;
#endif
    return (hi << 32) | lo;
}
#else /* TARGET_ABI_BITS == 32 */
/* On 64-bit ABIs the offset already arrives in a single register. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64: some ABIs insert a pad register before the 64-bit offset
 * pair, so shift the argument window when the pair must be aligned. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    int aligned = regpairs_aligned(cpu_env, TARGET_NR_truncate64);
    abi_long w0 = aligned ? arg3 : arg2;
    abi_long w1 = aligned ? arg4 : arg3;

    return get_errno(truncate64(arg1, target_offset64(w0, w1)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair alignment handling as truncate64. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    int aligned = regpairs_aligned(cpu_env, TARGET_NR_ftruncate64);
    abi_long w0 = aligned ? arg3 : arg2;
    abi_long w1 = aligned ? arg4 : arg3;

    return get_errno(ftruncate64(arg1, target_offset64(w0, w1)));
}
#endif
7074 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
7075 abi_ulong target_addr)
7077 struct target_timespec *target_ts;
7079 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
7080 return -TARGET_EFAULT;
7081 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
7082 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
7083 unlock_user_struct(target_ts, target_addr, 0);
7084 return 0;
7087 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
7088 struct timespec *host_ts)
7090 struct target_timespec *target_ts;
7092 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
7093 return -TARGET_EFAULT;
7094 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
7095 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
7096 unlock_user_struct(target_ts, target_addr, 1);
7097 return 0;
7100 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
7101 abi_ulong target_addr)
7103 struct target_itimerspec *target_itspec;
7105 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
7106 return -TARGET_EFAULT;
7109 host_itspec->it_interval.tv_sec =
7110 tswapal(target_itspec->it_interval.tv_sec);
7111 host_itspec->it_interval.tv_nsec =
7112 tswapal(target_itspec->it_interval.tv_nsec);
7113 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
7114 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
7116 unlock_user_struct(target_itspec, target_addr, 1);
7117 return 0;
7120 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7121 struct itimerspec *host_its)
7123 struct target_itimerspec *target_itspec;
7125 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
7126 return -TARGET_EFAULT;
7129 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
7130 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
7132 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
7133 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
7135 unlock_user_struct(target_itspec, target_addr, 0);
7136 return 0;
7139 static inline abi_long target_to_host_timex(struct timex *host_tx,
7140 abi_long target_addr)
7142 struct target_timex *target_tx;
7144 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7145 return -TARGET_EFAULT;
7148 __get_user(host_tx->modes, &target_tx->modes);
7149 __get_user(host_tx->offset, &target_tx->offset);
7150 __get_user(host_tx->freq, &target_tx->freq);
7151 __get_user(host_tx->maxerror, &target_tx->maxerror);
7152 __get_user(host_tx->esterror, &target_tx->esterror);
7153 __get_user(host_tx->status, &target_tx->status);
7154 __get_user(host_tx->constant, &target_tx->constant);
7155 __get_user(host_tx->precision, &target_tx->precision);
7156 __get_user(host_tx->tolerance, &target_tx->tolerance);
7157 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7158 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7159 __get_user(host_tx->tick, &target_tx->tick);
7160 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7161 __get_user(host_tx->jitter, &target_tx->jitter);
7162 __get_user(host_tx->shift, &target_tx->shift);
7163 __get_user(host_tx->stabil, &target_tx->stabil);
7164 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7165 __get_user(host_tx->calcnt, &target_tx->calcnt);
7166 __get_user(host_tx->errcnt, &target_tx->errcnt);
7167 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7168 __get_user(host_tx->tai, &target_tx->tai);
7170 unlock_user_struct(target_tx, target_addr, 0);
7171 return 0;
7174 static inline abi_long host_to_target_timex(abi_long target_addr,
7175 struct timex *host_tx)
7177 struct target_timex *target_tx;
7179 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7180 return -TARGET_EFAULT;
7183 __put_user(host_tx->modes, &target_tx->modes);
7184 __put_user(host_tx->offset, &target_tx->offset);
7185 __put_user(host_tx->freq, &target_tx->freq);
7186 __put_user(host_tx->maxerror, &target_tx->maxerror);
7187 __put_user(host_tx->esterror, &target_tx->esterror);
7188 __put_user(host_tx->status, &target_tx->status);
7189 __put_user(host_tx->constant, &target_tx->constant);
7190 __put_user(host_tx->precision, &target_tx->precision);
7191 __put_user(host_tx->tolerance, &target_tx->tolerance);
7192 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7193 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7194 __put_user(host_tx->tick, &target_tx->tick);
7195 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7196 __put_user(host_tx->jitter, &target_tx->jitter);
7197 __put_user(host_tx->shift, &target_tx->shift);
7198 __put_user(host_tx->stabil, &target_tx->stabil);
7199 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7200 __put_user(host_tx->calcnt, &target_tx->calcnt);
7201 __put_user(host_tx->errcnt, &target_tx->errcnt);
7202 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7203 __put_user(host_tx->tai, &target_tx->tai);
7205 unlock_user_struct(target_tx, target_addr, 1);
7206 return 0;
7210 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7211 abi_ulong target_addr)
7213 struct target_sigevent *target_sevp;
7215 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7216 return -TARGET_EFAULT;
7219 /* This union is awkward on 64 bit systems because it has a 32 bit
7220 * integer and a pointer in it; we follow the conversion approach
7221 * used for handling sigval types in signal.c so the guest should get
7222 * the correct value back even if we did a 64 bit byteswap and it's
7223 * using the 32 bit integer.
7225 host_sevp->sigev_value.sival_ptr =
7226 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7227 host_sevp->sigev_signo =
7228 target_to_host_signal(tswap32(target_sevp->sigev_signo));
7229 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7230 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7232 unlock_user_struct(target_sevp, target_addr, 1);
7233 return 0;
#if defined(TARGET_NR_mlockall)
/* Translate guest MCL_* flag bits into the host's values. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int host_bits = (arg & TARGET_MLOCKALL_MCL_CURRENT ? MCL_CURRENT : 0)
                  | (arg & TARGET_MLOCKALL_MCL_FUTURE ? MCL_FUTURE : 0);

    return host_bits;
}
#endif
7251 static inline abi_long host_to_target_stat64(void *cpu_env,
7252 abi_ulong target_addr,
7253 struct stat *host_st)
7255 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7256 if (((CPUARMState *)cpu_env)->eabi) {
7257 struct target_eabi_stat64 *target_st;
7259 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7260 return -TARGET_EFAULT;
7261 memset(target_st, 0, sizeof(struct target_eabi_stat64));
7262 __put_user(host_st->st_dev, &target_st->st_dev);
7263 __put_user(host_st->st_ino, &target_st->st_ino);
7264 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7265 __put_user(host_st->st_ino, &target_st->__st_ino);
7266 #endif
7267 __put_user(host_st->st_mode, &target_st->st_mode);
7268 __put_user(host_st->st_nlink, &target_st->st_nlink);
7269 __put_user(host_st->st_uid, &target_st->st_uid);
7270 __put_user(host_st->st_gid, &target_st->st_gid);
7271 __put_user(host_st->st_rdev, &target_st->st_rdev);
7272 __put_user(host_st->st_size, &target_st->st_size);
7273 __put_user(host_st->st_blksize, &target_st->st_blksize);
7274 __put_user(host_st->st_blocks, &target_st->st_blocks);
7275 __put_user(host_st->st_atime, &target_st->target_st_atime);
7276 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7277 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7278 unlock_user_struct(target_st, target_addr, 1);
7279 } else
7280 #endif
7282 #if defined(TARGET_HAS_STRUCT_STAT64)
7283 struct target_stat64 *target_st;
7284 #else
7285 struct target_stat *target_st;
7286 #endif
7288 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7289 return -TARGET_EFAULT;
7290 memset(target_st, 0, sizeof(*target_st));
7291 __put_user(host_st->st_dev, &target_st->st_dev);
7292 __put_user(host_st->st_ino, &target_st->st_ino);
7293 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7294 __put_user(host_st->st_ino, &target_st->__st_ino);
7295 #endif
7296 __put_user(host_st->st_mode, &target_st->st_mode);
7297 __put_user(host_st->st_nlink, &target_st->st_nlink);
7298 __put_user(host_st->st_uid, &target_st->st_uid);
7299 __put_user(host_st->st_gid, &target_st->st_gid);
7300 __put_user(host_st->st_rdev, &target_st->st_rdev);
7301 /* XXX: better use of kernel struct */
7302 __put_user(host_st->st_size, &target_st->st_size);
7303 __put_user(host_st->st_blksize, &target_st->st_blksize);
7304 __put_user(host_st->st_blocks, &target_st->st_blocks);
7305 __put_user(host_st->st_atime, &target_st->target_st_atime);
7306 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7307 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7308 unlock_user_struct(target_st, target_addr, 1);
7311 return 0;
7314 /* ??? Using host futex calls even when target atomic operations
7315 are not really atomic probably breaks things. However implementing
7316 futexes locally would make futexes shared between multiple processes
7317 tricky. However they're probably useless because guest atomic
7318 operations won't work either. */
7319 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7320 target_ulong uaddr2, int val3)
7322 struct timespec ts, *pts;
7323 int base_op;
7325 /* ??? We assume FUTEX_* constants are the same on both host
7326 and target. */
7327 #ifdef FUTEX_CMD_MASK
7328 base_op = op & FUTEX_CMD_MASK;
7329 #else
7330 base_op = op;
7331 #endif
7332 switch (base_op) {
7333 case FUTEX_WAIT:
7334 case FUTEX_WAIT_BITSET:
7335 if (timeout) {
7336 pts = &ts;
7337 target_to_host_timespec(pts, timeout);
7338 } else {
7339 pts = NULL;
7341 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
7342 pts, NULL, val3));
7343 case FUTEX_WAKE:
7344 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7345 case FUTEX_FD:
7346 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7347 case FUTEX_REQUEUE:
7348 case FUTEX_CMP_REQUEUE:
7349 case FUTEX_WAKE_OP:
7350 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7351 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7352 But the prototype takes a `struct timespec *'; insert casts
7353 to satisfy the compiler. We do not need to tswap TIMEOUT
7354 since it's not compared to guest memory. */
7355 pts = (struct timespec *)(uintptr_t) timeout;
7356 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
7357 g2h(uaddr2),
7358 (base_op == FUTEX_CMP_REQUEUE
7359 ? tswap32(val3)
7360 : val3)));
7361 default:
7362 return -TARGET_ENOSYS;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): size the handle from the guest's
 * handle_bytes, call the host, and copy the opaque handle plus the
 * mount id back to the guest. */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *guest_fh;
    struct file_handle *host_fh;
    int mnt_id = 0;
    abi_long ret;
    char *fname;
    unsigned int handle_sz, alloc_sz;

    /* handle_bytes is the first field of struct file_handle. */
    if (get_user_s32(handle_sz, handle)) {
        return -TARGET_EFAULT;
    }

    fname = lock_user_string(pathname);
    if (!fname) {
        return -TARGET_EFAULT;
    }

    alloc_sz = sizeof(struct file_handle) + handle_sz;
    guest_fh = lock_user(VERIFY_WRITE, handle, alloc_sz, 0);
    if (!guest_fh) {
        unlock_user(fname, pathname, 0);
        return -TARGET_EFAULT;
    }

    host_fh = g_malloc0(alloc_sz);
    host_fh->handle_bytes = handle_sz;

    ret = get_errno(name_to_handle_at(dirfd, path(fname), host_fh,
                                      &mnt_id, flags));
    unlock_user(fname, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */
    memcpy(guest_fh, host_fh, alloc_sz);
    guest_fh->handle_bytes = tswap32(host_fh->handle_bytes);
    guest_fh->handle_type = tswap32(host_fh->handle_type);
    g_free(host_fh);
    unlock_user(guest_fh, handle, alloc_sz);

    if (put_user_s32(mnt_id, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): copy the guest's opaque handle to host
 * memory, fix up the byte-swapped header fields, and call the host. */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *guest_fh;
    struct file_handle *host_fh;
    unsigned int handle_sz, alloc_sz;
    abi_long ret;

    /* handle_bytes is the first field of struct file_handle. */
    if (get_user_s32(handle_sz, handle)) {
        return -TARGET_EFAULT;
    }

    alloc_sz = sizeof(struct file_handle) + handle_sz;
    guest_fh = lock_user(VERIFY_READ, handle, alloc_sz, 1);
    if (!guest_fh) {
        return -TARGET_EFAULT;
    }

    host_fh = g_memdup(guest_fh, alloc_sz);
    host_fh->handle_bytes = handle_sz;
    host_fh->handle_type = tswap32(guest_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, host_fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(host_fh);

    unlock_user(guest_fh, handle, alloc_sz);

    return ret;
}
#endif
7453 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
/* signalfd siginfo conversion */

/* Byte-swap a signalfd_siginfo from host to guest order and remap the
 * signal number.  NOTE: callers pass the same buffer as both tinfo and
 * info (in-place conversion); all reads below go through "info" so the
 * function is also correct for distinct buffers — the old code read
 * ssi_errno and the SIGBUS fields through tinfo, which only worked by
 * that aliasing.
 */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    /* Test the unswapped host values before any field is converted. */
    if (info->ssi_signo == SIGBUS &&
        (info->ssi_code == BUS_MCEERR_AR ||
         info->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    tinfo->ssi_errno = tswap32(info->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
7495 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
7497 int i;
7499 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
7500 host_to_target_signalfd_siginfo(buf + i, buf + i);
7503 return len;
7506 static TargetFdTrans target_signalfd_trans = {
7507 .host_to_target_data = host_to_target_data_signalfd,
7510 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7512 int host_flags;
7513 target_sigset_t *target_mask;
7514 sigset_t host_mask;
7515 abi_long ret;
7517 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7518 return -TARGET_EINVAL;
7520 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7521 return -TARGET_EFAULT;
7524 target_to_host_sigset(&host_mask, target_mask);
7526 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7528 ret = get_errno(signalfd(fd, &host_mask, host_flags));
7529 if (ret >= 0) {
7530 fd_trans_register(ret, &target_signalfd_trans);
7533 unlock_user_struct(target_mask, mask, 0);
7535 return ret;
7537 #endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same. */
int host_to_target_waitstatus(int status)
{
    int target_status = status;

    if (WIFSIGNALED(status)) {
        target_status = host_to_target_signal(WTERMSIG(status))
                        | (status & ~0x7f);
    } else if (WIFSTOPPED(status)) {
        target_status = (host_to_target_signal(WSTOPSIG(status)) << 8)
                        | (status & 0xff);
    }
    return target_status;
}
7553 static int open_self_cmdline(void *cpu_env, int fd)
7555 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7556 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7557 int i;
7559 for (i = 0; i < bprm->argc; i++) {
7560 size_t len = strlen(bprm->argv[i]) + 1;
7562 if (write(fd, bprm->argv[i], len) != len) {
7563 return -1;
7567 return 0;
7570 static int open_self_maps(void *cpu_env, int fd)
7572 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7573 TaskState *ts = cpu->opaque;
7574 FILE *fp;
7575 char *line = NULL;
7576 size_t len = 0;
7577 ssize_t read;
7579 fp = fopen("/proc/self/maps", "r");
7580 if (fp == NULL) {
7581 return -1;
7584 while ((read = getline(&line, &len, fp)) != -1) {
7585 int fields, dev_maj, dev_min, inode;
7586 uint64_t min, max, offset;
7587 char flag_r, flag_w, flag_x, flag_p;
7588 char path[512] = "";
7589 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7590 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7591 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
7593 if ((fields < 10) || (fields > 11)) {
7594 continue;
7596 if (h2g_valid(min)) {
7597 int flags = page_get_flags(h2g(min));
7598 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
7599 if (page_check_range(h2g(min), max - min, flags) == -1) {
7600 continue;
7602 if (h2g(min) == ts->info->stack_limit) {
7603 pstrcpy(path, sizeof(path), " [stack]");
7605 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
7606 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7607 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7608 flag_x, flag_p, offset, dev_maj, dev_min, inode,
7609 path[0] ? " " : "", path);
7613 free(line);
7614 fclose(fp);
7616 return 0;
7619 static int open_self_stat(void *cpu_env, int fd)
7621 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7622 TaskState *ts = cpu->opaque;
7623 abi_ulong start_stack = ts->info->start_stack;
7624 int i;
7626 for (i = 0; i < 44; i++) {
7627 char buf[128];
7628 int len;
7629 uint64_t val = 0;
7631 if (i == 0) {
7632 /* pid */
7633 val = getpid();
7634 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7635 } else if (i == 1) {
7636 /* app name */
7637 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
7638 } else if (i == 27) {
7639 /* stack bottom */
7640 val = start_stack;
7641 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7642 } else {
7643 /* for the rest, there is MasterCard */
7644 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
7647 len = strlen(buf);
7648 if (write(fd, buf, len) != len) {
7649 return -1;
7653 return 0;
7656 static int open_self_auxv(void *cpu_env, int fd)
7658 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7659 TaskState *ts = cpu->opaque;
7660 abi_ulong auxv = ts->info->saved_auxv;
7661 abi_ulong len = ts->info->auxv_len;
7662 char *ptr;
7665 * Auxiliary vector is stored in target process stack.
7666 * read in whole auxv vector and copy it to file
7668 ptr = lock_user(VERIFY_READ, auxv, len, 0);
7669 if (ptr != NULL) {
7670 while (len > 0) {
7671 ssize_t r;
7672 r = write(fd, ptr, len);
7673 if (r <= 0) {
7674 break;
7676 len -= r;
7677 ptr += r;
7679 lseek(fd, 0, SEEK_SET);
7680 unlock_user(ptr, auxv, len);
7683 return 0;
/*
 * Return 1 if filename names the given /proc entry for the current
 * process — either "/proc/self/<entry>" or "/proc/<pid>/<entry>"
 * where <pid> is our own pid — otherwise return 0.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    const size_t proc_len = strlen("/proc/");

    if (strncmp(filename, "/proc/", proc_len) != 0) {
        return 0;
    }
    filename += proc_len;

    if (strncmp(filename, "self/", strlen("self/")) == 0) {
        filename += strlen("self/");
    } else if (*filename >= '1' && *filename <= '9') {
        /* Numeric pid component: accept only our own pid. */
        char myself[80];

        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(filename, myself, strlen(myself)) != 0) {
            return 0;
        }
        filename += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(filename, entry) == 0;
}
7710 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-match check for a system-wide /proc pathname. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
7716 static int open_net_route(void *cpu_env, int fd)
7718 FILE *fp;
7719 char *line = NULL;
7720 size_t len = 0;
7721 ssize_t read;
7723 fp = fopen("/proc/net/route", "r");
7724 if (fp == NULL) {
7725 return -1;
7728 /* read header */
7730 read = getline(&line, &len, fp);
7731 dprintf(fd, "%s", line);
7733 /* read routes */
7735 while ((read = getline(&line, &len, fp)) != -1) {
7736 char iface[16];
7737 uint32_t dest, gw, mask;
7738 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7739 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7740 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7741 &mask, &mtu, &window, &irtt);
7742 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7743 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7744 metric, tswap32(mask), mtu, window, irtt);
7747 free(line);
7748 fclose(fp);
7750 return 0;
7752 #endif
7754 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7756 struct fake_open {
7757 const char *filename;
7758 int (*fill)(void *cpu_env, int fd);
7759 int (*cmp)(const char *s1, const char *s2);
7761 const struct fake_open *fake_open;
7762 static const struct fake_open fakes[] = {
7763 { "maps", open_self_maps, is_proc_myself },
7764 { "stat", open_self_stat, is_proc_myself },
7765 { "auxv", open_self_auxv, is_proc_myself },
7766 { "cmdline", open_self_cmdline, is_proc_myself },
7767 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7768 { "/proc/net/route", open_net_route, is_proc },
7769 #endif
7770 { NULL, NULL, NULL }
7773 if (is_proc_myself(pathname, "exe")) {
7774 int execfd = qemu_getauxval(AT_EXECFD);
7775 return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7778 for (fake_open = fakes; fake_open->filename; fake_open++) {
7779 if (fake_open->cmp(pathname, fake_open->filename)) {
7780 break;
7784 if (fake_open->filename) {
7785 const char *tmpdir;
7786 char filename[PATH_MAX];
7787 int fd, r;
7789 /* create temporary file to map stat to */
7790 tmpdir = getenv("TMPDIR");
7791 if (!tmpdir)
7792 tmpdir = "/tmp";
7793 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7794 fd = mkstemp(filename);
7795 if (fd < 0) {
7796 return fd;
7798 unlink(filename);
7800 if ((r = fake_open->fill(cpu_env, fd))) {
7801 int e = errno;
7802 close(fd);
7803 errno = e;
7804 return r;
7806 lseek(fd, 0, SEEK_SET);
7808 return fd;
7811 return safe_openat(dirfd, path(pathname), flags, mode);
7814 #define TIMER_MAGIC 0x0caf0000
7815 #define TIMER_MAGIC_MASK 0xffff0000
7817 /* Convert QEMU provided timer ID back to internal 16bit index format */
7818 static target_timer_t get_timer_id(abi_long arg)
7820 target_timer_t timerid = arg;
7822 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7823 return -TARGET_EINVAL;
7826 timerid &= 0xffff;
7828 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7829 return -TARGET_EINVAL;
7832 return timerid;
7835 static abi_long swap_data_eventfd(void *buf, size_t len)
7837 uint64_t *counter = buf;
7838 int i;
7840 if (len < sizeof(uint64_t)) {
7841 return -EINVAL;
7844 for (i = 0; i < len; i += sizeof(uint64_t)) {
7845 *counter = tswap64(*counter);
7846 counter++;
7849 return len;
7852 static TargetFdTrans target_eventfd_trans = {
7853 .host_to_target_data = swap_data_eventfd,
7854 .target_to_host_data = swap_data_eventfd,
#if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
    (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
     defined(__NR_inotify_init1))
/*
 * Byte-swap a buffer of struct inotify_event records in place for the
 * target.  Each fixed-size record header is followed by ev->len bytes
 * of name data, which need no swapping.  Returns len.
 */
static abi_long host_to_target_data_inotify(void *buf, size_t len)
{
    size_t offset = 0;

    while (offset < len) {
        struct inotify_event *ev =
            (struct inotify_event *)((char *)buf + offset);
        /* Capture the name length before ev->len is byte-swapped. */
        uint32_t name_len = ev->len;

        ev->wd = tswap32(ev->wd);
        ev->mask = tswap32(ev->mask);
        ev->cookie = tswap32(ev->cookie);
        ev->len = tswap32(name_len);

        offset += sizeof(struct inotify_event) + name_len;
    }

    return len;
}

static TargetFdTrans target_inotify_trans = {
    .host_to_target_data = host_to_target_data_inotify,
};
#endif
7884 static int target_to_host_cpu_mask(unsigned long *host_mask,
7885 size_t host_size,
7886 abi_ulong target_addr,
7887 size_t target_size)
7889 unsigned target_bits = sizeof(abi_ulong) * 8;
7890 unsigned host_bits = sizeof(*host_mask) * 8;
7891 abi_ulong *target_mask;
7892 unsigned i, j;
7894 assert(host_size >= target_size);
7896 target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7897 if (!target_mask) {
7898 return -TARGET_EFAULT;
7900 memset(host_mask, 0, host_size);
7902 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7903 unsigned bit = i * target_bits;
7904 abi_ulong val;
7906 __get_user(val, &target_mask[i]);
7907 for (j = 0; j < target_bits; j++, bit++) {
7908 if (val & (1UL << j)) {
7909 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7914 unlock_user(target_mask, target_addr, 0);
7915 return 0;
7918 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7919 size_t host_size,
7920 abi_ulong target_addr,
7921 size_t target_size)
7923 unsigned target_bits = sizeof(abi_ulong) * 8;
7924 unsigned host_bits = sizeof(*host_mask) * 8;
7925 abi_ulong *target_mask;
7926 unsigned i, j;
7928 assert(host_size >= target_size);
7930 target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7931 if (!target_mask) {
7932 return -TARGET_EFAULT;
7935 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7936 unsigned bit = i * target_bits;
7937 abi_ulong val = 0;
7939 for (j = 0; j < target_bits; j++, bit++) {
7940 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7941 val |= 1UL << j;
7944 __put_user(val, &target_mask[i]);
7947 unlock_user(target_mask, target_addr, target_size);
7948 return 0;
7951 /* do_syscall() should always have a single exit point at the end so
7952 that actions, such as logging of syscall results, can be performed.
7953 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7954 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
7955 abi_long arg2, abi_long arg3, abi_long arg4,
7956 abi_long arg5, abi_long arg6, abi_long arg7,
7957 abi_long arg8)
7959 CPUState *cpu = ENV_GET_CPU(cpu_env);
7960 abi_long ret;
7961 struct stat st;
7962 struct statfs stfs;
7963 void *p;
7965 #if defined(DEBUG_ERESTARTSYS)
7966 /* Debug-only code for exercising the syscall-restart code paths
7967 * in the per-architecture cpu main loops: restart every syscall
7968 * the guest makes once before letting it through.
7971 static int flag;
7973 flag = !flag;
7974 if (flag) {
7975 return -TARGET_ERESTARTSYS;
7978 #endif
7980 #ifdef DEBUG
7981 gemu_log("syscall %d", num);
7982 #endif
7983 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
7984 if(do_strace)
7985 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
7987 switch(num) {
7988 case TARGET_NR_exit:
7989 /* In old applications this may be used to implement _exit(2).
7990 However in threaded applictions it is used for thread termination,
7991 and _exit_group is used for application termination.
7992 Do thread termination if we have more then one thread. */
7994 if (block_signals()) {
7995 ret = -TARGET_ERESTARTSYS;
7996 break;
7999 cpu_list_lock();
8001 if (CPU_NEXT(first_cpu)) {
8002 TaskState *ts;
8004 /* Remove the CPU from the list. */
8005 QTAILQ_REMOVE(&cpus, cpu, node);
8007 cpu_list_unlock();
8009 ts = cpu->opaque;
8010 if (ts->child_tidptr) {
8011 put_user_u32(0, ts->child_tidptr);
8012 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
8013 NULL, NULL, 0);
8015 thread_cpu = NULL;
8016 object_unref(OBJECT(cpu));
8017 g_free(ts);
8018 rcu_unregister_thread();
8019 pthread_exit(NULL);
8022 cpu_list_unlock();
8023 #ifdef TARGET_GPROF
8024 _mcleanup();
8025 #endif
8026 gdb_exit(cpu_env, arg1);
8027 _exit(arg1);
8028 ret = 0; /* avoid warning */
8029 break;
8030 case TARGET_NR_read:
8031 if (arg3 == 0)
8032 ret = 0;
8033 else {
8034 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8035 goto efault;
8036 ret = get_errno(safe_read(arg1, p, arg3));
8037 if (ret >= 0 &&
8038 fd_trans_host_to_target_data(arg1)) {
8039 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8041 unlock_user(p, arg2, ret);
8043 break;
8044 case TARGET_NR_write:
8045 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8046 goto efault;
8047 if (fd_trans_target_to_host_data(arg1)) {
8048 void *copy = g_malloc(arg3);
8049 memcpy(copy, p, arg3);
8050 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8051 if (ret >= 0) {
8052 ret = get_errno(safe_write(arg1, copy, ret));
8054 g_free(copy);
8055 } else {
8056 ret = get_errno(safe_write(arg1, p, arg3));
8058 unlock_user(p, arg2, 0);
8059 break;
8060 #ifdef TARGET_NR_open
8061 case TARGET_NR_open:
8062 if (!(p = lock_user_string(arg1)))
8063 goto efault;
8064 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8065 target_to_host_bitmask(arg2, fcntl_flags_tbl),
8066 arg3));
8067 fd_trans_unregister(ret);
8068 unlock_user(p, arg1, 0);
8069 break;
8070 #endif
8071 case TARGET_NR_openat:
8072 if (!(p = lock_user_string(arg2)))
8073 goto efault;
8074 ret = get_errno(do_openat(cpu_env, arg1, p,
8075 target_to_host_bitmask(arg3, fcntl_flags_tbl),
8076 arg4));
8077 fd_trans_unregister(ret);
8078 unlock_user(p, arg2, 0);
8079 break;
8080 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8081 case TARGET_NR_name_to_handle_at:
8082 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8083 break;
8084 #endif
8085 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8086 case TARGET_NR_open_by_handle_at:
8087 ret = do_open_by_handle_at(arg1, arg2, arg3);
8088 fd_trans_unregister(ret);
8089 break;
8090 #endif
8091 case TARGET_NR_close:
8092 fd_trans_unregister(arg1);
8093 ret = get_errno(close(arg1));
8094 break;
8095 case TARGET_NR_brk:
8096 ret = do_brk(arg1);
8097 break;
8098 #ifdef TARGET_NR_fork
8099 case TARGET_NR_fork:
8100 ret = get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8101 break;
8102 #endif
8103 #ifdef TARGET_NR_waitpid
8104 case TARGET_NR_waitpid:
8106 int status;
8107 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8108 if (!is_error(ret) && arg2 && ret
8109 && put_user_s32(host_to_target_waitstatus(status), arg2))
8110 goto efault;
8112 break;
8113 #endif
8114 #ifdef TARGET_NR_waitid
8115 case TARGET_NR_waitid:
8117 siginfo_t info;
8118 info.si_pid = 0;
8119 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8120 if (!is_error(ret) && arg3 && info.si_pid != 0) {
8121 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8122 goto efault;
8123 host_to_target_siginfo(p, &info);
8124 unlock_user(p, arg3, sizeof(target_siginfo_t));
8127 break;
8128 #endif
8129 #ifdef TARGET_NR_creat /* not on alpha */
8130 case TARGET_NR_creat:
8131 if (!(p = lock_user_string(arg1)))
8132 goto efault;
8133 ret = get_errno(creat(p, arg2));
8134 fd_trans_unregister(ret);
8135 unlock_user(p, arg1, 0);
8136 break;
8137 #endif
8138 #ifdef TARGET_NR_link
8139 case TARGET_NR_link:
8141 void * p2;
8142 p = lock_user_string(arg1);
8143 p2 = lock_user_string(arg2);
8144 if (!p || !p2)
8145 ret = -TARGET_EFAULT;
8146 else
8147 ret = get_errno(link(p, p2));
8148 unlock_user(p2, arg2, 0);
8149 unlock_user(p, arg1, 0);
8151 break;
8152 #endif
8153 #if defined(TARGET_NR_linkat)
8154 case TARGET_NR_linkat:
8156 void * p2 = NULL;
8157 if (!arg2 || !arg4)
8158 goto efault;
8159 p = lock_user_string(arg2);
8160 p2 = lock_user_string(arg4);
8161 if (!p || !p2)
8162 ret = -TARGET_EFAULT;
8163 else
8164 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8165 unlock_user(p, arg2, 0);
8166 unlock_user(p2, arg4, 0);
8168 break;
8169 #endif
8170 #ifdef TARGET_NR_unlink
8171 case TARGET_NR_unlink:
8172 if (!(p = lock_user_string(arg1)))
8173 goto efault;
8174 ret = get_errno(unlink(p));
8175 unlock_user(p, arg1, 0);
8176 break;
8177 #endif
8178 #if defined(TARGET_NR_unlinkat)
8179 case TARGET_NR_unlinkat:
8180 if (!(p = lock_user_string(arg2)))
8181 goto efault;
8182 ret = get_errno(unlinkat(arg1, p, arg3));
8183 unlock_user(p, arg2, 0);
8184 break;
8185 #endif
8186 case TARGET_NR_execve:
8188 char **argp, **envp;
8189 int argc, envc;
8190 abi_ulong gp;
8191 abi_ulong guest_argp;
8192 abi_ulong guest_envp;
8193 abi_ulong addr;
8194 char **q;
8195 int total_size = 0;
8197 argc = 0;
8198 guest_argp = arg2;
8199 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8200 if (get_user_ual(addr, gp))
8201 goto efault;
8202 if (!addr)
8203 break;
8204 argc++;
8206 envc = 0;
8207 guest_envp = arg3;
8208 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8209 if (get_user_ual(addr, gp))
8210 goto efault;
8211 if (!addr)
8212 break;
8213 envc++;
8216 argp = g_new0(char *, argc + 1);
8217 envp = g_new0(char *, envc + 1);
8219 for (gp = guest_argp, q = argp; gp;
8220 gp += sizeof(abi_ulong), q++) {
8221 if (get_user_ual(addr, gp))
8222 goto execve_efault;
8223 if (!addr)
8224 break;
8225 if (!(*q = lock_user_string(addr)))
8226 goto execve_efault;
8227 total_size += strlen(*q) + 1;
8229 *q = NULL;
8231 for (gp = guest_envp, q = envp; gp;
8232 gp += sizeof(abi_ulong), q++) {
8233 if (get_user_ual(addr, gp))
8234 goto execve_efault;
8235 if (!addr)
8236 break;
8237 if (!(*q = lock_user_string(addr)))
8238 goto execve_efault;
8239 total_size += strlen(*q) + 1;
8241 *q = NULL;
8243 if (!(p = lock_user_string(arg1)))
8244 goto execve_efault;
8245 /* Although execve() is not an interruptible syscall it is
8246 * a special case where we must use the safe_syscall wrapper:
8247 * if we allow a signal to happen before we make the host
8248 * syscall then we will 'lose' it, because at the point of
8249 * execve the process leaves QEMU's control. So we use the
8250 * safe syscall wrapper to ensure that we either take the
8251 * signal as a guest signal, or else it does not happen
8252 * before the execve completes and makes it the other
8253 * program's problem.
8255 ret = get_errno(safe_execve(p, argp, envp));
8256 unlock_user(p, arg1, 0);
8258 goto execve_end;
8260 execve_efault:
8261 ret = -TARGET_EFAULT;
8263 execve_end:
8264 for (gp = guest_argp, q = argp; *q;
8265 gp += sizeof(abi_ulong), q++) {
8266 if (get_user_ual(addr, gp)
8267 || !addr)
8268 break;
8269 unlock_user(*q, addr, 0);
8271 for (gp = guest_envp, q = envp; *q;
8272 gp += sizeof(abi_ulong), q++) {
8273 if (get_user_ual(addr, gp)
8274 || !addr)
8275 break;
8276 unlock_user(*q, addr, 0);
8279 g_free(argp);
8280 g_free(envp);
8282 break;
8283 case TARGET_NR_chdir:
8284 if (!(p = lock_user_string(arg1)))
8285 goto efault;
8286 ret = get_errno(chdir(p));
8287 unlock_user(p, arg1, 0);
8288 break;
8289 #ifdef TARGET_NR_time
8290 case TARGET_NR_time:
8292 time_t host_time;
8293 ret = get_errno(time(&host_time));
8294 if (!is_error(ret)
8295 && arg1
8296 && put_user_sal(host_time, arg1))
8297 goto efault;
8299 break;
8300 #endif
8301 #ifdef TARGET_NR_mknod
8302 case TARGET_NR_mknod:
8303 if (!(p = lock_user_string(arg1)))
8304 goto efault;
8305 ret = get_errno(mknod(p, arg2, arg3));
8306 unlock_user(p, arg1, 0);
8307 break;
8308 #endif
8309 #if defined(TARGET_NR_mknodat)
8310 case TARGET_NR_mknodat:
8311 if (!(p = lock_user_string(arg2)))
8312 goto efault;
8313 ret = get_errno(mknodat(arg1, p, arg3, arg4));
8314 unlock_user(p, arg2, 0);
8315 break;
8316 #endif
8317 #ifdef TARGET_NR_chmod
8318 case TARGET_NR_chmod:
8319 if (!(p = lock_user_string(arg1)))
8320 goto efault;
8321 ret = get_errno(chmod(p, arg2));
8322 unlock_user(p, arg1, 0);
8323 break;
8324 #endif
8325 #ifdef TARGET_NR_break
8326 case TARGET_NR_break:
8327 goto unimplemented;
8328 #endif
8329 #ifdef TARGET_NR_oldstat
8330 case TARGET_NR_oldstat:
8331 goto unimplemented;
8332 #endif
8333 case TARGET_NR_lseek:
8334 ret = get_errno(lseek(arg1, arg2, arg3));
8335 break;
8336 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8337 /* Alpha specific */
8338 case TARGET_NR_getxpid:
8339 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8340 ret = get_errno(getpid());
8341 break;
8342 #endif
8343 #ifdef TARGET_NR_getpid
8344 case TARGET_NR_getpid:
8345 ret = get_errno(getpid());
8346 break;
8347 #endif
8348 case TARGET_NR_mount:
8350 /* need to look at the data field */
8351 void *p2, *p3;
8353 if (arg1) {
8354 p = lock_user_string(arg1);
8355 if (!p) {
8356 goto efault;
8358 } else {
8359 p = NULL;
8362 p2 = lock_user_string(arg2);
8363 if (!p2) {
8364 if (arg1) {
8365 unlock_user(p, arg1, 0);
8367 goto efault;
8370 if (arg3) {
8371 p3 = lock_user_string(arg3);
8372 if (!p3) {
8373 if (arg1) {
8374 unlock_user(p, arg1, 0);
8376 unlock_user(p2, arg2, 0);
8377 goto efault;
8379 } else {
8380 p3 = NULL;
8383 /* FIXME - arg5 should be locked, but it isn't clear how to
8384 * do that since it's not guaranteed to be a NULL-terminated
8385 * string.
8387 if (!arg5) {
8388 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8389 } else {
8390 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8392 ret = get_errno(ret);
8394 if (arg1) {
8395 unlock_user(p, arg1, 0);
8397 unlock_user(p2, arg2, 0);
8398 if (arg3) {
8399 unlock_user(p3, arg3, 0);
8402 break;
8403 #ifdef TARGET_NR_umount
8404 case TARGET_NR_umount:
8405 if (!(p = lock_user_string(arg1)))
8406 goto efault;
8407 ret = get_errno(umount(p));
8408 unlock_user(p, arg1, 0);
8409 break;
8410 #endif
8411 #ifdef TARGET_NR_stime /* not on alpha */
8412 case TARGET_NR_stime:
8414 time_t host_time;
8415 if (get_user_sal(host_time, arg1))
8416 goto efault;
8417 ret = get_errno(stime(&host_time));
8419 break;
8420 #endif
8421 case TARGET_NR_ptrace:
8422 goto unimplemented;
8423 #ifdef TARGET_NR_alarm /* not on alpha */
8424 case TARGET_NR_alarm:
8425 ret = alarm(arg1);
8426 break;
8427 #endif
8428 #ifdef TARGET_NR_oldfstat
8429 case TARGET_NR_oldfstat:
8430 goto unimplemented;
8431 #endif
8432 #ifdef TARGET_NR_pause /* not on alpha */
8433 case TARGET_NR_pause:
8434 if (!block_signals()) {
8435 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8437 ret = -TARGET_EINTR;
8438 break;
8439 #endif
8440 #ifdef TARGET_NR_utime
8441 case TARGET_NR_utime:
8443 struct utimbuf tbuf, *host_tbuf;
8444 struct target_utimbuf *target_tbuf;
8445 if (arg2) {
8446 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8447 goto efault;
8448 tbuf.actime = tswapal(target_tbuf->actime);
8449 tbuf.modtime = tswapal(target_tbuf->modtime);
8450 unlock_user_struct(target_tbuf, arg2, 0);
8451 host_tbuf = &tbuf;
8452 } else {
8453 host_tbuf = NULL;
8455 if (!(p = lock_user_string(arg1)))
8456 goto efault;
8457 ret = get_errno(utime(p, host_tbuf));
8458 unlock_user(p, arg1, 0);
8460 break;
8461 #endif
8462 #ifdef TARGET_NR_utimes
8463 case TARGET_NR_utimes:
8465 struct timeval *tvp, tv[2];
8466 if (arg2) {
8467 if (copy_from_user_timeval(&tv[0], arg2)
8468 || copy_from_user_timeval(&tv[1],
8469 arg2 + sizeof(struct target_timeval)))
8470 goto efault;
8471 tvp = tv;
8472 } else {
8473 tvp = NULL;
8475 if (!(p = lock_user_string(arg1)))
8476 goto efault;
8477 ret = get_errno(utimes(p, tvp));
8478 unlock_user(p, arg1, 0);
8480 break;
8481 #endif
8482 #if defined(TARGET_NR_futimesat)
8483 case TARGET_NR_futimesat:
8485 struct timeval *tvp, tv[2];
8486 if (arg3) {
8487 if (copy_from_user_timeval(&tv[0], arg3)
8488 || copy_from_user_timeval(&tv[1],
8489 arg3 + sizeof(struct target_timeval)))
8490 goto efault;
8491 tvp = tv;
8492 } else {
8493 tvp = NULL;
8495 if (!(p = lock_user_string(arg2)))
8496 goto efault;
8497 ret = get_errno(futimesat(arg1, path(p), tvp));
8498 unlock_user(p, arg2, 0);
8500 break;
8501 #endif
8502 #ifdef TARGET_NR_stty
8503 case TARGET_NR_stty:
8504 goto unimplemented;
8505 #endif
8506 #ifdef TARGET_NR_gtty
8507 case TARGET_NR_gtty:
8508 goto unimplemented;
8509 #endif
8510 #ifdef TARGET_NR_access
8511 case TARGET_NR_access:
8512 if (!(p = lock_user_string(arg1)))
8513 goto efault;
8514 ret = get_errno(access(path(p), arg2));
8515 unlock_user(p, arg1, 0);
8516 break;
8517 #endif
8518 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8519 case TARGET_NR_faccessat:
8520 if (!(p = lock_user_string(arg2)))
8521 goto efault;
8522 ret = get_errno(faccessat(arg1, p, arg3, 0));
8523 unlock_user(p, arg2, 0);
8524 break;
8525 #endif
8526 #ifdef TARGET_NR_nice /* not on alpha */
8527 case TARGET_NR_nice:
8528 ret = get_errno(nice(arg1));
8529 break;
8530 #endif
8531 #ifdef TARGET_NR_ftime
8532 case TARGET_NR_ftime:
8533 goto unimplemented;
8534 #endif
8535 case TARGET_NR_sync:
8536 sync();
8537 ret = 0;
8538 break;
8539 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8540 case TARGET_NR_syncfs:
8541 ret = get_errno(syncfs(arg1));
8542 break;
8543 #endif
8544 case TARGET_NR_kill:
8545 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8546 break;
8547 #ifdef TARGET_NR_rename
8548 case TARGET_NR_rename:
8550 void *p2;
8551 p = lock_user_string(arg1);
8552 p2 = lock_user_string(arg2);
8553 if (!p || !p2)
8554 ret = -TARGET_EFAULT;
8555 else
8556 ret = get_errno(rename(p, p2));
8557 unlock_user(p2, arg2, 0);
8558 unlock_user(p, arg1, 0);
8560 break;
8561 #endif
8562 #if defined(TARGET_NR_renameat)
8563 case TARGET_NR_renameat:
8565 void *p2;
8566 p = lock_user_string(arg2);
8567 p2 = lock_user_string(arg4);
8568 if (!p || !p2)
8569 ret = -TARGET_EFAULT;
8570 else
8571 ret = get_errno(renameat(arg1, p, arg3, p2));
8572 unlock_user(p2, arg4, 0);
8573 unlock_user(p, arg2, 0);
8575 break;
8576 #endif
8577 #if defined(TARGET_NR_renameat2)
8578 case TARGET_NR_renameat2:
8580 void *p2;
8581 p = lock_user_string(arg2);
8582 p2 = lock_user_string(arg4);
8583 if (!p || !p2) {
8584 ret = -TARGET_EFAULT;
8585 } else {
8586 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8588 unlock_user(p2, arg4, 0);
8589 unlock_user(p, arg2, 0);
8591 break;
8592 #endif
8593 #ifdef TARGET_NR_mkdir
8594 case TARGET_NR_mkdir:
8595 if (!(p = lock_user_string(arg1)))
8596 goto efault;
8597 ret = get_errno(mkdir(p, arg2));
8598 unlock_user(p, arg1, 0);
8599 break;
8600 #endif
8601 #if defined(TARGET_NR_mkdirat)
8602 case TARGET_NR_mkdirat:
8603 if (!(p = lock_user_string(arg2)))
8604 goto efault;
8605 ret = get_errno(mkdirat(arg1, p, arg3));
8606 unlock_user(p, arg2, 0);
8607 break;
8608 #endif
8609 #ifdef TARGET_NR_rmdir
8610 case TARGET_NR_rmdir:
8611 if (!(p = lock_user_string(arg1)))
8612 goto efault;
8613 ret = get_errno(rmdir(p));
8614 unlock_user(p, arg1, 0);
8615 break;
8616 #endif
8617 case TARGET_NR_dup:
8618 ret = get_errno(dup(arg1));
8619 if (ret >= 0) {
8620 fd_trans_dup(arg1, ret);
8622 break;
8623 #ifdef TARGET_NR_pipe
8624 case TARGET_NR_pipe:
8625 ret = do_pipe(cpu_env, arg1, 0, 0);
8626 break;
8627 #endif
8628 #ifdef TARGET_NR_pipe2
8629 case TARGET_NR_pipe2:
8630 ret = do_pipe(cpu_env, arg1,
8631 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8632 break;
8633 #endif
8634 case TARGET_NR_times:
8636 struct target_tms *tmsp;
8637 struct tms tms;
8638 ret = get_errno(times(&tms));
8639 if (arg1) {
8640 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8641 if (!tmsp)
8642 goto efault;
8643 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8644 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8645 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8646 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8648 if (!is_error(ret))
8649 ret = host_to_target_clock_t(ret);
8651 break;
8652 #ifdef TARGET_NR_prof
8653 case TARGET_NR_prof:
8654 goto unimplemented;
8655 #endif
8656 #ifdef TARGET_NR_signal
8657 case TARGET_NR_signal:
8658 goto unimplemented;
8659 #endif
8660 case TARGET_NR_acct:
8661 if (arg1 == 0) {
8662 ret = get_errno(acct(NULL));
8663 } else {
8664 if (!(p = lock_user_string(arg1)))
8665 goto efault;
8666 ret = get_errno(acct(path(p)));
8667 unlock_user(p, arg1, 0);
8669 break;
8670 #ifdef TARGET_NR_umount2
8671 case TARGET_NR_umount2:
8672 if (!(p = lock_user_string(arg1)))
8673 goto efault;
8674 ret = get_errno(umount2(p, arg2));
8675 unlock_user(p, arg1, 0);
8676 break;
8677 #endif
8678 #ifdef TARGET_NR_lock
8679 case TARGET_NR_lock:
8680 goto unimplemented;
8681 #endif
8682 case TARGET_NR_ioctl:
8683 ret = do_ioctl(arg1, arg2, arg3);
8684 break;
8685 #ifdef TARGET_NR_fcntl
8686 case TARGET_NR_fcntl:
8687 ret = do_fcntl(arg1, arg2, arg3);
8688 break;
8689 #endif
8690 #ifdef TARGET_NR_mpx
8691 case TARGET_NR_mpx:
8692 goto unimplemented;
8693 #endif
8694 case TARGET_NR_setpgid:
8695 ret = get_errno(setpgid(arg1, arg2));
8696 break;
8697 #ifdef TARGET_NR_ulimit
8698 case TARGET_NR_ulimit:
8699 goto unimplemented;
8700 #endif
8701 #ifdef TARGET_NR_oldolduname
8702 case TARGET_NR_oldolduname:
8703 goto unimplemented;
8704 #endif
8705 case TARGET_NR_umask:
8706 ret = get_errno(umask(arg1));
8707 break;
8708 case TARGET_NR_chroot:
8709 if (!(p = lock_user_string(arg1)))
8710 goto efault;
8711 ret = get_errno(chroot(p));
8712 unlock_user(p, arg1, 0);
8713 break;
8714 #ifdef TARGET_NR_ustat
8715 case TARGET_NR_ustat:
8716 goto unimplemented;
8717 #endif
8718 #ifdef TARGET_NR_dup2
8719 case TARGET_NR_dup2:
8720 ret = get_errno(dup2(arg1, arg2));
8721 if (ret >= 0) {
8722 fd_trans_dup(arg1, arg2);
8724 break;
8725 #endif
8726 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8727 case TARGET_NR_dup3:
8729 int host_flags;
8731 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8732 return -EINVAL;
8734 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8735 ret = get_errno(dup3(arg1, arg2, host_flags));
8736 if (ret >= 0) {
8737 fd_trans_dup(arg1, arg2);
8739 break;
8741 #endif
8742 #ifdef TARGET_NR_getppid /* not on alpha */
8743 case TARGET_NR_getppid:
8744 ret = get_errno(getppid());
8745 break;
8746 #endif
8747 #ifdef TARGET_NR_getpgrp
8748 case TARGET_NR_getpgrp:
8749 ret = get_errno(getpgrp());
8750 break;
8751 #endif
8752 case TARGET_NR_setsid:
8753 ret = get_errno(setsid());
8754 break;
8755 #ifdef TARGET_NR_sigaction
8756 case TARGET_NR_sigaction:
8758 #if defined(TARGET_ALPHA)
8759 struct target_sigaction act, oact, *pact = 0;
8760 struct target_old_sigaction *old_act;
8761 if (arg2) {
8762 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8763 goto efault;
8764 act._sa_handler = old_act->_sa_handler;
8765 target_siginitset(&act.sa_mask, old_act->sa_mask);
8766 act.sa_flags = old_act->sa_flags;
8767 act.sa_restorer = 0;
8768 unlock_user_struct(old_act, arg2, 0);
8769 pact = &act;
8771 ret = get_errno(do_sigaction(arg1, pact, &oact));
8772 if (!is_error(ret) && arg3) {
8773 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8774 goto efault;
8775 old_act->_sa_handler = oact._sa_handler;
8776 old_act->sa_mask = oact.sa_mask.sig[0];
8777 old_act->sa_flags = oact.sa_flags;
8778 unlock_user_struct(old_act, arg3, 1);
8780 #elif defined(TARGET_MIPS)
8781 struct target_sigaction act, oact, *pact, *old_act;
8783 if (arg2) {
8784 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8785 goto efault;
8786 act._sa_handler = old_act->_sa_handler;
8787 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8788 act.sa_flags = old_act->sa_flags;
8789 unlock_user_struct(old_act, arg2, 0);
8790 pact = &act;
8791 } else {
8792 pact = NULL;
8795 ret = get_errno(do_sigaction(arg1, pact, &oact));
8797 if (!is_error(ret) && arg3) {
8798 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8799 goto efault;
8800 old_act->_sa_handler = oact._sa_handler;
8801 old_act->sa_flags = oact.sa_flags;
8802 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8803 old_act->sa_mask.sig[1] = 0;
8804 old_act->sa_mask.sig[2] = 0;
8805 old_act->sa_mask.sig[3] = 0;
8806 unlock_user_struct(old_act, arg3, 1);
8808 #else
8809 struct target_old_sigaction *old_act;
8810 struct target_sigaction act, oact, *pact;
8811 if (arg2) {
8812 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8813 goto efault;
8814 act._sa_handler = old_act->_sa_handler;
8815 target_siginitset(&act.sa_mask, old_act->sa_mask);
8816 act.sa_flags = old_act->sa_flags;
8817 act.sa_restorer = old_act->sa_restorer;
8818 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8819 act.ka_restorer = 0;
8820 #endif
8821 unlock_user_struct(old_act, arg2, 0);
8822 pact = &act;
8823 } else {
8824 pact = NULL;
8826 ret = get_errno(do_sigaction(arg1, pact, &oact));
8827 if (!is_error(ret) && arg3) {
8828 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8829 goto efault;
8830 old_act->_sa_handler = oact._sa_handler;
8831 old_act->sa_mask = oact.sa_mask.sig[0];
8832 old_act->sa_flags = oact.sa_flags;
8833 old_act->sa_restorer = oact.sa_restorer;
8834 unlock_user_struct(old_act, arg3, 1);
8836 #endif
8838 break;
8839 #endif
8840 case TARGET_NR_rt_sigaction:
        /* rt_sigaction: install and/or query a signal handler through
         * do_sigaction(). Two compile-time variants: the Alpha one below
         * (separate target_rt_sigaction struct, restorer in arg5) and the
         * generic one, which copies the guest struct directly and rejects
         * any sigsetsize that does not match target_sigset_t. */
8842 #if defined(TARGET_ALPHA)
8843 /* For Alpha and SPARC this is a 5 argument syscall, with
8844 * a 'restorer' parameter which must be copied into the
8845 * sa_restorer field of the sigaction struct.
8846 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8847 * and arg5 is the sigsetsize.
8848 * Alpha also has a separate rt_sigaction struct that it uses
8849 * here; SPARC uses the usual sigaction struct.
8851 struct target_rt_sigaction *rt_act;
8852 struct target_sigaction act, oact, *pact = 0;
8854 if (arg4 != sizeof(target_sigset_t)) {
8855 ret = -TARGET_EINVAL;
8856 break;
8858 if (arg2) {
8859 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8860 goto efault;
8861 act._sa_handler = rt_act->_sa_handler;
8862 act.sa_mask = rt_act->sa_mask;
8863 act.sa_flags = rt_act->sa_flags;
8864 act.sa_restorer = arg5;
8865 unlock_user_struct(rt_act, arg2, 0);
8866 pact = &act;
8868 ret = get_errno(do_sigaction(arg1, pact, &oact));
8869 if (!is_error(ret) && arg3) {
        /* arg3 non-NULL: copy the previous handler back to the guest. */
8870 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8871 goto efault;
8872 rt_act->_sa_handler = oact._sa_handler;
8873 rt_act->sa_mask = oact.sa_mask;
8874 rt_act->sa_flags = oact.sa_flags;
8875 unlock_user_struct(rt_act, arg3, 1);
8877 #else
8878 #ifdef TARGET_SPARC
8879 target_ulong restorer = arg4;
8880 target_ulong sigsetsize = arg5;
8881 #else
8882 target_ulong sigsetsize = arg4;
8883 #endif
8884 struct target_sigaction *act;
8885 struct target_sigaction *oact;
8887 if (sigsetsize != sizeof(target_sigset_t)) {
8888 ret = -TARGET_EINVAL;
8889 break;
8891 if (arg2) {
8892 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8893 goto efault;
8895 #ifdef TARGET_ARCH_HAS_KA_RESTORER
        /* SPARC-style explicit restorer trampoline passed as its own arg. */
8896 act->ka_restorer = restorer;
8897 #endif
8898 } else {
8899 act = NULL;
8901 if (arg3) {
8902 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8903 ret = -TARGET_EFAULT;
8904 goto rt_sigaction_fail;
8906 } else
8907 oact = NULL;
8908 ret = get_errno(do_sigaction(arg1, act, oact));
8909 rt_sigaction_fail:
        /* Common unlock path; reached on success and on oact EFAULT. */
8910 if (act)
8911 unlock_user_struct(act, arg2, 0);
8912 if (oact)
8913 unlock_user_struct(oact, arg3, 1);
8914 #endif
8916 break;
8917 #ifdef TARGET_NR_sgetmask /* not on alpha */
8918 case TARGET_NR_sgetmask:
        /* sgetmask: return the current blocked-signal mask in the legacy
         * single-word ("old") sigset format. */
8920 sigset_t cur_set;
8921 abi_ulong target_set;
8922 ret = do_sigprocmask(0, NULL, &cur_set);
8923 if (!ret) {
8924 host_to_target_old_sigset(&target_set, &cur_set);
8925 ret = target_set;
8928 break;
8929 #endif
8930 #ifdef TARGET_NR_ssetmask /* not on alpha */
8931 case TARGET_NR_ssetmask:
        /* ssetmask: replace the blocked-signal mask with arg1 (old format)
         * and return the previous mask on success. */
8933 sigset_t set, oset;
8934 abi_ulong target_set = arg1;
8935 target_to_host_old_sigset(&set, &target_set);
8936 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8937 if (!ret) {
8938 host_to_target_old_sigset(&target_set, &oset);
8939 ret = target_set;
8942 break;
8943 #endif
8944 #ifdef TARGET_NR_sigprocmask
8945 case TARGET_NR_sigprocmask:
        /* sigprocmask (old, non-rt): Alpha passes the mask by value in arg2
         * and returns the old mask in the result register; the generic path
         * reads/writes old-format sigsets through guest pointers. */
8947 #if defined(TARGET_ALPHA)
8948 sigset_t set, oldset;
8949 abi_ulong mask;
8950 int how;
8952 switch (arg1) {
8953 case TARGET_SIG_BLOCK:
8954 how = SIG_BLOCK;
8955 break;
8956 case TARGET_SIG_UNBLOCK:
8957 how = SIG_UNBLOCK;
8958 break;
8959 case TARGET_SIG_SETMASK:
8960 how = SIG_SETMASK;
8961 break;
8962 default:
8963 ret = -TARGET_EINVAL;
8964 goto fail;
8966 mask = arg2;
8967 target_to_host_old_sigset(&set, &mask);
8969 ret = do_sigprocmask(how, &set, &oldset);
8970 if (!is_error(ret)) {
8971 host_to_target_old_sigset(&mask, &oldset);
8972 ret = mask;
        /* The old mask is the return value, so clear the Alpha error flag
         * register explicitly lest it be mistaken for an errno. */
8973 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8975 #else
8976 sigset_t set, oldset, *set_ptr;
8977 int how;
8979 if (arg2) {
8980 switch (arg1) {
8981 case TARGET_SIG_BLOCK:
8982 how = SIG_BLOCK;
8983 break;
8984 case TARGET_SIG_UNBLOCK:
8985 how = SIG_UNBLOCK;
8986 break;
8987 case TARGET_SIG_SETMASK:
8988 how = SIG_SETMASK;
8989 break;
8990 default:
8991 ret = -TARGET_EINVAL;
8992 goto fail;
8994 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8995 goto efault;
8996 target_to_host_old_sigset(&set, p);
8997 unlock_user(p, arg2, 0);
8998 set_ptr = &set;
8999 } else {
        /* No new mask supplied: query-only call. */
9000 how = 0;
9001 set_ptr = NULL;
9003 ret = do_sigprocmask(how, set_ptr, &oldset);
9004 if (!is_error(ret) && arg3) {
9005 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9006 goto efault;
9007 host_to_target_old_sigset(p, &oldset);
9008 unlock_user(p, arg3, sizeof(target_sigset_t));
9010 #endif
9012 break;
9013 #endif
9014 case TARGET_NR_rt_sigprocmask:
        /* rt_sigprocmask: like sigprocmask but with full-size sigsets;
         * arg4 must be exactly sizeof(target_sigset_t). */
9016 int how = arg1;
9017 sigset_t set, oldset, *set_ptr;
9019 if (arg4 != sizeof(target_sigset_t)) {
9020 ret = -TARGET_EINVAL;
9021 break;
9024 if (arg2) {
9025 switch(how) {
9026 case TARGET_SIG_BLOCK:
9027 how = SIG_BLOCK;
9028 break;
9029 case TARGET_SIG_UNBLOCK:
9030 how = SIG_UNBLOCK;
9031 break;
9032 case TARGET_SIG_SETMASK:
9033 how = SIG_SETMASK;
9034 break;
9035 default:
9036 ret = -TARGET_EINVAL;
9037 goto fail;
9039 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9040 goto efault;
9041 target_to_host_sigset(&set, p);
9042 unlock_user(p, arg2, 0);
9043 set_ptr = &set;
9044 } else {
        /* Query-only: no new mask to apply. */
9045 how = 0;
9046 set_ptr = NULL;
9048 ret = do_sigprocmask(how, set_ptr, &oldset);
9049 if (!is_error(ret) && arg3) {
9050 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9051 goto efault;
9052 host_to_target_sigset(p, &oldset);
9053 unlock_user(p, arg3, sizeof(target_sigset_t));
9056 break;
9057 #ifdef TARGET_NR_sigpending
9058 case TARGET_NR_sigpending:
        /* sigpending (old format): report pending signals via the host's
         * sigpending(), converted to the old single-word sigset layout. */
9060 sigset_t set;
9061 ret = get_errno(sigpending(&set));
9062 if (!is_error(ret)) {
9063 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9064 goto efault;
9065 host_to_target_old_sigset(p, &set);
9066 unlock_user(p, arg1, sizeof(target_sigset_t));
9069 break;
9070 #endif
9071 case TARGET_NR_rt_sigpending:
        /* rt_sigpending: same, but full-size sigset with a size argument. */
9073 sigset_t set;
9075 /* Yes, this check is >, not != like most. We follow the kernel's
9076 * logic and it does it like this because it implements
9077 * NR_sigpending through the same code path, and in that case
9078 * the old_sigset_t is smaller in size.
9080 if (arg2 > sizeof(target_sigset_t)) {
9081 ret = -TARGET_EINVAL;
9082 break;
9085 ret = get_errno(sigpending(&set));
9086 if (!is_error(ret)) {
9087 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9088 goto efault;
9089 host_to_target_sigset(p, &set);
9090 unlock_user(p, arg1, sizeof(target_sigset_t));
9093 break;
9094 #ifdef TARGET_NR_sigsuspend
9095 case TARGET_NR_sigsuspend:
        /* sigsuspend: park the thread with a temporary signal mask. The
         * mask is stashed in the TaskState so signal delivery code can
         * restore it; ts->in_sigsuspend flags that state unless the call
         * is being restarted (-TARGET_ERESTARTSYS). */
9097 TaskState *ts = cpu->opaque;
9098 #if defined(TARGET_ALPHA)
        /* Alpha passes the (old-format) mask by value in arg1. */
9099 abi_ulong mask = arg1;
9100 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9101 #else
9102 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9103 goto efault;
9104 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9105 unlock_user(p, arg1, 0);
9106 #endif
9107 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9108 SIGSET_T_SIZE));
9109 if (ret != -TARGET_ERESTARTSYS) {
9110 ts->in_sigsuspend = 1;
9113 break;
9114 #endif
9115 case TARGET_NR_rt_sigsuspend:
        /* rt_sigsuspend: as above with a full-size sigset; arg2 must be
         * exactly sizeof(target_sigset_t). */
9117 TaskState *ts = cpu->opaque;
9119 if (arg2 != sizeof(target_sigset_t)) {
9120 ret = -TARGET_EINVAL;
9121 break;
9123 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9124 goto efault;
9125 target_to_host_sigset(&ts->sigsuspend_mask, p);
9126 unlock_user(p, arg1, 0);
9127 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9128 SIGSET_T_SIZE));
9129 if (ret != -TARGET_ERESTARTSYS) {
9130 ts->in_sigsuspend = 1;
9133 break;
9134 case TARGET_NR_rt_sigtimedwait:
        /* rt_sigtimedwait: wait for a signal in `set` with an optional
         * timeout (arg3); on success, write the siginfo to arg2 (if given)
         * and translate the host signal number back to the target's. */
9136 sigset_t set;
9137 struct timespec uts, *puts;
9138 siginfo_t uinfo;
9140 if (arg4 != sizeof(target_sigset_t)) {
9141 ret = -TARGET_EINVAL;
9142 break;
9145 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9146 goto efault;
9147 target_to_host_sigset(&set, p);
9148 unlock_user(p, arg1, 0);
9149 if (arg3) {
9150 puts = &uts;
9151 target_to_host_timespec(puts, arg3);
9152 } else {
9153 puts = NULL;
9155 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9156 SIGSET_T_SIZE));
9157 if (!is_error(ret)) {
9158 if (arg2) {
9159 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9161 if (!p) {
9162 goto efault;
9164 host_to_target_siginfo(p, &uinfo);
9165 unlock_user(p, arg2, sizeof(target_siginfo_t));
9167 ret = host_to_target_signal(ret);
9170 break;
9171 case TARGET_NR_rt_sigqueueinfo:
        /* rt_sigqueueinfo: queue a signal + siginfo to a process. */
9173 siginfo_t uinfo;
9175 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9176 if (!p) {
9177 goto efault;
9179 target_to_host_siginfo(&uinfo, p);
9180 unlock_user(p, arg3, 0);
9181 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9183 break;
9184 case TARGET_NR_rt_tgsigqueueinfo:
        /* rt_tgsigqueueinfo: as above, targeted at a specific thread
         * within a thread group (tgid in arg1, tid in arg2). */
9186 siginfo_t uinfo;
9188 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9189 if (!p) {
9190 goto efault;
9192 target_to_host_siginfo(&uinfo, p);
9193 unlock_user(p, arg4, 0);
9194 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9196 break;
9197 #ifdef TARGET_NR_sigreturn
9198 case TARGET_NR_sigreturn:
        /* sigreturn: unwind the signal frame. block_signals() returning
         * non-zero means we must restart rather than touch the frame now. */
9199 if (block_signals()) {
9200 ret = -TARGET_ERESTARTSYS;
9201 } else {
9202 ret = do_sigreturn(cpu_env);
9204 break;
9205 #endif
9206 case TARGET_NR_rt_sigreturn:
        /* rt_sigreturn: same pattern for the rt signal frame. */
9207 if (block_signals()) {
9208 ret = -TARGET_ERESTARTSYS;
9209 } else {
9210 ret = do_rt_sigreturn(cpu_env);
9212 break;
9213 case TARGET_NR_sethostname:
9214 if (!(p = lock_user_string(arg1)))
9215 goto efault;
9216 ret = get_errno(sethostname(p, arg2));
9217 unlock_user(p, arg1, 0);
9218 break;
9219 case TARGET_NR_setrlimit:
        /* setrlimit: translate the resource id and the guest rlimit
         * values before calling the host. */
9221 int resource = target_to_host_resource(arg1);
9222 struct target_rlimit *target_rlim;
9223 struct rlimit rlim;
9224 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9225 goto efault;
9226 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9227 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9228 unlock_user_struct(target_rlim, arg2, 0);
9229 ret = get_errno(setrlimit(resource, &rlim));
9231 break;
9232 case TARGET_NR_getrlimit:
        /* getrlimit: inverse conversion, host -> guest. */
9234 int resource = target_to_host_resource(arg1);
9235 struct target_rlimit *target_rlim;
9236 struct rlimit rlim;
9238 ret = get_errno(getrlimit(resource, &rlim));
9239 if (!is_error(ret)) {
9240 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9241 goto efault;
9242 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9243 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9244 unlock_user_struct(target_rlim, arg2, 1);
9247 break;
9248 case TARGET_NR_getrusage:
9250 struct rusage rusage;
9251 ret = get_errno(getrusage(arg1, &rusage));
9252 if (!is_error(ret)) {
9253 ret = host_to_target_rusage(arg2, &rusage);
9256 break;
9257 case TARGET_NR_gettimeofday:
9259 struct timeval tv;
9260 ret = get_errno(gettimeofday(&tv, NULL));
9261 if (!is_error(ret)) {
9262 if (copy_to_user_timeval(arg1, &tv))
9263 goto efault;
9266 break;
9267 case TARGET_NR_settimeofday:
        /* settimeofday: both the timeval and the timezone arguments are
         * optional (NULL guest pointer -> NULL host pointer). */
9269 struct timeval tv, *ptv = NULL;
9270 struct timezone tz, *ptz = NULL;
9272 if (arg1) {
9273 if (copy_from_user_timeval(&tv, arg1)) {
9274 goto efault;
9276 ptv = &tv;
9279 if (arg2) {
9280 if (copy_from_user_timezone(&tz, arg2)) {
9281 goto efault;
9283 ptz = &tz;
9286 ret = get_errno(settimeofday(ptv, ptz));
9288 break;
9289 #if defined(TARGET_NR_select)
9290 case TARGET_NR_select:
9291 #if defined(TARGET_WANT_NI_OLD_SELECT)
9292 /* some architectures used to have old_select here
9293 * but now ENOSYS it.
9295 ret = -TARGET_ENOSYS;
9296 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9297 ret = do_old_select(arg1);
9298 #else
9299 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9300 #endif
9301 break;
9302 #endif
9303 #ifdef TARGET_NR_pselect6
9304 case TARGET_NR_pselect6:
        /* pselect6: select with a timespec timeout and an atomically
         * applied signal mask. The fd sets are copied in, the syscall is
         * run via safe_pselect6(), and on success the (possibly modified)
         * fd sets and remaining timeout are copied back to the guest. */
9306 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
9307 fd_set rfds, wfds, efds;
9308 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
9309 struct timespec ts, *ts_ptr;
9312 * The 6th arg is actually two args smashed together,
9313 * so we cannot use the C library.
9315 sigset_t set;
9316 struct {
9317 sigset_t *set;
9318 size_t size;
9319 } sig, *sig_ptr;
9321 abi_ulong arg_sigset, arg_sigsize, *arg7;
9322 target_sigset_t *target_sigset;
9324 n = arg1;
9325 rfd_addr = arg2;
9326 wfd_addr = arg3;
9327 efd_addr = arg4;
9328 ts_addr = arg5;
9330 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
9331 if (ret) {
9332 goto fail;
9334 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
9335 if (ret) {
9336 goto fail;
9338 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
9339 if (ret) {
9340 goto fail;
9344 * This takes a timespec, and not a timeval, so we cannot
9345 * use the do_select() helper ...
9347 if (ts_addr) {
9348 if (target_to_host_timespec(&ts, ts_addr)) {
9349 goto efault;
9351 ts_ptr = &ts;
9352 } else {
9353 ts_ptr = NULL;
9356 /* Extract the two packed args for the sigset */
9357 if (arg6) {
9358 sig_ptr = &sig;
9359 sig.size = SIGSET_T_SIZE;
9361 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
9362 if (!arg7) {
9363 goto efault;
9365 arg_sigset = tswapal(arg7[0]);
9366 arg_sigsize = tswapal(arg7[1]);
9367 unlock_user(arg7, arg6, 0);
9369 if (arg_sigset) {
9370 sig.set = &set;
9371 if (arg_sigsize != sizeof(*target_sigset)) {
9372 /* Like the kernel, we enforce correct size sigsets */
9373 ret = -TARGET_EINVAL;
9374 goto fail;
9376 target_sigset = lock_user(VERIFY_READ, arg_sigset,
9377 sizeof(*target_sigset), 1);
9378 if (!target_sigset) {
9379 goto efault;
9381 target_to_host_sigset(&set, target_sigset);
9382 unlock_user(target_sigset, arg_sigset, 0);
9383 } else {
9384 sig.set = NULL;
9386 } else {
9387 sig_ptr = NULL;
9390 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
9391 ts_ptr, sig_ptr));
9393 if (!is_error(ret)) {
9394 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
9395 goto efault;
9396 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
9397 goto efault;
9398 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
9399 goto efault;
9401 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
9402 goto efault;
9405 break;
9406 #endif
9407 #ifdef TARGET_NR_symlink
9408 case TARGET_NR_symlink:
9410 void *p2;
9411 p = lock_user_string(arg1);
9412 p2 = lock_user_string(arg2);
9413 if (!p || !p2)
9414 ret = -TARGET_EFAULT;
9415 else
9416 ret = get_errno(symlink(p, p2));
9417 unlock_user(p2, arg2, 0);
9418 unlock_user(p, arg1, 0);
9420 break;
9421 #endif
9422 #if defined(TARGET_NR_symlinkat)
9423 case TARGET_NR_symlinkat:
9425 void *p2;
9426 p = lock_user_string(arg1);
9427 p2 = lock_user_string(arg3);
9428 if (!p || !p2)
9429 ret = -TARGET_EFAULT;
9430 else
9431 ret = get_errno(symlinkat(p, arg2, p2));
9432 unlock_user(p2, arg3, 0);
9433 unlock_user(p, arg1, 0);
9435 break;
9436 #endif
9437 #ifdef TARGET_NR_oldlstat
9438 case TARGET_NR_oldlstat:
9439 goto unimplemented;
9440 #endif
9441 #ifdef TARGET_NR_readlink
9442 case TARGET_NR_readlink:
        /* readlink: mostly a pass-through, except that reads of
         * /proc/self/exe (is_proc_myself) are intercepted and answered
         * with the real path of the guest executable, since the host's
         * /proc entry would name qemu itself. */
9444 void *p2;
9445 p = lock_user_string(arg1);
9446 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9447 if (!p || !p2) {
9448 ret = -TARGET_EFAULT;
9449 } else if (!arg3) {
9450 /* Short circuit this for the magic exe check. */
9451 ret = -TARGET_EINVAL;
9452 } else if (is_proc_myself((const char *)p, "exe")) {
9453 char real[PATH_MAX], *temp;
9454 temp = realpath(exec_path, real);
9455 /* Return value is # of bytes that we wrote to the buffer. */
9456 if (temp == NULL) {
9457 ret = get_errno(-1);
9458 } else {
9459 /* Don't worry about sign mismatch as earlier mapping
9460 * logic would have thrown a bad address error. */
9461 ret = MIN(strlen(real), arg3);
9462 /* We cannot NUL terminate the string. */
9463 memcpy(p2, real, ret);
9465 } else {
9466 ret = get_errno(readlink(path(p), p2, arg3));
9468 unlock_user(p2, arg2, ret);
9469 unlock_user(p, arg1, 0);
9471 break;
9472 #endif
9473 #if defined(TARGET_NR_readlinkat)
9474 case TARGET_NR_readlinkat:
        /* readlinkat: same /proc/self/exe interception as readlink. */
9476 void *p2;
9477 p = lock_user_string(arg2);
9478 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9479 if (!p || !p2) {
9480 ret = -TARGET_EFAULT;
9481 } else if (is_proc_myself((const char *)p, "exe")) {
9482 char real[PATH_MAX], *temp;
9483 temp = realpath(exec_path, real);
9484 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
9485 snprintf((char *)p2, arg4, "%s", real);
9486 } else {
9487 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9489 unlock_user(p2, arg3, ret);
9490 unlock_user(p, arg2, 0);
9492 break;
9493 #endif
9494 #ifdef TARGET_NR_uselib
9495 case TARGET_NR_uselib:
9496 goto unimplemented;
9497 #endif
9498 #ifdef TARGET_NR_swapon
9499 case TARGET_NR_swapon:
9500 if (!(p = lock_user_string(arg1)))
9501 goto efault;
9502 ret = get_errno(swapon(p, arg2));
9503 unlock_user(p, arg1, 0);
9504 break;
9505 #endif
9506 case TARGET_NR_reboot:
9507 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9508 /* arg4 must be ignored in all other cases */
9509 p = lock_user_string(arg4);
9510 if (!p) {
9511 goto efault;
9513 ret = get_errno(reboot(arg1, arg2, arg3, p));
9514 unlock_user(p, arg4, 0);
9515 } else {
9516 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9518 break;
9519 #ifdef TARGET_NR_readdir
9520 case TARGET_NR_readdir:
9521 goto unimplemented;
9522 #endif
9523 #ifdef TARGET_NR_mmap
9524 case TARGET_NR_mmap:
        /* mmap: on the listed 32-bit ABIs the six arguments arrive packed
         * in a guest-memory array pointed to by arg1 (old_mmap style);
         * elsewhere they are passed in registers as usual. Flags are
         * translated through mmap_flags_tbl. */
9525 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9526 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9527 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9528 || defined(TARGET_S390X)
9530 abi_ulong *v;
9531 abi_ulong v1, v2, v3, v4, v5, v6;
9532 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9533 goto efault;
9534 v1 = tswapal(v[0]);
9535 v2 = tswapal(v[1]);
9536 v3 = tswapal(v[2]);
9537 v4 = tswapal(v[3]);
9538 v5 = tswapal(v[4]);
9539 v6 = tswapal(v[5]);
9540 unlock_user(v, arg1, 0);
9541 ret = get_errno(target_mmap(v1, v2, v3,
9542 target_to_host_bitmask(v4, mmap_flags_tbl),
9543 v5, v6));
9545 #else
9546 ret = get_errno(target_mmap(arg1, arg2, arg3,
9547 target_to_host_bitmask(arg4, mmap_flags_tbl),
9548 arg5,
9549 arg6));
9550 #endif
9551 break;
9552 #endif
9553 #ifdef TARGET_NR_mmap2
9554 case TARGET_NR_mmap2:
        /* mmap2: offset argument is in units of 2^MMAP_SHIFT (page-sized
         * by default) rather than bytes. */
9555 #ifndef MMAP_SHIFT
9556 #define MMAP_SHIFT 12
9557 #endif
9558 ret = get_errno(target_mmap(arg1, arg2, arg3,
9559 target_to_host_bitmask(arg4, mmap_flags_tbl),
9560 arg5,
9561 arg6 << MMAP_SHIFT));
9562 break;
9563 #endif
9564 case TARGET_NR_munmap:
9565 ret = get_errno(target_munmap(arg1, arg2));
9566 break;
9567 case TARGET_NR_mprotect:
9569 TaskState *ts = cpu->opaque;
9570 /* Special hack to detect libc making the stack executable. */
9571 if ((arg3 & PROT_GROWSDOWN)
9572 && arg1 >= ts->info->stack_limit
9573 && arg1 <= ts->info->start_stack) {
        /* Expand the request to cover the whole guest stack region and
         * drop the GROWSDOWN flag before forwarding. */
9574 arg3 &= ~PROT_GROWSDOWN;
9575 arg2 = arg2 + arg1 - ts->info->stack_limit;
9576 arg1 = ts->info->stack_limit;
9579 ret = get_errno(target_mprotect(arg1, arg2, arg3));
9580 break;
9581 #ifdef TARGET_NR_mremap
9582 case TARGET_NR_mremap:
9583 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9584 break;
9585 #endif
9586 /* ??? msync/mlock/munlock are broken for softmmu. */
9587 #ifdef TARGET_NR_msync
9588 case TARGET_NR_msync:
9589 ret = get_errno(msync(g2h(arg1), arg2, arg3));
9590 break;
9591 #endif
9592 #ifdef TARGET_NR_mlock
9593 case TARGET_NR_mlock:
9594 ret = get_errno(mlock(g2h(arg1), arg2));
9595 break;
9596 #endif
9597 #ifdef TARGET_NR_munlock
9598 case TARGET_NR_munlock:
9599 ret = get_errno(munlock(g2h(arg1), arg2));
9600 break;
9601 #endif
9602 #ifdef TARGET_NR_mlockall
9603 case TARGET_NR_mlockall:
9604 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9605 break;
9606 #endif
9607 #ifdef TARGET_NR_munlockall
9608 case TARGET_NR_munlockall:
9609 ret = get_errno(munlockall());
9610 break;
9611 #endif
9612 case TARGET_NR_truncate:
9613 if (!(p = lock_user_string(arg1)))
9614 goto efault;
9615 ret = get_errno(truncate(p, arg2));
9616 unlock_user(p, arg1, 0);
9617 break;
9618 case TARGET_NR_ftruncate:
9619 ret = get_errno(ftruncate(arg1, arg2));
9620 break;
9621 case TARGET_NR_fchmod:
9622 ret = get_errno(fchmod(arg1, arg2));
9623 break;
9624 #if defined(TARGET_NR_fchmodat)
9625 case TARGET_NR_fchmodat:
9626 if (!(p = lock_user_string(arg2)))
9627 goto efault;
9628 ret = get_errno(fchmodat(arg1, p, arg3, 0));
9629 unlock_user(p, arg2, 0);
9630 break;
9631 #endif
9632 case TARGET_NR_getpriority:
9633 /* Note that negative values are valid for getpriority, so we must
9634 differentiate based on errno settings. */
9635 errno = 0;
9636 ret = getpriority(arg1, arg2);
9637 if (ret == -1 && errno != 0) {
9638 ret = -host_to_target_errno(errno);
9639 break;
9641 #ifdef TARGET_ALPHA
9642 /* Return value is the unbiased priority. Signal no error. */
9643 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9644 #else
9645 /* Return value is a biased priority to avoid negative numbers. */
9646 ret = 20 - ret;
9647 #endif
9648 break;
9649 case TARGET_NR_setpriority:
9650 ret = get_errno(setpriority(arg1, arg2, arg3));
9651 break;
9652 #ifdef TARGET_NR_profil
9653 case TARGET_NR_profil:
9654 goto unimplemented;
9655 #endif
9656 case TARGET_NR_statfs:
9657 if (!(p = lock_user_string(arg1)))
9658 goto efault;
9659 ret = get_errno(statfs(path(p), &stfs));
9660 unlock_user(p, arg1, 0);
9661 convert_statfs:
        /* Shared tail: fstatfs jumps here with ret/stfs already set.
         * Converts the host statfs buffer into the target layout. */
9662 if (!is_error(ret)) {
9663 struct target_statfs *target_stfs;
9665 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9666 goto efault;
9667 __put_user(stfs.f_type, &target_stfs->f_type);
9668 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9669 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9670 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9671 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9672 __put_user(stfs.f_files, &target_stfs->f_files);
9673 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9674 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9675 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9676 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9677 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9678 #ifdef _STATFS_F_FLAGS
9679 __put_user(stfs.f_flags, &target_stfs->f_flags);
9680 #else
9681 __put_user(0, &target_stfs->f_flags);
9682 #endif
9683 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9684 unlock_user_struct(target_stfs, arg2, 1);
9686 break;
9687 case TARGET_NR_fstatfs:
9688 ret = get_errno(fstatfs(arg1, &stfs));
9689 goto convert_statfs;
9690 #ifdef TARGET_NR_statfs64
9691 case TARGET_NR_statfs64:
9692 if (!(p = lock_user_string(arg1)))
9693 goto efault;
9694 ret = get_errno(statfs(path(p), &stfs));
9695 unlock_user(p, arg1, 0);
9696 convert_statfs64:
        /* Shared tail for the 64-bit variants; note the guest buffer is
         * arg3 here (arg2 is the structure size on these syscalls). */
9697 if (!is_error(ret)) {
9698 struct target_statfs64 *target_stfs;
9700 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9701 goto efault;
9702 __put_user(stfs.f_type, &target_stfs->f_type);
9703 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9704 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9705 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9706 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9707 __put_user(stfs.f_files, &target_stfs->f_files);
9708 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9709 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9710 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9711 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9712 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9713 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9714 unlock_user_struct(target_stfs, arg3, 1);
9716 break;
9717 case TARGET_NR_fstatfs64:
9718 ret = get_errno(fstatfs(arg1, &stfs));
9719 goto convert_statfs64;
9720 #endif
9721 #ifdef TARGET_NR_ioperm
9722 case TARGET_NR_ioperm:
9723 goto unimplemented;
9724 #endif
9725 #ifdef TARGET_NR_socketcall
        /* Socket syscalls: each of these is a thin dispatcher to a do_*
         * helper defined earlier in this file, which handles the guest
         * <-> host sockaddr/msghdr conversions. */
9726 case TARGET_NR_socketcall:
9727 ret = do_socketcall(arg1, arg2);
9728 break;
9729 #endif
9730 #ifdef TARGET_NR_accept
9731 case TARGET_NR_accept:
9732 ret = do_accept4(arg1, arg2, arg3, 0);
9733 break;
9734 #endif
9735 #ifdef TARGET_NR_accept4
9736 case TARGET_NR_accept4:
9737 ret = do_accept4(arg1, arg2, arg3, arg4);
9738 break;
9739 #endif
9740 #ifdef TARGET_NR_bind
9741 case TARGET_NR_bind:
9742 ret = do_bind(arg1, arg2, arg3);
9743 break;
9744 #endif
9745 #ifdef TARGET_NR_connect
9746 case TARGET_NR_connect:
9747 ret = do_connect(arg1, arg2, arg3);
9748 break;
9749 #endif
9750 #ifdef TARGET_NR_getpeername
9751 case TARGET_NR_getpeername:
9752 ret = do_getpeername(arg1, arg2, arg3);
9753 break;
9754 #endif
9755 #ifdef TARGET_NR_getsockname
9756 case TARGET_NR_getsockname:
9757 ret = do_getsockname(arg1, arg2, arg3);
9758 break;
9759 #endif
9760 #ifdef TARGET_NR_getsockopt
9761 case TARGET_NR_getsockopt:
9762 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9763 break;
9764 #endif
9765 #ifdef TARGET_NR_listen
9766 case TARGET_NR_listen:
9767 ret = get_errno(listen(arg1, arg2));
9768 break;
9769 #endif
9770 #ifdef TARGET_NR_recv
9771 case TARGET_NR_recv:
9772 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9773 break;
9774 #endif
9775 #ifdef TARGET_NR_recvfrom
9776 case TARGET_NR_recvfrom:
9777 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9778 break;
9779 #endif
9780 #ifdef TARGET_NR_recvmsg
9781 case TARGET_NR_recvmsg:
9782 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
9783 break;
9784 #endif
9785 #ifdef TARGET_NR_send
9786 case TARGET_NR_send:
9787 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9788 break;
9789 #endif
9790 #ifdef TARGET_NR_sendmsg
9791 case TARGET_NR_sendmsg:
9792 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
9793 break;
9794 #endif
9795 #ifdef TARGET_NR_sendmmsg
9796 case TARGET_NR_sendmmsg:
9797 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9798 break;
9799 case TARGET_NR_recvmmsg:
9800 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9801 break;
9802 #endif
9803 #ifdef TARGET_NR_sendto
9804 case TARGET_NR_sendto:
9805 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9806 break;
9807 #endif
9808 #ifdef TARGET_NR_shutdown
9809 case TARGET_NR_shutdown:
9810 ret = get_errno(shutdown(arg1, arg2));
9811 break;
9812 #endif
9813 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9814 case TARGET_NR_getrandom:
9815 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9816 if (!p) {
9817 goto efault;
9819 ret = get_errno(getrandom(p, arg2, arg3));
9820 unlock_user(p, arg1, ret);
9821 break;
9822 #endif
9823 #ifdef TARGET_NR_socket
9824 case TARGET_NR_socket:
9825 ret = do_socket(arg1, arg2, arg3);
9826 break;
9827 #endif
9828 #ifdef TARGET_NR_socketpair
9829 case TARGET_NR_socketpair:
9830 ret = do_socketpair(arg1, arg2, arg3, arg4);
9831 break;
9832 #endif
9833 #ifdef TARGET_NR_setsockopt
9834 case TARGET_NR_setsockopt:
9835 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9836 break;
9837 #endif
9838 #if defined(TARGET_NR_syslog)
9839 case TARGET_NR_syslog:
        /* syslog: actions that do not read the kernel log buffer are
         * forwarded directly; the READ* actions validate the length and
         * copy into a guest buffer. */
9841 int len = arg2;
9843 switch (arg1) {
9844 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
9845 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
9846 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
9847 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
9848 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
9849 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9850 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
9851 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
9853 ret = get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9855 break;
9856 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
9857 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
9858 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
9860 ret = -TARGET_EINVAL;
9861 if (len < 0) {
9862 goto fail;
9864 ret = 0;
9865 if (len == 0) {
9866 break;
9868 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9869 if (!p) {
9870 ret = -TARGET_EFAULT;
9871 goto fail;
9873 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9874 unlock_user(p, arg2, arg3);
9876 break;
9877 default:
9878 ret = -EINVAL;
9879 break;
9882 break;
9883 #endif
9884 case TARGET_NR_setitimer:
        /* setitimer: the guest itimerval is two consecutive timevals
         * (it_interval then it_value), copied field by field; the old
         * value is written back to arg3 when requested. */
9886 struct itimerval value, ovalue, *pvalue;
9888 if (arg2) {
9889 pvalue = &value;
9890 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9891 || copy_from_user_timeval(&pvalue->it_value,
9892 arg2 + sizeof(struct target_timeval)))
9893 goto efault;
9894 } else {
9895 pvalue = NULL;
9897 ret = get_errno(setitimer(arg1, pvalue, &ovalue))
;
9898 if (!is_error(ret) && arg3) {
9899 if (copy_to_user_timeval(arg3,
9900 &ovalue.it_interval)
9901 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9902 &ovalue.it_value))
9903 goto efault;
9906 break;
9907 case TARGET_NR_getitimer:
        /* getitimer: same two-timeval layout for the out parameter. */
9909 struct itimerval value;
9911 ret = get_errno(getitimer(arg1, &value));
9912 if (!is_error(ret) && arg2) {
9913 if (copy_to_user_timeval(arg2,
9914 &value.it_interval)
9915 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9916 &value.it_value))
9917 goto efault;
9920 break;
9921 #ifdef TARGET_NR_stat
9922 case TARGET_NR_stat:
9923 if (!(p = lock_user_string(arg1)))
9924 goto efault;
9925 ret = get_errno(stat(path(p), &st))
;
9926 unlock_user(p, arg1, 0);
9927 goto do_stat;
9928 #endif
9929 #ifdef TARGET_NR_lstat
9930 case TARGET_NR_lstat:
9931 if (!(p = lock_user_string(arg1)))
9932 goto efault;
9933 ret = get_errno(lstat(path(p), &st));
9934 unlock_user(p, arg1, 0);
9935 goto do_stat;
9936 #endif
9937 case TARGET_NR_fstat:
9939 ret = get_errno(fstat(arg1, &st));
9940 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9941 do_stat:
        /* Shared tail: stat/lstat jump here with ret/st already set.
         * Converts the host struct stat into the target layout. */
9942 #endif
9943 if (!is_error(ret)) {
9944 struct target_stat *target_st;
9946 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9947 goto efault;
9948 memset(target_st, 0, sizeof(*target_st));
9949 __put_user(st.st_dev, &target_st->st_dev);
9950 __put_user(st.st_ino, &target_st->st_ino);
9951 __put_user(st.st_mode, &target_st->st_mode);
9952 __put_user(st.st_uid, &target_st->st_uid);
9953 __put_user(st.st_gid, &target_st->st_gid);
9954 __put_user(st.st_nlink, &target_st->st_nlink);
9955 __put_user(st.st_rdev, &target_st->st_rdev);
9956 __put_user(st.st_size, &target_st->st_size);
9957 __put_user(st.st_blksize, &target_st->st_blksize);
9958 __put_user(st.st_blocks, &target_st->st_blocks);
9959 __put_user(st.st_atime, &target_st->target_st_atime);
9960 __put_user(st.st_mtime, &target_st->target_st_mtime);
9961 __put_user(st.st_ctime, &target_st->target_st_ctime);
9962 unlock_user_struct(target_st, arg2, 1);
9965 break;
9966 #ifdef TARGET_NR_olduname
9967 case TARGET_NR_olduname:
9968 goto unimplemented;
9969 #endif
9970 #ifdef TARGET_NR_iopl
9971 case TARGET_NR_iopl:
9972 goto unimplemented;
9973 #endif
9974 case TARGET_NR_vhangup:
9975 ret = get_errno(vhangup());
9976 break;
9977 #ifdef TARGET_NR_idle
9978 case TARGET_NR_idle:
9979 goto unimplemented;
9980 #endif
9981 #ifdef TARGET_NR_syscall
9982 case TARGET_NR_syscall:
        /* Indirect syscall: re-enter do_syscall with shifted arguments. */
9983 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9984 arg6, arg7, arg8, 0);
9985 break;
9986 #endif
9987 case TARGET_NR_wait4:
        /* wait4: reap a child; translate the wait status and the rusage
         * block back to the guest when the corresponding pointers were
         * supplied and a child was actually reaped (ret != 0). */
9989 int status;
9990 abi_long status_ptr = arg2;
9991 struct rusage rusage, *rusage_ptr;
9992 abi_ulong target_rusage = arg4;
9993 abi_long rusage_err;
9994 if (target_rusage)
9995 rusage_ptr = &rusage;
9996 else
9997 rusage_ptr = NULL;
9998 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9999 if (!is_error(ret)) {
10000 if (status_ptr && ret) {
10001 status = host_to_target_waitstatus(status);
10002 if (put_user_s32(status, status_ptr))
10003 goto efault;
10005 if (target_rusage) {
10006 rusage_err = host_to_target_rusage(target_rusage, &rusage);
10007 if (rusage_err) {
10008 ret = rusage_err;
10013 break;
10014 #ifdef TARGET_NR_swapoff
10015 case TARGET_NR_swapoff:
10016 if (!(p = lock_user_string(arg1)))
10017 goto efault;
10018 ret = get_errno(swapoff(p));
10019 unlock_user(p, arg1, 0);
10020 break;
10021 #endif
10022 case TARGET_NR_sysinfo:
        /* sysinfo: copy the host sysinfo fields into the target struct. */
10024 struct target_sysinfo *target_value;
10025 struct sysinfo value;
10026 ret = get_errno(sysinfo(&value));
10027 if (!is_error(ret) && arg1)
10029 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10030 goto efault;
10031 __put_user(value.uptime, &target_value->uptime);
10032 __put_user(value.loads[0], &target_value->loads[0]);
10033 __put_user(value.loads[1], &target_value->loads[1]);
10034 __put_user(value.loads[2], &target_value->loads[2]);
10035 __put_user(value.totalram, &target_value->totalram);
10036 __put_user(value.freeram, &target_value->freeram);
10037 __put_user(value.sharedram, &target_value->sharedram);
10038 __put_user(value.bufferram, &target_value->bufferram);
10039 __put_user(value.totalswap, &target_value->totalswap);
10040 __put_user(value.freeswap, &target_value->freeswap);
10041 __put_user(value.procs, &target_value->procs);
10042 __put_user(value.totalhigh, &target_value->totalhigh);
10043 __put_user(value.freehigh, &target_value->freehigh);
10044 __put_user(value.mem_unit, &target_value->mem_unit);
10045 unlock_user_struct(target_value, arg1, 1);
10048 break;
10049 #ifdef TARGET_NR_ipc
10050 case TARGET_NR_ipc:
        /* ipc: multiplexed SysV IPC entry point (do_ipc dispatches). */
10051 ret = do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10052 break;
10053 #endif
10054 #ifdef TARGET_NR_semget
        /* SysV IPC: direct forwards for the id-based calls, do_* helpers
         * where guest/host structure conversion is needed. */
10055 case TARGET_NR_semget:
10056 ret = get_errno(semget(arg1, arg2, arg3));
10057 break;
10058 #endif
10059 #ifdef TARGET_NR_semop
10060 case TARGET_NR_semop:
10061 ret = do_semop(arg1, arg2, arg3);
10062 break;
10063 #endif
10064 #ifdef TARGET_NR_semctl
10065 case TARGET_NR_semctl:
10066 ret = do_semctl(arg1, arg2, arg3, arg4);
10067 break;
10068 #endif
10069 #ifdef TARGET_NR_msgctl
10070 case TARGET_NR_msgctl:
10071 ret = do_msgctl(arg1, arg2, arg3);
10072 break;
10073 #endif
10074 #ifdef TARGET_NR_msgget
10075 case TARGET_NR_msgget:
10076 ret = get_errno(msgget(arg1, arg2));
10077 break;
10078 #endif
10079 #ifdef TARGET_NR_msgrcv
10080 case TARGET_NR_msgrcv:
10081 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10082 break;
10083 #endif
10084 #ifdef TARGET_NR_msgsnd
10085 case TARGET_NR_msgsnd:
10086 ret = do_msgsnd(arg1, arg2, arg3, arg4);
10087 break;
10088 #endif
10089 #ifdef TARGET_NR_shmget
10090 case TARGET_NR_shmget:
10091 ret = get_errno(shmget(arg1, arg2, arg3));
10092 break;
10093 #endif
10094 #ifdef TARGET_NR_shmctl
10095 case TARGET_NR_shmctl:
10096 ret = do_shmctl(arg1, arg2, arg3);
10097 break;
10098 #endif
10099 #ifdef TARGET_NR_shmat
10100 case TARGET_NR_shmat:
10101 ret = do_shmat(cpu_env, arg1, arg2, arg3);
10102 break;
10103 #endif
10104 #ifdef TARGET_NR_shmdt
10105 case TARGET_NR_shmdt:
10106 ret = do_shmdt(arg1);
10107 break;
10108 #endif
10109 case TARGET_NR_fsync:
10110 ret = get_errno(fsync(arg1));
10111 break;
10112 case TARGET_NR_clone:
10113 /* Linux manages to have three different orderings for its
10114 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10115 * match the kernel's CONFIG_CLONE_* settings.
10116 * Microblaze is further special in that it uses a sixth
10117 * implicit argument to clone for the TLS pointer.
10119 #if defined(TARGET_MICROBLAZE)
10120 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10121 #elif defined(TARGET_CLONE_BACKWARDS)
10122 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10123 #elif defined(TARGET_CLONE_BACKWARDS2)
10124 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10125 #else
10126 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10127 #endif
10128 break;
10129 #ifdef __NR_exit_group
10130 /* new thread calls */
10131 case TARGET_NR_exit_group:
10132 #ifdef TARGET_GPROF
10133 _mcleanup();
10134 #endif
10135 gdb_exit(cpu_env, arg1);
10136 ret = get_errno(exit_group(arg1));
10137 break;
10138 #endif
10139 case TARGET_NR_setdomainname:
10140 if (!(p = lock_user_string(arg1)))
10141 goto efault;
10142 ret = get_errno(setdomainname(p, arg2));
10143 unlock_user(p, arg1, 0);
10144 break;
10145 case TARGET_NR_uname:
10146 /* no need to transcode because we use the linux syscall */
10148 struct new_utsname * buf;
10150 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10151 goto efault;
10152 ret = get_errno(sys_uname(buf));
10153 if (!is_error(ret)) {
10154 /* Overwrite the native machine name with whatever is being
10155 emulated. */
10156 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10157 sizeof(buf->machine));
10158 /* Allow the user to override the reported release. */
10159 if (qemu_uname_release && *qemu_uname_release) {
10160 g_strlcpy(buf->release, qemu_uname_release,
10161 sizeof(buf->release));
10164 unlock_user_struct(buf, arg1, 1);
10166 break;
10167 #ifdef TARGET_I386
10168 case TARGET_NR_modify_ldt:
10169 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
10170 break;
10171 #if !defined(TARGET_X86_64)
10172 case TARGET_NR_vm86old:
10173 goto unimplemented;
10174 case TARGET_NR_vm86:
10175 ret = do_vm86(cpu_env, arg1, arg2);
10176 break;
10177 #endif
10178 #endif
10179 case TARGET_NR_adjtimex:
10181 struct timex host_buf;
10183 if (target_to_host_timex(&host_buf, arg1) != 0) {
10184 goto efault;
10186 ret = get_errno(adjtimex(&host_buf));
10187 if (!is_error(ret)) {
10188 if (host_to_target_timex(arg1, &host_buf) != 0) {
10189 goto efault;
10193 break;
10194 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10195 case TARGET_NR_clock_adjtime:
10197 struct timex htx, *phtx = &htx;
10199 if (target_to_host_timex(phtx, arg2) != 0) {
10200 goto efault;
10202 ret = get_errno(clock_adjtime(arg1, phtx));
10203 if (!is_error(ret) && phtx) {
10204 if (host_to_target_timex(arg2, phtx) != 0) {
10205 goto efault;
10209 break;
10210 #endif
10211 #ifdef TARGET_NR_create_module
10212 case TARGET_NR_create_module:
10213 #endif
10214 case TARGET_NR_init_module:
10215 case TARGET_NR_delete_module:
10216 #ifdef TARGET_NR_get_kernel_syms
10217 case TARGET_NR_get_kernel_syms:
10218 #endif
10219 goto unimplemented;
10220 case TARGET_NR_quotactl:
10221 goto unimplemented;
10222 case TARGET_NR_getpgid:
10223 ret = get_errno(getpgid(arg1));
10224 break;
10225 case TARGET_NR_fchdir:
10226 ret = get_errno(fchdir(arg1));
10227 break;
10228 #ifdef TARGET_NR_bdflush /* not on x86_64 */
10229 case TARGET_NR_bdflush:
10230 goto unimplemented;
10231 #endif
10232 #ifdef TARGET_NR_sysfs
10233 case TARGET_NR_sysfs:
10234 goto unimplemented;
10235 #endif
10236 case TARGET_NR_personality:
10237 ret = get_errno(personality(arg1));
10238 break;
10239 #ifdef TARGET_NR_afs_syscall
10240 case TARGET_NR_afs_syscall:
10241 goto unimplemented;
10242 #endif
10243 #ifdef TARGET_NR__llseek /* Not on alpha */
10244 case TARGET_NR__llseek:
10246 int64_t res;
10247 #if !defined(__NR_llseek)
10248 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10249 if (res == -1) {
10250 ret = get_errno(res);
10251 } else {
10252 ret = 0;
10254 #else
10255 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10256 #endif
10257 if ((ret == 0) && put_user_s64(res, arg4)) {
10258 goto efault;
10261 break;
10262 #endif
10263 #ifdef TARGET_NR_getdents
10264 case TARGET_NR_getdents:
10265 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10266 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10268 struct target_dirent *target_dirp;
10269 struct linux_dirent *dirp;
10270 abi_long count = arg3;
10272 dirp = g_try_malloc(count);
10273 if (!dirp) {
10274 ret = -TARGET_ENOMEM;
10275 goto fail;
10278 ret = get_errno(sys_getdents(arg1, dirp, count));
10279 if (!is_error(ret)) {
10280 struct linux_dirent *de;
10281 struct target_dirent *tde;
10282 int len = ret;
10283 int reclen, treclen;
10284 int count1, tnamelen;
10286 count1 = 0;
10287 de = dirp;
10288 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10289 goto efault;
10290 tde = target_dirp;
10291 while (len > 0) {
10292 reclen = de->d_reclen;
10293 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10294 assert(tnamelen >= 0);
10295 treclen = tnamelen + offsetof(struct target_dirent, d_name);
10296 assert(count1 + treclen <= count);
10297 tde->d_reclen = tswap16(treclen);
10298 tde->d_ino = tswapal(de->d_ino);
10299 tde->d_off = tswapal(de->d_off);
10300 memcpy(tde->d_name, de->d_name, tnamelen);
10301 de = (struct linux_dirent *)((char *)de + reclen);
10302 len -= reclen;
10303 tde = (struct target_dirent *)((char *)tde + treclen);
10304 count1 += treclen;
10306 ret = count1;
10307 unlock_user(target_dirp, arg2, ret);
10309 g_free(dirp);
10311 #else
10313 struct linux_dirent *dirp;
10314 abi_long count = arg3;
10316 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10317 goto efault;
10318 ret = get_errno(sys_getdents(arg1, dirp, count));
10319 if (!is_error(ret)) {
10320 struct linux_dirent *de;
10321 int len = ret;
10322 int reclen;
10323 de = dirp;
10324 while (len > 0) {
10325 reclen = de->d_reclen;
10326 if (reclen > len)
10327 break;
10328 de->d_reclen = tswap16(reclen);
10329 tswapls(&de->d_ino);
10330 tswapls(&de->d_off);
10331 de = (struct linux_dirent *)((char *)de + reclen);
10332 len -= reclen;
10335 unlock_user(dirp, arg2, ret);
10337 #endif
10338 #else
10339 /* Implement getdents in terms of getdents64 */
10341 struct linux_dirent64 *dirp;
10342 abi_long count = arg3;
10344 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10345 if (!dirp) {
10346 goto efault;
10348 ret = get_errno(sys_getdents64(arg1, dirp, count));
10349 if (!is_error(ret)) {
10350 /* Convert the dirent64 structs to target dirent. We do this
10351 * in-place, since we can guarantee that a target_dirent is no
10352 * larger than a dirent64; however this means we have to be
10353 * careful to read everything before writing in the new format.
10355 struct linux_dirent64 *de;
10356 struct target_dirent *tde;
10357 int len = ret;
10358 int tlen = 0;
10360 de = dirp;
10361 tde = (struct target_dirent *)dirp;
10362 while (len > 0) {
10363 int namelen, treclen;
10364 int reclen = de->d_reclen;
10365 uint64_t ino = de->d_ino;
10366 int64_t off = de->d_off;
10367 uint8_t type = de->d_type;
10369 namelen = strlen(de->d_name);
10370 treclen = offsetof(struct target_dirent, d_name)
10371 + namelen + 2;
10372 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10374 memmove(tde->d_name, de->d_name, namelen + 1);
10375 tde->d_ino = tswapal(ino);
10376 tde->d_off = tswapal(off);
10377 tde->d_reclen = tswap16(treclen);
10378 /* The target_dirent type is in what was formerly a padding
10379 * byte at the end of the structure:
10381 *(((char *)tde) + treclen - 1) = type;
10383 de = (struct linux_dirent64 *)((char *)de + reclen);
10384 tde = (struct target_dirent *)((char *)tde + treclen);
10385 len -= reclen;
10386 tlen += treclen;
10388 ret = tlen;
10390 unlock_user(dirp, arg2, ret);
10392 #endif
10393 break;
10394 #endif /* TARGET_NR_getdents */
10395 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10396 case TARGET_NR_getdents64:
10398 struct linux_dirent64 *dirp;
10399 abi_long count = arg3;
10400 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10401 goto efault;
10402 ret = get_errno(sys_getdents64(arg1, dirp, count));
10403 if (!is_error(ret)) {
10404 struct linux_dirent64 *de;
10405 int len = ret;
10406 int reclen;
10407 de = dirp;
10408 while (len > 0) {
10409 reclen = de->d_reclen;
10410 if (reclen > len)
10411 break;
10412 de->d_reclen = tswap16(reclen);
10413 tswap64s((uint64_t *)&de->d_ino);
10414 tswap64s((uint64_t *)&de->d_off);
10415 de = (struct linux_dirent64 *)((char *)de + reclen);
10416 len -= reclen;
10419 unlock_user(dirp, arg2, ret);
10421 break;
10422 #endif /* TARGET_NR_getdents64 */
10423 #if defined(TARGET_NR__newselect)
10424 case TARGET_NR__newselect:
10425 ret = do_select(arg1, arg2, arg3, arg4, arg5);
10426 break;
10427 #endif
10428 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10429 # ifdef TARGET_NR_poll
10430 case TARGET_NR_poll:
10431 # endif
10432 # ifdef TARGET_NR_ppoll
10433 case TARGET_NR_ppoll:
10434 # endif
10436 struct target_pollfd *target_pfd;
10437 unsigned int nfds = arg2;
10438 struct pollfd *pfd;
10439 unsigned int i;
10441 pfd = NULL;
10442 target_pfd = NULL;
10443 if (nfds) {
10444 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
10445 ret = -TARGET_EINVAL;
10446 break;
10449 target_pfd = lock_user(VERIFY_WRITE, arg1,
10450 sizeof(struct target_pollfd) * nfds, 1);
10451 if (!target_pfd) {
10452 goto efault;
10455 pfd = alloca(sizeof(struct pollfd) * nfds);
10456 for (i = 0; i < nfds; i++) {
10457 pfd[i].fd = tswap32(target_pfd[i].fd);
10458 pfd[i].events = tswap16(target_pfd[i].events);
10462 switch (num) {
10463 # ifdef TARGET_NR_ppoll
10464 case TARGET_NR_ppoll:
10466 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
10467 target_sigset_t *target_set;
10468 sigset_t _set, *set = &_set;
10470 if (arg3) {
10471 if (target_to_host_timespec(timeout_ts, arg3)) {
10472 unlock_user(target_pfd, arg1, 0);
10473 goto efault;
10475 } else {
10476 timeout_ts = NULL;
10479 if (arg4) {
10480 if (arg5 != sizeof(target_sigset_t)) {
10481 unlock_user(target_pfd, arg1, 0);
10482 ret = -TARGET_EINVAL;
10483 break;
10486 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
10487 if (!target_set) {
10488 unlock_user(target_pfd, arg1, 0);
10489 goto efault;
10491 target_to_host_sigset(set, target_set);
10492 } else {
10493 set = NULL;
10496 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
10497 set, SIGSET_T_SIZE));
10499 if (!is_error(ret) && arg3) {
10500 host_to_target_timespec(arg3, timeout_ts);
10502 if (arg4) {
10503 unlock_user(target_set, arg4, 0);
10505 break;
10507 # endif
10508 # ifdef TARGET_NR_poll
10509 case TARGET_NR_poll:
10511 struct timespec ts, *pts;
10513 if (arg3 >= 0) {
10514 /* Convert ms to secs, ns */
10515 ts.tv_sec = arg3 / 1000;
10516 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10517 pts = &ts;
10518 } else {
10519 /* -ve poll() timeout means "infinite" */
10520 pts = NULL;
10522 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10523 break;
10525 # endif
10526 default:
10527 g_assert_not_reached();
10530 if (!is_error(ret)) {
10531 for(i = 0; i < nfds; i++) {
10532 target_pfd[i].revents = tswap16(pfd[i].revents);
10535 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10537 break;
10538 #endif
10539 case TARGET_NR_flock:
10540 /* NOTE: the flock constant seems to be the same for every
10541 Linux platform */
10542 ret = get_errno(safe_flock(arg1, arg2));
10543 break;
10544 case TARGET_NR_readv:
10546 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10547 if (vec != NULL) {
10548 ret = get_errno(safe_readv(arg1, vec, arg3));
10549 unlock_iovec(vec, arg2, arg3, 1);
10550 } else {
10551 ret = -host_to_target_errno(errno);
10554 break;
10555 case TARGET_NR_writev:
10557 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10558 if (vec != NULL) {
10559 ret = get_errno(safe_writev(arg1, vec, arg3));
10560 unlock_iovec(vec, arg2, arg3, 0);
10561 } else {
10562 ret = -host_to_target_errno(errno);
10565 break;
10566 #if defined(TARGET_NR_preadv)
10567 case TARGET_NR_preadv:
10569 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10570 if (vec != NULL) {
10571 unsigned long low, high;
10573 target_to_host_low_high(arg4, arg5, &low, &high);
10574 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10575 unlock_iovec(vec, arg2, arg3, 1);
10576 } else {
10577 ret = -host_to_target_errno(errno);
10580 break;
10581 #endif
10582 #if defined(TARGET_NR_pwritev)
10583 case TARGET_NR_pwritev:
10585 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10586 if (vec != NULL) {
10587 unsigned long low, high;
10589 target_to_host_low_high(arg4, arg5, &low, &high);
10590 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10591 unlock_iovec(vec, arg2, arg3, 0);
10592 } else {
10593 ret = -host_to_target_errno(errno);
10596 break;
10597 #endif
10598 case TARGET_NR_getsid:
10599 ret = get_errno(getsid(arg1));
10600 break;
10601 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10602 case TARGET_NR_fdatasync:
10603 ret = get_errno(fdatasync(arg1));
10604 break;
10605 #endif
10606 #ifdef TARGET_NR__sysctl
10607 case TARGET_NR__sysctl:
10608 /* We don't implement this, but ENOTDIR is always a safe
10609 return value. */
10610 ret = -TARGET_ENOTDIR;
10611 break;
10612 #endif
10613 case TARGET_NR_sched_getaffinity:
10615 unsigned int mask_size;
10616 unsigned long *mask;
10619 * sched_getaffinity needs multiples of ulong, so need to take
10620 * care of mismatches between target ulong and host ulong sizes.
10622 if (arg2 & (sizeof(abi_ulong) - 1)) {
10623 ret = -TARGET_EINVAL;
10624 break;
10626 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10628 mask = alloca(mask_size);
10629 memset(mask, 0, mask_size);
10630 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10632 if (!is_error(ret)) {
10633 if (ret > arg2) {
10634 /* More data returned than the caller's buffer will fit.
10635 * This only happens if sizeof(abi_long) < sizeof(long)
10636 * and the caller passed us a buffer holding an odd number
10637 * of abi_longs. If the host kernel is actually using the
10638 * extra 4 bytes then fail EINVAL; otherwise we can just
10639 * ignore them and only copy the interesting part.
10641 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10642 if (numcpus > arg2 * 8) {
10643 ret = -TARGET_EINVAL;
10644 break;
10646 ret = arg2;
10649 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10650 goto efault;
10654 break;
10655 case TARGET_NR_sched_setaffinity:
10657 unsigned int mask_size;
10658 unsigned long *mask;
10661 * sched_setaffinity needs multiples of ulong, so need to take
10662 * care of mismatches between target ulong and host ulong sizes.
10664 if (arg2 & (sizeof(abi_ulong) - 1)) {
10665 ret = -TARGET_EINVAL;
10666 break;
10668 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10669 mask = alloca(mask_size);
10671 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10672 if (ret) {
10673 break;
10676 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10678 break;
10679 case TARGET_NR_getcpu:
10681 unsigned cpu, node;
10682 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10683 arg2 ? &node : NULL,
10684 NULL));
10685 if (is_error(ret)) {
10686 goto fail;
10688 if (arg1 && put_user_u32(cpu, arg1)) {
10689 goto efault;
10691 if (arg2 && put_user_u32(node, arg2)) {
10692 goto efault;
10695 break;
10696 case TARGET_NR_sched_setparam:
10698 struct sched_param *target_schp;
10699 struct sched_param schp;
10701 if (arg2 == 0) {
10702 return -TARGET_EINVAL;
10704 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10705 goto efault;
10706 schp.sched_priority = tswap32(target_schp->sched_priority);
10707 unlock_user_struct(target_schp, arg2, 0);
10708 ret = get_errno(sched_setparam(arg1, &schp));
10710 break;
10711 case TARGET_NR_sched_getparam:
10713 struct sched_param *target_schp;
10714 struct sched_param schp;
10716 if (arg2 == 0) {
10717 return -TARGET_EINVAL;
10719 ret = get_errno(sched_getparam(arg1, &schp));
10720 if (!is_error(ret)) {
10721 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10722 goto efault;
10723 target_schp->sched_priority = tswap32(schp.sched_priority);
10724 unlock_user_struct(target_schp, arg2, 1);
10727 break;
10728 case TARGET_NR_sched_setscheduler:
10730 struct sched_param *target_schp;
10731 struct sched_param schp;
10732 if (arg3 == 0) {
10733 return -TARGET_EINVAL;
10735 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10736 goto efault;
10737 schp.sched_priority = tswap32(target_schp->sched_priority);
10738 unlock_user_struct(target_schp, arg3, 0);
10739 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
10741 break;
10742 case TARGET_NR_sched_getscheduler:
10743 ret = get_errno(sched_getscheduler(arg1));
10744 break;
10745 case TARGET_NR_sched_yield:
10746 ret = get_errno(sched_yield());
10747 break;
10748 case TARGET_NR_sched_get_priority_max:
10749 ret = get_errno(sched_get_priority_max(arg1));
10750 break;
10751 case TARGET_NR_sched_get_priority_min:
10752 ret = get_errno(sched_get_priority_min(arg1));
10753 break;
10754 case TARGET_NR_sched_rr_get_interval:
10756 struct timespec ts;
10757 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10758 if (!is_error(ret)) {
10759 ret = host_to_target_timespec(arg2, &ts);
10762 break;
10763 case TARGET_NR_nanosleep:
10765 struct timespec req, rem;
10766 target_to_host_timespec(&req, arg1);
10767 ret = get_errno(safe_nanosleep(&req, &rem));
10768 if (is_error(ret) && arg2) {
10769 host_to_target_timespec(arg2, &rem);
10772 break;
10773 #ifdef TARGET_NR_query_module
10774 case TARGET_NR_query_module:
10775 goto unimplemented;
10776 #endif
10777 #ifdef TARGET_NR_nfsservctl
10778 case TARGET_NR_nfsservctl:
10779 goto unimplemented;
10780 #endif
10781 case TARGET_NR_prctl:
10782 switch (arg1) {
10783 case PR_GET_PDEATHSIG:
10785 int deathsig;
10786 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10787 if (!is_error(ret) && arg2
10788 && put_user_ual(deathsig, arg2)) {
10789 goto efault;
10791 break;
10793 #ifdef PR_GET_NAME
10794 case PR_GET_NAME:
10796 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10797 if (!name) {
10798 goto efault;
10800 ret = get_errno(prctl(arg1, (unsigned long)name,
10801 arg3, arg4, arg5));
10802 unlock_user(name, arg2, 16);
10803 break;
10805 case PR_SET_NAME:
10807 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10808 if (!name) {
10809 goto efault;
10811 ret = get_errno(prctl(arg1, (unsigned long)name,
10812 arg3, arg4, arg5));
10813 unlock_user(name, arg2, 0);
10814 break;
10816 #endif
10817 #ifdef TARGET_AARCH64
10818 case TARGET_PR_SVE_SET_VL:
10819 /* We cannot support either PR_SVE_SET_VL_ONEXEC
10820 or PR_SVE_VL_INHERIT. Therefore, anything above
10821 ARM_MAX_VQ results in EINVAL. */
10822 ret = -TARGET_EINVAL;
10823 if (arm_feature(cpu_env, ARM_FEATURE_SVE)
10824 && arg2 >= 0 && arg2 <= ARM_MAX_VQ * 16 && !(arg2 & 15)) {
10825 CPUARMState *env = cpu_env;
10826 int old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10827 int vq = MAX(arg2 / 16, 1);
10829 if (vq < old_vq) {
10830 aarch64_sve_narrow_vq(env, vq);
10832 env->vfp.zcr_el[1] = vq - 1;
10833 ret = vq * 16;
10835 break;
10836 case TARGET_PR_SVE_GET_VL:
10837 ret = -TARGET_EINVAL;
10838 if (arm_feature(cpu_env, ARM_FEATURE_SVE)) {
10839 CPUARMState *env = cpu_env;
10840 ret = ((env->vfp.zcr_el[1] & 0xf) + 1) * 16;
10842 break;
10843 #endif /* AARCH64 */
10844 case PR_GET_SECCOMP:
10845 case PR_SET_SECCOMP:
10846 /* Disable seccomp to prevent the target disabling syscalls we
10847 * need. */
10848 ret = -TARGET_EINVAL;
10849 break;
10850 default:
10851 /* Most prctl options have no pointer arguments */
10852 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10853 break;
10855 break;
10856 #ifdef TARGET_NR_arch_prctl
10857 case TARGET_NR_arch_prctl:
10858 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10859 ret = do_arch_prctl(cpu_env, arg1, arg2);
10860 break;
10861 #else
10862 goto unimplemented;
10863 #endif
10864 #endif
10865 #ifdef TARGET_NR_pread64
10866 case TARGET_NR_pread64:
10867 if (regpairs_aligned(cpu_env, num)) {
10868 arg4 = arg5;
10869 arg5 = arg6;
10871 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
10872 goto efault;
10873 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10874 unlock_user(p, arg2, ret);
10875 break;
10876 case TARGET_NR_pwrite64:
10877 if (regpairs_aligned(cpu_env, num)) {
10878 arg4 = arg5;
10879 arg5 = arg6;
10881 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
10882 goto efault;
10883 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10884 unlock_user(p, arg2, 0);
10885 break;
10886 #endif
10887 case TARGET_NR_getcwd:
10888 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10889 goto efault;
10890 ret = get_errno(sys_getcwd1(p, arg2));
10891 unlock_user(p, arg1, ret);
10892 break;
10893 case TARGET_NR_capget:
10894 case TARGET_NR_capset:
10896 struct target_user_cap_header *target_header;
10897 struct target_user_cap_data *target_data = NULL;
10898 struct __user_cap_header_struct header;
10899 struct __user_cap_data_struct data[2];
10900 struct __user_cap_data_struct *dataptr = NULL;
10901 int i, target_datalen;
10902 int data_items = 1;
10904 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10905 goto efault;
10907 header.version = tswap32(target_header->version);
10908 header.pid = tswap32(target_header->pid);
10910 if (header.version != _LINUX_CAPABILITY_VERSION) {
10911 /* Version 2 and up takes pointer to two user_data structs */
10912 data_items = 2;
10915 target_datalen = sizeof(*target_data) * data_items;
10917 if (arg2) {
10918 if (num == TARGET_NR_capget) {
10919 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10920 } else {
10921 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10923 if (!target_data) {
10924 unlock_user_struct(target_header, arg1, 0);
10925 goto efault;
10928 if (num == TARGET_NR_capset) {
10929 for (i = 0; i < data_items; i++) {
10930 data[i].effective = tswap32(target_data[i].effective);
10931 data[i].permitted = tswap32(target_data[i].permitted);
10932 data[i].inheritable = tswap32(target_data[i].inheritable);
10936 dataptr = data;
10939 if (num == TARGET_NR_capget) {
10940 ret = get_errno(capget(&header, dataptr));
10941 } else {
10942 ret = get_errno(capset(&header, dataptr));
10945 /* The kernel always updates version for both capget and capset */
10946 target_header->version = tswap32(header.version);
10947 unlock_user_struct(target_header, arg1, 1);
10949 if (arg2) {
10950 if (num == TARGET_NR_capget) {
10951 for (i = 0; i < data_items; i++) {
10952 target_data[i].effective = tswap32(data[i].effective);
10953 target_data[i].permitted = tswap32(data[i].permitted);
10954 target_data[i].inheritable = tswap32(data[i].inheritable);
10956 unlock_user(target_data, arg2, target_datalen);
10957 } else {
10958 unlock_user(target_data, arg2, 0);
10961 break;
10963 case TARGET_NR_sigaltstack:
10964 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
10965 break;
10967 #ifdef CONFIG_SENDFILE
10968 case TARGET_NR_sendfile:
10970 off_t *offp = NULL;
10971 off_t off;
10972 if (arg3) {
10973 ret = get_user_sal(off, arg3);
10974 if (is_error(ret)) {
10975 break;
10977 offp = &off;
10979 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10980 if (!is_error(ret) && arg3) {
10981 abi_long ret2 = put_user_sal(off, arg3);
10982 if (is_error(ret2)) {
10983 ret = ret2;
10986 break;
10988 #ifdef TARGET_NR_sendfile64
10989 case TARGET_NR_sendfile64:
10991 off_t *offp = NULL;
10992 off_t off;
10993 if (arg3) {
10994 ret = get_user_s64(off, arg3);
10995 if (is_error(ret)) {
10996 break;
10998 offp = &off;
11000 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11001 if (!is_error(ret) && arg3) {
11002 abi_long ret2 = put_user_s64(off, arg3);
11003 if (is_error(ret2)) {
11004 ret = ret2;
11007 break;
11009 #endif
11010 #else
11011 case TARGET_NR_sendfile:
11012 #ifdef TARGET_NR_sendfile64
11013 case TARGET_NR_sendfile64:
11014 #endif
11015 goto unimplemented;
11016 #endif
11018 #ifdef TARGET_NR_getpmsg
11019 case TARGET_NR_getpmsg:
11020 goto unimplemented;
11021 #endif
11022 #ifdef TARGET_NR_putpmsg
11023 case TARGET_NR_putpmsg:
11024 goto unimplemented;
11025 #endif
11026 #ifdef TARGET_NR_vfork
11027 case TARGET_NR_vfork:
11028 ret = get_errno(do_fork(cpu_env,
11029 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11030 0, 0, 0, 0));
11031 break;
11032 #endif
11033 #ifdef TARGET_NR_ugetrlimit
11034 case TARGET_NR_ugetrlimit:
11036 struct rlimit rlim;
11037 int resource = target_to_host_resource(arg1);
11038 ret = get_errno(getrlimit(resource, &rlim));
11039 if (!is_error(ret)) {
11040 struct target_rlimit *target_rlim;
11041 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11042 goto efault;
11043 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11044 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11045 unlock_user_struct(target_rlim, arg2, 1);
11047 break;
11049 #endif
11050 #ifdef TARGET_NR_truncate64
11051 case TARGET_NR_truncate64:
11052 if (!(p = lock_user_string(arg1)))
11053 goto efault;
11054 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11055 unlock_user(p, arg1, 0);
11056 break;
11057 #endif
11058 #ifdef TARGET_NR_ftruncate64
11059 case TARGET_NR_ftruncate64:
11060 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11061 break;
11062 #endif
11063 #ifdef TARGET_NR_stat64
11064 case TARGET_NR_stat64:
11065 if (!(p = lock_user_string(arg1)))
11066 goto efault;
11067 ret = get_errno(stat(path(p), &st));
11068 unlock_user(p, arg1, 0);
11069 if (!is_error(ret))
11070 ret = host_to_target_stat64(cpu_env, arg2, &st);
11071 break;
11072 #endif
11073 #ifdef TARGET_NR_lstat64
11074 case TARGET_NR_lstat64:
11075 if (!(p = lock_user_string(arg1)))
11076 goto efault;
11077 ret = get_errno(lstat(path(p), &st));
11078 unlock_user(p, arg1, 0);
11079 if (!is_error(ret))
11080 ret = host_to_target_stat64(cpu_env, arg2, &st);
11081 break;
11082 #endif
11083 #ifdef TARGET_NR_fstat64
11084 case TARGET_NR_fstat64:
11085 ret = get_errno(fstat(arg1, &st));
11086 if (!is_error(ret))
11087 ret = host_to_target_stat64(cpu_env, arg2, &st);
11088 break;
11089 #endif
11090 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11091 #ifdef TARGET_NR_fstatat64
11092 case TARGET_NR_fstatat64:
11093 #endif
11094 #ifdef TARGET_NR_newfstatat
11095 case TARGET_NR_newfstatat:
11096 #endif
11097 if (!(p = lock_user_string(arg2)))
11098 goto efault;
11099 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11100 if (!is_error(ret))
11101 ret = host_to_target_stat64(cpu_env, arg3, &st);
11102 break;
11103 #endif
11104 #ifdef TARGET_NR_lchown
11105 case TARGET_NR_lchown:
11106 if (!(p = lock_user_string(arg1)))
11107 goto efault;
11108 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11109 unlock_user(p, arg1, 0);
11110 break;
11111 #endif
11112 #ifdef TARGET_NR_getuid
11113 case TARGET_NR_getuid:
11114 ret = get_errno(high2lowuid(getuid()));
11115 break;
11116 #endif
11117 #ifdef TARGET_NR_getgid
11118 case TARGET_NR_getgid:
11119 ret = get_errno(high2lowgid(getgid()));
11120 break;
11121 #endif
11122 #ifdef TARGET_NR_geteuid
11123 case TARGET_NR_geteuid:
11124 ret = get_errno(high2lowuid(geteuid()));
11125 break;
11126 #endif
11127 #ifdef TARGET_NR_getegid
11128 case TARGET_NR_getegid:
11129 ret = get_errno(high2lowgid(getegid()));
11130 break;
11131 #endif
11132 case TARGET_NR_setreuid:
11133 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11134 break;
11135 case TARGET_NR_setregid:
11136 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11137 break;
11138 case TARGET_NR_getgroups:
11140 int gidsetsize = arg1;
11141 target_id *target_grouplist;
11142 gid_t *grouplist;
11143 int i;
11145 grouplist = alloca(gidsetsize * sizeof(gid_t));
11146 ret = get_errno(getgroups(gidsetsize, grouplist));
11147 if (gidsetsize == 0)
11148 break;
11149 if (!is_error(ret)) {
11150 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11151 if (!target_grouplist)
11152 goto efault;
11153 for(i = 0;i < ret; i++)
11154 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11155 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11158 break;
11159 case TARGET_NR_setgroups:
11161 int gidsetsize = arg1;
11162 target_id *target_grouplist;
11163 gid_t *grouplist = NULL;
11164 int i;
11165 if (gidsetsize) {
11166 grouplist = alloca(gidsetsize * sizeof(gid_t));
11167 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11168 if (!target_grouplist) {
11169 ret = -TARGET_EFAULT;
11170 goto fail;
11172 for (i = 0; i < gidsetsize; i++) {
11173 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11175 unlock_user(target_grouplist, arg2, 0);
11177 ret = get_errno(setgroups(gidsetsize, grouplist));
11179 break;
11180 case TARGET_NR_fchown:
11181 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11182 break;
11183 #if defined(TARGET_NR_fchownat)
11184 case TARGET_NR_fchownat:
11185 if (!(p = lock_user_string(arg2)))
11186 goto efault;
11187 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11188 low2highgid(arg4), arg5));
11189 unlock_user(p, arg2, 0);
11190 break;
11191 #endif
11192 #ifdef TARGET_NR_setresuid
11193 case TARGET_NR_setresuid:
11194 ret = get_errno(sys_setresuid(low2highuid(arg1),
11195 low2highuid(arg2),
11196 low2highuid(arg3)));
11197 break;
11198 #endif
11199 #ifdef TARGET_NR_getresuid
11200 case TARGET_NR_getresuid:
11202 uid_t ruid, euid, suid;
11203 ret = get_errno(getresuid(&ruid, &euid, &suid));
11204 if (!is_error(ret)) {
11205 if (put_user_id(high2lowuid(ruid), arg1)
11206 || put_user_id(high2lowuid(euid), arg2)
11207 || put_user_id(high2lowuid(suid), arg3))
11208 goto efault;
11211 break;
11212 #endif
11213 #ifdef TARGET_NR_getresgid
11214 case TARGET_NR_setresgid:
11215 ret = get_errno(sys_setresgid(low2highgid(arg1),
11216 low2highgid(arg2),
11217 low2highgid(arg3)));
11218 break;
11219 #endif
11220 #ifdef TARGET_NR_getresgid
11221 case TARGET_NR_getresgid:
11223 gid_t rgid, egid, sgid;
11224 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11225 if (!is_error(ret)) {
11226 if (put_user_id(high2lowgid(rgid), arg1)
11227 || put_user_id(high2lowgid(egid), arg2)
11228 || put_user_id(high2lowgid(sgid), arg3))
11229 goto efault;
11232 break;
11233 #endif
11234 #ifdef TARGET_NR_chown
11235 case TARGET_NR_chown:
11236 if (!(p = lock_user_string(arg1)))
11237 goto efault;
11238 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11239 unlock_user(p, arg1, 0);
11240 break;
11241 #endif
11242 case TARGET_NR_setuid:
11243 ret = get_errno(sys_setuid(low2highuid(arg1)));
11244 break;
11245 case TARGET_NR_setgid:
11246 ret = get_errno(sys_setgid(low2highgid(arg1)));
11247 break;
11248 case TARGET_NR_setfsuid:
11249 ret = get_errno(setfsuid(arg1));
11250 break;
11251 case TARGET_NR_setfsgid:
11252 ret = get_errno(setfsgid(arg1));
11253 break;
11255 #ifdef TARGET_NR_lchown32
11256 case TARGET_NR_lchown32:
11257 if (!(p = lock_user_string(arg1)))
11258 goto efault;
11259 ret = get_errno(lchown(p, arg2, arg3));
11260 unlock_user(p, arg1, 0);
11261 break;
11262 #endif
11263 #ifdef TARGET_NR_getuid32
11264 case TARGET_NR_getuid32:
11265 ret = get_errno(getuid());
11266 break;
11267 #endif
11269 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11270 /* Alpha specific */
11271 case TARGET_NR_getxuid:
11273 uid_t euid;
11274 euid=geteuid();
11275 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
11277 ret = get_errno(getuid());
11278 break;
11279 #endif
11280 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11281 /* Alpha specific */
11282 case TARGET_NR_getxgid:
11284 uid_t egid;
11285 egid=getegid();
11286 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
11288 ret = get_errno(getgid());
11289 break;
11290 #endif
11291 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11292 /* Alpha specific */
11293 case TARGET_NR_osf_getsysinfo:
11294 ret = -TARGET_EOPNOTSUPP;
11295 switch (arg1) {
11296 case TARGET_GSI_IEEE_FP_CONTROL:
11298 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
11300 /* Copied from linux ieee_fpcr_to_swcr. */
11301 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
11302 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
11303 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
11304 | SWCR_TRAP_ENABLE_DZE
11305 | SWCR_TRAP_ENABLE_OVF);
11306 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
11307 | SWCR_TRAP_ENABLE_INE);
11308 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
11309 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
11311 if (put_user_u64 (swcr, arg2))
11312 goto efault;
11313 ret = 0;
11315 break;
11317 /* case GSI_IEEE_STATE_AT_SIGNAL:
11318 -- Not implemented in linux kernel.
11319 case GSI_UACPROC:
11320 -- Retrieves current unaligned access state; not much used.
11321 case GSI_PROC_TYPE:
11322 -- Retrieves implver information; surely not used.
11323 case GSI_GET_HWRPB:
11324 -- Grabs a copy of the HWRPB; surely not used.
11327 break;
11328 #endif
11329 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11330 /* Alpha specific */
11331 case TARGET_NR_osf_setsysinfo:
11332 ret = -TARGET_EOPNOTSUPP;
11333 switch (arg1) {
11334 case TARGET_SSI_IEEE_FP_CONTROL:
11336 uint64_t swcr, fpcr, orig_fpcr;
11338 if (get_user_u64 (swcr, arg2)) {
11339 goto efault;
11341 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11342 fpcr = orig_fpcr & FPCR_DYN_MASK;
11344 /* Copied from linux ieee_swcr_to_fpcr. */
11345 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
11346 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
11347 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
11348 | SWCR_TRAP_ENABLE_DZE
11349 | SWCR_TRAP_ENABLE_OVF)) << 48;
11350 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
11351 | SWCR_TRAP_ENABLE_INE)) << 57;
11352 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
11353 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
11355 cpu_alpha_store_fpcr(cpu_env, fpcr);
11356 ret = 0;
11358 break;
11360 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11362 uint64_t exc, fpcr, orig_fpcr;
11363 int si_code;
11365 if (get_user_u64(exc, arg2)) {
11366 goto efault;
11369 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11371 /* We only add to the exception status here. */
11372 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
11374 cpu_alpha_store_fpcr(cpu_env, fpcr);
11375 ret = 0;
11377 /* Old exceptions are not signaled. */
11378 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
11380 /* If any exceptions set by this call,
11381 and are unmasked, send a signal. */
11382 si_code = 0;
11383 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
11384 si_code = TARGET_FPE_FLTRES;
11386 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
11387 si_code = TARGET_FPE_FLTUND;
11389 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
11390 si_code = TARGET_FPE_FLTOVF;
11392 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
11393 si_code = TARGET_FPE_FLTDIV;
11395 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
11396 si_code = TARGET_FPE_FLTINV;
11398 if (si_code != 0) {
11399 target_siginfo_t info;
11400 info.si_signo = SIGFPE;
11401 info.si_errno = 0;
11402 info.si_code = si_code;
11403 info._sifields._sigfault._addr
11404 = ((CPUArchState *)cpu_env)->pc;
11405 queue_signal((CPUArchState *)cpu_env, info.si_signo,
11406 QEMU_SI_FAULT, &info);
11409 break;
11411 /* case SSI_NVPAIRS:
11412 -- Used with SSIN_UACPROC to enable unaligned accesses.
11413 case SSI_IEEE_STATE_AT_SIGNAL:
11414 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11415 -- Not implemented in linux kernel
11418 break;
11419 #endif
11420 #ifdef TARGET_NR_osf_sigprocmask
11421 /* Alpha specific. */
11422 case TARGET_NR_osf_sigprocmask:
11424 abi_ulong mask;
11425 int how;
11426 sigset_t set, oldset;
11428 switch(arg1) {
11429 case TARGET_SIG_BLOCK:
11430 how = SIG_BLOCK;
11431 break;
11432 case TARGET_SIG_UNBLOCK:
11433 how = SIG_UNBLOCK;
11434 break;
11435 case TARGET_SIG_SETMASK:
11436 how = SIG_SETMASK;
11437 break;
11438 default:
11439 ret = -TARGET_EINVAL;
11440 goto fail;
11442 mask = arg2;
11443 target_to_host_old_sigset(&set, &mask);
11444 ret = do_sigprocmask(how, &set, &oldset);
11445 if (!ret) {
11446 host_to_target_old_sigset(&mask, &oldset);
11447 ret = mask;
11450 break;
11451 #endif
11453 #ifdef TARGET_NR_getgid32
11454 case TARGET_NR_getgid32:
11455 ret = get_errno(getgid());
11456 break;
11457 #endif
11458 #ifdef TARGET_NR_geteuid32
11459 case TARGET_NR_geteuid32:
11460 ret = get_errno(geteuid());
11461 break;
11462 #endif
11463 #ifdef TARGET_NR_getegid32
11464 case TARGET_NR_getegid32:
11465 ret = get_errno(getegid());
11466 break;
11467 #endif
11468 #ifdef TARGET_NR_setreuid32
11469 case TARGET_NR_setreuid32:
11470 ret = get_errno(setreuid(arg1, arg2));
11471 break;
11472 #endif
11473 #ifdef TARGET_NR_setregid32
11474 case TARGET_NR_setregid32:
11475 ret = get_errno(setregid(arg1, arg2));
11476 break;
11477 #endif
11478 #ifdef TARGET_NR_getgroups32
11479 case TARGET_NR_getgroups32:
11481 int gidsetsize = arg1;
11482 uint32_t *target_grouplist;
11483 gid_t *grouplist;
11484 int i;
11486 grouplist = alloca(gidsetsize * sizeof(gid_t));
11487 ret = get_errno(getgroups(gidsetsize, grouplist));
11488 if (gidsetsize == 0)
11489 break;
11490 if (!is_error(ret)) {
11491 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11492 if (!target_grouplist) {
11493 ret = -TARGET_EFAULT;
11494 goto fail;
11496 for(i = 0;i < ret; i++)
11497 target_grouplist[i] = tswap32(grouplist[i]);
11498 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11501 break;
11502 #endif
11503 #ifdef TARGET_NR_setgroups32
11504 case TARGET_NR_setgroups32:
11506 int gidsetsize = arg1;
11507 uint32_t *target_grouplist;
11508 gid_t *grouplist;
11509 int i;
11511 grouplist = alloca(gidsetsize * sizeof(gid_t));
11512 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11513 if (!target_grouplist) {
11514 ret = -TARGET_EFAULT;
11515 goto fail;
11517 for(i = 0;i < gidsetsize; i++)
11518 grouplist[i] = tswap32(target_grouplist[i]);
11519 unlock_user(target_grouplist, arg2, 0);
11520 ret = get_errno(setgroups(gidsetsize, grouplist));
11522 break;
11523 #endif
11524 #ifdef TARGET_NR_fchown32
11525 case TARGET_NR_fchown32:
11526 ret = get_errno(fchown(arg1, arg2, arg3));
11527 break;
11528 #endif
11529 #ifdef TARGET_NR_setresuid32
11530 case TARGET_NR_setresuid32:
11531 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
11532 break;
11533 #endif
11534 #ifdef TARGET_NR_getresuid32
11535 case TARGET_NR_getresuid32:
11537 uid_t ruid, euid, suid;
11538 ret = get_errno(getresuid(&ruid, &euid, &suid));
11539 if (!is_error(ret)) {
11540 if (put_user_u32(ruid, arg1)
11541 || put_user_u32(euid, arg2)
11542 || put_user_u32(suid, arg3))
11543 goto efault;
11546 break;
11547 #endif
11548 #ifdef TARGET_NR_setresgid32
11549 case TARGET_NR_setresgid32:
11550 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
11551 break;
11552 #endif
11553 #ifdef TARGET_NR_getresgid32
11554 case TARGET_NR_getresgid32:
11556 gid_t rgid, egid, sgid;
11557 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11558 if (!is_error(ret)) {
11559 if (put_user_u32(rgid, arg1)
11560 || put_user_u32(egid, arg2)
11561 || put_user_u32(sgid, arg3))
11562 goto efault;
11565 break;
11566 #endif
11567 #ifdef TARGET_NR_chown32
11568 case TARGET_NR_chown32:
11569 if (!(p = lock_user_string(arg1)))
11570 goto efault;
11571 ret = get_errno(chown(p, arg2, arg3));
11572 unlock_user(p, arg1, 0);
11573 break;
11574 #endif
11575 #ifdef TARGET_NR_setuid32
11576 case TARGET_NR_setuid32:
11577 ret = get_errno(sys_setuid(arg1));
11578 break;
11579 #endif
11580 #ifdef TARGET_NR_setgid32
11581 case TARGET_NR_setgid32:
11582 ret = get_errno(sys_setgid(arg1));
11583 break;
11584 #endif
11585 #ifdef TARGET_NR_setfsuid32
11586 case TARGET_NR_setfsuid32:
11587 ret = get_errno(setfsuid(arg1));
11588 break;
11589 #endif
11590 #ifdef TARGET_NR_setfsgid32
11591 case TARGET_NR_setfsgid32:
11592 ret = get_errno(setfsgid(arg1));
11593 break;
11594 #endif
11596 case TARGET_NR_pivot_root:
11597 goto unimplemented;
11598 #ifdef TARGET_NR_mincore
11599 case TARGET_NR_mincore:
11601 void *a;
11602 ret = -TARGET_ENOMEM;
11603 a = lock_user(VERIFY_READ, arg1, arg2, 0);
11604 if (!a) {
11605 goto fail;
11607 ret = -TARGET_EFAULT;
11608 p = lock_user_string(arg3);
11609 if (!p) {
11610 goto mincore_fail;
11612 ret = get_errno(mincore(a, arg2, p));
11613 unlock_user(p, arg3, ret);
11614 mincore_fail:
11615 unlock_user(a, arg1, 0);
11617 break;
11618 #endif
11619 #ifdef TARGET_NR_arm_fadvise64_64
11620 case TARGET_NR_arm_fadvise64_64:
11621 /* arm_fadvise64_64 looks like fadvise64_64 but
11622 * with different argument order: fd, advice, offset, len
11623 * rather than the usual fd, offset, len, advice.
11624 * Note that offset and len are both 64-bit so appear as
11625 * pairs of 32-bit registers.
11627 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11628 target_offset64(arg5, arg6), arg2);
11629 ret = -host_to_target_errno(ret);
11630 break;
11631 #endif
11633 #if TARGET_ABI_BITS == 32
11635 #ifdef TARGET_NR_fadvise64_64
11636 case TARGET_NR_fadvise64_64:
11637 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11638 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11639 ret = arg2;
11640 arg2 = arg3;
11641 arg3 = arg4;
11642 arg4 = arg5;
11643 arg5 = arg6;
11644 arg6 = ret;
11645 #else
11646 /* 6 args: fd, offset (high, low), len (high, low), advice */
11647 if (regpairs_aligned(cpu_env, num)) {
11648 /* offset is in (3,4), len in (5,6) and advice in 7 */
11649 arg2 = arg3;
11650 arg3 = arg4;
11651 arg4 = arg5;
11652 arg5 = arg6;
11653 arg6 = arg7;
11655 #endif
11656 ret = -host_to_target_errno(posix_fadvise(arg1,
11657 target_offset64(arg2, arg3),
11658 target_offset64(arg4, arg5),
11659 arg6));
11660 break;
11661 #endif
11663 #ifdef TARGET_NR_fadvise64
11664 case TARGET_NR_fadvise64:
11665 /* 5 args: fd, offset (high, low), len, advice */
11666 if (regpairs_aligned(cpu_env, num)) {
11667 /* offset is in (3,4), len in 5 and advice in 6 */
11668 arg2 = arg3;
11669 arg3 = arg4;
11670 arg4 = arg5;
11671 arg5 = arg6;
11673 ret = -host_to_target_errno(posix_fadvise(arg1,
11674 target_offset64(arg2, arg3),
11675 arg4, arg5));
11676 break;
11677 #endif
11679 #else /* not a 32-bit ABI */
11680 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11681 #ifdef TARGET_NR_fadvise64_64
11682 case TARGET_NR_fadvise64_64:
11683 #endif
11684 #ifdef TARGET_NR_fadvise64
11685 case TARGET_NR_fadvise64:
11686 #endif
11687 #ifdef TARGET_S390X
11688 switch (arg4) {
11689 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11690 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11691 case 6: arg4 = POSIX_FADV_DONTNEED; break;
11692 case 7: arg4 = POSIX_FADV_NOREUSE; break;
11693 default: break;
11695 #endif
11696 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11697 break;
11698 #endif
11699 #endif /* end of 64-bit ABI fadvise handling */
11701 #ifdef TARGET_NR_madvise
11702 case TARGET_NR_madvise:
11703 /* A straight passthrough may not be safe because qemu sometimes
11704 turns private file-backed mappings into anonymous mappings.
11705 This will break MADV_DONTNEED.
11706 This is a hint, so ignoring and returning success is ok. */
11707 ret = get_errno(0);
11708 break;
11709 #endif
11710 #if TARGET_ABI_BITS == 32
11711 case TARGET_NR_fcntl64:
11713 int cmd;
11714 struct flock64 fl;
11715 from_flock64_fn *copyfrom = copy_from_user_flock64;
11716 to_flock64_fn *copyto = copy_to_user_flock64;
11718 #ifdef TARGET_ARM
11719 if (!((CPUARMState *)cpu_env)->eabi) {
11720 copyfrom = copy_from_user_oabi_flock64;
11721 copyto = copy_to_user_oabi_flock64;
11723 #endif
11725 cmd = target_to_host_fcntl_cmd(arg2);
11726 if (cmd == -TARGET_EINVAL) {
11727 ret = cmd;
11728 break;
11731 switch(arg2) {
11732 case TARGET_F_GETLK64:
11733 ret = copyfrom(&fl, arg3);
11734 if (ret) {
11735 break;
11737 ret = get_errno(fcntl(arg1, cmd, &fl));
11738 if (ret == 0) {
11739 ret = copyto(arg3, &fl);
11741 break;
11743 case TARGET_F_SETLK64:
11744 case TARGET_F_SETLKW64:
11745 ret = copyfrom(&fl, arg3);
11746 if (ret) {
11747 break;
11749 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11750 break;
11751 default:
11752 ret = do_fcntl(arg1, arg2, arg3);
11753 break;
11755 break;
11757 #endif
11758 #ifdef TARGET_NR_cacheflush
11759 case TARGET_NR_cacheflush:
11760 /* self-modifying code is handled automatically, so nothing needed */
11761 ret = 0;
11762 break;
11763 #endif
11764 #ifdef TARGET_NR_security
11765 case TARGET_NR_security:
11766 goto unimplemented;
11767 #endif
11768 #ifdef TARGET_NR_getpagesize
11769 case TARGET_NR_getpagesize:
11770 ret = TARGET_PAGE_SIZE;
11771 break;
11772 #endif
11773 case TARGET_NR_gettid:
11774 ret = get_errno(gettid());
11775 break;
11776 #ifdef TARGET_NR_readahead
11777 case TARGET_NR_readahead:
11778 #if TARGET_ABI_BITS == 32
11779 if (regpairs_aligned(cpu_env, num)) {
11780 arg2 = arg3;
11781 arg3 = arg4;
11782 arg4 = arg5;
11784 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11785 #else
11786 ret = get_errno(readahead(arg1, arg2, arg3));
11787 #endif
11788 break;
11789 #endif
11790 #ifdef CONFIG_ATTR
11791 #ifdef TARGET_NR_setxattr
11792 case TARGET_NR_listxattr:
11793 case TARGET_NR_llistxattr:
11795 void *p, *b = 0;
11796 if (arg2) {
11797 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11798 if (!b) {
11799 ret = -TARGET_EFAULT;
11800 break;
11803 p = lock_user_string(arg1);
11804 if (p) {
11805 if (num == TARGET_NR_listxattr) {
11806 ret = get_errno(listxattr(p, b, arg3));
11807 } else {
11808 ret = get_errno(llistxattr(p, b, arg3));
11810 } else {
11811 ret = -TARGET_EFAULT;
11813 unlock_user(p, arg1, 0);
11814 unlock_user(b, arg2, arg3);
11815 break;
11817 case TARGET_NR_flistxattr:
11819 void *b = 0;
11820 if (arg2) {
11821 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11822 if (!b) {
11823 ret = -TARGET_EFAULT;
11824 break;
11827 ret = get_errno(flistxattr(arg1, b, arg3));
11828 unlock_user(b, arg2, arg3);
11829 break;
11831 case TARGET_NR_setxattr:
11832 case TARGET_NR_lsetxattr:
11834 void *p, *n, *v = 0;
11835 if (arg3) {
11836 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11837 if (!v) {
11838 ret = -TARGET_EFAULT;
11839 break;
11842 p = lock_user_string(arg1);
11843 n = lock_user_string(arg2);
11844 if (p && n) {
11845 if (num == TARGET_NR_setxattr) {
11846 ret = get_errno(setxattr(p, n, v, arg4, arg5));
11847 } else {
11848 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11850 } else {
11851 ret = -TARGET_EFAULT;
11853 unlock_user(p, arg1, 0);
11854 unlock_user(n, arg2, 0);
11855 unlock_user(v, arg3, 0);
11857 break;
11858 case TARGET_NR_fsetxattr:
11860 void *n, *v = 0;
11861 if (arg3) {
11862 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11863 if (!v) {
11864 ret = -TARGET_EFAULT;
11865 break;
11868 n = lock_user_string(arg2);
11869 if (n) {
11870 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11871 } else {
11872 ret = -TARGET_EFAULT;
11874 unlock_user(n, arg2, 0);
11875 unlock_user(v, arg3, 0);
11877 break;
11878 case TARGET_NR_getxattr:
11879 case TARGET_NR_lgetxattr:
11881 void *p, *n, *v = 0;
11882 if (arg3) {
11883 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11884 if (!v) {
11885 ret = -TARGET_EFAULT;
11886 break;
11889 p = lock_user_string(arg1);
11890 n = lock_user_string(arg2);
11891 if (p && n) {
11892 if (num == TARGET_NR_getxattr) {
11893 ret = get_errno(getxattr(p, n, v, arg4));
11894 } else {
11895 ret = get_errno(lgetxattr(p, n, v, arg4));
11897 } else {
11898 ret = -TARGET_EFAULT;
11900 unlock_user(p, arg1, 0);
11901 unlock_user(n, arg2, 0);
11902 unlock_user(v, arg3, arg4);
11904 break;
11905 case TARGET_NR_fgetxattr:
11907 void *n, *v = 0;
11908 if (arg3) {
11909 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11910 if (!v) {
11911 ret = -TARGET_EFAULT;
11912 break;
11915 n = lock_user_string(arg2);
11916 if (n) {
11917 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11918 } else {
11919 ret = -TARGET_EFAULT;
11921 unlock_user(n, arg2, 0);
11922 unlock_user(v, arg3, arg4);
11924 break;
11925 case TARGET_NR_removexattr:
11926 case TARGET_NR_lremovexattr:
11928 void *p, *n;
11929 p = lock_user_string(arg1);
11930 n = lock_user_string(arg2);
11931 if (p && n) {
11932 if (num == TARGET_NR_removexattr) {
11933 ret = get_errno(removexattr(p, n));
11934 } else {
11935 ret = get_errno(lremovexattr(p, n));
11937 } else {
11938 ret = -TARGET_EFAULT;
11940 unlock_user(p, arg1, 0);
11941 unlock_user(n, arg2, 0);
11943 break;
11944 case TARGET_NR_fremovexattr:
11946 void *n;
11947 n = lock_user_string(arg2);
11948 if (n) {
11949 ret = get_errno(fremovexattr(arg1, n));
11950 } else {
11951 ret = -TARGET_EFAULT;
11953 unlock_user(n, arg2, 0);
11955 break;
11956 #endif
11957 #endif /* CONFIG_ATTR */
11958 #ifdef TARGET_NR_set_thread_area
11959 case TARGET_NR_set_thread_area:
11960 #if defined(TARGET_MIPS)
11961 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11962 ret = 0;
11963 break;
11964 #elif defined(TARGET_CRIS)
11965 if (arg1 & 0xff)
11966 ret = -TARGET_EINVAL;
11967 else {
11968 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11969 ret = 0;
11971 break;
11972 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11973 ret = do_set_thread_area(cpu_env, arg1);
11974 break;
11975 #elif defined(TARGET_M68K)
11977 TaskState *ts = cpu->opaque;
11978 ts->tp_value = arg1;
11979 ret = 0;
11980 break;
11982 #else
11983 goto unimplemented_nowarn;
11984 #endif
11985 #endif
11986 #ifdef TARGET_NR_get_thread_area
11987 case TARGET_NR_get_thread_area:
11988 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11989 ret = do_get_thread_area(cpu_env, arg1);
11990 break;
11991 #elif defined(TARGET_M68K)
11993 TaskState *ts = cpu->opaque;
11994 ret = ts->tp_value;
11995 break;
11997 #else
11998 goto unimplemented_nowarn;
11999 #endif
12000 #endif
12001 #ifdef TARGET_NR_getdomainname
12002 case TARGET_NR_getdomainname:
12003 goto unimplemented_nowarn;
12004 #endif
12006 #ifdef TARGET_NR_clock_settime
12007 case TARGET_NR_clock_settime:
12009 struct timespec ts;
12011 ret = target_to_host_timespec(&ts, arg2);
12012 if (!is_error(ret)) {
12013 ret = get_errno(clock_settime(arg1, &ts));
12015 break;
12017 #endif
12018 #ifdef TARGET_NR_clock_gettime
12019 case TARGET_NR_clock_gettime:
12021 struct timespec ts;
12022 ret = get_errno(clock_gettime(arg1, &ts));
12023 if (!is_error(ret)) {
12024 ret = host_to_target_timespec(arg2, &ts);
12026 break;
12028 #endif
12029 #ifdef TARGET_NR_clock_getres
12030 case TARGET_NR_clock_getres:
12032 struct timespec ts;
12033 ret = get_errno(clock_getres(arg1, &ts));
12034 if (!is_error(ret)) {
12035 host_to_target_timespec(arg2, &ts);
12037 break;
12039 #endif
12040 #ifdef TARGET_NR_clock_nanosleep
12041 case TARGET_NR_clock_nanosleep:
12043 struct timespec ts;
12044 target_to_host_timespec(&ts, arg3);
12045 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12046 &ts, arg4 ? &ts : NULL));
12047 if (arg4)
12048 host_to_target_timespec(arg4, &ts);
12050 #if defined(TARGET_PPC)
12051 /* clock_nanosleep is odd in that it returns positive errno values.
12052 * On PPC, CR0 bit 3 should be set in such a situation. */
12053 if (ret && ret != -TARGET_ERESTARTSYS) {
12054 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
12056 #endif
12057 break;
12059 #endif
12061 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12062 case TARGET_NR_set_tid_address:
12063 ret = get_errno(set_tid_address((int *)g2h(arg1)));
12064 break;
12065 #endif
12067 case TARGET_NR_tkill:
12068 ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12069 break;
12071 case TARGET_NR_tgkill:
12072 ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
12073 target_to_host_signal(arg3)));
12074 break;
12076 #ifdef TARGET_NR_set_robust_list
12077 case TARGET_NR_set_robust_list:
12078 case TARGET_NR_get_robust_list:
12079 /* The ABI for supporting robust futexes has userspace pass
12080 * the kernel a pointer to a linked list which is updated by
12081 * userspace after the syscall; the list is walked by the kernel
12082 * when the thread exits. Since the linked list in QEMU guest
12083 * memory isn't a valid linked list for the host and we have
12084 * no way to reliably intercept the thread-death event, we can't
12085 * support these. Silently return ENOSYS so that guest userspace
12086 * falls back to a non-robust futex implementation (which should
12087 * be OK except in the corner case of the guest crashing while
12088 * holding a mutex that is shared with another process via
12089 * shared memory).
12091 goto unimplemented_nowarn;
12092 #endif
12094 #if defined(TARGET_NR_utimensat)
12095 case TARGET_NR_utimensat:
12097 struct timespec *tsp, ts[2];
12098 if (!arg3) {
12099 tsp = NULL;
12100 } else {
12101 target_to_host_timespec(ts, arg3);
12102 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
12103 tsp = ts;
12105 if (!arg2)
12106 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12107 else {
12108 if (!(p = lock_user_string(arg2))) {
12109 ret = -TARGET_EFAULT;
12110 goto fail;
12112 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12113 unlock_user(p, arg2, 0);
12116 break;
12117 #endif
12118 case TARGET_NR_futex:
12119 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
12120 break;
12121 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12122 case TARGET_NR_inotify_init:
12123 ret = get_errno(sys_inotify_init());
12124 if (ret >= 0) {
12125 fd_trans_register(ret, &target_inotify_trans);
12127 break;
12128 #endif
12129 #ifdef CONFIG_INOTIFY1
12130 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12131 case TARGET_NR_inotify_init1:
12132 ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12133 fcntl_flags_tbl)));
12134 if (ret >= 0) {
12135 fd_trans_register(ret, &target_inotify_trans);
12137 break;
12138 #endif
12139 #endif
12140 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12141 case TARGET_NR_inotify_add_watch:
12142 p = lock_user_string(arg2);
12143 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12144 unlock_user(p, arg2, 0);
12145 break;
12146 #endif
12147 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12148 case TARGET_NR_inotify_rm_watch:
12149 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
12150 break;
12151 #endif
12153 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12154 case TARGET_NR_mq_open:
12156 struct mq_attr posix_mq_attr;
12157 struct mq_attr *pposix_mq_attr;
12158 int host_flags;
12160 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12161 pposix_mq_attr = NULL;
12162 if (arg4) {
12163 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12164 goto efault;
12166 pposix_mq_attr = &posix_mq_attr;
12168 p = lock_user_string(arg1 - 1);
12169 if (!p) {
12170 goto efault;
12172 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12173 unlock_user (p, arg1, 0);
12175 break;
12177 case TARGET_NR_mq_unlink:
12178 p = lock_user_string(arg1 - 1);
12179 if (!p) {
12180 ret = -TARGET_EFAULT;
12181 break;
12183 ret = get_errno(mq_unlink(p));
12184 unlock_user (p, arg1, 0);
12185 break;
12187 case TARGET_NR_mq_timedsend:
12189 struct timespec ts;
12191 p = lock_user (VERIFY_READ, arg2, arg3, 1);
12192 if (arg5 != 0) {
12193 target_to_host_timespec(&ts, arg5);
12194 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12195 host_to_target_timespec(arg5, &ts);
12196 } else {
12197 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12199 unlock_user (p, arg2, arg3);
12201 break;
12203 case TARGET_NR_mq_timedreceive:
12205 struct timespec ts;
12206 unsigned int prio;
12208 p = lock_user (VERIFY_READ, arg2, arg3, 1);
12209 if (arg5 != 0) {
12210 target_to_host_timespec(&ts, arg5);
12211 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12212 &prio, &ts));
12213 host_to_target_timespec(arg5, &ts);
12214 } else {
12215 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12216 &prio, NULL));
12218 unlock_user (p, arg2, arg3);
12219 if (arg4 != 0)
12220 put_user_u32(prio, arg4);
12222 break;
12224 /* Not implemented for now... */
12225 /* case TARGET_NR_mq_notify: */
12226 /* break; */
12228 case TARGET_NR_mq_getsetattr:
12230 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12231 ret = 0;
12232 if (arg2 != 0) {
12233 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12234 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12235 &posix_mq_attr_out));
12236 } else if (arg3 != 0) {
12237 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12239 if (ret == 0 && arg3 != 0) {
12240 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12243 break;
12244 #endif
12246 #ifdef CONFIG_SPLICE
12247 #ifdef TARGET_NR_tee
12248 case TARGET_NR_tee:
12250 ret = get_errno(tee(arg1,arg2,arg3,arg4));
12252 break;
12253 #endif
12254 #ifdef TARGET_NR_splice
12255 case TARGET_NR_splice:
12257 loff_t loff_in, loff_out;
12258 loff_t *ploff_in = NULL, *ploff_out = NULL;
12259 if (arg2) {
12260 if (get_user_u64(loff_in, arg2)) {
12261 goto efault;
12263 ploff_in = &loff_in;
12265 if (arg4) {
12266 if (get_user_u64(loff_out, arg4)) {
12267 goto efault;
12269 ploff_out = &loff_out;
12271 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12272 if (arg2) {
12273 if (put_user_u64(loff_in, arg2)) {
12274 goto efault;
12277 if (arg4) {
12278 if (put_user_u64(loff_out, arg4)) {
12279 goto efault;
12283 break;
12284 #endif
12285 #ifdef TARGET_NR_vmsplice
12286 case TARGET_NR_vmsplice:
12288 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12289 if (vec != NULL) {
12290 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12291 unlock_iovec(vec, arg2, arg3, 0);
12292 } else {
12293 ret = -host_to_target_errno(errno);
12296 break;
12297 #endif
12298 #endif /* CONFIG_SPLICE */
12299 #ifdef CONFIG_EVENTFD
12300 #if defined(TARGET_NR_eventfd)
12301 case TARGET_NR_eventfd:
12302 ret = get_errno(eventfd(arg1, 0));
12303 if (ret >= 0) {
12304 fd_trans_register(ret, &target_eventfd_trans);
12306 break;
12307 #endif
12308 #if defined(TARGET_NR_eventfd2)
12309 case TARGET_NR_eventfd2:
12311 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
12312 if (arg2 & TARGET_O_NONBLOCK) {
12313 host_flags |= O_NONBLOCK;
12315 if (arg2 & TARGET_O_CLOEXEC) {
12316 host_flags |= O_CLOEXEC;
12318 ret = get_errno(eventfd(arg1, host_flags));
12319 if (ret >= 0) {
12320 fd_trans_register(ret, &target_eventfd_trans);
12322 break;
12324 #endif
12325 #endif /* CONFIG_EVENTFD */
12326 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12327 case TARGET_NR_fallocate:
12328 #if TARGET_ABI_BITS == 32
12329 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12330 target_offset64(arg5, arg6)));
12331 #else
12332 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12333 #endif
12334 break;
12335 #endif
12336 #if defined(CONFIG_SYNC_FILE_RANGE)
12337 #if defined(TARGET_NR_sync_file_range)
12338 case TARGET_NR_sync_file_range:
12339 #if TARGET_ABI_BITS == 32
12340 #if defined(TARGET_MIPS)
12341 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12342 target_offset64(arg5, arg6), arg7));
12343 #else
12344 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12345 target_offset64(arg4, arg5), arg6));
12346 #endif /* !TARGET_MIPS */
12347 #else
12348 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12349 #endif
12350 break;
12351 #endif
12352 #if defined(TARGET_NR_sync_file_range2)
12353 case TARGET_NR_sync_file_range2:
12354 /* This is like sync_file_range but the arguments are reordered */
12355 #if TARGET_ABI_BITS == 32
12356 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12357 target_offset64(arg5, arg6), arg2));
12358 #else
12359 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12360 #endif
12361 break;
12362 #endif
12363 #endif
12364 #if defined(TARGET_NR_signalfd4)
12365 case TARGET_NR_signalfd4:
12366 ret = do_signalfd4(arg1, arg2, arg4);
12367 break;
12368 #endif
12369 #if defined(TARGET_NR_signalfd)
12370 case TARGET_NR_signalfd:
12371 ret = do_signalfd4(arg1, arg2, 0);
12372 break;
12373 #endif
12374 #if defined(CONFIG_EPOLL)
12375 #if defined(TARGET_NR_epoll_create)
12376 case TARGET_NR_epoll_create:
12377 ret = get_errno(epoll_create(arg1));
12378 break;
12379 #endif
12380 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12381 case TARGET_NR_epoll_create1:
12382 ret = get_errno(epoll_create1(arg1));
12383 break;
12384 #endif
12385 #if defined(TARGET_NR_epoll_ctl)
12386 case TARGET_NR_epoll_ctl:
12388 struct epoll_event ep;
12389 struct epoll_event *epp = 0;
12390 if (arg4) {
12391 struct target_epoll_event *target_ep;
12392 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12393 goto efault;
12395 ep.events = tswap32(target_ep->events);
12396 /* The epoll_data_t union is just opaque data to the kernel,
12397 * so we transfer all 64 bits across and need not worry what
12398 * actual data type it is.
12400 ep.data.u64 = tswap64(target_ep->data.u64);
12401 unlock_user_struct(target_ep, arg4, 0);
12402 epp = &ep;
12404 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12405 break;
12407 #endif
#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        /* Shared implementation of epoll_wait and epoll_pwait.  The guest
         * event array is locked for writing, a host-side scratch array is
         * allocated, and for epoll_pwait the guest sigset (arg5/arg6) is
         * converted to a host sigset.  Both variants funnel into
         * safe_epoll_pwait (epoll_wait == epoll_pwait with a NULL sigset).
         */
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        /* Bound maxevents so the g_try_new allocation below stays sane. */
        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            ret = -TARGET_EINVAL;
            break;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            goto efault;
        }

        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            ret = -TARGET_ENOMEM;
            break;
        }

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                /* The guest must pass the exact target sigset size. */
                if (arg6 != sizeof(target_sigset_t)) {
                    ret = -TARGET_EINVAL;
                    break;
                }

                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    ret = -TARGET_EFAULT;
                    break;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            /* On success ret is the number of ready events: copy each one
             * back to the guest with the appropriate byte swapping, and
             * mark only the written portion of the guest array dirty.
             */
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        break;
    }
#endif
12495 #endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit.
         * Either pointer may be NULL; the new limit (if any) is swapped to
         * host order on the way in, and the old limit (if requested) is
         * swapped back to guest order on the way out.
         */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);
        if (arg3) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                goto efault;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                goto efault;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        break;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        /* gethostname(name, len): write the host name straight into the
         * guest buffer (it is plain ASCII text, no conversion needed).
         */
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        break;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* Emulated compare-and-swap: if *(u32 *)arg6 == arg2 (the expected
         * old value), store arg1 (the new value); always return the value
         * read.  A faulting read queues SIGSEGV at the guest address.
         */
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo,
                         QEMU_SI_FAULT, &info);
            ret = 0xdeadbeef;

        }
        /* NOTE(review): on the fault path above, execution falls through
         * here with mem_value uninitialized and the 0xdeadbeef marker is
         * overwritten by "ret = mem_value".  Behavior is preserved as-is;
         * confirm against upstream before changing.
         */
        if (mem_value == arg2)
            put_user_u32(arg1, arg6);
        ret = mem_value;
        break;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
    {
        /* Like the kernel implementation and the qemu arm barrier, no-op this? */
        ret = 0;
        break;
    }
#endif
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid.
         * A slot in the g_posix_timers table is reserved and its index,
         * tagged with TIMER_MAGIC, is returned to the guest as the timer id.
         */
        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                /* NOTE(review): on conversion failure the reserved
                 * timer_index slot is not released; kept as-is to
                 * preserve behavior — confirm against upstream.
                 */
                if (ret != 0) {
                    break;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    goto efault;
                }
            }
        }
        break;
    }
#endif
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            /* get_timer_id already returned a target errno. */
            ret = timerid;
        } else if (arg3 == 0) {
            /* new_value is mandatory for timer_settime. */
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                goto efault;
            }
            ret = get_errno(
                timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            /* old_value is optional; copy it back only when requested. */
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                goto efault;
            }
        }
        break;
    }
#endif
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            /* Copy the host itimerspec back to the guest buffer. */
            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        break;
    }
#endif
#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        /* NOTE(review): the overrun count is not a file descriptor, yet it
         * is passed to fd_trans_unregister here; presumably this clears any
         * stale fd translator keyed by the same integer — confirm intent
         * against upstream before removing.
         */
        fd_trans_unregister(ret);
        break;
    }
#endif
#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid.  Delete the host timer and free the
         * g_posix_timers slot so it can be reused by timer_create.
         */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            g_posix_timers[timerid] = 0;
        }
        break;
    }
#endif
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        /* timerfd_create(clockid, flags): only the flags need translating
         * from guest to host bit values.
         */
        ret = get_errno(timerfd_create(arg1,
                        target_to_host_bitmask(arg2, fcntl_flags_tbl)));
        break;
#endif

#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
    {
        /* timerfd_gettime(fd, curr_value): fetch on the host, then convert
         * the itimerspec back to guest layout if a buffer was supplied.
         */
        struct itimerspec its_curr;

        ret = get_errno(timerfd_gettime(arg1, &its_curr));

        if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
            goto efault;
        }
        break;
    }
#endif

#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
    {
        /* timerfd_settime(fd, flags, new_value, old_value): new_value is
         * converted in (NULL allowed), old_value converted out on request.
         */
        struct itimerspec its_new, its_old, *p_new;

        if (arg3) {
            if (target_to_host_itimerspec(&its_new, arg3)) {
                goto efault;
            }
            p_new = &its_new;
        } else {
            p_new = NULL;
        }

        ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

        if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
            goto efault;
        }
        break;
    }
#endif
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        /* Direct pass-through: arguments are plain integers. */
        ret = get_errno(ioprio_get(arg1, arg2));
        break;
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        ret = get_errno(ioprio_set(arg1, arg2, arg3));
        break;
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        ret = get_errno(setns(arg1, arg2));
        break;
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        ret = get_errno(unshare(arg1));
        break;
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    case TARGET_NR_kcmp:
        ret = get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
        break;
#endif
12762 default:
12763 unimplemented:
12764 gemu_log("qemu: Unsupported syscall: %d\n", num);
12765 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
12766 unimplemented_nowarn:
12767 #endif
12768 ret = -TARGET_ENOSYS;
12769 break;
12771 fail:
12772 #ifdef DEBUG
12773 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
12774 #endif
12775 if(do_strace)
12776 print_syscall_ret(num, ret);
12777 trace_guest_user_syscall_ret(cpu, num, ret);
12778 return ret;
12779 efault:
12780 ret = -TARGET_EFAULT;
12781 goto fail;