/*
 * Imported from qemu.git: linux-user/syscall.c
 * (merge of tag 'vga-20180618-pull-request';
 *  blob 2117fb13b4a0ae6f706f920ce72ab6bdf1bf2500)
 */
/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/uio.h>
42 #include <poll.h>
43 #include <sys/times.h>
44 #include <sys/shm.h>
45 #include <sys/sem.h>
46 #include <sys/statfs.h>
47 #include <utime.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef TARGET_GPROF
63 #include <sys/gmon.h>
64 #endif
65 #ifdef CONFIG_EVENTFD
66 #include <sys/eventfd.h>
67 #endif
68 #ifdef CONFIG_EPOLL
69 #include <sys/epoll.h>
70 #endif
71 #ifdef CONFIG_ATTR
72 #include "qemu/xattr.h"
73 #endif
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
76 #endif
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #include <linux/vt.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #include <linux/if_bridge.h>
108 #endif
109 #include <linux/audit.h>
110 #include "linux_loop.h"
111 #include "uname.h"
113 #include "qemu.h"
115 #ifndef CLONE_IO
116 #define CLONE_IO 0x80000000 /* Clone io context */
117 #endif
/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */

/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
130 #define CLONE_THREAD_FLAGS \
131 (CLONE_VM | CLONE_FS | CLONE_FILES | \
132 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
138 #define CLONE_IGNORED_FLAGS \
139 (CLONE_DETACHED | CLONE_IO)
141 /* Flags for fork which we can implement within QEMU itself */
142 #define CLONE_OPTIONAL_FORK_FLAGS \
143 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
144 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
146 /* Flags for thread creation which we can implement within QEMU itself */
147 #define CLONE_OPTIONAL_THREAD_FLAGS \
148 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
149 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
151 #define CLONE_INVALID_FORK_FLAGS \
152 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
154 #define CLONE_INVALID_THREAD_FLAGS \
155 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
156 CLONE_IGNORED_FLAGS))
/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */

//#define DEBUG
/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS
172 //#include <linux/msdos_fs.h>
173 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
174 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

/* Generators for static wrappers that invoke a raw host syscall by
 * number, bypassing any libc translation layer for that call.
 */
#define _syscall0(type,name)		\
static type name (void)			\
{					\
	return syscall(__NR_##name);	\
}

#define _syscall1(type,name,type1,arg1)		\
static type name (type1 arg1)			\
{						\
	return syscall(__NR_##name, arg1);	\
}

#define _syscall2(type,name,type1,arg1,type2,arg2)	\
static type name (type1 arg1,type2 arg2)		\
{							\
	return syscall(__NR_##name, arg1, arg2);	\
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
static type name (type1 arg1,type2 arg2,type3 arg3)		\
{								\
	return syscall(__NR_##name, arg1, arg2, arg3);		\
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
		  type5,arg5)							\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
		  type5,arg5,type6,arg6)					\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
		  type6 arg6)							\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
}
231 #define __NR_sys_uname __NR_uname
232 #define __NR_sys_getcwd1 __NR_getcwd
233 #define __NR_sys_getdents __NR_getdents
234 #define __NR_sys_getdents64 __NR_getdents64
235 #define __NR_sys_getpriority __NR_getpriority
236 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
237 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
238 #define __NR_sys_syslog __NR_syslog
239 #define __NR_sys_futex __NR_futex
240 #define __NR_sys_inotify_init __NR_inotify_init
241 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
242 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
244 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
245 #define __NR__llseek __NR_lseek
246 #endif
248 /* Newer kernel ports have llseek() instead of _llseek() */
249 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
250 #define TARGET_NR__llseek TARGET_NR_llseek
251 #endif
253 #ifdef __NR_gettid
254 _syscall0(int, gettid)
255 #else
256 /* This is a replacement for the host gettid() and must return a host
257 errno. */
258 static int gettid(void) {
259 return -ENOSYS;
261 #endif
/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif

#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if (defined(TARGET_NR_getdents) && \
      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
285 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
286 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
287 siginfo_t *, uinfo)
288 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
289 #ifdef __NR_exit_group
290 _syscall1(int,exit_group,int,error_code)
291 #endif
292 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
293 _syscall1(int,set_tid_address,int *,tidptr)
294 #endif
295 #if defined(TARGET_NR_futex) && defined(__NR_futex)
296 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
297 const struct timespec *,timeout,int *,uaddr2,int,val3)
298 #endif
299 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
300 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
301 unsigned long *, user_mask_ptr);
302 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
303 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
304 unsigned long *, user_mask_ptr);
305 #define __NR_sys_getcpu __NR_getcpu
306 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
307 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
308 void *, arg);
309 _syscall2(int, capget, struct __user_cap_header_struct *, header,
310 struct __user_cap_data_struct *, data);
311 _syscall2(int, capset, struct __user_cap_header_struct *, header,
312 struct __user_cap_data_struct *, data);
313 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
314 _syscall2(int, ioprio_get, int, which, int, who)
315 #endif
316 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
317 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
318 #endif
319 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
320 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
321 #endif
323 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
324 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
325 unsigned long, idx1, unsigned long, idx2)
326 #endif
328 static bitmask_transtbl fcntl_flags_tbl[] = {
329 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
330 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
331 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
332 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
333 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
334 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
335 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
336 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
337 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
338 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
339 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
340 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
341 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
342 #if defined(O_DIRECT)
343 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
344 #endif
345 #if defined(O_NOATIME)
346 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
347 #endif
348 #if defined(O_CLOEXEC)
349 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
350 #endif
351 #if defined(O_PATH)
352 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
353 #endif
354 #if defined(O_TMPFILE)
355 { TARGET_O_TMPFILE, TARGET_O_TMPFILE, O_TMPFILE, O_TMPFILE },
356 #endif
357 /* Don't terminate the list prematurely on 64-bit host+guest. */
358 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
359 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
360 #endif
361 { 0, 0, 0, 0 }
/* Local copy of the kernel's IFLA_BR_* bridge attribute numbering, so the
 * netlink translation code does not depend on host kernel header versions.
 */
enum {
    QEMU_IFLA_BR_UNSPEC,
    QEMU_IFLA_BR_FORWARD_DELAY,
    QEMU_IFLA_BR_HELLO_TIME,
    QEMU_IFLA_BR_MAX_AGE,
    QEMU_IFLA_BR_AGEING_TIME,
    QEMU_IFLA_BR_STP_STATE,
    QEMU_IFLA_BR_PRIORITY,
    QEMU_IFLA_BR_VLAN_FILTERING,
    QEMU_IFLA_BR_VLAN_PROTOCOL,
    QEMU_IFLA_BR_GROUP_FWD_MASK,
    QEMU_IFLA_BR_ROOT_ID,
    QEMU_IFLA_BR_BRIDGE_ID,
    QEMU_IFLA_BR_ROOT_PORT,
    QEMU_IFLA_BR_ROOT_PATH_COST,
    QEMU_IFLA_BR_TOPOLOGY_CHANGE,
    QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
    QEMU_IFLA_BR_HELLO_TIMER,
    QEMU_IFLA_BR_TCN_TIMER,
    QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
    QEMU_IFLA_BR_GC_TIMER,
    QEMU_IFLA_BR_GROUP_ADDR,
    QEMU_IFLA_BR_FDB_FLUSH,
    QEMU_IFLA_BR_MCAST_ROUTER,
    QEMU_IFLA_BR_MCAST_SNOOPING,
    QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
    QEMU_IFLA_BR_MCAST_QUERIER,
    QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
    QEMU_IFLA_BR_MCAST_HASH_MAX,
    QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
    QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
    QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
    QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
    QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
    QEMU_IFLA_BR_MCAST_QUERY_INTVL,
    QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
    QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
    QEMU_IFLA_BR_NF_CALL_IPTABLES,
    QEMU_IFLA_BR_NF_CALL_IP6TABLES,
    QEMU_IFLA_BR_NF_CALL_ARPTABLES,
    QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
    QEMU_IFLA_BR_PAD,
    QEMU_IFLA_BR_VLAN_STATS_ENABLED,
    QEMU_IFLA_BR_MCAST_STATS_ENABLED,
    QEMU_IFLA_BR_MCAST_IGMP_VERSION,
    QEMU_IFLA_BR_MCAST_MLD_VERSION,
    QEMU___IFLA_BR_MAX,
};
/* Local copy of the kernel's IFLA_* link attribute numbering (rtnetlink). */
enum {
    QEMU_IFLA_UNSPEC,
    QEMU_IFLA_ADDRESS,
    QEMU_IFLA_BROADCAST,
    QEMU_IFLA_IFNAME,
    QEMU_IFLA_MTU,
    QEMU_IFLA_LINK,
    QEMU_IFLA_QDISC,
    QEMU_IFLA_STATS,
    QEMU_IFLA_COST,
    QEMU_IFLA_PRIORITY,
    QEMU_IFLA_MASTER,
    QEMU_IFLA_WIRELESS,
    QEMU_IFLA_PROTINFO,
    QEMU_IFLA_TXQLEN,
    QEMU_IFLA_MAP,
    QEMU_IFLA_WEIGHT,
    QEMU_IFLA_OPERSTATE,
    QEMU_IFLA_LINKMODE,
    QEMU_IFLA_LINKINFO,
    QEMU_IFLA_NET_NS_PID,
    QEMU_IFLA_IFALIAS,
    QEMU_IFLA_NUM_VF,
    QEMU_IFLA_VFINFO_LIST,
    QEMU_IFLA_STATS64,
    QEMU_IFLA_VF_PORTS,
    QEMU_IFLA_PORT_SELF,
    QEMU_IFLA_AF_SPEC,
    QEMU_IFLA_GROUP,
    QEMU_IFLA_NET_NS_FD,
    QEMU_IFLA_EXT_MASK,
    QEMU_IFLA_PROMISCUITY,
    QEMU_IFLA_NUM_TX_QUEUES,
    QEMU_IFLA_NUM_RX_QUEUES,
    QEMU_IFLA_CARRIER,
    QEMU_IFLA_PHYS_PORT_ID,
    QEMU_IFLA_CARRIER_CHANGES,
    QEMU_IFLA_PHYS_SWITCH_ID,
    QEMU_IFLA_LINK_NETNSID,
    QEMU_IFLA_PHYS_PORT_NAME,
    QEMU_IFLA_PROTO_DOWN,
    QEMU_IFLA_GSO_MAX_SEGS,
    QEMU_IFLA_GSO_MAX_SIZE,
    QEMU_IFLA_PAD,
    QEMU_IFLA_XDP,
    QEMU_IFLA_EVENT,
    QEMU_IFLA_NEW_NETNSID,
    QEMU_IFLA_IF_NETNSID,
    QEMU_IFLA_CARRIER_UP_COUNT,
    QEMU_IFLA_CARRIER_DOWN_COUNT,
    QEMU_IFLA_NEW_IFINDEX,
    QEMU___IFLA_MAX
};
/* Local copy of the kernel's IFLA_BRPORT_* bridge-port attribute numbering. */
enum {
    QEMU_IFLA_BRPORT_UNSPEC,
    QEMU_IFLA_BRPORT_STATE,
    QEMU_IFLA_BRPORT_PRIORITY,
    QEMU_IFLA_BRPORT_COST,
    QEMU_IFLA_BRPORT_MODE,
    QEMU_IFLA_BRPORT_GUARD,
    QEMU_IFLA_BRPORT_PROTECT,
    QEMU_IFLA_BRPORT_FAST_LEAVE,
    QEMU_IFLA_BRPORT_LEARNING,
    QEMU_IFLA_BRPORT_UNICAST_FLOOD,
    QEMU_IFLA_BRPORT_PROXYARP,
    QEMU_IFLA_BRPORT_LEARNING_SYNC,
    QEMU_IFLA_BRPORT_PROXYARP_WIFI,
    QEMU_IFLA_BRPORT_ROOT_ID,
    QEMU_IFLA_BRPORT_BRIDGE_ID,
    QEMU_IFLA_BRPORT_DESIGNATED_PORT,
    QEMU_IFLA_BRPORT_DESIGNATED_COST,
    QEMU_IFLA_BRPORT_ID,
    QEMU_IFLA_BRPORT_NO,
    QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
    QEMU_IFLA_BRPORT_CONFIG_PENDING,
    QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
    QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
    QEMU_IFLA_BRPORT_HOLD_TIMER,
    QEMU_IFLA_BRPORT_FLUSH,
    QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
    QEMU_IFLA_BRPORT_PAD,
    QEMU_IFLA_BRPORT_MCAST_FLOOD,
    QEMU_IFLA_BRPORT_MCAST_TO_UCAST,
    QEMU_IFLA_BRPORT_VLAN_TUNNEL,
    QEMU_IFLA_BRPORT_BCAST_FLOOD,
    QEMU_IFLA_BRPORT_GROUP_FWD_MASK,
    QEMU_IFLA_BRPORT_NEIGH_SUPPRESS,
    QEMU___IFLA_BRPORT_MAX
};
/* Local copy of the kernel's IFLA_INFO_* link-info attribute numbering. */
enum {
    QEMU_IFLA_INFO_UNSPEC,
    QEMU_IFLA_INFO_KIND,
    QEMU_IFLA_INFO_DATA,
    QEMU_IFLA_INFO_XSTATS,
    QEMU_IFLA_INFO_SLAVE_KIND,
    QEMU_IFLA_INFO_SLAVE_DATA,
    QEMU___IFLA_INFO_MAX,
};
/* Local copy of the kernel's IFLA_INET_* (IPv4 per-link) attribute numbering. */
enum {
    QEMU_IFLA_INET_UNSPEC,
    QEMU_IFLA_INET_CONF,
    QEMU___IFLA_INET_MAX,
};
/* Local copy of the kernel's IFLA_INET6_* (IPv6 per-link) attribute numbering. */
enum {
    QEMU_IFLA_INET6_UNSPEC,
    QEMU_IFLA_INET6_FLAGS,
    QEMU_IFLA_INET6_CONF,
    QEMU_IFLA_INET6_STATS,
    QEMU_IFLA_INET6_MCAST,
    QEMU_IFLA_INET6_CACHEINFO,
    QEMU_IFLA_INET6_ICMP6STATS,
    QEMU_IFLA_INET6_TOKEN,
    QEMU_IFLA_INET6_ADDR_GEN_MODE,
    QEMU___IFLA_INET6_MAX
};
/* Local copy of the kernel's IFLA_XDP_* attribute numbering. */
enum {
    QEMU_IFLA_XDP_UNSPEC,
    QEMU_IFLA_XDP_FD,
    QEMU_IFLA_XDP_ATTACHED,
    QEMU_IFLA_XDP_FLAGS,
    QEMU_IFLA_XDP_PROG_ID,
    QEMU___IFLA_XDP_MAX,
};
542 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
543 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
544 typedef struct TargetFdTrans {
545 TargetFdDataFunc host_to_target_data;
546 TargetFdDataFunc target_to_host_data;
547 TargetFdAddrFunc target_to_host_addr;
548 } TargetFdTrans;
550 static TargetFdTrans **target_fd_trans;
552 static unsigned int target_fd_max;
554 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
556 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
557 return target_fd_trans[fd]->target_to_host_data;
559 return NULL;
562 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
564 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
565 return target_fd_trans[fd]->host_to_target_data;
567 return NULL;
570 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
572 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
573 return target_fd_trans[fd]->target_to_host_addr;
575 return NULL;
578 static void fd_trans_register(int fd, TargetFdTrans *trans)
580 unsigned int oldmax;
582 if (fd >= target_fd_max) {
583 oldmax = target_fd_max;
584 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
585 target_fd_trans = g_renew(TargetFdTrans *,
586 target_fd_trans, target_fd_max);
587 memset((void *)(target_fd_trans + oldmax), 0,
588 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
590 target_fd_trans[fd] = trans;
593 static void fd_trans_unregister(int fd)
595 if (fd >= 0 && fd < target_fd_max) {
596 target_fd_trans[fd] = NULL;
600 static void fd_trans_dup(int oldfd, int newfd)
602 fd_trans_unregister(newfd);
603 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
604 fd_trans_register(newfd, target_fd_trans[oldfd]);
/* getcwd() wrapper matching the kernel syscall convention: on success
 * return the length of the path including the trailing NUL; on failure
 * return -1 with errno set by getcwd().
 */
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
#ifdef TARGET_NR_utimensat
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Host kernel has no utimensat: report ENOSYS like the syscall would. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
/* Host kernel has no renameat2: fall back to renameat() for flags == 0,
 * otherwise report ENOSYS like the syscall would.
 */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY  */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time.
 * A slot holding 0 is free; it is claimed by storing a placeholder
 * value until timer_create() fills in the real timer_t.
 */
static timer_t g_posix_timers[32] = { 0, } ;

/* Claim and return the index of the first free timer slot, or -1 if all
 * 32 slots are in use.
 */
static inline int next_free_host_timer(void)
{
    int k ;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env, int num)
{
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_SH4)
/* SH4 doesn't align register pairs, except for p{read,write}64 */
static inline int regpairs_aligned(void *cpu_env, int num)
{
    switch (num) {
    case TARGET_NR_pread64:
    case TARGET_NR_pwrite64:
        return 1;

    default:
        return 0;
    }
}
#elif defined(TARGET_XTENSA)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
#endif
753 #define ERRNO_TABLE_SIZE 1200
755 /* target_to_host_errno_table[] is initialized from
756 * host_to_target_errno_table[] in syscall_init(). */
757 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
761 * This list is the union of errno values overridden in asm-<arch>/errno.h
762 * minus the errnos that are not actually generic to all archs.
764 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
765 [EAGAIN] = TARGET_EAGAIN,
766 [EIDRM] = TARGET_EIDRM,
767 [ECHRNG] = TARGET_ECHRNG,
768 [EL2NSYNC] = TARGET_EL2NSYNC,
769 [EL3HLT] = TARGET_EL3HLT,
770 [EL3RST] = TARGET_EL3RST,
771 [ELNRNG] = TARGET_ELNRNG,
772 [EUNATCH] = TARGET_EUNATCH,
773 [ENOCSI] = TARGET_ENOCSI,
774 [EL2HLT] = TARGET_EL2HLT,
775 [EDEADLK] = TARGET_EDEADLK,
776 [ENOLCK] = TARGET_ENOLCK,
777 [EBADE] = TARGET_EBADE,
778 [EBADR] = TARGET_EBADR,
779 [EXFULL] = TARGET_EXFULL,
780 [ENOANO] = TARGET_ENOANO,
781 [EBADRQC] = TARGET_EBADRQC,
782 [EBADSLT] = TARGET_EBADSLT,
783 [EBFONT] = TARGET_EBFONT,
784 [ENOSTR] = TARGET_ENOSTR,
785 [ENODATA] = TARGET_ENODATA,
786 [ETIME] = TARGET_ETIME,
787 [ENOSR] = TARGET_ENOSR,
788 [ENONET] = TARGET_ENONET,
789 [ENOPKG] = TARGET_ENOPKG,
790 [EREMOTE] = TARGET_EREMOTE,
791 [ENOLINK] = TARGET_ENOLINK,
792 [EADV] = TARGET_EADV,
793 [ESRMNT] = TARGET_ESRMNT,
794 [ECOMM] = TARGET_ECOMM,
795 [EPROTO] = TARGET_EPROTO,
796 [EDOTDOT] = TARGET_EDOTDOT,
797 [EMULTIHOP] = TARGET_EMULTIHOP,
798 [EBADMSG] = TARGET_EBADMSG,
799 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
800 [EOVERFLOW] = TARGET_EOVERFLOW,
801 [ENOTUNIQ] = TARGET_ENOTUNIQ,
802 [EBADFD] = TARGET_EBADFD,
803 [EREMCHG] = TARGET_EREMCHG,
804 [ELIBACC] = TARGET_ELIBACC,
805 [ELIBBAD] = TARGET_ELIBBAD,
806 [ELIBSCN] = TARGET_ELIBSCN,
807 [ELIBMAX] = TARGET_ELIBMAX,
808 [ELIBEXEC] = TARGET_ELIBEXEC,
809 [EILSEQ] = TARGET_EILSEQ,
810 [ENOSYS] = TARGET_ENOSYS,
811 [ELOOP] = TARGET_ELOOP,
812 [ERESTART] = TARGET_ERESTART,
813 [ESTRPIPE] = TARGET_ESTRPIPE,
814 [ENOTEMPTY] = TARGET_ENOTEMPTY,
815 [EUSERS] = TARGET_EUSERS,
816 [ENOTSOCK] = TARGET_ENOTSOCK,
817 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
818 [EMSGSIZE] = TARGET_EMSGSIZE,
819 [EPROTOTYPE] = TARGET_EPROTOTYPE,
820 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
821 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
822 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
823 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
824 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
825 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
826 [EADDRINUSE] = TARGET_EADDRINUSE,
827 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
828 [ENETDOWN] = TARGET_ENETDOWN,
829 [ENETUNREACH] = TARGET_ENETUNREACH,
830 [ENETRESET] = TARGET_ENETRESET,
831 [ECONNABORTED] = TARGET_ECONNABORTED,
832 [ECONNRESET] = TARGET_ECONNRESET,
833 [ENOBUFS] = TARGET_ENOBUFS,
834 [EISCONN] = TARGET_EISCONN,
835 [ENOTCONN] = TARGET_ENOTCONN,
836 [EUCLEAN] = TARGET_EUCLEAN,
837 [ENOTNAM] = TARGET_ENOTNAM,
838 [ENAVAIL] = TARGET_ENAVAIL,
839 [EISNAM] = TARGET_EISNAM,
840 [EREMOTEIO] = TARGET_EREMOTEIO,
841 [EDQUOT] = TARGET_EDQUOT,
842 [ESHUTDOWN] = TARGET_ESHUTDOWN,
843 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
844 [ETIMEDOUT] = TARGET_ETIMEDOUT,
845 [ECONNREFUSED] = TARGET_ECONNREFUSED,
846 [EHOSTDOWN] = TARGET_EHOSTDOWN,
847 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
848 [EALREADY] = TARGET_EALREADY,
849 [EINPROGRESS] = TARGET_EINPROGRESS,
850 [ESTALE] = TARGET_ESTALE,
851 [ECANCELED] = TARGET_ECANCELED,
852 [ENOMEDIUM] = TARGET_ENOMEDIUM,
853 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
854 #ifdef ENOKEY
855 [ENOKEY] = TARGET_ENOKEY,
856 #endif
857 #ifdef EKEYEXPIRED
858 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
859 #endif
860 #ifdef EKEYREVOKED
861 [EKEYREVOKED] = TARGET_EKEYREVOKED,
862 #endif
863 #ifdef EKEYREJECTED
864 [EKEYREJECTED] = TARGET_EKEYREJECTED,
865 #endif
866 #ifdef EOWNERDEAD
867 [EOWNERDEAD] = TARGET_EOWNERDEAD,
868 #endif
869 #ifdef ENOTRECOVERABLE
870 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
871 #endif
872 #ifdef ENOMSG
873 [ENOMSG] = TARGET_ENOMSG,
874 #endif
875 #ifdef ERKFILL
876 [ERFKILL] = TARGET_ERFKILL,
877 #endif
878 #ifdef EHWPOISON
879 [EHWPOISON] = TARGET_EHWPOISON,
880 #endif
883 static inline int host_to_target_errno(int err)
885 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
886 host_to_target_errno_table[err]) {
887 return host_to_target_errno_table[err];
889 return err;
892 static inline int target_to_host_errno(int err)
894 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
895 target_to_host_errno_table[err]) {
896 return target_to_host_errno_table[err];
898 return err;
901 static inline abi_long get_errno(abi_long ret)
903 if (ret == -1)
904 return -host_to_target_errno(errno);
905 else
906 return ret;
909 const char *target_strerror(int err)
911 if (err == TARGET_ERESTARTSYS) {
912 return "To be restarted";
914 if (err == TARGET_QEMU_ESIGRETURN) {
915 return "Successful exit from sigreturn";
918 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
919 return NULL;
921 return strerror(target_to_host_errno(err));
/* Generators for safe_xxx() wrappers that route through safe_syscall(),
 * the signal-race-free syscall entry path (defined elsewhere).
 */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
971 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
/*
 * Blocking-syscall wrappers.  Each safe_syscallN() invocation expands (via
 * macros defined earlier in this file, not visible here) to a function
 * safe_<name>() taking the listed (type, name) argument pairs and invoking
 * the raw host syscall.  NOTE(review): these are presumably the wrappers
 * that make blocking syscalls restartable/interruptible in a signal-safe
 * way instead of calling through libc -- confirm against the safe_syscall()
 * definition earlier in the file.
 */
972 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
973 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
974 int, flags, mode_t, mode)
975 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
976 struct rusage *, rusage)
977 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
978 int, options, struct rusage *, rusage)
979 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
980 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
981 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
982 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
983 struct timespec *, tsp, const sigset_t *, sigmask,
984 size_t, sigsetsize)
985 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
986 int, maxevents, int, timeout, const sigset_t *, sigmask,
987 size_t, sigsetsize)
988 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
989 const struct timespec *,timeout,int *,uaddr2,int,val3)
990 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
991 safe_syscall2(int, kill, pid_t, pid, int, sig)
992 safe_syscall2(int, tkill, int, tid, int, sig)
993 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
994 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
995 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
996 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
997 unsigned long, pos_l, unsigned long, pos_h)
998 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
999 unsigned long, pos_l, unsigned long, pos_h)
1000 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
1001 socklen_t, addrlen)
1002 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
1003 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
1004 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
1005 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
1006 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
1007 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
1008 safe_syscall2(int, flock, int, fd, int, operation)
1009 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
1010 const struct timespec *, uts, size_t, sigsetsize)
1011 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
1012 int, flags)
1013 safe_syscall2(int, nanosleep, const struct timespec *, req,
1014 struct timespec *, rem)
/* clock_nanosleep only exists where the target ABI defines the syscall number */
1015 #ifdef TARGET_NR_clock_nanosleep
1016 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
1017 const struct timespec *, req, struct timespec *, rem)
1018 #endif
/*
 * SysV IPC safe wrappers.  Hosts with dedicated msgsnd/msgrcv/semtimedop
 * syscalls get direct safe_syscall wrappers; hosts with only the
 * multiplexed ipc(2) syscall get equivalent static helpers below that
 * route through safe_ipc() with the classic IPCCALL sub-operation codes.
 */
1019 #ifdef __NR_msgsnd
1020 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
1021 int, flags)
1022 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
1023 long, msgtype, int, flags)
1024 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
1025 unsigned, nsops, const struct timespec *, timeout)
1026 #else
1027 /* This host kernel architecture uses a single ipc syscall; fake up
1028 * wrappers for the sub-operations to hide this implementation detail.
1029 * Annoyingly we can't include linux/ipc.h to get the constant definitions
1030 * for the call parameter because some structs in there conflict with the
1031 * sys/ipc.h ones. So we just define them here, and rely on them being
1032 * the same for all host architectures.
/* Sub-operation codes mirroring the kernel's SEMTIMEDOP/MSGSND/MSGRCV values */
1034 #define Q_SEMTIMEDOP 4
1035 #define Q_MSGSND 11
1036 #define Q_MSGRCV 12
1037 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
1039 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
1040 void *, ptr, long, fifth)
1041 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
1043 return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
/* NOTE(review): msgrcv uses IPCCALL version 1 so msgp/msgtype are passed
 * directly rather than via the version-0 indirection struct -- confirm
 * against the kernel's ipc(2) dispatch. */
1045 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
1047 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
1049 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
1050 const struct timespec *timeout)
1052 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
1053 (long)timeout);
1055 #endif
/* POSIX message queue wrappers, only where both target and host support them */
1056 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1057 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
1058 size_t, len, unsigned, prio, const struct timespec *, timeout)
1059 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
1060 size_t, len, unsigned *, prio, const struct timespec *, timeout)
1061 #endif
1062 /* We do ioctl like this rather than via safe_syscall3 to preserve the
1063 * "third argument might be integer or pointer or not present" behaviour of
1064 * the libc function.
1066 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
1067 /* Similarly for fcntl. Note that callers must always:
1068 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
1069 * use the flock64 struct rather than unsuffixed flock
1070 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
/* Prefer fcntl64 where the host has it so 64-bit offsets work on 32-bit hosts */
1072 #ifdef __NR_fcntl64
1073 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
1074 #else
1075 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
1076 #endif
/*
 * Convert a host socket type value (SOCK_STREAM/SOCK_DGRAM plus the
 * SOCK_CLOEXEC/SOCK_NONBLOCK flag bits) into the target's encoding.
 * Unrecognised base types are passed through unchanged.
 */
1078 static inline int host_to_target_sock_type(int host_type)
1080 int target_type;
1082 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
1083 case SOCK_DGRAM:
1084 target_type = TARGET_SOCK_DGRAM;
1085 break;
1086 case SOCK_STREAM:
1087 target_type = TARGET_SOCK_STREAM;
1088 break;
1089 default:
1090 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
1091 break;
/* Flag bits are translated individually; guarded because not all hosts
 * define them */
1094 #if defined(SOCK_CLOEXEC)
1095 if (host_type & SOCK_CLOEXEC) {
1096 target_type |= TARGET_SOCK_CLOEXEC;
1098 #endif
1100 #if defined(SOCK_NONBLOCK)
1101 if (host_type & SOCK_NONBLOCK) {
1102 target_type |= TARGET_SOCK_NONBLOCK;
1104 #endif
1106 return target_type;
/* Guest heap state: current break, the initial break (its floor), and the
 * highest host-page-aligned address already reserved for the heap. */
1109 static abi_ulong target_brk;
1110 static abi_ulong target_original_brk;
1111 static abi_ulong brk_page;
/* Record the guest's initial program break (called once at load time). */
1113 void target_set_brk(abi_ulong new_brk)
1115 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
1116 brk_page = HOST_PAGE_ALIGN(target_brk);
/* Debug tracing for do_brk(); enable the fprintf variant when debugging. */
1119 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
1120 #define DEBUGF_BRK(message, args...)
1122 /* do_brk() must return target values and target errnos. */
/*
 * Implement the guest brk(2): grow or shrink the emulated heap.
 * Returns the (possibly unchanged) break address, mimicking Linux's
 * behaviour of returning the current break on failure (except Alpha,
 * see below).
 */
1123 abi_long do_brk(abi_ulong new_brk)
1125 abi_long mapped_addr;
1126 abi_ulong new_alloc_size;
1128 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
/* brk(0) queries the current break without changing it. */
1130 if (!new_brk) {
1131 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
1132 return target_brk;
/* Refuse to shrink below the initial break. */
1134 if (new_brk < target_original_brk) {
1135 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
1136 target_brk);
1137 return target_brk;
1140 /* If the new brk is less than the highest page reserved to the
1141 * target heap allocation, set it and we're almost done... */
1142 if (new_brk <= brk_page) {
1143 /* Heap contents are initialized to zero, as for anonymous
1144 * mapped pages. */
1145 if (new_brk > target_brk) {
1146 memset(g2h(target_brk), 0, new_brk - target_brk);
1148 target_brk = new_brk;
1149 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
1150 return target_brk;
1153 /* We need to allocate more memory after the brk... Note that
1154 * we don't use MAP_FIXED because that will map over the top of
1155 * any existing mapping (like the one with the host libc or qemu
1156 * itself); instead we treat "mapped but at wrong address" as
1157 * a failure and unmap again.
1159 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
1160 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
1161 PROT_READ|PROT_WRITE,
1162 MAP_ANON|MAP_PRIVATE, 0, 0));
1164 if (mapped_addr == brk_page) {
1165 /* Heap contents are initialized to zero, as for anonymous
1166 * mapped pages. Technically the new pages are already
1167 * initialized to zero since they *are* anonymous mapped
1168 * pages, however we have to take care with the contents that
1169 * come from the remaining part of the previous page: it may
1170 * contains garbage data due to a previous heap usage (grown
1171 * then shrunken). */
1172 memset(g2h(target_brk), 0, brk_page - target_brk);
1174 target_brk = new_brk;
1175 brk_page = HOST_PAGE_ALIGN(target_brk);
1176 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
1177 target_brk);
1178 return target_brk;
1179 } else if (mapped_addr != -1) {
1180 /* Mapped but at wrong address, meaning there wasn't actually
1181 * enough space for this brk.
1183 target_munmap(mapped_addr, new_alloc_size);
1184 mapped_addr = -1;
1185 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
1187 else {
1188 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
1191 #if defined(TARGET_ALPHA)
1192 /* We (partially) emulate OSF/1 on Alpha, which requires we
1193 return a proper errno, not an unchanged brk value. */
1194 return -TARGET_ENOMEM;
1195 #endif
1196 /* For everything else, return the previous break. */
1197 return target_brk;
/*
 * Read an n-bit fd_set from guest memory at target_fds_addr into the host
 * fd_set *fds, converting from the target's abi_ulong word layout bit by
 * bit.  Returns 0 on success or -TARGET_EFAULT if the guest buffer cannot
 * be locked.
 */
1200 static inline abi_long copy_from_user_fdset(fd_set *fds,
1201 abi_ulong target_fds_addr,
1202 int n)
1204 int i, nw, j, k;
1205 abi_ulong b, *target_fds;
1207 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1208 if (!(target_fds = lock_user(VERIFY_READ,
1209 target_fds_addr,
1210 sizeof(abi_ulong) * nw,
1211 1)))
1212 return -TARGET_EFAULT;
1214 FD_ZERO(fds);
1215 k = 0;
1216 for (i = 0; i < nw; i++) {
1217 /* grab the abi_ulong */
1218 __get_user(b, &target_fds[i]);
1219 for (j = 0; j < TARGET_ABI_BITS; j++) {
1220 /* check the bit inside the abi_ulong */
1221 if ((b >> j) & 1)
1222 FD_SET(k, fds);
1223 k++;
1227 unlock_user(target_fds, target_fds_addr, 0);
1229 return 0;
/*
 * Like copy_from_user_fdset(), but treats a NULL guest address as "no set":
 * *fds_ptr is pointed at fds when the guest supplied a set, or set to NULL
 * otherwise, matching select(2)'s optional fd_set arguments.
 */
1232 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1233 abi_ulong target_fds_addr,
1234 int n)
1236 if (target_fds_addr) {
1237 if (copy_from_user_fdset(fds, target_fds_addr, n))
1238 return -TARGET_EFAULT;
1239 *fds_ptr = fds;
1240 } else {
1241 *fds_ptr = NULL;
1243 return 0;
/*
 * Write the first n bits of host fd_set *fds back to guest memory at
 * target_fds_addr, packing them into target abi_ulong words.  Inverse of
 * copy_from_user_fdset().  Returns 0 or -TARGET_EFAULT.
 */
1246 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1247 const fd_set *fds,
1248 int n)
1250 int i, nw, j, k;
1251 abi_long v;
1252 abi_ulong *target_fds;
1254 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1255 if (!(target_fds = lock_user(VERIFY_WRITE,
1256 target_fds_addr,
1257 sizeof(abi_ulong) * nw,
1258 0)))
1259 return -TARGET_EFAULT;
1261 k = 0;
1262 for (i = 0; i < nw; i++) {
1263 v = 0;
1264 for (j = 0; j < TARGET_ABI_BITS; j++) {
1265 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1266 k++;
1268 __put_user(v, &target_fds[i]);
1271 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1273 return 0;
/* Host clock tick rate (HZ): Alpha hosts historically use 1024, others 100. */
1276 #if defined(__alpha__)
1277 #define HOST_HZ 1024
1278 #else
1279 #define HOST_HZ 100
1280 #endif
/* Rescale a clock_t tick count from the host's HZ to the target's HZ;
 * a no-op when the rates match. */
1282 static inline abi_long host_to_target_clock_t(long ticks)
1284 #if HOST_HZ == TARGET_HZ
1285 return ticks;
1286 #else
1287 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1288 #endif
/*
 * Copy a host struct rusage to the guest's struct target_rusage at
 * target_addr, byte-swapping every field to target endianness.
 * Returns 0 or -TARGET_EFAULT.
 */
1291 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1292 const struct rusage *rusage)
1294 struct target_rusage *target_rusage;
1296 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1297 return -TARGET_EFAULT;
1298 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1299 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1300 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1301 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1302 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1303 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1304 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1305 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1306 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1307 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1308 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1309 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1310 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1311 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1312 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1313 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1314 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1315 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1316 unlock_user_struct(target_rusage, target_addr, 1);
1318 return 0;
/*
 * Convert a target rlimit value (already in guest byte order) to the host
 * rlim_t.  Target infinity maps to host RLIM_INFINITY, as does any value
 * that does not fit in rlim_t.
 */
1321 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1323 abi_ulong target_rlim_swap;
1324 rlim_t result;
1326 target_rlim_swap = tswapal(target_rlim);
1327 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1328 return RLIM_INFINITY;
1330 result = target_rlim_swap;
/* Value too wide for rlim_t: treat as unlimited rather than truncating. */
1331 if (target_rlim_swap != (rlim_t)result)
1332 return RLIM_INFINITY;
1334 return result;
/*
 * Inverse conversion: host rlim_t to target representation (byte-swapped).
 * Host infinity, or any value not representable in abi_long, becomes
 * TARGET_RLIM_INFINITY.
 */
1337 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1339 abi_ulong target_rlim_swap;
1340 abi_ulong result;
1342 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1343 target_rlim_swap = TARGET_RLIM_INFINITY;
1344 else
1345 target_rlim_swap = rlim;
1346 result = tswapal(target_rlim_swap);
1348 return result;
/*
 * Map a target RLIMIT_* resource code to the host's constant.  Unknown
 * codes are passed through unchanged and left for the host syscall to
 * reject.
 */
1351 static inline int target_to_host_resource(int code)
1353 switch (code) {
1354 case TARGET_RLIMIT_AS:
1355 return RLIMIT_AS;
1356 case TARGET_RLIMIT_CORE:
1357 return RLIMIT_CORE;
1358 case TARGET_RLIMIT_CPU:
1359 return RLIMIT_CPU;
1360 case TARGET_RLIMIT_DATA:
1361 return RLIMIT_DATA;
1362 case TARGET_RLIMIT_FSIZE:
1363 return RLIMIT_FSIZE;
1364 case TARGET_RLIMIT_LOCKS:
1365 return RLIMIT_LOCKS;
1366 case TARGET_RLIMIT_MEMLOCK:
1367 return RLIMIT_MEMLOCK;
1368 case TARGET_RLIMIT_MSGQUEUE:
1369 return RLIMIT_MSGQUEUE;
1370 case TARGET_RLIMIT_NICE:
1371 return RLIMIT_NICE;
1372 case TARGET_RLIMIT_NOFILE:
1373 return RLIMIT_NOFILE;
1374 case TARGET_RLIMIT_NPROC:
1375 return RLIMIT_NPROC;
1376 case TARGET_RLIMIT_RSS:
1377 return RLIMIT_RSS;
1378 case TARGET_RLIMIT_RTPRIO:
1379 return RLIMIT_RTPRIO;
1380 case TARGET_RLIMIT_SIGPENDING:
1381 return RLIMIT_SIGPENDING;
1382 case TARGET_RLIMIT_STACK:
1383 return RLIMIT_STACK;
1384 default:
1385 return code;
/* Read a guest struct timeval into host form; 0 or -TARGET_EFAULT. */
1389 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1390 abi_ulong target_tv_addr)
1392 struct target_timeval *target_tv;
1394 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1395 return -TARGET_EFAULT;
1397 __get_user(tv->tv_sec, &target_tv->tv_sec);
1398 __get_user(tv->tv_usec, &target_tv->tv_usec);
1400 unlock_user_struct(target_tv, target_tv_addr, 0);
1402 return 0;
/* Write a host struct timeval back to guest memory; 0 or -TARGET_EFAULT. */
1405 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1406 const struct timeval *tv)
1408 struct target_timeval *target_tv;
1410 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1411 return -TARGET_EFAULT;
1413 __put_user(tv->tv_sec, &target_tv->tv_sec);
1414 __put_user(tv->tv_usec, &target_tv->tv_usec);
1416 unlock_user_struct(target_tv, target_tv_addr, 1);
1418 return 0;
/* Read a guest struct timezone into host form; 0 or -TARGET_EFAULT. */
1421 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1422 abi_ulong target_tz_addr)
1424 struct target_timezone *target_tz;
1426 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1427 return -TARGET_EFAULT;
1430 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1431 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1433 unlock_user_struct(target_tz, target_tz_addr, 0);
1435 return 0;
/* POSIX mqueue attribute marshalling, compiled only where mq_open exists
 * on both target and host. */
1438 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1439 #include <mqueue.h>
/* Read a guest struct mq_attr into host form; 0 or -TARGET_EFAULT. */
1441 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1442 abi_ulong target_mq_attr_addr)
1444 struct target_mq_attr *target_mq_attr;
1446 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1447 target_mq_attr_addr, 1))
1448 return -TARGET_EFAULT;
1450 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1451 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1452 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1453 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1455 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1457 return 0;
/* Write a host struct mq_attr back to guest memory; 0 or -TARGET_EFAULT. */
1460 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1461 const struct mq_attr *attr)
1463 struct target_mq_attr *target_mq_attr;
1465 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1466 target_mq_attr_addr, 0))
1467 return -TARGET_EFAULT;
1469 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1470 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1471 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1472 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1474 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1476 return 0;
1478 #endif
1480 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1481 /* do_select() must return target values and target errnos. */
/*
 * Emulate select(2): copy in up to three guest fd_sets and an optional
 * timeout, run the host pselect6, then copy results (and remaining time)
 * back out.  The timeval is converted to a timespec because the host call
 * used is pselect6.
 */
1482 static abi_long do_select(int n,
1483 abi_ulong rfd_addr, abi_ulong wfd_addr,
1484 abi_ulong efd_addr, abi_ulong target_tv_addr)
1486 fd_set rfds, wfds, efds;
1487 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1488 struct timeval tv;
1489 struct timespec ts, *ts_ptr;
1490 abi_long ret;
1492 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1493 if (ret) {
1494 return ret;
1496 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1497 if (ret) {
1498 return ret;
1500 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1501 if (ret) {
1502 return ret;
1505 if (target_tv_addr) {
1506 if (copy_from_user_timeval(&tv, target_tv_addr))
1507 return -TARGET_EFAULT;
1508 ts.tv_sec = tv.tv_sec;
1509 ts.tv_nsec = tv.tv_usec * 1000;
1510 ts_ptr = &ts;
1511 } else {
1512 ts_ptr = NULL;
1515 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1516 ts_ptr, NULL));
1518 if (!is_error(ret)) {
1519 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1520 return -TARGET_EFAULT;
1521 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1522 return -TARGET_EFAULT;
1523 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1524 return -TARGET_EFAULT;
/* Write back the remaining timeout, as Linux select(2) does. */
1526 if (target_tv_addr) {
1527 tv.tv_sec = ts.tv_sec;
1528 tv.tv_usec = ts.tv_nsec / 1000;
1529 if (copy_to_user_timeval(target_tv_addr, &tv)) {
1530 return -TARGET_EFAULT;
1535 return ret;
/* Old-style select: a single guest pointer to a struct holding all five
 * arguments, unpacked here and forwarded to do_select(). */
1538 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1539 static abi_long do_old_select(abi_ulong arg1)
1541 struct target_sel_arg_struct *sel;
1542 abi_ulong inp, outp, exp, tvp;
1543 long nsel;
1545 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1546 return -TARGET_EFAULT;
1549 nsel = tswapal(sel->n);
1550 inp = tswapal(sel->inp);
1551 outp = tswapal(sel->outp);
1552 exp = tswapal(sel->exp);
1553 tvp = tswapal(sel->tvp);
1555 unlock_user_struct(sel, arg1, 0);
1557 return do_select(nsel, inp, outp, exp, tvp);
1559 #endif
1560 #endif
/* pipe2() shim: forwards to the host pipe2 where available, else ENOSYS. */
1562 static abi_long do_pipe2(int host_pipe[], int flags)
1564 #ifdef CONFIG_PIPE2
1565 return pipe2(host_pipe, flags);
1566 #else
1567 return -ENOSYS;
1568 #endif
/*
 * Emulate pipe()/pipe2().  Creates the host pipe, then delivers the two
 * fds to the guest.  For the original pipe() syscall some targets return
 * the second fd in a CPU register instead of via the user buffer.
 */
1571 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1572 int flags, int is_pipe2)
1574 int host_pipe[2];
1575 abi_long ret;
1576 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1578 if (is_error(ret))
1579 return get_errno(ret);
1581 /* Several targets have special calling conventions for the original
1582 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1583 if (!is_pipe2) {
1584 #if defined(TARGET_ALPHA)
1585 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1586 return host_pipe[0];
1587 #elif defined(TARGET_MIPS)
1588 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1589 return host_pipe[0];
1590 #elif defined(TARGET_SH4)
1591 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1592 return host_pipe[0];
1593 #elif defined(TARGET_SPARC)
1594 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1595 return host_pipe[0];
1596 #endif
/* Default convention: store both fds into the guest's int[2] buffer. */
1599 if (put_user_s32(host_pipe[0], pipedes)
1600 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1601 return -TARGET_EFAULT;
1602 return get_errno(ret);
/*
 * Convert a guest ip_mreq/ip_mreqn (multicast group join request) into the
 * host struct ip_mreqn.  The ifindex field only exists in the longer
 * ip_mreqn form, hence the length check.  Returns 0 or -TARGET_EFAULT.
 */
1605 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1606 abi_ulong target_addr,
1607 socklen_t len)
1609 struct target_ip_mreqn *target_smreqn;
1611 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1612 if (!target_smreqn)
1613 return -TARGET_EFAULT;
/* IP addresses stay in network byte order on both sides, so no swap. */
1614 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1615 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1616 if (len == sizeof(struct target_ip_mreqn))
1617 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1618 unlock_user(target_smreqn, target_addr, 0);
1620 return 0;
/*
 * Convert a guest sockaddr at target_addr (len bytes) to a host sockaddr
 * in *addr.  Per-fd translation hooks take precedence; otherwise the data
 * is copied and the family-specific fields (AF_UNIX path termination,
 * AF_NETLINK/AF_PACKET 32/16-bit fields) are fixed up.  Returns 0 or
 * -TARGET_EFAULT.
 */
1623 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1624 abi_ulong target_addr,
1625 socklen_t len)
1627 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1628 sa_family_t sa_family;
1629 struct target_sockaddr *target_saddr;
/* Some fd types (registered via fd_trans) carry their own converter. */
1631 if (fd_trans_target_to_host_addr(fd)) {
1632 return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1635 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1636 if (!target_saddr)
1637 return -TARGET_EFAULT;
1639 sa_family = tswap16(target_saddr->sa_family);
1641 /* Oops. The caller might send a incomplete sun_path; sun_path
1642 * must be terminated by \0 (see the manual page), but
1643 * unfortunately it is quite common to specify sockaddr_un
1644 * length as "strlen(x->sun_path)" while it should be
1645 * "strlen(...) + 1". We'll fix that here if needed.
1646 * Linux kernel has a similar feature.
1649 if (sa_family == AF_UNIX) {
1650 if (len < unix_maxlen && len > 0) {
1651 char *cp = (char*)target_saddr;
/* Extend len by one byte to cover the NUL if the caller omitted it. */
1653 if ( cp[len-1] && !cp[len] )
1654 len++;
1656 if (len > unix_maxlen)
1657 len = unix_maxlen;
1660 memcpy(addr, target_saddr, len);
1661 addr->sa_family = sa_family;
1662 if (sa_family == AF_NETLINK) {
1663 struct sockaddr_nl *nladdr;
1665 nladdr = (struct sockaddr_nl *)addr;
1666 nladdr->nl_pid = tswap32(nladdr->nl_pid);
1667 nladdr->nl_groups = tswap32(nladdr->nl_groups);
1668 } else if (sa_family == AF_PACKET) {
1669 struct target_sockaddr_ll *lladdr;
1671 lladdr = (struct target_sockaddr_ll *)addr;
1672 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1673 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1675 unlock_user(target_saddr, target_addr, 0);
1677 return 0;
/*
 * Copy a host sockaddr out to guest memory at target_addr, byte-swapping
 * sa_family and the family-specific multi-byte fields (netlink, packet,
 * and the IPv6 scope id).  len == 0 is a successful no-op.  Returns 0 or
 * -TARGET_EFAULT.
 */
1680 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1681 struct sockaddr *addr,
1682 socklen_t len)
1684 struct target_sockaddr *target_saddr;
1686 if (len == 0) {
1687 return 0;
1689 assert(addr);
1691 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1692 if (!target_saddr)
1693 return -TARGET_EFAULT;
1694 memcpy(target_saddr, addr, len);
/* Only touch sa_family if the buffer is long enough to contain it. */
1695 if (len >= offsetof(struct target_sockaddr, sa_family) +
1696 sizeof(target_saddr->sa_family)) {
1697 target_saddr->sa_family = tswap16(addr->sa_family);
1699 if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1700 struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1701 target_nl->nl_pid = tswap32(target_nl->nl_pid);
1702 target_nl->nl_groups = tswap32(target_nl->nl_groups);
1703 } else if (addr->sa_family == AF_PACKET) {
1704 struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1705 target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1706 target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1707 } else if (addr->sa_family == AF_INET6 &&
1708 len >= sizeof(struct target_sockaddr_in6)) {
1709 struct target_sockaddr_in6 *target_in6 =
1710 (struct target_sockaddr_in6 *)target_saddr;
/* NOTE(review): sin6_scope_id is 32-bit in sockaddr_in6 but only
 * tswap16 is applied here -- looks suspicious; confirm against the
 * target_sockaddr_in6 definition. */
1711 target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1713 unlock_user(target_saddr, target_addr, len);
1715 return 0;
/*
 * Convert the ancillary-data (control message) chain attached to a guest
 * msghdr into host form, walking target and host cmsg chains in lockstep.
 * Payloads handled specially: SCM_RIGHTS fd arrays and SCM_CREDENTIALS;
 * anything else is byte-copied with a warning.  On return
 * msgh->msg_controllen holds the amount of host control space used.
 */
1718 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1719 struct target_msghdr *target_msgh)
1721 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1722 abi_long msg_controllen;
1723 abi_ulong target_cmsg_addr;
1724 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1725 socklen_t space = 0;
1727 msg_controllen = tswapal(target_msgh->msg_controllen);
1728 if (msg_controllen < sizeof (struct target_cmsghdr))
1729 goto the_end;
1730 target_cmsg_addr = tswapal(target_msgh->msg_control);
1731 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1732 target_cmsg_start = target_cmsg;
1733 if (!target_cmsg)
1734 return -TARGET_EFAULT;
1736 while (cmsg && target_cmsg) {
1737 void *data = CMSG_DATA(cmsg);
1738 void *target_data = TARGET_CMSG_DATA(target_cmsg);
/* Payload length excluding the target cmsg header. */
1740 int len = tswapal(target_cmsg->cmsg_len)
1741 - sizeof(struct target_cmsghdr);
1743 space += CMSG_SPACE(len);
1744 if (space > msgh->msg_controllen) {
1745 space -= CMSG_SPACE(len);
1746 /* This is a QEMU bug, since we allocated the payload
1747 * area ourselves (unlike overflow in host-to-target
1748 * conversion, which is just the guest giving us a buffer
1749 * that's too small). It can't happen for the payload types
1750 * we currently support; if it becomes an issue in future
1751 * we would need to improve our allocation strategy to
1752 * something more intelligent than "twice the size of the
1753 * target buffer we're reading from".
1755 gemu_log("Host cmsg overflow\n");
1756 break;
1759 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1760 cmsg->cmsg_level = SOL_SOCKET;
1761 } else {
1762 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1764 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1765 cmsg->cmsg_len = CMSG_LEN(len);
/* SCM_RIGHTS: an array of file descriptors, swapped element-wise. */
1767 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1768 int *fd = (int *)data;
1769 int *target_fd = (int *)target_data;
1770 int i, numfds = len / sizeof(int);
1772 for (i = 0; i < numfds; i++) {
1773 __get_user(fd[i], target_fd + i);
1775 } else if (cmsg->cmsg_level == SOL_SOCKET
1776 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1777 struct ucred *cred = (struct ucred *)data;
1778 struct target_ucred *target_cred =
1779 (struct target_ucred *)target_data;
1781 __get_user(cred->pid, &target_cred->pid);
1782 __get_user(cred->uid, &target_cred->uid);
1783 __get_user(cred->gid, &target_cred->gid);
1784 } else {
/* Unknown payload: copy verbatim and warn (byte order not fixed). */
1785 gemu_log("Unsupported ancillary data: %d/%d\n",
1786 cmsg->cmsg_level, cmsg->cmsg_type);
1787 memcpy(data, target_data, len);
1790 cmsg = CMSG_NXTHDR(msgh, cmsg);
1791 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1792 target_cmsg_start);
1794 unlock_user(target_cmsg, target_cmsg_addr, 0);
1795 the_end:
1796 msgh->msg_controllen = space;
1797 return 0;
/*
 * Convert a host msghdr's ancillary-data chain back into the guest's
 * control buffer, converting each supported payload type
 * (SCM_RIGHTS, SO_TIMESTAMP, SCM_CREDENTIALS, IP_TTL, IP_RECVERR,
 * IPV6_HOPLIMIT, IPV6_RECVERR) and truncating with MSG_CTRUNC when the
 * guest buffer is too small, mirroring the kernel's put_cmsg() behaviour.
 */
1800 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1801 struct msghdr *msgh)
1803 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1804 abi_long msg_controllen;
1805 abi_ulong target_cmsg_addr;
1806 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1807 socklen_t space = 0;
1809 msg_controllen = tswapal(target_msgh->msg_controllen);
1810 if (msg_controllen < sizeof (struct target_cmsghdr))
1811 goto the_end;
1812 target_cmsg_addr = tswapal(target_msgh->msg_control);
1813 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1814 target_cmsg_start = target_cmsg;
1815 if (!target_cmsg)
1816 return -TARGET_EFAULT;
1818 while (cmsg && target_cmsg) {
1819 void *data = CMSG_DATA(cmsg);
1820 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1822 int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1823 int tgt_len, tgt_space;
1825 /* We never copy a half-header but may copy half-data;
1826 * this is Linux's behaviour in put_cmsg(). Note that
1827 * truncation here is a guest problem (which we report
1828 * to the guest via the CTRUNC bit), unlike truncation
1829 * in target_to_host_cmsg, which is a QEMU bug.
1831 if (msg_controllen < sizeof(struct target_cmsghdr)) {
1832 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1833 break;
1836 if (cmsg->cmsg_level == SOL_SOCKET) {
1837 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1838 } else {
1839 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1841 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1843 /* Payload types which need a different size of payload on
1844 * the target must adjust tgt_len here.
1846 tgt_len = len;
1847 switch (cmsg->cmsg_level) {
1848 case SOL_SOCKET:
1849 switch (cmsg->cmsg_type) {
1850 case SO_TIMESTAMP:
1851 tgt_len = sizeof(struct target_timeval);
1852 break;
1853 default:
1854 break;
1856 break;
1857 default:
1858 break;
/* Not enough room for the full payload: set CTRUNC and shrink tgt_len. */
1861 if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1862 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1863 tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1866 /* We must now copy-and-convert len bytes of payload
1867 * into tgt_len bytes of destination space. Bear in mind
1868 * that in both source and destination we may be dealing
1869 * with a truncated value!
1871 switch (cmsg->cmsg_level) {
1872 case SOL_SOCKET:
1873 switch (cmsg->cmsg_type) {
1874 case SCM_RIGHTS:
1876 int *fd = (int *)data;
1877 int *target_fd = (int *)target_data;
1878 int i, numfds = tgt_len / sizeof(int);
1880 for (i = 0; i < numfds; i++) {
1881 __put_user(fd[i], target_fd + i);
1883 break;
1885 case SO_TIMESTAMP:
1887 struct timeval *tv = (struct timeval *)data;
1888 struct target_timeval *target_tv =
1889 (struct target_timeval *)target_data;
1891 if (len != sizeof(struct timeval) ||
1892 tgt_len != sizeof(struct target_timeval)) {
1893 goto unimplemented;
1896 /* copy struct timeval to target */
1897 __put_user(tv->tv_sec, &target_tv->tv_sec);
1898 __put_user(tv->tv_usec, &target_tv->tv_usec);
1899 break;
1901 case SCM_CREDENTIALS:
1903 struct ucred *cred = (struct ucred *)data;
1904 struct target_ucred *target_cred =
1905 (struct target_ucred *)target_data;
1907 __put_user(cred->pid, &target_cred->pid);
1908 __put_user(cred->uid, &target_cred->uid);
1909 __put_user(cred->gid, &target_cred->gid);
1910 break;
1912 default:
1913 goto unimplemented;
1915 break;
1917 case SOL_IP:
1918 switch (cmsg->cmsg_type) {
1919 case IP_TTL:
1921 uint32_t *v = (uint32_t *)data;
1922 uint32_t *t_int = (uint32_t *)target_data;
1924 if (len != sizeof(uint32_t) ||
1925 tgt_len != sizeof(uint32_t)) {
1926 goto unimplemented;
1928 __put_user(*v, t_int);
1929 break;
/* IP_RECVERR: extended error plus offending peer address. */
1931 case IP_RECVERR:
1933 struct errhdr_t {
1934 struct sock_extended_err ee;
1935 struct sockaddr_in offender;
1937 struct errhdr_t *errh = (struct errhdr_t *)data;
1938 struct errhdr_t *target_errh =
1939 (struct errhdr_t *)target_data;
1941 if (len != sizeof(struct errhdr_t) ||
1942 tgt_len != sizeof(struct errhdr_t)) {
1943 goto unimplemented;
1945 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1946 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1947 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1948 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1949 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1950 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1951 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1952 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1953 (void *) &errh->offender, sizeof(errh->offender));
1954 break;
1956 default:
1957 goto unimplemented;
1959 break;
1961 case SOL_IPV6:
1962 switch (cmsg->cmsg_type) {
1963 case IPV6_HOPLIMIT:
1965 uint32_t *v = (uint32_t *)data;
1966 uint32_t *t_int = (uint32_t *)target_data;
1968 if (len != sizeof(uint32_t) ||
1969 tgt_len != sizeof(uint32_t)) {
1970 goto unimplemented;
1972 __put_user(*v, t_int);
1973 break;
/* IPV6_RECVERR: same shape as IP_RECVERR with an IPv6 peer address. */
1975 case IPV6_RECVERR:
1977 struct errhdr6_t {
1978 struct sock_extended_err ee;
1979 struct sockaddr_in6 offender;
1981 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1982 struct errhdr6_t *target_errh =
1983 (struct errhdr6_t *)target_data;
1985 if (len != sizeof(struct errhdr6_t) ||
1986 tgt_len != sizeof(struct errhdr6_t)) {
1987 goto unimplemented;
1989 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1990 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1991 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1992 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1993 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1994 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1995 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1996 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1997 (void *) &errh->offender, sizeof(errh->offender));
1998 break;
2000 default:
2001 goto unimplemented;
2003 break;
2005 default:
2006 unimplemented:
/* Unknown payload: raw copy (no byte-swap), zero-fill any excess. */
2007 gemu_log("Unsupported ancillary data: %d/%d\n",
2008 cmsg->cmsg_level, cmsg->cmsg_type);
2009 memcpy(target_data, data, MIN(len, tgt_len));
2010 if (tgt_len > len) {
2011 memset(target_data + len, 0, tgt_len - len);
2015 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2016 tgt_space = TARGET_CMSG_SPACE(tgt_len);
2017 if (msg_controllen < tgt_space) {
2018 tgt_space = msg_controllen;
2020 msg_controllen -= tgt_space;
2021 space += tgt_space;
2022 cmsg = CMSG_NXTHDR(msgh, cmsg);
2023 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2024 target_cmsg_start);
2026 unlock_user(target_cmsg, target_cmsg_addr, space);
2027 the_end:
2028 target_msgh->msg_controllen = tswapal(space);
2029 return 0;
/* Byte-swap every field of a netlink message header in place. */
2032 static void tswap_nlmsghdr(struct nlmsghdr *nlh)
2034 nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
2035 nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
2036 nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
2037 nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
2038 nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
/*
 * Walk a buffer of host-order netlink messages, converting each to target
 * byte order.  NLMSG_DONE/NLMSG_ERROR terminate the walk; other types are
 * passed to the host_to_target_nlmsg callback for payload conversion
 * before the header itself is swapped.  Returns 0 or the callback's
 * negative error.
 */
2041 static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
2042 size_t len,
2043 abi_long (*host_to_target_nlmsg)
2044 (struct nlmsghdr *))
2046 uint32_t nlmsg_len;
2047 abi_long ret;
2049 while (len > sizeof(struct nlmsghdr)) {
2051 nlmsg_len = nlh->nlmsg_len;
/* Stop on malformed or truncated messages. */
2052 if (nlmsg_len < sizeof(struct nlmsghdr) ||
2053 nlmsg_len > len) {
2054 break;
2057 switch (nlh->nlmsg_type) {
2058 case NLMSG_DONE:
2059 tswap_nlmsghdr(nlh);
2060 return 0;
2061 case NLMSG_NOOP:
2062 break;
2063 case NLMSG_ERROR:
2065 struct nlmsgerr *e = NLMSG_DATA(nlh);
2066 e->error = tswap32(e->error);
2067 tswap_nlmsghdr(&e->msg);
2068 tswap_nlmsghdr(nlh);
2069 return 0;
2071 default:
2072 ret = host_to_target_nlmsg(nlh);
2073 if (ret < 0) {
2074 tswap_nlmsghdr(nlh);
2075 return ret;
2077 break;
2079 tswap_nlmsghdr(nlh);
2080 len -= NLMSG_ALIGN(nlmsg_len);
2081 nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
2083 return 0;
/*
 * Walk a buffer of target-byte-order netlink messages and convert each
 * one to host order: the header is swapped first so the payload callback
 * and the advance arithmetic below see host byte order.
 * NOTE(review): ret is declared int while the callback returns abi_long;
 * looks harmless for the error codes used, but verify on 64-bit targets.
 */
2086 static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
2087 size_t len,
2088 abi_long (*target_to_host_nlmsg)
2089 (struct nlmsghdr *))
2091 int ret;
2093 while (len > sizeof(struct nlmsghdr)) {
/* nlmsg_len is still target order here, hence the explicit swaps in
 * the bounds check. */
2094 if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
2095 tswap32(nlh->nlmsg_len) > len) {
2096 break;
2098 tswap_nlmsghdr(nlh);
2099 switch (nlh->nlmsg_type) {
2100 case NLMSG_DONE:
2101 return 0;
2102 case NLMSG_NOOP:
2103 break;
2104 case NLMSG_ERROR:
2106 struct nlmsgerr *e = NLMSG_DATA(nlh);
2107 e->error = tswap32(e->error);
2108 tswap_nlmsghdr(&e->msg);
2109 return 0;
2111 default:
2112 ret = target_to_host_nlmsg(nlh);
2113 if (ret < 0) {
2114 return ret;
/* Header already swapped above, so nlmsg_len is host order here. */
2117 len -= NLMSG_ALIGN(nlh->nlmsg_len);
2118 nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
2120 return 0;
2123 #ifdef CONFIG_RTNETLINK
/*
 * Walk a run of host-byte-order netlink attributes (struct nlattr),
 * converting each payload via the callback (which sees host order) and
 * then swapping the attribute header itself.
 */
2124 static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
2125 size_t len, void *context,
2126 abi_long (*host_to_target_nlattr)
2127 (struct nlattr *,
2128 void *context))
2130 unsigned short nla_len;
2131 abi_long ret;
2133 while (len > sizeof(struct nlattr)) {
/* Save the host-order length before the header is swapped: needed to
 * advance to the next attribute. */
2134 nla_len = nlattr->nla_len;
2135 if (nla_len < sizeof(struct nlattr) ||
2136 nla_len > len) {
2137 break;
2139 ret = host_to_target_nlattr(nlattr, context);
/* Swap the header even if the callback failed, so the buffer is never
 * left half-converted. */
2140 nlattr->nla_len = tswap16(nlattr->nla_len);
2141 nlattr->nla_type = tswap16(nlattr->nla_type);
2142 if (ret < 0) {
2143 return ret;
2145 len -= NLA_ALIGN(nla_len);
2146 nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
2148 return 0;
/*
 * Walk a run of host-byte-order rtnetlink attributes (struct rtattr),
 * converting each payload via the callback and then swapping the
 * attribute header.  Mirrors host_to_target_for_each_nlattr.
 */
2151 static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
2152 size_t len,
2153 abi_long (*host_to_target_rtattr)
2154 (struct rtattr *))
2156 unsigned short rta_len;
2157 abi_long ret;
2159 while (len > sizeof(struct rtattr)) {
/* Host-order length saved before the header swap below. */
2160 rta_len = rtattr->rta_len;
2161 if (rta_len < sizeof(struct rtattr) ||
2162 rta_len > len) {
2163 break;
2165 ret = host_to_target_rtattr(rtattr);
2166 rtattr->rta_len = tswap16(rtattr->rta_len);
2167 rtattr->rta_type = tswap16(rtattr->rta_type);
2168 if (ret < 0) {
2169 return ret;
2171 len -= RTA_ALIGN(rta_len);
2172 rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
2174 return 0;
2177 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
/*
 * Byteswap one IFLA_INFO_DATA attribute of a bridge device for the
 * target.  Attributes are grouped by payload width; 8-bit and opaque
 * binary payloads need no conversion.  Always returns 0 — unknown
 * attribute types are only logged.
 */
2179 static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
2180 void *context)
2182 uint16_t *u16;
2183 uint32_t *u32;
2184 uint64_t *u64;
2186 switch (nlattr->nla_type) {
2187 /* no data */
2188 case QEMU_IFLA_BR_FDB_FLUSH:
2189 break;
2190 /* binary */
2191 case QEMU_IFLA_BR_GROUP_ADDR:
2192 break;
2193 /* uint8_t */
2194 case QEMU_IFLA_BR_VLAN_FILTERING:
2195 case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
2196 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
2197 case QEMU_IFLA_BR_MCAST_ROUTER:
2198 case QEMU_IFLA_BR_MCAST_SNOOPING:
2199 case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
2200 case QEMU_IFLA_BR_MCAST_QUERIER:
2201 case QEMU_IFLA_BR_NF_CALL_IPTABLES:
2202 case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
2203 case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
2204 case QEMU_IFLA_BR_VLAN_STATS_ENABLED:
2205 case QEMU_IFLA_BR_MCAST_STATS_ENABLED:
2206 case QEMU_IFLA_BR_MCAST_IGMP_VERSION:
2207 case QEMU_IFLA_BR_MCAST_MLD_VERSION:
2208 break;
2209 /* uint16_t */
2210 case QEMU_IFLA_BR_PRIORITY:
2211 case QEMU_IFLA_BR_VLAN_PROTOCOL:
2212 case QEMU_IFLA_BR_GROUP_FWD_MASK:
2213 case QEMU_IFLA_BR_ROOT_PORT:
2214 case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
2215 u16 = NLA_DATA(nlattr);
2216 *u16 = tswap16(*u16);
2217 break;
2218 /* uint32_t */
2219 case QEMU_IFLA_BR_FORWARD_DELAY:
2220 case QEMU_IFLA_BR_HELLO_TIME:
2221 case QEMU_IFLA_BR_MAX_AGE:
2222 case QEMU_IFLA_BR_AGEING_TIME:
2223 case QEMU_IFLA_BR_STP_STATE:
2224 case QEMU_IFLA_BR_ROOT_PATH_COST:
2225 case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
2226 case QEMU_IFLA_BR_MCAST_HASH_MAX:
2227 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
2228 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
2229 u32 = NLA_DATA(nlattr);
2230 *u32 = tswap32(*u32);
2231 break;
2232 /* uint64_t */
2233 case QEMU_IFLA_BR_HELLO_TIMER:
2234 case QEMU_IFLA_BR_TCN_TIMER:
2235 case QEMU_IFLA_BR_GC_TIMER:
2236 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
2237 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
2238 case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
2239 case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
2240 case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
2241 case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
2242 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
2243 u64 = NLA_DATA(nlattr);
2244 *u64 = tswap64(*u64);
2245 break;
/* Bridge IDs are a priority/MAC byte array: endian-neutral. */
2246 /* ifla_bridge_id: uin8_t[] */
2247 case QEMU_IFLA_BR_ROOT_ID:
2248 case QEMU_IFLA_BR_BRIDGE_ID:
2249 break;
2250 default:
2251 gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
2252 break;
2254 return 0;
/*
 * Byteswap one IFLA_INFO_SLAVE_DATA attribute of a bridge port for the
 * target.  Same grouping-by-width scheme as the bridge master variant.
 * Always returns 0 — unknown attribute types are only logged.
 */
2257 static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
2258 void *context)
2260 uint16_t *u16;
2261 uint32_t *u32;
2262 uint64_t *u64;
2264 switch (nlattr->nla_type) {
2265 /* uint8_t */
2266 case QEMU_IFLA_BRPORT_STATE:
2267 case QEMU_IFLA_BRPORT_MODE:
2268 case QEMU_IFLA_BRPORT_GUARD:
2269 case QEMU_IFLA_BRPORT_PROTECT:
2270 case QEMU_IFLA_BRPORT_FAST_LEAVE:
2271 case QEMU_IFLA_BRPORT_LEARNING:
2272 case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
2273 case QEMU_IFLA_BRPORT_PROXYARP:
2274 case QEMU_IFLA_BRPORT_LEARNING_SYNC:
2275 case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
2276 case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
2277 case QEMU_IFLA_BRPORT_CONFIG_PENDING:
2278 case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
2279 case QEMU_IFLA_BRPORT_MCAST_FLOOD:
2280 case QEMU_IFLA_BRPORT_MCAST_TO_UCAST:
2281 case QEMU_IFLA_BRPORT_VLAN_TUNNEL:
2282 case QEMU_IFLA_BRPORT_BCAST_FLOOD:
2283 case QEMU_IFLA_BRPORT_NEIGH_SUPPRESS:
2284 break;
2285 /* uint16_t */
2286 case QEMU_IFLA_BRPORT_PRIORITY:
2287 case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
2288 case QEMU_IFLA_BRPORT_DESIGNATED_COST:
2289 case QEMU_IFLA_BRPORT_ID:
2290 case QEMU_IFLA_BRPORT_NO:
2291 case QEMU_IFLA_BRPORT_GROUP_FWD_MASK:
2292 u16 = NLA_DATA(nlattr);
2293 *u16 = tswap16(*u16);
2294 break;
2295 /* uin32_t */
2296 case QEMU_IFLA_BRPORT_COST:
2297 u32 = NLA_DATA(nlattr);
2298 *u32 = tswap32(*u32);
2299 break;
2300 /* uint64_t */
2301 case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
2302 case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
2303 case QEMU_IFLA_BRPORT_HOLD_TIMER:
2304 u64 = NLA_DATA(nlattr);
2305 *u64 = tswap64(*u64);
2306 break;
2307 /* ifla_bridge_id: uint8_t[] */
2308 case QEMU_IFLA_BRPORT_ROOT_ID:
2309 case QEMU_IFLA_BRPORT_BRIDGE_ID:
2310 break;
2311 default:
2312 gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
2313 break;
2315 return 0;
/*
 * Carries the link "kind" strings seen in IFLA_INFO_KIND /
 * IFLA_INFO_SLAVE_KIND across to the later IFLA_INFO_DATA /
 * IFLA_INFO_SLAVE_DATA attributes of the same IFLA_LINKINFO nest, so
 * the nested payload can be dispatched to the right converter.
 * The name pointers alias the attribute buffer; they are not copies.
 */
2318 struct linkinfo_context {
/* length of 'name' payload (includes the NUL, per nla_len - NLA_HDRLEN) */
2319 int len;
2320 char *name;
/* same pair for the slave (port) kind */
2321 int slave_len;
2322 char *slave_name;
/*
 * Byteswap one IFLA_LINKINFO sub-attribute for the target.  The KIND
 * strings are remembered in the shared context so that the later DATA
 * attributes can be dispatched per device kind (only "bridge" is
 * handled).  Relies on the kernel emitting KIND before DATA within the
 * nest.
 */
2325 static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
2326 void *context)
2328 struct linkinfo_context *li_context = context;
2330 switch (nlattr->nla_type) {
2331 /* string */
2332 case QEMU_IFLA_INFO_KIND:
2333 li_context->name = NLA_DATA(nlattr);
2334 li_context->len = nlattr->nla_len - NLA_HDRLEN;
2335 break;
2336 case QEMU_IFLA_INFO_SLAVE_KIND:
2337 li_context->slave_name = NLA_DATA(nlattr);
2338 li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
2339 break;
2340 /* stats */
2341 case QEMU_IFLA_INFO_XSTATS:
2342 /* FIXME: only used by CAN */
2343 break;
2344 /* nested */
2345 case QEMU_IFLA_INFO_DATA:
2346 if (strncmp(li_context->name, "bridge",
2347 li_context->len) == 0) {
/* NOTE(review): nla_len passed here still includes NLA_HDRLEN, so the
 * nested walk sees ~4 bytes beyond the payload; the inner bounds check
 * tolerates it, but confirm it cannot misparse a sibling attribute. */
2348 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2349 nlattr->nla_len,
2350 NULL,
2351 host_to_target_data_bridge_nlattr);
2352 } else {
2353 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
2355 break;
2356 case QEMU_IFLA_INFO_SLAVE_DATA:
2357 if (strncmp(li_context->slave_name, "bridge",
2358 li_context->slave_len) == 0) {
2359 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2360 nlattr->nla_len,
2361 NULL,
2362 host_to_target_slave_data_bridge_nlattr);
2363 } else {
2364 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2365 li_context->slave_name);
2367 break;
2368 default:
2369 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
2370 break;
2373 return 0;
2376 static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
2377 void *context)
2379 uint32_t *u32;
2380 int i;
2382 switch (nlattr->nla_type) {
2383 case QEMU_IFLA_INET_CONF:
2384 u32 = NLA_DATA(nlattr);
2385 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2386 i++) {
2387 u32[i] = tswap32(u32[i]);
2389 break;
2390 default:
2391 gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
2393 return 0;
/*
 * Byteswap one AF_INET6 (IFLA_AF_SPEC) sub-attribute for the target.
 * Payloads are grouped by shape: opaque binary and uint8_t need no
 * conversion; scalars, arrays and ifla_cacheinfo are swapped in place.
 * Always returns 0; unknown types are only logged.
 */
2396 static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
2397 void *context)
2399 uint32_t *u32;
2400 uint64_t *u64;
2401 struct ifla_cacheinfo *ci;
2402 int i;
2404 switch (nlattr->nla_type) {
2405 /* binaries */
2406 case QEMU_IFLA_INET6_TOKEN:
2407 break;
2408 /* uint8_t */
2409 case QEMU_IFLA_INET6_ADDR_GEN_MODE:
2410 break;
2411 /* uint32_t */
2412 case QEMU_IFLA_INET6_FLAGS:
2413 u32 = NLA_DATA(nlattr);
2414 *u32 = tswap32(*u32);
2415 break;
2416 /* uint32_t[] */
2417 case QEMU_IFLA_INET6_CONF:
2418 u32 = NLA_DATA(nlattr);
2419 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2420 i++) {
2421 u32[i] = tswap32(u32[i]);
2423 break;
2424 /* ifla_cacheinfo */
2425 case QEMU_IFLA_INET6_CACHEINFO:
2426 ci = NLA_DATA(nlattr);
2427 ci->max_reasm_len = tswap32(ci->max_reasm_len);
2428 ci->tstamp = tswap32(ci->tstamp);
2429 ci->reachable_time = tswap32(ci->reachable_time);
2430 ci->retrans_time = tswap32(ci->retrans_time);
2431 break;
2432 /* uint64_t[] */
2433 case QEMU_IFLA_INET6_STATS:
2434 case QEMU_IFLA_INET6_ICMP6STATS:
2435 u64 = NLA_DATA(nlattr);
2436 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
2437 i++) {
2438 u64[i] = tswap64(u64[i]);
2440 break;
2441 default:
2442 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
2444 return 0;
2447 static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
2448 void *context)
2450 switch (nlattr->nla_type) {
2451 case AF_INET:
2452 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2453 NULL,
2454 host_to_target_data_inet_nlattr);
2455 case AF_INET6:
2456 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2457 NULL,
2458 host_to_target_data_inet6_nlattr);
2459 default:
2460 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
2461 break;
2463 return 0;
2466 static abi_long host_to_target_data_xdp_nlattr(struct nlattr *nlattr,
2467 void *context)
2469 uint32_t *u32;
2471 switch (nlattr->nla_type) {
2472 /* uint8_t */
2473 case QEMU_IFLA_XDP_ATTACHED:
2474 break;
2475 /* uint32_t */
2476 case QEMU_IFLA_XDP_PROG_ID:
2477 u32 = NLA_DATA(nlattr);
2478 *u32 = tswap32(*u32);
2479 break;
2480 default:
2481 gemu_log("Unknown host XDP type: %d\n", nlattr->nla_type);
2482 break;
2484 return 0;
/*
 * Byteswap one RTM_*LINK attribute for the target.  Attributes are
 * grouped by payload shape; nested LINKINFO / AF_SPEC / XDP attributes
 * recurse through host_to_target_for_each_nlattr with a fresh context.
 * Always returns 0 except for nested walks; unknown types are logged.
 */
2487 static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
2489 uint32_t *u32;
2490 struct rtnl_link_stats *st;
2491 struct rtnl_link_stats64 *st64;
2492 struct rtnl_link_ifmap *map;
2493 struct linkinfo_context li_context;
2495 switch (rtattr->rta_type) {
2496 /* binary stream */
2497 case QEMU_IFLA_ADDRESS:
2498 case QEMU_IFLA_BROADCAST:
2499 /* string */
2500 case QEMU_IFLA_IFNAME:
2501 case QEMU_IFLA_QDISC:
2502 break;
2503 /* uin8_t */
2504 case QEMU_IFLA_OPERSTATE:
2505 case QEMU_IFLA_LINKMODE:
2506 case QEMU_IFLA_CARRIER:
2507 case QEMU_IFLA_PROTO_DOWN:
2508 break;
2509 /* uint32_t */
2510 case QEMU_IFLA_MTU:
2511 case QEMU_IFLA_LINK:
2512 case QEMU_IFLA_WEIGHT:
2513 case QEMU_IFLA_TXQLEN:
2514 case QEMU_IFLA_CARRIER_CHANGES:
2515 case QEMU_IFLA_NUM_RX_QUEUES:
2516 case QEMU_IFLA_NUM_TX_QUEUES:
2517 case QEMU_IFLA_PROMISCUITY:
2518 case QEMU_IFLA_EXT_MASK:
2519 case QEMU_IFLA_LINK_NETNSID:
2520 case QEMU_IFLA_GROUP:
2521 case QEMU_IFLA_MASTER:
2522 case QEMU_IFLA_NUM_VF:
2523 case QEMU_IFLA_GSO_MAX_SEGS:
2524 case QEMU_IFLA_GSO_MAX_SIZE:
2525 u32 = RTA_DATA(rtattr);
2526 *u32 = tswap32(*u32);
2527 break;
2528 /* struct rtnl_link_stats */
2529 case QEMU_IFLA_STATS:
2530 st = RTA_DATA(rtattr);
2531 st->rx_packets = tswap32(st->rx_packets);
2532 st->tx_packets = tswap32(st->tx_packets);
2533 st->rx_bytes = tswap32(st->rx_bytes);
2534 st->tx_bytes = tswap32(st->tx_bytes);
2535 st->rx_errors = tswap32(st->rx_errors);
2536 st->tx_errors = tswap32(st->tx_errors);
2537 st->rx_dropped = tswap32(st->rx_dropped);
2538 st->tx_dropped = tswap32(st->tx_dropped);
2539 st->multicast = tswap32(st->multicast);
2540 st->collisions = tswap32(st->collisions);
2542 /* detailed rx_errors: */
2543 st->rx_length_errors = tswap32(st->rx_length_errors);
2544 st->rx_over_errors = tswap32(st->rx_over_errors);
2545 st->rx_crc_errors = tswap32(st->rx_crc_errors);
2546 st->rx_frame_errors = tswap32(st->rx_frame_errors);
2547 st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
2548 st->rx_missed_errors = tswap32(st->rx_missed_errors);
2550 /* detailed tx_errors */
2551 st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
2552 st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
2553 st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
2554 st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
2555 st->tx_window_errors = tswap32(st->tx_window_errors);
2557 /* for cslip etc */
2558 st->rx_compressed = tswap32(st->rx_compressed);
2559 st->tx_compressed = tswap32(st->tx_compressed);
2560 break;
2561 /* struct rtnl_link_stats64 */
2562 case QEMU_IFLA_STATS64:
2563 st64 = RTA_DATA(rtattr);
2564 st64->rx_packets = tswap64(st64->rx_packets);
2565 st64->tx_packets = tswap64(st64->tx_packets);
2566 st64->rx_bytes = tswap64(st64->rx_bytes);
2567 st64->tx_bytes = tswap64(st64->tx_bytes);
2568 st64->rx_errors = tswap64(st64->rx_errors);
2569 st64->tx_errors = tswap64(st64->tx_errors);
2570 st64->rx_dropped = tswap64(st64->rx_dropped);
2571 st64->tx_dropped = tswap64(st64->tx_dropped);
2572 st64->multicast = tswap64(st64->multicast);
2573 st64->collisions = tswap64(st64->collisions);
2575 /* detailed rx_errors: */
2576 st64->rx_length_errors = tswap64(st64->rx_length_errors);
2577 st64->rx_over_errors = tswap64(st64->rx_over_errors);
2578 st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
2579 st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
2580 st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
2581 st64->rx_missed_errors = tswap64(st64->rx_missed_errors);
2583 /* detailed tx_errors */
2584 st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
2585 st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
2586 st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
2587 st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
2588 st64->tx_window_errors = tswap64(st64->tx_window_errors);
2590 /* for cslip etc */
2591 st64->rx_compressed = tswap64(st64->rx_compressed);
2592 st64->tx_compressed = tswap64(st64->tx_compressed);
2593 break;
2594 /* struct rtnl_link_ifmap */
2595 case QEMU_IFLA_MAP:
2596 map = RTA_DATA(rtattr);
2597 map->mem_start = tswap64(map->mem_start);
2598 map->mem_end = tswap64(map->mem_end);
2599 map->base_addr = tswap64(map->base_addr);
2600 map->irq = tswap16(map->irq);
2601 break;
2602 /* nested */
2603 case QEMU_IFLA_LINKINFO:
/* NOTE(review): rta_len passed to the nested walks below still includes
 * the rtattr header; the inner bounds checks tolerate the overshoot,
 * but confirm it cannot read past the attribute into a sibling. */
2604 memset(&li_context, 0, sizeof(li_context));
2605 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2606 &li_context,
2607 host_to_target_data_linkinfo_nlattr);
2608 case QEMU_IFLA_AF_SPEC:
2609 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2610 NULL,
2611 host_to_target_data_spec_nlattr);
2612 case QEMU_IFLA_XDP:
2613 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2614 NULL,
2615 host_to_target_data_xdp_nlattr);
2616 default:
2617 gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
2618 break;
2620 return 0;
2623 static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
2625 uint32_t *u32;
2626 struct ifa_cacheinfo *ci;
2628 switch (rtattr->rta_type) {
2629 /* binary: depends on family type */
2630 case IFA_ADDRESS:
2631 case IFA_LOCAL:
2632 break;
2633 /* string */
2634 case IFA_LABEL:
2635 break;
2636 /* u32 */
2637 case IFA_FLAGS:
2638 case IFA_BROADCAST:
2639 u32 = RTA_DATA(rtattr);
2640 *u32 = tswap32(*u32);
2641 break;
2642 /* struct ifa_cacheinfo */
2643 case IFA_CACHEINFO:
2644 ci = RTA_DATA(rtattr);
2645 ci->ifa_prefered = tswap32(ci->ifa_prefered);
2646 ci->ifa_valid = tswap32(ci->ifa_valid);
2647 ci->cstamp = tswap32(ci->cstamp);
2648 ci->tstamp = tswap32(ci->tstamp);
2649 break;
2650 default:
2651 gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
2652 break;
2654 return 0;
2657 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
2659 uint32_t *u32;
2660 switch (rtattr->rta_type) {
2661 /* binary: depends on family type */
2662 case RTA_GATEWAY:
2663 case RTA_DST:
2664 case RTA_PREFSRC:
2665 break;
2666 /* u32 */
2667 case RTA_PRIORITY:
2668 case RTA_TABLE:
2669 case RTA_OIF:
2670 u32 = RTA_DATA(rtattr);
2671 *u32 = tswap32(*u32);
2672 break;
2673 default:
2674 gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
2675 break;
2677 return 0;
2680 static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
2681 uint32_t rtattr_len)
2683 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2684 host_to_target_data_link_rtattr);
2687 static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
2688 uint32_t rtattr_len)
2690 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2691 host_to_target_data_addr_rtattr);
2694 static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
2695 uint32_t rtattr_len)
2697 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2698 host_to_target_data_route_rtattr);
/*
 * Convert the fixed payload of one rtnetlink reply message to target
 * byte order and hand the trailing attributes to the per-family walker.
 * Returns -TARGET_EINVAL for message types we do not handle.
 */
2701 static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
2703 uint32_t nlmsg_len;
2704 struct ifinfomsg *ifi;
2705 struct ifaddrmsg *ifa;
2706 struct rtmsg *rtm;
/* nlmsg_len is still host order here; the caller swaps the header
 * after this callback returns. */
2708 nlmsg_len = nlh->nlmsg_len;
2709 switch (nlh->nlmsg_type) {
2710 case RTM_NEWLINK:
2711 case RTM_DELLINK:
2712 case RTM_GETLINK:
2713 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2714 ifi = NLMSG_DATA(nlh);
2715 ifi->ifi_type = tswap16(ifi->ifi_type);
2716 ifi->ifi_index = tswap32(ifi->ifi_index);
2717 ifi->ifi_flags = tswap32(ifi->ifi_flags);
2718 ifi->ifi_change = tswap32(ifi->ifi_change);
2719 host_to_target_link_rtattr(IFLA_RTA(ifi),
2720 nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
2722 break;
2723 case RTM_NEWADDR:
2724 case RTM_DELADDR:
2725 case RTM_GETADDR:
2726 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2727 ifa = NLMSG_DATA(nlh);
2728 ifa->ifa_index = tswap32(ifa->ifa_index);
2729 host_to_target_addr_rtattr(IFA_RTA(ifa),
2730 nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
2732 break;
2733 case RTM_NEWROUTE:
2734 case RTM_DELROUTE:
2735 case RTM_GETROUTE:
2736 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2737 rtm = NLMSG_DATA(nlh);
2738 rtm->rtm_flags = tswap32(rtm->rtm_flags);
2739 host_to_target_route_rtattr(RTM_RTA(rtm),
2740 nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
2742 break;
2743 default:
2744 return -TARGET_EINVAL;
2746 return 0;
2749 static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
2750 size_t len)
2752 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
/*
 * Walk a run of target-byte-order rtnetlink attributes: swap each header
 * to host order first, then convert the payload via the callback (which
 * sees host byte order).
 */
2755 static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
2756 size_t len,
2757 abi_long (*target_to_host_rtattr)
2758 (struct rtattr *))
2760 abi_long ret;
2762 while (len >= sizeof(struct rtattr)) {
/* rta_len is still target order in this bounds check. */
2763 if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
2764 tswap16(rtattr->rta_len) > len) {
2765 break;
2767 rtattr->rta_len = tswap16(rtattr->rta_len);
2768 rtattr->rta_type = tswap16(rtattr->rta_type);
2769 ret = target_to_host_rtattr(rtattr);
2770 if (ret < 0) {
2771 return ret;
/* Header already swapped, so rta_len is host order here. */
2773 len -= RTA_ALIGN(rtattr->rta_len);
2774 rtattr = (struct rtattr *)(((char *)rtattr) +
2775 RTA_ALIGN(rtattr->rta_len));
2777 return 0;
2780 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2782 switch (rtattr->rta_type) {
2783 default:
2784 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
2785 break;
2787 return 0;
2790 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2792 switch (rtattr->rta_type) {
2793 /* binary: depends on family type */
2794 case IFA_LOCAL:
2795 case IFA_ADDRESS:
2796 break;
2797 default:
2798 gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
2799 break;
2801 return 0;
2804 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2806 uint32_t *u32;
2807 switch (rtattr->rta_type) {
2808 /* binary: depends on family type */
2809 case RTA_DST:
2810 case RTA_SRC:
2811 case RTA_GATEWAY:
2812 break;
2813 /* u32 */
2814 case RTA_PRIORITY:
2815 case RTA_OIF:
2816 u32 = RTA_DATA(rtattr);
2817 *u32 = tswap32(*u32);
2818 break;
2819 default:
2820 gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
2821 break;
2823 return 0;
2826 static void target_to_host_link_rtattr(struct rtattr *rtattr,
2827 uint32_t rtattr_len)
2829 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2830 target_to_host_data_link_rtattr);
2833 static void target_to_host_addr_rtattr(struct rtattr *rtattr,
2834 uint32_t rtattr_len)
2836 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2837 target_to_host_data_addr_rtattr);
2840 static void target_to_host_route_rtattr(struct rtattr *rtattr,
2841 uint32_t rtattr_len)
2843 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2844 target_to_host_data_route_rtattr);
/*
 * Convert the fixed payload of one rtnetlink request message to host
 * byte order and hand the trailing attributes to the per-family walker.
 * GET requests carry no fixed payload worth converting.  Returns
 * -TARGET_EOPNOTSUPP for message types we do not handle.
 */
2847 static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
2849 struct ifinfomsg *ifi;
2850 struct ifaddrmsg *ifa;
2851 struct rtmsg *rtm;
2853 switch (nlh->nlmsg_type) {
2854 case RTM_GETLINK:
2855 break;
2856 case RTM_NEWLINK:
2857 case RTM_DELLINK:
/* The caller has already swapped the header, so nlmsg_len is host
 * order here. */
2858 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2859 ifi = NLMSG_DATA(nlh);
2860 ifi->ifi_type = tswap16(ifi->ifi_type);
2861 ifi->ifi_index = tswap32(ifi->ifi_index);
2862 ifi->ifi_flags = tswap32(ifi->ifi_flags);
2863 ifi->ifi_change = tswap32(ifi->ifi_change);
2864 target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
2865 NLMSG_LENGTH(sizeof(*ifi)));
2867 break;
2868 case RTM_GETADDR:
2869 case RTM_NEWADDR:
2870 case RTM_DELADDR:
2871 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2872 ifa = NLMSG_DATA(nlh);
2873 ifa->ifa_index = tswap32(ifa->ifa_index);
2874 target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
2875 NLMSG_LENGTH(sizeof(*ifa)));
2877 break;
2878 case RTM_GETROUTE:
2879 break;
2880 case RTM_NEWROUTE:
2881 case RTM_DELROUTE:
2882 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2883 rtm = NLMSG_DATA(nlh);
2884 rtm->rtm_flags = tswap32(rtm->rtm_flags);
2885 target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
2886 NLMSG_LENGTH(sizeof(*rtm)));
2888 break;
2889 default:
2890 return -TARGET_EOPNOTSUPP;
2892 return 0;
2895 static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
2897 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
2899 #endif /* CONFIG_RTNETLINK */
2901 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2903 switch (nlh->nlmsg_type) {
2904 default:
2905 gemu_log("Unknown host audit message type %d\n",
2906 nlh->nlmsg_type);
2907 return -TARGET_EINVAL;
2909 return 0;
2912 static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
2913 size_t len)
2915 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
2918 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2920 switch (nlh->nlmsg_type) {
2921 case AUDIT_USER:
2922 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2923 case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2924 break;
2925 default:
2926 gemu_log("Unknown target audit message type %d\n",
2927 nlh->nlmsg_type);
2928 return -TARGET_EINVAL;
2931 return 0;
2934 static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
2936 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
/*
 * Emulate setsockopt(2) for the guest: translate the target-level
 * option constants and option payloads to their host equivalents, call
 * the host setsockopt(), and map the result back to target errnos.
 * The exact control flow (shared 'unimplemented' and 'set_timeout'
 * labels, per-level nested switches) is load-bearing; only comments
 * are added here.
 */
2939 /* do_setsockopt() Must return target values and target errnos. */
2940 static abi_long do_setsockopt(int sockfd, int level, int optname,
2941 abi_ulong optval_addr, socklen_t optlen)
2943 abi_long ret;
2944 int val;
2945 struct ip_mreqn *ip_mreq;
2946 struct ip_mreq_source *ip_mreq_source;
2948 switch(level) {
2949 case SOL_TCP:
2950 /* TCP options all take an 'int' value. */
2951 if (optlen < sizeof(uint32_t))
2952 return -TARGET_EINVAL;
2954 if (get_user_u32(val, optval_addr))
2955 return -TARGET_EFAULT;
2956 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2957 break;
2958 case SOL_IP:
2959 switch(optname) {
2960 case IP_TOS:
2961 case IP_TTL:
2962 case IP_HDRINCL:
2963 case IP_ROUTER_ALERT:
2964 case IP_RECVOPTS:
2965 case IP_RETOPTS:
2966 case IP_PKTINFO:
2967 case IP_MTU_DISCOVER:
2968 case IP_RECVERR:
2969 case IP_RECVTTL:
2970 case IP_RECVTOS:
2971 #ifdef IP_FREEBIND
2972 case IP_FREEBIND:
2973 #endif
2974 case IP_MULTICAST_TTL:
2975 case IP_MULTICAST_LOOP:
/* These accept either an int or a single byte from the guest. */
2976 val = 0;
2977 if (optlen >= sizeof(uint32_t)) {
2978 if (get_user_u32(val, optval_addr))
2979 return -TARGET_EFAULT;
2980 } else if (optlen >= 1) {
2981 if (get_user_u8(val, optval_addr))
2982 return -TARGET_EFAULT;
2984 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2985 break;
2986 case IP_ADD_MEMBERSHIP:
2987 case IP_DROP_MEMBERSHIP:
/* Accept both the short (ip_mreq) and long (ip_mreqn) forms. */
2988 if (optlen < sizeof (struct target_ip_mreq) ||
2989 optlen > sizeof (struct target_ip_mreqn))
2990 return -TARGET_EINVAL;
2992 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2993 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2994 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2995 break;
2997 case IP_BLOCK_SOURCE:
2998 case IP_UNBLOCK_SOURCE:
2999 case IP_ADD_SOURCE_MEMBERSHIP:
3000 case IP_DROP_SOURCE_MEMBERSHIP:
3001 if (optlen != sizeof (struct target_ip_mreq_source))
3002 return -TARGET_EINVAL;
/* ip_mreq_source contains only raw in_addr fields, so the guest
 * layout can be passed to the host verbatim. */
3004 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
3005 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
3006 unlock_user (ip_mreq_source, optval_addr, 0);
3007 break;
3009 default:
3010 goto unimplemented;
3012 break;
3013 case SOL_IPV6:
3014 switch (optname) {
3015 case IPV6_MTU_DISCOVER:
3016 case IPV6_MTU:
3017 case IPV6_V6ONLY:
3018 case IPV6_RECVPKTINFO:
3019 case IPV6_UNICAST_HOPS:
3020 case IPV6_RECVERR:
3021 case IPV6_RECVHOPLIMIT:
3022 case IPV6_2292HOPLIMIT:
3023 case IPV6_CHECKSUM:
3024 val = 0;
3025 if (optlen < sizeof(uint32_t)) {
3026 return -TARGET_EINVAL;
3028 if (get_user_u32(val, optval_addr)) {
3029 return -TARGET_EFAULT;
3031 ret = get_errno(setsockopt(sockfd, level, optname,
3032 &val, sizeof(val)));
3033 break;
3034 case IPV6_PKTINFO:
3036 struct in6_pktinfo pki;
3038 if (optlen < sizeof(pki)) {
3039 return -TARGET_EINVAL;
3042 if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
3043 return -TARGET_EFAULT;
/* Only the interface index needs swapping; the address is raw bytes. */
3046 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
3048 ret = get_errno(setsockopt(sockfd, level, optname,
3049 &pki, sizeof(pki)));
3050 break;
3052 default:
3053 goto unimplemented;
3055 break;
3056 case SOL_ICMPV6:
3057 switch (optname) {
3058 case ICMPV6_FILTER:
3060 struct icmp6_filter icmp6f;
3062 if (optlen > sizeof(icmp6f)) {
3063 optlen = sizeof(icmp6f);
3066 if (copy_from_user(&icmp6f, optval_addr, optlen)) {
3067 return -TARGET_EFAULT;
/* The filter is an array of eight 32-bit bitmap words. */
3070 for (val = 0; val < 8; val++) {
3071 icmp6f.data[val] = tswap32(icmp6f.data[val]);
3074 ret = get_errno(setsockopt(sockfd, level, optname,
3075 &icmp6f, optlen));
3076 break;
3078 default:
3079 goto unimplemented;
3081 break;
3082 case SOL_RAW:
3083 switch (optname) {
3084 case ICMP_FILTER:
3085 case IPV6_CHECKSUM:
3086 /* those take an u32 value */
3087 if (optlen < sizeof(uint32_t)) {
3088 return -TARGET_EINVAL;
3091 if (get_user_u32(val, optval_addr)) {
3092 return -TARGET_EFAULT;
3094 ret = get_errno(setsockopt(sockfd, level, optname,
3095 &val, sizeof(val)));
3096 break;
3098 default:
3099 goto unimplemented;
3101 break;
3102 case TARGET_SOL_SOCKET:
3103 switch (optname) {
3104 case TARGET_SO_RCVTIMEO:
3106 struct timeval tv;
3108 optname = SO_RCVTIMEO;
/* Shared with TARGET_SO_SNDTIMEO below via the goto. */
3110 set_timeout:
3111 if (optlen != sizeof(struct target_timeval)) {
3112 return -TARGET_EINVAL;
3115 if (copy_from_user_timeval(&tv, optval_addr)) {
3116 return -TARGET_EFAULT;
3119 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3120 &tv, sizeof(tv)));
3121 return ret;
3123 case TARGET_SO_SNDTIMEO:
3124 optname = SO_SNDTIMEO;
3125 goto set_timeout;
3126 case TARGET_SO_ATTACH_FILTER:
3128 struct target_sock_fprog *tfprog;
3129 struct target_sock_filter *tfilter;
3130 struct sock_fprog fprog;
3131 struct sock_filter *filter;
3132 int i;
3134 if (optlen != sizeof(*tfprog)) {
3135 return -TARGET_EINVAL;
3137 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
3138 return -TARGET_EFAULT;
3140 if (!lock_user_struct(VERIFY_READ, tfilter,
3141 tswapal(tfprog->filter), 0)) {
3142 unlock_user_struct(tfprog, optval_addr, 1);
3143 return -TARGET_EFAULT;
/* Rebuild the BPF program with host byte order; jt/jf are single
 * bytes and need no swapping. */
3146 fprog.len = tswap16(tfprog->len);
3147 filter = g_try_new(struct sock_filter, fprog.len);
3148 if (filter == NULL) {
3149 unlock_user_struct(tfilter, tfprog->filter, 1);
3150 unlock_user_struct(tfprog, optval_addr, 1);
3151 return -TARGET_ENOMEM;
3153 for (i = 0; i < fprog.len; i++) {
3154 filter[i].code = tswap16(tfilter[i].code);
3155 filter[i].jt = tfilter[i].jt;
3156 filter[i].jf = tfilter[i].jf;
3157 filter[i].k = tswap32(tfilter[i].k);
3159 fprog.filter = filter;
3161 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
3162 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
3163 g_free(filter);
3165 unlock_user_struct(tfilter, tfprog->filter, 1);
3166 unlock_user_struct(tfprog, optval_addr, 1);
3167 return ret;
3169 case TARGET_SO_BINDTODEVICE:
3171 char *dev_ifname, *addr_ifname;
/* Clamp so the copy below always leaves room for the NUL. */
3173 if (optlen > IFNAMSIZ - 1) {
3174 optlen = IFNAMSIZ - 1;
3176 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
3177 if (!dev_ifname) {
3178 return -TARGET_EFAULT;
3180 optname = SO_BINDTODEVICE;
3181 addr_ifname = alloca(IFNAMSIZ);
3182 memcpy(addr_ifname, dev_ifname, optlen);
3183 addr_ifname[optlen] = 0;
3184 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3185 addr_ifname, optlen));
3186 unlock_user (dev_ifname, optval_addr, 0);
3187 return ret;
3189 /* Options with 'int' argument. */
3190 case TARGET_SO_DEBUG:
3191 optname = SO_DEBUG;
3192 break;
3193 case TARGET_SO_REUSEADDR:
3194 optname = SO_REUSEADDR;
3195 break;
3196 case TARGET_SO_TYPE:
3197 optname = SO_TYPE;
3198 break;
3199 case TARGET_SO_ERROR:
3200 optname = SO_ERROR;
3201 break;
3202 case TARGET_SO_DONTROUTE:
3203 optname = SO_DONTROUTE;
3204 break;
3205 case TARGET_SO_BROADCAST:
3206 optname = SO_BROADCAST;
3207 break;
3208 case TARGET_SO_SNDBUF:
3209 optname = SO_SNDBUF;
3210 break;
3211 case TARGET_SO_SNDBUFFORCE:
3212 optname = SO_SNDBUFFORCE;
3213 break;
3214 case TARGET_SO_RCVBUF:
3215 optname = SO_RCVBUF;
3216 break;
3217 case TARGET_SO_RCVBUFFORCE:
3218 optname = SO_RCVBUFFORCE;
3219 break;
3220 case TARGET_SO_KEEPALIVE:
3221 optname = SO_KEEPALIVE;
3222 break;
3223 case TARGET_SO_OOBINLINE:
3224 optname = SO_OOBINLINE;
3225 break;
3226 case TARGET_SO_NO_CHECK:
3227 optname = SO_NO_CHECK;
3228 break;
3229 case TARGET_SO_PRIORITY:
3230 optname = SO_PRIORITY;
3231 break;
3232 #ifdef SO_BSDCOMPAT
3233 case TARGET_SO_BSDCOMPAT:
3234 optname = SO_BSDCOMPAT;
3235 break;
3236 #endif
3237 case TARGET_SO_PASSCRED:
3238 optname = SO_PASSCRED;
3239 break;
3240 case TARGET_SO_PASSSEC:
3241 optname = SO_PASSSEC;
3242 break;
3243 case TARGET_SO_TIMESTAMP:
3244 optname = SO_TIMESTAMP;
3245 break;
3246 case TARGET_SO_RCVLOWAT:
3247 optname = SO_RCVLOWAT;
3248 break;
3249 default:
3250 goto unimplemented;
/* Common tail for all the 'int'-valued SOL_SOCKET options above. */
3252 if (optlen < sizeof(uint32_t))
3253 return -TARGET_EINVAL;
3255 if (get_user_u32(val, optval_addr))
3256 return -TARGET_EFAULT;
3257 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
3258 break;
3259 default:
3260 unimplemented:
3261 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
3262 ret = -TARGET_ENOPROTOOPT;
3264 return ret;
3267 /* do_getsockopt() Must return target values and target errnos. */
3268 static abi_long do_getsockopt(int sockfd, int level, int optname,
3269 abi_ulong optval_addr, abi_ulong optlen)
3271 abi_long ret;
3272 int len, val;
3273 socklen_t lv;
3275 switch(level) {
3276 case TARGET_SOL_SOCKET:
3277 level = SOL_SOCKET;
3278 switch (optname) {
3279 /* These don't just return a single integer */
3280 case TARGET_SO_LINGER:
3281 case TARGET_SO_RCVTIMEO:
3282 case TARGET_SO_SNDTIMEO:
3283 case TARGET_SO_PEERNAME:
3284 goto unimplemented;
3285 case TARGET_SO_PEERCRED: {
3286 struct ucred cr;
3287 socklen_t crlen;
3288 struct target_ucred *tcr;
3290 if (get_user_u32(len, optlen)) {
3291 return -TARGET_EFAULT;
3293 if (len < 0) {
3294 return -TARGET_EINVAL;
3297 crlen = sizeof(cr);
3298 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
3299 &cr, &crlen));
3300 if (ret < 0) {
3301 return ret;
3303 if (len > crlen) {
3304 len = crlen;
3306 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
3307 return -TARGET_EFAULT;
3309 __put_user(cr.pid, &tcr->pid);
3310 __put_user(cr.uid, &tcr->uid);
3311 __put_user(cr.gid, &tcr->gid);
3312 unlock_user_struct(tcr, optval_addr, 1);
3313 if (put_user_u32(len, optlen)) {
3314 return -TARGET_EFAULT;
3316 break;
3318 /* Options with 'int' argument. */
3319 case TARGET_SO_DEBUG:
3320 optname = SO_DEBUG;
3321 goto int_case;
3322 case TARGET_SO_REUSEADDR:
3323 optname = SO_REUSEADDR;
3324 goto int_case;
3325 case TARGET_SO_TYPE:
3326 optname = SO_TYPE;
3327 goto int_case;
3328 case TARGET_SO_ERROR:
3329 optname = SO_ERROR;
3330 goto int_case;
3331 case TARGET_SO_DONTROUTE:
3332 optname = SO_DONTROUTE;
3333 goto int_case;
3334 case TARGET_SO_BROADCAST:
3335 optname = SO_BROADCAST;
3336 goto int_case;
3337 case TARGET_SO_SNDBUF:
3338 optname = SO_SNDBUF;
3339 goto int_case;
3340 case TARGET_SO_RCVBUF:
3341 optname = SO_RCVBUF;
3342 goto int_case;
3343 case TARGET_SO_KEEPALIVE:
3344 optname = SO_KEEPALIVE;
3345 goto int_case;
3346 case TARGET_SO_OOBINLINE:
3347 optname = SO_OOBINLINE;
3348 goto int_case;
3349 case TARGET_SO_NO_CHECK:
3350 optname = SO_NO_CHECK;
3351 goto int_case;
3352 case TARGET_SO_PRIORITY:
3353 optname = SO_PRIORITY;
3354 goto int_case;
3355 #ifdef SO_BSDCOMPAT
3356 case TARGET_SO_BSDCOMPAT:
3357 optname = SO_BSDCOMPAT;
3358 goto int_case;
3359 #endif
3360 case TARGET_SO_PASSCRED:
3361 optname = SO_PASSCRED;
3362 goto int_case;
3363 case TARGET_SO_TIMESTAMP:
3364 optname = SO_TIMESTAMP;
3365 goto int_case;
3366 case TARGET_SO_RCVLOWAT:
3367 optname = SO_RCVLOWAT;
3368 goto int_case;
3369 case TARGET_SO_ACCEPTCONN:
3370 optname = SO_ACCEPTCONN;
3371 goto int_case;
3372 default:
3373 goto int_case;
3375 break;
3376 case SOL_TCP:
3377 /* TCP options all take an 'int' value. */
3378 int_case:
3379 if (get_user_u32(len, optlen))
3380 return -TARGET_EFAULT;
3381 if (len < 0)
3382 return -TARGET_EINVAL;
3383 lv = sizeof(lv);
3384 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3385 if (ret < 0)
3386 return ret;
3387 if (optname == SO_TYPE) {
3388 val = host_to_target_sock_type(val);
3390 if (len > lv)
3391 len = lv;
3392 if (len == 4) {
3393 if (put_user_u32(val, optval_addr))
3394 return -TARGET_EFAULT;
3395 } else {
3396 if (put_user_u8(val, optval_addr))
3397 return -TARGET_EFAULT;
3399 if (put_user_u32(len, optlen))
3400 return -TARGET_EFAULT;
3401 break;
3402 case SOL_IP:
3403 switch(optname) {
3404 case IP_TOS:
3405 case IP_TTL:
3406 case IP_HDRINCL:
3407 case IP_ROUTER_ALERT:
3408 case IP_RECVOPTS:
3409 case IP_RETOPTS:
3410 case IP_PKTINFO:
3411 case IP_MTU_DISCOVER:
3412 case IP_RECVERR:
3413 case IP_RECVTOS:
3414 #ifdef IP_FREEBIND
3415 case IP_FREEBIND:
3416 #endif
3417 case IP_MULTICAST_TTL:
3418 case IP_MULTICAST_LOOP:
3419 if (get_user_u32(len, optlen))
3420 return -TARGET_EFAULT;
3421 if (len < 0)
3422 return -TARGET_EINVAL;
3423 lv = sizeof(lv);
3424 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3425 if (ret < 0)
3426 return ret;
3427 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
3428 len = 1;
3429 if (put_user_u32(len, optlen)
3430 || put_user_u8(val, optval_addr))
3431 return -TARGET_EFAULT;
3432 } else {
3433 if (len > sizeof(int))
3434 len = sizeof(int);
3435 if (put_user_u32(len, optlen)
3436 || put_user_u32(val, optval_addr))
3437 return -TARGET_EFAULT;
3439 break;
3440 default:
3441 ret = -TARGET_ENOPROTOOPT;
3442 break;
3444 break;
3445 default:
3446 unimplemented:
3447 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3448 level, optname);
3449 ret = -TARGET_EOPNOTSUPP;
3450 break;
3452 return ret;
3455 /* Convert target low/high pair representing file offset into the host
3456 * low/high pair. This function doesn't handle offsets bigger than 64 bits
3457 * as the kernel doesn't handle them either.
3459 static void target_to_host_low_high(abi_ulong tlow,
3460 abi_ulong thigh,
3461 unsigned long *hlow,
3462 unsigned long *hhigh)
3464 uint64_t off = tlow |
3465 ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3466 TARGET_LONG_BITS / 2;
3468 *hlow = off;
3469 *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3472 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3473 abi_ulong count, int copy)
3475 struct target_iovec *target_vec;
3476 struct iovec *vec;
3477 abi_ulong total_len, max_len;
3478 int i;
3479 int err = 0;
3480 bool bad_address = false;
3482 if (count == 0) {
3483 errno = 0;
3484 return NULL;
3486 if (count > IOV_MAX) {
3487 errno = EINVAL;
3488 return NULL;
3491 vec = g_try_new0(struct iovec, count);
3492 if (vec == NULL) {
3493 errno = ENOMEM;
3494 return NULL;
3497 target_vec = lock_user(VERIFY_READ, target_addr,
3498 count * sizeof(struct target_iovec), 1);
3499 if (target_vec == NULL) {
3500 err = EFAULT;
3501 goto fail2;
3504 /* ??? If host page size > target page size, this will result in a
3505 value larger than what we can actually support. */
3506 max_len = 0x7fffffff & TARGET_PAGE_MASK;
3507 total_len = 0;
3509 for (i = 0; i < count; i++) {
3510 abi_ulong base = tswapal(target_vec[i].iov_base);
3511 abi_long len = tswapal(target_vec[i].iov_len);
3513 if (len < 0) {
3514 err = EINVAL;
3515 goto fail;
3516 } else if (len == 0) {
3517 /* Zero length pointer is ignored. */
3518 vec[i].iov_base = 0;
3519 } else {
3520 vec[i].iov_base = lock_user(type, base, len, copy);
3521 /* If the first buffer pointer is bad, this is a fault. But
3522 * subsequent bad buffers will result in a partial write; this
3523 * is realized by filling the vector with null pointers and
3524 * zero lengths. */
3525 if (!vec[i].iov_base) {
3526 if (i == 0) {
3527 err = EFAULT;
3528 goto fail;
3529 } else {
3530 bad_address = true;
3533 if (bad_address) {
3534 len = 0;
3536 if (len > max_len - total_len) {
3537 len = max_len - total_len;
3540 vec[i].iov_len = len;
3541 total_len += len;
3544 unlock_user(target_vec, target_addr, 0);
3545 return vec;
3547 fail:
3548 while (--i >= 0) {
3549 if (tswapal(target_vec[i].iov_len) > 0) {
3550 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3553 unlock_user(target_vec, target_addr, 0);
3554 fail2:
3555 g_free(vec);
3556 errno = err;
3557 return NULL;
3560 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3561 abi_ulong count, int copy)
3563 struct target_iovec *target_vec;
3564 int i;
3566 target_vec = lock_user(VERIFY_READ, target_addr,
3567 count * sizeof(struct target_iovec), 1);
3568 if (target_vec) {
3569 for (i = 0; i < count; i++) {
3570 abi_ulong base = tswapal(target_vec[i].iov_base);
3571 abi_long len = tswapal(target_vec[i].iov_len);
3572 if (len < 0) {
3573 break;
3575 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3577 unlock_user(target_vec, target_addr, 0);
3580 g_free(vec);
3583 static inline int target_to_host_sock_type(int *type)
3585 int host_type = 0;
3586 int target_type = *type;
3588 switch (target_type & TARGET_SOCK_TYPE_MASK) {
3589 case TARGET_SOCK_DGRAM:
3590 host_type = SOCK_DGRAM;
3591 break;
3592 case TARGET_SOCK_STREAM:
3593 host_type = SOCK_STREAM;
3594 break;
3595 default:
3596 host_type = target_type & TARGET_SOCK_TYPE_MASK;
3597 break;
3599 if (target_type & TARGET_SOCK_CLOEXEC) {
3600 #if defined(SOCK_CLOEXEC)
3601 host_type |= SOCK_CLOEXEC;
3602 #else
3603 return -TARGET_EINVAL;
3604 #endif
3606 if (target_type & TARGET_SOCK_NONBLOCK) {
3607 #if defined(SOCK_NONBLOCK)
3608 host_type |= SOCK_NONBLOCK;
3609 #elif !defined(O_NONBLOCK)
3610 return -TARGET_EINVAL;
3611 #endif
3613 *type = host_type;
3614 return 0;
3617 /* Try to emulate socket type flags after socket creation. */
3618 static int sock_flags_fixup(int fd, int target_type)
3620 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3621 if (target_type & TARGET_SOCK_NONBLOCK) {
3622 int flags = fcntl(fd, F_GETFL);
3623 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3624 close(fd);
3625 return -TARGET_EINVAL;
3628 #endif
3629 return fd;
3632 static abi_long packet_target_to_host_sockaddr(void *host_addr,
3633 abi_ulong target_addr,
3634 socklen_t len)
3636 struct sockaddr *addr = host_addr;
3637 struct target_sockaddr *target_saddr;
3639 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
3640 if (!target_saddr) {
3641 return -TARGET_EFAULT;
3644 memcpy(addr, target_saddr, len);
3645 addr->sa_family = tswap16(target_saddr->sa_family);
3646 /* spkt_protocol is big-endian */
3648 unlock_user(target_saddr, target_addr, 0);
3649 return 0;
3652 static TargetFdTrans target_packet_trans = {
3653 .target_to_host_addr = packet_target_to_host_sockaddr,
#ifdef CONFIG_RTNETLINK
/* Convert guest rtnetlink messages to host format in place. */
static abi_long netlink_route_target_to_host(void *buf, size_t len)
{
    abi_long ret = target_to_host_nlmsg_route(buf, len);

    /* On success report the full length, not the translator's result. */
    return ret < 0 ? ret : len;
}

/* Convert host rtnetlink messages to guest format in place. */
static abi_long netlink_route_host_to_target(void *buf, size_t len)
{
    abi_long ret = host_to_target_nlmsg_route(buf, len);

    return ret < 0 ? ret : len;
}

/* fd translator registered for NETLINK_ROUTE sockets. */
static TargetFdTrans target_netlink_route_trans = {
    .target_to_host_data = netlink_route_target_to_host,
    .host_to_target_data = netlink_route_host_to_target,
};
#endif /* CONFIG_RTNETLINK */
3687 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
3689 abi_long ret;
3691 ret = target_to_host_nlmsg_audit(buf, len);
3692 if (ret < 0) {
3693 return ret;
3696 return len;
3699 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
3701 abi_long ret;
3703 ret = host_to_target_nlmsg_audit(buf, len);
3704 if (ret < 0) {
3705 return ret;
3708 return len;
3711 static TargetFdTrans target_netlink_audit_trans = {
3712 .target_to_host_data = netlink_audit_target_to_host,
3713 .host_to_target_data = netlink_audit_host_to_target,
3716 /* do_socket() Must return target values and target errnos. */
3717 static abi_long do_socket(int domain, int type, int protocol)
3719 int target_type = type;
3720 int ret;
3722 ret = target_to_host_sock_type(&type);
3723 if (ret) {
3724 return ret;
3727 if (domain == PF_NETLINK && !(
3728 #ifdef CONFIG_RTNETLINK
3729 protocol == NETLINK_ROUTE ||
3730 #endif
3731 protocol == NETLINK_KOBJECT_UEVENT ||
3732 protocol == NETLINK_AUDIT)) {
3733 return -EPFNOSUPPORT;
3736 if (domain == AF_PACKET ||
3737 (domain == AF_INET && type == SOCK_PACKET)) {
3738 protocol = tswap16(protocol);
3741 ret = get_errno(socket(domain, type, protocol));
3742 if (ret >= 0) {
3743 ret = sock_flags_fixup(ret, target_type);
3744 if (type == SOCK_PACKET) {
3745 /* Manage an obsolete case :
3746 * if socket type is SOCK_PACKET, bind by name
3748 fd_trans_register(ret, &target_packet_trans);
3749 } else if (domain == PF_NETLINK) {
3750 switch (protocol) {
3751 #ifdef CONFIG_RTNETLINK
3752 case NETLINK_ROUTE:
3753 fd_trans_register(ret, &target_netlink_route_trans);
3754 break;
3755 #endif
3756 case NETLINK_KOBJECT_UEVENT:
3757 /* nothing to do: messages are strings */
3758 break;
3759 case NETLINK_AUDIT:
3760 fd_trans_register(ret, &target_netlink_audit_trans);
3761 break;
3762 default:
3763 g_assert_not_reached();
3767 return ret;
3770 /* do_bind() Must return target values and target errnos. */
3771 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3772 socklen_t addrlen)
3774 void *addr;
3775 abi_long ret;
3777 if ((int)addrlen < 0) {
3778 return -TARGET_EINVAL;
3781 addr = alloca(addrlen+1);
3783 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3784 if (ret)
3785 return ret;
3787 return get_errno(bind(sockfd, addr, addrlen));
3790 /* do_connect() Must return target values and target errnos. */
3791 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3792 socklen_t addrlen)
3794 void *addr;
3795 abi_long ret;
3797 if ((int)addrlen < 0) {
3798 return -TARGET_EINVAL;
3801 addr = alloca(addrlen+1);
3803 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3804 if (ret)
3805 return ret;
3807 return get_errno(safe_connect(sockfd, addr, addrlen));
3810 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3811 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3812 int flags, int send)
3814 abi_long ret, len;
3815 struct msghdr msg;
3816 abi_ulong count;
3817 struct iovec *vec;
3818 abi_ulong target_vec;
3820 if (msgp->msg_name) {
3821 msg.msg_namelen = tswap32(msgp->msg_namelen);
3822 msg.msg_name = alloca(msg.msg_namelen+1);
3823 ret = target_to_host_sockaddr(fd, msg.msg_name,
3824 tswapal(msgp->msg_name),
3825 msg.msg_namelen);
3826 if (ret == -TARGET_EFAULT) {
3827 /* For connected sockets msg_name and msg_namelen must
3828 * be ignored, so returning EFAULT immediately is wrong.
3829 * Instead, pass a bad msg_name to the host kernel, and
3830 * let it decide whether to return EFAULT or not.
3832 msg.msg_name = (void *)-1;
3833 } else if (ret) {
3834 goto out2;
3836 } else {
3837 msg.msg_name = NULL;
3838 msg.msg_namelen = 0;
3840 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3841 msg.msg_control = alloca(msg.msg_controllen);
3842 msg.msg_flags = tswap32(msgp->msg_flags);
3844 count = tswapal(msgp->msg_iovlen);
3845 target_vec = tswapal(msgp->msg_iov);
3847 if (count > IOV_MAX) {
3848 /* sendrcvmsg returns a different errno for this condition than
3849 * readv/writev, so we must catch it here before lock_iovec() does.
3851 ret = -TARGET_EMSGSIZE;
3852 goto out2;
3855 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3856 target_vec, count, send);
3857 if (vec == NULL) {
3858 ret = -host_to_target_errno(errno);
3859 goto out2;
3861 msg.msg_iovlen = count;
3862 msg.msg_iov = vec;
3864 if (send) {
3865 if (fd_trans_target_to_host_data(fd)) {
3866 void *host_msg;
3868 host_msg = g_malloc(msg.msg_iov->iov_len);
3869 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3870 ret = fd_trans_target_to_host_data(fd)(host_msg,
3871 msg.msg_iov->iov_len);
3872 if (ret >= 0) {
3873 msg.msg_iov->iov_base = host_msg;
3874 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3876 g_free(host_msg);
3877 } else {
3878 ret = target_to_host_cmsg(&msg, msgp);
3879 if (ret == 0) {
3880 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3883 } else {
3884 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3885 if (!is_error(ret)) {
3886 len = ret;
3887 if (fd_trans_host_to_target_data(fd)) {
3888 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3889 len);
3890 } else {
3891 ret = host_to_target_cmsg(msgp, &msg);
3893 if (!is_error(ret)) {
3894 msgp->msg_namelen = tswap32(msg.msg_namelen);
3895 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3896 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3897 msg.msg_name, msg.msg_namelen);
3898 if (ret) {
3899 goto out;
3903 ret = len;
3908 out:
3909 unlock_iovec(vec, target_vec, count, !send);
3910 out2:
3911 return ret;
3914 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3915 int flags, int send)
3917 abi_long ret;
3918 struct target_msghdr *msgp;
3920 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3921 msgp,
3922 target_msg,
3923 send ? 1 : 0)) {
3924 return -TARGET_EFAULT;
3926 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3927 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3928 return ret;
3931 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3932 * so it might not have this *mmsg-specific flag either.
3934 #ifndef MSG_WAITFORONE
3935 #define MSG_WAITFORONE 0x10000
3936 #endif
3938 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3939 unsigned int vlen, unsigned int flags,
3940 int send)
3942 struct target_mmsghdr *mmsgp;
3943 abi_long ret = 0;
3944 int i;
3946 if (vlen > UIO_MAXIOV) {
3947 vlen = UIO_MAXIOV;
3950 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3951 if (!mmsgp) {
3952 return -TARGET_EFAULT;
3955 for (i = 0; i < vlen; i++) {
3956 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3957 if (is_error(ret)) {
3958 break;
3960 mmsgp[i].msg_len = tswap32(ret);
3961 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3962 if (flags & MSG_WAITFORONE) {
3963 flags |= MSG_DONTWAIT;
3967 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3969 /* Return number of datagrams sent if we sent any at all;
3970 * otherwise return the error.
3972 if (i) {
3973 return i;
3975 return ret;
3978 /* do_accept4() Must return target values and target errnos. */
3979 static abi_long do_accept4(int fd, abi_ulong target_addr,
3980 abi_ulong target_addrlen_addr, int flags)
3982 socklen_t addrlen;
3983 void *addr;
3984 abi_long ret;
3985 int host_flags;
3987 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3989 if (target_addr == 0) {
3990 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3993 /* linux returns EINVAL if addrlen pointer is invalid */
3994 if (get_user_u32(addrlen, target_addrlen_addr))
3995 return -TARGET_EINVAL;
3997 if ((int)addrlen < 0) {
3998 return -TARGET_EINVAL;
4001 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
4002 return -TARGET_EINVAL;
4004 addr = alloca(addrlen);
4006 ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
4007 if (!is_error(ret)) {
4008 host_to_target_sockaddr(target_addr, addr, addrlen);
4009 if (put_user_u32(addrlen, target_addrlen_addr))
4010 ret = -TARGET_EFAULT;
4012 return ret;
4015 /* do_getpeername() Must return target values and target errnos. */
4016 static abi_long do_getpeername(int fd, abi_ulong target_addr,
4017 abi_ulong target_addrlen_addr)
4019 socklen_t addrlen;
4020 void *addr;
4021 abi_long ret;
4023 if (get_user_u32(addrlen, target_addrlen_addr))
4024 return -TARGET_EFAULT;
4026 if ((int)addrlen < 0) {
4027 return -TARGET_EINVAL;
4030 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
4031 return -TARGET_EFAULT;
4033 addr = alloca(addrlen);
4035 ret = get_errno(getpeername(fd, addr, &addrlen));
4036 if (!is_error(ret)) {
4037 host_to_target_sockaddr(target_addr, addr, addrlen);
4038 if (put_user_u32(addrlen, target_addrlen_addr))
4039 ret = -TARGET_EFAULT;
4041 return ret;
4044 /* do_getsockname() Must return target values and target errnos. */
4045 static abi_long do_getsockname(int fd, abi_ulong target_addr,
4046 abi_ulong target_addrlen_addr)
4048 socklen_t addrlen;
4049 void *addr;
4050 abi_long ret;
4052 if (get_user_u32(addrlen, target_addrlen_addr))
4053 return -TARGET_EFAULT;
4055 if ((int)addrlen < 0) {
4056 return -TARGET_EINVAL;
4059 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
4060 return -TARGET_EFAULT;
4062 addr = alloca(addrlen);
4064 ret = get_errno(getsockname(fd, addr, &addrlen));
4065 if (!is_error(ret)) {
4066 host_to_target_sockaddr(target_addr, addr, addrlen);
4067 if (put_user_u32(addrlen, target_addrlen_addr))
4068 ret = -TARGET_EFAULT;
4070 return ret;
4073 /* do_socketpair() Must return target values and target errnos. */
4074 static abi_long do_socketpair(int domain, int type, int protocol,
4075 abi_ulong target_tab_addr)
4077 int tab[2];
4078 abi_long ret;
4080 target_to_host_sock_type(&type);
4082 ret = get_errno(socketpair(domain, type, protocol, tab));
4083 if (!is_error(ret)) {
4084 if (put_user_s32(tab[0], target_tab_addr)
4085 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
4086 ret = -TARGET_EFAULT;
4088 return ret;
4091 /* do_sendto() Must return target values and target errnos. */
4092 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
4093 abi_ulong target_addr, socklen_t addrlen)
4095 void *addr;
4096 void *host_msg;
4097 void *copy_msg = NULL;
4098 abi_long ret;
4100 if ((int)addrlen < 0) {
4101 return -TARGET_EINVAL;
4104 host_msg = lock_user(VERIFY_READ, msg, len, 1);
4105 if (!host_msg)
4106 return -TARGET_EFAULT;
4107 if (fd_trans_target_to_host_data(fd)) {
4108 copy_msg = host_msg;
4109 host_msg = g_malloc(len);
4110 memcpy(host_msg, copy_msg, len);
4111 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
4112 if (ret < 0) {
4113 goto fail;
4116 if (target_addr) {
4117 addr = alloca(addrlen+1);
4118 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
4119 if (ret) {
4120 goto fail;
4122 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
4123 } else {
4124 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
4126 fail:
4127 if (copy_msg) {
4128 g_free(host_msg);
4129 host_msg = copy_msg;
4131 unlock_user(host_msg, msg, 0);
4132 return ret;
4135 /* do_recvfrom() Must return target values and target errnos. */
4136 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
4137 abi_ulong target_addr,
4138 abi_ulong target_addrlen)
4140 socklen_t addrlen;
4141 void *addr;
4142 void *host_msg;
4143 abi_long ret;
4145 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
4146 if (!host_msg)
4147 return -TARGET_EFAULT;
4148 if (target_addr) {
4149 if (get_user_u32(addrlen, target_addrlen)) {
4150 ret = -TARGET_EFAULT;
4151 goto fail;
4153 if ((int)addrlen < 0) {
4154 ret = -TARGET_EINVAL;
4155 goto fail;
4157 addr = alloca(addrlen);
4158 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
4159 addr, &addrlen));
4160 } else {
4161 addr = NULL; /* To keep compiler quiet. */
4162 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
4164 if (!is_error(ret)) {
4165 if (fd_trans_host_to_target_data(fd)) {
4166 ret = fd_trans_host_to_target_data(fd)(host_msg, ret);
4168 if (target_addr) {
4169 host_to_target_sockaddr(target_addr, addr, addrlen);
4170 if (put_user_u32(addrlen, target_addrlen)) {
4171 ret = -TARGET_EFAULT;
4172 goto fail;
4175 unlock_user(host_msg, msg, len);
4176 } else {
4177 fail:
4178 unlock_user(host_msg, msg, 0);
4180 return ret;
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
/* Fixed-size table tracking guest shmat() mappings so shmdt() can find
 * the size of the region to unmap.
 */
#define N_SHM_REGIONS 32

static struct shm_region {
    abi_ulong start;  /* guest address of the attached segment */
    abi_ulong size;   /* size of the mapping in bytes */
    bool in_use;      /* slot occupancy flag */
} shm_regions[N_SHM_REGIONS];
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
struct target_semid64_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;  /* pads 32-bit time to 64 bits, as in the kernel ABI */
#endif
    abi_ulong sem_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;
};
#endif
4303 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
4304 abi_ulong target_addr)
4306 struct target_ipc_perm *target_ip;
4307 struct target_semid64_ds *target_sd;
4309 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4310 return -TARGET_EFAULT;
4311 target_ip = &(target_sd->sem_perm);
4312 host_ip->__key = tswap32(target_ip->__key);
4313 host_ip->uid = tswap32(target_ip->uid);
4314 host_ip->gid = tswap32(target_ip->gid);
4315 host_ip->cuid = tswap32(target_ip->cuid);
4316 host_ip->cgid = tswap32(target_ip->cgid);
4317 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4318 host_ip->mode = tswap32(target_ip->mode);
4319 #else
4320 host_ip->mode = tswap16(target_ip->mode);
4321 #endif
4322 #if defined(TARGET_PPC)
4323 host_ip->__seq = tswap32(target_ip->__seq);
4324 #else
4325 host_ip->__seq = tswap16(target_ip->__seq);
4326 #endif
4327 unlock_user_struct(target_sd, target_addr, 0);
4328 return 0;
4331 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
4332 struct ipc_perm *host_ip)
4334 struct target_ipc_perm *target_ip;
4335 struct target_semid64_ds *target_sd;
4337 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4338 return -TARGET_EFAULT;
4339 target_ip = &(target_sd->sem_perm);
4340 target_ip->__key = tswap32(host_ip->__key);
4341 target_ip->uid = tswap32(host_ip->uid);
4342 target_ip->gid = tswap32(host_ip->gid);
4343 target_ip->cuid = tswap32(host_ip->cuid);
4344 target_ip->cgid = tswap32(host_ip->cgid);
4345 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4346 target_ip->mode = tswap32(host_ip->mode);
4347 #else
4348 target_ip->mode = tswap16(host_ip->mode);
4349 #endif
4350 #if defined(TARGET_PPC)
4351 target_ip->__seq = tswap32(host_ip->__seq);
4352 #else
4353 target_ip->__seq = tswap16(host_ip->__seq);
4354 #endif
4355 unlock_user_struct(target_sd, target_addr, 1);
4356 return 0;
4359 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
4360 abi_ulong target_addr)
4362 struct target_semid64_ds *target_sd;
4364 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4365 return -TARGET_EFAULT;
4366 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
4367 return -TARGET_EFAULT;
4368 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
4369 host_sd->sem_otime = tswapal(target_sd->sem_otime);
4370 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
4371 unlock_user_struct(target_sd, target_addr, 0);
4372 return 0;
4375 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
4376 struct semid_ds *host_sd)
4378 struct target_semid64_ds *target_sd;
4380 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4381 return -TARGET_EFAULT;
4382 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
4383 return -TARGET_EFAULT;
4384 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
4385 target_sd->sem_otime = tswapal(host_sd->sem_otime);
4386 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
4387 unlock_user_struct(target_sd, target_addr, 1);
4388 return 0;
/* Guest-layout mirror of the host's struct seminfo (SEM_INFO/SEM_STAT
 * results); all fields are plain ints on every target.
 */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
/* Copy host *host_seminfo to the guest struct seminfo at @target_addr,
 * field by field via __put_user (which performs the byte swap).
 * Returns 0 on success, -TARGET_EFAULT if guest memory is inaccessible.
 */
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
/* Host-side semun union for semctl(2); glibc requires callers to define
 * this themselves (see semctl(2) NOTES). */
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};
/* Guest-ABI view of semun: the pointer members are guest addresses
 * (abi_ulong), overlapping the int val in guest byte order. */
union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
/* Allocate *host_array and fill it from the guest semaphore-value array at
 * @target_addr.  The element count is obtained by querying the semaphore
 * set itself with IPC_STAT rather than trusting the guest.
 * On success the caller owns *host_array and must free it (the SETALL
 * path hands it back to host_to_target_semarray, which frees it).
 * Returns 0, a host semctl errno, -TARGET_ENOMEM or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    /* Ask the kernel how many semaphores are in the set. */
    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = g_try_new(unsigned short, nsems);
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array) {
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}
/* Copy *host_array (allocated by target_to_host_semarray) out to the guest
 * semaphore-value array at @target_addr, then free it.  The element count
 * is re-queried from the kernel with IPC_STAT, as on the inbound path.
 *
 * This function consumes *host_array on EVERY path: previously the two
 * error returns (semctl failure, unreadable guest memory) leaked the
 * allocation, since do_semctl() returns immediately on error.
 *
 * Returns 0, a host semctl errno, or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
                                               unsigned short **host_array)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1) {
        /* Fix: release the caller's buffer instead of leaking it. */
        g_free(*host_array);
        return get_errno(ret);
    }

    nsems = semid_ds.sem_nsems;

    array = lock_user(VERIFY_WRITE, target_addr,
                      nsems*sizeof(unsigned short), 0);
    if (!array) {
        /* Fix: release the caller's buffer instead of leaking it. */
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    for(i=0; i<nsems; i++) {
        __put_user((*host_array)[i], &array[i]);
    }
    g_free(*host_array);
    unlock_user(array, target_addr, 1);

    return 0;
}
/* Emulate semctl(2) for the guest: convert the semun argument (passed as a
 * guest address/value in @target_arg) to host form for the given command,
 * issue the host semctl, and convert any results back.
 * Returns the host result or a -TARGET_* errno.
 * NOTE(review): on GETALL/SETALL the early "return err" paths rely on the
 * helpers to release the temporary array; see host_to_target_semarray.
 */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    /* Strip IPC_64 and other flag bits; only the raw command remains. */
    cmd &= 0xff;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element.  To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
	 * a swap of the 4 byte val field. In other cases, the data is
	 * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        /* Both directions are converted even for GETALL; the inbound copy
         * also sizes and allocates the temporary host array. */
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* These commands ignore the semun argument entirely. */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
/* Guest-ABI layout of struct sembuf (semop(2) operation descriptor);
 * matches the host layout except for byte order. */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
/* Copy @nsops guest struct sembuf entries at @target_addr into the
 * caller-provided host array @host_sembuf, swapping each field.
 * Returns 0 on success, -TARGET_EFAULT if guest memory is inaccessible.
 */
static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}
/* Emulate semop(2): convert the guest sembuf array at @ptr and issue a
 * (restartable) host semtimedop with no timeout.
 * NOTE(review): sops is a VLA sized by the guest-controlled nsops with no
 * upper bound, so a hostile guest can request an arbitrarily large stack
 * allocation here — consider bounding against SEMOPM and returning
 * -TARGET_E2BIG as the kernel does; confirm target limits first.
 */
static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
{
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
}
/* Guest-ABI layout of struct msqid64_ds (msgctl IPC_STAT/IPC_SET).
 * On 32-bit ABIs each time_t field is followed by a pad word — presumably
 * the high half of a 64-bit time slot in the kernel's msqid64_ds layout. */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};
/* Copy the guest msqid_ds at @target_addr into host *host_md, swapping
 * each field from guest to host order.
 * Returns 0 on success, -TARGET_EFAULT if guest memory is inaccessible.
 */
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    /* msg_perm is the first member, so the same guest address is used. */
    if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}
/* Copy host *host_md out to the guest msqid_ds at @target_addr, swapping
 * each field from host to guest order.
 * Returns 0 on success, -TARGET_EFAULT if guest memory is inaccessible.
 */
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}
/* Guest-ABI layout of struct msginfo (msgctl IPC_INFO/MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
/* Copy host *host_msginfo to the guest struct msginfo at @target_addr,
 * field by field via __put_user.
 * Returns 0 on success, -TARGET_EFAULT if guest memory is inaccessible.
 */
static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
/* Emulate msgctl(2): convert the guest argument at @ptr (when the command
 * takes one), issue the host msgctl, and convert the result back.
 * Returns the host result or a -TARGET_* errno; unknown commands fall
 * through with the initial -TARGET_EINVAL.
 */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    /* Strip IPC_64 and other flag bits. */
    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* The kernel ABI reuses the msqid_ds pointer slot for msginfo,
         * hence the cast. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
/* Guest-ABI layout of struct msgbuf: an abi_long type tag followed by the
 * variable-length message text (declared [1], used as a flexible tail). */
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};
/* Emulate msgsnd(2): copy the guest message at @msgp into a host msgbuf
 * (swapping mtype, copying mtext verbatim) and send it.
 * Returns the host result, -TARGET_EINVAL for negative msgsz,
 * -TARGET_EFAULT or -TARGET_ENOMEM on local failure.
 */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* Host mtype is a long regardless of the guest's abi_long width. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
/* Emulate msgrcv(2): receive into a host msgbuf, then copy the received
 * bytes and the (swapped) mtype back to the guest msgbuf at @msgp.
 * Returns the number of bytes received, or a -TARGET_* errno.
 */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));

    if (ret > 0) {
        /* mtext starts right after the abi_ulong-sized mtype slot in the
         * guest struct. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    /* target_mb is always non-NULL here (EFAULT above returns directly);
     * the check is defensive. */
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
/* Copy the guest shmid_ds at @target_addr into host *host_sd, swapping
 * each field via __get_user.
 * Returns 0 on success, -TARGET_EFAULT if guest memory is inaccessible.
 */
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/* Copy host *host_sd out to the guest shmid_ds at @target_addr, swapping
 * each field via __put_user.
 * Returns 0 on success, -TARGET_EFAULT if guest memory is inaccessible.
 */
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
/* Guest-ABI layout of struct shminfo (shmctl IPC_INFO). */
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};
/* Copy host *host_shminfo to the guest struct shminfo at @target_addr.
 * Returns 0 on success, -TARGET_EFAULT if guest memory is inaccessible.
 */
static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}
/* Guest-ABI layout of struct shm_info (shmctl SHM_INFO). */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
/* Copy host *host_shm_info to the guest struct shm_info at @target_addr.
 * Returns 0 on success, -TARGET_EFAULT if guest memory is inaccessible.
 */
static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}
/* Emulate shmctl(2): convert the guest argument at @buf (when the command
 * takes one), issue the host shmctl, and convert the result back.
 * Returns the host result or a -TARGET_* errno; unknown commands fall
 * through with the initial -TARGET_EINVAL.
 */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    /* Strip IPC_64 and other flag bits. */
    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* The kernel ABI reuses the shmid_ds pointer slot, hence the casts
         * here and below. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        /* These commands take no buffer argument. */
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
/* Default SHMLBA for targets that don't force their own: one guest page. */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif
/* Emulate shmat(2): attach the host segment at a guest-visible address.
 * A zero @shmaddr picks a free region via mmap_find_vma(); a non-zero
 * address is aligned down per SHM_RND or rejected.  On success the guest
 * page flags are updated and the mapping is recorded in shm_regions[] so
 * do_shmdt() can later clear it.
 * Returns the guest attach address, or a -TARGET_*/host errno value.
 */
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;
    abi_ulong shmlba;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            /* Round the requested address down to the SHMLBA boundary. */
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    /* Hold the mmap lock across the attach and the bookkeeping below. */
    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP: mmap_find_vma reserved the range, so force the
             * attach over it. */
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* Remember the mapping in the first free slot; if the table is full
     * the attach still succeeds but do_shmdt won't clear page flags. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
/* Emulate shmdt(2): detach the segment attached at guest address @shmaddr,
 * clearing its guest page flags and freeing its shm_regions[] slot.
 * Returns 0 or a host errno from shmdt.
 */
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;
    abi_long rv;

    mmap_lock();

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
            shm_regions[i].in_use = false;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }
    rv = get_errno(shmdt(g2h(shmaddr)));

    mmap_unlock();

    return rv;
}
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos.
 *
 * Demultiplexer for the legacy ipc(2) syscall: the low 16 bits of @call
 * select the IPC operation and the high 16 bits carry the glibc "version"
 * (which changes how msgrcv/shmat arguments are packed).  Dispatches to
 * the do_sem*/do_msg*/do_shm* helpers above.
 */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-style msgrcv packs msgp and msgtyp into a struct
                 * pointed to by ptr. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* The attach address is returned via the pointer in 'third'. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* iBCS2 emulator entry point: not supported. */
            ret = -TARGET_EINVAL;
            break;
        }
	break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
/* kernel structure types definitions
 *
 * X-macro expansion of syscall_types.h: the first pass builds an enum of
 * STRUCT_<name> identifiers, the second pass emits a struct_<name>_def[]
 * argtype description array for each non-special struct (special ones are
 * handled by hand-written converters elsewhere).
 */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] =  { __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL
typedef struct IOCTLEntry IOCTLEntry;

/* Signature of a custom per-ioctl conversion handler. */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

/* One row of the ioctl translation table: maps a guest ioctl number to a
 * host one, with either a generic thunk description (arg_type) or a
 * custom handler (do_ioctl). */
struct IOCTLEntry {
    int target_cmd;
    unsigned int host_cmd;
    const char *name;
    int access;             /* IOC_R / IOC_W / IOC_RW */
    do_ioctl_fn *do_ioctl;  /* custom handler, NULL for generic thunk */
    const argtype arg_type[5];
};

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

/* Size of the fixed scratch buffer handed to ioctl handlers. */
#define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

/* Custom handler for FS_IOC_FIEMAP: converts the variable-length
 * fiemap + fiemap_extent[] argument in both directions. */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    /* Convert the fixed header from guest to host form. */
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
5286 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
5287 int fd, int cmd, abi_long arg)
5289 const argtype *arg_type = ie->arg_type;
5290 int target_size;
5291 void *argptr;
5292 int ret;
5293 struct ifconf *host_ifconf;
5294 uint32_t outbufsz;
5295 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
5296 int target_ifreq_size;
5297 int nb_ifreq;
5298 int free_buf = 0;
5299 int i;
5300 int target_ifc_len;
5301 abi_long target_ifc_buf;
5302 int host_ifc_len;
5303 char *host_ifc_buf;
5305 assert(arg_type[0] == TYPE_PTR);
5306 assert(ie->access == IOC_RW);
5308 arg_type++;
5309 target_size = thunk_type_size(arg_type, 0);
5311 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5312 if (!argptr)
5313 return -TARGET_EFAULT;
5314 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5315 unlock_user(argptr, arg, 0);
5317 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
5318 target_ifc_len = host_ifconf->ifc_len;
5319 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
5321 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
5322 nb_ifreq = target_ifc_len / target_ifreq_size;
5323 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
5325 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
5326 if (outbufsz > MAX_STRUCT_SIZE) {
5327 /* We can't fit all the extents into the fixed size buffer.
5328 * Allocate one that is large enough and use it instead.
5330 host_ifconf = malloc(outbufsz);
5331 if (!host_ifconf) {
5332 return -TARGET_ENOMEM;
5334 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
5335 free_buf = 1;
5337 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
5339 host_ifconf->ifc_len = host_ifc_len;
5340 host_ifconf->ifc_buf = host_ifc_buf;
5342 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
5343 if (!is_error(ret)) {
5344 /* convert host ifc_len to target ifc_len */
5346 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
5347 target_ifc_len = nb_ifreq * target_ifreq_size;
5348 host_ifconf->ifc_len = target_ifc_len;
5350 /* restore target ifc_buf */
5352 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
5354 /* copy struct ifconf to target user */
5356 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5357 if (!argptr)
5358 return -TARGET_EFAULT;
5359 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
5360 unlock_user(argptr, arg, target_size);
5362 /* copy ifreq[] to target user */
5364 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
5365 for (i = 0; i < nb_ifreq ; i++) {
5366 thunk_convert(argptr + i * target_ifreq_size,
5367 host_ifc_buf + i * sizeof(struct ifreq),
5368 ifreq_arg_type, THUNK_TARGET);
5370 unlock_user(argptr, target_ifc_buf, target_ifc_len);
5373 if (free_buf) {
5374 free(host_ifconf);
5377 return ret;
/* Custom handler for device-mapper ioctls: the argument is a struct
 * dm_ioctl header followed by command-specific variable-length payload at
 * offset data_start.  The payload is converted command by command in both
 * directions; buf_temp is replaced by a larger heap buffer sized from the
 * guest-supplied data_size.
 * NOTE(review): the two result-phase lock_user(VERIFY_WRITE, guest_data,
 * ...) calls are not NULL-checked before argptr is written through —
 * looks like an EFAULT would be mishandled here; confirm and fix
 * separately.
 */
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        ret = -TARGET_EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }

    /* Convert the command-specific input payload to host form. */
    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        memcpy(host_data, argptr, guest_data_size);
        /* The payload starts with a 64-bit sector number that needs
         * swapping. */
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        /* Payload is a chain of dm_target_spec structs, each followed by
         * a parameter string; 'next' links are rebuilt for host sizes. */
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        /* Convert the command-specific output payload back to guest form. */
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            /* Chain of dm_name_list entries; stop when 'next' is 0 or the
             * guest buffer would overflow (then report DM_BUFFER_FULL). */
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void*)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            /* Chain of dm_target_spec entries with trailing strings. */
            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char*)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char*)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void*)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            /* A 32-bit count followed by 64-bit device numbers at offset 8. */
            void *hdata = (void*)host_dm + host_dm->data_start;
            int count = *(uint32_t*)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t*)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            /* Chain of dm_target_versions entries with trailing names. */
            struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void*)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        /* Finally copy the (possibly updated) dm_ioctl header back. */
        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}
/*
 * BLKPG ioctl: the guest's struct blkpg_ioctl_arg contains a 'data'
 * pointer to a struct blkpg_partition, so the generic thunk path cannot
 * handle it.  Convert the outer struct, validate the opcode, convert the
 * pointed-to partition descriptor into a host-side copy, then re-point
 * 'data' at that copy before issuing the host ioctl.
 * Returns the host ioctl result, or -TARGET_EFAULT / -TARGET_EINVAL.
 */
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;                         /* skip the TYPE_PTR header */
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data (a guest pointer inside the struct) */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
5663 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5664 int fd, int cmd, abi_long arg)
5666 const argtype *arg_type = ie->arg_type;
5667 const StructEntry *se;
5668 const argtype *field_types;
5669 const int *dst_offsets, *src_offsets;
5670 int target_size;
5671 void *argptr;
5672 abi_ulong *target_rt_dev_ptr;
5673 unsigned long *host_rt_dev_ptr;
5674 abi_long ret;
5675 int i;
5677 assert(ie->access == IOC_W);
5678 assert(*arg_type == TYPE_PTR);
5679 arg_type++;
5680 assert(*arg_type == TYPE_STRUCT);
5681 target_size = thunk_type_size(arg_type, 0);
5682 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5683 if (!argptr) {
5684 return -TARGET_EFAULT;
5686 arg_type++;
5687 assert(*arg_type == (int)STRUCT_rtentry);
5688 se = struct_entries + *arg_type++;
5689 assert(se->convert[0] == NULL);
5690 /* convert struct here to be able to catch rt_dev string */
5691 field_types = se->field_types;
5692 dst_offsets = se->field_offsets[THUNK_HOST];
5693 src_offsets = se->field_offsets[THUNK_TARGET];
5694 for (i = 0; i < se->nb_fields; i++) {
5695 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5696 assert(*field_types == TYPE_PTRVOID);
5697 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5698 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5699 if (*target_rt_dev_ptr != 0) {
5700 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5701 tswapal(*target_rt_dev_ptr));
5702 if (!*host_rt_dev_ptr) {
5703 unlock_user(argptr, arg, 0);
5704 return -TARGET_EFAULT;
5706 } else {
5707 *host_rt_dev_ptr = 0;
5709 field_types++;
5710 continue;
5712 field_types = thunk_convert(buf_temp + dst_offsets[i],
5713 argptr + src_offsets[i],
5714 field_types, THUNK_HOST);
5716 unlock_user(argptr, arg, 0);
5718 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5719 if (*host_rt_dev_ptr != 0) {
5720 unlock_user((void *)*host_rt_dev_ptr,
5721 *target_rt_dev_ptr, 0);
5723 return ret;
5726 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5727 int fd, int cmd, abi_long arg)
5729 int sig = target_to_host_signal(arg);
5730 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5733 #ifdef TIOCGPTPEER
5734 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5735 int fd, int cmd, abi_long arg)
5737 int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5738 return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5740 #endif
/* Translation table for all supported ioctls, generated from ioctls.h.
 * IOCTL() entries use the generic argument-thunking path in do_ioctl();
 * IOCTL_SPECIAL() entries dispatch to a custom do_ioctl_*() helper;
 * IOCTL_IGNORE() entries get host_cmd == 0, which do_ioctl() rejects
 * with -TARGET_ENOSYS.  The table is terminated by target_cmd == 0. */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, },
};
/* ??? Implement proper locking for ioctls.  */
/* do_ioctl() Must return target values and target errnos. */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear search of the translation table; terminated by
     * target_cmd == 0. */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    if (ie->do_ioctl) {
        /* Entry registered a custom handler (IOCTL_SPECIAL). */
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux. */
        return -TARGET_ENOSYS;
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* Argument passed through unconverted. */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* Host fills buf_temp; convert it out to guest memory
             * only on success. */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* Convert guest struct into buf_temp before the call. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* Convert in, call, convert back out on success. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
/* termios c_iflag bit translation table (target <-> host). */
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
        { 0, 0, 0, 0 }
};
/* termios c_oflag bit translation table (target <-> host).  The *DLY
 * entries map multi-bit delay fields, so the mask and value differ. */
static const bitmask_transtbl oflag_tbl[] = {
	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
	{ 0, 0, 0, 0 }
};
/* termios c_cflag bit translation table (target <-> host).  CBAUD and
 * CSIZE are multi-bit fields, hence one entry per possible value. */
static const bitmask_transtbl cflag_tbl[] = {
	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
	{ 0, 0, 0, 0 }
};
/* termios c_lflag bit translation table (target <-> host). */
static const bitmask_transtbl lflag_tbl[] = {
	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
	{ 0, 0, 0, 0 }
};
/* Convert a guest struct target_termios into a host struct
 * host_termios: translate the four flag words through the tables above
 * and remap the control-character array indices one by one. */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    /* Host slots without a target counterpart stay zero. */
    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
/* Convert a host struct host_termios into a guest struct
 * target_termios: the inverse of target_to_host_termios(). */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    /* Target slots without a host counterpart stay zero. */
    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
/* StructEntry used by the ioctl thunk machinery for termios structs:
 * whole-struct conversion via the two functions above. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};
/* mmap() flags translation table (target <-> host). */
static bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host.  */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { 0, 0, 0, 0 }
};
6043 #if defined(TARGET_I386)
/* NOTE: there is really one LDT for all the threads */
/* Backing store for the emulated LDT; allocated lazily by write_ldt(). */
static uint8_t *ldt_table;
/*
 * modify_ldt func 0: copy the emulated LDT out to guest memory.
 * Returns the number of bytes copied (capped at bytecount), 0 if no
 * LDT exists yet, or -TARGET_EFAULT if the guest buffer is unwritable.
 */
static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this by byteswapped?  */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}
/* XXX: add locking support */
/*
 * modify_ldt funcs 1 (oldmode) and 0x11: install one LDT descriptor.
 * Reads a struct target_modify_ldt_ldt_s from guest memory, validates
 * it, lazily allocates the LDT backing store, then packs the base /
 * limit / flag bits into the two descriptor words exactly as the Linux
 * kernel does and writes them into the table slot.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the flag bits (same layout as the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0		&&
             read_exec_only == 1	&&
             seg_32bit == 0		&&
             limit_in_pages == 0	&&
             seg_not_present == 1	&&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack base/limit/flags into the two descriptor words. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
6159 /* specific and weird i386 syscalls */
6160 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6161 unsigned long bytecount)
6163 abi_long ret;
6165 switch (func) {
6166 case 0:
6167 ret = read_ldt(ptr, bytecount);
6168 break;
6169 case 1:
6170 ret = write_ldt(env, ptr, bytecount, 1);
6171 break;
6172 case 0x11:
6173 ret = write_ldt(env, ptr, bytecount, 0);
6174 break;
6175 default:
6176 ret = -TARGET_ENOSYS;
6177 break;
6179 return ret;
6182 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/*
 * set_thread_area(2) emulation: install a TLS descriptor into the GDT.
 * If entry_number is -1 the first free TLS slot is picked and written
 * back to the guest struct.  Descriptor packing mirrors write_ldt()
 * and the Linux kernel.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    /* -1 means "allocate a free TLS slot for me". */
    if (ldt_info.entry_number == -1) {
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* Unpack the flag bits (same layout as the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack base/limit/flags into the two descriptor words. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
/*
 * get_thread_area(2) emulation: read the GDT descriptor selected by
 * entry_number, unpack the two descriptor words back into base / limit
 * / flag bits, and write them into the guest struct.
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Invert the packing performed by do_set_thread_area(). */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
6314 #endif /* TARGET_I386 && TARGET_ABI32 */
6316 #ifndef TARGET_ABI32
6317 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6319 abi_long ret = 0;
6320 abi_ulong val;
6321 int idx;
6323 switch(code) {
6324 case TARGET_ARCH_SET_GS:
6325 case TARGET_ARCH_SET_FS:
6326 if (code == TARGET_ARCH_SET_GS)
6327 idx = R_GS;
6328 else
6329 idx = R_FS;
6330 cpu_x86_load_seg(env, idx, 0);
6331 env->segs[idx].base = addr;
6332 break;
6333 case TARGET_ARCH_GET_GS:
6334 case TARGET_ARCH_GET_FS:
6335 if (code == TARGET_ARCH_GET_GS)
6336 idx = R_GS;
6337 else
6338 idx = R_FS;
6339 val = env->segs[idx].base;
6340 if (put_user(val, addr, abi_ulong))
6341 ret = -TARGET_EFAULT;
6342 break;
6343 default:
6344 ret = -TARGET_EINVAL;
6345 break;
6347 return ret;
6349 #endif
6351 #endif /* defined(TARGET_I386) */
/* Stack size for threads created via clone(CLONE_VM). */
#define NEW_STACK_SIZE 0x40000


/* Held across thread creation so the parent finishes initializing TLS
 * state before the new thread starts executing guest code. */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Hand-off block passed from do_fork() to clone_func(). */
typedef struct {
    CPUArchState *env;
    pthread_mutex_t mutex;   /* guards the cond hand-shake below */
    pthread_cond_t cond;     /* signalled once the child has its tid */
    pthread_t thread;
    uint32_t tid;            /* filled in by the child */
    abi_ulong child_tidptr;  /* CLONE_CHILD_SETTID target, or 0 */
    abi_ulong parent_tidptr; /* CLONE_PARENT_SETTID target, or 0 */
    sigset_t sigmask;        /* signal mask the child should restore */
} new_thread_info;
/* Start routine for threads created by do_fork(CLONE_VM): register the
 * new host thread with RCU/TCG, publish the tid, signal the parent,
 * then wait for the parent to release clone_lock before entering the
 * guest CPU loop.  Never returns. */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = gettid();
    task_settid(ts);
    /* Publish the tid wherever the clone flags requested it. */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
/* Implements clone(2)/fork(2)/vfork(2): CLONE_VM requests become a new
 * host pthread sharing the address space; anything else is emulated
 * with a plain host fork().  vfork is demoted to fork. */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_cpu = ENV_GET_CPU(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        /* Set up the hand-off block read by clone_func(). */
        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        /* If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         */
        if (!parallel_cpus) {
            parallel_cpus = true;
            tb_flush(cpu);
        }

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs(env, newsp);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            fork_end(0);
        }
    }
    return ret;
}
/* warning : doesn't handle linux specific flags... */
/* Map a target fcntl(2) command to the host command number.  Lock
 * commands are routed to the *LK64 variants so that 64-bit file
 * offsets work regardless of host ABI.  Returns -TARGET_EINVAL for
 * commands we do not translate. */
static int target_to_host_fcntl_cmd(int cmd)
{
    switch(cmd) {
	case TARGET_F_DUPFD:
	case TARGET_F_GETFD:
	case TARGET_F_SETFD:
	case TARGET_F_GETFL:
	case TARGET_F_SETFL:
            return cmd;
        case TARGET_F_GETLK:
            return F_GETLK64;
        case TARGET_F_SETLK:
            return F_SETLK64;
        case TARGET_F_SETLKW:
            return F_SETLKW64;
	case TARGET_F_GETOWN:
	    return F_GETOWN;
	case TARGET_F_SETOWN:
	    return F_SETOWN;
	case TARGET_F_GETSIG:
	    return F_GETSIG;
	case TARGET_F_SETSIG:
	    return F_SETSIG;
#if TARGET_ABI_BITS == 32
        case TARGET_F_GETLK64:
	    return F_GETLK64;
	case TARGET_F_SETLK64:
	    return F_SETLK64;
	case TARGET_F_SETLKW64:
	    return F_SETLKW64;
#endif
        case TARGET_F_SETLEASE:
            return F_SETLEASE;
        case TARGET_F_GETLEASE:
            return F_GETLEASE;
#ifdef F_DUPFD_CLOEXEC
        case TARGET_F_DUPFD_CLOEXEC:
            return F_DUPFD_CLOEXEC;
#endif
        case TARGET_F_NOTIFY:
            return F_NOTIFY;
#ifdef F_GETOWN_EX
	case TARGET_F_GETOWN_EX:
	    return F_GETOWN_EX;
#endif
#ifdef F_SETOWN_EX
	case TARGET_F_SETOWN_EX:
	    return F_SETOWN_EX;
#endif
#ifdef F_SETPIPE_SZ
        case TARGET_F_SETPIPE_SZ:
            return F_SETPIPE_SZ;
        case TARGET_F_GETPIPE_SZ:
            return F_GETPIPE_SZ;
#endif
	default:
            return -TARGET_EINVAL;
    }
    /* Not reached: every case above returns. */
    return -TARGET_EINVAL;
}
/*
 * One switch body shared by both lock-type converters below; the
 * per-direction TRANSTBL_CONVERT macro supplies the case/return pair.
 */
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    TRANSTBL_CONVERT(F_EXLCK); \
    TRANSTBL_CONVERT(F_SHLCK); \
    }

/* Target l_type -> host l_type; -TARGET_EINVAL for unknown values. */
static int target_to_host_flock(int type)
{
#define TRANSTBL_CONVERT(a) case TARGET_##a: return a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    return -TARGET_EINVAL;
}

/* Host l_type -> target l_type. */
static int host_to_target_flock(int type)
{
#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    /* if we don't know how to convert the value coming
     * from the host we copy to the target field as-is
     */
    return type;
}
6631 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6632 abi_ulong target_flock_addr)
6634 struct target_flock *target_fl;
6635 int l_type;
6637 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6638 return -TARGET_EFAULT;
6641 __get_user(l_type, &target_fl->l_type);
6642 l_type = target_to_host_flock(l_type);
6643 if (l_type < 0) {
6644 return l_type;
6646 fl->l_type = l_type;
6647 __get_user(fl->l_whence, &target_fl->l_whence);
6648 __get_user(fl->l_start, &target_fl->l_start);
6649 __get_user(fl->l_len, &target_fl->l_len);
6650 __get_user(fl->l_pid, &target_fl->l_pid);
6651 unlock_user_struct(target_fl, target_flock_addr, 0);
6652 return 0;
/*
 * Copy a host flock64 back to a target struct flock in guest memory.
 * Unknown host l_type values are passed through as-is by
 * host_to_target_flock().  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
/* Function-pointer types matching the flock64 copy helpers below, so a
 * caller can select the ABI-appropriate pair (plain vs. ARM OABI). */
typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
#if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/*
 * ARM OABI packs struct flock64 differently from EABI, so it needs a
 * dedicated pair of copy helpers using target_oabi_flock64.
 *
 * Guest -> host.  Returns 0, -TARGET_EFAULT, or a negative error for an
 * unknown lock type.
 */
static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_oabi_flock64 *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        /* Fix: release the locked struct before the early error return;
         * this path previously leaked the lock. */
        unlock_user_struct(target_fl, target_flock_addr, 0);
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

/* Host -> guest, OABI layout.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_oabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
#endif
6724 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6725 abi_ulong target_flock_addr)
6727 struct target_flock64 *target_fl;
6728 int l_type;
6730 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6731 return -TARGET_EFAULT;
6734 __get_user(l_type, &target_fl->l_type);
6735 l_type = target_to_host_flock(l_type);
6736 if (l_type < 0) {
6737 return l_type;
6739 fl->l_type = l_type;
6740 __get_user(fl->l_whence, &target_fl->l_whence);
6741 __get_user(fl->l_start, &target_fl->l_start);
6742 __get_user(fl->l_len, &target_fl->l_len);
6743 __get_user(fl->l_pid, &target_fl->l_pid);
6744 unlock_user_struct(target_fl, target_flock_addr, 0);
6745 return 0;
/*
 * Copy a host flock64 back to a target struct flock64 in guest memory.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
/*
 * Implement fcntl(2) for the guest.  The command is translated with
 * target_to_host_fcntl_cmd(); lock structures, owner-ex structures and
 * flag bitmasks are marshalled between guest and host representations.
 * Unknown commands are passed straight to the host fcntl.
 */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
	return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        /* in/out: copy the probe in, run it, copy the result back */
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        /* in only */
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        /* translate the returned O_* flag bits back to target values */
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
        /* plain integer argument: no marshalling needed */
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
#ifdef USE_UID16

/* Clamp a 32-bit uid to the legacy 16-bit range; the kernel convention
 * is that ids that do not fit are reported as 65534 (the overflow id). */
static inline int high2lowuid(int uid)
{
    return uid > 65535 ? 65534 : uid;
}

static inline int high2lowgid(int gid)
{
    return gid > 65535 ? 65534 : gid;
}

/* Widen a 16-bit uid, preserving the special "no change" value -1. */
static inline int low2highuid(int uid)
{
    return (int16_t)uid == -1 ? -1 : uid;
}

static inline int low2highgid(int gid)
{
    return (int16_t)gid == -1 ? -1 : gid;
}

/* Byteswap an id of the ABI's width (16-bit here). */
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */

/* Full 32-bit ids: every conversion is the identity. */
static inline int high2lowuid(int uid)
{
    return uid;
}

static inline int high2lowgid(int gid)
{
    return gid;
}

static inline int low2highuid(int uid)
{
    return uid;
}

static inline int low2highgid(int gid)
{
    return gid;
}

static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

/* Thread-local raw syscall wrappers generated by the _syscall macros. */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
/*
 * One-time initialization for the syscall layer: register the struct
 * thunk descriptors, build the errno reverse-mapping table, and patch
 * the size field of ioctl numbers whose size depends on a target
 * struct layout.
 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);
/* Register every struct conversion descriptor from syscall_types.h. */
#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            /* compute the target-layout size of the pointed-to struct
             * and splice it into the ioctl number's size field */
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
#if TARGET_ABI_BITS == 32
/*
 * Reassemble a 64-bit offset from the two 32-bit register words a
 * 32-bit guest passes, honouring the guest's word order.
 */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
    uint64_t hi, lo;

#ifdef TARGET_WORDS_BIGENDIAN
    hi = word0;
    lo = word1;
#else
    hi = word1;
    lo = word0;
#endif
    return (hi << 32) | lo;
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABIs pass the offset whole in the first word. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/*
 * truncate64: rebuild the 64-bit length from its two abi_long halves.
 * ABIs that align 64-bit register pairs leave a hole in arg2, in which
 * case the halves arrive in arg3/arg4 instead.
 */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    abi_long len_word0 = arg2;
    abi_long len_word1 = arg3;

    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        len_word0 = arg3;
        len_word1 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(len_word0, len_word1)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/*
 * ftruncate64: same register-pair handling as target_truncate64 above,
 * but operating on a file descriptor instead of a path.
 */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    abi_long len_word0 = arg2;
    abi_long len_word1 = arg3;

    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        len_word0 = arg3;
        len_word1 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(len_word0, len_word1)));
}
#endif
7072 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
7073 abi_ulong target_addr)
7075 struct target_timespec *target_ts;
7077 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
7078 return -TARGET_EFAULT;
7079 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
7080 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
7081 unlock_user_struct(target_ts, target_addr, 0);
7082 return 0;
7085 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
7086 struct timespec *host_ts)
7088 struct target_timespec *target_ts;
7090 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
7091 return -TARGET_EFAULT;
7092 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
7093 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
7094 unlock_user_struct(target_ts, target_addr, 1);
7095 return 0;
7098 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
7099 abi_ulong target_addr)
7101 struct target_itimerspec *target_itspec;
7103 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
7104 return -TARGET_EFAULT;
7107 host_itspec->it_interval.tv_sec =
7108 tswapal(target_itspec->it_interval.tv_sec);
7109 host_itspec->it_interval.tv_nsec =
7110 tswapal(target_itspec->it_interval.tv_nsec);
7111 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
7112 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
7114 unlock_user_struct(target_itspec, target_addr, 1);
7115 return 0;
7118 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7119 struct itimerspec *host_its)
7121 struct target_itimerspec *target_itspec;
7123 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
7124 return -TARGET_EFAULT;
7127 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
7128 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
7130 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
7131 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
7133 unlock_user_struct(target_itspec, target_addr, 0);
7134 return 0;
/*
 * Copy a target struct timex (adjtimex(2)) from guest memory into the
 * host structure, converting every field.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
/*
 * Copy a host struct timex back out to a target struct timex in guest
 * memory (mirror of target_to_host_timex).  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
7208 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7209 abi_ulong target_addr)
7211 struct target_sigevent *target_sevp;
7213 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7214 return -TARGET_EFAULT;
7217 /* This union is awkward on 64 bit systems because it has a 32 bit
7218 * integer and a pointer in it; we follow the conversion approach
7219 * used for handling sigval types in signal.c so the guest should get
7220 * the correct value back even if we did a 64 bit byteswap and it's
7221 * using the 32 bit integer.
7223 host_sevp->sigev_value.sival_ptr =
7224 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7225 host_sevp->sigev_signo =
7226 target_to_host_signal(tswap32(target_sevp->sigev_signo));
7227 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7228 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7230 unlock_user_struct(target_sevp, target_addr, 1);
7231 return 0;
#if defined(TARGET_NR_mlockall)
/* Translate the target's MCL_* flag bits for mlockall(2) into host bits. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int host_arg = 0;

    host_arg |= (arg & TARGET_MLOCKALL_MCL_CURRENT) ? MCL_CURRENT : 0;
    host_arg |= (arg & TARGET_MLOCKALL_MCL_FUTURE) ? MCL_FUTURE : 0;
    return host_arg;
}
#endif
/*
 * Convert a host struct stat into the target's stat64 layout at guest
 * address target_addr.  ARM EABI guests use their own packed layout;
 * targets without a stat64 fall back to the plain target_stat.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        /* zero first: the target struct has padding we must not leak */
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
7312 /* ??? Using host futex calls even when target atomic operations
7313 are not really atomic probably breaks things. However implementing
7314 futexes locally would make futexes shared between multiple processes
7315 tricky. However they're probably useless because guest atomic
7316 operations won't work either. */
7317 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7318 target_ulong uaddr2, int val3)
7320 struct timespec ts, *pts;
7321 int base_op;
7323 /* ??? We assume FUTEX_* constants are the same on both host
7324 and target. */
7325 #ifdef FUTEX_CMD_MASK
7326 base_op = op & FUTEX_CMD_MASK;
7327 #else
7328 base_op = op;
7329 #endif
7330 switch (base_op) {
7331 case FUTEX_WAIT:
7332 case FUTEX_WAIT_BITSET:
7333 if (timeout) {
7334 pts = &ts;
7335 target_to_host_timespec(pts, timeout);
7336 } else {
7337 pts = NULL;
7339 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
7340 pts, NULL, val3));
7341 case FUTEX_WAKE:
7342 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7343 case FUTEX_FD:
7344 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7345 case FUTEX_REQUEUE:
7346 case FUTEX_CMP_REQUEUE:
7347 case FUTEX_WAKE_OP:
7348 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7349 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7350 But the prototype takes a `struct timespec *'; insert casts
7351 to satisfy the compiler. We do not need to tswap TIMEOUT
7352 since it's not compared to guest memory. */
7353 pts = (struct timespec *)(uintptr_t) timeout;
7354 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
7355 g2h(uaddr2),
7356 (base_op == FUTEX_CMP_REQUEUE
7357 ? tswap32(val3)
7358 : val3)));
7359 default:
7360 return -TARGET_ENOSYS;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Implement name_to_handle_at(2).  'handle' is a guest pointer to a
 * struct file_handle whose leading handle_bytes field tells us how much
 * opaque handle data the caller provided room for.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* handle_bytes is the first 32-bit field of struct file_handle */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    /* zeroed bounce buffer: never copy uninitialized bytes to the guest */
    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Implement open_by_handle_at(2): duplicate the guest's file_handle
 * into host memory, fix up the byte order of its header fields, and
 * open it with host flags translated from the target bitmask.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes is the first 32-bit field of struct file_handle */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
7451 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
/* signalfd siginfo conversion */

/*
 * Convert one signalfd_siginfo record from host to target byte order
 * and signal numbering.  tinfo and info may alias (the only caller
 * converts the buffer in place), so all inputs are read from 'info'
 * before the corresponding output field of 'tinfo' is written.
 */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    /* Fix: test the input fields; these were previously read from tinfo,
     * which only worked because the caller passes tinfo == info. */
    if (info->ssi_signo == SIGBUS &&
        (info->ssi_code == BUS_MCEERR_AR ||
         info->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    /* Fix: read ssi_errno from the input record, not the output. */
    tinfo->ssi_errno = tswap32(info->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
7493 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
7495 int i;
7497 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
7498 host_to_target_signalfd_siginfo(buf + i, buf + i);
7501 return len;
/* fd_trans hook: byteswap signalfd records whenever the guest reads them. */
static TargetFdTrans target_signalfd_trans = {
    .host_to_target_data = host_to_target_data_signalfd,
};
/*
 * Implement signalfd4(2) for the guest: translate the guest sigset and
 * flag bits, create the host signalfd, and register the read-side data
 * translator so records come back in target format.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    /* only NONBLOCK and CLOEXEC are valid signalfd4 flags */
    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
7535 #endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same. */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* terminating signal lives in the low 7 bits; remap just those */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* stop signal lives in bits 8-15; the 0x7f marker byte stays */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
7551 static int open_self_cmdline(void *cpu_env, int fd)
7553 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7554 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7555 int i;
7557 for (i = 0; i < bprm->argc; i++) {
7558 size_t len = strlen(bprm->argv[i]) + 1;
7560 if (write(fd, bprm->argv[i], len) != len) {
7561 return -1;
7565 return 0;
7568 static int open_self_maps(void *cpu_env, int fd)
7570 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7571 TaskState *ts = cpu->opaque;
7572 FILE *fp;
7573 char *line = NULL;
7574 size_t len = 0;
7575 ssize_t read;
7577 fp = fopen("/proc/self/maps", "r");
7578 if (fp == NULL) {
7579 return -1;
7582 while ((read = getline(&line, &len, fp)) != -1) {
7583 int fields, dev_maj, dev_min, inode;
7584 uint64_t min, max, offset;
7585 char flag_r, flag_w, flag_x, flag_p;
7586 char path[512] = "";
7587 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7588 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7589 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
7591 if ((fields < 10) || (fields > 11)) {
7592 continue;
7594 if (h2g_valid(min)) {
7595 int flags = page_get_flags(h2g(min));
7596 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
7597 if (page_check_range(h2g(min), max - min, flags) == -1) {
7598 continue;
7600 if (h2g(min) == ts->info->stack_limit) {
7601 pstrcpy(path, sizeof(path), " [stack]");
7603 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
7604 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7605 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7606 flag_x, flag_p, offset, dev_maj, dev_min, inode,
7607 path[0] ? " " : "", path);
7611 free(line);
7612 fclose(fp);
7614 return 0;
/*
 * Emulate reads of /proc/self/stat: emit the 44 space-separated fields;
 * only pid (0), comm (1) and startstack (27) carry real values, the
 * rest are reported as 0.  Returns 0 on success, -1 on write failure.
 */
static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong start_stack = ts->info->start_stack;
    int i;

    for (i = 0; i < 44; i++) {
        char buf[128];
        int len;
        uint64_t val = 0;

        if (i == 0) {
            /* pid */
            val = getpid();
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else if (i == 1) {
            /* app name */
            snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
        } else if (i == 27) {
            /* stack bottom */
            val = start_stack;
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else {
            /* for the rest, there is MasterCard */
            snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
        }

        len = strlen(buf);
        if (write(fd, buf, len) != len) {
            return -1;
        }
    }

    return 0;
}
/*
 * Fake /proc/self/auxv for the guest: copy the auxiliary vector that
 * was saved from the target process stack at exec time into fd, then
 * rewind the fd.  Always returns 0 (a failed lock or short write just
 * produces a truncated/empty file).
 */
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;  /* guest address of the saved auxv */
    abi_ulong len = ts->info->auxv_len;     /* its length in bytes */
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        /* Loop until everything is written or write() fails/stalls. */
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        /* NOTE(review): ptr and len were advanced/decremented by the loop
         * above, so the unlock uses the updated values — presumably harmless
         * for a VERIFY_READ lock (nothing is copied back), but confirm
         * against lock_user()/unlock_user() semantics. */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
/* Return 1 if filename names /proc entry 'entry' for the current process —
 * i.e. "/proc/self/<entry>" or "/proc/<our-pid>/<entry>" — and 0 otherwise. */
static int is_proc_myself(const char *filename, const char *entry)
{
    static const char proc_prefix[] = "/proc/";
    static const char self_prefix[] = "self/";

    if (strncmp(filename, proc_prefix, strlen(proc_prefix)) != 0) {
        return 0;
    }
    filename += strlen(proc_prefix);

    if (strncmp(filename, self_prefix, strlen(self_prefix)) == 0) {
        filename += strlen(self_prefix);
    } else if (*filename >= '1' && *filename <= '9') {
        /* Numeric directory: only a match if it is our own pid. */
        char myself[80];
        size_t pid_len;

        snprintf(myself, sizeof(myself), "%d/", getpid());
        pid_len = strlen(myself);
        if (strncmp(filename, myself, pid_len) != 0) {
            return 0;
        }
        filename += pid_len;
    } else {
        return 0;
    }

    return strcmp(filename, entry) == 0;
}
7708 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-path matcher used by the fake-open table for absolute /proc paths. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
7714 static int open_net_route(void *cpu_env, int fd)
7716 FILE *fp;
7717 char *line = NULL;
7718 size_t len = 0;
7719 ssize_t read;
7721 fp = fopen("/proc/net/route", "r");
7722 if (fp == NULL) {
7723 return -1;
7726 /* read header */
7728 read = getline(&line, &len, fp);
7729 dprintf(fd, "%s", line);
7731 /* read routes */
7733 while ((read = getline(&line, &len, fp)) != -1) {
7734 char iface[16];
7735 uint32_t dest, gw, mask;
7736 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7737 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7738 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7739 &mask, &mtu, &window, &irtt);
7740 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7741 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7742 metric, tswap32(mask), mtu, window, irtt);
7745 free(line);
7746 fclose(fp);
7748 return 0;
7750 #endif
/*
 * openat(2) replacement for the guest.  A small table of /proc paths
 * whose contents must be synthesized (maps, stat, auxv, cmdline, and —
 * on cross-endian builds — /proc/net/route) is checked first; matches
 * are materialized into an unlinked temp file whose fd is returned.
 * /proc/self/exe is redirected to the real executable.  Everything
 * else falls through to the host via safe_openat().
 */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;                       /* path (or entry) to intercept */
        int (*fill)(void *cpu_env, int fd);         /* writes fake contents into fd */
        int (*cmp)(const char *s1, const char *s2); /* matcher: is_proc_myself or is_proc */
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    if (is_proc_myself(pathname, "exe")) {
        /* Prefer the fd the kernel gave us at exec time, if any. */
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    /* Linear scan; the NULL sentinel entry terminates the loop. */
    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* Unlink immediately: the fd keeps the file alive, nothing else
         * should ever see it by name. */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            /* Preserve the fill callback's errno across close(). */
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
7812 #define TIMER_MAGIC 0x0caf0000
7813 #define TIMER_MAGIC_MASK 0xffff0000
/* Convert QEMU provided timer ID back to internal 16bit index format */
static target_timer_t get_timer_id(abi_long arg)
{
    target_timer_t timerid = arg;

    /* Reject IDs whose upper bits do not carry the TIMER_MAGIC tag. */
    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;
    }

    /* Strip the tag, leaving the raw index into g_posix_timers. */
    timerid &= 0xffff;

    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
        return -TARGET_EINVAL;
    }

    return timerid;
}
7833 static abi_long swap_data_eventfd(void *buf, size_t len)
7835 uint64_t *counter = buf;
7836 int i;
7838 if (len < sizeof(uint64_t)) {
7839 return -EINVAL;
7842 for (i = 0; i < len; i += sizeof(uint64_t)) {
7843 *counter = tswap64(*counter);
7844 counter++;
7847 return len;
/* FD translator for eventfd descriptors: the same 64-bit swap helper is
 * applied to data flowing in both directions. */
static TargetFdTrans target_eventfd_trans = {
    .host_to_target_data = swap_data_eventfd,
    .target_to_host_data = swap_data_eventfd,
};
7855 #if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
7856 (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
7857 defined(__NR_inotify_init1))
/*
 * Byteswap a buffer of struct inotify_event records in place for the
 * guest.  Records are variable length: a fixed header followed by
 * ev->len bytes of name, so the loop stride depends on the length
 * field as read BEFORE it is swapped.
 */
static abi_long host_to_target_data_inotify(void *buf, size_t len)
{
    struct inotify_event *ev;
    int i;
    uint32_t name_len;

    /* name_len is assigned in the body before the increment expression
     * first uses it, so the stride is always the host-order length. */
    for (i = 0; i < len; i += sizeof(struct inotify_event) + name_len) {
        ev = (struct inotify_event *)((char *)buf + i);
        /* Save the host-order name length: needed to find the next record. */
        name_len = ev->len;

        ev->wd = tswap32(ev->wd);
        ev->mask = tswap32(ev->mask);
        ev->cookie = tswap32(ev->cookie);
        ev->len = tswap32(name_len);
    }

    return len;
}
/* FD translator for inotify descriptors: only reads need conversion;
 * nothing is written to an inotify fd. */
static TargetFdTrans target_inotify_trans = {
    .host_to_target_data = host_to_target_data_inotify,
};
7880 #endif
/*
 * Convert a guest CPU-affinity bitmask at target_addr (target_size
 * bytes of abi_ulong words, target byte order) into the host bitmask
 * host_mask (host_size bytes, zero-filled beyond the copied bits).
 * Returns 0 on success or -TARGET_EFAULT if the guest memory cannot
 * be locked.
 */
static int target_to_host_cpu_mask(unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;  /* bits per guest word */
    unsigned host_bits = sizeof(*host_mask) * 8;   /* bits per host word */
    abi_ulong *target_mask;
    unsigned i, j;

    /* Caller must supply a host buffer at least as large as the guest's. */
    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }
    memset(host_mask, 0, host_size);

    /* Copy bit-by-bit: guest and host word sizes may differ, so each set
     * guest bit is re-addressed in host-word coordinates. */
    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val;

        __get_user(val, &target_mask[i]);
        for (j = 0; j < target_bits; j++, bit++) {
            if (val & (1UL << j)) {
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
            }
        }
    }

    unlock_user(target_mask, target_addr, 0);
    return 0;
}
/*
 * Inverse of target_to_host_cpu_mask(): write the host CPU-affinity
 * bitmask host_mask out to guest memory at target_addr as abi_ulong
 * words in target byte order.  Returns 0 on success or -TARGET_EFAULT
 * if the guest memory cannot be locked for writing.
 */
static int host_to_target_cpu_mask(const unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;  /* bits per guest word */
    unsigned host_bits = sizeof(*host_mask) * 8;   /* bits per host word */
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }

    /* Assemble each guest word bit-by-bit from host-word coordinates. */
    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val = 0;

        for (j = 0; j < target_bits; j++, bit++) {
            if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
                val |= 1UL << j;
            }
        }
        __put_user(val, &target_mask[i]);
    }

    unlock_user(target_mask, target_addr, target_size);
    return 0;
}
7949 /* do_syscall() should always have a single exit point at the end so
7950 that actions, such as logging of syscall results, can be performed.
7951 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7952 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
7953 abi_long arg2, abi_long arg3, abi_long arg4,
7954 abi_long arg5, abi_long arg6, abi_long arg7,
7955 abi_long arg8)
7957 CPUState *cpu = ENV_GET_CPU(cpu_env);
7958 abi_long ret;
7959 struct stat st;
7960 struct statfs stfs;
7961 void *p;
7963 #if defined(DEBUG_ERESTARTSYS)
7964 /* Debug-only code for exercising the syscall-restart code paths
7965 * in the per-architecture cpu main loops: restart every syscall
7966 * the guest makes once before letting it through.
7969 static int flag;
7971 flag = !flag;
7972 if (flag) {
7973 return -TARGET_ERESTARTSYS;
7976 #endif
7978 #ifdef DEBUG
7979 gemu_log("syscall %d", num);
7980 #endif
7981 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
7982 if(do_strace)
7983 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
7985 switch(num) {
7986 case TARGET_NR_exit:
7987 /* In old applications this may be used to implement _exit(2).
7988 However in threaded applictions it is used for thread termination,
7989 and _exit_group is used for application termination.
7990 Do thread termination if we have more then one thread. */
7992 if (block_signals()) {
7993 ret = -TARGET_ERESTARTSYS;
7994 break;
7997 cpu_list_lock();
7999 if (CPU_NEXT(first_cpu)) {
8000 TaskState *ts;
8002 /* Remove the CPU from the list. */
8003 QTAILQ_REMOVE(&cpus, cpu, node);
8005 cpu_list_unlock();
8007 ts = cpu->opaque;
8008 if (ts->child_tidptr) {
8009 put_user_u32(0, ts->child_tidptr);
8010 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
8011 NULL, NULL, 0);
8013 thread_cpu = NULL;
8014 object_unref(OBJECT(cpu));
8015 g_free(ts);
8016 rcu_unregister_thread();
8017 pthread_exit(NULL);
8020 cpu_list_unlock();
8021 #ifdef TARGET_GPROF
8022 _mcleanup();
8023 #endif
8024 gdb_exit(cpu_env, arg1);
8025 _exit(arg1);
8026 ret = 0; /* avoid warning */
8027 break;
8028 case TARGET_NR_read:
8029 if (arg3 == 0)
8030 ret = 0;
8031 else {
8032 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8033 goto efault;
8034 ret = get_errno(safe_read(arg1, p, arg3));
8035 if (ret >= 0 &&
8036 fd_trans_host_to_target_data(arg1)) {
8037 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8039 unlock_user(p, arg2, ret);
8041 break;
8042 case TARGET_NR_write:
8043 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8044 goto efault;
8045 if (fd_trans_target_to_host_data(arg1)) {
8046 void *copy = g_malloc(arg3);
8047 memcpy(copy, p, arg3);
8048 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8049 if (ret >= 0) {
8050 ret = get_errno(safe_write(arg1, copy, ret));
8052 g_free(copy);
8053 } else {
8054 ret = get_errno(safe_write(arg1, p, arg3));
8056 unlock_user(p, arg2, 0);
8057 break;
8058 #ifdef TARGET_NR_open
8059 case TARGET_NR_open:
8060 if (!(p = lock_user_string(arg1)))
8061 goto efault;
8062 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8063 target_to_host_bitmask(arg2, fcntl_flags_tbl),
8064 arg3));
8065 fd_trans_unregister(ret);
8066 unlock_user(p, arg1, 0);
8067 break;
8068 #endif
8069 case TARGET_NR_openat:
8070 if (!(p = lock_user_string(arg2)))
8071 goto efault;
8072 ret = get_errno(do_openat(cpu_env, arg1, p,
8073 target_to_host_bitmask(arg3, fcntl_flags_tbl),
8074 arg4));
8075 fd_trans_unregister(ret);
8076 unlock_user(p, arg2, 0);
8077 break;
8078 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8079 case TARGET_NR_name_to_handle_at:
8080 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8081 break;
8082 #endif
8083 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8084 case TARGET_NR_open_by_handle_at:
8085 ret = do_open_by_handle_at(arg1, arg2, arg3);
8086 fd_trans_unregister(ret);
8087 break;
8088 #endif
8089 case TARGET_NR_close:
8090 fd_trans_unregister(arg1);
8091 ret = get_errno(close(arg1));
8092 break;
8093 case TARGET_NR_brk:
8094 ret = do_brk(arg1);
8095 break;
8096 #ifdef TARGET_NR_fork
8097 case TARGET_NR_fork:
8098 ret = get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8099 break;
8100 #endif
8101 #ifdef TARGET_NR_waitpid
8102 case TARGET_NR_waitpid:
8104 int status;
8105 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8106 if (!is_error(ret) && arg2 && ret
8107 && put_user_s32(host_to_target_waitstatus(status), arg2))
8108 goto efault;
8110 break;
8111 #endif
8112 #ifdef TARGET_NR_waitid
8113 case TARGET_NR_waitid:
8115 siginfo_t info;
8116 info.si_pid = 0;
8117 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8118 if (!is_error(ret) && arg3 && info.si_pid != 0) {
8119 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8120 goto efault;
8121 host_to_target_siginfo(p, &info);
8122 unlock_user(p, arg3, sizeof(target_siginfo_t));
8125 break;
8126 #endif
8127 #ifdef TARGET_NR_creat /* not on alpha */
8128 case TARGET_NR_creat:
8129 if (!(p = lock_user_string(arg1)))
8130 goto efault;
8131 ret = get_errno(creat(p, arg2));
8132 fd_trans_unregister(ret);
8133 unlock_user(p, arg1, 0);
8134 break;
8135 #endif
8136 #ifdef TARGET_NR_link
8137 case TARGET_NR_link:
8139 void * p2;
8140 p = lock_user_string(arg1);
8141 p2 = lock_user_string(arg2);
8142 if (!p || !p2)
8143 ret = -TARGET_EFAULT;
8144 else
8145 ret = get_errno(link(p, p2));
8146 unlock_user(p2, arg2, 0);
8147 unlock_user(p, arg1, 0);
8149 break;
8150 #endif
8151 #if defined(TARGET_NR_linkat)
8152 case TARGET_NR_linkat:
8154 void * p2 = NULL;
8155 if (!arg2 || !arg4)
8156 goto efault;
8157 p = lock_user_string(arg2);
8158 p2 = lock_user_string(arg4);
8159 if (!p || !p2)
8160 ret = -TARGET_EFAULT;
8161 else
8162 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8163 unlock_user(p, arg2, 0);
8164 unlock_user(p2, arg4, 0);
8166 break;
8167 #endif
8168 #ifdef TARGET_NR_unlink
8169 case TARGET_NR_unlink:
8170 if (!(p = lock_user_string(arg1)))
8171 goto efault;
8172 ret = get_errno(unlink(p));
8173 unlock_user(p, arg1, 0);
8174 break;
8175 #endif
8176 #if defined(TARGET_NR_unlinkat)
8177 case TARGET_NR_unlinkat:
8178 if (!(p = lock_user_string(arg2)))
8179 goto efault;
8180 ret = get_errno(unlinkat(arg1, p, arg3));
8181 unlock_user(p, arg2, 0);
8182 break;
8183 #endif
8184 case TARGET_NR_execve:
8186 char **argp, **envp;
8187 int argc, envc;
8188 abi_ulong gp;
8189 abi_ulong guest_argp;
8190 abi_ulong guest_envp;
8191 abi_ulong addr;
8192 char **q;
8193 int total_size = 0;
8195 argc = 0;
8196 guest_argp = arg2;
8197 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8198 if (get_user_ual(addr, gp))
8199 goto efault;
8200 if (!addr)
8201 break;
8202 argc++;
8204 envc = 0;
8205 guest_envp = arg3;
8206 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8207 if (get_user_ual(addr, gp))
8208 goto efault;
8209 if (!addr)
8210 break;
8211 envc++;
8214 argp = g_new0(char *, argc + 1);
8215 envp = g_new0(char *, envc + 1);
8217 for (gp = guest_argp, q = argp; gp;
8218 gp += sizeof(abi_ulong), q++) {
8219 if (get_user_ual(addr, gp))
8220 goto execve_efault;
8221 if (!addr)
8222 break;
8223 if (!(*q = lock_user_string(addr)))
8224 goto execve_efault;
8225 total_size += strlen(*q) + 1;
8227 *q = NULL;
8229 for (gp = guest_envp, q = envp; gp;
8230 gp += sizeof(abi_ulong), q++) {
8231 if (get_user_ual(addr, gp))
8232 goto execve_efault;
8233 if (!addr)
8234 break;
8235 if (!(*q = lock_user_string(addr)))
8236 goto execve_efault;
8237 total_size += strlen(*q) + 1;
8239 *q = NULL;
8241 if (!(p = lock_user_string(arg1)))
8242 goto execve_efault;
8243 /* Although execve() is not an interruptible syscall it is
8244 * a special case where we must use the safe_syscall wrapper:
8245 * if we allow a signal to happen before we make the host
8246 * syscall then we will 'lose' it, because at the point of
8247 * execve the process leaves QEMU's control. So we use the
8248 * safe syscall wrapper to ensure that we either take the
8249 * signal as a guest signal, or else it does not happen
8250 * before the execve completes and makes it the other
8251 * program's problem.
8253 ret = get_errno(safe_execve(p, argp, envp));
8254 unlock_user(p, arg1, 0);
8256 goto execve_end;
8258 execve_efault:
8259 ret = -TARGET_EFAULT;
8261 execve_end:
8262 for (gp = guest_argp, q = argp; *q;
8263 gp += sizeof(abi_ulong), q++) {
8264 if (get_user_ual(addr, gp)
8265 || !addr)
8266 break;
8267 unlock_user(*q, addr, 0);
8269 for (gp = guest_envp, q = envp; *q;
8270 gp += sizeof(abi_ulong), q++) {
8271 if (get_user_ual(addr, gp)
8272 || !addr)
8273 break;
8274 unlock_user(*q, addr, 0);
8277 g_free(argp);
8278 g_free(envp);
8280 break;
8281 case TARGET_NR_chdir:
8282 if (!(p = lock_user_string(arg1)))
8283 goto efault;
8284 ret = get_errno(chdir(p));
8285 unlock_user(p, arg1, 0);
8286 break;
8287 #ifdef TARGET_NR_time
8288 case TARGET_NR_time:
8290 time_t host_time;
8291 ret = get_errno(time(&host_time));
8292 if (!is_error(ret)
8293 && arg1
8294 && put_user_sal(host_time, arg1))
8295 goto efault;
8297 break;
8298 #endif
8299 #ifdef TARGET_NR_mknod
8300 case TARGET_NR_mknod:
8301 if (!(p = lock_user_string(arg1)))
8302 goto efault;
8303 ret = get_errno(mknod(p, arg2, arg3));
8304 unlock_user(p, arg1, 0);
8305 break;
8306 #endif
8307 #if defined(TARGET_NR_mknodat)
8308 case TARGET_NR_mknodat:
8309 if (!(p = lock_user_string(arg2)))
8310 goto efault;
8311 ret = get_errno(mknodat(arg1, p, arg3, arg4));
8312 unlock_user(p, arg2, 0);
8313 break;
8314 #endif
8315 #ifdef TARGET_NR_chmod
8316 case TARGET_NR_chmod:
8317 if (!(p = lock_user_string(arg1)))
8318 goto efault;
8319 ret = get_errno(chmod(p, arg2));
8320 unlock_user(p, arg1, 0);
8321 break;
8322 #endif
8323 #ifdef TARGET_NR_break
8324 case TARGET_NR_break:
8325 goto unimplemented;
8326 #endif
8327 #ifdef TARGET_NR_oldstat
8328 case TARGET_NR_oldstat:
8329 goto unimplemented;
8330 #endif
8331 case TARGET_NR_lseek:
8332 ret = get_errno(lseek(arg1, arg2, arg3));
8333 break;
8334 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8335 /* Alpha specific */
8336 case TARGET_NR_getxpid:
8337 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8338 ret = get_errno(getpid());
8339 break;
8340 #endif
8341 #ifdef TARGET_NR_getpid
8342 case TARGET_NR_getpid:
8343 ret = get_errno(getpid());
8344 break;
8345 #endif
8346 case TARGET_NR_mount:
8348 /* need to look at the data field */
8349 void *p2, *p3;
8351 if (arg1) {
8352 p = lock_user_string(arg1);
8353 if (!p) {
8354 goto efault;
8356 } else {
8357 p = NULL;
8360 p2 = lock_user_string(arg2);
8361 if (!p2) {
8362 if (arg1) {
8363 unlock_user(p, arg1, 0);
8365 goto efault;
8368 if (arg3) {
8369 p3 = lock_user_string(arg3);
8370 if (!p3) {
8371 if (arg1) {
8372 unlock_user(p, arg1, 0);
8374 unlock_user(p2, arg2, 0);
8375 goto efault;
8377 } else {
8378 p3 = NULL;
8381 /* FIXME - arg5 should be locked, but it isn't clear how to
8382 * do that since it's not guaranteed to be a NULL-terminated
8383 * string.
8385 if (!arg5) {
8386 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8387 } else {
8388 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8390 ret = get_errno(ret);
8392 if (arg1) {
8393 unlock_user(p, arg1, 0);
8395 unlock_user(p2, arg2, 0);
8396 if (arg3) {
8397 unlock_user(p3, arg3, 0);
8400 break;
8401 #ifdef TARGET_NR_umount
8402 case TARGET_NR_umount:
8403 if (!(p = lock_user_string(arg1)))
8404 goto efault;
8405 ret = get_errno(umount(p));
8406 unlock_user(p, arg1, 0);
8407 break;
8408 #endif
8409 #ifdef TARGET_NR_stime /* not on alpha */
8410 case TARGET_NR_stime:
8412 time_t host_time;
8413 if (get_user_sal(host_time, arg1))
8414 goto efault;
8415 ret = get_errno(stime(&host_time));
8417 break;
8418 #endif
8419 case TARGET_NR_ptrace:
8420 goto unimplemented;
8421 #ifdef TARGET_NR_alarm /* not on alpha */
8422 case TARGET_NR_alarm:
8423 ret = alarm(arg1);
8424 break;
8425 #endif
8426 #ifdef TARGET_NR_oldfstat
8427 case TARGET_NR_oldfstat:
8428 goto unimplemented;
8429 #endif
8430 #ifdef TARGET_NR_pause /* not on alpha */
8431 case TARGET_NR_pause:
8432 if (!block_signals()) {
8433 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8435 ret = -TARGET_EINTR;
8436 break;
8437 #endif
8438 #ifdef TARGET_NR_utime
8439 case TARGET_NR_utime:
8441 struct utimbuf tbuf, *host_tbuf;
8442 struct target_utimbuf *target_tbuf;
8443 if (arg2) {
8444 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8445 goto efault;
8446 tbuf.actime = tswapal(target_tbuf->actime);
8447 tbuf.modtime = tswapal(target_tbuf->modtime);
8448 unlock_user_struct(target_tbuf, arg2, 0);
8449 host_tbuf = &tbuf;
8450 } else {
8451 host_tbuf = NULL;
8453 if (!(p = lock_user_string(arg1)))
8454 goto efault;
8455 ret = get_errno(utime(p, host_tbuf));
8456 unlock_user(p, arg1, 0);
8458 break;
8459 #endif
8460 #ifdef TARGET_NR_utimes
8461 case TARGET_NR_utimes:
8463 struct timeval *tvp, tv[2];
8464 if (arg2) {
8465 if (copy_from_user_timeval(&tv[0], arg2)
8466 || copy_from_user_timeval(&tv[1],
8467 arg2 + sizeof(struct target_timeval)))
8468 goto efault;
8469 tvp = tv;
8470 } else {
8471 tvp = NULL;
8473 if (!(p = lock_user_string(arg1)))
8474 goto efault;
8475 ret = get_errno(utimes(p, tvp));
8476 unlock_user(p, arg1, 0);
8478 break;
8479 #endif
8480 #if defined(TARGET_NR_futimesat)
8481 case TARGET_NR_futimesat:
8483 struct timeval *tvp, tv[2];
8484 if (arg3) {
8485 if (copy_from_user_timeval(&tv[0], arg3)
8486 || copy_from_user_timeval(&tv[1],
8487 arg3 + sizeof(struct target_timeval)))
8488 goto efault;
8489 tvp = tv;
8490 } else {
8491 tvp = NULL;
8493 if (!(p = lock_user_string(arg2)))
8494 goto efault;
8495 ret = get_errno(futimesat(arg1, path(p), tvp));
8496 unlock_user(p, arg2, 0);
8498 break;
8499 #endif
8500 #ifdef TARGET_NR_stty
8501 case TARGET_NR_stty:
8502 goto unimplemented;
8503 #endif
8504 #ifdef TARGET_NR_gtty
8505 case TARGET_NR_gtty:
8506 goto unimplemented;
8507 #endif
8508 #ifdef TARGET_NR_access
8509 case TARGET_NR_access:
8510 if (!(p = lock_user_string(arg1)))
8511 goto efault;
8512 ret = get_errno(access(path(p), arg2));
8513 unlock_user(p, arg1, 0);
8514 break;
8515 #endif
8516 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8517 case TARGET_NR_faccessat:
8518 if (!(p = lock_user_string(arg2)))
8519 goto efault;
8520 ret = get_errno(faccessat(arg1, p, arg3, 0));
8521 unlock_user(p, arg2, 0);
8522 break;
8523 #endif
8524 #ifdef TARGET_NR_nice /* not on alpha */
8525 case TARGET_NR_nice:
8526 ret = get_errno(nice(arg1));
8527 break;
8528 #endif
8529 #ifdef TARGET_NR_ftime
8530 case TARGET_NR_ftime:
8531 goto unimplemented;
8532 #endif
8533 case TARGET_NR_sync:
8534 sync();
8535 ret = 0;
8536 break;
8537 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8538 case TARGET_NR_syncfs:
8539 ret = get_errno(syncfs(arg1));
8540 break;
8541 #endif
8542 case TARGET_NR_kill:
8543 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8544 break;
8545 #ifdef TARGET_NR_rename
8546 case TARGET_NR_rename:
8548 void *p2;
8549 p = lock_user_string(arg1);
8550 p2 = lock_user_string(arg2);
8551 if (!p || !p2)
8552 ret = -TARGET_EFAULT;
8553 else
8554 ret = get_errno(rename(p, p2));
8555 unlock_user(p2, arg2, 0);
8556 unlock_user(p, arg1, 0);
8558 break;
8559 #endif
8560 #if defined(TARGET_NR_renameat)
8561 case TARGET_NR_renameat:
8563 void *p2;
8564 p = lock_user_string(arg2);
8565 p2 = lock_user_string(arg4);
8566 if (!p || !p2)
8567 ret = -TARGET_EFAULT;
8568 else
8569 ret = get_errno(renameat(arg1, p, arg3, p2));
8570 unlock_user(p2, arg4, 0);
8571 unlock_user(p, arg2, 0);
8573 break;
8574 #endif
8575 #if defined(TARGET_NR_renameat2)
8576 case TARGET_NR_renameat2:
8578 void *p2;
8579 p = lock_user_string(arg2);
8580 p2 = lock_user_string(arg4);
8581 if (!p || !p2) {
8582 ret = -TARGET_EFAULT;
8583 } else {
8584 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8586 unlock_user(p2, arg4, 0);
8587 unlock_user(p, arg2, 0);
8589 break;
8590 #endif
8591 #ifdef TARGET_NR_mkdir
8592 case TARGET_NR_mkdir:
8593 if (!(p = lock_user_string(arg1)))
8594 goto efault;
8595 ret = get_errno(mkdir(p, arg2));
8596 unlock_user(p, arg1, 0);
8597 break;
8598 #endif
8599 #if defined(TARGET_NR_mkdirat)
8600 case TARGET_NR_mkdirat:
8601 if (!(p = lock_user_string(arg2)))
8602 goto efault;
8603 ret = get_errno(mkdirat(arg1, p, arg3));
8604 unlock_user(p, arg2, 0);
8605 break;
8606 #endif
8607 #ifdef TARGET_NR_rmdir
8608 case TARGET_NR_rmdir:
8609 if (!(p = lock_user_string(arg1)))
8610 goto efault;
8611 ret = get_errno(rmdir(p));
8612 unlock_user(p, arg1, 0);
8613 break;
8614 #endif
8615 case TARGET_NR_dup:
8616 ret = get_errno(dup(arg1));
8617 if (ret >= 0) {
8618 fd_trans_dup(arg1, ret);
8620 break;
8621 #ifdef TARGET_NR_pipe
8622 case TARGET_NR_pipe:
8623 ret = do_pipe(cpu_env, arg1, 0, 0);
8624 break;
8625 #endif
8626 #ifdef TARGET_NR_pipe2
8627 case TARGET_NR_pipe2:
8628 ret = do_pipe(cpu_env, arg1,
8629 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8630 break;
8631 #endif
8632 case TARGET_NR_times:
8634 struct target_tms *tmsp;
8635 struct tms tms;
8636 ret = get_errno(times(&tms));
8637 if (arg1) {
8638 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8639 if (!tmsp)
8640 goto efault;
8641 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8642 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8643 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8644 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8646 if (!is_error(ret))
8647 ret = host_to_target_clock_t(ret);
8649 break;
8650 #ifdef TARGET_NR_prof
8651 case TARGET_NR_prof:
8652 goto unimplemented;
8653 #endif
8654 #ifdef TARGET_NR_signal
8655 case TARGET_NR_signal:
8656 goto unimplemented;
8657 #endif
8658 case TARGET_NR_acct:
8659 if (arg1 == 0) {
8660 ret = get_errno(acct(NULL));
8661 } else {
8662 if (!(p = lock_user_string(arg1)))
8663 goto efault;
8664 ret = get_errno(acct(path(p)));
8665 unlock_user(p, arg1, 0);
8667 break;
8668 #ifdef TARGET_NR_umount2
8669 case TARGET_NR_umount2:
8670 if (!(p = lock_user_string(arg1)))
8671 goto efault;
8672 ret = get_errno(umount2(p, arg2));
8673 unlock_user(p, arg1, 0);
8674 break;
8675 #endif
8676 #ifdef TARGET_NR_lock
8677 case TARGET_NR_lock:
8678 goto unimplemented;
8679 #endif
8680 case TARGET_NR_ioctl:
8681 ret = do_ioctl(arg1, arg2, arg3);
8682 break;
8683 #ifdef TARGET_NR_fcntl
8684 case TARGET_NR_fcntl:
8685 ret = do_fcntl(arg1, arg2, arg3);
8686 break;
8687 #endif
8688 #ifdef TARGET_NR_mpx
8689 case TARGET_NR_mpx:
8690 goto unimplemented;
8691 #endif
8692 case TARGET_NR_setpgid:
8693 ret = get_errno(setpgid(arg1, arg2));
8694 break;
8695 #ifdef TARGET_NR_ulimit
8696 case TARGET_NR_ulimit:
8697 goto unimplemented;
8698 #endif
8699 #ifdef TARGET_NR_oldolduname
8700 case TARGET_NR_oldolduname:
8701 goto unimplemented;
8702 #endif
8703 case TARGET_NR_umask:
8704 ret = get_errno(umask(arg1));
8705 break;
8706 case TARGET_NR_chroot:
8707 if (!(p = lock_user_string(arg1)))
8708 goto efault;
8709 ret = get_errno(chroot(p));
8710 unlock_user(p, arg1, 0);
8711 break;
8712 #ifdef TARGET_NR_ustat
8713 case TARGET_NR_ustat:
8714 goto unimplemented;
8715 #endif
8716 #ifdef TARGET_NR_dup2
8717 case TARGET_NR_dup2:
8718 ret = get_errno(dup2(arg1, arg2));
8719 if (ret >= 0) {
8720 fd_trans_dup(arg1, arg2);
8722 break;
8723 #endif
8724 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8725 case TARGET_NR_dup3:
8727 int host_flags;
8729 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8730 return -EINVAL;
8732 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8733 ret = get_errno(dup3(arg1, arg2, host_flags));
8734 if (ret >= 0) {
8735 fd_trans_dup(arg1, arg2);
8737 break;
8739 #endif
8740 #ifdef TARGET_NR_getppid /* not on alpha */
8741 case TARGET_NR_getppid:
8742 ret = get_errno(getppid());
8743 break;
8744 #endif
8745 #ifdef TARGET_NR_getpgrp
8746 case TARGET_NR_getpgrp:
8747 ret = get_errno(getpgrp());
8748 break;
8749 #endif
8750 case TARGET_NR_setsid:
8751 ret = get_errno(setsid());
8752 break;
8753 #ifdef TARGET_NR_sigaction
8754 case TARGET_NR_sigaction:
8756 #if defined(TARGET_ALPHA)
8757 struct target_sigaction act, oact, *pact = 0;
8758 struct target_old_sigaction *old_act;
8759 if (arg2) {
8760 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8761 goto efault;
8762 act._sa_handler = old_act->_sa_handler;
8763 target_siginitset(&act.sa_mask, old_act->sa_mask);
8764 act.sa_flags = old_act->sa_flags;
8765 act.sa_restorer = 0;
8766 unlock_user_struct(old_act, arg2, 0);
8767 pact = &act;
8769 ret = get_errno(do_sigaction(arg1, pact, &oact));
8770 if (!is_error(ret) && arg3) {
8771 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8772 goto efault;
8773 old_act->_sa_handler = oact._sa_handler;
8774 old_act->sa_mask = oact.sa_mask.sig[0];
8775 old_act->sa_flags = oact.sa_flags;
8776 unlock_user_struct(old_act, arg3, 1);
8778 #elif defined(TARGET_MIPS)
8779 struct target_sigaction act, oact, *pact, *old_act;
8781 if (arg2) {
8782 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8783 goto efault;
8784 act._sa_handler = old_act->_sa_handler;
8785 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8786 act.sa_flags = old_act->sa_flags;
8787 unlock_user_struct(old_act, arg2, 0);
8788 pact = &act;
8789 } else {
8790 pact = NULL;
8793 ret = get_errno(do_sigaction(arg1, pact, &oact));
8795 if (!is_error(ret) && arg3) {
8796 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8797 goto efault;
8798 old_act->_sa_handler = oact._sa_handler;
8799 old_act->sa_flags = oact.sa_flags;
8800 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8801 old_act->sa_mask.sig[1] = 0;
8802 old_act->sa_mask.sig[2] = 0;
8803 old_act->sa_mask.sig[3] = 0;
8804 unlock_user_struct(old_act, arg3, 1);
8806 #else
8807 struct target_old_sigaction *old_act;
8808 struct target_sigaction act, oact, *pact;
8809 if (arg2) {
8810 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8811 goto efault;
8812 act._sa_handler = old_act->_sa_handler;
8813 target_siginitset(&act.sa_mask, old_act->sa_mask);
8814 act.sa_flags = old_act->sa_flags;
8815 act.sa_restorer = old_act->sa_restorer;
8816 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8817 act.ka_restorer = 0;
8818 #endif
8819 unlock_user_struct(old_act, arg2, 0);
8820 pact = &act;
8821 } else {
8822 pact = NULL;
8824 ret = get_errno(do_sigaction(arg1, pact, &oact));
8825 if (!is_error(ret) && arg3) {
8826 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8827 goto efault;
8828 old_act->_sa_handler = oact._sa_handler;
8829 old_act->sa_mask = oact.sa_mask.sig[0];
8830 old_act->sa_flags = oact.sa_flags;
8831 old_act->sa_restorer = oact.sa_restorer;
8832 unlock_user_struct(old_act, arg3, 1);
8834 #endif
8836 break;
8837 #endif
8838 case TARGET_NR_rt_sigaction:
8840 #if defined(TARGET_ALPHA)
8841 /* For Alpha and SPARC this is a 5 argument syscall, with
8842 * a 'restorer' parameter which must be copied into the
8843 * sa_restorer field of the sigaction struct.
8844 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8845 * and arg5 is the sigsetsize.
8846 * Alpha also has a separate rt_sigaction struct that it uses
8847 * here; SPARC uses the usual sigaction struct.
8849 struct target_rt_sigaction *rt_act;
8850 struct target_sigaction act, oact, *pact = 0;
8852 if (arg4 != sizeof(target_sigset_t)) {
8853 ret = -TARGET_EINVAL;
8854 break;
8856 if (arg2) {
8857 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8858 goto efault;
8859 act._sa_handler = rt_act->_sa_handler;
8860 act.sa_mask = rt_act->sa_mask;
8861 act.sa_flags = rt_act->sa_flags;
8862 act.sa_restorer = arg5;
8863 unlock_user_struct(rt_act, arg2, 0);
8864 pact = &act;
8866 ret = get_errno(do_sigaction(arg1, pact, &oact));
8867 if (!is_error(ret) && arg3) {
8868 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8869 goto efault;
8870 rt_act->_sa_handler = oact._sa_handler;
8871 rt_act->sa_mask = oact.sa_mask;
8872 rt_act->sa_flags = oact.sa_flags;
8873 unlock_user_struct(rt_act, arg3, 1);
8875 #else
8876 #ifdef TARGET_SPARC
8877 target_ulong restorer = arg4;
8878 target_ulong sigsetsize = arg5;
8879 #else
8880 target_ulong sigsetsize = arg4;
8881 #endif
8882 struct target_sigaction *act;
8883 struct target_sigaction *oact;
8885 if (sigsetsize != sizeof(target_sigset_t)) {
8886 ret = -TARGET_EINVAL;
8887 break;
8889 if (arg2) {
8890 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8891 goto efault;
8893 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8894 act->ka_restorer = restorer;
8895 #endif
8896 } else {
8897 act = NULL;
8899 if (arg3) {
8900 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8901 ret = -TARGET_EFAULT;
8902 goto rt_sigaction_fail;
8904 } else
8905 oact = NULL;
8906 ret = get_errno(do_sigaction(arg1, act, oact));
8907 rt_sigaction_fail:
8908 if (act)
8909 unlock_user_struct(act, arg2, 0);
8910 if (oact)
8911 unlock_user_struct(oact, arg3, 1);
8912 #endif
8914 break;
8915 #ifdef TARGET_NR_sgetmask /* not on alpha */
8916 case TARGET_NR_sgetmask:
8918 sigset_t cur_set;
8919 abi_ulong target_set;
8920 ret = do_sigprocmask(0, NULL, &cur_set);
8921 if (!ret) {
8922 host_to_target_old_sigset(&target_set, &cur_set);
8923 ret = target_set;
8926 break;
8927 #endif
8928 #ifdef TARGET_NR_ssetmask /* not on alpha */
8929 case TARGET_NR_ssetmask:
8931 sigset_t set, oset;
8932 abi_ulong target_set = arg1;
8933 target_to_host_old_sigset(&set, &target_set);
8934 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8935 if (!ret) {
8936 host_to_target_old_sigset(&target_set, &oset);
8937 ret = target_set;
8940 break;
8941 #endif
8942 #ifdef TARGET_NR_sigprocmask
8943 case TARGET_NR_sigprocmask:
8945 #if defined(TARGET_ALPHA)
8946 sigset_t set, oldset;
8947 abi_ulong mask;
8948 int how;
8950 switch (arg1) {
8951 case TARGET_SIG_BLOCK:
8952 how = SIG_BLOCK;
8953 break;
8954 case TARGET_SIG_UNBLOCK:
8955 how = SIG_UNBLOCK;
8956 break;
8957 case TARGET_SIG_SETMASK:
8958 how = SIG_SETMASK;
8959 break;
8960 default:
8961 ret = -TARGET_EINVAL;
8962 goto fail;
8964 mask = arg2;
8965 target_to_host_old_sigset(&set, &mask);
8967 ret = do_sigprocmask(how, &set, &oldset);
8968 if (!is_error(ret)) {
8969 host_to_target_old_sigset(&mask, &oldset);
8970 ret = mask;
8971 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8973 #else
8974 sigset_t set, oldset, *set_ptr;
8975 int how;
8977 if (arg2) {
8978 switch (arg1) {
8979 case TARGET_SIG_BLOCK:
8980 how = SIG_BLOCK;
8981 break;
8982 case TARGET_SIG_UNBLOCK:
8983 how = SIG_UNBLOCK;
8984 break;
8985 case TARGET_SIG_SETMASK:
8986 how = SIG_SETMASK;
8987 break;
8988 default:
8989 ret = -TARGET_EINVAL;
8990 goto fail;
8992 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8993 goto efault;
8994 target_to_host_old_sigset(&set, p);
8995 unlock_user(p, arg2, 0);
8996 set_ptr = &set;
8997 } else {
8998 how = 0;
8999 set_ptr = NULL;
9001 ret = do_sigprocmask(how, set_ptr, &oldset);
9002 if (!is_error(ret) && arg3) {
9003 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9004 goto efault;
9005 host_to_target_old_sigset(p, &oldset);
9006 unlock_user(p, arg3, sizeof(target_sigset_t));
9008 #endif
9010 break;
9011 #endif
9012 case TARGET_NR_rt_sigprocmask:
9014 int how = arg1;
9015 sigset_t set, oldset, *set_ptr;
9017 if (arg4 != sizeof(target_sigset_t)) {
9018 ret = -TARGET_EINVAL;
9019 break;
9022 if (arg2) {
9023 switch(how) {
9024 case TARGET_SIG_BLOCK:
9025 how = SIG_BLOCK;
9026 break;
9027 case TARGET_SIG_UNBLOCK:
9028 how = SIG_UNBLOCK;
9029 break;
9030 case TARGET_SIG_SETMASK:
9031 how = SIG_SETMASK;
9032 break;
9033 default:
9034 ret = -TARGET_EINVAL;
9035 goto fail;
9037 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9038 goto efault;
9039 target_to_host_sigset(&set, p);
9040 unlock_user(p, arg2, 0);
9041 set_ptr = &set;
9042 } else {
9043 how = 0;
9044 set_ptr = NULL;
9046 ret = do_sigprocmask(how, set_ptr, &oldset);
9047 if (!is_error(ret) && arg3) {
9048 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9049 goto efault;
9050 host_to_target_sigset(p, &oldset);
9051 unlock_user(p, arg3, sizeof(target_sigset_t));
9054 break;
9055 #ifdef TARGET_NR_sigpending
9056 case TARGET_NR_sigpending:
9058 sigset_t set;
9059 ret = get_errno(sigpending(&set));
9060 if (!is_error(ret)) {
9061 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9062 goto efault;
9063 host_to_target_old_sigset(p, &set);
9064 unlock_user(p, arg1, sizeof(target_sigset_t));
9067 break;
9068 #endif
9069 case TARGET_NR_rt_sigpending:
9071 sigset_t set;
9073 /* Yes, this check is >, not != like most. We follow the kernel's
9074 * logic and it does it like this because it implements
9075 * NR_sigpending through the same code path, and in that case
9076 * the old_sigset_t is smaller in size.
9078 if (arg2 > sizeof(target_sigset_t)) {
9079 ret = -TARGET_EINVAL;
9080 break;
9083 ret = get_errno(sigpending(&set));
9084 if (!is_error(ret)) {
9085 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9086 goto efault;
9087 host_to_target_sigset(p, &set);
9088 unlock_user(p, arg1, sizeof(target_sigset_t));
9091 break;
9092 #ifdef TARGET_NR_sigsuspend
9093 case TARGET_NR_sigsuspend:
9095 TaskState *ts = cpu->opaque;
9096 #if defined(TARGET_ALPHA)
9097 abi_ulong mask = arg1;
9098 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9099 #else
9100 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9101 goto efault;
9102 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9103 unlock_user(p, arg1, 0);
9104 #endif
9105 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9106 SIGSET_T_SIZE));
9107 if (ret != -TARGET_ERESTARTSYS) {
9108 ts->in_sigsuspend = 1;
9111 break;
9112 #endif
9113 case TARGET_NR_rt_sigsuspend:
9115 TaskState *ts = cpu->opaque;
9117 if (arg2 != sizeof(target_sigset_t)) {
9118 ret = -TARGET_EINVAL;
9119 break;
9121 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9122 goto efault;
9123 target_to_host_sigset(&ts->sigsuspend_mask, p);
9124 unlock_user(p, arg1, 0);
9125 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9126 SIGSET_T_SIZE));
9127 if (ret != -TARGET_ERESTARTSYS) {
9128 ts->in_sigsuspend = 1;
9131 break;
9132 case TARGET_NR_rt_sigtimedwait:
9134 sigset_t set;
9135 struct timespec uts, *puts;
9136 siginfo_t uinfo;
9138 if (arg4 != sizeof(target_sigset_t)) {
9139 ret = -TARGET_EINVAL;
9140 break;
9143 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9144 goto efault;
9145 target_to_host_sigset(&set, p);
9146 unlock_user(p, arg1, 0);
9147 if (arg3) {
9148 puts = &uts;
9149 target_to_host_timespec(puts, arg3);
9150 } else {
9151 puts = NULL;
9153 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9154 SIGSET_T_SIZE));
9155 if (!is_error(ret)) {
9156 if (arg2) {
9157 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9159 if (!p) {
9160 goto efault;
9162 host_to_target_siginfo(p, &uinfo);
9163 unlock_user(p, arg2, sizeof(target_siginfo_t));
9165 ret = host_to_target_signal(ret);
9168 break;
9169 case TARGET_NR_rt_sigqueueinfo:
9171 siginfo_t uinfo;
9173 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9174 if (!p) {
9175 goto efault;
9177 target_to_host_siginfo(&uinfo, p);
9178 unlock_user(p, arg3, 0);
9179 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9181 break;
9182 case TARGET_NR_rt_tgsigqueueinfo:
9184 siginfo_t uinfo;
9186 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9187 if (!p) {
9188 goto efault;
9190 target_to_host_siginfo(&uinfo, p);
9191 unlock_user(p, arg4, 0);
9192 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9194 break;
9195 #ifdef TARGET_NR_sigreturn
9196 case TARGET_NR_sigreturn:
9197 if (block_signals()) {
9198 ret = -TARGET_ERESTARTSYS;
9199 } else {
9200 ret = do_sigreturn(cpu_env);
9202 break;
9203 #endif
9204 case TARGET_NR_rt_sigreturn:
9205 if (block_signals()) {
9206 ret = -TARGET_ERESTARTSYS;
9207 } else {
9208 ret = do_rt_sigreturn(cpu_env);
9210 break;
9211 case TARGET_NR_sethostname:
9212 if (!(p = lock_user_string(arg1)))
9213 goto efault;
9214 ret = get_errno(sethostname(p, arg2));
9215 unlock_user(p, arg1, 0);
9216 break;
9217 case TARGET_NR_setrlimit:
9219 int resource = target_to_host_resource(arg1);
9220 struct target_rlimit *target_rlim;
9221 struct rlimit rlim;
9222 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9223 goto efault;
9224 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9225 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9226 unlock_user_struct(target_rlim, arg2, 0);
9227 ret = get_errno(setrlimit(resource, &rlim));
9229 break;
9230 case TARGET_NR_getrlimit:
9232 int resource = target_to_host_resource(arg1);
9233 struct target_rlimit *target_rlim;
9234 struct rlimit rlim;
9236 ret = get_errno(getrlimit(resource, &rlim));
9237 if (!is_error(ret)) {
9238 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9239 goto efault;
9240 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9241 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9242 unlock_user_struct(target_rlim, arg2, 1);
9245 break;
9246 case TARGET_NR_getrusage:
9248 struct rusage rusage;
9249 ret = get_errno(getrusage(arg1, &rusage));
9250 if (!is_error(ret)) {
9251 ret = host_to_target_rusage(arg2, &rusage);
9254 break;
9255 case TARGET_NR_gettimeofday:
9257 struct timeval tv;
9258 ret = get_errno(gettimeofday(&tv, NULL));
9259 if (!is_error(ret)) {
9260 if (copy_to_user_timeval(arg1, &tv))
9261 goto efault;
9264 break;
9265 case TARGET_NR_settimeofday:
9267 struct timeval tv, *ptv = NULL;
9268 struct timezone tz, *ptz = NULL;
9270 if (arg1) {
9271 if (copy_from_user_timeval(&tv, arg1)) {
9272 goto efault;
9274 ptv = &tv;
9277 if (arg2) {
9278 if (copy_from_user_timezone(&tz, arg2)) {
9279 goto efault;
9281 ptz = &tz;
9284 ret = get_errno(settimeofday(ptv, ptz));
9286 break;
9287 #if defined(TARGET_NR_select)
9288 case TARGET_NR_select:
9289 #if defined(TARGET_WANT_NI_OLD_SELECT)
9290 /* some architectures used to have old_select here
9291 * but now ENOSYS it.
9293 ret = -TARGET_ENOSYS;
9294 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9295 ret = do_old_select(arg1);
9296 #else
9297 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9298 #endif
9299 break;
9300 #endif
9301 #ifdef TARGET_NR_pselect6
9302 case TARGET_NR_pselect6:
9304 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
9305 fd_set rfds, wfds, efds;
9306 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
9307 struct timespec ts, *ts_ptr;
9310 * The 6th arg is actually two args smashed together,
9311 * so we cannot use the C library.
9313 sigset_t set;
9314 struct {
9315 sigset_t *set;
9316 size_t size;
9317 } sig, *sig_ptr;
9319 abi_ulong arg_sigset, arg_sigsize, *arg7;
9320 target_sigset_t *target_sigset;
9322 n = arg1;
9323 rfd_addr = arg2;
9324 wfd_addr = arg3;
9325 efd_addr = arg4;
9326 ts_addr = arg5;
9328 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
9329 if (ret) {
9330 goto fail;
9332 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
9333 if (ret) {
9334 goto fail;
9336 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
9337 if (ret) {
9338 goto fail;
9342 * This takes a timespec, and not a timeval, so we cannot
9343 * use the do_select() helper ...
9345 if (ts_addr) {
9346 if (target_to_host_timespec(&ts, ts_addr)) {
9347 goto efault;
9349 ts_ptr = &ts;
9350 } else {
9351 ts_ptr = NULL;
9354 /* Extract the two packed args for the sigset */
9355 if (arg6) {
9356 sig_ptr = &sig;
9357 sig.size = SIGSET_T_SIZE;
9359 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
9360 if (!arg7) {
9361 goto efault;
9363 arg_sigset = tswapal(arg7[0]);
9364 arg_sigsize = tswapal(arg7[1]);
9365 unlock_user(arg7, arg6, 0);
9367 if (arg_sigset) {
9368 sig.set = &set;
9369 if (arg_sigsize != sizeof(*target_sigset)) {
9370 /* Like the kernel, we enforce correct size sigsets */
9371 ret = -TARGET_EINVAL;
9372 goto fail;
9374 target_sigset = lock_user(VERIFY_READ, arg_sigset,
9375 sizeof(*target_sigset), 1);
9376 if (!target_sigset) {
9377 goto efault;
9379 target_to_host_sigset(&set, target_sigset);
9380 unlock_user(target_sigset, arg_sigset, 0);
9381 } else {
9382 sig.set = NULL;
9384 } else {
9385 sig_ptr = NULL;
9388 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
9389 ts_ptr, sig_ptr));
9391 if (!is_error(ret)) {
9392 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
9393 goto efault;
9394 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
9395 goto efault;
9396 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
9397 goto efault;
9399 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
9400 goto efault;
9403 break;
9404 #endif
9405 #ifdef TARGET_NR_symlink
9406 case TARGET_NR_symlink:
9408 void *p2;
9409 p = lock_user_string(arg1);
9410 p2 = lock_user_string(arg2);
9411 if (!p || !p2)
9412 ret = -TARGET_EFAULT;
9413 else
9414 ret = get_errno(symlink(p, p2));
9415 unlock_user(p2, arg2, 0);
9416 unlock_user(p, arg1, 0);
9418 break;
9419 #endif
9420 #if defined(TARGET_NR_symlinkat)
9421 case TARGET_NR_symlinkat:
9423 void *p2;
9424 p = lock_user_string(arg1);
9425 p2 = lock_user_string(arg3);
9426 if (!p || !p2)
9427 ret = -TARGET_EFAULT;
9428 else
9429 ret = get_errno(symlinkat(p, arg2, p2));
9430 unlock_user(p2, arg3, 0);
9431 unlock_user(p, arg1, 0);
9433 break;
9434 #endif
9435 #ifdef TARGET_NR_oldlstat
9436 case TARGET_NR_oldlstat:
9437 goto unimplemented;
9438 #endif
9439 #ifdef TARGET_NR_readlink
9440 case TARGET_NR_readlink:
9442 void *p2;
9443 p = lock_user_string(arg1);
9444 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9445 if (!p || !p2) {
9446 ret = -TARGET_EFAULT;
9447 } else if (!arg3) {
9448 /* Short circuit this for the magic exe check. */
9449 ret = -TARGET_EINVAL;
9450 } else if (is_proc_myself((const char *)p, "exe")) {
9451 char real[PATH_MAX], *temp;
9452 temp = realpath(exec_path, real);
9453 /* Return value is # of bytes that we wrote to the buffer. */
9454 if (temp == NULL) {
9455 ret = get_errno(-1);
9456 } else {
9457 /* Don't worry about sign mismatch as earlier mapping
9458 * logic would have thrown a bad address error. */
9459 ret = MIN(strlen(real), arg3);
9460 /* We cannot NUL terminate the string. */
9461 memcpy(p2, real, ret);
9463 } else {
9464 ret = get_errno(readlink(path(p), p2, arg3));
9466 unlock_user(p2, arg2, ret);
9467 unlock_user(p, arg1, 0);
9469 break;
9470 #endif
9471 #if defined(TARGET_NR_readlinkat)
9472 case TARGET_NR_readlinkat:
9474 void *p2;
9475 p = lock_user_string(arg2);
9476 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9477 if (!p || !p2) {
9478 ret = -TARGET_EFAULT;
9479 } else if (is_proc_myself((const char *)p, "exe")) {
9480 char real[PATH_MAX], *temp;
9481 temp = realpath(exec_path, real);
9482 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
9483 snprintf((char *)p2, arg4, "%s", real);
9484 } else {
9485 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9487 unlock_user(p2, arg3, ret);
9488 unlock_user(p, arg2, 0);
9490 break;
9491 #endif
9492 #ifdef TARGET_NR_uselib
9493 case TARGET_NR_uselib:
9494 goto unimplemented;
9495 #endif
9496 #ifdef TARGET_NR_swapon
9497 case TARGET_NR_swapon:
9498 if (!(p = lock_user_string(arg1)))
9499 goto efault;
9500 ret = get_errno(swapon(p, arg2));
9501 unlock_user(p, arg1, 0);
9502 break;
9503 #endif
9504 case TARGET_NR_reboot:
9505 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9506 /* arg4 must be ignored in all other cases */
9507 p = lock_user_string(arg4);
9508 if (!p) {
9509 goto efault;
9511 ret = get_errno(reboot(arg1, arg2, arg3, p));
9512 unlock_user(p, arg4, 0);
9513 } else {
9514 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9516 break;
9517 #ifdef TARGET_NR_readdir
9518 case TARGET_NR_readdir:
9519 goto unimplemented;
9520 #endif
9521 #ifdef TARGET_NR_mmap
9522 case TARGET_NR_mmap:
9523 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9524 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9525 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9526 || defined(TARGET_S390X)
9528 abi_ulong *v;
9529 abi_ulong v1, v2, v3, v4, v5, v6;
9530 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9531 goto efault;
9532 v1 = tswapal(v[0]);
9533 v2 = tswapal(v[1]);
9534 v3 = tswapal(v[2]);
9535 v4 = tswapal(v[3]);
9536 v5 = tswapal(v[4]);
9537 v6 = tswapal(v[5]);
9538 unlock_user(v, arg1, 0);
9539 ret = get_errno(target_mmap(v1, v2, v3,
9540 target_to_host_bitmask(v4, mmap_flags_tbl),
9541 v5, v6));
9543 #else
9544 ret = get_errno(target_mmap(arg1, arg2, arg3,
9545 target_to_host_bitmask(arg4, mmap_flags_tbl),
9546 arg5,
9547 arg6));
9548 #endif
9549 break;
9550 #endif
9551 #ifdef TARGET_NR_mmap2
9552 case TARGET_NR_mmap2:
9553 #ifndef MMAP_SHIFT
9554 #define MMAP_SHIFT 12
9555 #endif
9556 ret = get_errno(target_mmap(arg1, arg2, arg3,
9557 target_to_host_bitmask(arg4, mmap_flags_tbl),
9558 arg5,
9559 arg6 << MMAP_SHIFT));
9560 break;
9561 #endif
9562 case TARGET_NR_munmap:
9563 ret = get_errno(target_munmap(arg1, arg2));
9564 break;
9565 case TARGET_NR_mprotect:
9567 TaskState *ts = cpu->opaque;
9568 /* Special hack to detect libc making the stack executable. */
9569 if ((arg3 & PROT_GROWSDOWN)
9570 && arg1 >= ts->info->stack_limit
9571 && arg1 <= ts->info->start_stack) {
9572 arg3 &= ~PROT_GROWSDOWN;
9573 arg2 = arg2 + arg1 - ts->info->stack_limit;
9574 arg1 = ts->info->stack_limit;
9577 ret = get_errno(target_mprotect(arg1, arg2, arg3));
9578 break;
9579 #ifdef TARGET_NR_mremap
9580 case TARGET_NR_mremap:
9581 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9582 break;
9583 #endif
9584 /* ??? msync/mlock/munlock are broken for softmmu. */
9585 #ifdef TARGET_NR_msync
9586 case TARGET_NR_msync:
9587 ret = get_errno(msync(g2h(arg1), arg2, arg3));
9588 break;
9589 #endif
9590 #ifdef TARGET_NR_mlock
9591 case TARGET_NR_mlock:
9592 ret = get_errno(mlock(g2h(arg1), arg2));
9593 break;
9594 #endif
9595 #ifdef TARGET_NR_munlock
9596 case TARGET_NR_munlock:
9597 ret = get_errno(munlock(g2h(arg1), arg2));
9598 break;
9599 #endif
9600 #ifdef TARGET_NR_mlockall
9601 case TARGET_NR_mlockall:
9602 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9603 break;
9604 #endif
9605 #ifdef TARGET_NR_munlockall
9606 case TARGET_NR_munlockall:
9607 ret = get_errno(munlockall());
9608 break;
9609 #endif
9610 case TARGET_NR_truncate:
9611 if (!(p = lock_user_string(arg1)))
9612 goto efault;
9613 ret = get_errno(truncate(p, arg2));
9614 unlock_user(p, arg1, 0);
9615 break;
9616 case TARGET_NR_ftruncate:
9617 ret = get_errno(ftruncate(arg1, arg2));
9618 break;
9619 case TARGET_NR_fchmod:
9620 ret = get_errno(fchmod(arg1, arg2));
9621 break;
9622 #if defined(TARGET_NR_fchmodat)
9623 case TARGET_NR_fchmodat:
9624 if (!(p = lock_user_string(arg2)))
9625 goto efault;
9626 ret = get_errno(fchmodat(arg1, p, arg3, 0));
9627 unlock_user(p, arg2, 0);
9628 break;
9629 #endif
9630 case TARGET_NR_getpriority:
9631 /* Note that negative values are valid for getpriority, so we must
9632 differentiate based on errno settings. */
9633 errno = 0;
9634 ret = getpriority(arg1, arg2);
9635 if (ret == -1 && errno != 0) {
9636 ret = -host_to_target_errno(errno);
9637 break;
9639 #ifdef TARGET_ALPHA
9640 /* Return value is the unbiased priority. Signal no error. */
9641 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9642 #else
9643 /* Return value is a biased priority to avoid negative numbers. */
9644 ret = 20 - ret;
9645 #endif
9646 break;
9647 case TARGET_NR_setpriority:
9648 ret = get_errno(setpriority(arg1, arg2, arg3));
9649 break;
9650 #ifdef TARGET_NR_profil
9651 case TARGET_NR_profil:
9652 goto unimplemented;
9653 #endif
9654 case TARGET_NR_statfs:
9655 if (!(p = lock_user_string(arg1)))
9656 goto efault;
9657 ret = get_errno(statfs(path(p), &stfs));
9658 unlock_user(p, arg1, 0);
9659 convert_statfs:
9660 if (!is_error(ret)) {
9661 struct target_statfs *target_stfs;
9663 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9664 goto efault;
9665 __put_user(stfs.f_type, &target_stfs->f_type);
9666 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9667 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9668 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9669 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9670 __put_user(stfs.f_files, &target_stfs->f_files);
9671 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9672 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9673 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9674 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9675 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9676 #ifdef _STATFS_F_FLAGS
9677 __put_user(stfs.f_flags, &target_stfs->f_flags);
9678 #else
9679 __put_user(0, &target_stfs->f_flags);
9680 #endif
9681 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9682 unlock_user_struct(target_stfs, arg2, 1);
9684 break;
9685 case TARGET_NR_fstatfs:
9686 ret = get_errno(fstatfs(arg1, &stfs));
9687 goto convert_statfs;
9688 #ifdef TARGET_NR_statfs64
9689 case TARGET_NR_statfs64:
9690 if (!(p = lock_user_string(arg1)))
9691 goto efault;
9692 ret = get_errno(statfs(path(p), &stfs));
9693 unlock_user(p, arg1, 0);
9694 convert_statfs64:
9695 if (!is_error(ret)) {
9696 struct target_statfs64 *target_stfs;
9698 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9699 goto efault;
9700 __put_user(stfs.f_type, &target_stfs->f_type);
9701 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9702 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9703 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9704 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9705 __put_user(stfs.f_files, &target_stfs->f_files);
9706 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9707 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9708 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9709 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9710 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9711 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9712 unlock_user_struct(target_stfs, arg3, 1);
9714 break;
9715 case TARGET_NR_fstatfs64:
9716 ret = get_errno(fstatfs(arg1, &stfs));
9717 goto convert_statfs64;
9718 #endif
9719 #ifdef TARGET_NR_ioperm
9720 case TARGET_NR_ioperm:
9721 goto unimplemented;
9722 #endif
9723 #ifdef TARGET_NR_socketcall
9724 case TARGET_NR_socketcall:
9725 ret = do_socketcall(arg1, arg2);
9726 break;
9727 #endif
9728 #ifdef TARGET_NR_accept
9729 case TARGET_NR_accept:
9730 ret = do_accept4(arg1, arg2, arg3, 0);
9731 break;
9732 #endif
9733 #ifdef TARGET_NR_accept4
9734 case TARGET_NR_accept4:
9735 ret = do_accept4(arg1, arg2, arg3, arg4);
9736 break;
9737 #endif
9738 #ifdef TARGET_NR_bind
9739 case TARGET_NR_bind:
9740 ret = do_bind(arg1, arg2, arg3);
9741 break;
9742 #endif
9743 #ifdef TARGET_NR_connect
9744 case TARGET_NR_connect:
9745 ret = do_connect(arg1, arg2, arg3);
9746 break;
9747 #endif
9748 #ifdef TARGET_NR_getpeername
9749 case TARGET_NR_getpeername:
9750 ret = do_getpeername(arg1, arg2, arg3);
9751 break;
9752 #endif
9753 #ifdef TARGET_NR_getsockname
9754 case TARGET_NR_getsockname:
9755 ret = do_getsockname(arg1, arg2, arg3);
9756 break;
9757 #endif
9758 #ifdef TARGET_NR_getsockopt
9759 case TARGET_NR_getsockopt:
9760 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9761 break;
9762 #endif
9763 #ifdef TARGET_NR_listen
9764 case TARGET_NR_listen:
9765 ret = get_errno(listen(arg1, arg2));
9766 break;
9767 #endif
9768 #ifdef TARGET_NR_recv
9769 case TARGET_NR_recv:
9770 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9771 break;
9772 #endif
9773 #ifdef TARGET_NR_recvfrom
9774 case TARGET_NR_recvfrom:
9775 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9776 break;
9777 #endif
9778 #ifdef TARGET_NR_recvmsg
9779 case TARGET_NR_recvmsg:
9780 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
9781 break;
9782 #endif
9783 #ifdef TARGET_NR_send
9784 case TARGET_NR_send:
9785 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9786 break;
9787 #endif
9788 #ifdef TARGET_NR_sendmsg
9789 case TARGET_NR_sendmsg:
9790 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
9791 break;
9792 #endif
9793 #ifdef TARGET_NR_sendmmsg
9794 case TARGET_NR_sendmmsg:
9795 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9796 break;
9797 case TARGET_NR_recvmmsg:
9798 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9799 break;
9800 #endif
9801 #ifdef TARGET_NR_sendto
9802 case TARGET_NR_sendto:
9803 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9804 break;
9805 #endif
9806 #ifdef TARGET_NR_shutdown
9807 case TARGET_NR_shutdown:
9808 ret = get_errno(shutdown(arg1, arg2));
9809 break;
9810 #endif
9811 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9812 case TARGET_NR_getrandom:
9813 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9814 if (!p) {
9815 goto efault;
9817 ret = get_errno(getrandom(p, arg2, arg3));
9818 unlock_user(p, arg1, ret);
9819 break;
9820 #endif
9821 #ifdef TARGET_NR_socket
9822 case TARGET_NR_socket:
9823 ret = do_socket(arg1, arg2, arg3);
9824 break;
9825 #endif
9826 #ifdef TARGET_NR_socketpair
9827 case TARGET_NR_socketpair:
9828 ret = do_socketpair(arg1, arg2, arg3, arg4);
9829 break;
9830 #endif
9831 #ifdef TARGET_NR_setsockopt
9832 case TARGET_NR_setsockopt:
9833 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9834 break;
9835 #endif
9836 #if defined(TARGET_NR_syslog)
9837 case TARGET_NR_syslog:
9839 int len = arg2;
9841 switch (arg1) {
9842 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
9843 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
9844 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
9845 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
9846 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
9847 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9848 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
9849 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
9851 ret = get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9853 break;
9854 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
9855 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
9856 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
9858 ret = -TARGET_EINVAL;
9859 if (len < 0) {
9860 goto fail;
9862 ret = 0;
9863 if (len == 0) {
9864 break;
9866 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9867 if (!p) {
9868 ret = -TARGET_EFAULT;
9869 goto fail;
9871 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9872 unlock_user(p, arg2, arg3);
9874 break;
9875 default:
9876 ret = -EINVAL;
9877 break;
9880 break;
9881 #endif
9882 case TARGET_NR_setitimer:
9884 struct itimerval value, ovalue, *pvalue;
9886 if (arg2) {
9887 pvalue = &value;
9888 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9889 || copy_from_user_timeval(&pvalue->it_value,
9890 arg2 + sizeof(struct target_timeval)))
9891 goto efault;
9892 } else {
9893 pvalue = NULL;
9895 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9896 if (!is_error(ret) && arg3) {
9897 if (copy_to_user_timeval(arg3,
9898 &ovalue.it_interval)
9899 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9900 &ovalue.it_value))
9901 goto efault;
9904 break;
9905 case TARGET_NR_getitimer:
9907 struct itimerval value;
9909 ret = get_errno(getitimer(arg1, &value));
9910 if (!is_error(ret) && arg2) {
9911 if (copy_to_user_timeval(arg2,
9912 &value.it_interval)
9913 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9914 &value.it_value))
9915 goto efault;
9918 break;
9919 #ifdef TARGET_NR_stat
9920 case TARGET_NR_stat:
9921 if (!(p = lock_user_string(arg1)))
9922 goto efault;
9923 ret = get_errno(stat(path(p), &st));
9924 unlock_user(p, arg1, 0);
9925 goto do_stat;
9926 #endif
9927 #ifdef TARGET_NR_lstat
9928 case TARGET_NR_lstat:
9929 if (!(p = lock_user_string(arg1)))
9930 goto efault;
9931 ret = get_errno(lstat(path(p), &st));
9932 unlock_user(p, arg1, 0);
9933 goto do_stat;
9934 #endif
9935 case TARGET_NR_fstat:
9937 ret = get_errno(fstat(arg1, &st));
9938 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9939 do_stat:
9940 #endif
9941 if (!is_error(ret)) {
9942 struct target_stat *target_st;
9944 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9945 goto efault;
9946 memset(target_st, 0, sizeof(*target_st));
9947 __put_user(st.st_dev, &target_st->st_dev);
9948 __put_user(st.st_ino, &target_st->st_ino);
9949 __put_user(st.st_mode, &target_st->st_mode);
9950 __put_user(st.st_uid, &target_st->st_uid);
9951 __put_user(st.st_gid, &target_st->st_gid);
9952 __put_user(st.st_nlink, &target_st->st_nlink);
9953 __put_user(st.st_rdev, &target_st->st_rdev);
9954 __put_user(st.st_size, &target_st->st_size);
9955 __put_user(st.st_blksize, &target_st->st_blksize);
9956 __put_user(st.st_blocks, &target_st->st_blocks);
9957 __put_user(st.st_atime, &target_st->target_st_atime);
9958 __put_user(st.st_mtime, &target_st->target_st_mtime);
9959 __put_user(st.st_ctime, &target_st->target_st_ctime);
9960 unlock_user_struct(target_st, arg2, 1);
9963 break;
9964 #ifdef TARGET_NR_olduname
9965 case TARGET_NR_olduname:
9966 goto unimplemented;
9967 #endif
9968 #ifdef TARGET_NR_iopl
9969 case TARGET_NR_iopl:
9970 goto unimplemented;
9971 #endif
9972 case TARGET_NR_vhangup:
9973 ret = get_errno(vhangup());
9974 break;
9975 #ifdef TARGET_NR_idle
9976 case TARGET_NR_idle:
9977 goto unimplemented;
9978 #endif
9979 #ifdef TARGET_NR_syscall
9980 case TARGET_NR_syscall:
9981 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9982 arg6, arg7, arg8, 0);
9983 break;
9984 #endif
9985 case TARGET_NR_wait4:
9987 int status;
9988 abi_long status_ptr = arg2;
9989 struct rusage rusage, *rusage_ptr;
9990 abi_ulong target_rusage = arg4;
9991 abi_long rusage_err;
9992 if (target_rusage)
9993 rusage_ptr = &rusage;
9994 else
9995 rusage_ptr = NULL;
9996 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9997 if (!is_error(ret)) {
9998 if (status_ptr && ret) {
9999 status = host_to_target_waitstatus(status);
10000 if (put_user_s32(status, status_ptr))
10001 goto efault;
10003 if (target_rusage) {
10004 rusage_err = host_to_target_rusage(target_rusage, &rusage);
10005 if (rusage_err) {
10006 ret = rusage_err;
10011 break;
10012 #ifdef TARGET_NR_swapoff
10013 case TARGET_NR_swapoff:
10014 if (!(p = lock_user_string(arg1)))
10015 goto efault;
10016 ret = get_errno(swapoff(p));
10017 unlock_user(p, arg1, 0);
10018 break;
10019 #endif
10020 case TARGET_NR_sysinfo:
10022 struct target_sysinfo *target_value;
10023 struct sysinfo value;
10024 ret = get_errno(sysinfo(&value));
10025 if (!is_error(ret) && arg1)
10027 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10028 goto efault;
10029 __put_user(value.uptime, &target_value->uptime);
10030 __put_user(value.loads[0], &target_value->loads[0]);
10031 __put_user(value.loads[1], &target_value->loads[1]);
10032 __put_user(value.loads[2], &target_value->loads[2]);
10033 __put_user(value.totalram, &target_value->totalram);
10034 __put_user(value.freeram, &target_value->freeram);
10035 __put_user(value.sharedram, &target_value->sharedram);
10036 __put_user(value.bufferram, &target_value->bufferram);
10037 __put_user(value.totalswap, &target_value->totalswap);
10038 __put_user(value.freeswap, &target_value->freeswap);
10039 __put_user(value.procs, &target_value->procs);
10040 __put_user(value.totalhigh, &target_value->totalhigh);
10041 __put_user(value.freehigh, &target_value->freehigh);
10042 __put_user(value.mem_unit, &target_value->mem_unit);
10043 unlock_user_struct(target_value, arg1, 1);
10046 break;
10047 #ifdef TARGET_NR_ipc
10048 case TARGET_NR_ipc:
10049 ret = do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10050 break;
10051 #endif
10052 #ifdef TARGET_NR_semget
10053 case TARGET_NR_semget:
10054 ret = get_errno(semget(arg1, arg2, arg3));
10055 break;
10056 #endif
10057 #ifdef TARGET_NR_semop
10058 case TARGET_NR_semop:
10059 ret = do_semop(arg1, arg2, arg3);
10060 break;
10061 #endif
10062 #ifdef TARGET_NR_semctl
10063 case TARGET_NR_semctl:
10064 ret = do_semctl(arg1, arg2, arg3, arg4);
10065 break;
10066 #endif
10067 #ifdef TARGET_NR_msgctl
10068 case TARGET_NR_msgctl:
10069 ret = do_msgctl(arg1, arg2, arg3);
10070 break;
10071 #endif
10072 #ifdef TARGET_NR_msgget
10073 case TARGET_NR_msgget:
10074 ret = get_errno(msgget(arg1, arg2));
10075 break;
10076 #endif
10077 #ifdef TARGET_NR_msgrcv
10078 case TARGET_NR_msgrcv:
10079 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10080 break;
10081 #endif
10082 #ifdef TARGET_NR_msgsnd
10083 case TARGET_NR_msgsnd:
10084 ret = do_msgsnd(arg1, arg2, arg3, arg4);
10085 break;
10086 #endif
10087 #ifdef TARGET_NR_shmget
10088 case TARGET_NR_shmget:
10089 ret = get_errno(shmget(arg1, arg2, arg3));
10090 break;
10091 #endif
10092 #ifdef TARGET_NR_shmctl
10093 case TARGET_NR_shmctl:
10094 ret = do_shmctl(arg1, arg2, arg3);
10095 break;
10096 #endif
10097 #ifdef TARGET_NR_shmat
10098 case TARGET_NR_shmat:
10099 ret = do_shmat(cpu_env, arg1, arg2, arg3);
10100 break;
10101 #endif
10102 #ifdef TARGET_NR_shmdt
10103 case TARGET_NR_shmdt:
10104 ret = do_shmdt(arg1);
10105 break;
10106 #endif
10107 case TARGET_NR_fsync:
10108 ret = get_errno(fsync(arg1));
10109 break;
10110 case TARGET_NR_clone:
10111 /* Linux manages to have three different orderings for its
10112 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10113 * match the kernel's CONFIG_CLONE_* settings.
10114 * Microblaze is further special in that it uses a sixth
10115 * implicit argument to clone for the TLS pointer.
10117 #if defined(TARGET_MICROBLAZE)
10118 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10119 #elif defined(TARGET_CLONE_BACKWARDS)
10120 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10121 #elif defined(TARGET_CLONE_BACKWARDS2)
10122 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10123 #else
10124 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10125 #endif
10126 break;
10127 #ifdef __NR_exit_group
10128 /* new thread calls */
10129 case TARGET_NR_exit_group:
10130 #ifdef TARGET_GPROF
10131 _mcleanup();
10132 #endif
10133 gdb_exit(cpu_env, arg1);
10134 ret = get_errno(exit_group(arg1));
10135 break;
10136 #endif
10137 case TARGET_NR_setdomainname:
10138 if (!(p = lock_user_string(arg1)))
10139 goto efault;
10140 ret = get_errno(setdomainname(p, arg2));
10141 unlock_user(p, arg1, 0);
10142 break;
10143 case TARGET_NR_uname:
10144 /* no need to transcode because we use the linux syscall */
10146 struct new_utsname * buf;
10148 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10149 goto efault;
10150 ret = get_errno(sys_uname(buf));
10151 if (!is_error(ret)) {
10152 /* Overwrite the native machine name with whatever is being
10153 emulated. */
10154 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10155 sizeof(buf->machine));
10156 /* Allow the user to override the reported release. */
10157 if (qemu_uname_release && *qemu_uname_release) {
10158 g_strlcpy(buf->release, qemu_uname_release,
10159 sizeof(buf->release));
10162 unlock_user_struct(buf, arg1, 1);
10164 break;
10165 #ifdef TARGET_I386
10166 case TARGET_NR_modify_ldt:
10167 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
10168 break;
10169 #if !defined(TARGET_X86_64)
10170 case TARGET_NR_vm86old:
10171 goto unimplemented;
10172 case TARGET_NR_vm86:
10173 ret = do_vm86(cpu_env, arg1, arg2);
10174 break;
10175 #endif
10176 #endif
10177 case TARGET_NR_adjtimex:
10179 struct timex host_buf;
10181 if (target_to_host_timex(&host_buf, arg1) != 0) {
10182 goto efault;
10184 ret = get_errno(adjtimex(&host_buf));
10185 if (!is_error(ret)) {
10186 if (host_to_target_timex(arg1, &host_buf) != 0) {
10187 goto efault;
10191 break;
10192 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10193 case TARGET_NR_clock_adjtime:
10195 struct timex htx, *phtx = &htx;
10197 if (target_to_host_timex(phtx, arg2) != 0) {
10198 goto efault;
10200 ret = get_errno(clock_adjtime(arg1, phtx));
10201 if (!is_error(ret) && phtx) {
10202 if (host_to_target_timex(arg2, phtx) != 0) {
10203 goto efault;
10207 break;
10208 #endif
10209 #ifdef TARGET_NR_create_module
10210 case TARGET_NR_create_module:
10211 #endif
10212 case TARGET_NR_init_module:
10213 case TARGET_NR_delete_module:
10214 #ifdef TARGET_NR_get_kernel_syms
10215 case TARGET_NR_get_kernel_syms:
10216 #endif
10217 goto unimplemented;
10218 case TARGET_NR_quotactl:
10219 goto unimplemented;
10220 case TARGET_NR_getpgid:
10221 ret = get_errno(getpgid(arg1));
10222 break;
10223 case TARGET_NR_fchdir:
10224 ret = get_errno(fchdir(arg1));
10225 break;
10226 #ifdef TARGET_NR_bdflush /* not on x86_64 */
10227 case TARGET_NR_bdflush:
10228 goto unimplemented;
10229 #endif
10230 #ifdef TARGET_NR_sysfs
10231 case TARGET_NR_sysfs:
10232 goto unimplemented;
10233 #endif
10234 case TARGET_NR_personality:
10235 ret = get_errno(personality(arg1));
10236 break;
10237 #ifdef TARGET_NR_afs_syscall
10238 case TARGET_NR_afs_syscall:
10239 goto unimplemented;
10240 #endif
10241 #ifdef TARGET_NR__llseek /* Not on alpha */
10242 case TARGET_NR__llseek:
10244 int64_t res;
10245 #if !defined(__NR_llseek)
10246 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10247 if (res == -1) {
10248 ret = get_errno(res);
10249 } else {
10250 ret = 0;
10252 #else
10253 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10254 #endif
10255 if ((ret == 0) && put_user_s64(res, arg4)) {
10256 goto efault;
10259 break;
10260 #endif
10261 #ifdef TARGET_NR_getdents
10262 case TARGET_NR_getdents:
10263 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10264 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10266 struct target_dirent *target_dirp;
10267 struct linux_dirent *dirp;
10268 abi_long count = arg3;
10270 dirp = g_try_malloc(count);
10271 if (!dirp) {
10272 ret = -TARGET_ENOMEM;
10273 goto fail;
10276 ret = get_errno(sys_getdents(arg1, dirp, count));
10277 if (!is_error(ret)) {
10278 struct linux_dirent *de;
10279 struct target_dirent *tde;
10280 int len = ret;
10281 int reclen, treclen;
10282 int count1, tnamelen;
10284 count1 = 0;
10285 de = dirp;
10286 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10287 goto efault;
10288 tde = target_dirp;
10289 while (len > 0) {
10290 reclen = de->d_reclen;
10291 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10292 assert(tnamelen >= 0);
10293 treclen = tnamelen + offsetof(struct target_dirent, d_name);
10294 assert(count1 + treclen <= count);
10295 tde->d_reclen = tswap16(treclen);
10296 tde->d_ino = tswapal(de->d_ino);
10297 tde->d_off = tswapal(de->d_off);
10298 memcpy(tde->d_name, de->d_name, tnamelen);
10299 de = (struct linux_dirent *)((char *)de + reclen);
10300 len -= reclen;
10301 tde = (struct target_dirent *)((char *)tde + treclen);
10302 count1 += treclen;
10304 ret = count1;
10305 unlock_user(target_dirp, arg2, ret);
10307 g_free(dirp);
10309 #else
10311 struct linux_dirent *dirp;
10312 abi_long count = arg3;
10314 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10315 goto efault;
10316 ret = get_errno(sys_getdents(arg1, dirp, count));
10317 if (!is_error(ret)) {
10318 struct linux_dirent *de;
10319 int len = ret;
10320 int reclen;
10321 de = dirp;
10322 while (len > 0) {
10323 reclen = de->d_reclen;
10324 if (reclen > len)
10325 break;
10326 de->d_reclen = tswap16(reclen);
10327 tswapls(&de->d_ino);
10328 tswapls(&de->d_off);
10329 de = (struct linux_dirent *)((char *)de + reclen);
10330 len -= reclen;
10333 unlock_user(dirp, arg2, ret);
10335 #endif
10336 #else
10337 /* Implement getdents in terms of getdents64 */
10339 struct linux_dirent64 *dirp;
10340 abi_long count = arg3;
10342 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10343 if (!dirp) {
10344 goto efault;
10346 ret = get_errno(sys_getdents64(arg1, dirp, count));
10347 if (!is_error(ret)) {
10348 /* Convert the dirent64 structs to target dirent. We do this
10349 * in-place, since we can guarantee that a target_dirent is no
10350 * larger than a dirent64; however this means we have to be
10351 * careful to read everything before writing in the new format.
10353 struct linux_dirent64 *de;
10354 struct target_dirent *tde;
10355 int len = ret;
10356 int tlen = 0;
10358 de = dirp;
10359 tde = (struct target_dirent *)dirp;
10360 while (len > 0) {
10361 int namelen, treclen;
10362 int reclen = de->d_reclen;
10363 uint64_t ino = de->d_ino;
10364 int64_t off = de->d_off;
10365 uint8_t type = de->d_type;
10367 namelen = strlen(de->d_name);
10368 treclen = offsetof(struct target_dirent, d_name)
10369 + namelen + 2;
10370 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10372 memmove(tde->d_name, de->d_name, namelen + 1);
10373 tde->d_ino = tswapal(ino);
10374 tde->d_off = tswapal(off);
10375 tde->d_reclen = tswap16(treclen);
10376 /* The target_dirent type is in what was formerly a padding
10377 * byte at the end of the structure:
10379 *(((char *)tde) + treclen - 1) = type;
10381 de = (struct linux_dirent64 *)((char *)de + reclen);
10382 tde = (struct target_dirent *)((char *)tde + treclen);
10383 len -= reclen;
10384 tlen += treclen;
10386 ret = tlen;
10388 unlock_user(dirp, arg2, ret);
10390 #endif
10391 break;
10392 #endif /* TARGET_NR_getdents */
10393 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10394 case TARGET_NR_getdents64:
10396 struct linux_dirent64 *dirp;
10397 abi_long count = arg3;
10398 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10399 goto efault;
10400 ret = get_errno(sys_getdents64(arg1, dirp, count));
10401 if (!is_error(ret)) {
10402 struct linux_dirent64 *de;
10403 int len = ret;
10404 int reclen;
10405 de = dirp;
10406 while (len > 0) {
10407 reclen = de->d_reclen;
10408 if (reclen > len)
10409 break;
10410 de->d_reclen = tswap16(reclen);
10411 tswap64s((uint64_t *)&de->d_ino);
10412 tswap64s((uint64_t *)&de->d_off);
10413 de = (struct linux_dirent64 *)((char *)de + reclen);
10414 len -= reclen;
10417 unlock_user(dirp, arg2, ret);
10419 break;
10420 #endif /* TARGET_NR_getdents64 */
10421 #if defined(TARGET_NR__newselect)
10422 case TARGET_NR__newselect:
10423 ret = do_select(arg1, arg2, arg3, arg4, arg5);
10424 break;
10425 #endif
10426 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10427 # ifdef TARGET_NR_poll
10428 case TARGET_NR_poll:
10429 # endif
10430 # ifdef TARGET_NR_ppoll
10431 case TARGET_NR_ppoll:
10432 # endif
10434 struct target_pollfd *target_pfd;
10435 unsigned int nfds = arg2;
10436 struct pollfd *pfd;
10437 unsigned int i;
10439 pfd = NULL;
10440 target_pfd = NULL;
10441 if (nfds) {
10442 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
10443 ret = -TARGET_EINVAL;
10444 break;
10447 target_pfd = lock_user(VERIFY_WRITE, arg1,
10448 sizeof(struct target_pollfd) * nfds, 1);
10449 if (!target_pfd) {
10450 goto efault;
10453 pfd = alloca(sizeof(struct pollfd) * nfds);
10454 for (i = 0; i < nfds; i++) {
10455 pfd[i].fd = tswap32(target_pfd[i].fd);
10456 pfd[i].events = tswap16(target_pfd[i].events);
10460 switch (num) {
10461 # ifdef TARGET_NR_ppoll
10462 case TARGET_NR_ppoll:
10464 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
10465 target_sigset_t *target_set;
10466 sigset_t _set, *set = &_set;
10468 if (arg3) {
10469 if (target_to_host_timespec(timeout_ts, arg3)) {
10470 unlock_user(target_pfd, arg1, 0);
10471 goto efault;
10473 } else {
10474 timeout_ts = NULL;
10477 if (arg4) {
10478 if (arg5 != sizeof(target_sigset_t)) {
10479 unlock_user(target_pfd, arg1, 0);
10480 ret = -TARGET_EINVAL;
10481 break;
10484 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
10485 if (!target_set) {
10486 unlock_user(target_pfd, arg1, 0);
10487 goto efault;
10489 target_to_host_sigset(set, target_set);
10490 } else {
10491 set = NULL;
10494 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
10495 set, SIGSET_T_SIZE));
10497 if (!is_error(ret) && arg3) {
10498 host_to_target_timespec(arg3, timeout_ts);
10500 if (arg4) {
10501 unlock_user(target_set, arg4, 0);
10503 break;
10505 # endif
10506 # ifdef TARGET_NR_poll
10507 case TARGET_NR_poll:
10509 struct timespec ts, *pts;
10511 if (arg3 >= 0) {
10512 /* Convert ms to secs, ns */
10513 ts.tv_sec = arg3 / 1000;
10514 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10515 pts = &ts;
10516 } else {
10517 /* -ve poll() timeout means "infinite" */
10518 pts = NULL;
10520 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10521 break;
10523 # endif
10524 default:
10525 g_assert_not_reached();
10528 if (!is_error(ret)) {
10529 for(i = 0; i < nfds; i++) {
10530 target_pfd[i].revents = tswap16(pfd[i].revents);
10533 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10535 break;
10536 #endif
10537 case TARGET_NR_flock:
10538 /* NOTE: the flock constant seems to be the same for every
10539 Linux platform */
10540 ret = get_errno(safe_flock(arg1, arg2));
10541 break;
10542 case TARGET_NR_readv:
10544 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10545 if (vec != NULL) {
10546 ret = get_errno(safe_readv(arg1, vec, arg3));
10547 unlock_iovec(vec, arg2, arg3, 1);
10548 } else {
10549 ret = -host_to_target_errno(errno);
10552 break;
10553 case TARGET_NR_writev:
10555 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10556 if (vec != NULL) {
10557 ret = get_errno(safe_writev(arg1, vec, arg3));
10558 unlock_iovec(vec, arg2, arg3, 0);
10559 } else {
10560 ret = -host_to_target_errno(errno);
10563 break;
10564 #if defined(TARGET_NR_preadv)
10565 case TARGET_NR_preadv:
10567 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10568 if (vec != NULL) {
10569 unsigned long low, high;
10571 target_to_host_low_high(arg4, arg5, &low, &high);
10572 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10573 unlock_iovec(vec, arg2, arg3, 1);
10574 } else {
10575 ret = -host_to_target_errno(errno);
10578 break;
10579 #endif
10580 #if defined(TARGET_NR_pwritev)
10581 case TARGET_NR_pwritev:
10583 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10584 if (vec != NULL) {
10585 unsigned long low, high;
10587 target_to_host_low_high(arg4, arg5, &low, &high);
10588 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10589 unlock_iovec(vec, arg2, arg3, 0);
10590 } else {
10591 ret = -host_to_target_errno(errno);
10594 break;
10595 #endif
10596 case TARGET_NR_getsid:
10597 ret = get_errno(getsid(arg1));
10598 break;
10599 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10600 case TARGET_NR_fdatasync:
10601 ret = get_errno(fdatasync(arg1));
10602 break;
10603 #endif
10604 #ifdef TARGET_NR__sysctl
10605 case TARGET_NR__sysctl:
10606 /* We don't implement this, but ENOTDIR is always a safe
10607 return value. */
10608 ret = -TARGET_ENOTDIR;
10609 break;
10610 #endif
10611 case TARGET_NR_sched_getaffinity:
10613 unsigned int mask_size;
10614 unsigned long *mask;
10617 * sched_getaffinity needs multiples of ulong, so need to take
10618 * care of mismatches between target ulong and host ulong sizes.
10620 if (arg2 & (sizeof(abi_ulong) - 1)) {
10621 ret = -TARGET_EINVAL;
10622 break;
10624 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10626 mask = alloca(mask_size);
10627 memset(mask, 0, mask_size);
10628 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10630 if (!is_error(ret)) {
10631 if (ret > arg2) {
10632 /* More data returned than the caller's buffer will fit.
10633 * This only happens if sizeof(abi_long) < sizeof(long)
10634 * and the caller passed us a buffer holding an odd number
10635 * of abi_longs. If the host kernel is actually using the
10636 * extra 4 bytes then fail EINVAL; otherwise we can just
10637 * ignore them and only copy the interesting part.
10639 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10640 if (numcpus > arg2 * 8) {
10641 ret = -TARGET_EINVAL;
10642 break;
10644 ret = arg2;
10647 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10648 goto efault;
10652 break;
10653 case TARGET_NR_sched_setaffinity:
10655 unsigned int mask_size;
10656 unsigned long *mask;
10659 * sched_setaffinity needs multiples of ulong, so need to take
10660 * care of mismatches between target ulong and host ulong sizes.
10662 if (arg2 & (sizeof(abi_ulong) - 1)) {
10663 ret = -TARGET_EINVAL;
10664 break;
10666 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10667 mask = alloca(mask_size);
10669 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10670 if (ret) {
10671 break;
10674 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10676 break;
10677 case TARGET_NR_getcpu:
10679 unsigned cpu, node;
10680 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10681 arg2 ? &node : NULL,
10682 NULL));
10683 if (is_error(ret)) {
10684 goto fail;
10686 if (arg1 && put_user_u32(cpu, arg1)) {
10687 goto efault;
10689 if (arg2 && put_user_u32(node, arg2)) {
10690 goto efault;
10693 break;
10694 case TARGET_NR_sched_setparam:
10696 struct sched_param *target_schp;
10697 struct sched_param schp;
10699 if (arg2 == 0) {
10700 return -TARGET_EINVAL;
10702 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10703 goto efault;
10704 schp.sched_priority = tswap32(target_schp->sched_priority);
10705 unlock_user_struct(target_schp, arg2, 0);
10706 ret = get_errno(sched_setparam(arg1, &schp));
10708 break;
10709 case TARGET_NR_sched_getparam:
10711 struct sched_param *target_schp;
10712 struct sched_param schp;
10714 if (arg2 == 0) {
10715 return -TARGET_EINVAL;
10717 ret = get_errno(sched_getparam(arg1, &schp));
10718 if (!is_error(ret)) {
10719 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10720 goto efault;
10721 target_schp->sched_priority = tswap32(schp.sched_priority);
10722 unlock_user_struct(target_schp, arg2, 1);
10725 break;
10726 case TARGET_NR_sched_setscheduler:
10728 struct sched_param *target_schp;
10729 struct sched_param schp;
10730 if (arg3 == 0) {
10731 return -TARGET_EINVAL;
10733 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10734 goto efault;
10735 schp.sched_priority = tswap32(target_schp->sched_priority);
10736 unlock_user_struct(target_schp, arg3, 0);
10737 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
10739 break;
10740 case TARGET_NR_sched_getscheduler:
10741 ret = get_errno(sched_getscheduler(arg1));
10742 break;
10743 case TARGET_NR_sched_yield:
10744 ret = get_errno(sched_yield());
10745 break;
10746 case TARGET_NR_sched_get_priority_max:
10747 ret = get_errno(sched_get_priority_max(arg1));
10748 break;
10749 case TARGET_NR_sched_get_priority_min:
10750 ret = get_errno(sched_get_priority_min(arg1));
10751 break;
10752 case TARGET_NR_sched_rr_get_interval:
10754 struct timespec ts;
10755 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10756 if (!is_error(ret)) {
10757 ret = host_to_target_timespec(arg2, &ts);
10760 break;
10761 case TARGET_NR_nanosleep:
10763 struct timespec req, rem;
10764 target_to_host_timespec(&req, arg1);
10765 ret = get_errno(safe_nanosleep(&req, &rem));
10766 if (is_error(ret) && arg2) {
10767 host_to_target_timespec(arg2, &rem);
10770 break;
10771 #ifdef TARGET_NR_query_module
10772 case TARGET_NR_query_module:
10773 goto unimplemented;
10774 #endif
10775 #ifdef TARGET_NR_nfsservctl
10776 case TARGET_NR_nfsservctl:
10777 goto unimplemented;
10778 #endif
10779 case TARGET_NR_prctl:
10780 switch (arg1) {
10781 case PR_GET_PDEATHSIG:
10783 int deathsig;
10784 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10785 if (!is_error(ret) && arg2
10786 && put_user_ual(deathsig, arg2)) {
10787 goto efault;
10789 break;
10791 #ifdef PR_GET_NAME
10792 case PR_GET_NAME:
10794 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10795 if (!name) {
10796 goto efault;
10798 ret = get_errno(prctl(arg1, (unsigned long)name,
10799 arg3, arg4, arg5));
10800 unlock_user(name, arg2, 16);
10801 break;
10803 case PR_SET_NAME:
10805 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10806 if (!name) {
10807 goto efault;
10809 ret = get_errno(prctl(arg1, (unsigned long)name,
10810 arg3, arg4, arg5));
10811 unlock_user(name, arg2, 0);
10812 break;
10814 #endif
10815 #ifdef TARGET_AARCH64
10816 case TARGET_PR_SVE_SET_VL:
10817 /* We cannot support either PR_SVE_SET_VL_ONEXEC
10818 or PR_SVE_VL_INHERIT. Therefore, anything above
10819 ARM_MAX_VQ results in EINVAL. */
10820 ret = -TARGET_EINVAL;
10821 if (arm_feature(cpu_env, ARM_FEATURE_SVE)
10822 && arg2 >= 0 && arg2 <= ARM_MAX_VQ * 16 && !(arg2 & 15)) {
10823 CPUARMState *env = cpu_env;
10824 int old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10825 int vq = MAX(arg2 / 16, 1);
10827 if (vq < old_vq) {
10828 aarch64_sve_narrow_vq(env, vq);
10830 env->vfp.zcr_el[1] = vq - 1;
10831 ret = vq * 16;
10833 break;
10834 case TARGET_PR_SVE_GET_VL:
10835 ret = -TARGET_EINVAL;
10836 if (arm_feature(cpu_env, ARM_FEATURE_SVE)) {
10837 CPUARMState *env = cpu_env;
10838 ret = ((env->vfp.zcr_el[1] & 0xf) + 1) * 16;
10840 break;
10841 #endif /* AARCH64 */
10842 case PR_GET_SECCOMP:
10843 case PR_SET_SECCOMP:
10844 /* Disable seccomp to prevent the target disabling syscalls we
10845 * need. */
10846 ret = -TARGET_EINVAL;
10847 break;
10848 default:
10849 /* Most prctl options have no pointer arguments */
10850 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10851 break;
10853 break;
10854 #ifdef TARGET_NR_arch_prctl
10855 case TARGET_NR_arch_prctl:
10856 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10857 ret = do_arch_prctl(cpu_env, arg1, arg2);
10858 break;
10859 #else
10860 goto unimplemented;
10861 #endif
10862 #endif
10863 #ifdef TARGET_NR_pread64
10864 case TARGET_NR_pread64:
10865 if (regpairs_aligned(cpu_env, num)) {
10866 arg4 = arg5;
10867 arg5 = arg6;
10869 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
10870 goto efault;
10871 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10872 unlock_user(p, arg2, ret);
10873 break;
10874 case TARGET_NR_pwrite64:
10875 if (regpairs_aligned(cpu_env, num)) {
10876 arg4 = arg5;
10877 arg5 = arg6;
10879 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
10880 goto efault;
10881 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10882 unlock_user(p, arg2, 0);
10883 break;
10884 #endif
10885 case TARGET_NR_getcwd:
10886 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10887 goto efault;
10888 ret = get_errno(sys_getcwd1(p, arg2));
10889 unlock_user(p, arg1, ret);
10890 break;
10891 case TARGET_NR_capget:
10892 case TARGET_NR_capset:
10894 struct target_user_cap_header *target_header;
10895 struct target_user_cap_data *target_data = NULL;
10896 struct __user_cap_header_struct header;
10897 struct __user_cap_data_struct data[2];
10898 struct __user_cap_data_struct *dataptr = NULL;
10899 int i, target_datalen;
10900 int data_items = 1;
10902 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10903 goto efault;
10905 header.version = tswap32(target_header->version);
10906 header.pid = tswap32(target_header->pid);
10908 if (header.version != _LINUX_CAPABILITY_VERSION) {
10909 /* Version 2 and up takes pointer to two user_data structs */
10910 data_items = 2;
10913 target_datalen = sizeof(*target_data) * data_items;
10915 if (arg2) {
10916 if (num == TARGET_NR_capget) {
10917 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10918 } else {
10919 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10921 if (!target_data) {
10922 unlock_user_struct(target_header, arg1, 0);
10923 goto efault;
10926 if (num == TARGET_NR_capset) {
10927 for (i = 0; i < data_items; i++) {
10928 data[i].effective = tswap32(target_data[i].effective);
10929 data[i].permitted = tswap32(target_data[i].permitted);
10930 data[i].inheritable = tswap32(target_data[i].inheritable);
10934 dataptr = data;
10937 if (num == TARGET_NR_capget) {
10938 ret = get_errno(capget(&header, dataptr));
10939 } else {
10940 ret = get_errno(capset(&header, dataptr));
10943 /* The kernel always updates version for both capget and capset */
10944 target_header->version = tswap32(header.version);
10945 unlock_user_struct(target_header, arg1, 1);
10947 if (arg2) {
10948 if (num == TARGET_NR_capget) {
10949 for (i = 0; i < data_items; i++) {
10950 target_data[i].effective = tswap32(data[i].effective);
10951 target_data[i].permitted = tswap32(data[i].permitted);
10952 target_data[i].inheritable = tswap32(data[i].inheritable);
10954 unlock_user(target_data, arg2, target_datalen);
10955 } else {
10956 unlock_user(target_data, arg2, 0);
10959 break;
10961 case TARGET_NR_sigaltstack:
10962 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
10963 break;
10965 #ifdef CONFIG_SENDFILE
10966 case TARGET_NR_sendfile:
10968 off_t *offp = NULL;
10969 off_t off;
10970 if (arg3) {
10971 ret = get_user_sal(off, arg3);
10972 if (is_error(ret)) {
10973 break;
10975 offp = &off;
10977 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10978 if (!is_error(ret) && arg3) {
10979 abi_long ret2 = put_user_sal(off, arg3);
10980 if (is_error(ret2)) {
10981 ret = ret2;
10984 break;
10986 #ifdef TARGET_NR_sendfile64
10987 case TARGET_NR_sendfile64:
10989 off_t *offp = NULL;
10990 off_t off;
10991 if (arg3) {
10992 ret = get_user_s64(off, arg3);
10993 if (is_error(ret)) {
10994 break;
10996 offp = &off;
10998 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10999 if (!is_error(ret) && arg3) {
11000 abi_long ret2 = put_user_s64(off, arg3);
11001 if (is_error(ret2)) {
11002 ret = ret2;
11005 break;
11007 #endif
11008 #else
11009 case TARGET_NR_sendfile:
11010 #ifdef TARGET_NR_sendfile64
11011 case TARGET_NR_sendfile64:
11012 #endif
11013 goto unimplemented;
11014 #endif
11016 #ifdef TARGET_NR_getpmsg
11017 case TARGET_NR_getpmsg:
11018 goto unimplemented;
11019 #endif
11020 #ifdef TARGET_NR_putpmsg
11021 case TARGET_NR_putpmsg:
11022 goto unimplemented;
11023 #endif
11024 #ifdef TARGET_NR_vfork
11025 case TARGET_NR_vfork:
11026 ret = get_errno(do_fork(cpu_env,
11027 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11028 0, 0, 0, 0));
11029 break;
11030 #endif
11031 #ifdef TARGET_NR_ugetrlimit
11032 case TARGET_NR_ugetrlimit:
11034 struct rlimit rlim;
11035 int resource = target_to_host_resource(arg1);
11036 ret = get_errno(getrlimit(resource, &rlim));
11037 if (!is_error(ret)) {
11038 struct target_rlimit *target_rlim;
11039 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11040 goto efault;
11041 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11042 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11043 unlock_user_struct(target_rlim, arg2, 1);
11045 break;
11047 #endif
11048 #ifdef TARGET_NR_truncate64
11049 case TARGET_NR_truncate64:
11050 if (!(p = lock_user_string(arg1)))
11051 goto efault;
11052 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11053 unlock_user(p, arg1, 0);
11054 break;
11055 #endif
11056 #ifdef TARGET_NR_ftruncate64
11057 case TARGET_NR_ftruncate64:
11058 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11059 break;
11060 #endif
11061 #ifdef TARGET_NR_stat64
11062 case TARGET_NR_stat64:
11063 if (!(p = lock_user_string(arg1)))
11064 goto efault;
11065 ret = get_errno(stat(path(p), &st));
11066 unlock_user(p, arg1, 0);
11067 if (!is_error(ret))
11068 ret = host_to_target_stat64(cpu_env, arg2, &st);
11069 break;
11070 #endif
11071 #ifdef TARGET_NR_lstat64
11072 case TARGET_NR_lstat64:
11073 if (!(p = lock_user_string(arg1)))
11074 goto efault;
11075 ret = get_errno(lstat(path(p), &st));
11076 unlock_user(p, arg1, 0);
11077 if (!is_error(ret))
11078 ret = host_to_target_stat64(cpu_env, arg2, &st);
11079 break;
11080 #endif
11081 #ifdef TARGET_NR_fstat64
11082 case TARGET_NR_fstat64:
11083 ret = get_errno(fstat(arg1, &st));
11084 if (!is_error(ret))
11085 ret = host_to_target_stat64(cpu_env, arg2, &st);
11086 break;
11087 #endif
11088 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11089 #ifdef TARGET_NR_fstatat64
11090 case TARGET_NR_fstatat64:
11091 #endif
11092 #ifdef TARGET_NR_newfstatat
11093 case TARGET_NR_newfstatat:
11094 #endif
11095 if (!(p = lock_user_string(arg2)))
11096 goto efault;
11097 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11098 if (!is_error(ret))
11099 ret = host_to_target_stat64(cpu_env, arg3, &st);
11100 break;
11101 #endif
11102 #ifdef TARGET_NR_lchown
11103 case TARGET_NR_lchown:
11104 if (!(p = lock_user_string(arg1)))
11105 goto efault;
11106 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11107 unlock_user(p, arg1, 0);
11108 break;
11109 #endif
11110 #ifdef TARGET_NR_getuid
11111 case TARGET_NR_getuid:
11112 ret = get_errno(high2lowuid(getuid()));
11113 break;
11114 #endif
11115 #ifdef TARGET_NR_getgid
11116 case TARGET_NR_getgid:
11117 ret = get_errno(high2lowgid(getgid()));
11118 break;
11119 #endif
11120 #ifdef TARGET_NR_geteuid
11121 case TARGET_NR_geteuid:
11122 ret = get_errno(high2lowuid(geteuid()));
11123 break;
11124 #endif
11125 #ifdef TARGET_NR_getegid
11126 case TARGET_NR_getegid:
11127 ret = get_errno(high2lowgid(getegid()));
11128 break;
11129 #endif
11130 case TARGET_NR_setreuid:
11131 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11132 break;
11133 case TARGET_NR_setregid:
11134 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11135 break;
11136 case TARGET_NR_getgroups:
11138 int gidsetsize = arg1;
11139 target_id *target_grouplist;
11140 gid_t *grouplist;
11141 int i;
11143 grouplist = alloca(gidsetsize * sizeof(gid_t));
11144 ret = get_errno(getgroups(gidsetsize, grouplist));
11145 if (gidsetsize == 0)
11146 break;
11147 if (!is_error(ret)) {
11148 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11149 if (!target_grouplist)
11150 goto efault;
11151 for(i = 0;i < ret; i++)
11152 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11153 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11156 break;
11157 case TARGET_NR_setgroups:
11159 int gidsetsize = arg1;
11160 target_id *target_grouplist;
11161 gid_t *grouplist = NULL;
11162 int i;
11163 if (gidsetsize) {
11164 grouplist = alloca(gidsetsize * sizeof(gid_t));
11165 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11166 if (!target_grouplist) {
11167 ret = -TARGET_EFAULT;
11168 goto fail;
11170 for (i = 0; i < gidsetsize; i++) {
11171 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11173 unlock_user(target_grouplist, arg2, 0);
11175 ret = get_errno(setgroups(gidsetsize, grouplist));
11177 break;
11178 case TARGET_NR_fchown:
11179 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11180 break;
11181 #if defined(TARGET_NR_fchownat)
11182 case TARGET_NR_fchownat:
11183 if (!(p = lock_user_string(arg2)))
11184 goto efault;
11185 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11186 low2highgid(arg4), arg5));
11187 unlock_user(p, arg2, 0);
11188 break;
11189 #endif
11190 #ifdef TARGET_NR_setresuid
11191 case TARGET_NR_setresuid:
11192 ret = get_errno(sys_setresuid(low2highuid(arg1),
11193 low2highuid(arg2),
11194 low2highuid(arg3)));
11195 break;
11196 #endif
11197 #ifdef TARGET_NR_getresuid
11198 case TARGET_NR_getresuid:
11200 uid_t ruid, euid, suid;
11201 ret = get_errno(getresuid(&ruid, &euid, &suid));
11202 if (!is_error(ret)) {
11203 if (put_user_id(high2lowuid(ruid), arg1)
11204 || put_user_id(high2lowuid(euid), arg2)
11205 || put_user_id(high2lowuid(suid), arg3))
11206 goto efault;
11209 break;
11210 #endif
11211 #ifdef TARGET_NR_getresgid
11212 case TARGET_NR_setresgid:
11213 ret = get_errno(sys_setresgid(low2highgid(arg1),
11214 low2highgid(arg2),
11215 low2highgid(arg3)));
11216 break;
11217 #endif
11218 #ifdef TARGET_NR_getresgid
11219 case TARGET_NR_getresgid:
11221 gid_t rgid, egid, sgid;
11222 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11223 if (!is_error(ret)) {
11224 if (put_user_id(high2lowgid(rgid), arg1)
11225 || put_user_id(high2lowgid(egid), arg2)
11226 || put_user_id(high2lowgid(sgid), arg3))
11227 goto efault;
11230 break;
11231 #endif
11232 #ifdef TARGET_NR_chown
11233 case TARGET_NR_chown:
11234 if (!(p = lock_user_string(arg1)))
11235 goto efault;
11236 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11237 unlock_user(p, arg1, 0);
11238 break;
11239 #endif
11240 case TARGET_NR_setuid:
11241 ret = get_errno(sys_setuid(low2highuid(arg1)));
11242 break;
11243 case TARGET_NR_setgid:
11244 ret = get_errno(sys_setgid(low2highgid(arg1)));
11245 break;
11246 case TARGET_NR_setfsuid:
11247 ret = get_errno(setfsuid(arg1));
11248 break;
11249 case TARGET_NR_setfsgid:
11250 ret = get_errno(setfsgid(arg1));
11251 break;
11253 #ifdef TARGET_NR_lchown32
11254 case TARGET_NR_lchown32:
11255 if (!(p = lock_user_string(arg1)))
11256 goto efault;
11257 ret = get_errno(lchown(p, arg2, arg3));
11258 unlock_user(p, arg1, 0);
11259 break;
11260 #endif
11261 #ifdef TARGET_NR_getuid32
11262 case TARGET_NR_getuid32:
11263 ret = get_errno(getuid());
11264 break;
11265 #endif
11267 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11268 /* Alpha specific */
11269 case TARGET_NR_getxuid:
11271 uid_t euid;
11272 euid=geteuid();
11273 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
11275 ret = get_errno(getuid());
11276 break;
11277 #endif
11278 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11279 /* Alpha specific */
11280 case TARGET_NR_getxgid:
11282 uid_t egid;
11283 egid=getegid();
11284 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
11286 ret = get_errno(getgid());
11287 break;
11288 #endif
11289 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11290 /* Alpha specific */
11291 case TARGET_NR_osf_getsysinfo:
11292 ret = -TARGET_EOPNOTSUPP;
11293 switch (arg1) {
11294 case TARGET_GSI_IEEE_FP_CONTROL:
11296 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
11298 /* Copied from linux ieee_fpcr_to_swcr. */
11299 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
11300 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
11301 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
11302 | SWCR_TRAP_ENABLE_DZE
11303 | SWCR_TRAP_ENABLE_OVF);
11304 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
11305 | SWCR_TRAP_ENABLE_INE);
11306 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
11307 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
11309 if (put_user_u64 (swcr, arg2))
11310 goto efault;
11311 ret = 0;
11313 break;
11315 /* case GSI_IEEE_STATE_AT_SIGNAL:
11316 -- Not implemented in linux kernel.
11317 case GSI_UACPROC:
11318 -- Retrieves current unaligned access state; not much used.
11319 case GSI_PROC_TYPE:
11320 -- Retrieves implver information; surely not used.
11321 case GSI_GET_HWRPB:
11322 -- Grabs a copy of the HWRPB; surely not used.
11325 break;
11326 #endif
11327 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11328 /* Alpha specific */
11329 case TARGET_NR_osf_setsysinfo:
11330 ret = -TARGET_EOPNOTSUPP;
11331 switch (arg1) {
11332 case TARGET_SSI_IEEE_FP_CONTROL:
11334 uint64_t swcr, fpcr, orig_fpcr;
11336 if (get_user_u64 (swcr, arg2)) {
11337 goto efault;
11339 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11340 fpcr = orig_fpcr & FPCR_DYN_MASK;
11342 /* Copied from linux ieee_swcr_to_fpcr. */
11343 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
11344 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
11345 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
11346 | SWCR_TRAP_ENABLE_DZE
11347 | SWCR_TRAP_ENABLE_OVF)) << 48;
11348 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
11349 | SWCR_TRAP_ENABLE_INE)) << 57;
11350 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
11351 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
11353 cpu_alpha_store_fpcr(cpu_env, fpcr);
11354 ret = 0;
11356 break;
11358 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11360 uint64_t exc, fpcr, orig_fpcr;
11361 int si_code;
11363 if (get_user_u64(exc, arg2)) {
11364 goto efault;
11367 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11369 /* We only add to the exception status here. */
11370 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
11372 cpu_alpha_store_fpcr(cpu_env, fpcr);
11373 ret = 0;
11375 /* Old exceptions are not signaled. */
11376 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
11378 /* If any exceptions set by this call,
11379 and are unmasked, send a signal. */
11380 si_code = 0;
11381 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
11382 si_code = TARGET_FPE_FLTRES;
11384 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
11385 si_code = TARGET_FPE_FLTUND;
11387 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
11388 si_code = TARGET_FPE_FLTOVF;
11390 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
11391 si_code = TARGET_FPE_FLTDIV;
11393 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
11394 si_code = TARGET_FPE_FLTINV;
11396 if (si_code != 0) {
11397 target_siginfo_t info;
11398 info.si_signo = SIGFPE;
11399 info.si_errno = 0;
11400 info.si_code = si_code;
11401 info._sifields._sigfault._addr
11402 = ((CPUArchState *)cpu_env)->pc;
11403 queue_signal((CPUArchState *)cpu_env, info.si_signo,
11404 QEMU_SI_FAULT, &info);
11407 break;
11409 /* case SSI_NVPAIRS:
11410 -- Used with SSIN_UACPROC to enable unaligned accesses.
11411 case SSI_IEEE_STATE_AT_SIGNAL:
11412 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11413 -- Not implemented in linux kernel
11416 break;
11417 #endif
11418 #ifdef TARGET_NR_osf_sigprocmask
11419 /* Alpha specific. */
11420 case TARGET_NR_osf_sigprocmask:
11422 abi_ulong mask;
11423 int how;
11424 sigset_t set, oldset;
11426 switch(arg1) {
11427 case TARGET_SIG_BLOCK:
11428 how = SIG_BLOCK;
11429 break;
11430 case TARGET_SIG_UNBLOCK:
11431 how = SIG_UNBLOCK;
11432 break;
11433 case TARGET_SIG_SETMASK:
11434 how = SIG_SETMASK;
11435 break;
11436 default:
11437 ret = -TARGET_EINVAL;
11438 goto fail;
11440 mask = arg2;
11441 target_to_host_old_sigset(&set, &mask);
11442 ret = do_sigprocmask(how, &set, &oldset);
11443 if (!ret) {
11444 host_to_target_old_sigset(&mask, &oldset);
11445 ret = mask;
11448 break;
11449 #endif
11451 #ifdef TARGET_NR_getgid32
11452 case TARGET_NR_getgid32:
11453 ret = get_errno(getgid());
11454 break;
11455 #endif
11456 #ifdef TARGET_NR_geteuid32
11457 case TARGET_NR_geteuid32:
11458 ret = get_errno(geteuid());
11459 break;
11460 #endif
11461 #ifdef TARGET_NR_getegid32
11462 case TARGET_NR_getegid32:
11463 ret = get_errno(getegid());
11464 break;
11465 #endif
11466 #ifdef TARGET_NR_setreuid32
11467 case TARGET_NR_setreuid32:
11468 ret = get_errno(setreuid(arg1, arg2));
11469 break;
11470 #endif
11471 #ifdef TARGET_NR_setregid32
11472 case TARGET_NR_setregid32:
11473 ret = get_errno(setregid(arg1, arg2));
11474 break;
11475 #endif
11476 #ifdef TARGET_NR_getgroups32
11477 case TARGET_NR_getgroups32:
11479 int gidsetsize = arg1;
11480 uint32_t *target_grouplist;
11481 gid_t *grouplist;
11482 int i;
11484 grouplist = alloca(gidsetsize * sizeof(gid_t));
11485 ret = get_errno(getgroups(gidsetsize, grouplist));
11486 if (gidsetsize == 0)
11487 break;
11488 if (!is_error(ret)) {
11489 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11490 if (!target_grouplist) {
11491 ret = -TARGET_EFAULT;
11492 goto fail;
11494 for(i = 0;i < ret; i++)
11495 target_grouplist[i] = tswap32(grouplist[i]);
11496 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11499 break;
11500 #endif
11501 #ifdef TARGET_NR_setgroups32
11502 case TARGET_NR_setgroups32:
11504 int gidsetsize = arg1;
11505 uint32_t *target_grouplist;
11506 gid_t *grouplist;
11507 int i;
11509 grouplist = alloca(gidsetsize * sizeof(gid_t));
11510 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11511 if (!target_grouplist) {
11512 ret = -TARGET_EFAULT;
11513 goto fail;
11515 for(i = 0;i < gidsetsize; i++)
11516 grouplist[i] = tswap32(target_grouplist[i]);
11517 unlock_user(target_grouplist, arg2, 0);
11518 ret = get_errno(setgroups(gidsetsize, grouplist));
11520 break;
11521 #endif
11522 #ifdef TARGET_NR_fchown32
11523 case TARGET_NR_fchown32:
11524 ret = get_errno(fchown(arg1, arg2, arg3));
11525 break;
11526 #endif
11527 #ifdef TARGET_NR_setresuid32
11528 case TARGET_NR_setresuid32:
11529 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
11530 break;
11531 #endif
11532 #ifdef TARGET_NR_getresuid32
11533 case TARGET_NR_getresuid32:
11535 uid_t ruid, euid, suid;
11536 ret = get_errno(getresuid(&ruid, &euid, &suid));
11537 if (!is_error(ret)) {
11538 if (put_user_u32(ruid, arg1)
11539 || put_user_u32(euid, arg2)
11540 || put_user_u32(suid, arg3))
11541 goto efault;
11544 break;
11545 #endif
11546 #ifdef TARGET_NR_setresgid32
11547 case TARGET_NR_setresgid32:
11548 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
11549 break;
11550 #endif
11551 #ifdef TARGET_NR_getresgid32
11552 case TARGET_NR_getresgid32:
11554 gid_t rgid, egid, sgid;
11555 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11556 if (!is_error(ret)) {
11557 if (put_user_u32(rgid, arg1)
11558 || put_user_u32(egid, arg2)
11559 || put_user_u32(sgid, arg3))
11560 goto efault;
11563 break;
11564 #endif
11565 #ifdef TARGET_NR_chown32
11566 case TARGET_NR_chown32:
11567 if (!(p = lock_user_string(arg1)))
11568 goto efault;
11569 ret = get_errno(chown(p, arg2, arg3));
11570 unlock_user(p, arg1, 0);
11571 break;
11572 #endif
11573 #ifdef TARGET_NR_setuid32
11574 case TARGET_NR_setuid32:
11575 ret = get_errno(sys_setuid(arg1));
11576 break;
11577 #endif
11578 #ifdef TARGET_NR_setgid32
11579 case TARGET_NR_setgid32:
11580 ret = get_errno(sys_setgid(arg1));
11581 break;
11582 #endif
11583 #ifdef TARGET_NR_setfsuid32
11584 case TARGET_NR_setfsuid32:
11585 ret = get_errno(setfsuid(arg1));
11586 break;
11587 #endif
11588 #ifdef TARGET_NR_setfsgid32
11589 case TARGET_NR_setfsgid32:
11590 ret = get_errno(setfsgid(arg1));
11591 break;
11592 #endif
11594 case TARGET_NR_pivot_root:
11595 goto unimplemented;
11596 #ifdef TARGET_NR_mincore
11597 case TARGET_NR_mincore:
11599 void *a;
11600 ret = -TARGET_ENOMEM;
11601 a = lock_user(VERIFY_READ, arg1, arg2, 0);
11602 if (!a) {
11603 goto fail;
11605 ret = -TARGET_EFAULT;
11606 p = lock_user_string(arg3);
11607 if (!p) {
11608 goto mincore_fail;
11610 ret = get_errno(mincore(a, arg2, p));
11611 unlock_user(p, arg3, ret);
11612 mincore_fail:
11613 unlock_user(a, arg1, 0);
11615 break;
11616 #endif
11617 #ifdef TARGET_NR_arm_fadvise64_64
11618 case TARGET_NR_arm_fadvise64_64:
11619 /* arm_fadvise64_64 looks like fadvise64_64 but
11620 * with different argument order: fd, advice, offset, len
11621 * rather than the usual fd, offset, len, advice.
11622 * Note that offset and len are both 64-bit so appear as
11623 * pairs of 32-bit registers.
11625 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11626 target_offset64(arg5, arg6), arg2);
11627 ret = -host_to_target_errno(ret);
11628 break;
11629 #endif
11631 #if TARGET_ABI_BITS == 32
11633 #ifdef TARGET_NR_fadvise64_64
11634 case TARGET_NR_fadvise64_64:
11635 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11636 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11637 ret = arg2;
11638 arg2 = arg3;
11639 arg3 = arg4;
11640 arg4 = arg5;
11641 arg5 = arg6;
11642 arg6 = ret;
11643 #else
11644 /* 6 args: fd, offset (high, low), len (high, low), advice */
11645 if (regpairs_aligned(cpu_env, num)) {
11646 /* offset is in (3,4), len in (5,6) and advice in 7 */
11647 arg2 = arg3;
11648 arg3 = arg4;
11649 arg4 = arg5;
11650 arg5 = arg6;
11651 arg6 = arg7;
11653 #endif
11654 ret = -host_to_target_errno(posix_fadvise(arg1,
11655 target_offset64(arg2, arg3),
11656 target_offset64(arg4, arg5),
11657 arg6));
11658 break;
11659 #endif
11661 #ifdef TARGET_NR_fadvise64
11662 case TARGET_NR_fadvise64:
11663 /* 5 args: fd, offset (high, low), len, advice */
11664 if (regpairs_aligned(cpu_env, num)) {
11665 /* offset is in (3,4), len in 5 and advice in 6 */
11666 arg2 = arg3;
11667 arg3 = arg4;
11668 arg4 = arg5;
11669 arg5 = arg6;
11671 ret = -host_to_target_errno(posix_fadvise(arg1,
11672 target_offset64(arg2, arg3),
11673 arg4, arg5));
11674 break;
11675 #endif
11677 #else /* not a 32-bit ABI */
11678 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11679 #ifdef TARGET_NR_fadvise64_64
11680 case TARGET_NR_fadvise64_64:
11681 #endif
11682 #ifdef TARGET_NR_fadvise64
11683 case TARGET_NR_fadvise64:
11684 #endif
11685 #ifdef TARGET_S390X
11686 switch (arg4) {
11687 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11688 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11689 case 6: arg4 = POSIX_FADV_DONTNEED; break;
11690 case 7: arg4 = POSIX_FADV_NOREUSE; break;
11691 default: break;
11693 #endif
11694 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11695 break;
11696 #endif
11697 #endif /* end of 64-bit ABI fadvise handling */
11699 #ifdef TARGET_NR_madvise
11700 case TARGET_NR_madvise:
11701 /* A straight passthrough may not be safe because qemu sometimes
11702 turns private file-backed mappings into anonymous mappings.
11703 This will break MADV_DONTNEED.
11704 This is a hint, so ignoring and returning success is ok. */
11705 ret = get_errno(0);
11706 break;
11707 #endif
11708 #if TARGET_ABI_BITS == 32
11709 case TARGET_NR_fcntl64:
11711 int cmd;
11712 struct flock64 fl;
11713 from_flock64_fn *copyfrom = copy_from_user_flock64;
11714 to_flock64_fn *copyto = copy_to_user_flock64;
11716 #ifdef TARGET_ARM
11717 if (!((CPUARMState *)cpu_env)->eabi) {
11718 copyfrom = copy_from_user_oabi_flock64;
11719 copyto = copy_to_user_oabi_flock64;
11721 #endif
11723 cmd = target_to_host_fcntl_cmd(arg2);
11724 if (cmd == -TARGET_EINVAL) {
11725 ret = cmd;
11726 break;
11729 switch(arg2) {
11730 case TARGET_F_GETLK64:
11731 ret = copyfrom(&fl, arg3);
11732 if (ret) {
11733 break;
11735 ret = get_errno(fcntl(arg1, cmd, &fl));
11736 if (ret == 0) {
11737 ret = copyto(arg3, &fl);
11739 break;
11741 case TARGET_F_SETLK64:
11742 case TARGET_F_SETLKW64:
11743 ret = copyfrom(&fl, arg3);
11744 if (ret) {
11745 break;
11747 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11748 break;
11749 default:
11750 ret = do_fcntl(arg1, arg2, arg3);
11751 break;
11753 break;
11755 #endif
11756 #ifdef TARGET_NR_cacheflush
11757 case TARGET_NR_cacheflush:
11758 /* self-modifying code is handled automatically, so nothing needed */
11759 ret = 0;
11760 break;
11761 #endif
11762 #ifdef TARGET_NR_security
11763 case TARGET_NR_security:
11764 goto unimplemented;
11765 #endif
11766 #ifdef TARGET_NR_getpagesize
11767 case TARGET_NR_getpagesize:
11768 ret = TARGET_PAGE_SIZE;
11769 break;
11770 #endif
11771 case TARGET_NR_gettid:
11772 ret = get_errno(gettid());
11773 break;
11774 #ifdef TARGET_NR_readahead
11775 case TARGET_NR_readahead:
11776 #if TARGET_ABI_BITS == 32
11777 if (regpairs_aligned(cpu_env, num)) {
11778 arg2 = arg3;
11779 arg3 = arg4;
11780 arg4 = arg5;
11782 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11783 #else
11784 ret = get_errno(readahead(arg1, arg2, arg3));
11785 #endif
11786 break;
11787 #endif
11788 #ifdef CONFIG_ATTR
11789 #ifdef TARGET_NR_setxattr
11790 case TARGET_NR_listxattr:
11791 case TARGET_NR_llistxattr:
11793 void *p, *b = 0;
11794 if (arg2) {
11795 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11796 if (!b) {
11797 ret = -TARGET_EFAULT;
11798 break;
11801 p = lock_user_string(arg1);
11802 if (p) {
11803 if (num == TARGET_NR_listxattr) {
11804 ret = get_errno(listxattr(p, b, arg3));
11805 } else {
11806 ret = get_errno(llistxattr(p, b, arg3));
11808 } else {
11809 ret = -TARGET_EFAULT;
11811 unlock_user(p, arg1, 0);
11812 unlock_user(b, arg2, arg3);
11813 break;
11815 case TARGET_NR_flistxattr:
11817 void *b = 0;
11818 if (arg2) {
11819 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11820 if (!b) {
11821 ret = -TARGET_EFAULT;
11822 break;
11825 ret = get_errno(flistxattr(arg1, b, arg3));
11826 unlock_user(b, arg2, arg3);
11827 break;
11829 case TARGET_NR_setxattr:
11830 case TARGET_NR_lsetxattr:
11832 void *p, *n, *v = 0;
11833 if (arg3) {
11834 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11835 if (!v) {
11836 ret = -TARGET_EFAULT;
11837 break;
11840 p = lock_user_string(arg1);
11841 n = lock_user_string(arg2);
11842 if (p && n) {
11843 if (num == TARGET_NR_setxattr) {
11844 ret = get_errno(setxattr(p, n, v, arg4, arg5));
11845 } else {
11846 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11848 } else {
11849 ret = -TARGET_EFAULT;
11851 unlock_user(p, arg1, 0);
11852 unlock_user(n, arg2, 0);
11853 unlock_user(v, arg3, 0);
11855 break;
11856 case TARGET_NR_fsetxattr:
11858 void *n, *v = 0;
11859 if (arg3) {
11860 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11861 if (!v) {
11862 ret = -TARGET_EFAULT;
11863 break;
11866 n = lock_user_string(arg2);
11867 if (n) {
11868 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11869 } else {
11870 ret = -TARGET_EFAULT;
11872 unlock_user(n, arg2, 0);
11873 unlock_user(v, arg3, 0);
11875 break;
11876 case TARGET_NR_getxattr:
11877 case TARGET_NR_lgetxattr:
11879 void *p, *n, *v = 0;
11880 if (arg3) {
11881 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11882 if (!v) {
11883 ret = -TARGET_EFAULT;
11884 break;
11887 p = lock_user_string(arg1);
11888 n = lock_user_string(arg2);
11889 if (p && n) {
11890 if (num == TARGET_NR_getxattr) {
11891 ret = get_errno(getxattr(p, n, v, arg4));
11892 } else {
11893 ret = get_errno(lgetxattr(p, n, v, arg4));
11895 } else {
11896 ret = -TARGET_EFAULT;
11898 unlock_user(p, arg1, 0);
11899 unlock_user(n, arg2, 0);
11900 unlock_user(v, arg3, arg4);
11902 break;
11903 case TARGET_NR_fgetxattr:
11905 void *n, *v = 0;
11906 if (arg3) {
11907 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11908 if (!v) {
11909 ret = -TARGET_EFAULT;
11910 break;
11913 n = lock_user_string(arg2);
11914 if (n) {
11915 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11916 } else {
11917 ret = -TARGET_EFAULT;
11919 unlock_user(n, arg2, 0);
11920 unlock_user(v, arg3, arg4);
11922 break;
11923 case TARGET_NR_removexattr:
11924 case TARGET_NR_lremovexattr:
11926 void *p, *n;
11927 p = lock_user_string(arg1);
11928 n = lock_user_string(arg2);
11929 if (p && n) {
11930 if (num == TARGET_NR_removexattr) {
11931 ret = get_errno(removexattr(p, n));
11932 } else {
11933 ret = get_errno(lremovexattr(p, n));
11935 } else {
11936 ret = -TARGET_EFAULT;
11938 unlock_user(p, arg1, 0);
11939 unlock_user(n, arg2, 0);
11941 break;
11942 case TARGET_NR_fremovexattr:
11944 void *n;
11945 n = lock_user_string(arg2);
11946 if (n) {
11947 ret = get_errno(fremovexattr(arg1, n));
11948 } else {
11949 ret = -TARGET_EFAULT;
11951 unlock_user(n, arg2, 0);
11953 break;
11954 #endif
11955 #endif /* CONFIG_ATTR */
11956 #ifdef TARGET_NR_set_thread_area
11957 case TARGET_NR_set_thread_area:
11958 #if defined(TARGET_MIPS)
11959 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11960 ret = 0;
11961 break;
11962 #elif defined(TARGET_CRIS)
11963 if (arg1 & 0xff)
11964 ret = -TARGET_EINVAL;
11965 else {
11966 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11967 ret = 0;
11969 break;
11970 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11971 ret = do_set_thread_area(cpu_env, arg1);
11972 break;
11973 #elif defined(TARGET_M68K)
11975 TaskState *ts = cpu->opaque;
11976 ts->tp_value = arg1;
11977 ret = 0;
11978 break;
11980 #else
11981 goto unimplemented_nowarn;
11982 #endif
11983 #endif
11984 #ifdef TARGET_NR_get_thread_area
11985 case TARGET_NR_get_thread_area:
11986 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11987 ret = do_get_thread_area(cpu_env, arg1);
11988 break;
11989 #elif defined(TARGET_M68K)
11991 TaskState *ts = cpu->opaque;
11992 ret = ts->tp_value;
11993 break;
11995 #else
11996 goto unimplemented_nowarn;
11997 #endif
11998 #endif
11999 #ifdef TARGET_NR_getdomainname
12000 case TARGET_NR_getdomainname:
12001 goto unimplemented_nowarn;
12002 #endif
12004 #ifdef TARGET_NR_clock_settime
12005 case TARGET_NR_clock_settime:
12007 struct timespec ts;
12009 ret = target_to_host_timespec(&ts, arg2);
12010 if (!is_error(ret)) {
12011 ret = get_errno(clock_settime(arg1, &ts));
12013 break;
12015 #endif
12016 #ifdef TARGET_NR_clock_gettime
12017 case TARGET_NR_clock_gettime:
12019 struct timespec ts;
12020 ret = get_errno(clock_gettime(arg1, &ts));
12021 if (!is_error(ret)) {
12022 ret = host_to_target_timespec(arg2, &ts);
12024 break;
12026 #endif
12027 #ifdef TARGET_NR_clock_getres
12028 case TARGET_NR_clock_getres:
12030 struct timespec ts;
12031 ret = get_errno(clock_getres(arg1, &ts));
12032 if (!is_error(ret)) {
12033 host_to_target_timespec(arg2, &ts);
12035 break;
12037 #endif
12038 #ifdef TARGET_NR_clock_nanosleep
12039 case TARGET_NR_clock_nanosleep:
12041 struct timespec ts;
12042 target_to_host_timespec(&ts, arg3);
12043 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12044 &ts, arg4 ? &ts : NULL));
12045 if (arg4)
12046 host_to_target_timespec(arg4, &ts);
12048 #if defined(TARGET_PPC)
12049 /* clock_nanosleep is odd in that it returns positive errno values.
12050 * On PPC, CR0 bit 3 should be set in such a situation. */
12051 if (ret && ret != -TARGET_ERESTARTSYS) {
12052 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
12054 #endif
12055 break;
12057 #endif
12059 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12060 case TARGET_NR_set_tid_address:
12061 ret = get_errno(set_tid_address((int *)g2h(arg1)));
12062 break;
12063 #endif
12065 case TARGET_NR_tkill:
12066 ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12067 break;
12069 case TARGET_NR_tgkill:
12070 ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
12071 target_to_host_signal(arg3)));
12072 break;
12074 #ifdef TARGET_NR_set_robust_list
12075 case TARGET_NR_set_robust_list:
12076 case TARGET_NR_get_robust_list:
12077 /* The ABI for supporting robust futexes has userspace pass
12078 * the kernel a pointer to a linked list which is updated by
12079 * userspace after the syscall; the list is walked by the kernel
12080 * when the thread exits. Since the linked list in QEMU guest
12081 * memory isn't a valid linked list for the host and we have
12082 * no way to reliably intercept the thread-death event, we can't
12083 * support these. Silently return ENOSYS so that guest userspace
12084 * falls back to a non-robust futex implementation (which should
12085 * be OK except in the corner case of the guest crashing while
12086 * holding a mutex that is shared with another process via
12087 * shared memory).
12089 goto unimplemented_nowarn;
12090 #endif
12092 #if defined(TARGET_NR_utimensat)
12093 case TARGET_NR_utimensat:
12095 struct timespec *tsp, ts[2];
12096 if (!arg3) {
12097 tsp = NULL;
12098 } else {
12099 target_to_host_timespec(ts, arg3);
12100 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
12101 tsp = ts;
12103 if (!arg2)
12104 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12105 else {
12106 if (!(p = lock_user_string(arg2))) {
12107 ret = -TARGET_EFAULT;
12108 goto fail;
12110 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12111 unlock_user(p, arg2, 0);
12114 break;
12115 #endif
12116 case TARGET_NR_futex:
12117 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
12118 break;
12119 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12120 case TARGET_NR_inotify_init:
12121 ret = get_errno(sys_inotify_init());
12122 if (ret >= 0) {
12123 fd_trans_register(ret, &target_inotify_trans);
12125 break;
12126 #endif
12127 #ifdef CONFIG_INOTIFY1
12128 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12129 case TARGET_NR_inotify_init1:
12130 ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12131 fcntl_flags_tbl)));
12132 if (ret >= 0) {
12133 fd_trans_register(ret, &target_inotify_trans);
12135 break;
12136 #endif
12137 #endif
12138 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12139 case TARGET_NR_inotify_add_watch:
12140 p = lock_user_string(arg2);
12141 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12142 unlock_user(p, arg2, 0);
12143 break;
12144 #endif
12145 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12146 case TARGET_NR_inotify_rm_watch:
12147 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
12148 break;
12149 #endif
12151 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12152 case TARGET_NR_mq_open:
12154 struct mq_attr posix_mq_attr;
12155 struct mq_attr *pposix_mq_attr;
12156 int host_flags;
12158 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12159 pposix_mq_attr = NULL;
12160 if (arg4) {
12161 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12162 goto efault;
12164 pposix_mq_attr = &posix_mq_attr;
12166 p = lock_user_string(arg1 - 1);
12167 if (!p) {
12168 goto efault;
12170 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12171 unlock_user (p, arg1, 0);
12173 break;
12175 case TARGET_NR_mq_unlink:
12176 p = lock_user_string(arg1 - 1);
12177 if (!p) {
12178 ret = -TARGET_EFAULT;
12179 break;
12181 ret = get_errno(mq_unlink(p));
12182 unlock_user (p, arg1, 0);
12183 break;
12185 case TARGET_NR_mq_timedsend:
12187 struct timespec ts;
12189 p = lock_user (VERIFY_READ, arg2, arg3, 1);
12190 if (arg5 != 0) {
12191 target_to_host_timespec(&ts, arg5);
12192 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12193 host_to_target_timespec(arg5, &ts);
12194 } else {
12195 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12197 unlock_user (p, arg2, arg3);
12199 break;
12201 case TARGET_NR_mq_timedreceive:
12203 struct timespec ts;
12204 unsigned int prio;
12206 p = lock_user (VERIFY_READ, arg2, arg3, 1);
12207 if (arg5 != 0) {
12208 target_to_host_timespec(&ts, arg5);
12209 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12210 &prio, &ts));
12211 host_to_target_timespec(arg5, &ts);
12212 } else {
12213 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12214 &prio, NULL));
12216 unlock_user (p, arg2, arg3);
12217 if (arg4 != 0)
12218 put_user_u32(prio, arg4);
12220 break;
12222 /* Not implemented for now... */
12223 /* case TARGET_NR_mq_notify: */
12224 /* break; */
12226 case TARGET_NR_mq_getsetattr:
12228 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12229 ret = 0;
12230 if (arg2 != 0) {
12231 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12232 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12233 &posix_mq_attr_out));
12234 } else if (arg3 != 0) {
12235 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12237 if (ret == 0 && arg3 != 0) {
12238 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12241 break;
12242 #endif
12244 #ifdef CONFIG_SPLICE
12245 #ifdef TARGET_NR_tee
12246 case TARGET_NR_tee:
12248 ret = get_errno(tee(arg1,arg2,arg3,arg4));
12250 break;
12251 #endif
12252 #ifdef TARGET_NR_splice
12253 case TARGET_NR_splice:
12255 loff_t loff_in, loff_out;
12256 loff_t *ploff_in = NULL, *ploff_out = NULL;
12257 if (arg2) {
12258 if (get_user_u64(loff_in, arg2)) {
12259 goto efault;
12261 ploff_in = &loff_in;
12263 if (arg4) {
12264 if (get_user_u64(loff_out, arg4)) {
12265 goto efault;
12267 ploff_out = &loff_out;
12269 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12270 if (arg2) {
12271 if (put_user_u64(loff_in, arg2)) {
12272 goto efault;
12275 if (arg4) {
12276 if (put_user_u64(loff_out, arg4)) {
12277 goto efault;
12281 break;
12282 #endif
12283 #ifdef TARGET_NR_vmsplice
12284 case TARGET_NR_vmsplice:
12286 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12287 if (vec != NULL) {
12288 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12289 unlock_iovec(vec, arg2, arg3, 0);
12290 } else {
12291 ret = -host_to_target_errno(errno);
12294 break;
12295 #endif
12296 #endif /* CONFIG_SPLICE */
12297 #ifdef CONFIG_EVENTFD
12298 #if defined(TARGET_NR_eventfd)
12299 case TARGET_NR_eventfd:
12300 ret = get_errno(eventfd(arg1, 0));
12301 if (ret >= 0) {
12302 fd_trans_register(ret, &target_eventfd_trans);
12304 break;
12305 #endif
12306 #if defined(TARGET_NR_eventfd2)
12307 case TARGET_NR_eventfd2:
12309 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
12310 if (arg2 & TARGET_O_NONBLOCK) {
12311 host_flags |= O_NONBLOCK;
12313 if (arg2 & TARGET_O_CLOEXEC) {
12314 host_flags |= O_CLOEXEC;
12316 ret = get_errno(eventfd(arg1, host_flags));
12317 if (ret >= 0) {
12318 fd_trans_register(ret, &target_eventfd_trans);
12320 break;
12322 #endif
12323 #endif /* CONFIG_EVENTFD */
12324 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12325 case TARGET_NR_fallocate:
12326 #if TARGET_ABI_BITS == 32
12327 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12328 target_offset64(arg5, arg6)));
12329 #else
12330 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12331 #endif
12332 break;
12333 #endif
12334 #if defined(CONFIG_SYNC_FILE_RANGE)
12335 #if defined(TARGET_NR_sync_file_range)
12336 case TARGET_NR_sync_file_range:
12337 #if TARGET_ABI_BITS == 32
12338 #if defined(TARGET_MIPS)
12339 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12340 target_offset64(arg5, arg6), arg7));
12341 #else
12342 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12343 target_offset64(arg4, arg5), arg6));
12344 #endif /* !TARGET_MIPS */
12345 #else
12346 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12347 #endif
12348 break;
12349 #endif
12350 #if defined(TARGET_NR_sync_file_range2)
12351 case TARGET_NR_sync_file_range2:
12352 /* This is like sync_file_range but the arguments are reordered */
12353 #if TARGET_ABI_BITS == 32
12354 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12355 target_offset64(arg5, arg6), arg2));
12356 #else
12357 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12358 #endif
12359 break;
12360 #endif
12361 #endif
12362 #if defined(TARGET_NR_signalfd4)
12363 case TARGET_NR_signalfd4:
12364 ret = do_signalfd4(arg1, arg2, arg4);
12365 break;
12366 #endif
12367 #if defined(TARGET_NR_signalfd)
12368 case TARGET_NR_signalfd:
12369 ret = do_signalfd4(arg1, arg2, 0);
12370 break;
12371 #endif
12372 #if defined(CONFIG_EPOLL)
12373 #if defined(TARGET_NR_epoll_create)
12374 case TARGET_NR_epoll_create:
12375 ret = get_errno(epoll_create(arg1));
12376 break;
12377 #endif
12378 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12379 case TARGET_NR_epoll_create1:
12380 ret = get_errno(epoll_create1(arg1));
12381 break;
12382 #endif
12383 #if defined(TARGET_NR_epoll_ctl)
12384 case TARGET_NR_epoll_ctl:
12386 struct epoll_event ep;
12387 struct epoll_event *epp = 0;
12388 if (arg4) {
12389 struct target_epoll_event *target_ep;
12390 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12391 goto efault;
12393 ep.events = tswap32(target_ep->events);
12394 /* The epoll_data_t union is just opaque data to the kernel,
12395 * so we transfer all 64 bits across and need not worry what
12396 * actual data type it is.
12398 ep.data.u64 = tswap64(target_ep->data.u64);
12399 unlock_user_struct(target_ep, arg4, 0);
12400 epp = &ep;
12402 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12403 break;
12405 #endif
12407 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12408 #if defined(TARGET_NR_epoll_wait)
12409 case TARGET_NR_epoll_wait:
12410 #endif
12411 #if defined(TARGET_NR_epoll_pwait)
12412 case TARGET_NR_epoll_pwait:
12413 #endif
12415 struct target_epoll_event *target_ep;
12416 struct epoll_event *ep;
12417 int epfd = arg1;
12418 int maxevents = arg3;
12419 int timeout = arg4;
12421 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12422 ret = -TARGET_EINVAL;
12423 break;
12426 target_ep = lock_user(VERIFY_WRITE, arg2,
12427 maxevents * sizeof(struct target_epoll_event), 1);
12428 if (!target_ep) {
12429 goto efault;
12432 ep = g_try_new(struct epoll_event, maxevents);
12433 if (!ep) {
12434 unlock_user(target_ep, arg2, 0);
12435 ret = -TARGET_ENOMEM;
12436 break;
12439 switch (num) {
12440 #if defined(TARGET_NR_epoll_pwait)
12441 case TARGET_NR_epoll_pwait:
12443 target_sigset_t *target_set;
12444 sigset_t _set, *set = &_set;
12446 if (arg5) {
12447 if (arg6 != sizeof(target_sigset_t)) {
12448 ret = -TARGET_EINVAL;
12449 break;
12452 target_set = lock_user(VERIFY_READ, arg5,
12453 sizeof(target_sigset_t), 1);
12454 if (!target_set) {
12455 ret = -TARGET_EFAULT;
12456 break;
12458 target_to_host_sigset(set, target_set);
12459 unlock_user(target_set, arg5, 0);
12460 } else {
12461 set = NULL;
12464 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12465 set, SIGSET_T_SIZE));
12466 break;
12468 #endif
12469 #if defined(TARGET_NR_epoll_wait)
12470 case TARGET_NR_epoll_wait:
12471 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12472 NULL, 0));
12473 break;
12474 #endif
12475 default:
12476 ret = -TARGET_ENOSYS;
12478 if (!is_error(ret)) {
12479 int i;
12480 for (i = 0; i < ret; i++) {
12481 target_ep[i].events = tswap32(ep[i].events);
12482 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12484 unlock_user(target_ep, arg2,
12485 ret * sizeof(struct target_epoll_event));
12486 } else {
12487 unlock_user(target_ep, arg2, 0);
12489 g_free(ep);
12490 break;
12492 #endif
12493 #endif
12494 #ifdef TARGET_NR_prlimit64
12495 case TARGET_NR_prlimit64:
12497 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12498 struct target_rlimit64 *target_rnew, *target_rold;
12499 struct host_rlimit64 rnew, rold, *rnewp = 0;
12500 int resource = target_to_host_resource(arg2);
12501 if (arg3) {
12502 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12503 goto efault;
12505 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12506 rnew.rlim_max = tswap64(target_rnew->rlim_max);
12507 unlock_user_struct(target_rnew, arg3, 0);
12508 rnewp = &rnew;
12511 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12512 if (!is_error(ret) && arg4) {
12513 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12514 goto efault;
12516 target_rold->rlim_cur = tswap64(rold.rlim_cur);
12517 target_rold->rlim_max = tswap64(rold.rlim_max);
12518 unlock_user_struct(target_rold, arg4, 1);
12520 break;
12522 #endif
12523 #ifdef TARGET_NR_gethostname
12524 case TARGET_NR_gethostname:
12526 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12527 if (name) {
12528 ret = get_errno(gethostname(name, arg2));
12529 unlock_user(name, arg1, arg2);
12530 } else {
12531 ret = -TARGET_EFAULT;
12533 break;
12535 #endif
12536 #ifdef TARGET_NR_atomic_cmpxchg_32
12537 case TARGET_NR_atomic_cmpxchg_32:
12539 /* should use start_exclusive from main.c */
12540 abi_ulong mem_value;
12541 if (get_user_u32(mem_value, arg6)) {
12542 target_siginfo_t info;
12543 info.si_signo = SIGSEGV;
12544 info.si_errno = 0;
12545 info.si_code = TARGET_SEGV_MAPERR;
12546 info._sifields._sigfault._addr = arg6;
12547 queue_signal((CPUArchState *)cpu_env, info.si_signo,
12548 QEMU_SI_FAULT, &info);
12549 ret = 0xdeadbeef;
12552 if (mem_value == arg2)
12553 put_user_u32(arg1, arg6);
12554 ret = mem_value;
12555 break;
12557 #endif
12558 #ifdef TARGET_NR_atomic_barrier
12559 case TARGET_NR_atomic_barrier:
12561 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
12562 ret = 0;
12563 break;
12565 #endif
12567 #ifdef TARGET_NR_timer_create
12568 case TARGET_NR_timer_create:
12570 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12572 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12574 int clkid = arg1;
12575 int timer_index = next_free_host_timer();
12577 if (timer_index < 0) {
12578 ret = -TARGET_EAGAIN;
12579 } else {
12580 timer_t *phtimer = g_posix_timers + timer_index;
12582 if (arg2) {
12583 phost_sevp = &host_sevp;
12584 ret = target_to_host_sigevent(phost_sevp, arg2);
12585 if (ret != 0) {
12586 break;
12590 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12591 if (ret) {
12592 phtimer = NULL;
12593 } else {
12594 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12595 goto efault;
12599 break;
12601 #endif
12603 #ifdef TARGET_NR_timer_settime
12604 case TARGET_NR_timer_settime:
12606 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12607 * struct itimerspec * old_value */
12608 target_timer_t timerid = get_timer_id(arg1);
12610 if (timerid < 0) {
12611 ret = timerid;
12612 } else if (arg3 == 0) {
12613 ret = -TARGET_EINVAL;
12614 } else {
12615 timer_t htimer = g_posix_timers[timerid];
12616 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12618 if (target_to_host_itimerspec(&hspec_new, arg3)) {
12619 goto efault;
12621 ret = get_errno(
12622 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12623 if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12624 goto efault;
12627 break;
12629 #endif
12631 #ifdef TARGET_NR_timer_gettime
12632 case TARGET_NR_timer_gettime:
12634 /* args: timer_t timerid, struct itimerspec *curr_value */
12635 target_timer_t timerid = get_timer_id(arg1);
12637 if (timerid < 0) {
12638 ret = timerid;
12639 } else if (!arg2) {
12640 ret = -TARGET_EFAULT;
12641 } else {
12642 timer_t htimer = g_posix_timers[timerid];
12643 struct itimerspec hspec;
12644 ret = get_errno(timer_gettime(htimer, &hspec));
12646 if (host_to_target_itimerspec(arg2, &hspec)) {
12647 ret = -TARGET_EFAULT;
12650 break;
12652 #endif
12654 #ifdef TARGET_NR_timer_getoverrun
12655 case TARGET_NR_timer_getoverrun:
12657 /* args: timer_t timerid */
12658 target_timer_t timerid = get_timer_id(arg1);
12660 if (timerid < 0) {
12661 ret = timerid;
12662 } else {
12663 timer_t htimer = g_posix_timers[timerid];
12664 ret = get_errno(timer_getoverrun(htimer));
12666 fd_trans_unregister(ret);
12667 break;
12669 #endif
12671 #ifdef TARGET_NR_timer_delete
12672 case TARGET_NR_timer_delete:
12674 /* args: timer_t timerid */
12675 target_timer_t timerid = get_timer_id(arg1);
12677 if (timerid < 0) {
12678 ret = timerid;
12679 } else {
12680 timer_t htimer = g_posix_timers[timerid];
12681 ret = get_errno(timer_delete(htimer));
12682 g_posix_timers[timerid] = 0;
12684 break;
12686 #endif
12688 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12689 case TARGET_NR_timerfd_create:
12690 ret = get_errno(timerfd_create(arg1,
12691 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12692 break;
12693 #endif
12695 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12696 case TARGET_NR_timerfd_gettime:
12698 struct itimerspec its_curr;
12700 ret = get_errno(timerfd_gettime(arg1, &its_curr));
12702 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12703 goto efault;
12706 break;
12707 #endif
12709 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12710 case TARGET_NR_timerfd_settime:
12712 struct itimerspec its_new, its_old, *p_new;
12714 if (arg3) {
12715 if (target_to_host_itimerspec(&its_new, arg3)) {
12716 goto efault;
12718 p_new = &its_new;
12719 } else {
12720 p_new = NULL;
12723 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12725 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12726 goto efault;
12729 break;
12730 #endif
12732 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12733 case TARGET_NR_ioprio_get:
12734 ret = get_errno(ioprio_get(arg1, arg2));
12735 break;
12736 #endif
12738 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12739 case TARGET_NR_ioprio_set:
12740 ret = get_errno(ioprio_set(arg1, arg2, arg3));
12741 break;
12742 #endif
12744 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12745 case TARGET_NR_setns:
12746 ret = get_errno(setns(arg1, arg2));
12747 break;
12748 #endif
12749 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12750 case TARGET_NR_unshare:
12751 ret = get_errno(unshare(arg1));
12752 break;
12753 #endif
12754 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12755 case TARGET_NR_kcmp:
12756 ret = get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12757 break;
12758 #endif
12760 default:
12761 unimplemented:
12762 gemu_log("qemu: Unsupported syscall: %d\n", num);
12763 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
12764 unimplemented_nowarn:
12765 #endif
12766 ret = -TARGET_ENOSYS;
12767 break;
12769 fail:
12770 #ifdef DEBUG
12771 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
12772 #endif
12773 if(do_strace)
12774 print_syscall_ret(num, ret);
12775 trace_guest_user_syscall_ret(cpu, num, ret);
12776 return ret;
12777 efault:
12778 ret = -TARGET_EFAULT;
12779 goto fail;