Merge remote-tracking branch 'remotes/vivier2/tags/linux-user-for-2.12-pull-request...
[qemu/ar7.git] / linux-user / syscall.c
blobf7ebe6233b1f473ef44e0e24fccb46e4002ca519
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/uio.h>
42 #include <poll.h>
43 #include <sys/times.h>
44 #include <sys/shm.h>
45 #include <sys/sem.h>
46 #include <sys/statfs.h>
47 #include <utime.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef TARGET_GPROF
63 #include <sys/gmon.h>
64 #endif
65 #ifdef CONFIG_EVENTFD
66 #include <sys/eventfd.h>
67 #endif
68 #ifdef CONFIG_EPOLL
69 #include <sys/epoll.h>
70 #endif
71 #ifdef CONFIG_ATTR
72 #include "qemu/xattr.h"
73 #endif
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
76 #endif
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #include <linux/vt.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #include <linux/if_bridge.h>
108 #endif
109 #include <linux/audit.h>
110 #include "linux_loop.h"
111 #include "uname.h"
113 #include "qemu.h"
115 #ifndef CLONE_IO
116 #define CLONE_IO 0x80000000 /* Clone io context */
117 #endif
119 /* We can't directly call the host clone syscall, because this will
120 * badly confuse libc (breaking mutexes, for example). So we must
121 * divide clone flags into:
122 * * flag combinations that look like pthread_create()
123 * * flag combinations that look like fork()
124 * * flags we can implement within QEMU itself
125 * * flags we can't support and will return an error for
127 /* For thread creation, all these flags must be present; for
128 * fork, none must be present.
130 #define CLONE_THREAD_FLAGS \
131 (CLONE_VM | CLONE_FS | CLONE_FILES | \
132 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
134 /* These flags are ignored:
135 * CLONE_DETACHED is now ignored by the kernel;
136 * CLONE_IO is just an optimisation hint to the I/O scheduler
138 #define CLONE_IGNORED_FLAGS \
139 (CLONE_DETACHED | CLONE_IO)
141 /* Flags for fork which we can implement within QEMU itself */
142 #define CLONE_OPTIONAL_FORK_FLAGS \
143 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
144 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
146 /* Flags for thread creation which we can implement within QEMU itself */
147 #define CLONE_OPTIONAL_THREAD_FLAGS \
148 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
149 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
151 #define CLONE_INVALID_FORK_FLAGS \
152 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
154 #define CLONE_INVALID_THREAD_FLAGS \
155 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
156 CLONE_IGNORED_FLAGS))
158 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
159 * have almost all been allocated. We cannot support any of
160 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
161 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
162 * The checks against the invalid thread masks above will catch these.
163 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
166 //#define DEBUG
167 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
168 * once. This exercises the codepaths for restart.
170 //#define DEBUG_ERESTARTSYS
172 //#include <linux/msdos_fs.h>
173 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
174 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/* Redefine the _syscallN() wrapper generators: each expands to a small
 * static function that traps straight into the host syscall() with the
 * given argument list, bypassing any libc wrapper.
 */
#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

#define _syscall0(type, name) \
static type name(void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type, name, type1, arg1) \
static type name(type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type, name, type1, arg1, type2, arg2) \
static type name(type1 arg1, type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
                  type4, arg4) \
static type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
                  type4, arg4, type5, arg5) \
static type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
                 type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
                  type4, arg4, type5, arg5, type6, arg6) \
static type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
                 type5 arg5, type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
231 #define __NR_sys_uname __NR_uname
232 #define __NR_sys_getcwd1 __NR_getcwd
233 #define __NR_sys_getdents __NR_getdents
234 #define __NR_sys_getdents64 __NR_getdents64
235 #define __NR_sys_getpriority __NR_getpriority
236 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
237 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
238 #define __NR_sys_syslog __NR_syslog
239 #define __NR_sys_futex __NR_futex
240 #define __NR_sys_inotify_init __NR_inotify_init
241 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
242 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
244 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
245 #define __NR__llseek __NR_lseek
246 #endif
248 /* Newer kernel ports have llseek() instead of _llseek() */
249 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
250 #define TARGET_NR__llseek TARGET_NR_llseek
251 #endif
#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
 * errno.
 */
static int gettid(void)
{
    return -ENOSYS;
}
#endif
262 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
263 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
264 #endif
265 #if !defined(__NR_getdents) || \
266 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
267 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
268 #endif
269 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
270 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
271 loff_t *, res, uint, wh);
272 #endif
273 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
274 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
275 siginfo_t *, uinfo)
276 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
277 #ifdef __NR_exit_group
278 _syscall1(int,exit_group,int,error_code)
279 #endif
280 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
281 _syscall1(int,set_tid_address,int *,tidptr)
282 #endif
283 #if defined(TARGET_NR_futex) && defined(__NR_futex)
284 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
285 const struct timespec *,timeout,int *,uaddr2,int,val3)
286 #endif
287 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
288 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
289 unsigned long *, user_mask_ptr);
290 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
291 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
292 unsigned long *, user_mask_ptr);
293 #define __NR_sys_getcpu __NR_getcpu
294 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
295 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
296 void *, arg);
297 _syscall2(int, capget, struct __user_cap_header_struct *, header,
298 struct __user_cap_data_struct *, data);
299 _syscall2(int, capset, struct __user_cap_header_struct *, header,
300 struct __user_cap_data_struct *, data);
301 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
302 _syscall2(int, ioprio_get, int, which, int, who)
303 #endif
304 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
305 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
306 #endif
307 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
308 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
309 #endif
311 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
312 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
313 unsigned long, idx1, unsigned long, idx2)
314 #endif
316 static bitmask_transtbl fcntl_flags_tbl[] = {
317 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
318 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
319 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
320 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
321 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
322 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
323 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
324 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
325 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
326 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
327 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
328 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
329 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
330 #if defined(O_DIRECT)
331 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
332 #endif
333 #if defined(O_NOATIME)
334 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
335 #endif
336 #if defined(O_CLOEXEC)
337 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
338 #endif
339 #if defined(O_PATH)
340 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
341 #endif
342 #if defined(O_TMPFILE)
343 { TARGET_O_TMPFILE, TARGET_O_TMPFILE, O_TMPFILE, O_TMPFILE },
344 #endif
345 /* Don't terminate the list prematurely on 64-bit host+guest. */
346 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
347 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
348 #endif
349 { 0, 0, 0, 0 }
352 enum {
353 QEMU_IFLA_BR_UNSPEC,
354 QEMU_IFLA_BR_FORWARD_DELAY,
355 QEMU_IFLA_BR_HELLO_TIME,
356 QEMU_IFLA_BR_MAX_AGE,
357 QEMU_IFLA_BR_AGEING_TIME,
358 QEMU_IFLA_BR_STP_STATE,
359 QEMU_IFLA_BR_PRIORITY,
360 QEMU_IFLA_BR_VLAN_FILTERING,
361 QEMU_IFLA_BR_VLAN_PROTOCOL,
362 QEMU_IFLA_BR_GROUP_FWD_MASK,
363 QEMU_IFLA_BR_ROOT_ID,
364 QEMU_IFLA_BR_BRIDGE_ID,
365 QEMU_IFLA_BR_ROOT_PORT,
366 QEMU_IFLA_BR_ROOT_PATH_COST,
367 QEMU_IFLA_BR_TOPOLOGY_CHANGE,
368 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
369 QEMU_IFLA_BR_HELLO_TIMER,
370 QEMU_IFLA_BR_TCN_TIMER,
371 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
372 QEMU_IFLA_BR_GC_TIMER,
373 QEMU_IFLA_BR_GROUP_ADDR,
374 QEMU_IFLA_BR_FDB_FLUSH,
375 QEMU_IFLA_BR_MCAST_ROUTER,
376 QEMU_IFLA_BR_MCAST_SNOOPING,
377 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
378 QEMU_IFLA_BR_MCAST_QUERIER,
379 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
380 QEMU_IFLA_BR_MCAST_HASH_MAX,
381 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
382 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
383 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
384 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
385 QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
386 QEMU_IFLA_BR_MCAST_QUERY_INTVL,
387 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
388 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
389 QEMU_IFLA_BR_NF_CALL_IPTABLES,
390 QEMU_IFLA_BR_NF_CALL_IP6TABLES,
391 QEMU_IFLA_BR_NF_CALL_ARPTABLES,
392 QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
393 QEMU_IFLA_BR_PAD,
394 QEMU_IFLA_BR_VLAN_STATS_ENABLED,
395 QEMU_IFLA_BR_MCAST_STATS_ENABLED,
396 QEMU___IFLA_BR_MAX,
399 enum {
400 QEMU_IFLA_UNSPEC,
401 QEMU_IFLA_ADDRESS,
402 QEMU_IFLA_BROADCAST,
403 QEMU_IFLA_IFNAME,
404 QEMU_IFLA_MTU,
405 QEMU_IFLA_LINK,
406 QEMU_IFLA_QDISC,
407 QEMU_IFLA_STATS,
408 QEMU_IFLA_COST,
409 QEMU_IFLA_PRIORITY,
410 QEMU_IFLA_MASTER,
411 QEMU_IFLA_WIRELESS,
412 QEMU_IFLA_PROTINFO,
413 QEMU_IFLA_TXQLEN,
414 QEMU_IFLA_MAP,
415 QEMU_IFLA_WEIGHT,
416 QEMU_IFLA_OPERSTATE,
417 QEMU_IFLA_LINKMODE,
418 QEMU_IFLA_LINKINFO,
419 QEMU_IFLA_NET_NS_PID,
420 QEMU_IFLA_IFALIAS,
421 QEMU_IFLA_NUM_VF,
422 QEMU_IFLA_VFINFO_LIST,
423 QEMU_IFLA_STATS64,
424 QEMU_IFLA_VF_PORTS,
425 QEMU_IFLA_PORT_SELF,
426 QEMU_IFLA_AF_SPEC,
427 QEMU_IFLA_GROUP,
428 QEMU_IFLA_NET_NS_FD,
429 QEMU_IFLA_EXT_MASK,
430 QEMU_IFLA_PROMISCUITY,
431 QEMU_IFLA_NUM_TX_QUEUES,
432 QEMU_IFLA_NUM_RX_QUEUES,
433 QEMU_IFLA_CARRIER,
434 QEMU_IFLA_PHYS_PORT_ID,
435 QEMU_IFLA_CARRIER_CHANGES,
436 QEMU_IFLA_PHYS_SWITCH_ID,
437 QEMU_IFLA_LINK_NETNSID,
438 QEMU_IFLA_PHYS_PORT_NAME,
439 QEMU_IFLA_PROTO_DOWN,
440 QEMU_IFLA_GSO_MAX_SEGS,
441 QEMU_IFLA_GSO_MAX_SIZE,
442 QEMU_IFLA_PAD,
443 QEMU_IFLA_XDP,
444 QEMU___IFLA_MAX
447 enum {
448 QEMU_IFLA_BRPORT_UNSPEC,
449 QEMU_IFLA_BRPORT_STATE,
450 QEMU_IFLA_BRPORT_PRIORITY,
451 QEMU_IFLA_BRPORT_COST,
452 QEMU_IFLA_BRPORT_MODE,
453 QEMU_IFLA_BRPORT_GUARD,
454 QEMU_IFLA_BRPORT_PROTECT,
455 QEMU_IFLA_BRPORT_FAST_LEAVE,
456 QEMU_IFLA_BRPORT_LEARNING,
457 QEMU_IFLA_BRPORT_UNICAST_FLOOD,
458 QEMU_IFLA_BRPORT_PROXYARP,
459 QEMU_IFLA_BRPORT_LEARNING_SYNC,
460 QEMU_IFLA_BRPORT_PROXYARP_WIFI,
461 QEMU_IFLA_BRPORT_ROOT_ID,
462 QEMU_IFLA_BRPORT_BRIDGE_ID,
463 QEMU_IFLA_BRPORT_DESIGNATED_PORT,
464 QEMU_IFLA_BRPORT_DESIGNATED_COST,
465 QEMU_IFLA_BRPORT_ID,
466 QEMU_IFLA_BRPORT_NO,
467 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
468 QEMU_IFLA_BRPORT_CONFIG_PENDING,
469 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
470 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
471 QEMU_IFLA_BRPORT_HOLD_TIMER,
472 QEMU_IFLA_BRPORT_FLUSH,
473 QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
474 QEMU_IFLA_BRPORT_PAD,
475 QEMU___IFLA_BRPORT_MAX
478 enum {
479 QEMU_IFLA_INFO_UNSPEC,
480 QEMU_IFLA_INFO_KIND,
481 QEMU_IFLA_INFO_DATA,
482 QEMU_IFLA_INFO_XSTATS,
483 QEMU_IFLA_INFO_SLAVE_KIND,
484 QEMU_IFLA_INFO_SLAVE_DATA,
485 QEMU___IFLA_INFO_MAX,
488 enum {
489 QEMU_IFLA_INET_UNSPEC,
490 QEMU_IFLA_INET_CONF,
491 QEMU___IFLA_INET_MAX,
494 enum {
495 QEMU_IFLA_INET6_UNSPEC,
496 QEMU_IFLA_INET6_FLAGS,
497 QEMU_IFLA_INET6_CONF,
498 QEMU_IFLA_INET6_STATS,
499 QEMU_IFLA_INET6_MCAST,
500 QEMU_IFLA_INET6_CACHEINFO,
501 QEMU_IFLA_INET6_ICMP6STATS,
502 QEMU_IFLA_INET6_TOKEN,
503 QEMU_IFLA_INET6_ADDR_GEN_MODE,
504 QEMU___IFLA_INET6_MAX
507 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
508 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
509 typedef struct TargetFdTrans {
510 TargetFdDataFunc host_to_target_data;
511 TargetFdDataFunc target_to_host_data;
512 TargetFdAddrFunc target_to_host_addr;
513 } TargetFdTrans;
515 static TargetFdTrans **target_fd_trans;
517 static unsigned int target_fd_max;
519 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
521 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
522 return target_fd_trans[fd]->target_to_host_data;
524 return NULL;
527 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
529 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
530 return target_fd_trans[fd]->host_to_target_data;
532 return NULL;
535 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
537 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
538 return target_fd_trans[fd]->target_to_host_addr;
540 return NULL;
543 static void fd_trans_register(int fd, TargetFdTrans *trans)
545 unsigned int oldmax;
547 if (fd >= target_fd_max) {
548 oldmax = target_fd_max;
549 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
550 target_fd_trans = g_renew(TargetFdTrans *,
551 target_fd_trans, target_fd_max);
552 memset((void *)(target_fd_trans + oldmax), 0,
553 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
555 target_fd_trans[fd] = trans;
558 static void fd_trans_unregister(int fd)
560 if (fd >= 0 && fd < target_fd_max) {
561 target_fd_trans[fd] = NULL;
565 static void fd_trans_dup(int oldfd, int newfd)
567 fd_trans_unregister(newfd);
568 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
569 fd_trans_register(newfd, target_fd_trans[oldfd]);
/* Emulate the getcwd syscall's return convention on top of libc getcwd():
 * on success return the string length including the trailing NUL, on
 * failure return -1 with errno already set by getcwd().
 */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return -1;
    }
    return strlen(buf) + 1;
}
#ifdef TARGET_NR_utimensat
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int, sys_utimensat, int, dirfd, const char *, pathname,
          const struct timespec *, tsp, int, flags)
#else
/* Host kernel headers lack utimensat: report it as unimplemented. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
/* Without host renameat2 we can only emulate the flags == 0 case via
 * plain renameat(); any flag bits get ENOSYS.
 */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper so the syscall dispatch can call a uniform sys_* name. */
static int sys_inotify_init(void)
{
    return inotify_init();
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};

_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, };

/* Claim the first unused slot in g_posix_timers and return its index,
 * or -1 if all slots are taken. The slot is marked busy with a dummy
 * non-zero value until the real timer handle is stored.
 */
static inline int next_free_host_timer(void)
{
    int k;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env, int num)
{
    return ((CPUARMState *)cpu_env)->eabi == 1;
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_SH4)
/* SH4 doesn't align register pairs, except for p{read,write}64 */
static inline int regpairs_aligned(void *cpu_env, int num)
{
    switch (num) {
    case TARGET_NR_pread64:
    case TARGET_NR_pwrite64:
        return 1;
    default:
        return 0;
    }
}
#else
static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
#endif
716 #define ERRNO_TABLE_SIZE 1200
718 /* target_to_host_errno_table[] is initialized from
719 * host_to_target_errno_table[] in syscall_init(). */
720 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
724 * This list is the union of errno values overridden in asm-<arch>/errno.h
725 * minus the errnos that are not actually generic to all archs.
727 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
728 [EAGAIN] = TARGET_EAGAIN,
729 [EIDRM] = TARGET_EIDRM,
730 [ECHRNG] = TARGET_ECHRNG,
731 [EL2NSYNC] = TARGET_EL2NSYNC,
732 [EL3HLT] = TARGET_EL3HLT,
733 [EL3RST] = TARGET_EL3RST,
734 [ELNRNG] = TARGET_ELNRNG,
735 [EUNATCH] = TARGET_EUNATCH,
736 [ENOCSI] = TARGET_ENOCSI,
737 [EL2HLT] = TARGET_EL2HLT,
738 [EDEADLK] = TARGET_EDEADLK,
739 [ENOLCK] = TARGET_ENOLCK,
740 [EBADE] = TARGET_EBADE,
741 [EBADR] = TARGET_EBADR,
742 [EXFULL] = TARGET_EXFULL,
743 [ENOANO] = TARGET_ENOANO,
744 [EBADRQC] = TARGET_EBADRQC,
745 [EBADSLT] = TARGET_EBADSLT,
746 [EBFONT] = TARGET_EBFONT,
747 [ENOSTR] = TARGET_ENOSTR,
748 [ENODATA] = TARGET_ENODATA,
749 [ETIME] = TARGET_ETIME,
750 [ENOSR] = TARGET_ENOSR,
751 [ENONET] = TARGET_ENONET,
752 [ENOPKG] = TARGET_ENOPKG,
753 [EREMOTE] = TARGET_EREMOTE,
754 [ENOLINK] = TARGET_ENOLINK,
755 [EADV] = TARGET_EADV,
756 [ESRMNT] = TARGET_ESRMNT,
757 [ECOMM] = TARGET_ECOMM,
758 [EPROTO] = TARGET_EPROTO,
759 [EDOTDOT] = TARGET_EDOTDOT,
760 [EMULTIHOP] = TARGET_EMULTIHOP,
761 [EBADMSG] = TARGET_EBADMSG,
762 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
763 [EOVERFLOW] = TARGET_EOVERFLOW,
764 [ENOTUNIQ] = TARGET_ENOTUNIQ,
765 [EBADFD] = TARGET_EBADFD,
766 [EREMCHG] = TARGET_EREMCHG,
767 [ELIBACC] = TARGET_ELIBACC,
768 [ELIBBAD] = TARGET_ELIBBAD,
769 [ELIBSCN] = TARGET_ELIBSCN,
770 [ELIBMAX] = TARGET_ELIBMAX,
771 [ELIBEXEC] = TARGET_ELIBEXEC,
772 [EILSEQ] = TARGET_EILSEQ,
773 [ENOSYS] = TARGET_ENOSYS,
774 [ELOOP] = TARGET_ELOOP,
775 [ERESTART] = TARGET_ERESTART,
776 [ESTRPIPE] = TARGET_ESTRPIPE,
777 [ENOTEMPTY] = TARGET_ENOTEMPTY,
778 [EUSERS] = TARGET_EUSERS,
779 [ENOTSOCK] = TARGET_ENOTSOCK,
780 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
781 [EMSGSIZE] = TARGET_EMSGSIZE,
782 [EPROTOTYPE] = TARGET_EPROTOTYPE,
783 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
784 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
785 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
786 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
787 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
788 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
789 [EADDRINUSE] = TARGET_EADDRINUSE,
790 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
791 [ENETDOWN] = TARGET_ENETDOWN,
792 [ENETUNREACH] = TARGET_ENETUNREACH,
793 [ENETRESET] = TARGET_ENETRESET,
794 [ECONNABORTED] = TARGET_ECONNABORTED,
795 [ECONNRESET] = TARGET_ECONNRESET,
796 [ENOBUFS] = TARGET_ENOBUFS,
797 [EISCONN] = TARGET_EISCONN,
798 [ENOTCONN] = TARGET_ENOTCONN,
799 [EUCLEAN] = TARGET_EUCLEAN,
800 [ENOTNAM] = TARGET_ENOTNAM,
801 [ENAVAIL] = TARGET_ENAVAIL,
802 [EISNAM] = TARGET_EISNAM,
803 [EREMOTEIO] = TARGET_EREMOTEIO,
804 [EDQUOT] = TARGET_EDQUOT,
805 [ESHUTDOWN] = TARGET_ESHUTDOWN,
806 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
807 [ETIMEDOUT] = TARGET_ETIMEDOUT,
808 [ECONNREFUSED] = TARGET_ECONNREFUSED,
809 [EHOSTDOWN] = TARGET_EHOSTDOWN,
810 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
811 [EALREADY] = TARGET_EALREADY,
812 [EINPROGRESS] = TARGET_EINPROGRESS,
813 [ESTALE] = TARGET_ESTALE,
814 [ECANCELED] = TARGET_ECANCELED,
815 [ENOMEDIUM] = TARGET_ENOMEDIUM,
816 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
817 #ifdef ENOKEY
818 [ENOKEY] = TARGET_ENOKEY,
819 #endif
820 #ifdef EKEYEXPIRED
821 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
822 #endif
823 #ifdef EKEYREVOKED
824 [EKEYREVOKED] = TARGET_EKEYREVOKED,
825 #endif
826 #ifdef EKEYREJECTED
827 [EKEYREJECTED] = TARGET_EKEYREJECTED,
828 #endif
829 #ifdef EOWNERDEAD
830 [EOWNERDEAD] = TARGET_EOWNERDEAD,
831 #endif
832 #ifdef ENOTRECOVERABLE
833 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
834 #endif
835 #ifdef ENOMSG
836 [ENOMSG] = TARGET_ENOMSG,
837 #endif
838 #ifdef ERKFILL
839 [ERFKILL] = TARGET_ERFKILL,
840 #endif
841 #ifdef EHWPOISON
842 [EHWPOISON] = TARGET_EHWPOISON,
843 #endif
846 static inline int host_to_target_errno(int err)
848 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
849 host_to_target_errno_table[err]) {
850 return host_to_target_errno_table[err];
852 return err;
855 static inline int target_to_host_errno(int err)
857 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
858 target_to_host_errno_table[err]) {
859 return target_to_host_errno_table[err];
861 return err;
864 static inline abi_long get_errno(abi_long ret)
866 if (ret == -1)
867 return -host_to_target_errno(errno);
868 else
869 return ret;
872 static inline int is_error(abi_long ret)
874 return (abi_ulong)ret >= (abi_ulong)(-4096);
877 const char *target_strerror(int err)
879 if (err == TARGET_ERESTARTSYS) {
880 return "To be restarted";
882 if (err == TARGET_QEMU_ESIGRETURN) {
883 return "Successful exit from sigreturn";
886 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
887 return NULL;
889 return strerror(target_to_host_errno(err));
/* safe_syscallN() generators: like _syscallN() but routed through
 * safe_syscall(), which guarantees correct interaction with guest signal
 * delivery (the syscall is only entered if no signal is pending).
 */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
                      type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
                      type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
                        type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
                      type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
                        type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
939 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
940 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
941 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
942 int, flags, mode_t, mode)
943 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
944 struct rusage *, rusage)
945 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
946 int, options, struct rusage *, rusage)
947 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
948 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
949 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
950 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
951 struct timespec *, tsp, const sigset_t *, sigmask,
952 size_t, sigsetsize)
953 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
954 int, maxevents, int, timeout, const sigset_t *, sigmask,
955 size_t, sigsetsize)
956 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
957 const struct timespec *,timeout,int *,uaddr2,int,val3)
958 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
959 safe_syscall2(int, kill, pid_t, pid, int, sig)
960 safe_syscall2(int, tkill, int, tid, int, sig)
961 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
962 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
963 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
964 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
965 unsigned long, pos_l, unsigned long, pos_h)
966 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
967 unsigned long, pos_l, unsigned long, pos_h)
968 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
969 socklen_t, addrlen)
970 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
971 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
972 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
973 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
974 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
975 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
976 safe_syscall2(int, flock, int, fd, int, operation)
977 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
978 const struct timespec *, uts, size_t, sigsetsize)
979 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
980 int, flags)
981 safe_syscall2(int, nanosleep, const struct timespec *, req,
982 struct timespec *, rem)
983 #ifdef TARGET_NR_clock_nanosleep
984 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
985 const struct timespec *, req, struct timespec *, rem)
986 #endif
987 #ifdef __NR_msgsnd
988 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
989 int, flags)
990 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
991 long, msgtype, int, flags)
992 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
993 unsigned, nsops, const struct timespec *, timeout)
994 #else
995 /* This host kernel architecture uses a single ipc syscall; fake up
996 * wrappers for the sub-operations to hide this implementation detail.
997 * Annoyingly we can't include linux/ipc.h to get the constant definitions
998 * for the call parameter because some structs in there conflict with the
999 * sys/ipc.h ones. So we just define them here, and rely on them being
1000 * the same for all host architectures.
1002 #define Q_SEMTIMEDOP 4
1003 #define Q_MSGSND 11
1004 #define Q_MSGRCV 12
1005 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
1007 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
1008 void *, ptr, long, fifth)
1009 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
1011 return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
1013 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
1015 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
1017 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
1018 const struct timespec *timeout)
1020 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
1021 (long)timeout);
1023 #endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
1046 static inline int host_to_target_sock_type(int host_type)
1048 int target_type;
1050 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
1051 case SOCK_DGRAM:
1052 target_type = TARGET_SOCK_DGRAM;
1053 break;
1054 case SOCK_STREAM:
1055 target_type = TARGET_SOCK_STREAM;
1056 break;
1057 default:
1058 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
1059 break;
1062 #if defined(SOCK_CLOEXEC)
1063 if (host_type & SOCK_CLOEXEC) {
1064 target_type |= TARGET_SOCK_CLOEXEC;
1066 #endif
1068 #if defined(SOCK_NONBLOCK)
1069 if (host_type & SOCK_NONBLOCK) {
1070 target_type |= TARGET_SOCK_NONBLOCK;
1072 #endif
1074 return target_type;
1077 static abi_ulong target_brk;
1078 static abi_ulong target_original_brk;
1079 static abi_ulong brk_page;
1081 void target_set_brk(abi_ulong new_brk)
1083 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
1084 brk_page = HOST_PAGE_ALIGN(target_brk);
1087 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
1088 #define DEBUGF_BRK(message, args...)
1090 /* do_brk() must return target values and target errnos. */
1091 abi_long do_brk(abi_ulong new_brk)
1093 abi_long mapped_addr;
1094 abi_ulong new_alloc_size;
1096 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
1098 if (!new_brk) {
1099 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
1100 return target_brk;
1102 if (new_brk < target_original_brk) {
1103 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
1104 target_brk);
1105 return target_brk;
1108 /* If the new brk is less than the highest page reserved to the
1109 * target heap allocation, set it and we're almost done... */
1110 if (new_brk <= brk_page) {
1111 /* Heap contents are initialized to zero, as for anonymous
1112 * mapped pages. */
1113 if (new_brk > target_brk) {
1114 memset(g2h(target_brk), 0, new_brk - target_brk);
1116 target_brk = new_brk;
1117 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
1118 return target_brk;
1121 /* We need to allocate more memory after the brk... Note that
1122 * we don't use MAP_FIXED because that will map over the top of
1123 * any existing mapping (like the one with the host libc or qemu
1124 * itself); instead we treat "mapped but at wrong address" as
1125 * a failure and unmap again.
1127 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
1128 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
1129 PROT_READ|PROT_WRITE,
1130 MAP_ANON|MAP_PRIVATE, 0, 0));
1132 if (mapped_addr == brk_page) {
1133 /* Heap contents are initialized to zero, as for anonymous
1134 * mapped pages. Technically the new pages are already
1135 * initialized to zero since they *are* anonymous mapped
1136 * pages, however we have to take care with the contents that
1137 * come from the remaining part of the previous page: it may
1138 * contains garbage data due to a previous heap usage (grown
1139 * then shrunken). */
1140 memset(g2h(target_brk), 0, brk_page - target_brk);
1142 target_brk = new_brk;
1143 brk_page = HOST_PAGE_ALIGN(target_brk);
1144 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
1145 target_brk);
1146 return target_brk;
1147 } else if (mapped_addr != -1) {
1148 /* Mapped but at wrong address, meaning there wasn't actually
1149 * enough space for this brk.
1151 target_munmap(mapped_addr, new_alloc_size);
1152 mapped_addr = -1;
1153 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
1155 else {
1156 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
1159 #if defined(TARGET_ALPHA)
1160 /* We (partially) emulate OSF/1 on Alpha, which requires we
1161 return a proper errno, not an unchanged brk value. */
1162 return -TARGET_ENOMEM;
1163 #endif
1164 /* For everything else, return the previous break. */
1165 return target_brk;
1168 static inline abi_long copy_from_user_fdset(fd_set *fds,
1169 abi_ulong target_fds_addr,
1170 int n)
1172 int i, nw, j, k;
1173 abi_ulong b, *target_fds;
1175 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1176 if (!(target_fds = lock_user(VERIFY_READ,
1177 target_fds_addr,
1178 sizeof(abi_ulong) * nw,
1179 1)))
1180 return -TARGET_EFAULT;
1182 FD_ZERO(fds);
1183 k = 0;
1184 for (i = 0; i < nw; i++) {
1185 /* grab the abi_ulong */
1186 __get_user(b, &target_fds[i]);
1187 for (j = 0; j < TARGET_ABI_BITS; j++) {
1188 /* check the bit inside the abi_ulong */
1189 if ((b >> j) & 1)
1190 FD_SET(k, fds);
1191 k++;
1195 unlock_user(target_fds, target_fds_addr, 0);
1197 return 0;
1200 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1201 abi_ulong target_fds_addr,
1202 int n)
1204 if (target_fds_addr) {
1205 if (copy_from_user_fdset(fds, target_fds_addr, n))
1206 return -TARGET_EFAULT;
1207 *fds_ptr = fds;
1208 } else {
1209 *fds_ptr = NULL;
1211 return 0;
1214 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1215 const fd_set *fds,
1216 int n)
1218 int i, nw, j, k;
1219 abi_long v;
1220 abi_ulong *target_fds;
1222 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1223 if (!(target_fds = lock_user(VERIFY_WRITE,
1224 target_fds_addr,
1225 sizeof(abi_ulong) * nw,
1226 0)))
1227 return -TARGET_EFAULT;
1229 k = 0;
1230 for (i = 0; i < nw; i++) {
1231 v = 0;
1232 for (j = 0; j < TARGET_ABI_BITS; j++) {
1233 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1234 k++;
1236 __put_user(v, &target_fds[i]);
1239 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1241 return 0;
1244 #if defined(__alpha__)
1245 #define HOST_HZ 1024
1246 #else
1247 #define HOST_HZ 100
1248 #endif
1250 static inline abi_long host_to_target_clock_t(long ticks)
1252 #if HOST_HZ == TARGET_HZ
1253 return ticks;
1254 #else
1255 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1256 #endif
1259 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1260 const struct rusage *rusage)
1262 struct target_rusage *target_rusage;
1264 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1265 return -TARGET_EFAULT;
1266 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1267 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1268 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1269 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1270 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1271 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1272 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1273 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1274 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1275 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1276 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1277 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1278 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1279 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1280 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1281 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1282 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1283 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1284 unlock_user_struct(target_rusage, target_addr, 1);
1286 return 0;
1289 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1291 abi_ulong target_rlim_swap;
1292 rlim_t result;
1294 target_rlim_swap = tswapal(target_rlim);
1295 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1296 return RLIM_INFINITY;
1298 result = target_rlim_swap;
1299 if (target_rlim_swap != (rlim_t)result)
1300 return RLIM_INFINITY;
1302 return result;
1305 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1307 abi_ulong target_rlim_swap;
1308 abi_ulong result;
1310 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1311 target_rlim_swap = TARGET_RLIM_INFINITY;
1312 else
1313 target_rlim_swap = rlim;
1314 result = tswapal(target_rlim_swap);
1316 return result;
1319 static inline int target_to_host_resource(int code)
1321 switch (code) {
1322 case TARGET_RLIMIT_AS:
1323 return RLIMIT_AS;
1324 case TARGET_RLIMIT_CORE:
1325 return RLIMIT_CORE;
1326 case TARGET_RLIMIT_CPU:
1327 return RLIMIT_CPU;
1328 case TARGET_RLIMIT_DATA:
1329 return RLIMIT_DATA;
1330 case TARGET_RLIMIT_FSIZE:
1331 return RLIMIT_FSIZE;
1332 case TARGET_RLIMIT_LOCKS:
1333 return RLIMIT_LOCKS;
1334 case TARGET_RLIMIT_MEMLOCK:
1335 return RLIMIT_MEMLOCK;
1336 case TARGET_RLIMIT_MSGQUEUE:
1337 return RLIMIT_MSGQUEUE;
1338 case TARGET_RLIMIT_NICE:
1339 return RLIMIT_NICE;
1340 case TARGET_RLIMIT_NOFILE:
1341 return RLIMIT_NOFILE;
1342 case TARGET_RLIMIT_NPROC:
1343 return RLIMIT_NPROC;
1344 case TARGET_RLIMIT_RSS:
1345 return RLIMIT_RSS;
1346 case TARGET_RLIMIT_RTPRIO:
1347 return RLIMIT_RTPRIO;
1348 case TARGET_RLIMIT_SIGPENDING:
1349 return RLIMIT_SIGPENDING;
1350 case TARGET_RLIMIT_STACK:
1351 return RLIMIT_STACK;
1352 default:
1353 return code;
1357 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1358 abi_ulong target_tv_addr)
1360 struct target_timeval *target_tv;
1362 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1363 return -TARGET_EFAULT;
1365 __get_user(tv->tv_sec, &target_tv->tv_sec);
1366 __get_user(tv->tv_usec, &target_tv->tv_usec);
1368 unlock_user_struct(target_tv, target_tv_addr, 0);
1370 return 0;
1373 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1374 const struct timeval *tv)
1376 struct target_timeval *target_tv;
1378 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1379 return -TARGET_EFAULT;
1381 __put_user(tv->tv_sec, &target_tv->tv_sec);
1382 __put_user(tv->tv_usec, &target_tv->tv_usec);
1384 unlock_user_struct(target_tv, target_tv_addr, 1);
1386 return 0;
1389 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1390 abi_ulong target_tz_addr)
1392 struct target_timezone *target_tz;
1394 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1395 return -TARGET_EFAULT;
1398 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1399 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1401 unlock_user_struct(target_tz, target_tz_addr, 0);
1403 return 0;
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

/* Read a guest struct mq_attr into a host one. Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

/* Write a host struct mq_attr into guest memory. Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* select's timeval is converted to pselect's timespec. */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* Write back the (possibly updated) remaining time. */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}

#if defined(TARGET_WANT_OLD_SYS_SELECT)
/* Old-style select(): a single guest pointer to a struct holding the
 * five arguments.
 */
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#endif
1530 static abi_long do_pipe2(int host_pipe[], int flags)
1532 #ifdef CONFIG_PIPE2
1533 return pipe2(host_pipe, flags);
1534 #else
1535 return -ENOSYS;
1536 #endif
1539 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1540 int flags, int is_pipe2)
1542 int host_pipe[2];
1543 abi_long ret;
1544 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1546 if (is_error(ret))
1547 return get_errno(ret);
1549 /* Several targets have special calling conventions for the original
1550 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1551 if (!is_pipe2) {
1552 #if defined(TARGET_ALPHA)
1553 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1554 return host_pipe[0];
1555 #elif defined(TARGET_MIPS)
1556 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1557 return host_pipe[0];
1558 #elif defined(TARGET_SH4)
1559 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1560 return host_pipe[0];
1561 #elif defined(TARGET_SPARC)
1562 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1563 return host_pipe[0];
1564 #endif
1567 if (put_user_s32(host_pipe[0], pipedes)
1568 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1569 return -TARGET_EFAULT;
1570 return get_errno(ret);
1573 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1574 abi_ulong target_addr,
1575 socklen_t len)
1577 struct target_ip_mreqn *target_smreqn;
1579 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1580 if (!target_smreqn)
1581 return -TARGET_EFAULT;
1582 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1583 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1584 if (len == sizeof(struct target_ip_mreqn))
1585 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1586 unlock_user(target_smreqn, target_addr, 0);
1588 return 0;
1591 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1592 abi_ulong target_addr,
1593 socklen_t len)
1595 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1596 sa_family_t sa_family;
1597 struct target_sockaddr *target_saddr;
1599 if (fd_trans_target_to_host_addr(fd)) {
1600 return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1603 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1604 if (!target_saddr)
1605 return -TARGET_EFAULT;
1607 sa_family = tswap16(target_saddr->sa_family);
1609 /* Oops. The caller might send a incomplete sun_path; sun_path
1610 * must be terminated by \0 (see the manual page), but
1611 * unfortunately it is quite common to specify sockaddr_un
1612 * length as "strlen(x->sun_path)" while it should be
1613 * "strlen(...) + 1". We'll fix that here if needed.
1614 * Linux kernel has a similar feature.
1617 if (sa_family == AF_UNIX) {
1618 if (len < unix_maxlen && len > 0) {
1619 char *cp = (char*)target_saddr;
1621 if ( cp[len-1] && !cp[len] )
1622 len++;
1624 if (len > unix_maxlen)
1625 len = unix_maxlen;
1628 memcpy(addr, target_saddr, len);
1629 addr->sa_family = sa_family;
1630 if (sa_family == AF_NETLINK) {
1631 struct sockaddr_nl *nladdr;
1633 nladdr = (struct sockaddr_nl *)addr;
1634 nladdr->nl_pid = tswap32(nladdr->nl_pid);
1635 nladdr->nl_groups = tswap32(nladdr->nl_groups);
1636 } else if (sa_family == AF_PACKET) {
1637 struct target_sockaddr_ll *lladdr;
1639 lladdr = (struct target_sockaddr_ll *)addr;
1640 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1641 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1643 unlock_user(target_saddr, target_addr, 0);
1645 return 0;
1648 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1649 struct sockaddr *addr,
1650 socklen_t len)
1652 struct target_sockaddr *target_saddr;
1654 if (len == 0) {
1655 return 0;
1657 assert(addr);
1659 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1660 if (!target_saddr)
1661 return -TARGET_EFAULT;
1662 memcpy(target_saddr, addr, len);
1663 if (len >= offsetof(struct target_sockaddr, sa_family) +
1664 sizeof(target_saddr->sa_family)) {
1665 target_saddr->sa_family = tswap16(addr->sa_family);
1667 if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1668 struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1669 target_nl->nl_pid = tswap32(target_nl->nl_pid);
1670 target_nl->nl_groups = tswap32(target_nl->nl_groups);
1671 } else if (addr->sa_family == AF_PACKET) {
1672 struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1673 target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1674 target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1675 } else if (addr->sa_family == AF_INET6 &&
1676 len >= sizeof(struct target_sockaddr_in6)) {
1677 struct target_sockaddr_in6 *target_in6 =
1678 (struct target_sockaddr_in6 *)target_saddr;
1679 target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1681 unlock_user(target_saddr, target_addr, len);
1683 return 0;
1686 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1687 struct target_msghdr *target_msgh)
1689 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1690 abi_long msg_controllen;
1691 abi_ulong target_cmsg_addr;
1692 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1693 socklen_t space = 0;
1695 msg_controllen = tswapal(target_msgh->msg_controllen);
1696 if (msg_controllen < sizeof (struct target_cmsghdr))
1697 goto the_end;
1698 target_cmsg_addr = tswapal(target_msgh->msg_control);
1699 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1700 target_cmsg_start = target_cmsg;
1701 if (!target_cmsg)
1702 return -TARGET_EFAULT;
1704 while (cmsg && target_cmsg) {
1705 void *data = CMSG_DATA(cmsg);
1706 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1708 int len = tswapal(target_cmsg->cmsg_len)
1709 - sizeof(struct target_cmsghdr);
1711 space += CMSG_SPACE(len);
1712 if (space > msgh->msg_controllen) {
1713 space -= CMSG_SPACE(len);
1714 /* This is a QEMU bug, since we allocated the payload
1715 * area ourselves (unlike overflow in host-to-target
1716 * conversion, which is just the guest giving us a buffer
1717 * that's too small). It can't happen for the payload types
1718 * we currently support; if it becomes an issue in future
1719 * we would need to improve our allocation strategy to
1720 * something more intelligent than "twice the size of the
1721 * target buffer we're reading from".
1723 gemu_log("Host cmsg overflow\n");
1724 break;
1727 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1728 cmsg->cmsg_level = SOL_SOCKET;
1729 } else {
1730 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1732 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1733 cmsg->cmsg_len = CMSG_LEN(len);
1735 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1736 int *fd = (int *)data;
1737 int *target_fd = (int *)target_data;
1738 int i, numfds = len / sizeof(int);
1740 for (i = 0; i < numfds; i++) {
1741 __get_user(fd[i], target_fd + i);
1743 } else if (cmsg->cmsg_level == SOL_SOCKET
1744 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1745 struct ucred *cred = (struct ucred *)data;
1746 struct target_ucred *target_cred =
1747 (struct target_ucred *)target_data;
1749 __get_user(cred->pid, &target_cred->pid);
1750 __get_user(cred->uid, &target_cred->uid);
1751 __get_user(cred->gid, &target_cred->gid);
1752 } else {
1753 gemu_log("Unsupported ancillary data: %d/%d\n",
1754 cmsg->cmsg_level, cmsg->cmsg_type);
1755 memcpy(data, target_data, len);
1758 cmsg = CMSG_NXTHDR(msgh, cmsg);
1759 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1760 target_cmsg_start);
1762 unlock_user(target_cmsg, target_cmsg_addr, 0);
1763 the_end:
1764 msgh->msg_controllen = space;
1765 return 0;
1768 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1769 struct msghdr *msgh)
1771 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1772 abi_long msg_controllen;
1773 abi_ulong target_cmsg_addr;
1774 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1775 socklen_t space = 0;
1777 msg_controllen = tswapal(target_msgh->msg_controllen);
1778 if (msg_controllen < sizeof (struct target_cmsghdr))
1779 goto the_end;
1780 target_cmsg_addr = tswapal(target_msgh->msg_control);
1781 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1782 target_cmsg_start = target_cmsg;
1783 if (!target_cmsg)
1784 return -TARGET_EFAULT;
1786 while (cmsg && target_cmsg) {
1787 void *data = CMSG_DATA(cmsg);
1788 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1790 int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1791 int tgt_len, tgt_space;
1793 /* We never copy a half-header but may copy half-data;
1794 * this is Linux's behaviour in put_cmsg(). Note that
1795 * truncation here is a guest problem (which we report
1796 * to the guest via the CTRUNC bit), unlike truncation
1797 * in target_to_host_cmsg, which is a QEMU bug.
1799 if (msg_controllen < sizeof(struct target_cmsghdr)) {
1800 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1801 break;
1804 if (cmsg->cmsg_level == SOL_SOCKET) {
1805 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1806 } else {
1807 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1809 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1811 /* Payload types which need a different size of payload on
1812 * the target must adjust tgt_len here.
1814 switch (cmsg->cmsg_level) {
1815 case SOL_SOCKET:
1816 switch (cmsg->cmsg_type) {
1817 case SO_TIMESTAMP:
1818 tgt_len = sizeof(struct target_timeval);
1819 break;
1820 default:
1821 break;
1823 default:
1824 tgt_len = len;
1825 break;
1828 if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1829 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1830 tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1833 /* We must now copy-and-convert len bytes of payload
1834 * into tgt_len bytes of destination space. Bear in mind
1835 * that in both source and destination we may be dealing
1836 * with a truncated value!
1838 switch (cmsg->cmsg_level) {
1839 case SOL_SOCKET:
1840 switch (cmsg->cmsg_type) {
1841 case SCM_RIGHTS:
1843 int *fd = (int *)data;
1844 int *target_fd = (int *)target_data;
1845 int i, numfds = tgt_len / sizeof(int);
1847 for (i = 0; i < numfds; i++) {
1848 __put_user(fd[i], target_fd + i);
1850 break;
1852 case SO_TIMESTAMP:
1854 struct timeval *tv = (struct timeval *)data;
1855 struct target_timeval *target_tv =
1856 (struct target_timeval *)target_data;
1858 if (len != sizeof(struct timeval) ||
1859 tgt_len != sizeof(struct target_timeval)) {
1860 goto unimplemented;
1863 /* copy struct timeval to target */
1864 __put_user(tv->tv_sec, &target_tv->tv_sec);
1865 __put_user(tv->tv_usec, &target_tv->tv_usec);
1866 break;
1868 case SCM_CREDENTIALS:
1870 struct ucred *cred = (struct ucred *)data;
1871 struct target_ucred *target_cred =
1872 (struct target_ucred *)target_data;
1874 __put_user(cred->pid, &target_cred->pid);
1875 __put_user(cred->uid, &target_cred->uid);
1876 __put_user(cred->gid, &target_cred->gid);
1877 break;
1879 default:
1880 goto unimplemented;
1882 break;
1884 case SOL_IP:
1885 switch (cmsg->cmsg_type) {
1886 case IP_TTL:
1888 uint32_t *v = (uint32_t *)data;
1889 uint32_t *t_int = (uint32_t *)target_data;
1891 if (len != sizeof(uint32_t) ||
1892 tgt_len != sizeof(uint32_t)) {
1893 goto unimplemented;
1895 __put_user(*v, t_int);
1896 break;
1898 case IP_RECVERR:
1900 struct errhdr_t {
1901 struct sock_extended_err ee;
1902 struct sockaddr_in offender;
1904 struct errhdr_t *errh = (struct errhdr_t *)data;
1905 struct errhdr_t *target_errh =
1906 (struct errhdr_t *)target_data;
1908 if (len != sizeof(struct errhdr_t) ||
1909 tgt_len != sizeof(struct errhdr_t)) {
1910 goto unimplemented;
1912 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1913 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1914 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1915 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1916 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1917 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1918 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1919 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1920 (void *) &errh->offender, sizeof(errh->offender));
1921 break;
1923 default:
1924 goto unimplemented;
1926 break;
1928 case SOL_IPV6:
1929 switch (cmsg->cmsg_type) {
1930 case IPV6_HOPLIMIT:
1932 uint32_t *v = (uint32_t *)data;
1933 uint32_t *t_int = (uint32_t *)target_data;
1935 if (len != sizeof(uint32_t) ||
1936 tgt_len != sizeof(uint32_t)) {
1937 goto unimplemented;
1939 __put_user(*v, t_int);
1940 break;
1942 case IPV6_RECVERR:
1944 struct errhdr6_t {
1945 struct sock_extended_err ee;
1946 struct sockaddr_in6 offender;
1948 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1949 struct errhdr6_t *target_errh =
1950 (struct errhdr6_t *)target_data;
1952 if (len != sizeof(struct errhdr6_t) ||
1953 tgt_len != sizeof(struct errhdr6_t)) {
1954 goto unimplemented;
1956 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1957 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1958 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1959 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1960 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1961 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1962 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1963 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1964 (void *) &errh->offender, sizeof(errh->offender));
1965 break;
1967 default:
1968 goto unimplemented;
1970 break;
1972 default:
1973 unimplemented:
1974 gemu_log("Unsupported ancillary data: %d/%d\n",
1975 cmsg->cmsg_level, cmsg->cmsg_type);
1976 memcpy(target_data, data, MIN(len, tgt_len));
1977 if (tgt_len > len) {
1978 memset(target_data + len, 0, tgt_len - len);
1982 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1983 tgt_space = TARGET_CMSG_SPACE(tgt_len);
1984 if (msg_controllen < tgt_space) {
1985 tgt_space = msg_controllen;
1987 msg_controllen -= tgt_space;
1988 space += tgt_space;
1989 cmsg = CMSG_NXTHDR(msgh, cmsg);
1990 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1991 target_cmsg_start);
1993 unlock_user(target_cmsg, target_cmsg_addr, space);
1994 the_end:
1995 target_msgh->msg_controllen = tswapal(space);
1996 return 0;
1999 static void tswap_nlmsghdr(struct nlmsghdr *nlh)
2001 nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
2002 nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
2003 nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
2004 nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
2005 nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
2008 static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
2009 size_t len,
2010 abi_long (*host_to_target_nlmsg)
2011 (struct nlmsghdr *))
2013 uint32_t nlmsg_len;
2014 abi_long ret;
2016 while (len > sizeof(struct nlmsghdr)) {
2018 nlmsg_len = nlh->nlmsg_len;
2019 if (nlmsg_len < sizeof(struct nlmsghdr) ||
2020 nlmsg_len > len) {
2021 break;
2024 switch (nlh->nlmsg_type) {
2025 case NLMSG_DONE:
2026 tswap_nlmsghdr(nlh);
2027 return 0;
2028 case NLMSG_NOOP:
2029 break;
2030 case NLMSG_ERROR:
2032 struct nlmsgerr *e = NLMSG_DATA(nlh);
2033 e->error = tswap32(e->error);
2034 tswap_nlmsghdr(&e->msg);
2035 tswap_nlmsghdr(nlh);
2036 return 0;
2038 default:
2039 ret = host_to_target_nlmsg(nlh);
2040 if (ret < 0) {
2041 tswap_nlmsghdr(nlh);
2042 return ret;
2044 break;
2046 tswap_nlmsghdr(nlh);
2047 len -= NLMSG_ALIGN(nlmsg_len);
2048 nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
2050 return 0;
2053 static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
2054 size_t len,
2055 abi_long (*target_to_host_nlmsg)
2056 (struct nlmsghdr *))
2058 int ret;
2060 while (len > sizeof(struct nlmsghdr)) {
2061 if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
2062 tswap32(nlh->nlmsg_len) > len) {
2063 break;
2065 tswap_nlmsghdr(nlh);
2066 switch (nlh->nlmsg_type) {
2067 case NLMSG_DONE:
2068 return 0;
2069 case NLMSG_NOOP:
2070 break;
2071 case NLMSG_ERROR:
2073 struct nlmsgerr *e = NLMSG_DATA(nlh);
2074 e->error = tswap32(e->error);
2075 tswap_nlmsghdr(&e->msg);
2076 return 0;
2078 default:
2079 ret = target_to_host_nlmsg(nlh);
2080 if (ret < 0) {
2081 return ret;
2084 len -= NLMSG_ALIGN(nlh->nlmsg_len);
2085 nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
2087 return 0;
2090 #ifdef CONFIG_RTNETLINK
2091 static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
2092 size_t len, void *context,
2093 abi_long (*host_to_target_nlattr)
2094 (struct nlattr *,
2095 void *context))
2097 unsigned short nla_len;
2098 abi_long ret;
2100 while (len > sizeof(struct nlattr)) {
2101 nla_len = nlattr->nla_len;
2102 if (nla_len < sizeof(struct nlattr) ||
2103 nla_len > len) {
2104 break;
2106 ret = host_to_target_nlattr(nlattr, context);
2107 nlattr->nla_len = tswap16(nlattr->nla_len);
2108 nlattr->nla_type = tswap16(nlattr->nla_type);
2109 if (ret < 0) {
2110 return ret;
2112 len -= NLA_ALIGN(nla_len);
2113 nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
2115 return 0;
2118 static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
2119 size_t len,
2120 abi_long (*host_to_target_rtattr)
2121 (struct rtattr *))
2123 unsigned short rta_len;
2124 abi_long ret;
2126 while (len > sizeof(struct rtattr)) {
2127 rta_len = rtattr->rta_len;
2128 if (rta_len < sizeof(struct rtattr) ||
2129 rta_len > len) {
2130 break;
2132 ret = host_to_target_rtattr(rtattr);
2133 rtattr->rta_len = tswap16(rtattr->rta_len);
2134 rtattr->rta_type = tswap16(rtattr->rta_type);
2135 if (ret < 0) {
2136 return ret;
2138 len -= RTA_ALIGN(rta_len);
2139 rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
2141 return 0;
2144 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
2146 static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
2147 void *context)
2149 uint16_t *u16;
2150 uint32_t *u32;
2151 uint64_t *u64;
2153 switch (nlattr->nla_type) {
2154 /* no data */
2155 case QEMU_IFLA_BR_FDB_FLUSH:
2156 break;
2157 /* binary */
2158 case QEMU_IFLA_BR_GROUP_ADDR:
2159 break;
2160 /* uint8_t */
2161 case QEMU_IFLA_BR_VLAN_FILTERING:
2162 case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
2163 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
2164 case QEMU_IFLA_BR_MCAST_ROUTER:
2165 case QEMU_IFLA_BR_MCAST_SNOOPING:
2166 case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
2167 case QEMU_IFLA_BR_MCAST_QUERIER:
2168 case QEMU_IFLA_BR_NF_CALL_IPTABLES:
2169 case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
2170 case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
2171 break;
2172 /* uint16_t */
2173 case QEMU_IFLA_BR_PRIORITY:
2174 case QEMU_IFLA_BR_VLAN_PROTOCOL:
2175 case QEMU_IFLA_BR_GROUP_FWD_MASK:
2176 case QEMU_IFLA_BR_ROOT_PORT:
2177 case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
2178 u16 = NLA_DATA(nlattr);
2179 *u16 = tswap16(*u16);
2180 break;
2181 /* uint32_t */
2182 case QEMU_IFLA_BR_FORWARD_DELAY:
2183 case QEMU_IFLA_BR_HELLO_TIME:
2184 case QEMU_IFLA_BR_MAX_AGE:
2185 case QEMU_IFLA_BR_AGEING_TIME:
2186 case QEMU_IFLA_BR_STP_STATE:
2187 case QEMU_IFLA_BR_ROOT_PATH_COST:
2188 case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
2189 case QEMU_IFLA_BR_MCAST_HASH_MAX:
2190 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
2191 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
2192 u32 = NLA_DATA(nlattr);
2193 *u32 = tswap32(*u32);
2194 break;
2195 /* uint64_t */
2196 case QEMU_IFLA_BR_HELLO_TIMER:
2197 case QEMU_IFLA_BR_TCN_TIMER:
2198 case QEMU_IFLA_BR_GC_TIMER:
2199 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
2200 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
2201 case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
2202 case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
2203 case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
2204 case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
2205 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
2206 u64 = NLA_DATA(nlattr);
2207 *u64 = tswap64(*u64);
2208 break;
2209 /* ifla_bridge_id: uin8_t[] */
2210 case QEMU_IFLA_BR_ROOT_ID:
2211 case QEMU_IFLA_BR_BRIDGE_ID:
2212 break;
2213 default:
2214 gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
2215 break;
2217 return 0;
2220 static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
2221 void *context)
2223 uint16_t *u16;
2224 uint32_t *u32;
2225 uint64_t *u64;
2227 switch (nlattr->nla_type) {
2228 /* uint8_t */
2229 case QEMU_IFLA_BRPORT_STATE:
2230 case QEMU_IFLA_BRPORT_MODE:
2231 case QEMU_IFLA_BRPORT_GUARD:
2232 case QEMU_IFLA_BRPORT_PROTECT:
2233 case QEMU_IFLA_BRPORT_FAST_LEAVE:
2234 case QEMU_IFLA_BRPORT_LEARNING:
2235 case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
2236 case QEMU_IFLA_BRPORT_PROXYARP:
2237 case QEMU_IFLA_BRPORT_LEARNING_SYNC:
2238 case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
2239 case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
2240 case QEMU_IFLA_BRPORT_CONFIG_PENDING:
2241 case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
2242 break;
2243 /* uint16_t */
2244 case QEMU_IFLA_BRPORT_PRIORITY:
2245 case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
2246 case QEMU_IFLA_BRPORT_DESIGNATED_COST:
2247 case QEMU_IFLA_BRPORT_ID:
2248 case QEMU_IFLA_BRPORT_NO:
2249 u16 = NLA_DATA(nlattr);
2250 *u16 = tswap16(*u16);
2251 break;
2252 /* uin32_t */
2253 case QEMU_IFLA_BRPORT_COST:
2254 u32 = NLA_DATA(nlattr);
2255 *u32 = tswap32(*u32);
2256 break;
2257 /* uint64_t */
2258 case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
2259 case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
2260 case QEMU_IFLA_BRPORT_HOLD_TIMER:
2261 u64 = NLA_DATA(nlattr);
2262 *u64 = tswap64(*u64);
2263 break;
2264 /* ifla_bridge_id: uint8_t[] */
2265 case QEMU_IFLA_BRPORT_ROOT_ID:
2266 case QEMU_IFLA_BRPORT_BRIDGE_ID:
2267 break;
2268 default:
2269 gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
2270 break;
2272 return 0;
/*
 * Carries the IFLA_INFO_KIND / IFLA_INFO_SLAVE_KIND strings between
 * attributes of the same IFLA_LINKINFO nest, so the nested *_DATA
 * attributes can be decoded according to the link kind.
 */
struct linkinfo_context {
    int len;            /* length of the kind string's attribute payload */
    char *name;         /* link kind, e.g. "bridge" (points into the nest) */
    int slave_len;      /* length of the slave kind string's payload */
    char *slave_name;   /* slave link kind (points into the nest) */
};
2282 static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
2283 void *context)
2285 struct linkinfo_context *li_context = context;
2287 switch (nlattr->nla_type) {
2288 /* string */
2289 case QEMU_IFLA_INFO_KIND:
2290 li_context->name = NLA_DATA(nlattr);
2291 li_context->len = nlattr->nla_len - NLA_HDRLEN;
2292 break;
2293 case QEMU_IFLA_INFO_SLAVE_KIND:
2294 li_context->slave_name = NLA_DATA(nlattr);
2295 li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
2296 break;
2297 /* stats */
2298 case QEMU_IFLA_INFO_XSTATS:
2299 /* FIXME: only used by CAN */
2300 break;
2301 /* nested */
2302 case QEMU_IFLA_INFO_DATA:
2303 if (strncmp(li_context->name, "bridge",
2304 li_context->len) == 0) {
2305 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2306 nlattr->nla_len,
2307 NULL,
2308 host_to_target_data_bridge_nlattr);
2309 } else {
2310 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
2312 break;
2313 case QEMU_IFLA_INFO_SLAVE_DATA:
2314 if (strncmp(li_context->slave_name, "bridge",
2315 li_context->slave_len) == 0) {
2316 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2317 nlattr->nla_len,
2318 NULL,
2319 host_to_target_slave_data_bridge_nlattr);
2320 } else {
2321 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2322 li_context->slave_name);
2324 break;
2325 default:
2326 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
2327 break;
2330 return 0;
2333 static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
2334 void *context)
2336 uint32_t *u32;
2337 int i;
2339 switch (nlattr->nla_type) {
2340 case QEMU_IFLA_INET_CONF:
2341 u32 = NLA_DATA(nlattr);
2342 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2343 i++) {
2344 u32[i] = tswap32(u32[i]);
2346 break;
2347 default:
2348 gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
2350 return 0;
2353 static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
2354 void *context)
2356 uint32_t *u32;
2357 uint64_t *u64;
2358 struct ifla_cacheinfo *ci;
2359 int i;
2361 switch (nlattr->nla_type) {
2362 /* binaries */
2363 case QEMU_IFLA_INET6_TOKEN:
2364 break;
2365 /* uint8_t */
2366 case QEMU_IFLA_INET6_ADDR_GEN_MODE:
2367 break;
2368 /* uint32_t */
2369 case QEMU_IFLA_INET6_FLAGS:
2370 u32 = NLA_DATA(nlattr);
2371 *u32 = tswap32(*u32);
2372 break;
2373 /* uint32_t[] */
2374 case QEMU_IFLA_INET6_CONF:
2375 u32 = NLA_DATA(nlattr);
2376 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2377 i++) {
2378 u32[i] = tswap32(u32[i]);
2380 break;
2381 /* ifla_cacheinfo */
2382 case QEMU_IFLA_INET6_CACHEINFO:
2383 ci = NLA_DATA(nlattr);
2384 ci->max_reasm_len = tswap32(ci->max_reasm_len);
2385 ci->tstamp = tswap32(ci->tstamp);
2386 ci->reachable_time = tswap32(ci->reachable_time);
2387 ci->retrans_time = tswap32(ci->retrans_time);
2388 break;
2389 /* uint64_t[] */
2390 case QEMU_IFLA_INET6_STATS:
2391 case QEMU_IFLA_INET6_ICMP6STATS:
2392 u64 = NLA_DATA(nlattr);
2393 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
2394 i++) {
2395 u64[i] = tswap64(u64[i]);
2397 break;
2398 default:
2399 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
2401 return 0;
2404 static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
2405 void *context)
2407 switch (nlattr->nla_type) {
2408 case AF_INET:
2409 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2410 NULL,
2411 host_to_target_data_inet_nlattr);
2412 case AF_INET6:
2413 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2414 NULL,
2415 host_to_target_data_inet6_nlattr);
2416 default:
2417 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
2418 break;
2420 return 0;
2423 static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
2425 uint32_t *u32;
2426 struct rtnl_link_stats *st;
2427 struct rtnl_link_stats64 *st64;
2428 struct rtnl_link_ifmap *map;
2429 struct linkinfo_context li_context;
2431 switch (rtattr->rta_type) {
2432 /* binary stream */
2433 case QEMU_IFLA_ADDRESS:
2434 case QEMU_IFLA_BROADCAST:
2435 /* string */
2436 case QEMU_IFLA_IFNAME:
2437 case QEMU_IFLA_QDISC:
2438 break;
2439 /* uin8_t */
2440 case QEMU_IFLA_OPERSTATE:
2441 case QEMU_IFLA_LINKMODE:
2442 case QEMU_IFLA_CARRIER:
2443 case QEMU_IFLA_PROTO_DOWN:
2444 break;
2445 /* uint32_t */
2446 case QEMU_IFLA_MTU:
2447 case QEMU_IFLA_LINK:
2448 case QEMU_IFLA_WEIGHT:
2449 case QEMU_IFLA_TXQLEN:
2450 case QEMU_IFLA_CARRIER_CHANGES:
2451 case QEMU_IFLA_NUM_RX_QUEUES:
2452 case QEMU_IFLA_NUM_TX_QUEUES:
2453 case QEMU_IFLA_PROMISCUITY:
2454 case QEMU_IFLA_EXT_MASK:
2455 case QEMU_IFLA_LINK_NETNSID:
2456 case QEMU_IFLA_GROUP:
2457 case QEMU_IFLA_MASTER:
2458 case QEMU_IFLA_NUM_VF:
2459 case QEMU_IFLA_GSO_MAX_SEGS:
2460 case QEMU_IFLA_GSO_MAX_SIZE:
2461 u32 = RTA_DATA(rtattr);
2462 *u32 = tswap32(*u32);
2463 break;
2464 /* struct rtnl_link_stats */
2465 case QEMU_IFLA_STATS:
2466 st = RTA_DATA(rtattr);
2467 st->rx_packets = tswap32(st->rx_packets);
2468 st->tx_packets = tswap32(st->tx_packets);
2469 st->rx_bytes = tswap32(st->rx_bytes);
2470 st->tx_bytes = tswap32(st->tx_bytes);
2471 st->rx_errors = tswap32(st->rx_errors);
2472 st->tx_errors = tswap32(st->tx_errors);
2473 st->rx_dropped = tswap32(st->rx_dropped);
2474 st->tx_dropped = tswap32(st->tx_dropped);
2475 st->multicast = tswap32(st->multicast);
2476 st->collisions = tswap32(st->collisions);
2478 /* detailed rx_errors: */
2479 st->rx_length_errors = tswap32(st->rx_length_errors);
2480 st->rx_over_errors = tswap32(st->rx_over_errors);
2481 st->rx_crc_errors = tswap32(st->rx_crc_errors);
2482 st->rx_frame_errors = tswap32(st->rx_frame_errors);
2483 st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
2484 st->rx_missed_errors = tswap32(st->rx_missed_errors);
2486 /* detailed tx_errors */
2487 st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
2488 st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
2489 st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
2490 st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
2491 st->tx_window_errors = tswap32(st->tx_window_errors);
2493 /* for cslip etc */
2494 st->rx_compressed = tswap32(st->rx_compressed);
2495 st->tx_compressed = tswap32(st->tx_compressed);
2496 break;
2497 /* struct rtnl_link_stats64 */
2498 case QEMU_IFLA_STATS64:
2499 st64 = RTA_DATA(rtattr);
2500 st64->rx_packets = tswap64(st64->rx_packets);
2501 st64->tx_packets = tswap64(st64->tx_packets);
2502 st64->rx_bytes = tswap64(st64->rx_bytes);
2503 st64->tx_bytes = tswap64(st64->tx_bytes);
2504 st64->rx_errors = tswap64(st64->rx_errors);
2505 st64->tx_errors = tswap64(st64->tx_errors);
2506 st64->rx_dropped = tswap64(st64->rx_dropped);
2507 st64->tx_dropped = tswap64(st64->tx_dropped);
2508 st64->multicast = tswap64(st64->multicast);
2509 st64->collisions = tswap64(st64->collisions);
2511 /* detailed rx_errors: */
2512 st64->rx_length_errors = tswap64(st64->rx_length_errors);
2513 st64->rx_over_errors = tswap64(st64->rx_over_errors);
2514 st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
2515 st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
2516 st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
2517 st64->rx_missed_errors = tswap64(st64->rx_missed_errors);
2519 /* detailed tx_errors */
2520 st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
2521 st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
2522 st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
2523 st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
2524 st64->tx_window_errors = tswap64(st64->tx_window_errors);
2526 /* for cslip etc */
2527 st64->rx_compressed = tswap64(st64->rx_compressed);
2528 st64->tx_compressed = tswap64(st64->tx_compressed);
2529 break;
2530 /* struct rtnl_link_ifmap */
2531 case QEMU_IFLA_MAP:
2532 map = RTA_DATA(rtattr);
2533 map->mem_start = tswap64(map->mem_start);
2534 map->mem_end = tswap64(map->mem_end);
2535 map->base_addr = tswap64(map->base_addr);
2536 map->irq = tswap16(map->irq);
2537 break;
2538 /* nested */
2539 case QEMU_IFLA_LINKINFO:
2540 memset(&li_context, 0, sizeof(li_context));
2541 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2542 &li_context,
2543 host_to_target_data_linkinfo_nlattr);
2544 case QEMU_IFLA_AF_SPEC:
2545 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2546 NULL,
2547 host_to_target_data_spec_nlattr);
2548 default:
2549 gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
2550 break;
2552 return 0;
2555 static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
2557 uint32_t *u32;
2558 struct ifa_cacheinfo *ci;
2560 switch (rtattr->rta_type) {
2561 /* binary: depends on family type */
2562 case IFA_ADDRESS:
2563 case IFA_LOCAL:
2564 break;
2565 /* string */
2566 case IFA_LABEL:
2567 break;
2568 /* u32 */
2569 case IFA_FLAGS:
2570 case IFA_BROADCAST:
2571 u32 = RTA_DATA(rtattr);
2572 *u32 = tswap32(*u32);
2573 break;
2574 /* struct ifa_cacheinfo */
2575 case IFA_CACHEINFO:
2576 ci = RTA_DATA(rtattr);
2577 ci->ifa_prefered = tswap32(ci->ifa_prefered);
2578 ci->ifa_valid = tswap32(ci->ifa_valid);
2579 ci->cstamp = tswap32(ci->cstamp);
2580 ci->tstamp = tswap32(ci->tstamp);
2581 break;
2582 default:
2583 gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
2584 break;
2586 return 0;
2589 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
2591 uint32_t *u32;
2592 switch (rtattr->rta_type) {
2593 /* binary: depends on family type */
2594 case RTA_GATEWAY:
2595 case RTA_DST:
2596 case RTA_PREFSRC:
2597 break;
2598 /* u32 */
2599 case RTA_PRIORITY:
2600 case RTA_TABLE:
2601 case RTA_OIF:
2602 u32 = RTA_DATA(rtattr);
2603 *u32 = tswap32(*u32);
2604 break;
2605 default:
2606 gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
2607 break;
2609 return 0;
2612 static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
2613 uint32_t rtattr_len)
2615 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2616 host_to_target_data_link_rtattr);
2619 static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
2620 uint32_t rtattr_len)
2622 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2623 host_to_target_data_addr_rtattr);
2626 static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
2627 uint32_t rtattr_len)
2629 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2630 host_to_target_data_route_rtattr);
2633 static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
2635 uint32_t nlmsg_len;
2636 struct ifinfomsg *ifi;
2637 struct ifaddrmsg *ifa;
2638 struct rtmsg *rtm;
2640 nlmsg_len = nlh->nlmsg_len;
2641 switch (nlh->nlmsg_type) {
2642 case RTM_NEWLINK:
2643 case RTM_DELLINK:
2644 case RTM_GETLINK:
2645 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2646 ifi = NLMSG_DATA(nlh);
2647 ifi->ifi_type = tswap16(ifi->ifi_type);
2648 ifi->ifi_index = tswap32(ifi->ifi_index);
2649 ifi->ifi_flags = tswap32(ifi->ifi_flags);
2650 ifi->ifi_change = tswap32(ifi->ifi_change);
2651 host_to_target_link_rtattr(IFLA_RTA(ifi),
2652 nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
2654 break;
2655 case RTM_NEWADDR:
2656 case RTM_DELADDR:
2657 case RTM_GETADDR:
2658 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2659 ifa = NLMSG_DATA(nlh);
2660 ifa->ifa_index = tswap32(ifa->ifa_index);
2661 host_to_target_addr_rtattr(IFA_RTA(ifa),
2662 nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
2664 break;
2665 case RTM_NEWROUTE:
2666 case RTM_DELROUTE:
2667 case RTM_GETROUTE:
2668 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2669 rtm = NLMSG_DATA(nlh);
2670 rtm->rtm_flags = tswap32(rtm->rtm_flags);
2671 host_to_target_route_rtattr(RTM_RTA(rtm),
2672 nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
2674 break;
2675 default:
2676 return -TARGET_EINVAL;
2678 return 0;
2681 static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
2682 size_t len)
2684 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
2687 static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
2688 size_t len,
2689 abi_long (*target_to_host_rtattr)
2690 (struct rtattr *))
2692 abi_long ret;
2694 while (len >= sizeof(struct rtattr)) {
2695 if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
2696 tswap16(rtattr->rta_len) > len) {
2697 break;
2699 rtattr->rta_len = tswap16(rtattr->rta_len);
2700 rtattr->rta_type = tswap16(rtattr->rta_type);
2701 ret = target_to_host_rtattr(rtattr);
2702 if (ret < 0) {
2703 return ret;
2705 len -= RTA_ALIGN(rtattr->rta_len);
2706 rtattr = (struct rtattr *)(((char *)rtattr) +
2707 RTA_ALIGN(rtattr->rta_len));
2709 return 0;
2712 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2714 switch (rtattr->rta_type) {
2715 default:
2716 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
2717 break;
2719 return 0;
2722 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2724 switch (rtattr->rta_type) {
2725 /* binary: depends on family type */
2726 case IFA_LOCAL:
2727 case IFA_ADDRESS:
2728 break;
2729 default:
2730 gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
2731 break;
2733 return 0;
2736 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2738 uint32_t *u32;
2739 switch (rtattr->rta_type) {
2740 /* binary: depends on family type */
2741 case RTA_DST:
2742 case RTA_SRC:
2743 case RTA_GATEWAY:
2744 break;
2745 /* u32 */
2746 case RTA_PRIORITY:
2747 case RTA_OIF:
2748 u32 = RTA_DATA(rtattr);
2749 *u32 = tswap32(*u32);
2750 break;
2751 default:
2752 gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
2753 break;
2755 return 0;
2758 static void target_to_host_link_rtattr(struct rtattr *rtattr,
2759 uint32_t rtattr_len)
2761 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2762 target_to_host_data_link_rtattr);
2765 static void target_to_host_addr_rtattr(struct rtattr *rtattr,
2766 uint32_t rtattr_len)
2768 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2769 target_to_host_data_addr_rtattr);
2772 static void target_to_host_route_rtattr(struct rtattr *rtattr,
2773 uint32_t rtattr_len)
2775 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2776 target_to_host_data_route_rtattr);
2779 static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
2781 struct ifinfomsg *ifi;
2782 struct ifaddrmsg *ifa;
2783 struct rtmsg *rtm;
2785 switch (nlh->nlmsg_type) {
2786 case RTM_GETLINK:
2787 break;
2788 case RTM_NEWLINK:
2789 case RTM_DELLINK:
2790 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2791 ifi = NLMSG_DATA(nlh);
2792 ifi->ifi_type = tswap16(ifi->ifi_type);
2793 ifi->ifi_index = tswap32(ifi->ifi_index);
2794 ifi->ifi_flags = tswap32(ifi->ifi_flags);
2795 ifi->ifi_change = tswap32(ifi->ifi_change);
2796 target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
2797 NLMSG_LENGTH(sizeof(*ifi)));
2799 break;
2800 case RTM_GETADDR:
2801 case RTM_NEWADDR:
2802 case RTM_DELADDR:
2803 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2804 ifa = NLMSG_DATA(nlh);
2805 ifa->ifa_index = tswap32(ifa->ifa_index);
2806 target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
2807 NLMSG_LENGTH(sizeof(*ifa)));
2809 break;
2810 case RTM_GETROUTE:
2811 break;
2812 case RTM_NEWROUTE:
2813 case RTM_DELROUTE:
2814 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2815 rtm = NLMSG_DATA(nlh);
2816 rtm->rtm_flags = tswap32(rtm->rtm_flags);
2817 target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
2818 NLMSG_LENGTH(sizeof(*rtm)));
2820 break;
2821 default:
2822 return -TARGET_EOPNOTSUPP;
2824 return 0;
2827 static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
2829 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
2831 #endif /* CONFIG_RTNETLINK */
2833 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2835 switch (nlh->nlmsg_type) {
2836 default:
2837 gemu_log("Unknown host audit message type %d\n",
2838 nlh->nlmsg_type);
2839 return -TARGET_EINVAL;
2841 return 0;
2844 static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
2845 size_t len)
2847 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
2850 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2852 switch (nlh->nlmsg_type) {
2853 case AUDIT_USER:
2854 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2855 case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2856 break;
2857 default:
2858 gemu_log("Unknown target audit message type %d\n",
2859 nlh->nlmsg_type);
2860 return -TARGET_EINVAL;
2863 return 0;
2866 static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
2868 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
2871 /* do_setsockopt() Must return target values and target errnos. */
2872 static abi_long do_setsockopt(int sockfd, int level, int optname,
2873 abi_ulong optval_addr, socklen_t optlen)
2875 abi_long ret;
2876 int val;
2877 struct ip_mreqn *ip_mreq;
2878 struct ip_mreq_source *ip_mreq_source;
2880 switch(level) {
2881 case SOL_TCP:
2882 /* TCP options all take an 'int' value. */
2883 if (optlen < sizeof(uint32_t))
2884 return -TARGET_EINVAL;
2886 if (get_user_u32(val, optval_addr))
2887 return -TARGET_EFAULT;
2888 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2889 break;
2890 case SOL_IP:
2891 switch(optname) {
2892 case IP_TOS:
2893 case IP_TTL:
2894 case IP_HDRINCL:
2895 case IP_ROUTER_ALERT:
2896 case IP_RECVOPTS:
2897 case IP_RETOPTS:
2898 case IP_PKTINFO:
2899 case IP_MTU_DISCOVER:
2900 case IP_RECVERR:
2901 case IP_RECVTTL:
2902 case IP_RECVTOS:
2903 #ifdef IP_FREEBIND
2904 case IP_FREEBIND:
2905 #endif
2906 case IP_MULTICAST_TTL:
2907 case IP_MULTICAST_LOOP:
2908 val = 0;
2909 if (optlen >= sizeof(uint32_t)) {
2910 if (get_user_u32(val, optval_addr))
2911 return -TARGET_EFAULT;
2912 } else if (optlen >= 1) {
2913 if (get_user_u8(val, optval_addr))
2914 return -TARGET_EFAULT;
2916 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2917 break;
2918 case IP_ADD_MEMBERSHIP:
2919 case IP_DROP_MEMBERSHIP:
2920 if (optlen < sizeof (struct target_ip_mreq) ||
2921 optlen > sizeof (struct target_ip_mreqn))
2922 return -TARGET_EINVAL;
2924 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2925 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2926 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2927 break;
2929 case IP_BLOCK_SOURCE:
2930 case IP_UNBLOCK_SOURCE:
2931 case IP_ADD_SOURCE_MEMBERSHIP:
2932 case IP_DROP_SOURCE_MEMBERSHIP:
2933 if (optlen != sizeof (struct target_ip_mreq_source))
2934 return -TARGET_EINVAL;
2936 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2937 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2938 unlock_user (ip_mreq_source, optval_addr, 0);
2939 break;
2941 default:
2942 goto unimplemented;
2944 break;
2945 case SOL_IPV6:
2946 switch (optname) {
2947 case IPV6_MTU_DISCOVER:
2948 case IPV6_MTU:
2949 case IPV6_V6ONLY:
2950 case IPV6_RECVPKTINFO:
2951 case IPV6_UNICAST_HOPS:
2952 case IPV6_RECVERR:
2953 case IPV6_RECVHOPLIMIT:
2954 case IPV6_2292HOPLIMIT:
2955 case IPV6_CHECKSUM:
2956 val = 0;
2957 if (optlen < sizeof(uint32_t)) {
2958 return -TARGET_EINVAL;
2960 if (get_user_u32(val, optval_addr)) {
2961 return -TARGET_EFAULT;
2963 ret = get_errno(setsockopt(sockfd, level, optname,
2964 &val, sizeof(val)));
2965 break;
2966 case IPV6_PKTINFO:
2968 struct in6_pktinfo pki;
2970 if (optlen < sizeof(pki)) {
2971 return -TARGET_EINVAL;
2974 if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2975 return -TARGET_EFAULT;
2978 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2980 ret = get_errno(setsockopt(sockfd, level, optname,
2981 &pki, sizeof(pki)));
2982 break;
2984 default:
2985 goto unimplemented;
2987 break;
2988 case SOL_ICMPV6:
2989 switch (optname) {
2990 case ICMPV6_FILTER:
2992 struct icmp6_filter icmp6f;
2994 if (optlen > sizeof(icmp6f)) {
2995 optlen = sizeof(icmp6f);
2998 if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2999 return -TARGET_EFAULT;
3002 for (val = 0; val < 8; val++) {
3003 icmp6f.data[val] = tswap32(icmp6f.data[val]);
3006 ret = get_errno(setsockopt(sockfd, level, optname,
3007 &icmp6f, optlen));
3008 break;
3010 default:
3011 goto unimplemented;
3013 break;
3014 case SOL_RAW:
3015 switch (optname) {
3016 case ICMP_FILTER:
3017 case IPV6_CHECKSUM:
3018 /* those take an u32 value */
3019 if (optlen < sizeof(uint32_t)) {
3020 return -TARGET_EINVAL;
3023 if (get_user_u32(val, optval_addr)) {
3024 return -TARGET_EFAULT;
3026 ret = get_errno(setsockopt(sockfd, level, optname,
3027 &val, sizeof(val)));
3028 break;
3030 default:
3031 goto unimplemented;
3033 break;
3034 case TARGET_SOL_SOCKET:
3035 switch (optname) {
3036 case TARGET_SO_RCVTIMEO:
3038 struct timeval tv;
3040 optname = SO_RCVTIMEO;
3042 set_timeout:
3043 if (optlen != sizeof(struct target_timeval)) {
3044 return -TARGET_EINVAL;
3047 if (copy_from_user_timeval(&tv, optval_addr)) {
3048 return -TARGET_EFAULT;
3051 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3052 &tv, sizeof(tv)));
3053 return ret;
3055 case TARGET_SO_SNDTIMEO:
3056 optname = SO_SNDTIMEO;
3057 goto set_timeout;
3058 case TARGET_SO_ATTACH_FILTER:
3060 struct target_sock_fprog *tfprog;
3061 struct target_sock_filter *tfilter;
3062 struct sock_fprog fprog;
3063 struct sock_filter *filter;
3064 int i;
3066 if (optlen != sizeof(*tfprog)) {
3067 return -TARGET_EINVAL;
3069 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
3070 return -TARGET_EFAULT;
3072 if (!lock_user_struct(VERIFY_READ, tfilter,
3073 tswapal(tfprog->filter), 0)) {
3074 unlock_user_struct(tfprog, optval_addr, 1);
3075 return -TARGET_EFAULT;
3078 fprog.len = tswap16(tfprog->len);
3079 filter = g_try_new(struct sock_filter, fprog.len);
3080 if (filter == NULL) {
3081 unlock_user_struct(tfilter, tfprog->filter, 1);
3082 unlock_user_struct(tfprog, optval_addr, 1);
3083 return -TARGET_ENOMEM;
3085 for (i = 0; i < fprog.len; i++) {
3086 filter[i].code = tswap16(tfilter[i].code);
3087 filter[i].jt = tfilter[i].jt;
3088 filter[i].jf = tfilter[i].jf;
3089 filter[i].k = tswap32(tfilter[i].k);
3091 fprog.filter = filter;
3093 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
3094 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
3095 g_free(filter);
3097 unlock_user_struct(tfilter, tfprog->filter, 1);
3098 unlock_user_struct(tfprog, optval_addr, 1);
3099 return ret;
3101 case TARGET_SO_BINDTODEVICE:
3103 char *dev_ifname, *addr_ifname;
3105 if (optlen > IFNAMSIZ - 1) {
3106 optlen = IFNAMSIZ - 1;
3108 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
3109 if (!dev_ifname) {
3110 return -TARGET_EFAULT;
3112 optname = SO_BINDTODEVICE;
3113 addr_ifname = alloca(IFNAMSIZ);
3114 memcpy(addr_ifname, dev_ifname, optlen);
3115 addr_ifname[optlen] = 0;
3116 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3117 addr_ifname, optlen));
3118 unlock_user (dev_ifname, optval_addr, 0);
3119 return ret;
3121 /* Options with 'int' argument. */
3122 case TARGET_SO_DEBUG:
3123 optname = SO_DEBUG;
3124 break;
3125 case TARGET_SO_REUSEADDR:
3126 optname = SO_REUSEADDR;
3127 break;
3128 case TARGET_SO_TYPE:
3129 optname = SO_TYPE;
3130 break;
3131 case TARGET_SO_ERROR:
3132 optname = SO_ERROR;
3133 break;
3134 case TARGET_SO_DONTROUTE:
3135 optname = SO_DONTROUTE;
3136 break;
3137 case TARGET_SO_BROADCAST:
3138 optname = SO_BROADCAST;
3139 break;
3140 case TARGET_SO_SNDBUF:
3141 optname = SO_SNDBUF;
3142 break;
3143 case TARGET_SO_SNDBUFFORCE:
3144 optname = SO_SNDBUFFORCE;
3145 break;
3146 case TARGET_SO_RCVBUF:
3147 optname = SO_RCVBUF;
3148 break;
3149 case TARGET_SO_RCVBUFFORCE:
3150 optname = SO_RCVBUFFORCE;
3151 break;
3152 case TARGET_SO_KEEPALIVE:
3153 optname = SO_KEEPALIVE;
3154 break;
3155 case TARGET_SO_OOBINLINE:
3156 optname = SO_OOBINLINE;
3157 break;
3158 case TARGET_SO_NO_CHECK:
3159 optname = SO_NO_CHECK;
3160 break;
3161 case TARGET_SO_PRIORITY:
3162 optname = SO_PRIORITY;
3163 break;
3164 #ifdef SO_BSDCOMPAT
3165 case TARGET_SO_BSDCOMPAT:
3166 optname = SO_BSDCOMPAT;
3167 break;
3168 #endif
3169 case TARGET_SO_PASSCRED:
3170 optname = SO_PASSCRED;
3171 break;
3172 case TARGET_SO_PASSSEC:
3173 optname = SO_PASSSEC;
3174 break;
3175 case TARGET_SO_TIMESTAMP:
3176 optname = SO_TIMESTAMP;
3177 break;
3178 case TARGET_SO_RCVLOWAT:
3179 optname = SO_RCVLOWAT;
3180 break;
3181 default:
3182 goto unimplemented;
3184 if (optlen < sizeof(uint32_t))
3185 return -TARGET_EINVAL;
3187 if (get_user_u32(val, optval_addr))
3188 return -TARGET_EFAULT;
3189 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
3190 break;
3191 default:
3192 unimplemented:
3193 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
3194 ret = -TARGET_ENOPROTOOPT;
3196 return ret;
/* do_getsockopt() Must return target values and target errnos.
 *
 * Emulates getsockopt(2) for the guest: translates target-level/optname
 * constants to host values, runs the host syscall, and copies the result
 * back into guest memory at optval_addr, updating *optlen.
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            /* Result is a struct ucred; convert it field by field into
             * the guest's target_ucred layout. */
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            /* Copy back no more than the guest asked for. */
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            /* Unknown options are passed through unchanged and handed
             * to the host kernel as plain 'int' options. */
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value. */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            /* SOCK_* values differ between host and some targets. */
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            /* Guest asked for fewer than 4 bytes: write a single byte. */
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            /* Several IP options may legitimately be read as one byte;
             * honour a short guest buffer when the value fits. */
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/* Convert a guest iovec array at target_addr into a locked host iovec array.
 *
 * 'type' is VERIFY_READ/VERIFY_WRITE, 'copy' is passed to lock_user for
 * each buffer. Returns a g_malloc'd host iovec on success; on failure
 * returns NULL with errno set (caller converts to a target errno).
 * A bad buffer pointer after the first entry does NOT fail the call:
 * the remaining entries are turned into zero-length NULLs so the guest
 * sees a partial transfer, matching kernel readv/writev behaviour.
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support. */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored. */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault. But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* Clamp the running total to max_len; later entries shrink
             * (possibly to zero) rather than overflowing. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind: release every buffer locked so far (len > 0 entries only). */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
3475 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3476 abi_ulong count, int copy)
3478 struct target_iovec *target_vec;
3479 int i;
3481 target_vec = lock_user(VERIFY_READ, target_addr,
3482 count * sizeof(struct target_iovec), 1);
3483 if (target_vec) {
3484 for (i = 0; i < count; i++) {
3485 abi_ulong base = tswapal(target_vec[i].iov_base);
3486 abi_long len = tswapal(target_vec[i].iov_len);
3487 if (len < 0) {
3488 break;
3490 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3492 unlock_user(target_vec, target_addr, 0);
3495 g_free(vec);
3498 static inline int target_to_host_sock_type(int *type)
3500 int host_type = 0;
3501 int target_type = *type;
3503 switch (target_type & TARGET_SOCK_TYPE_MASK) {
3504 case TARGET_SOCK_DGRAM:
3505 host_type = SOCK_DGRAM;
3506 break;
3507 case TARGET_SOCK_STREAM:
3508 host_type = SOCK_STREAM;
3509 break;
3510 default:
3511 host_type = target_type & TARGET_SOCK_TYPE_MASK;
3512 break;
3514 if (target_type & TARGET_SOCK_CLOEXEC) {
3515 #if defined(SOCK_CLOEXEC)
3516 host_type |= SOCK_CLOEXEC;
3517 #else
3518 return -TARGET_EINVAL;
3519 #endif
3521 if (target_type & TARGET_SOCK_NONBLOCK) {
3522 #if defined(SOCK_NONBLOCK)
3523 host_type |= SOCK_NONBLOCK;
3524 #elif !defined(O_NONBLOCK)
3525 return -TARGET_EINVAL;
3526 #endif
3528 *type = host_type;
3529 return 0;
/* Try to emulate socket type flags after socket creation.
 *
 * On hosts lacking SOCK_NONBLOCK, apply O_NONBLOCK via fcntl when the
 * guest requested TARGET_SOCK_NONBLOCK. Returns the fd on success; on
 * fcntl failure the fd is closed and -TARGET_EINVAL returned.
 */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int fl = fcntl(fd, F_GETFL);

        if (fcntl(fd, F_SETFL, O_NONBLOCK | fl) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
/* Convert a guest sockaddr for AF_PACKET/SOCK_PACKET sockets.
 *
 * The structure is copied verbatim and only sa_family is byteswapped;
 * the obsolete sockaddr_pkt's spkt_protocol is kept big-endian as the
 * kernel expects it in network byte order.
 */
static abi_long packet_target_to_host_sockaddr(void *host_addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    struct sockaddr *addr = host_addr;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr) {
        return -TARGET_EFAULT;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = tswap16(target_saddr->sa_family);
    /* spkt_protocol is big-endian */

    unlock_user(target_saddr, target_addr, 0);
    return 0;
}

/* fd translator installed on SOCK_PACKET sockets by do_socket(). */
static TargetFdTrans target_packet_trans = {
    .target_to_host_addr = packet_target_to_host_sockaddr,
};
#ifdef CONFIG_RTNETLINK
/* Byteswap NETLINK_ROUTE messages on their way from guest to kernel.
 * Returns the (unchanged) length on success, or a negative target errno.
 */
static abi_long netlink_route_target_to_host(void *buf, size_t len)
{
    abi_long ret;

    ret = target_to_host_nlmsg_route(buf, len);
    if (ret < 0) {
        return ret;
    }

    return len;
}

/* Byteswap NETLINK_ROUTE messages on their way from kernel to guest. */
static abi_long netlink_route_host_to_target(void *buf, size_t len)
{
    abi_long ret;

    ret = host_to_target_nlmsg_route(buf, len);
    if (ret < 0) {
        return ret;
    }

    return len;
}

/* fd translator installed on NETLINK_ROUTE sockets by do_socket(). */
static TargetFdTrans target_netlink_route_trans = {
    .target_to_host_data = netlink_route_target_to_host,
    .host_to_target_data = netlink_route_host_to_target,
};
#endif /* CONFIG_RTNETLINK */
/* Byteswap NETLINK_AUDIT messages on their way from guest to kernel.
 * Returns the (unchanged) length on success, or a negative target errno.
 */
static abi_long netlink_audit_target_to_host(void *buf, size_t len)
{
    abi_long ret;

    ret = target_to_host_nlmsg_audit(buf, len);
    if (ret < 0) {
        return ret;
    }

    return len;
}

/* Byteswap NETLINK_AUDIT messages on their way from kernel to guest. */
static abi_long netlink_audit_host_to_target(void *buf, size_t len)
{
    abi_long ret;

    ret = host_to_target_nlmsg_audit(buf, len);
    if (ret < 0) {
        return ret;
    }

    return len;
}

/* fd translator installed on NETLINK_AUDIT sockets by do_socket(). */
static TargetFdTrans target_netlink_audit_trans = {
    .target_to_host_data = netlink_audit_target_to_host,
    .host_to_target_data = netlink_audit_host_to_target,
};
/* do_socket() Must return target values and target errnos.
 *
 * Creates a host socket for the guest, restricting PF_NETLINK to the
 * protocols we can translate, fixing up SOCK_NONBLOCK emulation, and
 * registering the per-fd data/address translators where needed.
 */
static abi_long do_socket(int domain, int type, int protocol)
{
    /* Keep the original guest type: sock_flags_fixup() needs the
     * untranslated TARGET_SOCK_NONBLOCK bit. */
    int target_type = type;
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    /* Only netlink protocols we know how to byteswap are allowed. */
    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -EPFNOSUPPORT;
    }

    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        /* Packet sockets take the protocol in network byte order. */
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case :
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                /* Unreachable: protocols were filtered above. */
                g_assert_not_reached();
            }
        }
    }
    return ret;
}
3685 /* do_bind() Must return target values and target errnos. */
3686 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3687 socklen_t addrlen)
3689 void *addr;
3690 abi_long ret;
3692 if ((int)addrlen < 0) {
3693 return -TARGET_EINVAL;
3696 addr = alloca(addrlen+1);
3698 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3699 if (ret)
3700 return ret;
3702 return get_errno(bind(sockfd, addr, addrlen));
3705 /* do_connect() Must return target values and target errnos. */
3706 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3707 socklen_t addrlen)
3709 void *addr;
3710 abi_long ret;
3712 if ((int)addrlen < 0) {
3713 return -TARGET_EINVAL;
3716 addr = alloca(addrlen+1);
3718 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3719 if (ret)
3720 return ret;
3722 return get_errno(safe_connect(sockfd, addr, addrlen));
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 *
 * Core of sendmsg/recvmsg emulation: msgp is the already-locked guest
 * msghdr. 'send' selects direction. Converts name, control data and
 * iovec, performs the host call, then converts results back for a
 * receive. Returns bytes transferred or a target errno.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* Host cmsg headers may be larger than the target's; 2x is a
     * heuristic upper bound for the converted control buffer.
     * NOTE(review): an unchecked huge guest msg_controllen would make
     * this alloca overflow the stack — confirm callers bound it.
     */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            void *host_msg;

            /* Data translator works on a copy of the first iov buffer
             * only (netlink sends use a single buffer). */
            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                                       len);
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                /* (void *)-1 marks the bad-name case above: nothing to
                 * copy back then. */
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                                  msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
3829 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3830 int flags, int send)
3832 abi_long ret;
3833 struct target_msghdr *msgp;
3835 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3836 msgp,
3837 target_msg,
3838 send ? 1 : 0)) {
3839 return -TARGET_EFAULT;
3841 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3842 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3843 return ret;
/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif

/* Emulate sendmmsg/recvmmsg by looping over do_sendrecvmsg_locked().
 *
 * Like the kernel, returns the number of datagrams processed if any
 * succeeded (even when a later one failed), otherwise the error.
 */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    /* Kernel silently clamps the vector length; so do we. */
    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
/* do_accept4() Must return target values and target errnos.
 *
 * Emulates accept4(2) (and plain accept via flags == 0): translates the
 * flag bits, accepts on the host, and copies the peer address back to
 * the guest when requested.
 */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    if (target_addr == 0) {
        return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
    if (!is_error(ret)) {
        /* NOTE(review): host_to_target_sockaddr's return value is
         * discarded here; target_addr was access_ok-checked above, so a
         * failure should be impossible — confirm. */
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
3930 /* do_getpeername() Must return target values and target errnos. */
3931 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3932 abi_ulong target_addrlen_addr)
3934 socklen_t addrlen;
3935 void *addr;
3936 abi_long ret;
3938 if (get_user_u32(addrlen, target_addrlen_addr))
3939 return -TARGET_EFAULT;
3941 if ((int)addrlen < 0) {
3942 return -TARGET_EINVAL;
3945 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3946 return -TARGET_EFAULT;
3948 addr = alloca(addrlen);
3950 ret = get_errno(getpeername(fd, addr, &addrlen));
3951 if (!is_error(ret)) {
3952 host_to_target_sockaddr(target_addr, addr, addrlen);
3953 if (put_user_u32(addrlen, target_addrlen_addr))
3954 ret = -TARGET_EFAULT;
3956 return ret;
3959 /* do_getsockname() Must return target values and target errnos. */
3960 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3961 abi_ulong target_addrlen_addr)
3963 socklen_t addrlen;
3964 void *addr;
3965 abi_long ret;
3967 if (get_user_u32(addrlen, target_addrlen_addr))
3968 return -TARGET_EFAULT;
3970 if ((int)addrlen < 0) {
3971 return -TARGET_EINVAL;
3974 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3975 return -TARGET_EFAULT;
3977 addr = alloca(addrlen);
3979 ret = get_errno(getsockname(fd, addr, &addrlen));
3980 if (!is_error(ret)) {
3981 host_to_target_sockaddr(target_addr, addr, addrlen);
3982 if (put_user_u32(addrlen, target_addrlen_addr))
3983 ret = -TARGET_EFAULT;
3985 return ret;
3988 /* do_socketpair() Must return target values and target errnos. */
3989 static abi_long do_socketpair(int domain, int type, int protocol,
3990 abi_ulong target_tab_addr)
3992 int tab[2];
3993 abi_long ret;
3995 target_to_host_sock_type(&type);
3997 ret = get_errno(socketpair(domain, type, protocol, tab));
3998 if (!is_error(ret)) {
3999 if (put_user_s32(tab[0], target_tab_addr)
4000 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
4001 ret = -TARGET_EFAULT;
4003 return ret;
/* do_sendto() Must return target values and target errnos.
 *
 * Emulates sendto(2) (and send via target_addr == 0). When the fd has a
 * data translator (netlink/packet), the payload is copied and translated
 * before being sent.
 */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL;  /* non-NULL marks the translated-copy case */
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        /* Translate a private copy so the guest buffer stays untouched. */
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        /* +1 byte of slack for the sockaddr converter. */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    /* Restore the locked guest pointer before unlocking it. */
    if (copy_msg) {
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
/* do_recvfrom() Must return target values and target errnos.
 *
 * Emulates recvfrom(2) (and recv via target_addr == 0); data received
 * on translated fds is converted in place before being copied back.
 */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet. */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            ret = fd_trans_host_to_target_data(fd)(host_msg, ret);
        }
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* Success: copy the whole locked buffer back to the guest.
         * NOTE(review): this copies 'len' bytes even when fewer were
         * received — confirm whether 'ret' would be the tighter bound. */
        unlock_user(host_msg, msg, len);
    } else {
fail:
        /* Error: release the buffer without copying anything back. */
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos.
 *
 * Demultiplexer for the legacy socketcall(2) syscall: 'num' selects the
 * operation and 'vptr' points at its packed argument array in guest
 * memory. Arguments are fetched according to the nargs[] table and then
 * dispatched to the individual do_*() helpers above.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
/* Fixed-size table tracking guest shmat() mappings so shmdt() can find
 * the size of the region it must unmap. */
#define N_SHM_REGIONS	32

static struct shm_region {
    abi_ulong start;   /* guest start address of the attached segment */
    abi_ulong size;    /* size of the segment in bytes */
    bool in_use;       /* slot occupancy flag */
} shm_regions[N_SHM_REGIONS];
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
struct target_semid64_ds
{
    struct target_ipc_perm sem_perm;  /* ownership and permissions */
    abi_ulong sem_otime;              /* last semop time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;              /* padding (high half on 32-bit ABIs) */
#endif
    abi_ulong sem_ctime;              /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;              /* padding (high half on 32-bit ABIs) */
#endif
    abi_ulong sem_nsems;              /* number of semaphores in the set */
    abi_ulong __unused3;
    abi_ulong __unused4;
};
#endif
/* Copy the sem_perm member of a guest semid64_ds into a host ipc_perm.
 *
 * target_addr points at the whole guest target_semid64_ds; the mode and
 * __seq fields are 32-bit on some targets (Alpha/MIPS/PPC mode, PPC seq)
 * and 16-bit elsewhere, hence the per-target tswap width.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/* Copy a host ipc_perm into the sem_perm member of a guest semid64_ds.
 *
 * Inverse of target_to_host_ipc_perm(); the same per-target field-width
 * differences apply to mode and __seq. Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
/* Convert a guest semid64_ds at target_addr into a host semid_ds.
 * Delegates the permission block to target_to_host_ipc_perm().
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/* Convert a host semid_ds into the guest semid64_ds at target_addr.
 * Delegates the permission block to host_to_target_ipc_perm().
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
/* Guest-layout mirror of the host's struct seminfo (semctl IPC_INFO /
 * SEM_INFO result); all fields are plain ints on both sides. */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
/* Copy a host seminfo into guest memory at target_addr, byteswapping
 * each int field. Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
/* Host-side semctl argument union (glibc does not export one). */
union semun {
    int val;                  /* value for SETVAL */
    struct semid_ds *buf;     /* buffer for IPC_STAT / IPC_SET */
    unsigned short *array;    /* array for GETALL / SETALL */
    struct seminfo *__buf;    /* buffer for IPC_INFO / SEM_INFO */
};

/* Guest-side view of the same union: pointer members are guest addresses. */
union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
/* Read a guest SETALL semaphore-value array into a freshly allocated
 * host array.
 *
 * The set size is discovered with a host IPC_STAT on semid. On success
 * *host_array owns a g_malloc'd array of nsems values that the caller
 * (do_semctl via host_to_target_semarray) must eventually free.
 * Returns 0 or a target errno.
 */
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = g_try_new(unsigned short, nsems);
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array) {
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}
4389 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4390 unsigned short **host_array)
4392 int nsems;
4393 unsigned short *array;
4394 union semun semun;
4395 struct semid_ds semid_ds;
4396 int i, ret;
4398 semun.buf = &semid_ds;
4400 ret = semctl(semid, 0, IPC_STAT, semun);
4401 if (ret == -1)
4402 return get_errno(ret);
4404 nsems = semid_ds.sem_nsems;
4406 array = lock_user(VERIFY_WRITE, target_addr,
4407 nsems*sizeof(unsigned short), 0);
4408 if (!array)
4409 return -TARGET_EFAULT;
4411 for(i=0; i<nsems; i++) {
4412 __put_user((*host_array)[i], &array[i]);
4414 g_free(*host_array);
4415 unlock_user(array, target_addr, 1);
4417 return 0;
/* Emulate semctl(2): convert the guest's semun argument for each command
 * class, run the host semctl, and convert results back.
 * target_arg is the raw guest union value. Returns the host result or a
 * target errno.
 */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    /* Strip target flag bits (e.g. IPC_64) before dispatching. */
    cmd &= 0xff;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element. To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        /* Round-trip the whole value array through a host copy. */
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        /* Round-trip a semid_ds buffer through a host copy. */
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* These commands ignore the semun argument entirely. */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
/* Guest-side layout of struct sembuf (one semop operation). */
struct target_sembuf {
    unsigned short sem_num;  /* semaphore index in the set */
    short sem_op;            /* operation: <0 wait, 0 wait-for-zero, >0 signal */
    short sem_flg;           /* IPC_NOWAIT / SEM_UNDO flags */
};
4496 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4497 abi_ulong target_addr,
4498 unsigned nsops)
4500 struct target_sembuf *target_sembuf;
4501 int i;
4503 target_sembuf = lock_user(VERIFY_READ, target_addr,
4504 nsops*sizeof(struct target_sembuf), 1);
4505 if (!target_sembuf)
4506 return -TARGET_EFAULT;
4508 for(i=0; i<nsops; i++) {
4509 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4510 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4511 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4514 unlock_user(target_sembuf, target_addr, 0);
4516 return 0;
4519 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
4521 struct sembuf sops[nsops];
4523 if (target_to_host_sembuf(sops, ptr, nsops))
4524 return -TARGET_EFAULT;
4526 return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
/* Guest-side layout of struct msqid_ds (message queue status).
 * On 32-bit ABIs the kernel pads each time_t field to 64 bits with an
 * unused word, hence the conditional __unusedN members. */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;        /* time of last msgsnd() */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;        /* time of last msgrcv() */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;        /* time of last change */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;     /* current number of bytes on queue */
    abi_ulong msg_qnum;         /* current number of messages */
    abi_ulong msg_qbytes;       /* max bytes allowed on queue */
    abi_ulong msg_lspid;        /* pid of last msgsnd() */
    abi_ulong msg_lrpid;        /* pid of last msgrcv() */
    abi_ulong __unused4;
    abi_ulong __unused5;
};
4553 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4554 abi_ulong target_addr)
4556 struct target_msqid_ds *target_md;
4558 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4559 return -TARGET_EFAULT;
4560 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4561 return -TARGET_EFAULT;
4562 host_md->msg_stime = tswapal(target_md->msg_stime);
4563 host_md->msg_rtime = tswapal(target_md->msg_rtime);
4564 host_md->msg_ctime = tswapal(target_md->msg_ctime);
4565 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4566 host_md->msg_qnum = tswapal(target_md->msg_qnum);
4567 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4568 host_md->msg_lspid = tswapal(target_md->msg_lspid);
4569 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4570 unlock_user_struct(target_md, target_addr, 0);
4571 return 0;
4574 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4575 struct msqid_ds *host_md)
4577 struct target_msqid_ds *target_md;
4579 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4580 return -TARGET_EFAULT;
4581 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4582 return -TARGET_EFAULT;
4583 target_md->msg_stime = tswapal(host_md->msg_stime);
4584 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4585 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4586 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4587 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4588 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4589 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4590 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4591 unlock_user_struct(target_md, target_addr, 1);
4592 return 0;
/* Guest-side layout of struct msginfo (IPC_INFO/MSG_INFO result). */
struct target_msginfo {
    int msgpool;                /* size of message pool (kbytes) */
    int msgmap;                 /* entries in message map */
    int msgmax;                 /* max size of a single message */
    int msgmnb;                 /* default max bytes per queue */
    int msgmni;                 /* max number of queues */
    int msgssz;                 /* message segment size */
    int msgtql;                 /* max messages system-wide */
    unsigned short int msgseg;  /* max message segments */
};
4606 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4607 struct msginfo *host_msginfo)
4609 struct target_msginfo *target_msginfo;
4610 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4611 return -TARGET_EFAULT;
4612 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4613 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4614 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4615 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4616 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4617 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4618 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4619 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4620 unlock_user_struct(target_msginfo, target_addr, 1);
4621 return 0;
/*
 * Emulate msgctl(2).  ptr is a guest pointer whose meaning depends on
 * cmd: a target_msqid_ds for IPC_STAT/IPC_SET/MSG_STAT, a
 * target_msginfo for IPC_INFO/MSG_INFO, and unused for IPC_RMID.
 */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    /* Some targets OR an IPC_64 flag into the command word; strip it. */
    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* The kernel actually fills a struct msginfo for these commands
         * even though the prototype takes msqid_ds*, hence the cast. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
/* Guest-side layout of struct msgbuf: an mtype header followed by the
 * message payload (mtext is a variable-length trailer; [1] is the
 * traditional pre-C99 idiom for that). */
struct target_msgbuf {
    abi_long mtype;     /* message type, must be > 0 */
    char mtext[1];      /* message body, msgsz bytes */
};
/*
 * Emulate msgsnd(2): copy the guest message at msgp (a target_msgbuf)
 * into a temporary host msgbuf and send it.  Returns the syscall
 * result, -TARGET_EINVAL for a negative size, -TARGET_EFAULT or
 * -TARGET_ENOMEM on conversion failure.
 */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* Host buffer holds the host 'long' mtype plus msgsz payload bytes. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    /* mtype needs a byte swap; the payload is raw bytes. */
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    /* safe_ variant so a pending guest signal can interrupt the block. */
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
/*
 * Emulate msgrcv(2): receive into a temporary host msgbuf, then copy
 * the mtype (byte-swapped) and up to 'ret' payload bytes back into the
 * guest target_msgbuf at msgp.
 *
 * NOTE(review): if safe_msgrcv fails, target_mb->mtype is still written
 * from the uninitialized host buffer below; harmless (the guest sees an
 * error) but worth confirming against newer upstream code.
 */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    /* Host buffer holds the host 'long' mtype plus msgsz payload bytes. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    /* safe_ variant so a pending guest signal can interrupt the block. */
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));

    if (ret > 0) {
        /* Payload starts right after the guest's abi_ulong mtype field. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    /* target_mb is always valid here (early return above on lock failure). */
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
4731 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4732 abi_ulong target_addr)
4734 struct target_shmid_ds *target_sd;
4736 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4737 return -TARGET_EFAULT;
4738 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4739 return -TARGET_EFAULT;
4740 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4741 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4742 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4743 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4744 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4745 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4746 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4747 unlock_user_struct(target_sd, target_addr, 0);
4748 return 0;
4751 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4752 struct shmid_ds *host_sd)
4754 struct target_shmid_ds *target_sd;
4756 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4757 return -TARGET_EFAULT;
4758 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4759 return -TARGET_EFAULT;
4760 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4761 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4762 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4763 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4764 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4765 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4766 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4767 unlock_user_struct(target_sd, target_addr, 1);
4768 return 0;
/* Guest-side layout of struct shminfo (IPC_INFO result). */
struct target_shminfo {
    abi_ulong shmmax;   /* max segment size in bytes */
    abi_ulong shmmin;   /* min segment size in bytes */
    abi_ulong shmmni;   /* max number of segments */
    abi_ulong shmseg;   /* max segments per process */
    abi_ulong shmall;   /* max total shared memory in pages */
};
4779 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4780 struct shminfo *host_shminfo)
4782 struct target_shminfo *target_shminfo;
4783 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4784 return -TARGET_EFAULT;
4785 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4786 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4787 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4788 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4789 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4790 unlock_user_struct(target_shminfo, target_addr, 1);
4791 return 0;
/* Guest-side layout of struct shm_info (SHM_INFO result). */
struct target_shm_info {
    int used_ids;               /* number of existing segments */
    abi_ulong shm_tot;          /* total shared memory, in pages */
    abi_ulong shm_rss;          /* resident shared memory, in pages */
    abi_ulong shm_swp;          /* swapped shared memory, in pages */
    abi_ulong swap_attempts;    /* unused since Linux 2.4 */
    abi_ulong swap_successes;   /* unused since Linux 2.4 */
};
4803 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4804 struct shm_info *host_shm_info)
4806 struct target_shm_info *target_shm_info;
4807 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4808 return -TARGET_EFAULT;
4809 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4810 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4811 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4812 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4813 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4814 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4815 unlock_user_struct(target_shm_info, target_addr, 1);
4816 return 0;
/*
 * Emulate shmctl(2).  buf is a guest pointer whose meaning depends on
 * cmd: a target_shmid_ds for IPC_STAT/IPC_SET/SHM_STAT, a
 * target_shminfo for IPC_INFO, a target_shm_info for SHM_INFO, and
 * unused for IPC_RMID/SHM_LOCK/SHM_UNLOCK.
 */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    /* Some targets OR an IPC_64 flag into the command word; strip it. */
    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* The kernel fills a struct shminfo here; the cast matches the
         * kernel's own reuse of the shmid_ds* parameter. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        /* These commands take no argument. */
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
/* Generic fallback: cpu_env is unused; SHMLBA is one target page. */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif
/*
 * Emulate shmat(2): attach shared memory segment shmid at guest
 * address shmaddr (or pick a free guest range if shmaddr is 0).
 * Returns the guest attach address, or a negative target errno.
 *
 * Runs under mmap_lock so the guest address-space bookkeeping
 * (mmap_find_vma / page_set_flags / shm_regions[]) stays consistent.
 */
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;
    abi_ulong shmlba;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    /* Enforce the target's SHMLBA alignment: round down with SHM_RND,
     * otherwise reject, matching kernel semantics. */
    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* No address requested: find a free guest range ourselves. */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP because mmap_find_vma reserved the range. */
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    /* Mark the attached range valid/readable (and writable unless the
     * guest asked for a read-only attach). */
    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* Remember the mapping so do_shmdt() can clear the page flags. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
4947 static inline abi_long do_shmdt(abi_ulong shmaddr)
4949 int i;
4950 abi_long rv;
4952 mmap_lock();
4954 for (i = 0; i < N_SHM_REGIONS; ++i) {
4955 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4956 shm_regions[i].in_use = false;
4957 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4958 break;
4961 rv = get_errno(shmdt(g2h(shmaddr)));
4963 mmap_unlock();
4965 return rv;
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
/*
 * Demultiplexer for the legacy ipc(2) syscall used by some targets:
 * the low 16 bits of 'call' select the operation and the high 16 bits
 * carry an interface version (relevant for msgrcv/shmat only).
 */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
        {
            /* Old interface: msgp and msgtyp are packed in a kludge
             * struct that ptr points to. */
            struct target_ipc_kludge {
                abi_long msgp;
                abi_long msgtyp;
            } *tmp;

            if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                ret = -TARGET_EFAULT;
                break;
            }

            ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

            unlock_user_struct(tmp, ptr, 0);
            break;
        }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* The attach address is returned through *third. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* iBCS2 emulator entry point: not supported. */
            ret = -TARGET_EINVAL;
            break;
        }
	break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

    /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
/* kernel structure types definitions */
/* First pass over syscall_types.h: build an enum of STRUCT_xxx ids. */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Second pass: emit a thunk type-description array per structure. */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

typedef struct IOCTLEntry IOCTLEntry;

/* Custom converter hook for ioctls that need more than the generic
 * thunk-based argument translation. */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

/* One entry of the ioctl translation table. */
struct IOCTLEntry {
    int target_cmd;             /* guest ioctl request code */
    unsigned int host_cmd;      /* corresponding host request code */
    const char *name;           /* for logging/strace */
    int access;                 /* IOC_R / IOC_W / IOC_RW direction */
    do_ioctl_fn *do_ioctl;      /* non-NULL for special-case handling */
    const argtype arg_type[5];  /* thunk description of the argument */
};

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

/* Size of the fixed scratch buffer used for ioctl argument staging. */
#define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))

/* Special-case converter for FS_IOC_FIEMAP. */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    /* Convert the fixed fiemap header from the guest. */
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* Reject guest-supplied counts that would overflow size math. */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
5201 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
5202 int fd, int cmd, abi_long arg)
5204 const argtype *arg_type = ie->arg_type;
5205 int target_size;
5206 void *argptr;
5207 int ret;
5208 struct ifconf *host_ifconf;
5209 uint32_t outbufsz;
5210 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
5211 int target_ifreq_size;
5212 int nb_ifreq;
5213 int free_buf = 0;
5214 int i;
5215 int target_ifc_len;
5216 abi_long target_ifc_buf;
5217 int host_ifc_len;
5218 char *host_ifc_buf;
5220 assert(arg_type[0] == TYPE_PTR);
5221 assert(ie->access == IOC_RW);
5223 arg_type++;
5224 target_size = thunk_type_size(arg_type, 0);
5226 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5227 if (!argptr)
5228 return -TARGET_EFAULT;
5229 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5230 unlock_user(argptr, arg, 0);
5232 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
5233 target_ifc_len = host_ifconf->ifc_len;
5234 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
5236 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
5237 nb_ifreq = target_ifc_len / target_ifreq_size;
5238 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
5240 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
5241 if (outbufsz > MAX_STRUCT_SIZE) {
5242 /* We can't fit all the extents into the fixed size buffer.
5243 * Allocate one that is large enough and use it instead.
5245 host_ifconf = malloc(outbufsz);
5246 if (!host_ifconf) {
5247 return -TARGET_ENOMEM;
5249 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
5250 free_buf = 1;
5252 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
5254 host_ifconf->ifc_len = host_ifc_len;
5255 host_ifconf->ifc_buf = host_ifc_buf;
5257 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
5258 if (!is_error(ret)) {
5259 /* convert host ifc_len to target ifc_len */
5261 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
5262 target_ifc_len = nb_ifreq * target_ifreq_size;
5263 host_ifconf->ifc_len = target_ifc_len;
5265 /* restore target ifc_buf */
5267 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
5269 /* copy struct ifconf to target user */
5271 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5272 if (!argptr)
5273 return -TARGET_EFAULT;
5274 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
5275 unlock_user(argptr, arg, target_size);
5277 /* copy ifreq[] to target user */
5279 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
5280 for (i = 0; i < nb_ifreq ; i++) {
5281 thunk_convert(argptr + i * target_ifreq_size,
5282 host_ifc_buf + i * sizeof(struct ifreq),
5283 ifreq_arg_type, THUNK_TARGET);
5285 unlock_user(argptr, target_ifc_buf, target_ifc_len);
5288 if (free_buf) {
5289 free(host_ifconf);
5292 return ret;
5295 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5296 int cmd, abi_long arg)
5298 void *argptr;
5299 struct dm_ioctl *host_dm;
5300 abi_long guest_data;
5301 uint32_t guest_data_size;
5302 int target_size;
5303 const argtype *arg_type = ie->arg_type;
5304 abi_long ret;
5305 void *big_buf = NULL;
5306 char *host_data;
5308 arg_type++;
5309 target_size = thunk_type_size(arg_type, 0);
5310 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5311 if (!argptr) {
5312 ret = -TARGET_EFAULT;
5313 goto out;
5315 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5316 unlock_user(argptr, arg, 0);
5318 /* buf_temp is too small, so fetch things into a bigger buffer */
5319 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5320 memcpy(big_buf, buf_temp, target_size);
5321 buf_temp = big_buf;
5322 host_dm = big_buf;
5324 guest_data = arg + host_dm->data_start;
5325 if ((guest_data - arg) < 0) {
5326 ret = -TARGET_EINVAL;
5327 goto out;
5329 guest_data_size = host_dm->data_size - host_dm->data_start;
5330 host_data = (char*)host_dm + host_dm->data_start;
5332 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5333 if (!argptr) {
5334 ret = -TARGET_EFAULT;
5335 goto out;
5338 switch (ie->host_cmd) {
5339 case DM_REMOVE_ALL:
5340 case DM_LIST_DEVICES:
5341 case DM_DEV_CREATE:
5342 case DM_DEV_REMOVE:
5343 case DM_DEV_SUSPEND:
5344 case DM_DEV_STATUS:
5345 case DM_DEV_WAIT:
5346 case DM_TABLE_STATUS:
5347 case DM_TABLE_CLEAR:
5348 case DM_TABLE_DEPS:
5349 case DM_LIST_VERSIONS:
5350 /* no input data */
5351 break;
5352 case DM_DEV_RENAME:
5353 case DM_DEV_SET_GEOMETRY:
5354 /* data contains only strings */
5355 memcpy(host_data, argptr, guest_data_size);
5356 break;
5357 case DM_TARGET_MSG:
5358 memcpy(host_data, argptr, guest_data_size);
5359 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5360 break;
5361 case DM_TABLE_LOAD:
5363 void *gspec = argptr;
5364 void *cur_data = host_data;
5365 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5366 int spec_size = thunk_type_size(arg_type, 0);
5367 int i;
5369 for (i = 0; i < host_dm->target_count; i++) {
5370 struct dm_target_spec *spec = cur_data;
5371 uint32_t next;
5372 int slen;
5374 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5375 slen = strlen((char*)gspec + spec_size) + 1;
5376 next = spec->next;
5377 spec->next = sizeof(*spec) + slen;
5378 strcpy((char*)&spec[1], gspec + spec_size);
5379 gspec += next;
5380 cur_data += spec->next;
5382 break;
5384 default:
5385 ret = -TARGET_EINVAL;
5386 unlock_user(argptr, guest_data, 0);
5387 goto out;
5389 unlock_user(argptr, guest_data, 0);
5391 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5392 if (!is_error(ret)) {
5393 guest_data = arg + host_dm->data_start;
5394 guest_data_size = host_dm->data_size - host_dm->data_start;
5395 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5396 switch (ie->host_cmd) {
5397 case DM_REMOVE_ALL:
5398 case DM_DEV_CREATE:
5399 case DM_DEV_REMOVE:
5400 case DM_DEV_RENAME:
5401 case DM_DEV_SUSPEND:
5402 case DM_DEV_STATUS:
5403 case DM_TABLE_LOAD:
5404 case DM_TABLE_CLEAR:
5405 case DM_TARGET_MSG:
5406 case DM_DEV_SET_GEOMETRY:
5407 /* no return data */
5408 break;
5409 case DM_LIST_DEVICES:
5411 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5412 uint32_t remaining_data = guest_data_size;
5413 void *cur_data = argptr;
5414 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5415 int nl_size = 12; /* can't use thunk_size due to alignment */
5417 while (1) {
5418 uint32_t next = nl->next;
5419 if (next) {
5420 nl->next = nl_size + (strlen(nl->name) + 1);
5422 if (remaining_data < nl->next) {
5423 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5424 break;
5426 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5427 strcpy(cur_data + nl_size, nl->name);
5428 cur_data += nl->next;
5429 remaining_data -= nl->next;
5430 if (!next) {
5431 break;
5433 nl = (void*)nl + next;
5435 break;
5437 case DM_DEV_WAIT:
5438 case DM_TABLE_STATUS:
5440 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5441 void *cur_data = argptr;
5442 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5443 int spec_size = thunk_type_size(arg_type, 0);
5444 int i;
5446 for (i = 0; i < host_dm->target_count; i++) {
5447 uint32_t next = spec->next;
5448 int slen = strlen((char*)&spec[1]) + 1;
5449 spec->next = (cur_data - argptr) + spec_size + slen;
5450 if (guest_data_size < spec->next) {
5451 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5452 break;
5454 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5455 strcpy(cur_data + spec_size, (char*)&spec[1]);
5456 cur_data = argptr + spec->next;
5457 spec = (void*)host_dm + host_dm->data_start + next;
5459 break;
5461 case DM_TABLE_DEPS:
5463 void *hdata = (void*)host_dm + host_dm->data_start;
5464 int count = *(uint32_t*)hdata;
5465 uint64_t *hdev = hdata + 8;
5466 uint64_t *gdev = argptr + 8;
5467 int i;
5469 *(uint32_t*)argptr = tswap32(count);
5470 for (i = 0; i < count; i++) {
5471 *gdev = tswap64(*hdev);
5472 gdev++;
5473 hdev++;
5475 break;
5477 case DM_LIST_VERSIONS:
5479 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5480 uint32_t remaining_data = guest_data_size;
5481 void *cur_data = argptr;
5482 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5483 int vers_size = thunk_type_size(arg_type, 0);
5485 while (1) {
5486 uint32_t next = vers->next;
5487 if (next) {
5488 vers->next = vers_size + (strlen(vers->name) + 1);
5490 if (remaining_data < vers->next) {
5491 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5492 break;
5494 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5495 strcpy(cur_data + vers_size, vers->name);
5496 cur_data += vers->next;
5497 remaining_data -= vers->next;
5498 if (!next) {
5499 break;
5501 vers = (void*)vers + next;
5503 break;
5505 default:
5506 unlock_user(argptr, guest_data, 0);
5507 ret = -TARGET_EINVAL;
5508 goto out;
5510 unlock_user(argptr, guest_data, guest_data_size);
5512 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5513 if (!argptr) {
5514 ret = -TARGET_EFAULT;
5515 goto out;
5517 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5518 unlock_user(argptr, arg, target_size);
5520 out:
5521 g_free(big_buf);
5522 return ret;
/*
 * Special-case converter for BLKPG.  struct blkpg_ioctl_arg embeds a
 * 'data' pointer to a struct blkpg_partition, so the inner structure
 * must be fetched and converted separately and the pointer swizzled
 * to a host-local copy before issuing the host ioctl.
 */
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
5578 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5579 int fd, int cmd, abi_long arg)
5581 const argtype *arg_type = ie->arg_type;
5582 const StructEntry *se;
5583 const argtype *field_types;
5584 const int *dst_offsets, *src_offsets;
5585 int target_size;
5586 void *argptr;
5587 abi_ulong *target_rt_dev_ptr;
5588 unsigned long *host_rt_dev_ptr;
5589 abi_long ret;
5590 int i;
5592 assert(ie->access == IOC_W);
5593 assert(*arg_type == TYPE_PTR);
5594 arg_type++;
5595 assert(*arg_type == TYPE_STRUCT);
5596 target_size = thunk_type_size(arg_type, 0);
5597 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5598 if (!argptr) {
5599 return -TARGET_EFAULT;
5601 arg_type++;
5602 assert(*arg_type == (int)STRUCT_rtentry);
5603 se = struct_entries + *arg_type++;
5604 assert(se->convert[0] == NULL);
5605 /* convert struct here to be able to catch rt_dev string */
5606 field_types = se->field_types;
5607 dst_offsets = se->field_offsets[THUNK_HOST];
5608 src_offsets = se->field_offsets[THUNK_TARGET];
5609 for (i = 0; i < se->nb_fields; i++) {
5610 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5611 assert(*field_types == TYPE_PTRVOID);
5612 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5613 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5614 if (*target_rt_dev_ptr != 0) {
5615 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5616 tswapal(*target_rt_dev_ptr));
5617 if (!*host_rt_dev_ptr) {
5618 unlock_user(argptr, arg, 0);
5619 return -TARGET_EFAULT;
5621 } else {
5622 *host_rt_dev_ptr = 0;
5624 field_types++;
5625 continue;
5627 field_types = thunk_convert(buf_temp + dst_offsets[i],
5628 argptr + src_offsets[i],
5629 field_types, THUNK_HOST);
5631 unlock_user(argptr, arg, 0);
5633 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5634 if (*host_rt_dev_ptr != 0) {
5635 unlock_user((void *)*host_rt_dev_ptr,
5636 *target_rt_dev_ptr, 0);
5638 return ret;
5641 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5642 int fd, int cmd, abi_long arg)
5644 int sig = target_to_host_signal(arg);
5645 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
#ifdef TIOCGPTPEER
/*
 * TIOCGPTPEER: the ioctl argument is a set of open(2)-style flags,
 * which must be converted from target to host bit values (using the
 * fcntl flag translation table) before calling the host ioctl.
 */
static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int host_flags = target_to_host_bitmask(arg, fcntl_flags_tbl);

    return get_errno(safe_ioctl(fd, ie->host_cmd, host_flags));
}
#endif
/*
 * Table of all supported ioctls, populated from "ioctls.h".  Each entry
 * maps a target ioctl number to the host number, with an access mode,
 * an optional special-case handler (IOCTL_SPECIAL), and the thunk
 * argument-type description.  IOCTL_IGNORE entries have host_cmd == 0
 * and are silently not implemented.  Terminated by target_cmd == 0.
 */
5657 static IOCTLEntry ioctl_entries[] = {
5658 #define IOCTL(cmd, access, ...) \
5659 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5660 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5661 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5662 #define IOCTL_IGNORE(cmd) \
5663 { TARGET_ ## cmd, 0, #cmd },
5664 #include "ioctls.h"
5665 { 0, 0, },
5668 /* ??? Implement proper locking for ioctls. */
5669 /* do_ioctl() Must return target values and target errnos. */
/*
 * Generic ioctl dispatcher: look the command up in ioctl_entries, then
 * either delegate to a special-case handler, or convert the argument
 * according to its thunk type description (none / scalar / pointed-to
 * struct with IOC_R, IOC_W or IOC_RW direction) around the host ioctl.
 */
5670 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5672 const IOCTLEntry *ie;
5673 const argtype *arg_type;
5674 abi_long ret;
/* Scratch buffer big enough for any struct described in ioctls.h. */
5675 uint8_t buf_temp[MAX_STRUCT_SIZE];
5676 int target_size;
5677 void *argptr;
/* Linear search of the table; terminated by target_cmd == 0. */
5679 ie = ioctl_entries;
5680 for(;;) {
5681 if (ie->target_cmd == 0) {
5682 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5683 return -TARGET_ENOSYS;
5685 if (ie->target_cmd == cmd)
5686 break;
5687 ie++;
5689 arg_type = ie->arg_type;
5690 #if defined(DEBUG)
5691 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
5692 #endif
5693 if (ie->do_ioctl) {
5694 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5695 } else if (!ie->host_cmd) {
5696 /* Some architectures define BSD ioctls in their headers
5697 that are not implemented in Linux. */
5698 return -TARGET_ENOSYS;
5701 switch(arg_type[0]) {
5702 case TYPE_NULL:
5703 /* no argument */
5704 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5705 break;
5706 case TYPE_PTRVOID:
5707 case TYPE_INT:
/* Scalar argument: pass through unchanged. */
5708 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5709 break;
5710 case TYPE_PTR:
5711 arg_type++;
5712 target_size = thunk_type_size(arg_type, 0);
5713 switch(ie->access) {
5714 case IOC_R:
/* Kernel writes: call first, then copy result out to the guest. */
5715 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5716 if (!is_error(ret)) {
5717 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5718 if (!argptr)
5719 return -TARGET_EFAULT;
5720 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5721 unlock_user(argptr, arg, target_size);
5723 break;
5724 case IOC_W:
/* Kernel reads: copy the guest struct in, then call. */
5725 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5726 if (!argptr)
5727 return -TARGET_EFAULT;
5728 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5729 unlock_user(argptr, arg, 0);
5730 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5731 break;
5732 default:
5733 case IOC_RW:
/* Read-modify-write: convert in, call, convert back out. */
5734 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5735 if (!argptr)
5736 return -TARGET_EFAULT;
5737 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5738 unlock_user(argptr, arg, 0);
5739 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5740 if (!is_error(ret)) {
5741 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5742 if (!argptr)
5743 return -TARGET_EFAULT;
5744 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5745 unlock_user(argptr, arg, target_size);
5747 break;
5749 break;
5750 default:
5751 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5752 (long)cmd, arg_type[0]);
5753 ret = -TARGET_ENOSYS;
5754 break;
5756 return ret;
/* termios c_iflag (input modes): target <-> host bit translation table. */
5759 static const bitmask_transtbl iflag_tbl[] = {
5760 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5761 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5762 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5763 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5764 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5765 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5766 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5767 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5768 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5769 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5770 { TARGET_IXON, TARGET_IXON, IXON, IXON },
5771 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5772 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5773 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5774 { 0, 0, 0, 0 }
/*
 * termios c_oflag (output modes): target <-> host translation table.
 * Multi-bit delay fields (NLDLY, CRDLY, TABDLY, ...) have one entry per
 * possible value under the same mask.
 */
5777 static const bitmask_transtbl oflag_tbl[] = {
5778 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5779 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5780 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5781 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5782 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5783 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5784 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5785 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5786 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5787 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5788 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5789 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5790 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5791 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5792 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5793 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5794 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5795 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5796 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5797 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5798 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5799 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5800 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5801 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5802 { 0, 0, 0, 0 }
/*
 * termios c_cflag (control modes): target <-> host translation table.
 * Baud rates are encoded as values of the multi-bit CBAUD field;
 * character size likewise under CSIZE.
 */
5805 static const bitmask_transtbl cflag_tbl[] = {
5806 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5807 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5808 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5809 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5810 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5811 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5812 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5813 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5814 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5815 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5816 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5817 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5818 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5819 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5820 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5821 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5822 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5823 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5824 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5825 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5826 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5827 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5828 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5829 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5830 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5831 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5832 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5833 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5834 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5835 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5836 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5837 { 0, 0, 0, 0 }
/* termios c_lflag (local modes): target <-> host bit translation table. */
5840 static const bitmask_transtbl lflag_tbl[] = {
5841 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5842 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5843 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5844 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5845 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5846 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5847 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5848 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5849 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5850 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5851 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5852 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5853 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5854 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5855 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5856 { 0, 0, 0, 0 }
/*
 * Convert a target struct termios to the host layout: the four flag
 * words go through the bitmask tables above, and each control character
 * is copied into its host slot (host c_cc is zeroed first, since the
 * host array may be larger than the set of slots we populate).
 */
5859 static void target_to_host_termios (void *dst, const void *src)
5861 struct host_termios *host = dst;
5862 const struct target_termios *target = src;
5864 host->c_iflag =
5865 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5866 host->c_oflag =
5867 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5868 host->c_cflag =
5869 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5870 host->c_lflag =
5871 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5872 host->c_line = target->c_line;
5874 memset(host->c_cc, 0, sizeof(host->c_cc));
5875 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5876 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5877 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5878 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5879 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5880 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5881 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5882 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5883 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5884 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5885 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5886 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5887 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5888 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5889 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5890 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5891 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
/*
 * Inverse of target_to_host_termios(): convert a host struct termios
 * into the target layout, byte-swapping the flag words to target
 * endianness and remapping the control-character slots.
 */
5894 static void host_to_target_termios (void *dst, const void *src)
5896 struct target_termios *target = dst;
5897 const struct host_termios *host = src;
5899 target->c_iflag =
5900 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5901 target->c_oflag =
5902 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5903 target->c_cflag =
5904 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5905 target->c_lflag =
5906 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5907 target->c_line = host->c_line;
5909 memset(target->c_cc, 0, sizeof(target->c_cc));
5910 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5911 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5912 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5913 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5914 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5915 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5916 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5917 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5918 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5919 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5920 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5921 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5922 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5923 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5924 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5925 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5926 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
/* Thunk descriptor wiring the two termios converters above into the
 * generic struct-conversion machinery (sizes/alignments per direction). */
5929 static const StructEntry struct_termios_def = {
5930 .convert = { host_to_target_termios, target_to_host_termios },
5931 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5932 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
/* mmap(2) flags: target <-> host bit translation table. */
5935 static bitmask_transtbl mmap_flags_tbl[] = {
5936 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5937 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5938 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5939 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5940 MAP_ANONYMOUS, MAP_ANONYMOUS },
5941 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5942 MAP_GROWSDOWN, MAP_GROWSDOWN },
5943 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5944 MAP_DENYWRITE, MAP_DENYWRITE },
5945 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5946 MAP_EXECUTABLE, MAP_EXECUTABLE },
5947 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5948 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5949 MAP_NORESERVE, MAP_NORESERVE },
5950 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5951 /* MAP_STACK had been ignored by the kernel for quite some time.
5952 Recognize it for the target insofar as we do not want to pass
5953 it through to the host. */
5954 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5955 { 0, 0, 0, 0 }
5958 #if defined(TARGET_I386)
5960 /* NOTE: there is really one LDT for all the threads */
/* Lazily allocated by write_ldt(); NULL until the guest first writes. */
5961 static uint8_t *ldt_table;
/*
 * modify_ldt(func=0): copy up to 'bytecount' bytes of the emulated LDT
 * back to guest memory at 'ptr'.  Returns the number of bytes copied
 * (0 if no LDT has been created yet) or -TARGET_EFAULT.
 */
5963 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5965 int size;
5966 void *p;
5968 if (!ldt_table)
5969 return 0;
5970 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5971 if (size > bytecount)
5972 size = bytecount;
5973 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5974 if (!p)
5975 return -TARGET_EFAULT;
5976 /* ??? Should this by byteswapped? */
5977 memcpy(p, ldt_table, size);
5978 unlock_user(p, ptr, size);
5979 return size;
5982 /* XXX: add locking support */
/*
 * modify_ldt(func=1 or 0x11): install one LDT descriptor from a guest
 * struct modify_ldt_ldt_s.  'oldmode' selects the legacy semantics
 * (func=1): no 'useable' bit and no 64-bit segments.  The LDT backing
 * store is allocated on first use via target_mmap().  The descriptor
 * encoding below mirrors the Linux kernel's fill_ldt().
 */
5983 static abi_long write_ldt(CPUX86State *env,
5984 abi_ulong ptr, unsigned long bytecount, int oldmode)
5986 struct target_modify_ldt_ldt_s ldt_info;
5987 struct target_modify_ldt_ldt_s *target_ldt_info;
5988 int seg_32bit, contents, read_exec_only, limit_in_pages;
5989 int seg_not_present, useable, lm;
5990 uint32_t *lp, entry_1, entry_2;
5992 if (bytecount != sizeof(ldt_info))
5993 return -TARGET_EINVAL;
5994 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5995 return -TARGET_EFAULT;
5996 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5997 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5998 ldt_info.limit = tswap32(target_ldt_info->limit);
5999 ldt_info.flags = tswap32(target_ldt_info->flags);
6000 unlock_user_struct(target_ldt_info, ptr, 0);
6002 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6003 return -TARGET_EINVAL;
/* Unpack the packed flags word (same layout as the kernel's). */
6004 seg_32bit = ldt_info.flags & 1;
6005 contents = (ldt_info.flags >> 1) & 3;
6006 read_exec_only = (ldt_info.flags >> 3) & 1;
6007 limit_in_pages = (ldt_info.flags >> 4) & 1;
6008 seg_not_present = (ldt_info.flags >> 5) & 1;
6009 useable = (ldt_info.flags >> 6) & 1;
6010 #ifdef TARGET_ABI32
6011 lm = 0;
6012 #else
6013 lm = (ldt_info.flags >> 7) & 1;
6014 #endif
6015 if (contents == 3) {
6016 if (oldmode)
6017 return -TARGET_EINVAL;
6018 if (seg_not_present == 0)
6019 return -TARGET_EINVAL;
6021 /* allocate the LDT */
6022 if (!ldt_table) {
6023 env->ldt.base = target_mmap(0,
6024 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6025 PROT_READ|PROT_WRITE,
6026 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6027 if (env->ldt.base == -1)
6028 return -TARGET_ENOMEM;
6029 memset(g2h(env->ldt.base), 0,
6030 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6031 env->ldt.limit = 0xffff;
6032 ldt_table = g2h(env->ldt.base);
6035 /* NOTE: same code as Linux kernel */
6036 /* Allow LDTs to be cleared by the user. */
6037 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6038 if (oldmode ||
6039 (contents == 0 &&
6040 read_exec_only == 1 &&
6041 seg_32bit == 0 &&
6042 limit_in_pages == 0 &&
6043 seg_not_present == 1 &&
6044 useable == 0 )) {
6045 entry_1 = 0;
6046 entry_2 = 0;
6047 goto install;
/* Build the two 32-bit halves of the x86 segment descriptor. */
6051 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6052 (ldt_info.limit & 0x0ffff);
6053 entry_2 = (ldt_info.base_addr & 0xff000000) |
6054 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6055 (ldt_info.limit & 0xf0000) |
6056 ((read_exec_only ^ 1) << 9) |
6057 (contents << 10) |
6058 ((seg_not_present ^ 1) << 15) |
6059 (seg_32bit << 22) |
6060 (limit_in_pages << 23) |
6061 (lm << 21) |
6062 0x7000;
6063 if (!oldmode)
6064 entry_2 |= (useable << 20);
6066 /* Install the new entry ... */
6067 install:
6068 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6069 lp[0] = tswap32(entry_1);
6070 lp[1] = tswap32(entry_2);
6071 return 0;
6074 /* specific and weird i386 syscalls */
/*
 * modify_ldt(2) dispatcher: func 0 = read LDT, 1 = write (old mode),
 * 0x11 = write (new mode).  Returns bytes read, 0 on successful write,
 * or a negative target errno.
 */
6075 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6076 unsigned long bytecount)
6078 abi_long ret;
6080 switch (func) {
6081 case 0:
6082 ret = read_ldt(ptr, bytecount);
6083 break;
6084 case 1:
6085 ret = write_ldt(env, ptr, bytecount, 1);
6086 break;
6087 case 0x11:
6088 ret = write_ldt(env, ptr, bytecount, 0);
6089 break;
6090 default:
6091 ret = -TARGET_ENOSYS;
6092 break;
6094 return ret;
6097 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/*
 * set_thread_area(2): install a TLS descriptor in the emulated GDT.
 * If entry_number is -1 a free TLS slot is picked and written back to
 * the guest struct.  Descriptor encoding matches write_ldt() above.
 */
6098 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6100 uint64_t *gdt_table = g2h(env->gdt.base);
6101 struct target_modify_ldt_ldt_s ldt_info;
6102 struct target_modify_ldt_ldt_s *target_ldt_info;
6103 int seg_32bit, contents, read_exec_only, limit_in_pages;
6104 int seg_not_present, useable, lm;
6105 uint32_t *lp, entry_1, entry_2;
6106 int i;
6108 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6109 if (!target_ldt_info)
6110 return -TARGET_EFAULT;
6111 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6112 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6113 ldt_info.limit = tswap32(target_ldt_info->limit);
6114 ldt_info.flags = tswap32(target_ldt_info->flags);
/* -1 means "allocate a free TLS slot and report it back". */
6115 if (ldt_info.entry_number == -1) {
6116 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6117 if (gdt_table[i] == 0) {
6118 ldt_info.entry_number = i;
6119 target_ldt_info->entry_number = tswap32(i);
6120 break;
6124 unlock_user_struct(target_ldt_info, ptr, 1);
6126 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6127 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6128 return -TARGET_EINVAL;
6129 seg_32bit = ldt_info.flags & 1;
6130 contents = (ldt_info.flags >> 1) & 3;
6131 read_exec_only = (ldt_info.flags >> 3) & 1;
6132 limit_in_pages = (ldt_info.flags >> 4) & 1;
6133 seg_not_present = (ldt_info.flags >> 5) & 1;
6134 useable = (ldt_info.flags >> 6) & 1;
6135 #ifdef TARGET_ABI32
6136 lm = 0;
6137 #else
6138 lm = (ldt_info.flags >> 7) & 1;
6139 #endif
6141 if (contents == 3) {
6142 if (seg_not_present == 0)
6143 return -TARGET_EINVAL;
6146 /* NOTE: same code as Linux kernel */
6147 /* Allow LDTs to be cleared by the user. */
6148 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6149 if ((contents == 0 &&
6150 read_exec_only == 1 &&
6151 seg_32bit == 0 &&
6152 limit_in_pages == 0 &&
6153 seg_not_present == 1 &&
6154 useable == 0 )) {
6155 entry_1 = 0;
6156 entry_2 = 0;
6157 goto install;
/* Build the two 32-bit halves of the x86 segment descriptor. */
6161 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6162 (ldt_info.limit & 0x0ffff);
6163 entry_2 = (ldt_info.base_addr & 0xff000000) |
6164 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6165 (ldt_info.limit & 0xf0000) |
6166 ((read_exec_only ^ 1) << 9) |
6167 (contents << 10) |
6168 ((seg_not_present ^ 1) << 15) |
6169 (seg_32bit << 22) |
6170 (limit_in_pages << 23) |
6171 (useable << 20) |
6172 (lm << 21) |
6173 0x7000;
6175 /* Install the new entry ... */
6176 install:
6177 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6178 lp[0] = tswap32(entry_1);
6179 lp[1] = tswap32(entry_2);
6180 return 0;
/*
 * get_thread_area(2): read back a TLS descriptor from the emulated GDT
 * and decode it into the guest struct modify_ldt_ldt_s (the inverse of
 * the encoding performed by do_set_thread_area()).
 */
6183 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6185 struct target_modify_ldt_ldt_s *target_ldt_info;
6186 uint64_t *gdt_table = g2h(env->gdt.base);
6187 uint32_t base_addr, limit, flags;
6188 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6189 int seg_not_present, useable, lm;
6190 uint32_t *lp, entry_1, entry_2;
6192 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6193 if (!target_ldt_info)
6194 return -TARGET_EFAULT;
6195 idx = tswap32(target_ldt_info->entry_number);
6196 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6197 idx > TARGET_GDT_ENTRY_TLS_MAX) {
6198 unlock_user_struct(target_ldt_info, ptr, 1);
6199 return -TARGET_EINVAL;
6201 lp = (uint32_t *)(gdt_table + idx);
6202 entry_1 = tswap32(lp[0]);
6203 entry_2 = tswap32(lp[1]);
/* Decode the descriptor bits back into the flags-word fields. */
6205 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6206 contents = (entry_2 >> 10) & 3;
6207 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6208 seg_32bit = (entry_2 >> 22) & 1;
6209 limit_in_pages = (entry_2 >> 23) & 1;
6210 useable = (entry_2 >> 20) & 1;
6211 #ifdef TARGET_ABI32
6212 lm = 0;
6213 #else
6214 lm = (entry_2 >> 21) & 1;
6215 #endif
6216 flags = (seg_32bit << 0) | (contents << 1) |
6217 (read_exec_only << 3) | (limit_in_pages << 4) |
6218 (seg_not_present << 5) | (useable << 6) | (lm << 7);
6219 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
6220 base_addr = (entry_1 >> 16) |
6221 (entry_2 & 0xff000000) |
6222 ((entry_2 & 0xff) << 16);
6223 target_ldt_info->base_addr = tswapal(base_addr);
6224 target_ldt_info->limit = tswap32(limit);
6225 target_ldt_info->flags = tswap32(flags);
6226 unlock_user_struct(target_ldt_info, ptr, 1);
6227 return 0;
6229 #endif /* TARGET_I386 && TARGET_ABI32 */
6231 #ifndef TARGET_ABI32
/*
 * arch_prctl(2) for x86-64 guests: get/set the FS or GS segment base.
 * SET loads a null selector and stores the base directly in the
 * emulated segment register; GET copies the base out to guest memory.
 */
6232 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6234 abi_long ret = 0;
6235 abi_ulong val;
6236 int idx;
6238 switch(code) {
6239 case TARGET_ARCH_SET_GS:
6240 case TARGET_ARCH_SET_FS:
6241 if (code == TARGET_ARCH_SET_GS)
6242 idx = R_GS;
6243 else
6244 idx = R_FS;
6245 cpu_x86_load_seg(env, idx, 0);
6246 env->segs[idx].base = addr;
6247 break;
6248 case TARGET_ARCH_GET_GS:
6249 case TARGET_ARCH_GET_FS:
6250 if (code == TARGET_ARCH_GET_GS)
6251 idx = R_GS;
6252 else
6253 idx = R_FS;
6254 val = env->segs[idx].base;
6255 if (put_user(val, addr, abi_ulong))
6256 ret = -TARGET_EFAULT;
6257 break;
6258 default:
6259 ret = -TARGET_EINVAL;
6260 break;
6262 return ret;
6264 #endif
6266 #endif /* defined(TARGET_I386) */
/* Host stack size for threads created by do_fork(CLONE_VM). */
6268 #define NEW_STACK_SIZE 0x40000
/* Serializes thread creation so child setup appears atomic to signals. */
6271 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Handshake state passed from do_fork() to clone_func() on the new thread. */
6272 typedef struct {
6273 CPUArchState *env;
6274 pthread_mutex_t mutex;
6275 pthread_cond_t cond;
6276 pthread_t thread;
6277 uint32_t tid;
6278 abi_ulong child_tidptr;
6279 abi_ulong parent_tidptr;
6280 sigset_t sigmask;
6281 } new_thread_info;
/*
 * pthread entry point for guest threads created via clone(CLONE_VM):
 * register with RCU/TCG, publish the tid to the requested child/parent
 * locations, restore the signal mask, signal readiness to the parent,
 * wait for the parent to finish TLS setup, then enter cpu_loop().
 * Never returns.
 */
6283 static void *clone_func(void *arg)
6285 new_thread_info *info = arg;
6286 CPUArchState *env;
6287 CPUState *cpu;
6288 TaskState *ts;
6290 rcu_register_thread();
6291 tcg_register_thread();
6292 env = info->env;
6293 cpu = ENV_GET_CPU(env);
6294 thread_cpu = cpu;
6295 ts = (TaskState *)cpu->opaque;
6296 info->tid = gettid();
6297 task_settid(ts);
6298 if (info->child_tidptr)
6299 put_user_u32(info->tid, info->child_tidptr);
6300 if (info->parent_tidptr)
6301 put_user_u32(info->tid, info->parent_tidptr);
6302 /* Enable signals. */
6303 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6304 /* Signal to the parent that we're ready. */
6305 pthread_mutex_lock(&info->mutex);
6306 pthread_cond_broadcast(&info->cond);
6307 pthread_mutex_unlock(&info->mutex);
6308 /* Wait until the parent has finished initializing the tls state. */
6309 pthread_mutex_lock(&clone_lock);
6310 pthread_mutex_unlock(&clone_lock);
6311 cpu_loop(env);
6312 /* never exits */
6313 return NULL;
6316 /* do_fork() Must return host values and target errnos (unlike most
6317 do_*() functions). */
/*
 * Implement clone(2)/fork(2)/vfork(2).  CLONE_VM requests map to a new
 * host pthread running a copied CPU state; everything else maps to a
 * host fork().  vfork is emulated as plain fork.  Returns the new tid
 * (or fork()'s pid semantics) on success, negative target errno on
 * failure.
 */
6318 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6319 abi_ulong parent_tidptr, target_ulong newtls,
6320 abi_ulong child_tidptr)
6322 CPUState *cpu = ENV_GET_CPU(env);
6323 int ret;
6324 TaskState *ts;
6325 CPUState *new_cpu;
6326 CPUArchState *new_env;
6327 sigset_t sigmask;
6329 flags &= ~CLONE_IGNORED_FLAGS;
6331 /* Emulate vfork() with fork() */
6332 if (flags & CLONE_VFORK)
6333 flags &= ~(CLONE_VFORK | CLONE_VM);
6335 if (flags & CLONE_VM) {
6336 TaskState *parent_ts = (TaskState *)cpu->opaque;
6337 new_thread_info info;
6338 pthread_attr_t attr;
6340 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6341 (flags & CLONE_INVALID_THREAD_FLAGS)) {
6342 return -TARGET_EINVAL;
6345 ts = g_new0(TaskState, 1);
6346 init_task_state(ts);
6347 /* we create a new CPU instance. */
6348 new_env = cpu_copy(env);
6349 /* Init regs that differ from the parent. */
6350 cpu_clone_regs(new_env, newsp);
6351 new_cpu = ENV_GET_CPU(new_env);
6352 new_cpu->opaque = ts;
6353 ts->bprm = parent_ts->bprm;
6354 ts->info = parent_ts->info;
6355 ts->signal_mask = parent_ts->signal_mask;
6357 if (flags & CLONE_CHILD_CLEARTID) {
6358 ts->child_tidptr = child_tidptr;
6361 if (flags & CLONE_SETTLS) {
6362 cpu_set_tls (new_env, newtls);
6365 /* Grab a mutex so that thread setup appears atomic. */
6366 pthread_mutex_lock(&clone_lock);
6368 memset(&info, 0, sizeof(info));
6369 pthread_mutex_init(&info.mutex, NULL);
6370 pthread_mutex_lock(&info.mutex);
6371 pthread_cond_init(&info.cond, NULL);
6372 info.env = new_env;
6373 if (flags & CLONE_CHILD_SETTID) {
6374 info.child_tidptr = child_tidptr;
6376 if (flags & CLONE_PARENT_SETTID) {
6377 info.parent_tidptr = parent_tidptr;
6380 ret = pthread_attr_init(&attr);
6381 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6382 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6383 /* It is not safe to deliver signals until the child has finished
6384 initializing, so temporarily block all signals. */
6385 sigfillset(&sigmask);
6386 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6388 /* If this is our first additional thread, we need to ensure we
6389 * generate code for parallel execution and flush old translations.
6391 if (!parallel_cpus) {
6392 parallel_cpus = true;
6393 tb_flush(cpu);
6396 ret = pthread_create(&info.thread, &attr, clone_func, &info);
6397 /* TODO: Free new CPU state if thread creation failed. */
6399 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6400 pthread_attr_destroy(&attr);
6401 if (ret == 0) {
6402 /* Wait for the child to initialize. */
6403 pthread_cond_wait(&info.cond, &info.mutex);
6404 ret = info.tid;
6405 } else {
6406 ret = -1;
6408 pthread_mutex_unlock(&info.mutex);
6409 pthread_cond_destroy(&info.cond);
6410 pthread_mutex_destroy(&info.mutex);
6411 pthread_mutex_unlock(&clone_lock);
6412 } else {
6413 /* if no CLONE_VM, we consider it is a fork */
6414 if (flags & CLONE_INVALID_FORK_FLAGS) {
6415 return -TARGET_EINVAL;
6418 /* We can't support custom termination signals */
6419 if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6420 return -TARGET_EINVAL;
6423 if (block_signals()) {
6424 return -TARGET_ERESTARTSYS;
6427 fork_start();
6428 ret = fork();
6429 if (ret == 0) {
6430 /* Child Process. */
6431 cpu_clone_regs(env, newsp);
6432 fork_end(1);
6433 /* There is a race condition here. The parent process could
6434 theoretically read the TID in the child process before the child
6435 tid is set. This would require using either ptrace
6436 (not implemented) or having *_tidptr to point at a shared memory
6437 mapping. We can't repeat the spinlock hack used above because
6438 the child process gets its own copy of the lock. */
6439 if (flags & CLONE_CHILD_SETTID)
6440 put_user_u32(gettid(), child_tidptr)
6441 if (flags & CLONE_PARENT_SETTID)
6442 put_user_u32(gettid(), parent_tidptr);
6443 ts = (TaskState *)cpu->opaque;
6444 if (flags & CLONE_SETTLS)
6445 cpu_set_tls (env, newtls);
6446 if (flags & CLONE_CHILD_CLEARTID)
6447 ts->child_tidptr = child_tidptr;
6448 } else {
6449 fork_end(0);
6452 return ret;
6455 /* warning : doesn't handle linux specific flags... */
/*
 * Map a target fcntl(2) command to the host command.  Lock commands are
 * routed to the 64-bit host variants (F_GETLK64 etc.) so that large
 * offsets survive on 32-bit hosts.  Returns -TARGET_EINVAL for commands
 * we do not support.
 */
6456 static int target_to_host_fcntl_cmd(int cmd)
6458 switch(cmd) {
6459 case TARGET_F_DUPFD:
6460 case TARGET_F_GETFD:
6461 case TARGET_F_SETFD:
6462 case TARGET_F_GETFL:
6463 case TARGET_F_SETFL:
6464 return cmd;
6465 case TARGET_F_GETLK:
6466 return F_GETLK64;
6467 case TARGET_F_SETLK:
6468 return F_SETLK64;
6469 case TARGET_F_SETLKW:
6470 return F_SETLKW64;
6471 case TARGET_F_GETOWN:
6472 return F_GETOWN;
6473 case TARGET_F_SETOWN:
6474 return F_SETOWN;
6475 case TARGET_F_GETSIG:
6476 return F_GETSIG;
6477 case TARGET_F_SETSIG:
6478 return F_SETSIG;
6479 #if TARGET_ABI_BITS == 32
6480 case TARGET_F_GETLK64:
6481 return F_GETLK64;
6482 case TARGET_F_SETLK64:
6483 return F_SETLK64;
6484 case TARGET_F_SETLKW64:
6485 return F_SETLKW64;
6486 #endif
6487 case TARGET_F_SETLEASE:
6488 return F_SETLEASE;
6489 case TARGET_F_GETLEASE:
6490 return F_GETLEASE;
6491 #ifdef F_DUPFD_CLOEXEC
6492 case TARGET_F_DUPFD_CLOEXEC:
6493 return F_DUPFD_CLOEXEC;
6494 #endif
6495 case TARGET_F_NOTIFY:
6496 return F_NOTIFY;
6497 #ifdef F_GETOWN_EX
6498 case TARGET_F_GETOWN_EX:
6499 return F_GETOWN_EX;
6500 #endif
6501 #ifdef F_SETOWN_EX
6502 case TARGET_F_SETOWN_EX:
6503 return F_SETOWN_EX;
6504 #endif
6505 #ifdef F_SETPIPE_SZ
6506 case TARGET_F_SETPIPE_SZ:
6507 return F_SETPIPE_SZ;
6508 case TARGET_F_GETPIPE_SZ:
6509 return F_GETPIPE_SZ;
6510 #endif
6511 default:
6512 return -TARGET_EINVAL;
6514 return -TARGET_EINVAL;
/* flock l_type values: target <-> host translation.  The -1 masks mean
 * "match the exact value" rather than a bit within a mask. */
6517 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
6518 static const bitmask_transtbl flock_tbl[] = {
6519 TRANSTBL_CONVERT(F_RDLCK),
6520 TRANSTBL_CONVERT(F_WRLCK),
6521 TRANSTBL_CONVERT(F_UNLCK),
6522 TRANSTBL_CONVERT(F_EXLCK),
6523 TRANSTBL_CONVERT(F_SHLCK),
6524 { 0, 0, 0, 0 }
/*
 * Read a target struct flock from guest memory into a host flock64,
 * translating l_type through flock_tbl.  Returns 0 or -TARGET_EFAULT.
 */
6527 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6528 abi_ulong target_flock_addr)
6530 struct target_flock *target_fl;
6531 short l_type;
6533 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6534 return -TARGET_EFAULT;
6537 __get_user(l_type, &target_fl->l_type);
6538 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6539 __get_user(fl->l_whence, &target_fl->l_whence);
6540 __get_user(fl->l_start, &target_fl->l_start);
6541 __get_user(fl->l_len, &target_fl->l_len);
6542 __get_user(fl->l_pid, &target_fl->l_pid);
6543 unlock_user_struct(target_fl, target_flock_addr, 0);
6544 return 0;
/*
 * Write a host flock64 back to a target struct flock in guest memory,
 * translating l_type through flock_tbl.  Returns 0 or -TARGET_EFAULT.
 */
6547 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6548 const struct flock64 *fl)
6550 struct target_flock *target_fl;
6551 short l_type;
6553 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6554 return -TARGET_EFAULT;
6557 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6558 __put_user(l_type, &target_fl->l_type);
6559 __put_user(fl->l_whence, &target_fl->l_whence);
6560 __put_user(fl->l_start, &target_fl->l_start);
6561 __put_user(fl->l_len, &target_fl->l_len);
6562 __put_user(fl->l_pid, &target_fl->l_pid);
6563 unlock_user_struct(target_fl, target_flock_addr, 1);
6564 return 0;
/* Function-pointer signatures for the guest<->host flock64 converters,
 * so callers can select the proper variant (the ARM EABI layout below
 * differs from the generic one) at run time. */
typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6570 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/* As copy_from_user_flock64() but for the ARM EABI flock64 layout,
 * which has different alignment/padding from the generic target one. */
static inline abi_long copy_from_user_eabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_eabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}
/* As copy_to_user_flock64() but writing the ARM EABI flock64 layout. */
static inline abi_long copy_to_user_eabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_eabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
6610 #endif
/* Read a target 'struct flock64' from guest memory into the host *fl.
 * Returns 0 on success or -TARGET_EFAULT. */
static inline abi_long copy_from_user_flock64(struct flock64 *fl,
                                              abi_ulong target_flock_addr)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}
/* Write the host *fl back to guest memory as a target 'struct flock64'.
 * Returns 0 on success or -TARGET_EFAULT. */
static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
/* Emulate fcntl() for the guest.  Lock structures and f_owner_ex blocks
 * are converted between guest and host layouts; flag-style commands are
 * remapped through fcntl_flags_tbl.  host_cmd is the host command code
 * produced by target_to_host_fcntl_cmd().  Returns the host result or a
 * negative target errno. */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            /* kernel filled in the conflicting lock; copy it back */
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            /* result is a host O_* bitmask; translate to target bits */
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
        /* plain integer argument: pass through unchanged */
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        /* unknown command: forward verbatim and let the host decide */
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
6760 #ifdef USE_UID16
/* Clamp a 32-bit uid into the 16-bit range used by the legacy UID16
 * syscalls; ids that do not fit become the kernel's overflow uid 65534. */
static inline int high2lowuid(int uid)
{
    return uid > 65535 ? 65534 : uid;
}
/* Clamp a 32-bit gid into the 16-bit range used by the legacy UID16
 * syscalls; ids that do not fit become the kernel's overflow gid 65534. */
static inline int high2lowgid(int gid)
{
    return gid > 65535 ? 65534 : gid;
}
/* Widen a 16-bit uid to 32 bits.  The 16-bit sentinel (uid16_t)-1
 * ("leave unchanged" in setreuid() and friends) must map to the 32-bit
 * sentinel -1, not to 65535. */
static inline int low2highuid(int uid)
{
    return (int16_t)uid == -1 ? -1 : uid;
}
/* Widen a 16-bit gid to 32 bits, preserving the -1 "leave unchanged"
 * sentinel just like low2highuid() above. */
static inline int low2highgid(int gid)
{
    return (int16_t)gid == -1 ? -1 : gid;
}
/* UID16 targets store ids as 16-bit values in guest memory, so swap
 * and store them as 16-bit quantities. */
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)
#else /* !USE_UID16 */
/* Targets with full 32-bit ids need no narrowing/widening: these are
 * identity functions kept so callers are unconditional. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
/* ids are stored as 32-bit values in guest memory on these targets */
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

/* Raw wrappers invoking the syscall numbers chosen above directly,
 * bypassing libc (whose versions change ids for the whole process). */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
/* One-time setup for the syscall layer: register thunk struct layouts,
 * build the target->host errno mapping, and patch each ioctl table
 * entry whose size field is the "compute at runtime" marker with the
 * real thunk-derived argument size. */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                /* size can only be derived for pointer arguments */
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                             (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
#if TARGET_ABI_BITS == 32
/* Reassemble a 64-bit offset that a 32-bit guest ABI passed as two
 * 32-bit register halves; which half is the high word depends on the
 * target's endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABIs pass the whole offset in one register; word1 is unused. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64: the 64-bit length arrives as two 32-bit halves.  ABIs
 * that require aligned register pairs insert a padding argument, which
 * regpairs_aligned() detects so we can shift the real halves down. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair handling as target_truncate64(). */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
/* Read a target 'struct timespec' from guest memory into *host_ts.
 * Returns 0 on success or -TARGET_EFAULT. */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
/* Write *host_ts back to guest memory as a target 'struct timespec'.
 * Returns 0 on success or -TARGET_EFAULT. */
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
6982 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6983 abi_ulong target_addr)
6985 struct target_itimerspec *target_itspec;
6987 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6988 return -TARGET_EFAULT;
6991 host_itspec->it_interval.tv_sec =
6992 tswapal(target_itspec->it_interval.tv_sec);
6993 host_itspec->it_interval.tv_nsec =
6994 tswapal(target_itspec->it_interval.tv_nsec);
6995 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6996 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6998 unlock_user_struct(target_itspec, target_addr, 1);
6999 return 0;
7002 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7003 struct itimerspec *host_its)
7005 struct target_itimerspec *target_itspec;
7007 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
7008 return -TARGET_EFAULT;
7011 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
7012 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
7014 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
7015 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
7017 unlock_user_struct(target_itspec, target_addr, 0);
7018 return 0;
/* Read a target 'struct timex' (adjtimex parameter block) from guest
 * memory into *host_tx, field by field.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
/* Write *host_tx back to guest memory as a target 'struct timex'
 * (mirror of target_to_host_timex()).  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
7092 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7093 abi_ulong target_addr)
7095 struct target_sigevent *target_sevp;
7097 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7098 return -TARGET_EFAULT;
7101 /* This union is awkward on 64 bit systems because it has a 32 bit
7102 * integer and a pointer in it; we follow the conversion approach
7103 * used for handling sigval types in signal.c so the guest should get
7104 * the correct value back even if we did a 64 bit byteswap and it's
7105 * using the 32 bit integer.
7107 host_sevp->sigev_value.sival_ptr =
7108 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7109 host_sevp->sigev_signo =
7110 target_to_host_signal(tswap32(target_sevp->sigev_signo));
7111 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7112 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7114 unlock_user_struct(target_sevp, target_addr, 1);
7115 return 0;
7118 #if defined(TARGET_NR_mlockall)
7119 static inline int target_to_host_mlockall_arg(int arg)
7121 int result = 0;
7123 if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
7124 result |= MCL_CURRENT;
7126 if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
7127 result |= MCL_FUTURE;
7129 return result;
7131 #endif
/* Write host stat results back to the guest in its stat64 layout.
 * ARM EABI 32-bit guests use their own layout; other targets use
 * target_stat64 where available, else plain target_stat.
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address. */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        /* zero first so padding and unconverted fields are defined */
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* these layouts carry the inode in a second, legacy field too */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
7196 /* ??? Using host futex calls even when target atomic operations
7197 are not really atomic probably breaks things. However implementing
7198 futexes locally would make futexes shared between multiple processes
7199 tricky. However they're probably useless because guest atomic
7200 operations won't work either. */
7201 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7202 target_ulong uaddr2, int val3)
7204 struct timespec ts, *pts;
7205 int base_op;
7207 /* ??? We assume FUTEX_* constants are the same on both host
7208 and target. */
7209 #ifdef FUTEX_CMD_MASK
7210 base_op = op & FUTEX_CMD_MASK;
7211 #else
7212 base_op = op;
7213 #endif
7214 switch (base_op) {
7215 case FUTEX_WAIT:
7216 case FUTEX_WAIT_BITSET:
7217 if (timeout) {
7218 pts = &ts;
7219 target_to_host_timespec(pts, timeout);
7220 } else {
7221 pts = NULL;
7223 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
7224 pts, NULL, val3));
7225 case FUTEX_WAKE:
7226 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7227 case FUTEX_FD:
7228 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7229 case FUTEX_REQUEUE:
7230 case FUTEX_CMP_REQUEUE:
7231 case FUTEX_WAKE_OP:
7232 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7233 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7234 But the prototype takes a `struct timespec *'; insert casts
7235 to satisfy the compiler. We do not need to tswap TIMEOUT
7236 since it's not compared to guest memory. */
7237 pts = (struct timespec *)(uintptr_t) timeout;
7238 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
7239 g2h(uaddr2),
7240 (base_op == FUTEX_CMP_REQUEUE
7241 ? tswap32(val3)
7242 : val3)));
7243 default:
7244 return -TARGET_ENOSYS;
7247 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Implement name_to_handle_at(): size the handle from the guest's
 * handle_bytes field, call the host, then copy the (opaque) handle and
 * the mount id back to guest memory. */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* first 32-bit word of the guest struct is handle_bytes */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
7299 #endif
7301 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Implement open_by_handle_at(): copy the guest's file_handle into a
 * host buffer (fixing byte order of the header fields), translate the
 * open flags, and call the host. */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* first 32-bit word of the guest struct is handle_bytes */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
7333 #endif
7335 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7337 /* signalfd siginfo conversion */
/* Convert one signalfd_siginfo record from host to target byte order,
 * remapping the signal number.  tinfo and info may alias (the caller in
 * host_to_target_data_signalfd() swaps records in place). */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */
#ifdef BUS_MCEERR_AO
    /* Fix: test the *source* fields (still in host byte order); the
     * old code read them through tinfo, which only worked because the
     * sole caller passes the same buffer for both arguments. */
    if (info->ssi_signo == SIGBUS &&
        (info->ssi_code == BUS_MCEERR_AR ||
         info->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    /* Fix: read ssi_errno from info, consistent with every other field
     * (the old code read it back out of the destination buffer). */
    tinfo->ssi_errno = tswap32(info->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
7377 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
7379 int i;
7381 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
7382 host_to_target_signalfd_siginfo(buf + i, buf + i);
7385 return len;
/* fd translator: byte-swap signalfd records on every guest read from a
 * signalfd file descriptor. */
static TargetFdTrans target_signalfd_trans = {
    .host_to_target_data = host_to_target_data_signalfd,
};
/* Implement signalfd4(): translate the guest signal mask and flags,
 * create the host signalfd, and register the byte-swapping fd
 * translator for subsequent reads.  Returns the new fd or a negative
 * target errno. */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    /* only SFD_NONBLOCK/SFD_CLOEXEC (== O_* values) are valid */
    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
7419 #endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* low 7 bits hold the terminating signal; remap only those */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* stop signal lives in bits 8..15 */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
/* Fill fd with the guest's /proc/self/cmdline: the saved exec argv,
 * NUL-separated (the +1 below includes each string's terminator, which
 * is the /proc cmdline record format).  Returns 0 or -1 on short write. */
static int open_self_cmdline(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
    int i;

    for (i = 0; i < bprm->argc; i++) {
        size_t len = strlen(bprm->argv[i]) + 1;

        if (write(fd, bprm->argv[i], len) != len) {
            return -1;
        }
    }

    return 0;
}
7452 static int open_self_maps(void *cpu_env, int fd)
7454 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7455 TaskState *ts = cpu->opaque;
7456 FILE *fp;
7457 char *line = NULL;
7458 size_t len = 0;
7459 ssize_t read;
7461 fp = fopen("/proc/self/maps", "r");
7462 if (fp == NULL) {
7463 return -1;
7466 while ((read = getline(&line, &len, fp)) != -1) {
7467 int fields, dev_maj, dev_min, inode;
7468 uint64_t min, max, offset;
7469 char flag_r, flag_w, flag_x, flag_p;
7470 char path[512] = "";
7471 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7472 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7473 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
7475 if ((fields < 10) || (fields > 11)) {
7476 continue;
7478 if (h2g_valid(min)) {
7479 int flags = page_get_flags(h2g(min));
7480 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
7481 if (page_check_range(h2g(min), max - min, flags) == -1) {
7482 continue;
7484 if (h2g(min) == ts->info->stack_limit) {
7485 pstrcpy(path, sizeof(path), " [stack]");
7487 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
7488 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7489 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7490 flag_x, flag_p, offset, dev_maj, dev_min, inode,
7491 path[0] ? " " : "", path);
7495 free(line);
7496 fclose(fp);
7498 return 0;
/* Synthesize a minimal /proc/self/stat for the guest: only pid
 * (field 1), comm (field 2) and startstack (field 28) carry real
 * values; the remaining fields read as 0.  Returns 0 or -1 on a short
 * write. */
static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong start_stack = ts->info->start_stack;
    int i;

    for (i = 0; i < 44; i++) {
        char buf[128];
        int len;
        uint64_t val = 0;

        if (i == 0) {
            /* pid */
            val = getpid();
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else if (i == 1) {
            /* app name */
            snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
        } else if (i == 27) {
            /* stack bottom */
            val = start_stack;
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else {
            /* for the rest, there is MasterCard */
            snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
        }

        len = strlen(buf);
        if (write(fd, buf, len) != len) {
            return -1;
        }
    }

    return 0;
}
/* Expose the guest's ELF auxiliary vector (saved on the guest stack at
 * exec time) as the contents of /proc/self/auxv.  Always returns 0;
 * write failures silently truncate the output. */
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        /* NOTE(review): ptr and len were advanced/decremented by the
         * loop, so this unlock does not describe the originally locked
         * range — looks harmless for a read-only lock, but verify
         * against lock_user()'s contract if DEBUG_REMAP is in use. */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
/* Return 1 if filename names /proc entry 'entry' for the current
 * process — either via "/proc/self/<entry>" or "/proc/<pid>/<entry>"
 * with our own pid — and 0 otherwise. */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *rest;

    if (strncmp(filename, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    rest = filename + strlen("/proc/");

    if (strncmp(rest, "self/", strlen("self/")) == 0) {
        rest += strlen("self/");
    } else if (*rest >= '1' && *rest <= '9') {
        /* numeric pid: must match our own, including trailing '/' */
        char myself[80];

        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(rest, myself, strlen(myself)) != 0) {
            return 0;
        }
        rest += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(rest, entry) == 0;
}
7592 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact full-path match for /proc files that need byte-swapped
 * contents when host and target endianness differ. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
7598 static int open_net_route(void *cpu_env, int fd)
7600 FILE *fp;
7601 char *line = NULL;
7602 size_t len = 0;
7603 ssize_t read;
7605 fp = fopen("/proc/net/route", "r");
7606 if (fp == NULL) {
7607 return -1;
7610 /* read header */
7612 read = getline(&line, &len, fp);
7613 dprintf(fd, "%s", line);
7615 /* read routes */
7617 while ((read = getline(&line, &len, fp)) != -1) {
7618 char iface[16];
7619 uint32_t dest, gw, mask;
7620 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7621 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7622 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7623 &mask, &mtu, &window, &irtt);
7624 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7625 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7626 metric, tswap32(mask), mtu, window, irtt);
7629 free(line);
7630 fclose(fp);
7632 return 0;
7634 #endif
/* openat() with QEMU magic: a few /proc/self files must reflect the
 * *guest* process rather than QEMU itself, so they are emulated via an
 * unlinked temp file filled by the matching open_self_* helper above;
 * every other path is forwarded to the host. */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;
        int (*fill)(void *cpu_env, int fd);
        int (*cmp)(const char *s1, const char *s2);
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        /* cross-endian only: route table needs byte-swapped addresses */
        { "/proc/net/route", open_net_route, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    if (is_proc_myself(pathname, "exe")) {
        /* hand back the fd of the guest binary, not the QEMU binary */
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* unlink immediately: the fd keeps the file alive, invisibly */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
7696 #define TIMER_MAGIC 0x0caf0000
7697 #define TIMER_MAGIC_MASK 0xffff0000
7699 /* Convert QEMU provided timer ID back to internal 16bit index format */
7700 static target_timer_t get_timer_id(abi_long arg)
7702 target_timer_t timerid = arg;
7704 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7705 return -TARGET_EINVAL;
7708 timerid &= 0xffff;
7710 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7711 return -TARGET_EINVAL;
7714 return timerid;
7717 static abi_long swap_data_eventfd(void *buf, size_t len)
7719 uint64_t *counter = buf;
7720 int i;
7722 if (len < sizeof(uint64_t)) {
7723 return -EINVAL;
7726 for (i = 0; i < len; i += sizeof(uint64_t)) {
7727 *counter = tswap64(*counter);
7728 counter++;
7731 return len;
/* fd translator for eventfds: counters are swapped in both directions
 * with the same (self-inverse) helper. */
static TargetFdTrans target_eventfd_trans = {
    .host_to_target_data = swap_data_eventfd,
    .target_to_host_data = swap_data_eventfd,
};
#if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
    (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
     defined(__NR_inotify_init1))
/*
 * Byte-swap a buffer of struct inotify_event records read from the host
 * into target byte order.  Each record is followed by ev->len bytes of
 * (unswapped) name data, so the walk step is variable-length.
 */
static abi_long host_to_target_data_inotify(void *buf, size_t len)
{
    size_t off = 0;

    while (off < len) {
        struct inotify_event *ev = (struct inotify_event *)((char *)buf + off);
        /* Capture the host-order name length before swapping ev->len. */
        uint32_t name_len = ev->len;

        ev->wd = tswap32(ev->wd);
        ev->mask = tswap32(ev->mask);
        ev->cookie = tswap32(ev->cookie);
        ev->len = tswap32(name_len);

        off += sizeof(struct inotify_event) + name_len;
    }

    return len;
}

/* inotify fds only deliver data host-to-target; writes are not translated. */
static TargetFdTrans target_inotify_trans = {
    .host_to_target_data = host_to_target_data_inotify,
};
#endif
7766 static int target_to_host_cpu_mask(unsigned long *host_mask,
7767 size_t host_size,
7768 abi_ulong target_addr,
7769 size_t target_size)
7771 unsigned target_bits = sizeof(abi_ulong) * 8;
7772 unsigned host_bits = sizeof(*host_mask) * 8;
7773 abi_ulong *target_mask;
7774 unsigned i, j;
7776 assert(host_size >= target_size);
7778 target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7779 if (!target_mask) {
7780 return -TARGET_EFAULT;
7782 memset(host_mask, 0, host_size);
7784 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7785 unsigned bit = i * target_bits;
7786 abi_ulong val;
7788 __get_user(val, &target_mask[i]);
7789 for (j = 0; j < target_bits; j++, bit++) {
7790 if (val & (1UL << j)) {
7791 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7796 unlock_user(target_mask, target_addr, 0);
7797 return 0;
7800 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7801 size_t host_size,
7802 abi_ulong target_addr,
7803 size_t target_size)
7805 unsigned target_bits = sizeof(abi_ulong) * 8;
7806 unsigned host_bits = sizeof(*host_mask) * 8;
7807 abi_ulong *target_mask;
7808 unsigned i, j;
7810 assert(host_size >= target_size);
7812 target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7813 if (!target_mask) {
7814 return -TARGET_EFAULT;
7817 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7818 unsigned bit = i * target_bits;
7819 abi_ulong val = 0;
7821 for (j = 0; j < target_bits; j++, bit++) {
7822 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7823 val |= 1UL << j;
7826 __put_user(val, &target_mask[i]);
7829 unlock_user(target_mask, target_addr, target_size);
7830 return 0;
7833 /* do_syscall() should always have a single exit point at the end so
7834 that actions, such as logging of syscall results, can be performed.
7835 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7836 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
7837 abi_long arg2, abi_long arg3, abi_long arg4,
7838 abi_long arg5, abi_long arg6, abi_long arg7,
7839 abi_long arg8)
7841 CPUState *cpu = ENV_GET_CPU(cpu_env);
7842 abi_long ret;
7843 struct stat st;
7844 struct statfs stfs;
7845 void *p;
7847 #if defined(DEBUG_ERESTARTSYS)
7848 /* Debug-only code for exercising the syscall-restart code paths
7849 * in the per-architecture cpu main loops: restart every syscall
7850 * the guest makes once before letting it through.
7853 static int flag;
7855 flag = !flag;
7856 if (flag) {
7857 return -TARGET_ERESTARTSYS;
7860 #endif
7862 #ifdef DEBUG
7863 gemu_log("syscall %d", num);
7864 #endif
7865 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
7866 if(do_strace)
7867 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
7869 switch(num) {
7870 case TARGET_NR_exit:
7871 /* In old applications this may be used to implement _exit(2).
7872 However in threaded applictions it is used for thread termination,
7873 and _exit_group is used for application termination.
7874 Do thread termination if we have more then one thread. */
7876 if (block_signals()) {
7877 ret = -TARGET_ERESTARTSYS;
7878 break;
7881 cpu_list_lock();
7883 if (CPU_NEXT(first_cpu)) {
7884 TaskState *ts;
7886 /* Remove the CPU from the list. */
7887 QTAILQ_REMOVE(&cpus, cpu, node);
7889 cpu_list_unlock();
7891 ts = cpu->opaque;
7892 if (ts->child_tidptr) {
7893 put_user_u32(0, ts->child_tidptr);
7894 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7895 NULL, NULL, 0);
7897 thread_cpu = NULL;
7898 object_unref(OBJECT(cpu));
7899 g_free(ts);
7900 rcu_unregister_thread();
7901 pthread_exit(NULL);
7904 cpu_list_unlock();
7905 #ifdef TARGET_GPROF
7906 _mcleanup();
7907 #endif
7908 gdb_exit(cpu_env, arg1);
7909 _exit(arg1);
7910 ret = 0; /* avoid warning */
7911 break;
7912 case TARGET_NR_read:
7913 if (arg3 == 0)
7914 ret = 0;
7915 else {
7916 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7917 goto efault;
7918 ret = get_errno(safe_read(arg1, p, arg3));
7919 if (ret >= 0 &&
7920 fd_trans_host_to_target_data(arg1)) {
7921 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7923 unlock_user(p, arg2, ret);
7925 break;
7926 case TARGET_NR_write:
7927 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7928 goto efault;
7929 if (fd_trans_target_to_host_data(arg1)) {
7930 void *copy = g_malloc(arg3);
7931 memcpy(copy, p, arg3);
7932 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7933 if (ret >= 0) {
7934 ret = get_errno(safe_write(arg1, copy, ret));
7936 g_free(copy);
7937 } else {
7938 ret = get_errno(safe_write(arg1, p, arg3));
7940 unlock_user(p, arg2, 0);
7941 break;
7942 #ifdef TARGET_NR_open
7943 case TARGET_NR_open:
7944 if (!(p = lock_user_string(arg1)))
7945 goto efault;
7946 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7947 target_to_host_bitmask(arg2, fcntl_flags_tbl),
7948 arg3));
7949 fd_trans_unregister(ret);
7950 unlock_user(p, arg1, 0);
7951 break;
7952 #endif
7953 case TARGET_NR_openat:
7954 if (!(p = lock_user_string(arg2)))
7955 goto efault;
7956 ret = get_errno(do_openat(cpu_env, arg1, p,
7957 target_to_host_bitmask(arg3, fcntl_flags_tbl),
7958 arg4));
7959 fd_trans_unregister(ret);
7960 unlock_user(p, arg2, 0);
7961 break;
7962 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7963 case TARGET_NR_name_to_handle_at:
7964 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7965 break;
7966 #endif
7967 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7968 case TARGET_NR_open_by_handle_at:
7969 ret = do_open_by_handle_at(arg1, arg2, arg3);
7970 fd_trans_unregister(ret);
7971 break;
7972 #endif
7973 case TARGET_NR_close:
7974 fd_trans_unregister(arg1);
7975 ret = get_errno(close(arg1));
7976 break;
7977 case TARGET_NR_brk:
7978 ret = do_brk(arg1);
7979 break;
7980 #ifdef TARGET_NR_fork
7981 case TARGET_NR_fork:
7982 ret = get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7983 break;
7984 #endif
7985 #ifdef TARGET_NR_waitpid
7986 case TARGET_NR_waitpid:
7988 int status;
7989 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7990 if (!is_error(ret) && arg2 && ret
7991 && put_user_s32(host_to_target_waitstatus(status), arg2))
7992 goto efault;
7994 break;
7995 #endif
7996 #ifdef TARGET_NR_waitid
7997 case TARGET_NR_waitid:
7999 siginfo_t info;
8000 info.si_pid = 0;
8001 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8002 if (!is_error(ret) && arg3 && info.si_pid != 0) {
8003 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8004 goto efault;
8005 host_to_target_siginfo(p, &info);
8006 unlock_user(p, arg3, sizeof(target_siginfo_t));
8009 break;
8010 #endif
8011 #ifdef TARGET_NR_creat /* not on alpha */
8012 case TARGET_NR_creat:
8013 if (!(p = lock_user_string(arg1)))
8014 goto efault;
8015 ret = get_errno(creat(p, arg2));
8016 fd_trans_unregister(ret);
8017 unlock_user(p, arg1, 0);
8018 break;
8019 #endif
8020 #ifdef TARGET_NR_link
8021 case TARGET_NR_link:
8023 void * p2;
8024 p = lock_user_string(arg1);
8025 p2 = lock_user_string(arg2);
8026 if (!p || !p2)
8027 ret = -TARGET_EFAULT;
8028 else
8029 ret = get_errno(link(p, p2));
8030 unlock_user(p2, arg2, 0);
8031 unlock_user(p, arg1, 0);
8033 break;
8034 #endif
8035 #if defined(TARGET_NR_linkat)
8036 case TARGET_NR_linkat:
8038 void * p2 = NULL;
8039 if (!arg2 || !arg4)
8040 goto efault;
8041 p = lock_user_string(arg2);
8042 p2 = lock_user_string(arg4);
8043 if (!p || !p2)
8044 ret = -TARGET_EFAULT;
8045 else
8046 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8047 unlock_user(p, arg2, 0);
8048 unlock_user(p2, arg4, 0);
8050 break;
8051 #endif
8052 #ifdef TARGET_NR_unlink
8053 case TARGET_NR_unlink:
8054 if (!(p = lock_user_string(arg1)))
8055 goto efault;
8056 ret = get_errno(unlink(p));
8057 unlock_user(p, arg1, 0);
8058 break;
8059 #endif
8060 #if defined(TARGET_NR_unlinkat)
8061 case TARGET_NR_unlinkat:
8062 if (!(p = lock_user_string(arg2)))
8063 goto efault;
8064 ret = get_errno(unlinkat(arg1, p, arg3));
8065 unlock_user(p, arg2, 0);
8066 break;
8067 #endif
8068 case TARGET_NR_execve:
8070 char **argp, **envp;
8071 int argc, envc;
8072 abi_ulong gp;
8073 abi_ulong guest_argp;
8074 abi_ulong guest_envp;
8075 abi_ulong addr;
8076 char **q;
8077 int total_size = 0;
8079 argc = 0;
8080 guest_argp = arg2;
8081 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8082 if (get_user_ual(addr, gp))
8083 goto efault;
8084 if (!addr)
8085 break;
8086 argc++;
8088 envc = 0;
8089 guest_envp = arg3;
8090 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8091 if (get_user_ual(addr, gp))
8092 goto efault;
8093 if (!addr)
8094 break;
8095 envc++;
8098 argp = g_new0(char *, argc + 1);
8099 envp = g_new0(char *, envc + 1);
8101 for (gp = guest_argp, q = argp; gp;
8102 gp += sizeof(abi_ulong), q++) {
8103 if (get_user_ual(addr, gp))
8104 goto execve_efault;
8105 if (!addr)
8106 break;
8107 if (!(*q = lock_user_string(addr)))
8108 goto execve_efault;
8109 total_size += strlen(*q) + 1;
8111 *q = NULL;
8113 for (gp = guest_envp, q = envp; gp;
8114 gp += sizeof(abi_ulong), q++) {
8115 if (get_user_ual(addr, gp))
8116 goto execve_efault;
8117 if (!addr)
8118 break;
8119 if (!(*q = lock_user_string(addr)))
8120 goto execve_efault;
8121 total_size += strlen(*q) + 1;
8123 *q = NULL;
8125 if (!(p = lock_user_string(arg1)))
8126 goto execve_efault;
8127 /* Although execve() is not an interruptible syscall it is
8128 * a special case where we must use the safe_syscall wrapper:
8129 * if we allow a signal to happen before we make the host
8130 * syscall then we will 'lose' it, because at the point of
8131 * execve the process leaves QEMU's control. So we use the
8132 * safe syscall wrapper to ensure that we either take the
8133 * signal as a guest signal, or else it does not happen
8134 * before the execve completes and makes it the other
8135 * program's problem.
8137 ret = get_errno(safe_execve(p, argp, envp));
8138 unlock_user(p, arg1, 0);
8140 goto execve_end;
8142 execve_efault:
8143 ret = -TARGET_EFAULT;
8145 execve_end:
8146 for (gp = guest_argp, q = argp; *q;
8147 gp += sizeof(abi_ulong), q++) {
8148 if (get_user_ual(addr, gp)
8149 || !addr)
8150 break;
8151 unlock_user(*q, addr, 0);
8153 for (gp = guest_envp, q = envp; *q;
8154 gp += sizeof(abi_ulong), q++) {
8155 if (get_user_ual(addr, gp)
8156 || !addr)
8157 break;
8158 unlock_user(*q, addr, 0);
8161 g_free(argp);
8162 g_free(envp);
8164 break;
8165 case TARGET_NR_chdir:
8166 if (!(p = lock_user_string(arg1)))
8167 goto efault;
8168 ret = get_errno(chdir(p));
8169 unlock_user(p, arg1, 0);
8170 break;
8171 #ifdef TARGET_NR_time
8172 case TARGET_NR_time:
8174 time_t host_time;
8175 ret = get_errno(time(&host_time));
8176 if (!is_error(ret)
8177 && arg1
8178 && put_user_sal(host_time, arg1))
8179 goto efault;
8181 break;
8182 #endif
8183 #ifdef TARGET_NR_mknod
8184 case TARGET_NR_mknod:
8185 if (!(p = lock_user_string(arg1)))
8186 goto efault;
8187 ret = get_errno(mknod(p, arg2, arg3));
8188 unlock_user(p, arg1, 0);
8189 break;
8190 #endif
8191 #if defined(TARGET_NR_mknodat)
8192 case TARGET_NR_mknodat:
8193 if (!(p = lock_user_string(arg2)))
8194 goto efault;
8195 ret = get_errno(mknodat(arg1, p, arg3, arg4));
8196 unlock_user(p, arg2, 0);
8197 break;
8198 #endif
8199 #ifdef TARGET_NR_chmod
8200 case TARGET_NR_chmod:
8201 if (!(p = lock_user_string(arg1)))
8202 goto efault;
8203 ret = get_errno(chmod(p, arg2));
8204 unlock_user(p, arg1, 0);
8205 break;
8206 #endif
8207 #ifdef TARGET_NR_break
8208 case TARGET_NR_break:
8209 goto unimplemented;
8210 #endif
8211 #ifdef TARGET_NR_oldstat
8212 case TARGET_NR_oldstat:
8213 goto unimplemented;
8214 #endif
8215 case TARGET_NR_lseek:
8216 ret = get_errno(lseek(arg1, arg2, arg3));
8217 break;
8218 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8219 /* Alpha specific */
8220 case TARGET_NR_getxpid:
8221 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8222 ret = get_errno(getpid());
8223 break;
8224 #endif
8225 #ifdef TARGET_NR_getpid
8226 case TARGET_NR_getpid:
8227 ret = get_errno(getpid());
8228 break;
8229 #endif
8230 case TARGET_NR_mount:
8232 /* need to look at the data field */
8233 void *p2, *p3;
8235 if (arg1) {
8236 p = lock_user_string(arg1);
8237 if (!p) {
8238 goto efault;
8240 } else {
8241 p = NULL;
8244 p2 = lock_user_string(arg2);
8245 if (!p2) {
8246 if (arg1) {
8247 unlock_user(p, arg1, 0);
8249 goto efault;
8252 if (arg3) {
8253 p3 = lock_user_string(arg3);
8254 if (!p3) {
8255 if (arg1) {
8256 unlock_user(p, arg1, 0);
8258 unlock_user(p2, arg2, 0);
8259 goto efault;
8261 } else {
8262 p3 = NULL;
8265 /* FIXME - arg5 should be locked, but it isn't clear how to
8266 * do that since it's not guaranteed to be a NULL-terminated
8267 * string.
8269 if (!arg5) {
8270 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8271 } else {
8272 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8274 ret = get_errno(ret);
8276 if (arg1) {
8277 unlock_user(p, arg1, 0);
8279 unlock_user(p2, arg2, 0);
8280 if (arg3) {
8281 unlock_user(p3, arg3, 0);
8284 break;
8285 #ifdef TARGET_NR_umount
8286 case TARGET_NR_umount:
8287 if (!(p = lock_user_string(arg1)))
8288 goto efault;
8289 ret = get_errno(umount(p));
8290 unlock_user(p, arg1, 0);
8291 break;
8292 #endif
8293 #ifdef TARGET_NR_stime /* not on alpha */
8294 case TARGET_NR_stime:
8296 time_t host_time;
8297 if (get_user_sal(host_time, arg1))
8298 goto efault;
8299 ret = get_errno(stime(&host_time));
8301 break;
8302 #endif
8303 case TARGET_NR_ptrace:
8304 goto unimplemented;
8305 #ifdef TARGET_NR_alarm /* not on alpha */
8306 case TARGET_NR_alarm:
8307 ret = alarm(arg1);
8308 break;
8309 #endif
8310 #ifdef TARGET_NR_oldfstat
8311 case TARGET_NR_oldfstat:
8312 goto unimplemented;
8313 #endif
8314 #ifdef TARGET_NR_pause /* not on alpha */
8315 case TARGET_NR_pause:
8316 if (!block_signals()) {
8317 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8319 ret = -TARGET_EINTR;
8320 break;
8321 #endif
8322 #ifdef TARGET_NR_utime
8323 case TARGET_NR_utime:
8325 struct utimbuf tbuf, *host_tbuf;
8326 struct target_utimbuf *target_tbuf;
8327 if (arg2) {
8328 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8329 goto efault;
8330 tbuf.actime = tswapal(target_tbuf->actime);
8331 tbuf.modtime = tswapal(target_tbuf->modtime);
8332 unlock_user_struct(target_tbuf, arg2, 0);
8333 host_tbuf = &tbuf;
8334 } else {
8335 host_tbuf = NULL;
8337 if (!(p = lock_user_string(arg1)))
8338 goto efault;
8339 ret = get_errno(utime(p, host_tbuf));
8340 unlock_user(p, arg1, 0);
8342 break;
8343 #endif
8344 #ifdef TARGET_NR_utimes
8345 case TARGET_NR_utimes:
8347 struct timeval *tvp, tv[2];
8348 if (arg2) {
8349 if (copy_from_user_timeval(&tv[0], arg2)
8350 || copy_from_user_timeval(&tv[1],
8351 arg2 + sizeof(struct target_timeval)))
8352 goto efault;
8353 tvp = tv;
8354 } else {
8355 tvp = NULL;
8357 if (!(p = lock_user_string(arg1)))
8358 goto efault;
8359 ret = get_errno(utimes(p, tvp));
8360 unlock_user(p, arg1, 0);
8362 break;
8363 #endif
8364 #if defined(TARGET_NR_futimesat)
8365 case TARGET_NR_futimesat:
8367 struct timeval *tvp, tv[2];
8368 if (arg3) {
8369 if (copy_from_user_timeval(&tv[0], arg3)
8370 || copy_from_user_timeval(&tv[1],
8371 arg3 + sizeof(struct target_timeval)))
8372 goto efault;
8373 tvp = tv;
8374 } else {
8375 tvp = NULL;
8377 if (!(p = lock_user_string(arg2)))
8378 goto efault;
8379 ret = get_errno(futimesat(arg1, path(p), tvp));
8380 unlock_user(p, arg2, 0);
8382 break;
8383 #endif
8384 #ifdef TARGET_NR_stty
8385 case TARGET_NR_stty:
8386 goto unimplemented;
8387 #endif
8388 #ifdef TARGET_NR_gtty
8389 case TARGET_NR_gtty:
8390 goto unimplemented;
8391 #endif
8392 #ifdef TARGET_NR_access
8393 case TARGET_NR_access:
8394 if (!(p = lock_user_string(arg1)))
8395 goto efault;
8396 ret = get_errno(access(path(p), arg2));
8397 unlock_user(p, arg1, 0);
8398 break;
8399 #endif
8400 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8401 case TARGET_NR_faccessat:
8402 if (!(p = lock_user_string(arg2)))
8403 goto efault;
8404 ret = get_errno(faccessat(arg1, p, arg3, 0));
8405 unlock_user(p, arg2, 0);
8406 break;
8407 #endif
8408 #ifdef TARGET_NR_nice /* not on alpha */
8409 case TARGET_NR_nice:
8410 ret = get_errno(nice(arg1));
8411 break;
8412 #endif
8413 #ifdef TARGET_NR_ftime
8414 case TARGET_NR_ftime:
8415 goto unimplemented;
8416 #endif
8417 case TARGET_NR_sync:
8418 sync();
8419 ret = 0;
8420 break;
8421 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8422 case TARGET_NR_syncfs:
8423 ret = get_errno(syncfs(arg1));
8424 break;
8425 #endif
8426 case TARGET_NR_kill:
8427 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8428 break;
8429 #ifdef TARGET_NR_rename
8430 case TARGET_NR_rename:
8432 void *p2;
8433 p = lock_user_string(arg1);
8434 p2 = lock_user_string(arg2);
8435 if (!p || !p2)
8436 ret = -TARGET_EFAULT;
8437 else
8438 ret = get_errno(rename(p, p2));
8439 unlock_user(p2, arg2, 0);
8440 unlock_user(p, arg1, 0);
8442 break;
8443 #endif
8444 #if defined(TARGET_NR_renameat)
8445 case TARGET_NR_renameat:
8447 void *p2;
8448 p = lock_user_string(arg2);
8449 p2 = lock_user_string(arg4);
8450 if (!p || !p2)
8451 ret = -TARGET_EFAULT;
8452 else
8453 ret = get_errno(renameat(arg1, p, arg3, p2));
8454 unlock_user(p2, arg4, 0);
8455 unlock_user(p, arg2, 0);
8457 break;
8458 #endif
8459 #if defined(TARGET_NR_renameat2)
8460 case TARGET_NR_renameat2:
8462 void *p2;
8463 p = lock_user_string(arg2);
8464 p2 = lock_user_string(arg4);
8465 if (!p || !p2) {
8466 ret = -TARGET_EFAULT;
8467 } else {
8468 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8470 unlock_user(p2, arg4, 0);
8471 unlock_user(p, arg2, 0);
8473 break;
8474 #endif
8475 #ifdef TARGET_NR_mkdir
8476 case TARGET_NR_mkdir:
8477 if (!(p = lock_user_string(arg1)))
8478 goto efault;
8479 ret = get_errno(mkdir(p, arg2));
8480 unlock_user(p, arg1, 0);
8481 break;
8482 #endif
8483 #if defined(TARGET_NR_mkdirat)
8484 case TARGET_NR_mkdirat:
8485 if (!(p = lock_user_string(arg2)))
8486 goto efault;
8487 ret = get_errno(mkdirat(arg1, p, arg3));
8488 unlock_user(p, arg2, 0);
8489 break;
8490 #endif
8491 #ifdef TARGET_NR_rmdir
8492 case TARGET_NR_rmdir:
8493 if (!(p = lock_user_string(arg1)))
8494 goto efault;
8495 ret = get_errno(rmdir(p));
8496 unlock_user(p, arg1, 0);
8497 break;
8498 #endif
8499 case TARGET_NR_dup:
8500 ret = get_errno(dup(arg1));
8501 if (ret >= 0) {
8502 fd_trans_dup(arg1, ret);
8504 break;
8505 #ifdef TARGET_NR_pipe
8506 case TARGET_NR_pipe:
8507 ret = do_pipe(cpu_env, arg1, 0, 0);
8508 break;
8509 #endif
8510 #ifdef TARGET_NR_pipe2
8511 case TARGET_NR_pipe2:
8512 ret = do_pipe(cpu_env, arg1,
8513 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8514 break;
8515 #endif
8516 case TARGET_NR_times:
8518 struct target_tms *tmsp;
8519 struct tms tms;
8520 ret = get_errno(times(&tms));
8521 if (arg1) {
8522 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8523 if (!tmsp)
8524 goto efault;
8525 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8526 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8527 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8528 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8530 if (!is_error(ret))
8531 ret = host_to_target_clock_t(ret);
8533 break;
8534 #ifdef TARGET_NR_prof
8535 case TARGET_NR_prof:
8536 goto unimplemented;
8537 #endif
8538 #ifdef TARGET_NR_signal
8539 case TARGET_NR_signal:
8540 goto unimplemented;
8541 #endif
8542 case TARGET_NR_acct:
8543 if (arg1 == 0) {
8544 ret = get_errno(acct(NULL));
8545 } else {
8546 if (!(p = lock_user_string(arg1)))
8547 goto efault;
8548 ret = get_errno(acct(path(p)));
8549 unlock_user(p, arg1, 0);
8551 break;
8552 #ifdef TARGET_NR_umount2
8553 case TARGET_NR_umount2:
8554 if (!(p = lock_user_string(arg1)))
8555 goto efault;
8556 ret = get_errno(umount2(p, arg2));
8557 unlock_user(p, arg1, 0);
8558 break;
8559 #endif
8560 #ifdef TARGET_NR_lock
8561 case TARGET_NR_lock:
8562 goto unimplemented;
8563 #endif
8564 case TARGET_NR_ioctl:
8565 ret = do_ioctl(arg1, arg2, arg3);
8566 break;
8567 #ifdef TARGET_NR_fcntl
8568 case TARGET_NR_fcntl:
8569 ret = do_fcntl(arg1, arg2, arg3);
8570 break;
8571 #endif
8572 #ifdef TARGET_NR_mpx
8573 case TARGET_NR_mpx:
8574 goto unimplemented;
8575 #endif
8576 case TARGET_NR_setpgid:
8577 ret = get_errno(setpgid(arg1, arg2));
8578 break;
8579 #ifdef TARGET_NR_ulimit
8580 case TARGET_NR_ulimit:
8581 goto unimplemented;
8582 #endif
8583 #ifdef TARGET_NR_oldolduname
8584 case TARGET_NR_oldolduname:
8585 goto unimplemented;
8586 #endif
8587 case TARGET_NR_umask:
8588 ret = get_errno(umask(arg1));
8589 break;
8590 case TARGET_NR_chroot:
8591 if (!(p = lock_user_string(arg1)))
8592 goto efault;
8593 ret = get_errno(chroot(p));
8594 unlock_user(p, arg1, 0);
8595 break;
8596 #ifdef TARGET_NR_ustat
8597 case TARGET_NR_ustat:
8598 goto unimplemented;
8599 #endif
8600 #ifdef TARGET_NR_dup2
8601 case TARGET_NR_dup2:
8602 ret = get_errno(dup2(arg1, arg2));
8603 if (ret >= 0) {
8604 fd_trans_dup(arg1, arg2);
8606 break;
8607 #endif
8608 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8609 case TARGET_NR_dup3:
8611 int host_flags;
8613 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8614 return -EINVAL;
8616 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8617 ret = get_errno(dup3(arg1, arg2, host_flags));
8618 if (ret >= 0) {
8619 fd_trans_dup(arg1, arg2);
8621 break;
8623 #endif
8624 #ifdef TARGET_NR_getppid /* not on alpha */
8625 case TARGET_NR_getppid:
8626 ret = get_errno(getppid());
8627 break;
8628 #endif
8629 #ifdef TARGET_NR_getpgrp
8630 case TARGET_NR_getpgrp:
8631 ret = get_errno(getpgrp());
8632 break;
8633 #endif
8634 case TARGET_NR_setsid:
8635 ret = get_errno(setsid());
8636 break;
8637 #ifdef TARGET_NR_sigaction
8638 case TARGET_NR_sigaction:
8640 #if defined(TARGET_ALPHA)
8641 struct target_sigaction act, oact, *pact = 0;
8642 struct target_old_sigaction *old_act;
8643 if (arg2) {
8644 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8645 goto efault;
8646 act._sa_handler = old_act->_sa_handler;
8647 target_siginitset(&act.sa_mask, old_act->sa_mask);
8648 act.sa_flags = old_act->sa_flags;
8649 act.sa_restorer = 0;
8650 unlock_user_struct(old_act, arg2, 0);
8651 pact = &act;
8653 ret = get_errno(do_sigaction(arg1, pact, &oact));
8654 if (!is_error(ret) && arg3) {
8655 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8656 goto efault;
8657 old_act->_sa_handler = oact._sa_handler;
8658 old_act->sa_mask = oact.sa_mask.sig[0];
8659 old_act->sa_flags = oact.sa_flags;
8660 unlock_user_struct(old_act, arg3, 1);
8662 #elif defined(TARGET_MIPS)
8663 struct target_sigaction act, oact, *pact, *old_act;
8665 if (arg2) {
8666 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8667 goto efault;
8668 act._sa_handler = old_act->_sa_handler;
8669 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8670 act.sa_flags = old_act->sa_flags;
8671 unlock_user_struct(old_act, arg2, 0);
8672 pact = &act;
8673 } else {
8674 pact = NULL;
8677 ret = get_errno(do_sigaction(arg1, pact, &oact));
8679 if (!is_error(ret) && arg3) {
8680 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8681 goto efault;
8682 old_act->_sa_handler = oact._sa_handler;
8683 old_act->sa_flags = oact.sa_flags;
8684 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8685 old_act->sa_mask.sig[1] = 0;
8686 old_act->sa_mask.sig[2] = 0;
8687 old_act->sa_mask.sig[3] = 0;
8688 unlock_user_struct(old_act, arg3, 1);
8690 #else
8691 struct target_old_sigaction *old_act;
8692 struct target_sigaction act, oact, *pact;
8693 if (arg2) {
8694 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8695 goto efault;
8696 act._sa_handler = old_act->_sa_handler;
8697 target_siginitset(&act.sa_mask, old_act->sa_mask);
8698 act.sa_flags = old_act->sa_flags;
8699 act.sa_restorer = old_act->sa_restorer;
8700 unlock_user_struct(old_act, arg2, 0);
8701 pact = &act;
8702 } else {
8703 pact = NULL;
8705 ret = get_errno(do_sigaction(arg1, pact, &oact));
8706 if (!is_error(ret) && arg3) {
8707 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8708 goto efault;
8709 old_act->_sa_handler = oact._sa_handler;
8710 old_act->sa_mask = oact.sa_mask.sig[0];
8711 old_act->sa_flags = oact.sa_flags;
8712 old_act->sa_restorer = oact.sa_restorer;
8713 unlock_user_struct(old_act, arg3, 1);
8715 #endif
8717 break;
8718 #endif
8719 case TARGET_NR_rt_sigaction:
8721 #if defined(TARGET_ALPHA)
8722 /* For Alpha and SPARC this is a 5 argument syscall, with
8723 * a 'restorer' parameter which must be copied into the
8724 * sa_restorer field of the sigaction struct.
8725 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8726 * and arg5 is the sigsetsize.
8727 * Alpha also has a separate rt_sigaction struct that it uses
8728 * here; SPARC uses the usual sigaction struct.
8730 struct target_rt_sigaction *rt_act;
8731 struct target_sigaction act, oact, *pact = 0;
8733 if (arg4 != sizeof(target_sigset_t)) {
8734 ret = -TARGET_EINVAL;
8735 break;
8737 if (arg2) {
8738 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8739 goto efault;
8740 act._sa_handler = rt_act->_sa_handler;
8741 act.sa_mask = rt_act->sa_mask;
8742 act.sa_flags = rt_act->sa_flags;
8743 act.sa_restorer = arg5;
8744 unlock_user_struct(rt_act, arg2, 0);
8745 pact = &act;
8747 ret = get_errno(do_sigaction(arg1, pact, &oact));
8748 if (!is_error(ret) && arg3) {
8749 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8750 goto efault;
8751 rt_act->_sa_handler = oact._sa_handler;
8752 rt_act->sa_mask = oact.sa_mask;
8753 rt_act->sa_flags = oact.sa_flags;
8754 unlock_user_struct(rt_act, arg3, 1);
8756 #else
8757 #ifdef TARGET_SPARC
8758 target_ulong restorer = arg4;
8759 target_ulong sigsetsize = arg5;
8760 #else
8761 target_ulong sigsetsize = arg4;
8762 #endif
8763 struct target_sigaction *act;
8764 struct target_sigaction *oact;
8766 if (sigsetsize != sizeof(target_sigset_t)) {
8767 ret = -TARGET_EINVAL;
8768 break;
8770 if (arg2) {
8771 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8772 goto efault;
8774 #ifdef TARGET_SPARC
8775 act->sa_restorer = restorer;
8776 #endif
8777 } else {
8778 act = NULL;
8780 if (arg3) {
8781 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8782 ret = -TARGET_EFAULT;
8783 goto rt_sigaction_fail;
8785 } else
8786 oact = NULL;
8787 ret = get_errno(do_sigaction(arg1, act, oact));
8788 rt_sigaction_fail:
8789 if (act)
8790 unlock_user_struct(act, arg2, 0);
8791 if (oact)
8792 unlock_user_struct(oact, arg3, 1);
8793 #endif
8795 break;
8796 #ifdef TARGET_NR_sgetmask /* not on alpha */
8797 case TARGET_NR_sgetmask:
8799 sigset_t cur_set;
8800 abi_ulong target_set;
8801 ret = do_sigprocmask(0, NULL, &cur_set);
8802 if (!ret) {
8803 host_to_target_old_sigset(&target_set, &cur_set);
8804 ret = target_set;
8807 break;
8808 #endif
8809 #ifdef TARGET_NR_ssetmask /* not on alpha */
8810 case TARGET_NR_ssetmask:
8812 sigset_t set, oset;
8813 abi_ulong target_set = arg1;
8814 target_to_host_old_sigset(&set, &target_set);
8815 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8816 if (!ret) {
8817 host_to_target_old_sigset(&target_set, &oset);
8818 ret = target_set;
8821 break;
8822 #endif
8823 #ifdef TARGET_NR_sigprocmask
8824 case TARGET_NR_sigprocmask:
8826 #if defined(TARGET_ALPHA)
8827 sigset_t set, oldset;
8828 abi_ulong mask;
8829 int how;
8831 switch (arg1) {
8832 case TARGET_SIG_BLOCK:
8833 how = SIG_BLOCK;
8834 break;
8835 case TARGET_SIG_UNBLOCK:
8836 how = SIG_UNBLOCK;
8837 break;
8838 case TARGET_SIG_SETMASK:
8839 how = SIG_SETMASK;
8840 break;
8841 default:
8842 ret = -TARGET_EINVAL;
8843 goto fail;
8845 mask = arg2;
8846 target_to_host_old_sigset(&set, &mask);
8848 ret = do_sigprocmask(how, &set, &oldset);
8849 if (!is_error(ret)) {
8850 host_to_target_old_sigset(&mask, &oldset);
8851 ret = mask;
8852 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8854 #else
8855 sigset_t set, oldset, *set_ptr;
8856 int how;
8858 if (arg2) {
8859 switch (arg1) {
8860 case TARGET_SIG_BLOCK:
8861 how = SIG_BLOCK;
8862 break;
8863 case TARGET_SIG_UNBLOCK:
8864 how = SIG_UNBLOCK;
8865 break;
8866 case TARGET_SIG_SETMASK:
8867 how = SIG_SETMASK;
8868 break;
8869 default:
8870 ret = -TARGET_EINVAL;
8871 goto fail;
8873 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8874 goto efault;
8875 target_to_host_old_sigset(&set, p);
8876 unlock_user(p, arg2, 0);
8877 set_ptr = &set;
8878 } else {
8879 how = 0;
8880 set_ptr = NULL;
8882 ret = do_sigprocmask(how, set_ptr, &oldset);
8883 if (!is_error(ret) && arg3) {
8884 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8885 goto efault;
8886 host_to_target_old_sigset(p, &oldset);
8887 unlock_user(p, arg3, sizeof(target_sigset_t));
8889 #endif
8891 break;
8892 #endif
8893 case TARGET_NR_rt_sigprocmask:
8895 int how = arg1;
8896 sigset_t set, oldset, *set_ptr;
8898 if (arg4 != sizeof(target_sigset_t)) {
8899 ret = -TARGET_EINVAL;
8900 break;
8903 if (arg2) {
8904 switch(how) {
8905 case TARGET_SIG_BLOCK:
8906 how = SIG_BLOCK;
8907 break;
8908 case TARGET_SIG_UNBLOCK:
8909 how = SIG_UNBLOCK;
8910 break;
8911 case TARGET_SIG_SETMASK:
8912 how = SIG_SETMASK;
8913 break;
8914 default:
8915 ret = -TARGET_EINVAL;
8916 goto fail;
8918 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8919 goto efault;
8920 target_to_host_sigset(&set, p);
8921 unlock_user(p, arg2, 0);
8922 set_ptr = &set;
8923 } else {
8924 how = 0;
8925 set_ptr = NULL;
8927 ret = do_sigprocmask(how, set_ptr, &oldset);
8928 if (!is_error(ret) && arg3) {
8929 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8930 goto efault;
8931 host_to_target_sigset(p, &oldset);
8932 unlock_user(p, arg3, sizeof(target_sigset_t));
8935 break;
8936 #ifdef TARGET_NR_sigpending
8937 case TARGET_NR_sigpending:
8939 sigset_t set;
8940 ret = get_errno(sigpending(&set));
8941 if (!is_error(ret)) {
8942 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8943 goto efault;
8944 host_to_target_old_sigset(p, &set);
8945 unlock_user(p, arg1, sizeof(target_sigset_t));
8948 break;
8949 #endif
8950 case TARGET_NR_rt_sigpending:
8952 sigset_t set;
8954 /* Yes, this check is >, not != like most. We follow the kernel's
8955 * logic and it does it like this because it implements
8956 * NR_sigpending through the same code path, and in that case
8957 * the old_sigset_t is smaller in size.
8959 if (arg2 > sizeof(target_sigset_t)) {
8960 ret = -TARGET_EINVAL;
8961 break;
8964 ret = get_errno(sigpending(&set));
8965 if (!is_error(ret)) {
8966 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8967 goto efault;
8968 host_to_target_sigset(p, &set);
8969 unlock_user(p, arg1, sizeof(target_sigset_t));
8972 break;
8973 #ifdef TARGET_NR_sigsuspend
8974 case TARGET_NR_sigsuspend:
8976 TaskState *ts = cpu->opaque;
8977 #if defined(TARGET_ALPHA)
8978 abi_ulong mask = arg1;
8979 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8980 #else
8981 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8982 goto efault;
8983 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8984 unlock_user(p, arg1, 0);
8985 #endif
8986 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8987 SIGSET_T_SIZE));
8988 if (ret != -TARGET_ERESTARTSYS) {
8989 ts->in_sigsuspend = 1;
8992 break;
8993 #endif
8994 case TARGET_NR_rt_sigsuspend:
8996 TaskState *ts = cpu->opaque;
8998 if (arg2 != sizeof(target_sigset_t)) {
8999 ret = -TARGET_EINVAL;
9000 break;
9002 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9003 goto efault;
9004 target_to_host_sigset(&ts->sigsuspend_mask, p);
9005 unlock_user(p, arg1, 0);
9006 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9007 SIGSET_T_SIZE));
9008 if (ret != -TARGET_ERESTARTSYS) {
9009 ts->in_sigsuspend = 1;
9012 break;
9013 case TARGET_NR_rt_sigtimedwait:
9015 sigset_t set;
9016 struct timespec uts, *puts;
9017 siginfo_t uinfo;
9019 if (arg4 != sizeof(target_sigset_t)) {
9020 ret = -TARGET_EINVAL;
9021 break;
9024 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9025 goto efault;
9026 target_to_host_sigset(&set, p);
9027 unlock_user(p, arg1, 0);
9028 if (arg3) {
9029 puts = &uts;
9030 target_to_host_timespec(puts, arg3);
9031 } else {
9032 puts = NULL;
9034 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9035 SIGSET_T_SIZE));
9036 if (!is_error(ret)) {
9037 if (arg2) {
9038 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9040 if (!p) {
9041 goto efault;
9043 host_to_target_siginfo(p, &uinfo);
9044 unlock_user(p, arg2, sizeof(target_siginfo_t));
9046 ret = host_to_target_signal(ret);
9049 break;
9050 case TARGET_NR_rt_sigqueueinfo:
9052 siginfo_t uinfo;
9054 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9055 if (!p) {
9056 goto efault;
9058 target_to_host_siginfo(&uinfo, p);
9059 unlock_user(p, arg3, 0);
9060 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9062 break;
9063 case TARGET_NR_rt_tgsigqueueinfo:
9065 siginfo_t uinfo;
9067 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9068 if (!p) {
9069 goto efault;
9071 target_to_host_siginfo(&uinfo, p);
9072 unlock_user(p, arg4, 0);
9073 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9075 break;
9076 #ifdef TARGET_NR_sigreturn
9077 case TARGET_NR_sigreturn:
9078 if (block_signals()) {
9079 ret = -TARGET_ERESTARTSYS;
9080 } else {
9081 ret = do_sigreturn(cpu_env);
9083 break;
9084 #endif
9085 case TARGET_NR_rt_sigreturn:
9086 if (block_signals()) {
9087 ret = -TARGET_ERESTARTSYS;
9088 } else {
9089 ret = do_rt_sigreturn(cpu_env);
9091 break;
9092 case TARGET_NR_sethostname:
9093 if (!(p = lock_user_string(arg1)))
9094 goto efault;
9095 ret = get_errno(sethostname(p, arg2));
9096 unlock_user(p, arg1, 0);
9097 break;
9098 case TARGET_NR_setrlimit:
9100 int resource = target_to_host_resource(arg1);
9101 struct target_rlimit *target_rlim;
9102 struct rlimit rlim;
9103 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9104 goto efault;
9105 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9106 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9107 unlock_user_struct(target_rlim, arg2, 0);
9108 ret = get_errno(setrlimit(resource, &rlim));
9110 break;
9111 case TARGET_NR_getrlimit:
9113 int resource = target_to_host_resource(arg1);
9114 struct target_rlimit *target_rlim;
9115 struct rlimit rlim;
9117 ret = get_errno(getrlimit(resource, &rlim));
9118 if (!is_error(ret)) {
9119 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9120 goto efault;
9121 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9122 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9123 unlock_user_struct(target_rlim, arg2, 1);
9126 break;
9127 case TARGET_NR_getrusage:
9129 struct rusage rusage;
9130 ret = get_errno(getrusage(arg1, &rusage));
9131 if (!is_error(ret)) {
9132 ret = host_to_target_rusage(arg2, &rusage);
9135 break;
9136 case TARGET_NR_gettimeofday:
9138 struct timeval tv;
9139 ret = get_errno(gettimeofday(&tv, NULL));
9140 if (!is_error(ret)) {
9141 if (copy_to_user_timeval(arg1, &tv))
9142 goto efault;
9145 break;
9146 case TARGET_NR_settimeofday:
9148 struct timeval tv, *ptv = NULL;
9149 struct timezone tz, *ptz = NULL;
9151 if (arg1) {
9152 if (copy_from_user_timeval(&tv, arg1)) {
9153 goto efault;
9155 ptv = &tv;
9158 if (arg2) {
9159 if (copy_from_user_timezone(&tz, arg2)) {
9160 goto efault;
9162 ptz = &tz;
9165 ret = get_errno(settimeofday(ptv, ptz));
9167 break;
9168 #if defined(TARGET_NR_select)
9169 case TARGET_NR_select:
9170 #if defined(TARGET_WANT_NI_OLD_SELECT)
9171 /* some architectures used to have old_select here
9172 * but now ENOSYS it.
9174 ret = -TARGET_ENOSYS;
9175 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9176 ret = do_old_select(arg1);
9177 #else
9178 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9179 #endif
9180 break;
9181 #endif
9182 #ifdef TARGET_NR_pselect6
9183 case TARGET_NR_pselect6:
9185 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
9186 fd_set rfds, wfds, efds;
9187 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
9188 struct timespec ts, *ts_ptr;
9191 * The 6th arg is actually two args smashed together,
9192 * so we cannot use the C library.
9194 sigset_t set;
9195 struct {
9196 sigset_t *set;
9197 size_t size;
9198 } sig, *sig_ptr;
9200 abi_ulong arg_sigset, arg_sigsize, *arg7;
9201 target_sigset_t *target_sigset;
9203 n = arg1;
9204 rfd_addr = arg2;
9205 wfd_addr = arg3;
9206 efd_addr = arg4;
9207 ts_addr = arg5;
9209 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
9210 if (ret) {
9211 goto fail;
9213 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
9214 if (ret) {
9215 goto fail;
9217 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
9218 if (ret) {
9219 goto fail;
9223 * This takes a timespec, and not a timeval, so we cannot
9224 * use the do_select() helper ...
9226 if (ts_addr) {
9227 if (target_to_host_timespec(&ts, ts_addr)) {
9228 goto efault;
9230 ts_ptr = &ts;
9231 } else {
9232 ts_ptr = NULL;
9235 /* Extract the two packed args for the sigset */
9236 if (arg6) {
9237 sig_ptr = &sig;
9238 sig.size = SIGSET_T_SIZE;
9240 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
9241 if (!arg7) {
9242 goto efault;
9244 arg_sigset = tswapal(arg7[0]);
9245 arg_sigsize = tswapal(arg7[1]);
9246 unlock_user(arg7, arg6, 0);
9248 if (arg_sigset) {
9249 sig.set = &set;
9250 if (arg_sigsize != sizeof(*target_sigset)) {
9251 /* Like the kernel, we enforce correct size sigsets */
9252 ret = -TARGET_EINVAL;
9253 goto fail;
9255 target_sigset = lock_user(VERIFY_READ, arg_sigset,
9256 sizeof(*target_sigset), 1);
9257 if (!target_sigset) {
9258 goto efault;
9260 target_to_host_sigset(&set, target_sigset);
9261 unlock_user(target_sigset, arg_sigset, 0);
9262 } else {
9263 sig.set = NULL;
9265 } else {
9266 sig_ptr = NULL;
9269 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
9270 ts_ptr, sig_ptr));
9272 if (!is_error(ret)) {
9273 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
9274 goto efault;
9275 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
9276 goto efault;
9277 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
9278 goto efault;
9280 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
9281 goto efault;
9284 break;
9285 #endif
9286 #ifdef TARGET_NR_symlink
9287 case TARGET_NR_symlink:
9289 void *p2;
9290 p = lock_user_string(arg1);
9291 p2 = lock_user_string(arg2);
9292 if (!p || !p2)
9293 ret = -TARGET_EFAULT;
9294 else
9295 ret = get_errno(symlink(p, p2));
9296 unlock_user(p2, arg2, 0);
9297 unlock_user(p, arg1, 0);
9299 break;
9300 #endif
9301 #if defined(TARGET_NR_symlinkat)
9302 case TARGET_NR_symlinkat:
9304 void *p2;
9305 p = lock_user_string(arg1);
9306 p2 = lock_user_string(arg3);
9307 if (!p || !p2)
9308 ret = -TARGET_EFAULT;
9309 else
9310 ret = get_errno(symlinkat(p, arg2, p2));
9311 unlock_user(p2, arg3, 0);
9312 unlock_user(p, arg1, 0);
9314 break;
9315 #endif
9316 #ifdef TARGET_NR_oldlstat
9317 case TARGET_NR_oldlstat:
9318 goto unimplemented;
9319 #endif
9320 #ifdef TARGET_NR_readlink
9321 case TARGET_NR_readlink:
9323 void *p2;
9324 p = lock_user_string(arg1);
9325 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9326 if (!p || !p2) {
9327 ret = -TARGET_EFAULT;
9328 } else if (!arg3) {
9329 /* Short circuit this for the magic exe check. */
9330 ret = -TARGET_EINVAL;
9331 } else if (is_proc_myself((const char *)p, "exe")) {
9332 char real[PATH_MAX], *temp;
9333 temp = realpath(exec_path, real);
9334 /* Return value is # of bytes that we wrote to the buffer. */
9335 if (temp == NULL) {
9336 ret = get_errno(-1);
9337 } else {
9338 /* Don't worry about sign mismatch as earlier mapping
9339 * logic would have thrown a bad address error. */
9340 ret = MIN(strlen(real), arg3);
9341 /* We cannot NUL terminate the string. */
9342 memcpy(p2, real, ret);
9344 } else {
9345 ret = get_errno(readlink(path(p), p2, arg3));
9347 unlock_user(p2, arg2, ret);
9348 unlock_user(p, arg1, 0);
9350 break;
9351 #endif
9352 #if defined(TARGET_NR_readlinkat)
9353 case TARGET_NR_readlinkat:
9355 void *p2;
9356 p = lock_user_string(arg2);
9357 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9358 if (!p || !p2) {
9359 ret = -TARGET_EFAULT;
9360 } else if (is_proc_myself((const char *)p, "exe")) {
9361 char real[PATH_MAX], *temp;
9362 temp = realpath(exec_path, real);
9363 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
9364 snprintf((char *)p2, arg4, "%s", real);
9365 } else {
9366 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9368 unlock_user(p2, arg3, ret);
9369 unlock_user(p, arg2, 0);
9371 break;
9372 #endif
9373 #ifdef TARGET_NR_uselib
9374 case TARGET_NR_uselib:
9375 goto unimplemented;
9376 #endif
9377 #ifdef TARGET_NR_swapon
9378 case TARGET_NR_swapon:
9379 if (!(p = lock_user_string(arg1)))
9380 goto efault;
9381 ret = get_errno(swapon(p, arg2));
9382 unlock_user(p, arg1, 0);
9383 break;
9384 #endif
9385 case TARGET_NR_reboot:
9386 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9387 /* arg4 must be ignored in all other cases */
9388 p = lock_user_string(arg4);
9389 if (!p) {
9390 goto efault;
9392 ret = get_errno(reboot(arg1, arg2, arg3, p));
9393 unlock_user(p, arg4, 0);
9394 } else {
9395 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9397 break;
9398 #ifdef TARGET_NR_readdir
9399 case TARGET_NR_readdir:
9400 goto unimplemented;
9401 #endif
9402 #ifdef TARGET_NR_mmap
9403 case TARGET_NR_mmap:
9404 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9405 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9406 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9407 || defined(TARGET_S390X)
9409 abi_ulong *v;
9410 abi_ulong v1, v2, v3, v4, v5, v6;
9411 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9412 goto efault;
9413 v1 = tswapal(v[0]);
9414 v2 = tswapal(v[1]);
9415 v3 = tswapal(v[2]);
9416 v4 = tswapal(v[3]);
9417 v5 = tswapal(v[4]);
9418 v6 = tswapal(v[5]);
9419 unlock_user(v, arg1, 0);
9420 ret = get_errno(target_mmap(v1, v2, v3,
9421 target_to_host_bitmask(v4, mmap_flags_tbl),
9422 v5, v6));
9424 #else
9425 ret = get_errno(target_mmap(arg1, arg2, arg3,
9426 target_to_host_bitmask(arg4, mmap_flags_tbl),
9427 arg5,
9428 arg6));
9429 #endif
9430 break;
9431 #endif
9432 #ifdef TARGET_NR_mmap2
9433 case TARGET_NR_mmap2:
9434 #ifndef MMAP_SHIFT
9435 #define MMAP_SHIFT 12
9436 #endif
9437 ret = get_errno(target_mmap(arg1, arg2, arg3,
9438 target_to_host_bitmask(arg4, mmap_flags_tbl),
9439 arg5,
9440 arg6 << MMAP_SHIFT));
9441 break;
9442 #endif
9443 case TARGET_NR_munmap:
9444 ret = get_errno(target_munmap(arg1, arg2));
9445 break;
9446 case TARGET_NR_mprotect:
9448 TaskState *ts = cpu->opaque;
9449 /* Special hack to detect libc making the stack executable. */
9450 if ((arg3 & PROT_GROWSDOWN)
9451 && arg1 >= ts->info->stack_limit
9452 && arg1 <= ts->info->start_stack) {
9453 arg3 &= ~PROT_GROWSDOWN;
9454 arg2 = arg2 + arg1 - ts->info->stack_limit;
9455 arg1 = ts->info->stack_limit;
9458 ret = get_errno(target_mprotect(arg1, arg2, arg3));
9459 break;
9460 #ifdef TARGET_NR_mremap
9461 case TARGET_NR_mremap:
9462 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9463 break;
9464 #endif
9465 /* ??? msync/mlock/munlock are broken for softmmu. */
9466 #ifdef TARGET_NR_msync
9467 case TARGET_NR_msync:
9468 ret = get_errno(msync(g2h(arg1), arg2, arg3));
9469 break;
9470 #endif
9471 #ifdef TARGET_NR_mlock
9472 case TARGET_NR_mlock:
9473 ret = get_errno(mlock(g2h(arg1), arg2));
9474 break;
9475 #endif
9476 #ifdef TARGET_NR_munlock
9477 case TARGET_NR_munlock:
9478 ret = get_errno(munlock(g2h(arg1), arg2));
9479 break;
9480 #endif
9481 #ifdef TARGET_NR_mlockall
9482 case TARGET_NR_mlockall:
9483 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9484 break;
9485 #endif
9486 #ifdef TARGET_NR_munlockall
9487 case TARGET_NR_munlockall:
9488 ret = get_errno(munlockall());
9489 break;
9490 #endif
9491 case TARGET_NR_truncate:
9492 if (!(p = lock_user_string(arg1)))
9493 goto efault;
9494 ret = get_errno(truncate(p, arg2));
9495 unlock_user(p, arg1, 0);
9496 break;
9497 case TARGET_NR_ftruncate:
9498 ret = get_errno(ftruncate(arg1, arg2));
9499 break;
9500 case TARGET_NR_fchmod:
9501 ret = get_errno(fchmod(arg1, arg2));
9502 break;
9503 #if defined(TARGET_NR_fchmodat)
9504 case TARGET_NR_fchmodat:
9505 if (!(p = lock_user_string(arg2)))
9506 goto efault;
9507 ret = get_errno(fchmodat(arg1, p, arg3, 0));
9508 unlock_user(p, arg2, 0);
9509 break;
9510 #endif
9511 case TARGET_NR_getpriority:
9512 /* Note that negative values are valid for getpriority, so we must
9513 differentiate based on errno settings. */
9514 errno = 0;
9515 ret = getpriority(arg1, arg2);
9516 if (ret == -1 && errno != 0) {
9517 ret = -host_to_target_errno(errno);
9518 break;
9520 #ifdef TARGET_ALPHA
9521 /* Return value is the unbiased priority. Signal no error. */
9522 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9523 #else
9524 /* Return value is a biased priority to avoid negative numbers. */
9525 ret = 20 - ret;
9526 #endif
9527 break;
9528 case TARGET_NR_setpriority:
9529 ret = get_errno(setpriority(arg1, arg2, arg3));
9530 break;
9531 #ifdef TARGET_NR_profil
9532 case TARGET_NR_profil:
9533 goto unimplemented;
9534 #endif
9535 case TARGET_NR_statfs:
9536 if (!(p = lock_user_string(arg1)))
9537 goto efault;
9538 ret = get_errno(statfs(path(p), &stfs));
9539 unlock_user(p, arg1, 0);
9540 convert_statfs:
9541 if (!is_error(ret)) {
9542 struct target_statfs *target_stfs;
9544 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9545 goto efault;
9546 __put_user(stfs.f_type, &target_stfs->f_type);
9547 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9548 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9549 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9550 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9551 __put_user(stfs.f_files, &target_stfs->f_files);
9552 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9553 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9554 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9555 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9556 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9557 #ifdef _STATFS_F_FLAGS
9558 __put_user(stfs.f_flags, &target_stfs->f_flags);
9559 #else
9560 __put_user(0, &target_stfs->f_flags);
9561 #endif
9562 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9563 unlock_user_struct(target_stfs, arg2, 1);
9565 break;
9566 case TARGET_NR_fstatfs:
9567 ret = get_errno(fstatfs(arg1, &stfs));
9568 goto convert_statfs;
9569 #ifdef TARGET_NR_statfs64
9570 case TARGET_NR_statfs64:
9571 if (!(p = lock_user_string(arg1)))
9572 goto efault;
9573 ret = get_errno(statfs(path(p), &stfs));
9574 unlock_user(p, arg1, 0);
9575 convert_statfs64:
9576 if (!is_error(ret)) {
9577 struct target_statfs64 *target_stfs;
9579 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9580 goto efault;
9581 __put_user(stfs.f_type, &target_stfs->f_type);
9582 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9583 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9584 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9585 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9586 __put_user(stfs.f_files, &target_stfs->f_files);
9587 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9588 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9589 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9590 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9591 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9592 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9593 unlock_user_struct(target_stfs, arg3, 1);
9595 break;
9596 case TARGET_NR_fstatfs64:
9597 ret = get_errno(fstatfs(arg1, &stfs));
9598 goto convert_statfs64;
9599 #endif
9600 #ifdef TARGET_NR_ioperm
9601 case TARGET_NR_ioperm:
9602 goto unimplemented;
9603 #endif
9604 #ifdef TARGET_NR_socketcall
9605 case TARGET_NR_socketcall:
9606 ret = do_socketcall(arg1, arg2);
9607 break;
9608 #endif
9609 #ifdef TARGET_NR_accept
9610 case TARGET_NR_accept:
9611 ret = do_accept4(arg1, arg2, arg3, 0);
9612 break;
9613 #endif
9614 #ifdef TARGET_NR_accept4
9615 case TARGET_NR_accept4:
9616 ret = do_accept4(arg1, arg2, arg3, arg4);
9617 break;
9618 #endif
9619 #ifdef TARGET_NR_bind
9620 case TARGET_NR_bind:
9621 ret = do_bind(arg1, arg2, arg3);
9622 break;
9623 #endif
9624 #ifdef TARGET_NR_connect
9625 case TARGET_NR_connect:
9626 ret = do_connect(arg1, arg2, arg3);
9627 break;
9628 #endif
9629 #ifdef TARGET_NR_getpeername
9630 case TARGET_NR_getpeername:
9631 ret = do_getpeername(arg1, arg2, arg3);
9632 break;
9633 #endif
9634 #ifdef TARGET_NR_getsockname
9635 case TARGET_NR_getsockname:
9636 ret = do_getsockname(arg1, arg2, arg3);
9637 break;
9638 #endif
9639 #ifdef TARGET_NR_getsockopt
9640 case TARGET_NR_getsockopt:
9641 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9642 break;
9643 #endif
9644 #ifdef TARGET_NR_listen
9645 case TARGET_NR_listen:
9646 ret = get_errno(listen(arg1, arg2));
9647 break;
9648 #endif
9649 #ifdef TARGET_NR_recv
9650 case TARGET_NR_recv:
9651 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9652 break;
9653 #endif
9654 #ifdef TARGET_NR_recvfrom
9655 case TARGET_NR_recvfrom:
9656 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9657 break;
9658 #endif
9659 #ifdef TARGET_NR_recvmsg
9660 case TARGET_NR_recvmsg:
9661 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
9662 break;
9663 #endif
9664 #ifdef TARGET_NR_send
9665 case TARGET_NR_send:
9666 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9667 break;
9668 #endif
9669 #ifdef TARGET_NR_sendmsg
9670 case TARGET_NR_sendmsg:
9671 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
9672 break;
9673 #endif
9674 #ifdef TARGET_NR_sendmmsg
9675 case TARGET_NR_sendmmsg:
9676 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9677 break;
9678 case TARGET_NR_recvmmsg:
9679 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9680 break;
9681 #endif
9682 #ifdef TARGET_NR_sendto
9683 case TARGET_NR_sendto:
9684 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9685 break;
9686 #endif
9687 #ifdef TARGET_NR_shutdown
9688 case TARGET_NR_shutdown:
9689 ret = get_errno(shutdown(arg1, arg2));
9690 break;
9691 #endif
9692 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9693 case TARGET_NR_getrandom:
9694 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9695 if (!p) {
9696 goto efault;
9698 ret = get_errno(getrandom(p, arg2, arg3));
9699 unlock_user(p, arg1, ret);
9700 break;
9701 #endif
9702 #ifdef TARGET_NR_socket
9703 case TARGET_NR_socket:
9704 ret = do_socket(arg1, arg2, arg3);
9705 break;
9706 #endif
9707 #ifdef TARGET_NR_socketpair
9708 case TARGET_NR_socketpair:
9709 ret = do_socketpair(arg1, arg2, arg3, arg4);
9710 break;
9711 #endif
9712 #ifdef TARGET_NR_setsockopt
9713 case TARGET_NR_setsockopt:
9714 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9715 break;
9716 #endif
9717 #if defined(TARGET_NR_syslog)
9718 case TARGET_NR_syslog:
9720 int len = arg2;
9722 switch (arg1) {
9723 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
9724 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
9725 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
9726 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
9727 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
9728 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9729 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
9730 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
9732 ret = get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9734 break;
9735 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
9736 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
9737 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
9739 ret = -TARGET_EINVAL;
9740 if (len < 0) {
9741 goto fail;
9743 ret = 0;
9744 if (len == 0) {
9745 break;
9747 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9748 if (!p) {
9749 ret = -TARGET_EFAULT;
9750 goto fail;
9752 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9753 unlock_user(p, arg2, arg3);
9755 break;
9756 default:
9757 ret = -EINVAL;
9758 break;
9761 break;
9762 #endif
9763 case TARGET_NR_setitimer:
9765 struct itimerval value, ovalue, *pvalue;
9767 if (arg2) {
9768 pvalue = &value;
9769 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9770 || copy_from_user_timeval(&pvalue->it_value,
9771 arg2 + sizeof(struct target_timeval)))
9772 goto efault;
9773 } else {
9774 pvalue = NULL;
9776 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9777 if (!is_error(ret) && arg3) {
9778 if (copy_to_user_timeval(arg3,
9779 &ovalue.it_interval)
9780 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9781 &ovalue.it_value))
9782 goto efault;
9785 break;
9786 case TARGET_NR_getitimer:
9788 struct itimerval value;
9790 ret = get_errno(getitimer(arg1, &value));
9791 if (!is_error(ret) && arg2) {
9792 if (copy_to_user_timeval(arg2,
9793 &value.it_interval)
9794 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9795 &value.it_value))
9796 goto efault;
9799 break;
9800 #ifdef TARGET_NR_stat
9801 case TARGET_NR_stat:
9802 if (!(p = lock_user_string(arg1)))
9803 goto efault;
9804 ret = get_errno(stat(path(p), &st));
9805 unlock_user(p, arg1, 0);
9806 goto do_stat;
9807 #endif
9808 #ifdef TARGET_NR_lstat
9809 case TARGET_NR_lstat:
9810 if (!(p = lock_user_string(arg1)))
9811 goto efault;
9812 ret = get_errno(lstat(path(p), &st));
9813 unlock_user(p, arg1, 0);
9814 goto do_stat;
9815 #endif
9816 case TARGET_NR_fstat:
9818 ret = get_errno(fstat(arg1, &st));
9819 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9820 do_stat:
9821 #endif
9822 if (!is_error(ret)) {
9823 struct target_stat *target_st;
9825 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9826 goto efault;
9827 memset(target_st, 0, sizeof(*target_st));
9828 __put_user(st.st_dev, &target_st->st_dev);
9829 __put_user(st.st_ino, &target_st->st_ino);
9830 __put_user(st.st_mode, &target_st->st_mode);
9831 __put_user(st.st_uid, &target_st->st_uid);
9832 __put_user(st.st_gid, &target_st->st_gid);
9833 __put_user(st.st_nlink, &target_st->st_nlink);
9834 __put_user(st.st_rdev, &target_st->st_rdev);
9835 __put_user(st.st_size, &target_st->st_size);
9836 __put_user(st.st_blksize, &target_st->st_blksize);
9837 __put_user(st.st_blocks, &target_st->st_blocks);
9838 __put_user(st.st_atime, &target_st->target_st_atime);
9839 __put_user(st.st_mtime, &target_st->target_st_mtime);
9840 __put_user(st.st_ctime, &target_st->target_st_ctime);
9841 unlock_user_struct(target_st, arg2, 1);
9844 break;
9845 #ifdef TARGET_NR_olduname
9846 case TARGET_NR_olduname:
9847 goto unimplemented;
9848 #endif
9849 #ifdef TARGET_NR_iopl
9850 case TARGET_NR_iopl:
9851 goto unimplemented;
9852 #endif
9853 case TARGET_NR_vhangup:
9854 ret = get_errno(vhangup());
9855 break;
9856 #ifdef TARGET_NR_idle
9857 case TARGET_NR_idle:
9858 goto unimplemented;
9859 #endif
9860 #ifdef TARGET_NR_syscall
9861 case TARGET_NR_syscall:
9862 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9863 arg6, arg7, arg8, 0);
9864 break;
9865 #endif
9866 case TARGET_NR_wait4:
9868 int status;
9869 abi_long status_ptr = arg2;
9870 struct rusage rusage, *rusage_ptr;
9871 abi_ulong target_rusage = arg4;
9872 abi_long rusage_err;
9873 if (target_rusage)
9874 rusage_ptr = &rusage;
9875 else
9876 rusage_ptr = NULL;
9877 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9878 if (!is_error(ret)) {
9879 if (status_ptr && ret) {
9880 status = host_to_target_waitstatus(status);
9881 if (put_user_s32(status, status_ptr))
9882 goto efault;
9884 if (target_rusage) {
9885 rusage_err = host_to_target_rusage(target_rusage, &rusage);
9886 if (rusage_err) {
9887 ret = rusage_err;
9892 break;
9893 #ifdef TARGET_NR_swapoff
9894 case TARGET_NR_swapoff:
9895 if (!(p = lock_user_string(arg1)))
9896 goto efault;
9897 ret = get_errno(swapoff(p));
9898 unlock_user(p, arg1, 0);
9899 break;
9900 #endif
9901 case TARGET_NR_sysinfo:
9903 struct target_sysinfo *target_value;
9904 struct sysinfo value;
9905 ret = get_errno(sysinfo(&value));
9906 if (!is_error(ret) && arg1)
9908 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9909 goto efault;
9910 __put_user(value.uptime, &target_value->uptime);
9911 __put_user(value.loads[0], &target_value->loads[0]);
9912 __put_user(value.loads[1], &target_value->loads[1]);
9913 __put_user(value.loads[2], &target_value->loads[2]);
9914 __put_user(value.totalram, &target_value->totalram);
9915 __put_user(value.freeram, &target_value->freeram);
9916 __put_user(value.sharedram, &target_value->sharedram);
9917 __put_user(value.bufferram, &target_value->bufferram);
9918 __put_user(value.totalswap, &target_value->totalswap);
9919 __put_user(value.freeswap, &target_value->freeswap);
9920 __put_user(value.procs, &target_value->procs);
9921 __put_user(value.totalhigh, &target_value->totalhigh);
9922 __put_user(value.freehigh, &target_value->freehigh);
9923 __put_user(value.mem_unit, &target_value->mem_unit);
9924 unlock_user_struct(target_value, arg1, 1);
9927 break;
9928 #ifdef TARGET_NR_ipc
9929 case TARGET_NR_ipc:
9930 ret = do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9931 break;
9932 #endif
9933 #ifdef TARGET_NR_semget
9934 case TARGET_NR_semget:
9935 ret = get_errno(semget(arg1, arg2, arg3));
9936 break;
9937 #endif
9938 #ifdef TARGET_NR_semop
9939 case TARGET_NR_semop:
9940 ret = do_semop(arg1, arg2, arg3);
9941 break;
9942 #endif
9943 #ifdef TARGET_NR_semctl
9944 case TARGET_NR_semctl:
9945 ret = do_semctl(arg1, arg2, arg3, arg4);
9946 break;
9947 #endif
9948 #ifdef TARGET_NR_msgctl
9949 case TARGET_NR_msgctl:
9950 ret = do_msgctl(arg1, arg2, arg3);
9951 break;
9952 #endif
9953 #ifdef TARGET_NR_msgget
9954 case TARGET_NR_msgget:
9955 ret = get_errno(msgget(arg1, arg2));
9956 break;
9957 #endif
9958 #ifdef TARGET_NR_msgrcv
9959 case TARGET_NR_msgrcv:
9960 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9961 break;
9962 #endif
9963 #ifdef TARGET_NR_msgsnd
9964 case TARGET_NR_msgsnd:
9965 ret = do_msgsnd(arg1, arg2, arg3, arg4);
9966 break;
9967 #endif
9968 #ifdef TARGET_NR_shmget
9969 case TARGET_NR_shmget:
9970 ret = get_errno(shmget(arg1, arg2, arg3));
9971 break;
9972 #endif
9973 #ifdef TARGET_NR_shmctl
9974 case TARGET_NR_shmctl:
9975 ret = do_shmctl(arg1, arg2, arg3);
9976 break;
9977 #endif
9978 #ifdef TARGET_NR_shmat
9979 case TARGET_NR_shmat:
9980 ret = do_shmat(cpu_env, arg1, arg2, arg3);
9981 break;
9982 #endif
9983 #ifdef TARGET_NR_shmdt
9984 case TARGET_NR_shmdt:
9985 ret = do_shmdt(arg1);
9986 break;
9987 #endif
9988 case TARGET_NR_fsync:
9989 ret = get_errno(fsync(arg1));
9990 break;
9991 case TARGET_NR_clone:
9992 /* Linux manages to have three different orderings for its
9993 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9994 * match the kernel's CONFIG_CLONE_* settings.
9995 * Microblaze is further special in that it uses a sixth
9996 * implicit argument to clone for the TLS pointer.
9998 #if defined(TARGET_MICROBLAZE)
9999 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10000 #elif defined(TARGET_CLONE_BACKWARDS)
10001 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10002 #elif defined(TARGET_CLONE_BACKWARDS2)
10003 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10004 #else
10005 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10006 #endif
10007 break;
10008 #ifdef __NR_exit_group
10009 /* new thread calls */
10010 case TARGET_NR_exit_group:
10011 #ifdef TARGET_GPROF
10012 _mcleanup();
10013 #endif
10014 gdb_exit(cpu_env, arg1);
10015 ret = get_errno(exit_group(arg1));
10016 break;
10017 #endif
10018 case TARGET_NR_setdomainname:
10019 if (!(p = lock_user_string(arg1)))
10020 goto efault;
10021 ret = get_errno(setdomainname(p, arg2));
10022 unlock_user(p, arg1, 0);
10023 break;
10024 case TARGET_NR_uname:
10025 /* no need to transcode because we use the linux syscall */
10027 struct new_utsname * buf;
10029 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10030 goto efault;
10031 ret = get_errno(sys_uname(buf));
10032 if (!is_error(ret)) {
10033 /* Overwrite the native machine name with whatever is being
10034 emulated. */
10035 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
10036 /* Allow the user to override the reported release. */
10037 if (qemu_uname_release && *qemu_uname_release) {
10038 g_strlcpy(buf->release, qemu_uname_release,
10039 sizeof(buf->release));
10042 unlock_user_struct(buf, arg1, 1);
10044 break;
10045 #ifdef TARGET_I386
10046 case TARGET_NR_modify_ldt:
10047 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
10048 break;
10049 #if !defined(TARGET_X86_64)
10050 case TARGET_NR_vm86old:
10051 goto unimplemented;
10052 case TARGET_NR_vm86:
10053 ret = do_vm86(cpu_env, arg1, arg2);
10054 break;
10055 #endif
10056 #endif
10057 case TARGET_NR_adjtimex:
10059 struct timex host_buf;
10061 if (target_to_host_timex(&host_buf, arg1) != 0) {
10062 goto efault;
10064 ret = get_errno(adjtimex(&host_buf));
10065 if (!is_error(ret)) {
10066 if (host_to_target_timex(arg1, &host_buf) != 0) {
10067 goto efault;
10071 break;
10072 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10073 case TARGET_NR_clock_adjtime:
10075 struct timex htx, *phtx = &htx;
10077 if (target_to_host_timex(phtx, arg2) != 0) {
10078 goto efault;
10080 ret = get_errno(clock_adjtime(arg1, phtx));
10081 if (!is_error(ret) && phtx) {
10082 if (host_to_target_timex(arg2, phtx) != 0) {
10083 goto efault;
10087 break;
10088 #endif
10089 #ifdef TARGET_NR_create_module
10090 case TARGET_NR_create_module:
10091 #endif
10092 case TARGET_NR_init_module:
10093 case TARGET_NR_delete_module:
10094 #ifdef TARGET_NR_get_kernel_syms
10095 case TARGET_NR_get_kernel_syms:
10096 #endif
10097 goto unimplemented;
10098 case TARGET_NR_quotactl:
10099 goto unimplemented;
10100 case TARGET_NR_getpgid:
10101 ret = get_errno(getpgid(arg1));
10102 break;
10103 case TARGET_NR_fchdir:
10104 ret = get_errno(fchdir(arg1));
10105 break;
10106 #ifdef TARGET_NR_bdflush /* not on x86_64 */
10107 case TARGET_NR_bdflush:
10108 goto unimplemented;
10109 #endif
10110 #ifdef TARGET_NR_sysfs
10111 case TARGET_NR_sysfs:
10112 goto unimplemented;
10113 #endif
10114 case TARGET_NR_personality:
10115 ret = get_errno(personality(arg1));
10116 break;
10117 #ifdef TARGET_NR_afs_syscall
10118 case TARGET_NR_afs_syscall:
10119 goto unimplemented;
10120 #endif
10121 #ifdef TARGET_NR__llseek /* Not on alpha */
10122 case TARGET_NR__llseek:
10124 int64_t res;
10125 #if !defined(__NR_llseek)
10126 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10127 if (res == -1) {
10128 ret = get_errno(res);
10129 } else {
10130 ret = 0;
10132 #else
10133 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10134 #endif
10135 if ((ret == 0) && put_user_s64(res, arg4)) {
10136 goto efault;
10139 break;
10140 #endif
#ifdef TARGET_NR_getdents
case TARGET_NR_getdents:
#ifdef __NR_getdents
#if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
    /* 32-bit guest on 64-bit host: host dirents are larger than target
     * dirents, so read into a scratch buffer and repack record by record.
     */
    {
        struct target_dirent *target_dirp;
        struct linux_dirent *dirp;
        abi_long count = arg3;

        dirp = g_try_malloc(count);
        if (!dirp) {
            ret = -TARGET_ENOMEM;
            goto fail;
        }

        ret = get_errno(sys_getdents(arg1, dirp, count));
        if (!is_error(ret)) {
            struct linux_dirent *de;
            struct target_dirent *tde;
            int len = ret;
            int reclen, treclen;
            int count1, tnamelen;

            count1 = 0;
            de = dirp;
            if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                goto efault;
            tde = target_dirp;
            while (len > 0) {
                reclen = de->d_reclen;
                tnamelen = reclen - offsetof(struct linux_dirent, d_name);
                assert(tnamelen >= 0);
                treclen = tnamelen + offsetof(struct target_dirent, d_name);
                assert(count1 + treclen <= count);
                tde->d_reclen = tswap16(treclen);
                tde->d_ino = tswapal(de->d_ino);
                tde->d_off = tswapal(de->d_off);
                memcpy(tde->d_name, de->d_name, tnamelen);
                de = (struct linux_dirent *)((char *)de + reclen);
                len -= reclen;
                tde = (struct target_dirent *)((char *)tde + treclen);
                count1 += treclen;
            }
            ret = count1;
            unlock_user(target_dirp, arg2, ret);
        }
        g_free(dirp);
    }
#else
    /* Same-width guest/host: byteswap the records in place. */
    {
        struct linux_dirent *dirp;
        abi_long count = arg3;

        if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
            goto efault;
        ret = get_errno(sys_getdents(arg1, dirp, count));
        if (!is_error(ret)) {
            struct linux_dirent *de;
            int len = ret;
            int reclen;
            de = dirp;
            while (len > 0) {
                reclen = de->d_reclen;
                if (reclen > len)
                    break;
                de->d_reclen = tswap16(reclen);
                tswapls(&de->d_ino);
                tswapls(&de->d_off);
                de = (struct linux_dirent *)((char *)de + reclen);
                len -= reclen;
            }
        }
        unlock_user(dirp, arg2, ret);
    }
#endif
#else
    /* Implement getdents in terms of getdents64 */
    {
        struct linux_dirent64 *dirp;
        abi_long count = arg3;

        dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
        if (!dirp) {
            goto efault;
        }
        ret = get_errno(sys_getdents64(arg1, dirp, count));
        if (!is_error(ret)) {
            /* Convert the dirent64 structs to target dirent.  We do this
             * in-place, since we can guarantee that a target_dirent is no
             * larger than a dirent64; however this means we have to be
             * careful to read everything before writing in the new format.
             */
            struct linux_dirent64 *de;
            struct target_dirent *tde;
            int len = ret;
            int tlen = 0;

            de = dirp;
            tde = (struct target_dirent *)dirp;
            while (len > 0) {
                int namelen, treclen;
                int reclen = de->d_reclen;
                uint64_t ino = de->d_ino;
                int64_t off = de->d_off;
                uint8_t type = de->d_type;

                namelen = strlen(de->d_name);
                /* +2: NUL terminator plus the trailing d_type byte */
                treclen = offsetof(struct target_dirent, d_name)
                    + namelen + 2;
                treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));

                memmove(tde->d_name, de->d_name, namelen + 1);
                tde->d_ino = tswapal(ino);
                tde->d_off = tswapal(off);
                tde->d_reclen = tswap16(treclen);
                /* The target_dirent type is in what was formerly a
                 * padding byte at the end of the structure:
                 */
                *(((char *)tde) + treclen - 1) = type;

                de = (struct linux_dirent64 *)((char *)de + reclen);
                tde = (struct target_dirent *)((char *)tde + treclen);
                len -= reclen;
                tlen += treclen;
            }
            ret = tlen;
        }
        unlock_user(dirp, arg2, ret);
    }
#endif
    break;
#endif /* TARGET_NR_getdents */
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
case TARGET_NR_getdents64:
    /* dirent64 layout matches between host and target; only the
     * multi-byte fields need byteswapping, done in place.
     */
    {
        struct linux_dirent64 *dirp;
        abi_long count = arg3;
        if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
            goto efault;
        ret = get_errno(sys_getdents64(arg1, dirp, count));
        if (!is_error(ret)) {
            struct linux_dirent64 *de;
            int len = ret;
            int reclen;
            de = dirp;
            while (len > 0) {
                reclen = de->d_reclen;
                if (reclen > len)
                    break;
                de->d_reclen = tswap16(reclen);
                tswap64s((uint64_t *)&de->d_ino);
                tswap64s((uint64_t *)&de->d_off);
                de = (struct linux_dirent64 *)((char *)de + reclen);
                len -= reclen;
            }
        }
        unlock_user(dirp, arg2, ret);
    }
    break;
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR__newselect)
case TARGET_NR__newselect:
    ret = do_select(arg1, arg2, arg3, arg4, arg5);
    break;
#endif
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
# ifdef TARGET_NR_poll
case TARGET_NR_poll:
# endif
# ifdef TARGET_NR_ppoll
case TARGET_NR_ppoll:
# endif
    /* Shared front/back end for poll and ppoll: convert the guest
     * pollfd array in, dispatch on the syscall number, convert
     * revents back out.
     */
    {
        struct target_pollfd *target_pfd;
        unsigned int nfds = arg2;
        struct pollfd *pfd;
        unsigned int i;

        pfd = NULL;
        target_pfd = NULL;
        if (nfds) {
            /* Reject sizes that would overflow the lock_user length */
            if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
                ret = -TARGET_EINVAL;
                break;
            }

            target_pfd = lock_user(VERIFY_WRITE, arg1,
                                   sizeof(struct target_pollfd) * nfds, 1);
            if (!target_pfd) {
                goto efault;
            }

            pfd = alloca(sizeof(struct pollfd) * nfds);
            for (i = 0; i < nfds; i++) {
                pfd[i].fd = tswap32(target_pfd[i].fd);
                pfd[i].events = tswap16(target_pfd[i].events);
            }
        }

        switch (num) {
# ifdef TARGET_NR_ppoll
        case TARGET_NR_ppoll:
        {
            struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg3) {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    goto efault;
                }
            } else {
                timeout_ts = NULL;
            }

            if (arg4) {
                if (arg5 != sizeof(target_sigset_t)) {
                    unlock_user(target_pfd, arg1, 0);
                    ret = -TARGET_EINVAL;
                    break;
                }

                target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
                if (!target_set) {
                    unlock_user(target_pfd, arg1, 0);
                    goto efault;
                }
                target_to_host_sigset(set, target_set);
            } else {
                set = NULL;
            }

            ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                       set, SIGSET_T_SIZE));

            /* ppoll writes back the remaining timeout */
            if (!is_error(ret) && arg3) {
                host_to_target_timespec(arg3, timeout_ts);
            }
            if (arg4) {
                unlock_user(target_set, arg4, 0);
            }
            break;
        }
# endif
# ifdef TARGET_NR_poll
        case TARGET_NR_poll:
        {
            struct timespec ts, *pts;

            if (arg3 >= 0) {
                /* Convert ms to secs, ns */
                ts.tv_sec = arg3 / 1000;
                ts.tv_nsec = (arg3 % 1000) * 1000000LL;
                pts = &ts;
            } else {
                /* -ve poll() timeout means "infinite" */
                pts = NULL;
            }
            ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
            break;
        }
# endif
        default:
            g_assert_not_reached();
        }

        if (!is_error(ret)) {
            for (i = 0; i < nfds; i++) {
                target_pfd[i].revents = tswap16(pfd[i].revents);
            }
        }
        unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    }
    break;
#endif
10417 case TARGET_NR_flock:
10418 /* NOTE: the flock constant seems to be the same for every
10419 Linux platform */
10420 ret = get_errno(safe_flock(arg1, arg2));
10421 break;
10422 case TARGET_NR_readv:
10424 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10425 if (vec != NULL) {
10426 ret = get_errno(safe_readv(arg1, vec, arg3));
10427 unlock_iovec(vec, arg2, arg3, 1);
10428 } else {
10429 ret = -host_to_target_errno(errno);
10432 break;
10433 case TARGET_NR_writev:
10435 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10436 if (vec != NULL) {
10437 ret = get_errno(safe_writev(arg1, vec, arg3));
10438 unlock_iovec(vec, arg2, arg3, 0);
10439 } else {
10440 ret = -host_to_target_errno(errno);
10443 break;
10444 #if defined(TARGET_NR_preadv)
10445 case TARGET_NR_preadv:
10447 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10448 if (vec != NULL) {
10449 ret = get_errno(safe_preadv(arg1, vec, arg3, arg4, arg5));
10450 unlock_iovec(vec, arg2, arg3, 1);
10451 } else {
10452 ret = -host_to_target_errno(errno);
10455 break;
10456 #endif
10457 #if defined(TARGET_NR_pwritev)
10458 case TARGET_NR_pwritev:
10460 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10461 if (vec != NULL) {
10462 ret = get_errno(safe_pwritev(arg1, vec, arg3, arg4, arg5));
10463 unlock_iovec(vec, arg2, arg3, 0);
10464 } else {
10465 ret = -host_to_target_errno(errno);
10468 break;
10469 #endif
10470 case TARGET_NR_getsid:
10471 ret = get_errno(getsid(arg1));
10472 break;
10473 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10474 case TARGET_NR_fdatasync:
10475 ret = get_errno(fdatasync(arg1));
10476 break;
10477 #endif
10478 #ifdef TARGET_NR__sysctl
10479 case TARGET_NR__sysctl:
10480 /* We don't implement this, but ENOTDIR is always a safe
10481 return value. */
10482 ret = -TARGET_ENOTDIR;
10483 break;
10484 #endif
10485 case TARGET_NR_sched_getaffinity:
10487 unsigned int mask_size;
10488 unsigned long *mask;
10491 * sched_getaffinity needs multiples of ulong, so need to take
10492 * care of mismatches between target ulong and host ulong sizes.
10494 if (arg2 & (sizeof(abi_ulong) - 1)) {
10495 ret = -TARGET_EINVAL;
10496 break;
10498 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10500 mask = alloca(mask_size);
10501 memset(mask, 0, mask_size);
10502 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10504 if (!is_error(ret)) {
10505 if (ret > arg2) {
10506 /* More data returned than the caller's buffer will fit.
10507 * This only happens if sizeof(abi_long) < sizeof(long)
10508 * and the caller passed us a buffer holding an odd number
10509 * of abi_longs. If the host kernel is actually using the
10510 * extra 4 bytes then fail EINVAL; otherwise we can just
10511 * ignore them and only copy the interesting part.
10513 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10514 if (numcpus > arg2 * 8) {
10515 ret = -TARGET_EINVAL;
10516 break;
10518 ret = arg2;
10521 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10522 goto efault;
10526 break;
10527 case TARGET_NR_sched_setaffinity:
10529 unsigned int mask_size;
10530 unsigned long *mask;
10533 * sched_setaffinity needs multiples of ulong, so need to take
10534 * care of mismatches between target ulong and host ulong sizes.
10536 if (arg2 & (sizeof(abi_ulong) - 1)) {
10537 ret = -TARGET_EINVAL;
10538 break;
10540 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10541 mask = alloca(mask_size);
10543 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10544 if (ret) {
10545 break;
10548 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10550 break;
10551 case TARGET_NR_getcpu:
10553 unsigned cpu, node;
10554 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10555 arg2 ? &node : NULL,
10556 NULL));
10557 if (is_error(ret)) {
10558 goto fail;
10560 if (arg1 && put_user_u32(cpu, arg1)) {
10561 goto efault;
10563 if (arg2 && put_user_u32(node, arg2)) {
10564 goto efault;
10567 break;
10568 case TARGET_NR_sched_setparam:
10570 struct sched_param *target_schp;
10571 struct sched_param schp;
10573 if (arg2 == 0) {
10574 return -TARGET_EINVAL;
10576 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10577 goto efault;
10578 schp.sched_priority = tswap32(target_schp->sched_priority);
10579 unlock_user_struct(target_schp, arg2, 0);
10580 ret = get_errno(sched_setparam(arg1, &schp));
10582 break;
10583 case TARGET_NR_sched_getparam:
10585 struct sched_param *target_schp;
10586 struct sched_param schp;
10588 if (arg2 == 0) {
10589 return -TARGET_EINVAL;
10591 ret = get_errno(sched_getparam(arg1, &schp));
10592 if (!is_error(ret)) {
10593 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10594 goto efault;
10595 target_schp->sched_priority = tswap32(schp.sched_priority);
10596 unlock_user_struct(target_schp, arg2, 1);
10599 break;
10600 case TARGET_NR_sched_setscheduler:
10602 struct sched_param *target_schp;
10603 struct sched_param schp;
10604 if (arg3 == 0) {
10605 return -TARGET_EINVAL;
10607 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10608 goto efault;
10609 schp.sched_priority = tswap32(target_schp->sched_priority);
10610 unlock_user_struct(target_schp, arg3, 0);
10611 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
10613 break;
10614 case TARGET_NR_sched_getscheduler:
10615 ret = get_errno(sched_getscheduler(arg1));
10616 break;
10617 case TARGET_NR_sched_yield:
10618 ret = get_errno(sched_yield());
10619 break;
10620 case TARGET_NR_sched_get_priority_max:
10621 ret = get_errno(sched_get_priority_max(arg1));
10622 break;
10623 case TARGET_NR_sched_get_priority_min:
10624 ret = get_errno(sched_get_priority_min(arg1));
10625 break;
10626 case TARGET_NR_sched_rr_get_interval:
10628 struct timespec ts;
10629 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10630 if (!is_error(ret)) {
10631 ret = host_to_target_timespec(arg2, &ts);
10634 break;
10635 case TARGET_NR_nanosleep:
10637 struct timespec req, rem;
10638 target_to_host_timespec(&req, arg1);
10639 ret = get_errno(safe_nanosleep(&req, &rem));
10640 if (is_error(ret) && arg2) {
10641 host_to_target_timespec(arg2, &rem);
10644 break;
10645 #ifdef TARGET_NR_query_module
10646 case TARGET_NR_query_module:
10647 goto unimplemented;
10648 #endif
10649 #ifdef TARGET_NR_nfsservctl
10650 case TARGET_NR_nfsservctl:
10651 goto unimplemented;
10652 #endif
10653 case TARGET_NR_prctl:
10654 switch (arg1) {
10655 case PR_GET_PDEATHSIG:
10657 int deathsig;
10658 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10659 if (!is_error(ret) && arg2
10660 && put_user_ual(deathsig, arg2)) {
10661 goto efault;
10663 break;
10665 #ifdef PR_GET_NAME
10666 case PR_GET_NAME:
10668 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10669 if (!name) {
10670 goto efault;
10672 ret = get_errno(prctl(arg1, (unsigned long)name,
10673 arg3, arg4, arg5));
10674 unlock_user(name, arg2, 16);
10675 break;
10677 case PR_SET_NAME:
10679 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10680 if (!name) {
10681 goto efault;
10683 ret = get_errno(prctl(arg1, (unsigned long)name,
10684 arg3, arg4, arg5));
10685 unlock_user(name, arg2, 0);
10686 break;
10688 #endif
10689 #ifdef TARGET_AARCH64
10690 case TARGET_PR_SVE_SET_VL:
10691 /* We cannot support either PR_SVE_SET_VL_ONEXEC
10692 or PR_SVE_VL_INHERIT. Therefore, anything above
10693 ARM_MAX_VQ results in EINVAL. */
10694 ret = -TARGET_EINVAL;
10695 if (arm_feature(cpu_env, ARM_FEATURE_SVE)
10696 && arg2 >= 0 && arg2 <= ARM_MAX_VQ * 16 && !(arg2 & 15)) {
10697 CPUARMState *env = cpu_env;
10698 int old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10699 int vq = MAX(arg2 / 16, 1);
10701 if (vq < old_vq) {
10702 aarch64_sve_narrow_vq(env, vq);
10704 env->vfp.zcr_el[1] = vq - 1;
10705 ret = vq * 16;
10707 break;
10708 case TARGET_PR_SVE_GET_VL:
10709 ret = -TARGET_EINVAL;
10710 if (arm_feature(cpu_env, ARM_FEATURE_SVE)) {
10711 CPUARMState *env = cpu_env;
10712 ret = ((env->vfp.zcr_el[1] & 0xf) + 1) * 16;
10714 break;
10715 #endif /* AARCH64 */
10716 case PR_GET_SECCOMP:
10717 case PR_SET_SECCOMP:
10718 /* Disable seccomp to prevent the target disabling syscalls we
10719 * need. */
10720 ret = -TARGET_EINVAL;
10721 break;
10722 default:
10723 /* Most prctl options have no pointer arguments */
10724 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10725 break;
10727 break;
10728 #ifdef TARGET_NR_arch_prctl
10729 case TARGET_NR_arch_prctl:
10730 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10731 ret = do_arch_prctl(cpu_env, arg1, arg2);
10732 break;
10733 #else
10734 goto unimplemented;
10735 #endif
10736 #endif
10737 #ifdef TARGET_NR_pread64
10738 case TARGET_NR_pread64:
10739 if (regpairs_aligned(cpu_env, num)) {
10740 arg4 = arg5;
10741 arg5 = arg6;
10743 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
10744 goto efault;
10745 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10746 unlock_user(p, arg2, ret);
10747 break;
10748 case TARGET_NR_pwrite64:
10749 if (regpairs_aligned(cpu_env, num)) {
10750 arg4 = arg5;
10751 arg5 = arg6;
10753 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
10754 goto efault;
10755 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10756 unlock_user(p, arg2, 0);
10757 break;
10758 #endif
10759 case TARGET_NR_getcwd:
10760 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10761 goto efault;
10762 ret = get_errno(sys_getcwd1(p, arg2));
10763 unlock_user(p, arg1, ret);
10764 break;
10765 case TARGET_NR_capget:
10766 case TARGET_NR_capset:
10768 struct target_user_cap_header *target_header;
10769 struct target_user_cap_data *target_data = NULL;
10770 struct __user_cap_header_struct header;
10771 struct __user_cap_data_struct data[2];
10772 struct __user_cap_data_struct *dataptr = NULL;
10773 int i, target_datalen;
10774 int data_items = 1;
10776 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10777 goto efault;
10779 header.version = tswap32(target_header->version);
10780 header.pid = tswap32(target_header->pid);
10782 if (header.version != _LINUX_CAPABILITY_VERSION) {
10783 /* Version 2 and up takes pointer to two user_data structs */
10784 data_items = 2;
10787 target_datalen = sizeof(*target_data) * data_items;
10789 if (arg2) {
10790 if (num == TARGET_NR_capget) {
10791 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10792 } else {
10793 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10795 if (!target_data) {
10796 unlock_user_struct(target_header, arg1, 0);
10797 goto efault;
10800 if (num == TARGET_NR_capset) {
10801 for (i = 0; i < data_items; i++) {
10802 data[i].effective = tswap32(target_data[i].effective);
10803 data[i].permitted = tswap32(target_data[i].permitted);
10804 data[i].inheritable = tswap32(target_data[i].inheritable);
10808 dataptr = data;
10811 if (num == TARGET_NR_capget) {
10812 ret = get_errno(capget(&header, dataptr));
10813 } else {
10814 ret = get_errno(capset(&header, dataptr));
10817 /* The kernel always updates version for both capget and capset */
10818 target_header->version = tswap32(header.version);
10819 unlock_user_struct(target_header, arg1, 1);
10821 if (arg2) {
10822 if (num == TARGET_NR_capget) {
10823 for (i = 0; i < data_items; i++) {
10824 target_data[i].effective = tswap32(data[i].effective);
10825 target_data[i].permitted = tswap32(data[i].permitted);
10826 target_data[i].inheritable = tswap32(data[i].inheritable);
10828 unlock_user(target_data, arg2, target_datalen);
10829 } else {
10830 unlock_user(target_data, arg2, 0);
10833 break;
10835 case TARGET_NR_sigaltstack:
10836 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
10837 break;
10839 #ifdef CONFIG_SENDFILE
10840 case TARGET_NR_sendfile:
10842 off_t *offp = NULL;
10843 off_t off;
10844 if (arg3) {
10845 ret = get_user_sal(off, arg3);
10846 if (is_error(ret)) {
10847 break;
10849 offp = &off;
10851 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10852 if (!is_error(ret) && arg3) {
10853 abi_long ret2 = put_user_sal(off, arg3);
10854 if (is_error(ret2)) {
10855 ret = ret2;
10858 break;
10860 #ifdef TARGET_NR_sendfile64
10861 case TARGET_NR_sendfile64:
10863 off_t *offp = NULL;
10864 off_t off;
10865 if (arg3) {
10866 ret = get_user_s64(off, arg3);
10867 if (is_error(ret)) {
10868 break;
10870 offp = &off;
10872 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10873 if (!is_error(ret) && arg3) {
10874 abi_long ret2 = put_user_s64(off, arg3);
10875 if (is_error(ret2)) {
10876 ret = ret2;
10879 break;
10881 #endif
10882 #else
10883 case TARGET_NR_sendfile:
10884 #ifdef TARGET_NR_sendfile64
10885 case TARGET_NR_sendfile64:
10886 #endif
10887 goto unimplemented;
10888 #endif
10890 #ifdef TARGET_NR_getpmsg
10891 case TARGET_NR_getpmsg:
10892 goto unimplemented;
10893 #endif
10894 #ifdef TARGET_NR_putpmsg
10895 case TARGET_NR_putpmsg:
10896 goto unimplemented;
10897 #endif
10898 #ifdef TARGET_NR_vfork
10899 case TARGET_NR_vfork:
10900 ret = get_errno(do_fork(cpu_env,
10901 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10902 0, 0, 0, 0));
10903 break;
10904 #endif
#ifdef TARGET_NR_ugetrlimit
case TARGET_NR_ugetrlimit:
    {
        struct rlimit rlim;
        int resource = target_to_host_resource(arg1);
        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                goto efault;
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
        }
        break;
    }
#endif
#ifdef TARGET_NR_truncate64
case TARGET_NR_truncate64:
    if (!(p = lock_user_string(arg1)))
        goto efault;
    ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
    unlock_user(p, arg1, 0);
    break;
#endif
#ifdef TARGET_NR_ftruncate64
case TARGET_NR_ftruncate64:
    ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
    break;
#endif
#ifdef TARGET_NR_stat64
case TARGET_NR_stat64:
    if (!(p = lock_user_string(arg1)))
        goto efault;
    ret = get_errno(stat(path(p), &st));
    unlock_user(p, arg1, 0);
    if (!is_error(ret))
        ret = host_to_target_stat64(cpu_env, arg2, &st);
    break;
#endif
#ifdef TARGET_NR_lstat64
case TARGET_NR_lstat64:
    if (!(p = lock_user_string(arg1)))
        goto efault;
    ret = get_errno(lstat(path(p), &st));
    unlock_user(p, arg1, 0);
    if (!is_error(ret))
        ret = host_to_target_stat64(cpu_env, arg2, &st);
    break;
#endif
#ifdef TARGET_NR_fstat64
case TARGET_NR_fstat64:
    ret = get_errno(fstat(arg1, &st));
    if (!is_error(ret))
        ret = host_to_target_stat64(cpu_env, arg2, &st);
    break;
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
#ifdef TARGET_NR_fstatat64
case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
case TARGET_NR_newfstatat:
#endif
    if (!(p = lock_user_string(arg2)))
        goto efault;
    ret = get_errno(fstatat(arg1, path(p), &st, arg4));
    if (!is_error(ret))
        ret = host_to_target_stat64(cpu_env, arg3, &st);
    break;
#endif
10976 #ifdef TARGET_NR_lchown
10977 case TARGET_NR_lchown:
10978 if (!(p = lock_user_string(arg1)))
10979 goto efault;
10980 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10981 unlock_user(p, arg1, 0);
10982 break;
10983 #endif
10984 #ifdef TARGET_NR_getuid
10985 case TARGET_NR_getuid:
10986 ret = get_errno(high2lowuid(getuid()));
10987 break;
10988 #endif
10989 #ifdef TARGET_NR_getgid
10990 case TARGET_NR_getgid:
10991 ret = get_errno(high2lowgid(getgid()));
10992 break;
10993 #endif
10994 #ifdef TARGET_NR_geteuid
10995 case TARGET_NR_geteuid:
10996 ret = get_errno(high2lowuid(geteuid()));
10997 break;
10998 #endif
10999 #ifdef TARGET_NR_getegid
11000 case TARGET_NR_getegid:
11001 ret = get_errno(high2lowgid(getegid()));
11002 break;
11003 #endif
11004 case TARGET_NR_setreuid:
11005 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11006 break;
11007 case TARGET_NR_setregid:
11008 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11009 break;
11010 case TARGET_NR_getgroups:
11012 int gidsetsize = arg1;
11013 target_id *target_grouplist;
11014 gid_t *grouplist;
11015 int i;
11017 grouplist = alloca(gidsetsize * sizeof(gid_t));
11018 ret = get_errno(getgroups(gidsetsize, grouplist));
11019 if (gidsetsize == 0)
11020 break;
11021 if (!is_error(ret)) {
11022 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11023 if (!target_grouplist)
11024 goto efault;
11025 for(i = 0;i < ret; i++)
11026 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11027 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11030 break;
11031 case TARGET_NR_setgroups:
11033 int gidsetsize = arg1;
11034 target_id *target_grouplist;
11035 gid_t *grouplist = NULL;
11036 int i;
11037 if (gidsetsize) {
11038 grouplist = alloca(gidsetsize * sizeof(gid_t));
11039 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11040 if (!target_grouplist) {
11041 ret = -TARGET_EFAULT;
11042 goto fail;
11044 for (i = 0; i < gidsetsize; i++) {
11045 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11047 unlock_user(target_grouplist, arg2, 0);
11049 ret = get_errno(setgroups(gidsetsize, grouplist));
11051 break;
11052 case TARGET_NR_fchown:
11053 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11054 break;
11055 #if defined(TARGET_NR_fchownat)
11056 case TARGET_NR_fchownat:
11057 if (!(p = lock_user_string(arg2)))
11058 goto efault;
11059 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11060 low2highgid(arg4), arg5));
11061 unlock_user(p, arg2, 0);
11062 break;
11063 #endif
11064 #ifdef TARGET_NR_setresuid
11065 case TARGET_NR_setresuid:
11066 ret = get_errno(sys_setresuid(low2highuid(arg1),
11067 low2highuid(arg2),
11068 low2highuid(arg3)));
11069 break;
11070 #endif
11071 #ifdef TARGET_NR_getresuid
11072 case TARGET_NR_getresuid:
11074 uid_t ruid, euid, suid;
11075 ret = get_errno(getresuid(&ruid, &euid, &suid));
11076 if (!is_error(ret)) {
11077 if (put_user_id(high2lowuid(ruid), arg1)
11078 || put_user_id(high2lowuid(euid), arg2)
11079 || put_user_id(high2lowuid(suid), arg3))
11080 goto efault;
11083 break;
11084 #endif
11085 #ifdef TARGET_NR_getresgid
11086 case TARGET_NR_setresgid:
11087 ret = get_errno(sys_setresgid(low2highgid(arg1),
11088 low2highgid(arg2),
11089 low2highgid(arg3)));
11090 break;
11091 #endif
11092 #ifdef TARGET_NR_getresgid
11093 case TARGET_NR_getresgid:
11095 gid_t rgid, egid, sgid;
11096 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11097 if (!is_error(ret)) {
11098 if (put_user_id(high2lowgid(rgid), arg1)
11099 || put_user_id(high2lowgid(egid), arg2)
11100 || put_user_id(high2lowgid(sgid), arg3))
11101 goto efault;
11104 break;
11105 #endif
11106 #ifdef TARGET_NR_chown
11107 case TARGET_NR_chown:
11108 if (!(p = lock_user_string(arg1)))
11109 goto efault;
11110 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11111 unlock_user(p, arg1, 0);
11112 break;
11113 #endif
11114 case TARGET_NR_setuid:
11115 ret = get_errno(sys_setuid(low2highuid(arg1)));
11116 break;
11117 case TARGET_NR_setgid:
11118 ret = get_errno(sys_setgid(low2highgid(arg1)));
11119 break;
11120 case TARGET_NR_setfsuid:
11121 ret = get_errno(setfsuid(arg1));
11122 break;
11123 case TARGET_NR_setfsgid:
11124 ret = get_errno(setfsgid(arg1));
11125 break;
#ifdef TARGET_NR_lchown32
case TARGET_NR_lchown32:
    /* The *32 variants take full-width uids; no low2high conversion */
    if (!(p = lock_user_string(arg1)))
        goto efault;
    ret = get_errno(lchown(p, arg2, arg3));
    unlock_user(p, arg1, 0);
    break;
#endif
#ifdef TARGET_NR_getuid32
case TARGET_NR_getuid32:
    ret = get_errno(getuid());
    break;
#endif

#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
/* Alpha specific */
case TARGET_NR_getxuid:
    {
        /* Alpha returns the effective uid in a second register (A4) */
        uid_t euid;
        euid=geteuid();
        ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
    }
    ret = get_errno(getuid());
    break;
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
/* Alpha specific */
case TARGET_NR_getxgid:
    {
        uid_t egid;
        egid=getegid();
        ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
    }
    ret = get_errno(getgid());
    break;
#endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
/* Alpha specific */
case TARGET_NR_osf_getsysinfo:
    ret = -TARGET_EOPNOTSUPP;
    switch (arg1) {
    case TARGET_GSI_IEEE_FP_CONTROL:
        {
            uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);

            /* Copied from linux ieee_fpcr_to_swcr.  */
            swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
            swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
            swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
                                     | SWCR_TRAP_ENABLE_DZE
                                     | SWCR_TRAP_ENABLE_OVF);
            swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
                                     | SWCR_TRAP_ENABLE_INE);
            swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
            swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;

            if (put_user_u64 (swcr, arg2))
                goto efault;
            ret = 0;
        }
        break;

    /* case GSI_IEEE_STATE_AT_SIGNAL:
       -- Not implemented in linux kernel.
       case GSI_UACPROC:
       -- Retrieves current unaligned access state; not much used.
       case GSI_PROC_TYPE:
       -- Retrieves implver information; surely not used.
       case GSI_GET_HWRPB:
       -- Grabs a copy of the HWRPB; surely not used.
    */
    }
    break;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
/* Alpha specific */
case TARGET_NR_osf_setsysinfo:
    ret = -TARGET_EOPNOTSUPP;
    switch (arg1) {
    case TARGET_SSI_IEEE_FP_CONTROL:
        {
            uint64_t swcr, fpcr, orig_fpcr;

            if (get_user_u64 (swcr, arg2)) {
                goto efault;
            }
            orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
            fpcr = orig_fpcr & FPCR_DYN_MASK;

            /* Copied from linux ieee_swcr_to_fpcr.  */
            fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
            fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
            fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
                              | SWCR_TRAP_ENABLE_DZE
                              | SWCR_TRAP_ENABLE_OVF)) << 48;
            fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
                              | SWCR_TRAP_ENABLE_INE)) << 57;
            fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
            fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;

            cpu_alpha_store_fpcr(cpu_env, fpcr);
            ret = 0;
        }
        break;

    case TARGET_SSI_IEEE_RAISE_EXCEPTION:
        {
            uint64_t exc, fpcr, orig_fpcr;
            int si_code;

            if (get_user_u64(exc, arg2)) {
                goto efault;
            }

            orig_fpcr = cpu_alpha_load_fpcr(cpu_env);

            /* We only add to the exception status here.  */
            fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);

            cpu_alpha_store_fpcr(cpu_env, fpcr);
            ret = 0;

            /* Old exceptions are not signaled.  */
            fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);

            /* If any exceptions set by this call,
               and are unmasked, send a signal.  */
            si_code = 0;
            if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
                si_code = TARGET_FPE_FLTRES;
            }
            if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
                si_code = TARGET_FPE_FLTUND;
            }
            if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
                si_code = TARGET_FPE_FLTOVF;
            }
            if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
                si_code = TARGET_FPE_FLTDIV;
            }
            if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
                si_code = TARGET_FPE_FLTINV;
            }
            if (si_code != 0) {
                target_siginfo_t info;
                info.si_signo = SIGFPE;
                info.si_errno = 0;
                info.si_code = si_code;
                info._sifields._sigfault._addr
                    = ((CPUArchState *)cpu_env)->pc;
                queue_signal((CPUArchState *)cpu_env, info.si_signo,
                             QEMU_SI_FAULT, &info);
            }
        }
        break;

    /* case SSI_NVPAIRS:
       -- Used with SSIN_UACPROC to enable unaligned accesses.
       case SSI_IEEE_STATE_AT_SIGNAL:
       case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
       -- Not implemented in linux kernel
    */
    }
    break;
#endif
11292 #ifdef TARGET_NR_osf_sigprocmask
11293 /* Alpha specific. */
11294 case TARGET_NR_osf_sigprocmask:
11296 abi_ulong mask;
11297 int how;
11298 sigset_t set, oldset;
11300 switch(arg1) {
11301 case TARGET_SIG_BLOCK:
11302 how = SIG_BLOCK;
11303 break;
11304 case TARGET_SIG_UNBLOCK:
11305 how = SIG_UNBLOCK;
11306 break;
11307 case TARGET_SIG_SETMASK:
11308 how = SIG_SETMASK;
11309 break;
11310 default:
11311 ret = -TARGET_EINVAL;
11312 goto fail;
11314 mask = arg2;
11315 target_to_host_old_sigset(&set, &mask);
11316 ret = do_sigprocmask(how, &set, &oldset);
11317 if (!ret) {
11318 host_to_target_old_sigset(&mask, &oldset);
11319 ret = mask;
11322 break;
11323 #endif
11325 #ifdef TARGET_NR_getgid32
11326 case TARGET_NR_getgid32:
11327 ret = get_errno(getgid());
11328 break;
11329 #endif
11330 #ifdef TARGET_NR_geteuid32
11331 case TARGET_NR_geteuid32:
11332 ret = get_errno(geteuid());
11333 break;
11334 #endif
11335 #ifdef TARGET_NR_getegid32
11336 case TARGET_NR_getegid32:
11337 ret = get_errno(getegid());
11338 break;
11339 #endif
11340 #ifdef TARGET_NR_setreuid32
11341 case TARGET_NR_setreuid32:
11342 ret = get_errno(setreuid(arg1, arg2));
11343 break;
11344 #endif
11345 #ifdef TARGET_NR_setregid32
11346 case TARGET_NR_setregid32:
11347 ret = get_errno(setregid(arg1, arg2));
11348 break;
11349 #endif
11350 #ifdef TARGET_NR_getgroups32
11351 case TARGET_NR_getgroups32:
11353 int gidsetsize = arg1;
11354 uint32_t *target_grouplist;
11355 gid_t *grouplist;
11356 int i;
11358 grouplist = alloca(gidsetsize * sizeof(gid_t));
11359 ret = get_errno(getgroups(gidsetsize, grouplist));
11360 if (gidsetsize == 0)
11361 break;
11362 if (!is_error(ret)) {
11363 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11364 if (!target_grouplist) {
11365 ret = -TARGET_EFAULT;
11366 goto fail;
11368 for(i = 0;i < ret; i++)
11369 target_grouplist[i] = tswap32(grouplist[i]);
11370 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11373 break;
11374 #endif
11375 #ifdef TARGET_NR_setgroups32
11376 case TARGET_NR_setgroups32:
11378 int gidsetsize = arg1;
11379 uint32_t *target_grouplist;
11380 gid_t *grouplist;
11381 int i;
11383 grouplist = alloca(gidsetsize * sizeof(gid_t));
11384 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11385 if (!target_grouplist) {
11386 ret = -TARGET_EFAULT;
11387 goto fail;
11389 for(i = 0;i < gidsetsize; i++)
11390 grouplist[i] = tswap32(target_grouplist[i]);
11391 unlock_user(target_grouplist, arg2, 0);
11392 ret = get_errno(setgroups(gidsetsize, grouplist));
11394 break;
11395 #endif
11396 #ifdef TARGET_NR_fchown32
11397 case TARGET_NR_fchown32:
11398 ret = get_errno(fchown(arg1, arg2, arg3));
11399 break;
11400 #endif
11401 #ifdef TARGET_NR_setresuid32
11402 case TARGET_NR_setresuid32:
11403 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
11404 break;
11405 #endif
11406 #ifdef TARGET_NR_getresuid32
11407 case TARGET_NR_getresuid32:
11409 uid_t ruid, euid, suid;
11410 ret = get_errno(getresuid(&ruid, &euid, &suid));
11411 if (!is_error(ret)) {
11412 if (put_user_u32(ruid, arg1)
11413 || put_user_u32(euid, arg2)
11414 || put_user_u32(suid, arg3))
11415 goto efault;
11418 break;
11419 #endif
11420 #ifdef TARGET_NR_setresgid32
11421 case TARGET_NR_setresgid32:
11422 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
11423 break;
11424 #endif
11425 #ifdef TARGET_NR_getresgid32
11426 case TARGET_NR_getresgid32:
11428 gid_t rgid, egid, sgid;
11429 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11430 if (!is_error(ret)) {
11431 if (put_user_u32(rgid, arg1)
11432 || put_user_u32(egid, arg2)
11433 || put_user_u32(sgid, arg3))
11434 goto efault;
11437 break;
11438 #endif
11439 #ifdef TARGET_NR_chown32
11440 case TARGET_NR_chown32:
11441 if (!(p = lock_user_string(arg1)))
11442 goto efault;
11443 ret = get_errno(chown(p, arg2, arg3));
11444 unlock_user(p, arg1, 0);
11445 break;
11446 #endif
11447 #ifdef TARGET_NR_setuid32
11448 case TARGET_NR_setuid32:
11449 ret = get_errno(sys_setuid(arg1));
11450 break;
11451 #endif
11452 #ifdef TARGET_NR_setgid32
11453 case TARGET_NR_setgid32:
11454 ret = get_errno(sys_setgid(arg1));
11455 break;
11456 #endif
11457 #ifdef TARGET_NR_setfsuid32
11458 case TARGET_NR_setfsuid32:
11459 ret = get_errno(setfsuid(arg1));
11460 break;
11461 #endif
11462 #ifdef TARGET_NR_setfsgid32
11463 case TARGET_NR_setfsgid32:
11464 ret = get_errno(setfsgid(arg1));
11465 break;
11466 #endif
11468 case TARGET_NR_pivot_root:
11469 goto unimplemented;
11470 #ifdef TARGET_NR_mincore
11471 case TARGET_NR_mincore:
11473 void *a;
11474 ret = -TARGET_ENOMEM;
11475 a = lock_user(VERIFY_READ, arg1, arg2, 0);
11476 if (!a) {
11477 goto fail;
11479 ret = -TARGET_EFAULT;
11480 p = lock_user_string(arg3);
11481 if (!p) {
11482 goto mincore_fail;
11484 ret = get_errno(mincore(a, arg2, p));
11485 unlock_user(p, arg3, ret);
11486 mincore_fail:
11487 unlock_user(a, arg1, 0);
11489 break;
11490 #endif
11491 #ifdef TARGET_NR_arm_fadvise64_64
11492 case TARGET_NR_arm_fadvise64_64:
11493 /* arm_fadvise64_64 looks like fadvise64_64 but
11494 * with different argument order: fd, advice, offset, len
11495 * rather than the usual fd, offset, len, advice.
11496 * Note that offset and len are both 64-bit so appear as
11497 * pairs of 32-bit registers.
11499 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11500 target_offset64(arg5, arg6), arg2);
11501 ret = -host_to_target_errno(ret);
11502 break;
11503 #endif
11505 #if TARGET_ABI_BITS == 32
11507 #ifdef TARGET_NR_fadvise64_64
11508 case TARGET_NR_fadvise64_64:
11509 #if defined(TARGET_PPC)
11510 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11511 ret = arg2;
11512 arg2 = arg3;
11513 arg3 = arg4;
11514 arg4 = arg5;
11515 arg5 = arg6;
11516 arg6 = ret;
11517 #else
11518 /* 6 args: fd, offset (high, low), len (high, low), advice */
11519 if (regpairs_aligned(cpu_env, num)) {
11520 /* offset is in (3,4), len in (5,6) and advice in 7 */
11521 arg2 = arg3;
11522 arg3 = arg4;
11523 arg4 = arg5;
11524 arg5 = arg6;
11525 arg6 = arg7;
11527 #endif
11528 ret = -host_to_target_errno(posix_fadvise(arg1,
11529 target_offset64(arg2, arg3),
11530 target_offset64(arg4, arg5),
11531 arg6));
11532 break;
11533 #endif
11535 #ifdef TARGET_NR_fadvise64
11536 case TARGET_NR_fadvise64:
11537 /* 5 args: fd, offset (high, low), len, advice */
11538 if (regpairs_aligned(cpu_env, num)) {
11539 /* offset is in (3,4), len in 5 and advice in 6 */
11540 arg2 = arg3;
11541 arg3 = arg4;
11542 arg4 = arg5;
11543 arg5 = arg6;
11545 ret = -host_to_target_errno(posix_fadvise(arg1,
11546 target_offset64(arg2, arg3),
11547 arg4, arg5));
11548 break;
11549 #endif
11551 #else /* not a 32-bit ABI */
11552 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11553 #ifdef TARGET_NR_fadvise64_64
11554 case TARGET_NR_fadvise64_64:
11555 #endif
11556 #ifdef TARGET_NR_fadvise64
11557 case TARGET_NR_fadvise64:
11558 #endif
11559 #ifdef TARGET_S390X
11560 switch (arg4) {
11561 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11562 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11563 case 6: arg4 = POSIX_FADV_DONTNEED; break;
11564 case 7: arg4 = POSIX_FADV_NOREUSE; break;
11565 default: break;
11567 #endif
11568 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11569 break;
11570 #endif
11571 #endif /* end of 64-bit ABI fadvise handling */
11573 #ifdef TARGET_NR_madvise
11574 case TARGET_NR_madvise:
11575 /* A straight passthrough may not be safe because qemu sometimes
11576 turns private file-backed mappings into anonymous mappings.
11577 This will break MADV_DONTNEED.
11578 This is a hint, so ignoring and returning success is ok. */
11579 ret = get_errno(0);
11580 break;
11581 #endif
11582 #if TARGET_ABI_BITS == 32
11583 case TARGET_NR_fcntl64:
11585 int cmd;
11586 struct flock64 fl;
11587 from_flock64_fn *copyfrom = copy_from_user_flock64;
11588 to_flock64_fn *copyto = copy_to_user_flock64;
11590 #ifdef TARGET_ARM
11591 if (((CPUARMState *)cpu_env)->eabi) {
11592 copyfrom = copy_from_user_eabi_flock64;
11593 copyto = copy_to_user_eabi_flock64;
11595 #endif
11597 cmd = target_to_host_fcntl_cmd(arg2);
11598 if (cmd == -TARGET_EINVAL) {
11599 ret = cmd;
11600 break;
11603 switch(arg2) {
11604 case TARGET_F_GETLK64:
11605 ret = copyfrom(&fl, arg3);
11606 if (ret) {
11607 break;
11609 ret = get_errno(fcntl(arg1, cmd, &fl));
11610 if (ret == 0) {
11611 ret = copyto(arg3, &fl);
11613 break;
11615 case TARGET_F_SETLK64:
11616 case TARGET_F_SETLKW64:
11617 ret = copyfrom(&fl, arg3);
11618 if (ret) {
11619 break;
11621 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11622 break;
11623 default:
11624 ret = do_fcntl(arg1, arg2, arg3);
11625 break;
11627 break;
11629 #endif
11630 #ifdef TARGET_NR_cacheflush
11631 case TARGET_NR_cacheflush:
11632 /* self-modifying code is handled automatically, so nothing needed */
11633 ret = 0;
11634 break;
11635 #endif
11636 #ifdef TARGET_NR_security
11637 case TARGET_NR_security:
11638 goto unimplemented;
11639 #endif
11640 #ifdef TARGET_NR_getpagesize
11641 case TARGET_NR_getpagesize:
11642 ret = TARGET_PAGE_SIZE;
11643 break;
11644 #endif
11645 case TARGET_NR_gettid:
11646 ret = get_errno(gettid());
11647 break;
11648 #ifdef TARGET_NR_readahead
11649 case TARGET_NR_readahead:
11650 #if TARGET_ABI_BITS == 32
11651 if (regpairs_aligned(cpu_env, num)) {
11652 arg2 = arg3;
11653 arg3 = arg4;
11654 arg4 = arg5;
11656 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11657 #else
11658 ret = get_errno(readahead(arg1, arg2, arg3));
11659 #endif
11660 break;
11661 #endif
11662 #ifdef CONFIG_ATTR
11663 #ifdef TARGET_NR_setxattr
11664 case TARGET_NR_listxattr:
11665 case TARGET_NR_llistxattr:
11667 void *p, *b = 0;
11668 if (arg2) {
11669 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11670 if (!b) {
11671 ret = -TARGET_EFAULT;
11672 break;
11675 p = lock_user_string(arg1);
11676 if (p) {
11677 if (num == TARGET_NR_listxattr) {
11678 ret = get_errno(listxattr(p, b, arg3));
11679 } else {
11680 ret = get_errno(llistxattr(p, b, arg3));
11682 } else {
11683 ret = -TARGET_EFAULT;
11685 unlock_user(p, arg1, 0);
11686 unlock_user(b, arg2, arg3);
11687 break;
11689 case TARGET_NR_flistxattr:
11691 void *b = 0;
11692 if (arg2) {
11693 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11694 if (!b) {
11695 ret = -TARGET_EFAULT;
11696 break;
11699 ret = get_errno(flistxattr(arg1, b, arg3));
11700 unlock_user(b, arg2, arg3);
11701 break;
11703 case TARGET_NR_setxattr:
11704 case TARGET_NR_lsetxattr:
11706 void *p, *n, *v = 0;
11707 if (arg3) {
11708 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11709 if (!v) {
11710 ret = -TARGET_EFAULT;
11711 break;
11714 p = lock_user_string(arg1);
11715 n = lock_user_string(arg2);
11716 if (p && n) {
11717 if (num == TARGET_NR_setxattr) {
11718 ret = get_errno(setxattr(p, n, v, arg4, arg5));
11719 } else {
11720 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11722 } else {
11723 ret = -TARGET_EFAULT;
11725 unlock_user(p, arg1, 0);
11726 unlock_user(n, arg2, 0);
11727 unlock_user(v, arg3, 0);
11729 break;
11730 case TARGET_NR_fsetxattr:
11732 void *n, *v = 0;
11733 if (arg3) {
11734 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11735 if (!v) {
11736 ret = -TARGET_EFAULT;
11737 break;
11740 n = lock_user_string(arg2);
11741 if (n) {
11742 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11743 } else {
11744 ret = -TARGET_EFAULT;
11746 unlock_user(n, arg2, 0);
11747 unlock_user(v, arg3, 0);
11749 break;
11750 case TARGET_NR_getxattr:
11751 case TARGET_NR_lgetxattr:
11753 void *p, *n, *v = 0;
11754 if (arg3) {
11755 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11756 if (!v) {
11757 ret = -TARGET_EFAULT;
11758 break;
11761 p = lock_user_string(arg1);
11762 n = lock_user_string(arg2);
11763 if (p && n) {
11764 if (num == TARGET_NR_getxattr) {
11765 ret = get_errno(getxattr(p, n, v, arg4));
11766 } else {
11767 ret = get_errno(lgetxattr(p, n, v, arg4));
11769 } else {
11770 ret = -TARGET_EFAULT;
11772 unlock_user(p, arg1, 0);
11773 unlock_user(n, arg2, 0);
11774 unlock_user(v, arg3, arg4);
11776 break;
11777 case TARGET_NR_fgetxattr:
11779 void *n, *v = 0;
11780 if (arg3) {
11781 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11782 if (!v) {
11783 ret = -TARGET_EFAULT;
11784 break;
11787 n = lock_user_string(arg2);
11788 if (n) {
11789 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11790 } else {
11791 ret = -TARGET_EFAULT;
11793 unlock_user(n, arg2, 0);
11794 unlock_user(v, arg3, arg4);
11796 break;
11797 case TARGET_NR_removexattr:
11798 case TARGET_NR_lremovexattr:
11800 void *p, *n;
11801 p = lock_user_string(arg1);
11802 n = lock_user_string(arg2);
11803 if (p && n) {
11804 if (num == TARGET_NR_removexattr) {
11805 ret = get_errno(removexattr(p, n));
11806 } else {
11807 ret = get_errno(lremovexattr(p, n));
11809 } else {
11810 ret = -TARGET_EFAULT;
11812 unlock_user(p, arg1, 0);
11813 unlock_user(n, arg2, 0);
11815 break;
11816 case TARGET_NR_fremovexattr:
11818 void *n;
11819 n = lock_user_string(arg2);
11820 if (n) {
11821 ret = get_errno(fremovexattr(arg1, n));
11822 } else {
11823 ret = -TARGET_EFAULT;
11825 unlock_user(n, arg2, 0);
11827 break;
11828 #endif
11829 #endif /* CONFIG_ATTR */
11830 #ifdef TARGET_NR_set_thread_area
11831 case TARGET_NR_set_thread_area:
11832 #if defined(TARGET_MIPS)
11833 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11834 ret = 0;
11835 break;
11836 #elif defined(TARGET_CRIS)
11837 if (arg1 & 0xff)
11838 ret = -TARGET_EINVAL;
11839 else {
11840 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11841 ret = 0;
11843 break;
11844 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11845 ret = do_set_thread_area(cpu_env, arg1);
11846 break;
11847 #elif defined(TARGET_M68K)
11849 TaskState *ts = cpu->opaque;
11850 ts->tp_value = arg1;
11851 ret = 0;
11852 break;
11854 #else
11855 goto unimplemented_nowarn;
11856 #endif
11857 #endif
11858 #ifdef TARGET_NR_get_thread_area
11859 case TARGET_NR_get_thread_area:
11860 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11861 ret = do_get_thread_area(cpu_env, arg1);
11862 break;
11863 #elif defined(TARGET_M68K)
11865 TaskState *ts = cpu->opaque;
11866 ret = ts->tp_value;
11867 break;
11869 #else
11870 goto unimplemented_nowarn;
11871 #endif
11872 #endif
11873 #ifdef TARGET_NR_getdomainname
11874 case TARGET_NR_getdomainname:
11875 goto unimplemented_nowarn;
11876 #endif
11878 #ifdef TARGET_NR_clock_gettime
11879 case TARGET_NR_clock_gettime:
11881 struct timespec ts;
11882 ret = get_errno(clock_gettime(arg1, &ts));
11883 if (!is_error(ret)) {
11884 host_to_target_timespec(arg2, &ts);
11886 break;
11888 #endif
11889 #ifdef TARGET_NR_clock_getres
11890 case TARGET_NR_clock_getres:
11892 struct timespec ts;
11893 ret = get_errno(clock_getres(arg1, &ts));
11894 if (!is_error(ret)) {
11895 host_to_target_timespec(arg2, &ts);
11897 break;
11899 #endif
11900 #ifdef TARGET_NR_clock_nanosleep
11901 case TARGET_NR_clock_nanosleep:
11903 struct timespec ts;
11904 target_to_host_timespec(&ts, arg3);
11905 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11906 &ts, arg4 ? &ts : NULL));
11907 if (arg4)
11908 host_to_target_timespec(arg4, &ts);
11910 #if defined(TARGET_PPC)
11911 /* clock_nanosleep is odd in that it returns positive errno values.
11912 * On PPC, CR0 bit 3 should be set in such a situation. */
11913 if (ret && ret != -TARGET_ERESTARTSYS) {
11914 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11916 #endif
11917 break;
11919 #endif
11921 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11922 case TARGET_NR_set_tid_address:
11923 ret = get_errno(set_tid_address((int *)g2h(arg1)));
11924 break;
11925 #endif
11927 case TARGET_NR_tkill:
11928 ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11929 break;
11931 case TARGET_NR_tgkill:
11932 ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
11933 target_to_host_signal(arg3)));
11934 break;
11936 #ifdef TARGET_NR_set_robust_list
11937 case TARGET_NR_set_robust_list:
11938 case TARGET_NR_get_robust_list:
11939 /* The ABI for supporting robust futexes has userspace pass
11940 * the kernel a pointer to a linked list which is updated by
11941 * userspace after the syscall; the list is walked by the kernel
11942 * when the thread exits. Since the linked list in QEMU guest
11943 * memory isn't a valid linked list for the host and we have
11944 * no way to reliably intercept the thread-death event, we can't
11945 * support these. Silently return ENOSYS so that guest userspace
11946 * falls back to a non-robust futex implementation (which should
11947 * be OK except in the corner case of the guest crashing while
11948 * holding a mutex that is shared with another process via
11949 * shared memory).
11951 goto unimplemented_nowarn;
11952 #endif
11954 #if defined(TARGET_NR_utimensat)
11955 case TARGET_NR_utimensat:
11957 struct timespec *tsp, ts[2];
11958 if (!arg3) {
11959 tsp = NULL;
11960 } else {
11961 target_to_host_timespec(ts, arg3);
11962 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11963 tsp = ts;
11965 if (!arg2)
11966 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11967 else {
11968 if (!(p = lock_user_string(arg2))) {
11969 ret = -TARGET_EFAULT;
11970 goto fail;
11972 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11973 unlock_user(p, arg2, 0);
11976 break;
11977 #endif
11978 case TARGET_NR_futex:
11979 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11980 break;
11981 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11982 case TARGET_NR_inotify_init:
11983 ret = get_errno(sys_inotify_init());
11984 if (ret >= 0) {
11985 fd_trans_register(ret, &target_inotify_trans);
11987 break;
11988 #endif
11989 #ifdef CONFIG_INOTIFY1
11990 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11991 case TARGET_NR_inotify_init1:
11992 ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11993 fcntl_flags_tbl)));
11994 if (ret >= 0) {
11995 fd_trans_register(ret, &target_inotify_trans);
11997 break;
11998 #endif
11999 #endif
12000 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12001 case TARGET_NR_inotify_add_watch:
12002 p = lock_user_string(arg2);
12003 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12004 unlock_user(p, arg2, 0);
12005 break;
12006 #endif
12007 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12008 case TARGET_NR_inotify_rm_watch:
12009 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
12010 break;
12011 #endif
12013 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12014 case TARGET_NR_mq_open:
12016 struct mq_attr posix_mq_attr;
12017 struct mq_attr *pposix_mq_attr;
12018 int host_flags;
12020 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12021 pposix_mq_attr = NULL;
12022 if (arg4) {
12023 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12024 goto efault;
12026 pposix_mq_attr = &posix_mq_attr;
12028 p = lock_user_string(arg1 - 1);
12029 if (!p) {
12030 goto efault;
12032 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12033 unlock_user (p, arg1, 0);
12035 break;
12037 case TARGET_NR_mq_unlink:
12038 p = lock_user_string(arg1 - 1);
12039 if (!p) {
12040 ret = -TARGET_EFAULT;
12041 break;
12043 ret = get_errno(mq_unlink(p));
12044 unlock_user (p, arg1, 0);
12045 break;
12047 case TARGET_NR_mq_timedsend:
12049 struct timespec ts;
12051 p = lock_user (VERIFY_READ, arg2, arg3, 1);
12052 if (arg5 != 0) {
12053 target_to_host_timespec(&ts, arg5);
12054 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12055 host_to_target_timespec(arg5, &ts);
12056 } else {
12057 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12059 unlock_user (p, arg2, arg3);
12061 break;
12063 case TARGET_NR_mq_timedreceive:
12065 struct timespec ts;
12066 unsigned int prio;
12068 p = lock_user (VERIFY_READ, arg2, arg3, 1);
12069 if (arg5 != 0) {
12070 target_to_host_timespec(&ts, arg5);
12071 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12072 &prio, &ts));
12073 host_to_target_timespec(arg5, &ts);
12074 } else {
12075 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12076 &prio, NULL));
12078 unlock_user (p, arg2, arg3);
12079 if (arg4 != 0)
12080 put_user_u32(prio, arg4);
12082 break;
12084 /* Not implemented for now... */
12085 /* case TARGET_NR_mq_notify: */
12086 /* break; */
12088 case TARGET_NR_mq_getsetattr:
12090 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12091 ret = 0;
12092 if (arg3 != 0) {
12093 ret = mq_getattr(arg1, &posix_mq_attr_out);
12094 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12096 if (arg2 != 0) {
12097 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12098 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
12102 break;
12103 #endif
12105 #ifdef CONFIG_SPLICE
12106 #ifdef TARGET_NR_tee
12107 case TARGET_NR_tee:
12109 ret = get_errno(tee(arg1,arg2,arg3,arg4));
12111 break;
12112 #endif
12113 #ifdef TARGET_NR_splice
12114 case TARGET_NR_splice:
12116 loff_t loff_in, loff_out;
12117 loff_t *ploff_in = NULL, *ploff_out = NULL;
12118 if (arg2) {
12119 if (get_user_u64(loff_in, arg2)) {
12120 goto efault;
12122 ploff_in = &loff_in;
12124 if (arg4) {
12125 if (get_user_u64(loff_out, arg4)) {
12126 goto efault;
12128 ploff_out = &loff_out;
12130 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12131 if (arg2) {
12132 if (put_user_u64(loff_in, arg2)) {
12133 goto efault;
12136 if (arg4) {
12137 if (put_user_u64(loff_out, arg4)) {
12138 goto efault;
12142 break;
12143 #endif
12144 #ifdef TARGET_NR_vmsplice
12145 case TARGET_NR_vmsplice:
12147 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12148 if (vec != NULL) {
12149 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12150 unlock_iovec(vec, arg2, arg3, 0);
12151 } else {
12152 ret = -host_to_target_errno(errno);
12155 break;
12156 #endif
12157 #endif /* CONFIG_SPLICE */
12158 #ifdef CONFIG_EVENTFD
12159 #if defined(TARGET_NR_eventfd)
12160 case TARGET_NR_eventfd:
12161 ret = get_errno(eventfd(arg1, 0));
12162 if (ret >= 0) {
12163 fd_trans_register(ret, &target_eventfd_trans);
12165 break;
12166 #endif
12167 #if defined(TARGET_NR_eventfd2)
12168 case TARGET_NR_eventfd2:
12170 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
12171 if (arg2 & TARGET_O_NONBLOCK) {
12172 host_flags |= O_NONBLOCK;
12174 if (arg2 & TARGET_O_CLOEXEC) {
12175 host_flags |= O_CLOEXEC;
12177 ret = get_errno(eventfd(arg1, host_flags));
12178 if (ret >= 0) {
12179 fd_trans_register(ret, &target_eventfd_trans);
12181 break;
12183 #endif
12184 #endif /* CONFIG_EVENTFD */
12185 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12186 case TARGET_NR_fallocate:
12187 #if TARGET_ABI_BITS == 32
12188 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12189 target_offset64(arg5, arg6)));
12190 #else
12191 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12192 #endif
12193 break;
12194 #endif
12195 #if defined(CONFIG_SYNC_FILE_RANGE)
12196 #if defined(TARGET_NR_sync_file_range)
12197 case TARGET_NR_sync_file_range:
12198 #if TARGET_ABI_BITS == 32
12199 #if defined(TARGET_MIPS)
12200 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12201 target_offset64(arg5, arg6), arg7));
12202 #else
12203 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12204 target_offset64(arg4, arg5), arg6));
12205 #endif /* !TARGET_MIPS */
12206 #else
12207 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12208 #endif
12209 break;
12210 #endif
12211 #if defined(TARGET_NR_sync_file_range2)
12212 case TARGET_NR_sync_file_range2:
12213 /* This is like sync_file_range but the arguments are reordered */
12214 #if TARGET_ABI_BITS == 32
12215 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12216 target_offset64(arg5, arg6), arg2));
12217 #else
12218 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12219 #endif
12220 break;
12221 #endif
12222 #endif
12223 #if defined(TARGET_NR_signalfd4)
12224 case TARGET_NR_signalfd4:
12225 ret = do_signalfd4(arg1, arg2, arg4);
12226 break;
12227 #endif
12228 #if defined(TARGET_NR_signalfd)
12229 case TARGET_NR_signalfd:
12230 ret = do_signalfd4(arg1, arg2, 0);
12231 break;
12232 #endif
12233 #if defined(CONFIG_EPOLL)
12234 #if defined(TARGET_NR_epoll_create)
12235 case TARGET_NR_epoll_create:
12236 ret = get_errno(epoll_create(arg1));
12237 break;
12238 #endif
12239 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12240 case TARGET_NR_epoll_create1:
12241 ret = get_errno(epoll_create1(arg1));
12242 break;
12243 #endif
12244 #if defined(TARGET_NR_epoll_ctl)
12245 case TARGET_NR_epoll_ctl:
12247 struct epoll_event ep;
12248 struct epoll_event *epp = 0;
12249 if (arg4) {
12250 struct target_epoll_event *target_ep;
12251 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12252 goto efault;
12254 ep.events = tswap32(target_ep->events);
12255 /* The epoll_data_t union is just opaque data to the kernel,
12256 * so we transfer all 64 bits across and need not worry what
12257 * actual data type it is.
12259 ep.data.u64 = tswap64(target_ep->data.u64);
12260 unlock_user_struct(target_ep, arg4, 0);
12261 epp = &ep;
12263 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12264 break;
12266 #endif
12268 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12269 #if defined(TARGET_NR_epoll_wait)
12270 case TARGET_NR_epoll_wait:
12271 #endif
12272 #if defined(TARGET_NR_epoll_pwait)
12273 case TARGET_NR_epoll_pwait:
12274 #endif
12276 struct target_epoll_event *target_ep;
12277 struct epoll_event *ep;
12278 int epfd = arg1;
12279 int maxevents = arg3;
12280 int timeout = arg4;
12282 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12283 ret = -TARGET_EINVAL;
12284 break;
12287 target_ep = lock_user(VERIFY_WRITE, arg2,
12288 maxevents * sizeof(struct target_epoll_event), 1);
12289 if (!target_ep) {
12290 goto efault;
12293 ep = g_try_new(struct epoll_event, maxevents);
12294 if (!ep) {
12295 unlock_user(target_ep, arg2, 0);
12296 ret = -TARGET_ENOMEM;
12297 break;
12300 switch (num) {
12301 #if defined(TARGET_NR_epoll_pwait)
12302 case TARGET_NR_epoll_pwait:
12304 target_sigset_t *target_set;
12305 sigset_t _set, *set = &_set;
12307 if (arg5) {
12308 if (arg6 != sizeof(target_sigset_t)) {
12309 ret = -TARGET_EINVAL;
12310 break;
12313 target_set = lock_user(VERIFY_READ, arg5,
12314 sizeof(target_sigset_t), 1);
12315 if (!target_set) {
12316 ret = -TARGET_EFAULT;
12317 break;
12319 target_to_host_sigset(set, target_set);
12320 unlock_user(target_set, arg5, 0);
12321 } else {
12322 set = NULL;
12325 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12326 set, SIGSET_T_SIZE));
12327 break;
12329 #endif
12330 #if defined(TARGET_NR_epoll_wait)
12331 case TARGET_NR_epoll_wait:
12332 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12333 NULL, 0));
12334 break;
12335 #endif
12336 default:
12337 ret = -TARGET_ENOSYS;
12339 if (!is_error(ret)) {
12340 int i;
12341 for (i = 0; i < ret; i++) {
12342 target_ep[i].events = tswap32(ep[i].events);
12343 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12345 unlock_user(target_ep, arg2,
12346 ret * sizeof(struct target_epoll_event));
12347 } else {
12348 unlock_user(target_ep, arg2, 0);
12350 g_free(ep);
12351 break;
12353 #endif
12354 #endif
12355 #ifdef TARGET_NR_prlimit64
12356 case TARGET_NR_prlimit64:
12358 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12359 struct target_rlimit64 *target_rnew, *target_rold;
12360 struct host_rlimit64 rnew, rold, *rnewp = 0;
12361 int resource = target_to_host_resource(arg2);
12362 if (arg3) {
12363 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12364 goto efault;
12366 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12367 rnew.rlim_max = tswap64(target_rnew->rlim_max);
12368 unlock_user_struct(target_rnew, arg3, 0);
12369 rnewp = &rnew;
12372 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12373 if (!is_error(ret) && arg4) {
12374 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12375 goto efault;
12377 target_rold->rlim_cur = tswap64(rold.rlim_cur);
12378 target_rold->rlim_max = tswap64(rold.rlim_max);
12379 unlock_user_struct(target_rold, arg4, 1);
12381 break;
12383 #endif
12384 #ifdef TARGET_NR_gethostname
12385 case TARGET_NR_gethostname:
12387 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12388 if (name) {
12389 ret = get_errno(gethostname(name, arg2));
12390 unlock_user(name, arg1, arg2);
12391 } else {
12392 ret = -TARGET_EFAULT;
12394 break;
12396 #endif
12397 #ifdef TARGET_NR_atomic_cmpxchg_32
/*
 * NOTE(review): this span is the tail of do_syscall()'s syscall-dispatch
 * switch; the function header and the start of the switch lie outside this
 * view.  The numeric prefix on each line, and the gaps in that numbering,
 * are artifacts of the source extraction (brace-only and blank lines appear
 * to have been dropped), so the code below is left byte-identical and only
 * comments are added.
 */
/*
 * Emulated 32-bit compare-and-swap for guests without a native CAS syscall
 * (m68k).  Reads the guest word at arg6; if it equals the expected value
 * arg2, stores the new value arg1; always returns the old memory value.
 * As the original comment admits, this is NOT atomic w.r.t. other guest
 * threads -- it should run under start_exclusive().
 */
12398 case TARGET_NR_atomic_cmpxchg_32:
12400 /* should use start_exclusive from main.c */
12401 abi_ulong mem_value;
12402 if (get_user_u32(mem_value, arg6)) {
/* Guest address arg6 is unreadable: queue SIGSEGV (SEGV_MAPERR) for the
 * guest, with the faulting address in siginfo. */
12403 target_siginfo_t info;
12404 info.si_signo = SIGSEGV;
12405 info.si_errno = 0;
12406 info.si_code = TARGET_SEGV_MAPERR;
12407 info._sifields._sigfault._addr = arg6;
12408 queue_signal((CPUArchState *)cpu_env, info.si_signo,
12409 QEMU_SI_FAULT, &info);
/* Poison return value; the queued signal is what the guest will see. */
12410 ret = 0xdeadbeef;
12413 if (mem_value == arg2)
/* NOTE(review): the result of put_user_u32() is ignored, so a write
 * fault after a successful read is silently dropped -- confirm this
 * matches the kernel's behaviour for this syscall. */
12414 put_user_u32(arg1, arg6);
/* CAS convention: return the previous memory value. */
12415 ret = mem_value;
12416 break;
12418 #endif
12419 #ifdef TARGET_NR_atomic_barrier
12420 case TARGET_NR_atomic_barrier:
/* Memory barrier syscall: treated as a no-op, mirroring the kernel. */
12422 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
12423 ret = 0;
12424 break;
12426 #endif
12428 #ifdef TARGET_NR_timer_create
12429 case TARGET_NR_timer_create:
12431 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12433 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12435 int clkid = arg1;
/* Guest timer ids are indices into the g_posix_timers[] slot table;
 * grab a free slot or fail with EAGAIN. */
12436 int timer_index = next_free_host_timer();
12438 if (timer_index < 0) {
12439 ret = -TARGET_EAGAIN;
12440 } else {
12441 timer_t *phtimer = g_posix_timers + timer_index;
/* Only convert the guest sigevent if the guest passed one (arg2). */
12443 if (arg2) {
12444 phost_sevp = &host_sevp;
12445 ret = target_to_host_sigevent(phost_sevp, arg2);
12446 if (ret != 0) {
12447 break;
/* Create the host timer into the slot; on success hand the guest its
 * id, tagged with TIMER_MAGIC so stray values are detectable later. */
12451 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12452 if (ret) {
12453 phtimer = NULL;
12454 } else {
12455 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12456 goto efault;
12460 break;
12462 #endif
12464 #ifdef TARGET_NR_timer_settime
12465 case TARGET_NR_timer_settime:
12467 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12468 * struct itimerspec * old_value */
/* get_timer_id() validates the TIMER_MAGIC tag and yields the slot
 * index, or a negative target errno which is returned as-is. */
12469 target_timer_t timerid = get_timer_id(arg1);
12471 if (timerid < 0) {
12472 ret = timerid;
12473 } else if (arg3 == 0) {
/* A new_value pointer is mandatory for settime. */
12474 ret = -TARGET_EINVAL;
12475 } else {
12476 timer_t htimer = g_posix_timers[timerid];
12477 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
/* Convert guest itimerspec in, call the host, convert old value out. */
12479 if (target_to_host_itimerspec(&hspec_new, arg3)) {
12480 goto efault;
12482 ret = get_errno(
12483 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12484 if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12485 goto efault;
12488 break;
12490 #endif
12492 #ifdef TARGET_NR_timer_gettime
12493 case TARGET_NR_timer_gettime:
12495 /* args: timer_t timerid, struct itimerspec *curr_value */
12496 target_timer_t timerid = get_timer_id(arg1);
12498 if (timerid < 0) {
12499 ret = timerid;
12500 } else if (!arg2) {
12501 ret = -TARGET_EFAULT;
12502 } else {
12503 timer_t htimer = g_posix_timers[timerid];
12504 struct itimerspec hspec;
12505 ret = get_errno(timer_gettime(htimer, &hspec));
/* Copying the result back to the guest can itself fault. */
12507 if (host_to_target_itimerspec(arg2, &hspec)) {
12508 ret = -TARGET_EFAULT;
12511 break;
12513 #endif
12515 #ifdef TARGET_NR_timer_getoverrun
12516 case TARGET_NR_timer_getoverrun:
12518 /* args: timer_t timerid */
12519 target_timer_t timerid = get_timer_id(arg1);
12521 if (timerid < 0) {
12522 ret = timerid;
12523 } else {
12524 timer_t htimer = g_posix_timers[timerid];
12525 ret = get_errno(timer_getoverrun(htimer));
/* NOTE(review): ret here is an overrun count (or negative errno), not a
 * file descriptor, so unregistering it from the fd translation table
 * looks misplaced -- verify whether this call is intentional. */
12527 fd_trans_unregister(ret);
12528 break;
12530 #endif
12532 #ifdef TARGET_NR_timer_delete
12533 case TARGET_NR_timer_delete:
12535 /* args: timer_t timerid */
12536 target_timer_t timerid = get_timer_id(arg1);
12538 if (timerid < 0) {
12539 ret = timerid;
12540 } else {
/* Delete the host timer and release the slot by zeroing it. */
12541 timer_t htimer = g_posix_timers[timerid];
12542 ret = get_errno(timer_delete(htimer));
12543 g_posix_timers[timerid] = 0;
12545 break;
12547 #endif
12549 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12550 case TARGET_NR_timerfd_create:
/* Translate guest TFD_* flag bits to host values before the call. */
12551 ret = get_errno(timerfd_create(arg1,
12552 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12553 break;
12554 #endif
12556 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12557 case TARGET_NR_timerfd_gettime:
12559 struct itimerspec its_curr;
12561 ret = get_errno(timerfd_gettime(arg1, &its_curr));
/* Copy current value back only if the guest supplied a buffer. */
12563 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12564 goto efault;
12567 break;
12568 #endif
12570 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12571 case TARGET_NR_timerfd_settime:
12573 struct itimerspec its_new, its_old, *p_new;
/* new_value is optional: NULL is passed through to the host call. */
12575 if (arg3) {
12576 if (target_to_host_itimerspec(&its_new, arg3)) {
12577 goto efault;
12579 p_new = &its_new;
12580 } else {
12581 p_new = NULL;
12584 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12586 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12587 goto efault;
12590 break;
12591 #endif
/* The remaining cases are thin pass-throughs: arguments need no
 * translation, only errno conversion via get_errno(). */
12593 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12594 case TARGET_NR_ioprio_get:
12595 ret = get_errno(ioprio_get(arg1, arg2));
12596 break;
12597 #endif
12599 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12600 case TARGET_NR_ioprio_set:
12601 ret = get_errno(ioprio_set(arg1, arg2, arg3));
12602 break;
12603 #endif
12605 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12606 case TARGET_NR_setns:
12607 ret = get_errno(setns(arg1, arg2));
12608 break;
12609 #endif
12610 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12611 case TARGET_NR_unshare:
12612 ret = get_errno(unshare(arg1));
12613 break;
12614 #endif
12615 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12616 case TARGET_NR_kcmp:
12617 ret = get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12618 break;
12619 #endif
/* Unknown / unimplemented syscalls: log once per invocation and fail
 * with ENOSYS.  The unimplemented_nowarn label is the silent variant
 * targeted by stubs elsewhere in the switch. */
12621 default:
12622 unimplemented:
12623 gemu_log("qemu: Unsupported syscall: %d\n", num);
12624 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
12625 unimplemented_nowarn:
12626 #endif
12627 ret = -TARGET_ENOSYS;
12628 break;
/* Common exit path: optional debug/strace logging, trace event, return. */
12630 fail:
12631 #ifdef DEBUG
12632 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
12633 #endif
12634 if(do_strace)
12635 print_syscall_ret(num, ret);
12636 trace_guest_user_syscall_ret(cpu, num, ret);
12637 return ret;
/* Shared guest-memory-fault exit: report EFAULT through the normal path. */
12638 efault:
12639 ret = -TARGET_EFAULT;
12640 goto fail;