Fix build of console and GUI executables for Windows
[qemu/ar7.git] / linux-user / syscall.c
blob0d8198efa1d1b51831525284fcab26f8b6aec4f8
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #ifdef __ia64__
40 int __clone2(int (*fn)(void *), void *child_stack_base,
41 size_t stack_size, int flags, void *arg, ...);
42 #endif
43 #include <sys/socket.h>
44 #include <sys/un.h>
45 #include <sys/uio.h>
46 #include <poll.h>
47 #include <sys/times.h>
48 #include <sys/shm.h>
49 #include <sys/sem.h>
50 #include <sys/statfs.h>
51 #include <time.h>
52 #include <utime.h>
53 #include <sys/sysinfo.h>
54 #include <sys/signalfd.h>
55 //#include <sys/user.h>
56 #include <netinet/ip.h>
57 #include <netinet/tcp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/errqueue.h>
62 #include <linux/random.h>
63 #include "qemu-common.h"
64 #include "qemu/sockets.h"
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef TARGET_GPROF
69 #include <sys/gmon.h>
70 #endif
71 #ifdef CONFIG_EVENTFD
72 #include <sys/eventfd.h>
73 #endif
74 #ifdef CONFIG_EPOLL
75 #include <sys/epoll.h>
76 #endif
77 #ifdef CONFIG_ATTR
78 #include "qemu/xattr.h"
79 #endif
80 #ifdef CONFIG_SENDFILE
81 #include <sys/sendfile.h>
82 #endif
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 #include <linux/fs.h>
99 #if defined(CONFIG_FIEMAP)
100 #include <linux/fiemap.h>
101 #endif
102 #include <linux/fb.h>
103 #include <linux/vt.h>
104 #include <linux/dm-ioctl.h>
105 #include <linux/reboot.h>
106 #include <linux/route.h>
107 #include <linux/filter.h>
108 #include <linux/blkpg.h>
109 #include <netpacket/packet.h>
110 #include <linux/netlink.h>
111 #ifdef CONFIG_RTNETLINK
112 #include <linux/rtnetlink.h>
113 #include <linux/if_bridge.h>
114 #endif
115 #include <linux/audit.h>
116 #include "linux_loop.h"
117 #include "uname.h"
119 #include "qemu.h"
121 #ifndef CLONE_IO
122 #define CLONE_IO 0x80000000 /* Clone io context */
123 #endif
125 /* We can't directly call the host clone syscall, because this will
126 * badly confuse libc (breaking mutexes, for example). So we must
127 * divide clone flags into:
128 * * flag combinations that look like pthread_create()
129 * * flag combinations that look like fork()
130 * * flags we can implement within QEMU itself
131 * * flags we can't support and will return an error for
133 /* For thread creation, all these flags must be present; for
134 * fork, none must be present.
136 #define CLONE_THREAD_FLAGS \
137 (CLONE_VM | CLONE_FS | CLONE_FILES | \
138 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
140 /* These flags are ignored:
141 * CLONE_DETACHED is now ignored by the kernel;
142 * CLONE_IO is just an optimisation hint to the I/O scheduler
144 #define CLONE_IGNORED_FLAGS \
145 (CLONE_DETACHED | CLONE_IO)
147 /* Flags for fork which we can implement within QEMU itself */
148 #define CLONE_OPTIONAL_FORK_FLAGS \
149 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
150 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
152 /* Flags for thread creation which we can implement within QEMU itself */
153 #define CLONE_OPTIONAL_THREAD_FLAGS \
154 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
155 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
157 #define CLONE_INVALID_FORK_FLAGS \
158 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
160 #define CLONE_INVALID_THREAD_FLAGS \
161 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
162 CLONE_IGNORED_FLAGS))
164 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
165 * have almost all been allocated. We cannot support any of
166 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
167 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
168 * The checks against the invalid thread masks above will catch these.
169 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
172 //#define DEBUG
173 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
174 * once. This exercises the codepaths for restart.
176 //#define DEBUG_ERESTARTSYS
178 //#include <linux/msdos_fs.h>
179 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
180 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
182 #undef _syscall0
183 #undef _syscall1
184 #undef _syscall2
185 #undef _syscall3
186 #undef _syscall4
187 #undef _syscall5
188 #undef _syscall6
190 #define _syscall0(type,name) \
191 static type name (void) \
193 return syscall(__NR_##name); \
196 #define _syscall1(type,name,type1,arg1) \
197 static type name (type1 arg1) \
199 return syscall(__NR_##name, arg1); \
202 #define _syscall2(type,name,type1,arg1,type2,arg2) \
203 static type name (type1 arg1,type2 arg2) \
205 return syscall(__NR_##name, arg1, arg2); \
208 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
209 static type name (type1 arg1,type2 arg2,type3 arg3) \
211 return syscall(__NR_##name, arg1, arg2, arg3); \
214 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
215 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
217 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
220 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
221 type5,arg5) \
222 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
224 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
228 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
229 type5,arg5,type6,arg6) \
230 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
231 type6 arg6) \
233 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
237 #define __NR_sys_uname __NR_uname
238 #define __NR_sys_getcwd1 __NR_getcwd
239 #define __NR_sys_getdents __NR_getdents
240 #define __NR_sys_getdents64 __NR_getdents64
241 #define __NR_sys_getpriority __NR_getpriority
242 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
243 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
244 #define __NR_sys_syslog __NR_syslog
245 #define __NR_sys_futex __NR_futex
246 #define __NR_sys_inotify_init __NR_inotify_init
247 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
248 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
250 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
251 defined(__s390x__)
252 #define __NR__llseek __NR_lseek
253 #endif
255 /* Newer kernel ports have llseek() instead of _llseek() */
256 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
257 #define TARGET_NR__llseek TARGET_NR_llseek
258 #endif
260 #ifdef __NR_gettid
261 _syscall0(int, gettid)
262 #else
/* Fallback for hosts without __NR_gettid; must return a host errno value. */
static int gettid(void)
{
    return -ENOSYS;
}
268 #endif
269 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
270 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
271 #endif
272 #if !defined(__NR_getdents) || \
273 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
274 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
275 #endif
276 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
277 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
278 loff_t *, res, uint, wh);
279 #endif
280 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
281 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
282 siginfo_t *, uinfo)
283 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
284 #ifdef __NR_exit_group
285 _syscall1(int,exit_group,int,error_code)
286 #endif
287 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
288 _syscall1(int,set_tid_address,int *,tidptr)
289 #endif
290 #if defined(TARGET_NR_futex) && defined(__NR_futex)
291 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
292 const struct timespec *,timeout,int *,uaddr2,int,val3)
293 #endif
294 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
295 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
296 unsigned long *, user_mask_ptr);
297 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
298 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
299 unsigned long *, user_mask_ptr);
300 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
301 void *, arg);
302 _syscall2(int, capget, struct __user_cap_header_struct *, header,
303 struct __user_cap_data_struct *, data);
304 _syscall2(int, capset, struct __user_cap_header_struct *, header,
305 struct __user_cap_data_struct *, data);
306 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
307 _syscall2(int, ioprio_get, int, which, int, who)
308 #endif
309 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
310 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
311 #endif
312 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
313 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
314 #endif
316 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
317 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
318 unsigned long, idx1, unsigned long, idx2)
319 #endif
321 static bitmask_transtbl fcntl_flags_tbl[] = {
322 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
323 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
324 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
325 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
326 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
327 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
328 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
329 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
330 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
331 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
332 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
333 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
334 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
335 #if defined(O_DIRECT)
336 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
337 #endif
338 #if defined(O_NOATIME)
339 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
340 #endif
341 #if defined(O_CLOEXEC)
342 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
343 #endif
344 #if defined(O_PATH)
345 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
346 #endif
347 #if defined(O_TMPFILE)
348 { TARGET_O_TMPFILE, TARGET_O_TMPFILE, O_TMPFILE, O_TMPFILE },
349 #endif
350 /* Don't terminate the list prematurely on 64-bit host+guest. */
351 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
352 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
353 #endif
354 { 0, 0, 0, 0 }
357 enum {
358 QEMU_IFLA_BR_UNSPEC,
359 QEMU_IFLA_BR_FORWARD_DELAY,
360 QEMU_IFLA_BR_HELLO_TIME,
361 QEMU_IFLA_BR_MAX_AGE,
362 QEMU_IFLA_BR_AGEING_TIME,
363 QEMU_IFLA_BR_STP_STATE,
364 QEMU_IFLA_BR_PRIORITY,
365 QEMU_IFLA_BR_VLAN_FILTERING,
366 QEMU_IFLA_BR_VLAN_PROTOCOL,
367 QEMU_IFLA_BR_GROUP_FWD_MASK,
368 QEMU_IFLA_BR_ROOT_ID,
369 QEMU_IFLA_BR_BRIDGE_ID,
370 QEMU_IFLA_BR_ROOT_PORT,
371 QEMU_IFLA_BR_ROOT_PATH_COST,
372 QEMU_IFLA_BR_TOPOLOGY_CHANGE,
373 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
374 QEMU_IFLA_BR_HELLO_TIMER,
375 QEMU_IFLA_BR_TCN_TIMER,
376 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
377 QEMU_IFLA_BR_GC_TIMER,
378 QEMU_IFLA_BR_GROUP_ADDR,
379 QEMU_IFLA_BR_FDB_FLUSH,
380 QEMU_IFLA_BR_MCAST_ROUTER,
381 QEMU_IFLA_BR_MCAST_SNOOPING,
382 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
383 QEMU_IFLA_BR_MCAST_QUERIER,
384 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
385 QEMU_IFLA_BR_MCAST_HASH_MAX,
386 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
387 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
388 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
389 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
390 QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
391 QEMU_IFLA_BR_MCAST_QUERY_INTVL,
392 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
393 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
394 QEMU_IFLA_BR_NF_CALL_IPTABLES,
395 QEMU_IFLA_BR_NF_CALL_IP6TABLES,
396 QEMU_IFLA_BR_NF_CALL_ARPTABLES,
397 QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
398 QEMU_IFLA_BR_PAD,
399 QEMU_IFLA_BR_VLAN_STATS_ENABLED,
400 QEMU_IFLA_BR_MCAST_STATS_ENABLED,
401 QEMU___IFLA_BR_MAX,
404 enum {
405 QEMU_IFLA_UNSPEC,
406 QEMU_IFLA_ADDRESS,
407 QEMU_IFLA_BROADCAST,
408 QEMU_IFLA_IFNAME,
409 QEMU_IFLA_MTU,
410 QEMU_IFLA_LINK,
411 QEMU_IFLA_QDISC,
412 QEMU_IFLA_STATS,
413 QEMU_IFLA_COST,
414 QEMU_IFLA_PRIORITY,
415 QEMU_IFLA_MASTER,
416 QEMU_IFLA_WIRELESS,
417 QEMU_IFLA_PROTINFO,
418 QEMU_IFLA_TXQLEN,
419 QEMU_IFLA_MAP,
420 QEMU_IFLA_WEIGHT,
421 QEMU_IFLA_OPERSTATE,
422 QEMU_IFLA_LINKMODE,
423 QEMU_IFLA_LINKINFO,
424 QEMU_IFLA_NET_NS_PID,
425 QEMU_IFLA_IFALIAS,
426 QEMU_IFLA_NUM_VF,
427 QEMU_IFLA_VFINFO_LIST,
428 QEMU_IFLA_STATS64,
429 QEMU_IFLA_VF_PORTS,
430 QEMU_IFLA_PORT_SELF,
431 QEMU_IFLA_AF_SPEC,
432 QEMU_IFLA_GROUP,
433 QEMU_IFLA_NET_NS_FD,
434 QEMU_IFLA_EXT_MASK,
435 QEMU_IFLA_PROMISCUITY,
436 QEMU_IFLA_NUM_TX_QUEUES,
437 QEMU_IFLA_NUM_RX_QUEUES,
438 QEMU_IFLA_CARRIER,
439 QEMU_IFLA_PHYS_PORT_ID,
440 QEMU_IFLA_CARRIER_CHANGES,
441 QEMU_IFLA_PHYS_SWITCH_ID,
442 QEMU_IFLA_LINK_NETNSID,
443 QEMU_IFLA_PHYS_PORT_NAME,
444 QEMU_IFLA_PROTO_DOWN,
445 QEMU_IFLA_GSO_MAX_SEGS,
446 QEMU_IFLA_GSO_MAX_SIZE,
447 QEMU_IFLA_PAD,
448 QEMU_IFLA_XDP,
449 QEMU___IFLA_MAX
452 enum {
453 QEMU_IFLA_BRPORT_UNSPEC,
454 QEMU_IFLA_BRPORT_STATE,
455 QEMU_IFLA_BRPORT_PRIORITY,
456 QEMU_IFLA_BRPORT_COST,
457 QEMU_IFLA_BRPORT_MODE,
458 QEMU_IFLA_BRPORT_GUARD,
459 QEMU_IFLA_BRPORT_PROTECT,
460 QEMU_IFLA_BRPORT_FAST_LEAVE,
461 QEMU_IFLA_BRPORT_LEARNING,
462 QEMU_IFLA_BRPORT_UNICAST_FLOOD,
463 QEMU_IFLA_BRPORT_PROXYARP,
464 QEMU_IFLA_BRPORT_LEARNING_SYNC,
465 QEMU_IFLA_BRPORT_PROXYARP_WIFI,
466 QEMU_IFLA_BRPORT_ROOT_ID,
467 QEMU_IFLA_BRPORT_BRIDGE_ID,
468 QEMU_IFLA_BRPORT_DESIGNATED_PORT,
469 QEMU_IFLA_BRPORT_DESIGNATED_COST,
470 QEMU_IFLA_BRPORT_ID,
471 QEMU_IFLA_BRPORT_NO,
472 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
473 QEMU_IFLA_BRPORT_CONFIG_PENDING,
474 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
475 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
476 QEMU_IFLA_BRPORT_HOLD_TIMER,
477 QEMU_IFLA_BRPORT_FLUSH,
478 QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
479 QEMU_IFLA_BRPORT_PAD,
480 QEMU___IFLA_BRPORT_MAX
483 enum {
484 QEMU_IFLA_INFO_UNSPEC,
485 QEMU_IFLA_INFO_KIND,
486 QEMU_IFLA_INFO_DATA,
487 QEMU_IFLA_INFO_XSTATS,
488 QEMU_IFLA_INFO_SLAVE_KIND,
489 QEMU_IFLA_INFO_SLAVE_DATA,
490 QEMU___IFLA_INFO_MAX,
493 enum {
494 QEMU_IFLA_INET_UNSPEC,
495 QEMU_IFLA_INET_CONF,
496 QEMU___IFLA_INET_MAX,
499 enum {
500 QEMU_IFLA_INET6_UNSPEC,
501 QEMU_IFLA_INET6_FLAGS,
502 QEMU_IFLA_INET6_CONF,
503 QEMU_IFLA_INET6_STATS,
504 QEMU_IFLA_INET6_MCAST,
505 QEMU_IFLA_INET6_CACHEINFO,
506 QEMU_IFLA_INET6_ICMP6STATS,
507 QEMU_IFLA_INET6_TOKEN,
508 QEMU_IFLA_INET6_ADDR_GEN_MODE,
509 QEMU___IFLA_INET6_MAX
512 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
513 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
514 typedef struct TargetFdTrans {
515 TargetFdDataFunc host_to_target_data;
516 TargetFdDataFunc target_to_host_data;
517 TargetFdAddrFunc target_to_host_addr;
518 } TargetFdTrans;
520 static TargetFdTrans **target_fd_trans;
522 static unsigned int target_fd_max;
524 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
526 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
527 return target_fd_trans[fd]->target_to_host_data;
529 return NULL;
532 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
534 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
535 return target_fd_trans[fd]->host_to_target_data;
537 return NULL;
540 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
542 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
543 return target_fd_trans[fd]->target_to_host_addr;
545 return NULL;
548 static void fd_trans_register(int fd, TargetFdTrans *trans)
550 unsigned int oldmax;
552 if (fd >= target_fd_max) {
553 oldmax = target_fd_max;
554 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
555 target_fd_trans = g_renew(TargetFdTrans *,
556 target_fd_trans, target_fd_max);
557 memset((void *)(target_fd_trans + oldmax), 0,
558 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
560 target_fd_trans[fd] = trans;
563 static void fd_trans_unregister(int fd)
565 if (fd >= 0 && fd < target_fd_max) {
566 target_fd_trans[fd] = NULL;
570 static void fd_trans_dup(int oldfd, int newfd)
572 fd_trans_unregister(newfd);
573 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
574 fd_trans_register(newfd, target_fd_trans[oldfd]);
/*
 * getcwd() shim matching the kernel syscall convention: on success the
 * buffer is filled and the length including the NUL is returned; on
 * failure -1 is returned with errno set by getcwd().
 */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        return -1;
    }
    return strlen(buf) + 1;
}
587 #ifdef TARGET_NR_utimensat
588 #if defined(__NR_utimensat)
589 #define __NR_sys_utimensat __NR_utimensat
590 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
591 const struct timespec *,tsp,int,flags)
592 #else
/* Host kernel has no utimensat(): fail with ENOSYS like the syscall would. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
599 #endif
600 #endif /* TARGET_NR_utimensat */
602 #ifdef CONFIG_INOTIFY
603 #include <sys/inotify.h>
605 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin shim over the host libc inotify_init() wrapper. */
static int sys_inotify_init(void)
{
    return inotify_init();
}
610 #endif
611 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin shim over the host libc inotify_add_watch() wrapper. */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
616 #endif
617 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin shim over the host libc inotify_rm_watch() wrapper. */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
622 #endif
623 #ifdef CONFIG_INOTIFY1
624 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin shim over the host libc inotify_init1() wrapper. */
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
629 #endif
630 #endif
631 #else
632 /* Userspace can usually survive runtime without inotify */
633 #undef TARGET_NR_inotify_init
634 #undef TARGET_NR_inotify_init1
635 #undef TARGET_NR_inotify_add_watch
636 #undef TARGET_NR_inotify_rm_watch
637 #endif /* CONFIG_INOTIFY */
639 #if defined(TARGET_NR_prlimit64)
640 #ifndef __NR_prlimit64
641 # define __NR_prlimit64 -1
642 #endif
643 #define __NR_sys_prlimit64 __NR_prlimit64
644 /* The glibc rlimit structure may not be that used by the underlying syscall */
645 struct host_rlimit64 {
646 uint64_t rlim_cur;
647 uint64_t rlim_max;
649 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
650 const struct host_rlimit64 *, new_limit,
651 struct host_rlimit64 *, old_limit)
652 #endif
655 #if defined(TARGET_NR_timer_create)
656 /* Maximum of 32 active POSIX timers allowed at any one time. */
657 static timer_t g_posix_timers[32] = { 0, } ;
659 static inline int next_free_host_timer(void)
661 int k ;
662 /* FIXME: Does finding the next free slot require a lock? */
663 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
664 if (g_posix_timers[k] == 0) {
665 g_posix_timers[k] = (timer_t) 1;
666 return k;
669 return -1;
671 #endif
673 /* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
674 #ifdef TARGET_ARM
675 static inline int regpairs_aligned(void *cpu_env) {
676 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
678 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
679 static inline int regpairs_aligned(void *cpu_env) { return 1; }
680 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
681 /* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
682 * of registers which translates to the same as ARM/MIPS, because we start with
683 * r3 as arg1 */
684 static inline int regpairs_aligned(void *cpu_env) { return 1; }
685 #else
686 static inline int regpairs_aligned(void *cpu_env) { return 0; }
687 #endif
689 #define ERRNO_TABLE_SIZE 1200
691 /* target_to_host_errno_table[] is initialized from
692 * host_to_target_errno_table[] in syscall_init(). */
693 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
697 * This list is the union of errno values overridden in asm-<arch>/errno.h
698 * minus the errnos that are not actually generic to all archs.
700 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
701 [EAGAIN] = TARGET_EAGAIN,
702 [EIDRM] = TARGET_EIDRM,
703 [ECHRNG] = TARGET_ECHRNG,
704 [EL2NSYNC] = TARGET_EL2NSYNC,
705 [EL3HLT] = TARGET_EL3HLT,
706 [EL3RST] = TARGET_EL3RST,
707 [ELNRNG] = TARGET_ELNRNG,
708 [EUNATCH] = TARGET_EUNATCH,
709 [ENOCSI] = TARGET_ENOCSI,
710 [EL2HLT] = TARGET_EL2HLT,
711 [EDEADLK] = TARGET_EDEADLK,
712 [ENOLCK] = TARGET_ENOLCK,
713 [EBADE] = TARGET_EBADE,
714 [EBADR] = TARGET_EBADR,
715 [EXFULL] = TARGET_EXFULL,
716 [ENOANO] = TARGET_ENOANO,
717 [EBADRQC] = TARGET_EBADRQC,
718 [EBADSLT] = TARGET_EBADSLT,
719 [EBFONT] = TARGET_EBFONT,
720 [ENOSTR] = TARGET_ENOSTR,
721 [ENODATA] = TARGET_ENODATA,
722 [ETIME] = TARGET_ETIME,
723 [ENOSR] = TARGET_ENOSR,
724 [ENONET] = TARGET_ENONET,
725 [ENOPKG] = TARGET_ENOPKG,
726 [EREMOTE] = TARGET_EREMOTE,
727 [ENOLINK] = TARGET_ENOLINK,
728 [EADV] = TARGET_EADV,
729 [ESRMNT] = TARGET_ESRMNT,
730 [ECOMM] = TARGET_ECOMM,
731 [EPROTO] = TARGET_EPROTO,
732 [EDOTDOT] = TARGET_EDOTDOT,
733 [EMULTIHOP] = TARGET_EMULTIHOP,
734 [EBADMSG] = TARGET_EBADMSG,
735 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
736 [EOVERFLOW] = TARGET_EOVERFLOW,
737 [ENOTUNIQ] = TARGET_ENOTUNIQ,
738 [EBADFD] = TARGET_EBADFD,
739 [EREMCHG] = TARGET_EREMCHG,
740 [ELIBACC] = TARGET_ELIBACC,
741 [ELIBBAD] = TARGET_ELIBBAD,
742 [ELIBSCN] = TARGET_ELIBSCN,
743 [ELIBMAX] = TARGET_ELIBMAX,
744 [ELIBEXEC] = TARGET_ELIBEXEC,
745 [EILSEQ] = TARGET_EILSEQ,
746 [ENOSYS] = TARGET_ENOSYS,
747 [ELOOP] = TARGET_ELOOP,
748 [ERESTART] = TARGET_ERESTART,
749 [ESTRPIPE] = TARGET_ESTRPIPE,
750 [ENOTEMPTY] = TARGET_ENOTEMPTY,
751 [EUSERS] = TARGET_EUSERS,
752 [ENOTSOCK] = TARGET_ENOTSOCK,
753 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
754 [EMSGSIZE] = TARGET_EMSGSIZE,
755 [EPROTOTYPE] = TARGET_EPROTOTYPE,
756 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
757 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
758 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
759 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
760 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
761 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
762 [EADDRINUSE] = TARGET_EADDRINUSE,
763 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
764 [ENETDOWN] = TARGET_ENETDOWN,
765 [ENETUNREACH] = TARGET_ENETUNREACH,
766 [ENETRESET] = TARGET_ENETRESET,
767 [ECONNABORTED] = TARGET_ECONNABORTED,
768 [ECONNRESET] = TARGET_ECONNRESET,
769 [ENOBUFS] = TARGET_ENOBUFS,
770 [EISCONN] = TARGET_EISCONN,
771 [ENOTCONN] = TARGET_ENOTCONN,
772 [EUCLEAN] = TARGET_EUCLEAN,
773 [ENOTNAM] = TARGET_ENOTNAM,
774 [ENAVAIL] = TARGET_ENAVAIL,
775 [EISNAM] = TARGET_EISNAM,
776 [EREMOTEIO] = TARGET_EREMOTEIO,
777 [EDQUOT] = TARGET_EDQUOT,
778 [ESHUTDOWN] = TARGET_ESHUTDOWN,
779 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
780 [ETIMEDOUT] = TARGET_ETIMEDOUT,
781 [ECONNREFUSED] = TARGET_ECONNREFUSED,
782 [EHOSTDOWN] = TARGET_EHOSTDOWN,
783 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
784 [EALREADY] = TARGET_EALREADY,
785 [EINPROGRESS] = TARGET_EINPROGRESS,
786 [ESTALE] = TARGET_ESTALE,
787 [ECANCELED] = TARGET_ECANCELED,
788 [ENOMEDIUM] = TARGET_ENOMEDIUM,
789 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
790 #ifdef ENOKEY
791 [ENOKEY] = TARGET_ENOKEY,
792 #endif
793 #ifdef EKEYEXPIRED
794 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
795 #endif
796 #ifdef EKEYREVOKED
797 [EKEYREVOKED] = TARGET_EKEYREVOKED,
798 #endif
799 #ifdef EKEYREJECTED
800 [EKEYREJECTED] = TARGET_EKEYREJECTED,
801 #endif
802 #ifdef EOWNERDEAD
803 [EOWNERDEAD] = TARGET_EOWNERDEAD,
804 #endif
805 #ifdef ENOTRECOVERABLE
806 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
807 #endif
808 #ifdef ENOMSG
809 [ENOMSG] = TARGET_ENOMSG,
810 #endif
811 #ifdef ERKFILL
812 [ERFKILL] = TARGET_ERFKILL,
813 #endif
814 #ifdef EHWPOISON
815 [EHWPOISON] = TARGET_EHWPOISON,
816 #endif
819 static inline int host_to_target_errno(int err)
821 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
822 host_to_target_errno_table[err]) {
823 return host_to_target_errno_table[err];
825 return err;
828 static inline int target_to_host_errno(int err)
830 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
831 target_to_host_errno_table[err]) {
832 return target_to_host_errno_table[err];
834 return err;
837 static inline abi_long get_errno(abi_long ret)
839 if (ret == -1)
840 return -host_to_target_errno(errno);
841 else
842 return ret;
845 static inline int is_error(abi_long ret)
847 return (abi_ulong)ret >= (abi_ulong)(-4096);
850 const char *target_strerror(int err)
852 if (err == TARGET_ERESTARTSYS) {
853 return "To be restarted";
855 if (err == TARGET_QEMU_ESIGRETURN) {
856 return "Successful exit from sigreturn";
859 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
860 return NULL;
862 return strerror(target_to_host_errno(err));
865 #define safe_syscall0(type, name) \
866 static type safe_##name(void) \
868 return safe_syscall(__NR_##name); \
871 #define safe_syscall1(type, name, type1, arg1) \
872 static type safe_##name(type1 arg1) \
874 return safe_syscall(__NR_##name, arg1); \
877 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
878 static type safe_##name(type1 arg1, type2 arg2) \
880 return safe_syscall(__NR_##name, arg1, arg2); \
883 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
884 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
886 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
889 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
890 type4, arg4) \
891 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
893 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
896 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
897 type4, arg4, type5, arg5) \
898 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
899 type5 arg5) \
901 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
904 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
905 type4, arg4, type5, arg5, type6, arg6) \
906 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
907 type5 arg5, type6 arg6) \
909 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
912 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
913 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
914 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
915 int, flags, mode_t, mode)
916 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
917 struct rusage *, rusage)
918 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
919 int, options, struct rusage *, rusage)
920 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
921 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
922 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
923 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
924 struct timespec *, tsp, const sigset_t *, sigmask,
925 size_t, sigsetsize)
926 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
927 int, maxevents, int, timeout, const sigset_t *, sigmask,
928 size_t, sigsetsize)
929 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
930 const struct timespec *,timeout,int *,uaddr2,int,val3)
931 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
932 safe_syscall2(int, kill, pid_t, pid, int, sig)
933 safe_syscall2(int, tkill, int, tid, int, sig)
934 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
935 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
936 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
937 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
938 unsigned long, pos_l, unsigned long, pos_h)
939 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
940 unsigned long, pos_l, unsigned long, pos_h)
941 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
942 socklen_t, addrlen)
943 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
944 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
945 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
946 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
947 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
948 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
949 safe_syscall2(int, flock, int, fd, int, operation)
950 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
951 const struct timespec *, uts, size_t, sigsetsize)
952 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
953 int, flags)
954 safe_syscall2(int, nanosleep, const struct timespec *, req,
955 struct timespec *, rem)
956 #ifdef TARGET_NR_clock_nanosleep
957 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
958 const struct timespec *, req, struct timespec *, rem)
959 #endif
960 #ifdef __NR_msgsnd
961 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
962 int, flags)
963 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
964 long, msgtype, int, flags)
965 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
966 unsigned, nsops, const struct timespec *, timeout)
967 #else
968 /* This host kernel architecture uses a single ipc syscall; fake up
969 * wrappers for the sub-operations to hide this implementation detail.
970 * Annoyingly we can't include linux/ipc.h to get the constant definitions
971 * for the call parameter because some structs in there conflict with the
972 * sys/ipc.h ones. So we just define them here, and rely on them being
973 * the same for all host architectures.
975 #define Q_SEMTIMEDOP 4
976 #define Q_MSGSND 11
977 #define Q_MSGRCV 12
978 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
980 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
981 void *, ptr, long, fifth)
982 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
984 return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
986 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
988 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
990 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
991 const struct timespec *timeout)
993 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
994 (long)timeout);
996 #endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
1019 static inline int host_to_target_sock_type(int host_type)
1021 int target_type;
1023 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
1024 case SOCK_DGRAM:
1025 target_type = TARGET_SOCK_DGRAM;
1026 break;
1027 case SOCK_STREAM:
1028 target_type = TARGET_SOCK_STREAM;
1029 break;
1030 default:
1031 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
1032 break;
1035 #if defined(SOCK_CLOEXEC)
1036 if (host_type & SOCK_CLOEXEC) {
1037 target_type |= TARGET_SOCK_CLOEXEC;
1039 #endif
1041 #if defined(SOCK_NONBLOCK)
1042 if (host_type & SOCK_NONBLOCK) {
1043 target_type |= TARGET_SOCK_NONBLOCK;
1045 #endif
1047 return target_type;
1050 static abi_ulong target_brk;
1051 static abi_ulong target_original_brk;
1052 static abi_ulong brk_page;
1054 void target_set_brk(abi_ulong new_brk)
1056 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
1057 brk_page = HOST_PAGE_ALIGN(target_brk);
1060 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
1061 #define DEBUGF_BRK(message, args...)
1063 /* do_brk() must return target values and target errnos. */
1064 abi_long do_brk(abi_ulong new_brk)
1066 abi_long mapped_addr;
1067 abi_ulong new_alloc_size;
1069 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
1071 if (!new_brk) {
1072 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
1073 return target_brk;
1075 if (new_brk < target_original_brk) {
1076 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
1077 target_brk);
1078 return target_brk;
1081 /* If the new brk is less than the highest page reserved to the
1082 * target heap allocation, set it and we're almost done... */
1083 if (new_brk <= brk_page) {
1084 /* Heap contents are initialized to zero, as for anonymous
1085 * mapped pages. */
1086 if (new_brk > target_brk) {
1087 memset(g2h(target_brk), 0, new_brk - target_brk);
1089 target_brk = new_brk;
1090 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
1091 return target_brk;
1094 /* We need to allocate more memory after the brk... Note that
1095 * we don't use MAP_FIXED because that will map over the top of
1096 * any existing mapping (like the one with the host libc or qemu
1097 * itself); instead we treat "mapped but at wrong address" as
1098 * a failure and unmap again.
1100 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
1101 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
1102 PROT_READ|PROT_WRITE,
1103 MAP_ANON|MAP_PRIVATE, 0, 0));
1105 if (mapped_addr == brk_page) {
1106 /* Heap contents are initialized to zero, as for anonymous
1107 * mapped pages. Technically the new pages are already
1108 * initialized to zero since they *are* anonymous mapped
1109 * pages, however we have to take care with the contents that
1110 * come from the remaining part of the previous page: it may
1111 * contains garbage data due to a previous heap usage (grown
1112 * then shrunken). */
1113 memset(g2h(target_brk), 0, brk_page - target_brk);
1115 target_brk = new_brk;
1116 brk_page = HOST_PAGE_ALIGN(target_brk);
1117 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
1118 target_brk);
1119 return target_brk;
1120 } else if (mapped_addr != -1) {
1121 /* Mapped but at wrong address, meaning there wasn't actually
1122 * enough space for this brk.
1124 target_munmap(mapped_addr, new_alloc_size);
1125 mapped_addr = -1;
1126 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
1128 else {
1129 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
1132 #if defined(TARGET_ALPHA)
1133 /* We (partially) emulate OSF/1 on Alpha, which requires we
1134 return a proper errno, not an unchanged brk value. */
1135 return -TARGET_ENOMEM;
1136 #endif
1137 /* For everything else, return the previous break. */
1138 return target_brk;
1141 static inline abi_long copy_from_user_fdset(fd_set *fds,
1142 abi_ulong target_fds_addr,
1143 int n)
1145 int i, nw, j, k;
1146 abi_ulong b, *target_fds;
1148 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1149 if (!(target_fds = lock_user(VERIFY_READ,
1150 target_fds_addr,
1151 sizeof(abi_ulong) * nw,
1152 1)))
1153 return -TARGET_EFAULT;
1155 FD_ZERO(fds);
1156 k = 0;
1157 for (i = 0; i < nw; i++) {
1158 /* grab the abi_ulong */
1159 __get_user(b, &target_fds[i]);
1160 for (j = 0; j < TARGET_ABI_BITS; j++) {
1161 /* check the bit inside the abi_ulong */
1162 if ((b >> j) & 1)
1163 FD_SET(k, fds);
1164 k++;
1168 unlock_user(target_fds, target_fds_addr, 0);
1170 return 0;
1173 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1174 abi_ulong target_fds_addr,
1175 int n)
1177 if (target_fds_addr) {
1178 if (copy_from_user_fdset(fds, target_fds_addr, n))
1179 return -TARGET_EFAULT;
1180 *fds_ptr = fds;
1181 } else {
1182 *fds_ptr = NULL;
1184 return 0;
1187 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1188 const fd_set *fds,
1189 int n)
1191 int i, nw, j, k;
1192 abi_long v;
1193 abi_ulong *target_fds;
1195 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1196 if (!(target_fds = lock_user(VERIFY_WRITE,
1197 target_fds_addr,
1198 sizeof(abi_ulong) * nw,
1199 0)))
1200 return -TARGET_EFAULT;
1202 k = 0;
1203 for (i = 0; i < nw; i++) {
1204 v = 0;
1205 for (j = 0; j < TARGET_ABI_BITS; j++) {
1206 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1207 k++;
1209 __put_user(v, &target_fds[i]);
1212 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1214 return 0;
1217 #if defined(__alpha__)
1218 #define HOST_HZ 1024
1219 #else
1220 #define HOST_HZ 100
1221 #endif
1223 static inline abi_long host_to_target_clock_t(long ticks)
1225 #if HOST_HZ == TARGET_HZ
1226 return ticks;
1227 #else
1228 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1229 #endif
1232 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1233 const struct rusage *rusage)
1235 struct target_rusage *target_rusage;
1237 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1238 return -TARGET_EFAULT;
1239 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1240 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1241 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1242 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1243 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1244 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1245 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1246 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1247 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1248 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1249 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1250 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1251 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1252 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1253 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1254 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1255 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1256 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1257 unlock_user_struct(target_rusage, target_addr, 1);
1259 return 0;
1262 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1264 abi_ulong target_rlim_swap;
1265 rlim_t result;
1267 target_rlim_swap = tswapal(target_rlim);
1268 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1269 return RLIM_INFINITY;
1271 result = target_rlim_swap;
1272 if (target_rlim_swap != (rlim_t)result)
1273 return RLIM_INFINITY;
1275 return result;
1278 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1280 abi_ulong target_rlim_swap;
1281 abi_ulong result;
1283 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1284 target_rlim_swap = TARGET_RLIM_INFINITY;
1285 else
1286 target_rlim_swap = rlim;
1287 result = tswapal(target_rlim_swap);
1289 return result;
1292 static inline int target_to_host_resource(int code)
1294 switch (code) {
1295 case TARGET_RLIMIT_AS:
1296 return RLIMIT_AS;
1297 case TARGET_RLIMIT_CORE:
1298 return RLIMIT_CORE;
1299 case TARGET_RLIMIT_CPU:
1300 return RLIMIT_CPU;
1301 case TARGET_RLIMIT_DATA:
1302 return RLIMIT_DATA;
1303 case TARGET_RLIMIT_FSIZE:
1304 return RLIMIT_FSIZE;
1305 case TARGET_RLIMIT_LOCKS:
1306 return RLIMIT_LOCKS;
1307 case TARGET_RLIMIT_MEMLOCK:
1308 return RLIMIT_MEMLOCK;
1309 case TARGET_RLIMIT_MSGQUEUE:
1310 return RLIMIT_MSGQUEUE;
1311 case TARGET_RLIMIT_NICE:
1312 return RLIMIT_NICE;
1313 case TARGET_RLIMIT_NOFILE:
1314 return RLIMIT_NOFILE;
1315 case TARGET_RLIMIT_NPROC:
1316 return RLIMIT_NPROC;
1317 case TARGET_RLIMIT_RSS:
1318 return RLIMIT_RSS;
1319 case TARGET_RLIMIT_RTPRIO:
1320 return RLIMIT_RTPRIO;
1321 case TARGET_RLIMIT_SIGPENDING:
1322 return RLIMIT_SIGPENDING;
1323 case TARGET_RLIMIT_STACK:
1324 return RLIMIT_STACK;
1325 default:
1326 return code;
1330 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1331 abi_ulong target_tv_addr)
1333 struct target_timeval *target_tv;
1335 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1336 return -TARGET_EFAULT;
1338 __get_user(tv->tv_sec, &target_tv->tv_sec);
1339 __get_user(tv->tv_usec, &target_tv->tv_usec);
1341 unlock_user_struct(target_tv, target_tv_addr, 0);
1343 return 0;
1346 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1347 const struct timeval *tv)
1349 struct target_timeval *target_tv;
1351 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1352 return -TARGET_EFAULT;
1354 __put_user(tv->tv_sec, &target_tv->tv_sec);
1355 __put_user(tv->tv_usec, &target_tv->tv_usec);
1357 unlock_user_struct(target_tv, target_tv_addr, 1);
1359 return 0;
1362 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1363 abi_ulong target_tz_addr)
1365 struct target_timezone *target_tz;
1367 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1368 return -TARGET_EFAULT;
1371 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1372 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1374 unlock_user_struct(target_tz, target_tz_addr, 0);
1376 return 0;
1379 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1380 #include <mqueue.h>
1382 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1383 abi_ulong target_mq_attr_addr)
1385 struct target_mq_attr *target_mq_attr;
1387 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1388 target_mq_attr_addr, 1))
1389 return -TARGET_EFAULT;
1391 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1392 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1393 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1394 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1396 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1398 return 0;
1401 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1402 const struct mq_attr *attr)
1404 struct target_mq_attr *target_mq_attr;
1406 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1407 target_mq_attr_addr, 0))
1408 return -TARGET_EFAULT;
1410 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1411 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1412 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1413 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1415 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1417 return 0;
1419 #endif
1421 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1422 /* do_select() must return target values and target errnos. */
1423 static abi_long do_select(int n,
1424 abi_ulong rfd_addr, abi_ulong wfd_addr,
1425 abi_ulong efd_addr, abi_ulong target_tv_addr)
1427 fd_set rfds, wfds, efds;
1428 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1429 struct timeval tv;
1430 struct timespec ts, *ts_ptr;
1431 abi_long ret;
1433 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1434 if (ret) {
1435 return ret;
1437 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1438 if (ret) {
1439 return ret;
1441 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1442 if (ret) {
1443 return ret;
1446 if (target_tv_addr) {
1447 if (copy_from_user_timeval(&tv, target_tv_addr))
1448 return -TARGET_EFAULT;
1449 ts.tv_sec = tv.tv_sec;
1450 ts.tv_nsec = tv.tv_usec * 1000;
1451 ts_ptr = &ts;
1452 } else {
1453 ts_ptr = NULL;
1456 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1457 ts_ptr, NULL));
1459 if (!is_error(ret)) {
1460 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1461 return -TARGET_EFAULT;
1462 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1463 return -TARGET_EFAULT;
1464 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1465 return -TARGET_EFAULT;
1467 if (target_tv_addr) {
1468 tv.tv_sec = ts.tv_sec;
1469 tv.tv_usec = ts.tv_nsec / 1000;
1470 if (copy_to_user_timeval(target_tv_addr, &tv)) {
1471 return -TARGET_EFAULT;
1476 return ret;
1479 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1480 static abi_long do_old_select(abi_ulong arg1)
1482 struct target_sel_arg_struct *sel;
1483 abi_ulong inp, outp, exp, tvp;
1484 long nsel;
1486 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1487 return -TARGET_EFAULT;
1490 nsel = tswapal(sel->n);
1491 inp = tswapal(sel->inp);
1492 outp = tswapal(sel->outp);
1493 exp = tswapal(sel->exp);
1494 tvp = tswapal(sel->tvp);
1496 unlock_user_struct(sel, arg1, 0);
1498 return do_select(nsel, inp, outp, exp, tvp);
1500 #endif
1501 #endif
1503 static abi_long do_pipe2(int host_pipe[], int flags)
1505 #ifdef CONFIG_PIPE2
1506 return pipe2(host_pipe, flags);
1507 #else
1508 return -ENOSYS;
1509 #endif
1512 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1513 int flags, int is_pipe2)
1515 int host_pipe[2];
1516 abi_long ret;
1517 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1519 if (is_error(ret))
1520 return get_errno(ret);
1522 /* Several targets have special calling conventions for the original
1523 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1524 if (!is_pipe2) {
1525 #if defined(TARGET_ALPHA)
1526 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1527 return host_pipe[0];
1528 #elif defined(TARGET_MIPS)
1529 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1530 return host_pipe[0];
1531 #elif defined(TARGET_SH4)
1532 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1533 return host_pipe[0];
1534 #elif defined(TARGET_SPARC)
1535 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1536 return host_pipe[0];
1537 #endif
1540 if (put_user_s32(host_pipe[0], pipedes)
1541 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1542 return -TARGET_EFAULT;
1543 return get_errno(ret);
1546 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1547 abi_ulong target_addr,
1548 socklen_t len)
1550 struct target_ip_mreqn *target_smreqn;
1552 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1553 if (!target_smreqn)
1554 return -TARGET_EFAULT;
1555 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1556 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1557 if (len == sizeof(struct target_ip_mreqn))
1558 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1559 unlock_user(target_smreqn, target_addr, 0);
1561 return 0;
1564 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1565 abi_ulong target_addr,
1566 socklen_t len)
1568 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1569 sa_family_t sa_family;
1570 struct target_sockaddr *target_saddr;
1572 if (fd_trans_target_to_host_addr(fd)) {
1573 return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1576 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1577 if (!target_saddr)
1578 return -TARGET_EFAULT;
1580 sa_family = tswap16(target_saddr->sa_family);
1582 /* Oops. The caller might send a incomplete sun_path; sun_path
1583 * must be terminated by \0 (see the manual page), but
1584 * unfortunately it is quite common to specify sockaddr_un
1585 * length as "strlen(x->sun_path)" while it should be
1586 * "strlen(...) + 1". We'll fix that here if needed.
1587 * Linux kernel has a similar feature.
1590 if (sa_family == AF_UNIX) {
1591 if (len < unix_maxlen && len > 0) {
1592 char *cp = (char*)target_saddr;
1594 if ( cp[len-1] && !cp[len] )
1595 len++;
1597 if (len > unix_maxlen)
1598 len = unix_maxlen;
1601 memcpy(addr, target_saddr, len);
1602 addr->sa_family = sa_family;
1603 if (sa_family == AF_NETLINK) {
1604 struct sockaddr_nl *nladdr;
1606 nladdr = (struct sockaddr_nl *)addr;
1607 nladdr->nl_pid = tswap32(nladdr->nl_pid);
1608 nladdr->nl_groups = tswap32(nladdr->nl_groups);
1609 } else if (sa_family == AF_PACKET) {
1610 struct target_sockaddr_ll *lladdr;
1612 lladdr = (struct target_sockaddr_ll *)addr;
1613 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1614 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1616 unlock_user(target_saddr, target_addr, 0);
1618 return 0;
1621 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1622 struct sockaddr *addr,
1623 socklen_t len)
1625 struct target_sockaddr *target_saddr;
1627 if (len == 0) {
1628 return 0;
1630 assert(addr);
1632 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1633 if (!target_saddr)
1634 return -TARGET_EFAULT;
1635 memcpy(target_saddr, addr, len);
1636 if (len >= offsetof(struct target_sockaddr, sa_family) +
1637 sizeof(target_saddr->sa_family)) {
1638 target_saddr->sa_family = tswap16(addr->sa_family);
1640 if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1641 struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1642 target_nl->nl_pid = tswap32(target_nl->nl_pid);
1643 target_nl->nl_groups = tswap32(target_nl->nl_groups);
1644 } else if (addr->sa_family == AF_PACKET) {
1645 struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1646 target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1647 target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1648 } else if (addr->sa_family == AF_INET6 &&
1649 len >= sizeof(struct target_sockaddr_in6)) {
1650 struct target_sockaddr_in6 *target_in6 =
1651 (struct target_sockaddr_in6 *)target_saddr;
1652 target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1654 unlock_user(target_saddr, target_addr, len);
1656 return 0;
1659 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1660 struct target_msghdr *target_msgh)
1662 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1663 abi_long msg_controllen;
1664 abi_ulong target_cmsg_addr;
1665 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1666 socklen_t space = 0;
1668 msg_controllen = tswapal(target_msgh->msg_controllen);
1669 if (msg_controllen < sizeof (struct target_cmsghdr))
1670 goto the_end;
1671 target_cmsg_addr = tswapal(target_msgh->msg_control);
1672 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1673 target_cmsg_start = target_cmsg;
1674 if (!target_cmsg)
1675 return -TARGET_EFAULT;
1677 while (cmsg && target_cmsg) {
1678 void *data = CMSG_DATA(cmsg);
1679 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1681 int len = tswapal(target_cmsg->cmsg_len)
1682 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1684 space += CMSG_SPACE(len);
1685 if (space > msgh->msg_controllen) {
1686 space -= CMSG_SPACE(len);
1687 /* This is a QEMU bug, since we allocated the payload
1688 * area ourselves (unlike overflow in host-to-target
1689 * conversion, which is just the guest giving us a buffer
1690 * that's too small). It can't happen for the payload types
1691 * we currently support; if it becomes an issue in future
1692 * we would need to improve our allocation strategy to
1693 * something more intelligent than "twice the size of the
1694 * target buffer we're reading from".
1696 gemu_log("Host cmsg overflow\n");
1697 break;
1700 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1701 cmsg->cmsg_level = SOL_SOCKET;
1702 } else {
1703 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1705 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1706 cmsg->cmsg_len = CMSG_LEN(len);
1708 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1709 int *fd = (int *)data;
1710 int *target_fd = (int *)target_data;
1711 int i, numfds = len / sizeof(int);
1713 for (i = 0; i < numfds; i++) {
1714 __get_user(fd[i], target_fd + i);
1716 } else if (cmsg->cmsg_level == SOL_SOCKET
1717 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1718 struct ucred *cred = (struct ucred *)data;
1719 struct target_ucred *target_cred =
1720 (struct target_ucred *)target_data;
1722 __get_user(cred->pid, &target_cred->pid);
1723 __get_user(cred->uid, &target_cred->uid);
1724 __get_user(cred->gid, &target_cred->gid);
1725 } else {
1726 gemu_log("Unsupported ancillary data: %d/%d\n",
1727 cmsg->cmsg_level, cmsg->cmsg_type);
1728 memcpy(data, target_data, len);
1731 cmsg = CMSG_NXTHDR(msgh, cmsg);
1732 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1733 target_cmsg_start);
1735 unlock_user(target_cmsg, target_cmsg_addr, 0);
1736 the_end:
1737 msgh->msg_controllen = space;
1738 return 0;
1741 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1742 struct msghdr *msgh)
1744 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1745 abi_long msg_controllen;
1746 abi_ulong target_cmsg_addr;
1747 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1748 socklen_t space = 0;
1750 msg_controllen = tswapal(target_msgh->msg_controllen);
1751 if (msg_controllen < sizeof (struct target_cmsghdr))
1752 goto the_end;
1753 target_cmsg_addr = tswapal(target_msgh->msg_control);
1754 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1755 target_cmsg_start = target_cmsg;
1756 if (!target_cmsg)
1757 return -TARGET_EFAULT;
1759 while (cmsg && target_cmsg) {
1760 void *data = CMSG_DATA(cmsg);
1761 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1763 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1764 int tgt_len, tgt_space;
1766 /* We never copy a half-header but may copy half-data;
1767 * this is Linux's behaviour in put_cmsg(). Note that
1768 * truncation here is a guest problem (which we report
1769 * to the guest via the CTRUNC bit), unlike truncation
1770 * in target_to_host_cmsg, which is a QEMU bug.
1772 if (msg_controllen < sizeof(struct cmsghdr)) {
1773 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1774 break;
1777 if (cmsg->cmsg_level == SOL_SOCKET) {
1778 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1779 } else {
1780 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1782 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1784 tgt_len = TARGET_CMSG_LEN(len);
1786 /* Payload types which need a different size of payload on
1787 * the target must adjust tgt_len here.
1789 switch (cmsg->cmsg_level) {
1790 case SOL_SOCKET:
1791 switch (cmsg->cmsg_type) {
1792 case SO_TIMESTAMP:
1793 tgt_len = sizeof(struct target_timeval);
1794 break;
1795 default:
1796 break;
1798 default:
1799 break;
1802 if (msg_controllen < tgt_len) {
1803 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1804 tgt_len = msg_controllen;
1807 /* We must now copy-and-convert len bytes of payload
1808 * into tgt_len bytes of destination space. Bear in mind
1809 * that in both source and destination we may be dealing
1810 * with a truncated value!
1812 switch (cmsg->cmsg_level) {
1813 case SOL_SOCKET:
1814 switch (cmsg->cmsg_type) {
1815 case SCM_RIGHTS:
1817 int *fd = (int *)data;
1818 int *target_fd = (int *)target_data;
1819 int i, numfds = tgt_len / sizeof(int);
1821 for (i = 0; i < numfds; i++) {
1822 __put_user(fd[i], target_fd + i);
1824 break;
1826 case SO_TIMESTAMP:
1828 struct timeval *tv = (struct timeval *)data;
1829 struct target_timeval *target_tv =
1830 (struct target_timeval *)target_data;
1832 if (len != sizeof(struct timeval) ||
1833 tgt_len != sizeof(struct target_timeval)) {
1834 goto unimplemented;
1837 /* copy struct timeval to target */
1838 __put_user(tv->tv_sec, &target_tv->tv_sec);
1839 __put_user(tv->tv_usec, &target_tv->tv_usec);
1840 break;
1842 case SCM_CREDENTIALS:
1844 struct ucred *cred = (struct ucred *)data;
1845 struct target_ucred *target_cred =
1846 (struct target_ucred *)target_data;
1848 __put_user(cred->pid, &target_cred->pid);
1849 __put_user(cred->uid, &target_cred->uid);
1850 __put_user(cred->gid, &target_cred->gid);
1851 break;
1853 default:
1854 goto unimplemented;
1856 break;
1858 case SOL_IP:
1859 switch (cmsg->cmsg_type) {
1860 case IP_TTL:
1862 uint32_t *v = (uint32_t *)data;
1863 uint32_t *t_int = (uint32_t *)target_data;
1865 __put_user(*v, t_int);
1866 break;
1868 case IP_RECVERR:
1870 struct errhdr_t {
1871 struct sock_extended_err ee;
1872 struct sockaddr_in offender;
1874 struct errhdr_t *errh = (struct errhdr_t *)data;
1875 struct errhdr_t *target_errh =
1876 (struct errhdr_t *)target_data;
1878 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1879 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1880 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1881 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1882 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1883 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1884 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1885 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1886 (void *) &errh->offender, sizeof(errh->offender));
1887 break;
1889 default:
1890 goto unimplemented;
1892 break;
1894 case SOL_IPV6:
1895 switch (cmsg->cmsg_type) {
1896 case IPV6_HOPLIMIT:
1898 uint32_t *v = (uint32_t *)data;
1899 uint32_t *t_int = (uint32_t *)target_data;
1901 __put_user(*v, t_int);
1902 break;
1904 case IPV6_RECVERR:
1906 struct errhdr6_t {
1907 struct sock_extended_err ee;
1908 struct sockaddr_in6 offender;
1910 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1911 struct errhdr6_t *target_errh =
1912 (struct errhdr6_t *)target_data;
1914 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1915 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1916 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1917 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1918 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1919 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1920 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1921 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1922 (void *) &errh->offender, sizeof(errh->offender));
1923 break;
1925 default:
1926 goto unimplemented;
1928 break;
1930 default:
1931 unimplemented:
1932 gemu_log("Unsupported ancillary data: %d/%d\n",
1933 cmsg->cmsg_level, cmsg->cmsg_type);
1934 memcpy(target_data, data, MIN(len, tgt_len));
1935 if (tgt_len > len) {
1936 memset(target_data + len, 0, tgt_len - len);
1940 target_cmsg->cmsg_len = tswapal(tgt_len);
1941 tgt_space = TARGET_CMSG_SPACE(len);
1942 if (msg_controllen < tgt_space) {
1943 tgt_space = msg_controllen;
1945 msg_controllen -= tgt_space;
1946 space += tgt_space;
1947 cmsg = CMSG_NXTHDR(msgh, cmsg);
1948 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1949 target_cmsg_start);
1951 unlock_user(target_cmsg, target_cmsg_addr, space);
1952 the_end:
1953 target_msgh->msg_controllen = tswapal(space);
1954 return 0;
/* Byte-swap all fields of a netlink message header in place. */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
}
1966 static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
1967 size_t len,
1968 abi_long (*host_to_target_nlmsg)
1969 (struct nlmsghdr *))
1971 uint32_t nlmsg_len;
1972 abi_long ret;
1974 while (len > sizeof(struct nlmsghdr)) {
1976 nlmsg_len = nlh->nlmsg_len;
1977 if (nlmsg_len < sizeof(struct nlmsghdr) ||
1978 nlmsg_len > len) {
1979 break;
1982 switch (nlh->nlmsg_type) {
1983 case NLMSG_DONE:
1984 tswap_nlmsghdr(nlh);
1985 return 0;
1986 case NLMSG_NOOP:
1987 break;
1988 case NLMSG_ERROR:
1990 struct nlmsgerr *e = NLMSG_DATA(nlh);
1991 e->error = tswap32(e->error);
1992 tswap_nlmsghdr(&e->msg);
1993 tswap_nlmsghdr(nlh);
1994 return 0;
1996 default:
1997 ret = host_to_target_nlmsg(nlh);
1998 if (ret < 0) {
1999 tswap_nlmsghdr(nlh);
2000 return ret;
2002 break;
2004 tswap_nlmsghdr(nlh);
2005 len -= NLMSG_ALIGN(nlmsg_len);
2006 nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
2008 return 0;
2011 static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
2012 size_t len,
2013 abi_long (*target_to_host_nlmsg)
2014 (struct nlmsghdr *))
2016 int ret;
2018 while (len > sizeof(struct nlmsghdr)) {
2019 if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
2020 tswap32(nlh->nlmsg_len) > len) {
2021 break;
2023 tswap_nlmsghdr(nlh);
2024 switch (nlh->nlmsg_type) {
2025 case NLMSG_DONE:
2026 return 0;
2027 case NLMSG_NOOP:
2028 break;
2029 case NLMSG_ERROR:
2031 struct nlmsgerr *e = NLMSG_DATA(nlh);
2032 e->error = tswap32(e->error);
2033 tswap_nlmsghdr(&e->msg);
2034 return 0;
2036 default:
2037 ret = target_to_host_nlmsg(nlh);
2038 if (ret < 0) {
2039 return ret;
2042 len -= NLMSG_ALIGN(nlh->nlmsg_len);
2043 nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
2045 return 0;
2048 #ifdef CONFIG_RTNETLINK
2049 static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
2050 size_t len, void *context,
2051 abi_long (*host_to_target_nlattr)
2052 (struct nlattr *,
2053 void *context))
2055 unsigned short nla_len;
2056 abi_long ret;
2058 while (len > sizeof(struct nlattr)) {
2059 nla_len = nlattr->nla_len;
2060 if (nla_len < sizeof(struct nlattr) ||
2061 nla_len > len) {
2062 break;
2064 ret = host_to_target_nlattr(nlattr, context);
2065 nlattr->nla_len = tswap16(nlattr->nla_len);
2066 nlattr->nla_type = tswap16(nlattr->nla_type);
2067 if (ret < 0) {
2068 return ret;
2070 len -= NLA_ALIGN(nla_len);
2071 nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
2073 return 0;
2076 static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
2077 size_t len,
2078 abi_long (*host_to_target_rtattr)
2079 (struct rtattr *))
2081 unsigned short rta_len;
2082 abi_long ret;
2084 while (len > sizeof(struct rtattr)) {
2085 rta_len = rtattr->rta_len;
2086 if (rta_len < sizeof(struct rtattr) ||
2087 rta_len > len) {
2088 break;
2090 ret = host_to_target_rtattr(rtattr);
2091 rtattr->rta_len = tswap16(rtattr->rta_len);
2092 rtattr->rta_type = tswap16(rtattr->rta_type);
2093 if (ret < 0) {
2094 return ret;
2096 len -= RTA_ALIGN(rta_len);
2097 rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
2099 return 0;
2102 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
2104 static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
2105 void *context)
2107 uint16_t *u16;
2108 uint32_t *u32;
2109 uint64_t *u64;
2111 switch (nlattr->nla_type) {
2112 /* no data */
2113 case QEMU_IFLA_BR_FDB_FLUSH:
2114 break;
2115 /* binary */
2116 case QEMU_IFLA_BR_GROUP_ADDR:
2117 break;
2118 /* uint8_t */
2119 case QEMU_IFLA_BR_VLAN_FILTERING:
2120 case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
2121 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
2122 case QEMU_IFLA_BR_MCAST_ROUTER:
2123 case QEMU_IFLA_BR_MCAST_SNOOPING:
2124 case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
2125 case QEMU_IFLA_BR_MCAST_QUERIER:
2126 case QEMU_IFLA_BR_NF_CALL_IPTABLES:
2127 case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
2128 case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
2129 break;
2130 /* uint16_t */
2131 case QEMU_IFLA_BR_PRIORITY:
2132 case QEMU_IFLA_BR_VLAN_PROTOCOL:
2133 case QEMU_IFLA_BR_GROUP_FWD_MASK:
2134 case QEMU_IFLA_BR_ROOT_PORT:
2135 case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
2136 u16 = NLA_DATA(nlattr);
2137 *u16 = tswap16(*u16);
2138 break;
2139 /* uint32_t */
2140 case QEMU_IFLA_BR_FORWARD_DELAY:
2141 case QEMU_IFLA_BR_HELLO_TIME:
2142 case QEMU_IFLA_BR_MAX_AGE:
2143 case QEMU_IFLA_BR_AGEING_TIME:
2144 case QEMU_IFLA_BR_STP_STATE:
2145 case QEMU_IFLA_BR_ROOT_PATH_COST:
2146 case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
2147 case QEMU_IFLA_BR_MCAST_HASH_MAX:
2148 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
2149 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
2150 u32 = NLA_DATA(nlattr);
2151 *u32 = tswap32(*u32);
2152 break;
2153 /* uint64_t */
2154 case QEMU_IFLA_BR_HELLO_TIMER:
2155 case QEMU_IFLA_BR_TCN_TIMER:
2156 case QEMU_IFLA_BR_GC_TIMER:
2157 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
2158 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
2159 case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
2160 case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
2161 case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
2162 case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
2163 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
2164 u64 = NLA_DATA(nlattr);
2165 *u64 = tswap64(*u64);
2166 break;
2167 /* ifla_bridge_id: uin8_t[] */
2168 case QEMU_IFLA_BR_ROOT_ID:
2169 case QEMU_IFLA_BR_BRIDGE_ID:
2170 break;
2171 default:
2172 gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
2173 break;
2175 return 0;
2178 static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
2179 void *context)
2181 uint16_t *u16;
2182 uint32_t *u32;
2183 uint64_t *u64;
2185 switch (nlattr->nla_type) {
2186 /* uint8_t */
2187 case QEMU_IFLA_BRPORT_STATE:
2188 case QEMU_IFLA_BRPORT_MODE:
2189 case QEMU_IFLA_BRPORT_GUARD:
2190 case QEMU_IFLA_BRPORT_PROTECT:
2191 case QEMU_IFLA_BRPORT_FAST_LEAVE:
2192 case QEMU_IFLA_BRPORT_LEARNING:
2193 case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
2194 case QEMU_IFLA_BRPORT_PROXYARP:
2195 case QEMU_IFLA_BRPORT_LEARNING_SYNC:
2196 case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
2197 case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
2198 case QEMU_IFLA_BRPORT_CONFIG_PENDING:
2199 case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
2200 break;
2201 /* uint16_t */
2202 case QEMU_IFLA_BRPORT_PRIORITY:
2203 case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
2204 case QEMU_IFLA_BRPORT_DESIGNATED_COST:
2205 case QEMU_IFLA_BRPORT_ID:
2206 case QEMU_IFLA_BRPORT_NO:
2207 u16 = NLA_DATA(nlattr);
2208 *u16 = tswap16(*u16);
2209 break;
2210 /* uin32_t */
2211 case QEMU_IFLA_BRPORT_COST:
2212 u32 = NLA_DATA(nlattr);
2213 *u32 = tswap32(*u32);
2214 break;
2215 /* uint64_t */
2216 case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
2217 case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
2218 case QEMU_IFLA_BRPORT_HOLD_TIMER:
2219 u64 = NLA_DATA(nlattr);
2220 *u64 = tswap64(*u64);
2221 break;
2222 /* ifla_bridge_id: uint8_t[] */
2223 case QEMU_IFLA_BRPORT_ROOT_ID:
2224 case QEMU_IFLA_BRPORT_BRIDGE_ID:
2225 break;
2226 default:
2227 gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
2228 break;
2230 return 0;
/* Carries the IFLA_INFO_KIND / IFLA_INFO_SLAVE_KIND strings seen earlier in
 * an IFLA_LINKINFO nest, so that the nested IFLA_INFO_DATA /
 * IFLA_INFO_SLAVE_DATA payloads can be dispatched by driver kind. */
struct linkinfo_context {
    int len;            /* byte length of 'name' payload */
    char *name;         /* driver kind string, e.g. "bridge" */
    int slave_len;      /* byte length of 'slave_name' payload */
    char *slave_name;   /* slave driver kind string */
};
2240 static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
2241 void *context)
2243 struct linkinfo_context *li_context = context;
2245 switch (nlattr->nla_type) {
2246 /* string */
2247 case QEMU_IFLA_INFO_KIND:
2248 li_context->name = NLA_DATA(nlattr);
2249 li_context->len = nlattr->nla_len - NLA_HDRLEN;
2250 break;
2251 case QEMU_IFLA_INFO_SLAVE_KIND:
2252 li_context->slave_name = NLA_DATA(nlattr);
2253 li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
2254 break;
2255 /* stats */
2256 case QEMU_IFLA_INFO_XSTATS:
2257 /* FIXME: only used by CAN */
2258 break;
2259 /* nested */
2260 case QEMU_IFLA_INFO_DATA:
2261 if (strncmp(li_context->name, "bridge",
2262 li_context->len) == 0) {
2263 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2264 nlattr->nla_len,
2265 NULL,
2266 host_to_target_data_bridge_nlattr);
2267 } else {
2268 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
2270 break;
2271 case QEMU_IFLA_INFO_SLAVE_DATA:
2272 if (strncmp(li_context->slave_name, "bridge",
2273 li_context->slave_len) == 0) {
2274 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2275 nlattr->nla_len,
2276 NULL,
2277 host_to_target_slave_data_bridge_nlattr);
2278 } else {
2279 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2280 li_context->slave_name);
2282 break;
2283 default:
2284 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
2285 break;
2288 return 0;
2291 static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
2292 void *context)
2294 uint32_t *u32;
2295 int i;
2297 switch (nlattr->nla_type) {
2298 case QEMU_IFLA_INET_CONF:
2299 u32 = NLA_DATA(nlattr);
2300 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2301 i++) {
2302 u32[i] = tswap32(u32[i]);
2304 break;
2305 default:
2306 gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
2308 return 0;
2311 static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
2312 void *context)
2314 uint32_t *u32;
2315 uint64_t *u64;
2316 struct ifla_cacheinfo *ci;
2317 int i;
2319 switch (nlattr->nla_type) {
2320 /* binaries */
2321 case QEMU_IFLA_INET6_TOKEN:
2322 break;
2323 /* uint8_t */
2324 case QEMU_IFLA_INET6_ADDR_GEN_MODE:
2325 break;
2326 /* uint32_t */
2327 case QEMU_IFLA_INET6_FLAGS:
2328 u32 = NLA_DATA(nlattr);
2329 *u32 = tswap32(*u32);
2330 break;
2331 /* uint32_t[] */
2332 case QEMU_IFLA_INET6_CONF:
2333 u32 = NLA_DATA(nlattr);
2334 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2335 i++) {
2336 u32[i] = tswap32(u32[i]);
2338 break;
2339 /* ifla_cacheinfo */
2340 case QEMU_IFLA_INET6_CACHEINFO:
2341 ci = NLA_DATA(nlattr);
2342 ci->max_reasm_len = tswap32(ci->max_reasm_len);
2343 ci->tstamp = tswap32(ci->tstamp);
2344 ci->reachable_time = tswap32(ci->reachable_time);
2345 ci->retrans_time = tswap32(ci->retrans_time);
2346 break;
2347 /* uint64_t[] */
2348 case QEMU_IFLA_INET6_STATS:
2349 case QEMU_IFLA_INET6_ICMP6STATS:
2350 u64 = NLA_DATA(nlattr);
2351 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
2352 i++) {
2353 u64[i] = tswap64(u64[i]);
2355 break;
2356 default:
2357 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
2359 return 0;
2362 static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
2363 void *context)
2365 switch (nlattr->nla_type) {
2366 case AF_INET:
2367 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2368 NULL,
2369 host_to_target_data_inet_nlattr);
2370 case AF_INET6:
2371 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2372 NULL,
2373 host_to_target_data_inet6_nlattr);
2374 default:
2375 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
2376 break;
2378 return 0;
/* Byte-swap one link-level (QEMU_IFLA_*) rtnetlink attribute payload from
 * host to target order.  Grouped by value layout; byte-sized and string
 * values are endian-neutral and need no conversion. */
static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct rtnl_link_stats *st;
    struct rtnl_link_stats64 *st64;
    struct rtnl_link_ifmap *map;
    struct linkinfo_context li_context;

    switch (rtattr->rta_type) {
    /* binary stream */
    case QEMU_IFLA_ADDRESS:
    case QEMU_IFLA_BROADCAST:
    /* string */
    case QEMU_IFLA_IFNAME:
    case QEMU_IFLA_QDISC:
        break;
    /* uin8_t */
    case QEMU_IFLA_OPERSTATE:
    case QEMU_IFLA_LINKMODE:
    case QEMU_IFLA_CARRIER:
    case QEMU_IFLA_PROTO_DOWN:
        break;
    /* uint32_t */
    case QEMU_IFLA_MTU:
    case QEMU_IFLA_LINK:
    case QEMU_IFLA_WEIGHT:
    case QEMU_IFLA_TXQLEN:
    case QEMU_IFLA_CARRIER_CHANGES:
    case QEMU_IFLA_NUM_RX_QUEUES:
    case QEMU_IFLA_NUM_TX_QUEUES:
    case QEMU_IFLA_PROMISCUITY:
    case QEMU_IFLA_EXT_MASK:
    case QEMU_IFLA_LINK_NETNSID:
    case QEMU_IFLA_GROUP:
    case QEMU_IFLA_MASTER:
    case QEMU_IFLA_NUM_VF:
    case QEMU_IFLA_GSO_MAX_SEGS:
    case QEMU_IFLA_GSO_MAX_SIZE:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct rtnl_link_stats */
    case QEMU_IFLA_STATS:
        st = RTA_DATA(rtattr);
        st->rx_packets = tswap32(st->rx_packets);
        st->tx_packets = tswap32(st->tx_packets);
        st->rx_bytes = tswap32(st->rx_bytes);
        st->tx_bytes = tswap32(st->tx_bytes);
        st->rx_errors = tswap32(st->rx_errors);
        st->tx_errors = tswap32(st->tx_errors);
        st->rx_dropped = tswap32(st->rx_dropped);
        st->tx_dropped = tswap32(st->tx_dropped);
        st->multicast = tswap32(st->multicast);
        st->collisions = tswap32(st->collisions);

        /* detailed rx_errors: */
        st->rx_length_errors = tswap32(st->rx_length_errors);
        st->rx_over_errors = tswap32(st->rx_over_errors);
        st->rx_crc_errors = tswap32(st->rx_crc_errors);
        st->rx_frame_errors = tswap32(st->rx_frame_errors);
        st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
        st->rx_missed_errors = tswap32(st->rx_missed_errors);

        /* detailed tx_errors */
        st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
        st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
        st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
        st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
        st->tx_window_errors = tswap32(st->tx_window_errors);

        /* for cslip etc */
        st->rx_compressed = tswap32(st->rx_compressed);
        st->tx_compressed = tswap32(st->tx_compressed);
        break;
    /* struct rtnl_link_stats64 */
    case QEMU_IFLA_STATS64:
        st64 = RTA_DATA(rtattr);
        st64->rx_packets = tswap64(st64->rx_packets);
        st64->tx_packets = tswap64(st64->tx_packets);
        st64->rx_bytes = tswap64(st64->rx_bytes);
        st64->tx_bytes = tswap64(st64->tx_bytes);
        st64->rx_errors = tswap64(st64->rx_errors);
        st64->tx_errors = tswap64(st64->tx_errors);
        st64->rx_dropped = tswap64(st64->rx_dropped);
        st64->tx_dropped = tswap64(st64->tx_dropped);
        st64->multicast = tswap64(st64->multicast);
        st64->collisions = tswap64(st64->collisions);

        /* detailed rx_errors: */
        st64->rx_length_errors = tswap64(st64->rx_length_errors);
        st64->rx_over_errors = tswap64(st64->rx_over_errors);
        st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
        st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
        st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
        st64->rx_missed_errors = tswap64(st64->rx_missed_errors);

        /* detailed tx_errors */
        st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
        st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
        st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
        st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
        st64->tx_window_errors = tswap64(st64->tx_window_errors);

        /* for cslip etc */
        st64->rx_compressed = tswap64(st64->rx_compressed);
        st64->tx_compressed = tswap64(st64->tx_compressed);
        break;
    /* struct rtnl_link_ifmap */
    case QEMU_IFLA_MAP:
        map = RTA_DATA(rtattr);
        map->mem_start = tswap64(map->mem_start);
        map->mem_end = tswap64(map->mem_end);
        map->base_addr = tswap64(map->base_addr);
        map->irq = tswap16(map->irq);
        break;
    /* nested */
    /* NOTE(review): the rta_len passed to the nested walkers below includes
     * the rtattr header, overstating the nested payload by RTA_HDRLEN; the
     * walkers' bounds check makes this mostly harmless — confirm. */
    case QEMU_IFLA_LINKINFO:
        memset(&li_context, 0, sizeof(li_context));
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              &li_context,
                                           host_to_target_data_linkinfo_nlattr);
    case QEMU_IFLA_AF_SPEC:
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              NULL,
                                              host_to_target_data_spec_nlattr);
    default:
        gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/* Byte-swap one address-level (IFA_*) rtnetlink attribute payload from host
 * to target order.  Addresses and label strings are endian-neutral. */
static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct ifa_cacheinfo *ci;

    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case IFA_ADDRESS:
    case IFA_LOCAL:
        break;
    /* string */
    case IFA_LABEL:
        break;
    /* u32 */
    case IFA_FLAGS:
    case IFA_BROADCAST:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct ifa_cacheinfo */
    case IFA_CACHEINFO:
        ci = RTA_DATA(rtattr);
        ci->ifa_prefered = tswap32(ci->ifa_prefered);
        ci->ifa_valid = tswap32(ci->ifa_valid);
        ci->cstamp = tswap32(ci->cstamp);
        ci->tstamp = tswap32(ci->tstamp);
        break;
    default:
        gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/* Byte-swap one route-level (RTA_*) rtnetlink attribute payload from host
 * to target order.  Raw address payloads are endian-neutral. */
static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case RTA_GATEWAY:
    case RTA_DST:
    case RTA_PREFSRC:
        break;
    /* u32 */
    case RTA_PRIORITY:
    case RTA_TABLE:
    case RTA_OIF:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    default:
        gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/* Byte-swap every link-level attribute of an RTM_*LINK reply for the
 * target. */
static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_link_rtattr);
}
/* Byte-swap every address-level attribute of an RTM_*ADDR reply for the
 * target. */
static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_addr_rtattr);
}
/* Byte-swap every route-level attribute of an RTM_*ROUTE reply for the
 * target. */
static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
                                            uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_route_rtattr);
}
/* Convert the payload of one RTNETLINK reply message (link, address or
 * route family) from host to target order: swap the fixed-size family
 * header, then walk the trailing rtattr list.  Messages shorter than
 * their family header are passed through untouched; unknown message
 * types are rejected with -TARGET_EINVAL. */
static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
{
    uint32_t nlmsg_len;
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    /* nlh->nlmsg_len is already host order here (the caller swapped it). */
    nlmsg_len = nlh->nlmsg_len;
    switch (nlh->nlmsg_type) {
    case RTM_NEWLINK:
    case RTM_DELLINK:
    case RTM_GETLINK:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            host_to_target_link_rtattr(IFLA_RTA(ifi),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_NEWADDR:
    case RTM_DELADDR:
    case RTM_GETADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            host_to_target_addr_rtattr(IFA_RTA(ifa),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
    case RTM_GETROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            host_to_target_route_rtattr(RTM_RTA(rtm),
                                        nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EINVAL;
    }
    return 0;
}
/* Convert a host-endian RTNETLINK reply stream in place to target order. */
static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
}
2645 static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
2646 size_t len,
2647 abi_long (*target_to_host_rtattr)
2648 (struct rtattr *))
2650 abi_long ret;
2652 while (len >= sizeof(struct rtattr)) {
2653 if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
2654 tswap16(rtattr->rta_len) > len) {
2655 break;
2657 rtattr->rta_len = tswap16(rtattr->rta_len);
2658 rtattr->rta_type = tswap16(rtattr->rta_type);
2659 ret = target_to_host_rtattr(rtattr);
2660 if (ret < 0) {
2661 return ret;
2663 len -= RTA_ALIGN(rtattr->rta_len);
2664 rtattr = (struct rtattr *)(((char *)rtattr) +
2665 RTA_ALIGN(rtattr->rta_len));
2667 return 0;
/* Convert a target->host link-level attribute payload.  No link attribute
 * conversions are implemented yet; unknown types are logged and passed
 * through unchanged. */
static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
{
    switch (rtattr->rta_type) {
    default:
        gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/* Convert a target->host address-level attribute payload.  Address blobs
 * are endian-neutral, so nothing needs swapping. */
static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
{
    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case IFA_LOCAL:
    case IFA_ADDRESS:
        break;
    default:
        gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/* Convert a target->host route-level attribute payload: swap 32-bit scalar
 * values; raw address payloads are endian-neutral. */
static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case RTA_DST:
    case RTA_SRC:
    case RTA_GATEWAY:
        break;
    /* u32 */
    case RTA_PRIORITY:
    case RTA_OIF:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    default:
        gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/* Convert every link-level attribute of a target RTM_*LINK request to host
 * order. */
static void target_to_host_link_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_link_rtattr);
}
/* Convert every address-level attribute of a target RTM_*ADDR request to
 * host order. */
static void target_to_host_addr_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_addr_rtattr);
}
/* Convert every route-level attribute of a target RTM_*ROUTE request to
 * host order. */
static void target_to_host_route_rtattr(struct rtattr *rtattr,
                                        uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_route_rtattr);
}
/* Convert the payload of one RTNETLINK request (link, address or route
 * family) from target to host order.  GET* dump requests carry no family
 * header to convert; unknown types are rejected with -TARGET_EOPNOTSUPP. */
static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
{
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    switch (nlh->nlmsg_type) {
    case RTM_GETLINK:
        break;
    case RTM_NEWLINK:
    case RTM_DELLINK:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_GETADDR:
    case RTM_NEWADDR:
    case RTM_DELADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_GETROUTE:
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
                                        NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EOPNOTSUPP;
    }
    return 0;
}
/* Convert a target-endian RTNETLINK request stream in place to host order. */
static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
}
2789 #endif /* CONFIG_RTNETLINK */
/* Convert the payload of one audit-netlink reply for the target.  No audit
 * payload conversions are implemented yet, so every type is rejected. */
static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
{
    switch (nlh->nlmsg_type) {
    default:
        gemu_log("Unknown host audit message type %d\n",
                 nlh->nlmsg_type);
        return -TARGET_EINVAL;
    }
    return 0;
}
/* Convert a host-endian audit-netlink reply stream in place to target
 * order. */
static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
}
/* Validate one target->host audit-netlink message.  User-originated audit
 * records carry opaque byte-string payloads, so they need no swapping and
 * are passed through; everything else is rejected with -TARGET_EINVAL. */
static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
{
    switch (nlh->nlmsg_type) {
    case AUDIT_USER:
    case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
    case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
        break;
    default:
        gemu_log("Unknown target audit message type %d\n",
                 nlh->nlmsg_type);
        return -TARGET_EINVAL;
    }

    return 0;
}
/* Convert a target-endian audit-netlink request stream in place to host
 * order. */
static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
}
2829 /* do_setsockopt() Must return target values and target errnos. */
2830 static abi_long do_setsockopt(int sockfd, int level, int optname,
2831 abi_ulong optval_addr, socklen_t optlen)
2833 abi_long ret;
2834 int val;
2835 struct ip_mreqn *ip_mreq;
2836 struct ip_mreq_source *ip_mreq_source;
2838 switch(level) {
2839 case SOL_TCP:
2840 /* TCP options all take an 'int' value. */
2841 if (optlen < sizeof(uint32_t))
2842 return -TARGET_EINVAL;
2844 if (get_user_u32(val, optval_addr))
2845 return -TARGET_EFAULT;
2846 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2847 break;
2848 case SOL_IP:
2849 switch(optname) {
2850 case IP_TOS:
2851 case IP_TTL:
2852 case IP_HDRINCL:
2853 case IP_ROUTER_ALERT:
2854 case IP_RECVOPTS:
2855 case IP_RETOPTS:
2856 case IP_PKTINFO:
2857 case IP_MTU_DISCOVER:
2858 case IP_RECVERR:
2859 case IP_RECVTTL:
2860 case IP_RECVTOS:
2861 #ifdef IP_FREEBIND
2862 case IP_FREEBIND:
2863 #endif
2864 case IP_MULTICAST_TTL:
2865 case IP_MULTICAST_LOOP:
2866 val = 0;
2867 if (optlen >= sizeof(uint32_t)) {
2868 if (get_user_u32(val, optval_addr))
2869 return -TARGET_EFAULT;
2870 } else if (optlen >= 1) {
2871 if (get_user_u8(val, optval_addr))
2872 return -TARGET_EFAULT;
2874 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2875 break;
2876 case IP_ADD_MEMBERSHIP:
2877 case IP_DROP_MEMBERSHIP:
2878 if (optlen < sizeof (struct target_ip_mreq) ||
2879 optlen > sizeof (struct target_ip_mreqn))
2880 return -TARGET_EINVAL;
2882 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2883 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2884 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2885 break;
2887 case IP_BLOCK_SOURCE:
2888 case IP_UNBLOCK_SOURCE:
2889 case IP_ADD_SOURCE_MEMBERSHIP:
2890 case IP_DROP_SOURCE_MEMBERSHIP:
2891 if (optlen != sizeof (struct target_ip_mreq_source))
2892 return -TARGET_EINVAL;
2894 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2895 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2896 unlock_user (ip_mreq_source, optval_addr, 0);
2897 break;
2899 default:
2900 goto unimplemented;
2902 break;
2903 case SOL_IPV6:
2904 switch (optname) {
2905 case IPV6_MTU_DISCOVER:
2906 case IPV6_MTU:
2907 case IPV6_V6ONLY:
2908 case IPV6_RECVPKTINFO:
2909 case IPV6_UNICAST_HOPS:
2910 case IPV6_RECVERR:
2911 case IPV6_RECVHOPLIMIT:
2912 case IPV6_2292HOPLIMIT:
2913 case IPV6_CHECKSUM:
2914 val = 0;
2915 if (optlen < sizeof(uint32_t)) {
2916 return -TARGET_EINVAL;
2918 if (get_user_u32(val, optval_addr)) {
2919 return -TARGET_EFAULT;
2921 ret = get_errno(setsockopt(sockfd, level, optname,
2922 &val, sizeof(val)));
2923 break;
2924 case IPV6_PKTINFO:
2926 struct in6_pktinfo pki;
2928 if (optlen < sizeof(pki)) {
2929 return -TARGET_EINVAL;
2932 if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2933 return -TARGET_EFAULT;
2936 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2938 ret = get_errno(setsockopt(sockfd, level, optname,
2939 &pki, sizeof(pki)));
2940 break;
2942 default:
2943 goto unimplemented;
2945 break;
2946 case SOL_ICMPV6:
2947 switch (optname) {
2948 case ICMPV6_FILTER:
2950 struct icmp6_filter icmp6f;
2952 if (optlen > sizeof(icmp6f)) {
2953 optlen = sizeof(icmp6f);
2956 if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2957 return -TARGET_EFAULT;
2960 for (val = 0; val < 8; val++) {
2961 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2964 ret = get_errno(setsockopt(sockfd, level, optname,
2965 &icmp6f, optlen));
2966 break;
2968 default:
2969 goto unimplemented;
2971 break;
2972 case SOL_RAW:
2973 switch (optname) {
2974 case ICMP_FILTER:
2975 case IPV6_CHECKSUM:
2976 /* those take an u32 value */
2977 if (optlen < sizeof(uint32_t)) {
2978 return -TARGET_EINVAL;
2981 if (get_user_u32(val, optval_addr)) {
2982 return -TARGET_EFAULT;
2984 ret = get_errno(setsockopt(sockfd, level, optname,
2985 &val, sizeof(val)));
2986 break;
2988 default:
2989 goto unimplemented;
2991 break;
2992 case TARGET_SOL_SOCKET:
2993 switch (optname) {
2994 case TARGET_SO_RCVTIMEO:
2996 struct timeval tv;
2998 optname = SO_RCVTIMEO;
3000 set_timeout:
3001 if (optlen != sizeof(struct target_timeval)) {
3002 return -TARGET_EINVAL;
3005 if (copy_from_user_timeval(&tv, optval_addr)) {
3006 return -TARGET_EFAULT;
3009 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3010 &tv, sizeof(tv)));
3011 return ret;
3013 case TARGET_SO_SNDTIMEO:
3014 optname = SO_SNDTIMEO;
3015 goto set_timeout;
3016 case TARGET_SO_ATTACH_FILTER:
3018 struct target_sock_fprog *tfprog;
3019 struct target_sock_filter *tfilter;
3020 struct sock_fprog fprog;
3021 struct sock_filter *filter;
3022 int i;
3024 if (optlen != sizeof(*tfprog)) {
3025 return -TARGET_EINVAL;
3027 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
3028 return -TARGET_EFAULT;
3030 if (!lock_user_struct(VERIFY_READ, tfilter,
3031 tswapal(tfprog->filter), 0)) {
3032 unlock_user_struct(tfprog, optval_addr, 1);
3033 return -TARGET_EFAULT;
3036 fprog.len = tswap16(tfprog->len);
3037 filter = g_try_new(struct sock_filter, fprog.len);
3038 if (filter == NULL) {
3039 unlock_user_struct(tfilter, tfprog->filter, 1);
3040 unlock_user_struct(tfprog, optval_addr, 1);
3041 return -TARGET_ENOMEM;
3043 for (i = 0; i < fprog.len; i++) {
3044 filter[i].code = tswap16(tfilter[i].code);
3045 filter[i].jt = tfilter[i].jt;
3046 filter[i].jf = tfilter[i].jf;
3047 filter[i].k = tswap32(tfilter[i].k);
3049 fprog.filter = filter;
3051 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
3052 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
3053 g_free(filter);
3055 unlock_user_struct(tfilter, tfprog->filter, 1);
3056 unlock_user_struct(tfprog, optval_addr, 1);
3057 return ret;
3059 case TARGET_SO_BINDTODEVICE:
3061 char *dev_ifname, *addr_ifname;
3063 if (optlen > IFNAMSIZ - 1) {
3064 optlen = IFNAMSIZ - 1;
3066 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
3067 if (!dev_ifname) {
3068 return -TARGET_EFAULT;
3070 optname = SO_BINDTODEVICE;
3071 addr_ifname = alloca(IFNAMSIZ);
3072 memcpy(addr_ifname, dev_ifname, optlen);
3073 addr_ifname[optlen] = 0;
3074 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3075 addr_ifname, optlen));
3076 unlock_user (dev_ifname, optval_addr, 0);
3077 return ret;
3079 /* Options with 'int' argument. */
3080 case TARGET_SO_DEBUG:
3081 optname = SO_DEBUG;
3082 break;
3083 case TARGET_SO_REUSEADDR:
3084 optname = SO_REUSEADDR;
3085 break;
3086 case TARGET_SO_TYPE:
3087 optname = SO_TYPE;
3088 break;
3089 case TARGET_SO_ERROR:
3090 optname = SO_ERROR;
3091 break;
3092 case TARGET_SO_DONTROUTE:
3093 optname = SO_DONTROUTE;
3094 break;
3095 case TARGET_SO_BROADCAST:
3096 optname = SO_BROADCAST;
3097 break;
3098 case TARGET_SO_SNDBUF:
3099 optname = SO_SNDBUF;
3100 break;
3101 case TARGET_SO_SNDBUFFORCE:
3102 optname = SO_SNDBUFFORCE;
3103 break;
3104 case TARGET_SO_RCVBUF:
3105 optname = SO_RCVBUF;
3106 break;
3107 case TARGET_SO_RCVBUFFORCE:
3108 optname = SO_RCVBUFFORCE;
3109 break;
3110 case TARGET_SO_KEEPALIVE:
3111 optname = SO_KEEPALIVE;
3112 break;
3113 case TARGET_SO_OOBINLINE:
3114 optname = SO_OOBINLINE;
3115 break;
3116 case TARGET_SO_NO_CHECK:
3117 optname = SO_NO_CHECK;
3118 break;
3119 case TARGET_SO_PRIORITY:
3120 optname = SO_PRIORITY;
3121 break;
3122 #ifdef SO_BSDCOMPAT
3123 case TARGET_SO_BSDCOMPAT:
3124 optname = SO_BSDCOMPAT;
3125 break;
3126 #endif
3127 case TARGET_SO_PASSCRED:
3128 optname = SO_PASSCRED;
3129 break;
3130 case TARGET_SO_PASSSEC:
3131 optname = SO_PASSSEC;
3132 break;
3133 case TARGET_SO_TIMESTAMP:
3134 optname = SO_TIMESTAMP;
3135 break;
3136 case TARGET_SO_RCVLOWAT:
3137 optname = SO_RCVLOWAT;
3138 break;
3139 default:
3140 goto unimplemented;
3142 if (optlen < sizeof(uint32_t))
3143 return -TARGET_EINVAL;
3145 if (get_user_u32(val, optval_addr))
3146 return -TARGET_EFAULT;
3147 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
3148 break;
3149 default:
3150 unimplemented:
3151 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
3152 ret = -TARGET_ENOPROTOOPT;
3154 return ret;
/* do_getsockopt() Must return target values and target errnos.
 *
 * Translates a guest getsockopt(2): remaps target option levels/names to
 * host values, performs the host call, and writes the result (value and
 * updated length) back to guest memory in target format. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            /* struct ucred: copied field by field into the target layout;
             * the reported length is clamped to what the host produced. */
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            /* NOTE(review): unknown SOL_SOCKET names fall through to the
             * host call with the *target* optname unconverted — confirm
             * this pass-through is intentional. */
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* NOTE(review): initialized with sizeof(lv), not sizeof(val) —
         * correct only because socklen_t and int have the same size on
         * supported hosts. */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                /* Caller asked for a short result and the value fits in
                 * one byte: return a single byte. */
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
3345 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3346 abi_ulong count, int copy)
3348 struct target_iovec *target_vec;
3349 struct iovec *vec;
3350 abi_ulong total_len, max_len;
3351 int i;
3352 int err = 0;
3353 bool bad_address = false;
3355 if (count == 0) {
3356 errno = 0;
3357 return NULL;
3359 if (count > IOV_MAX) {
3360 errno = EINVAL;
3361 return NULL;
3364 vec = g_try_new0(struct iovec, count);
3365 if (vec == NULL) {
3366 errno = ENOMEM;
3367 return NULL;
3370 target_vec = lock_user(VERIFY_READ, target_addr,
3371 count * sizeof(struct target_iovec), 1);
3372 if (target_vec == NULL) {
3373 err = EFAULT;
3374 goto fail2;
3377 /* ??? If host page size > target page size, this will result in a
3378 value larger than what we can actually support. */
3379 max_len = 0x7fffffff & TARGET_PAGE_MASK;
3380 total_len = 0;
3382 for (i = 0; i < count; i++) {
3383 abi_ulong base = tswapal(target_vec[i].iov_base);
3384 abi_long len = tswapal(target_vec[i].iov_len);
3386 if (len < 0) {
3387 err = EINVAL;
3388 goto fail;
3389 } else if (len == 0) {
3390 /* Zero length pointer is ignored. */
3391 vec[i].iov_base = 0;
3392 } else {
3393 vec[i].iov_base = lock_user(type, base, len, copy);
3394 /* If the first buffer pointer is bad, this is a fault. But
3395 * subsequent bad buffers will result in a partial write; this
3396 * is realized by filling the vector with null pointers and
3397 * zero lengths. */
3398 if (!vec[i].iov_base) {
3399 if (i == 0) {
3400 err = EFAULT;
3401 goto fail;
3402 } else {
3403 bad_address = true;
3406 if (bad_address) {
3407 len = 0;
3409 if (len > max_len - total_len) {
3410 len = max_len - total_len;
3413 vec[i].iov_len = len;
3414 total_len += len;
3417 unlock_user(target_vec, target_addr, 0);
3418 return vec;
3420 fail:
3421 while (--i >= 0) {
3422 if (tswapal(target_vec[i].iov_len) > 0) {
3423 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3426 unlock_user(target_vec, target_addr, 0);
3427 fail2:
3428 g_free(vec);
3429 errno = err;
3430 return NULL;
3433 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3434 abi_ulong count, int copy)
3436 struct target_iovec *target_vec;
3437 int i;
3439 target_vec = lock_user(VERIFY_READ, target_addr,
3440 count * sizeof(struct target_iovec), 1);
3441 if (target_vec) {
3442 for (i = 0; i < count; i++) {
3443 abi_ulong base = tswapal(target_vec[i].iov_base);
3444 abi_long len = tswapal(target_vec[i].iov_len);
3445 if (len < 0) {
3446 break;
3448 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3450 unlock_user(target_vec, target_addr, 0);
3453 g_free(vec);
3456 static inline int target_to_host_sock_type(int *type)
3458 int host_type = 0;
3459 int target_type = *type;
3461 switch (target_type & TARGET_SOCK_TYPE_MASK) {
3462 case TARGET_SOCK_DGRAM:
3463 host_type = SOCK_DGRAM;
3464 break;
3465 case TARGET_SOCK_STREAM:
3466 host_type = SOCK_STREAM;
3467 break;
3468 default:
3469 host_type = target_type & TARGET_SOCK_TYPE_MASK;
3470 break;
3472 if (target_type & TARGET_SOCK_CLOEXEC) {
3473 #if defined(SOCK_CLOEXEC)
3474 host_type |= SOCK_CLOEXEC;
3475 #else
3476 return -TARGET_EINVAL;
3477 #endif
3479 if (target_type & TARGET_SOCK_NONBLOCK) {
3480 #if defined(SOCK_NONBLOCK)
3481 host_type |= SOCK_NONBLOCK;
3482 #elif !defined(O_NONBLOCK)
3483 return -TARGET_EINVAL;
3484 #endif
3486 *type = host_type;
3487 return 0;
3490 /* Try to emulate socket type flags after socket creation. */
3491 static int sock_flags_fixup(int fd, int target_type)
3493 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3494 if (target_type & TARGET_SOCK_NONBLOCK) {
3495 int flags = fcntl(fd, F_GETFL);
3496 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3497 close(fd);
3498 return -TARGET_EINVAL;
3501 #endif
3502 return fd;
3505 static abi_long packet_target_to_host_sockaddr(void *host_addr,
3506 abi_ulong target_addr,
3507 socklen_t len)
3509 struct sockaddr *addr = host_addr;
3510 struct target_sockaddr *target_saddr;
3512 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
3513 if (!target_saddr) {
3514 return -TARGET_EFAULT;
3517 memcpy(addr, target_saddr, len);
3518 addr->sa_family = tswap16(target_saddr->sa_family);
3519 /* spkt_protocol is big-endian */
3521 unlock_user(target_saddr, target_addr, 0);
3522 return 0;
3525 static TargetFdTrans target_packet_trans = {
3526 .target_to_host_addr = packet_target_to_host_sockaddr,
#ifdef CONFIG_RTNETLINK
/* Translate outgoing NETLINK_ROUTE messages to host byte order.
 * Returns the (unchanged) length on success or a negative errno.
 */
static abi_long netlink_route_target_to_host(void *buf, size_t len)
{
    abi_long ret;

    ret = target_to_host_nlmsg_route(buf, len);
    if (ret < 0) {
        return ret;
    }

    return len;
}

/* Translate incoming NETLINK_ROUTE messages to target byte order. */
static abi_long netlink_route_host_to_target(void *buf, size_t len)
{
    abi_long ret;

    ret = host_to_target_nlmsg_route(buf, len);
    if (ret < 0) {
        return ret;
    }

    return len;
}

static TargetFdTrans target_netlink_route_trans = {
    .target_to_host_data = netlink_route_target_to_host,
    .host_to_target_data = netlink_route_host_to_target,
};
#endif /* CONFIG_RTNETLINK */
3560 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
3562 abi_long ret;
3564 ret = target_to_host_nlmsg_audit(buf, len);
3565 if (ret < 0) {
3566 return ret;
3569 return len;
3572 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
3574 abi_long ret;
3576 ret = host_to_target_nlmsg_audit(buf, len);
3577 if (ret < 0) {
3578 return ret;
3581 return len;
3584 static TargetFdTrans target_netlink_audit_trans = {
3585 .target_to_host_data = netlink_audit_target_to_host,
3586 .host_to_target_data = netlink_audit_host_to_target,
3589 /* do_socket() Must return target values and target errnos. */
3590 static abi_long do_socket(int domain, int type, int protocol)
3592 int target_type = type;
3593 int ret;
3595 ret = target_to_host_sock_type(&type);
3596 if (ret) {
3597 return ret;
3600 if (domain == PF_NETLINK && !(
3601 #ifdef CONFIG_RTNETLINK
3602 protocol == NETLINK_ROUTE ||
3603 #endif
3604 protocol == NETLINK_KOBJECT_UEVENT ||
3605 protocol == NETLINK_AUDIT)) {
3606 return -EPFNOSUPPORT;
3609 if (domain == AF_PACKET ||
3610 (domain == AF_INET && type == SOCK_PACKET)) {
3611 protocol = tswap16(protocol);
3614 ret = get_errno(socket(domain, type, protocol));
3615 if (ret >= 0) {
3616 ret = sock_flags_fixup(ret, target_type);
3617 if (type == SOCK_PACKET) {
3618 /* Manage an obsolete case :
3619 * if socket type is SOCK_PACKET, bind by name
3621 fd_trans_register(ret, &target_packet_trans);
3622 } else if (domain == PF_NETLINK) {
3623 switch (protocol) {
3624 #ifdef CONFIG_RTNETLINK
3625 case NETLINK_ROUTE:
3626 fd_trans_register(ret, &target_netlink_route_trans);
3627 break;
3628 #endif
3629 case NETLINK_KOBJECT_UEVENT:
3630 /* nothing to do: messages are strings */
3631 break;
3632 case NETLINK_AUDIT:
3633 fd_trans_register(ret, &target_netlink_audit_trans);
3634 break;
3635 default:
3636 g_assert_not_reached();
3640 return ret;
3643 /* do_bind() Must return target values and target errnos. */
3644 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3645 socklen_t addrlen)
3647 void *addr;
3648 abi_long ret;
3650 if ((int)addrlen < 0) {
3651 return -TARGET_EINVAL;
3654 addr = alloca(addrlen+1);
3656 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3657 if (ret)
3658 return ret;
3660 return get_errno(bind(sockfd, addr, addrlen));
3663 /* do_connect() Must return target values and target errnos. */
3664 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3665 socklen_t addrlen)
3667 void *addr;
3668 abi_long ret;
3670 if ((int)addrlen < 0) {
3671 return -TARGET_EINVAL;
3674 addr = alloca(addrlen+1);
3676 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3677 if (ret)
3678 return ret;
3680 return get_errno(safe_connect(sockfd, addr, addrlen));
3683 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3684 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3685 int flags, int send)
3687 abi_long ret, len;
3688 struct msghdr msg;
3689 abi_ulong count;
3690 struct iovec *vec;
3691 abi_ulong target_vec;
3693 if (msgp->msg_name) {
3694 msg.msg_namelen = tswap32(msgp->msg_namelen);
3695 msg.msg_name = alloca(msg.msg_namelen+1);
3696 ret = target_to_host_sockaddr(fd, msg.msg_name,
3697 tswapal(msgp->msg_name),
3698 msg.msg_namelen);
3699 if (ret == -TARGET_EFAULT) {
3700 /* For connected sockets msg_name and msg_namelen must
3701 * be ignored, so returning EFAULT immediately is wrong.
3702 * Instead, pass a bad msg_name to the host kernel, and
3703 * let it decide whether to return EFAULT or not.
3705 msg.msg_name = (void *)-1;
3706 } else if (ret) {
3707 goto out2;
3709 } else {
3710 msg.msg_name = NULL;
3711 msg.msg_namelen = 0;
3713 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3714 msg.msg_control = alloca(msg.msg_controllen);
3715 msg.msg_flags = tswap32(msgp->msg_flags);
3717 count = tswapal(msgp->msg_iovlen);
3718 target_vec = tswapal(msgp->msg_iov);
3720 if (count > IOV_MAX) {
3721 /* sendrcvmsg returns a different errno for this condition than
3722 * readv/writev, so we must catch it here before lock_iovec() does.
3724 ret = -TARGET_EMSGSIZE;
3725 goto out2;
3728 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3729 target_vec, count, send);
3730 if (vec == NULL) {
3731 ret = -host_to_target_errno(errno);
3732 goto out2;
3734 msg.msg_iovlen = count;
3735 msg.msg_iov = vec;
3737 if (send) {
3738 if (fd_trans_target_to_host_data(fd)) {
3739 void *host_msg;
3741 host_msg = g_malloc(msg.msg_iov->iov_len);
3742 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3743 ret = fd_trans_target_to_host_data(fd)(host_msg,
3744 msg.msg_iov->iov_len);
3745 if (ret >= 0) {
3746 msg.msg_iov->iov_base = host_msg;
3747 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3749 g_free(host_msg);
3750 } else {
3751 ret = target_to_host_cmsg(&msg, msgp);
3752 if (ret == 0) {
3753 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3756 } else {
3757 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3758 if (!is_error(ret)) {
3759 len = ret;
3760 if (fd_trans_host_to_target_data(fd)) {
3761 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3762 len);
3763 } else {
3764 ret = host_to_target_cmsg(msgp, &msg);
3766 if (!is_error(ret)) {
3767 msgp->msg_namelen = tswap32(msg.msg_namelen);
3768 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3769 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3770 msg.msg_name, msg.msg_namelen);
3771 if (ret) {
3772 goto out;
3776 ret = len;
3781 out:
3782 unlock_iovec(vec, target_vec, count, !send);
3783 out2:
3784 return ret;
3787 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3788 int flags, int send)
3790 abi_long ret;
3791 struct target_msghdr *msgp;
3793 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3794 msgp,
3795 target_msg,
3796 send ? 1 : 0)) {
3797 return -TARGET_EFAULT;
3799 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3800 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3801 return ret;
3804 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3805 * so it might not have this *mmsg-specific flag either.
3807 #ifndef MSG_WAITFORONE
3808 #define MSG_WAITFORONE 0x10000
3809 #endif
3811 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3812 unsigned int vlen, unsigned int flags,
3813 int send)
3815 struct target_mmsghdr *mmsgp;
3816 abi_long ret = 0;
3817 int i;
3819 if (vlen > UIO_MAXIOV) {
3820 vlen = UIO_MAXIOV;
3823 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3824 if (!mmsgp) {
3825 return -TARGET_EFAULT;
3828 for (i = 0; i < vlen; i++) {
3829 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3830 if (is_error(ret)) {
3831 break;
3833 mmsgp[i].msg_len = tswap32(ret);
3834 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3835 if (flags & MSG_WAITFORONE) {
3836 flags |= MSG_DONTWAIT;
3840 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3842 /* Return number of datagrams sent if we sent any at all;
3843 * otherwise return the error.
3845 if (i) {
3846 return i;
3848 return ret;
3851 /* do_accept4() Must return target values and target errnos. */
3852 static abi_long do_accept4(int fd, abi_ulong target_addr,
3853 abi_ulong target_addrlen_addr, int flags)
3855 socklen_t addrlen;
3856 void *addr;
3857 abi_long ret;
3858 int host_flags;
3860 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3862 if (target_addr == 0) {
3863 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3866 /* linux returns EINVAL if addrlen pointer is invalid */
3867 if (get_user_u32(addrlen, target_addrlen_addr))
3868 return -TARGET_EINVAL;
3870 if ((int)addrlen < 0) {
3871 return -TARGET_EINVAL;
3874 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3875 return -TARGET_EINVAL;
3877 addr = alloca(addrlen);
3879 ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
3880 if (!is_error(ret)) {
3881 host_to_target_sockaddr(target_addr, addr, addrlen);
3882 if (put_user_u32(addrlen, target_addrlen_addr))
3883 ret = -TARGET_EFAULT;
3885 return ret;
3888 /* do_getpeername() Must return target values and target errnos. */
3889 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3890 abi_ulong target_addrlen_addr)
3892 socklen_t addrlen;
3893 void *addr;
3894 abi_long ret;
3896 if (get_user_u32(addrlen, target_addrlen_addr))
3897 return -TARGET_EFAULT;
3899 if ((int)addrlen < 0) {
3900 return -TARGET_EINVAL;
3903 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3904 return -TARGET_EFAULT;
3906 addr = alloca(addrlen);
3908 ret = get_errno(getpeername(fd, addr, &addrlen));
3909 if (!is_error(ret)) {
3910 host_to_target_sockaddr(target_addr, addr, addrlen);
3911 if (put_user_u32(addrlen, target_addrlen_addr))
3912 ret = -TARGET_EFAULT;
3914 return ret;
3917 /* do_getsockname() Must return target values and target errnos. */
3918 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3919 abi_ulong target_addrlen_addr)
3921 socklen_t addrlen;
3922 void *addr;
3923 abi_long ret;
3925 if (get_user_u32(addrlen, target_addrlen_addr))
3926 return -TARGET_EFAULT;
3928 if ((int)addrlen < 0) {
3929 return -TARGET_EINVAL;
3932 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3933 return -TARGET_EFAULT;
3935 addr = alloca(addrlen);
3937 ret = get_errno(getsockname(fd, addr, &addrlen));
3938 if (!is_error(ret)) {
3939 host_to_target_sockaddr(target_addr, addr, addrlen);
3940 if (put_user_u32(addrlen, target_addrlen_addr))
3941 ret = -TARGET_EFAULT;
3943 return ret;
3946 /* do_socketpair() Must return target values and target errnos. */
3947 static abi_long do_socketpair(int domain, int type, int protocol,
3948 abi_ulong target_tab_addr)
3950 int tab[2];
3951 abi_long ret;
3953 target_to_host_sock_type(&type);
3955 ret = get_errno(socketpair(domain, type, protocol, tab));
3956 if (!is_error(ret)) {
3957 if (put_user_s32(tab[0], target_tab_addr)
3958 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3959 ret = -TARGET_EFAULT;
3961 return ret;
3964 /* do_sendto() Must return target values and target errnos. */
3965 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3966 abi_ulong target_addr, socklen_t addrlen)
3968 void *addr;
3969 void *host_msg;
3970 void *copy_msg = NULL;
3971 abi_long ret;
3973 if ((int)addrlen < 0) {
3974 return -TARGET_EINVAL;
3977 host_msg = lock_user(VERIFY_READ, msg, len, 1);
3978 if (!host_msg)
3979 return -TARGET_EFAULT;
3980 if (fd_trans_target_to_host_data(fd)) {
3981 copy_msg = host_msg;
3982 host_msg = g_malloc(len);
3983 memcpy(host_msg, copy_msg, len);
3984 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3985 if (ret < 0) {
3986 goto fail;
3989 if (target_addr) {
3990 addr = alloca(addrlen+1);
3991 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3992 if (ret) {
3993 goto fail;
3995 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3996 } else {
3997 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3999 fail:
4000 if (copy_msg) {
4001 g_free(host_msg);
4002 host_msg = copy_msg;
4004 unlock_user(host_msg, msg, 0);
4005 return ret;
4008 /* do_recvfrom() Must return target values and target errnos. */
4009 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
4010 abi_ulong target_addr,
4011 abi_ulong target_addrlen)
4013 socklen_t addrlen;
4014 void *addr;
4015 void *host_msg;
4016 abi_long ret;
4018 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
4019 if (!host_msg)
4020 return -TARGET_EFAULT;
4021 if (target_addr) {
4022 if (get_user_u32(addrlen, target_addrlen)) {
4023 ret = -TARGET_EFAULT;
4024 goto fail;
4026 if ((int)addrlen < 0) {
4027 ret = -TARGET_EINVAL;
4028 goto fail;
4030 addr = alloca(addrlen);
4031 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
4032 addr, &addrlen));
4033 } else {
4034 addr = NULL; /* To keep compiler quiet. */
4035 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
4037 if (!is_error(ret)) {
4038 if (fd_trans_host_to_target_data(fd)) {
4039 ret = fd_trans_host_to_target_data(fd)(host_msg, ret);
4041 if (target_addr) {
4042 host_to_target_sockaddr(target_addr, addr, addrlen);
4043 if (put_user_u32(addrlen, target_addrlen)) {
4044 ret = -TARGET_EFAULT;
4045 goto fail;
4048 unlock_user(host_msg, msg, len);
4049 } else {
4050 fail:
4051 unlock_user(host_msg, msg, 0);
4053 return ret;
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos.
 * Demultiplexes the legacy socketcall(2) syscall: reads nargs[num]
 * abi_long arguments from guest memory at vptr and dispatches to the
 * matching do_* helper.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
4149 #define N_SHM_REGIONS 32
4151 static struct shm_region {
4152 abi_ulong start;
4153 abi_ulong size;
4154 bool in_use;
4155 } shm_regions[N_SHM_REGIONS];
4157 #ifndef TARGET_SEMID64_DS
4158 /* asm-generic version of this struct */
4159 struct target_semid64_ds
4161 struct target_ipc_perm sem_perm;
4162 abi_ulong sem_otime;
4163 #if TARGET_ABI_BITS == 32
4164 abi_ulong __unused1;
4165 #endif
4166 abi_ulong sem_ctime;
4167 #if TARGET_ABI_BITS == 32
4168 abi_ulong __unused2;
4169 #endif
4170 abi_ulong sem_nsems;
4171 abi_ulong __unused3;
4172 abi_ulong __unused4;
4174 #endif
4176 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
4177 abi_ulong target_addr)
4179 struct target_ipc_perm *target_ip;
4180 struct target_semid64_ds *target_sd;
4182 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4183 return -TARGET_EFAULT;
4184 target_ip = &(target_sd->sem_perm);
4185 host_ip->__key = tswap32(target_ip->__key);
4186 host_ip->uid = tswap32(target_ip->uid);
4187 host_ip->gid = tswap32(target_ip->gid);
4188 host_ip->cuid = tswap32(target_ip->cuid);
4189 host_ip->cgid = tswap32(target_ip->cgid);
4190 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4191 host_ip->mode = tswap32(target_ip->mode);
4192 #else
4193 host_ip->mode = tswap16(target_ip->mode);
4194 #endif
4195 #if defined(TARGET_PPC)
4196 host_ip->__seq = tswap32(target_ip->__seq);
4197 #else
4198 host_ip->__seq = tswap16(target_ip->__seq);
4199 #endif
4200 unlock_user_struct(target_sd, target_addr, 0);
4201 return 0;
4204 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
4205 struct ipc_perm *host_ip)
4207 struct target_ipc_perm *target_ip;
4208 struct target_semid64_ds *target_sd;
4210 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4211 return -TARGET_EFAULT;
4212 target_ip = &(target_sd->sem_perm);
4213 target_ip->__key = tswap32(host_ip->__key);
4214 target_ip->uid = tswap32(host_ip->uid);
4215 target_ip->gid = tswap32(host_ip->gid);
4216 target_ip->cuid = tswap32(host_ip->cuid);
4217 target_ip->cgid = tswap32(host_ip->cgid);
4218 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4219 target_ip->mode = tswap32(host_ip->mode);
4220 #else
4221 target_ip->mode = tswap16(host_ip->mode);
4222 #endif
4223 #if defined(TARGET_PPC)
4224 target_ip->__seq = tswap32(host_ip->__seq);
4225 #else
4226 target_ip->__seq = tswap16(host_ip->__seq);
4227 #endif
4228 unlock_user_struct(target_sd, target_addr, 1);
4229 return 0;
4232 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
4233 abi_ulong target_addr)
4235 struct target_semid64_ds *target_sd;
4237 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4238 return -TARGET_EFAULT;
4239 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
4240 return -TARGET_EFAULT;
4241 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
4242 host_sd->sem_otime = tswapal(target_sd->sem_otime);
4243 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
4244 unlock_user_struct(target_sd, target_addr, 0);
4245 return 0;
4248 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
4249 struct semid_ds *host_sd)
4251 struct target_semid64_ds *target_sd;
4253 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4254 return -TARGET_EFAULT;
4255 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
4256 return -TARGET_EFAULT;
4257 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
4258 target_sd->sem_otime = tswapal(host_sd->sem_otime);
4259 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
4260 unlock_user_struct(target_sd, target_addr, 1);
4261 return 0;
4264 struct target_seminfo {
4265 int semmap;
4266 int semmni;
4267 int semmns;
4268 int semmnu;
4269 int semmsl;
4270 int semopm;
4271 int semume;
4272 int semusz;
4273 int semvmx;
4274 int semaem;
4277 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
4278 struct seminfo *host_seminfo)
4280 struct target_seminfo *target_seminfo;
4281 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
4282 return -TARGET_EFAULT;
4283 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
4284 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
4285 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
4286 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
4287 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
4288 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
4289 __put_user(host_seminfo->semume, &target_seminfo->semume);
4290 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
4291 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
4292 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
4293 unlock_user_struct(target_seminfo, target_addr, 1);
4294 return 0;
4297 union semun {
4298 int val;
4299 struct semid_ds *buf;
4300 unsigned short *array;
4301 struct seminfo *__buf;
4304 union target_semun {
4305 int val;
4306 abi_ulong buf;
4307 abi_ulong array;
4308 abi_ulong __buf;
4311 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
4312 abi_ulong target_addr)
4314 int nsems;
4315 unsigned short *array;
4316 union semun semun;
4317 struct semid_ds semid_ds;
4318 int i, ret;
4320 semun.buf = &semid_ds;
4322 ret = semctl(semid, 0, IPC_STAT, semun);
4323 if (ret == -1)
4324 return get_errno(ret);
4326 nsems = semid_ds.sem_nsems;
4328 *host_array = g_try_new(unsigned short, nsems);
4329 if (!*host_array) {
4330 return -TARGET_ENOMEM;
4332 array = lock_user(VERIFY_READ, target_addr,
4333 nsems*sizeof(unsigned short), 1);
4334 if (!array) {
4335 g_free(*host_array);
4336 return -TARGET_EFAULT;
4339 for(i=0; i<nsems; i++) {
4340 __get_user((*host_array)[i], &array[i]);
4342 unlock_user(array, target_addr, 0);
4344 return 0;
4347 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4348 unsigned short **host_array)
4350 int nsems;
4351 unsigned short *array;
4352 union semun semun;
4353 struct semid_ds semid_ds;
4354 int i, ret;
4356 semun.buf = &semid_ds;
4358 ret = semctl(semid, 0, IPC_STAT, semun);
4359 if (ret == -1)
4360 return get_errno(ret);
4362 nsems = semid_ds.sem_nsems;
4364 array = lock_user(VERIFY_WRITE, target_addr,
4365 nsems*sizeof(unsigned short), 0);
4366 if (!array)
4367 return -TARGET_EFAULT;
4369 for(i=0; i<nsems; i++) {
4370 __put_user((*host_array)[i], &array[i]);
4372 g_free(*host_array);
4373 unlock_user(array, target_addr, 1);
4375 return 0;
/*
 * Emulate semctl(2).  target_arg carries the guest's union semun by
 * value and is re-interpreted per command.  Returns the syscall result
 * or a -TARGET_xxx errno; commands not handled below fall through with
 * the initial -TARGET_EINVAL.
 */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;

    /* Only the low byte selects the operation; targets may OR flag
     * bits into the upper bits of cmd. */
    cmd &= 0xff;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element. To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        /* Marshal the guest semaphore-value array into a host copy,
         * run the syscall, then write the array back to the guest. */
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        /* NOTE(review): if this writeback fails, the host array
         * allocated by target_to_host_semarray() appears to be leaked
         * -- confirm against the helper's ownership contract. */
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        /* semid_ds round-trip: convert in, call, convert back out. */
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        /* These return a struct seminfo through the __buf member. */
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* No argument is consulted for these commands. */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
/* Guest-layout counterpart of struct sembuf, one semop(2) operation. */
struct target_sembuf {
    unsigned short sem_num;  /* semaphore index within the set */
    short sem_op;            /* operation: add, subtract, or wait-for-zero */
    short sem_flg;           /* IPC_NOWAIT / SEM_UNDO flags */
};
4454 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4455 abi_ulong target_addr,
4456 unsigned nsops)
4458 struct target_sembuf *target_sembuf;
4459 int i;
4461 target_sembuf = lock_user(VERIFY_READ, target_addr,
4462 nsops*sizeof(struct target_sembuf), 1);
4463 if (!target_sembuf)
4464 return -TARGET_EFAULT;
4466 for(i=0; i<nsops; i++) {
4467 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4468 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4469 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4472 unlock_user(target_sembuf, target_addr, 0);
4474 return 0;
4477 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
4479 struct sembuf sops[nsops];
4481 if (target_to_host_sembuf(sops, ptr, nsops))
4482 return -TARGET_EFAULT;
4484 return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
/* Guest-layout msqid_ds, used by msgctl IPC_STAT/IPC_SET/MSG_STAT.
 * On 32-bit ABIs each time field is followed by an explicit padding
 * slot so the layout matches the kernel's 64-bit time representation. */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;        /* time of last msgsnd() */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;        /* time of last msgrcv() */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;        /* time of last change */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;     /* current number of bytes on queue */
    abi_ulong msg_qnum;         /* number of messages on queue */
    abi_ulong msg_qbytes;       /* max bytes allowed on queue */
    abi_ulong msg_lspid;        /* pid of last msgsnd() */
    abi_ulong msg_lrpid;        /* pid of last msgrcv() */
    abi_ulong __unused4;
    abi_ulong __unused5;
};
4511 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4512 abi_ulong target_addr)
4514 struct target_msqid_ds *target_md;
4516 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4517 return -TARGET_EFAULT;
4518 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4519 return -TARGET_EFAULT;
4520 host_md->msg_stime = tswapal(target_md->msg_stime);
4521 host_md->msg_rtime = tswapal(target_md->msg_rtime);
4522 host_md->msg_ctime = tswapal(target_md->msg_ctime);
4523 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4524 host_md->msg_qnum = tswapal(target_md->msg_qnum);
4525 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4526 host_md->msg_lspid = tswapal(target_md->msg_lspid);
4527 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4528 unlock_user_struct(target_md, target_addr, 0);
4529 return 0;
4532 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4533 struct msqid_ds *host_md)
4535 struct target_msqid_ds *target_md;
4537 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4538 return -TARGET_EFAULT;
4539 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4540 return -TARGET_EFAULT;
4541 target_md->msg_stime = tswapal(host_md->msg_stime);
4542 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4543 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4544 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4545 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4546 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4547 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4548 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4549 unlock_user_struct(target_md, target_addr, 1);
4550 return 0;
/* Guest-layout struct msginfo, returned by msgctl IPC_INFO/MSG_INFO. */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;                  /* max bytes in a single message */
    int msgmnb;                  /* default max bytes on a queue */
    int msgmni;                  /* max number of message queues */
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4564 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4565 struct msginfo *host_msginfo)
4567 struct target_msginfo *target_msginfo;
4568 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4569 return -TARGET_EFAULT;
4570 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4571 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4572 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4573 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4574 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4575 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4576 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4577 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4578 unlock_user_struct(target_msginfo, target_addr, 1);
4579 return 0;
/*
 * Emulate msgctl(2).  ptr is a guest pointer whose type depends on cmd
 * (msqid_ds for STAT/SET, msginfo for INFO).  Unhandled commands return
 * the initial -TARGET_EINVAL.
 */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    /* Only the low byte selects the operation. */
    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        /* msqid_ds round-trip: convert in, call, convert back out. */
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* The kernel actually fills a struct msginfo here despite the
         * msqid_ds prototype, hence the cast. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
/* Guest-layout message buffer: a type tag followed by the payload.
 * mtext is declared [1]; the real payload length is the msgsz argument
 * of the surrounding msgsnd/msgrcv call. */
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};
/*
 * Emulate msgsnd(2).  msgp is a guest pointer to a target_msgbuf with
 * msgsz payload bytes.  The message is staged through a host msgbuf
 * because the mtype field differs in width and byte order.
 */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    /* NOTE(review): copy flag 0 with VERIFY_READ looks suspicious for
     * DEBUG_REMAP builds -- confirm against lock_user_struct semantics. */
    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* Host buffer: a native long mtype followed by the payload. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
/*
 * Emulate msgrcv(2).  Receives into a host-side buffer, then copies the
 * payload and the byte-swapped mtype out to the guest msgbuf at msgp.
 * Returns the number of payload bytes received or a -TARGET_xxx errno.
 */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    /* Host buffer: native long mtype plus up to msgsz payload bytes. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));

    if (ret > 0) {
        /* The guest payload starts right after the mtype field. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        /* NOTE(review): the copy goes through target_mb->mtext while
         * the lock was taken on target_mtext -- these alias the same
         * guest memory in the non-DEBUG_REMAP case; confirm for
         * DEBUG_REMAP builds. */
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    /* Written back even on failure; host_mb->mtype is then whatever
     * g_try_malloc left there. */
    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
4689 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4690 abi_ulong target_addr)
4692 struct target_shmid_ds *target_sd;
4694 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4695 return -TARGET_EFAULT;
4696 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4697 return -TARGET_EFAULT;
4698 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4699 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4700 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4701 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4702 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4703 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4704 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4705 unlock_user_struct(target_sd, target_addr, 0);
4706 return 0;
4709 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4710 struct shmid_ds *host_sd)
4712 struct target_shmid_ds *target_sd;
4714 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4715 return -TARGET_EFAULT;
4716 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4717 return -TARGET_EFAULT;
4718 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4719 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4720 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4721 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4722 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4723 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4724 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4725 unlock_user_struct(target_sd, target_addr, 1);
4726 return 0;
/* Guest-layout struct shminfo, returned by shmctl(IPC_INFO). */
struct target_shminfo {
    abi_ulong shmmax;   /* maximum segment size */
    abi_ulong shmmin;   /* minimum segment size */
    abi_ulong shmmni;   /* maximum number of segments */
    abi_ulong shmseg;   /* maximum segments per process */
    abi_ulong shmall;   /* maximum total shared memory */
};
4737 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4738 struct shminfo *host_shminfo)
4740 struct target_shminfo *target_shminfo;
4741 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4742 return -TARGET_EFAULT;
4743 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4744 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4745 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4746 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4747 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4748 unlock_user_struct(target_shminfo, target_addr, 1);
4749 return 0;
/* Guest-layout struct shm_info, returned by shmctl(SHM_INFO). */
struct target_shm_info {
    int used_ids;               /* number of existing segments */
    abi_ulong shm_tot;          /* total allocated shm */
    abi_ulong shm_rss;          /* resident shm */
    abi_ulong shm_swp;          /* swapped shm */
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
4761 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4762 struct shm_info *host_shm_info)
4764 struct target_shm_info *target_shm_info;
4765 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4766 return -TARGET_EFAULT;
4767 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4768 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4769 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4770 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4771 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4772 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4773 unlock_user_struct(target_shm_info, target_addr, 1);
4774 return 0;
/*
 * Emulate shmctl(2).  buf is a guest pointer whose type depends on cmd.
 * Unhandled commands return the initial -TARGET_EINVAL.
 */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    /* Only the low byte selects the operation. */
    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        /* shmid_ds round-trip: convert in, call, convert back out. */
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* The kernel fills a struct shminfo here despite the shmid_ds
         * prototype, hence the cast. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        /* Likewise, a struct shm_info. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        /* No argument is consulted for these commands. */
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    /* Default: shmat() attach addresses need only be page-aligned. */
    return TARGET_PAGE_SIZE;
}
#endif
/*
 * Emulate shmat(2).  Attaches the segment at the guest-requested
 * address (or lets mmap_find_vma pick one), marks the guest pages
 * valid, and records the mapping so do_shmdt() can undo it.
 * Returns the guest attach address or a -TARGET_xxx errno.
 */
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;
    abi_ulong shmlba;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    /* Enforce the target's SHMLBA alignment: round down with SHM_RND,
     * otherwise reject misaligned addresses like the kernel does. */
    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }

    /* Hold the mmap lock: the attach and the page-flag update below
     * must be atomic with respect to other guest mappings. */
    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* No address requested: pick a free guest VA range ourselves. */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP: the reserved range is already mapped. */
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    /* Make the new range visible to the guest's page tracking. */
    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* Remember the attach so do_shmdt() can find its size later.
     * Silently proceeds if all bookkeeping slots are in use. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
4902 static inline abi_long do_shmdt(abi_ulong shmaddr)
4904 int i;
4906 for (i = 0; i < N_SHM_REGIONS; ++i) {
4907 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4908 shm_regions[i].in_use = false;
4909 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4910 break;
4914 return get_errno(shmdt(g2h(shmaddr)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos. */
/*
 * Demultiplexer for the legacy ipc(2) syscall: the low 16 bits of
 * 'call' select the operation, the high 16 bits carry a version that a
 * few operations (msgrcv, shmat) use to select an argument-passing
 * convention.
 */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-style msgrcv: ptr points at a kludge struct that
                 * bundles the real msgp and msgtyp. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* The attach address is returned through *third. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* Version 1 shmat is not supported. */
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

    /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
/* kernel structure types definitions */

/* First pass over syscall_types.h: build an enum giving each kernel
 * struct known to the thunk layer a STRUCT_<name> identifier. */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Second pass: emit a thunk type-description array for every ordinary
 * struct; STRUCT_SPECIAL entries are converted by hand-written code and
 * get no generated descriptor. */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL
typedef struct IOCTLEntry IOCTLEntry;

/* Handler signature for ioctls that need custom marshalling. */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

/* One row of the ioctl translation table (populated from ioctls.h). */
struct IOCTLEntry {
    int target_cmd;            /* ioctl number as seen by the guest */
    unsigned int host_cmd;     /* ioctl number to issue on the host */
    const char *name;          /* command name, for diagnostics */
    int access;                /* IOC_R / IOC_W / IOC_RW */
    do_ioctl_fn *do_ioctl;     /* custom handler, or 0 for generic path */
    const argtype arg_type[5]; /* thunk description of the argument */
};

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

/* Size of the on-stack scratch buffer used when converting payloads. */
#define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))

static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;   /* set when fm was heap-allocated below */

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;   /* step past TYPE_PTR to the struct description */
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* fm_extent_count is guest-controlled; bound it so the size
     * computation below cannot overflow. */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
5150 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
5151 int fd, int cmd, abi_long arg)
5153 const argtype *arg_type = ie->arg_type;
5154 int target_size;
5155 void *argptr;
5156 int ret;
5157 struct ifconf *host_ifconf;
5158 uint32_t outbufsz;
5159 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
5160 int target_ifreq_size;
5161 int nb_ifreq;
5162 int free_buf = 0;
5163 int i;
5164 int target_ifc_len;
5165 abi_long target_ifc_buf;
5166 int host_ifc_len;
5167 char *host_ifc_buf;
5169 assert(arg_type[0] == TYPE_PTR);
5170 assert(ie->access == IOC_RW);
5172 arg_type++;
5173 target_size = thunk_type_size(arg_type, 0);
5175 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5176 if (!argptr)
5177 return -TARGET_EFAULT;
5178 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5179 unlock_user(argptr, arg, 0);
5181 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
5182 target_ifc_len = host_ifconf->ifc_len;
5183 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
5185 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
5186 nb_ifreq = target_ifc_len / target_ifreq_size;
5187 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
5189 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
5190 if (outbufsz > MAX_STRUCT_SIZE) {
5191 /* We can't fit all the extents into the fixed size buffer.
5192 * Allocate one that is large enough and use it instead.
5194 host_ifconf = malloc(outbufsz);
5195 if (!host_ifconf) {
5196 return -TARGET_ENOMEM;
5198 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
5199 free_buf = 1;
5201 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
5203 host_ifconf->ifc_len = host_ifc_len;
5204 host_ifconf->ifc_buf = host_ifc_buf;
5206 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
5207 if (!is_error(ret)) {
5208 /* convert host ifc_len to target ifc_len */
5210 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
5211 target_ifc_len = nb_ifreq * target_ifreq_size;
5212 host_ifconf->ifc_len = target_ifc_len;
5214 /* restore target ifc_buf */
5216 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
5218 /* copy struct ifconf to target user */
5220 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5221 if (!argptr)
5222 return -TARGET_EFAULT;
5223 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
5224 unlock_user(argptr, arg, target_size);
5226 /* copy ifreq[] to target user */
5228 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
5229 for (i = 0; i < nb_ifreq ; i++) {
5230 thunk_convert(argptr + i * target_ifreq_size,
5231 host_ifc_buf + i * sizeof(struct ifreq),
5232 ifreq_arg_type, THUNK_TARGET);
5234 unlock_user(argptr, target_ifc_buf, target_ifc_len);
5237 if (free_buf) {
5238 free(host_ifconf);
5241 return ret;
/*
 * Device-mapper ioctls: a struct dm_ioctl header followed by a
 * command-specific variable-length payload at header.data_start.
 * The payload is converted by hand per command, the ioctl issued on a
 * host-side copy, and the (possibly rewritten) payload copied back.
 */
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    arg_type++;   /* step past TYPE_PTR to the dm_ioctl description */
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        /* guest-supplied data_start wrapped the address computation */
        ret = -TARGET_EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }

    /* Convert the command-specific input payload into host_data. */
    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        /* strings preceded by a 64-bit sector number */
        memcpy(host_data, argptr, guest_data_size);
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        /* A chain of dm_target_spec structs, each followed by its
         * parameter string and linked by the 'next' offset. */
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            next = spec->next;
            /* recompute 'next' for the host-side layout */
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        /* The kernel may have rewritten data_start/data_size. */
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        /* Convert the command-specific output payload back out. */
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            /* Chain of dm_name_list records linked by 'next' offsets;
             * truncate and flag DM_BUFFER_FULL_FLAG if the guest buffer
             * is too small for the re-laid-out records. */
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void*)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            /* Chain of dm_target_spec records plus trailing strings. */
            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char*)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char*)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void*)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            /* A 32-bit count (padded to 8 bytes) then 64-bit dev_ts. */
            void *hdata = (void*)host_dm + host_dm->data_start;
            int count = *(uint32_t*)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t*)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            /* Chain of dm_target_versions records, as for name lists. */
            struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void*)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        /* Finally write the (possibly updated) header back. */
        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}
/*
 * BLKPG: the argument is a struct blkpg_ioctl_arg whose 'data' member
 * points at a struct blkpg_partition.  Both levels are converted, with
 * the inner pointer swizzled to a host-side copy before the call.
 * Note host_blkpg aliases buf_temp, which thunk_convert() fills below.
 */
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
5527 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5528 int fd, int cmd, abi_long arg)
5530 const argtype *arg_type = ie->arg_type;
5531 const StructEntry *se;
5532 const argtype *field_types;
5533 const int *dst_offsets, *src_offsets;
5534 int target_size;
5535 void *argptr;
5536 abi_ulong *target_rt_dev_ptr;
5537 unsigned long *host_rt_dev_ptr;
5538 abi_long ret;
5539 int i;
5541 assert(ie->access == IOC_W);
5542 assert(*arg_type == TYPE_PTR);
5543 arg_type++;
5544 assert(*arg_type == TYPE_STRUCT);
5545 target_size = thunk_type_size(arg_type, 0);
5546 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5547 if (!argptr) {
5548 return -TARGET_EFAULT;
5550 arg_type++;
5551 assert(*arg_type == (int)STRUCT_rtentry);
5552 se = struct_entries + *arg_type++;
5553 assert(se->convert[0] == NULL);
5554 /* convert struct here to be able to catch rt_dev string */
5555 field_types = se->field_types;
5556 dst_offsets = se->field_offsets[THUNK_HOST];
5557 src_offsets = se->field_offsets[THUNK_TARGET];
5558 for (i = 0; i < se->nb_fields; i++) {
5559 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5560 assert(*field_types == TYPE_PTRVOID);
5561 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5562 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5563 if (*target_rt_dev_ptr != 0) {
5564 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5565 tswapal(*target_rt_dev_ptr));
5566 if (!*host_rt_dev_ptr) {
5567 unlock_user(argptr, arg, 0);
5568 return -TARGET_EFAULT;
5570 } else {
5571 *host_rt_dev_ptr = 0;
5573 field_types++;
5574 continue;
5576 field_types = thunk_convert(buf_temp + dst_offsets[i],
5577 argptr + src_offsets[i],
5578 field_types, THUNK_HOST);
5580 unlock_user(argptr, arg, 0);
5582 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5583 if (*host_rt_dev_ptr != 0) {
5584 unlock_user((void *)*host_rt_dev_ptr,
5585 *target_rt_dev_ptr, 0);
5587 return ret;
5590 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5591 int fd, int cmd, abi_long arg)
5593 int sig = target_to_host_signal(arg);
5594 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
/* Table of all supported ioctls, generated from ioctls.h.  Each entry
 * maps a target command number to the host command, an access mode and
 * (for IOCTL_SPECIAL) a custom conversion handler.  IOCTL_IGNORE entries
 * have host_cmd == 0 and are rejected by do_ioctl(). */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    /* Sentinel: target_cmd == 0 terminates the lookup loop. */
    { 0, 0, },
};
/* ??? Implement proper locking for ioctls.  */
/* do_ioctl() Must return target values and target errnos. */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear search of the table; the zero sentinel entry terminates. */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    if (ie->do_ioctl) {
        /* IOCTL_SPECIAL entries use a custom conversion handler. */
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux.  */
        return -TARGET_ENOSYS;
    }

    /* Generic path: convert the argument via the thunk machinery
     * according to the access mode declared in the table. */
    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* Passed through unchanged (opaque pointer or plain integer). */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* Read-only for the guest: run the ioctl into buf_temp,
             * then convert the result back to target layout. */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* Write-only: convert guest data to host layout first. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* Read-write: convert in, run, convert back out. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
/* Target <-> host translation of termios c_iflag (input mode) bits.
 * Columns: target mask, target bits, host mask, host bits. */
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { 0, 0, 0, 0 }
};
/* Target <-> host translation of termios c_oflag (output mode) bits.
 * Multi-bit delay fields (NLDLY, CRDLY, TABDLY, ...) get one row per
 * possible value under the same mask. */
static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
    { 0, 0, 0, 0 }
};
/* Target <-> host translation of termios c_cflag (control mode) bits:
 * baud rate (CBAUD field, one row per rate), character size (CSIZE)
 * and the single-bit control flags. */
static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
    { 0, 0, 0, 0 }
};
/* Target <-> host translation of termios c_lflag (local mode) bits. */
static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { 0, 0, 0, 0 }
};
/* Convert a guest struct target_termios to a host struct host_termios:
 * the four flag words go through the bitmask tables above (with 32-bit
 * byte swapping), c_line is copied verbatim, and each control character
 * is remapped from its TARGET_V* slot to the host V* slot.  Host c_cc
 * slots with no target equivalent are left zeroed by the memset. */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
/* Inverse of target_to_host_termios: convert a host struct host_termios
 * back to guest layout, translating flag bits through the same tables
 * and remapping each control character to its TARGET_V* slot. */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
/* StructEntry registering the termios converters with the thunk layer;
 * convert[0] is host->target, convert[1] is target->host. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};
5875 static bitmask_transtbl mmap_flags_tbl[] = {
5876 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5877 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5878 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5879 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
5880 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
5881 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
5882 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
5883 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5884 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
5885 MAP_NORESERVE },
5886 { 0, 0, 0, 0 }
5889 #if defined(TARGET_I386)
5891 /* NOTE: there is really one LDT for all the threads */
5892 static uint8_t *ldt_table;
5894 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5896 int size;
5897 void *p;
5899 if (!ldt_table)
5900 return 0;
5901 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5902 if (size > bytecount)
5903 size = bytecount;
5904 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5905 if (!p)
5906 return -TARGET_EFAULT;
5907 /* ??? Should this by byteswapped? */
5908 memcpy(p, ldt_table, size);
5909 unlock_user(p, ptr, size);
5910 return size;
/* XXX: add locking support */
/* modify_ldt(func == 1 or 0x11): install one LDT descriptor from the
 * guest's struct user_desc at ptr.  oldmode selects the legacy
 * modify_ldt(1) semantics (no "useable" bit, contents==3 rejected).
 * The descriptor packing below mirrors the Linux kernel's
 * write_ldt()/fill_ldt(). */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the user_desc flag bits (same layout as the kernel ABI). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                        TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                        PROT_READ|PROT_WRITE,
                        MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0		&&
             read_exec_only == 1	&&
             seg_32bit == 0		&&
             limit_in_pages == 0	&&
             seg_not_present == 1	&&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the two 32-bit halves of an x86 segment descriptor. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
6005 /* specific and weird i386 syscalls */
6006 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6007 unsigned long bytecount)
6009 abi_long ret;
6011 switch (func) {
6012 case 0:
6013 ret = read_ldt(ptr, bytecount);
6014 break;
6015 case 1:
6016 ret = write_ldt(env, ptr, bytecount, 1);
6017 break;
6018 case 0x11:
6019 ret = write_ldt(env, ptr, bytecount, 0);
6020 break;
6021 default:
6022 ret = -TARGET_ENOSYS;
6023 break;
6025 return ret;
6028 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/* set_thread_area: install a TLS descriptor into the GDT.  If the guest
 * passes entry_number == -1, a free TLS slot is picked and written back
 * to the guest struct.  Descriptor packing mirrors the kernel's
 * fill_ldt() just as in write_ldt() above. */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* Find a free TLS slot and report it back to the guest. */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* Unpack the user_desc flag bits (same layout as the kernel ABI). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the two 32-bit halves of an x86 segment descriptor. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
/* get_thread_area: read the TLS descriptor for the slot named in the
 * guest struct, unpack its bit fields and write base/limit/flags back
 * to the guest.  Inverse of do_set_thread_area()'s descriptor packing. */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Unpack the descriptor bits; *_only/not_present are stored
     * inverted in the descriptor, hence the ^ 1. */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    /* Reassemble the user_desc flags word. */
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
6160 #endif /* TARGET_I386 && TARGET_ABI32 */
6162 #ifndef TARGET_ABI32
6163 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6165 abi_long ret = 0;
6166 abi_ulong val;
6167 int idx;
6169 switch(code) {
6170 case TARGET_ARCH_SET_GS:
6171 case TARGET_ARCH_SET_FS:
6172 if (code == TARGET_ARCH_SET_GS)
6173 idx = R_GS;
6174 else
6175 idx = R_FS;
6176 cpu_x86_load_seg(env, idx, 0);
6177 env->segs[idx].base = addr;
6178 break;
6179 case TARGET_ARCH_GET_GS:
6180 case TARGET_ARCH_GET_FS:
6181 if (code == TARGET_ARCH_GET_GS)
6182 idx = R_GS;
6183 else
6184 idx = R_FS;
6185 val = env->segs[idx].base;
6186 if (put_user(val, addr, abi_ulong))
6187 ret = -TARGET_EFAULT;
6188 break;
6189 default:
6190 ret = -TARGET_EINVAL;
6191 break;
6193 return ret;
6195 #endif
6197 #endif /* defined(TARGET_I386) */
/* Stack size for threads created via clone(CLONE_VM). */
#define NEW_STACK_SIZE 0x40000


/* Serializes thread creation in do_fork() so TLS setup appears atomic
 * to the new thread (clone_func blocks on it before entering cpu_loop). */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Parent -> child handoff for do_fork()/clone_func(): the new CPU state,
 * a condvar the child signals once initialized, the child's tid, the
 * guest addresses to store the tid at, and the parent's signal mask to
 * restore in the child. */
typedef struct {
    CPUArchState *env;
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    pthread_t thread;
    uint32_t tid;
    abi_ulong child_tidptr;
    abi_ulong parent_tidptr;
    sigset_t sigmask;
} new_thread_info;
/* Entry point of a thread created by do_fork(CLONE_VM).  Registers the
 * thread with RCU and TCG, publishes its tid, unblocks signals, signals
 * the waiting parent, then waits for the parent to drop clone_lock
 * before entering the guest CPU loop.  Never returns. */
static void * QEMU_NORETURN clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = gettid();
    task_settid(ts);
    /* Honour CLONE_CHILD_SETTID / CLONE_PARENT_SETTID. */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finshed initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
}
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
/* Implements clone()/fork()/vfork().  CLONE_VM requests become host
 * pthreads sharing this process; anything else becomes a host fork().
 * vfork is emulated with fork. */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_cpu = ENV_GET_CPU(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        /* If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         */
        if (!parallel_cpus) {
            parallel_cpus = true;
            tb_flush(cpu);
        }

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        /* Restore our signal mask now that the child has its own copy. */
        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs(env, newsp);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            fork_end(0);
        }
    }
    return ret;
}
/* warning : doesn't handle linux specific flags... */
/* Map a target fcntl command to the host command.  Lock commands are
 * mapped to the 64-bit host variants (F_GETLK64 etc.) since the host
 * struct flock64 is used throughout do_fcntl().  Returns
 * -TARGET_EINVAL for commands with no host mapping. */
static int target_to_host_fcntl_cmd(int cmd)
{
    switch(cmd) {
	case TARGET_F_DUPFD:
	case TARGET_F_GETFD:
	case TARGET_F_SETFD:
	case TARGET_F_GETFL:
	case TARGET_F_SETFL:
            return cmd;
        case TARGET_F_GETLK:
            return F_GETLK64;
        case TARGET_F_SETLK:
            return F_SETLK64;
        case TARGET_F_SETLKW:
            return F_SETLKW64;
	case TARGET_F_GETOWN:
	    return F_GETOWN;
	case TARGET_F_SETOWN:
	    return F_SETOWN;
	case TARGET_F_GETSIG:
	    return F_GETSIG;
	case TARGET_F_SETSIG:
	    return F_SETSIG;
#if TARGET_ABI_BITS == 32
        case TARGET_F_GETLK64:
	    return F_GETLK64;
	case TARGET_F_SETLK64:
	    return F_SETLK64;
	case TARGET_F_SETLKW64:
	    return F_SETLKW64;
#endif
        case TARGET_F_SETLEASE:
            return F_SETLEASE;
        case TARGET_F_GETLEASE:
            return F_GETLEASE;
#ifdef F_DUPFD_CLOEXEC
        case TARGET_F_DUPFD_CLOEXEC:
            return F_DUPFD_CLOEXEC;
#endif
        case TARGET_F_NOTIFY:
            return F_NOTIFY;
#ifdef F_GETOWN_EX
        case TARGET_F_GETOWN_EX:
            return F_GETOWN_EX;
#endif
#ifdef F_SETOWN_EX
        case TARGET_F_SETOWN_EX:
            return F_SETOWN_EX;
#endif
#ifdef F_SETPIPE_SZ
        case TARGET_F_SETPIPE_SZ:
            return F_SETPIPE_SZ;
        case TARGET_F_GETPIPE_SZ:
            return F_GETPIPE_SZ;
#endif
        default:
            return -TARGET_EINVAL;
    }
    /* Not reached: every path above returns from the switch. */
    return -TARGET_EINVAL;
}
/* Target <-> host translation of flock l_type values.  Mask -1 means
 * "match the whole value" rather than individual bits. */
#define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
static const bitmask_transtbl flock_tbl[] = {
    TRANSTBL_CONVERT(F_RDLCK),
    TRANSTBL_CONVERT(F_WRLCK),
    TRANSTBL_CONVERT(F_UNLCK),
    TRANSTBL_CONVERT(F_EXLCK),
    TRANSTBL_CONVERT(F_SHLCK),
    { 0, 0, 0, 0 }
};
/* Read a target "struct flock" from guest address target_flock_addr
 * into the host struct flock64 *fl, translating l_type through
 * flock_tbl.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_from_user_flock(struct flock64 *fl,
                                            abi_ulong target_flock_addr)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}
/* Write the host struct flock64 *fl out to a target "struct flock" at
 * guest address target_flock_addr, translating l_type back through
 * flock_tbl.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
/* Function-pointer types for the flock64 copy helpers, so do_fcntl()
 * can select the plain or ARM-EABI variants at run time. */
typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
#if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/* ARM-EABI variants of the flock64 copy helpers: identical logic to the
 * generic flock64 versions below, but using struct target_eabi_flock64,
 * whose field alignment differs under the 32-bit ARM EABI. */
static inline abi_long copy_from_user_eabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_eabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

/* Write-back counterpart of copy_from_user_eabi_flock64(). */
static inline abi_long copy_to_user_eabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_eabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
#endif
/* Read a target "struct flock64" from guest memory into the host
 * struct flock64 *fl, translating l_type through flock_tbl.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_from_user_flock64(struct flock64 *fl,
                                              abi_ulong target_flock_addr)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}
6562 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6563 const struct flock64 *fl)
6565 struct target_flock64 *target_fl;
6566 short l_type;
6568 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6569 return -TARGET_EFAULT;
6572 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6573 __put_user(l_type, &target_fl->l_type);
6574 __put_user(fl->l_whence, &target_fl->l_whence);
6575 __put_user(fl->l_start, &target_fl->l_start);
6576 __put_user(fl->l_len, &target_fl->l_len);
6577 __put_user(fl->l_pid, &target_fl->l_pid);
6578 unlock_user_struct(target_fl, target_flock_addr, 1);
6579 return 0;
6582 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6584 struct flock64 fl64;
6585 #ifdef F_GETOWN_EX
6586 struct f_owner_ex fox;
6587 struct target_f_owner_ex *target_fox;
6588 #endif
6589 abi_long ret;
6590 int host_cmd = target_to_host_fcntl_cmd(cmd);
6592 if (host_cmd == -TARGET_EINVAL)
6593 return host_cmd;
6595 switch(cmd) {
6596 case TARGET_F_GETLK:
6597 ret = copy_from_user_flock(&fl64, arg);
6598 if (ret) {
6599 return ret;
6601 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6602 if (ret == 0) {
6603 ret = copy_to_user_flock(arg, &fl64);
6605 break;
6607 case TARGET_F_SETLK:
6608 case TARGET_F_SETLKW:
6609 ret = copy_from_user_flock(&fl64, arg);
6610 if (ret) {
6611 return ret;
6613 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6614 break;
6616 case TARGET_F_GETLK64:
6617 ret = copy_from_user_flock64(&fl64, arg);
6618 if (ret) {
6619 return ret;
6621 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6622 if (ret == 0) {
6623 ret = copy_to_user_flock64(arg, &fl64);
6625 break;
6626 case TARGET_F_SETLK64:
6627 case TARGET_F_SETLKW64:
6628 ret = copy_from_user_flock64(&fl64, arg);
6629 if (ret) {
6630 return ret;
6632 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6633 break;
6635 case TARGET_F_GETFL:
6636 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6637 if (ret >= 0) {
6638 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6640 break;
6642 case TARGET_F_SETFL:
6643 ret = get_errno(safe_fcntl(fd, host_cmd,
6644 target_to_host_bitmask(arg,
6645 fcntl_flags_tbl)));
6646 break;
6648 #ifdef F_GETOWN_EX
6649 case TARGET_F_GETOWN_EX:
6650 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6651 if (ret >= 0) {
6652 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6653 return -TARGET_EFAULT;
6654 target_fox->type = tswap32(fox.type);
6655 target_fox->pid = tswap32(fox.pid);
6656 unlock_user_struct(target_fox, arg, 1);
6658 break;
6659 #endif
6661 #ifdef F_SETOWN_EX
6662 case TARGET_F_SETOWN_EX:
6663 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6664 return -TARGET_EFAULT;
6665 fox.type = tswap32(target_fox->type);
6666 fox.pid = tswap32(target_fox->pid);
6667 unlock_user_struct(target_fox, arg, 0);
6668 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6669 break;
6670 #endif
6672 case TARGET_F_SETOWN:
6673 case TARGET_F_GETOWN:
6674 case TARGET_F_SETSIG:
6675 case TARGET_F_GETSIG:
6676 case TARGET_F_SETLEASE:
6677 case TARGET_F_GETLEASE:
6678 case TARGET_F_SETPIPE_SZ:
6679 case TARGET_F_GETPIPE_SZ:
6680 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6681 break;
6683 default:
6684 ret = get_errno(safe_fcntl(fd, cmd, arg));
6685 break;
6687 return ret;
#ifdef USE_UID16

/* Clamp a 32-bit uid to 16 bits; out-of-range ids map to the kernel's
 * "overflow" id 65534. */
static inline int high2lowuid(int uid)
{
    return uid > 65535 ? 65534 : uid;
}

static inline int high2lowgid(int gid)
{
    return gid > 65535 ? 65534 : gid;
}

/* Widen a 16-bit uid, keeping the special -1 ("unchanged") value. */
static inline int low2highuid(int uid)
{
    return (int16_t)uid == -1 ? -1 : uid;
}

static inline int low2highgid(int gid)
{
    return (int16_t)gid == -1 ? -1 : gid;
}

static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */

/* 32-bit uid ABI: all conversions are identity mappings. */
static inline int high2lowuid(int uid)
{
    return uid;
}

static inline int high2lowgid(int gid)
{
    return gid;
}

static inline int low2highuid(int uid)
{
    return uid;
}

static inline int low2highgid(int gid)
{
    return gid;
}

static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
6756 /* We must do direct syscalls for setting UID/GID, because we want to
6757 * implement the Linux system call semantics of "change only for this thread",
6758 * not the libc/POSIX semantics of "change for all threads in process".
6759 * (See http://ewontfix.com/17/ for more details.)
6760 * We use the 32-bit version of the syscalls if present; if it is not
6761 * then either the host architecture supports 32-bit UIDs natively with
6762 * the standard syscall, or the 16-bit UID is the best we can do.
6764 #ifdef __NR_setuid32
6765 #define __NR_sys_setuid __NR_setuid32
6766 #else
6767 #define __NR_sys_setuid __NR_setuid
6768 #endif
6769 #ifdef __NR_setgid32
6770 #define __NR_sys_setgid __NR_setgid32
6771 #else
6772 #define __NR_sys_setgid __NR_setgid
6773 #endif
6774 #ifdef __NR_setresuid32
6775 #define __NR_sys_setresuid __NR_setresuid32
6776 #else
6777 #define __NR_sys_setresuid __NR_setresuid
6778 #endif
6779 #ifdef __NR_setresgid32
6780 #define __NR_sys_setresgid __NR_setresgid32
6781 #else
6782 #define __NR_sys_setresgid __NR_setresgid
6783 #endif
6785 _syscall1(int, sys_setuid, uid_t, uid)
6786 _syscall1(int, sys_setgid, gid_t, gid)
6787 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6788 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6790 void syscall_init(void)
6792 IOCTLEntry *ie;
6793 const argtype *arg_type;
6794 int size;
6795 int i;
6797 thunk_init(STRUCT_MAX);
6799 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6800 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6801 #include "syscall_types.h"
6802 #undef STRUCT
6803 #undef STRUCT_SPECIAL
6805 /* Build target_to_host_errno_table[] table from
6806 * host_to_target_errno_table[]. */
6807 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6808 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6811 /* we patch the ioctl size if necessary. We rely on the fact that
6812 no ioctl has all the bits at '1' in the size field */
6813 ie = ioctl_entries;
6814 while (ie->target_cmd != 0) {
6815 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6816 TARGET_IOC_SIZEMASK) {
6817 arg_type = ie->arg_type;
6818 if (arg_type[0] != TYPE_PTR) {
6819 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6820 ie->target_cmd);
6821 exit(1);
6823 arg_type++;
6824 size = thunk_type_size(arg_type, 0);
6825 ie->target_cmd = (ie->target_cmd &
6826 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6827 (size << TARGET_IOC_SIZESHIFT);
6830 /* automatic consistency check if same arch */
6831 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6832 (defined(__x86_64__) && defined(TARGET_X86_64))
6833 if (unlikely(ie->target_cmd != ie->host_cmd)) {
6834 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6835 ie->name, ie->target_cmd, ie->host_cmd);
6837 #endif
6838 ie++;
#if TARGET_ABI_BITS == 32
/* Assemble a 64-bit file offset from the two 32-bit register halves the
 * guest ABI passes; which half is high depends on guest endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABI: the offset already fits in one register; word1 is unused. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64: some ABIs align the 64-bit length to an even register pair,
 * shifting the argument registers by one. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif

#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair alignment handling as truncate64. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
6886 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
6887 abi_ulong target_addr)
6889 struct target_timespec *target_ts;
6891 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
6892 return -TARGET_EFAULT;
6893 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
6894 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6895 unlock_user_struct(target_ts, target_addr, 0);
6896 return 0;
6899 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
6900 struct timespec *host_ts)
6902 struct target_timespec *target_ts;
6904 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
6905 return -TARGET_EFAULT;
6906 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
6907 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6908 unlock_user_struct(target_ts, target_addr, 1);
6909 return 0;
6912 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6913 abi_ulong target_addr)
6915 struct target_itimerspec *target_itspec;
6917 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6918 return -TARGET_EFAULT;
6921 host_itspec->it_interval.tv_sec =
6922 tswapal(target_itspec->it_interval.tv_sec);
6923 host_itspec->it_interval.tv_nsec =
6924 tswapal(target_itspec->it_interval.tv_nsec);
6925 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6926 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6928 unlock_user_struct(target_itspec, target_addr, 1);
6929 return 0;
6932 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6933 struct itimerspec *host_its)
6935 struct target_itimerspec *target_itspec;
6937 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6938 return -TARGET_EFAULT;
6941 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6942 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6944 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6945 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6947 unlock_user_struct(target_itspec, target_addr, 0);
6948 return 0;
6951 static inline abi_long target_to_host_timex(struct timex *host_tx,
6952 abi_long target_addr)
6954 struct target_timex *target_tx;
6956 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6957 return -TARGET_EFAULT;
6960 __get_user(host_tx->modes, &target_tx->modes);
6961 __get_user(host_tx->offset, &target_tx->offset);
6962 __get_user(host_tx->freq, &target_tx->freq);
6963 __get_user(host_tx->maxerror, &target_tx->maxerror);
6964 __get_user(host_tx->esterror, &target_tx->esterror);
6965 __get_user(host_tx->status, &target_tx->status);
6966 __get_user(host_tx->constant, &target_tx->constant);
6967 __get_user(host_tx->precision, &target_tx->precision);
6968 __get_user(host_tx->tolerance, &target_tx->tolerance);
6969 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6970 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6971 __get_user(host_tx->tick, &target_tx->tick);
6972 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6973 __get_user(host_tx->jitter, &target_tx->jitter);
6974 __get_user(host_tx->shift, &target_tx->shift);
6975 __get_user(host_tx->stabil, &target_tx->stabil);
6976 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6977 __get_user(host_tx->calcnt, &target_tx->calcnt);
6978 __get_user(host_tx->errcnt, &target_tx->errcnt);
6979 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6980 __get_user(host_tx->tai, &target_tx->tai);
6982 unlock_user_struct(target_tx, target_addr, 0);
6983 return 0;
6986 static inline abi_long host_to_target_timex(abi_long target_addr,
6987 struct timex *host_tx)
6989 struct target_timex *target_tx;
6991 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6992 return -TARGET_EFAULT;
6995 __put_user(host_tx->modes, &target_tx->modes);
6996 __put_user(host_tx->offset, &target_tx->offset);
6997 __put_user(host_tx->freq, &target_tx->freq);
6998 __put_user(host_tx->maxerror, &target_tx->maxerror);
6999 __put_user(host_tx->esterror, &target_tx->esterror);
7000 __put_user(host_tx->status, &target_tx->status);
7001 __put_user(host_tx->constant, &target_tx->constant);
7002 __put_user(host_tx->precision, &target_tx->precision);
7003 __put_user(host_tx->tolerance, &target_tx->tolerance);
7004 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7005 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7006 __put_user(host_tx->tick, &target_tx->tick);
7007 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7008 __put_user(host_tx->jitter, &target_tx->jitter);
7009 __put_user(host_tx->shift, &target_tx->shift);
7010 __put_user(host_tx->stabil, &target_tx->stabil);
7011 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7012 __put_user(host_tx->calcnt, &target_tx->calcnt);
7013 __put_user(host_tx->errcnt, &target_tx->errcnt);
7014 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7015 __put_user(host_tx->tai, &target_tx->tai);
7017 unlock_user_struct(target_tx, target_addr, 1);
7018 return 0;
7022 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7023 abi_ulong target_addr)
7025 struct target_sigevent *target_sevp;
7027 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7028 return -TARGET_EFAULT;
7031 /* This union is awkward on 64 bit systems because it has a 32 bit
7032 * integer and a pointer in it; we follow the conversion approach
7033 * used for handling sigval types in signal.c so the guest should get
7034 * the correct value back even if we did a 64 bit byteswap and it's
7035 * using the 32 bit integer.
7037 host_sevp->sigev_value.sival_ptr =
7038 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7039 host_sevp->sigev_signo =
7040 target_to_host_signal(tswap32(target_sevp->sigev_signo));
7041 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7042 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7044 unlock_user_struct(target_sevp, target_addr, 1);
7045 return 0;
#if defined(TARGET_NR_mlockall)
/* Translate the guest mlockall() flag bits to the host's MCL_* values. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }
    return result;
}
#endif
7063 static inline abi_long host_to_target_stat64(void *cpu_env,
7064 abi_ulong target_addr,
7065 struct stat *host_st)
7067 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7068 if (((CPUARMState *)cpu_env)->eabi) {
7069 struct target_eabi_stat64 *target_st;
7071 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7072 return -TARGET_EFAULT;
7073 memset(target_st, 0, sizeof(struct target_eabi_stat64));
7074 __put_user(host_st->st_dev, &target_st->st_dev);
7075 __put_user(host_st->st_ino, &target_st->st_ino);
7076 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7077 __put_user(host_st->st_ino, &target_st->__st_ino);
7078 #endif
7079 __put_user(host_st->st_mode, &target_st->st_mode);
7080 __put_user(host_st->st_nlink, &target_st->st_nlink);
7081 __put_user(host_st->st_uid, &target_st->st_uid);
7082 __put_user(host_st->st_gid, &target_st->st_gid);
7083 __put_user(host_st->st_rdev, &target_st->st_rdev);
7084 __put_user(host_st->st_size, &target_st->st_size);
7085 __put_user(host_st->st_blksize, &target_st->st_blksize);
7086 __put_user(host_st->st_blocks, &target_st->st_blocks);
7087 __put_user(host_st->st_atime, &target_st->target_st_atime);
7088 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7089 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7090 unlock_user_struct(target_st, target_addr, 1);
7091 } else
7092 #endif
7094 #if defined(TARGET_HAS_STRUCT_STAT64)
7095 struct target_stat64 *target_st;
7096 #else
7097 struct target_stat *target_st;
7098 #endif
7100 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7101 return -TARGET_EFAULT;
7102 memset(target_st, 0, sizeof(*target_st));
7103 __put_user(host_st->st_dev, &target_st->st_dev);
7104 __put_user(host_st->st_ino, &target_st->st_ino);
7105 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7106 __put_user(host_st->st_ino, &target_st->__st_ino);
7107 #endif
7108 __put_user(host_st->st_mode, &target_st->st_mode);
7109 __put_user(host_st->st_nlink, &target_st->st_nlink);
7110 __put_user(host_st->st_uid, &target_st->st_uid);
7111 __put_user(host_st->st_gid, &target_st->st_gid);
7112 __put_user(host_st->st_rdev, &target_st->st_rdev);
7113 /* XXX: better use of kernel struct */
7114 __put_user(host_st->st_size, &target_st->st_size);
7115 __put_user(host_st->st_blksize, &target_st->st_blksize);
7116 __put_user(host_st->st_blocks, &target_st->st_blocks);
7117 __put_user(host_st->st_atime, &target_st->target_st_atime);
7118 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7119 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7120 unlock_user_struct(target_st, target_addr, 1);
7123 return 0;
7126 /* ??? Using host futex calls even when target atomic operations
7127 are not really atomic probably breaks things. However implementing
7128 futexes locally would make futexes shared between multiple processes
7129 tricky. However they're probably useless because guest atomic
7130 operations won't work either. */
7131 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7132 target_ulong uaddr2, int val3)
7134 struct timespec ts, *pts;
7135 int base_op;
7137 /* ??? We assume FUTEX_* constants are the same on both host
7138 and target. */
7139 #ifdef FUTEX_CMD_MASK
7140 base_op = op & FUTEX_CMD_MASK;
7141 #else
7142 base_op = op;
7143 #endif
7144 switch (base_op) {
7145 case FUTEX_WAIT:
7146 case FUTEX_WAIT_BITSET:
7147 if (timeout) {
7148 pts = &ts;
7149 target_to_host_timespec(pts, timeout);
7150 } else {
7151 pts = NULL;
7153 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
7154 pts, NULL, val3));
7155 case FUTEX_WAKE:
7156 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7157 case FUTEX_FD:
7158 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7159 case FUTEX_REQUEUE:
7160 case FUTEX_CMP_REQUEUE:
7161 case FUTEX_WAKE_OP:
7162 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7163 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7164 But the prototype takes a `struct timespec *'; insert casts
7165 to satisfy the compiler. We do not need to tswap TIMEOUT
7166 since it's not compared to guest memory. */
7167 pts = (struct timespec *)(uintptr_t) timeout;
7168 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
7169 g2h(uaddr2),
7170 (base_op == FUTEX_CMP_REQUEUE
7171 ? tswap32(val3)
7172 : val3)));
7173 default:
7174 return -TARGET_ENOSYS;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): call the host syscall with a scratch
 * file_handle sized from the guest's handle_bytes, then copy the opaque
 * handle back with its header fields byteswapped. */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */
    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): duplicate the guest's file_handle,
 * fix up the byteswapped header fields, and forward to the host. */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
7265 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7267 /* signalfd siginfo conversion */
7269 static void
7270 host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
7271 const struct signalfd_siginfo *info)
7273 int sig = host_to_target_signal(info->ssi_signo);
7275 /* linux/signalfd.h defines a ssi_addr_lsb
7276 * not defined in sys/signalfd.h but used by some kernels
7279 #ifdef BUS_MCEERR_AO
7280 if (tinfo->ssi_signo == SIGBUS &&
7281 (tinfo->ssi_code == BUS_MCEERR_AR ||
7282 tinfo->ssi_code == BUS_MCEERR_AO)) {
7283 uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
7284 uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
7285 *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
7287 #endif
7289 tinfo->ssi_signo = tswap32(sig);
7290 tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
7291 tinfo->ssi_code = tswap32(info->ssi_code);
7292 tinfo->ssi_pid = tswap32(info->ssi_pid);
7293 tinfo->ssi_uid = tswap32(info->ssi_uid);
7294 tinfo->ssi_fd = tswap32(info->ssi_fd);
7295 tinfo->ssi_tid = tswap32(info->ssi_tid);
7296 tinfo->ssi_band = tswap32(info->ssi_band);
7297 tinfo->ssi_overrun = tswap32(info->ssi_overrun);
7298 tinfo->ssi_trapno = tswap32(info->ssi_trapno);
7299 tinfo->ssi_status = tswap32(info->ssi_status);
7300 tinfo->ssi_int = tswap32(info->ssi_int);
7301 tinfo->ssi_ptr = tswap64(info->ssi_ptr);
7302 tinfo->ssi_utime = tswap64(info->ssi_utime);
7303 tinfo->ssi_stime = tswap64(info->ssi_stime);
7304 tinfo->ssi_addr = tswap64(info->ssi_addr);
7307 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
7309 int i;
7311 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
7312 host_to_target_signalfd_siginfo(buf + i, buf + i);
7315 return len;
7318 static TargetFdTrans target_signalfd_trans = {
7319 .host_to_target_data = host_to_target_data_signalfd,
7322 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7324 int host_flags;
7325 target_sigset_t *target_mask;
7326 sigset_t host_mask;
7327 abi_long ret;
7329 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7330 return -TARGET_EINVAL;
7332 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7333 return -TARGET_EFAULT;
7336 target_to_host_sigset(&host_mask, target_mask);
7338 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7340 ret = get_errno(signalfd(fd, &host_mask, host_flags));
7341 if (ret >= 0) {
7342 fd_trans_register(ret, &target_signalfd_trans);
7345 unlock_user_struct(target_mask, mask, 0);
7347 return ret;
7349 #endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same. */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8) | (status & 0xff);
    }
    return status;
}
7365 static int open_self_cmdline(void *cpu_env, int fd)
7367 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7368 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7369 int i;
7371 for (i = 0; i < bprm->argc; i++) {
7372 size_t len = strlen(bprm->argv[i]) + 1;
7374 if (write(fd, bprm->argv[i], len) != len) {
7375 return -1;
7379 return 0;
7382 static int open_self_maps(void *cpu_env, int fd)
7384 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7385 TaskState *ts = cpu->opaque;
7386 FILE *fp;
7387 char *line = NULL;
7388 size_t len = 0;
7389 ssize_t read;
7391 fp = fopen("/proc/self/maps", "r");
7392 if (fp == NULL) {
7393 return -1;
7396 while ((read = getline(&line, &len, fp)) != -1) {
7397 int fields, dev_maj, dev_min, inode;
7398 uint64_t min, max, offset;
7399 char flag_r, flag_w, flag_x, flag_p;
7400 char path[512] = "";
7401 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7402 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7403 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
7405 if ((fields < 10) || (fields > 11)) {
7406 continue;
7408 if (h2g_valid(min)) {
7409 int flags = page_get_flags(h2g(min));
7410 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
7411 if (page_check_range(h2g(min), max - min, flags) == -1) {
7412 continue;
7414 if (h2g(min) == ts->info->stack_limit) {
7415 pstrcpy(path, sizeof(path), " [stack]");
7417 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
7418 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7419 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7420 flag_x, flag_p, offset, dev_maj, dev_min, inode,
7421 path[0] ? " " : "", path);
7425 free(line);
7426 fclose(fp);
7428 return 0;
7431 static int open_self_stat(void *cpu_env, int fd)
7433 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7434 TaskState *ts = cpu->opaque;
7435 abi_ulong start_stack = ts->info->start_stack;
7436 int i;
7438 for (i = 0; i < 44; i++) {
7439 char buf[128];
7440 int len;
7441 uint64_t val = 0;
7443 if (i == 0) {
7444 /* pid */
7445 val = getpid();
7446 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7447 } else if (i == 1) {
7448 /* app name */
7449 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
7450 } else if (i == 27) {
7451 /* stack bottom */
7452 val = start_stack;
7453 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7454 } else {
7455 /* for the rest, there is MasterCard */
7456 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
7459 len = strlen(buf);
7460 if (write(fd, buf, len) != len) {
7461 return -1;
7465 return 0;
7468 static int open_self_auxv(void *cpu_env, int fd)
7470 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7471 TaskState *ts = cpu->opaque;
7472 abi_ulong auxv = ts->info->saved_auxv;
7473 abi_ulong len = ts->info->auxv_len;
7474 char *ptr;
7477 * Auxiliary vector is stored in target process stack.
7478 * read in whole auxv vector and copy it to file
7480 ptr = lock_user(VERIFY_READ, auxv, len, 0);
7481 if (ptr != NULL) {
7482 while (len > 0) {
7483 ssize_t r;
7484 r = write(fd, ptr, len);
7485 if (r <= 0) {
7486 break;
7488 len -= r;
7489 ptr += r;
7491 lseek(fd, 0, SEEK_SET);
7492 unlock_user(ptr, auxv, len);
7495 return 0;
/* Return 1 if filename names /proc/<self>/<entry> for the current process
 * (either via "self" or our literal pid), else 0. */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (strncmp(filename, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    filename += strlen("/proc/");

    if (strncmp(filename, "self/", strlen("self/")) == 0) {
        filename += strlen("self/");
    } else if (*filename >= '1' && *filename <= '9') {
        /* numeric pid: only matches if it is our own */
        char myself[80];
        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(filename, myself, strlen(myself)) != 0) {
            return 0;
        }
        filename += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(filename, entry) == 0;
}
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-match comparator used by the fake-file table for absolute paths. */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}

/* Fill fd with /proc/net/route, byteswapping the address columns so a
 * cross-endian guest sees them in its own byte order. */
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
               iface, &dest, &gw, &flags, &refcnt, &use, &metric,
               &mask, &mtu, &window, &irtt);
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif
7566 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7568 struct fake_open {
7569 const char *filename;
7570 int (*fill)(void *cpu_env, int fd);
7571 int (*cmp)(const char *s1, const char *s2);
7573 const struct fake_open *fake_open;
7574 static const struct fake_open fakes[] = {
7575 { "maps", open_self_maps, is_proc_myself },
7576 { "stat", open_self_stat, is_proc_myself },
7577 { "auxv", open_self_auxv, is_proc_myself },
7578 { "cmdline", open_self_cmdline, is_proc_myself },
7579 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7580 { "/proc/net/route", open_net_route, is_proc },
7581 #endif
7582 { NULL, NULL, NULL }
7585 if (is_proc_myself(pathname, "exe")) {
7586 int execfd = qemu_getauxval(AT_EXECFD);
7587 return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7590 for (fake_open = fakes; fake_open->filename; fake_open++) {
7591 if (fake_open->cmp(pathname, fake_open->filename)) {
7592 break;
7596 if (fake_open->filename) {
7597 const char *tmpdir;
7598 char filename[PATH_MAX];
7599 int fd, r;
7601 /* create temporary file to map stat to */
7602 tmpdir = getenv("TMPDIR");
7603 if (!tmpdir)
7604 tmpdir = "/tmp";
7605 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7606 fd = mkstemp(filename);
7607 if (fd < 0) {
7608 return fd;
7610 unlink(filename);
7612 if ((r = fake_open->fill(cpu_env, fd))) {
7613 int e = errno;
7614 close(fd);
7615 errno = e;
7616 return r;
7618 lseek(fd, 0, SEEK_SET);
7620 return fd;
7623 return safe_openat(dirfd, path(pathname), flags, mode);
7626 #define TIMER_MAGIC 0x0caf0000
7627 #define TIMER_MAGIC_MASK 0xffff0000
7629 /* Convert QEMU provided timer ID back to internal 16bit index format */
7630 static target_timer_t get_timer_id(abi_long arg)
7632 target_timer_t timerid = arg;
7634 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7635 return -TARGET_EINVAL;
7638 timerid &= 0xffff;
7640 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7641 return -TARGET_EINVAL;
7644 return timerid;
7647 static abi_long swap_data_eventfd(void *buf, size_t len)
7649 uint64_t *counter = buf;
7650 int i;
7652 if (len < sizeof(uint64_t)) {
7653 return -EINVAL;
7656 for (i = 0; i < len; i += sizeof(uint64_t)) {
7657 *counter = tswap64(*counter);
7658 counter++;
7661 return len;
7664 static TargetFdTrans target_eventfd_trans = {
7665 .host_to_target_data = swap_data_eventfd,
7666 .target_to_host_data = swap_data_eventfd,
#if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
    (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
     defined(__NR_inotify_init1))
/*
 * Convert a buffer of host 'struct inotify_event' records, as read from
 * an inotify fd, to guest byte order in place.  Each record's header
 * fields are swapped; the variable-length name that follows is plain
 * bytes and needs no conversion.  'ev->len' is read while still in host
 * order (before the header is swapped) to find the next record.
 *
 * Returns the number of bytes processed (len).
 */
static abi_long host_to_target_data_inotify(void *buf, size_t len)
{
    size_t offset = 0;

    while (offset < len) {
        struct inotify_event *ev =
            (struct inotify_event *)((char *)buf + offset);
        uint32_t name_len = ev->len;

        ev->wd = tswap32(ev->wd);
        ev->mask = tswap32(ev->mask);
        ev->cookie = tswap32(ev->cookie);
        ev->len = tswap32(name_len);

        offset += sizeof(struct inotify_event) + name_len;
    }

    return len;
}

/* Only data read from an inotify fd needs translating; nothing is
 * ever written to one. */
static TargetFdTrans target_inotify_trans = {
    .host_to_target_data = host_to_target_data_inotify,
};
#endif
7696 /* do_syscall() should always have a single exit point at the end so
7697 that actions, such as logging of syscall results, can be performed.
7698 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7699 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
7700 abi_long arg2, abi_long arg3, abi_long arg4,
7701 abi_long arg5, abi_long arg6, abi_long arg7,
7702 abi_long arg8)
7704 CPUState *cpu = ENV_GET_CPU(cpu_env);
7705 abi_long ret;
7706 struct stat st;
7707 struct statfs stfs;
7708 void *p;
7710 #if defined(DEBUG_ERESTARTSYS)
7711 /* Debug-only code for exercising the syscall-restart code paths
7712 * in the per-architecture cpu main loops: restart every syscall
7713 * the guest makes once before letting it through.
7716 static int flag;
7718 flag = !flag;
7719 if (flag) {
7720 return -TARGET_ERESTARTSYS;
7723 #endif
7725 #ifdef DEBUG
7726 gemu_log("syscall %d", num);
7727 #endif
7728 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
7729 if(do_strace)
7730 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
7732 switch(num) {
7733 case TARGET_NR_exit:
7734 /* In old applications this may be used to implement _exit(2).
7735 However in threaded applictions it is used for thread termination,
7736 and _exit_group is used for application termination.
7737 Do thread termination if we have more then one thread. */
7739 if (block_signals()) {
7740 ret = -TARGET_ERESTARTSYS;
7741 break;
7744 cpu_list_lock();
7746 if (CPU_NEXT(first_cpu)) {
7747 TaskState *ts;
7749 /* Remove the CPU from the list. */
7750 QTAILQ_REMOVE(&cpus, cpu, node);
7752 cpu_list_unlock();
7754 ts = cpu->opaque;
7755 if (ts->child_tidptr) {
7756 put_user_u32(0, ts->child_tidptr);
7757 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7758 NULL, NULL, 0);
7760 thread_cpu = NULL;
7761 object_unref(OBJECT(cpu));
7762 g_free(ts);
7763 rcu_unregister_thread();
7764 pthread_exit(NULL);
7767 cpu_list_unlock();
7768 #ifdef TARGET_GPROF
7769 _mcleanup();
7770 #endif
7771 gdb_exit(cpu_env, arg1);
7772 _exit(arg1);
7773 ret = 0; /* avoid warning */
7774 break;
7775 case TARGET_NR_read:
7776 if (arg3 == 0)
7777 ret = 0;
7778 else {
7779 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7780 goto efault;
7781 ret = get_errno(safe_read(arg1, p, arg3));
7782 if (ret >= 0 &&
7783 fd_trans_host_to_target_data(arg1)) {
7784 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7786 unlock_user(p, arg2, ret);
7788 break;
7789 case TARGET_NR_write:
7790 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7791 goto efault;
7792 if (fd_trans_target_to_host_data(arg1)) {
7793 void *copy = g_malloc(arg3);
7794 memcpy(copy, p, arg3);
7795 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7796 if (ret >= 0) {
7797 ret = get_errno(safe_write(arg1, copy, ret));
7799 g_free(copy);
7800 } else {
7801 ret = get_errno(safe_write(arg1, p, arg3));
7803 unlock_user(p, arg2, 0);
7804 break;
7805 #ifdef TARGET_NR_open
7806 case TARGET_NR_open:
7807 if (!(p = lock_user_string(arg1)))
7808 goto efault;
7809 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7810 target_to_host_bitmask(arg2, fcntl_flags_tbl),
7811 arg3));
7812 fd_trans_unregister(ret);
7813 unlock_user(p, arg1, 0);
7814 break;
7815 #endif
7816 case TARGET_NR_openat:
7817 if (!(p = lock_user_string(arg2)))
7818 goto efault;
7819 ret = get_errno(do_openat(cpu_env, arg1, p,
7820 target_to_host_bitmask(arg3, fcntl_flags_tbl),
7821 arg4));
7822 fd_trans_unregister(ret);
7823 unlock_user(p, arg2, 0);
7824 break;
7825 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7826 case TARGET_NR_name_to_handle_at:
7827 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7828 break;
7829 #endif
7830 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7831 case TARGET_NR_open_by_handle_at:
7832 ret = do_open_by_handle_at(arg1, arg2, arg3);
7833 fd_trans_unregister(ret);
7834 break;
7835 #endif
7836 case TARGET_NR_close:
7837 fd_trans_unregister(arg1);
7838 ret = get_errno(close(arg1));
7839 break;
7840 case TARGET_NR_brk:
7841 ret = do_brk(arg1);
7842 break;
7843 #ifdef TARGET_NR_fork
7844 case TARGET_NR_fork:
7845 ret = get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7846 break;
7847 #endif
7848 #ifdef TARGET_NR_waitpid
7849 case TARGET_NR_waitpid:
7851 int status;
7852 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7853 if (!is_error(ret) && arg2 && ret
7854 && put_user_s32(host_to_target_waitstatus(status), arg2))
7855 goto efault;
7857 break;
7858 #endif
7859 #ifdef TARGET_NR_waitid
7860 case TARGET_NR_waitid:
7862 siginfo_t info;
7863 info.si_pid = 0;
7864 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7865 if (!is_error(ret) && arg3 && info.si_pid != 0) {
7866 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7867 goto efault;
7868 host_to_target_siginfo(p, &info);
7869 unlock_user(p, arg3, sizeof(target_siginfo_t));
7872 break;
7873 #endif
7874 #ifdef TARGET_NR_creat /* not on alpha */
7875 case TARGET_NR_creat:
7876 if (!(p = lock_user_string(arg1)))
7877 goto efault;
7878 ret = get_errno(creat(p, arg2));
7879 fd_trans_unregister(ret);
7880 unlock_user(p, arg1, 0);
7881 break;
7882 #endif
7883 #ifdef TARGET_NR_link
7884 case TARGET_NR_link:
7886 void * p2;
7887 p = lock_user_string(arg1);
7888 p2 = lock_user_string(arg2);
7889 if (!p || !p2)
7890 ret = -TARGET_EFAULT;
7891 else
7892 ret = get_errno(link(p, p2));
7893 unlock_user(p2, arg2, 0);
7894 unlock_user(p, arg1, 0);
7896 break;
7897 #endif
7898 #if defined(TARGET_NR_linkat)
7899 case TARGET_NR_linkat:
7901 void * p2 = NULL;
7902 if (!arg2 || !arg4)
7903 goto efault;
7904 p = lock_user_string(arg2);
7905 p2 = lock_user_string(arg4);
7906 if (!p || !p2)
7907 ret = -TARGET_EFAULT;
7908 else
7909 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7910 unlock_user(p, arg2, 0);
7911 unlock_user(p2, arg4, 0);
7913 break;
7914 #endif
7915 #ifdef TARGET_NR_unlink
7916 case TARGET_NR_unlink:
7917 if (!(p = lock_user_string(arg1)))
7918 goto efault;
7919 ret = get_errno(unlink(p));
7920 unlock_user(p, arg1, 0);
7921 break;
7922 #endif
7923 #if defined(TARGET_NR_unlinkat)
7924 case TARGET_NR_unlinkat:
7925 if (!(p = lock_user_string(arg2)))
7926 goto efault;
7927 ret = get_errno(unlinkat(arg1, p, arg3));
7928 unlock_user(p, arg2, 0);
7929 break;
7930 #endif
7931 case TARGET_NR_execve:
7933 char **argp, **envp;
7934 int argc, envc;
7935 abi_ulong gp;
7936 abi_ulong guest_argp;
7937 abi_ulong guest_envp;
7938 abi_ulong addr;
7939 char **q;
7940 int total_size = 0;
7942 argc = 0;
7943 guest_argp = arg2;
7944 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7945 if (get_user_ual(addr, gp))
7946 goto efault;
7947 if (!addr)
7948 break;
7949 argc++;
7951 envc = 0;
7952 guest_envp = arg3;
7953 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7954 if (get_user_ual(addr, gp))
7955 goto efault;
7956 if (!addr)
7957 break;
7958 envc++;
7961 argp = g_new0(char *, argc + 1);
7962 envp = g_new0(char *, envc + 1);
7964 for (gp = guest_argp, q = argp; gp;
7965 gp += sizeof(abi_ulong), q++) {
7966 if (get_user_ual(addr, gp))
7967 goto execve_efault;
7968 if (!addr)
7969 break;
7970 if (!(*q = lock_user_string(addr)))
7971 goto execve_efault;
7972 total_size += strlen(*q) + 1;
7974 *q = NULL;
7976 for (gp = guest_envp, q = envp; gp;
7977 gp += sizeof(abi_ulong), q++) {
7978 if (get_user_ual(addr, gp))
7979 goto execve_efault;
7980 if (!addr)
7981 break;
7982 if (!(*q = lock_user_string(addr)))
7983 goto execve_efault;
7984 total_size += strlen(*q) + 1;
7986 *q = NULL;
7988 if (!(p = lock_user_string(arg1)))
7989 goto execve_efault;
7990 /* Although execve() is not an interruptible syscall it is
7991 * a special case where we must use the safe_syscall wrapper:
7992 * if we allow a signal to happen before we make the host
7993 * syscall then we will 'lose' it, because at the point of
7994 * execve the process leaves QEMU's control. So we use the
7995 * safe syscall wrapper to ensure that we either take the
7996 * signal as a guest signal, or else it does not happen
7997 * before the execve completes and makes it the other
7998 * program's problem.
8000 ret = get_errno(safe_execve(p, argp, envp));
8001 unlock_user(p, arg1, 0);
8003 goto execve_end;
8005 execve_efault:
8006 ret = -TARGET_EFAULT;
8008 execve_end:
8009 for (gp = guest_argp, q = argp; *q;
8010 gp += sizeof(abi_ulong), q++) {
8011 if (get_user_ual(addr, gp)
8012 || !addr)
8013 break;
8014 unlock_user(*q, addr, 0);
8016 for (gp = guest_envp, q = envp; *q;
8017 gp += sizeof(abi_ulong), q++) {
8018 if (get_user_ual(addr, gp)
8019 || !addr)
8020 break;
8021 unlock_user(*q, addr, 0);
8024 g_free(argp);
8025 g_free(envp);
8027 break;
8028 case TARGET_NR_chdir:
8029 if (!(p = lock_user_string(arg1)))
8030 goto efault;
8031 ret = get_errno(chdir(p));
8032 unlock_user(p, arg1, 0);
8033 break;
8034 #ifdef TARGET_NR_time
8035 case TARGET_NR_time:
8037 time_t host_time;
8038 ret = get_errno(time(&host_time));
8039 if (!is_error(ret)
8040 && arg1
8041 && put_user_sal(host_time, arg1))
8042 goto efault;
8044 break;
8045 #endif
8046 #ifdef TARGET_NR_mknod
8047 case TARGET_NR_mknod:
8048 if (!(p = lock_user_string(arg1)))
8049 goto efault;
8050 ret = get_errno(mknod(p, arg2, arg3));
8051 unlock_user(p, arg1, 0);
8052 break;
8053 #endif
8054 #if defined(TARGET_NR_mknodat)
8055 case TARGET_NR_mknodat:
8056 if (!(p = lock_user_string(arg2)))
8057 goto efault;
8058 ret = get_errno(mknodat(arg1, p, arg3, arg4));
8059 unlock_user(p, arg2, 0);
8060 break;
8061 #endif
8062 #ifdef TARGET_NR_chmod
8063 case TARGET_NR_chmod:
8064 if (!(p = lock_user_string(arg1)))
8065 goto efault;
8066 ret = get_errno(chmod(p, arg2));
8067 unlock_user(p, arg1, 0);
8068 break;
8069 #endif
8070 #ifdef TARGET_NR_break
8071 case TARGET_NR_break:
8072 goto unimplemented;
8073 #endif
8074 #ifdef TARGET_NR_oldstat
8075 case TARGET_NR_oldstat:
8076 goto unimplemented;
8077 #endif
8078 case TARGET_NR_lseek:
8079 ret = get_errno(lseek(arg1, arg2, arg3));
8080 break;
8081 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8082 /* Alpha specific */
8083 case TARGET_NR_getxpid:
8084 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8085 ret = get_errno(getpid());
8086 break;
8087 #endif
8088 #ifdef TARGET_NR_getpid
8089 case TARGET_NR_getpid:
8090 ret = get_errno(getpid());
8091 break;
8092 #endif
8093 case TARGET_NR_mount:
8095 /* need to look at the data field */
8096 void *p2, *p3;
8098 if (arg1) {
8099 p = lock_user_string(arg1);
8100 if (!p) {
8101 goto efault;
8103 } else {
8104 p = NULL;
8107 p2 = lock_user_string(arg2);
8108 if (!p2) {
8109 if (arg1) {
8110 unlock_user(p, arg1, 0);
8112 goto efault;
8115 if (arg3) {
8116 p3 = lock_user_string(arg3);
8117 if (!p3) {
8118 if (arg1) {
8119 unlock_user(p, arg1, 0);
8121 unlock_user(p2, arg2, 0);
8122 goto efault;
8124 } else {
8125 p3 = NULL;
8128 /* FIXME - arg5 should be locked, but it isn't clear how to
8129 * do that since it's not guaranteed to be a NULL-terminated
8130 * string.
8132 if (!arg5) {
8133 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8134 } else {
8135 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8137 ret = get_errno(ret);
8139 if (arg1) {
8140 unlock_user(p, arg1, 0);
8142 unlock_user(p2, arg2, 0);
8143 if (arg3) {
8144 unlock_user(p3, arg3, 0);
8147 break;
8148 #ifdef TARGET_NR_umount
8149 case TARGET_NR_umount:
8150 if (!(p = lock_user_string(arg1)))
8151 goto efault;
8152 ret = get_errno(umount(p));
8153 unlock_user(p, arg1, 0);
8154 break;
8155 #endif
8156 #ifdef TARGET_NR_stime /* not on alpha */
8157 case TARGET_NR_stime:
8159 time_t host_time;
8160 if (get_user_sal(host_time, arg1))
8161 goto efault;
8162 ret = get_errno(stime(&host_time));
8164 break;
8165 #endif
8166 case TARGET_NR_ptrace:
8167 goto unimplemented;
8168 #ifdef TARGET_NR_alarm /* not on alpha */
8169 case TARGET_NR_alarm:
8170 ret = alarm(arg1);
8171 break;
8172 #endif
8173 #ifdef TARGET_NR_oldfstat
8174 case TARGET_NR_oldfstat:
8175 goto unimplemented;
8176 #endif
8177 #ifdef TARGET_NR_pause /* not on alpha */
8178 case TARGET_NR_pause:
8179 if (!block_signals()) {
8180 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8182 ret = -TARGET_EINTR;
8183 break;
8184 #endif
8185 #ifdef TARGET_NR_utime
8186 case TARGET_NR_utime:
8188 struct utimbuf tbuf, *host_tbuf;
8189 struct target_utimbuf *target_tbuf;
8190 if (arg2) {
8191 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8192 goto efault;
8193 tbuf.actime = tswapal(target_tbuf->actime);
8194 tbuf.modtime = tswapal(target_tbuf->modtime);
8195 unlock_user_struct(target_tbuf, arg2, 0);
8196 host_tbuf = &tbuf;
8197 } else {
8198 host_tbuf = NULL;
8200 if (!(p = lock_user_string(arg1)))
8201 goto efault;
8202 ret = get_errno(utime(p, host_tbuf));
8203 unlock_user(p, arg1, 0);
8205 break;
8206 #endif
8207 #ifdef TARGET_NR_utimes
8208 case TARGET_NR_utimes:
8210 struct timeval *tvp, tv[2];
8211 if (arg2) {
8212 if (copy_from_user_timeval(&tv[0], arg2)
8213 || copy_from_user_timeval(&tv[1],
8214 arg2 + sizeof(struct target_timeval)))
8215 goto efault;
8216 tvp = tv;
8217 } else {
8218 tvp = NULL;
8220 if (!(p = lock_user_string(arg1)))
8221 goto efault;
8222 ret = get_errno(utimes(p, tvp));
8223 unlock_user(p, arg1, 0);
8225 break;
8226 #endif
8227 #if defined(TARGET_NR_futimesat)
8228 case TARGET_NR_futimesat:
8230 struct timeval *tvp, tv[2];
8231 if (arg3) {
8232 if (copy_from_user_timeval(&tv[0], arg3)
8233 || copy_from_user_timeval(&tv[1],
8234 arg3 + sizeof(struct target_timeval)))
8235 goto efault;
8236 tvp = tv;
8237 } else {
8238 tvp = NULL;
8240 if (!(p = lock_user_string(arg2)))
8241 goto efault;
8242 ret = get_errno(futimesat(arg1, path(p), tvp));
8243 unlock_user(p, arg2, 0);
8245 break;
8246 #endif
8247 #ifdef TARGET_NR_stty
8248 case TARGET_NR_stty:
8249 goto unimplemented;
8250 #endif
8251 #ifdef TARGET_NR_gtty
8252 case TARGET_NR_gtty:
8253 goto unimplemented;
8254 #endif
8255 #ifdef TARGET_NR_access
8256 case TARGET_NR_access:
8257 if (!(p = lock_user_string(arg1)))
8258 goto efault;
8259 ret = get_errno(access(path(p), arg2));
8260 unlock_user(p, arg1, 0);
8261 break;
8262 #endif
8263 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8264 case TARGET_NR_faccessat:
8265 if (!(p = lock_user_string(arg2)))
8266 goto efault;
8267 ret = get_errno(faccessat(arg1, p, arg3, 0));
8268 unlock_user(p, arg2, 0);
8269 break;
8270 #endif
8271 #ifdef TARGET_NR_nice /* not on alpha */
8272 case TARGET_NR_nice:
8273 ret = get_errno(nice(arg1));
8274 break;
8275 #endif
8276 #ifdef TARGET_NR_ftime
8277 case TARGET_NR_ftime:
8278 goto unimplemented;
8279 #endif
8280 case TARGET_NR_sync:
8281 sync();
8282 ret = 0;
8283 break;
8284 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8285 case TARGET_NR_syncfs:
8286 ret = get_errno(syncfs(arg1));
8287 break;
8288 #endif
8289 case TARGET_NR_kill:
8290 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8291 break;
8292 #ifdef TARGET_NR_rename
8293 case TARGET_NR_rename:
8295 void *p2;
8296 p = lock_user_string(arg1);
8297 p2 = lock_user_string(arg2);
8298 if (!p || !p2)
8299 ret = -TARGET_EFAULT;
8300 else
8301 ret = get_errno(rename(p, p2));
8302 unlock_user(p2, arg2, 0);
8303 unlock_user(p, arg1, 0);
8305 break;
8306 #endif
8307 #if defined(TARGET_NR_renameat)
8308 case TARGET_NR_renameat:
8310 void *p2;
8311 p = lock_user_string(arg2);
8312 p2 = lock_user_string(arg4);
8313 if (!p || !p2)
8314 ret = -TARGET_EFAULT;
8315 else
8316 ret = get_errno(renameat(arg1, p, arg3, p2));
8317 unlock_user(p2, arg4, 0);
8318 unlock_user(p, arg2, 0);
8320 break;
8321 #endif
8322 #ifdef TARGET_NR_mkdir
8323 case TARGET_NR_mkdir:
8324 if (!(p = lock_user_string(arg1)))
8325 goto efault;
8326 ret = get_errno(mkdir(p, arg2));
8327 unlock_user(p, arg1, 0);
8328 break;
8329 #endif
8330 #if defined(TARGET_NR_mkdirat)
8331 case TARGET_NR_mkdirat:
8332 if (!(p = lock_user_string(arg2)))
8333 goto efault;
8334 ret = get_errno(mkdirat(arg1, p, arg3));
8335 unlock_user(p, arg2, 0);
8336 break;
8337 #endif
8338 #ifdef TARGET_NR_rmdir
8339 case TARGET_NR_rmdir:
8340 if (!(p = lock_user_string(arg1)))
8341 goto efault;
8342 ret = get_errno(rmdir(p));
8343 unlock_user(p, arg1, 0);
8344 break;
8345 #endif
8346 case TARGET_NR_dup:
8347 ret = get_errno(dup(arg1));
8348 if (ret >= 0) {
8349 fd_trans_dup(arg1, ret);
8351 break;
8352 #ifdef TARGET_NR_pipe
8353 case TARGET_NR_pipe:
8354 ret = do_pipe(cpu_env, arg1, 0, 0);
8355 break;
8356 #endif
8357 #ifdef TARGET_NR_pipe2
8358 case TARGET_NR_pipe2:
8359 ret = do_pipe(cpu_env, arg1,
8360 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8361 break;
8362 #endif
8363 case TARGET_NR_times:
8365 struct target_tms *tmsp;
8366 struct tms tms;
8367 ret = get_errno(times(&tms));
8368 if (arg1) {
8369 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8370 if (!tmsp)
8371 goto efault;
8372 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8373 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8374 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8375 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8377 if (!is_error(ret))
8378 ret = host_to_target_clock_t(ret);
8380 break;
8381 #ifdef TARGET_NR_prof
8382 case TARGET_NR_prof:
8383 goto unimplemented;
8384 #endif
8385 #ifdef TARGET_NR_signal
8386 case TARGET_NR_signal:
8387 goto unimplemented;
8388 #endif
8389 case TARGET_NR_acct:
8390 if (arg1 == 0) {
8391 ret = get_errno(acct(NULL));
8392 } else {
8393 if (!(p = lock_user_string(arg1)))
8394 goto efault;
8395 ret = get_errno(acct(path(p)));
8396 unlock_user(p, arg1, 0);
8398 break;
8399 #ifdef TARGET_NR_umount2
8400 case TARGET_NR_umount2:
8401 if (!(p = lock_user_string(arg1)))
8402 goto efault;
8403 ret = get_errno(umount2(p, arg2));
8404 unlock_user(p, arg1, 0);
8405 break;
8406 #endif
8407 #ifdef TARGET_NR_lock
8408 case TARGET_NR_lock:
8409 goto unimplemented;
8410 #endif
8411 case TARGET_NR_ioctl:
8412 ret = do_ioctl(arg1, arg2, arg3);
8413 break;
8414 case TARGET_NR_fcntl:
8415 ret = do_fcntl(arg1, arg2, arg3);
8416 break;
8417 #ifdef TARGET_NR_mpx
8418 case TARGET_NR_mpx:
8419 goto unimplemented;
8420 #endif
8421 case TARGET_NR_setpgid:
8422 ret = get_errno(setpgid(arg1, arg2));
8423 break;
8424 #ifdef TARGET_NR_ulimit
8425 case TARGET_NR_ulimit:
8426 goto unimplemented;
8427 #endif
8428 #ifdef TARGET_NR_oldolduname
8429 case TARGET_NR_oldolduname:
8430 goto unimplemented;
8431 #endif
8432 case TARGET_NR_umask:
8433 ret = get_errno(umask(arg1));
8434 break;
8435 case TARGET_NR_chroot:
8436 if (!(p = lock_user_string(arg1)))
8437 goto efault;
8438 ret = get_errno(chroot(p));
8439 unlock_user(p, arg1, 0);
8440 break;
8441 #ifdef TARGET_NR_ustat
8442 case TARGET_NR_ustat:
8443 goto unimplemented;
8444 #endif
8445 #ifdef TARGET_NR_dup2
8446 case TARGET_NR_dup2:
8447 ret = get_errno(dup2(arg1, arg2));
8448 if (ret >= 0) {
8449 fd_trans_dup(arg1, arg2);
8451 break;
8452 #endif
8453 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8454 case TARGET_NR_dup3:
8455 ret = get_errno(dup3(arg1, arg2, arg3));
8456 if (ret >= 0) {
8457 fd_trans_dup(arg1, arg2);
8459 break;
8460 #endif
8461 #ifdef TARGET_NR_getppid /* not on alpha */
8462 case TARGET_NR_getppid:
8463 ret = get_errno(getppid());
8464 break;
8465 #endif
8466 #ifdef TARGET_NR_getpgrp
8467 case TARGET_NR_getpgrp:
8468 ret = get_errno(getpgrp());
8469 break;
8470 #endif
8471 case TARGET_NR_setsid:
8472 ret = get_errno(setsid());
8473 break;
8474 #ifdef TARGET_NR_sigaction
8475 case TARGET_NR_sigaction:
8477 #if defined(TARGET_ALPHA)
8478 struct target_sigaction act, oact, *pact = 0;
8479 struct target_old_sigaction *old_act;
8480 if (arg2) {
8481 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8482 goto efault;
8483 act._sa_handler = old_act->_sa_handler;
8484 target_siginitset(&act.sa_mask, old_act->sa_mask);
8485 act.sa_flags = old_act->sa_flags;
8486 act.sa_restorer = 0;
8487 unlock_user_struct(old_act, arg2, 0);
8488 pact = &act;
8490 ret = get_errno(do_sigaction(arg1, pact, &oact));
8491 if (!is_error(ret) && arg3) {
8492 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8493 goto efault;
8494 old_act->_sa_handler = oact._sa_handler;
8495 old_act->sa_mask = oact.sa_mask.sig[0];
8496 old_act->sa_flags = oact.sa_flags;
8497 unlock_user_struct(old_act, arg3, 1);
8499 #elif defined(TARGET_MIPS)
8500 struct target_sigaction act, oact, *pact, *old_act;
8502 if (arg2) {
8503 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8504 goto efault;
8505 act._sa_handler = old_act->_sa_handler;
8506 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8507 act.sa_flags = old_act->sa_flags;
8508 unlock_user_struct(old_act, arg2, 0);
8509 pact = &act;
8510 } else {
8511 pact = NULL;
8514 ret = get_errno(do_sigaction(arg1, pact, &oact));
8516 if (!is_error(ret) && arg3) {
8517 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8518 goto efault;
8519 old_act->_sa_handler = oact._sa_handler;
8520 old_act->sa_flags = oact.sa_flags;
8521 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8522 old_act->sa_mask.sig[1] = 0;
8523 old_act->sa_mask.sig[2] = 0;
8524 old_act->sa_mask.sig[3] = 0;
8525 unlock_user_struct(old_act, arg3, 1);
8527 #else
8528 struct target_old_sigaction *old_act;
8529 struct target_sigaction act, oact, *pact;
8530 if (arg2) {
8531 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8532 goto efault;
8533 act._sa_handler = old_act->_sa_handler;
8534 target_siginitset(&act.sa_mask, old_act->sa_mask);
8535 act.sa_flags = old_act->sa_flags;
8536 act.sa_restorer = old_act->sa_restorer;
8537 unlock_user_struct(old_act, arg2, 0);
8538 pact = &act;
8539 } else {
8540 pact = NULL;
8542 ret = get_errno(do_sigaction(arg1, pact, &oact));
8543 if (!is_error(ret) && arg3) {
8544 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8545 goto efault;
8546 old_act->_sa_handler = oact._sa_handler;
8547 old_act->sa_mask = oact.sa_mask.sig[0];
8548 old_act->sa_flags = oact.sa_flags;
8549 old_act->sa_restorer = oact.sa_restorer;
8550 unlock_user_struct(old_act, arg3, 1);
8552 #endif
8554 break;
8555 #endif
8556 case TARGET_NR_rt_sigaction:
8558 #if defined(TARGET_ALPHA)
8559 struct target_sigaction act, oact, *pact = 0;
8560 struct target_rt_sigaction *rt_act;
8562 if (arg4 != sizeof(target_sigset_t)) {
8563 ret = -TARGET_EINVAL;
8564 break;
8566 if (arg2) {
8567 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8568 goto efault;
8569 act._sa_handler = rt_act->_sa_handler;
8570 act.sa_mask = rt_act->sa_mask;
8571 act.sa_flags = rt_act->sa_flags;
8572 act.sa_restorer = arg5;
8573 unlock_user_struct(rt_act, arg2, 0);
8574 pact = &act;
8576 ret = get_errno(do_sigaction(arg1, pact, &oact));
8577 if (!is_error(ret) && arg3) {
8578 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8579 goto efault;
8580 rt_act->_sa_handler = oact._sa_handler;
8581 rt_act->sa_mask = oact.sa_mask;
8582 rt_act->sa_flags = oact.sa_flags;
8583 unlock_user_struct(rt_act, arg3, 1);
8585 #else
8586 struct target_sigaction *act;
8587 struct target_sigaction *oact;
8589 if (arg4 != sizeof(target_sigset_t)) {
8590 ret = -TARGET_EINVAL;
8591 break;
8593 if (arg2) {
8594 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
8595 goto efault;
8596 } else
8597 act = NULL;
8598 if (arg3) {
8599 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8600 ret = -TARGET_EFAULT;
8601 goto rt_sigaction_fail;
8603 } else
8604 oact = NULL;
8605 ret = get_errno(do_sigaction(arg1, act, oact));
8606 rt_sigaction_fail:
8607 if (act)
8608 unlock_user_struct(act, arg2, 0);
8609 if (oact)
8610 unlock_user_struct(oact, arg3, 1);
8611 #endif
8613 break;
8614 #ifdef TARGET_NR_sgetmask /* not on alpha */
8615 case TARGET_NR_sgetmask:
8617 sigset_t cur_set;
8618 abi_ulong target_set;
8619 ret = do_sigprocmask(0, NULL, &cur_set);
8620 if (!ret) {
8621 host_to_target_old_sigset(&target_set, &cur_set);
8622 ret = target_set;
8625 break;
8626 #endif
8627 #ifdef TARGET_NR_ssetmask /* not on alpha */
8628 case TARGET_NR_ssetmask:
8630 sigset_t set, oset;
8631 abi_ulong target_set = arg1;
8632 target_to_host_old_sigset(&set, &target_set);
8633 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8634 if (!ret) {
8635 host_to_target_old_sigset(&target_set, &oset);
8636 ret = target_set;
8639 break;
8640 #endif
8641 #ifdef TARGET_NR_sigprocmask
8642 case TARGET_NR_sigprocmask:
8644 #if defined(TARGET_ALPHA)
8645 sigset_t set, oldset;
8646 abi_ulong mask;
8647 int how;
8649 switch (arg1) {
8650 case TARGET_SIG_BLOCK:
8651 how = SIG_BLOCK;
8652 break;
8653 case TARGET_SIG_UNBLOCK:
8654 how = SIG_UNBLOCK;
8655 break;
8656 case TARGET_SIG_SETMASK:
8657 how = SIG_SETMASK;
8658 break;
8659 default:
8660 ret = -TARGET_EINVAL;
8661 goto fail;
8663 mask = arg2;
8664 target_to_host_old_sigset(&set, &mask);
8666 ret = do_sigprocmask(how, &set, &oldset);
8667 if (!is_error(ret)) {
8668 host_to_target_old_sigset(&mask, &oldset);
8669 ret = mask;
8670 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8672 #else
8673 sigset_t set, oldset, *set_ptr;
8674 int how;
8676 if (arg2) {
8677 switch (arg1) {
8678 case TARGET_SIG_BLOCK:
8679 how = SIG_BLOCK;
8680 break;
8681 case TARGET_SIG_UNBLOCK:
8682 how = SIG_UNBLOCK;
8683 break;
8684 case TARGET_SIG_SETMASK:
8685 how = SIG_SETMASK;
8686 break;
8687 default:
8688 ret = -TARGET_EINVAL;
8689 goto fail;
8691 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8692 goto efault;
8693 target_to_host_old_sigset(&set, p);
8694 unlock_user(p, arg2, 0);
8695 set_ptr = &set;
8696 } else {
8697 how = 0;
8698 set_ptr = NULL;
8700 ret = do_sigprocmask(how, set_ptr, &oldset);
8701 if (!is_error(ret) && arg3) {
8702 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8703 goto efault;
8704 host_to_target_old_sigset(p, &oldset);
8705 unlock_user(p, arg3, sizeof(target_sigset_t));
8707 #endif
8709 break;
8710 #endif
8711 case TARGET_NR_rt_sigprocmask:
8713 int how = arg1;
8714 sigset_t set, oldset, *set_ptr;
8716 if (arg4 != sizeof(target_sigset_t)) {
8717 ret = -TARGET_EINVAL;
8718 break;
8721 if (arg2) {
8722 switch(how) {
8723 case TARGET_SIG_BLOCK:
8724 how = SIG_BLOCK;
8725 break;
8726 case TARGET_SIG_UNBLOCK:
8727 how = SIG_UNBLOCK;
8728 break;
8729 case TARGET_SIG_SETMASK:
8730 how = SIG_SETMASK;
8731 break;
8732 default:
8733 ret = -TARGET_EINVAL;
8734 goto fail;
8736 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8737 goto efault;
8738 target_to_host_sigset(&set, p);
8739 unlock_user(p, arg2, 0);
8740 set_ptr = &set;
8741 } else {
8742 how = 0;
8743 set_ptr = NULL;
8745 ret = do_sigprocmask(how, set_ptr, &oldset);
8746 if (!is_error(ret) && arg3) {
8747 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8748 goto efault;
8749 host_to_target_sigset(p, &oldset);
8750 unlock_user(p, arg3, sizeof(target_sigset_t));
8753 break;
8754 #ifdef TARGET_NR_sigpending
8755 case TARGET_NR_sigpending:
8757 sigset_t set;
8758 ret = get_errno(sigpending(&set));
8759 if (!is_error(ret)) {
8760 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8761 goto efault;
8762 host_to_target_old_sigset(p, &set);
8763 unlock_user(p, arg1, sizeof(target_sigset_t));
8766 break;
8767 #endif
8768 case TARGET_NR_rt_sigpending:
8770 sigset_t set;
8772 /* Yes, this check is >, not != like most. We follow the kernel's
8773 * logic and it does it like this because it implements
8774 * NR_sigpending through the same code path, and in that case
8775 * the old_sigset_t is smaller in size.
8777 if (arg2 > sizeof(target_sigset_t)) {
8778 ret = -TARGET_EINVAL;
8779 break;
8782 ret = get_errno(sigpending(&set));
8783 if (!is_error(ret)) {
8784 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8785 goto efault;
8786 host_to_target_sigset(p, &set);
8787 unlock_user(p, arg1, sizeof(target_sigset_t));
8790 break;
8791 #ifdef TARGET_NR_sigsuspend
8792 case TARGET_NR_sigsuspend:
8794 TaskState *ts = cpu->opaque;
8795 #if defined(TARGET_ALPHA)
8796 abi_ulong mask = arg1;
8797 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8798 #else
8799 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8800 goto efault;
8801 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8802 unlock_user(p, arg1, 0);
8803 #endif
8804 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8805 SIGSET_T_SIZE));
8806 if (ret != -TARGET_ERESTARTSYS) {
8807 ts->in_sigsuspend = 1;
8810 break;
8811 #endif
8812 case TARGET_NR_rt_sigsuspend:
8814 TaskState *ts = cpu->opaque;
8816 if (arg2 != sizeof(target_sigset_t)) {
8817 ret = -TARGET_EINVAL;
8818 break;
8820 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8821 goto efault;
8822 target_to_host_sigset(&ts->sigsuspend_mask, p);
8823 unlock_user(p, arg1, 0);
8824 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8825 SIGSET_T_SIZE));
8826 if (ret != -TARGET_ERESTARTSYS) {
8827 ts->in_sigsuspend = 1;
8830 break;
8831 case TARGET_NR_rt_sigtimedwait:
8833 sigset_t set;
8834 struct timespec uts, *puts;
8835 siginfo_t uinfo;
8837 if (arg4 != sizeof(target_sigset_t)) {
8838 ret = -TARGET_EINVAL;
8839 break;
8842 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8843 goto efault;
8844 target_to_host_sigset(&set, p);
8845 unlock_user(p, arg1, 0);
8846 if (arg3) {
8847 puts = &uts;
8848 target_to_host_timespec(puts, arg3);
8849 } else {
8850 puts = NULL;
8852 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8853 SIGSET_T_SIZE));
8854 if (!is_error(ret)) {
8855 if (arg2) {
8856 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8858 if (!p) {
8859 goto efault;
8861 host_to_target_siginfo(p, &uinfo);
8862 unlock_user(p, arg2, sizeof(target_siginfo_t));
8864 ret = host_to_target_signal(ret);
8867 break;
8868 case TARGET_NR_rt_sigqueueinfo:
8870 siginfo_t uinfo;
8872 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8873 if (!p) {
8874 goto efault;
8876 target_to_host_siginfo(&uinfo, p);
8877 unlock_user(p, arg3, 0);
8878 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8880 break;
8881 case TARGET_NR_rt_tgsigqueueinfo:
8883 siginfo_t uinfo;
8885 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8886 if (!p) {
8887 goto efault;
8889 target_to_host_siginfo(&uinfo, p);
8890 unlock_user(p, arg4, 0);
8891 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8893 break;
8894 #ifdef TARGET_NR_sigreturn
8895 case TARGET_NR_sigreturn:
8896 if (block_signals()) {
8897 ret = -TARGET_ERESTARTSYS;
8898 } else {
8899 ret = do_sigreturn(cpu_env);
8901 break;
8902 #endif
8903 case TARGET_NR_rt_sigreturn:
8904 if (block_signals()) {
8905 ret = -TARGET_ERESTARTSYS;
8906 } else {
8907 ret = do_rt_sigreturn(cpu_env);
8909 break;
8910 case TARGET_NR_sethostname:
8911 if (!(p = lock_user_string(arg1)))
8912 goto efault;
8913 ret = get_errno(sethostname(p, arg2));
8914 unlock_user(p, arg1, 0);
8915 break;
8916 case TARGET_NR_setrlimit:
8918 int resource = target_to_host_resource(arg1);
8919 struct target_rlimit *target_rlim;
8920 struct rlimit rlim;
8921 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8922 goto efault;
8923 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8924 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8925 unlock_user_struct(target_rlim, arg2, 0);
8926 ret = get_errno(setrlimit(resource, &rlim));
8928 break;
8929 case TARGET_NR_getrlimit:
8931 int resource = target_to_host_resource(arg1);
8932 struct target_rlimit *target_rlim;
8933 struct rlimit rlim;
8935 ret = get_errno(getrlimit(resource, &rlim));
8936 if (!is_error(ret)) {
8937 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8938 goto efault;
8939 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8940 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8941 unlock_user_struct(target_rlim, arg2, 1);
8944 break;
8945 case TARGET_NR_getrusage:
8947 struct rusage rusage;
8948 ret = get_errno(getrusage(arg1, &rusage));
8949 if (!is_error(ret)) {
8950 ret = host_to_target_rusage(arg2, &rusage);
8953 break;
8954 case TARGET_NR_gettimeofday:
8956 struct timeval tv;
8957 ret = get_errno(gettimeofday(&tv, NULL));
8958 if (!is_error(ret)) {
8959 if (copy_to_user_timeval(arg1, &tv))
8960 goto efault;
8963 break;
8964 case TARGET_NR_settimeofday:
8966 struct timeval tv, *ptv = NULL;
8967 struct timezone tz, *ptz = NULL;
8969 if (arg1) {
8970 if (copy_from_user_timeval(&tv, arg1)) {
8971 goto efault;
8973 ptv = &tv;
8976 if (arg2) {
8977 if (copy_from_user_timezone(&tz, arg2)) {
8978 goto efault;
8980 ptz = &tz;
8983 ret = get_errno(settimeofday(ptv, ptz));
8985 break;
8986 #if defined(TARGET_NR_select)
8987 case TARGET_NR_select:
8988 #if defined(TARGET_WANT_NI_OLD_SELECT)
8989 /* some architectures used to have old_select here
8990 * but now ENOSYS it.
8992 ret = -TARGET_ENOSYS;
8993 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8994 ret = do_old_select(arg1);
8995 #else
8996 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8997 #endif
8998 break;
8999 #endif
9000 #ifdef TARGET_NR_pselect6
9001 case TARGET_NR_pselect6:
9003 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
9004 fd_set rfds, wfds, efds;
9005 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
9006 struct timespec ts, *ts_ptr;
9009 * The 6th arg is actually two args smashed together,
9010 * so we cannot use the C library.
9012 sigset_t set;
9013 struct {
9014 sigset_t *set;
9015 size_t size;
9016 } sig, *sig_ptr;
9018 abi_ulong arg_sigset, arg_sigsize, *arg7;
9019 target_sigset_t *target_sigset;
9021 n = arg1;
9022 rfd_addr = arg2;
9023 wfd_addr = arg3;
9024 efd_addr = arg4;
9025 ts_addr = arg5;
9027 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
9028 if (ret) {
9029 goto fail;
9031 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
9032 if (ret) {
9033 goto fail;
9035 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
9036 if (ret) {
9037 goto fail;
9041 * This takes a timespec, and not a timeval, so we cannot
9042 * use the do_select() helper ...
9044 if (ts_addr) {
9045 if (target_to_host_timespec(&ts, ts_addr)) {
9046 goto efault;
9048 ts_ptr = &ts;
9049 } else {
9050 ts_ptr = NULL;
9053 /* Extract the two packed args for the sigset */
9054 if (arg6) {
9055 sig_ptr = &sig;
9056 sig.size = SIGSET_T_SIZE;
9058 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
9059 if (!arg7) {
9060 goto efault;
9062 arg_sigset = tswapal(arg7[0]);
9063 arg_sigsize = tswapal(arg7[1]);
9064 unlock_user(arg7, arg6, 0);
9066 if (arg_sigset) {
9067 sig.set = &set;
9068 if (arg_sigsize != sizeof(*target_sigset)) {
9069 /* Like the kernel, we enforce correct size sigsets */
9070 ret = -TARGET_EINVAL;
9071 goto fail;
9073 target_sigset = lock_user(VERIFY_READ, arg_sigset,
9074 sizeof(*target_sigset), 1);
9075 if (!target_sigset) {
9076 goto efault;
9078 target_to_host_sigset(&set, target_sigset);
9079 unlock_user(target_sigset, arg_sigset, 0);
9080 } else {
9081 sig.set = NULL;
9083 } else {
9084 sig_ptr = NULL;
9087 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
9088 ts_ptr, sig_ptr));
9090 if (!is_error(ret)) {
9091 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
9092 goto efault;
9093 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
9094 goto efault;
9095 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
9096 goto efault;
9098 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
9099 goto efault;
9102 break;
9103 #endif
9104 #ifdef TARGET_NR_symlink
9105 case TARGET_NR_symlink:
9107 void *p2;
9108 p = lock_user_string(arg1);
9109 p2 = lock_user_string(arg2);
9110 if (!p || !p2)
9111 ret = -TARGET_EFAULT;
9112 else
9113 ret = get_errno(symlink(p, p2));
9114 unlock_user(p2, arg2, 0);
9115 unlock_user(p, arg1, 0);
9117 break;
9118 #endif
9119 #if defined(TARGET_NR_symlinkat)
9120 case TARGET_NR_symlinkat:
9122 void *p2;
9123 p = lock_user_string(arg1);
9124 p2 = lock_user_string(arg3);
9125 if (!p || !p2)
9126 ret = -TARGET_EFAULT;
9127 else
9128 ret = get_errno(symlinkat(p, arg2, p2));
9129 unlock_user(p2, arg3, 0);
9130 unlock_user(p, arg1, 0);
9132 break;
9133 #endif
9134 #ifdef TARGET_NR_oldlstat
9135 case TARGET_NR_oldlstat:
9136 goto unimplemented;
9137 #endif
9138 #ifdef TARGET_NR_readlink
9139 case TARGET_NR_readlink:
9141 void *p2;
9142 p = lock_user_string(arg1);
9143 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9144 if (!p || !p2) {
9145 ret = -TARGET_EFAULT;
9146 } else if (!arg3) {
9147 /* Short circuit this for the magic exe check. */
9148 ret = -TARGET_EINVAL;
9149 } else if (is_proc_myself((const char *)p, "exe")) {
9150 char real[PATH_MAX], *temp;
9151 temp = realpath(exec_path, real);
9152 /* Return value is # of bytes that we wrote to the buffer. */
9153 if (temp == NULL) {
9154 ret = get_errno(-1);
9155 } else {
9156 /* Don't worry about sign mismatch as earlier mapping
9157 * logic would have thrown a bad address error. */
9158 ret = MIN(strlen(real), arg3);
9159 /* We cannot NUL terminate the string. */
9160 memcpy(p2, real, ret);
9162 } else {
9163 ret = get_errno(readlink(path(p), p2, arg3));
9165 unlock_user(p2, arg2, ret);
9166 unlock_user(p, arg1, 0);
9168 break;
9169 #endif
9170 #if defined(TARGET_NR_readlinkat)
9171 case TARGET_NR_readlinkat:
9173 void *p2;
9174 p = lock_user_string(arg2);
9175 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9176 if (!p || !p2) {
9177 ret = -TARGET_EFAULT;
9178 } else if (is_proc_myself((const char *)p, "exe")) {
9179 char real[PATH_MAX], *temp;
9180 temp = realpath(exec_path, real);
9181 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
9182 snprintf((char *)p2, arg4, "%s", real);
9183 } else {
9184 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9186 unlock_user(p2, arg3, ret);
9187 unlock_user(p, arg2, 0);
9189 break;
9190 #endif
9191 #ifdef TARGET_NR_uselib
9192 case TARGET_NR_uselib:
9193 goto unimplemented;
9194 #endif
9195 #ifdef TARGET_NR_swapon
9196 case TARGET_NR_swapon:
9197 if (!(p = lock_user_string(arg1)))
9198 goto efault;
9199 ret = get_errno(swapon(p, arg2));
9200 unlock_user(p, arg1, 0);
9201 break;
9202 #endif
9203 case TARGET_NR_reboot:
9204 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9205 /* arg4 must be ignored in all other cases */
9206 p = lock_user_string(arg4);
9207 if (!p) {
9208 goto efault;
9210 ret = get_errno(reboot(arg1, arg2, arg3, p));
9211 unlock_user(p, arg4, 0);
9212 } else {
9213 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9215 break;
9216 #ifdef TARGET_NR_readdir
9217 case TARGET_NR_readdir:
9218 goto unimplemented;
9219 #endif
9220 #ifdef TARGET_NR_mmap
9221 case TARGET_NR_mmap:
9222 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9223 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9224 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9225 || defined(TARGET_S390X)
9227 abi_ulong *v;
9228 abi_ulong v1, v2, v3, v4, v5, v6;
9229 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9230 goto efault;
9231 v1 = tswapal(v[0]);
9232 v2 = tswapal(v[1]);
9233 v3 = tswapal(v[2]);
9234 v4 = tswapal(v[3]);
9235 v5 = tswapal(v[4]);
9236 v6 = tswapal(v[5]);
9237 unlock_user(v, arg1, 0);
9238 ret = get_errno(target_mmap(v1, v2, v3,
9239 target_to_host_bitmask(v4, mmap_flags_tbl),
9240 v5, v6));
9242 #else
9243 ret = get_errno(target_mmap(arg1, arg2, arg3,
9244 target_to_host_bitmask(arg4, mmap_flags_tbl),
9245 arg5,
9246 arg6));
9247 #endif
9248 break;
9249 #endif
9250 #ifdef TARGET_NR_mmap2
9251 case TARGET_NR_mmap2:
9252 #ifndef MMAP_SHIFT
9253 #define MMAP_SHIFT 12
9254 #endif
9255 ret = get_errno(target_mmap(arg1, arg2, arg3,
9256 target_to_host_bitmask(arg4, mmap_flags_tbl),
9257 arg5,
9258 arg6 << MMAP_SHIFT));
9259 break;
9260 #endif
9261 case TARGET_NR_munmap:
9262 ret = get_errno(target_munmap(arg1, arg2));
9263 break;
9264 case TARGET_NR_mprotect:
9266 TaskState *ts = cpu->opaque;
9267 /* Special hack to detect libc making the stack executable. */
9268 if ((arg3 & PROT_GROWSDOWN)
9269 && arg1 >= ts->info->stack_limit
9270 && arg1 <= ts->info->start_stack) {
9271 arg3 &= ~PROT_GROWSDOWN;
9272 arg2 = arg2 + arg1 - ts->info->stack_limit;
9273 arg1 = ts->info->stack_limit;
9276 ret = get_errno(target_mprotect(arg1, arg2, arg3));
9277 break;
9278 #ifdef TARGET_NR_mremap
9279 case TARGET_NR_mremap:
9280 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9281 break;
9282 #endif
9283 /* ??? msync/mlock/munlock are broken for softmmu. */
9284 #ifdef TARGET_NR_msync
9285 case TARGET_NR_msync:
9286 ret = get_errno(msync(g2h(arg1), arg2, arg3));
9287 break;
9288 #endif
9289 #ifdef TARGET_NR_mlock
9290 case TARGET_NR_mlock:
9291 ret = get_errno(mlock(g2h(arg1), arg2));
9292 break;
9293 #endif
9294 #ifdef TARGET_NR_munlock
9295 case TARGET_NR_munlock:
9296 ret = get_errno(munlock(g2h(arg1), arg2));
9297 break;
9298 #endif
9299 #ifdef TARGET_NR_mlockall
9300 case TARGET_NR_mlockall:
9301 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9302 break;
9303 #endif
9304 #ifdef TARGET_NR_munlockall
9305 case TARGET_NR_munlockall:
9306 ret = get_errno(munlockall());
9307 break;
9308 #endif
9309 case TARGET_NR_truncate:
9310 if (!(p = lock_user_string(arg1)))
9311 goto efault;
9312 ret = get_errno(truncate(p, arg2));
9313 unlock_user(p, arg1, 0);
9314 break;
9315 case TARGET_NR_ftruncate:
9316 ret = get_errno(ftruncate(arg1, arg2));
9317 break;
9318 case TARGET_NR_fchmod:
9319 ret = get_errno(fchmod(arg1, arg2));
9320 break;
9321 #if defined(TARGET_NR_fchmodat)
9322 case TARGET_NR_fchmodat:
9323 if (!(p = lock_user_string(arg2)))
9324 goto efault;
9325 ret = get_errno(fchmodat(arg1, p, arg3, 0));
9326 unlock_user(p, arg2, 0);
9327 break;
9328 #endif
9329 case TARGET_NR_getpriority:
9330 /* Note that negative values are valid for getpriority, so we must
9331 differentiate based on errno settings. */
9332 errno = 0;
9333 ret = getpriority(arg1, arg2);
9334 if (ret == -1 && errno != 0) {
9335 ret = -host_to_target_errno(errno);
9336 break;
9338 #ifdef TARGET_ALPHA
9339 /* Return value is the unbiased priority. Signal no error. */
9340 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9341 #else
9342 /* Return value is a biased priority to avoid negative numbers. */
9343 ret = 20 - ret;
9344 #endif
9345 break;
9346 case TARGET_NR_setpriority:
9347 ret = get_errno(setpriority(arg1, arg2, arg3));
9348 break;
9349 #ifdef TARGET_NR_profil
9350 case TARGET_NR_profil:
9351 goto unimplemented;
9352 #endif
9353 case TARGET_NR_statfs:
9354 if (!(p = lock_user_string(arg1)))
9355 goto efault;
9356 ret = get_errno(statfs(path(p), &stfs));
9357 unlock_user(p, arg1, 0);
9358 convert_statfs:
9359 if (!is_error(ret)) {
9360 struct target_statfs *target_stfs;
9362 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9363 goto efault;
9364 __put_user(stfs.f_type, &target_stfs->f_type);
9365 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9366 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9367 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9368 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9369 __put_user(stfs.f_files, &target_stfs->f_files);
9370 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9371 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9372 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9373 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9374 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9375 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9376 unlock_user_struct(target_stfs, arg2, 1);
9378 break;
9379 case TARGET_NR_fstatfs:
9380 ret = get_errno(fstatfs(arg1, &stfs));
9381 goto convert_statfs;
9382 #ifdef TARGET_NR_statfs64
9383 case TARGET_NR_statfs64:
9384 if (!(p = lock_user_string(arg1)))
9385 goto efault;
9386 ret = get_errno(statfs(path(p), &stfs));
9387 unlock_user(p, arg1, 0);
9388 convert_statfs64:
9389 if (!is_error(ret)) {
9390 struct target_statfs64 *target_stfs;
9392 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9393 goto efault;
9394 __put_user(stfs.f_type, &target_stfs->f_type);
9395 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9396 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9397 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9398 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9399 __put_user(stfs.f_files, &target_stfs->f_files);
9400 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9401 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9402 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9403 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9404 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9405 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9406 unlock_user_struct(target_stfs, arg3, 1);
9408 break;
9409 case TARGET_NR_fstatfs64:
9410 ret = get_errno(fstatfs(arg1, &stfs));
9411 goto convert_statfs64;
9412 #endif
9413 #ifdef TARGET_NR_ioperm
9414 case TARGET_NR_ioperm:
9415 goto unimplemented;
9416 #endif
9417 #ifdef TARGET_NR_socketcall
9418 case TARGET_NR_socketcall:
9419 ret = do_socketcall(arg1, arg2);
9420 break;
9421 #endif
9422 #ifdef TARGET_NR_accept
9423 case TARGET_NR_accept:
9424 ret = do_accept4(arg1, arg2, arg3, 0);
9425 break;
9426 #endif
9427 #ifdef TARGET_NR_accept4
9428 case TARGET_NR_accept4:
9429 ret = do_accept4(arg1, arg2, arg3, arg4);
9430 break;
9431 #endif
9432 #ifdef TARGET_NR_bind
9433 case TARGET_NR_bind:
9434 ret = do_bind(arg1, arg2, arg3);
9435 break;
9436 #endif
9437 #ifdef TARGET_NR_connect
9438 case TARGET_NR_connect:
9439 ret = do_connect(arg1, arg2, arg3);
9440 break;
9441 #endif
9442 #ifdef TARGET_NR_getpeername
9443 case TARGET_NR_getpeername:
9444 ret = do_getpeername(arg1, arg2, arg3);
9445 break;
9446 #endif
9447 #ifdef TARGET_NR_getsockname
9448 case TARGET_NR_getsockname:
9449 ret = do_getsockname(arg1, arg2, arg3);
9450 break;
9451 #endif
9452 #ifdef TARGET_NR_getsockopt
9453 case TARGET_NR_getsockopt:
9454 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9455 break;
9456 #endif
9457 #ifdef TARGET_NR_listen
9458 case TARGET_NR_listen:
9459 ret = get_errno(listen(arg1, arg2));
9460 break;
9461 #endif
9462 #ifdef TARGET_NR_recv
9463 case TARGET_NR_recv:
9464 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9465 break;
9466 #endif
9467 #ifdef TARGET_NR_recvfrom
9468 case TARGET_NR_recvfrom:
9469 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9470 break;
9471 #endif
9472 #ifdef TARGET_NR_recvmsg
9473 case TARGET_NR_recvmsg:
9474 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
9475 break;
9476 #endif
9477 #ifdef TARGET_NR_send
9478 case TARGET_NR_send:
9479 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9480 break;
9481 #endif
9482 #ifdef TARGET_NR_sendmsg
9483 case TARGET_NR_sendmsg:
9484 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
9485 break;
9486 #endif
9487 #ifdef TARGET_NR_sendmmsg
9488 case TARGET_NR_sendmmsg:
9489 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9490 break;
9491 case TARGET_NR_recvmmsg:
9492 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9493 break;
9494 #endif
9495 #ifdef TARGET_NR_sendto
9496 case TARGET_NR_sendto:
9497 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9498 break;
9499 #endif
9500 #ifdef TARGET_NR_shutdown
9501 case TARGET_NR_shutdown:
9502 ret = get_errno(shutdown(arg1, arg2));
9503 break;
9504 #endif
9505 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9506 case TARGET_NR_getrandom:
9507 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9508 if (!p) {
9509 goto efault;
9511 ret = get_errno(getrandom(p, arg2, arg3));
9512 unlock_user(p, arg1, ret);
9513 break;
9514 #endif
9515 #ifdef TARGET_NR_socket
9516 case TARGET_NR_socket:
9517 ret = do_socket(arg1, arg2, arg3);
9518 break;
9519 #endif
9520 #ifdef TARGET_NR_socketpair
9521 case TARGET_NR_socketpair:
9522 ret = do_socketpair(arg1, arg2, arg3, arg4);
9523 break;
9524 #endif
9525 #ifdef TARGET_NR_setsockopt
9526 case TARGET_NR_setsockopt:
9527 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9528 break;
9529 #endif
9530 #if defined(TARGET_NR_syslog)
9531 case TARGET_NR_syslog:
9533 int len = arg2;
9535 switch (arg1) {
9536 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
9537 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
9538 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
9539 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
9540 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
9541 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9542 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
9543 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
9545 ret = get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9547 break;
9548 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
9549 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
9550 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
9552 ret = -TARGET_EINVAL;
9553 if (len < 0) {
9554 goto fail;
9556 ret = 0;
9557 if (len == 0) {
9558 break;
9560 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9561 if (!p) {
9562 ret = -TARGET_EFAULT;
9563 goto fail;
9565 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9566 unlock_user(p, arg2, arg3);
9568 break;
9569 default:
9570 ret = -EINVAL;
9571 break;
9574 break;
9575 #endif
9576 case TARGET_NR_setitimer:
9578 struct itimerval value, ovalue, *pvalue;
9580 if (arg2) {
9581 pvalue = &value;
9582 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9583 || copy_from_user_timeval(&pvalue->it_value,
9584 arg2 + sizeof(struct target_timeval)))
9585 goto efault;
9586 } else {
9587 pvalue = NULL;
9589 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9590 if (!is_error(ret) && arg3) {
9591 if (copy_to_user_timeval(arg3,
9592 &ovalue.it_interval)
9593 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9594 &ovalue.it_value))
9595 goto efault;
9598 break;
9599 case TARGET_NR_getitimer:
9601 struct itimerval value;
9603 ret = get_errno(getitimer(arg1, &value));
9604 if (!is_error(ret) && arg2) {
9605 if (copy_to_user_timeval(arg2,
9606 &value.it_interval)
9607 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9608 &value.it_value))
9609 goto efault;
9612 break;
9613 #ifdef TARGET_NR_stat
9614 case TARGET_NR_stat:
9615 if (!(p = lock_user_string(arg1)))
9616 goto efault;
9617 ret = get_errno(stat(path(p), &st));
9618 unlock_user(p, arg1, 0);
9619 goto do_stat;
9620 #endif
9621 #ifdef TARGET_NR_lstat
9622 case TARGET_NR_lstat:
9623 if (!(p = lock_user_string(arg1)))
9624 goto efault;
9625 ret = get_errno(lstat(path(p), &st));
9626 unlock_user(p, arg1, 0);
9627 goto do_stat;
9628 #endif
9629 case TARGET_NR_fstat:
9631 ret = get_errno(fstat(arg1, &st));
9632 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9633 do_stat:
9634 #endif
9635 if (!is_error(ret)) {
9636 struct target_stat *target_st;
9638 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9639 goto efault;
9640 memset(target_st, 0, sizeof(*target_st));
9641 __put_user(st.st_dev, &target_st->st_dev);
9642 __put_user(st.st_ino, &target_st->st_ino);
9643 __put_user(st.st_mode, &target_st->st_mode);
9644 __put_user(st.st_uid, &target_st->st_uid);
9645 __put_user(st.st_gid, &target_st->st_gid);
9646 __put_user(st.st_nlink, &target_st->st_nlink);
9647 __put_user(st.st_rdev, &target_st->st_rdev);
9648 __put_user(st.st_size, &target_st->st_size);
9649 __put_user(st.st_blksize, &target_st->st_blksize);
9650 __put_user(st.st_blocks, &target_st->st_blocks);
9651 __put_user(st.st_atime, &target_st->target_st_atime);
9652 __put_user(st.st_mtime, &target_st->target_st_mtime);
9653 __put_user(st.st_ctime, &target_st->target_st_ctime);
9654 unlock_user_struct(target_st, arg2, 1);
9657 break;
9658 #ifdef TARGET_NR_olduname
9659 case TARGET_NR_olduname:
9660 goto unimplemented;
9661 #endif
9662 #ifdef TARGET_NR_iopl
9663 case TARGET_NR_iopl:
9664 goto unimplemented;
9665 #endif
9666 case TARGET_NR_vhangup:
9667 ret = get_errno(vhangup());
9668 break;
9669 #ifdef TARGET_NR_idle
9670 case TARGET_NR_idle:
9671 goto unimplemented;
9672 #endif
9673 #ifdef TARGET_NR_syscall
9674 case TARGET_NR_syscall:
9675 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9676 arg6, arg7, arg8, 0);
9677 break;
9678 #endif
9679 case TARGET_NR_wait4:
9681 int status;
9682 abi_long status_ptr = arg2;
9683 struct rusage rusage, *rusage_ptr;
9684 abi_ulong target_rusage = arg4;
9685 abi_long rusage_err;
9686 if (target_rusage)
9687 rusage_ptr = &rusage;
9688 else
9689 rusage_ptr = NULL;
9690 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9691 if (!is_error(ret)) {
9692 if (status_ptr && ret) {
9693 status = host_to_target_waitstatus(status);
9694 if (put_user_s32(status, status_ptr))
9695 goto efault;
9697 if (target_rusage) {
9698 rusage_err = host_to_target_rusage(target_rusage, &rusage);
9699 if (rusage_err) {
9700 ret = rusage_err;
9705 break;
9706 #ifdef TARGET_NR_swapoff
9707 case TARGET_NR_swapoff:
9708 if (!(p = lock_user_string(arg1)))
9709 goto efault;
9710 ret = get_errno(swapoff(p));
9711 unlock_user(p, arg1, 0);
9712 break;
9713 #endif
9714 case TARGET_NR_sysinfo:
9716 struct target_sysinfo *target_value;
9717 struct sysinfo value;
9718 ret = get_errno(sysinfo(&value));
9719 if (!is_error(ret) && arg1)
9721 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9722 goto efault;
9723 __put_user(value.uptime, &target_value->uptime);
9724 __put_user(value.loads[0], &target_value->loads[0]);
9725 __put_user(value.loads[1], &target_value->loads[1]);
9726 __put_user(value.loads[2], &target_value->loads[2]);
9727 __put_user(value.totalram, &target_value->totalram);
9728 __put_user(value.freeram, &target_value->freeram);
9729 __put_user(value.sharedram, &target_value->sharedram);
9730 __put_user(value.bufferram, &target_value->bufferram);
9731 __put_user(value.totalswap, &target_value->totalswap);
9732 __put_user(value.freeswap, &target_value->freeswap);
9733 __put_user(value.procs, &target_value->procs);
9734 __put_user(value.totalhigh, &target_value->totalhigh);
9735 __put_user(value.freehigh, &target_value->freehigh);
9736 __put_user(value.mem_unit, &target_value->mem_unit);
9737 unlock_user_struct(target_value, arg1, 1);
9740 break;
9741 #ifdef TARGET_NR_ipc
9742 case TARGET_NR_ipc:
9743 ret = do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9744 break;
9745 #endif
9746 #ifdef TARGET_NR_semget
9747 case TARGET_NR_semget:
9748 ret = get_errno(semget(arg1, arg2, arg3));
9749 break;
9750 #endif
9751 #ifdef TARGET_NR_semop
9752 case TARGET_NR_semop:
9753 ret = do_semop(arg1, arg2, arg3);
9754 break;
9755 #endif
9756 #ifdef TARGET_NR_semctl
9757 case TARGET_NR_semctl:
9758 ret = do_semctl(arg1, arg2, arg3, arg4);
9759 break;
9760 #endif
9761 #ifdef TARGET_NR_msgctl
9762 case TARGET_NR_msgctl:
9763 ret = do_msgctl(arg1, arg2, arg3);
9764 break;
9765 #endif
9766 #ifdef TARGET_NR_msgget
9767 case TARGET_NR_msgget:
9768 ret = get_errno(msgget(arg1, arg2));
9769 break;
9770 #endif
9771 #ifdef TARGET_NR_msgrcv
9772 case TARGET_NR_msgrcv:
9773 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9774 break;
9775 #endif
9776 #ifdef TARGET_NR_msgsnd
9777 case TARGET_NR_msgsnd:
9778 ret = do_msgsnd(arg1, arg2, arg3, arg4);
9779 break;
9780 #endif
9781 #ifdef TARGET_NR_shmget
9782 case TARGET_NR_shmget:
9783 ret = get_errno(shmget(arg1, arg2, arg3));
9784 break;
9785 #endif
9786 #ifdef TARGET_NR_shmctl
9787 case TARGET_NR_shmctl:
9788 ret = do_shmctl(arg1, arg2, arg3);
9789 break;
9790 #endif
9791 #ifdef TARGET_NR_shmat
9792 case TARGET_NR_shmat:
9793 ret = do_shmat(cpu_env, arg1, arg2, arg3);
9794 break;
9795 #endif
9796 #ifdef TARGET_NR_shmdt
9797 case TARGET_NR_shmdt:
9798 ret = do_shmdt(arg1);
9799 break;
9800 #endif
9801 case TARGET_NR_fsync:
9802 ret = get_errno(fsync(arg1));
9803 break;
9804 case TARGET_NR_clone:
9805 /* Linux manages to have three different orderings for its
9806 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9807 * match the kernel's CONFIG_CLONE_* settings.
9808 * Microblaze is further special in that it uses a sixth
9809 * implicit argument to clone for the TLS pointer.
9811 #if defined(TARGET_MICROBLAZE)
9812 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9813 #elif defined(TARGET_CLONE_BACKWARDS)
9814 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9815 #elif defined(TARGET_CLONE_BACKWARDS2)
9816 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9817 #else
9818 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9819 #endif
9820 break;
9821 #ifdef __NR_exit_group
9822 /* new thread calls */
9823 case TARGET_NR_exit_group:
9824 #ifdef TARGET_GPROF
9825 _mcleanup();
9826 #endif
9827 gdb_exit(cpu_env, arg1);
9828 ret = get_errno(exit_group(arg1));
9829 break;
9830 #endif
9831 case TARGET_NR_setdomainname:
9832 if (!(p = lock_user_string(arg1)))
9833 goto efault;
9834 ret = get_errno(setdomainname(p, arg2));
9835 unlock_user(p, arg1, 0);
9836 break;
9837 case TARGET_NR_uname:
9838 /* no need to transcode because we use the linux syscall */
9840 struct new_utsname * buf;
9842 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9843 goto efault;
9844 ret = get_errno(sys_uname(buf));
9845 if (!is_error(ret)) {
9846 /* Overwrite the native machine name with whatever is being
9847 emulated. */
9848 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
9849 /* Allow the user to override the reported release. */
9850 if (qemu_uname_release && *qemu_uname_release) {
9851 g_strlcpy(buf->release, qemu_uname_release,
9852 sizeof(buf->release));
9855 unlock_user_struct(buf, arg1, 1);
9857 break;
9858 #ifdef TARGET_I386
9859 case TARGET_NR_modify_ldt:
9860 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
9861 break;
9862 #if !defined(TARGET_X86_64)
9863 case TARGET_NR_vm86old:
9864 goto unimplemented;
9865 case TARGET_NR_vm86:
9866 ret = do_vm86(cpu_env, arg1, arg2);
9867 break;
9868 #endif
9869 #endif
9870 case TARGET_NR_adjtimex:
9872 struct timex host_buf;
9874 if (target_to_host_timex(&host_buf, arg1) != 0) {
9875 goto efault;
9877 ret = get_errno(adjtimex(&host_buf));
9878 if (!is_error(ret)) {
9879 if (host_to_target_timex(arg1, &host_buf) != 0) {
9880 goto efault;
9884 break;
9885 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9886 case TARGET_NR_clock_adjtime:
9888 struct timex htx, *phtx = &htx;
9890 if (target_to_host_timex(phtx, arg2) != 0) {
9891 goto efault;
9893 ret = get_errno(clock_adjtime(arg1, phtx));
9894 if (!is_error(ret) && phtx) {
9895 if (host_to_target_timex(arg2, phtx) != 0) {
9896 goto efault;
9900 break;
9901 #endif
9902 #ifdef TARGET_NR_create_module
9903 case TARGET_NR_create_module:
9904 #endif
9905 case TARGET_NR_init_module:
9906 case TARGET_NR_delete_module:
9907 #ifdef TARGET_NR_get_kernel_syms
9908 case TARGET_NR_get_kernel_syms:
9909 #endif
9910 goto unimplemented;
9911 case TARGET_NR_quotactl:
9912 goto unimplemented;
9913 case TARGET_NR_getpgid:
9914 ret = get_errno(getpgid(arg1));
9915 break;
9916 case TARGET_NR_fchdir:
9917 ret = get_errno(fchdir(arg1));
9918 break;
9919 #ifdef TARGET_NR_bdflush /* not on x86_64 */
9920 case TARGET_NR_bdflush:
9921 goto unimplemented;
9922 #endif
9923 #ifdef TARGET_NR_sysfs
9924 case TARGET_NR_sysfs:
9925 goto unimplemented;
9926 #endif
9927 case TARGET_NR_personality:
9928 ret = get_errno(personality(arg1));
9929 break;
9930 #ifdef TARGET_NR_afs_syscall
9931 case TARGET_NR_afs_syscall:
9932 goto unimplemented;
9933 #endif
9934 #ifdef TARGET_NR__llseek /* Not on alpha */
9935 case TARGET_NR__llseek:
9937 int64_t res;
9938 #if !defined(__NR_llseek)
9939 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9940 if (res == -1) {
9941 ret = get_errno(res);
9942 } else {
9943 ret = 0;
9945 #else
9946 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9947 #endif
9948 if ((ret == 0) && put_user_s64(res, arg4)) {
9949 goto efault;
9952 break;
9953 #endif
9954 #ifdef TARGET_NR_getdents
9955 case TARGET_NR_getdents:
9956 #ifdef __NR_getdents
9957 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9959 struct target_dirent *target_dirp;
9960 struct linux_dirent *dirp;
9961 abi_long count = arg3;
9963 dirp = g_try_malloc(count);
9964 if (!dirp) {
9965 ret = -TARGET_ENOMEM;
9966 goto fail;
9969 ret = get_errno(sys_getdents(arg1, dirp, count));
9970 if (!is_error(ret)) {
9971 struct linux_dirent *de;
9972 struct target_dirent *tde;
9973 int len = ret;
9974 int reclen, treclen;
9975 int count1, tnamelen;
9977 count1 = 0;
9978 de = dirp;
9979 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9980 goto efault;
9981 tde = target_dirp;
9982 while (len > 0) {
9983 reclen = de->d_reclen;
9984 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9985 assert(tnamelen >= 0);
9986 treclen = tnamelen + offsetof(struct target_dirent, d_name);
9987 assert(count1 + treclen <= count);
9988 tde->d_reclen = tswap16(treclen);
9989 tde->d_ino = tswapal(de->d_ino);
9990 tde->d_off = tswapal(de->d_off);
9991 memcpy(tde->d_name, de->d_name, tnamelen);
9992 de = (struct linux_dirent *)((char *)de + reclen);
9993 len -= reclen;
9994 tde = (struct target_dirent *)((char *)tde + treclen);
9995 count1 += treclen;
9997 ret = count1;
9998 unlock_user(target_dirp, arg2, ret);
10000 g_free(dirp);
10002 #else
10004 struct linux_dirent *dirp;
10005 abi_long count = arg3;
10007 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10008 goto efault;
10009 ret = get_errno(sys_getdents(arg1, dirp, count));
10010 if (!is_error(ret)) {
10011 struct linux_dirent *de;
10012 int len = ret;
10013 int reclen;
10014 de = dirp;
10015 while (len > 0) {
10016 reclen = de->d_reclen;
10017 if (reclen > len)
10018 break;
10019 de->d_reclen = tswap16(reclen);
10020 tswapls(&de->d_ino);
10021 tswapls(&de->d_off);
10022 de = (struct linux_dirent *)((char *)de + reclen);
10023 len -= reclen;
10026 unlock_user(dirp, arg2, ret);
10028 #endif
10029 #else
10030 /* Implement getdents in terms of getdents64 */
10032 struct linux_dirent64 *dirp;
10033 abi_long count = arg3;
10035 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10036 if (!dirp) {
10037 goto efault;
10039 ret = get_errno(sys_getdents64(arg1, dirp, count));
10040 if (!is_error(ret)) {
10041 /* Convert the dirent64 structs to target dirent. We do this
10042 * in-place, since we can guarantee that a target_dirent is no
10043 * larger than a dirent64; however this means we have to be
10044 * careful to read everything before writing in the new format.
10046 struct linux_dirent64 *de;
10047 struct target_dirent *tde;
10048 int len = ret;
10049 int tlen = 0;
10051 de = dirp;
10052 tde = (struct target_dirent *)dirp;
10053 while (len > 0) {
10054 int namelen, treclen;
10055 int reclen = de->d_reclen;
10056 uint64_t ino = de->d_ino;
10057 int64_t off = de->d_off;
10058 uint8_t type = de->d_type;
10060 namelen = strlen(de->d_name);
10061 treclen = offsetof(struct target_dirent, d_name)
10062 + namelen + 2;
10063 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10065 memmove(tde->d_name, de->d_name, namelen + 1);
10066 tde->d_ino = tswapal(ino);
10067 tde->d_off = tswapal(off);
10068 tde->d_reclen = tswap16(treclen);
10069 /* The target_dirent type is in what was formerly a padding
10070 * byte at the end of the structure:
10072 *(((char *)tde) + treclen - 1) = type;
10074 de = (struct linux_dirent64 *)((char *)de + reclen);
10075 tde = (struct target_dirent *)((char *)tde + treclen);
10076 len -= reclen;
10077 tlen += treclen;
10079 ret = tlen;
10081 unlock_user(dirp, arg2, ret);
10083 #endif
10084 break;
10085 #endif /* TARGET_NR_getdents */
10086 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10087 case TARGET_NR_getdents64:
10089 struct linux_dirent64 *dirp;
10090 abi_long count = arg3;
10091 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10092 goto efault;
10093 ret = get_errno(sys_getdents64(arg1, dirp, count));
10094 if (!is_error(ret)) {
10095 struct linux_dirent64 *de;
10096 int len = ret;
10097 int reclen;
10098 de = dirp;
10099 while (len > 0) {
10100 reclen = de->d_reclen;
10101 if (reclen > len)
10102 break;
10103 de->d_reclen = tswap16(reclen);
10104 tswap64s((uint64_t *)&de->d_ino);
10105 tswap64s((uint64_t *)&de->d_off);
10106 de = (struct linux_dirent64 *)((char *)de + reclen);
10107 len -= reclen;
10110 unlock_user(dirp, arg2, ret);
10112 break;
10113 #endif /* TARGET_NR_getdents64 */
10114 #if defined(TARGET_NR__newselect)
10115 case TARGET_NR__newselect:
10116 ret = do_select(arg1, arg2, arg3, arg4, arg5);
10117 break;
10118 #endif
10119 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10120 # ifdef TARGET_NR_poll
10121 case TARGET_NR_poll:
10122 # endif
10123 # ifdef TARGET_NR_ppoll
10124 case TARGET_NR_ppoll:
10125 # endif
10127 struct target_pollfd *target_pfd;
10128 unsigned int nfds = arg2;
10129 struct pollfd *pfd;
10130 unsigned int i;
10132 pfd = NULL;
10133 target_pfd = NULL;
10134 if (nfds) {
10135 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
10136 ret = -TARGET_EINVAL;
10137 break;
10140 target_pfd = lock_user(VERIFY_WRITE, arg1,
10141 sizeof(struct target_pollfd) * nfds, 1);
10142 if (!target_pfd) {
10143 goto efault;
10146 pfd = alloca(sizeof(struct pollfd) * nfds);
10147 for (i = 0; i < nfds; i++) {
10148 pfd[i].fd = tswap32(target_pfd[i].fd);
10149 pfd[i].events = tswap16(target_pfd[i].events);
10153 switch (num) {
10154 # ifdef TARGET_NR_ppoll
10155 case TARGET_NR_ppoll:
10157 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
10158 target_sigset_t *target_set;
10159 sigset_t _set, *set = &_set;
10161 if (arg3) {
10162 if (target_to_host_timespec(timeout_ts, arg3)) {
10163 unlock_user(target_pfd, arg1, 0);
10164 goto efault;
10166 } else {
10167 timeout_ts = NULL;
10170 if (arg4) {
10171 if (arg5 != sizeof(target_sigset_t)) {
10172 unlock_user(target_pfd, arg1, 0);
10173 ret = -TARGET_EINVAL;
10174 break;
10177 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
10178 if (!target_set) {
10179 unlock_user(target_pfd, arg1, 0);
10180 goto efault;
10182 target_to_host_sigset(set, target_set);
10183 } else {
10184 set = NULL;
10187 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
10188 set, SIGSET_T_SIZE));
10190 if (!is_error(ret) && arg3) {
10191 host_to_target_timespec(arg3, timeout_ts);
10193 if (arg4) {
10194 unlock_user(target_set, arg4, 0);
10196 break;
10198 # endif
10199 # ifdef TARGET_NR_poll
10200 case TARGET_NR_poll:
10202 struct timespec ts, *pts;
10204 if (arg3 >= 0) {
10205 /* Convert ms to secs, ns */
10206 ts.tv_sec = arg3 / 1000;
10207 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10208 pts = &ts;
10209 } else {
10210 /* -ve poll() timeout means "infinite" */
10211 pts = NULL;
10213 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10214 break;
10216 # endif
10217 default:
10218 g_assert_not_reached();
10221 if (!is_error(ret)) {
10222 for(i = 0; i < nfds; i++) {
10223 target_pfd[i].revents = tswap16(pfd[i].revents);
10226 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10228 break;
10229 #endif
10230 case TARGET_NR_flock:
10231 /* NOTE: the flock constant seems to be the same for every
10232 Linux platform */
10233 ret = get_errno(safe_flock(arg1, arg2));
10234 break;
10235 case TARGET_NR_readv:
10237 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10238 if (vec != NULL) {
10239 ret = get_errno(safe_readv(arg1, vec, arg3));
10240 unlock_iovec(vec, arg2, arg3, 1);
10241 } else {
10242 ret = -host_to_target_errno(errno);
10245 break;
10246 case TARGET_NR_writev:
10248 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10249 if (vec != NULL) {
10250 ret = get_errno(safe_writev(arg1, vec, arg3));
10251 unlock_iovec(vec, arg2, arg3, 0);
10252 } else {
10253 ret = -host_to_target_errno(errno);
10256 break;
10257 #if defined(TARGET_NR_preadv)
10258 case TARGET_NR_preadv:
10260 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10261 if (vec != NULL) {
10262 ret = get_errno(safe_preadv(arg1, vec, arg3, arg4, arg5));
10263 unlock_iovec(vec, arg2, arg3, 1);
10264 } else {
10265 ret = -host_to_target_errno(errno);
10268 break;
10269 #endif
10270 #if defined(TARGET_NR_pwritev)
10271 case TARGET_NR_pwritev:
10273 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10274 if (vec != NULL) {
10275 ret = get_errno(safe_pwritev(arg1, vec, arg3, arg4, arg5));
10276 unlock_iovec(vec, arg2, arg3, 0);
10277 } else {
10278 ret = -host_to_target_errno(errno);
10281 break;
10282 #endif
10283 case TARGET_NR_getsid:
10284 ret = get_errno(getsid(arg1));
10285 break;
10286 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10287 case TARGET_NR_fdatasync:
10288 ret = get_errno(fdatasync(arg1));
10289 break;
10290 #endif
10291 #ifdef TARGET_NR__sysctl
10292 case TARGET_NR__sysctl:
10293 /* We don't implement this, but ENOTDIR is always a safe
10294 return value. */
10295 ret = -TARGET_ENOTDIR;
10296 break;
10297 #endif
10298 case TARGET_NR_sched_getaffinity:
10300 unsigned int mask_size;
10301 unsigned long *mask;
10304 * sched_getaffinity needs multiples of ulong, so need to take
10305 * care of mismatches between target ulong and host ulong sizes.
10307 if (arg2 & (sizeof(abi_ulong) - 1)) {
10308 ret = -TARGET_EINVAL;
10309 break;
10311 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10313 mask = alloca(mask_size);
10314 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10316 if (!is_error(ret)) {
10317 if (ret > arg2) {
10318 /* More data returned than the caller's buffer will fit.
10319 * This only happens if sizeof(abi_long) < sizeof(long)
10320 * and the caller passed us a buffer holding an odd number
10321 * of abi_longs. If the host kernel is actually using the
10322 * extra 4 bytes then fail EINVAL; otherwise we can just
10323 * ignore them and only copy the interesting part.
10325 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10326 if (numcpus > arg2 * 8) {
10327 ret = -TARGET_EINVAL;
10328 break;
10330 ret = arg2;
10333 if (copy_to_user(arg3, mask, ret)) {
10334 goto efault;
10338 break;
10339 case TARGET_NR_sched_setaffinity:
10341 unsigned int mask_size;
10342 unsigned long *mask;
10345 * sched_setaffinity needs multiples of ulong, so need to take
10346 * care of mismatches between target ulong and host ulong sizes.
10348 if (arg2 & (sizeof(abi_ulong) - 1)) {
10349 ret = -TARGET_EINVAL;
10350 break;
10352 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10354 mask = alloca(mask_size);
10355 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
10356 goto efault;
10358 memcpy(mask, p, arg2);
10359 unlock_user_struct(p, arg2, 0);
10361 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10363 break;
10364 case TARGET_NR_sched_setparam:
10366 struct sched_param *target_schp;
10367 struct sched_param schp;
10369 if (arg2 == 0) {
10370 return -TARGET_EINVAL;
10372 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10373 goto efault;
10374 schp.sched_priority = tswap32(target_schp->sched_priority);
10375 unlock_user_struct(target_schp, arg2, 0);
10376 ret = get_errno(sched_setparam(arg1, &schp));
10378 break;
10379 case TARGET_NR_sched_getparam:
10381 struct sched_param *target_schp;
10382 struct sched_param schp;
10384 if (arg2 == 0) {
10385 return -TARGET_EINVAL;
10387 ret = get_errno(sched_getparam(arg1, &schp));
10388 if (!is_error(ret)) {
10389 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10390 goto efault;
10391 target_schp->sched_priority = tswap32(schp.sched_priority);
10392 unlock_user_struct(target_schp, arg2, 1);
10395 break;
10396 case TARGET_NR_sched_setscheduler:
10398 struct sched_param *target_schp;
10399 struct sched_param schp;
10400 if (arg3 == 0) {
10401 return -TARGET_EINVAL;
10403 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10404 goto efault;
10405 schp.sched_priority = tswap32(target_schp->sched_priority);
10406 unlock_user_struct(target_schp, arg3, 0);
10407 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
10409 break;
10410 case TARGET_NR_sched_getscheduler:
10411 ret = get_errno(sched_getscheduler(arg1));
10412 break;
10413 case TARGET_NR_sched_yield:
10414 ret = get_errno(sched_yield());
10415 break;
10416 case TARGET_NR_sched_get_priority_max:
10417 ret = get_errno(sched_get_priority_max(arg1));
10418 break;
10419 case TARGET_NR_sched_get_priority_min:
10420 ret = get_errno(sched_get_priority_min(arg1));
10421 break;
10422 case TARGET_NR_sched_rr_get_interval:
10424 struct timespec ts;
10425 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10426 if (!is_error(ret)) {
10427 ret = host_to_target_timespec(arg2, &ts);
10430 break;
10431 case TARGET_NR_nanosleep:
10433 struct timespec req, rem;
10434 target_to_host_timespec(&req, arg1);
10435 ret = get_errno(safe_nanosleep(&req, &rem));
10436 if (is_error(ret) && arg2) {
10437 host_to_target_timespec(arg2, &rem);
10440 break;
10441 #ifdef TARGET_NR_query_module
10442 case TARGET_NR_query_module:
10443 goto unimplemented;
10444 #endif
10445 #ifdef TARGET_NR_nfsservctl
10446 case TARGET_NR_nfsservctl:
10447 goto unimplemented;
10448 #endif
10449 case TARGET_NR_prctl:
10450 switch (arg1) {
10451 case PR_GET_PDEATHSIG:
10453 int deathsig;
10454 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10455 if (!is_error(ret) && arg2
10456 && put_user_ual(deathsig, arg2)) {
10457 goto efault;
10459 break;
10461 #ifdef PR_GET_NAME
10462 case PR_GET_NAME:
10464 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10465 if (!name) {
10466 goto efault;
10468 ret = get_errno(prctl(arg1, (unsigned long)name,
10469 arg3, arg4, arg5));
10470 unlock_user(name, arg2, 16);
10471 break;
10473 case PR_SET_NAME:
10475 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10476 if (!name) {
10477 goto efault;
10479 ret = get_errno(prctl(arg1, (unsigned long)name,
10480 arg3, arg4, arg5));
10481 unlock_user(name, arg2, 0);
10482 break;
10484 #endif
10485 default:
10486 /* Most prctl options have no pointer arguments */
10487 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10488 break;
10490 break;
10491 #ifdef TARGET_NR_arch_prctl
10492 case TARGET_NR_arch_prctl:
10493 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10494 ret = do_arch_prctl(cpu_env, arg1, arg2);
10495 break;
10496 #else
10497 goto unimplemented;
10498 #endif
10499 #endif
10500 #ifdef TARGET_NR_pread64
10501 case TARGET_NR_pread64:
10502 if (regpairs_aligned(cpu_env)) {
10503 arg4 = arg5;
10504 arg5 = arg6;
10506 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
10507 goto efault;
10508 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10509 unlock_user(p, arg2, ret);
10510 break;
10511 case TARGET_NR_pwrite64:
10512 if (regpairs_aligned(cpu_env)) {
10513 arg4 = arg5;
10514 arg5 = arg6;
10516 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
10517 goto efault;
10518 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10519 unlock_user(p, arg2, 0);
10520 break;
10521 #endif
10522 case TARGET_NR_getcwd:
10523 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10524 goto efault;
10525 ret = get_errno(sys_getcwd1(p, arg2));
10526 unlock_user(p, arg1, ret);
10527 break;
10528 case TARGET_NR_capget:
10529 case TARGET_NR_capset:
10531 struct target_user_cap_header *target_header;
10532 struct target_user_cap_data *target_data = NULL;
10533 struct __user_cap_header_struct header;
10534 struct __user_cap_data_struct data[2];
10535 struct __user_cap_data_struct *dataptr = NULL;
10536 int i, target_datalen;
10537 int data_items = 1;
10539 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10540 goto efault;
10542 header.version = tswap32(target_header->version);
10543 header.pid = tswap32(target_header->pid);
10545 if (header.version != _LINUX_CAPABILITY_VERSION) {
10546 /* Version 2 and up takes pointer to two user_data structs */
10547 data_items = 2;
10550 target_datalen = sizeof(*target_data) * data_items;
10552 if (arg2) {
10553 if (num == TARGET_NR_capget) {
10554 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10555 } else {
10556 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10558 if (!target_data) {
10559 unlock_user_struct(target_header, arg1, 0);
10560 goto efault;
10563 if (num == TARGET_NR_capset) {
10564 for (i = 0; i < data_items; i++) {
10565 data[i].effective = tswap32(target_data[i].effective);
10566 data[i].permitted = tswap32(target_data[i].permitted);
10567 data[i].inheritable = tswap32(target_data[i].inheritable);
10571 dataptr = data;
10574 if (num == TARGET_NR_capget) {
10575 ret = get_errno(capget(&header, dataptr));
10576 } else {
10577 ret = get_errno(capset(&header, dataptr));
10580 /* The kernel always updates version for both capget and capset */
10581 target_header->version = tswap32(header.version);
10582 unlock_user_struct(target_header, arg1, 1);
10584 if (arg2) {
10585 if (num == TARGET_NR_capget) {
10586 for (i = 0; i < data_items; i++) {
10587 target_data[i].effective = tswap32(data[i].effective);
10588 target_data[i].permitted = tswap32(data[i].permitted);
10589 target_data[i].inheritable = tswap32(data[i].inheritable);
10591 unlock_user(target_data, arg2, target_datalen);
10592 } else {
10593 unlock_user(target_data, arg2, 0);
10596 break;
10598 case TARGET_NR_sigaltstack:
10599 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
10600 break;
10602 #ifdef CONFIG_SENDFILE
10603 case TARGET_NR_sendfile:
10605 off_t *offp = NULL;
10606 off_t off;
10607 if (arg3) {
10608 ret = get_user_sal(off, arg3);
10609 if (is_error(ret)) {
10610 break;
10612 offp = &off;
10614 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10615 if (!is_error(ret) && arg3) {
10616 abi_long ret2 = put_user_sal(off, arg3);
10617 if (is_error(ret2)) {
10618 ret = ret2;
10621 break;
10623 #ifdef TARGET_NR_sendfile64
10624 case TARGET_NR_sendfile64:
10626 off_t *offp = NULL;
10627 off_t off;
10628 if (arg3) {
10629 ret = get_user_s64(off, arg3);
10630 if (is_error(ret)) {
10631 break;
10633 offp = &off;
10635 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10636 if (!is_error(ret) && arg3) {
10637 abi_long ret2 = put_user_s64(off, arg3);
10638 if (is_error(ret2)) {
10639 ret = ret2;
10642 break;
10644 #endif
10645 #else
10646 case TARGET_NR_sendfile:
10647 #ifdef TARGET_NR_sendfile64
10648 case TARGET_NR_sendfile64:
10649 #endif
10650 goto unimplemented;
10651 #endif
10653 #ifdef TARGET_NR_getpmsg
10654 case TARGET_NR_getpmsg:
10655 goto unimplemented;
10656 #endif
10657 #ifdef TARGET_NR_putpmsg
10658 case TARGET_NR_putpmsg:
10659 goto unimplemented;
10660 #endif
10661 #ifdef TARGET_NR_vfork
10662 case TARGET_NR_vfork:
10663 ret = get_errno(do_fork(cpu_env,
10664 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10665 0, 0, 0, 0));
10666 break;
10667 #endif
10668 #ifdef TARGET_NR_ugetrlimit
10669 case TARGET_NR_ugetrlimit:
10671 struct rlimit rlim;
10672 int resource = target_to_host_resource(arg1);
10673 ret = get_errno(getrlimit(resource, &rlim));
10674 if (!is_error(ret)) {
10675 struct target_rlimit *target_rlim;
10676 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10677 goto efault;
10678 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10679 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10680 unlock_user_struct(target_rlim, arg2, 1);
10682 break;
10684 #endif
10685 #ifdef TARGET_NR_truncate64
10686 case TARGET_NR_truncate64:
10687 if (!(p = lock_user_string(arg1)))
10688 goto efault;
10689 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10690 unlock_user(p, arg1, 0);
10691 break;
10692 #endif
10693 #ifdef TARGET_NR_ftruncate64
10694 case TARGET_NR_ftruncate64:
10695 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10696 break;
10697 #endif
10698 #ifdef TARGET_NR_stat64
10699 case TARGET_NR_stat64:
10700 if (!(p = lock_user_string(arg1)))
10701 goto efault;
10702 ret = get_errno(stat(path(p), &st));
10703 unlock_user(p, arg1, 0);
10704 if (!is_error(ret))
10705 ret = host_to_target_stat64(cpu_env, arg2, &st);
10706 break;
10707 #endif
10708 #ifdef TARGET_NR_lstat64
10709 case TARGET_NR_lstat64:
10710 if (!(p = lock_user_string(arg1)))
10711 goto efault;
10712 ret = get_errno(lstat(path(p), &st));
10713 unlock_user(p, arg1, 0);
10714 if (!is_error(ret))
10715 ret = host_to_target_stat64(cpu_env, arg2, &st);
10716 break;
10717 #endif
10718 #ifdef TARGET_NR_fstat64
10719 case TARGET_NR_fstat64:
10720 ret = get_errno(fstat(arg1, &st));
10721 if (!is_error(ret))
10722 ret = host_to_target_stat64(cpu_env, arg2, &st);
10723 break;
10724 #endif
10725 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10726 #ifdef TARGET_NR_fstatat64
10727 case TARGET_NR_fstatat64:
10728 #endif
10729 #ifdef TARGET_NR_newfstatat
10730 case TARGET_NR_newfstatat:
10731 #endif
10732 if (!(p = lock_user_string(arg2)))
10733 goto efault;
10734 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10735 if (!is_error(ret))
10736 ret = host_to_target_stat64(cpu_env, arg3, &st);
10737 break;
10738 #endif
10739 #ifdef TARGET_NR_lchown
10740 case TARGET_NR_lchown:
10741 if (!(p = lock_user_string(arg1)))
10742 goto efault;
10743 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10744 unlock_user(p, arg1, 0);
10745 break;
10746 #endif
10747 #ifdef TARGET_NR_getuid
10748 case TARGET_NR_getuid:
10749 ret = get_errno(high2lowuid(getuid()));
10750 break;
10751 #endif
10752 #ifdef TARGET_NR_getgid
10753 case TARGET_NR_getgid:
10754 ret = get_errno(high2lowgid(getgid()));
10755 break;
10756 #endif
10757 #ifdef TARGET_NR_geteuid
10758 case TARGET_NR_geteuid:
10759 ret = get_errno(high2lowuid(geteuid()));
10760 break;
10761 #endif
10762 #ifdef TARGET_NR_getegid
10763 case TARGET_NR_getegid:
10764 ret = get_errno(high2lowgid(getegid()));
10765 break;
10766 #endif
10767 case TARGET_NR_setreuid:
10768 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10769 break;
10770 case TARGET_NR_setregid:
10771 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10772 break;
10773 case TARGET_NR_getgroups:
10775 int gidsetsize = arg1;
10776 target_id *target_grouplist;
10777 gid_t *grouplist;
10778 int i;
10780 grouplist = alloca(gidsetsize * sizeof(gid_t));
10781 ret = get_errno(getgroups(gidsetsize, grouplist));
10782 if (gidsetsize == 0)
10783 break;
10784 if (!is_error(ret)) {
10785 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10786 if (!target_grouplist)
10787 goto efault;
10788 for(i = 0;i < ret; i++)
10789 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10790 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10793 break;
10794 case TARGET_NR_setgroups:
10796 int gidsetsize = arg1;
10797 target_id *target_grouplist;
10798 gid_t *grouplist = NULL;
10799 int i;
10800 if (gidsetsize) {
10801 grouplist = alloca(gidsetsize * sizeof(gid_t));
10802 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10803 if (!target_grouplist) {
10804 ret = -TARGET_EFAULT;
10805 goto fail;
10807 for (i = 0; i < gidsetsize; i++) {
10808 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10810 unlock_user(target_grouplist, arg2, 0);
10812 ret = get_errno(setgroups(gidsetsize, grouplist));
10814 break;
10815 case TARGET_NR_fchown:
10816 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10817 break;
10818 #if defined(TARGET_NR_fchownat)
10819 case TARGET_NR_fchownat:
10820 if (!(p = lock_user_string(arg2)))
10821 goto efault;
10822 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10823 low2highgid(arg4), arg5));
10824 unlock_user(p, arg2, 0);
10825 break;
10826 #endif
10827 #ifdef TARGET_NR_setresuid
10828 case TARGET_NR_setresuid:
10829 ret = get_errno(sys_setresuid(low2highuid(arg1),
10830 low2highuid(arg2),
10831 low2highuid(arg3)));
10832 break;
10833 #endif
10834 #ifdef TARGET_NR_getresuid
10835 case TARGET_NR_getresuid:
10837 uid_t ruid, euid, suid;
10838 ret = get_errno(getresuid(&ruid, &euid, &suid));
10839 if (!is_error(ret)) {
10840 if (put_user_id(high2lowuid(ruid), arg1)
10841 || put_user_id(high2lowuid(euid), arg2)
10842 || put_user_id(high2lowuid(suid), arg3))
10843 goto efault;
10846 break;
10847 #endif
10848 #ifdef TARGET_NR_getresgid
10849 case TARGET_NR_setresgid:
10850 ret = get_errno(sys_setresgid(low2highgid(arg1),
10851 low2highgid(arg2),
10852 low2highgid(arg3)));
10853 break;
10854 #endif
10855 #ifdef TARGET_NR_getresgid
10856 case TARGET_NR_getresgid:
10858 gid_t rgid, egid, sgid;
10859 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10860 if (!is_error(ret)) {
10861 if (put_user_id(high2lowgid(rgid), arg1)
10862 || put_user_id(high2lowgid(egid), arg2)
10863 || put_user_id(high2lowgid(sgid), arg3))
10864 goto efault;
10867 break;
10868 #endif
10869 #ifdef TARGET_NR_chown
10870 case TARGET_NR_chown:
10871 if (!(p = lock_user_string(arg1)))
10872 goto efault;
10873 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10874 unlock_user(p, arg1, 0);
10875 break;
10876 #endif
10877 case TARGET_NR_setuid:
10878 ret = get_errno(sys_setuid(low2highuid(arg1)));
10879 break;
10880 case TARGET_NR_setgid:
10881 ret = get_errno(sys_setgid(low2highgid(arg1)));
10882 break;
10883 case TARGET_NR_setfsuid:
10884 ret = get_errno(setfsuid(arg1));
10885 break;
10886 case TARGET_NR_setfsgid:
10887 ret = get_errno(setfsgid(arg1));
10888 break;
10890 #ifdef TARGET_NR_lchown32
10891 case TARGET_NR_lchown32:
10892 if (!(p = lock_user_string(arg1)))
10893 goto efault;
10894 ret = get_errno(lchown(p, arg2, arg3));
10895 unlock_user(p, arg1, 0);
10896 break;
10897 #endif
10898 #ifdef TARGET_NR_getuid32
10899 case TARGET_NR_getuid32:
10900 ret = get_errno(getuid());
10901 break;
10902 #endif
10904 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10905 /* Alpha specific */
10906 case TARGET_NR_getxuid:
10908 uid_t euid;
10909 euid=geteuid();
10910 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
10912 ret = get_errno(getuid());
10913 break;
10914 #endif
10915 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10916 /* Alpha specific */
10917 case TARGET_NR_getxgid:
10919 uid_t egid;
10920 egid=getegid();
10921 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
10923 ret = get_errno(getgid());
10924 break;
10925 #endif
10926 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10927 /* Alpha specific */
10928 case TARGET_NR_osf_getsysinfo:
10929 ret = -TARGET_EOPNOTSUPP;
10930 switch (arg1) {
10931 case TARGET_GSI_IEEE_FP_CONTROL:
10933 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
10935 /* Copied from linux ieee_fpcr_to_swcr. */
10936 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
10937 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
10938 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
10939 | SWCR_TRAP_ENABLE_DZE
10940 | SWCR_TRAP_ENABLE_OVF);
10941 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
10942 | SWCR_TRAP_ENABLE_INE);
10943 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
10944 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
10946 if (put_user_u64 (swcr, arg2))
10947 goto efault;
10948 ret = 0;
10950 break;
10952 /* case GSI_IEEE_STATE_AT_SIGNAL:
10953 -- Not implemented in linux kernel.
10954 case GSI_UACPROC:
10955 -- Retrieves current unaligned access state; not much used.
10956 case GSI_PROC_TYPE:
10957 -- Retrieves implver information; surely not used.
10958 case GSI_GET_HWRPB:
10959 -- Grabs a copy of the HWRPB; surely not used.
10962 break;
10963 #endif
10964 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10965 /* Alpha specific */
10966 case TARGET_NR_osf_setsysinfo:
10967 ret = -TARGET_EOPNOTSUPP;
10968 switch (arg1) {
10969 case TARGET_SSI_IEEE_FP_CONTROL:
10971 uint64_t swcr, fpcr, orig_fpcr;
10973 if (get_user_u64 (swcr, arg2)) {
10974 goto efault;
10976 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10977 fpcr = orig_fpcr & FPCR_DYN_MASK;
10979 /* Copied from linux ieee_swcr_to_fpcr. */
10980 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
10981 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
10982 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
10983 | SWCR_TRAP_ENABLE_DZE
10984 | SWCR_TRAP_ENABLE_OVF)) << 48;
10985 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
10986 | SWCR_TRAP_ENABLE_INE)) << 57;
10987 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
10988 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
10990 cpu_alpha_store_fpcr(cpu_env, fpcr);
10991 ret = 0;
10993 break;
10995 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10997 uint64_t exc, fpcr, orig_fpcr;
10998 int si_code;
11000 if (get_user_u64(exc, arg2)) {
11001 goto efault;
11004 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11006 /* We only add to the exception status here. */
11007 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
11009 cpu_alpha_store_fpcr(cpu_env, fpcr);
11010 ret = 0;
11012 /* Old exceptions are not signaled. */
11013 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
11015 /* If any exceptions set by this call,
11016 and are unmasked, send a signal. */
11017 si_code = 0;
11018 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
11019 si_code = TARGET_FPE_FLTRES;
11021 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
11022 si_code = TARGET_FPE_FLTUND;
11024 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
11025 si_code = TARGET_FPE_FLTOVF;
11027 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
11028 si_code = TARGET_FPE_FLTDIV;
11030 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
11031 si_code = TARGET_FPE_FLTINV;
11033 if (si_code != 0) {
11034 target_siginfo_t info;
11035 info.si_signo = SIGFPE;
11036 info.si_errno = 0;
11037 info.si_code = si_code;
11038 info._sifields._sigfault._addr
11039 = ((CPUArchState *)cpu_env)->pc;
11040 queue_signal((CPUArchState *)cpu_env, info.si_signo,
11041 QEMU_SI_FAULT, &info);
11044 break;
11046 /* case SSI_NVPAIRS:
11047 -- Used with SSIN_UACPROC to enable unaligned accesses.
11048 case SSI_IEEE_STATE_AT_SIGNAL:
11049 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11050 -- Not implemented in linux kernel
11053 break;
11054 #endif
11055 #ifdef TARGET_NR_osf_sigprocmask
11056 /* Alpha specific. */
11057 case TARGET_NR_osf_sigprocmask:
11059 abi_ulong mask;
11060 int how;
11061 sigset_t set, oldset;
11063 switch(arg1) {
11064 case TARGET_SIG_BLOCK:
11065 how = SIG_BLOCK;
11066 break;
11067 case TARGET_SIG_UNBLOCK:
11068 how = SIG_UNBLOCK;
11069 break;
11070 case TARGET_SIG_SETMASK:
11071 how = SIG_SETMASK;
11072 break;
11073 default:
11074 ret = -TARGET_EINVAL;
11075 goto fail;
11077 mask = arg2;
11078 target_to_host_old_sigset(&set, &mask);
11079 ret = do_sigprocmask(how, &set, &oldset);
11080 if (!ret) {
11081 host_to_target_old_sigset(&mask, &oldset);
11082 ret = mask;
11085 break;
11086 #endif
11088 #ifdef TARGET_NR_getgid32
11089 case TARGET_NR_getgid32:
11090 ret = get_errno(getgid());
11091 break;
11092 #endif
11093 #ifdef TARGET_NR_geteuid32
11094 case TARGET_NR_geteuid32:
11095 ret = get_errno(geteuid());
11096 break;
11097 #endif
11098 #ifdef TARGET_NR_getegid32
11099 case TARGET_NR_getegid32:
11100 ret = get_errno(getegid());
11101 break;
11102 #endif
11103 #ifdef TARGET_NR_setreuid32
11104 case TARGET_NR_setreuid32:
11105 ret = get_errno(setreuid(arg1, arg2));
11106 break;
11107 #endif
11108 #ifdef TARGET_NR_setregid32
11109 case TARGET_NR_setregid32:
11110 ret = get_errno(setregid(arg1, arg2));
11111 break;
11112 #endif
11113 #ifdef TARGET_NR_getgroups32
11114 case TARGET_NR_getgroups32:
11116 int gidsetsize = arg1;
11117 uint32_t *target_grouplist;
11118 gid_t *grouplist;
11119 int i;
11121 grouplist = alloca(gidsetsize * sizeof(gid_t));
11122 ret = get_errno(getgroups(gidsetsize, grouplist));
11123 if (gidsetsize == 0)
11124 break;
11125 if (!is_error(ret)) {
11126 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11127 if (!target_grouplist) {
11128 ret = -TARGET_EFAULT;
11129 goto fail;
11131 for(i = 0;i < ret; i++)
11132 target_grouplist[i] = tswap32(grouplist[i]);
11133 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11136 break;
11137 #endif
11138 #ifdef TARGET_NR_setgroups32
11139 case TARGET_NR_setgroups32:
11141 int gidsetsize = arg1;
11142 uint32_t *target_grouplist;
11143 gid_t *grouplist;
11144 int i;
11146 grouplist = alloca(gidsetsize * sizeof(gid_t));
11147 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11148 if (!target_grouplist) {
11149 ret = -TARGET_EFAULT;
11150 goto fail;
11152 for(i = 0;i < gidsetsize; i++)
11153 grouplist[i] = tswap32(target_grouplist[i]);
11154 unlock_user(target_grouplist, arg2, 0);
11155 ret = get_errno(setgroups(gidsetsize, grouplist));
11157 break;
11158 #endif
11159 #ifdef TARGET_NR_fchown32
11160 case TARGET_NR_fchown32:
11161 ret = get_errno(fchown(arg1, arg2, arg3));
11162 break;
11163 #endif
11164 #ifdef TARGET_NR_setresuid32
11165 case TARGET_NR_setresuid32:
11166 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
11167 break;
11168 #endif
11169 #ifdef TARGET_NR_getresuid32
11170 case TARGET_NR_getresuid32:
11172 uid_t ruid, euid, suid;
11173 ret = get_errno(getresuid(&ruid, &euid, &suid));
11174 if (!is_error(ret)) {
11175 if (put_user_u32(ruid, arg1)
11176 || put_user_u32(euid, arg2)
11177 || put_user_u32(suid, arg3))
11178 goto efault;
11181 break;
11182 #endif
11183 #ifdef TARGET_NR_setresgid32
11184 case TARGET_NR_setresgid32:
11185 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
11186 break;
11187 #endif
11188 #ifdef TARGET_NR_getresgid32
11189 case TARGET_NR_getresgid32:
11191 gid_t rgid, egid, sgid;
11192 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11193 if (!is_error(ret)) {
11194 if (put_user_u32(rgid, arg1)
11195 || put_user_u32(egid, arg2)
11196 || put_user_u32(sgid, arg3))
11197 goto efault;
11200 break;
11201 #endif
11202 #ifdef TARGET_NR_chown32
11203 case TARGET_NR_chown32:
11204 if (!(p = lock_user_string(arg1)))
11205 goto efault;
11206 ret = get_errno(chown(p, arg2, arg3));
11207 unlock_user(p, arg1, 0);
11208 break;
11209 #endif
11210 #ifdef TARGET_NR_setuid32
11211 case TARGET_NR_setuid32:
11212 ret = get_errno(sys_setuid(arg1));
11213 break;
11214 #endif
11215 #ifdef TARGET_NR_setgid32
11216 case TARGET_NR_setgid32:
11217 ret = get_errno(sys_setgid(arg1));
11218 break;
11219 #endif
11220 #ifdef TARGET_NR_setfsuid32
11221 case TARGET_NR_setfsuid32:
11222 ret = get_errno(setfsuid(arg1));
11223 break;
11224 #endif
11225 #ifdef TARGET_NR_setfsgid32
11226 case TARGET_NR_setfsgid32:
11227 ret = get_errno(setfsgid(arg1));
11228 break;
11229 #endif
11231 case TARGET_NR_pivot_root:
11232 goto unimplemented;
11233 #ifdef TARGET_NR_mincore
11234 case TARGET_NR_mincore:
11236 void *a;
11237 ret = -TARGET_ENOMEM;
11238 a = lock_user(VERIFY_READ, arg1, arg2, 0);
11239 if (!a) {
11240 goto fail;
11242 ret = -TARGET_EFAULT;
11243 p = lock_user_string(arg3);
11244 if (!p) {
11245 goto mincore_fail;
11247 ret = get_errno(mincore(a, arg2, p));
11248 unlock_user(p, arg3, ret);
11249 mincore_fail:
11250 unlock_user(a, arg1, 0);
11252 break;
11253 #endif
11254 #ifdef TARGET_NR_arm_fadvise64_64
11255 case TARGET_NR_arm_fadvise64_64:
11256 /* arm_fadvise64_64 looks like fadvise64_64 but
11257 * with different argument order: fd, advice, offset, len
11258 * rather than the usual fd, offset, len, advice.
11259 * Note that offset and len are both 64-bit so appear as
11260 * pairs of 32-bit registers.
11262 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11263 target_offset64(arg5, arg6), arg2);
11264 ret = -host_to_target_errno(ret);
11265 break;
11266 #endif
11268 #if TARGET_ABI_BITS == 32
11270 #ifdef TARGET_NR_fadvise64_64
11271 case TARGET_NR_fadvise64_64:
11272 #if defined(TARGET_PPC)
11273 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11274 ret = arg2;
11275 arg2 = arg3;
11276 arg3 = arg4;
11277 arg4 = arg5;
11278 arg5 = arg6;
11279 arg6 = ret;
11280 #else
11281 /* 6 args: fd, offset (high, low), len (high, low), advice */
11282 if (regpairs_aligned(cpu_env)) {
11283 /* offset is in (3,4), len in (5,6) and advice in 7 */
11284 arg2 = arg3;
11285 arg3 = arg4;
11286 arg4 = arg5;
11287 arg5 = arg6;
11288 arg6 = arg7;
11290 #endif
11291 ret = -host_to_target_errno(posix_fadvise(arg1,
11292 target_offset64(arg2, arg3),
11293 target_offset64(arg4, arg5),
11294 arg6));
11295 break;
11296 #endif
11298 #ifdef TARGET_NR_fadvise64
11299 case TARGET_NR_fadvise64:
11300 /* 5 args: fd, offset (high, low), len, advice */
11301 if (regpairs_aligned(cpu_env)) {
11302 /* offset is in (3,4), len in 5 and advice in 6 */
11303 arg2 = arg3;
11304 arg3 = arg4;
11305 arg4 = arg5;
11306 arg5 = arg6;
11308 ret = -host_to_target_errno(posix_fadvise(arg1,
11309 target_offset64(arg2, arg3),
11310 arg4, arg5));
11311 break;
11312 #endif
11314 #else /* not a 32-bit ABI */
11315 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11316 #ifdef TARGET_NR_fadvise64_64
11317 case TARGET_NR_fadvise64_64:
11318 #endif
11319 #ifdef TARGET_NR_fadvise64
11320 case TARGET_NR_fadvise64:
11321 #endif
11322 #ifdef TARGET_S390X
11323 switch (arg4) {
11324 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11325 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11326 case 6: arg4 = POSIX_FADV_DONTNEED; break;
11327 case 7: arg4 = POSIX_FADV_NOREUSE; break;
11328 default: break;
11330 #endif
11331 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11332 break;
11333 #endif
11334 #endif /* end of 64-bit ABI fadvise handling */
11336 #ifdef TARGET_NR_madvise
11337 case TARGET_NR_madvise:
11338 /* A straight passthrough may not be safe because qemu sometimes
11339 turns private file-backed mappings into anonymous mappings.
11340 This will break MADV_DONTNEED.
11341 This is a hint, so ignoring and returning success is ok. */
11342 ret = get_errno(0);
11343 break;
11344 #endif
11345 #if TARGET_ABI_BITS == 32
11346 case TARGET_NR_fcntl64:
11348 int cmd;
11349 struct flock64 fl;
11350 from_flock64_fn *copyfrom = copy_from_user_flock64;
11351 to_flock64_fn *copyto = copy_to_user_flock64;
11353 #ifdef TARGET_ARM
11354 if (((CPUARMState *)cpu_env)->eabi) {
11355 copyfrom = copy_from_user_eabi_flock64;
11356 copyto = copy_to_user_eabi_flock64;
11358 #endif
11360 cmd = target_to_host_fcntl_cmd(arg2);
11361 if (cmd == -TARGET_EINVAL) {
11362 ret = cmd;
11363 break;
11366 switch(arg2) {
11367 case TARGET_F_GETLK64:
11368 ret = copyfrom(&fl, arg3);
11369 if (ret) {
11370 break;
11372 ret = get_errno(fcntl(arg1, cmd, &fl));
11373 if (ret == 0) {
11374 ret = copyto(arg3, &fl);
11376 break;
11378 case TARGET_F_SETLK64:
11379 case TARGET_F_SETLKW64:
11380 ret = copyfrom(&fl, arg3);
11381 if (ret) {
11382 break;
11384 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11385 break;
11386 default:
11387 ret = do_fcntl(arg1, arg2, arg3);
11388 break;
11390 break;
11392 #endif
11393 #ifdef TARGET_NR_cacheflush
11394 case TARGET_NR_cacheflush:
11395 /* self-modifying code is handled automatically, so nothing needed */
11396 ret = 0;
11397 break;
11398 #endif
11399 #ifdef TARGET_NR_security
11400 case TARGET_NR_security:
11401 goto unimplemented;
11402 #endif
11403 #ifdef TARGET_NR_getpagesize
11404 case TARGET_NR_getpagesize:
11405 ret = TARGET_PAGE_SIZE;
11406 break;
11407 #endif
11408 case TARGET_NR_gettid:
11409 ret = get_errno(gettid());
11410 break;
11411 #ifdef TARGET_NR_readahead
11412 case TARGET_NR_readahead:
11413 #if TARGET_ABI_BITS == 32
11414 if (regpairs_aligned(cpu_env)) {
11415 arg2 = arg3;
11416 arg3 = arg4;
11417 arg4 = arg5;
11419 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11420 #else
11421 ret = get_errno(readahead(arg1, arg2, arg3));
11422 #endif
11423 break;
11424 #endif
11425 #ifdef CONFIG_ATTR
11426 #ifdef TARGET_NR_setxattr
11427 case TARGET_NR_listxattr:
11428 case TARGET_NR_llistxattr:
11430 void *p, *b = 0;
11431 if (arg2) {
11432 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11433 if (!b) {
11434 ret = -TARGET_EFAULT;
11435 break;
11438 p = lock_user_string(arg1);
11439 if (p) {
11440 if (num == TARGET_NR_listxattr) {
11441 ret = get_errno(listxattr(p, b, arg3));
11442 } else {
11443 ret = get_errno(llistxattr(p, b, arg3));
11445 } else {
11446 ret = -TARGET_EFAULT;
11448 unlock_user(p, arg1, 0);
11449 unlock_user(b, arg2, arg3);
11450 break;
11452 case TARGET_NR_flistxattr:
11454 void *b = 0;
11455 if (arg2) {
11456 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11457 if (!b) {
11458 ret = -TARGET_EFAULT;
11459 break;
11462 ret = get_errno(flistxattr(arg1, b, arg3));
11463 unlock_user(b, arg2, arg3);
11464 break;
11466 case TARGET_NR_setxattr:
11467 case TARGET_NR_lsetxattr:
11469 void *p, *n, *v = 0;
11470 if (arg3) {
11471 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11472 if (!v) {
11473 ret = -TARGET_EFAULT;
11474 break;
11477 p = lock_user_string(arg1);
11478 n = lock_user_string(arg2);
11479 if (p && n) {
11480 if (num == TARGET_NR_setxattr) {
11481 ret = get_errno(setxattr(p, n, v, arg4, arg5));
11482 } else {
11483 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11485 } else {
11486 ret = -TARGET_EFAULT;
11488 unlock_user(p, arg1, 0);
11489 unlock_user(n, arg2, 0);
11490 unlock_user(v, arg3, 0);
11492 break;
11493 case TARGET_NR_fsetxattr:
11495 void *n, *v = 0;
11496 if (arg3) {
11497 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11498 if (!v) {
11499 ret = -TARGET_EFAULT;
11500 break;
11503 n = lock_user_string(arg2);
11504 if (n) {
11505 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11506 } else {
11507 ret = -TARGET_EFAULT;
11509 unlock_user(n, arg2, 0);
11510 unlock_user(v, arg3, 0);
11512 break;
11513 case TARGET_NR_getxattr:
11514 case TARGET_NR_lgetxattr:
11516 void *p, *n, *v = 0;
11517 if (arg3) {
11518 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11519 if (!v) {
11520 ret = -TARGET_EFAULT;
11521 break;
11524 p = lock_user_string(arg1);
11525 n = lock_user_string(arg2);
11526 if (p && n) {
11527 if (num == TARGET_NR_getxattr) {
11528 ret = get_errno(getxattr(p, n, v, arg4));
11529 } else {
11530 ret = get_errno(lgetxattr(p, n, v, arg4));
11532 } else {
11533 ret = -TARGET_EFAULT;
11535 unlock_user(p, arg1, 0);
11536 unlock_user(n, arg2, 0);
11537 unlock_user(v, arg3, arg4);
11539 break;
11540 case TARGET_NR_fgetxattr:
11542 void *n, *v = 0;
11543 if (arg3) {
11544 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11545 if (!v) {
11546 ret = -TARGET_EFAULT;
11547 break;
11550 n = lock_user_string(arg2);
11551 if (n) {
11552 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11553 } else {
11554 ret = -TARGET_EFAULT;
11556 unlock_user(n, arg2, 0);
11557 unlock_user(v, arg3, arg4);
11559 break;
11560 case TARGET_NR_removexattr:
11561 case TARGET_NR_lremovexattr:
11563 void *p, *n;
11564 p = lock_user_string(arg1);
11565 n = lock_user_string(arg2);
11566 if (p && n) {
11567 if (num == TARGET_NR_removexattr) {
11568 ret = get_errno(removexattr(p, n));
11569 } else {
11570 ret = get_errno(lremovexattr(p, n));
11572 } else {
11573 ret = -TARGET_EFAULT;
11575 unlock_user(p, arg1, 0);
11576 unlock_user(n, arg2, 0);
11578 break;
11579 case TARGET_NR_fremovexattr:
11581 void *n;
11582 n = lock_user_string(arg2);
11583 if (n) {
11584 ret = get_errno(fremovexattr(arg1, n));
11585 } else {
11586 ret = -TARGET_EFAULT;
11588 unlock_user(n, arg2, 0);
11590 break;
11591 #endif
11592 #endif /* CONFIG_ATTR */
11593 #ifdef TARGET_NR_set_thread_area
11594 case TARGET_NR_set_thread_area:
11595 #if defined(TARGET_MIPS)
11596 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11597 ret = 0;
11598 break;
11599 #elif defined(TARGET_CRIS)
11600 if (arg1 & 0xff)
11601 ret = -TARGET_EINVAL;
11602 else {
11603 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11604 ret = 0;
11606 break;
11607 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11608 ret = do_set_thread_area(cpu_env, arg1);
11609 break;
11610 #elif defined(TARGET_M68K)
11612 TaskState *ts = cpu->opaque;
11613 ts->tp_value = arg1;
11614 ret = 0;
11615 break;
11617 #else
11618 goto unimplemented_nowarn;
11619 #endif
11620 #endif
11621 #ifdef TARGET_NR_get_thread_area
11622 case TARGET_NR_get_thread_area:
11623 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11624 ret = do_get_thread_area(cpu_env, arg1);
11625 break;
11626 #elif defined(TARGET_M68K)
11628 TaskState *ts = cpu->opaque;
11629 ret = ts->tp_value;
11630 break;
11632 #else
11633 goto unimplemented_nowarn;
11634 #endif
11635 #endif
11636 #ifdef TARGET_NR_getdomainname
11637 case TARGET_NR_getdomainname:
11638 goto unimplemented_nowarn;
11639 #endif
11641 #ifdef TARGET_NR_clock_gettime
11642 case TARGET_NR_clock_gettime:
11644 struct timespec ts;
11645 ret = get_errno(clock_gettime(arg1, &ts));
11646 if (!is_error(ret)) {
11647 host_to_target_timespec(arg2, &ts);
11649 break;
11651 #endif
11652 #ifdef TARGET_NR_clock_getres
11653 case TARGET_NR_clock_getres:
11655 struct timespec ts;
11656 ret = get_errno(clock_getres(arg1, &ts));
11657 if (!is_error(ret)) {
11658 host_to_target_timespec(arg2, &ts);
11660 break;
11662 #endif
11663 #ifdef TARGET_NR_clock_nanosleep
11664 case TARGET_NR_clock_nanosleep:
11666 struct timespec ts;
11667 target_to_host_timespec(&ts, arg3);
11668 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11669 &ts, arg4 ? &ts : NULL));
11670 if (arg4)
11671 host_to_target_timespec(arg4, &ts);
11673 #if defined(TARGET_PPC)
11674 /* clock_nanosleep is odd in that it returns positive errno values.
11675 * On PPC, CR0 bit 3 should be set in such a situation. */
11676 if (ret && ret != -TARGET_ERESTARTSYS) {
11677 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11679 #endif
11680 break;
11682 #endif
11684 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11685 case TARGET_NR_set_tid_address:
11686 ret = get_errno(set_tid_address((int *)g2h(arg1)));
11687 break;
11688 #endif
11690 case TARGET_NR_tkill:
11691 ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11692 break;
11694 case TARGET_NR_tgkill:
11695 ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
11696 target_to_host_signal(arg3)));
11697 break;
11699 #ifdef TARGET_NR_set_robust_list
11700 case TARGET_NR_set_robust_list:
11701 case TARGET_NR_get_robust_list:
11702 /* The ABI for supporting robust futexes has userspace pass
11703 * the kernel a pointer to a linked list which is updated by
11704 * userspace after the syscall; the list is walked by the kernel
11705 * when the thread exits. Since the linked list in QEMU guest
11706 * memory isn't a valid linked list for the host and we have
11707 * no way to reliably intercept the thread-death event, we can't
11708 * support these. Silently return ENOSYS so that guest userspace
11709 * falls back to a non-robust futex implementation (which should
11710 * be OK except in the corner case of the guest crashing while
11711 * holding a mutex that is shared with another process via
11712 * shared memory).
11714 goto unimplemented_nowarn;
11715 #endif
11717 #if defined(TARGET_NR_utimensat)
11718 case TARGET_NR_utimensat:
11720 struct timespec *tsp, ts[2];
11721 if (!arg3) {
11722 tsp = NULL;
11723 } else {
11724 target_to_host_timespec(ts, arg3);
11725 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11726 tsp = ts;
11728 if (!arg2)
11729 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11730 else {
11731 if (!(p = lock_user_string(arg2))) {
11732 ret = -TARGET_EFAULT;
11733 goto fail;
11735 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11736 unlock_user(p, arg2, 0);
11739 break;
11740 #endif
11741 case TARGET_NR_futex:
11742 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11743 break;
11744 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11745 case TARGET_NR_inotify_init:
11746 ret = get_errno(sys_inotify_init());
11747 if (ret >= 0) {
11748 fd_trans_register(ret, &target_inotify_trans);
11750 break;
11751 #endif
11752 #ifdef CONFIG_INOTIFY1
11753 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11754 case TARGET_NR_inotify_init1:
11755 ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11756 fcntl_flags_tbl)));
11757 if (ret >= 0) {
11758 fd_trans_register(ret, &target_inotify_trans);
11760 break;
11761 #endif
11762 #endif
11763 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11764 case TARGET_NR_inotify_add_watch:
11765 p = lock_user_string(arg2);
11766 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11767 unlock_user(p, arg2, 0);
11768 break;
11769 #endif
11770 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11771 case TARGET_NR_inotify_rm_watch:
11772 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
11773 break;
11774 #endif
11776 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11777 case TARGET_NR_mq_open:
11779 struct mq_attr posix_mq_attr;
11780 struct mq_attr *pposix_mq_attr;
11781 int host_flags;
11783 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11784 pposix_mq_attr = NULL;
11785 if (arg4) {
11786 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11787 goto efault;
11789 pposix_mq_attr = &posix_mq_attr;
11791 p = lock_user_string(arg1 - 1);
11792 if (!p) {
11793 goto efault;
11795 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11796 unlock_user (p, arg1, 0);
11798 break;
11800 case TARGET_NR_mq_unlink:
11801 p = lock_user_string(arg1 - 1);
11802 if (!p) {
11803 ret = -TARGET_EFAULT;
11804 break;
11806 ret = get_errno(mq_unlink(p));
11807 unlock_user (p, arg1, 0);
11808 break;
11810 case TARGET_NR_mq_timedsend:
11812 struct timespec ts;
11814 p = lock_user (VERIFY_READ, arg2, arg3, 1);
11815 if (arg5 != 0) {
11816 target_to_host_timespec(&ts, arg5);
11817 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11818 host_to_target_timespec(arg5, &ts);
11819 } else {
11820 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11822 unlock_user (p, arg2, arg3);
11824 break;
11826 case TARGET_NR_mq_timedreceive:
11828 struct timespec ts;
11829 unsigned int prio;
11831 p = lock_user (VERIFY_READ, arg2, arg3, 1);
11832 if (arg5 != 0) {
11833 target_to_host_timespec(&ts, arg5);
11834 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11835 &prio, &ts));
11836 host_to_target_timespec(arg5, &ts);
11837 } else {
11838 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11839 &prio, NULL));
11841 unlock_user (p, arg2, arg3);
11842 if (arg4 != 0)
11843 put_user_u32(prio, arg4);
11845 break;
11847 /* Not implemented for now... */
11848 /* case TARGET_NR_mq_notify: */
11849 /* break; */
11851 case TARGET_NR_mq_getsetattr:
11853 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11854 ret = 0;
11855 if (arg3 != 0) {
11856 ret = mq_getattr(arg1, &posix_mq_attr_out);
11857 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11859 if (arg2 != 0) {
11860 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11861 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
11865 break;
11866 #endif
11868 #ifdef CONFIG_SPLICE
11869 #ifdef TARGET_NR_tee
11870 case TARGET_NR_tee:
11872 ret = get_errno(tee(arg1,arg2,arg3,arg4));
11874 break;
11875 #endif
11876 #ifdef TARGET_NR_splice
11877 case TARGET_NR_splice:
11879 loff_t loff_in, loff_out;
11880 loff_t *ploff_in = NULL, *ploff_out = NULL;
11881 if (arg2) {
11882 if (get_user_u64(loff_in, arg2)) {
11883 goto efault;
11885 ploff_in = &loff_in;
11887 if (arg4) {
11888 if (get_user_u64(loff_out, arg4)) {
11889 goto efault;
11891 ploff_out = &loff_out;
11893 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11894 if (arg2) {
11895 if (put_user_u64(loff_in, arg2)) {
11896 goto efault;
11899 if (arg4) {
11900 if (put_user_u64(loff_out, arg4)) {
11901 goto efault;
11905 break;
11906 #endif
11907 #ifdef TARGET_NR_vmsplice
11908 case TARGET_NR_vmsplice:
11910 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11911 if (vec != NULL) {
11912 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11913 unlock_iovec(vec, arg2, arg3, 0);
11914 } else {
11915 ret = -host_to_target_errno(errno);
11918 break;
11919 #endif
11920 #endif /* CONFIG_SPLICE */
11921 #ifdef CONFIG_EVENTFD
11922 #if defined(TARGET_NR_eventfd)
11923 case TARGET_NR_eventfd:
11924 ret = get_errno(eventfd(arg1, 0));
11925 if (ret >= 0) {
11926 fd_trans_register(ret, &target_eventfd_trans);
11928 break;
11929 #endif
11930 #if defined(TARGET_NR_eventfd2)
11931 case TARGET_NR_eventfd2:
11933 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11934 if (arg2 & TARGET_O_NONBLOCK) {
11935 host_flags |= O_NONBLOCK;
11937 if (arg2 & TARGET_O_CLOEXEC) {
11938 host_flags |= O_CLOEXEC;
11940 ret = get_errno(eventfd(arg1, host_flags));
11941 if (ret >= 0) {
11942 fd_trans_register(ret, &target_eventfd_trans);
11944 break;
11946 #endif
11947 #endif /* CONFIG_EVENTFD */
11948 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11949 case TARGET_NR_fallocate:
11950 #if TARGET_ABI_BITS == 32
11951 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11952 target_offset64(arg5, arg6)));
11953 #else
11954 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11955 #endif
11956 break;
11957 #endif
11958 #if defined(CONFIG_SYNC_FILE_RANGE)
11959 #if defined(TARGET_NR_sync_file_range)
11960 case TARGET_NR_sync_file_range:
11961 #if TARGET_ABI_BITS == 32
11962 #if defined(TARGET_MIPS)
11963 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11964 target_offset64(arg5, arg6), arg7));
11965 #else
11966 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11967 target_offset64(arg4, arg5), arg6));
11968 #endif /* !TARGET_MIPS */
11969 #else
11970 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11971 #endif
11972 break;
11973 #endif
11974 #if defined(TARGET_NR_sync_file_range2)
11975 case TARGET_NR_sync_file_range2:
11976 /* This is like sync_file_range but the arguments are reordered */
11977 #if TARGET_ABI_BITS == 32
11978 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11979 target_offset64(arg5, arg6), arg2));
11980 #else
11981 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11982 #endif
11983 break;
11984 #endif
11985 #endif
11986 #if defined(TARGET_NR_signalfd4)
11987 case TARGET_NR_signalfd4:
11988 ret = do_signalfd4(arg1, arg2, arg4);
11989 break;
11990 #endif
11991 #if defined(TARGET_NR_signalfd)
11992 case TARGET_NR_signalfd:
11993 ret = do_signalfd4(arg1, arg2, 0);
11994 break;
11995 #endif
11996 #if defined(CONFIG_EPOLL)
11997 #if defined(TARGET_NR_epoll_create)
11998 case TARGET_NR_epoll_create:
11999 ret = get_errno(epoll_create(arg1));
12000 break;
12001 #endif
12002 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12003 case TARGET_NR_epoll_create1:
12004 ret = get_errno(epoll_create1(arg1));
12005 break;
12006 #endif
12007 #if defined(TARGET_NR_epoll_ctl)
12008 case TARGET_NR_epoll_ctl:
12010 struct epoll_event ep;
12011 struct epoll_event *epp = 0;
12012 if (arg4) {
12013 struct target_epoll_event *target_ep;
12014 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12015 goto efault;
12017 ep.events = tswap32(target_ep->events);
12018 /* The epoll_data_t union is just opaque data to the kernel,
12019 * so we transfer all 64 bits across and need not worry what
12020 * actual data type it is.
12022 ep.data.u64 = tswap64(target_ep->data.u64);
12023 unlock_user_struct(target_ep, arg4, 0);
12024 epp = &ep;
12026 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12027 break;
12029 #endif
12031 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12032 #if defined(TARGET_NR_epoll_wait)
12033 case TARGET_NR_epoll_wait:
12034 #endif
12035 #if defined(TARGET_NR_epoll_pwait)
12036 case TARGET_NR_epoll_pwait:
12037 #endif
12039 struct target_epoll_event *target_ep;
12040 struct epoll_event *ep;
12041 int epfd = arg1;
12042 int maxevents = arg3;
12043 int timeout = arg4;
12045 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12046 ret = -TARGET_EINVAL;
12047 break;
12050 target_ep = lock_user(VERIFY_WRITE, arg2,
12051 maxevents * sizeof(struct target_epoll_event), 1);
12052 if (!target_ep) {
12053 goto efault;
12056 ep = g_try_new(struct epoll_event, maxevents);
12057 if (!ep) {
12058 unlock_user(target_ep, arg2, 0);
12059 ret = -TARGET_ENOMEM;
12060 break;
12063 switch (num) {
12064 #if defined(TARGET_NR_epoll_pwait)
12065 case TARGET_NR_epoll_pwait:
12067 target_sigset_t *target_set;
12068 sigset_t _set, *set = &_set;
12070 if (arg5) {
12071 if (arg6 != sizeof(target_sigset_t)) {
12072 ret = -TARGET_EINVAL;
12073 break;
12076 target_set = lock_user(VERIFY_READ, arg5,
12077 sizeof(target_sigset_t), 1);
12078 if (!target_set) {
12079 ret = -TARGET_EFAULT;
12080 break;
12082 target_to_host_sigset(set, target_set);
12083 unlock_user(target_set, arg5, 0);
12084 } else {
12085 set = NULL;
12088 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12089 set, SIGSET_T_SIZE));
12090 break;
12092 #endif
12093 #if defined(TARGET_NR_epoll_wait)
12094 case TARGET_NR_epoll_wait:
12095 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12096 NULL, 0));
12097 break;
12098 #endif
12099 default:
12100 ret = -TARGET_ENOSYS;
12102 if (!is_error(ret)) {
12103 int i;
12104 for (i = 0; i < ret; i++) {
12105 target_ep[i].events = tswap32(ep[i].events);
12106 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12108 unlock_user(target_ep, arg2,
12109 ret * sizeof(struct target_epoll_event));
12110 } else {
12111 unlock_user(target_ep, arg2, 0);
12113 g_free(ep);
12114 break;
12116 #endif
12117 #endif
12118 #ifdef TARGET_NR_prlimit64
12119 case TARGET_NR_prlimit64:
12121 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12122 struct target_rlimit64 *target_rnew, *target_rold;
12123 struct host_rlimit64 rnew, rold, *rnewp = 0;
12124 int resource = target_to_host_resource(arg2);
12125 if (arg3) {
12126 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12127 goto efault;
12129 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12130 rnew.rlim_max = tswap64(target_rnew->rlim_max);
12131 unlock_user_struct(target_rnew, arg3, 0);
12132 rnewp = &rnew;
12135 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12136 if (!is_error(ret) && arg4) {
12137 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12138 goto efault;
12140 target_rold->rlim_cur = tswap64(rold.rlim_cur);
12141 target_rold->rlim_max = tswap64(rold.rlim_max);
12142 unlock_user_struct(target_rold, arg4, 1);
12144 break;
12146 #endif
12147 #ifdef TARGET_NR_gethostname
12148 case TARGET_NR_gethostname:
12150 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12151 if (name) {
12152 ret = get_errno(gethostname(name, arg2));
12153 unlock_user(name, arg1, arg2);
12154 } else {
12155 ret = -TARGET_EFAULT;
12157 break;
12159 #endif
12160 #ifdef TARGET_NR_atomic_cmpxchg_32
12161 case TARGET_NR_atomic_cmpxchg_32:
12163 /* should use start_exclusive from main.c */
12164 abi_ulong mem_value;
12165 if (get_user_u32(mem_value, arg6)) {
12166 target_siginfo_t info;
12167 info.si_signo = SIGSEGV;
12168 info.si_errno = 0;
12169 info.si_code = TARGET_SEGV_MAPERR;
12170 info._sifields._sigfault._addr = arg6;
12171 queue_signal((CPUArchState *)cpu_env, info.si_signo,
12172 QEMU_SI_FAULT, &info);
12173 ret = 0xdeadbeef;
12176 if (mem_value == arg2)
12177 put_user_u32(arg1, arg6);
12178 ret = mem_value;
12179 break;
12181 #endif
12182 #ifdef TARGET_NR_atomic_barrier
12183 case TARGET_NR_atomic_barrier:
12185 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
12186 ret = 0;
12187 break;
12189 #endif
12191 #ifdef TARGET_NR_timer_create
12192 case TARGET_NR_timer_create:
12194 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12196 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12198 int clkid = arg1;
12199 int timer_index = next_free_host_timer();
12201 if (timer_index < 0) {
12202 ret = -TARGET_EAGAIN;
12203 } else {
12204 timer_t *phtimer = g_posix_timers + timer_index;
12206 if (arg2) {
12207 phost_sevp = &host_sevp;
12208 ret = target_to_host_sigevent(phost_sevp, arg2);
12209 if (ret != 0) {
12210 break;
12214 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12215 if (ret) {
12216 phtimer = NULL;
12217 } else {
12218 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12219 goto efault;
12223 break;
12225 #endif
12227 #ifdef TARGET_NR_timer_settime
12228 case TARGET_NR_timer_settime:
12230 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12231 * struct itimerspec * old_value */
12232 target_timer_t timerid = get_timer_id(arg1);
12234 if (timerid < 0) {
12235 ret = timerid;
12236 } else if (arg3 == 0) {
12237 ret = -TARGET_EINVAL;
12238 } else {
12239 timer_t htimer = g_posix_timers[timerid];
12240 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12242 if (target_to_host_itimerspec(&hspec_new, arg3)) {
12243 goto efault;
12245 ret = get_errno(
12246 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12247 if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12248 goto efault;
12251 break;
12253 #endif
12255 #ifdef TARGET_NR_timer_gettime
12256 case TARGET_NR_timer_gettime:
12258 /* args: timer_t timerid, struct itimerspec *curr_value */
12259 target_timer_t timerid = get_timer_id(arg1);
12261 if (timerid < 0) {
12262 ret = timerid;
12263 } else if (!arg2) {
12264 ret = -TARGET_EFAULT;
12265 } else {
12266 timer_t htimer = g_posix_timers[timerid];
12267 struct itimerspec hspec;
12268 ret = get_errno(timer_gettime(htimer, &hspec));
12270 if (host_to_target_itimerspec(arg2, &hspec)) {
12271 ret = -TARGET_EFAULT;
12274 break;
12276 #endif
12278 #ifdef TARGET_NR_timer_getoverrun
12279 case TARGET_NR_timer_getoverrun:
12281 /* args: timer_t timerid */
12282 target_timer_t timerid = get_timer_id(arg1);
12284 if (timerid < 0) {
12285 ret = timerid;
12286 } else {
12287 timer_t htimer = g_posix_timers[timerid];
12288 ret = get_errno(timer_getoverrun(htimer));
12290 fd_trans_unregister(ret);
12291 break;
12293 #endif
12295 #ifdef TARGET_NR_timer_delete
12296 case TARGET_NR_timer_delete:
12298 /* args: timer_t timerid */
12299 target_timer_t timerid = get_timer_id(arg1);
12301 if (timerid < 0) {
12302 ret = timerid;
12303 } else {
12304 timer_t htimer = g_posix_timers[timerid];
12305 ret = get_errno(timer_delete(htimer));
12306 g_posix_timers[timerid] = 0;
12308 break;
12310 #endif
12312 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12313 case TARGET_NR_timerfd_create:
12314 ret = get_errno(timerfd_create(arg1,
12315 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12316 break;
12317 #endif
12319 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12320 case TARGET_NR_timerfd_gettime:
12322 struct itimerspec its_curr;
12324 ret = get_errno(timerfd_gettime(arg1, &its_curr));
12326 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12327 goto efault;
12330 break;
12331 #endif
12333 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12334 case TARGET_NR_timerfd_settime:
12336 struct itimerspec its_new, its_old, *p_new;
12338 if (arg3) {
12339 if (target_to_host_itimerspec(&its_new, arg3)) {
12340 goto efault;
12342 p_new = &its_new;
12343 } else {
12344 p_new = NULL;
12347 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12349 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12350 goto efault;
12353 break;
12354 #endif
12356 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12357 case TARGET_NR_ioprio_get:
12358 ret = get_errno(ioprio_get(arg1, arg2));
12359 break;
12360 #endif
12362 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12363 case TARGET_NR_ioprio_set:
12364 ret = get_errno(ioprio_set(arg1, arg2, arg3));
12365 break;
12366 #endif
12368 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12369 case TARGET_NR_setns:
12370 ret = get_errno(setns(arg1, arg2));
12371 break;
12372 #endif
12373 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12374 case TARGET_NR_unshare:
12375 ret = get_errno(unshare(arg1));
12376 break;
12377 #endif
12378 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12379 case TARGET_NR_kcmp:
12380 ret = get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12381 break;
12382 #endif
12384 default:
12385 unimplemented:
12386 gemu_log("qemu: Unsupported syscall: %d\n", num);
12387 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
12388 unimplemented_nowarn:
12389 #endif
12390 ret = -TARGET_ENOSYS;
12391 break;
12393 fail:
12394 #ifdef DEBUG
12395 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
12396 #endif
12397 if(do_strace)
12398 print_syscall_ret(num, ret);
12399 trace_guest_user_syscall_ret(cpu, num, ret);
12400 return ret;
12401 efault:
12402 ret = -TARGET_EFAULT;
12403 goto fail;