linux-user: Fix mq_open() syscall support
[qemu.git] / linux-user / syscall.c
blob 99be4f2f3eadc5d3294e3499ac54ed61bce3becb
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #ifdef __ia64__
40 int __clone2(int (*fn)(void *), void *child_stack_base,
41 size_t stack_size, int flags, void *arg, ...);
42 #endif
43 #include <sys/socket.h>
44 #include <sys/un.h>
45 #include <sys/uio.h>
46 #include <poll.h>
47 #include <sys/times.h>
48 #include <sys/shm.h>
49 #include <sys/sem.h>
50 #include <sys/statfs.h>
51 #include <utime.h>
52 #include <sys/sysinfo.h>
53 #include <sys/signalfd.h>
54 //#include <sys/user.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <linux/wireless.h>
58 #include <linux/icmp.h>
59 #include "qemu-common.h"
60 #ifdef CONFIG_TIMERFD
61 #include <sys/timerfd.h>
62 #endif
63 #ifdef TARGET_GPROF
64 #include <sys/gmon.h>
65 #endif
66 #ifdef CONFIG_EVENTFD
67 #include <sys/eventfd.h>
68 #endif
69 #ifdef CONFIG_EPOLL
70 #include <sys/epoll.h>
71 #endif
72 #ifdef CONFIG_ATTR
73 #include "qemu/xattr.h"
74 #endif
75 #ifdef CONFIG_SENDFILE
76 #include <sys/sendfile.h>
77 #endif
79 #define termios host_termios
80 #define winsize host_winsize
81 #define termio host_termio
82 #define sgttyb host_sgttyb /* same as target */
83 #define tchars host_tchars /* same as target */
84 #define ltchars host_ltchars /* same as target */
86 #include <linux/termios.h>
87 #include <linux/unistd.h>
88 #include <linux/cdrom.h>
89 #include <linux/hdreg.h>
90 #include <linux/soundcard.h>
91 #include <linux/kd.h>
92 #include <linux/mtio.h>
93 #include <linux/fs.h>
94 #if defined(CONFIG_FIEMAP)
95 #include <linux/fiemap.h>
96 #endif
97 #include <linux/fb.h>
98 #include <linux/vt.h>
99 #include <linux/dm-ioctl.h>
100 #include <linux/reboot.h>
101 #include <linux/route.h>
102 #include <linux/filter.h>
103 #include <linux/blkpg.h>
104 #include <netpacket/packet.h>
105 #include <linux/netlink.h>
106 #ifdef CONFIG_RTNETLINK
107 #include <linux/rtnetlink.h>
108 #include <linux/if_bridge.h>
109 #endif
110 #include <linux/audit.h>
111 #include "linux_loop.h"
112 #include "uname.h"
114 #include "qemu.h"
116 #ifndef CLONE_IO
117 #define CLONE_IO 0x80000000 /* Clone io context */
118 #endif
/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */

/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */
//#define DEBUG
/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

/* Ioctl numbers we need from the msdos filesystem; defined locally
 * because including linux/msdos_fs.h conflicts with other headers.
 */
//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH  _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])

/* Drop any libc-provided _syscallN macros so our definitions below win. */
#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6
/* _syscallN(type, name, ...): define "static type name(...)" taking N
 * arguments which issues the raw host syscall __NR_name via syscall(),
 * returning the libc-style result (-1 with errno set on failure).
 */
#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
232 #define __NR_sys_uname __NR_uname
233 #define __NR_sys_getcwd1 __NR_getcwd
234 #define __NR_sys_getdents __NR_getdents
235 #define __NR_sys_getdents64 __NR_getdents64
236 #define __NR_sys_getpriority __NR_getpriority
237 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
238 #define __NR_sys_syslog __NR_syslog
239 #define __NR_sys_futex __NR_futex
240 #define __NR_sys_inotify_init __NR_inotify_init
241 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
242 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
244 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
245 defined(__s390x__)
246 #define __NR__llseek __NR_lseek
247 #endif
249 /* Newer kernel ports have llseek() instead of _llseek() */
250 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
251 #define TARGET_NR__llseek TARGET_NR_llseek
252 #endif
#ifdef __NR_gettid
/* Host kernel has gettid; issue it directly by number. */
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. (NOTE(review): glibc >= 2.30 declares its own gettid(); this
   fallback may need renaming on such hosts — confirm against build.) */
static int gettid(void) {
    return -ENOSYS;
}
#endif
263 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
264 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
265 #endif
266 #if !defined(__NR_getdents) || \
267 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
268 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
269 #endif
270 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
271 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
272 loff_t *, res, uint, wh);
273 #endif
274 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
275 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
276 #ifdef __NR_exit_group
277 _syscall1(int,exit_group,int,error_code)
278 #endif
279 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
280 _syscall1(int,set_tid_address,int *,tidptr)
281 #endif
282 #if defined(TARGET_NR_futex) && defined(__NR_futex)
283 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
284 const struct timespec *,timeout,int *,uaddr2,int,val3)
285 #endif
286 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
287 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
288 unsigned long *, user_mask_ptr);
289 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
290 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
291 unsigned long *, user_mask_ptr);
292 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
293 void *, arg);
294 _syscall2(int, capget, struct __user_cap_header_struct *, header,
295 struct __user_cap_data_struct *, data);
296 _syscall2(int, capset, struct __user_cap_header_struct *, header,
297 struct __user_cap_data_struct *, data);
298 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
299 _syscall2(int, ioprio_get, int, which, int, who)
300 #endif
301 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
302 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
303 #endif
304 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
305 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
306 #endif
308 static bitmask_transtbl fcntl_flags_tbl[] = {
309 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
310 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
311 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
312 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
313 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
314 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
315 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
316 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
317 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
318 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
319 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
320 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
321 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
322 #if defined(O_DIRECT)
323 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
324 #endif
325 #if defined(O_NOATIME)
326 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
327 #endif
328 #if defined(O_CLOEXEC)
329 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
330 #endif
331 #if defined(O_PATH)
332 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
333 #endif
334 /* Don't terminate the list prematurely on 64-bit host+guest. */
335 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
336 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
337 #endif
338 { 0, 0, 0, 0 }
341 enum {
342 QEMU_IFLA_BR_UNSPEC,
343 QEMU_IFLA_BR_FORWARD_DELAY,
344 QEMU_IFLA_BR_HELLO_TIME,
345 QEMU_IFLA_BR_MAX_AGE,
346 QEMU_IFLA_BR_AGEING_TIME,
347 QEMU_IFLA_BR_STP_STATE,
348 QEMU_IFLA_BR_PRIORITY,
349 QEMU_IFLA_BR_VLAN_FILTERING,
350 QEMU_IFLA_BR_VLAN_PROTOCOL,
351 QEMU_IFLA_BR_GROUP_FWD_MASK,
352 QEMU_IFLA_BR_ROOT_ID,
353 QEMU_IFLA_BR_BRIDGE_ID,
354 QEMU_IFLA_BR_ROOT_PORT,
355 QEMU_IFLA_BR_ROOT_PATH_COST,
356 QEMU_IFLA_BR_TOPOLOGY_CHANGE,
357 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
358 QEMU_IFLA_BR_HELLO_TIMER,
359 QEMU_IFLA_BR_TCN_TIMER,
360 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
361 QEMU_IFLA_BR_GC_TIMER,
362 QEMU_IFLA_BR_GROUP_ADDR,
363 QEMU_IFLA_BR_FDB_FLUSH,
364 QEMU_IFLA_BR_MCAST_ROUTER,
365 QEMU_IFLA_BR_MCAST_SNOOPING,
366 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
367 QEMU_IFLA_BR_MCAST_QUERIER,
368 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
369 QEMU_IFLA_BR_MCAST_HASH_MAX,
370 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
371 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
372 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
373 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
374 QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
375 QEMU_IFLA_BR_MCAST_QUERY_INTVL,
376 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
377 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
378 QEMU_IFLA_BR_NF_CALL_IPTABLES,
379 QEMU_IFLA_BR_NF_CALL_IP6TABLES,
380 QEMU_IFLA_BR_NF_CALL_ARPTABLES,
381 QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
382 QEMU_IFLA_BR_PAD,
383 QEMU_IFLA_BR_VLAN_STATS_ENABLED,
384 QEMU_IFLA_BR_MCAST_STATS_ENABLED,
385 QEMU___IFLA_BR_MAX,
/* Local copy of the kernel's IFLA_* link attribute numbering (see note
 * on the IFLA_BR enum above). Values must stay in kernel order.
 */
enum {
    QEMU_IFLA_UNSPEC,
    QEMU_IFLA_ADDRESS,
    QEMU_IFLA_BROADCAST,
    QEMU_IFLA_IFNAME,
    QEMU_IFLA_MTU,
    QEMU_IFLA_LINK,
    QEMU_IFLA_QDISC,
    QEMU_IFLA_STATS,
    QEMU_IFLA_COST,
    QEMU_IFLA_PRIORITY,
    QEMU_IFLA_MASTER,
    QEMU_IFLA_WIRELESS,
    QEMU_IFLA_PROTINFO,
    QEMU_IFLA_TXQLEN,
    QEMU_IFLA_MAP,
    QEMU_IFLA_WEIGHT,
    QEMU_IFLA_OPERSTATE,
    QEMU_IFLA_LINKMODE,
    QEMU_IFLA_LINKINFO,
    QEMU_IFLA_NET_NS_PID,
    QEMU_IFLA_IFALIAS,
    QEMU_IFLA_NUM_VF,
    QEMU_IFLA_VFINFO_LIST,
    QEMU_IFLA_STATS64,
    QEMU_IFLA_VF_PORTS,
    QEMU_IFLA_PORT_SELF,
    QEMU_IFLA_AF_SPEC,
    QEMU_IFLA_GROUP,
    QEMU_IFLA_NET_NS_FD,
    QEMU_IFLA_EXT_MASK,
    QEMU_IFLA_PROMISCUITY,
    QEMU_IFLA_NUM_TX_QUEUES,
    QEMU_IFLA_NUM_RX_QUEUES,
    QEMU_IFLA_CARRIER,
    QEMU_IFLA_PHYS_PORT_ID,
    QEMU_IFLA_CARRIER_CHANGES,
    QEMU_IFLA_PHYS_SWITCH_ID,
    QEMU_IFLA_LINK_NETNSID,
    QEMU_IFLA_PHYS_PORT_NAME,
    QEMU_IFLA_PROTO_DOWN,
    QEMU_IFLA_GSO_MAX_SEGS,
    QEMU_IFLA_GSO_MAX_SIZE,
    QEMU_IFLA_PAD,
    QEMU_IFLA_XDP,
    QEMU___IFLA_MAX
};
/* Local copy of the kernel's IFLA_BRPORT_* attribute numbering (see
 * note on the IFLA_BR enum above). Values must stay in kernel order.
 */
enum {
    QEMU_IFLA_BRPORT_UNSPEC,
    QEMU_IFLA_BRPORT_STATE,
    QEMU_IFLA_BRPORT_PRIORITY,
    QEMU_IFLA_BRPORT_COST,
    QEMU_IFLA_BRPORT_MODE,
    QEMU_IFLA_BRPORT_GUARD,
    QEMU_IFLA_BRPORT_PROTECT,
    QEMU_IFLA_BRPORT_FAST_LEAVE,
    QEMU_IFLA_BRPORT_LEARNING,
    QEMU_IFLA_BRPORT_UNICAST_FLOOD,
    QEMU_IFLA_BRPORT_PROXYARP,
    QEMU_IFLA_BRPORT_LEARNING_SYNC,
    QEMU_IFLA_BRPORT_PROXYARP_WIFI,
    QEMU_IFLA_BRPORT_ROOT_ID,
    QEMU_IFLA_BRPORT_BRIDGE_ID,
    QEMU_IFLA_BRPORT_DESIGNATED_PORT,
    QEMU_IFLA_BRPORT_DESIGNATED_COST,
    QEMU_IFLA_BRPORT_ID,
    QEMU_IFLA_BRPORT_NO,
    QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
    QEMU_IFLA_BRPORT_CONFIG_PENDING,
    QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
    QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
    QEMU_IFLA_BRPORT_HOLD_TIMER,
    QEMU_IFLA_BRPORT_FLUSH,
    QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
    QEMU_IFLA_BRPORT_PAD,
    QEMU___IFLA_BRPORT_MAX
};
/* Local copy of the kernel's IFLA_INFO_* numbering (see IFLA_BR note). */
enum {
    QEMU_IFLA_INFO_UNSPEC,
    QEMU_IFLA_INFO_KIND,
    QEMU_IFLA_INFO_DATA,
    QEMU_IFLA_INFO_XSTATS,
    QEMU_IFLA_INFO_SLAVE_KIND,
    QEMU_IFLA_INFO_SLAVE_DATA,
    QEMU___IFLA_INFO_MAX,
};
/* Local copy of the kernel's IFLA_INET_* numbering (see IFLA_BR note). */
enum {
    QEMU_IFLA_INET_UNSPEC,
    QEMU_IFLA_INET_CONF,
    QEMU___IFLA_INET_MAX,
};
/* Local copy of the kernel's IFLA_INET6_* numbering (see IFLA_BR note). */
enum {
    QEMU_IFLA_INET6_UNSPEC,
    QEMU_IFLA_INET6_FLAGS,
    QEMU_IFLA_INET6_CONF,
    QEMU_IFLA_INET6_STATS,
    QEMU_IFLA_INET6_MCAST,
    QEMU_IFLA_INET6_CACHEINFO,
    QEMU_IFLA_INET6_ICMP6STATS,
    QEMU_IFLA_INET6_TOKEN,
    QEMU_IFLA_INET6_ADDR_GEN_MODE,
    QEMU___IFLA_INET6_MAX
};
496 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
497 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
498 typedef struct TargetFdTrans {
499 TargetFdDataFunc host_to_target_data;
500 TargetFdDataFunc target_to_host_data;
501 TargetFdAddrFunc target_to_host_addr;
502 } TargetFdTrans;
504 static TargetFdTrans **target_fd_trans;
506 static unsigned int target_fd_max;
508 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
510 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
511 return target_fd_trans[fd]->target_to_host_data;
513 return NULL;
516 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
518 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
519 return target_fd_trans[fd]->host_to_target_data;
521 return NULL;
524 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
526 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
527 return target_fd_trans[fd]->target_to_host_addr;
529 return NULL;
532 static void fd_trans_register(int fd, TargetFdTrans *trans)
534 unsigned int oldmax;
536 if (fd >= target_fd_max) {
537 oldmax = target_fd_max;
538 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
539 target_fd_trans = g_renew(TargetFdTrans *,
540 target_fd_trans, target_fd_max);
541 memset((void *)(target_fd_trans + oldmax), 0,
542 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
544 target_fd_trans[fd] = trans;
547 static void fd_trans_unregister(int fd)
549 if (fd >= 0 && fd < target_fd_max) {
550 target_fd_trans[fd] = NULL;
554 static void fd_trans_dup(int oldfd, int newfd)
556 fd_trans_unregister(newfd);
557 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
558 fd_trans_register(newfd, target_fd_trans[oldfd]);
/* getcwd() with kernel-syscall-style return value: the number of bytes
 * written to buf including the trailing NUL on success, or -1 with
 * errno set on failure.
 */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return (-1);
    }
    return strlen(buf)+1;
}
#ifdef TARGET_NR_utimensat
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Host kernel lacks utimensat; fail with ENOSYS like the real thing. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

/* Thin wrappers around the libc inotify API so the syscall dispatch
 * below can call sys_* names uniformly.
 */
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY  */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

/* Claim the first free slot in g_posix_timers (marking it with the
 * placeholder value 1) and return its index, or -1 if all are in use.
 */
static inline int next_free_host_timer(void)
{
    int k;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
673 #define ERRNO_TABLE_SIZE 1200
675 /* target_to_host_errno_table[] is initialized from
676 * host_to_target_errno_table[] in syscall_init(). */
677 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
681 * This list is the union of errno values overridden in asm-<arch>/errno.h
682 * minus the errnos that are not actually generic to all archs.
684 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
685 [EAGAIN] = TARGET_EAGAIN,
686 [EIDRM] = TARGET_EIDRM,
687 [ECHRNG] = TARGET_ECHRNG,
688 [EL2NSYNC] = TARGET_EL2NSYNC,
689 [EL3HLT] = TARGET_EL3HLT,
690 [EL3RST] = TARGET_EL3RST,
691 [ELNRNG] = TARGET_ELNRNG,
692 [EUNATCH] = TARGET_EUNATCH,
693 [ENOCSI] = TARGET_ENOCSI,
694 [EL2HLT] = TARGET_EL2HLT,
695 [EDEADLK] = TARGET_EDEADLK,
696 [ENOLCK] = TARGET_ENOLCK,
697 [EBADE] = TARGET_EBADE,
698 [EBADR] = TARGET_EBADR,
699 [EXFULL] = TARGET_EXFULL,
700 [ENOANO] = TARGET_ENOANO,
701 [EBADRQC] = TARGET_EBADRQC,
702 [EBADSLT] = TARGET_EBADSLT,
703 [EBFONT] = TARGET_EBFONT,
704 [ENOSTR] = TARGET_ENOSTR,
705 [ENODATA] = TARGET_ENODATA,
706 [ETIME] = TARGET_ETIME,
707 [ENOSR] = TARGET_ENOSR,
708 [ENONET] = TARGET_ENONET,
709 [ENOPKG] = TARGET_ENOPKG,
710 [EREMOTE] = TARGET_EREMOTE,
711 [ENOLINK] = TARGET_ENOLINK,
712 [EADV] = TARGET_EADV,
713 [ESRMNT] = TARGET_ESRMNT,
714 [ECOMM] = TARGET_ECOMM,
715 [EPROTO] = TARGET_EPROTO,
716 [EDOTDOT] = TARGET_EDOTDOT,
717 [EMULTIHOP] = TARGET_EMULTIHOP,
718 [EBADMSG] = TARGET_EBADMSG,
719 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
720 [EOVERFLOW] = TARGET_EOVERFLOW,
721 [ENOTUNIQ] = TARGET_ENOTUNIQ,
722 [EBADFD] = TARGET_EBADFD,
723 [EREMCHG] = TARGET_EREMCHG,
724 [ELIBACC] = TARGET_ELIBACC,
725 [ELIBBAD] = TARGET_ELIBBAD,
726 [ELIBSCN] = TARGET_ELIBSCN,
727 [ELIBMAX] = TARGET_ELIBMAX,
728 [ELIBEXEC] = TARGET_ELIBEXEC,
729 [EILSEQ] = TARGET_EILSEQ,
730 [ENOSYS] = TARGET_ENOSYS,
731 [ELOOP] = TARGET_ELOOP,
732 [ERESTART] = TARGET_ERESTART,
733 [ESTRPIPE] = TARGET_ESTRPIPE,
734 [ENOTEMPTY] = TARGET_ENOTEMPTY,
735 [EUSERS] = TARGET_EUSERS,
736 [ENOTSOCK] = TARGET_ENOTSOCK,
737 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
738 [EMSGSIZE] = TARGET_EMSGSIZE,
739 [EPROTOTYPE] = TARGET_EPROTOTYPE,
740 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
741 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
742 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
743 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
744 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
745 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
746 [EADDRINUSE] = TARGET_EADDRINUSE,
747 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
748 [ENETDOWN] = TARGET_ENETDOWN,
749 [ENETUNREACH] = TARGET_ENETUNREACH,
750 [ENETRESET] = TARGET_ENETRESET,
751 [ECONNABORTED] = TARGET_ECONNABORTED,
752 [ECONNRESET] = TARGET_ECONNRESET,
753 [ENOBUFS] = TARGET_ENOBUFS,
754 [EISCONN] = TARGET_EISCONN,
755 [ENOTCONN] = TARGET_ENOTCONN,
756 [EUCLEAN] = TARGET_EUCLEAN,
757 [ENOTNAM] = TARGET_ENOTNAM,
758 [ENAVAIL] = TARGET_ENAVAIL,
759 [EISNAM] = TARGET_EISNAM,
760 [EREMOTEIO] = TARGET_EREMOTEIO,
761 [EDQUOT] = TARGET_EDQUOT,
762 [ESHUTDOWN] = TARGET_ESHUTDOWN,
763 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
764 [ETIMEDOUT] = TARGET_ETIMEDOUT,
765 [ECONNREFUSED] = TARGET_ECONNREFUSED,
766 [EHOSTDOWN] = TARGET_EHOSTDOWN,
767 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
768 [EALREADY] = TARGET_EALREADY,
769 [EINPROGRESS] = TARGET_EINPROGRESS,
770 [ESTALE] = TARGET_ESTALE,
771 [ECANCELED] = TARGET_ECANCELED,
772 [ENOMEDIUM] = TARGET_ENOMEDIUM,
773 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
774 #ifdef ENOKEY
775 [ENOKEY] = TARGET_ENOKEY,
776 #endif
777 #ifdef EKEYEXPIRED
778 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
779 #endif
780 #ifdef EKEYREVOKED
781 [EKEYREVOKED] = TARGET_EKEYREVOKED,
782 #endif
783 #ifdef EKEYREJECTED
784 [EKEYREJECTED] = TARGET_EKEYREJECTED,
785 #endif
786 #ifdef EOWNERDEAD
787 [EOWNERDEAD] = TARGET_EOWNERDEAD,
788 #endif
789 #ifdef ENOTRECOVERABLE
790 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
791 #endif
794 static inline int host_to_target_errno(int err)
796 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
797 host_to_target_errno_table[err]) {
798 return host_to_target_errno_table[err];
800 return err;
803 static inline int target_to_host_errno(int err)
805 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
806 target_to_host_errno_table[err]) {
807 return target_to_host_errno_table[err];
809 return err;
812 static inline abi_long get_errno(abi_long ret)
814 if (ret == -1)
815 return -host_to_target_errno(errno);
816 else
817 return ret;
820 static inline int is_error(abi_long ret)
822 return (abi_ulong)ret >= (abi_ulong)(-4096);
825 const char *target_strerror(int err)
827 if (err == TARGET_ERESTARTSYS) {
828 return "To be restarted";
830 if (err == TARGET_QEMU_ESIGRETURN) {
831 return "Successful exit from sigreturn";
834 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
835 return NULL;
837 return strerror(target_to_host_errno(err));
/* safe_syscallN(type, name, ...): define safe_name(...), which issues
 * __NR_name via safe_syscall() rather than libc, so that a guest signal
 * arriving during a blocking call is handled correctly (the call is
 * interrupted and can be restarted — see the safe_syscall machinery).
 */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
887 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
888 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
889 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
890 int, flags, mode_t, mode)
891 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
892 struct rusage *, rusage)
893 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
894 int, options, struct rusage *, rusage)
895 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
896 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
897 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
898 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
899 struct timespec *, tsp, const sigset_t *, sigmask,
900 size_t, sigsetsize)
901 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
902 int, maxevents, int, timeout, const sigset_t *, sigmask,
903 size_t, sigsetsize)
904 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
905 const struct timespec *,timeout,int *,uaddr2,int,val3)
906 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
907 safe_syscall2(int, kill, pid_t, pid, int, sig)
908 safe_syscall2(int, tkill, int, tid, int, sig)
909 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
910 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
911 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
912 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
913 socklen_t, addrlen)
914 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
915 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
916 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
917 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
918 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
919 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
920 safe_syscall2(int, flock, int, fd, int, operation)
921 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
922 const struct timespec *, uts, size_t, sigsetsize)
923 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
924 int, flags)
925 safe_syscall2(int, nanosleep, const struct timespec *, req,
926 struct timespec *, rem)
927 #ifdef TARGET_NR_clock_nanosleep
928 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
929 const struct timespec *, req, struct timespec *, rem)
930 #endif
931 #ifdef __NR_msgsnd
932 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
933 int, flags)
934 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
935 long, msgtype, int, flags)
936 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
937 unsigned, nsops, const struct timespec *, timeout)
938 #else
939 /* This host kernel architecture uses a single ipc syscall; fake up
940 * wrappers for the sub-operations to hide this implementation detail.
941 * Annoyingly we can't include linux/ipc.h to get the constant definitions
942 * for the call parameter because some structs in there conflict with the
943 * sys/ipc.h ones. So we just define them here, and rely on them being
944 * the same for all host architectures.
946 #define Q_SEMTIMEDOP 4
947 #define Q_MSGSND 11
948 #define Q_MSGRCV 12
949 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
951 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
952 void *, ptr, long, fifth)
953 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
955 return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
957 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
959 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
961 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
962 const struct timespec *timeout)
964 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
965 (long)timeout);
967 #endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
/* POSIX message queue send/receive as restart-safe raw syscalls
 * (the plain mq_send/mq_receive are implemented via the timed forms).
 */
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
990 static inline int host_to_target_sock_type(int host_type)
992 int target_type;
994 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
995 case SOCK_DGRAM:
996 target_type = TARGET_SOCK_DGRAM;
997 break;
998 case SOCK_STREAM:
999 target_type = TARGET_SOCK_STREAM;
1000 break;
1001 default:
1002 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
1003 break;
1006 #if defined(SOCK_CLOEXEC)
1007 if (host_type & SOCK_CLOEXEC) {
1008 target_type |= TARGET_SOCK_CLOEXEC;
1010 #endif
1012 #if defined(SOCK_NONBLOCK)
1013 if (host_type & SOCK_NONBLOCK) {
1014 target_type |= TARGET_SOCK_NONBLOCK;
1016 #endif
1018 return target_type;
1021 static abi_ulong target_brk;
1022 static abi_ulong target_original_brk;
1023 static abi_ulong brk_page;
1025 void target_set_brk(abi_ulong new_brk)
1027 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
1028 brk_page = HOST_PAGE_ALIGN(target_brk);
/* Debug tracing for do_brk(); swap in the fprintf variant when debugging. */
//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
1034 /* do_brk() must return target values and target errnos. */
1035 abi_long do_brk(abi_ulong new_brk)
1037 abi_long mapped_addr;
1038 abi_ulong new_alloc_size;
1040 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
1042 if (!new_brk) {
1043 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
1044 return target_brk;
1046 if (new_brk < target_original_brk) {
1047 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
1048 target_brk);
1049 return target_brk;
1052 /* If the new brk is less than the highest page reserved to the
1053 * target heap allocation, set it and we're almost done... */
1054 if (new_brk <= brk_page) {
1055 /* Heap contents are initialized to zero, as for anonymous
1056 * mapped pages. */
1057 if (new_brk > target_brk) {
1058 memset(g2h(target_brk), 0, new_brk - target_brk);
1060 target_brk = new_brk;
1061 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
1062 return target_brk;
1065 /* We need to allocate more memory after the brk... Note that
1066 * we don't use MAP_FIXED because that will map over the top of
1067 * any existing mapping (like the one with the host libc or qemu
1068 * itself); instead we treat "mapped but at wrong address" as
1069 * a failure and unmap again.
1071 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
1072 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
1073 PROT_READ|PROT_WRITE,
1074 MAP_ANON|MAP_PRIVATE, 0, 0));
1076 if (mapped_addr == brk_page) {
1077 /* Heap contents are initialized to zero, as for anonymous
1078 * mapped pages. Technically the new pages are already
1079 * initialized to zero since they *are* anonymous mapped
1080 * pages, however we have to take care with the contents that
1081 * come from the remaining part of the previous page: it may
1082 * contains garbage data due to a previous heap usage (grown
1083 * then shrunken). */
1084 memset(g2h(target_brk), 0, brk_page - target_brk);
1086 target_brk = new_brk;
1087 brk_page = HOST_PAGE_ALIGN(target_brk);
1088 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
1089 target_brk);
1090 return target_brk;
1091 } else if (mapped_addr != -1) {
1092 /* Mapped but at wrong address, meaning there wasn't actually
1093 * enough space for this brk.
1095 target_munmap(mapped_addr, new_alloc_size);
1096 mapped_addr = -1;
1097 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
1099 else {
1100 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
1103 #if defined(TARGET_ALPHA)
1104 /* We (partially) emulate OSF/1 on Alpha, which requires we
1105 return a proper errno, not an unchanged brk value. */
1106 return -TARGET_ENOMEM;
1107 #endif
1108 /* For everything else, return the previous break. */
1109 return target_brk;
1112 static inline abi_long copy_from_user_fdset(fd_set *fds,
1113 abi_ulong target_fds_addr,
1114 int n)
1116 int i, nw, j, k;
1117 abi_ulong b, *target_fds;
1119 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1120 if (!(target_fds = lock_user(VERIFY_READ,
1121 target_fds_addr,
1122 sizeof(abi_ulong) * nw,
1123 1)))
1124 return -TARGET_EFAULT;
1126 FD_ZERO(fds);
1127 k = 0;
1128 for (i = 0; i < nw; i++) {
1129 /* grab the abi_ulong */
1130 __get_user(b, &target_fds[i]);
1131 for (j = 0; j < TARGET_ABI_BITS; j++) {
1132 /* check the bit inside the abi_ulong */
1133 if ((b >> j) & 1)
1134 FD_SET(k, fds);
1135 k++;
1139 unlock_user(target_fds, target_fds_addr, 0);
1141 return 0;
1144 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1145 abi_ulong target_fds_addr,
1146 int n)
1148 if (target_fds_addr) {
1149 if (copy_from_user_fdset(fds, target_fds_addr, n))
1150 return -TARGET_EFAULT;
1151 *fds_ptr = fds;
1152 } else {
1153 *fds_ptr = NULL;
1155 return 0;
1158 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1159 const fd_set *fds,
1160 int n)
1162 int i, nw, j, k;
1163 abi_long v;
1164 abi_ulong *target_fds;
1166 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1167 if (!(target_fds = lock_user(VERIFY_WRITE,
1168 target_fds_addr,
1169 sizeof(abi_ulong) * nw,
1170 0)))
1171 return -TARGET_EFAULT;
1173 k = 0;
1174 for (i = 0; i < nw; i++) {
1175 v = 0;
1176 for (j = 0; j < TARGET_ABI_BITS; j++) {
1177 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1178 k++;
1180 __put_user(v, &target_fds[i]);
1183 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1185 return 0;
/* Host clock tick rate (ticks/second) for clock_t conversion; Alpha
 * kernels run at 1024 HZ, every other host handled here assumes 100.
 */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif
1194 static inline abi_long host_to_target_clock_t(long ticks)
1196 #if HOST_HZ == TARGET_HZ
1197 return ticks;
1198 #else
1199 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1200 #endif
1203 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1204 const struct rusage *rusage)
1206 struct target_rusage *target_rusage;
1208 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1209 return -TARGET_EFAULT;
1210 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1211 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1212 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1213 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1214 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1215 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1216 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1217 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1218 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1219 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1220 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1221 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1222 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1223 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1224 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1225 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1226 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1227 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1228 unlock_user_struct(target_rusage, target_addr, 1);
1230 return 0;
1233 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1235 abi_ulong target_rlim_swap;
1236 rlim_t result;
1238 target_rlim_swap = tswapal(target_rlim);
1239 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1240 return RLIM_INFINITY;
1242 result = target_rlim_swap;
1243 if (target_rlim_swap != (rlim_t)result)
1244 return RLIM_INFINITY;
1246 return result;
1249 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1251 abi_ulong target_rlim_swap;
1252 abi_ulong result;
1254 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1255 target_rlim_swap = TARGET_RLIM_INFINITY;
1256 else
1257 target_rlim_swap = rlim;
1258 result = tswapal(target_rlim_swap);
1260 return result;
1263 static inline int target_to_host_resource(int code)
1265 switch (code) {
1266 case TARGET_RLIMIT_AS:
1267 return RLIMIT_AS;
1268 case TARGET_RLIMIT_CORE:
1269 return RLIMIT_CORE;
1270 case TARGET_RLIMIT_CPU:
1271 return RLIMIT_CPU;
1272 case TARGET_RLIMIT_DATA:
1273 return RLIMIT_DATA;
1274 case TARGET_RLIMIT_FSIZE:
1275 return RLIMIT_FSIZE;
1276 case TARGET_RLIMIT_LOCKS:
1277 return RLIMIT_LOCKS;
1278 case TARGET_RLIMIT_MEMLOCK:
1279 return RLIMIT_MEMLOCK;
1280 case TARGET_RLIMIT_MSGQUEUE:
1281 return RLIMIT_MSGQUEUE;
1282 case TARGET_RLIMIT_NICE:
1283 return RLIMIT_NICE;
1284 case TARGET_RLIMIT_NOFILE:
1285 return RLIMIT_NOFILE;
1286 case TARGET_RLIMIT_NPROC:
1287 return RLIMIT_NPROC;
1288 case TARGET_RLIMIT_RSS:
1289 return RLIMIT_RSS;
1290 case TARGET_RLIMIT_RTPRIO:
1291 return RLIMIT_RTPRIO;
1292 case TARGET_RLIMIT_SIGPENDING:
1293 return RLIMIT_SIGPENDING;
1294 case TARGET_RLIMIT_STACK:
1295 return RLIMIT_STACK;
1296 default:
1297 return code;
1301 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1302 abi_ulong target_tv_addr)
1304 struct target_timeval *target_tv;
1306 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1307 return -TARGET_EFAULT;
1309 __get_user(tv->tv_sec, &target_tv->tv_sec);
1310 __get_user(tv->tv_usec, &target_tv->tv_usec);
1312 unlock_user_struct(target_tv, target_tv_addr, 0);
1314 return 0;
1317 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1318 const struct timeval *tv)
1320 struct target_timeval *target_tv;
1322 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1323 return -TARGET_EFAULT;
1325 __put_user(tv->tv_sec, &target_tv->tv_sec);
1326 __put_user(tv->tv_usec, &target_tv->tv_usec);
1328 unlock_user_struct(target_tv, target_tv_addr, 1);
1330 return 0;
1333 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1334 abi_ulong target_tz_addr)
1336 struct target_timezone *target_tz;
1338 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1339 return -TARGET_EFAULT;
1342 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1343 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1345 unlock_user_struct(target_tz, target_tz_addr, 0);
1347 return 0;
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

/* Read a guest struct mq_attr into host form; 0 or -TARGET_EFAULT. */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

/* Write a host struct mq_attr back to guest memory; 0 or -TARGET_EFAULT. */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos.
 * The guest timeval is converted to a timespec so the host pselect6
 * safe-syscall can be used; the (possibly updated) timeout is copied
 * back to the guest on success, matching Linux select() semantics.
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr)) {
            return -TARGET_EFAULT;
        }
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}

#if defined(TARGET_WANT_OLD_SYS_SELECT)
/* Old-style select: all five arguments packed in a guest struct. */
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#endif
1474 static abi_long do_pipe2(int host_pipe[], int flags)
1476 #ifdef CONFIG_PIPE2
1477 return pipe2(host_pipe, flags);
1478 #else
1479 return -ENOSYS;
1480 #endif
1483 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1484 int flags, int is_pipe2)
1486 int host_pipe[2];
1487 abi_long ret;
1488 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1490 if (is_error(ret))
1491 return get_errno(ret);
1493 /* Several targets have special calling conventions for the original
1494 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1495 if (!is_pipe2) {
1496 #if defined(TARGET_ALPHA)
1497 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1498 return host_pipe[0];
1499 #elif defined(TARGET_MIPS)
1500 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1501 return host_pipe[0];
1502 #elif defined(TARGET_SH4)
1503 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1504 return host_pipe[0];
1505 #elif defined(TARGET_SPARC)
1506 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1507 return host_pipe[0];
1508 #endif
1511 if (put_user_s32(host_pipe[0], pipedes)
1512 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1513 return -TARGET_EFAULT;
1514 return get_errno(ret);
1517 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1518 abi_ulong target_addr,
1519 socklen_t len)
1521 struct target_ip_mreqn *target_smreqn;
1523 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1524 if (!target_smreqn)
1525 return -TARGET_EFAULT;
1526 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1527 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1528 if (len == sizeof(struct target_ip_mreqn))
1529 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1530 unlock_user(target_smreqn, target_addr, 0);
1532 return 0;
1535 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1536 abi_ulong target_addr,
1537 socklen_t len)
1539 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1540 sa_family_t sa_family;
1541 struct target_sockaddr *target_saddr;
1543 if (fd_trans_target_to_host_addr(fd)) {
1544 return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1547 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1548 if (!target_saddr)
1549 return -TARGET_EFAULT;
1551 sa_family = tswap16(target_saddr->sa_family);
1553 /* Oops. The caller might send a incomplete sun_path; sun_path
1554 * must be terminated by \0 (see the manual page), but
1555 * unfortunately it is quite common to specify sockaddr_un
1556 * length as "strlen(x->sun_path)" while it should be
1557 * "strlen(...) + 1". We'll fix that here if needed.
1558 * Linux kernel has a similar feature.
1561 if (sa_family == AF_UNIX) {
1562 if (len < unix_maxlen && len > 0) {
1563 char *cp = (char*)target_saddr;
1565 if ( cp[len-1] && !cp[len] )
1566 len++;
1568 if (len > unix_maxlen)
1569 len = unix_maxlen;
1572 memcpy(addr, target_saddr, len);
1573 addr->sa_family = sa_family;
1574 if (sa_family == AF_NETLINK) {
1575 struct sockaddr_nl *nladdr;
1577 nladdr = (struct sockaddr_nl *)addr;
1578 nladdr->nl_pid = tswap32(nladdr->nl_pid);
1579 nladdr->nl_groups = tswap32(nladdr->nl_groups);
1580 } else if (sa_family == AF_PACKET) {
1581 struct target_sockaddr_ll *lladdr;
1583 lladdr = (struct target_sockaddr_ll *)addr;
1584 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1585 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1587 unlock_user(target_saddr, target_addr, 0);
1589 return 0;
1592 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1593 struct sockaddr *addr,
1594 socklen_t len)
1596 struct target_sockaddr *target_saddr;
1598 if (len == 0) {
1599 return 0;
1602 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1603 if (!target_saddr)
1604 return -TARGET_EFAULT;
1605 memcpy(target_saddr, addr, len);
1606 if (len >= offsetof(struct target_sockaddr, sa_family) +
1607 sizeof(target_saddr->sa_family)) {
1608 target_saddr->sa_family = tswap16(addr->sa_family);
1610 if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1611 struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1612 target_nl->nl_pid = tswap32(target_nl->nl_pid);
1613 target_nl->nl_groups = tswap32(target_nl->nl_groups);
1614 } else if (addr->sa_family == AF_PACKET) {
1615 struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1616 target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1617 target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1619 unlock_user(target_saddr, target_addr, len);
1621 return 0;
1624 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1625 struct target_msghdr *target_msgh)
1627 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1628 abi_long msg_controllen;
1629 abi_ulong target_cmsg_addr;
1630 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1631 socklen_t space = 0;
1633 msg_controllen = tswapal(target_msgh->msg_controllen);
1634 if (msg_controllen < sizeof (struct target_cmsghdr))
1635 goto the_end;
1636 target_cmsg_addr = tswapal(target_msgh->msg_control);
1637 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1638 target_cmsg_start = target_cmsg;
1639 if (!target_cmsg)
1640 return -TARGET_EFAULT;
1642 while (cmsg && target_cmsg) {
1643 void *data = CMSG_DATA(cmsg);
1644 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1646 int len = tswapal(target_cmsg->cmsg_len)
1647 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1649 space += CMSG_SPACE(len);
1650 if (space > msgh->msg_controllen) {
1651 space -= CMSG_SPACE(len);
1652 /* This is a QEMU bug, since we allocated the payload
1653 * area ourselves (unlike overflow in host-to-target
1654 * conversion, which is just the guest giving us a buffer
1655 * that's too small). It can't happen for the payload types
1656 * we currently support; if it becomes an issue in future
1657 * we would need to improve our allocation strategy to
1658 * something more intelligent than "twice the size of the
1659 * target buffer we're reading from".
1661 gemu_log("Host cmsg overflow\n");
1662 break;
1665 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1666 cmsg->cmsg_level = SOL_SOCKET;
1667 } else {
1668 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1670 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1671 cmsg->cmsg_len = CMSG_LEN(len);
1673 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1674 int *fd = (int *)data;
1675 int *target_fd = (int *)target_data;
1676 int i, numfds = len / sizeof(int);
1678 for (i = 0; i < numfds; i++) {
1679 __get_user(fd[i], target_fd + i);
1681 } else if (cmsg->cmsg_level == SOL_SOCKET
1682 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1683 struct ucred *cred = (struct ucred *)data;
1684 struct target_ucred *target_cred =
1685 (struct target_ucred *)target_data;
1687 __get_user(cred->pid, &target_cred->pid);
1688 __get_user(cred->uid, &target_cred->uid);
1689 __get_user(cred->gid, &target_cred->gid);
1690 } else {
1691 gemu_log("Unsupported ancillary data: %d/%d\n",
1692 cmsg->cmsg_level, cmsg->cmsg_type);
1693 memcpy(data, target_data, len);
1696 cmsg = CMSG_NXTHDR(msgh, cmsg);
1697 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1698 target_cmsg_start);
1700 unlock_user(target_cmsg, target_cmsg_addr, 0);
1701 the_end:
1702 msgh->msg_controllen = space;
1703 return 0;
1706 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1707 struct msghdr *msgh)
1709 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1710 abi_long msg_controllen;
1711 abi_ulong target_cmsg_addr;
1712 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1713 socklen_t space = 0;
1715 msg_controllen = tswapal(target_msgh->msg_controllen);
1716 if (msg_controllen < sizeof (struct target_cmsghdr))
1717 goto the_end;
1718 target_cmsg_addr = tswapal(target_msgh->msg_control);
1719 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1720 target_cmsg_start = target_cmsg;
1721 if (!target_cmsg)
1722 return -TARGET_EFAULT;
1724 while (cmsg && target_cmsg) {
1725 void *data = CMSG_DATA(cmsg);
1726 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1728 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1729 int tgt_len, tgt_space;
1731 /* We never copy a half-header but may copy half-data;
1732 * this is Linux's behaviour in put_cmsg(). Note that
1733 * truncation here is a guest problem (which we report
1734 * to the guest via the CTRUNC bit), unlike truncation
1735 * in target_to_host_cmsg, which is a QEMU bug.
1737 if (msg_controllen < sizeof(struct cmsghdr)) {
1738 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1739 break;
1742 if (cmsg->cmsg_level == SOL_SOCKET) {
1743 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1744 } else {
1745 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1747 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1749 tgt_len = TARGET_CMSG_LEN(len);
1751 /* Payload types which need a different size of payload on
1752 * the target must adjust tgt_len here.
1754 switch (cmsg->cmsg_level) {
1755 case SOL_SOCKET:
1756 switch (cmsg->cmsg_type) {
1757 case SO_TIMESTAMP:
1758 tgt_len = sizeof(struct target_timeval);
1759 break;
1760 default:
1761 break;
1763 default:
1764 break;
1767 if (msg_controllen < tgt_len) {
1768 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1769 tgt_len = msg_controllen;
1772 /* We must now copy-and-convert len bytes of payload
1773 * into tgt_len bytes of destination space. Bear in mind
1774 * that in both source and destination we may be dealing
1775 * with a truncated value!
1777 switch (cmsg->cmsg_level) {
1778 case SOL_SOCKET:
1779 switch (cmsg->cmsg_type) {
1780 case SCM_RIGHTS:
1782 int *fd = (int *)data;
1783 int *target_fd = (int *)target_data;
1784 int i, numfds = tgt_len / sizeof(int);
1786 for (i = 0; i < numfds; i++) {
1787 __put_user(fd[i], target_fd + i);
1789 break;
1791 case SO_TIMESTAMP:
1793 struct timeval *tv = (struct timeval *)data;
1794 struct target_timeval *target_tv =
1795 (struct target_timeval *)target_data;
1797 if (len != sizeof(struct timeval) ||
1798 tgt_len != sizeof(struct target_timeval)) {
1799 goto unimplemented;
1802 /* copy struct timeval to target */
1803 __put_user(tv->tv_sec, &target_tv->tv_sec);
1804 __put_user(tv->tv_usec, &target_tv->tv_usec);
1805 break;
1807 case SCM_CREDENTIALS:
1809 struct ucred *cred = (struct ucred *)data;
1810 struct target_ucred *target_cred =
1811 (struct target_ucred *)target_data;
1813 __put_user(cred->pid, &target_cred->pid);
1814 __put_user(cred->uid, &target_cred->uid);
1815 __put_user(cred->gid, &target_cred->gid);
1816 break;
1818 default:
1819 goto unimplemented;
1821 break;
1823 default:
1824 unimplemented:
1825 gemu_log("Unsupported ancillary data: %d/%d\n",
1826 cmsg->cmsg_level, cmsg->cmsg_type);
1827 memcpy(target_data, data, MIN(len, tgt_len));
1828 if (tgt_len > len) {
1829 memset(target_data + len, 0, tgt_len - len);
1833 target_cmsg->cmsg_len = tswapal(tgt_len);
1834 tgt_space = TARGET_CMSG_SPACE(len);
1835 if (msg_controllen < tgt_space) {
1836 tgt_space = msg_controllen;
1838 msg_controllen -= tgt_space;
1839 space += tgt_space;
1840 cmsg = CMSG_NXTHDR(msgh, cmsg);
1841 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1842 target_cmsg_start);
1844 unlock_user(target_cmsg, target_cmsg_addr, space);
1845 the_end:
1846 target_msgh->msg_controllen = tswapal(space);
1847 return 0;
1850 static void tswap_nlmsghdr(struct nlmsghdr *nlh)
1852 nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
1853 nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
1854 nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
1855 nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
1856 nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
1859 static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
1860 size_t len,
1861 abi_long (*host_to_target_nlmsg)
1862 (struct nlmsghdr *))
1864 uint32_t nlmsg_len;
1865 abi_long ret;
1867 while (len > sizeof(struct nlmsghdr)) {
1869 nlmsg_len = nlh->nlmsg_len;
1870 if (nlmsg_len < sizeof(struct nlmsghdr) ||
1871 nlmsg_len > len) {
1872 break;
1875 switch (nlh->nlmsg_type) {
1876 case NLMSG_DONE:
1877 tswap_nlmsghdr(nlh);
1878 return 0;
1879 case NLMSG_NOOP:
1880 break;
1881 case NLMSG_ERROR:
1883 struct nlmsgerr *e = NLMSG_DATA(nlh);
1884 e->error = tswap32(e->error);
1885 tswap_nlmsghdr(&e->msg);
1886 tswap_nlmsghdr(nlh);
1887 return 0;
1889 default:
1890 ret = host_to_target_nlmsg(nlh);
1891 if (ret < 0) {
1892 tswap_nlmsghdr(nlh);
1893 return ret;
1895 break;
1897 tswap_nlmsghdr(nlh);
1898 len -= NLMSG_ALIGN(nlmsg_len);
1899 nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
1901 return 0;
1904 static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
1905 size_t len,
1906 abi_long (*target_to_host_nlmsg)
1907 (struct nlmsghdr *))
1909 int ret;
1911 while (len > sizeof(struct nlmsghdr)) {
1912 if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
1913 tswap32(nlh->nlmsg_len) > len) {
1914 break;
1916 tswap_nlmsghdr(nlh);
1917 switch (nlh->nlmsg_type) {
1918 case NLMSG_DONE:
1919 return 0;
1920 case NLMSG_NOOP:
1921 break;
1922 case NLMSG_ERROR:
1924 struct nlmsgerr *e = NLMSG_DATA(nlh);
1925 e->error = tswap32(e->error);
1926 tswap_nlmsghdr(&e->msg);
1927 return 0;
1929 default:
1930 ret = target_to_host_nlmsg(nlh);
1931 if (ret < 0) {
1932 return ret;
1935 len -= NLMSG_ALIGN(nlh->nlmsg_len);
1936 nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
1938 return 0;
1941 #ifdef CONFIG_RTNETLINK
1942 static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
1943 size_t len, void *context,
1944 abi_long (*host_to_target_nlattr)
1945 (struct nlattr *,
1946 void *context))
1948 unsigned short nla_len;
1949 abi_long ret;
1951 while (len > sizeof(struct nlattr)) {
1952 nla_len = nlattr->nla_len;
1953 if (nla_len < sizeof(struct nlattr) ||
1954 nla_len > len) {
1955 break;
1957 ret = host_to_target_nlattr(nlattr, context);
1958 nlattr->nla_len = tswap16(nlattr->nla_len);
1959 nlattr->nla_type = tswap16(nlattr->nla_type);
1960 if (ret < 0) {
1961 return ret;
1963 len -= NLA_ALIGN(nla_len);
1964 nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
1966 return 0;
1969 static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
1970 size_t len,
1971 abi_long (*host_to_target_rtattr)
1972 (struct rtattr *))
1974 unsigned short rta_len;
1975 abi_long ret;
1977 while (len > sizeof(struct rtattr)) {
1978 rta_len = rtattr->rta_len;
1979 if (rta_len < sizeof(struct rtattr) ||
1980 rta_len > len) {
1981 break;
1983 ret = host_to_target_rtattr(rtattr);
1984 rtattr->rta_len = tswap16(rtattr->rta_len);
1985 rtattr->rta_type = tswap16(rtattr->rta_type);
1986 if (ret < 0) {
1987 return ret;
1989 len -= RTA_ALIGN(rta_len);
1990 rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
1992 return 0;
1995 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
1997 static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
1998 void *context)
2000 uint16_t *u16;
2001 uint32_t *u32;
2002 uint64_t *u64;
2004 switch (nlattr->nla_type) {
2005 /* no data */
2006 case QEMU_IFLA_BR_FDB_FLUSH:
2007 break;
2008 /* binary */
2009 case QEMU_IFLA_BR_GROUP_ADDR:
2010 break;
2011 /* uint8_t */
2012 case QEMU_IFLA_BR_VLAN_FILTERING:
2013 case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
2014 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
2015 case QEMU_IFLA_BR_MCAST_ROUTER:
2016 case QEMU_IFLA_BR_MCAST_SNOOPING:
2017 case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
2018 case QEMU_IFLA_BR_MCAST_QUERIER:
2019 case QEMU_IFLA_BR_NF_CALL_IPTABLES:
2020 case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
2021 case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
2022 break;
2023 /* uint16_t */
2024 case QEMU_IFLA_BR_PRIORITY:
2025 case QEMU_IFLA_BR_VLAN_PROTOCOL:
2026 case QEMU_IFLA_BR_GROUP_FWD_MASK:
2027 case QEMU_IFLA_BR_ROOT_PORT:
2028 case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
2029 u16 = NLA_DATA(nlattr);
2030 *u16 = tswap16(*u16);
2031 break;
2032 /* uint32_t */
2033 case QEMU_IFLA_BR_FORWARD_DELAY:
2034 case QEMU_IFLA_BR_HELLO_TIME:
2035 case QEMU_IFLA_BR_MAX_AGE:
2036 case QEMU_IFLA_BR_AGEING_TIME:
2037 case QEMU_IFLA_BR_STP_STATE:
2038 case QEMU_IFLA_BR_ROOT_PATH_COST:
2039 case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
2040 case QEMU_IFLA_BR_MCAST_HASH_MAX:
2041 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
2042 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
2043 u32 = NLA_DATA(nlattr);
2044 *u32 = tswap32(*u32);
2045 break;
2046 /* uint64_t */
2047 case QEMU_IFLA_BR_HELLO_TIMER:
2048 case QEMU_IFLA_BR_TCN_TIMER:
2049 case QEMU_IFLA_BR_GC_TIMER:
2050 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
2051 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
2052 case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
2053 case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
2054 case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
2055 case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
2056 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
2057 u64 = NLA_DATA(nlattr);
2058 *u64 = tswap64(*u64);
2059 break;
2060 /* ifla_bridge_id: uin8_t[] */
2061 case QEMU_IFLA_BR_ROOT_ID:
2062 case QEMU_IFLA_BR_BRIDGE_ID:
2063 break;
2064 default:
2065 gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
2066 break;
2068 return 0;
/*
 * Byteswap one bridge-port (IFLA_BRPORT_*) netlink attribute payload from
 * host to target endianness, in place.  The attribute header has already
 * been converted by the caller's attribute walker; only the payload is
 * still in host byte order here.
 *
 * @nlattr:  attribute to convert
 * @context: unused
 *
 * Returns 0 always; unknown attribute types are logged and left untouched.
 */
static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
                                                        void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* uint8_t: single bytes are endian-neutral, nothing to do */
    case QEMU_IFLA_BRPORT_STATE:
    case QEMU_IFLA_BRPORT_MODE:
    case QEMU_IFLA_BRPORT_GUARD:
    case QEMU_IFLA_BRPORT_PROTECT:
    case QEMU_IFLA_BRPORT_FAST_LEAVE:
    case QEMU_IFLA_BRPORT_LEARNING:
    case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
    case QEMU_IFLA_BRPORT_PROXYARP:
    case QEMU_IFLA_BRPORT_LEARNING_SYNC:
    case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
    case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
    case QEMU_IFLA_BRPORT_CONFIG_PENDING:
    case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
        break;
    /* uint16_t */
    case QEMU_IFLA_BRPORT_PRIORITY:
    case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
    case QEMU_IFLA_BRPORT_DESIGNATED_COST:
    case QEMU_IFLA_BRPORT_ID:
    case QEMU_IFLA_BRPORT_NO:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uint32_t */
    case QEMU_IFLA_BRPORT_COST:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
    case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
    case QEMU_IFLA_BRPORT_HOLD_TIMER:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uint8_t[] — raw bytes, no conversion needed */
    case QEMU_IFLA_BRPORT_ROOT_ID:
    case QEMU_IFLA_BRPORT_BRIDGE_ID:
        break;
    default:
        gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
/*
 * Carries the IFLA_INFO_KIND / IFLA_INFO_SLAVE_KIND strings (pointers into
 * the netlink buffer, not owned) across attributes of one IFLA_LINKINFO
 * nest, so that IFLA_INFO_DATA / IFLA_INFO_SLAVE_DATA can be decoded
 * according to the previously seen kind string.
 */
struct linkinfo_context {
    int len;            /* length of 'name' payload (may not be NUL-terminated) */
    char *name;         /* IFLA_INFO_KIND string, e.g. "bridge" */
    int slave_len;      /* length of 'slave_name' payload */
    char *slave_name;   /* IFLA_INFO_SLAVE_KIND string */
};
2133 static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
2134 void *context)
2136 struct linkinfo_context *li_context = context;
2138 switch (nlattr->nla_type) {
2139 /* string */
2140 case QEMU_IFLA_INFO_KIND:
2141 li_context->name = NLA_DATA(nlattr);
2142 li_context->len = nlattr->nla_len - NLA_HDRLEN;
2143 break;
2144 case QEMU_IFLA_INFO_SLAVE_KIND:
2145 li_context->slave_name = NLA_DATA(nlattr);
2146 li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
2147 break;
2148 /* stats */
2149 case QEMU_IFLA_INFO_XSTATS:
2150 /* FIXME: only used by CAN */
2151 break;
2152 /* nested */
2153 case QEMU_IFLA_INFO_DATA:
2154 if (strncmp(li_context->name, "bridge",
2155 li_context->len) == 0) {
2156 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2157 nlattr->nla_len,
2158 NULL,
2159 host_to_target_data_bridge_nlattr);
2160 } else {
2161 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
2163 break;
2164 case QEMU_IFLA_INFO_SLAVE_DATA:
2165 if (strncmp(li_context->slave_name, "bridge",
2166 li_context->slave_len) == 0) {
2167 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2168 nlattr->nla_len,
2169 NULL,
2170 host_to_target_slave_data_bridge_nlattr);
2171 } else {
2172 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2173 li_context->slave_name);
2175 break;
2176 default:
2177 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
2178 break;
2181 return 0;
2184 static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
2185 void *context)
2187 uint32_t *u32;
2188 int i;
2190 switch (nlattr->nla_type) {
2191 case QEMU_IFLA_INET_CONF:
2192 u32 = NLA_DATA(nlattr);
2193 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2194 i++) {
2195 u32[i] = tswap32(u32[i]);
2197 break;
2198 default:
2199 gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
2201 return 0;
/*
 * Byteswap an AF_INET6 (IFLA_INET6_*) attribute payload from host to
 * target endianness, in place.  Handles scalar, array and
 * struct ifla_cacheinfo payloads; raw binary payloads pass through.
 * Always returns 0; unknown types are only logged.
 */
static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
                                                 void *context)
{
    uint32_t *u32;
    uint64_t *u64;
    struct ifla_cacheinfo *ci;
    int i;

    switch (nlattr->nla_type) {
    /* binaries: raw bytes, endian-neutral */
    case QEMU_IFLA_INET6_TOKEN:
        break;
    /* uint8_t */
    case QEMU_IFLA_INET6_ADDR_GEN_MODE:
        break;
    /* uint32_t */
    case QEMU_IFLA_INET6_FLAGS:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint32_t[] */
    case QEMU_IFLA_INET6_CONF:
        u32 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
             i++) {
            u32[i] = tswap32(u32[i]);
        }
        break;
    /* ifla_cacheinfo: swap each 32-bit field individually */
    case QEMU_IFLA_INET6_CACHEINFO:
        ci = NLA_DATA(nlattr);
        ci->max_reasm_len = tswap32(ci->max_reasm_len);
        ci->tstamp = tswap32(ci->tstamp);
        ci->reachable_time = tswap32(ci->reachable_time);
        ci->retrans_time = tswap32(ci->retrans_time);
        break;
    /* uint64_t[] */
    case QEMU_IFLA_INET6_STATS:
    case QEMU_IFLA_INET6_ICMP6STATS:
        u64 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
             i++) {
            u64[i] = tswap64(u64[i]);
        }
        break;
    default:
        gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
    }
    return 0;
}
2255 static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
2256 void *context)
2258 switch (nlattr->nla_type) {
2259 case AF_INET:
2260 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2261 NULL,
2262 host_to_target_data_inet_nlattr);
2263 case AF_INET6:
2264 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2265 NULL,
2266 host_to_target_data_inet6_nlattr);
2267 default:
2268 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
2269 break;
2271 return 0;
/*
 * Byteswap the payload of one RTM_*LINK rtattr (IFLA_*) from host to
 * target endianness, in place.  Scalars and structs are swapped field by
 * field; nested LINKINFO / AF_SPEC payloads recurse through the
 * attribute walkers.  Returns 0 or the nested walk's error.
 */
static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct rtnl_link_stats *st;
    struct rtnl_link_stats64 *st64;
    struct rtnl_link_ifmap *map;
    struct linkinfo_context li_context;

    switch (rtattr->rta_type) {
    /* binary stream: endian-neutral */
    case QEMU_IFLA_ADDRESS:
    case QEMU_IFLA_BROADCAST:
    /* string */
    case QEMU_IFLA_IFNAME:
    case QEMU_IFLA_QDISC:
        break;
    /* uint8_t */
    case QEMU_IFLA_OPERSTATE:
    case QEMU_IFLA_LINKMODE:
    case QEMU_IFLA_CARRIER:
    case QEMU_IFLA_PROTO_DOWN:
        break;
    /* uint32_t */
    case QEMU_IFLA_MTU:
    case QEMU_IFLA_LINK:
    case QEMU_IFLA_WEIGHT:
    case QEMU_IFLA_TXQLEN:
    case QEMU_IFLA_CARRIER_CHANGES:
    case QEMU_IFLA_NUM_RX_QUEUES:
    case QEMU_IFLA_NUM_TX_QUEUES:
    case QEMU_IFLA_PROMISCUITY:
    case QEMU_IFLA_EXT_MASK:
    case QEMU_IFLA_LINK_NETNSID:
    case QEMU_IFLA_GROUP:
    case QEMU_IFLA_MASTER:
    case QEMU_IFLA_NUM_VF:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct rtnl_link_stats: all fields are 32-bit */
    case QEMU_IFLA_STATS:
        st = RTA_DATA(rtattr);
        st->rx_packets = tswap32(st->rx_packets);
        st->tx_packets = tswap32(st->tx_packets);
        st->rx_bytes = tswap32(st->rx_bytes);
        st->tx_bytes = tswap32(st->tx_bytes);
        st->rx_errors = tswap32(st->rx_errors);
        st->tx_errors = tswap32(st->tx_errors);
        st->rx_dropped = tswap32(st->rx_dropped);
        st->tx_dropped = tswap32(st->tx_dropped);
        st->multicast = tswap32(st->multicast);
        st->collisions = tswap32(st->collisions);

        /* detailed rx_errors: */
        st->rx_length_errors = tswap32(st->rx_length_errors);
        st->rx_over_errors = tswap32(st->rx_over_errors);
        st->rx_crc_errors = tswap32(st->rx_crc_errors);
        st->rx_frame_errors = tswap32(st->rx_frame_errors);
        st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
        st->rx_missed_errors = tswap32(st->rx_missed_errors);

        /* detailed tx_errors */
        st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
        st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
        st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
        st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
        st->tx_window_errors = tswap32(st->tx_window_errors);

        /* for cslip etc */
        st->rx_compressed = tswap32(st->rx_compressed);
        st->tx_compressed = tswap32(st->tx_compressed);
        break;
    /* struct rtnl_link_stats64: same layout, 64-bit counters */
    case QEMU_IFLA_STATS64:
        st64 = RTA_DATA(rtattr);
        st64->rx_packets = tswap64(st64->rx_packets);
        st64->tx_packets = tswap64(st64->tx_packets);
        st64->rx_bytes = tswap64(st64->rx_bytes);
        st64->tx_bytes = tswap64(st64->tx_bytes);
        st64->rx_errors = tswap64(st64->rx_errors);
        st64->tx_errors = tswap64(st64->tx_errors);
        st64->rx_dropped = tswap64(st64->rx_dropped);
        st64->tx_dropped = tswap64(st64->tx_dropped);
        st64->multicast = tswap64(st64->multicast);
        st64->collisions = tswap64(st64->collisions);

        /* detailed rx_errors: */
        st64->rx_length_errors = tswap64(st64->rx_length_errors);
        st64->rx_over_errors = tswap64(st64->rx_over_errors);
        st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
        st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
        st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
        st64->rx_missed_errors = tswap64(st64->rx_missed_errors);

        /* detailed tx_errors */
        st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
        st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
        st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
        st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
        st64->tx_window_errors = tswap64(st64->tx_window_errors);

        /* for cslip etc */
        st64->rx_compressed = tswap64(st64->rx_compressed);
        st64->tx_compressed = tswap64(st64->tx_compressed);
        break;
    /* struct rtnl_link_ifmap */
    case QEMU_IFLA_MAP:
        map = RTA_DATA(rtattr);
        map->mem_start = tswap64(map->mem_start);
        map->mem_end = tswap64(map->mem_end);
        map->base_addr = tswap64(map->base_addr);
        map->irq = tswap16(map->irq);
        break;
    /* nested: recurse with a fresh linkinfo context */
    case QEMU_IFLA_LINKINFO:
        memset(&li_context, 0, sizeof(li_context));
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              &li_context,
                                           host_to_target_data_linkinfo_nlattr);
    case QEMU_IFLA_AF_SPEC:
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              NULL,
                                              host_to_target_data_spec_nlattr);
    default:
        gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/*
 * Byteswap one RTM_*ADDR rtattr (IFA_*) payload from host to target
 * endianness, in place.  Always returns 0; unknown types are only logged.
 */
static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct ifa_cacheinfo *ci;

    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case IFA_ADDRESS:
    case IFA_LOCAL:
        break;
    /* string */
    case IFA_LABEL:
        break;
    /* u32 */
    /* NOTE(review): IFA_BROADCAST carries an address (network byte order)
     * for IPv4; swapping it like a host-order u32 looks questionable —
     * verify against the kernel's rtnetlink encoding. */
    case IFA_FLAGS:
    case IFA_BROADCAST:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct ifa_cacheinfo: four 32-bit fields */
    case IFA_CACHEINFO:
        ci = RTA_DATA(rtattr);
        ci->ifa_prefered = tswap32(ci->ifa_prefered);
        ci->ifa_valid = tswap32(ci->ifa_valid);
        ci->cstamp = tswap32(ci->cstamp);
        ci->tstamp = tswap32(ci->tstamp);
        break;
    default:
        gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
2438 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
2440 uint32_t *u32;
2441 switch (rtattr->rta_type) {
2442 /* binary: depends on family type */
2443 case RTA_GATEWAY:
2444 case RTA_DST:
2445 case RTA_PREFSRC:
2446 break;
2447 /* u32 */
2448 case RTA_PRIORITY:
2449 case RTA_TABLE:
2450 case RTA_OIF:
2451 u32 = RTA_DATA(rtattr);
2452 *u32 = tswap32(*u32);
2453 break;
2454 default:
2455 gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
2456 break;
2458 return 0;
/* Walk all IFLA_* attributes of a link message, converting each in place. */
static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_link_rtattr);
}
/* Walk all IFA_* attributes of an address message, converting each in place. */
static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_addr_rtattr);
}
/* Walk all RTA_* attributes of a route message, converting each in place. */
static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
                                            uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_route_rtattr);
}
/*
 * Convert one rtnetlink reply message (host → target): swap the fixed
 * per-type header fields, then walk the trailing rtattrs.  The nlmsghdr
 * itself has already been converted by the nlmsg walker; nlmsg_len is
 * therefore read before it could be touched again.
 *
 * Returns 0, or -TARGET_EINVAL for message types we do not handle.
 */
static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
{
    uint32_t nlmsg_len;
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    nlmsg_len = nlh->nlmsg_len;
    switch (nlh->nlmsg_type) {
    case RTM_NEWLINK:
    case RTM_DELLINK:
    case RTM_GETLINK:
        /* only convert the payload if it is actually large enough */
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            host_to_target_link_rtattr(IFLA_RTA(ifi),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_NEWADDR:
    case RTM_DELADDR:
    case RTM_GETADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            host_to_target_addr_rtattr(IFA_RTA(ifa),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
    case RTM_GETROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            host_to_target_route_rtattr(RTM_RTA(rtm),
                                        nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EINVAL;
    }
    return 0;
}
/* Convert a whole buffer of rtnetlink reply messages, host → target. */
static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
}
/*
 * Walk a stream of rtattrs arriving in target byte order: validate each
 * header (still target-order, hence the tswap16 in the checks), swap the
 * header in place, then hand the attribute to the per-type callback.
 * Stops silently on a malformed length; propagates callback errors.
 */
static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*target_to_host_rtattr)
                                                        (struct rtattr *))
{
    abi_long ret;

    while (len >= sizeof(struct rtattr)) {
        /* length fields are still target-order at this point */
        if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
            tswap16(rtattr->rta_len) > len) {
            break;
        }
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        ret = target_to_host_rtattr(rtattr);
        if (ret < 0) {
            return ret;
        }
        /* advance by the aligned (now host-order) attribute length */
        len -= RTA_ALIGN(rtattr->rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) +
                 RTA_ALIGN(rtattr->rta_len));
    }
    return 0;
}
2561 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2563 switch (rtattr->rta_type) {
2564 default:
2565 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
2566 break;
2568 return 0;
2571 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2573 switch (rtattr->rta_type) {
2574 /* binary: depends on family type */
2575 case IFA_LOCAL:
2576 case IFA_ADDRESS:
2577 break;
2578 default:
2579 gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
2580 break;
2582 return 0;
2585 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2587 uint32_t *u32;
2588 switch (rtattr->rta_type) {
2589 /* binary: depends on family type */
2590 case RTA_DST:
2591 case RTA_SRC:
2592 case RTA_GATEWAY:
2593 break;
2594 /* u32 */
2595 case RTA_OIF:
2596 u32 = RTA_DATA(rtattr);
2597 *u32 = tswap32(*u32);
2598 break;
2599 default:
2600 gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
2601 break;
2603 return 0;
/* Walk all IFLA_* attributes of an outgoing link request, in place. */
static void target_to_host_link_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_link_rtattr);
}
/* Walk all IFA_* attributes of an outgoing address request, in place. */
static void target_to_host_addr_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_addr_rtattr);
}
/* Walk all RTA_* attributes of an outgoing route request, in place. */
static void target_to_host_route_rtattr(struct rtattr *rtattr,
                                        uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_route_rtattr);
}
/*
 * Convert one rtnetlink request message (target → host): swap the fixed
 * per-type header fields, then walk the trailing rtattrs.  GET requests
 * carry no payload needing conversion.  The nlmsghdr has already been
 * converted, so nlh->nlmsg_len is host order here.
 *
 * Returns 0, or -TARGET_EOPNOTSUPP for unhandled message types.
 */
static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
{
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    switch (nlh->nlmsg_type) {
    case RTM_GETLINK:
        break;
    case RTM_NEWLINK:
    case RTM_DELLINK:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_GETADDR:
    case RTM_NEWADDR:
    case RTM_DELADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_GETROUTE:
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
                                        NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EOPNOTSUPP;
    }
    return 0;
}
/* Convert a whole buffer of rtnetlink request messages, target → host. */
static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
}
2679 #endif /* CONFIG_RTNETLINK */
2681 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2683 switch (nlh->nlmsg_type) {
2684 default:
2685 gemu_log("Unknown host audit message type %d\n",
2686 nlh->nlmsg_type);
2687 return -TARGET_EINVAL;
2689 return 0;
/* Convert a whole buffer of audit-netlink reply messages, host → target. */
static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
}
2698 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2700 switch (nlh->nlmsg_type) {
2701 case AUDIT_USER:
2702 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2703 case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2704 break;
2705 default:
2706 gemu_log("Unknown target audit message type %d\n",
2707 nlh->nlmsg_type);
2708 return -TARGET_EINVAL;
2711 return 0;
/* Convert a whole buffer of audit-netlink request messages, target → host. */
static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
}
2719 /* do_setsockopt() Must return target values and target errnos. */
2720 static abi_long do_setsockopt(int sockfd, int level, int optname,
2721 abi_ulong optval_addr, socklen_t optlen)
2723 abi_long ret;
2724 int val;
2725 struct ip_mreqn *ip_mreq;
2726 struct ip_mreq_source *ip_mreq_source;
2728 switch(level) {
2729 case SOL_TCP:
2730 /* TCP options all take an 'int' value. */
2731 if (optlen < sizeof(uint32_t))
2732 return -TARGET_EINVAL;
2734 if (get_user_u32(val, optval_addr))
2735 return -TARGET_EFAULT;
2736 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2737 break;
2738 case SOL_IP:
2739 switch(optname) {
2740 case IP_TOS:
2741 case IP_TTL:
2742 case IP_HDRINCL:
2743 case IP_ROUTER_ALERT:
2744 case IP_RECVOPTS:
2745 case IP_RETOPTS:
2746 case IP_PKTINFO:
2747 case IP_MTU_DISCOVER:
2748 case IP_RECVERR:
2749 case IP_RECVTOS:
2750 #ifdef IP_FREEBIND
2751 case IP_FREEBIND:
2752 #endif
2753 case IP_MULTICAST_TTL:
2754 case IP_MULTICAST_LOOP:
2755 val = 0;
2756 if (optlen >= sizeof(uint32_t)) {
2757 if (get_user_u32(val, optval_addr))
2758 return -TARGET_EFAULT;
2759 } else if (optlen >= 1) {
2760 if (get_user_u8(val, optval_addr))
2761 return -TARGET_EFAULT;
2763 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2764 break;
2765 case IP_ADD_MEMBERSHIP:
2766 case IP_DROP_MEMBERSHIP:
2767 if (optlen < sizeof (struct target_ip_mreq) ||
2768 optlen > sizeof (struct target_ip_mreqn))
2769 return -TARGET_EINVAL;
2771 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2772 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2773 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2774 break;
2776 case IP_BLOCK_SOURCE:
2777 case IP_UNBLOCK_SOURCE:
2778 case IP_ADD_SOURCE_MEMBERSHIP:
2779 case IP_DROP_SOURCE_MEMBERSHIP:
2780 if (optlen != sizeof (struct target_ip_mreq_source))
2781 return -TARGET_EINVAL;
2783 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2784 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2785 unlock_user (ip_mreq_source, optval_addr, 0);
2786 break;
2788 default:
2789 goto unimplemented;
2791 break;
2792 case SOL_IPV6:
2793 switch (optname) {
2794 case IPV6_MTU_DISCOVER:
2795 case IPV6_MTU:
2796 case IPV6_V6ONLY:
2797 case IPV6_RECVPKTINFO:
2798 val = 0;
2799 if (optlen < sizeof(uint32_t)) {
2800 return -TARGET_EINVAL;
2802 if (get_user_u32(val, optval_addr)) {
2803 return -TARGET_EFAULT;
2805 ret = get_errno(setsockopt(sockfd, level, optname,
2806 &val, sizeof(val)));
2807 break;
2808 default:
2809 goto unimplemented;
2811 break;
2812 case SOL_RAW:
2813 switch (optname) {
2814 case ICMP_FILTER:
2815 /* struct icmp_filter takes an u32 value */
2816 if (optlen < sizeof(uint32_t)) {
2817 return -TARGET_EINVAL;
2820 if (get_user_u32(val, optval_addr)) {
2821 return -TARGET_EFAULT;
2823 ret = get_errno(setsockopt(sockfd, level, optname,
2824 &val, sizeof(val)));
2825 break;
2827 default:
2828 goto unimplemented;
2830 break;
2831 case TARGET_SOL_SOCKET:
2832 switch (optname) {
2833 case TARGET_SO_RCVTIMEO:
2835 struct timeval tv;
2837 optname = SO_RCVTIMEO;
2839 set_timeout:
2840 if (optlen != sizeof(struct target_timeval)) {
2841 return -TARGET_EINVAL;
2844 if (copy_from_user_timeval(&tv, optval_addr)) {
2845 return -TARGET_EFAULT;
2848 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2849 &tv, sizeof(tv)));
2850 return ret;
2852 case TARGET_SO_SNDTIMEO:
2853 optname = SO_SNDTIMEO;
2854 goto set_timeout;
2855 case TARGET_SO_ATTACH_FILTER:
2857 struct target_sock_fprog *tfprog;
2858 struct target_sock_filter *tfilter;
2859 struct sock_fprog fprog;
2860 struct sock_filter *filter;
2861 int i;
2863 if (optlen != sizeof(*tfprog)) {
2864 return -TARGET_EINVAL;
2866 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2867 return -TARGET_EFAULT;
2869 if (!lock_user_struct(VERIFY_READ, tfilter,
2870 tswapal(tfprog->filter), 0)) {
2871 unlock_user_struct(tfprog, optval_addr, 1);
2872 return -TARGET_EFAULT;
2875 fprog.len = tswap16(tfprog->len);
2876 filter = g_try_new(struct sock_filter, fprog.len);
2877 if (filter == NULL) {
2878 unlock_user_struct(tfilter, tfprog->filter, 1);
2879 unlock_user_struct(tfprog, optval_addr, 1);
2880 return -TARGET_ENOMEM;
2882 for (i = 0; i < fprog.len; i++) {
2883 filter[i].code = tswap16(tfilter[i].code);
2884 filter[i].jt = tfilter[i].jt;
2885 filter[i].jf = tfilter[i].jf;
2886 filter[i].k = tswap32(tfilter[i].k);
2888 fprog.filter = filter;
2890 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2891 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2892 g_free(filter);
2894 unlock_user_struct(tfilter, tfprog->filter, 1);
2895 unlock_user_struct(tfprog, optval_addr, 1);
2896 return ret;
2898 case TARGET_SO_BINDTODEVICE:
2900 char *dev_ifname, *addr_ifname;
2902 if (optlen > IFNAMSIZ - 1) {
2903 optlen = IFNAMSIZ - 1;
2905 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2906 if (!dev_ifname) {
2907 return -TARGET_EFAULT;
2909 optname = SO_BINDTODEVICE;
2910 addr_ifname = alloca(IFNAMSIZ);
2911 memcpy(addr_ifname, dev_ifname, optlen);
2912 addr_ifname[optlen] = 0;
2913 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2914 addr_ifname, optlen));
2915 unlock_user (dev_ifname, optval_addr, 0);
2916 return ret;
2918 /* Options with 'int' argument. */
2919 case TARGET_SO_DEBUG:
2920 optname = SO_DEBUG;
2921 break;
2922 case TARGET_SO_REUSEADDR:
2923 optname = SO_REUSEADDR;
2924 break;
2925 case TARGET_SO_TYPE:
2926 optname = SO_TYPE;
2927 break;
2928 case TARGET_SO_ERROR:
2929 optname = SO_ERROR;
2930 break;
2931 case TARGET_SO_DONTROUTE:
2932 optname = SO_DONTROUTE;
2933 break;
2934 case TARGET_SO_BROADCAST:
2935 optname = SO_BROADCAST;
2936 break;
2937 case TARGET_SO_SNDBUF:
2938 optname = SO_SNDBUF;
2939 break;
2940 case TARGET_SO_SNDBUFFORCE:
2941 optname = SO_SNDBUFFORCE;
2942 break;
2943 case TARGET_SO_RCVBUF:
2944 optname = SO_RCVBUF;
2945 break;
2946 case TARGET_SO_RCVBUFFORCE:
2947 optname = SO_RCVBUFFORCE;
2948 break;
2949 case TARGET_SO_KEEPALIVE:
2950 optname = SO_KEEPALIVE;
2951 break;
2952 case TARGET_SO_OOBINLINE:
2953 optname = SO_OOBINLINE;
2954 break;
2955 case TARGET_SO_NO_CHECK:
2956 optname = SO_NO_CHECK;
2957 break;
2958 case TARGET_SO_PRIORITY:
2959 optname = SO_PRIORITY;
2960 break;
2961 #ifdef SO_BSDCOMPAT
2962 case TARGET_SO_BSDCOMPAT:
2963 optname = SO_BSDCOMPAT;
2964 break;
2965 #endif
2966 case TARGET_SO_PASSCRED:
2967 optname = SO_PASSCRED;
2968 break;
2969 case TARGET_SO_PASSSEC:
2970 optname = SO_PASSSEC;
2971 break;
2972 case TARGET_SO_TIMESTAMP:
2973 optname = SO_TIMESTAMP;
2974 break;
2975 case TARGET_SO_RCVLOWAT:
2976 optname = SO_RCVLOWAT;
2977 break;
2978 break;
2979 default:
2980 goto unimplemented;
2982 if (optlen < sizeof(uint32_t))
2983 return -TARGET_EINVAL;
2985 if (get_user_u32(val, optval_addr))
2986 return -TARGET_EFAULT;
2987 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2988 break;
2989 default:
2990 unimplemented:
2991 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2992 ret = -TARGET_ENOPROTOOPT;
2994 return ret;
/* do_getsockopt() Must return target values and target errnos.
 *
 * Emulate getsockopt(2) for the guest: map target option constants to
 * host ones, perform the host call, then write the result and the
 * (possibly shortened) length back into guest memory at optval_addr /
 * optlen.  Note the cross-case flow: TARGET_SOL_SOCKET int options jump
 * into the SOL_TCP handler via the int_case label.
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            /* copy the ucred field-by-field so target layout/endianness
             * is respected */
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument: map the constant, then share the
         * SOL_TCP int handler below. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        /* socket type flags differ between host and target ABIs */
        if (optname == SO_TYPE) {
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        /* honour a short guest buffer: write one byte instead of four */
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            /* small buffer and byte-representable value: return one byte */
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/*
 * Build a host iovec array from a guest target_iovec array at target_addr,
 * locking each guest buffer into host memory.
 *
 * @type:  VERIFY_READ or VERIFY_WRITE, the access the caller will perform
 * @count: number of entries
 * @copy:  whether guest data must be copied in when locking
 *
 * On success returns the malloc'd vector (caller frees via unlock_iovec);
 * on failure returns NULL with errno set.  A bad buffer address after the
 * first entry is not an error: the remaining entries get NULL/0 so the
 * caller performs a partial transfer, matching kernel writev semantics.
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* clamp so the summed length never exceeds max_len */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* unwind: unlock every buffer locked so far (zero-length were never
     * locked, mirroring the loop above) */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
/*
 * Release a vector obtained from lock_iovec(): unlock each guest buffer
 * (copying data back out when 'copy' is set) and free the host array.
 * Re-reads the guest iovec to recover the original base addresses.
 */
static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            if (len < 0) {
                /* lock_iovec stopped at the first negative length */
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    g_free(vec);
}
3296 static inline int target_to_host_sock_type(int *type)
3298 int host_type = 0;
3299 int target_type = *type;
3301 switch (target_type & TARGET_SOCK_TYPE_MASK) {
3302 case TARGET_SOCK_DGRAM:
3303 host_type = SOCK_DGRAM;
3304 break;
3305 case TARGET_SOCK_STREAM:
3306 host_type = SOCK_STREAM;
3307 break;
3308 default:
3309 host_type = target_type & TARGET_SOCK_TYPE_MASK;
3310 break;
3312 if (target_type & TARGET_SOCK_CLOEXEC) {
3313 #if defined(SOCK_CLOEXEC)
3314 host_type |= SOCK_CLOEXEC;
3315 #else
3316 return -TARGET_EINVAL;
3317 #endif
3319 if (target_type & TARGET_SOCK_NONBLOCK) {
3320 #if defined(SOCK_NONBLOCK)
3321 host_type |= SOCK_NONBLOCK;
3322 #elif !defined(O_NONBLOCK)
3323 return -TARGET_EINVAL;
3324 #endif
3326 *type = host_type;
3327 return 0;
3330 /* Try to emulate socket type flags after socket creation. */
3331 static int sock_flags_fixup(int fd, int target_type)
3333 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3334 if (target_type & TARGET_SOCK_NONBLOCK) {
3335 int flags = fcntl(fd, F_GETFL);
3336 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3337 close(fd);
3338 return -TARGET_EINVAL;
3341 #endif
3342 return fd;
3345 static abi_long packet_target_to_host_sockaddr(void *host_addr,
3346 abi_ulong target_addr,
3347 socklen_t len)
3349 struct sockaddr *addr = host_addr;
3350 struct target_sockaddr *target_saddr;
3352 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
3353 if (!target_saddr) {
3354 return -TARGET_EFAULT;
3357 memcpy(addr, target_saddr, len);
3358 addr->sa_family = tswap16(target_saddr->sa_family);
3359 /* spkt_protocol is big-endian */
3361 unlock_user(target_saddr, target_addr, 0);
3362 return 0;
3365 static TargetFdTrans target_packet_trans = {
3366 .target_to_host_addr = packet_target_to_host_sockaddr,
#ifdef CONFIG_RTNETLINK
/* Byte-swap NETLINK_ROUTE messages travelling guest -> host.
 * Returns the (unchanged) length on success, or a negative error code.
 */
static abi_long netlink_route_target_to_host(void *buf, size_t len)
{
    abi_long err = target_to_host_nlmsg_route(buf, len);

    return err < 0 ? err : len;
}

/* Byte-swap NETLINK_ROUTE messages travelling host -> guest. */
static abi_long netlink_route_host_to_target(void *buf, size_t len)
{
    abi_long err = host_to_target_nlmsg_route(buf, len);

    return err < 0 ? err : len;
}

/* fd translator attached to NETLINK_ROUTE sockets by do_socket(). */
static TargetFdTrans target_netlink_route_trans = {
    .target_to_host_data = netlink_route_target_to_host,
    .host_to_target_data = netlink_route_host_to_target,
};
#endif /* CONFIG_RTNETLINK */
3400 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
3402 abi_long ret;
3404 ret = target_to_host_nlmsg_audit(buf, len);
3405 if (ret < 0) {
3406 return ret;
3409 return len;
3412 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
3414 abi_long ret;
3416 ret = host_to_target_nlmsg_audit(buf, len);
3417 if (ret < 0) {
3418 return ret;
3421 return len;
3424 static TargetFdTrans target_netlink_audit_trans = {
3425 .target_to_host_data = netlink_audit_target_to_host,
3426 .host_to_target_data = netlink_audit_host_to_target,
3429 /* do_socket() Must return target values and target errnos. */
3430 static abi_long do_socket(int domain, int type, int protocol)
3432 int target_type = type;
3433 int ret;
3435 ret = target_to_host_sock_type(&type);
3436 if (ret) {
3437 return ret;
3440 if (domain == PF_NETLINK && !(
3441 #ifdef CONFIG_RTNETLINK
3442 protocol == NETLINK_ROUTE ||
3443 #endif
3444 protocol == NETLINK_KOBJECT_UEVENT ||
3445 protocol == NETLINK_AUDIT)) {
3446 return -EPFNOSUPPORT;
3449 if (domain == AF_PACKET ||
3450 (domain == AF_INET && type == SOCK_PACKET)) {
3451 protocol = tswap16(protocol);
3454 ret = get_errno(socket(domain, type, protocol));
3455 if (ret >= 0) {
3456 ret = sock_flags_fixup(ret, target_type);
3457 if (type == SOCK_PACKET) {
3458 /* Manage an obsolete case :
3459 * if socket type is SOCK_PACKET, bind by name
3461 fd_trans_register(ret, &target_packet_trans);
3462 } else if (domain == PF_NETLINK) {
3463 switch (protocol) {
3464 #ifdef CONFIG_RTNETLINK
3465 case NETLINK_ROUTE:
3466 fd_trans_register(ret, &target_netlink_route_trans);
3467 break;
3468 #endif
3469 case NETLINK_KOBJECT_UEVENT:
3470 /* nothing to do: messages are strings */
3471 break;
3472 case NETLINK_AUDIT:
3473 fd_trans_register(ret, &target_netlink_audit_trans);
3474 break;
3475 default:
3476 g_assert_not_reached();
3480 return ret;
3483 /* do_bind() Must return target values and target errnos. */
3484 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3485 socklen_t addrlen)
3487 void *addr;
3488 abi_long ret;
3490 if ((int)addrlen < 0) {
3491 return -TARGET_EINVAL;
3494 addr = alloca(addrlen+1);
3496 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3497 if (ret)
3498 return ret;
3500 return get_errno(bind(sockfd, addr, addrlen));
3503 /* do_connect() Must return target values and target errnos. */
3504 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3505 socklen_t addrlen)
3507 void *addr;
3508 abi_long ret;
3510 if ((int)addrlen < 0) {
3511 return -TARGET_EINVAL;
3514 addr = alloca(addrlen+1);
3516 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3517 if (ret)
3518 return ret;
3520 return get_errno(safe_connect(sockfd, addr, addrlen));
3523 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
/* Shared core of the sendmsg(2)/recvmsg(2) emulation.  @msgp is the
 * already-locked guest msghdr; @send selects the transfer direction. */
3524 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3525 int flags, int send)
3527 abi_long ret, len;
3528 struct msghdr msg;
3529 abi_ulong count;
3530 struct iovec *vec;
3531 abi_ulong target_vec;
3533 if (msgp->msg_name) {
3534 msg.msg_namelen = tswap32(msgp->msg_namelen);
3535 msg.msg_name = alloca(msg.msg_namelen+1);
3536 ret = target_to_host_sockaddr(fd, msg.msg_name,
3537 tswapal(msgp->msg_name),
3538 msg.msg_namelen);
3539 if (ret == -TARGET_EFAULT) {
3540 /* For connected sockets msg_name and msg_namelen must
3541 * be ignored, so returning EFAULT immediately is wrong.
3542 * Instead, pass a bad msg_name to the host kernel, and
3543 * let it decide whether to return EFAULT or not.
3545 msg.msg_name = (void *)-1;
3546 } else if (ret) {
3547 goto out2;
3549 } else {
3550 msg.msg_name = NULL;
3551 msg.msg_namelen = 0;
/* NOTE(review): control buffer is doubled, presumably to leave headroom
 * for size differences between guest and host cmsg layouts — confirm. */
3553 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3554 msg.msg_control = alloca(msg.msg_controllen);
3555 msg.msg_flags = tswap32(msgp->msg_flags);
3557 count = tswapal(msgp->msg_iovlen);
3558 target_vec = tswapal(msgp->msg_iov);
3560 if (count > IOV_MAX) {
3561 /* sendrcvmsg returns a different errno for this condition than
3562 * readv/writev, so we must catch it here before lock_iovec() does.
3564 ret = -TARGET_EMSGSIZE;
3565 goto out2;
3568 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3569 target_vec, count, send);
3570 if (vec == NULL) {
3571 ret = -host_to_target_errno(errno);
3572 goto out2;
3574 msg.msg_iovlen = count;
3575 msg.msg_iov = vec;
3577 if (send) {
3578 if (fd_trans_target_to_host_data(fd)) {
3579 void *host_msg;
/* Payload is translated in a private copy so the guest page is
 * left untouched; only the first iovec entry is translated here. */
3581 host_msg = g_malloc(msg.msg_iov->iov_len);
3582 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3583 ret = fd_trans_target_to_host_data(fd)(host_msg,
3584 msg.msg_iov->iov_len);
3585 if (ret >= 0) {
3586 msg.msg_iov->iov_base = host_msg;
3587 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3589 g_free(host_msg);
3590 } else {
3591 ret = target_to_host_cmsg(&msg, msgp);
3592 if (ret == 0) {
3593 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3596 } else {
3597 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3598 if (!is_error(ret)) {
3599 len = ret;
3600 if (fd_trans_host_to_target_data(fd)) {
3601 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3602 len);
3603 } else {
3604 ret = host_to_target_cmsg(msgp, &msg);
3606 if (!is_error(ret)) {
3607 msgp->msg_namelen = tswap32(msg.msg_namelen);
/* (void *)-1 is the deliberate bad pointer set above. */
3608 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3609 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3610 msg.msg_name, msg.msg_namelen);
3611 if (ret) {
3612 goto out;
/* On success report the number of bytes received, not the cmsg status. */
3616 ret = len;
3621 out:
3622 unlock_iovec(vec, target_vec, count, !send);
3623 out2:
3624 return ret;
3627 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3628 int flags, int send)
3630 abi_long ret;
3631 struct target_msghdr *msgp;
3633 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3634 msgp,
3635 target_msg,
3636 send ? 1 : 0)) {
3637 return -TARGET_EFAULT;
3639 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3640 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3641 return ret;
3644 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3645 * so it might not have this *mmsg-specific flag either.
3647 #ifndef MSG_WAITFORONE
/* 0x10000 matches the Linux UAPI value of MSG_WAITFORONE — confirm against
 * the host's linux/socket.h if this is ever updated. */
3648 #define MSG_WAITFORONE 0x10000
3649 #endif
3651 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3652 unsigned int vlen, unsigned int flags,
3653 int send)
3655 struct target_mmsghdr *mmsgp;
3656 abi_long ret = 0;
3657 int i;
3659 if (vlen > UIO_MAXIOV) {
3660 vlen = UIO_MAXIOV;
3663 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3664 if (!mmsgp) {
3665 return -TARGET_EFAULT;
3668 for (i = 0; i < vlen; i++) {
3669 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3670 if (is_error(ret)) {
3671 break;
3673 mmsgp[i].msg_len = tswap32(ret);
3674 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3675 if (flags & MSG_WAITFORONE) {
3676 flags |= MSG_DONTWAIT;
3680 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3682 /* Return number of datagrams sent if we sent any at all;
3683 * otherwise return the error.
3685 if (i) {
3686 return i;
3688 return ret;
3691 /* do_accept4() Must return target values and target errnos. */
3692 static abi_long do_accept4(int fd, abi_ulong target_addr,
3693 abi_ulong target_addrlen_addr, int flags)
3695 socklen_t addrlen;
3696 void *addr;
3697 abi_long ret;
3698 int host_flags;
3700 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3702 if (target_addr == 0) {
3703 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3706 /* linux returns EINVAL if addrlen pointer is invalid */
3707 if (get_user_u32(addrlen, target_addrlen_addr))
3708 return -TARGET_EINVAL;
3710 if ((int)addrlen < 0) {
3711 return -TARGET_EINVAL;
3714 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3715 return -TARGET_EINVAL;
3717 addr = alloca(addrlen);
3719 ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
3720 if (!is_error(ret)) {
3721 host_to_target_sockaddr(target_addr, addr, addrlen);
3722 if (put_user_u32(addrlen, target_addrlen_addr))
3723 ret = -TARGET_EFAULT;
3725 return ret;
3728 /* do_getpeername() Must return target values and target errnos. */
3729 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3730 abi_ulong target_addrlen_addr)
3732 socklen_t addrlen;
3733 void *addr;
3734 abi_long ret;
3736 if (get_user_u32(addrlen, target_addrlen_addr))
3737 return -TARGET_EFAULT;
3739 if ((int)addrlen < 0) {
3740 return -TARGET_EINVAL;
3743 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3744 return -TARGET_EFAULT;
3746 addr = alloca(addrlen);
3748 ret = get_errno(getpeername(fd, addr, &addrlen));
3749 if (!is_error(ret)) {
3750 host_to_target_sockaddr(target_addr, addr, addrlen);
3751 if (put_user_u32(addrlen, target_addrlen_addr))
3752 ret = -TARGET_EFAULT;
3754 return ret;
3757 /* do_getsockname() Must return target values and target errnos. */
3758 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3759 abi_ulong target_addrlen_addr)
3761 socklen_t addrlen;
3762 void *addr;
3763 abi_long ret;
3765 if (get_user_u32(addrlen, target_addrlen_addr))
3766 return -TARGET_EFAULT;
3768 if ((int)addrlen < 0) {
3769 return -TARGET_EINVAL;
3772 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3773 return -TARGET_EFAULT;
3775 addr = alloca(addrlen);
3777 ret = get_errno(getsockname(fd, addr, &addrlen));
3778 if (!is_error(ret)) {
3779 host_to_target_sockaddr(target_addr, addr, addrlen);
3780 if (put_user_u32(addrlen, target_addrlen_addr))
3781 ret = -TARGET_EFAULT;
3783 return ret;
3786 /* do_socketpair() Must return target values and target errnos. */
3787 static abi_long do_socketpair(int domain, int type, int protocol,
3788 abi_ulong target_tab_addr)
3790 int tab[2];
3791 abi_long ret;
3793 target_to_host_sock_type(&type);
3795 ret = get_errno(socketpair(domain, type, protocol, tab));
3796 if (!is_error(ret)) {
3797 if (put_user_s32(tab[0], target_tab_addr)
3798 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3799 ret = -TARGET_EFAULT;
3801 return ret;
3804 /* do_sendto() Must return target values and target errnos. */
3805 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3806 abi_ulong target_addr, socklen_t addrlen)
3808 void *addr;
3809 void *host_msg;
3810 void *copy_msg = NULL;
3811 abi_long ret;
3813 if ((int)addrlen < 0) {
3814 return -TARGET_EINVAL;
3817 host_msg = lock_user(VERIFY_READ, msg, len, 1);
3818 if (!host_msg)
3819 return -TARGET_EFAULT;
3820 if (fd_trans_target_to_host_data(fd)) {
3821 copy_msg = host_msg;
3822 host_msg = g_malloc(len);
3823 memcpy(host_msg, copy_msg, len);
3824 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3825 if (ret < 0) {
3826 goto fail;
3829 if (target_addr) {
3830 addr = alloca(addrlen+1);
3831 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3832 if (ret) {
3833 goto fail;
3835 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3836 } else {
3837 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3839 fail:
3840 if (copy_msg) {
3841 g_free(host_msg);
3842 host_msg = copy_msg;
3844 unlock_user(host_msg, msg, 0);
3845 return ret;
3848 /* do_recvfrom() Must return target values and target errnos. */
3849 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3850 abi_ulong target_addr,
3851 abi_ulong target_addrlen)
3853 socklen_t addrlen;
3854 void *addr;
3855 void *host_msg;
3856 abi_long ret;
3858 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3859 if (!host_msg)
3860 return -TARGET_EFAULT;
/* target_addr != 0 means the caller wants the source address back. */
3861 if (target_addr) {
3862 if (get_user_u32(addrlen, target_addrlen)) {
3863 ret = -TARGET_EFAULT;
3864 goto fail;
3866 if ((int)addrlen < 0) {
3867 ret = -TARGET_EINVAL;
3868 goto fail;
3870 addr = alloca(addrlen);
3871 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3872 addr, &addrlen));
3873 } else {
3874 addr = NULL; /* To keep compiler quiet. */
3875 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3877 if (!is_error(ret)) {
/* Optional per-fd payload fixup (e.g. netlink byte swapping). */
3878 if (fd_trans_host_to_target_data(fd)) {
3879 ret = fd_trans_host_to_target_data(fd)(host_msg, ret);
3881 if (target_addr) {
3882 host_to_target_sockaddr(target_addr, addr, addrlen);
3883 if (put_user_u32(addrlen, target_addrlen)) {
3884 ret = -TARGET_EFAULT;
3885 goto fail;
/* Success: copy the received data back to the guest buffer. */
3888 unlock_user(host_msg, msg, len);
3889 } else {
/* Error paths jump here; length 0 discards the guest buffer. */
3890 fail:
3891 unlock_user(host_msg, msg, 0);
3893 return ret;
3896 #ifdef TARGET_NR_socketcall
3897 /* do_socketcall() Must return target values and target errnos. */
/* Demultiplexer for the legacy socketcall(2) syscall: @num selects the
 * operation and @vptr points at its argument array in guest memory. */
3898 static abi_long do_socketcall(int num, abi_ulong vptr)
3900 static const unsigned ac[] = { /* number of arguments per call */
3901 [SOCKOP_socket] = 3, /* domain, type, protocol */
3902 [SOCKOP_bind] = 3, /* sockfd, addr, addrlen */
3903 [SOCKOP_connect] = 3, /* sockfd, addr, addrlen */
3904 [SOCKOP_listen] = 2, /* sockfd, backlog */
3905 [SOCKOP_accept] = 3, /* sockfd, addr, addrlen */
3906 [SOCKOP_accept4] = 4, /* sockfd, addr, addrlen, flags */
3907 [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
3908 [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
3909 [SOCKOP_socketpair] = 4, /* domain, type, protocol, tab */
3910 [SOCKOP_send] = 4, /* sockfd, msg, len, flags */
3911 [SOCKOP_recv] = 4, /* sockfd, msg, len, flags */
3912 [SOCKOP_sendto] = 6, /* sockfd, msg, len, flags, addr, addrlen */
3913 [SOCKOP_recvfrom] = 6, /* sockfd, msg, len, flags, addr, addrlen */
3914 [SOCKOP_shutdown] = 2, /* sockfd, how */
3915 [SOCKOP_sendmsg] = 3, /* sockfd, msg, flags */
3916 [SOCKOP_recvmsg] = 3, /* sockfd, msg, flags */
3917 [SOCKOP_sendmmsg] = 4, /* sockfd, msgvec, vlen, flags */
3918 [SOCKOP_recvmmsg] = 4, /* sockfd, msgvec, vlen, flags */
3919 [SOCKOP_setsockopt] = 5, /* sockfd, level, optname, optval, optlen */
3920 [SOCKOP_getsockopt] = 5, /* sockfd, level, optname, optval, optlen */
3922 abi_long a[6]; /* max 6 args */
3924 /* first, collect the arguments in a[] according to ac[] */
3925 if (num >= 0 && num < ARRAY_SIZE(ac)) {
3926 unsigned i;
3927 assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
3928 for (i = 0; i < ac[num]; ++i) {
3929 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3930 return -TARGET_EFAULT;
3935 /* now when we have the args, actually handle the call */
3936 switch (num) {
3937 case SOCKOP_socket: /* domain, type, protocol */
3938 return do_socket(a[0], a[1], a[2]);
3939 case SOCKOP_bind: /* sockfd, addr, addrlen */
3940 return do_bind(a[0], a[1], a[2]);
3941 case SOCKOP_connect: /* sockfd, addr, addrlen */
3942 return do_connect(a[0], a[1], a[2]);
3943 case SOCKOP_listen: /* sockfd, backlog */
3944 return get_errno(listen(a[0], a[1]));
3945 case SOCKOP_accept: /* sockfd, addr, addrlen */
3946 return do_accept4(a[0], a[1], a[2], 0);
3947 case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
3948 return do_accept4(a[0], a[1], a[2], a[3]);
3949 case SOCKOP_getsockname: /* sockfd, addr, addrlen */
3950 return do_getsockname(a[0], a[1], a[2]);
3951 case SOCKOP_getpeername: /* sockfd, addr, addrlen */
3952 return do_getpeername(a[0], a[1], a[2]);
3953 case SOCKOP_socketpair: /* domain, type, protocol, tab */
3954 return do_socketpair(a[0], a[1], a[2], a[3]);
3955 case SOCKOP_send: /* sockfd, msg, len, flags */
3956 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3957 case SOCKOP_recv: /* sockfd, msg, len, flags */
3958 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3959 case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
3960 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3961 case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
3962 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3963 case SOCKOP_shutdown: /* sockfd, how */
3964 return get_errno(shutdown(a[0], a[1]));
3965 case SOCKOP_sendmsg: /* sockfd, msg, flags */
3966 return do_sendrecvmsg(a[0], a[1], a[2], 1);
3967 case SOCKOP_recvmsg: /* sockfd, msg, flags */
3968 return do_sendrecvmsg(a[0], a[1], a[2], 0);
3969 case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */
3970 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3971 case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */
3972 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3973 case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
3974 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3975 case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
3976 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3977 default:
3978 gemu_log("Unsupported socketcall: %d\n", num);
3979 return -TARGET_ENOSYS;
3982 #endif
3984 #define N_SHM_REGIONS 32

/* Book-keeping for guest SysV shared-memory attachments; presumably
 * consulted by the shmat()/shmdt() emulation elsewhere in this file —
 * confirm against those handlers. */
3986 static struct shm_region {
3987 abi_ulong start;
3988 abi_ulong size;
3989 bool in_use;
3990 } shm_regions[N_SHM_REGIONS];
3992 #ifndef TARGET_SEMID64_DS
3993 /* asm-generic version of this struct */
/* Guest-layout semid64_ds; on 32-bit ABIs the kernel layout carries a
 * padding word after each time field, reproduced here. */
3994 struct target_semid64_ds
3996 struct target_ipc_perm sem_perm;
3997 abi_ulong sem_otime;
3998 #if TARGET_ABI_BITS == 32
3999 abi_ulong __unused1;
4000 #endif
4001 abi_ulong sem_ctime;
4002 #if TARGET_ABI_BITS == 32
4003 abi_ulong __unused2;
4004 #endif
4005 abi_ulong sem_nsems;
4006 abi_ulong __unused3;
4007 abi_ulong __unused4;
4009 #endif
4011 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
4012 abi_ulong target_addr)
4014 struct target_ipc_perm *target_ip;
4015 struct target_semid64_ds *target_sd;
4017 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4018 return -TARGET_EFAULT;
4019 target_ip = &(target_sd->sem_perm);
4020 host_ip->__key = tswap32(target_ip->__key);
4021 host_ip->uid = tswap32(target_ip->uid);
4022 host_ip->gid = tswap32(target_ip->gid);
4023 host_ip->cuid = tswap32(target_ip->cuid);
4024 host_ip->cgid = tswap32(target_ip->cgid);
4025 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4026 host_ip->mode = tswap32(target_ip->mode);
4027 #else
4028 host_ip->mode = tswap16(target_ip->mode);
4029 #endif
4030 #if defined(TARGET_PPC)
4031 host_ip->__seq = tswap32(target_ip->__seq);
4032 #else
4033 host_ip->__seq = tswap16(target_ip->__seq);
4034 #endif
4035 unlock_user_struct(target_sd, target_addr, 0);
4036 return 0;
4039 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
4040 struct ipc_perm *host_ip)
4042 struct target_ipc_perm *target_ip;
4043 struct target_semid64_ds *target_sd;
4045 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4046 return -TARGET_EFAULT;
4047 target_ip = &(target_sd->sem_perm);
4048 target_ip->__key = tswap32(host_ip->__key);
4049 target_ip->uid = tswap32(host_ip->uid);
4050 target_ip->gid = tswap32(host_ip->gid);
4051 target_ip->cuid = tswap32(host_ip->cuid);
4052 target_ip->cgid = tswap32(host_ip->cgid);
4053 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4054 target_ip->mode = tswap32(host_ip->mode);
4055 #else
4056 target_ip->mode = tswap16(host_ip->mode);
4057 #endif
4058 #if defined(TARGET_PPC)
4059 target_ip->__seq = tswap32(host_ip->__seq);
4060 #else
4061 target_ip->__seq = tswap16(host_ip->__seq);
4062 #endif
4063 unlock_user_struct(target_sd, target_addr, 1);
4064 return 0;
4067 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
4068 abi_ulong target_addr)
4070 struct target_semid64_ds *target_sd;
4072 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4073 return -TARGET_EFAULT;
4074 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
4075 return -TARGET_EFAULT;
4076 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
4077 host_sd->sem_otime = tswapal(target_sd->sem_otime);
4078 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
4079 unlock_user_struct(target_sd, target_addr, 0);
4080 return 0;
4083 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
4084 struct semid_ds *host_sd)
4086 struct target_semid64_ds *target_sd;
4088 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4089 return -TARGET_EFAULT;
4090 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
4091 return -TARGET_EFAULT;
4092 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
4093 target_sd->sem_otime = tswapal(host_sd->sem_otime);
4094 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
4095 unlock_user_struct(target_sd, target_addr, 1);
4096 return 0;
/* Guest layout of struct seminfo, returned by semctl(IPC_INFO/SEM_INFO);
 * all fields are plain ints as in the host definition. */
4099 struct target_seminfo {
4100 int semmap;
4101 int semmni;
4102 int semmns;
4103 int semmnu;
4104 int semmsl;
4105 int semopm;
4106 int semume;
4107 int semusz;
4108 int semvmx;
4109 int semaem;
4112 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
4113 struct seminfo *host_seminfo)
4115 struct target_seminfo *target_seminfo;
4116 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
4117 return -TARGET_EFAULT;
4118 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
4119 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
4120 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
4121 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
4122 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
4123 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
4124 __put_user(host_seminfo->semume, &target_seminfo->semume);
4125 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
4126 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
4127 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
4128 unlock_user_struct(target_seminfo, target_addr, 1);
4129 return 0;
/* Host-side semctl() argument union (see semctl(2)); the caller must
 * define it itself on glibc systems. */
4132 union semun {
4133 int val;
4134 struct semid_ds *buf;
4135 unsigned short *array;
4136 struct seminfo *__buf;

/* Guest-side view of the same union: pointer members are guest addresses. */
4139 union target_semun {
4140 int val;
4141 abi_ulong buf;
4142 abi_ulong array;
4143 abi_ulong __buf;
4146 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
4147 abi_ulong target_addr)
4149 int nsems;
4150 unsigned short *array;
4151 union semun semun;
4152 struct semid_ds semid_ds;
4153 int i, ret;
4155 semun.buf = &semid_ds;
4157 ret = semctl(semid, 0, IPC_STAT, semun);
4158 if (ret == -1)
4159 return get_errno(ret);
4161 nsems = semid_ds.sem_nsems;
4163 *host_array = g_try_new(unsigned short, nsems);
4164 if (!*host_array) {
4165 return -TARGET_ENOMEM;
4167 array = lock_user(VERIFY_READ, target_addr,
4168 nsems*sizeof(unsigned short), 1);
4169 if (!array) {
4170 g_free(*host_array);
4171 return -TARGET_EFAULT;
4174 for(i=0; i<nsems; i++) {
4175 __get_user((*host_array)[i], &array[i]);
4177 unlock_user(array, target_addr, 0);
4179 return 0;
4182 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4183 unsigned short **host_array)
4185 int nsems;
4186 unsigned short *array;
4187 union semun semun;
4188 struct semid_ds semid_ds;
4189 int i, ret;
4191 semun.buf = &semid_ds;
4193 ret = semctl(semid, 0, IPC_STAT, semun);
4194 if (ret == -1)
4195 return get_errno(ret);
4197 nsems = semid_ds.sem_nsems;
4199 array = lock_user(VERIFY_WRITE, target_addr,
4200 nsems*sizeof(unsigned short), 0);
4201 if (!array)
4202 return -TARGET_EFAULT;
4204 for(i=0; i<nsems; i++) {
4205 __put_user((*host_array)[i], &array[i]);
4207 g_free(*host_array);
4208 unlock_user(array, target_addr, 1);
4210 return 0;
/* Emulate semctl(2).  The command is masked with 0xff — presumably to
 * strip the IPC_64 flag the guest may have OR'd in; confirm against the
 * target ipc headers. */
4213 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4214 abi_ulong target_arg)
4216 union target_semun target_su = { .buf = target_arg };
4217 union semun arg;
4218 struct semid_ds dsarg;
4219 unsigned short *array = NULL;
4220 struct seminfo seminfo;
4221 abi_long ret = -TARGET_EINVAL;
4222 abi_long err;
4223 cmd &= 0xff;
4225 switch( cmd ) {
4226 case GETVAL:
4227 case SETVAL:
4228 /* In 64 bit cross-endian situations, we will erroneously pick up
4229 * the wrong half of the union for the "val" element. To rectify
4230 * this, the entire 8-byte structure is byteswapped, followed by
4231 * a swap of the 4 byte val field. In other cases, the data is
4232 * already in proper host byte order. */
4233 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4234 target_su.buf = tswapal(target_su.buf);
4235 arg.val = tswap32(target_su.val);
4236 } else {
4237 arg.val = target_su.val;
4239 ret = get_errno(semctl(semid, semnum, cmd, arg));
4240 break;
4241 case GETALL:
4242 case SETALL:
/* target_to_host_semarray() allocates array; host_to_target_semarray()
 * writes it back to the guest and frees it. */
4243 err = target_to_host_semarray(semid, &array, target_su.array);
4244 if (err)
4245 return err;
4246 arg.array = array;
4247 ret = get_errno(semctl(semid, semnum, cmd, arg));
4248 err = host_to_target_semarray(semid, target_su.array, &array);
4249 if (err)
4250 return err;
4251 break;
4252 case IPC_STAT:
4253 case IPC_SET:
4254 case SEM_STAT:
/* Round-trip the semid_ds through host format. */
4255 err = target_to_host_semid_ds(&dsarg, target_su.buf);
4256 if (err)
4257 return err;
4258 arg.buf = &dsarg;
4259 ret = get_errno(semctl(semid, semnum, cmd, arg));
4260 err = host_to_target_semid_ds(target_su.buf, &dsarg);
4261 if (err)
4262 return err;
4263 break;
4264 case IPC_INFO:
4265 case SEM_INFO:
4266 arg.__buf = &seminfo;
4267 ret = get_errno(semctl(semid, semnum, cmd, arg));
4268 err = host_to_target_seminfo(target_su.__buf, &seminfo);
4269 if (err)
4270 return err;
4271 break;
4272 case IPC_RMID:
4273 case GETPID:
4274 case GETNCNT:
4275 case GETZCNT:
/* These commands take no argument. */
4276 ret = get_errno(semctl(semid, semnum, cmd, NULL));
4277 break;
4280 return ret;
/* Guest layout of struct sembuf (semop(2)); matches the host field order. */
4283 struct target_sembuf {
4284 unsigned short sem_num;
4285 short sem_op;
4286 short sem_flg;
4289 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4290 abi_ulong target_addr,
4291 unsigned nsops)
4293 struct target_sembuf *target_sembuf;
4294 int i;
4296 target_sembuf = lock_user(VERIFY_READ, target_addr,
4297 nsops*sizeof(struct target_sembuf), 1);
4298 if (!target_sembuf)
4299 return -TARGET_EFAULT;
4301 for(i=0; i<nsops; i++) {
4302 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4303 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4304 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4307 unlock_user(target_sembuf, target_addr, 0);
4309 return 0;
4312 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
4314 struct sembuf sops[nsops];
4316 if (target_to_host_sembuf(sops, ptr, nsops))
4317 return -TARGET_EFAULT;
4319 return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
/* Guest-layout msqid_ds; 32-bit ABIs carry a padding word after each time
 * field, mirroring the kernel's 64-bit time layout. */
4322 struct target_msqid_ds
4324 struct target_ipc_perm msg_perm;
4325 abi_ulong msg_stime;
4326 #if TARGET_ABI_BITS == 32
4327 abi_ulong __unused1;
4328 #endif
4329 abi_ulong msg_rtime;
4330 #if TARGET_ABI_BITS == 32
4331 abi_ulong __unused2;
4332 #endif
4333 abi_ulong msg_ctime;
4334 #if TARGET_ABI_BITS == 32
4335 abi_ulong __unused3;
4336 #endif
4337 abi_ulong __msg_cbytes;
4338 abi_ulong msg_qnum;
4339 abi_ulong msg_qbytes;
4340 abi_ulong msg_lspid;
4341 abi_ulong msg_lrpid;
4342 abi_ulong __unused4;
4343 abi_ulong __unused5;
4346 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4347 abi_ulong target_addr)
4349 struct target_msqid_ds *target_md;
4351 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4352 return -TARGET_EFAULT;
4353 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4354 return -TARGET_EFAULT;
4355 host_md->msg_stime = tswapal(target_md->msg_stime);
4356 host_md->msg_rtime = tswapal(target_md->msg_rtime);
4357 host_md->msg_ctime = tswapal(target_md->msg_ctime);
4358 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4359 host_md->msg_qnum = tswapal(target_md->msg_qnum);
4360 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4361 host_md->msg_lspid = tswapal(target_md->msg_lspid);
4362 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4363 unlock_user_struct(target_md, target_addr, 0);
4364 return 0;
4367 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4368 struct msqid_ds *host_md)
4370 struct target_msqid_ds *target_md;
4372 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4373 return -TARGET_EFAULT;
4374 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4375 return -TARGET_EFAULT;
4376 target_md->msg_stime = tswapal(host_md->msg_stime);
4377 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4378 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4379 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4380 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4381 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4382 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4383 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4384 unlock_user_struct(target_md, target_addr, 1);
4385 return 0;
/* Guest layout of struct msginfo (msgctl IPC_INFO/MSG_INFO). */
4388 struct target_msginfo {
4389 int msgpool;
4390 int msgmap;
4391 int msgmax;
4392 int msgmnb;
4393 int msgmni;
4394 int msgssz;
4395 int msgtql;
4396 unsigned short int msgseg;
4399 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4400 struct msginfo *host_msginfo)
4402 struct target_msginfo *target_msginfo;
4403 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4404 return -TARGET_EFAULT;
4405 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4406 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4407 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4408 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4409 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4410 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4411 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4412 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4413 unlock_user_struct(target_msginfo, target_addr, 1);
4414 return 0;
/* Emulate msgctl(2).  The command is masked with 0xff — presumably to
 * strip the IPC_64 flag the guest may have OR'd in; confirm against the
 * target ipc headers. */
4417 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4419 struct msqid_ds dsarg;
4420 struct msginfo msginfo;
4421 abi_long ret = -TARGET_EINVAL;
4423 cmd &= 0xff;
4425 switch (cmd) {
4426 case IPC_STAT:
4427 case IPC_SET:
4428 case MSG_STAT:
/* Round-trip the msqid_ds through host format. */
4429 if (target_to_host_msqid_ds(&dsarg,ptr))
4430 return -TARGET_EFAULT;
4431 ret = get_errno(msgctl(msgid, cmd, &dsarg));
4432 if (host_to_target_msqid_ds(ptr,&dsarg))
4433 return -TARGET_EFAULT;
4434 break;
4435 case IPC_RMID:
4436 ret = get_errno(msgctl(msgid, cmd, NULL));
4437 break;
4438 case IPC_INFO:
4439 case MSG_INFO:
/* The kernel writes a msginfo through the msqid_ds pointer here. */
4440 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4441 if (host_to_target_msginfo(ptr, &msginfo))
4442 return -TARGET_EFAULT;
4443 break;
4446 return ret;
4449 struct target_msgbuf {
4450 abi_long mtype;
4451 char mtext[1];
4454 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4455 ssize_t msgsz, int msgflg)
4457 struct target_msgbuf *target_mb;
4458 struct msgbuf *host_mb;
4459 abi_long ret = 0;
4461 if (msgsz < 0) {
4462 return -TARGET_EINVAL;
4465 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4466 return -TARGET_EFAULT;
4467 host_mb = g_try_malloc(msgsz + sizeof(long));
4468 if (!host_mb) {
4469 unlock_user_struct(target_mb, msgp, 0);
4470 return -TARGET_ENOMEM;
4472 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4473 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4474 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4475 g_free(host_mb);
4476 unlock_user_struct(target_mb, msgp, 0);
4478 return ret;
4481 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4482 ssize_t msgsz, abi_long msgtyp,
4483 int msgflg)
4485 struct target_msgbuf *target_mb;
4486 char *target_mtext;
4487 struct msgbuf *host_mb;
4488 abi_long ret = 0;
4490 if (msgsz < 0) {
4491 return -TARGET_EINVAL;
4494 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4495 return -TARGET_EFAULT;
4497 host_mb = g_try_malloc(msgsz + sizeof(long));
4498 if (!host_mb) {
4499 ret = -TARGET_ENOMEM;
4500 goto end;
4502 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4504 if (ret > 0) {
4505 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4506 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4507 if (!target_mtext) {
4508 ret = -TARGET_EFAULT;
4509 goto end;
4511 memcpy(target_mb->mtext, host_mb->mtext, ret);
4512 unlock_user(target_mtext, target_mtext_addr, ret);
4515 target_mb->mtype = tswapal(host_mb->mtype);
4517 end:
4518 if (target_mb)
4519 unlock_user_struct(target_mb, msgp, 1);
4520 g_free(host_mb);
4521 return ret;
4524 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4525 abi_ulong target_addr)
4527 struct target_shmid_ds *target_sd;
4529 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4530 return -TARGET_EFAULT;
4531 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4532 return -TARGET_EFAULT;
4533 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4534 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4535 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4536 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4537 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4538 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4539 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4540 unlock_user_struct(target_sd, target_addr, 0);
4541 return 0;
4544 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4545 struct shmid_ds *host_sd)
4547 struct target_shmid_ds *target_sd;
4549 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4550 return -TARGET_EFAULT;
4551 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4552 return -TARGET_EFAULT;
4553 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4554 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4555 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4556 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4557 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4558 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4559 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4560 unlock_user_struct(target_sd, target_addr, 1);
4561 return 0;
4564 struct target_shminfo {
4565 abi_ulong shmmax;
4566 abi_ulong shmmin;
4567 abi_ulong shmmni;
4568 abi_ulong shmseg;
4569 abi_ulong shmall;
4572 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4573 struct shminfo *host_shminfo)
4575 struct target_shminfo *target_shminfo;
4576 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4577 return -TARGET_EFAULT;
4578 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4579 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4580 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4581 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4582 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4583 unlock_user_struct(target_shminfo, target_addr, 1);
4584 return 0;
4587 struct target_shm_info {
4588 int used_ids;
4589 abi_ulong shm_tot;
4590 abi_ulong shm_rss;
4591 abi_ulong shm_swp;
4592 abi_ulong swap_attempts;
4593 abi_ulong swap_successes;
4596 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4597 struct shm_info *host_shm_info)
4599 struct target_shm_info *target_shm_info;
4600 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4601 return -TARGET_EFAULT;
4602 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4603 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4604 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4605 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4606 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4607 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4608 unlock_user_struct(target_shm_info, target_addr, 1);
4609 return 0;
4612 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4614 struct shmid_ds dsarg;
4615 struct shminfo shminfo;
4616 struct shm_info shm_info;
4617 abi_long ret = -TARGET_EINVAL;
4619 cmd &= 0xff;
4621 switch(cmd) {
4622 case IPC_STAT:
4623 case IPC_SET:
4624 case SHM_STAT:
4625 if (target_to_host_shmid_ds(&dsarg, buf))
4626 return -TARGET_EFAULT;
4627 ret = get_errno(shmctl(shmid, cmd, &dsarg));
4628 if (host_to_target_shmid_ds(buf, &dsarg))
4629 return -TARGET_EFAULT;
4630 break;
4631 case IPC_INFO:
4632 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4633 if (host_to_target_shminfo(buf, &shminfo))
4634 return -TARGET_EFAULT;
4635 break;
4636 case SHM_INFO:
4637 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4638 if (host_to_target_shm_info(buf, &shm_info))
4639 return -TARGET_EFAULT;
4640 break;
4641 case IPC_RMID:
4642 case SHM_LOCK:
4643 case SHM_UNLOCK:
4644 ret = get_errno(shmctl(shmid, cmd, NULL));
4645 break;
4648 return ret;
4651 #ifndef TARGET_FORCE_SHMLBA
4652 /* For most architectures, SHMLBA is the same as the page size;
4653 * some architectures have larger values, in which case they should
4654 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4655 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4656 * and defining its own value for SHMLBA.
4658 * The kernel also permits SHMLBA to be set by the architecture to a
4659 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4660 * this means that addresses are rounded to the large size if
4661 * SHM_RND is set but addresses not aligned to that size are not rejected
4662 * as long as they are at least page-aligned. Since the only architecture
4663 * which uses this is ia64 this code doesn't provide for that oddity.
4665 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4667 return TARGET_PAGE_SIZE;
4669 #endif
4671 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4672 int shmid, abi_ulong shmaddr, int shmflg)
4674 abi_long raddr;
4675 void *host_raddr;
4676 struct shmid_ds shm_info;
4677 int i,ret;
4678 abi_ulong shmlba;
4680 /* find out the length of the shared memory segment */
4681 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4682 if (is_error(ret)) {
4683 /* can't get length, bail out */
4684 return ret;
4687 shmlba = target_shmlba(cpu_env);
4689 if (shmaddr & (shmlba - 1)) {
4690 if (shmflg & SHM_RND) {
4691 shmaddr &= ~(shmlba - 1);
4692 } else {
4693 return -TARGET_EINVAL;
4697 mmap_lock();
4699 if (shmaddr)
4700 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4701 else {
4702 abi_ulong mmap_start;
4704 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
4706 if (mmap_start == -1) {
4707 errno = ENOMEM;
4708 host_raddr = (void *)-1;
4709 } else
4710 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4713 if (host_raddr == (void *)-1) {
4714 mmap_unlock();
4715 return get_errno((long)host_raddr);
4717 raddr=h2g((unsigned long)host_raddr);
4719 page_set_flags(raddr, raddr + shm_info.shm_segsz,
4720 PAGE_VALID | PAGE_READ |
4721 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4723 for (i = 0; i < N_SHM_REGIONS; i++) {
4724 if (!shm_regions[i].in_use) {
4725 shm_regions[i].in_use = true;
4726 shm_regions[i].start = raddr;
4727 shm_regions[i].size = shm_info.shm_segsz;
4728 break;
4732 mmap_unlock();
4733 return raddr;
4737 static inline abi_long do_shmdt(abi_ulong shmaddr)
4739 int i;
4741 for (i = 0; i < N_SHM_REGIONS; ++i) {
4742 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4743 shm_regions[i].in_use = false;
4744 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4745 break;
4749 return get_errno(shmdt(g2h(shmaddr)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos.
 *
 * Demultiplexer for the legacy ipc(2) syscall: the low 16 bits of
 * 'call' select the operation, the high 16 bits carry the version.
 */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* version 0 passes msgp/msgtyp indirectly via a kludge
                 * struct in guest memory */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* attach address is returned through *third */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
4859 /* kernel structure types definitions */
4861 #define STRUCT(name, ...) STRUCT_ ## name,
4862 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4863 enum {
4864 #include "syscall_types.h"
4865 STRUCT_MAX
4867 #undef STRUCT
4868 #undef STRUCT_SPECIAL
4870 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4871 #define STRUCT_SPECIAL(name)
4872 #include "syscall_types.h"
4873 #undef STRUCT
4874 #undef STRUCT_SPECIAL
4876 typedef struct IOCTLEntry IOCTLEntry;
4878 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4879 int fd, int cmd, abi_long arg);
4881 struct IOCTLEntry {
4882 int target_cmd;
4883 unsigned int host_cmd;
4884 const char *name;
4885 int access;
4886 do_ioctl_fn *do_ioctl;
4887 const argtype arg_type[5];
4890 #define IOC_R 0x0001
4891 #define IOC_W 0x0002
4892 #define IOC_RW (IOC_R | IOC_W)
4894 #define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

/* Special-case handler for FS_IOC_FIEMAP: the argument is a struct fiemap
 * followed by a variable-length array of struct fiemap_extent sized by
 * fm_extent_count, which the kernel fills in.  Returns the ioctl result
 * or a -TARGET_* errno.
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof(*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4985 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4986 int fd, int cmd, abi_long arg)
4988 const argtype *arg_type = ie->arg_type;
4989 int target_size;
4990 void *argptr;
4991 int ret;
4992 struct ifconf *host_ifconf;
4993 uint32_t outbufsz;
4994 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4995 int target_ifreq_size;
4996 int nb_ifreq;
4997 int free_buf = 0;
4998 int i;
4999 int target_ifc_len;
5000 abi_long target_ifc_buf;
5001 int host_ifc_len;
5002 char *host_ifc_buf;
5004 assert(arg_type[0] == TYPE_PTR);
5005 assert(ie->access == IOC_RW);
5007 arg_type++;
5008 target_size = thunk_type_size(arg_type, 0);
5010 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5011 if (!argptr)
5012 return -TARGET_EFAULT;
5013 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5014 unlock_user(argptr, arg, 0);
5016 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
5017 target_ifc_len = host_ifconf->ifc_len;
5018 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
5020 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
5021 nb_ifreq = target_ifc_len / target_ifreq_size;
5022 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
5024 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
5025 if (outbufsz > MAX_STRUCT_SIZE) {
5026 /* We can't fit all the extents into the fixed size buffer.
5027 * Allocate one that is large enough and use it instead.
5029 host_ifconf = malloc(outbufsz);
5030 if (!host_ifconf) {
5031 return -TARGET_ENOMEM;
5033 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
5034 free_buf = 1;
5036 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
5038 host_ifconf->ifc_len = host_ifc_len;
5039 host_ifconf->ifc_buf = host_ifc_buf;
5041 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
5042 if (!is_error(ret)) {
5043 /* convert host ifc_len to target ifc_len */
5045 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
5046 target_ifc_len = nb_ifreq * target_ifreq_size;
5047 host_ifconf->ifc_len = target_ifc_len;
5049 /* restore target ifc_buf */
5051 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
5053 /* copy struct ifconf to target user */
5055 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5056 if (!argptr)
5057 return -TARGET_EFAULT;
5058 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
5059 unlock_user(argptr, arg, target_size);
5061 /* copy ifreq[] to target user */
5063 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
5064 for (i = 0; i < nb_ifreq ; i++) {
5065 thunk_convert(argptr + i * target_ifreq_size,
5066 host_ifc_buf + i * sizeof(struct ifreq),
5067 ifreq_arg_type, THUNK_TARGET);
5069 unlock_user(argptr, target_ifc_buf, target_ifc_len);
5072 if (free_buf) {
5073 free(host_ifconf);
5076 return ret;
5079 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5080 int cmd, abi_long arg)
5082 void *argptr;
5083 struct dm_ioctl *host_dm;
5084 abi_long guest_data;
5085 uint32_t guest_data_size;
5086 int target_size;
5087 const argtype *arg_type = ie->arg_type;
5088 abi_long ret;
5089 void *big_buf = NULL;
5090 char *host_data;
5092 arg_type++;
5093 target_size = thunk_type_size(arg_type, 0);
5094 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5095 if (!argptr) {
5096 ret = -TARGET_EFAULT;
5097 goto out;
5099 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5100 unlock_user(argptr, arg, 0);
5102 /* buf_temp is too small, so fetch things into a bigger buffer */
5103 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5104 memcpy(big_buf, buf_temp, target_size);
5105 buf_temp = big_buf;
5106 host_dm = big_buf;
5108 guest_data = arg + host_dm->data_start;
5109 if ((guest_data - arg) < 0) {
5110 ret = -TARGET_EINVAL;
5111 goto out;
5113 guest_data_size = host_dm->data_size - host_dm->data_start;
5114 host_data = (char*)host_dm + host_dm->data_start;
5116 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5117 if (!argptr) {
5118 ret = -TARGET_EFAULT;
5119 goto out;
5122 switch (ie->host_cmd) {
5123 case DM_REMOVE_ALL:
5124 case DM_LIST_DEVICES:
5125 case DM_DEV_CREATE:
5126 case DM_DEV_REMOVE:
5127 case DM_DEV_SUSPEND:
5128 case DM_DEV_STATUS:
5129 case DM_DEV_WAIT:
5130 case DM_TABLE_STATUS:
5131 case DM_TABLE_CLEAR:
5132 case DM_TABLE_DEPS:
5133 case DM_LIST_VERSIONS:
5134 /* no input data */
5135 break;
5136 case DM_DEV_RENAME:
5137 case DM_DEV_SET_GEOMETRY:
5138 /* data contains only strings */
5139 memcpy(host_data, argptr, guest_data_size);
5140 break;
5141 case DM_TARGET_MSG:
5142 memcpy(host_data, argptr, guest_data_size);
5143 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5144 break;
5145 case DM_TABLE_LOAD:
5147 void *gspec = argptr;
5148 void *cur_data = host_data;
5149 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5150 int spec_size = thunk_type_size(arg_type, 0);
5151 int i;
5153 for (i = 0; i < host_dm->target_count; i++) {
5154 struct dm_target_spec *spec = cur_data;
5155 uint32_t next;
5156 int slen;
5158 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5159 slen = strlen((char*)gspec + spec_size) + 1;
5160 next = spec->next;
5161 spec->next = sizeof(*spec) + slen;
5162 strcpy((char*)&spec[1], gspec + spec_size);
5163 gspec += next;
5164 cur_data += spec->next;
5166 break;
5168 default:
5169 ret = -TARGET_EINVAL;
5170 unlock_user(argptr, guest_data, 0);
5171 goto out;
5173 unlock_user(argptr, guest_data, 0);
5175 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5176 if (!is_error(ret)) {
5177 guest_data = arg + host_dm->data_start;
5178 guest_data_size = host_dm->data_size - host_dm->data_start;
5179 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5180 switch (ie->host_cmd) {
5181 case DM_REMOVE_ALL:
5182 case DM_DEV_CREATE:
5183 case DM_DEV_REMOVE:
5184 case DM_DEV_RENAME:
5185 case DM_DEV_SUSPEND:
5186 case DM_DEV_STATUS:
5187 case DM_TABLE_LOAD:
5188 case DM_TABLE_CLEAR:
5189 case DM_TARGET_MSG:
5190 case DM_DEV_SET_GEOMETRY:
5191 /* no return data */
5192 break;
5193 case DM_LIST_DEVICES:
5195 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5196 uint32_t remaining_data = guest_data_size;
5197 void *cur_data = argptr;
5198 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5199 int nl_size = 12; /* can't use thunk_size due to alignment */
5201 while (1) {
5202 uint32_t next = nl->next;
5203 if (next) {
5204 nl->next = nl_size + (strlen(nl->name) + 1);
5206 if (remaining_data < nl->next) {
5207 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5208 break;
5210 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5211 strcpy(cur_data + nl_size, nl->name);
5212 cur_data += nl->next;
5213 remaining_data -= nl->next;
5214 if (!next) {
5215 break;
5217 nl = (void*)nl + next;
5219 break;
5221 case DM_DEV_WAIT:
5222 case DM_TABLE_STATUS:
5224 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5225 void *cur_data = argptr;
5226 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5227 int spec_size = thunk_type_size(arg_type, 0);
5228 int i;
5230 for (i = 0; i < host_dm->target_count; i++) {
5231 uint32_t next = spec->next;
5232 int slen = strlen((char*)&spec[1]) + 1;
5233 spec->next = (cur_data - argptr) + spec_size + slen;
5234 if (guest_data_size < spec->next) {
5235 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5236 break;
5238 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5239 strcpy(cur_data + spec_size, (char*)&spec[1]);
5240 cur_data = argptr + spec->next;
5241 spec = (void*)host_dm + host_dm->data_start + next;
5243 break;
5245 case DM_TABLE_DEPS:
5247 void *hdata = (void*)host_dm + host_dm->data_start;
5248 int count = *(uint32_t*)hdata;
5249 uint64_t *hdev = hdata + 8;
5250 uint64_t *gdev = argptr + 8;
5251 int i;
5253 *(uint32_t*)argptr = tswap32(count);
5254 for (i = 0; i < count; i++) {
5255 *gdev = tswap64(*hdev);
5256 gdev++;
5257 hdev++;
5259 break;
5261 case DM_LIST_VERSIONS:
5263 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5264 uint32_t remaining_data = guest_data_size;
5265 void *cur_data = argptr;
5266 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5267 int vers_size = thunk_type_size(arg_type, 0);
5269 while (1) {
5270 uint32_t next = vers->next;
5271 if (next) {
5272 vers->next = vers_size + (strlen(vers->name) + 1);
5274 if (remaining_data < vers->next) {
5275 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5276 break;
5278 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5279 strcpy(cur_data + vers_size, vers->name);
5280 cur_data += vers->next;
5281 remaining_data -= vers->next;
5282 if (!next) {
5283 break;
5285 vers = (void*)vers + next;
5287 break;
5289 default:
5290 unlock_user(argptr, guest_data, 0);
5291 ret = -TARGET_EINVAL;
5292 goto out;
5294 unlock_user(argptr, guest_data, guest_data_size);
5296 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5297 if (!argptr) {
5298 ret = -TARGET_EFAULT;
5299 goto out;
5301 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5302 unlock_user(argptr, arg, target_size);
5304 out:
5305 g_free(big_buf);
5306 return ret;
5309 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5310 int cmd, abi_long arg)
5312 void *argptr;
5313 int target_size;
5314 const argtype *arg_type = ie->arg_type;
5315 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5316 abi_long ret;
5318 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5319 struct blkpg_partition host_part;
5321 /* Read and convert blkpg */
5322 arg_type++;
5323 target_size = thunk_type_size(arg_type, 0);
5324 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5325 if (!argptr) {
5326 ret = -TARGET_EFAULT;
5327 goto out;
5329 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5330 unlock_user(argptr, arg, 0);
5332 switch (host_blkpg->op) {
5333 case BLKPG_ADD_PARTITION:
5334 case BLKPG_DEL_PARTITION:
5335 /* payload is struct blkpg_partition */
5336 break;
5337 default:
5338 /* Unknown opcode */
5339 ret = -TARGET_EINVAL;
5340 goto out;
5343 /* Read and convert blkpg->data */
5344 arg = (abi_long)(uintptr_t)host_blkpg->data;
5345 target_size = thunk_type_size(part_arg_type, 0);
5346 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5347 if (!argptr) {
5348 ret = -TARGET_EFAULT;
5349 goto out;
5351 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5352 unlock_user(argptr, arg, 0);
5354 /* Swizzle the data pointer to our local copy and call! */
5355 host_blkpg->data = &host_part;
5356 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5358 out:
5359 return ret;
5362 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5363 int fd, int cmd, abi_long arg)
5365 const argtype *arg_type = ie->arg_type;
5366 const StructEntry *se;
5367 const argtype *field_types;
5368 const int *dst_offsets, *src_offsets;
5369 int target_size;
5370 void *argptr;
5371 abi_ulong *target_rt_dev_ptr;
5372 unsigned long *host_rt_dev_ptr;
5373 abi_long ret;
5374 int i;
5376 assert(ie->access == IOC_W);
5377 assert(*arg_type == TYPE_PTR);
5378 arg_type++;
5379 assert(*arg_type == TYPE_STRUCT);
5380 target_size = thunk_type_size(arg_type, 0);
5381 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5382 if (!argptr) {
5383 return -TARGET_EFAULT;
5385 arg_type++;
5386 assert(*arg_type == (int)STRUCT_rtentry);
5387 se = struct_entries + *arg_type++;
5388 assert(se->convert[0] == NULL);
5389 /* convert struct here to be able to catch rt_dev string */
5390 field_types = se->field_types;
5391 dst_offsets = se->field_offsets[THUNK_HOST];
5392 src_offsets = se->field_offsets[THUNK_TARGET];
5393 for (i = 0; i < se->nb_fields; i++) {
5394 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5395 assert(*field_types == TYPE_PTRVOID);
5396 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5397 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5398 if (*target_rt_dev_ptr != 0) {
5399 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5400 tswapal(*target_rt_dev_ptr));
5401 if (!*host_rt_dev_ptr) {
5402 unlock_user(argptr, arg, 0);
5403 return -TARGET_EFAULT;
5405 } else {
5406 *host_rt_dev_ptr = 0;
5408 field_types++;
5409 continue;
5411 field_types = thunk_convert(buf_temp + dst_offsets[i],
5412 argptr + src_offsets[i],
5413 field_types, THUNK_HOST);
5415 unlock_user(argptr, arg, 0);
5417 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5418 if (*host_rt_dev_ptr != 0) {
5419 unlock_user((void *)*host_rt_dev_ptr,
5420 *target_rt_dev_ptr, 0);
5422 return ret;
5425 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5426 int fd, int cmd, abi_long arg)
5428 int sig = target_to_host_signal(arg);
5429 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5432 static IOCTLEntry ioctl_entries[] = {
5433 #define IOCTL(cmd, access, ...) \
5434 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5435 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5436 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5437 #include "ioctls.h"
5438 { 0, 0, },
5441 /* ??? Implement proper locking for ioctls. */
5442 /* do_ioctl() Must return target values and target errnos. */
5443 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5445 const IOCTLEntry *ie;
5446 const argtype *arg_type;
5447 abi_long ret;
5448 uint8_t buf_temp[MAX_STRUCT_SIZE];
5449 int target_size;
5450 void *argptr;
5452 ie = ioctl_entries;
5453 for(;;) {
5454 if (ie->target_cmd == 0) {
5455 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5456 return -TARGET_ENOSYS;
5458 if (ie->target_cmd == cmd)
5459 break;
5460 ie++;
5462 arg_type = ie->arg_type;
5463 #if defined(DEBUG)
5464 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
5465 #endif
5466 if (ie->do_ioctl) {
5467 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5470 switch(arg_type[0]) {
5471 case TYPE_NULL:
5472 /* no argument */
5473 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5474 break;
5475 case TYPE_PTRVOID:
5476 case TYPE_INT:
5477 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5478 break;
5479 case TYPE_PTR:
5480 arg_type++;
5481 target_size = thunk_type_size(arg_type, 0);
5482 switch(ie->access) {
5483 case IOC_R:
5484 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5485 if (!is_error(ret)) {
5486 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5487 if (!argptr)
5488 return -TARGET_EFAULT;
5489 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5490 unlock_user(argptr, arg, target_size);
5492 break;
5493 case IOC_W:
5494 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5495 if (!argptr)
5496 return -TARGET_EFAULT;
5497 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5498 unlock_user(argptr, arg, 0);
5499 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5500 break;
5501 default:
5502 case IOC_RW:
5503 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5504 if (!argptr)
5505 return -TARGET_EFAULT;
5506 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5507 unlock_user(argptr, arg, 0);
5508 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5509 if (!is_error(ret)) {
5510 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5511 if (!argptr)
5512 return -TARGET_EFAULT;
5513 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5514 unlock_user(argptr, arg, target_size);
5516 break;
5518 break;
5519 default:
5520 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5521 (long)cmd, arg_type[0]);
5522 ret = -TARGET_ENOSYS;
5523 break;
5525 return ret;
5528 static const bitmask_transtbl iflag_tbl[] = {
5529 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5530 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5531 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5532 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5533 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5534 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5535 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5536 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5537 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5538 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5539 { TARGET_IXON, TARGET_IXON, IXON, IXON },
5540 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5541 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5542 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5543 { 0, 0, 0, 0 }
/* Translation table for termios output-mode (c_oflag) bits.
 * The multi-bit delay fields (NLDLY, CRDLY, TABDLY, BSDLY, VTDLY, FFDLY)
 * use one entry per possible value under the same field mask. */
static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
    { 0, 0, 0, 0 }
};
/* Translation table for termios control-mode (c_cflag) bits.
 * Baud rates are encoded as values of the multi-bit CBAUD field, and the
 * character size as values of CSIZE; single bits map one-to-one. */
static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
    { 0, 0, 0, 0 }
};
/* Translation table for termios local-mode (c_lflag) bits; all entries
 * are single-bit one-to-one mappings between target and host. */
static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { 0, 0, 0, 0 }
};
5628 static void target_to_host_termios (void *dst, const void *src)
5630 struct host_termios *host = dst;
5631 const struct target_termios *target = src;
5633 host->c_iflag =
5634 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5635 host->c_oflag =
5636 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5637 host->c_cflag =
5638 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5639 host->c_lflag =
5640 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5641 host->c_line = target->c_line;
5643 memset(host->c_cc, 0, sizeof(host->c_cc));
5644 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5645 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5646 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5647 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5648 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5649 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5650 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5651 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5652 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5653 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5654 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5655 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5656 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5657 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5658 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5659 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5660 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5663 static void host_to_target_termios (void *dst, const void *src)
5665 struct target_termios *target = dst;
5666 const struct host_termios *host = src;
5668 target->c_iflag =
5669 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5670 target->c_oflag =
5671 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5672 target->c_cflag =
5673 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5674 target->c_lflag =
5675 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5676 target->c_line = host->c_line;
5678 memset(target->c_cc, 0, sizeof(target->c_cc));
5679 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5680 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5681 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5682 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5683 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5684 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5685 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5686 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5687 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5688 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5689 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5690 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5691 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5692 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5693 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5694 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5695 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
/* Thunk descriptor registered for struct termios: the ioctl machinery
 * uses these converters/sizes/alignments to marshal termios data
 * between target and host layouts. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};
5704 static bitmask_transtbl mmap_flags_tbl[] = {
5705 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5706 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5707 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5708 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
5709 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
5710 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
5711 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
5712 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5713 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
5714 MAP_NORESERVE },
5715 { 0, 0, 0, 0 }
5718 #if defined(TARGET_I386)
5720 /* NOTE: there is really one LDT for all the threads */
5721 static uint8_t *ldt_table;
5723 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5725 int size;
5726 void *p;
5728 if (!ldt_table)
5729 return 0;
5730 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5731 if (size > bytecount)
5732 size = bytecount;
5733 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5734 if (!p)
5735 return -TARGET_EFAULT;
5736 /* ??? Should this by byteswapped? */
5737 memcpy(p, ldt_table, size);
5738 unlock_user(p, ptr, size);
5739 return size;
5742 /* XXX: add locking support */
/* Install or clear one LDT descriptor on behalf of a guest
 * modify_ldt(2) write request.
 *
 * env:       CPU whose (lazily allocated) LDT is modified.
 * ptr:       guest address of a target_modify_ldt_ldt_s descriptor.
 * bytecount: must equal sizeof(that structure).
 * oldmode:   non-zero for the legacy func==1 semantics (no 'useable'
 *            bit, stricter validation).
 *
 * Returns 0 on success or a negative target errno.
 * Descriptor packing mirrors the Linux kernel's modify_ldt code.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    /* Byteswap the guest descriptor into host order. */
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the flag bits (same layout as the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    /* 64-bit code segment bit only exists for the 64-bit ABI. */
    lm = (ldt_info.flags >> 7) & 1;
#endif
    /* contents == 3 would be a task/conforming-style entry; only allow
     * it in new mode and only when marked not-present. */
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT lazily on first write */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the fields into the two 32-bit halves of an x86 segment
     * descriptor; 0x7000 sets DPL=3 and the user (S) bit. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ... */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
5834 /* specific and weird i386 syscalls */
5835 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5836 unsigned long bytecount)
5838 abi_long ret;
5840 switch (func) {
5841 case 0:
5842 ret = read_ldt(ptr, bytecount);
5843 break;
5844 case 1:
5845 ret = write_ldt(env, ptr, bytecount, 1);
5846 break;
5847 case 0x11:
5848 ret = write_ldt(env, ptr, bytecount, 0);
5849 break;
5850 default:
5851 ret = -TARGET_ENOSYS;
5852 break;
5854 return ret;
5857 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/* Emulate set_thread_area(2): install a TLS descriptor into the
 * emulated GDT.  If the guest passes entry_number == -1 a free TLS slot
 * is picked and written back to the guest structure.  Returns 0 or a
 * negative target errno.  Packing mirrors write_ldt() above. */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    /* VERIFY_WRITE: we may write the chosen entry_number back. */
    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* Find the first empty TLS slot in the GDT. */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* Unpack the flag bits (same layout as the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the descriptor; 0x7000 sets DPL=3 and the user (S) bit. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ... */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
/* Emulate get_thread_area(2): read the TLS descriptor at the guest's
 * requested GDT slot and unpack it back into the user_desc-style
 * structure at 'ptr'.  Exact inverse of do_set_thread_area(). */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    /* Only the TLS range of the GDT may be inspected. */
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Unpack the descriptor bits; note read_exec_only and
     * seg_not_present are stored inverted in the descriptor. */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    /* Reassemble the user_desc-style flags word. */
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
5989 #endif /* TARGET_I386 && TARGET_ABI32 */
5991 #ifndef TARGET_ABI32
5992 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5994 abi_long ret = 0;
5995 abi_ulong val;
5996 int idx;
5998 switch(code) {
5999 case TARGET_ARCH_SET_GS:
6000 case TARGET_ARCH_SET_FS:
6001 if (code == TARGET_ARCH_SET_GS)
6002 idx = R_GS;
6003 else
6004 idx = R_FS;
6005 cpu_x86_load_seg(env, idx, 0);
6006 env->segs[idx].base = addr;
6007 break;
6008 case TARGET_ARCH_GET_GS:
6009 case TARGET_ARCH_GET_FS:
6010 if (code == TARGET_ARCH_GET_GS)
6011 idx = R_GS;
6012 else
6013 idx = R_FS;
6014 val = env->segs[idx].base;
6015 if (put_user(val, addr, abi_ulong))
6016 ret = -TARGET_EFAULT;
6017 break;
6018 default:
6019 ret = -TARGET_EINVAL;
6020 break;
6022 return ret;
6024 #endif
6026 #endif /* defined(TARGET_I386) */
6028 #define NEW_STACK_SIZE 0x40000
6031 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* State handed from do_fork() to a new guest thread's start routine
 * (clone_func); mutex/cond implement the parent-child startup
 * handshake. */
typedef struct {
    CPUArchState *env;          /* the child's CPU state */
    pthread_mutex_t mutex;      /* protects the startup handshake */
    pthread_cond_t cond;        /* signalled once the child has a tid */
    pthread_t thread;
    uint32_t tid;               /* child's host tid, filled in by the child */
    abi_ulong child_tidptr;     /* guest addr for CLONE_CHILD_SETTID, or 0 */
    abi_ulong parent_tidptr;    /* guest addr for CLONE_PARENT_SETTID, or 0 */
    sigset_t sigmask;           /* parent's signal mask, restored in child */
} new_thread_info;
/* Start routine for guest threads created with clone(CLONE_VM).
 * Publishes the new tid, wakes the parent waiting in do_fork(), then
 * blocks on clone_lock until the parent finishes TLS setup before
 * entering the CPU loop (which never returns). */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = gettid();
    cpu->host_tid = info->tid;
    task_settid(ts);
    /* Honor CLONE_CHILD_SETTID / CLONE_PARENT_SETTID if requested. */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals (the parent blocked them all before pthread_create). */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready. */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state. */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions).
   Implements the guest clone()/fork()/vfork() family: CLONE_VM creates
   a host pthread sharing this address space, otherwise a real fork()
   is performed.  Returns the child's tid/pid, -1 on pthread failure,
   or a negative target errno for invalid flag combinations. */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    /* Drop flags we deliberately ignore rather than reject. */
    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        /* Only the full pthread-like flag set is supported. */
        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent. */
        cpu_clone_regs(new_env, newsp);
        new_cpu = ENV_GET_CPU(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        /* Grab a mutex so that thread setup appears atomic. */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals. */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed. */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize. */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        /* Releasing clone_lock lets the child proceed into cpu_loop(). */
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process. */
            rcu_after_fork();
            cpu_clone_regs(env, newsp);
            fork_end(1);
            /* There is a race condition here. The parent process could
               theoretically read the TID in the child process before the child
               tid is set. This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping. We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock. */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            fork_end(0);
        }
    }
    return ret;
}
6208 /* warning : doesn't handle linux specific flags... */
/* Map a target fcntl(2) command number to the host's, or
 * -TARGET_EINVAL for commands we do not translate.  Lock commands are
 * always mapped to the host's 64-bit variants so large offsets survive
 * the flock64 marshalling used by do_fcntl(). */
static int target_to_host_fcntl_cmd(int cmd)
{
    switch(cmd) {
    /* These commands have identical numbering on target and host. */
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
        return cmd;
    case TARGET_F_GETLK:
        return F_GETLK64;
    case TARGET_F_SETLK:
        return F_SETLK64;
    case TARGET_F_SETLKW:
        return F_SETLKW64;
    case TARGET_F_GETOWN:
        return F_GETOWN;
    case TARGET_F_SETOWN:
        return F_SETOWN;
    case TARGET_F_GETSIG:
        return F_GETSIG;
    case TARGET_F_SETSIG:
        return F_SETSIG;
#if TARGET_ABI_BITS == 32
    /* 32-bit ABIs expose explicit *64 lock commands as well. */
    case TARGET_F_GETLK64:
        return F_GETLK64;
    case TARGET_F_SETLK64:
        return F_SETLK64;
    case TARGET_F_SETLKW64:
        return F_SETLKW64;
#endif
    case TARGET_F_SETLEASE:
        return F_SETLEASE;
    case TARGET_F_GETLEASE:
        return F_GETLEASE;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        return F_DUPFD_CLOEXEC;
#endif
    case TARGET_F_NOTIFY:
        return F_NOTIFY;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        return F_GETOWN_EX;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        return F_SETOWN_EX;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        return F_SETPIPE_SZ;
    case TARGET_F_GETPIPE_SZ:
        return F_GETPIPE_SZ;
#endif
    default:
        return -TARGET_EINVAL;
    }
    /* Not reached: every path above returns. */
    return -TARGET_EINVAL;
}
/* Value (not bitmask) translation entries for flock l_type: the -1
 * masks make target_to_host_bitmask()/host_to_target_bitmask() match
 * the whole value against each listed constant. */
#define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
static const bitmask_transtbl flock_tbl[] = {
    TRANSTBL_CONVERT(F_RDLCK),
    TRANSTBL_CONVERT(F_WRLCK),
    TRANSTBL_CONVERT(F_UNLCK),
    TRANSTBL_CONVERT(F_EXLCK),
    TRANSTBL_CONVERT(F_SHLCK),
    { 0, 0, 0, 0 }
};
6280 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6281 abi_ulong target_flock_addr)
6283 struct target_flock *target_fl;
6284 short l_type;
6286 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6287 return -TARGET_EFAULT;
6290 __get_user(l_type, &target_fl->l_type);
6291 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6292 __get_user(fl->l_whence, &target_fl->l_whence);
6293 __get_user(fl->l_start, &target_fl->l_start);
6294 __get_user(fl->l_len, &target_fl->l_len);
6295 __get_user(fl->l_pid, &target_fl->l_pid);
6296 unlock_user_struct(target_fl, target_flock_addr, 0);
6297 return 0;
6300 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6301 const struct flock64 *fl)
6303 struct target_flock *target_fl;
6304 short l_type;
6306 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6307 return -TARGET_EFAULT;
6310 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6311 __put_user(l_type, &target_fl->l_type);
6312 __put_user(fl->l_whence, &target_fl->l_whence);
6313 __put_user(fl->l_start, &target_fl->l_start);
6314 __put_user(fl->l_len, &target_fl->l_len);
6315 __put_user(fl->l_pid, &target_fl->l_pid);
6316 unlock_user_struct(target_fl, target_flock_addr, 1);
6317 return 0;
6320 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6321 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6323 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6324 static inline abi_long copy_from_user_eabi_flock64(struct flock64 *fl,
6325 abi_ulong target_flock_addr)
6327 struct target_eabi_flock64 *target_fl;
6328 short l_type;
6330 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6331 return -TARGET_EFAULT;
6334 __get_user(l_type, &target_fl->l_type);
6335 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6336 __get_user(fl->l_whence, &target_fl->l_whence);
6337 __get_user(fl->l_start, &target_fl->l_start);
6338 __get_user(fl->l_len, &target_fl->l_len);
6339 __get_user(fl->l_pid, &target_fl->l_pid);
6340 unlock_user_struct(target_fl, target_flock_addr, 0);
6341 return 0;
6344 static inline abi_long copy_to_user_eabi_flock64(abi_ulong target_flock_addr,
6345 const struct flock64 *fl)
6347 struct target_eabi_flock64 *target_fl;
6348 short l_type;
6350 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6351 return -TARGET_EFAULT;
6354 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6355 __put_user(l_type, &target_fl->l_type);
6356 __put_user(fl->l_whence, &target_fl->l_whence);
6357 __put_user(fl->l_start, &target_fl->l_start);
6358 __put_user(fl->l_len, &target_fl->l_len);
6359 __put_user(fl->l_pid, &target_fl->l_pid);
6360 unlock_user_struct(target_fl, target_flock_addr, 1);
6361 return 0;
6363 #endif
6365 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6366 abi_ulong target_flock_addr)
6368 struct target_flock64 *target_fl;
6369 short l_type;
6371 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6372 return -TARGET_EFAULT;
6375 __get_user(l_type, &target_fl->l_type);
6376 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6377 __get_user(fl->l_whence, &target_fl->l_whence);
6378 __get_user(fl->l_start, &target_fl->l_start);
6379 __get_user(fl->l_len, &target_fl->l_len);
6380 __get_user(fl->l_pid, &target_fl->l_pid);
6381 unlock_user_struct(target_fl, target_flock_addr, 0);
6382 return 0;
6385 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6386 const struct flock64 *fl)
6388 struct target_flock64 *target_fl;
6389 short l_type;
6391 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6392 return -TARGET_EFAULT;
6395 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6396 __put_user(l_type, &target_fl->l_type);
6397 __put_user(fl->l_whence, &target_fl->l_whence);
6398 __put_user(fl->l_start, &target_fl->l_start);
6399 __put_user(fl->l_len, &target_fl->l_len);
6400 __put_user(fl->l_pid, &target_fl->l_pid);
6401 unlock_user_struct(target_fl, target_flock_addr, 1);
6402 return 0;
/* Emulate fcntl(2): translate the command, marshal any pointed-to
 * structures (flock, f_owner_ex) or flag words, call the host fcntl,
 * and convert results back.  Unknown commands are passed through to
 * the host untranslated. */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        /* Lock queries: copy in, query, copy the result back out. */
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        /* Returned flag word must be translated to target encoding. */
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    /* Plain integer argument: no marshalling needed. */
    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        /* Untranslated command: pass the target number straight through. */
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
6513 #ifdef USE_UID16
/* Squash a 32-bit uid into 16 bits: values that do not fit collapse to
 * the kernel's overflow uid, 65534. */
static inline int high2lowuid(int uid)
{
    return (uid > 65535) ? 65534 : uid;
}

/* Same overflow handling for gids. */
static inline int high2lowgid(int gid)
{
    return (gid > 65535) ? 65534 : gid;
}

/* Widen a 16-bit uid; the 16-bit -1 sentinel ("leave unchanged" in
 * setres*id) must map to the 32-bit -1. */
static inline int low2highuid(int uid)
{
    return ((int16_t)uid == -1) ? -1 : uid;
}

/* Same sentinel handling for gids. */
static inline int low2highgid(int gid)
{
    return ((int16_t)gid == -1) ? -1 : gid;
}
/* Byteswap a 16-bit uid/gid between target and host order (UID16 ABIs). */
static inline int tswapid(int id)
{
    return tswap16(id);
}
6551 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6553 #else /* !USE_UID16 */
/* Without USE_UID16 the target uses full 32-bit uids/gids, so every
 * conversion helper is an identity mapping. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
/* 32-bit ids only need a byteswap. */
static inline int tswapid(int id)
{
    return tswap32(id);
}
6575 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6577 #endif /* USE_UID16 */
/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

/* Raw per-thread wrappers generated by the _syscall macros; these bypass
 * glibc's set*id() so only the calling thread's credentials change. */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
/* One-time initialization of the syscall layer: register the struct
 * conversion thunks, build the target->host errno mapping, and patch
 * ioctl numbers whose size field must be computed from the argument type. */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        /* An all-ones size field marks "size to be filled in here". */
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                             (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
#if TARGET_ABI_BITS == 32
/* Reassemble a 64-bit file offset from the two 32-bit ABI words; which
 * word carries the high half depends on the target's endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
    uint64_t high, low;

#ifdef TARGET_WORDS_BIGENDIAN
    high = word0;
    low = word1;
#else
    high = word1;
    low = word0;
#endif
    return (high << 32) | low;
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABI: the offset already fits in a single register. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64: the 64-bit length arrives split across two abi_long words.
 * ABIs that align register pairs insert a pad, shifting the words from
 * arg2/arg3 to arg3/arg4. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    abi_long word0 = arg2;
    abi_long word1 = arg3;

    if (regpairs_aligned(cpu_env)) {
        word0 = arg3;
        word1 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(word0, word1)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair shuffling as target_truncate64, but
 * operating on an already-open fd (arg1). */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    abi_long word0 = arg2;
    abi_long word1 = arg3;

    if (regpairs_aligned(cpu_env)) {
        word0 = arg3;
        word1 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(word0, word1)));
}
#endif
/* Copy a timespec from guest memory into *host_ts.
 * Returns 0 on success, -TARGET_EFAULT if the guest address is unreadable. */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* Read-only access: nothing to copy back (flag 0). */
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
/* Copy *host_ts out to a timespec in guest memory.
 * Returns 0 on success, -TARGET_EFAULT if the guest address is unwritable. */
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* Flag 1: flush the converted struct back to guest memory. */
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
6735 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6736 abi_ulong target_addr)
6738 struct target_itimerspec *target_itspec;
6740 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6741 return -TARGET_EFAULT;
6744 host_itspec->it_interval.tv_sec =
6745 tswapal(target_itspec->it_interval.tv_sec);
6746 host_itspec->it_interval.tv_nsec =
6747 tswapal(target_itspec->it_interval.tv_nsec);
6748 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6749 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6751 unlock_user_struct(target_itspec, target_addr, 1);
6752 return 0;
6755 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6756 struct itimerspec *host_its)
6758 struct target_itimerspec *target_itspec;
6760 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6761 return -TARGET_EFAULT;
6764 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6765 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6767 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6768 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6770 unlock_user_struct(target_itspec, target_addr, 0);
6771 return 0;
/* Convert a guest struct timex (adjtimex parameter block) into *host_tx,
 * field by field. Returns 0 on success, -TARGET_EFAULT on a bad address. */
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    /* Read-only lock: no copy-back. */
    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
/* Convert *host_tx back into a guest struct timex (adjtimex results),
 * field by field. Returns 0 on success, -TARGET_EFAULT on a bad address. */
static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    /* Write lock: flag 1 copies the converted struct back to the guest. */
    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
/* Convert a guest struct sigevent (e.g. for timer_create) into *host_sevp.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                               abi_ulong target_addr)
{
    struct target_sigevent *target_sevp;

    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
     */
    host_sevp->sigev_value.sival_ptr =
        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
    host_sevp->sigev_signo =
        target_to_host_signal(tswap32(target_sevp->sigev_signo));
    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
    host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);

    /* NOTE(review): this is a VERIFY_READ lock but the unlock passes 1
     * (copy-back); harmless when lock_user maps guest memory directly,
     * but 0 would be the consistent flag — confirm against unlock_user
     * semantics before changing. */
    unlock_user_struct(target_sevp, target_addr, 1);
    return 0;
}
#if defined(TARGET_NR_mlockall)
/* Translate the guest's mlockall() flag bits into the host's MCL_* bits. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int host_flags = 0;

    host_flags |= (arg & TARGET_MLOCKALL_MCL_CURRENT) ? MCL_CURRENT : 0;
    host_flags |= (arg & TARGET_MLOCKALL_MCL_FUTURE) ? MCL_FUTURE : 0;
    return host_flags;
}
#endif
/* Write a host struct stat out to the guest's stat64 layout, handling the
 * ARM EABI variant and targets without a separate stat64 structure.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        /* ARM EABI uses its own padded/aligned stat64 layout. */
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Some targets carry a second, truncated inode field. */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
6949 /* ??? Using host futex calls even when target atomic operations
6950 are not really atomic probably breaks things. However implementing
6951 futexes locally would make futexes shared between multiple processes
6952 tricky. However they're probably useless because guest atomic
6953 operations won't work either. */
6954 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6955 target_ulong uaddr2, int val3)
6957 struct timespec ts, *pts;
6958 int base_op;
6960 /* ??? We assume FUTEX_* constants are the same on both host
6961 and target. */
6962 #ifdef FUTEX_CMD_MASK
6963 base_op = op & FUTEX_CMD_MASK;
6964 #else
6965 base_op = op;
6966 #endif
6967 switch (base_op) {
6968 case FUTEX_WAIT:
6969 case FUTEX_WAIT_BITSET:
6970 if (timeout) {
6971 pts = &ts;
6972 target_to_host_timespec(pts, timeout);
6973 } else {
6974 pts = NULL;
6976 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6977 pts, NULL, val3));
6978 case FUTEX_WAKE:
6979 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6980 case FUTEX_FD:
6981 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6982 case FUTEX_REQUEUE:
6983 case FUTEX_CMP_REQUEUE:
6984 case FUTEX_WAKE_OP:
6985 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6986 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6987 But the prototype takes a `struct timespec *'; insert casts
6988 to satisfy the compiler. We do not need to tswap TIMEOUT
6989 since it's not compared to guest memory. */
6990 pts = (struct timespec *)(uintptr_t) timeout;
6991 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6992 g2h(uaddr2),
6993 (base_op == FUTEX_CMP_REQUEUE
6994 ? tswap32(val3)
6995 : val3)));
6996 default:
6997 return -TARGET_ENOSYS;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Implement name_to_handle_at(2): the guest supplies a file_handle buffer
 * whose handle_bytes field gives the available payload size; we fill it
 * via the host syscall and store the mount id at mount_id. */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* First field of the guest struct is handle_bytes (s32). */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    /* Build the handle in a host-side buffer, then copy it out. */
    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Implement open_by_handle_at(2): copy the guest's file_handle into a
 * host buffer (byteswapping the header fields) and open it. */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes tells us how large the opaque payload is. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    /* Duplicate, then fix up the two header fields for the host. */
    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)

/* signalfd siginfo conversion */

/* Convert one signalfd_siginfo record from host to target byte order and
 * signal numbering. NOTE(review): the only caller passes tinfo == info
 * (in-place conversion); the reads of tinfo->ssi_signo/ssi_code below
 * happen *before* those fields are byteswapped and so rely on that
 * aliasing — confirm before calling with distinct buffers. */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    if (tinfo->ssi_signo == SIGBUS &&
        (tinfo->ssi_code == BUS_MCEERR_AR ||
         tinfo->ssi_code == BUS_MCEERR_AO)) {
        /* ssi_addr_lsb lives immediately after ssi_addr in the kernel's
         * layout; reach it through pointer arithmetic. */
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    /* NOTE(review): reads tinfo->ssi_errno rather than info->ssi_errno;
     * equivalent only because the conversion is done in place. */
    tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
7130 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
7132 int i;
7134 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
7135 host_to_target_signalfd_siginfo(buf + i, buf + i);
7138 return len;
7141 static TargetFdTrans target_signalfd_trans = {
7142 .host_to_target_data = host_to_target_data_signalfd,
/* Implement signalfd4(2): validate the flags, convert the guest sigset,
 * and register the resulting fd so reads through it get their siginfo
 * records byteswapped for the target. */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    /* Only NONBLOCK and CLOEXEC are defined for signalfd4. */
    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        /* Route future reads through the siginfo converter. */
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same. */
int host_to_target_waitstatus(int status)
{
    /* Killed by a signal: signal number occupies the low 7 bits. */
    if (WIFSIGNALED(status)) {
        int tsig = host_to_target_signal(WTERMSIG(status));
        return tsig | (status & ~0x7f);
    }
    /* Stopped: the signal number sits in bits 8..15. */
    if (WIFSTOPPED(status)) {
        int tsig = host_to_target_signal(WSTOPSIG(status));
        return (tsig << 8) | (status & 0xff);
    }
    return status;
}
/* Emulate /proc/self/cmdline for the guest: stream the host's cmdline
 * into fd, but drop the first NUL-terminated string (the qemu binary
 * path) so the guest sees its own argv[0] first.
 * Returns 0 on success, -1 with errno set on I/O failure. */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            /* Preserve read()'s errno across the close(). */
            int e = errno;
            fd_orig = close(fd_orig);
            errno = e;
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                int e = errno;
                close(fd_orig);
                errno = e;
                return -1;
            }
        }
    }

    return close(fd_orig);
}
7238 static int open_self_maps(void *cpu_env, int fd)
7240 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7241 TaskState *ts = cpu->opaque;
7242 FILE *fp;
7243 char *line = NULL;
7244 size_t len = 0;
7245 ssize_t read;
7247 fp = fopen("/proc/self/maps", "r");
7248 if (fp == NULL) {
7249 return -1;
7252 while ((read = getline(&line, &len, fp)) != -1) {
7253 int fields, dev_maj, dev_min, inode;
7254 uint64_t min, max, offset;
7255 char flag_r, flag_w, flag_x, flag_p;
7256 char path[512] = "";
7257 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7258 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7259 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
7261 if ((fields < 10) || (fields > 11)) {
7262 continue;
7264 if (h2g_valid(min)) {
7265 int flags = page_get_flags(h2g(min));
7266 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
7267 if (page_check_range(h2g(min), max - min, flags) == -1) {
7268 continue;
7270 if (h2g(min) == ts->info->stack_limit) {
7271 pstrcpy(path, sizeof(path), " [stack]");
7273 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
7274 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7275 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7276 flag_x, flag_p, offset, dev_maj, dev_min, inode,
7277 path[0] ? " " : "", path);
7281 free(line);
7282 fclose(fp);
7284 return 0;
/* Emulate /proc/self/stat for the guest: emit the pid, comm and stack
 * bottom fields with guest-appropriate values; every other field is
 * written as a literal 0.
 * Returns 0 on success, -1 on a short write. */
static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong start_stack = ts->info->start_stack;
    int i;

    /* /proc/<pid>/stat has 44 space-separated fields in this layout. */
    for (i = 0; i < 44; i++) {
        char buf[128];
        int len;
        uint64_t val = 0;

        if (i == 0) {
            /* pid */
            val = getpid();
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else if (i == 1) {
            /* app name */
            snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
        } else if (i == 27) {
            /* stack bottom */
            val = start_stack;
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else {
            /* for the rest, there is MasterCard */
            snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
        }

        len = strlen(buf);
        if (write(fd, buf, len) != len) {
            return -1;
        }
    }

    return 0;
}
/* Emulate /proc/self/auxv for the guest: dump the saved guest auxiliary
 * vector (captured at exec time on the guest stack) into fd.
 * Always returns 0; write failures just truncate the output. */
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        /* NOTE(review): ptr/len have been advanced/decremented by the
         * loop, so this unlock covers only the unwritten remainder —
         * harmless for a read-only lock, but confirm against
         * unlock_user's accounting. */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
/* Return 1 if filename names the given entry of this process's /proc
 * directory — either via "/proc/self/<entry>" or "/proc/<ourpid>/<entry>"
 * — and 0 otherwise. */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *rest = filename;

    if (strncmp(rest, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    rest += strlen("/proc/");

    if (strncmp(rest, "self/", strlen("self/")) == 0) {
        rest += strlen("self/");
    } else if (*rest >= '1' && *rest <= '9') {
        /* A numeric pid only matches if it is our own. */
        char myself[80];

        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(rest, myself, strlen(myself)) != 0) {
            return 0;
        }
        rest += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(rest, entry) == 0;
}
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-path comparator used by the fake-open table below (cross-endian
 * builds only). */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}

/* Emulate /proc/net/route when host and target endianness differ: the
 * address columns are raw 32-bit values in host byte order, so each one
 * must be byteswapped for the guest.
 * Returns 0 on success, -1 if the host file can't be opened. */
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        /* NOTE(review): several %d conversions target unsigned int
         * variables here — works on common ABIs but is formally
         * mismatched; confirm before tightening compiler warnings. */
        sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
               iface, &dest, &gw, &flags, &refcnt, &use, &metric,
               &mask, &mtu, &window, &irtt);
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif
/* openat() with /proc interception: certain /proc/self entries must show
 * guest state rather than QEMU's own, so they are synthesized into an
 * anonymous temp file; everything else goes straight to the host. */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;                      /* entry (or full path) to match */
        int (*fill)(void *cpu_env, int fd);        /* writes fake contents into fd */
        int (*cmp)(const char *s1, const char *s2); /* how to match pathname */
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    /* /proc/self/exe must resolve to the guest binary, not QEMU. */
    if (is_proc_myself(pathname, "exe")) {
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* Unlink immediately: the fd keeps the file alive, nothing else
         * should see it. */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
7482 #define TIMER_MAGIC 0x0caf0000
7483 #define TIMER_MAGIC_MASK 0xffff0000
7485 /* Convert QEMU provided timer ID back to internal 16bit index format */
7486 static target_timer_t get_timer_id(abi_long arg)
7488 target_timer_t timerid = arg;
7490 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7491 return -TARGET_EINVAL;
7494 timerid &= 0xffff;
7496 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7497 return -TARGET_EINVAL;
7500 return timerid;
7503 /* do_syscall() should always have a single exit point at the end so
7504 that actions, such as logging of syscall results, can be performed.
7505 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7506 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
7507 abi_long arg2, abi_long arg3, abi_long arg4,
7508 abi_long arg5, abi_long arg6, abi_long arg7,
7509 abi_long arg8)
7511 CPUState *cpu = ENV_GET_CPU(cpu_env);
7512 abi_long ret;
7513 struct stat st;
7514 struct statfs stfs;
7515 void *p;
7517 #if defined(DEBUG_ERESTARTSYS)
7518 /* Debug-only code for exercising the syscall-restart code paths
7519 * in the per-architecture cpu main loops: restart every syscall
7520 * the guest makes once before letting it through.
7523 static int flag;
7525 flag = !flag;
7526 if (flag) {
7527 return -TARGET_ERESTARTSYS;
7530 #endif
7532 #ifdef DEBUG
7533 gemu_log("syscall %d", num);
7534 #endif
7535 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
7536 if(do_strace)
7537 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
7539 switch(num) {
7540 case TARGET_NR_exit:
7541 /* In old applications this may be used to implement _exit(2).
7542 However in threaded applictions it is used for thread termination,
7543 and _exit_group is used for application termination.
7544 Do thread termination if we have more then one thread. */
7546 if (block_signals()) {
7547 ret = -TARGET_ERESTARTSYS;
7548 break;
7551 cpu_list_lock();
7553 if (CPU_NEXT(first_cpu)) {
7554 TaskState *ts;
7556 /* Remove the CPU from the list. */
7557 QTAILQ_REMOVE(&cpus, cpu, node);
7559 cpu_list_unlock();
7561 ts = cpu->opaque;
7562 if (ts->child_tidptr) {
7563 put_user_u32(0, ts->child_tidptr);
7564 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7565 NULL, NULL, 0);
7567 thread_cpu = NULL;
7568 object_unref(OBJECT(cpu));
7569 g_free(ts);
7570 rcu_unregister_thread();
7571 pthread_exit(NULL);
7574 cpu_list_unlock();
7575 #ifdef TARGET_GPROF
7576 _mcleanup();
7577 #endif
7578 gdb_exit(cpu_env, arg1);
7579 _exit(arg1);
7580 ret = 0; /* avoid warning */
7581 break;
7582 case TARGET_NR_read:
7583 if (arg3 == 0)
7584 ret = 0;
7585 else {
7586 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7587 goto efault;
7588 ret = get_errno(safe_read(arg1, p, arg3));
7589 if (ret >= 0 &&
7590 fd_trans_host_to_target_data(arg1)) {
7591 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7593 unlock_user(p, arg2, ret);
7595 break;
7596 case TARGET_NR_write:
7597 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7598 goto efault;
7599 ret = get_errno(safe_write(arg1, p, arg3));
7600 unlock_user(p, arg2, 0);
7601 break;
7602 #ifdef TARGET_NR_open
7603 case TARGET_NR_open:
7604 if (!(p = lock_user_string(arg1)))
7605 goto efault;
7606 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7607 target_to_host_bitmask(arg2, fcntl_flags_tbl),
7608 arg3));
7609 fd_trans_unregister(ret);
7610 unlock_user(p, arg1, 0);
7611 break;
7612 #endif
7613 case TARGET_NR_openat:
7614 if (!(p = lock_user_string(arg2)))
7615 goto efault;
7616 ret = get_errno(do_openat(cpu_env, arg1, p,
7617 target_to_host_bitmask(arg3, fcntl_flags_tbl),
7618 arg4));
7619 fd_trans_unregister(ret);
7620 unlock_user(p, arg2, 0);
7621 break;
7622 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7623 case TARGET_NR_name_to_handle_at:
7624 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7625 break;
7626 #endif
7627 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7628 case TARGET_NR_open_by_handle_at:
7629 ret = do_open_by_handle_at(arg1, arg2, arg3);
7630 fd_trans_unregister(ret);
7631 break;
7632 #endif
7633 case TARGET_NR_close:
7634 fd_trans_unregister(arg1);
7635 ret = get_errno(close(arg1));
7636 break;
7637 case TARGET_NR_brk:
7638 ret = do_brk(arg1);
7639 break;
7640 #ifdef TARGET_NR_fork
7641 case TARGET_NR_fork:
7642 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
7643 break;
7644 #endif
7645 #ifdef TARGET_NR_waitpid
7646 case TARGET_NR_waitpid:
7648 int status;
7649 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7650 if (!is_error(ret) && arg2 && ret
7651 && put_user_s32(host_to_target_waitstatus(status), arg2))
7652 goto efault;
7654 break;
7655 #endif
7656 #ifdef TARGET_NR_waitid
7657 case TARGET_NR_waitid:
7659 siginfo_t info;
7660 info.si_pid = 0;
7661 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7662 if (!is_error(ret) && arg3 && info.si_pid != 0) {
7663 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7664 goto efault;
7665 host_to_target_siginfo(p, &info);
7666 unlock_user(p, arg3, sizeof(target_siginfo_t));
7669 break;
7670 #endif
7671 #ifdef TARGET_NR_creat /* not on alpha */
7672 case TARGET_NR_creat:
7673 if (!(p = lock_user_string(arg1)))
7674 goto efault;
7675 ret = get_errno(creat(p, arg2));
7676 fd_trans_unregister(ret);
7677 unlock_user(p, arg1, 0);
7678 break;
7679 #endif
7680 #ifdef TARGET_NR_link
7681 case TARGET_NR_link:
7683 void * p2;
7684 p = lock_user_string(arg1);
7685 p2 = lock_user_string(arg2);
7686 if (!p || !p2)
7687 ret = -TARGET_EFAULT;
7688 else
7689 ret = get_errno(link(p, p2));
7690 unlock_user(p2, arg2, 0);
7691 unlock_user(p, arg1, 0);
7693 break;
7694 #endif
7695 #if defined(TARGET_NR_linkat)
7696 case TARGET_NR_linkat:
7698 void * p2 = NULL;
7699 if (!arg2 || !arg4)
7700 goto efault;
7701 p = lock_user_string(arg2);
7702 p2 = lock_user_string(arg4);
7703 if (!p || !p2)
7704 ret = -TARGET_EFAULT;
7705 else
7706 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7707 unlock_user(p, arg2, 0);
7708 unlock_user(p2, arg4, 0);
7710 break;
7711 #endif
7712 #ifdef TARGET_NR_unlink
7713 case TARGET_NR_unlink:
7714 if (!(p = lock_user_string(arg1)))
7715 goto efault;
7716 ret = get_errno(unlink(p));
7717 unlock_user(p, arg1, 0);
7718 break;
7719 #endif
7720 #if defined(TARGET_NR_unlinkat)
7721 case TARGET_NR_unlinkat:
7722 if (!(p = lock_user_string(arg2)))
7723 goto efault;
7724 ret = get_errno(unlinkat(arg1, p, arg3));
7725 unlock_user(p, arg2, 0);
7726 break;
7727 #endif
7728 case TARGET_NR_execve:
7730 char **argp, **envp;
7731 int argc, envc;
7732 abi_ulong gp;
7733 abi_ulong guest_argp;
7734 abi_ulong guest_envp;
7735 abi_ulong addr;
7736 char **q;
7737 int total_size = 0;
7739 argc = 0;
7740 guest_argp = arg2;
7741 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7742 if (get_user_ual(addr, gp))
7743 goto efault;
7744 if (!addr)
7745 break;
7746 argc++;
7748 envc = 0;
7749 guest_envp = arg3;
7750 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7751 if (get_user_ual(addr, gp))
7752 goto efault;
7753 if (!addr)
7754 break;
7755 envc++;
7758 argp = alloca((argc + 1) * sizeof(void *));
7759 envp = alloca((envc + 1) * sizeof(void *));
7761 for (gp = guest_argp, q = argp; gp;
7762 gp += sizeof(abi_ulong), q++) {
7763 if (get_user_ual(addr, gp))
7764 goto execve_efault;
7765 if (!addr)
7766 break;
7767 if (!(*q = lock_user_string(addr)))
7768 goto execve_efault;
7769 total_size += strlen(*q) + 1;
7771 *q = NULL;
7773 for (gp = guest_envp, q = envp; gp;
7774 gp += sizeof(abi_ulong), q++) {
7775 if (get_user_ual(addr, gp))
7776 goto execve_efault;
7777 if (!addr)
7778 break;
7779 if (!(*q = lock_user_string(addr)))
7780 goto execve_efault;
7781 total_size += strlen(*q) + 1;
7783 *q = NULL;
7785 if (!(p = lock_user_string(arg1)))
7786 goto execve_efault;
7787 /* Although execve() is not an interruptible syscall it is
7788 * a special case where we must use the safe_syscall wrapper:
7789 * if we allow a signal to happen before we make the host
7790 * syscall then we will 'lose' it, because at the point of
7791 * execve the process leaves QEMU's control. So we use the
7792 * safe syscall wrapper to ensure that we either take the
7793 * signal as a guest signal, or else it does not happen
7794 * before the execve completes and makes it the other
7795 * program's problem.
7797 ret = get_errno(safe_execve(p, argp, envp));
7798 unlock_user(p, arg1, 0);
7800 goto execve_end;
7802 execve_efault:
7803 ret = -TARGET_EFAULT;
7805 execve_end:
7806 for (gp = guest_argp, q = argp; *q;
7807 gp += sizeof(abi_ulong), q++) {
7808 if (get_user_ual(addr, gp)
7809 || !addr)
7810 break;
7811 unlock_user(*q, addr, 0);
7813 for (gp = guest_envp, q = envp; *q;
7814 gp += sizeof(abi_ulong), q++) {
7815 if (get_user_ual(addr, gp)
7816 || !addr)
7817 break;
7818 unlock_user(*q, addr, 0);
7821 break;
7822 case TARGET_NR_chdir:
7823 if (!(p = lock_user_string(arg1)))
7824 goto efault;
7825 ret = get_errno(chdir(p));
7826 unlock_user(p, arg1, 0);
7827 break;
7828 #ifdef TARGET_NR_time
7829 case TARGET_NR_time:
7831 time_t host_time;
7832 ret = get_errno(time(&host_time));
7833 if (!is_error(ret)
7834 && arg1
7835 && put_user_sal(host_time, arg1))
7836 goto efault;
7838 break;
7839 #endif
7840 #ifdef TARGET_NR_mknod
7841 case TARGET_NR_mknod:
7842 if (!(p = lock_user_string(arg1)))
7843 goto efault;
7844 ret = get_errno(mknod(p, arg2, arg3));
7845 unlock_user(p, arg1, 0);
7846 break;
7847 #endif
7848 #if defined(TARGET_NR_mknodat)
7849 case TARGET_NR_mknodat:
7850 if (!(p = lock_user_string(arg2)))
7851 goto efault;
7852 ret = get_errno(mknodat(arg1, p, arg3, arg4));
7853 unlock_user(p, arg2, 0);
7854 break;
7855 #endif
7856 #ifdef TARGET_NR_chmod
7857 case TARGET_NR_chmod:
7858 if (!(p = lock_user_string(arg1)))
7859 goto efault;
7860 ret = get_errno(chmod(p, arg2));
7861 unlock_user(p, arg1, 0);
7862 break;
7863 #endif
7864 #ifdef TARGET_NR_break
7865 case TARGET_NR_break:
7866 goto unimplemented;
7867 #endif
7868 #ifdef TARGET_NR_oldstat
7869 case TARGET_NR_oldstat:
7870 goto unimplemented;
7871 #endif
7872 case TARGET_NR_lseek:
7873 ret = get_errno(lseek(arg1, arg2, arg3));
7874 break;
7875 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7876 /* Alpha specific */
7877 case TARGET_NR_getxpid:
7878 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7879 ret = get_errno(getpid());
7880 break;
7881 #endif
7882 #ifdef TARGET_NR_getpid
7883 case TARGET_NR_getpid:
7884 ret = get_errno(getpid());
7885 break;
7886 #endif
7887 case TARGET_NR_mount:
7889 /* need to look at the data field */
7890 void *p2, *p3;
7892 if (arg1) {
7893 p = lock_user_string(arg1);
7894 if (!p) {
7895 goto efault;
7897 } else {
7898 p = NULL;
7901 p2 = lock_user_string(arg2);
7902 if (!p2) {
7903 if (arg1) {
7904 unlock_user(p, arg1, 0);
7906 goto efault;
7909 if (arg3) {
7910 p3 = lock_user_string(arg3);
7911 if (!p3) {
7912 if (arg1) {
7913 unlock_user(p, arg1, 0);
7915 unlock_user(p2, arg2, 0);
7916 goto efault;
7918 } else {
7919 p3 = NULL;
7922 /* FIXME - arg5 should be locked, but it isn't clear how to
7923 * do that since it's not guaranteed to be a NULL-terminated
7924 * string.
7926 if (!arg5) {
7927 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7928 } else {
7929 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7931 ret = get_errno(ret);
7933 if (arg1) {
7934 unlock_user(p, arg1, 0);
7936 unlock_user(p2, arg2, 0);
7937 if (arg3) {
7938 unlock_user(p3, arg3, 0);
7941 break;
7942 #ifdef TARGET_NR_umount
7943 case TARGET_NR_umount:
7944 if (!(p = lock_user_string(arg1)))
7945 goto efault;
7946 ret = get_errno(umount(p));
7947 unlock_user(p, arg1, 0);
7948 break;
7949 #endif
7950 #ifdef TARGET_NR_stime /* not on alpha */
7951 case TARGET_NR_stime:
7953 time_t host_time;
7954 if (get_user_sal(host_time, arg1))
7955 goto efault;
7956 ret = get_errno(stime(&host_time));
7958 break;
7959 #endif
7960 case TARGET_NR_ptrace:
7961 goto unimplemented;
7962 #ifdef TARGET_NR_alarm /* not on alpha */
7963 case TARGET_NR_alarm:
7964 ret = alarm(arg1);
7965 break;
7966 #endif
7967 #ifdef TARGET_NR_oldfstat
7968 case TARGET_NR_oldfstat:
7969 goto unimplemented;
7970 #endif
7971 #ifdef TARGET_NR_pause /* not on alpha */
7972 case TARGET_NR_pause:
7973 if (!block_signals()) {
7974 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7976 ret = -TARGET_EINTR;
7977 break;
7978 #endif
7979 #ifdef TARGET_NR_utime
7980 case TARGET_NR_utime:
7982 struct utimbuf tbuf, *host_tbuf;
7983 struct target_utimbuf *target_tbuf;
7984 if (arg2) {
7985 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7986 goto efault;
7987 tbuf.actime = tswapal(target_tbuf->actime);
7988 tbuf.modtime = tswapal(target_tbuf->modtime);
7989 unlock_user_struct(target_tbuf, arg2, 0);
7990 host_tbuf = &tbuf;
7991 } else {
7992 host_tbuf = NULL;
7994 if (!(p = lock_user_string(arg1)))
7995 goto efault;
7996 ret = get_errno(utime(p, host_tbuf));
7997 unlock_user(p, arg1, 0);
7999 break;
8000 #endif
8001 #ifdef TARGET_NR_utimes
8002 case TARGET_NR_utimes:
8004 struct timeval *tvp, tv[2];
8005 if (arg2) {
8006 if (copy_from_user_timeval(&tv[0], arg2)
8007 || copy_from_user_timeval(&tv[1],
8008 arg2 + sizeof(struct target_timeval)))
8009 goto efault;
8010 tvp = tv;
8011 } else {
8012 tvp = NULL;
8014 if (!(p = lock_user_string(arg1)))
8015 goto efault;
8016 ret = get_errno(utimes(p, tvp));
8017 unlock_user(p, arg1, 0);
8019 break;
8020 #endif
8021 #if defined(TARGET_NR_futimesat)
8022 case TARGET_NR_futimesat:
8024 struct timeval *tvp, tv[2];
8025 if (arg3) {
8026 if (copy_from_user_timeval(&tv[0], arg3)
8027 || copy_from_user_timeval(&tv[1],
8028 arg3 + sizeof(struct target_timeval)))
8029 goto efault;
8030 tvp = tv;
8031 } else {
8032 tvp = NULL;
8034 if (!(p = lock_user_string(arg2)))
8035 goto efault;
8036 ret = get_errno(futimesat(arg1, path(p), tvp));
8037 unlock_user(p, arg2, 0);
8039 break;
8040 #endif
8041 #ifdef TARGET_NR_stty
8042 case TARGET_NR_stty:
8043 goto unimplemented;
8044 #endif
8045 #ifdef TARGET_NR_gtty
8046 case TARGET_NR_gtty:
8047 goto unimplemented;
8048 #endif
8049 #ifdef TARGET_NR_access
8050 case TARGET_NR_access:
8051 if (!(p = lock_user_string(arg1)))
8052 goto efault;
8053 ret = get_errno(access(path(p), arg2));
8054 unlock_user(p, arg1, 0);
8055 break;
8056 #endif
8057 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8058 case TARGET_NR_faccessat:
8059 if (!(p = lock_user_string(arg2)))
8060 goto efault;
8061 ret = get_errno(faccessat(arg1, p, arg3, 0));
8062 unlock_user(p, arg2, 0);
8063 break;
8064 #endif
8065 #ifdef TARGET_NR_nice /* not on alpha */
8066 case TARGET_NR_nice:
8067 ret = get_errno(nice(arg1));
8068 break;
8069 #endif
8070 #ifdef TARGET_NR_ftime
8071 case TARGET_NR_ftime:
8072 goto unimplemented;
8073 #endif
8074 case TARGET_NR_sync:
8075 sync();
8076 ret = 0;
8077 break;
8078 case TARGET_NR_kill:
8079 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8080 break;
8081 #ifdef TARGET_NR_rename
8082 case TARGET_NR_rename:
8084 void *p2;
8085 p = lock_user_string(arg1);
8086 p2 = lock_user_string(arg2);
8087 if (!p || !p2)
8088 ret = -TARGET_EFAULT;
8089 else
8090 ret = get_errno(rename(p, p2));
8091 unlock_user(p2, arg2, 0);
8092 unlock_user(p, arg1, 0);
8094 break;
8095 #endif
8096 #if defined(TARGET_NR_renameat)
8097 case TARGET_NR_renameat:
8099 void *p2;
8100 p = lock_user_string(arg2);
8101 p2 = lock_user_string(arg4);
8102 if (!p || !p2)
8103 ret = -TARGET_EFAULT;
8104 else
8105 ret = get_errno(renameat(arg1, p, arg3, p2));
8106 unlock_user(p2, arg4, 0);
8107 unlock_user(p, arg2, 0);
8109 break;
8110 #endif
8111 #ifdef TARGET_NR_mkdir
8112 case TARGET_NR_mkdir:
8113 if (!(p = lock_user_string(arg1)))
8114 goto efault;
8115 ret = get_errno(mkdir(p, arg2));
8116 unlock_user(p, arg1, 0);
8117 break;
8118 #endif
8119 #if defined(TARGET_NR_mkdirat)
8120 case TARGET_NR_mkdirat:
8121 if (!(p = lock_user_string(arg2)))
8122 goto efault;
8123 ret = get_errno(mkdirat(arg1, p, arg3));
8124 unlock_user(p, arg2, 0);
8125 break;
8126 #endif
8127 #ifdef TARGET_NR_rmdir
8128 case TARGET_NR_rmdir:
8129 if (!(p = lock_user_string(arg1)))
8130 goto efault;
8131 ret = get_errno(rmdir(p));
8132 unlock_user(p, arg1, 0);
8133 break;
8134 #endif
8135 case TARGET_NR_dup:
8136 ret = get_errno(dup(arg1));
8137 if (ret >= 0) {
8138 fd_trans_dup(arg1, ret);
8140 break;
8141 #ifdef TARGET_NR_pipe
8142 case TARGET_NR_pipe:
8143 ret = do_pipe(cpu_env, arg1, 0, 0);
8144 break;
8145 #endif
8146 #ifdef TARGET_NR_pipe2
8147 case TARGET_NR_pipe2:
8148 ret = do_pipe(cpu_env, arg1,
8149 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8150 break;
8151 #endif
8152 case TARGET_NR_times:
8154 struct target_tms *tmsp;
8155 struct tms tms;
8156 ret = get_errno(times(&tms));
8157 if (arg1) {
8158 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8159 if (!tmsp)
8160 goto efault;
8161 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8162 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8163 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8164 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8166 if (!is_error(ret))
8167 ret = host_to_target_clock_t(ret);
8169 break;
8170 #ifdef TARGET_NR_prof
8171 case TARGET_NR_prof:
8172 goto unimplemented;
8173 #endif
8174 #ifdef TARGET_NR_signal
8175 case TARGET_NR_signal:
8176 goto unimplemented;
8177 #endif
8178 case TARGET_NR_acct:
8179 if (arg1 == 0) {
8180 ret = get_errno(acct(NULL));
8181 } else {
8182 if (!(p = lock_user_string(arg1)))
8183 goto efault;
8184 ret = get_errno(acct(path(p)));
8185 unlock_user(p, arg1, 0);
8187 break;
8188 #ifdef TARGET_NR_umount2
8189 case TARGET_NR_umount2:
8190 if (!(p = lock_user_string(arg1)))
8191 goto efault;
8192 ret = get_errno(umount2(p, arg2));
8193 unlock_user(p, arg1, 0);
8194 break;
8195 #endif
8196 #ifdef TARGET_NR_lock
8197 case TARGET_NR_lock:
8198 goto unimplemented;
8199 #endif
8200 case TARGET_NR_ioctl:
8201 ret = do_ioctl(arg1, arg2, arg3);
8202 break;
8203 case TARGET_NR_fcntl:
8204 ret = do_fcntl(arg1, arg2, arg3);
8205 break;
8206 #ifdef TARGET_NR_mpx
8207 case TARGET_NR_mpx:
8208 goto unimplemented;
8209 #endif
8210 case TARGET_NR_setpgid:
8211 ret = get_errno(setpgid(arg1, arg2));
8212 break;
8213 #ifdef TARGET_NR_ulimit
8214 case TARGET_NR_ulimit:
8215 goto unimplemented;
8216 #endif
8217 #ifdef TARGET_NR_oldolduname
8218 case TARGET_NR_oldolduname:
8219 goto unimplemented;
8220 #endif
8221 case TARGET_NR_umask:
8222 ret = get_errno(umask(arg1));
8223 break;
8224 case TARGET_NR_chroot:
8225 if (!(p = lock_user_string(arg1)))
8226 goto efault;
8227 ret = get_errno(chroot(p));
8228 unlock_user(p, arg1, 0);
8229 break;
8230 #ifdef TARGET_NR_ustat
8231 case TARGET_NR_ustat:
8232 goto unimplemented;
8233 #endif
8234 #ifdef TARGET_NR_dup2
8235 case TARGET_NR_dup2:
8236 ret = get_errno(dup2(arg1, arg2));
8237 if (ret >= 0) {
8238 fd_trans_dup(arg1, arg2);
8240 break;
8241 #endif
8242 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8243 case TARGET_NR_dup3:
8244 ret = get_errno(dup3(arg1, arg2, arg3));
8245 if (ret >= 0) {
8246 fd_trans_dup(arg1, arg2);
8248 break;
8249 #endif
8250 #ifdef TARGET_NR_getppid /* not on alpha */
8251 case TARGET_NR_getppid:
8252 ret = get_errno(getppid());
8253 break;
8254 #endif
8255 #ifdef TARGET_NR_getpgrp
8256 case TARGET_NR_getpgrp:
8257 ret = get_errno(getpgrp());
8258 break;
8259 #endif
8260 case TARGET_NR_setsid:
8261 ret = get_errno(setsid());
8262 break;
8263 #ifdef TARGET_NR_sigaction
8264 case TARGET_NR_sigaction:
8266 #if defined(TARGET_ALPHA)
8267 struct target_sigaction act, oact, *pact = 0;
8268 struct target_old_sigaction *old_act;
8269 if (arg2) {
8270 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8271 goto efault;
8272 act._sa_handler = old_act->_sa_handler;
8273 target_siginitset(&act.sa_mask, old_act->sa_mask);
8274 act.sa_flags = old_act->sa_flags;
8275 act.sa_restorer = 0;
8276 unlock_user_struct(old_act, arg2, 0);
8277 pact = &act;
8279 ret = get_errno(do_sigaction(arg1, pact, &oact));
8280 if (!is_error(ret) && arg3) {
8281 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8282 goto efault;
8283 old_act->_sa_handler = oact._sa_handler;
8284 old_act->sa_mask = oact.sa_mask.sig[0];
8285 old_act->sa_flags = oact.sa_flags;
8286 unlock_user_struct(old_act, arg3, 1);
8288 #elif defined(TARGET_MIPS)
8289 struct target_sigaction act, oact, *pact, *old_act;
8291 if (arg2) {
8292 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8293 goto efault;
8294 act._sa_handler = old_act->_sa_handler;
8295 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8296 act.sa_flags = old_act->sa_flags;
8297 unlock_user_struct(old_act, arg2, 0);
8298 pact = &act;
8299 } else {
8300 pact = NULL;
8303 ret = get_errno(do_sigaction(arg1, pact, &oact));
8305 if (!is_error(ret) && arg3) {
8306 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8307 goto efault;
8308 old_act->_sa_handler = oact._sa_handler;
8309 old_act->sa_flags = oact.sa_flags;
8310 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8311 old_act->sa_mask.sig[1] = 0;
8312 old_act->sa_mask.sig[2] = 0;
8313 old_act->sa_mask.sig[3] = 0;
8314 unlock_user_struct(old_act, arg3, 1);
8316 #else
8317 struct target_old_sigaction *old_act;
8318 struct target_sigaction act, oact, *pact;
8319 if (arg2) {
8320 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8321 goto efault;
8322 act._sa_handler = old_act->_sa_handler;
8323 target_siginitset(&act.sa_mask, old_act->sa_mask);
8324 act.sa_flags = old_act->sa_flags;
8325 act.sa_restorer = old_act->sa_restorer;
8326 unlock_user_struct(old_act, arg2, 0);
8327 pact = &act;
8328 } else {
8329 pact = NULL;
8331 ret = get_errno(do_sigaction(arg1, pact, &oact));
8332 if (!is_error(ret) && arg3) {
8333 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8334 goto efault;
8335 old_act->_sa_handler = oact._sa_handler;
8336 old_act->sa_mask = oact.sa_mask.sig[0];
8337 old_act->sa_flags = oact.sa_flags;
8338 old_act->sa_restorer = oact.sa_restorer;
8339 unlock_user_struct(old_act, arg3, 1);
8341 #endif
8343 break;
8344 #endif
8345 case TARGET_NR_rt_sigaction:
8347 #if defined(TARGET_ALPHA)
8348 struct target_sigaction act, oact, *pact = 0;
8349 struct target_rt_sigaction *rt_act;
8351 if (arg4 != sizeof(target_sigset_t)) {
8352 ret = -TARGET_EINVAL;
8353 break;
8355 if (arg2) {
8356 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8357 goto efault;
8358 act._sa_handler = rt_act->_sa_handler;
8359 act.sa_mask = rt_act->sa_mask;
8360 act.sa_flags = rt_act->sa_flags;
8361 act.sa_restorer = arg5;
8362 unlock_user_struct(rt_act, arg2, 0);
8363 pact = &act;
8365 ret = get_errno(do_sigaction(arg1, pact, &oact));
8366 if (!is_error(ret) && arg3) {
8367 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8368 goto efault;
8369 rt_act->_sa_handler = oact._sa_handler;
8370 rt_act->sa_mask = oact.sa_mask;
8371 rt_act->sa_flags = oact.sa_flags;
8372 unlock_user_struct(rt_act, arg3, 1);
8374 #else
8375 struct target_sigaction *act;
8376 struct target_sigaction *oact;
8378 if (arg4 != sizeof(target_sigset_t)) {
8379 ret = -TARGET_EINVAL;
8380 break;
8382 if (arg2) {
8383 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
8384 goto efault;
8385 } else
8386 act = NULL;
8387 if (arg3) {
8388 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8389 ret = -TARGET_EFAULT;
8390 goto rt_sigaction_fail;
8392 } else
8393 oact = NULL;
8394 ret = get_errno(do_sigaction(arg1, act, oact));
8395 rt_sigaction_fail:
8396 if (act)
8397 unlock_user_struct(act, arg2, 0);
8398 if (oact)
8399 unlock_user_struct(oact, arg3, 1);
8400 #endif
8402 break;
8403 #ifdef TARGET_NR_sgetmask /* not on alpha */
8404 case TARGET_NR_sgetmask:
8406 sigset_t cur_set;
8407 abi_ulong target_set;
8408 ret = do_sigprocmask(0, NULL, &cur_set);
8409 if (!ret) {
8410 host_to_target_old_sigset(&target_set, &cur_set);
8411 ret = target_set;
8414 break;
8415 #endif
8416 #ifdef TARGET_NR_ssetmask /* not on alpha */
8417 case TARGET_NR_ssetmask:
8419 sigset_t set, oset, cur_set;
8420 abi_ulong target_set = arg1;
8421 /* We only have one word of the new mask so we must read
8422 * the rest of it with do_sigprocmask() and OR in this word.
8423 * We are guaranteed that a do_sigprocmask() that only queries
8424 * the signal mask will not fail.
8426 ret = do_sigprocmask(0, NULL, &cur_set);
8427 assert(!ret);
8428 target_to_host_old_sigset(&set, &target_set);
8429 sigorset(&set, &set, &cur_set);
8430 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8431 if (!ret) {
8432 host_to_target_old_sigset(&target_set, &oset);
8433 ret = target_set;
8436 break;
8437 #endif
8438 #ifdef TARGET_NR_sigprocmask
8439 case TARGET_NR_sigprocmask:
8441 #if defined(TARGET_ALPHA)
8442 sigset_t set, oldset;
8443 abi_ulong mask;
8444 int how;
8446 switch (arg1) {
8447 case TARGET_SIG_BLOCK:
8448 how = SIG_BLOCK;
8449 break;
8450 case TARGET_SIG_UNBLOCK:
8451 how = SIG_UNBLOCK;
8452 break;
8453 case TARGET_SIG_SETMASK:
8454 how = SIG_SETMASK;
8455 break;
8456 default:
8457 ret = -TARGET_EINVAL;
8458 goto fail;
8460 mask = arg2;
8461 target_to_host_old_sigset(&set, &mask);
8463 ret = do_sigprocmask(how, &set, &oldset);
8464 if (!is_error(ret)) {
8465 host_to_target_old_sigset(&mask, &oldset);
8466 ret = mask;
8467 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8469 #else
8470 sigset_t set, oldset, *set_ptr;
8471 int how;
8473 if (arg2) {
8474 switch (arg1) {
8475 case TARGET_SIG_BLOCK:
8476 how = SIG_BLOCK;
8477 break;
8478 case TARGET_SIG_UNBLOCK:
8479 how = SIG_UNBLOCK;
8480 break;
8481 case TARGET_SIG_SETMASK:
8482 how = SIG_SETMASK;
8483 break;
8484 default:
8485 ret = -TARGET_EINVAL;
8486 goto fail;
8488 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8489 goto efault;
8490 target_to_host_old_sigset(&set, p);
8491 unlock_user(p, arg2, 0);
8492 set_ptr = &set;
8493 } else {
8494 how = 0;
8495 set_ptr = NULL;
8497 ret = do_sigprocmask(how, set_ptr, &oldset);
8498 if (!is_error(ret) && arg3) {
8499 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8500 goto efault;
8501 host_to_target_old_sigset(p, &oldset);
8502 unlock_user(p, arg3, sizeof(target_sigset_t));
8504 #endif
8506 break;
8507 #endif
8508 case TARGET_NR_rt_sigprocmask:
8510 int how = arg1;
8511 sigset_t set, oldset, *set_ptr;
8513 if (arg4 != sizeof(target_sigset_t)) {
8514 ret = -TARGET_EINVAL;
8515 break;
8518 if (arg2) {
8519 switch(how) {
8520 case TARGET_SIG_BLOCK:
8521 how = SIG_BLOCK;
8522 break;
8523 case TARGET_SIG_UNBLOCK:
8524 how = SIG_UNBLOCK;
8525 break;
8526 case TARGET_SIG_SETMASK:
8527 how = SIG_SETMASK;
8528 break;
8529 default:
8530 ret = -TARGET_EINVAL;
8531 goto fail;
8533 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8534 goto efault;
8535 target_to_host_sigset(&set, p);
8536 unlock_user(p, arg2, 0);
8537 set_ptr = &set;
8538 } else {
8539 how = 0;
8540 set_ptr = NULL;
8542 ret = do_sigprocmask(how, set_ptr, &oldset);
8543 if (!is_error(ret) && arg3) {
8544 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8545 goto efault;
8546 host_to_target_sigset(p, &oldset);
8547 unlock_user(p, arg3, sizeof(target_sigset_t));
8550 break;
8551 #ifdef TARGET_NR_sigpending
8552 case TARGET_NR_sigpending:
8554 sigset_t set;
8555 ret = get_errno(sigpending(&set));
8556 if (!is_error(ret)) {
8557 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8558 goto efault;
8559 host_to_target_old_sigset(p, &set);
8560 unlock_user(p, arg1, sizeof(target_sigset_t));
8563 break;
8564 #endif
8565 case TARGET_NR_rt_sigpending:
8567 sigset_t set;
8569 /* Yes, this check is >, not != like most. We follow the kernel's
8570 * logic and it does it like this because it implements
8571 * NR_sigpending through the same code path, and in that case
8572 * the old_sigset_t is smaller in size.
8574 if (arg2 > sizeof(target_sigset_t)) {
8575 ret = -TARGET_EINVAL;
8576 break;
8579 ret = get_errno(sigpending(&set));
8580 if (!is_error(ret)) {
8581 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8582 goto efault;
8583 host_to_target_sigset(p, &set);
8584 unlock_user(p, arg1, sizeof(target_sigset_t));
8587 break;
8588 #ifdef TARGET_NR_sigsuspend
8589 case TARGET_NR_sigsuspend:
8591 TaskState *ts = cpu->opaque;
8592 #if defined(TARGET_ALPHA)
8593 abi_ulong mask = arg1;
8594 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8595 #else
8596 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8597 goto efault;
8598 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8599 unlock_user(p, arg1, 0);
8600 #endif
8601 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8602 SIGSET_T_SIZE));
8603 if (ret != -TARGET_ERESTARTSYS) {
8604 ts->in_sigsuspend = 1;
8607 break;
8608 #endif
8609 case TARGET_NR_rt_sigsuspend:
8611 TaskState *ts = cpu->opaque;
8613 if (arg2 != sizeof(target_sigset_t)) {
8614 ret = -TARGET_EINVAL;
8615 break;
8617 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8618 goto efault;
8619 target_to_host_sigset(&ts->sigsuspend_mask, p);
8620 unlock_user(p, arg1, 0);
8621 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8622 SIGSET_T_SIZE));
8623 if (ret != -TARGET_ERESTARTSYS) {
8624 ts->in_sigsuspend = 1;
8627 break;
8628 case TARGET_NR_rt_sigtimedwait:
8630 sigset_t set;
8631 struct timespec uts, *puts;
8632 siginfo_t uinfo;
8634 if (arg4 != sizeof(target_sigset_t)) {
8635 ret = -TARGET_EINVAL;
8636 break;
8639 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8640 goto efault;
8641 target_to_host_sigset(&set, p);
8642 unlock_user(p, arg1, 0);
8643 if (arg3) {
8644 puts = &uts;
8645 target_to_host_timespec(puts, arg3);
8646 } else {
8647 puts = NULL;
8649 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8650 SIGSET_T_SIZE));
8651 if (!is_error(ret)) {
8652 if (arg2) {
8653 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8655 if (!p) {
8656 goto efault;
8658 host_to_target_siginfo(p, &uinfo);
8659 unlock_user(p, arg2, sizeof(target_siginfo_t));
8661 ret = host_to_target_signal(ret);
8664 break;
8665 case TARGET_NR_rt_sigqueueinfo:
8667 siginfo_t uinfo;
8669 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8670 if (!p) {
8671 goto efault;
8673 target_to_host_siginfo(&uinfo, p);
8674 unlock_user(p, arg1, 0);
8675 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8677 break;
8678 #ifdef TARGET_NR_sigreturn
8679 case TARGET_NR_sigreturn:
8680 if (block_signals()) {
8681 ret = -TARGET_ERESTARTSYS;
8682 } else {
8683 ret = do_sigreturn(cpu_env);
8685 break;
8686 #endif
8687 case TARGET_NR_rt_sigreturn:
8688 if (block_signals()) {
8689 ret = -TARGET_ERESTARTSYS;
8690 } else {
8691 ret = do_rt_sigreturn(cpu_env);
8693 break;
8694 case TARGET_NR_sethostname:
8695 if (!(p = lock_user_string(arg1)))
8696 goto efault;
8697 ret = get_errno(sethostname(p, arg2));
8698 unlock_user(p, arg1, 0);
8699 break;
8700 case TARGET_NR_setrlimit:
8702 int resource = target_to_host_resource(arg1);
8703 struct target_rlimit *target_rlim;
8704 struct rlimit rlim;
8705 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8706 goto efault;
8707 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8708 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8709 unlock_user_struct(target_rlim, arg2, 0);
8710 ret = get_errno(setrlimit(resource, &rlim));
8712 break;
8713 case TARGET_NR_getrlimit:
8715 int resource = target_to_host_resource(arg1);
8716 struct target_rlimit *target_rlim;
8717 struct rlimit rlim;
8719 ret = get_errno(getrlimit(resource, &rlim));
8720 if (!is_error(ret)) {
8721 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8722 goto efault;
8723 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8724 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8725 unlock_user_struct(target_rlim, arg2, 1);
8728 break;
8729 case TARGET_NR_getrusage:
8731 struct rusage rusage;
8732 ret = get_errno(getrusage(arg1, &rusage));
8733 if (!is_error(ret)) {
8734 ret = host_to_target_rusage(arg2, &rusage);
8737 break;
8738 case TARGET_NR_gettimeofday:
8740 struct timeval tv;
8741 ret = get_errno(gettimeofday(&tv, NULL));
8742 if (!is_error(ret)) {
8743 if (copy_to_user_timeval(arg1, &tv))
8744 goto efault;
8747 break;
8748 case TARGET_NR_settimeofday:
8750 struct timeval tv, *ptv = NULL;
8751 struct timezone tz, *ptz = NULL;
8753 if (arg1) {
8754 if (copy_from_user_timeval(&tv, arg1)) {
8755 goto efault;
8757 ptv = &tv;
8760 if (arg2) {
8761 if (copy_from_user_timezone(&tz, arg2)) {
8762 goto efault;
8764 ptz = &tz;
8767 ret = get_errno(settimeofday(ptv, ptz));
8769 break;
8770 #if defined(TARGET_NR_select)
8771 case TARGET_NR_select:
8772 #if defined(TARGET_WANT_NI_OLD_SELECT)
8773 /* some architectures used to have old_select here
8774 * but now ENOSYS it.
8776 ret = -TARGET_ENOSYS;
8777 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8778 ret = do_old_select(arg1);
8779 #else
8780 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8781 #endif
8782 break;
8783 #endif
8784 #ifdef TARGET_NR_pselect6
8785 case TARGET_NR_pselect6:
8787 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8788 fd_set rfds, wfds, efds;
8789 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8790 struct timespec ts, *ts_ptr;
8793 * The 6th arg is actually two args smashed together,
8794 * so we cannot use the C library.
8796 sigset_t set;
8797 struct {
8798 sigset_t *set;
8799 size_t size;
8800 } sig, *sig_ptr;
8802 abi_ulong arg_sigset, arg_sigsize, *arg7;
8803 target_sigset_t *target_sigset;
8805 n = arg1;
8806 rfd_addr = arg2;
8807 wfd_addr = arg3;
8808 efd_addr = arg4;
8809 ts_addr = arg5;
8811 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8812 if (ret) {
8813 goto fail;
8815 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8816 if (ret) {
8817 goto fail;
8819 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8820 if (ret) {
8821 goto fail;
8825 * This takes a timespec, and not a timeval, so we cannot
8826 * use the do_select() helper ...
8828 if (ts_addr) {
8829 if (target_to_host_timespec(&ts, ts_addr)) {
8830 goto efault;
8832 ts_ptr = &ts;
8833 } else {
8834 ts_ptr = NULL;
8837 /* Extract the two packed args for the sigset */
8838 if (arg6) {
8839 sig_ptr = &sig;
8840 sig.size = SIGSET_T_SIZE;
8842 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8843 if (!arg7) {
8844 goto efault;
8846 arg_sigset = tswapal(arg7[0]);
8847 arg_sigsize = tswapal(arg7[1]);
8848 unlock_user(arg7, arg6, 0);
8850 if (arg_sigset) {
8851 sig.set = &set;
8852 if (arg_sigsize != sizeof(*target_sigset)) {
8853 /* Like the kernel, we enforce correct size sigsets */
8854 ret = -TARGET_EINVAL;
8855 goto fail;
8857 target_sigset = lock_user(VERIFY_READ, arg_sigset,
8858 sizeof(*target_sigset), 1);
8859 if (!target_sigset) {
8860 goto efault;
8862 target_to_host_sigset(&set, target_sigset);
8863 unlock_user(target_sigset, arg_sigset, 0);
8864 } else {
8865 sig.set = NULL;
8867 } else {
8868 sig_ptr = NULL;
8871 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8872 ts_ptr, sig_ptr));
8874 if (!is_error(ret)) {
8875 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8876 goto efault;
8877 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8878 goto efault;
8879 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8880 goto efault;
8882 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8883 goto efault;
8886 break;
8887 #endif
8888 #ifdef TARGET_NR_symlink
8889 case TARGET_NR_symlink:
8891 void *p2;
8892 p = lock_user_string(arg1);
8893 p2 = lock_user_string(arg2);
8894 if (!p || !p2)
8895 ret = -TARGET_EFAULT;
8896 else
8897 ret = get_errno(symlink(p, p2));
8898 unlock_user(p2, arg2, 0);
8899 unlock_user(p, arg1, 0);
8901 break;
8902 #endif
8903 #if defined(TARGET_NR_symlinkat)
8904 case TARGET_NR_symlinkat:
8906 void *p2;
8907 p = lock_user_string(arg1);
8908 p2 = lock_user_string(arg3);
8909 if (!p || !p2)
8910 ret = -TARGET_EFAULT;
8911 else
8912 ret = get_errno(symlinkat(p, arg2, p2));
8913 unlock_user(p2, arg3, 0);
8914 unlock_user(p, arg1, 0);
8916 break;
8917 #endif
8918 #ifdef TARGET_NR_oldlstat
8919 case TARGET_NR_oldlstat:
8920 goto unimplemented;
8921 #endif
8922 #ifdef TARGET_NR_readlink
8923 case TARGET_NR_readlink:
8925 void *p2;
8926 p = lock_user_string(arg1);
8927 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8928 if (!p || !p2) {
8929 ret = -TARGET_EFAULT;
8930 } else if (!arg3) {
8931 /* Short circuit this for the magic exe check. */
8932 ret = -TARGET_EINVAL;
8933 } else if (is_proc_myself((const char *)p, "exe")) {
8934 char real[PATH_MAX], *temp;
8935 temp = realpath(exec_path, real);
8936 /* Return value is # of bytes that we wrote to the buffer. */
8937 if (temp == NULL) {
8938 ret = get_errno(-1);
8939 } else {
8940 /* Don't worry about sign mismatch as earlier mapping
8941 * logic would have thrown a bad address error. */
8942 ret = MIN(strlen(real), arg3);
8943 /* We cannot NUL terminate the string. */
8944 memcpy(p2, real, ret);
8946 } else {
8947 ret = get_errno(readlink(path(p), p2, arg3));
8949 unlock_user(p2, arg2, ret);
8950 unlock_user(p, arg1, 0);
8952 break;
8953 #endif
/* readlinkat(2): like TARGET_NR_readlink above, /proc/self/exe is
 * intercepted so the guest sees the emulated executable's path. */
8954 #if defined(TARGET_NR_readlinkat)
8955 case TARGET_NR_readlinkat:
8957 void *p2;
8958 p = lock_user_string(arg2);
8959 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8960 if (!p || !p2) {
8961 ret = -TARGET_EFAULT;
8962 } else if (is_proc_myself((const char *)p, "exe")) {
8963 char real[PATH_MAX], *temp;
8964 temp = realpath(exec_path, real);
/* Fix: only copy out when realpath() succeeded.  The old code
 * snprintf()ed from 'real' even when realpath() failed (reading an
 * uninitialized buffer) and returned an unclamped strlen().  Mirror
 * the readlink case: return the number of bytes written, capped at
 * the caller's buffer size, with no NUL terminator (readlink(2)
 * semantics). */
if (temp == NULL) {
    ret = get_errno(-1);
} else {
    ret = MIN(strlen(real), arg4);
    memcpy(p2, real, ret);
}
8967 } else {
8968 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8970 unlock_user(p2, arg3, ret);
8971 unlock_user(p, arg2, 0);
8973 break;
8974 #endif
8975 #ifdef TARGET_NR_uselib
8976 case TARGET_NR_uselib:
8977 goto unimplemented;
8978 #endif
8979 #ifdef TARGET_NR_swapon
8980 case TARGET_NR_swapon:
8981 if (!(p = lock_user_string(arg1)))
8982 goto efault;
8983 ret = get_errno(swapon(p, arg2));
8984 unlock_user(p, arg1, 0);
8985 break;
8986 #endif
8987 case TARGET_NR_reboot:
8988 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8989 /* arg4 must be ignored in all other cases */
8990 p = lock_user_string(arg4);
8991 if (!p) {
8992 goto efault;
8994 ret = get_errno(reboot(arg1, arg2, arg3, p));
8995 unlock_user(p, arg4, 0);
8996 } else {
8997 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8999 break;
9000 #ifdef TARGET_NR_readdir
9001 case TARGET_NR_readdir:
9002 goto unimplemented;
9003 #endif
9004 #ifdef TARGET_NR_mmap
9005 case TARGET_NR_mmap:
9006 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9007 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9008 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9009 || defined(TARGET_S390X)
9011 abi_ulong *v;
9012 abi_ulong v1, v2, v3, v4, v5, v6;
9013 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9014 goto efault;
9015 v1 = tswapal(v[0]);
9016 v2 = tswapal(v[1]);
9017 v3 = tswapal(v[2]);
9018 v4 = tswapal(v[3]);
9019 v5 = tswapal(v[4]);
9020 v6 = tswapal(v[5]);
9021 unlock_user(v, arg1, 0);
9022 ret = get_errno(target_mmap(v1, v2, v3,
9023 target_to_host_bitmask(v4, mmap_flags_tbl),
9024 v5, v6));
9026 #else
9027 ret = get_errno(target_mmap(arg1, arg2, arg3,
9028 target_to_host_bitmask(arg4, mmap_flags_tbl),
9029 arg5,
9030 arg6));
9031 #endif
9032 break;
9033 #endif
9034 #ifdef TARGET_NR_mmap2
9035 case TARGET_NR_mmap2:
9036 #ifndef MMAP_SHIFT
9037 #define MMAP_SHIFT 12
9038 #endif
9039 ret = get_errno(target_mmap(arg1, arg2, arg3,
9040 target_to_host_bitmask(arg4, mmap_flags_tbl),
9041 arg5,
9042 arg6 << MMAP_SHIFT));
9043 break;
9044 #endif
9045 case TARGET_NR_munmap:
9046 ret = get_errno(target_munmap(arg1, arg2));
9047 break;
9048 case TARGET_NR_mprotect:
9050 TaskState *ts = cpu->opaque;
9051 /* Special hack to detect libc making the stack executable. */
9052 if ((arg3 & PROT_GROWSDOWN)
9053 && arg1 >= ts->info->stack_limit
9054 && arg1 <= ts->info->start_stack) {
9055 arg3 &= ~PROT_GROWSDOWN;
9056 arg2 = arg2 + arg1 - ts->info->stack_limit;
9057 arg1 = ts->info->stack_limit;
9060 ret = get_errno(target_mprotect(arg1, arg2, arg3));
9061 break;
9062 #ifdef TARGET_NR_mremap
9063 case TARGET_NR_mremap:
9064 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9065 break;
9066 #endif
9067 /* ??? msync/mlock/munlock are broken for softmmu. */
9068 #ifdef TARGET_NR_msync
9069 case TARGET_NR_msync:
9070 ret = get_errno(msync(g2h(arg1), arg2, arg3));
9071 break;
9072 #endif
9073 #ifdef TARGET_NR_mlock
9074 case TARGET_NR_mlock:
9075 ret = get_errno(mlock(g2h(arg1), arg2));
9076 break;
9077 #endif
9078 #ifdef TARGET_NR_munlock
9079 case TARGET_NR_munlock:
9080 ret = get_errno(munlock(g2h(arg1), arg2));
9081 break;
9082 #endif
9083 #ifdef TARGET_NR_mlockall
9084 case TARGET_NR_mlockall:
9085 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9086 break;
9087 #endif
9088 #ifdef TARGET_NR_munlockall
9089 case TARGET_NR_munlockall:
9090 ret = get_errno(munlockall());
9091 break;
9092 #endif
9093 case TARGET_NR_truncate:
9094 if (!(p = lock_user_string(arg1)))
9095 goto efault;
9096 ret = get_errno(truncate(p, arg2));
9097 unlock_user(p, arg1, 0);
9098 break;
9099 case TARGET_NR_ftruncate:
9100 ret = get_errno(ftruncate(arg1, arg2));
9101 break;
9102 case TARGET_NR_fchmod:
9103 ret = get_errno(fchmod(arg1, arg2));
9104 break;
9105 #if defined(TARGET_NR_fchmodat)
9106 case TARGET_NR_fchmodat:
9107 if (!(p = lock_user_string(arg2)))
9108 goto efault;
9109 ret = get_errno(fchmodat(arg1, p, arg3, 0));
9110 unlock_user(p, arg2, 0);
9111 break;
9112 #endif
9113 case TARGET_NR_getpriority:
9114 /* Note that negative values are valid for getpriority, so we must
9115 differentiate based on errno settings. */
9116 errno = 0;
9117 ret = getpriority(arg1, arg2);
9118 if (ret == -1 && errno != 0) {
9119 ret = -host_to_target_errno(errno);
9120 break;
9122 #ifdef TARGET_ALPHA
9123 /* Return value is the unbiased priority. Signal no error. */
9124 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9125 #else
9126 /* Return value is a biased priority to avoid negative numbers. */
9127 ret = 20 - ret;
9128 #endif
9129 break;
9130 case TARGET_NR_setpriority:
9131 ret = get_errno(setpriority(arg1, arg2, arg3));
9132 break;
9133 #ifdef TARGET_NR_profil
9134 case TARGET_NR_profil:
9135 goto unimplemented;
9136 #endif
9137 case TARGET_NR_statfs:
9138 if (!(p = lock_user_string(arg1)))
9139 goto efault;
9140 ret = get_errno(statfs(path(p), &stfs));
9141 unlock_user(p, arg1, 0);
9142 convert_statfs:
9143 if (!is_error(ret)) {
9144 struct target_statfs *target_stfs;
9146 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9147 goto efault;
9148 __put_user(stfs.f_type, &target_stfs->f_type);
9149 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9150 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9151 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9152 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9153 __put_user(stfs.f_files, &target_stfs->f_files);
9154 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9155 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9156 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9157 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9158 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9159 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9160 unlock_user_struct(target_stfs, arg2, 1);
9162 break;
9163 case TARGET_NR_fstatfs:
9164 ret = get_errno(fstatfs(arg1, &stfs));
9165 goto convert_statfs;
9166 #ifdef TARGET_NR_statfs64
9167 case TARGET_NR_statfs64:
9168 if (!(p = lock_user_string(arg1)))
9169 goto efault;
9170 ret = get_errno(statfs(path(p), &stfs));
9171 unlock_user(p, arg1, 0);
9172 convert_statfs64:
9173 if (!is_error(ret)) {
9174 struct target_statfs64 *target_stfs;
9176 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9177 goto efault;
9178 __put_user(stfs.f_type, &target_stfs->f_type);
9179 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9180 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9181 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9182 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9183 __put_user(stfs.f_files, &target_stfs->f_files);
9184 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9185 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9186 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9187 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9188 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9189 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9190 unlock_user_struct(target_stfs, arg3, 1);
9192 break;
9193 case TARGET_NR_fstatfs64:
9194 ret = get_errno(fstatfs(arg1, &stfs));
9195 goto convert_statfs64;
9196 #endif
9197 #ifdef TARGET_NR_ioperm
9198 case TARGET_NR_ioperm:
9199 goto unimplemented;
9200 #endif
9201 #ifdef TARGET_NR_socketcall
9202 case TARGET_NR_socketcall:
9203 ret = do_socketcall(arg1, arg2);
9204 break;
9205 #endif
9206 #ifdef TARGET_NR_accept
9207 case TARGET_NR_accept:
9208 ret = do_accept4(arg1, arg2, arg3, 0);
9209 break;
9210 #endif
9211 #ifdef TARGET_NR_accept4
9212 case TARGET_NR_accept4:
9213 ret = do_accept4(arg1, arg2, arg3, arg4);
9214 break;
9215 #endif
9216 #ifdef TARGET_NR_bind
9217 case TARGET_NR_bind:
9218 ret = do_bind(arg1, arg2, arg3);
9219 break;
9220 #endif
9221 #ifdef TARGET_NR_connect
9222 case TARGET_NR_connect:
9223 ret = do_connect(arg1, arg2, arg3);
9224 break;
9225 #endif
9226 #ifdef TARGET_NR_getpeername
9227 case TARGET_NR_getpeername:
9228 ret = do_getpeername(arg1, arg2, arg3);
9229 break;
9230 #endif
9231 #ifdef TARGET_NR_getsockname
9232 case TARGET_NR_getsockname:
9233 ret = do_getsockname(arg1, arg2, arg3);
9234 break;
9235 #endif
9236 #ifdef TARGET_NR_getsockopt
9237 case TARGET_NR_getsockopt:
9238 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9239 break;
9240 #endif
9241 #ifdef TARGET_NR_listen
9242 case TARGET_NR_listen:
9243 ret = get_errno(listen(arg1, arg2));
9244 break;
9245 #endif
9246 #ifdef TARGET_NR_recv
9247 case TARGET_NR_recv:
9248 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9249 break;
9250 #endif
9251 #ifdef TARGET_NR_recvfrom
9252 case TARGET_NR_recvfrom:
9253 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9254 break;
9255 #endif
9256 #ifdef TARGET_NR_recvmsg
9257 case TARGET_NR_recvmsg:
9258 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
9259 break;
9260 #endif
9261 #ifdef TARGET_NR_send
9262 case TARGET_NR_send:
9263 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9264 break;
9265 #endif
9266 #ifdef TARGET_NR_sendmsg
9267 case TARGET_NR_sendmsg:
9268 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
9269 break;
9270 #endif
9271 #ifdef TARGET_NR_sendmmsg
9272 case TARGET_NR_sendmmsg:
9273 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9274 break;
9275 case TARGET_NR_recvmmsg:
9276 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9277 break;
9278 #endif
9279 #ifdef TARGET_NR_sendto
9280 case TARGET_NR_sendto:
9281 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9282 break;
9283 #endif
9284 #ifdef TARGET_NR_shutdown
9285 case TARGET_NR_shutdown:
9286 ret = get_errno(shutdown(arg1, arg2));
9287 break;
9288 #endif
9289 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9290 case TARGET_NR_getrandom:
9291 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9292 if (!p) {
9293 goto efault;
9295 ret = get_errno(getrandom(p, arg2, arg3));
9296 unlock_user(p, arg1, ret);
9297 break;
9298 #endif
9299 #ifdef TARGET_NR_socket
9300 case TARGET_NR_socket:
9301 ret = do_socket(arg1, arg2, arg3);
9302 fd_trans_unregister(ret);
9303 break;
9304 #endif
9305 #ifdef TARGET_NR_socketpair
9306 case TARGET_NR_socketpair:
9307 ret = do_socketpair(arg1, arg2, arg3, arg4);
9308 break;
9309 #endif
9310 #ifdef TARGET_NR_setsockopt
9311 case TARGET_NR_setsockopt:
9312 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9313 break;
9314 #endif
9316 case TARGET_NR_syslog:
9317 if (!(p = lock_user_string(arg2)))
9318 goto efault;
9319 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9320 unlock_user(p, arg2, 0);
9321 break;
9323 case TARGET_NR_setitimer:
9325 struct itimerval value, ovalue, *pvalue;
9327 if (arg2) {
9328 pvalue = &value;
9329 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9330 || copy_from_user_timeval(&pvalue->it_value,
9331 arg2 + sizeof(struct target_timeval)))
9332 goto efault;
9333 } else {
9334 pvalue = NULL;
9336 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9337 if (!is_error(ret) && arg3) {
9338 if (copy_to_user_timeval(arg3,
9339 &ovalue.it_interval)
9340 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9341 &ovalue.it_value))
9342 goto efault;
9345 break;
9346 case TARGET_NR_getitimer:
9348 struct itimerval value;
9350 ret = get_errno(getitimer(arg1, &value));
9351 if (!is_error(ret) && arg2) {
9352 if (copy_to_user_timeval(arg2,
9353 &value.it_interval)
9354 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9355 &value.it_value))
9356 goto efault;
9359 break;
9360 #ifdef TARGET_NR_stat
9361 case TARGET_NR_stat:
9362 if (!(p = lock_user_string(arg1)))
9363 goto efault;
9364 ret = get_errno(stat(path(p), &st));
9365 unlock_user(p, arg1, 0);
9366 goto do_stat;
9367 #endif
9368 #ifdef TARGET_NR_lstat
9369 case TARGET_NR_lstat:
9370 if (!(p = lock_user_string(arg1)))
9371 goto efault;
9372 ret = get_errno(lstat(path(p), &st));
9373 unlock_user(p, arg1, 0);
9374 goto do_stat;
9375 #endif
9376 case TARGET_NR_fstat:
9378 ret = get_errno(fstat(arg1, &st));
9379 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9380 do_stat:
9381 #endif
9382 if (!is_error(ret)) {
9383 struct target_stat *target_st;
9385 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9386 goto efault;
9387 memset(target_st, 0, sizeof(*target_st));
9388 __put_user(st.st_dev, &target_st->st_dev);
9389 __put_user(st.st_ino, &target_st->st_ino);
9390 __put_user(st.st_mode, &target_st->st_mode);
9391 __put_user(st.st_uid, &target_st->st_uid);
9392 __put_user(st.st_gid, &target_st->st_gid);
9393 __put_user(st.st_nlink, &target_st->st_nlink);
9394 __put_user(st.st_rdev, &target_st->st_rdev);
9395 __put_user(st.st_size, &target_st->st_size);
9396 __put_user(st.st_blksize, &target_st->st_blksize);
9397 __put_user(st.st_blocks, &target_st->st_blocks);
9398 __put_user(st.st_atime, &target_st->target_st_atime);
9399 __put_user(st.st_mtime, &target_st->target_st_mtime);
9400 __put_user(st.st_ctime, &target_st->target_st_ctime);
9401 unlock_user_struct(target_st, arg2, 1);
9404 break;
9405 #ifdef TARGET_NR_olduname
9406 case TARGET_NR_olduname:
9407 goto unimplemented;
9408 #endif
9409 #ifdef TARGET_NR_iopl
9410 case TARGET_NR_iopl:
9411 goto unimplemented;
9412 #endif
9413 case TARGET_NR_vhangup:
9414 ret = get_errno(vhangup());
9415 break;
9416 #ifdef TARGET_NR_idle
9417 case TARGET_NR_idle:
9418 goto unimplemented;
9419 #endif
9420 #ifdef TARGET_NR_syscall
9421 case TARGET_NR_syscall:
9422 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9423 arg6, arg7, arg8, 0);
9424 break;
9425 #endif
9426 case TARGET_NR_wait4:
9428 int status;
9429 abi_long status_ptr = arg2;
9430 struct rusage rusage, *rusage_ptr;
9431 abi_ulong target_rusage = arg4;
9432 abi_long rusage_err;
9433 if (target_rusage)
9434 rusage_ptr = &rusage;
9435 else
9436 rusage_ptr = NULL;
9437 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9438 if (!is_error(ret)) {
9439 if (status_ptr && ret) {
9440 status = host_to_target_waitstatus(status);
9441 if (put_user_s32(status, status_ptr))
9442 goto efault;
9444 if (target_rusage) {
9445 rusage_err = host_to_target_rusage(target_rusage, &rusage);
9446 if (rusage_err) {
9447 ret = rusage_err;
9452 break;
9453 #ifdef TARGET_NR_swapoff
9454 case TARGET_NR_swapoff:
9455 if (!(p = lock_user_string(arg1)))
9456 goto efault;
9457 ret = get_errno(swapoff(p));
9458 unlock_user(p, arg1, 0);
9459 break;
9460 #endif
9461 case TARGET_NR_sysinfo:
9463 struct target_sysinfo *target_value;
9464 struct sysinfo value;
9465 ret = get_errno(sysinfo(&value));
9466 if (!is_error(ret) && arg1)
9468 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9469 goto efault;
9470 __put_user(value.uptime, &target_value->uptime);
9471 __put_user(value.loads[0], &target_value->loads[0]);
9472 __put_user(value.loads[1], &target_value->loads[1]);
9473 __put_user(value.loads[2], &target_value->loads[2]);
9474 __put_user(value.totalram, &target_value->totalram);
9475 __put_user(value.freeram, &target_value->freeram);
9476 __put_user(value.sharedram, &target_value->sharedram);
9477 __put_user(value.bufferram, &target_value->bufferram);
9478 __put_user(value.totalswap, &target_value->totalswap);
9479 __put_user(value.freeswap, &target_value->freeswap);
9480 __put_user(value.procs, &target_value->procs);
9481 __put_user(value.totalhigh, &target_value->totalhigh);
9482 __put_user(value.freehigh, &target_value->freehigh);
9483 __put_user(value.mem_unit, &target_value->mem_unit);
9484 unlock_user_struct(target_value, arg1, 1);
9487 break;
9488 #ifdef TARGET_NR_ipc
9489 case TARGET_NR_ipc:
9490 ret = do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9491 break;
9492 #endif
9493 #ifdef TARGET_NR_semget
9494 case TARGET_NR_semget:
9495 ret = get_errno(semget(arg1, arg2, arg3));
9496 break;
9497 #endif
9498 #ifdef TARGET_NR_semop
9499 case TARGET_NR_semop:
9500 ret = do_semop(arg1, arg2, arg3);
9501 break;
9502 #endif
9503 #ifdef TARGET_NR_semctl
9504 case TARGET_NR_semctl:
9505 ret = do_semctl(arg1, arg2, arg3, arg4);
9506 break;
9507 #endif
9508 #ifdef TARGET_NR_msgctl
9509 case TARGET_NR_msgctl:
9510 ret = do_msgctl(arg1, arg2, arg3);
9511 break;
9512 #endif
9513 #ifdef TARGET_NR_msgget
9514 case TARGET_NR_msgget:
9515 ret = get_errno(msgget(arg1, arg2));
9516 break;
9517 #endif
9518 #ifdef TARGET_NR_msgrcv
9519 case TARGET_NR_msgrcv:
9520 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9521 break;
9522 #endif
9523 #ifdef TARGET_NR_msgsnd
9524 case TARGET_NR_msgsnd:
9525 ret = do_msgsnd(arg1, arg2, arg3, arg4);
9526 break;
9527 #endif
9528 #ifdef TARGET_NR_shmget
9529 case TARGET_NR_shmget:
9530 ret = get_errno(shmget(arg1, arg2, arg3));
9531 break;
9532 #endif
9533 #ifdef TARGET_NR_shmctl
9534 case TARGET_NR_shmctl:
9535 ret = do_shmctl(arg1, arg2, arg3);
9536 break;
9537 #endif
9538 #ifdef TARGET_NR_shmat
9539 case TARGET_NR_shmat:
9540 ret = do_shmat(cpu_env, arg1, arg2, arg3);
9541 break;
9542 #endif
9543 #ifdef TARGET_NR_shmdt
9544 case TARGET_NR_shmdt:
9545 ret = do_shmdt(arg1);
9546 break;
9547 #endif
9548 case TARGET_NR_fsync:
9549 ret = get_errno(fsync(arg1));
9550 break;
9551 case TARGET_NR_clone:
9552 /* Linux manages to have three different orderings for its
9553 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9554 * match the kernel's CONFIG_CLONE_* settings.
9555 * Microblaze is further special in that it uses a sixth
9556 * implicit argument to clone for the TLS pointer.
9558 #if defined(TARGET_MICROBLAZE)
9559 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9560 #elif defined(TARGET_CLONE_BACKWARDS)
9561 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9562 #elif defined(TARGET_CLONE_BACKWARDS2)
9563 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9564 #else
9565 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9566 #endif
9567 break;
9568 #ifdef __NR_exit_group
9569 /* new thread calls */
9570 case TARGET_NR_exit_group:
9571 #ifdef TARGET_GPROF
9572 _mcleanup();
9573 #endif
9574 gdb_exit(cpu_env, arg1);
9575 ret = get_errno(exit_group(arg1));
9576 break;
9577 #endif
9578 case TARGET_NR_setdomainname:
9579 if (!(p = lock_user_string(arg1)))
9580 goto efault;
9581 ret = get_errno(setdomainname(p, arg2));
9582 unlock_user(p, arg1, 0);
9583 break;
9584 case TARGET_NR_uname:
9585 /* no need to transcode because we use the linux syscall */
9587 struct new_utsname * buf;
9589 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9590 goto efault;
9591 ret = get_errno(sys_uname(buf));
9592 if (!is_error(ret)) {
9593 /* Overwrite the native machine name with whatever is being
9594 emulated. */
9595 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
9596 /* Allow the user to override the reported release. */
9597 if (qemu_uname_release && *qemu_uname_release) {
9598 g_strlcpy(buf->release, qemu_uname_release,
9599 sizeof(buf->release));
9602 unlock_user_struct(buf, arg1, 1);
9604 break;
9605 #ifdef TARGET_I386
9606 case TARGET_NR_modify_ldt:
9607 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
9608 break;
9609 #if !defined(TARGET_X86_64)
9610 case TARGET_NR_vm86old:
9611 goto unimplemented;
9612 case TARGET_NR_vm86:
9613 ret = do_vm86(cpu_env, arg1, arg2);
9614 break;
9615 #endif
9616 #endif
9617 case TARGET_NR_adjtimex:
9619 struct timex host_buf;
9621 if (target_to_host_timex(&host_buf, arg1) != 0) {
9622 goto efault;
9624 ret = get_errno(adjtimex(&host_buf));
9625 if (!is_error(ret)) {
9626 if (host_to_target_timex(arg1, &host_buf) != 0) {
9627 goto efault;
9631 break;
9632 #ifdef TARGET_NR_create_module
9633 case TARGET_NR_create_module:
9634 #endif
9635 case TARGET_NR_init_module:
9636 case TARGET_NR_delete_module:
9637 #ifdef TARGET_NR_get_kernel_syms
9638 case TARGET_NR_get_kernel_syms:
9639 #endif
9640 goto unimplemented;
9641 case TARGET_NR_quotactl:
9642 goto unimplemented;
9643 case TARGET_NR_getpgid:
9644 ret = get_errno(getpgid(arg1));
9645 break;
9646 case TARGET_NR_fchdir:
9647 ret = get_errno(fchdir(arg1));
9648 break;
9649 #ifdef TARGET_NR_bdflush /* not on x86_64 */
9650 case TARGET_NR_bdflush:
9651 goto unimplemented;
9652 #endif
9653 #ifdef TARGET_NR_sysfs
9654 case TARGET_NR_sysfs:
9655 goto unimplemented;
9656 #endif
9657 case TARGET_NR_personality:
9658 ret = get_errno(personality(arg1));
9659 break;
9660 #ifdef TARGET_NR_afs_syscall
9661 case TARGET_NR_afs_syscall:
9662 goto unimplemented;
9663 #endif
9664 #ifdef TARGET_NR__llseek /* Not on alpha */
9665 case TARGET_NR__llseek:
9667 int64_t res;
9668 #if !defined(__NR_llseek)
9669 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9670 if (res == -1) {
9671 ret = get_errno(res);
9672 } else {
9673 ret = 0;
9675 #else
9676 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9677 #endif
9678 if ((ret == 0) && put_user_s64(res, arg4)) {
9679 goto efault;
9682 break;
9683 #endif
9684 #ifdef TARGET_NR_getdents
9685 case TARGET_NR_getdents:
9686 #ifdef __NR_getdents
9687 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9689 struct target_dirent *target_dirp;
9690 struct linux_dirent *dirp;
9691 abi_long count = arg3;
9693 dirp = g_try_malloc(count);
9694 if (!dirp) {
9695 ret = -TARGET_ENOMEM;
9696 goto fail;
9699 ret = get_errno(sys_getdents(arg1, dirp, count));
9700 if (!is_error(ret)) {
9701 struct linux_dirent *de;
9702 struct target_dirent *tde;
9703 int len = ret;
9704 int reclen, treclen;
9705 int count1, tnamelen;
9707 count1 = 0;
9708 de = dirp;
9709 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9710 goto efault;
9711 tde = target_dirp;
9712 while (len > 0) {
9713 reclen = de->d_reclen;
9714 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9715 assert(tnamelen >= 0);
9716 treclen = tnamelen + offsetof(struct target_dirent, d_name);
9717 assert(count1 + treclen <= count);
9718 tde->d_reclen = tswap16(treclen);
9719 tde->d_ino = tswapal(de->d_ino);
9720 tde->d_off = tswapal(de->d_off);
9721 memcpy(tde->d_name, de->d_name, tnamelen);
9722 de = (struct linux_dirent *)((char *)de + reclen);
9723 len -= reclen;
9724 tde = (struct target_dirent *)((char *)tde + treclen);
9725 count1 += treclen;
9727 ret = count1;
9728 unlock_user(target_dirp, arg2, ret);
9730 g_free(dirp);
9732 #else
9734 struct linux_dirent *dirp;
9735 abi_long count = arg3;
9737 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9738 goto efault;
9739 ret = get_errno(sys_getdents(arg1, dirp, count));
9740 if (!is_error(ret)) {
9741 struct linux_dirent *de;
9742 int len = ret;
9743 int reclen;
9744 de = dirp;
9745 while (len > 0) {
9746 reclen = de->d_reclen;
9747 if (reclen > len)
9748 break;
9749 de->d_reclen = tswap16(reclen);
9750 tswapls(&de->d_ino);
9751 tswapls(&de->d_off);
9752 de = (struct linux_dirent *)((char *)de + reclen);
9753 len -= reclen;
9756 unlock_user(dirp, arg2, ret);
9758 #endif
9759 #else
9760 /* Implement getdents in terms of getdents64 */
9762 struct linux_dirent64 *dirp;
9763 abi_long count = arg3;
9765 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9766 if (!dirp) {
9767 goto efault;
9769 ret = get_errno(sys_getdents64(arg1, dirp, count));
9770 if (!is_error(ret)) {
9771 /* Convert the dirent64 structs to target dirent. We do this
9772 * in-place, since we can guarantee that a target_dirent is no
9773 * larger than a dirent64; however this means we have to be
9774 * careful to read everything before writing in the new format.
9776 struct linux_dirent64 *de;
9777 struct target_dirent *tde;
9778 int len = ret;
9779 int tlen = 0;
9781 de = dirp;
9782 tde = (struct target_dirent *)dirp;
9783 while (len > 0) {
9784 int namelen, treclen;
9785 int reclen = de->d_reclen;
9786 uint64_t ino = de->d_ino;
9787 int64_t off = de->d_off;
9788 uint8_t type = de->d_type;
9790 namelen = strlen(de->d_name);
9791 treclen = offsetof(struct target_dirent, d_name)
9792 + namelen + 2;
9793 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9795 memmove(tde->d_name, de->d_name, namelen + 1);
9796 tde->d_ino = tswapal(ino);
9797 tde->d_off = tswapal(off);
9798 tde->d_reclen = tswap16(treclen);
9799 /* The target_dirent type is in what was formerly a padding
9800 * byte at the end of the structure:
9802 *(((char *)tde) + treclen - 1) = type;
9804 de = (struct linux_dirent64 *)((char *)de + reclen);
9805 tde = (struct target_dirent *)((char *)tde + treclen);
9806 len -= reclen;
9807 tlen += treclen;
9809 ret = tlen;
9811 unlock_user(dirp, arg2, ret);
9813 #endif
9814 break;
9815 #endif /* TARGET_NR_getdents */
9816 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9817 case TARGET_NR_getdents64:
9819 struct linux_dirent64 *dirp;
9820 abi_long count = arg3;
9821 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9822 goto efault;
9823 ret = get_errno(sys_getdents64(arg1, dirp, count));
9824 if (!is_error(ret)) {
9825 struct linux_dirent64 *de;
9826 int len = ret;
9827 int reclen;
9828 de = dirp;
9829 while (len > 0) {
9830 reclen = de->d_reclen;
9831 if (reclen > len)
9832 break;
9833 de->d_reclen = tswap16(reclen);
9834 tswap64s((uint64_t *)&de->d_ino);
9835 tswap64s((uint64_t *)&de->d_off);
9836 de = (struct linux_dirent64 *)((char *)de + reclen);
9837 len -= reclen;
9840 unlock_user(dirp, arg2, ret);
9842 break;
9843 #endif /* TARGET_NR_getdents64 */
9844 #if defined(TARGET_NR__newselect)
9845 case TARGET_NR__newselect:
9846 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9847 break;
9848 #endif
9849 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9850 # ifdef TARGET_NR_poll
9851 case TARGET_NR_poll:
9852 # endif
9853 # ifdef TARGET_NR_ppoll
9854 case TARGET_NR_ppoll:
9855 # endif
9857 struct target_pollfd *target_pfd;
9858 unsigned int nfds = arg2;
9859 struct pollfd *pfd;
9860 unsigned int i;
9862 pfd = NULL;
9863 target_pfd = NULL;
9864 if (nfds) {
9865 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9866 ret = -TARGET_EINVAL;
9867 break;
9870 target_pfd = lock_user(VERIFY_WRITE, arg1,
9871 sizeof(struct target_pollfd) * nfds, 1);
9872 if (!target_pfd) {
9873 goto efault;
9876 pfd = alloca(sizeof(struct pollfd) * nfds);
9877 for (i = 0; i < nfds; i++) {
9878 pfd[i].fd = tswap32(target_pfd[i].fd);
9879 pfd[i].events = tswap16(target_pfd[i].events);
9883 switch (num) {
9884 # ifdef TARGET_NR_ppoll
9885 case TARGET_NR_ppoll:
9887 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9888 target_sigset_t *target_set;
9889 sigset_t _set, *set = &_set;
9891 if (arg3) {
9892 if (target_to_host_timespec(timeout_ts, arg3)) {
9893 unlock_user(target_pfd, arg1, 0);
9894 goto efault;
9896 } else {
9897 timeout_ts = NULL;
9900 if (arg4) {
9901 if (arg5 != sizeof(target_sigset_t)) {
9902 unlock_user(target_pfd, arg1, 0);
9903 ret = -TARGET_EINVAL;
9904 break;
9907 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9908 if (!target_set) {
9909 unlock_user(target_pfd, arg1, 0);
9910 goto efault;
9912 target_to_host_sigset(set, target_set);
9913 } else {
9914 set = NULL;
9917 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9918 set, SIGSET_T_SIZE));
9920 if (!is_error(ret) && arg3) {
9921 host_to_target_timespec(arg3, timeout_ts);
9923 if (arg4) {
9924 unlock_user(target_set, arg4, 0);
9926 break;
9928 # endif
9929 # ifdef TARGET_NR_poll
9930 case TARGET_NR_poll:
9932 struct timespec ts, *pts;
9934 if (arg3 >= 0) {
9935 /* Convert ms to secs, ns */
9936 ts.tv_sec = arg3 / 1000;
9937 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9938 pts = &ts;
9939 } else {
9940 /* -ve poll() timeout means "infinite" */
9941 pts = NULL;
9943 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9944 break;
9946 # endif
9947 default:
9948 g_assert_not_reached();
9951 if (!is_error(ret)) {
9952 for(i = 0; i < nfds; i++) {
9953 target_pfd[i].revents = tswap16(pfd[i].revents);
9956 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9958 break;
9959 #endif
9960 case TARGET_NR_flock:
9961 /* NOTE: the flock constant seems to be the same for every
9962 Linux platform */
9963 ret = get_errno(safe_flock(arg1, arg2));
9964 break;
9965 case TARGET_NR_readv:
9967 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9968 if (vec != NULL) {
9969 ret = get_errno(safe_readv(arg1, vec, arg3));
9970 unlock_iovec(vec, arg2, arg3, 1);
9971 } else {
9972 ret = -host_to_target_errno(errno);
9975 break;
9976 case TARGET_NR_writev:
9978 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9979 if (vec != NULL) {
9980 ret = get_errno(safe_writev(arg1, vec, arg3));
9981 unlock_iovec(vec, arg2, arg3, 0);
9982 } else {
9983 ret = -host_to_target_errno(errno);
9986 break;
9987 case TARGET_NR_getsid:
9988 ret = get_errno(getsid(arg1));
9989 break;
9990 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9991 case TARGET_NR_fdatasync:
9992 ret = get_errno(fdatasync(arg1));
9993 break;
9994 #endif
9995 #ifdef TARGET_NR__sysctl
9996 case TARGET_NR__sysctl:
9997 /* We don't implement this, but ENOTDIR is always a safe
9998 return value. */
9999 ret = -TARGET_ENOTDIR;
10000 break;
10001 #endif
10002 case TARGET_NR_sched_getaffinity:
10004 unsigned int mask_size;
10005 unsigned long *mask;
10008 * sched_getaffinity needs multiples of ulong, so need to take
10009 * care of mismatches between target ulong and host ulong sizes.
10011 if (arg2 & (sizeof(abi_ulong) - 1)) {
10012 ret = -TARGET_EINVAL;
10013 break;
10015 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10017 mask = alloca(mask_size);
10018 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10020 if (!is_error(ret)) {
10021 if (ret > arg2) {
10022 /* More data returned than the caller's buffer will fit.
10023 * This only happens if sizeof(abi_long) < sizeof(long)
10024 * and the caller passed us a buffer holding an odd number
10025 * of abi_longs. If the host kernel is actually using the
10026 * extra 4 bytes then fail EINVAL; otherwise we can just
10027 * ignore them and only copy the interesting part.
10029 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10030 if (numcpus > arg2 * 8) {
10031 ret = -TARGET_EINVAL;
10032 break;
10034 ret = arg2;
10037 if (copy_to_user(arg3, mask, ret)) {
10038 goto efault;
10042 break;
10043 case TARGET_NR_sched_setaffinity:
10045 unsigned int mask_size;
10046 unsigned long *mask;
/*
10049 * sched_setaffinity needs multiples of ulong, so need to take
10050 * care of mismatches between target ulong and host ulong sizes.
 */
10052 if (arg2 & (sizeof(abi_ulong) - 1)) {
10053 ret = -TARGET_EINVAL;
10054 break;
10056 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10058 mask = alloca(mask_size);
10059 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
10060 goto efault;
10062 memcpy(mask, p, arg2);
/* Fix: the guest buffer was locked at arg3 above, so it must be
 * unlocked at arg3 as well; the original mistakenly passed arg2
 * (the length argument) as the address to unlock. */
10063 unlock_user_struct(p, arg3, 0);
10065 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10067 break;
10068 case TARGET_NR_sched_setparam:
10070 struct sched_param *target_schp;
10071 struct sched_param schp;
10073 if (arg2 == 0) {
10074 return -TARGET_EINVAL;
10076 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10077 goto efault;
10078 schp.sched_priority = tswap32(target_schp->sched_priority);
10079 unlock_user_struct(target_schp, arg2, 0);
10080 ret = get_errno(sched_setparam(arg1, &schp));
10082 break;
10083 case TARGET_NR_sched_getparam:
10085 struct sched_param *target_schp;
10086 struct sched_param schp;
10088 if (arg2 == 0) {
10089 return -TARGET_EINVAL;
10091 ret = get_errno(sched_getparam(arg1, &schp));
10092 if (!is_error(ret)) {
10093 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10094 goto efault;
10095 target_schp->sched_priority = tswap32(schp.sched_priority);
10096 unlock_user_struct(target_schp, arg2, 1);
10099 break;
10100 case TARGET_NR_sched_setscheduler:
10102 struct sched_param *target_schp;
10103 struct sched_param schp;
10104 if (arg3 == 0) {
10105 return -TARGET_EINVAL;
10107 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10108 goto efault;
10109 schp.sched_priority = tswap32(target_schp->sched_priority);
10110 unlock_user_struct(target_schp, arg3, 0);
10111 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
10113 break;
10114 case TARGET_NR_sched_getscheduler:
10115 ret = get_errno(sched_getscheduler(arg1));
10116 break;
10117 case TARGET_NR_sched_yield:
10118 ret = get_errno(sched_yield());
10119 break;
10120 case TARGET_NR_sched_get_priority_max:
10121 ret = get_errno(sched_get_priority_max(arg1));
10122 break;
10123 case TARGET_NR_sched_get_priority_min:
10124 ret = get_errno(sched_get_priority_min(arg1));
10125 break;
10126 case TARGET_NR_sched_rr_get_interval:
10128 struct timespec ts;
10129 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10130 if (!is_error(ret)) {
10131 ret = host_to_target_timespec(arg2, &ts);
10134 break;
10135 case TARGET_NR_nanosleep:
10137 struct timespec req, rem;
10138 target_to_host_timespec(&req, arg1);
10139 ret = get_errno(safe_nanosleep(&req, &rem));
10140 if (is_error(ret) && arg2) {
10141 host_to_target_timespec(arg2, &rem);
10144 break;
10145 #ifdef TARGET_NR_query_module
10146 case TARGET_NR_query_module:
10147 goto unimplemented;
10148 #endif
10149 #ifdef TARGET_NR_nfsservctl
10150 case TARGET_NR_nfsservctl:
10151 goto unimplemented;
10152 #endif
10153 case TARGET_NR_prctl:
10154 switch (arg1) {
10155 case PR_GET_PDEATHSIG:
10157 int deathsig;
10158 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10159 if (!is_error(ret) && arg2
10160 && put_user_ual(deathsig, arg2)) {
10161 goto efault;
10163 break;
10165 #ifdef PR_GET_NAME
10166 case PR_GET_NAME:
10168 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10169 if (!name) {
10170 goto efault;
10172 ret = get_errno(prctl(arg1, (unsigned long)name,
10173 arg3, arg4, arg5));
10174 unlock_user(name, arg2, 16);
10175 break;
10177 case PR_SET_NAME:
10179 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10180 if (!name) {
10181 goto efault;
10183 ret = get_errno(prctl(arg1, (unsigned long)name,
10184 arg3, arg4, arg5));
10185 unlock_user(name, arg2, 0);
10186 break;
10188 #endif
10189 default:
10190 /* Most prctl options have no pointer arguments */
10191 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10192 break;
10194 break;
10195 #ifdef TARGET_NR_arch_prctl
10196 case TARGET_NR_arch_prctl:
10197 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10198 ret = do_arch_prctl(cpu_env, arg1, arg2);
10199 break;
10200 #else
10201 goto unimplemented;
10202 #endif
10203 #endif
10204 #ifdef TARGET_NR_pread64
10205 case TARGET_NR_pread64:
10206 if (regpairs_aligned(cpu_env)) {
10207 arg4 = arg5;
10208 arg5 = arg6;
10210 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
10211 goto efault;
10212 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10213 unlock_user(p, arg2, ret);
10214 break;
10215 case TARGET_NR_pwrite64:
10216 if (regpairs_aligned(cpu_env)) {
10217 arg4 = arg5;
10218 arg5 = arg6;
10220 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
10221 goto efault;
10222 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10223 unlock_user(p, arg2, 0);
10224 break;
10225 #endif
10226 case TARGET_NR_getcwd:
10227 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10228 goto efault;
10229 ret = get_errno(sys_getcwd1(p, arg2));
10230 unlock_user(p, arg1, ret);
10231 break;
10232 case TARGET_NR_capget:
10233 case TARGET_NR_capset:
10235 struct target_user_cap_header *target_header;
10236 struct target_user_cap_data *target_data = NULL;
10237 struct __user_cap_header_struct header;
10238 struct __user_cap_data_struct data[2];
10239 struct __user_cap_data_struct *dataptr = NULL;
10240 int i, target_datalen;
10241 int data_items = 1;
10243 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10244 goto efault;
10246 header.version = tswap32(target_header->version);
10247 header.pid = tswap32(target_header->pid);
10249 if (header.version != _LINUX_CAPABILITY_VERSION) {
10250 /* Version 2 and up takes pointer to two user_data structs */
10251 data_items = 2;
10254 target_datalen = sizeof(*target_data) * data_items;
10256 if (arg2) {
10257 if (num == TARGET_NR_capget) {
10258 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10259 } else {
10260 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10262 if (!target_data) {
10263 unlock_user_struct(target_header, arg1, 0);
10264 goto efault;
10267 if (num == TARGET_NR_capset) {
10268 for (i = 0; i < data_items; i++) {
10269 data[i].effective = tswap32(target_data[i].effective);
10270 data[i].permitted = tswap32(target_data[i].permitted);
10271 data[i].inheritable = tswap32(target_data[i].inheritable);
10275 dataptr = data;
10278 if (num == TARGET_NR_capget) {
10279 ret = get_errno(capget(&header, dataptr));
10280 } else {
10281 ret = get_errno(capset(&header, dataptr));
10284 /* The kernel always updates version for both capget and capset */
10285 target_header->version = tswap32(header.version);
10286 unlock_user_struct(target_header, arg1, 1);
10288 if (arg2) {
10289 if (num == TARGET_NR_capget) {
10290 for (i = 0; i < data_items; i++) {
10291 target_data[i].effective = tswap32(data[i].effective);
10292 target_data[i].permitted = tswap32(data[i].permitted);
10293 target_data[i].inheritable = tswap32(data[i].inheritable);
10295 unlock_user(target_data, arg2, target_datalen);
10296 } else {
10297 unlock_user(target_data, arg2, 0);
10300 break;
10302 case TARGET_NR_sigaltstack:
10303 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
10304 break;
10306 #ifdef CONFIG_SENDFILE
10307 case TARGET_NR_sendfile:
10309 off_t *offp = NULL;
10310 off_t off;
10311 if (arg3) {
10312 ret = get_user_sal(off, arg3);
10313 if (is_error(ret)) {
10314 break;
10316 offp = &off;
10318 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10319 if (!is_error(ret) && arg3) {
10320 abi_long ret2 = put_user_sal(off, arg3);
10321 if (is_error(ret2)) {
10322 ret = ret2;
10325 break;
10327 #ifdef TARGET_NR_sendfile64
10328 case TARGET_NR_sendfile64:
10330 off_t *offp = NULL;
10331 off_t off;
10332 if (arg3) {
10333 ret = get_user_s64(off, arg3);
10334 if (is_error(ret)) {
10335 break;
10337 offp = &off;
10339 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10340 if (!is_error(ret) && arg3) {
10341 abi_long ret2 = put_user_s64(off, arg3);
10342 if (is_error(ret2)) {
10343 ret = ret2;
10346 break;
10348 #endif
10349 #else
10350 case TARGET_NR_sendfile:
10351 #ifdef TARGET_NR_sendfile64
10352 case TARGET_NR_sendfile64:
10353 #endif
10354 goto unimplemented;
10355 #endif
10357 #ifdef TARGET_NR_getpmsg
10358 case TARGET_NR_getpmsg:
10359 goto unimplemented;
10360 #endif
10361 #ifdef TARGET_NR_putpmsg
10362 case TARGET_NR_putpmsg:
10363 goto unimplemented;
10364 #endif
10365 #ifdef TARGET_NR_vfork
10366 case TARGET_NR_vfork:
10367 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
10368 0, 0, 0, 0));
10369 break;
10370 #endif
10371 #ifdef TARGET_NR_ugetrlimit
10372 case TARGET_NR_ugetrlimit:
10374 struct rlimit rlim;
10375 int resource = target_to_host_resource(arg1);
10376 ret = get_errno(getrlimit(resource, &rlim));
10377 if (!is_error(ret)) {
10378 struct target_rlimit *target_rlim;
10379 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10380 goto efault;
10381 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10382 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10383 unlock_user_struct(target_rlim, arg2, 1);
10385 break;
10387 #endif
10388 #ifdef TARGET_NR_truncate64
10389 case TARGET_NR_truncate64:
10390 if (!(p = lock_user_string(arg1)))
10391 goto efault;
10392 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10393 unlock_user(p, arg1, 0);
10394 break;
10395 #endif
10396 #ifdef TARGET_NR_ftruncate64
10397 case TARGET_NR_ftruncate64:
10398 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10399 break;
10400 #endif
10401 #ifdef TARGET_NR_stat64
10402 case TARGET_NR_stat64:
10403 if (!(p = lock_user_string(arg1)))
10404 goto efault;
10405 ret = get_errno(stat(path(p), &st));
10406 unlock_user(p, arg1, 0);
10407 if (!is_error(ret))
10408 ret = host_to_target_stat64(cpu_env, arg2, &st);
10409 break;
10410 #endif
10411 #ifdef TARGET_NR_lstat64
10412 case TARGET_NR_lstat64:
10413 if (!(p = lock_user_string(arg1)))
10414 goto efault;
10415 ret = get_errno(lstat(path(p), &st));
10416 unlock_user(p, arg1, 0);
10417 if (!is_error(ret))
10418 ret = host_to_target_stat64(cpu_env, arg2, &st);
10419 break;
10420 #endif
10421 #ifdef TARGET_NR_fstat64
10422 case TARGET_NR_fstat64:
10423 ret = get_errno(fstat(arg1, &st));
10424 if (!is_error(ret))
10425 ret = host_to_target_stat64(cpu_env, arg2, &st);
10426 break;
10427 #endif
10428 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10429 #ifdef TARGET_NR_fstatat64
10430 case TARGET_NR_fstatat64:
10431 #endif
10432 #ifdef TARGET_NR_newfstatat
10433 case TARGET_NR_newfstatat:
10434 #endif
10435 if (!(p = lock_user_string(arg2)))
10436 goto efault;
10437 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10438 if (!is_error(ret))
10439 ret = host_to_target_stat64(cpu_env, arg3, &st);
10440 break;
10441 #endif
10442 #ifdef TARGET_NR_lchown
10443 case TARGET_NR_lchown:
10444 if (!(p = lock_user_string(arg1)))
10445 goto efault;
10446 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10447 unlock_user(p, arg1, 0);
10448 break;
10449 #endif
10450 #ifdef TARGET_NR_getuid
10451 case TARGET_NR_getuid:
10452 ret = get_errno(high2lowuid(getuid()));
10453 break;
10454 #endif
10455 #ifdef TARGET_NR_getgid
10456 case TARGET_NR_getgid:
10457 ret = get_errno(high2lowgid(getgid()));
10458 break;
10459 #endif
10460 #ifdef TARGET_NR_geteuid
10461 case TARGET_NR_geteuid:
10462 ret = get_errno(high2lowuid(geteuid()));
10463 break;
10464 #endif
10465 #ifdef TARGET_NR_getegid
10466 case TARGET_NR_getegid:
10467 ret = get_errno(high2lowgid(getegid()));
10468 break;
10469 #endif
10470 case TARGET_NR_setreuid:
10471 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10472 break;
10473 case TARGET_NR_setregid:
10474 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10475 break;
10476 case TARGET_NR_getgroups:
10478 int gidsetsize = arg1;
10479 target_id *target_grouplist;
10480 gid_t *grouplist;
10481 int i;
10483 grouplist = alloca(gidsetsize * sizeof(gid_t));
10484 ret = get_errno(getgroups(gidsetsize, grouplist));
10485 if (gidsetsize == 0)
10486 break;
10487 if (!is_error(ret)) {
10488 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10489 if (!target_grouplist)
10490 goto efault;
10491 for(i = 0;i < ret; i++)
10492 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10493 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10496 break;
10497 case TARGET_NR_setgroups:
10499 int gidsetsize = arg1;
10500 target_id *target_grouplist;
10501 gid_t *grouplist = NULL;
10502 int i;
10503 if (gidsetsize) {
10504 grouplist = alloca(gidsetsize * sizeof(gid_t));
10505 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10506 if (!target_grouplist) {
10507 ret = -TARGET_EFAULT;
10508 goto fail;
10510 for (i = 0; i < gidsetsize; i++) {
10511 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10513 unlock_user(target_grouplist, arg2, 0);
10515 ret = get_errno(setgroups(gidsetsize, grouplist));
10517 break;
10518 case TARGET_NR_fchown:
10519 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10520 break;
10521 #if defined(TARGET_NR_fchownat)
10522 case TARGET_NR_fchownat:
10523 if (!(p = lock_user_string(arg2)))
10524 goto efault;
10525 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10526 low2highgid(arg4), arg5));
10527 unlock_user(p, arg2, 0);
10528 break;
10529 #endif
10530 #ifdef TARGET_NR_setresuid
10531 case TARGET_NR_setresuid:
10532 ret = get_errno(sys_setresuid(low2highuid(arg1),
10533 low2highuid(arg2),
10534 low2highuid(arg3)));
10535 break;
10536 #endif
10537 #ifdef TARGET_NR_getresuid
10538 case TARGET_NR_getresuid:
10540 uid_t ruid, euid, suid;
10541 ret = get_errno(getresuid(&ruid, &euid, &suid));
10542 if (!is_error(ret)) {
10543 if (put_user_id(high2lowuid(ruid), arg1)
10544 || put_user_id(high2lowuid(euid), arg2)
10545 || put_user_id(high2lowuid(suid), arg3))
10546 goto efault;
10549 break;
10550 #endif
10551 #ifdef TARGET_NR_getresgid
10552 case TARGET_NR_setresgid:
10553 ret = get_errno(sys_setresgid(low2highgid(arg1),
10554 low2highgid(arg2),
10555 low2highgid(arg3)));
10556 break;
10557 #endif
10558 #ifdef TARGET_NR_getresgid
10559 case TARGET_NR_getresgid:
10561 gid_t rgid, egid, sgid;
10562 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10563 if (!is_error(ret)) {
10564 if (put_user_id(high2lowgid(rgid), arg1)
10565 || put_user_id(high2lowgid(egid), arg2)
10566 || put_user_id(high2lowgid(sgid), arg3))
10567 goto efault;
10570 break;
10571 #endif
10572 #ifdef TARGET_NR_chown
10573 case TARGET_NR_chown:
10574 if (!(p = lock_user_string(arg1)))
10575 goto efault;
10576 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10577 unlock_user(p, arg1, 0);
10578 break;
10579 #endif
10580 case TARGET_NR_setuid:
10581 ret = get_errno(sys_setuid(low2highuid(arg1)));
10582 break;
10583 case TARGET_NR_setgid:
10584 ret = get_errno(sys_setgid(low2highgid(arg1)));
10585 break;
10586 case TARGET_NR_setfsuid:
10587 ret = get_errno(setfsuid(arg1));
10588 break;
10589 case TARGET_NR_setfsgid:
10590 ret = get_errno(setfsgid(arg1));
10591 break;
10593 #ifdef TARGET_NR_lchown32
10594 case TARGET_NR_lchown32:
10595 if (!(p = lock_user_string(arg1)))
10596 goto efault;
10597 ret = get_errno(lchown(p, arg2, arg3));
10598 unlock_user(p, arg1, 0);
10599 break;
10600 #endif
10601 #ifdef TARGET_NR_getuid32
10602 case TARGET_NR_getuid32:
10603 ret = get_errno(getuid());
10604 break;
10605 #endif
10607 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10608 /* Alpha specific */
10609 case TARGET_NR_getxuid:
10611 uid_t euid;
10612 euid=geteuid();
10613 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
10615 ret = get_errno(getuid());
10616 break;
10617 #endif
10618 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10619 /* Alpha specific */
10620 case TARGET_NR_getxgid:
10622 uid_t egid;
10623 egid=getegid();
10624 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
10626 ret = get_errno(getgid());
10627 break;
10628 #endif
10629 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10630 /* Alpha specific */
10631 case TARGET_NR_osf_getsysinfo:
10632 ret = -TARGET_EOPNOTSUPP;
10633 switch (arg1) {
10634 case TARGET_GSI_IEEE_FP_CONTROL:
10636 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
10638 /* Copied from linux ieee_fpcr_to_swcr. */
10639 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
10640 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
10641 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
10642 | SWCR_TRAP_ENABLE_DZE
10643 | SWCR_TRAP_ENABLE_OVF);
10644 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
10645 | SWCR_TRAP_ENABLE_INE);
10646 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
10647 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
10649 if (put_user_u64 (swcr, arg2))
10650 goto efault;
10651 ret = 0;
10653 break;
10655 /* case GSI_IEEE_STATE_AT_SIGNAL:
10656 -- Not implemented in linux kernel.
10657 case GSI_UACPROC:
10658 -- Retrieves current unaligned access state; not much used.
10659 case GSI_PROC_TYPE:
10660 -- Retrieves implver information; surely not used.
10661 case GSI_GET_HWRPB:
10662 -- Grabs a copy of the HWRPB; surely not used.
10665 break;
10666 #endif
10667 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10668 /* Alpha specific */
10669 case TARGET_NR_osf_setsysinfo:
10670 ret = -TARGET_EOPNOTSUPP;
10671 switch (arg1) {
10672 case TARGET_SSI_IEEE_FP_CONTROL:
10674 uint64_t swcr, fpcr, orig_fpcr;
10676 if (get_user_u64 (swcr, arg2)) {
10677 goto efault;
10679 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10680 fpcr = orig_fpcr & FPCR_DYN_MASK;
10682 /* Copied from linux ieee_swcr_to_fpcr. */
10683 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
10684 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
10685 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
10686 | SWCR_TRAP_ENABLE_DZE
10687 | SWCR_TRAP_ENABLE_OVF)) << 48;
10688 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
10689 | SWCR_TRAP_ENABLE_INE)) << 57;
10690 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
10691 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
10693 cpu_alpha_store_fpcr(cpu_env, fpcr);
10694 ret = 0;
10696 break;
10698 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10700 uint64_t exc, fpcr, orig_fpcr;
10701 int si_code;
10703 if (get_user_u64(exc, arg2)) {
10704 goto efault;
10707 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10709 /* We only add to the exception status here. */
10710 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
10712 cpu_alpha_store_fpcr(cpu_env, fpcr);
10713 ret = 0;
10715 /* Old exceptions are not signaled. */
10716 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
10718 /* If any exceptions set by this call,
10719 and are unmasked, send a signal. */
10720 si_code = 0;
10721 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
10722 si_code = TARGET_FPE_FLTRES;
10724 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
10725 si_code = TARGET_FPE_FLTUND;
10727 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
10728 si_code = TARGET_FPE_FLTOVF;
10730 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
10731 si_code = TARGET_FPE_FLTDIV;
10733 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
10734 si_code = TARGET_FPE_FLTINV;
10736 if (si_code != 0) {
10737 target_siginfo_t info;
10738 info.si_signo = SIGFPE;
10739 info.si_errno = 0;
10740 info.si_code = si_code;
10741 info._sifields._sigfault._addr
10742 = ((CPUArchState *)cpu_env)->pc;
10743 queue_signal((CPUArchState *)cpu_env, info.si_signo,
10744 QEMU_SI_FAULT, &info);
10747 break;
10749 /* case SSI_NVPAIRS:
10750 -- Used with SSIN_UACPROC to enable unaligned accesses.
10751 case SSI_IEEE_STATE_AT_SIGNAL:
10752 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10753 -- Not implemented in linux kernel
10756 break;
10757 #endif
10758 #ifdef TARGET_NR_osf_sigprocmask
10759 /* Alpha specific. */
10760 case TARGET_NR_osf_sigprocmask:
10762 abi_ulong mask;
10763 int how;
10764 sigset_t set, oldset;
10766 switch(arg1) {
10767 case TARGET_SIG_BLOCK:
10768 how = SIG_BLOCK;
10769 break;
10770 case TARGET_SIG_UNBLOCK:
10771 how = SIG_UNBLOCK;
10772 break;
10773 case TARGET_SIG_SETMASK:
10774 how = SIG_SETMASK;
10775 break;
10776 default:
10777 ret = -TARGET_EINVAL;
10778 goto fail;
10780 mask = arg2;
10781 target_to_host_old_sigset(&set, &mask);
10782 ret = do_sigprocmask(how, &set, &oldset);
10783 if (!ret) {
10784 host_to_target_old_sigset(&mask, &oldset);
10785 ret = mask;
10788 break;
10789 #endif
10791 #ifdef TARGET_NR_getgid32
10792 case TARGET_NR_getgid32:
10793 ret = get_errno(getgid());
10794 break;
10795 #endif
10796 #ifdef TARGET_NR_geteuid32
10797 case TARGET_NR_geteuid32:
10798 ret = get_errno(geteuid());
10799 break;
10800 #endif
10801 #ifdef TARGET_NR_getegid32
10802 case TARGET_NR_getegid32:
10803 ret = get_errno(getegid());
10804 break;
10805 #endif
10806 #ifdef TARGET_NR_setreuid32
10807 case TARGET_NR_setreuid32:
10808 ret = get_errno(setreuid(arg1, arg2));
10809 break;
10810 #endif
10811 #ifdef TARGET_NR_setregid32
10812 case TARGET_NR_setregid32:
10813 ret = get_errno(setregid(arg1, arg2));
10814 break;
10815 #endif
10816 #ifdef TARGET_NR_getgroups32
10817 case TARGET_NR_getgroups32:
10819 int gidsetsize = arg1;
10820 uint32_t *target_grouplist;
10821 gid_t *grouplist;
10822 int i;
10824 grouplist = alloca(gidsetsize * sizeof(gid_t));
10825 ret = get_errno(getgroups(gidsetsize, grouplist));
10826 if (gidsetsize == 0)
10827 break;
10828 if (!is_error(ret)) {
10829 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10830 if (!target_grouplist) {
10831 ret = -TARGET_EFAULT;
10832 goto fail;
10834 for(i = 0;i < ret; i++)
10835 target_grouplist[i] = tswap32(grouplist[i]);
10836 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10839 break;
10840 #endif
10841 #ifdef TARGET_NR_setgroups32
10842 case TARGET_NR_setgroups32:
10844 int gidsetsize = arg1;
10845 uint32_t *target_grouplist;
10846 gid_t *grouplist;
10847 int i;
10849 grouplist = alloca(gidsetsize * sizeof(gid_t));
10850 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10851 if (!target_grouplist) {
10852 ret = -TARGET_EFAULT;
10853 goto fail;
10855 for(i = 0;i < gidsetsize; i++)
10856 grouplist[i] = tswap32(target_grouplist[i]);
10857 unlock_user(target_grouplist, arg2, 0);
10858 ret = get_errno(setgroups(gidsetsize, grouplist));
10860 break;
10861 #endif
10862 #ifdef TARGET_NR_fchown32
10863 case TARGET_NR_fchown32:
10864 ret = get_errno(fchown(arg1, arg2, arg3));
10865 break;
10866 #endif
10867 #ifdef TARGET_NR_setresuid32
10868 case TARGET_NR_setresuid32:
10869 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
10870 break;
10871 #endif
10872 #ifdef TARGET_NR_getresuid32
10873 case TARGET_NR_getresuid32:
10875 uid_t ruid, euid, suid;
10876 ret = get_errno(getresuid(&ruid, &euid, &suid));
10877 if (!is_error(ret)) {
10878 if (put_user_u32(ruid, arg1)
10879 || put_user_u32(euid, arg2)
10880 || put_user_u32(suid, arg3))
10881 goto efault;
10884 break;
10885 #endif
10886 #ifdef TARGET_NR_setresgid32
10887 case TARGET_NR_setresgid32:
10888 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
10889 break;
10890 #endif
10891 #ifdef TARGET_NR_getresgid32
10892 case TARGET_NR_getresgid32:
10894 gid_t rgid, egid, sgid;
10895 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10896 if (!is_error(ret)) {
10897 if (put_user_u32(rgid, arg1)
10898 || put_user_u32(egid, arg2)
10899 || put_user_u32(sgid, arg3))
10900 goto efault;
10903 break;
10904 #endif
10905 #ifdef TARGET_NR_chown32
10906 case TARGET_NR_chown32:
10907 if (!(p = lock_user_string(arg1)))
10908 goto efault;
10909 ret = get_errno(chown(p, arg2, arg3));
10910 unlock_user(p, arg1, 0);
10911 break;
10912 #endif
10913 #ifdef TARGET_NR_setuid32
10914 case TARGET_NR_setuid32:
10915 ret = get_errno(sys_setuid(arg1));
10916 break;
10917 #endif
10918 #ifdef TARGET_NR_setgid32
10919 case TARGET_NR_setgid32:
10920 ret = get_errno(sys_setgid(arg1));
10921 break;
10922 #endif
10923 #ifdef TARGET_NR_setfsuid32
10924 case TARGET_NR_setfsuid32:
10925 ret = get_errno(setfsuid(arg1));
10926 break;
10927 #endif
10928 #ifdef TARGET_NR_setfsgid32
10929 case TARGET_NR_setfsgid32:
10930 ret = get_errno(setfsgid(arg1));
10931 break;
10932 #endif
10934 case TARGET_NR_pivot_root:
10935 goto unimplemented;
10936 #ifdef TARGET_NR_mincore
10937 case TARGET_NR_mincore:
10939 void *a;
10940 ret = -TARGET_EFAULT;
10941 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
10942 goto efault;
10943 if (!(p = lock_user_string(arg3)))
10944 goto mincore_fail;
10945 ret = get_errno(mincore(a, arg2, p));
10946 unlock_user(p, arg3, ret);
10947 mincore_fail:
10948 unlock_user(a, arg1, 0);
10950 break;
10951 #endif
10952 #ifdef TARGET_NR_arm_fadvise64_64
10953 case TARGET_NR_arm_fadvise64_64:
10954 /* arm_fadvise64_64 looks like fadvise64_64 but
10955 * with different argument order: fd, advice, offset, len
10956 * rather than the usual fd, offset, len, advice.
10957 * Note that offset and len are both 64-bit so appear as
10958 * pairs of 32-bit registers.
10960 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10961 target_offset64(arg5, arg6), arg2);
10962 ret = -host_to_target_errno(ret);
10963 break;
10964 #endif
10966 #if TARGET_ABI_BITS == 32
10968 #ifdef TARGET_NR_fadvise64_64
10969 case TARGET_NR_fadvise64_64:
10970 /* 6 args: fd, offset (high, low), len (high, low), advice */
10971 if (regpairs_aligned(cpu_env)) {
10972 /* offset is in (3,4), len in (5,6) and advice in 7 */
10973 arg2 = arg3;
10974 arg3 = arg4;
10975 arg4 = arg5;
10976 arg5 = arg6;
10977 arg6 = arg7;
10979 ret = -host_to_target_errno(posix_fadvise(arg1,
10980 target_offset64(arg2, arg3),
10981 target_offset64(arg4, arg5),
10982 arg6));
10983 break;
10984 #endif
10986 #ifdef TARGET_NR_fadvise64
10987 case TARGET_NR_fadvise64:
10988 /* 5 args: fd, offset (high, low), len, advice */
10989 if (regpairs_aligned(cpu_env)) {
10990 /* offset is in (3,4), len in 5 and advice in 6 */
10991 arg2 = arg3;
10992 arg3 = arg4;
10993 arg4 = arg5;
10994 arg5 = arg6;
10996 ret = -host_to_target_errno(posix_fadvise(arg1,
10997 target_offset64(arg2, arg3),
10998 arg4, arg5));
10999 break;
11000 #endif
11002 #else /* not a 32-bit ABI */
11003 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11004 #ifdef TARGET_NR_fadvise64_64
11005 case TARGET_NR_fadvise64_64:
11006 #endif
11007 #ifdef TARGET_NR_fadvise64
11008 case TARGET_NR_fadvise64:
11009 #endif
11010 #ifdef TARGET_S390X
11011 switch (arg4) {
11012 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11013 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11014 case 6: arg4 = POSIX_FADV_DONTNEED; break;
11015 case 7: arg4 = POSIX_FADV_NOREUSE; break;
11016 default: break;
11018 #endif
11019 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11020 break;
11021 #endif
11022 #endif /* end of 64-bit ABI fadvise handling */
11024 #ifdef TARGET_NR_madvise
11025 case TARGET_NR_madvise:
11026 /* A straight passthrough may not be safe because qemu sometimes
11027 turns private file-backed mappings into anonymous mappings.
11028 This will break MADV_DONTNEED.
11029 This is a hint, so ignoring and returning success is ok. */
11030 ret = get_errno(0);
11031 break;
11032 #endif
11033 #if TARGET_ABI_BITS == 32
11034 case TARGET_NR_fcntl64:
11036 int cmd;
11037 struct flock64 fl;
11038 from_flock64_fn *copyfrom = copy_from_user_flock64;
11039 to_flock64_fn *copyto = copy_to_user_flock64;
11041 #ifdef TARGET_ARM
11042 if (((CPUARMState *)cpu_env)->eabi) {
11043 copyfrom = copy_from_user_eabi_flock64;
11044 copyto = copy_to_user_eabi_flock64;
11046 #endif
11048 cmd = target_to_host_fcntl_cmd(arg2);
11049 if (cmd == -TARGET_EINVAL) {
11050 ret = cmd;
11051 break;
11054 switch(arg2) {
11055 case TARGET_F_GETLK64:
11056 ret = copyfrom(&fl, arg3);
11057 if (ret) {
11058 break;
11060 ret = get_errno(fcntl(arg1, cmd, &fl));
11061 if (ret == 0) {
11062 ret = copyto(arg3, &fl);
11064 break;
11066 case TARGET_F_SETLK64:
11067 case TARGET_F_SETLKW64:
11068 ret = copyfrom(&fl, arg3);
11069 if (ret) {
11070 break;
11072 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11073 break;
11074 default:
11075 ret = do_fcntl(arg1, arg2, arg3);
11076 break;
11078 break;
11080 #endif
11081 #ifdef TARGET_NR_cacheflush
11082 case TARGET_NR_cacheflush:
11083 /* self-modifying code is handled automatically, so nothing needed */
11084 ret = 0;
11085 break;
11086 #endif
11087 #ifdef TARGET_NR_security
11088 case TARGET_NR_security:
11089 goto unimplemented;
11090 #endif
11091 #ifdef TARGET_NR_getpagesize
11092 case TARGET_NR_getpagesize:
11093 ret = TARGET_PAGE_SIZE;
11094 break;
11095 #endif
11096 case TARGET_NR_gettid:
11097 ret = get_errno(gettid());
11098 break;
11099 #ifdef TARGET_NR_readahead
11100 case TARGET_NR_readahead:
11101 #if TARGET_ABI_BITS == 32
11102 if (regpairs_aligned(cpu_env)) {
11103 arg2 = arg3;
11104 arg3 = arg4;
11105 arg4 = arg5;
11107 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
11108 #else
11109 ret = get_errno(readahead(arg1, arg2, arg3));
11110 #endif
11111 break;
11112 #endif
11113 #ifdef CONFIG_ATTR
11114 #ifdef TARGET_NR_setxattr
11115 case TARGET_NR_listxattr:
11116 case TARGET_NR_llistxattr:
11118 void *p, *b = 0;
11119 if (arg2) {
11120 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11121 if (!b) {
11122 ret = -TARGET_EFAULT;
11123 break;
11126 p = lock_user_string(arg1);
11127 if (p) {
11128 if (num == TARGET_NR_listxattr) {
11129 ret = get_errno(listxattr(p, b, arg3));
11130 } else {
11131 ret = get_errno(llistxattr(p, b, arg3));
11133 } else {
11134 ret = -TARGET_EFAULT;
11136 unlock_user(p, arg1, 0);
11137 unlock_user(b, arg2, arg3);
11138 break;
11140 case TARGET_NR_flistxattr:
11142 void *b = 0;
11143 if (arg2) {
11144 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11145 if (!b) {
11146 ret = -TARGET_EFAULT;
11147 break;
11150 ret = get_errno(flistxattr(arg1, b, arg3));
11151 unlock_user(b, arg2, arg3);
11152 break;
11154 case TARGET_NR_setxattr:
11155 case TARGET_NR_lsetxattr:
11157 void *p, *n, *v = 0;
11158 if (arg3) {
11159 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11160 if (!v) {
11161 ret = -TARGET_EFAULT;
11162 break;
11165 p = lock_user_string(arg1);
11166 n = lock_user_string(arg2);
11167 if (p && n) {
11168 if (num == TARGET_NR_setxattr) {
11169 ret = get_errno(setxattr(p, n, v, arg4, arg5));
11170 } else {
11171 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11173 } else {
11174 ret = -TARGET_EFAULT;
11176 unlock_user(p, arg1, 0);
11177 unlock_user(n, arg2, 0);
11178 unlock_user(v, arg3, 0);
11180 break;
11181 case TARGET_NR_fsetxattr:
11183 void *n, *v = 0;
11184 if (arg3) {
11185 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11186 if (!v) {
11187 ret = -TARGET_EFAULT;
11188 break;
11191 n = lock_user_string(arg2);
11192 if (n) {
11193 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11194 } else {
11195 ret = -TARGET_EFAULT;
11197 unlock_user(n, arg2, 0);
11198 unlock_user(v, arg3, 0);
11200 break;
11201 case TARGET_NR_getxattr:
11202 case TARGET_NR_lgetxattr:
11204 void *p, *n, *v = 0;
11205 if (arg3) {
11206 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11207 if (!v) {
11208 ret = -TARGET_EFAULT;
11209 break;
11212 p = lock_user_string(arg1);
11213 n = lock_user_string(arg2);
11214 if (p && n) {
11215 if (num == TARGET_NR_getxattr) {
11216 ret = get_errno(getxattr(p, n, v, arg4));
11217 } else {
11218 ret = get_errno(lgetxattr(p, n, v, arg4));
11220 } else {
11221 ret = -TARGET_EFAULT;
11223 unlock_user(p, arg1, 0);
11224 unlock_user(n, arg2, 0);
11225 unlock_user(v, arg3, arg4);
11227 break;
11228 case TARGET_NR_fgetxattr:
11230 void *n, *v = 0;
11231 if (arg3) {
11232 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11233 if (!v) {
11234 ret = -TARGET_EFAULT;
11235 break;
11238 n = lock_user_string(arg2);
11239 if (n) {
11240 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11241 } else {
11242 ret = -TARGET_EFAULT;
11244 unlock_user(n, arg2, 0);
11245 unlock_user(v, arg3, arg4);
11247 break;
11248 case TARGET_NR_removexattr:
11249 case TARGET_NR_lremovexattr:
11251 void *p, *n;
11252 p = lock_user_string(arg1);
11253 n = lock_user_string(arg2);
11254 if (p && n) {
11255 if (num == TARGET_NR_removexattr) {
11256 ret = get_errno(removexattr(p, n));
11257 } else {
11258 ret = get_errno(lremovexattr(p, n));
11260 } else {
11261 ret = -TARGET_EFAULT;
11263 unlock_user(p, arg1, 0);
11264 unlock_user(n, arg2, 0);
11266 break;
11267 case TARGET_NR_fremovexattr:
11269 void *n;
11270 n = lock_user_string(arg2);
11271 if (n) {
11272 ret = get_errno(fremovexattr(arg1, n));
11273 } else {
11274 ret = -TARGET_EFAULT;
11276 unlock_user(n, arg2, 0);
11278 break;
11279 #endif
11280 #endif /* CONFIG_ATTR */
/* Thread-local-storage pointer syscalls.  These are inherently
 * per-architecture: each target stores the TLS base in a different
 * CPU-state field, so the case body is a chain of target #ifdefs.
 * Targets without an implementation fall through to ENOSYS without
 * logging a warning. */
11281 #ifdef TARGET_NR_set_thread_area
11282 case TARGET_NR_set_thread_area:
11283 #if defined(TARGET_MIPS)
/* MIPS keeps the user TLS pointer in the CP0 UserLocal register. */
11284 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11285 ret = 0;
11286 break;
11287 #elif defined(TARGET_CRIS)
/* CRIS requires the low byte of the TLS value to be zero. */
11288 if (arg1 & 0xff)
11289 ret = -TARGET_EINVAL;
11290 else {
11291 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11292 ret = 0;
11294 break;
11295 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11296 ret = do_set_thread_area(cpu_env, arg1);
11297 break;
11298 #elif defined(TARGET_M68K)
/* M68K tracks the TLS pointer in the per-thread TaskState. */
11300 TaskState *ts = cpu->opaque;
11301 ts->tp_value = arg1;
11302 ret = 0;
11303 break;
11305 #else
11306 goto unimplemented_nowarn;
11307 #endif
11308 #endif
11309 #ifdef TARGET_NR_get_thread_area
11310 case TARGET_NR_get_thread_area:
11311 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11312 ret = do_get_thread_area(cpu_env, arg1);
11313 break;
11314 #elif defined(TARGET_M68K)
11316 TaskState *ts = cpu->opaque;
11317 ret = ts->tp_value;
11318 break;
11320 #else
11321 goto unimplemented_nowarn;
11322 #endif
11323 #endif
11324 #ifdef TARGET_NR_getdomainname
/* Not emulated; silently reported as ENOSYS to the guest. */
11325 case TARGET_NR_getdomainname:
11326 goto unimplemented_nowarn;
11327 #endif
/* POSIX clock syscalls: call the host clock API with a host struct
 * timespec, then convert the result into guest layout on success. */
11329 #ifdef TARGET_NR_clock_gettime
11330 case TARGET_NR_clock_gettime:
11332 struct timespec ts;
11333 ret = get_errno(clock_gettime(arg1, &ts));
11334 if (!is_error(ret)) {
/* NOTE(review): the return value of host_to_target_timespec() is
 * ignored here and in clock_getres below, so a faulting arg2 still
 * reports success to the guest — confirm whether this should be
 * -TARGET_EFAULT. */
11335 host_to_target_timespec(arg2, &ts);
11337 break;
11339 #endif
11340 #ifdef TARGET_NR_clock_getres
11341 case TARGET_NR_clock_getres:
11343 struct timespec ts;
11344 ret = get_errno(clock_getres(arg1, &ts));
11345 if (!is_error(ret)) {
11346 host_to_target_timespec(arg2, &ts);
11348 break;
11350 #endif
11351 #ifdef TARGET_NR_clock_nanosleep
11352 case TARGET_NR_clock_nanosleep:
11354 struct timespec ts;
/* The single 'ts' doubles as the remaining-time out-parameter when
 * the guest supplied one (arg4), and is copied back afterwards. */
11355 target_to_host_timespec(&ts, arg3);
11356 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11357 &ts, arg4 ? &ts : NULL));
11358 if (arg4)
11359 host_to_target_timespec(arg4, &ts);
11361 #if defined(TARGET_PPC)
11362 /* clock_nanosleep is odd in that it returns positive errno values.
11363 * On PPC, CR0 bit 3 should be set in such a situation. */
11364 if (ret && ret != -TARGET_ERESTARTSYS) {
11365 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11367 #endif
11368 break;
11370 #endif
11372 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11373 case TARGET_NR_set_tid_address:
/* Translate the guest address with g2h() and hand it straight to the
 * host kernel, which writes/clears the TID there on thread exit. */
11374 ret = get_errno(set_tid_address((int *)g2h(arg1)));
11375 break;
11376 #endif
11378 case TARGET_NR_tkill:
/* Signal numbers differ between guest and host ABIs, hence the
 * target_to_host_signal() translation before the host call. */
11379 ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11380 break;
11382 case TARGET_NR_tgkill:
11383 ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
11384 target_to_host_signal(arg3)));
11385 break;
11387 #ifdef TARGET_NR_set_robust_list
11388 case TARGET_NR_set_robust_list:
11389 case TARGET_NR_get_robust_list:
11390 /* The ABI for supporting robust futexes has userspace pass
11391 * the kernel a pointer to a linked list which is updated by
11392 * userspace after the syscall; the list is walked by the kernel
11393 * when the thread exits. Since the linked list in QEMU guest
11394 * memory isn't a valid linked list for the host and we have
11395 * no way to reliably intercept the thread-death event, we can't
11396 * support these. Silently return ENOSYS so that guest userspace
11397 * falls back to a non-robust futex implementation (which should
11398 * be OK except in the corner case of the guest crashing while
11399 * holding a mutex that is shared with another process via
11400 * shared memory).
11402 goto unimplemented_nowarn;
11403 #endif
11405 #if defined(TARGET_NR_utimensat)
11406 case TARGET_NR_utimensat:
/* A NULL times pointer (arg3) means "set both timestamps to now",
 * so tsp stays NULL in that case; otherwise both guest timespecs
 * are converted into the local ts[2] array. */
11408 struct timespec *tsp, ts[2];
11409 if (!arg3) {
11410 tsp = NULL;
11411 } else {
/* NOTE(review): target_to_host_timespec() results are unchecked;
 * a faulting arg3 would silently pass garbage times — confirm. */
11412 target_to_host_timespec(ts, arg3);
11413 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11414 tsp = ts;
/* A NULL path (arg2) is the futimens-style "operate on the fd" form. */
11416 if (!arg2)
11417 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11418 else {
11419 if (!(p = lock_user_string(arg2))) {
11420 ret = -TARGET_EFAULT;
11421 goto fail;
11423 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11424 unlock_user(p, arg2, 0);
11427 break;
11428 #endif
/* Futex and inotify emulation: thin wrappers around helpers declared
 * earlier in this file. */
11429 case TARGET_NR_futex:
11430 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11431 break;
11432 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11433 case TARGET_NR_inotify_init:
11434 ret = get_errno(sys_inotify_init());
11435 break;
11436 #endif
11437 #ifdef CONFIG_INOTIFY1
11438 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11439 case TARGET_NR_inotify_init1:
11440 ret = get_errno(sys_inotify_init1(arg1));
11441 break;
11442 #endif
11443 #endif
11444 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11445 case TARGET_NR_inotify_add_watch:
/* NOTE(review): p is not NULL-checked before use — a bad arg2 would
 * pass NULL to path(); compare the EFAULT handling used elsewhere. */
11446 p = lock_user_string(arg2);
11447 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11448 unlock_user(p, arg2, 0);
11449 break;
11450 #endif
11451 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11452 case TARGET_NR_inotify_rm_watch:
11453 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
11454 break;
11455 #endif
11457 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11458 case TARGET_NR_mq_open:
11460 struct mq_attr posix_mq_attr;
11461 int host_flags;
11463 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11464 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11465 goto efault;
11467 p = lock_user_string(arg1 - 1);
11468 if (!p) {
11469 goto efault;
11471 ret = get_errno(mq_open(p, host_flags, arg3, &posix_mq_attr));
11472 unlock_user (p, arg1, 0);
11474 break;
/* POSIX message-queue name removal and timed send. */
11476 case TARGET_NR_mq_unlink:
/* NOTE(review): 'arg1 - 1' mirrors the same quirk in the mq_open
 * case above — presumably deliberate for this ABI; confirm before
 * changing. */
11477 p = lock_user_string(arg1 - 1);
11478 if (!p) {
11479 ret = -TARGET_EFAULT;
11480 break;
11482 ret = get_errno(mq_unlink(p));
11483 unlock_user (p, arg1, 0);
11484 break;
11486 case TARGET_NR_mq_timedsend:
11488 struct timespec ts;
/* NOTE(review): p is not NULL-checked after lock_user() — a bad
 * guest buffer would be passed straight to the host call; compare
 * the EFAULT handling used in mq_unlink above. */
11490 p = lock_user (VERIFY_READ, arg2, arg3, 1);
/* arg5 is the optional absolute timeout; when absent, the untimed
 * form (NULL timeout) is used. */
11491 if (arg5 != 0) {
11492 target_to_host_timespec(&ts, arg5);
11493 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11494 host_to_target_timespec(arg5, &ts);
11495 } else {
11496 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11498 unlock_user (p, arg2, arg3);
11500 break;
11502 case TARGET_NR_mq_timedreceive:
11504 struct timespec ts;
11505 unsigned int prio;
11507 p = lock_user (VERIFY_READ, arg2, arg3, 1);
11508 if (arg5 != 0) {
11509 target_to_host_timespec(&ts, arg5);
11510 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11511 &prio, &ts));
11512 host_to_target_timespec(arg5, &ts);
11513 } else {
11514 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11515 &prio, NULL));
11517 unlock_user (p, arg2, arg3);
11518 if (arg4 != 0)
11519 put_user_u32(prio, arg4);
11521 break;
11523 /* Not implemented for now... */
11524 /* case TARGET_NR_mq_notify: */
11525 /* break; */
/* Combined get/set of message-queue attributes: the old attributes
 * go out through arg3 (if given), new ones come in through arg2. */
11527 case TARGET_NR_mq_getsetattr:
11529 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11530 ret = 0;
11531 if (arg3 != 0) {
/* NOTE(review): mq_getattr/mq_setattr results are used raw rather
 * than via get_errno(), so on failure the guest sees -1 instead of
 * a -TARGET_E* code, and copy_to/from_user_mq_attr results are
 * unchecked — confirm and fix separately if so. */
11532 ret = mq_getattr(arg1, &posix_mq_attr_out);
11533 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11535 if (arg2 != 0) {
11536 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11537 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
11541 break;
11542 #endif
/* splice-family syscalls (CONFIG_SPLICE): tee, splice, vmsplice. */
11544 #ifdef CONFIG_SPLICE
11545 #ifdef TARGET_NR_tee
11546 case TARGET_NR_tee:
11548 ret = get_errno(tee(arg1,arg2,arg3,arg4));
11550 break;
11551 #endif
11552 #ifdef TARGET_NR_splice
11553 case TARGET_NR_splice:
/* Optional 64-bit file offsets are read from guest memory before the
 * call and written back afterwards, since the host call updates them
 * in place; a NULL guest pointer passes NULL through to the host. */
11555 loff_t loff_in, loff_out;
11556 loff_t *ploff_in = NULL, *ploff_out = NULL;
11557 if (arg2) {
11558 if (get_user_u64(loff_in, arg2)) {
11559 goto efault;
11561 ploff_in = &loff_in;
11563 if (arg4) {
11564 if (get_user_u64(loff_out, arg4)) {
11565 goto efault;
11567 ploff_out = &loff_out;
11569 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11570 if (arg2) {
11571 if (put_user_u64(loff_in, arg2)) {
11572 goto efault;
11575 if (arg4) {
11576 if (put_user_u64(loff_out, arg4)) {
11577 goto efault;
11581 break;
11582 #endif
11583 #ifdef TARGET_NR_vmsplice
11584 case TARGET_NR_vmsplice:
/* The guest iovec array is translated into host iovecs by lock_iovec. */
11586 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11587 if (vec != NULL) {
11588 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11589 unlock_iovec(vec, arg2, arg3, 0);
11590 } else {
/* lock_iovec() failed: translate the host errno it left behind. */
11591 ret = -host_to_target_errno(errno);
11594 break;
11595 #endif
11596 #endif /* CONFIG_SPLICE */
/* eventfd and fallocate emulation. */
11597 #ifdef CONFIG_EVENTFD
11598 #if defined(TARGET_NR_eventfd)
11599 case TARGET_NR_eventfd:
11600 ret = get_errno(eventfd(arg1, 0));
/* Drop any stale fd-translation entry previously registered for this
 * descriptor number. */
11601 fd_trans_unregister(ret);
11602 break;
11603 #endif
11604 #if defined(TARGET_NR_eventfd2)
11605 case TARGET_NR_eventfd2:
/* Translate the two flag bits eventfd2 accepts from guest to host
 * values; all other bits are passed through unchanged. */
11607 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11608 if (arg2 & TARGET_O_NONBLOCK) {
11609 host_flags |= O_NONBLOCK;
11611 if (arg2 & TARGET_O_CLOEXEC) {
11612 host_flags |= O_CLOEXEC;
11614 ret = get_errno(eventfd(arg1, host_flags));
11615 fd_trans_unregister(ret);
11616 break;
11618 #endif
11619 #endif /* CONFIG_EVENTFD */
11620 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11621 case TARGET_NR_fallocate:
/* On 32-bit ABIs the 64-bit offset and length each arrive split
 * across a pair of registers and are reassembled here. */
11622 #if TARGET_ABI_BITS == 32
11623 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11624 target_offset64(arg5, arg6)));
11625 #else
11626 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11627 #endif
11628 break;
11629 #endif
/* sync_file_range and signalfd emulation. */
11630 #if defined(CONFIG_SYNC_FILE_RANGE)
11631 #if defined(TARGET_NR_sync_file_range)
11632 case TARGET_NR_sync_file_range:
11633 #if TARGET_ABI_BITS == 32
11634 #if defined(TARGET_MIPS)
/* 32-bit MIPS aligns 64-bit syscall arguments to even register
 * pairs, so the offsets start at arg3 and flags land in arg7. */
11635 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11636 target_offset64(arg5, arg6), arg7));
11637 #else
11638 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11639 target_offset64(arg4, arg5), arg6));
11640 #endif /* !TARGET_MIPS */
11641 #else
11642 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11643 #endif
11644 break;
11645 #endif
11646 #if defined(TARGET_NR_sync_file_range2)
11647 case TARGET_NR_sync_file_range2:
11648 /* This is like sync_file_range but the arguments are reordered */
11649 #if TARGET_ABI_BITS == 32
11650 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11651 target_offset64(arg5, arg6), arg2));
11652 #else
11653 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11654 #endif
11655 break;
11656 #endif
11657 #endif
/* Both signalfd entry points share do_signalfd4(); the old signalfd
 * simply passes flags == 0. */
11658 #if defined(TARGET_NR_signalfd4)
11659 case TARGET_NR_signalfd4:
11660 ret = do_signalfd4(arg1, arg2, arg4);
11661 break;
11662 #endif
11663 #if defined(TARGET_NR_signalfd)
11664 case TARGET_NR_signalfd:
11665 ret = do_signalfd4(arg1, arg2, 0);
11666 break;
11667 #endif
/* epoll emulation (CONFIG_EPOLL): create/create1/ctl plus a shared
 * implementation of epoll_wait and epoll_pwait built on
 * safe_epoll_pwait(). */
11668 #if defined(CONFIG_EPOLL)
11669 #if defined(TARGET_NR_epoll_create)
11670 case TARGET_NR_epoll_create:
11671 ret = get_errno(epoll_create(arg1));
11672 break;
11673 #endif
11674 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11675 case TARGET_NR_epoll_create1:
11676 ret = get_errno(epoll_create1(arg1));
11677 break;
11678 #endif
11679 #if defined(TARGET_NR_epoll_ctl)
11680 case TARGET_NR_epoll_ctl:
11682 struct epoll_event ep;
11683 struct epoll_event *epp = 0;
/* The event struct (arg4) is optional — EPOLL_CTL_DEL ignores it. */
11684 if (arg4) {
11685 struct target_epoll_event *target_ep;
11686 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11687 goto efault;
11689 ep.events = tswap32(target_ep->events);
11690 /* The epoll_data_t union is just opaque data to the kernel,
11691 * so we transfer all 64 bits across and need not worry what
11692 * actual data type it is.
11694 ep.data.u64 = tswap64(target_ep->data.u64);
11695 unlock_user_struct(target_ep, arg4, 0);
11696 epp = &ep;
11698 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11699 break;
11701 #endif
11703 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11704 #if defined(TARGET_NR_epoll_wait)
11705 case TARGET_NR_epoll_wait:
11706 #endif
11707 #if defined(TARGET_NR_epoll_pwait)
11708 case TARGET_NR_epoll_pwait:
11709 #endif
11711 struct target_epoll_event *target_ep;
11712 struct epoll_event *ep;
11713 int epfd = arg1;
11714 int maxevents = arg3;
11715 int timeout = arg4;
/* Bound maxevents before the alloca() below: rejects nonsense
 * counts and caps the stack allocation. */
11717 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11718 ret = -TARGET_EINVAL;
11719 break;
11722 target_ep = lock_user(VERIFY_WRITE, arg2,
11723 maxevents * sizeof(struct target_epoll_event), 1);
11724 if (!target_ep) {
11725 goto efault;
11728 ep = alloca(maxevents * sizeof(struct epoll_event));
/* Inner dispatch on 'num': pwait additionally converts and applies
 * the guest signal mask; plain wait passes a NULL mask. */
11730 switch (num) {
11731 #if defined(TARGET_NR_epoll_pwait)
11732 case TARGET_NR_epoll_pwait:
11734 target_sigset_t *target_set;
11735 sigset_t _set, *set = &_set;
11737 if (arg5) {
11738 if (arg6 != sizeof(target_sigset_t)) {
11739 ret = -TARGET_EINVAL;
11740 break;
11743 target_set = lock_user(VERIFY_READ, arg5,
11744 sizeof(target_sigset_t), 1);
11745 if (!target_set) {
11746 unlock_user(target_ep, arg2, 0);
11747 goto efault;
11749 target_to_host_sigset(set, target_set);
11750 unlock_user(target_set, arg5, 0);
11751 } else {
11752 set = NULL;
11755 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11756 set, SIGSET_T_SIZE));
11757 break;
11759 #endif
11760 #if defined(TARGET_NR_epoll_wait)
11761 case TARGET_NR_epoll_wait:
11762 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11763 NULL, 0));
11764 break;
11765 #endif
11766 default:
11767 ret = -TARGET_ENOSYS;
/* On success, byte-swap the returned events back into guest layout. */
11769 if (!is_error(ret)) {
11770 int i;
11771 for (i = 0; i < ret; i++) {
11772 target_ep[i].events = tswap32(ep[i].events);
11773 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11776 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
11777 break;
11779 #endif
11780 #endif
11781 #ifdef TARGET_NR_prlimit64
11782 case TARGET_NR_prlimit64:
11784 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11785 struct target_rlimit64 *target_rnew, *target_rold;
11786 struct host_rlimit64 rnew, rold, *rnewp = 0;
11787 int resource = target_to_host_resource(arg2);
/* Both rlimit pointers are optional; copy the new limit in only
 * when the guest supplied one. */
11788 if (arg3) {
11789 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11790 goto efault;
11792 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11793 rnew.rlim_max = tswap64(target_rnew->rlim_max);
11794 unlock_user_struct(target_rnew, arg3, 0);
11795 rnewp = &rnew;
11798 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11799 if (!is_error(ret) && arg4) {
11800 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11801 goto efault;
11803 target_rold->rlim_cur = tswap64(rold.rlim_cur);
11804 target_rold->rlim_max = tswap64(rold.rlim_max);
11805 unlock_user_struct(target_rold, arg4, 1);
11807 break;
11809 #endif
11810 #ifdef TARGET_NR_gethostname
11811 case TARGET_NR_gethostname:
/* The hostname is written directly into the locked guest buffer. */
11813 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11814 if (name) {
11815 ret = get_errno(gethostname(name, arg2));
11816 unlock_user(name, arg1, arg2);
11817 } else {
11818 ret = -TARGET_EFAULT;
11820 break;
11822 #endif
/* Target-specific atomic helpers (e.g. m68k): compare-and-exchange
 * on a 32-bit guest word, and a memory barrier. */
11823 #ifdef TARGET_NR_atomic_cmpxchg_32
11824 case TARGET_NR_atomic_cmpxchg_32:
11826 /* should use start_exclusive from main.c */
11827 abi_ulong mem_value;
/* A faulting guest address raises SIGSEGV in the guest, mirroring
 * what the real kernel helper would do. */
11828 if (get_user_u32(mem_value, arg6)) {
11829 target_siginfo_t info;
11830 info.si_signo = SIGSEGV;
11831 info.si_errno = 0;
11832 info.si_code = TARGET_SEGV_MAPERR;
11833 info._sifields._sigfault._addr = arg6;
11834 queue_signal((CPUArchState *)cpu_env, info.si_signo,
11835 QEMU_SI_FAULT, &info);
/* Sentinel return; the queued SIGSEGV is what the guest observes. */
11836 ret = 0xdeadbeef;
/* Classic CAS: store the new value only if the old value matched;
 * always return the value that was read. */
11839 if (mem_value == arg2)
11840 put_user_u32(arg1, arg6);
11841 ret = mem_value;
11842 break;
11844 #endif
11845 #ifdef TARGET_NR_atomic_barrier
11846 case TARGET_NR_atomic_barrier:
11848 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
11849 ret = 0;
11850 break;
11852 #endif
11854 #ifdef TARGET_NR_timer_create
11855 case TARGET_NR_timer_create:
11857 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
/* Host timers live in the g_posix_timers table; the guest receives
 * an opaque handle (TIMER_MAGIC | index) rather than a host timer_t. */
11859 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11861 int clkid = arg1;
11862 int timer_index = next_free_host_timer();
11864 if (timer_index < 0) {
/* Timer table exhausted. */
11865 ret = -TARGET_EAGAIN;
11866 } else {
11867 timer_t *phtimer = g_posix_timers + timer_index;
/* sevp (arg2) is optional; convert the guest sigevent only when
 * one was supplied. */
11869 if (arg2) {
11870 phost_sevp = &host_sevp;
11871 ret = target_to_host_sigevent(phost_sevp, arg2);
11872 if (ret != 0) {
11873 break;
11877 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11878 if (ret) {
11879 phtimer = NULL;
11880 } else {
11881 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11882 goto efault;
11886 break;
11888 #endif
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
        {
            /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
             * struct itimerspec * old_value */
            target_timer_t timerid = get_timer_id(arg1);

            if (timerid < 0) {
                ret = timerid;
            } else if (arg3 == 0) {
                ret = -TARGET_EINVAL;
            } else {
                timer_t htimer = g_posix_timers[timerid];
                struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

                target_to_host_itimerspec(&hspec_new, arg3);
                ret = get_errno(
                    timer_settime(htimer, arg2, &hspec_new, &hspec_old));
                /* The previous timer setting belongs in old_value (arg4),
                 * not in the flags argument (arg2) as the old code had it,
                 * and is only copied out when the guest asked for it. */
                if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                    goto efault;
                }
            }
        }
        break;
#endif
/* Remaining POSIX timer syscalls: each validates the opaque guest
 * handle with get_timer_id() (negative means bad handle, and the
 * error code is returned directly) before touching g_posix_timers. */
11914 #ifdef TARGET_NR_timer_gettime
11915 case TARGET_NR_timer_gettime:
11917 /* args: timer_t timerid, struct itimerspec *curr_value */
11918 target_timer_t timerid = get_timer_id(arg1);
11920 if (timerid < 0) {
11921 ret = timerid;
11922 } else if (!arg2) {
11923 ret = -TARGET_EFAULT;
11924 } else {
11925 timer_t htimer = g_posix_timers[timerid];
11926 struct itimerspec hspec;
11927 ret = get_errno(timer_gettime(htimer, &hspec));
11929 if (host_to_target_itimerspec(arg2, &hspec)) {
11930 ret = -TARGET_EFAULT;
11933 break;
11935 #endif
11937 #ifdef TARGET_NR_timer_getoverrun
11938 case TARGET_NR_timer_getoverrun:
11940 /* args: timer_t timerid */
11941 target_timer_t timerid = get_timer_id(arg1);
11943 if (timerid < 0) {
11944 ret = timerid;
11945 } else {
11946 timer_t htimer = g_posix_timers[timerid];
11947 ret = get_errno(timer_getoverrun(htimer));
/* NOTE(review): timer_getoverrun returns an overrun COUNT, not a
 * file descriptor — this fd_trans_unregister() call looks
 * misplaced; confirm and remove separately if so. */
11949 fd_trans_unregister(ret);
11950 break;
11952 #endif
11954 #ifdef TARGET_NR_timer_delete
11955 case TARGET_NR_timer_delete:
11957 /* args: timer_t timerid */
11958 target_timer_t timerid = get_timer_id(arg1);
11960 if (timerid < 0) {
11961 ret = timerid;
11962 } else {
11963 timer_t htimer = g_posix_timers[timerid];
11964 ret = get_errno(timer_delete(htimer));
/* Release the slot so next_free_host_timer() can reuse it. */
11965 g_posix_timers[timerid] = 0;
11967 break;
11969 #endif
/* timerfd, ioprio and namespace syscalls: mostly thin pass-throughs
 * with itimerspec conversion where guest pointers are involved. */
11971 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11972 case TARGET_NR_timerfd_create:
11973 ret = get_errno(timerfd_create(arg1,
11974 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11975 break;
11976 #endif
11978 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11979 case TARGET_NR_timerfd_gettime:
11981 struct itimerspec its_curr;
11983 ret = get_errno(timerfd_gettime(arg1, &its_curr));
/* Copy the current setting out only if the guest passed a buffer. */
11985 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11986 goto efault;
11989 break;
11990 #endif
11992 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11993 case TARGET_NR_timerfd_settime:
11995 struct itimerspec its_new, its_old, *p_new;
11997 if (arg3) {
11998 if (target_to_host_itimerspec(&its_new, arg3)) {
11999 goto efault;
12001 p_new = &its_new;
12002 } else {
12003 p_new = NULL;
12006 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12008 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12009 goto efault;
12012 break;
12013 #endif
12015 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12016 case TARGET_NR_ioprio_get:
12017 ret = get_errno(ioprio_get(arg1, arg2));
12018 break;
12019 #endif
12021 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12022 case TARGET_NR_ioprio_set:
12023 ret = get_errno(ioprio_set(arg1, arg2, arg3));
12024 break;
12025 #endif
12027 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12028 case TARGET_NR_setns:
12029 ret = get_errno(setns(arg1, arg2));
12030 break;
12031 #endif
12032 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12033 case TARGET_NR_unshare:
12034 ret = get_errno(unshare(arg1));
12035 break;
12036 #endif
/* Tail of the giant syscall switch: the default/unimplemented path,
 * plus the shared 'fail' and 'efault' exit labels used by goto
 * throughout the cases above. */
12038 default:
12039 unimplemented:
12040 gemu_log("qemu: Unsupported syscall: %d\n", num);
/* unimplemented_nowarn: same ENOSYS result but without the log line,
 * for syscalls we deliberately decline to support (see robust_list). */
12041 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
12042 unimplemented_nowarn:
12043 #endif
12044 ret = -TARGET_ENOSYS;
12045 break;
/* Common exit: optional strace-style logging, tracepoint, and the
 * (possibly negative -TARGET_E*) result returned to the dispatcher. */
12047 fail:
12048 #ifdef DEBUG
12049 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
12050 #endif
12051 if(do_strace)
12052 print_syscall_ret(num, ret);
12053 trace_guest_user_syscall_ret(cpu, num, ret);
12054 return ret;
/* Shared bad-guest-pointer exit used via 'goto efault'. */
12055 efault:
12056 ret = -TARGET_EFAULT;
12057 goto fail;