linux-user: Fix msgrcv() and msgsnd() syscalls support
[qemu/ar7.git] / linux-user / syscall.c
blob36ca921a7ee51de87e725c3fd6f6d5b718504333
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #ifdef __ia64__
40 int __clone2(int (*fn)(void *), void *child_stack_base,
41 size_t stack_size, int flags, void *arg, ...);
42 #endif
43 #include <sys/socket.h>
44 #include <sys/un.h>
45 #include <sys/uio.h>
46 #include <poll.h>
47 #include <sys/times.h>
48 #include <sys/shm.h>
49 #include <sys/sem.h>
50 #include <sys/statfs.h>
51 #include <utime.h>
52 #include <sys/sysinfo.h>
53 #include <sys/signalfd.h>
54 //#include <sys/user.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <linux/wireless.h>
58 #include <linux/icmp.h>
59 #include "qemu-common.h"
60 #ifdef CONFIG_TIMERFD
61 #include <sys/timerfd.h>
62 #endif
63 #ifdef TARGET_GPROF
64 #include <sys/gmon.h>
65 #endif
66 #ifdef CONFIG_EVENTFD
67 #include <sys/eventfd.h>
68 #endif
69 #ifdef CONFIG_EPOLL
70 #include <sys/epoll.h>
71 #endif
72 #ifdef CONFIG_ATTR
73 #include "qemu/xattr.h"
74 #endif
75 #ifdef CONFIG_SENDFILE
76 #include <sys/sendfile.h>
77 #endif
79 #define termios host_termios
80 #define winsize host_winsize
81 #define termio host_termio
82 #define sgttyb host_sgttyb /* same as target */
83 #define tchars host_tchars /* same as target */
84 #define ltchars host_ltchars /* same as target */
86 #include <linux/termios.h>
87 #include <linux/unistd.h>
88 #include <linux/cdrom.h>
89 #include <linux/hdreg.h>
90 #include <linux/soundcard.h>
91 #include <linux/kd.h>
92 #include <linux/mtio.h>
93 #include <linux/fs.h>
94 #if defined(CONFIG_FIEMAP)
95 #include <linux/fiemap.h>
96 #endif
97 #include <linux/fb.h>
98 #include <linux/vt.h>
99 #include <linux/dm-ioctl.h>
100 #include <linux/reboot.h>
101 #include <linux/route.h>
102 #include <linux/filter.h>
103 #include <linux/blkpg.h>
104 #include <netpacket/packet.h>
105 #include <linux/netlink.h>
106 #ifdef CONFIG_RTNETLINK
107 #include <linux/rtnetlink.h>
108 #include <linux/if_bridge.h>
109 #endif
110 #include <linux/audit.h>
111 #include "linux_loop.h"
112 #include "uname.h"
114 #include "qemu.h"
116 #ifndef CLONE_IO
117 #define CLONE_IO 0x80000000 /* Clone io context */
118 #endif
120 /* We can't directly call the host clone syscall, because this will
121 * badly confuse libc (breaking mutexes, for example). So we must
122 * divide clone flags into:
123 * * flag combinations that look like pthread_create()
124 * * flag combinations that look like fork()
125 * * flags we can implement within QEMU itself
126 * * flags we can't support and will return an error for
128 /* For thread creation, all these flags must be present; for
129 * fork, none must be present.
131 #define CLONE_THREAD_FLAGS \
132 (CLONE_VM | CLONE_FS | CLONE_FILES | \
133 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
135 /* These flags are ignored:
136 * CLONE_DETACHED is now ignored by the kernel;
137 * CLONE_IO is just an optimisation hint to the I/O scheduler
139 #define CLONE_IGNORED_FLAGS \
140 (CLONE_DETACHED | CLONE_IO)
142 /* Flags for fork which we can implement within QEMU itself */
143 #define CLONE_OPTIONAL_FORK_FLAGS \
144 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
145 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
147 /* Flags for thread creation which we can implement within QEMU itself */
148 #define CLONE_OPTIONAL_THREAD_FLAGS \
149 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
150 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
152 #define CLONE_INVALID_FORK_FLAGS \
153 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
155 #define CLONE_INVALID_THREAD_FLAGS \
156 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
157 CLONE_IGNORED_FLAGS))
159 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
160 * have almost all been allocated. We cannot support any of
161 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
162 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
163 * The checks against the invalid thread masks above will catch these.
164 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
167 //#define DEBUG
168 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
169 * once. This exercises the codepaths for restart.
171 //#define DEBUG_ERESTARTSYS
173 //#include <linux/msdos_fs.h>
174 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
175 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
177 #undef _syscall0
178 #undef _syscall1
179 #undef _syscall2
180 #undef _syscall3
181 #undef _syscall4
182 #undef _syscall5
183 #undef _syscall6
185 #define _syscall0(type,name) \
186 static type name (void) \
188 return syscall(__NR_##name); \
191 #define _syscall1(type,name,type1,arg1) \
192 static type name (type1 arg1) \
194 return syscall(__NR_##name, arg1); \
197 #define _syscall2(type,name,type1,arg1,type2,arg2) \
198 static type name (type1 arg1,type2 arg2) \
200 return syscall(__NR_##name, arg1, arg2); \
203 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
204 static type name (type1 arg1,type2 arg2,type3 arg3) \
206 return syscall(__NR_##name, arg1, arg2, arg3); \
209 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
210 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
212 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
215 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
216 type5,arg5) \
217 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
219 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
223 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
224 type5,arg5,type6,arg6) \
225 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
226 type6 arg6) \
228 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
232 #define __NR_sys_uname __NR_uname
233 #define __NR_sys_getcwd1 __NR_getcwd
234 #define __NR_sys_getdents __NR_getdents
235 #define __NR_sys_getdents64 __NR_getdents64
236 #define __NR_sys_getpriority __NR_getpriority
237 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
238 #define __NR_sys_syslog __NR_syslog
239 #define __NR_sys_futex __NR_futex
240 #define __NR_sys_inotify_init __NR_inotify_init
241 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
242 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
244 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
245 defined(__s390x__)
246 #define __NR__llseek __NR_lseek
247 #endif
249 /* Newer kernel ports have llseek() instead of _llseek() */
250 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
251 #define TARGET_NR__llseek TARGET_NR_llseek
252 #endif
254 #ifdef __NR_gettid
255 _syscall0(int, gettid)
256 #else
257 /* This is a replacement for the host gettid() and must return a host
258 errno. */
static int gettid(void) {
    /* No host gettid() syscall available: report ENOSYS as a host errno
     * (negated), matching the convention of the _syscallN wrappers. */
    return -ENOSYS;
}
262 #endif
263 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
264 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
265 #endif
266 #if !defined(__NR_getdents) || \
267 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
268 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
269 #endif
270 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
271 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
272 loff_t *, res, uint, wh);
273 #endif
274 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
275 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
276 #ifdef __NR_exit_group
277 _syscall1(int,exit_group,int,error_code)
278 #endif
279 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
280 _syscall1(int,set_tid_address,int *,tidptr)
281 #endif
282 #if defined(TARGET_NR_futex) && defined(__NR_futex)
283 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
284 const struct timespec *,timeout,int *,uaddr2,int,val3)
285 #endif
286 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
287 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
288 unsigned long *, user_mask_ptr);
289 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
290 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
291 unsigned long *, user_mask_ptr);
292 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
293 void *, arg);
294 _syscall2(int, capget, struct __user_cap_header_struct *, header,
295 struct __user_cap_data_struct *, data);
296 _syscall2(int, capset, struct __user_cap_header_struct *, header,
297 struct __user_cap_data_struct *, data);
298 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
299 _syscall2(int, ioprio_get, int, which, int, who)
300 #endif
301 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
302 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
303 #endif
304 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
305 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
306 #endif
308 static bitmask_transtbl fcntl_flags_tbl[] = {
309 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
310 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
311 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
312 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
313 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
314 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
315 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
316 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
317 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
318 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
319 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
320 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
321 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
322 #if defined(O_DIRECT)
323 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
324 #endif
325 #if defined(O_NOATIME)
326 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
327 #endif
328 #if defined(O_CLOEXEC)
329 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
330 #endif
331 #if defined(O_PATH)
332 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
333 #endif
334 /* Don't terminate the list prematurely on 64-bit host+guest. */
335 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
336 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
337 #endif
338 { 0, 0, 0, 0 }
341 enum {
342 QEMU_IFLA_BR_UNSPEC,
343 QEMU_IFLA_BR_FORWARD_DELAY,
344 QEMU_IFLA_BR_HELLO_TIME,
345 QEMU_IFLA_BR_MAX_AGE,
346 QEMU_IFLA_BR_AGEING_TIME,
347 QEMU_IFLA_BR_STP_STATE,
348 QEMU_IFLA_BR_PRIORITY,
349 QEMU_IFLA_BR_VLAN_FILTERING,
350 QEMU_IFLA_BR_VLAN_PROTOCOL,
351 QEMU_IFLA_BR_GROUP_FWD_MASK,
352 QEMU_IFLA_BR_ROOT_ID,
353 QEMU_IFLA_BR_BRIDGE_ID,
354 QEMU_IFLA_BR_ROOT_PORT,
355 QEMU_IFLA_BR_ROOT_PATH_COST,
356 QEMU_IFLA_BR_TOPOLOGY_CHANGE,
357 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
358 QEMU_IFLA_BR_HELLO_TIMER,
359 QEMU_IFLA_BR_TCN_TIMER,
360 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
361 QEMU_IFLA_BR_GC_TIMER,
362 QEMU_IFLA_BR_GROUP_ADDR,
363 QEMU_IFLA_BR_FDB_FLUSH,
364 QEMU_IFLA_BR_MCAST_ROUTER,
365 QEMU_IFLA_BR_MCAST_SNOOPING,
366 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
367 QEMU_IFLA_BR_MCAST_QUERIER,
368 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
369 QEMU_IFLA_BR_MCAST_HASH_MAX,
370 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
371 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
372 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
373 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
374 QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
375 QEMU_IFLA_BR_MCAST_QUERY_INTVL,
376 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
377 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
378 QEMU_IFLA_BR_NF_CALL_IPTABLES,
379 QEMU_IFLA_BR_NF_CALL_IP6TABLES,
380 QEMU_IFLA_BR_NF_CALL_ARPTABLES,
381 QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
382 QEMU_IFLA_BR_PAD,
383 QEMU_IFLA_BR_VLAN_STATS_ENABLED,
384 QEMU_IFLA_BR_MCAST_STATS_ENABLED,
385 QEMU___IFLA_BR_MAX,
388 enum {
389 QEMU_IFLA_UNSPEC,
390 QEMU_IFLA_ADDRESS,
391 QEMU_IFLA_BROADCAST,
392 QEMU_IFLA_IFNAME,
393 QEMU_IFLA_MTU,
394 QEMU_IFLA_LINK,
395 QEMU_IFLA_QDISC,
396 QEMU_IFLA_STATS,
397 QEMU_IFLA_COST,
398 QEMU_IFLA_PRIORITY,
399 QEMU_IFLA_MASTER,
400 QEMU_IFLA_WIRELESS,
401 QEMU_IFLA_PROTINFO,
402 QEMU_IFLA_TXQLEN,
403 QEMU_IFLA_MAP,
404 QEMU_IFLA_WEIGHT,
405 QEMU_IFLA_OPERSTATE,
406 QEMU_IFLA_LINKMODE,
407 QEMU_IFLA_LINKINFO,
408 QEMU_IFLA_NET_NS_PID,
409 QEMU_IFLA_IFALIAS,
410 QEMU_IFLA_NUM_VF,
411 QEMU_IFLA_VFINFO_LIST,
412 QEMU_IFLA_STATS64,
413 QEMU_IFLA_VF_PORTS,
414 QEMU_IFLA_PORT_SELF,
415 QEMU_IFLA_AF_SPEC,
416 QEMU_IFLA_GROUP,
417 QEMU_IFLA_NET_NS_FD,
418 QEMU_IFLA_EXT_MASK,
419 QEMU_IFLA_PROMISCUITY,
420 QEMU_IFLA_NUM_TX_QUEUES,
421 QEMU_IFLA_NUM_RX_QUEUES,
422 QEMU_IFLA_CARRIER,
423 QEMU_IFLA_PHYS_PORT_ID,
424 QEMU_IFLA_CARRIER_CHANGES,
425 QEMU_IFLA_PHYS_SWITCH_ID,
426 QEMU_IFLA_LINK_NETNSID,
427 QEMU_IFLA_PHYS_PORT_NAME,
428 QEMU_IFLA_PROTO_DOWN,
429 QEMU_IFLA_GSO_MAX_SEGS,
430 QEMU_IFLA_GSO_MAX_SIZE,
431 QEMU_IFLA_PAD,
432 QEMU_IFLA_XDP,
433 QEMU___IFLA_MAX
436 enum {
437 QEMU_IFLA_BRPORT_UNSPEC,
438 QEMU_IFLA_BRPORT_STATE,
439 QEMU_IFLA_BRPORT_PRIORITY,
440 QEMU_IFLA_BRPORT_COST,
441 QEMU_IFLA_BRPORT_MODE,
442 QEMU_IFLA_BRPORT_GUARD,
443 QEMU_IFLA_BRPORT_PROTECT,
444 QEMU_IFLA_BRPORT_FAST_LEAVE,
445 QEMU_IFLA_BRPORT_LEARNING,
446 QEMU_IFLA_BRPORT_UNICAST_FLOOD,
447 QEMU_IFLA_BRPORT_PROXYARP,
448 QEMU_IFLA_BRPORT_LEARNING_SYNC,
449 QEMU_IFLA_BRPORT_PROXYARP_WIFI,
450 QEMU_IFLA_BRPORT_ROOT_ID,
451 QEMU_IFLA_BRPORT_BRIDGE_ID,
452 QEMU_IFLA_BRPORT_DESIGNATED_PORT,
453 QEMU_IFLA_BRPORT_DESIGNATED_COST,
454 QEMU_IFLA_BRPORT_ID,
455 QEMU_IFLA_BRPORT_NO,
456 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
457 QEMU_IFLA_BRPORT_CONFIG_PENDING,
458 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
459 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
460 QEMU_IFLA_BRPORT_HOLD_TIMER,
461 QEMU_IFLA_BRPORT_FLUSH,
462 QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
463 QEMU_IFLA_BRPORT_PAD,
464 QEMU___IFLA_BRPORT_MAX
467 enum {
468 QEMU_IFLA_INFO_UNSPEC,
469 QEMU_IFLA_INFO_KIND,
470 QEMU_IFLA_INFO_DATA,
471 QEMU_IFLA_INFO_XSTATS,
472 QEMU_IFLA_INFO_SLAVE_KIND,
473 QEMU_IFLA_INFO_SLAVE_DATA,
474 QEMU___IFLA_INFO_MAX,
477 enum {
478 QEMU_IFLA_INET_UNSPEC,
479 QEMU_IFLA_INET_CONF,
480 QEMU___IFLA_INET_MAX,
483 enum {
484 QEMU_IFLA_INET6_UNSPEC,
485 QEMU_IFLA_INET6_FLAGS,
486 QEMU_IFLA_INET6_CONF,
487 QEMU_IFLA_INET6_STATS,
488 QEMU_IFLA_INET6_MCAST,
489 QEMU_IFLA_INET6_CACHEINFO,
490 QEMU_IFLA_INET6_ICMP6STATS,
491 QEMU_IFLA_INET6_TOKEN,
492 QEMU_IFLA_INET6_ADDR_GEN_MODE,
493 QEMU___IFLA_INET6_MAX
496 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
497 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
498 typedef struct TargetFdTrans {
499 TargetFdDataFunc host_to_target_data;
500 TargetFdDataFunc target_to_host_data;
501 TargetFdAddrFunc target_to_host_addr;
502 } TargetFdTrans;
504 static TargetFdTrans **target_fd_trans;
506 static unsigned int target_fd_max;
508 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
510 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
511 return target_fd_trans[fd]->target_to_host_data;
513 return NULL;
516 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
518 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
519 return target_fd_trans[fd]->host_to_target_data;
521 return NULL;
524 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
526 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
527 return target_fd_trans[fd]->target_to_host_addr;
529 return NULL;
532 static void fd_trans_register(int fd, TargetFdTrans *trans)
534 unsigned int oldmax;
536 if (fd >= target_fd_max) {
537 oldmax = target_fd_max;
538 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
539 target_fd_trans = g_renew(TargetFdTrans *,
540 target_fd_trans, target_fd_max);
541 memset((void *)(target_fd_trans + oldmax), 0,
542 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
544 target_fd_trans[fd] = trans;
547 static void fd_trans_unregister(int fd)
549 if (fd >= 0 && fd < target_fd_max) {
550 target_fd_trans[fd] = NULL;
554 static void fd_trans_dup(int oldfd, int newfd)
556 fd_trans_unregister(newfd);
557 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
558 fd_trans_register(newfd, target_fd_trans[oldfd]);
static int sys_getcwd1(char *buf, size_t size)
{
    /* getcwd() wrapper matching the syscall convention: on success return
     * the string length including the trailing NUL, on failure -1 with
     * errno already set by getcwd(). */
    char *cwd = getcwd(buf, size);

    if (cwd == NULL) {
        return -1;
    }
    return strlen(buf) + 1;
}
571 #ifdef TARGET_NR_utimensat
572 #if defined(__NR_utimensat)
573 #define __NR_sys_utimensat __NR_utimensat
574 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
575 const struct timespec *,tsp,int,flags)
576 #else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    /* Host kernel has no utimensat syscall: fail with ENOSYS, using the
     * same errno convention the real syscall wrapper would. */
    errno = ENOSYS;
    return -1;
}
583 #endif
584 #endif /* TARGET_NR_utimensat */
586 #ifdef CONFIG_INOTIFY
587 #include <sys/inotify.h>
589 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
    /* Thin wrapper over the libc call so the syscall dispatcher has a
     * uniform sys_* entry point. */
    return inotify_init();
}
594 #endif
595 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    /* Thin wrapper over the libc call so the syscall dispatcher has a
     * uniform sys_* entry point. */
    return inotify_add_watch(fd, pathname, mask);
}
600 #endif
601 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    /* Thin wrapper over the libc call so the syscall dispatcher has a
     * uniform sys_* entry point. */
    return inotify_rm_watch(fd, wd);
}
606 #endif
607 #ifdef CONFIG_INOTIFY1
608 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    /* Thin wrapper over the libc call so the syscall dispatcher has a
     * uniform sys_* entry point. */
    return inotify_init1(flags);
}
613 #endif
614 #endif
615 #else
616 /* Userspace can usually survive runtime without inotify */
617 #undef TARGET_NR_inotify_init
618 #undef TARGET_NR_inotify_init1
619 #undef TARGET_NR_inotify_add_watch
620 #undef TARGET_NR_inotify_rm_watch
621 #endif /* CONFIG_INOTIFY */
623 #if defined(TARGET_NR_prlimit64)
624 #ifndef __NR_prlimit64
625 # define __NR_prlimit64 -1
626 #endif
627 #define __NR_sys_prlimit64 __NR_prlimit64
628 /* The glibc rlimit structure may not be that used by the underlying syscall */
629 struct host_rlimit64 {
630 uint64_t rlim_cur;
631 uint64_t rlim_max;
633 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
634 const struct host_rlimit64 *, new_limit,
635 struct host_rlimit64 *, old_limit)
636 #endif
639 #if defined(TARGET_NR_timer_create)
640 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
641 static timer_t g_posix_timers[32] = { 0, } ;
643 static inline int next_free_host_timer(void)
645 int k ;
646 /* FIXME: Does finding the next free slot require a lock? */
647 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
648 if (g_posix_timers[k] == 0) {
649 g_posix_timers[k] = (timer_t) 1;
650 return k;
653 return -1;
655 #endif
657 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
#ifdef TARGET_ARM
/* ARM EABI requires 64-bit syscall arguments in even/odd register pairs;
 * OABI does not, so consult the per-CPU eabi flag. */
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
/* MIPS o32 always aligns 64-bit arguments to register pairs. */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
/* All other targets pass 64-bit arguments without pair alignment. */
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
673 #define ERRNO_TABLE_SIZE 1200
675 /* target_to_host_errno_table[] is initialized from
676 * host_to_target_errno_table[] in syscall_init(). */
677 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
681 * This list is the union of errno values overridden in asm-<arch>/errno.h
682 * minus the errnos that are not actually generic to all archs.
684 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
685 [EAGAIN] = TARGET_EAGAIN,
686 [EIDRM] = TARGET_EIDRM,
687 [ECHRNG] = TARGET_ECHRNG,
688 [EL2NSYNC] = TARGET_EL2NSYNC,
689 [EL3HLT] = TARGET_EL3HLT,
690 [EL3RST] = TARGET_EL3RST,
691 [ELNRNG] = TARGET_ELNRNG,
692 [EUNATCH] = TARGET_EUNATCH,
693 [ENOCSI] = TARGET_ENOCSI,
694 [EL2HLT] = TARGET_EL2HLT,
695 [EDEADLK] = TARGET_EDEADLK,
696 [ENOLCK] = TARGET_ENOLCK,
697 [EBADE] = TARGET_EBADE,
698 [EBADR] = TARGET_EBADR,
699 [EXFULL] = TARGET_EXFULL,
700 [ENOANO] = TARGET_ENOANO,
701 [EBADRQC] = TARGET_EBADRQC,
702 [EBADSLT] = TARGET_EBADSLT,
703 [EBFONT] = TARGET_EBFONT,
704 [ENOSTR] = TARGET_ENOSTR,
705 [ENODATA] = TARGET_ENODATA,
706 [ETIME] = TARGET_ETIME,
707 [ENOSR] = TARGET_ENOSR,
708 [ENONET] = TARGET_ENONET,
709 [ENOPKG] = TARGET_ENOPKG,
710 [EREMOTE] = TARGET_EREMOTE,
711 [ENOLINK] = TARGET_ENOLINK,
712 [EADV] = TARGET_EADV,
713 [ESRMNT] = TARGET_ESRMNT,
714 [ECOMM] = TARGET_ECOMM,
715 [EPROTO] = TARGET_EPROTO,
716 [EDOTDOT] = TARGET_EDOTDOT,
717 [EMULTIHOP] = TARGET_EMULTIHOP,
718 [EBADMSG] = TARGET_EBADMSG,
719 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
720 [EOVERFLOW] = TARGET_EOVERFLOW,
721 [ENOTUNIQ] = TARGET_ENOTUNIQ,
722 [EBADFD] = TARGET_EBADFD,
723 [EREMCHG] = TARGET_EREMCHG,
724 [ELIBACC] = TARGET_ELIBACC,
725 [ELIBBAD] = TARGET_ELIBBAD,
726 [ELIBSCN] = TARGET_ELIBSCN,
727 [ELIBMAX] = TARGET_ELIBMAX,
728 [ELIBEXEC] = TARGET_ELIBEXEC,
729 [EILSEQ] = TARGET_EILSEQ,
730 [ENOSYS] = TARGET_ENOSYS,
731 [ELOOP] = TARGET_ELOOP,
732 [ERESTART] = TARGET_ERESTART,
733 [ESTRPIPE] = TARGET_ESTRPIPE,
734 [ENOTEMPTY] = TARGET_ENOTEMPTY,
735 [EUSERS] = TARGET_EUSERS,
736 [ENOTSOCK] = TARGET_ENOTSOCK,
737 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
738 [EMSGSIZE] = TARGET_EMSGSIZE,
739 [EPROTOTYPE] = TARGET_EPROTOTYPE,
740 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
741 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
742 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
743 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
744 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
745 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
746 [EADDRINUSE] = TARGET_EADDRINUSE,
747 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
748 [ENETDOWN] = TARGET_ENETDOWN,
749 [ENETUNREACH] = TARGET_ENETUNREACH,
750 [ENETRESET] = TARGET_ENETRESET,
751 [ECONNABORTED] = TARGET_ECONNABORTED,
752 [ECONNRESET] = TARGET_ECONNRESET,
753 [ENOBUFS] = TARGET_ENOBUFS,
754 [EISCONN] = TARGET_EISCONN,
755 [ENOTCONN] = TARGET_ENOTCONN,
756 [EUCLEAN] = TARGET_EUCLEAN,
757 [ENOTNAM] = TARGET_ENOTNAM,
758 [ENAVAIL] = TARGET_ENAVAIL,
759 [EISNAM] = TARGET_EISNAM,
760 [EREMOTEIO] = TARGET_EREMOTEIO,
761 [EDQUOT] = TARGET_EDQUOT,
762 [ESHUTDOWN] = TARGET_ESHUTDOWN,
763 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
764 [ETIMEDOUT] = TARGET_ETIMEDOUT,
765 [ECONNREFUSED] = TARGET_ECONNREFUSED,
766 [EHOSTDOWN] = TARGET_EHOSTDOWN,
767 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
768 [EALREADY] = TARGET_EALREADY,
769 [EINPROGRESS] = TARGET_EINPROGRESS,
770 [ESTALE] = TARGET_ESTALE,
771 [ECANCELED] = TARGET_ECANCELED,
772 [ENOMEDIUM] = TARGET_ENOMEDIUM,
773 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
774 #ifdef ENOKEY
775 [ENOKEY] = TARGET_ENOKEY,
776 #endif
777 #ifdef EKEYEXPIRED
778 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
779 #endif
780 #ifdef EKEYREVOKED
781 [EKEYREVOKED] = TARGET_EKEYREVOKED,
782 #endif
783 #ifdef EKEYREJECTED
784 [EKEYREJECTED] = TARGET_EKEYREJECTED,
785 #endif
786 #ifdef EOWNERDEAD
787 [EOWNERDEAD] = TARGET_EOWNERDEAD,
788 #endif
789 #ifdef ENOTRECOVERABLE
790 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
791 #endif
792 #ifdef ENOMSG
793 [ENOMSG] = TARGET_ENOMSG,
794 #endif
797 static inline int host_to_target_errno(int err)
799 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
800 host_to_target_errno_table[err]) {
801 return host_to_target_errno_table[err];
803 return err;
806 static inline int target_to_host_errno(int err)
808 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
809 target_to_host_errno_table[err]) {
810 return target_to_host_errno_table[err];
812 return err;
815 static inline abi_long get_errno(abi_long ret)
817 if (ret == -1)
818 return -host_to_target_errno(errno);
819 else
820 return ret;
823 static inline int is_error(abi_long ret)
825 return (abi_ulong)ret >= (abi_ulong)(-4096);
828 const char *target_strerror(int err)
830 if (err == TARGET_ERESTARTSYS) {
831 return "To be restarted";
833 if (err == TARGET_QEMU_ESIGRETURN) {
834 return "Successful exit from sigreturn";
837 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
838 return NULL;
840 return strerror(target_to_host_errno(err));
843 #define safe_syscall0(type, name) \
844 static type safe_##name(void) \
846 return safe_syscall(__NR_##name); \
849 #define safe_syscall1(type, name, type1, arg1) \
850 static type safe_##name(type1 arg1) \
852 return safe_syscall(__NR_##name, arg1); \
855 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
856 static type safe_##name(type1 arg1, type2 arg2) \
858 return safe_syscall(__NR_##name, arg1, arg2); \
861 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
862 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
864 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
867 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
868 type4, arg4) \
869 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
871 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
874 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
875 type4, arg4, type5, arg5) \
876 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
877 type5 arg5) \
879 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
882 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
883 type4, arg4, type5, arg5, type6, arg6) \
884 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
885 type5 arg5, type6 arg6) \
887 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
890 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
891 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
892 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
893 int, flags, mode_t, mode)
894 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
895 struct rusage *, rusage)
896 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
897 int, options, struct rusage *, rusage)
898 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
899 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
900 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
901 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
902 struct timespec *, tsp, const sigset_t *, sigmask,
903 size_t, sigsetsize)
904 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
905 int, maxevents, int, timeout, const sigset_t *, sigmask,
906 size_t, sigsetsize)
907 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
908 const struct timespec *,timeout,int *,uaddr2,int,val3)
909 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
910 safe_syscall2(int, kill, pid_t, pid, int, sig)
911 safe_syscall2(int, tkill, int, tid, int, sig)
912 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
913 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
914 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
915 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
916 socklen_t, addrlen)
917 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
918 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
919 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
920 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
921 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
922 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
923 safe_syscall2(int, flock, int, fd, int, operation)
924 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
925 const struct timespec *, uts, size_t, sigsetsize)
926 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
927 int, flags)
928 safe_syscall2(int, nanosleep, const struct timespec *, req,
929 struct timespec *, rem)
930 #ifdef TARGET_NR_clock_nanosleep
931 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
932 const struct timespec *, req, struct timespec *, rem)
933 #endif
934 #ifdef __NR_msgsnd
935 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
936 int, flags)
937 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
938 long, msgtype, int, flags)
939 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
940 unsigned, nsops, const struct timespec *, timeout)
941 #else
942 /* This host kernel architecture uses a single ipc syscall; fake up
943 * wrappers for the sub-operations to hide this implementation detail.
944 * Annoyingly we can't include linux/ipc.h to get the constant definitions
945 * for the call parameter because some structs in there conflict with the
946 * sys/ipc.h ones. So we just define them here, and rely on them being
947 * the same for all host architectures.
949 #define Q_SEMTIMEDOP 4
950 #define Q_MSGSND 11
951 #define Q_MSGRCV 12
952 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
954 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
955 void *, ptr, long, fifth)
static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
{
    /* msgsnd via the multiplexed ipc() syscall: the message buffer travels
     * in the 'ptr' argument and the IPC version (top 16 bits of 'call')
     * is 0. The const cast is safe: the kernel only reads the buffer. */
    return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
}
static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
{
    /* msgrcv via ipc(): using IPC version 1 lets us pass the buffer in
     * 'ptr' and the wanted message type in 'fifth' directly; version 0
     * would instead require packing both into a struct ipc_kludge. */
    return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
}
static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
                           const struct timespec *timeout)
{
    /* semtimedop via ipc(): the sembuf array goes in 'ptr' and the
     * timeout pointer is smuggled through the integer 'fifth' argument,
     * as the kernel's ipc() demultiplexer expects. */
    return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
                    (long)timeout);
}
970 #endif
971 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
972 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
973 size_t, len, unsigned, prio, const struct timespec *, timeout)
974 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
975 size_t, len, unsigned *, prio, const struct timespec *, timeout)
976 #endif
977 /* We do ioctl like this rather than via safe_syscall3 to preserve the
978 * "third argument might be integer or pointer or not present" behaviour of
979 * the libc function.
981 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
987 #ifdef __NR_fcntl64
988 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
989 #else
990 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
991 #endif
993 static inline int host_to_target_sock_type(int host_type)
995 int target_type;
997 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
998 case SOCK_DGRAM:
999 target_type = TARGET_SOCK_DGRAM;
1000 break;
1001 case SOCK_STREAM:
1002 target_type = TARGET_SOCK_STREAM;
1003 break;
1004 default:
1005 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
1006 break;
1009 #if defined(SOCK_CLOEXEC)
1010 if (host_type & SOCK_CLOEXEC) {
1011 target_type |= TARGET_SOCK_CLOEXEC;
1013 #endif
1015 #if defined(SOCK_NONBLOCK)
1016 if (host_type & SOCK_NONBLOCK) {
1017 target_type |= TARGET_SOCK_NONBLOCK;
1019 #endif
1021 return target_type;
/* State of the emulated guest heap. */
static abi_ulong target_brk;          /* current guest brk value */
static abi_ulong target_original_brk; /* initial brk; never shrink below it */
static abi_ulong brk_page;            /* first host page beyond the heap */

/* Record the initial program break chosen by the loader. */
void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}
//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* brk(0) queries the current break without changing it. */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    /* Refuse to shrink below the original break; report current value. */
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken). */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value. */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
/* Unpack a guest fd_set of n descriptors (packed as abi_ulong words in
 * guest byte order) into a host fd_set.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}
1147 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1148 abi_ulong target_fds_addr,
1149 int n)
1151 if (target_fds_addr) {
1152 if (copy_from_user_fdset(fds, target_fds_addr, n))
1153 return -TARGET_EFAULT;
1154 *fds_ptr = fds;
1155 } else {
1156 *fds_ptr = NULL;
1158 return 0;
/* Pack a host fd_set of n descriptors back into guest memory as
 * abi_ulong words in guest byte order.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        /* rebuild one abi_ulong worth of bits */
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

/* Rescale a clock_t tick count from host HZ to target HZ. */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    /* Widen to 64 bits so the intermediate product cannot overflow. */
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
/* Copy a host struct rusage out to guest memory, byte-swapping every
 * field.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
/* Convert a guest rlimit value to the host rlim_t.  Both the guest
 * infinity marker and any value that does not round-trip through
 * rlim_t (host type narrower than abi_ulong) map to RLIM_INFINITY. */
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    /* If the cast truncated the value, treat it as unlimited. */
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
/* Convert a host rlim_t to the guest encoding.  Host infinity, and any
 * value that will not fit in the (possibly narrower) abi type, become
 * the guest's TARGET_RLIM_INFINITY. */
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
/* Map a guest RLIMIT_* resource code to the host's constant.  Unknown
 * codes are passed through unchanged and left for the host to reject. */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
/* Read a struct timeval from guest memory.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
/* Write a struct timeval into guest memory.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
/* Read a struct timezone from guest memory.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
1353 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1354 #include <mqueue.h>
/* Read a struct mq_attr from guest memory.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}
/* Write a struct mq_attr into guest memory.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
1393 #endif
1395 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /* Each fd_set address may be 0, which maps to a NULL host pointer. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* select's timeval is converted to pselect6's timespec. */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        /* Copy the surviving fd sets and the remaining time back out. */
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
1453 #if defined(TARGET_WANT_OLD_SYS_SELECT)
/* Old-style select: all five arguments arrive packed in a single
 * guest struct sel_arg_struct pointed to by arg1. */
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
1474 #endif
1475 #endif
/* Host-side pipe2(); reports ENOSYS when configure found no pipe2(). */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
/* Emulate pipe()/pipe2().  is_pipe2 distinguishes the two because some
 * targets return the fds in registers for plain pipe() but not pipe2(). */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Default ABI: write both fds into the guest array. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
/* Convert a guest ip_mreq/ip_mreqn to the host form.  The addresses are
 * already in network byte order; only imr_ifindex (present only in the
 * longer ip_mreqn layout) needs swapping. */
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
/* Convert a guest sockaddr to host form, fixing up the family field and
 * family-specific members.  fd may carry its own translation hook
 * (e.g. netlink fds), which takes precedence. */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
/* Copy a host sockaddr out to guest memory, byte-swapping the family
 * field and the family-specific members that are not byte arrays. */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    /* Only touch sa_family if the (possibly truncated) buffer holds it. */
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
        struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
/* Convert the ancillary-data (control message) chain of a guest msghdr
 * into the host msgh, translating headers and the payloads we know
 * about (SCM_RIGHTS, SCM_CREDENTIALS).  Unknown payloads are copied
 * through verbatim with a warning. */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* payload length = guest cmsg_len minus the guest header size */
        int len = tswapal(target_cmsg->cmsg_len)
            - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* file descriptors: each int is byte-swapped individually */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
/* Convert the ancillary-data chain of a host msghdr back into the guest
 * msg_control buffer, translating headers and known payload types and
 * setting MSG_CTRUNC when the guest buffer is too small. */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        tgt_len = TARGET_CMSG_LEN(len);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                /* host timeval and target timeval may differ in size */
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
        default:
            break;
        }

        if (msg_controllen < tgt_len) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen;
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(tgt_len);
        tgt_space = TARGET_CMSG_SPACE(len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
/* Byte-swap every field of a netlink message header in place. */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
}
/* Walk a buffer of host-endian netlink messages, invoking the callback
 * on each payload-bearing message and swapping every header to guest
 * byte order.  Headers are swapped *after* the payload is processed
 * because the callback needs the host-endian nlmsg_type/len. */
static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*host_to_target_nlmsg)
                                                       (struct nlmsghdr *))
{
    uint32_t nlmsg_len;
    abi_long ret;

    while (len > sizeof(struct nlmsghdr)) {

        nlmsg_len = nlh->nlmsg_len;
        if (nlmsg_len < sizeof(struct nlmsghdr) ||
            nlmsg_len > len) {
            break;
        }

        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            tswap_nlmsghdr(nlh);
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            tswap_nlmsghdr(nlh);
            return 0;
        }
        default:
            ret = host_to_target_nlmsg(nlh);
            if (ret < 0) {
                tswap_nlmsghdr(nlh);
                return ret;
            }
            break;
        }
        tswap_nlmsghdr(nlh);
        len -= NLMSG_ALIGN(nlmsg_len);
        nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
    }
    return 0;
}
/* Walk a buffer of guest-endian netlink messages, swapping each header
 * to host byte order before dispatching the payload callback. */
static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*target_to_host_nlmsg)
                                                       (struct nlmsghdr *))
{
    int ret;

    while (len > sizeof(struct nlmsghdr)) {
        /* lengths are still guest-endian here, so swap for the checks */
        if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
            tswap32(nlh->nlmsg_len) > len) {
            break;
        }
        tswap_nlmsghdr(nlh);
        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            return 0;
        }
        default:
            ret = target_to_host_nlmsg(nlh);
            if (ret < 0) {
                return ret;
            }
        }
        len -= NLMSG_ALIGN(nlh->nlmsg_len);
        nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
    }
    return 0;
}
1944 #ifdef CONFIG_RTNETLINK
/* Iterate over host-endian netlink attributes, calling the conversion
 * callback on each one and then swapping its header to guest order. */
static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
                                               size_t len, void *context,
                                               abi_long (*host_to_target_nlattr)
                                                        (struct nlattr *,
                                                         void *context))
{
    unsigned short nla_len;
    abi_long ret;

    while (len > sizeof(struct nlattr)) {
        nla_len = nlattr->nla_len;
        if (nla_len < sizeof(struct nlattr) ||
            nla_len > len) {
            break;
        }
        ret = host_to_target_nlattr(nlattr, context);
        /* swap the header only after the payload has been converted */
        nlattr->nla_len = tswap16(nlattr->nla_len);
        nlattr->nla_type = tswap16(nlattr->nla_type);
        if (ret < 0) {
            return ret;
        }
        len -= NLA_ALIGN(nla_len);
        nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
    }
    return 0;
}
/* Iterate over host-endian rtnetlink attributes, calling the conversion
 * callback on each one and then swapping its header to guest order. */
static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*host_to_target_rtattr)
                                                        (struct rtattr *))
{
    unsigned short rta_len;
    abi_long ret;

    while (len > sizeof(struct rtattr)) {
        rta_len = rtattr->rta_len;
        if (rta_len < sizeof(struct rtattr) ||
            rta_len > len) {
            break;
        }
        ret = host_to_target_rtattr(rtattr);
        /* swap the header only after the payload has been converted */
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        if (ret < 0) {
            return ret;
        }
        len -= RTA_ALIGN(rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
    }
    return 0;
}
/* Address of a netlink attribute's payload. */
#define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)

/* Byte-swap the payload of an IFLA_BR_* bridge attribute according to
 * its width; byte-sized, binary, and bridge-id payloads need no swap. */
static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
                                                  void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* no data */
    case QEMU_IFLA_BR_FDB_FLUSH:
        break;
    /* binary */
    case QEMU_IFLA_BR_GROUP_ADDR:
        break;
    /* uint8_t */
    case QEMU_IFLA_BR_VLAN_FILTERING:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
    case QEMU_IFLA_BR_MCAST_ROUTER:
    case QEMU_IFLA_BR_MCAST_SNOOPING:
    case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
    case QEMU_IFLA_BR_MCAST_QUERIER:
    case QEMU_IFLA_BR_NF_CALL_IPTABLES:
    case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
    case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
        break;
    /* uint16_t */
    case QEMU_IFLA_BR_PRIORITY:
    case QEMU_IFLA_BR_VLAN_PROTOCOL:
    case QEMU_IFLA_BR_GROUP_FWD_MASK:
    case QEMU_IFLA_BR_ROOT_PORT:
    case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uint32_t */
    case QEMU_IFLA_BR_FORWARD_DELAY:
    case QEMU_IFLA_BR_HELLO_TIME:
    case QEMU_IFLA_BR_MAX_AGE:
    case QEMU_IFLA_BR_AGEING_TIME:
    case QEMU_IFLA_BR_STP_STATE:
    case QEMU_IFLA_BR_ROOT_PATH_COST:
    case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
    case QEMU_IFLA_BR_MCAST_HASH_MAX:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BR_HELLO_TIMER:
    case QEMU_IFLA_BR_TCN_TIMER:
    case QEMU_IFLA_BR_GC_TIMER:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
    case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uin8_t[] */
    case QEMU_IFLA_BR_ROOT_ID:
    case QEMU_IFLA_BR_BRIDGE_ID:
        break;
    default:
        gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
/* Byte-swap the payload of an IFLA_BRPORT_* bridge-port attribute
 * according to its width; byte-sized and bridge-id payloads need none. */
static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
                                                        void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* uint8_t */
    case QEMU_IFLA_BRPORT_STATE:
    case QEMU_IFLA_BRPORT_MODE:
    case QEMU_IFLA_BRPORT_GUARD:
    case QEMU_IFLA_BRPORT_PROTECT:
    case QEMU_IFLA_BRPORT_FAST_LEAVE:
    case QEMU_IFLA_BRPORT_LEARNING:
    case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
    case QEMU_IFLA_BRPORT_PROXYARP:
    case QEMU_IFLA_BRPORT_LEARNING_SYNC:
    case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
    case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
    case QEMU_IFLA_BRPORT_CONFIG_PENDING:
    case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
        break;
    /* uint16_t */
    case QEMU_IFLA_BRPORT_PRIORITY:
    case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
    case QEMU_IFLA_BRPORT_DESIGNATED_COST:
    case QEMU_IFLA_BRPORT_ID:
    case QEMU_IFLA_BRPORT_NO:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uin32_t */
    case QEMU_IFLA_BRPORT_COST:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
    case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
    case QEMU_IFLA_BRPORT_HOLD_TIMER:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uint8_t[] */
    case QEMU_IFLA_BRPORT_ROOT_ID:
    case QEMU_IFLA_BRPORT_BRIDGE_ID:
        break;
    default:
        gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
/*
 * Context accumulated while walking an IFLA_LINKINFO nest: records the
 * IFLA_INFO_KIND / IFLA_INFO_SLAVE_KIND strings so that the subsequent
 * IFLA_INFO_DATA / IFLA_INFO_SLAVE_DATA payloads can be converted
 * according to the device kind ("bridge" is the only kind handled).
 */
struct linkinfo_context {
    int len;           /* length of name, including the NUL terminator */
    char *name;        /* device kind; points into the netlink buffer */
    int slave_len;     /* length of slave_name, including the NUL */
    char *slave_name;  /* slave device kind; points into the buffer */
};
2136 static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
2137 void *context)
2139 struct linkinfo_context *li_context = context;
2141 switch (nlattr->nla_type) {
2142 /* string */
2143 case QEMU_IFLA_INFO_KIND:
2144 li_context->name = NLA_DATA(nlattr);
2145 li_context->len = nlattr->nla_len - NLA_HDRLEN;
2146 break;
2147 case QEMU_IFLA_INFO_SLAVE_KIND:
2148 li_context->slave_name = NLA_DATA(nlattr);
2149 li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
2150 break;
2151 /* stats */
2152 case QEMU_IFLA_INFO_XSTATS:
2153 /* FIXME: only used by CAN */
2154 break;
2155 /* nested */
2156 case QEMU_IFLA_INFO_DATA:
2157 if (strncmp(li_context->name, "bridge",
2158 li_context->len) == 0) {
2159 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2160 nlattr->nla_len,
2161 NULL,
2162 host_to_target_data_bridge_nlattr);
2163 } else {
2164 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
2166 break;
2167 case QEMU_IFLA_INFO_SLAVE_DATA:
2168 if (strncmp(li_context->slave_name, "bridge",
2169 li_context->slave_len) == 0) {
2170 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2171 nlattr->nla_len,
2172 NULL,
2173 host_to_target_slave_data_bridge_nlattr);
2174 } else {
2175 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2176 li_context->slave_name);
2178 break;
2179 default:
2180 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
2181 break;
2184 return 0;
2187 static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
2188 void *context)
2190 uint32_t *u32;
2191 int i;
2193 switch (nlattr->nla_type) {
2194 case QEMU_IFLA_INET_CONF:
2195 u32 = NLA_DATA(nlattr);
2196 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2197 i++) {
2198 u32[i] = tswap32(u32[i]);
2200 break;
2201 default:
2202 gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
2204 return 0;
/*
 * Byte-swap one AF_INET6 (IFLA_INET6_*) attribute from host to target
 * endianness, in place.  Array payload lengths are derived from the
 * attribute length minus the attribute header.  Unknown types are only
 * logged.  @context is unused; always returns 0.
 */
static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
                                                 void *context)
{
    uint32_t *u32;
    uint64_t *u64;
    struct ifla_cacheinfo *ci;
    int i;

    switch (nlattr->nla_type) {
    /* binary: raw bytes, endianness-neutral */
    case QEMU_IFLA_INET6_TOKEN:
        break;
    /* uint8_t */
    case QEMU_IFLA_INET6_ADDR_GEN_MODE:
        break;
    /* uint32_t */
    case QEMU_IFLA_INET6_FLAGS:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint32_t[] */
    case QEMU_IFLA_INET6_CONF:
        u32 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
             i++) {
            u32[i] = tswap32(u32[i]);
        }
        break;
    /* ifla_cacheinfo: four 32-bit fields, swapped individually */
    case QEMU_IFLA_INET6_CACHEINFO:
        ci = NLA_DATA(nlattr);
        ci->max_reasm_len = tswap32(ci->max_reasm_len);
        ci->tstamp = tswap32(ci->tstamp);
        ci->reachable_time = tswap32(ci->reachable_time);
        ci->retrans_time = tswap32(ci->retrans_time);
        break;
    /* uint64_t[] */
    case QEMU_IFLA_INET6_STATS:
    case QEMU_IFLA_INET6_ICMP6STATS:
        u64 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
             i++) {
            u64[i] = tswap64(u64[i]);
        }
        break;
    default:
        gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
    }
    return 0;
}
2258 static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
2259 void *context)
2261 switch (nlattr->nla_type) {
2262 case AF_INET:
2263 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2264 NULL,
2265 host_to_target_data_inet_nlattr);
2266 case AF_INET6:
2267 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2268 NULL,
2269 host_to_target_data_inet6_nlattr);
2270 default:
2271 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
2272 break;
2274 return 0;
/*
 * Byte-swap one link-level (IFLA_*) rtattr from host to target
 * endianness, in place.  Cases are grouped by payload layout; binary
 * and uint8_t payloads need no conversion.  Nested attribute sets
 * (LINKINFO, AF_SPEC) are delegated to per-nest walkers.  Unknown
 * attribute types are only logged.  Returns 0, or the nested walker's
 * result for the nested cases.
 */
static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct rtnl_link_stats *st;
    struct rtnl_link_stats64 *st64;
    struct rtnl_link_ifmap *map;
    struct linkinfo_context li_context;

    switch (rtattr->rta_type) {
    /* binary stream: raw bytes (MAC addresses), endianness-neutral */
    case QEMU_IFLA_ADDRESS:
    case QEMU_IFLA_BROADCAST:
    /* string */
    case QEMU_IFLA_IFNAME:
    case QEMU_IFLA_QDISC:
        break;
    /* uint8_t */
    case QEMU_IFLA_OPERSTATE:
    case QEMU_IFLA_LINKMODE:
    case QEMU_IFLA_CARRIER:
    case QEMU_IFLA_PROTO_DOWN:
        break;
    /* uint32_t */
    case QEMU_IFLA_MTU:
    case QEMU_IFLA_LINK:
    case QEMU_IFLA_WEIGHT:
    case QEMU_IFLA_TXQLEN:
    case QEMU_IFLA_CARRIER_CHANGES:
    case QEMU_IFLA_NUM_RX_QUEUES:
    case QEMU_IFLA_NUM_TX_QUEUES:
    case QEMU_IFLA_PROMISCUITY:
    case QEMU_IFLA_EXT_MASK:
    case QEMU_IFLA_LINK_NETNSID:
    case QEMU_IFLA_GROUP:
    case QEMU_IFLA_MASTER:
    case QEMU_IFLA_NUM_VF:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct rtnl_link_stats: every field is a 32-bit counter */
    case QEMU_IFLA_STATS:
        st = RTA_DATA(rtattr);
        st->rx_packets = tswap32(st->rx_packets);
        st->tx_packets = tswap32(st->tx_packets);
        st->rx_bytes = tswap32(st->rx_bytes);
        st->tx_bytes = tswap32(st->tx_bytes);
        st->rx_errors = tswap32(st->rx_errors);
        st->tx_errors = tswap32(st->tx_errors);
        st->rx_dropped = tswap32(st->rx_dropped);
        st->tx_dropped = tswap32(st->tx_dropped);
        st->multicast = tswap32(st->multicast);
        st->collisions = tswap32(st->collisions);

        /* detailed rx_errors: */
        st->rx_length_errors = tswap32(st->rx_length_errors);
        st->rx_over_errors = tswap32(st->rx_over_errors);
        st->rx_crc_errors = tswap32(st->rx_crc_errors);
        st->rx_frame_errors = tswap32(st->rx_frame_errors);
        st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
        st->rx_missed_errors = tswap32(st->rx_missed_errors);

        /* detailed tx_errors */
        st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
        st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
        st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
        st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
        st->tx_window_errors = tswap32(st->tx_window_errors);

        /* for cslip etc */
        st->rx_compressed = tswap32(st->rx_compressed);
        st->tx_compressed = tswap32(st->tx_compressed);
        break;
    /* struct rtnl_link_stats64: same layout with 64-bit counters */
    case QEMU_IFLA_STATS64:
        st64 = RTA_DATA(rtattr);
        st64->rx_packets = tswap64(st64->rx_packets);
        st64->tx_packets = tswap64(st64->tx_packets);
        st64->rx_bytes = tswap64(st64->rx_bytes);
        st64->tx_bytes = tswap64(st64->tx_bytes);
        st64->rx_errors = tswap64(st64->rx_errors);
        st64->tx_errors = tswap64(st64->tx_errors);
        st64->rx_dropped = tswap64(st64->rx_dropped);
        st64->tx_dropped = tswap64(st64->tx_dropped);
        st64->multicast = tswap64(st64->multicast);
        st64->collisions = tswap64(st64->collisions);

        /* detailed rx_errors: */
        st64->rx_length_errors = tswap64(st64->rx_length_errors);
        st64->rx_over_errors = tswap64(st64->rx_over_errors);
        st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
        st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
        st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
        st64->rx_missed_errors = tswap64(st64->rx_missed_errors);

        /* detailed tx_errors */
        st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
        st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
        st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
        st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
        st64->tx_window_errors = tswap64(st64->tx_window_errors);

        /* for cslip etc */
        st64->rx_compressed = tswap64(st64->rx_compressed);
        st64->tx_compressed = tswap64(st64->tx_compressed);
        break;
    /* struct rtnl_link_ifmap: mixed 64/16-bit fields */
    case QEMU_IFLA_MAP:
        map = RTA_DATA(rtattr);
        map->mem_start = tswap64(map->mem_start);
        map->mem_end = tswap64(map->mem_end);
        map->base_addr = tswap64(map->base_addr);
        map->irq = tswap16(map->irq);
        break;
    /* nested attribute sets */
    case QEMU_IFLA_LINKINFO:
        /* li_context collects INFO_KIND so INFO_DATA can be dispatched */
        memset(&li_context, 0, sizeof(li_context));
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              &li_context,
                                              host_to_target_data_linkinfo_nlattr);
    case QEMU_IFLA_AF_SPEC:
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              NULL,
                                              host_to_target_data_spec_nlattr);
    default:
        gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
2407 static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
2409 uint32_t *u32;
2410 struct ifa_cacheinfo *ci;
2412 switch (rtattr->rta_type) {
2413 /* binary: depends on family type */
2414 case IFA_ADDRESS:
2415 case IFA_LOCAL:
2416 break;
2417 /* string */
2418 case IFA_LABEL:
2419 break;
2420 /* u32 */
2421 case IFA_FLAGS:
2422 case IFA_BROADCAST:
2423 u32 = RTA_DATA(rtattr);
2424 *u32 = tswap32(*u32);
2425 break;
2426 /* struct ifa_cacheinfo */
2427 case IFA_CACHEINFO:
2428 ci = RTA_DATA(rtattr);
2429 ci->ifa_prefered = tswap32(ci->ifa_prefered);
2430 ci->ifa_valid = tswap32(ci->ifa_valid);
2431 ci->cstamp = tswap32(ci->cstamp);
2432 ci->tstamp = tswap32(ci->tstamp);
2433 break;
2434 default:
2435 gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
2436 break;
2438 return 0;
2441 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
2443 uint32_t *u32;
2444 switch (rtattr->rta_type) {
2445 /* binary: depends on family type */
2446 case RTA_GATEWAY:
2447 case RTA_DST:
2448 case RTA_PREFSRC:
2449 break;
2450 /* u32 */
2451 case RTA_PRIORITY:
2452 case RTA_TABLE:
2453 case RTA_OIF:
2454 u32 = RTA_DATA(rtattr);
2455 *u32 = tswap32(*u32);
2456 break;
2457 default:
2458 gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
2459 break;
2461 return 0;
/* Walk every IFLA_* rtattr of a link message, converting each in place. */
static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_link_rtattr);
}
/* Walk every IFA_* rtattr of an address message, converting each in place. */
static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_addr_rtattr);
}
/* Walk every RTA_* rtattr of a route message, converting each in place. */
static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
                                            uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_route_rtattr);
}
/*
 * Convert the payload of one NETLINK_ROUTE message from host to target
 * byte order: swap the fixed-size family header (ifinfomsg/ifaddrmsg/
 * rtmsg) field by field, then walk the trailing rtattrs.  Messages too
 * short to hold their family header are passed through untouched.
 * Returns -TARGET_EINVAL for message types we do not handle.
 */
static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
{
    uint32_t nlmsg_len;
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    /* save the length before any conversion of the message touches it */
    nlmsg_len = nlh->nlmsg_len;
    switch (nlh->nlmsg_type) {
    case RTM_NEWLINK:
    case RTM_DELLINK:
    case RTM_GETLINK:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            host_to_target_link_rtattr(IFLA_RTA(ifi),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_NEWADDR:
    case RTM_DELADDR:
    case RTM_GETADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            host_to_target_addr_rtattr(IFA_RTA(ifa),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
    case RTM_GETROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            host_to_target_route_rtattr(RTM_RTA(rtm),
                                        nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EINVAL;
    }
    return 0;
}
/* Convert a whole buffer of NETLINK_ROUTE messages, host to target. */
static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
}
/*
 * Walk a chain of target-endian rtattrs, converting each header to host
 * order and invoking @target_to_host_rtattr on its payload.  The length
 * is validated (via tswap16, since the fields are still target order)
 * before the header is swapped in place; a malformed length terminates
 * the walk silently.  Returns the first callback error, else 0.
 */
static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*target_to_host_rtattr)
                                                        (struct rtattr *))
{
    abi_long ret;

    while (len >= sizeof(struct rtattr)) {
        /* validate while still target-endian, then swap the header */
        if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
            tswap16(rtattr->rta_len) > len) {
            break;
        }
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        ret = target_to_host_rtattr(rtattr);
        if (ret < 0) {
            return ret;
        }
        /* advance by the aligned attribute size */
        len -= RTA_ALIGN(rtattr->rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) +
                 RTA_ALIGN(rtattr->rta_len));
    }
    return 0;
}
/*
 * Convert one target-endian IFLA_* rtattr to host order.  No link
 * attribute currently needs payload conversion here; everything is
 * logged as unknown.  Always returns 0.
 */
static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
{
    switch (rtattr->rta_type) {
    default:
        gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/*
 * Convert one target-endian IFA_* rtattr to host order.  The address
 * payloads handled are raw bytes and need no swapping; anything else is
 * logged as unknown.  Always returns 0.
 */
static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
{
    switch (rtattr->rta_type) {
    /* binary: depends on family type — raw bytes, no swap */
    case IFA_LOCAL:
    case IFA_ADDRESS:
        break;
    default:
        gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
2588 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2590 uint32_t *u32;
2591 switch (rtattr->rta_type) {
2592 /* binary: depends on family type */
2593 case RTA_DST:
2594 case RTA_SRC:
2595 case RTA_GATEWAY:
2596 break;
2597 /* u32 */
2598 case RTA_OIF:
2599 u32 = RTA_DATA(rtattr);
2600 *u32 = tswap32(*u32);
2601 break;
2602 default:
2603 gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
2604 break;
2606 return 0;
/* Walk every IFLA_* rtattr of a target link message; errors ignored. */
static void target_to_host_link_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_link_rtattr);
}
/* Walk every IFA_* rtattr of a target address message; errors ignored. */
static void target_to_host_addr_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_addr_rtattr);
}
/* Walk every RTA_* rtattr of a target route message; errors ignored. */
static void target_to_host_route_rtattr(struct rtattr *rtattr,
                                        uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_route_rtattr);
}
/*
 * Convert the payload of one NETLINK_ROUTE request from target to host
 * byte order.  GET requests (dump queries) carry no payload that needs
 * swapping; NEW/DEL requests get their family header swapped field by
 * field, then the trailing rtattrs.  nlh->nlmsg_len is already host
 * order here (the caller swaps the nlmsghdr before invoking us).
 * Returns -TARGET_EOPNOTSUPP for unhandled message types.
 */
static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
{
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    switch (nlh->nlmsg_type) {
    case RTM_GETLINK:
        break;
    case RTM_NEWLINK:
    case RTM_DELLINK:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_GETADDR:
    case RTM_NEWADDR:
    case RTM_DELADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_GETROUTE:
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
                                        NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EOPNOTSUPP;
    }
    return 0;
}
/* Convert a whole buffer of NETLINK_ROUTE requests, target to host. */
static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
}
2682 #endif /* CONFIG_RTNETLINK */
/*
 * Convert one NETLINK_AUDIT message payload from host to target order.
 * No audit message type is handled yet: everything is logged and
 * rejected with -TARGET_EINVAL.
 */
static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
{
    switch (nlh->nlmsg_type) {
    default:
        gemu_log("Unknown host audit message type %d\n",
                 nlh->nlmsg_type);
        return -TARGET_EINVAL;
    }
    return 0;
}
/* Convert a whole buffer of NETLINK_AUDIT messages, host to target. */
static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
}
/*
 * Convert one NETLINK_AUDIT request payload from target to host order.
 * User-originated audit record types carry free-form data and need no
 * conversion; anything else is logged and rejected with -TARGET_EINVAL.
 */
static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
{
    switch (nlh->nlmsg_type) {
    case AUDIT_USER:
    case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
    case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
        break;
    default:
        gemu_log("Unknown target audit message type %d\n",
                 nlh->nlmsg_type);
        return -TARGET_EINVAL;
    }

    return 0;
}
/* Convert a whole buffer of NETLINK_AUDIT requests, target to host. */
static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
}
/* do_setsockopt() Must return target values and target errnos.
 *
 * Emulate setsockopt(2) for the guest: read the option value from guest
 * memory (optval_addr), translate target-level option names/levels and
 * value layouts to host equivalents, call the host setsockopt, and map
 * the result back through get_errno().  SOL_SOCKET is the only level
 * whose option-name constants differ per target, hence the large
 * TARGET_SO_* mapping switch; the other levels share numbering with the
 * host and only need value marshalling.
 */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            /* These options accept either a full int or a single byte
             * from the guest; normalize to a host int. */
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            /* the kernel accepts both ip_mreq and the larger ip_mreqn */
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            /* NOTE(review): lock_user() result is not checked here; if it
             * fails, a NULL pointer is handed to setsockopt (the kernel
             * then returns EFAULT).  Other cases return -TARGET_EFAULT
             * explicitly — confirm whether this should too. */
            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
            /* struct icmp_filter takes an u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;

        default:
            goto unimplemented;
        }
        break;
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
                struct timeval tv;

                optname = SO_RCVTIMEO;

                /* shared with TARGET_SO_SNDTIMEO below via goto */
set_timeout:
                if (optlen != sizeof(struct target_timeval)) {
                    return -TARGET_EINVAL;
                }

                if (copy_from_user_timeval(&tv, optval_addr)) {
                    return -TARGET_EFAULT;
                }

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                &tv, sizeof(tv)));
                return ret;
        }
        case TARGET_SO_SNDTIMEO:
                optname = SO_SNDTIMEO;
                goto set_timeout;
        case TARGET_SO_ATTACH_FILTER:
        {
                struct target_sock_fprog *tfprog;
                struct target_sock_filter *tfilter;
                struct sock_fprog fprog;
                struct sock_filter *filter;
                int i;

                if (optlen != sizeof(*tfprog)) {
                    return -TARGET_EINVAL;
                }
                if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                    return -TARGET_EFAULT;
                }
                if (!lock_user_struct(VERIFY_READ, tfilter,
                                      tswapal(tfprog->filter), 0)) {
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_EFAULT;
                }

                /* copy the BPF program, swapping each instruction's
                 * multi-byte fields to host order */
                fprog.len = tswap16(tfprog->len);
                filter = g_try_new(struct sock_filter, fprog.len);
                if (filter == NULL) {
                    unlock_user_struct(tfilter, tfprog->filter, 1);
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_ENOMEM;
                }
                for (i = 0; i < fprog.len; i++) {
                    filter[i].code = tswap16(tfilter[i].code);
                    filter[i].jt = tfilter[i].jt;
                    filter[i].jf = tfilter[i].jf;
                    filter[i].k = tswap32(tfilter[i].k);
                }
                fprog.filter = filter;

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                                SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
                g_free(filter);

                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return ret;
        }
        case TARGET_SO_BINDTODEVICE:
        {
                char *dev_ifname, *addr_ifname;

                /* silently truncate over-long names to IFNAMSIZ - 1 */
                if (optlen > IFNAMSIZ - 1) {
                    optlen = IFNAMSIZ - 1;
                }
                dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
                if (!dev_ifname) {
                    return -TARGET_EFAULT;
                }
                optname = SO_BINDTODEVICE;
                /* re-copy into a NUL-terminated host buffer */
                addr_ifname = alloca(IFNAMSIZ);
                memcpy(addr_ifname, dev_ifname, optlen);
                addr_ifname[optlen] = 0;
                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                           addr_ifname, optlen));
                unlock_user (dev_ifname, optval_addr, 0);
                return ret;
        }
            /* Options with 'int' argument.  Each case only remaps the
             * target constant to the host one; the shared code after the
             * switch reads the int and performs the call. */
        case TARGET_SO_DEBUG:
                optname = SO_DEBUG;
                break;
        case TARGET_SO_REUSEADDR:
                optname = SO_REUSEADDR;
                break;
        case TARGET_SO_TYPE:
                optname = SO_TYPE;
                break;
        case TARGET_SO_ERROR:
                optname = SO_ERROR;
                break;
        case TARGET_SO_DONTROUTE:
                optname = SO_DONTROUTE;
                break;
        case TARGET_SO_BROADCAST:
                optname = SO_BROADCAST;
                break;
        case TARGET_SO_SNDBUF:
                optname = SO_SNDBUF;
                break;
        case TARGET_SO_SNDBUFFORCE:
                optname = SO_SNDBUFFORCE;
                break;
        case TARGET_SO_RCVBUF:
                optname = SO_RCVBUF;
                break;
        case TARGET_SO_RCVBUFFORCE:
                optname = SO_RCVBUFFORCE;
                break;
        case TARGET_SO_KEEPALIVE:
                optname = SO_KEEPALIVE;
                break;
        case TARGET_SO_OOBINLINE:
                optname = SO_OOBINLINE;
                break;
        case TARGET_SO_NO_CHECK:
                optname = SO_NO_CHECK;
                break;
        case TARGET_SO_PRIORITY:
                optname = SO_PRIORITY;
                break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
                optname = SO_BSDCOMPAT;
                break;
#endif
        case TARGET_SO_PASSCRED:
                optname = SO_PASSCRED;
                break;
        case TARGET_SO_PASSSEC:
                optname = SO_PASSSEC;
                break;
        case TARGET_SO_TIMESTAMP:
                optname = SO_TIMESTAMP;
                break;
        case TARGET_SO_RCVLOWAT:
                optname = SO_RCVLOWAT;
                break;
                break; /* NOTE(review): unreachable duplicate break */
        default:
                goto unimplemented;
        }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
/* do_getsockopt() Must return target values and target errnos.
 *
 * Emulate getsockopt(2) for the guest: translate target option
 * names/levels to host ones, call the host getsockopt, and write the
 * result back into guest memory with the guest's expected layout and
 * byte order.  Most int-valued options funnel through the shared
 * 'int_case' path below.  @optlen is a guest pointer to the in/out
 * length word, not a length itself.
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            /* copy field by field; target_ucred layout differs */
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  Remap the constant and fall
         * through to the shared integer path. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* NOTE(review): sizeof(lv) (socklen_t) is used where sizeof(val)
         * (int) reads more naturally; both are 4 bytes on Linux so the
         * behavior is the same. */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            /* socket type constants differ between host and target */
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                /* guest asked for less than an int and the value fits in
                 * a byte: return a single byte */
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/*
 * Build a host iovec array from a guest iovec array at @target_addr,
 * locking each guest buffer into host memory.  @type is VERIFY_READ or
 * VERIFY_WRITE; @copy is passed through to lock_user().
 *
 * Returns the host array, or NULL with errno set (0 for an empty
 * vector, EINVAL/ENOMEM/EFAULT on failure).  A bad buffer pointer is a
 * hard fault only for the first entry; later bad entries are recorded
 * as zero-length NULL slots so the caller performs a partial transfer,
 * matching kernel readv/writev semantics.  Total length is clamped to
 * max_len.  The caller must release everything with unlock_iovec().
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* clamp so the running total never exceeds max_len */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* undo every lock taken so far (entries 0 .. i-1) */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
/*
 * Release an iovec previously built by lock_iovec(): re-read the guest
 * iovec array to recover each buffer's guest address, unlock every
 * entry (writing back vec[i].iov_len bytes when @copy is set), and free
 * the host array.  A negative guest length marks where lock_iovec()
 * stopped, so the walk breaks there too.
 */
static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            if (len < 0) {
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    g_free(vec);
}
3299 static inline int target_to_host_sock_type(int *type)
3301 int host_type = 0;
3302 int target_type = *type;
3304 switch (target_type & TARGET_SOCK_TYPE_MASK) {
3305 case TARGET_SOCK_DGRAM:
3306 host_type = SOCK_DGRAM;
3307 break;
3308 case TARGET_SOCK_STREAM:
3309 host_type = SOCK_STREAM;
3310 break;
3311 default:
3312 host_type = target_type & TARGET_SOCK_TYPE_MASK;
3313 break;
3315 if (target_type & TARGET_SOCK_CLOEXEC) {
3316 #if defined(SOCK_CLOEXEC)
3317 host_type |= SOCK_CLOEXEC;
3318 #else
3319 return -TARGET_EINVAL;
3320 #endif
3322 if (target_type & TARGET_SOCK_NONBLOCK) {
3323 #if defined(SOCK_NONBLOCK)
3324 host_type |= SOCK_NONBLOCK;
3325 #elif !defined(O_NONBLOCK)
3326 return -TARGET_EINVAL;
3327 #endif
3329 *type = host_type;
3330 return 0;
/* Try to emulate socket type flags after socket creation.  On hosts that
 * lack SOCK_NONBLOCK, fall back to fcntl(O_NONBLOCK); on failure the fd is
 * closed and -TARGET_EINVAL returned, otherwise the fd is passed through.
 */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
3348 static abi_long packet_target_to_host_sockaddr(void *host_addr,
3349 abi_ulong target_addr,
3350 socklen_t len)
3352 struct sockaddr *addr = host_addr;
3353 struct target_sockaddr *target_saddr;
3355 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
3356 if (!target_saddr) {
3357 return -TARGET_EFAULT;
3360 memcpy(addr, target_saddr, len);
3361 addr->sa_family = tswap16(target_saddr->sa_family);
3362 /* spkt_protocol is big-endian */
3364 unlock_user(target_saddr, target_addr, 0);
3365 return 0;
3368 static TargetFdTrans target_packet_trans = {
3369 .target_to_host_addr = packet_target_to_host_sockaddr,
3372 #ifdef CONFIG_RTNETLINK
3373 static abi_long netlink_route_target_to_host(void *buf, size_t len)
3375 abi_long ret;
3377 ret = target_to_host_nlmsg_route(buf, len);
3378 if (ret < 0) {
3379 return ret;
3382 return len;
3385 static abi_long netlink_route_host_to_target(void *buf, size_t len)
3387 abi_long ret;
3389 ret = host_to_target_nlmsg_route(buf, len);
3390 if (ret < 0) {
3391 return ret;
3394 return len;
3397 static TargetFdTrans target_netlink_route_trans = {
3398 .target_to_host_data = netlink_route_target_to_host,
3399 .host_to_target_data = netlink_route_host_to_target,
3401 #endif /* CONFIG_RTNETLINK */
3403 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
3405 abi_long ret;
3407 ret = target_to_host_nlmsg_audit(buf, len);
3408 if (ret < 0) {
3409 return ret;
3412 return len;
3415 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
3417 abi_long ret;
3419 ret = host_to_target_nlmsg_audit(buf, len);
3420 if (ret < 0) {
3421 return ret;
3424 return len;
3427 static TargetFdTrans target_netlink_audit_trans = {
3428 .target_to_host_data = netlink_audit_target_to_host,
3429 .host_to_target_data = netlink_audit_host_to_target,
3432 /* do_socket() Must return target values and target errnos. */
3433 static abi_long do_socket(int domain, int type, int protocol)
3435 int target_type = type;
3436 int ret;
3438 ret = target_to_host_sock_type(&type);
3439 if (ret) {
3440 return ret;
3443 if (domain == PF_NETLINK && !(
3444 #ifdef CONFIG_RTNETLINK
3445 protocol == NETLINK_ROUTE ||
3446 #endif
3447 protocol == NETLINK_KOBJECT_UEVENT ||
3448 protocol == NETLINK_AUDIT)) {
3449 return -EPFNOSUPPORT;
3452 if (domain == AF_PACKET ||
3453 (domain == AF_INET && type == SOCK_PACKET)) {
3454 protocol = tswap16(protocol);
3457 ret = get_errno(socket(domain, type, protocol));
3458 if (ret >= 0) {
3459 ret = sock_flags_fixup(ret, target_type);
3460 if (type == SOCK_PACKET) {
3461 /* Manage an obsolete case :
3462 * if socket type is SOCK_PACKET, bind by name
3464 fd_trans_register(ret, &target_packet_trans);
3465 } else if (domain == PF_NETLINK) {
3466 switch (protocol) {
3467 #ifdef CONFIG_RTNETLINK
3468 case NETLINK_ROUTE:
3469 fd_trans_register(ret, &target_netlink_route_trans);
3470 break;
3471 #endif
3472 case NETLINK_KOBJECT_UEVENT:
3473 /* nothing to do: messages are strings */
3474 break;
3475 case NETLINK_AUDIT:
3476 fd_trans_register(ret, &target_netlink_audit_trans);
3477 break;
3478 default:
3479 g_assert_not_reached();
3483 return ret;
3486 /* do_bind() Must return target values and target errnos. */
3487 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3488 socklen_t addrlen)
3490 void *addr;
3491 abi_long ret;
3493 if ((int)addrlen < 0) {
3494 return -TARGET_EINVAL;
3497 addr = alloca(addrlen+1);
3499 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3500 if (ret)
3501 return ret;
3503 return get_errno(bind(sockfd, addr, addrlen));
3506 /* do_connect() Must return target values and target errnos. */
3507 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3508 socklen_t addrlen)
3510 void *addr;
3511 abi_long ret;
3513 if ((int)addrlen < 0) {
3514 return -TARGET_EINVAL;
3517 addr = alloca(addrlen+1);
3519 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3520 if (ret)
3521 return ret;
3523 return get_errno(safe_connect(sockfd, addr, addrlen));
3526 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3527 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3528 int flags, int send)
3530 abi_long ret, len;
3531 struct msghdr msg;
3532 abi_ulong count;
3533 struct iovec *vec;
3534 abi_ulong target_vec;
3536 if (msgp->msg_name) {
3537 msg.msg_namelen = tswap32(msgp->msg_namelen);
3538 msg.msg_name = alloca(msg.msg_namelen+1);
3539 ret = target_to_host_sockaddr(fd, msg.msg_name,
3540 tswapal(msgp->msg_name),
3541 msg.msg_namelen);
3542 if (ret == -TARGET_EFAULT) {
3543 /* For connected sockets msg_name and msg_namelen must
3544 * be ignored, so returning EFAULT immediately is wrong.
3545 * Instead, pass a bad msg_name to the host kernel, and
3546 * let it decide whether to return EFAULT or not.
3548 msg.msg_name = (void *)-1;
3549 } else if (ret) {
3550 goto out2;
3552 } else {
3553 msg.msg_name = NULL;
3554 msg.msg_namelen = 0;
3556 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3557 msg.msg_control = alloca(msg.msg_controllen);
3558 msg.msg_flags = tswap32(msgp->msg_flags);
3560 count = tswapal(msgp->msg_iovlen);
3561 target_vec = tswapal(msgp->msg_iov);
3563 if (count > IOV_MAX) {
3564 /* sendrcvmsg returns a different errno for this condition than
3565 * readv/writev, so we must catch it here before lock_iovec() does.
3567 ret = -TARGET_EMSGSIZE;
3568 goto out2;
3571 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3572 target_vec, count, send);
3573 if (vec == NULL) {
3574 ret = -host_to_target_errno(errno);
3575 goto out2;
3577 msg.msg_iovlen = count;
3578 msg.msg_iov = vec;
3580 if (send) {
3581 if (fd_trans_target_to_host_data(fd)) {
3582 void *host_msg;
3584 host_msg = g_malloc(msg.msg_iov->iov_len);
3585 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3586 ret = fd_trans_target_to_host_data(fd)(host_msg,
3587 msg.msg_iov->iov_len);
3588 if (ret >= 0) {
3589 msg.msg_iov->iov_base = host_msg;
3590 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3592 g_free(host_msg);
3593 } else {
3594 ret = target_to_host_cmsg(&msg, msgp);
3595 if (ret == 0) {
3596 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3599 } else {
3600 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3601 if (!is_error(ret)) {
3602 len = ret;
3603 if (fd_trans_host_to_target_data(fd)) {
3604 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3605 len);
3606 } else {
3607 ret = host_to_target_cmsg(msgp, &msg);
3609 if (!is_error(ret)) {
3610 msgp->msg_namelen = tswap32(msg.msg_namelen);
3611 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3612 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3613 msg.msg_name, msg.msg_namelen);
3614 if (ret) {
3615 goto out;
3619 ret = len;
3624 out:
3625 unlock_iovec(vec, target_vec, count, !send);
3626 out2:
3627 return ret;
3630 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3631 int flags, int send)
3633 abi_long ret;
3634 struct target_msghdr *msgp;
3636 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3637 msgp,
3638 target_msg,
3639 send ? 1 : 0)) {
3640 return -TARGET_EFAULT;
3642 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3643 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3644 return ret;
3647 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3648 * so it might not have this *mmsg-specific flag either.
3650 #ifndef MSG_WAITFORONE
3651 #define MSG_WAITFORONE 0x10000
3652 #endif
3654 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3655 unsigned int vlen, unsigned int flags,
3656 int send)
3658 struct target_mmsghdr *mmsgp;
3659 abi_long ret = 0;
3660 int i;
3662 if (vlen > UIO_MAXIOV) {
3663 vlen = UIO_MAXIOV;
3666 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3667 if (!mmsgp) {
3668 return -TARGET_EFAULT;
3671 for (i = 0; i < vlen; i++) {
3672 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3673 if (is_error(ret)) {
3674 break;
3676 mmsgp[i].msg_len = tswap32(ret);
3677 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3678 if (flags & MSG_WAITFORONE) {
3679 flags |= MSG_DONTWAIT;
3683 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3685 /* Return number of datagrams sent if we sent any at all;
3686 * otherwise return the error.
3688 if (i) {
3689 return i;
3691 return ret;
3694 /* do_accept4() Must return target values and target errnos. */
3695 static abi_long do_accept4(int fd, abi_ulong target_addr,
3696 abi_ulong target_addrlen_addr, int flags)
3698 socklen_t addrlen;
3699 void *addr;
3700 abi_long ret;
3701 int host_flags;
3703 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3705 if (target_addr == 0) {
3706 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3709 /* linux returns EINVAL if addrlen pointer is invalid */
3710 if (get_user_u32(addrlen, target_addrlen_addr))
3711 return -TARGET_EINVAL;
3713 if ((int)addrlen < 0) {
3714 return -TARGET_EINVAL;
3717 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3718 return -TARGET_EINVAL;
3720 addr = alloca(addrlen);
3722 ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
3723 if (!is_error(ret)) {
3724 host_to_target_sockaddr(target_addr, addr, addrlen);
3725 if (put_user_u32(addrlen, target_addrlen_addr))
3726 ret = -TARGET_EFAULT;
3728 return ret;
3731 /* do_getpeername() Must return target values and target errnos. */
3732 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3733 abi_ulong target_addrlen_addr)
3735 socklen_t addrlen;
3736 void *addr;
3737 abi_long ret;
3739 if (get_user_u32(addrlen, target_addrlen_addr))
3740 return -TARGET_EFAULT;
3742 if ((int)addrlen < 0) {
3743 return -TARGET_EINVAL;
3746 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3747 return -TARGET_EFAULT;
3749 addr = alloca(addrlen);
3751 ret = get_errno(getpeername(fd, addr, &addrlen));
3752 if (!is_error(ret)) {
3753 host_to_target_sockaddr(target_addr, addr, addrlen);
3754 if (put_user_u32(addrlen, target_addrlen_addr))
3755 ret = -TARGET_EFAULT;
3757 return ret;
3760 /* do_getsockname() Must return target values and target errnos. */
3761 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3762 abi_ulong target_addrlen_addr)
3764 socklen_t addrlen;
3765 void *addr;
3766 abi_long ret;
3768 if (get_user_u32(addrlen, target_addrlen_addr))
3769 return -TARGET_EFAULT;
3771 if ((int)addrlen < 0) {
3772 return -TARGET_EINVAL;
3775 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3776 return -TARGET_EFAULT;
3778 addr = alloca(addrlen);
3780 ret = get_errno(getsockname(fd, addr, &addrlen));
3781 if (!is_error(ret)) {
3782 host_to_target_sockaddr(target_addr, addr, addrlen);
3783 if (put_user_u32(addrlen, target_addrlen_addr))
3784 ret = -TARGET_EFAULT;
3786 return ret;
3789 /* do_socketpair() Must return target values and target errnos. */
3790 static abi_long do_socketpair(int domain, int type, int protocol,
3791 abi_ulong target_tab_addr)
3793 int tab[2];
3794 abi_long ret;
3796 target_to_host_sock_type(&type);
3798 ret = get_errno(socketpair(domain, type, protocol, tab));
3799 if (!is_error(ret)) {
3800 if (put_user_s32(tab[0], target_tab_addr)
3801 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3802 ret = -TARGET_EFAULT;
3804 return ret;
3807 /* do_sendto() Must return target values and target errnos. */
3808 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3809 abi_ulong target_addr, socklen_t addrlen)
3811 void *addr;
3812 void *host_msg;
3813 void *copy_msg = NULL;
3814 abi_long ret;
3816 if ((int)addrlen < 0) {
3817 return -TARGET_EINVAL;
3820 host_msg = lock_user(VERIFY_READ, msg, len, 1);
3821 if (!host_msg)
3822 return -TARGET_EFAULT;
3823 if (fd_trans_target_to_host_data(fd)) {
3824 copy_msg = host_msg;
3825 host_msg = g_malloc(len);
3826 memcpy(host_msg, copy_msg, len);
3827 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3828 if (ret < 0) {
3829 goto fail;
3832 if (target_addr) {
3833 addr = alloca(addrlen+1);
3834 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3835 if (ret) {
3836 goto fail;
3838 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3839 } else {
3840 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3842 fail:
3843 if (copy_msg) {
3844 g_free(host_msg);
3845 host_msg = copy_msg;
3847 unlock_user(host_msg, msg, 0);
3848 return ret;
3851 /* do_recvfrom() Must return target values and target errnos. */
3852 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3853 abi_ulong target_addr,
3854 abi_ulong target_addrlen)
3856 socklen_t addrlen;
3857 void *addr;
3858 void *host_msg;
3859 abi_long ret;
3861 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3862 if (!host_msg)
3863 return -TARGET_EFAULT;
3864 if (target_addr) {
3865 if (get_user_u32(addrlen, target_addrlen)) {
3866 ret = -TARGET_EFAULT;
3867 goto fail;
3869 if ((int)addrlen < 0) {
3870 ret = -TARGET_EINVAL;
3871 goto fail;
3873 addr = alloca(addrlen);
3874 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3875 addr, &addrlen));
3876 } else {
3877 addr = NULL; /* To keep compiler quiet. */
3878 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3880 if (!is_error(ret)) {
3881 if (fd_trans_host_to_target_data(fd)) {
3882 ret = fd_trans_host_to_target_data(fd)(host_msg, ret);
3884 if (target_addr) {
3885 host_to_target_sockaddr(target_addr, addr, addrlen);
3886 if (put_user_u32(addrlen, target_addrlen)) {
3887 ret = -TARGET_EFAULT;
3888 goto fail;
3891 unlock_user(host_msg, msg, len);
3892 } else {
3893 fail:
3894 unlock_user(host_msg, msg, 0);
3896 return ret;
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned ac[] = { /* number of arguments per call */
        [SOCKOP_socket] = 3,      /* domain, type, protocol */
        [SOCKOP_bind] = 3,        /* sockfd, addr, addrlen */
        [SOCKOP_connect] = 3,     /* sockfd, addr, addrlen */
        [SOCKOP_listen] = 2,      /* sockfd, backlog */
        [SOCKOP_accept] = 3,      /* sockfd, addr, addrlen */
        [SOCKOP_accept4] = 4,     /* sockfd, addr, addrlen, flags */
        [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_socketpair] = 4,  /* domain, type, protocol, tab */
        [SOCKOP_send] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_recv] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_sendto] = 6,      /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_recvfrom] = 6,    /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_shutdown] = 2,    /* sockfd, how */
        [SOCKOP_sendmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_recvmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_sendmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_recvmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_setsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
        [SOCKOP_getsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
    };
    abi_long a[6]; /* max 6 args */

    /* first, collect the arguments in a[] according to ac[] */
    if (num >= 0 && num < ARRAY_SIZE(ac)) {
        unsigned i;
        assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
        for (i = 0; i < ac[num]; ++i) {
            if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* now when we have the args, actually handle the call */
    switch (num) {
    case SOCKOP_socket: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case SOCKOP_bind: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case SOCKOP_connect: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case SOCKOP_listen: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case SOCKOP_accept: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case SOCKOP_getsockname: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case SOCKOP_getpeername: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case SOCKOP_socketpair: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case SOCKOP_send: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_recv: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_shutdown: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case SOCKOP_sendmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case SOCKOP_recvmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_ENOSYS;
    }
}
#endif
3987 #define N_SHM_REGIONS 32
3989 static struct shm_region {
3990 abi_ulong start;
3991 abi_ulong size;
3992 bool in_use;
3993 } shm_regions[N_SHM_REGIONS];
3995 #ifndef TARGET_SEMID64_DS
3996 /* asm-generic version of this struct */
3997 struct target_semid64_ds
3999 struct target_ipc_perm sem_perm;
4000 abi_ulong sem_otime;
4001 #if TARGET_ABI_BITS == 32
4002 abi_ulong __unused1;
4003 #endif
4004 abi_ulong sem_ctime;
4005 #if TARGET_ABI_BITS == 32
4006 abi_ulong __unused2;
4007 #endif
4008 abi_ulong sem_nsems;
4009 abi_ulong __unused3;
4010 abi_ulong __unused4;
4012 #endif
4014 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
4015 abi_ulong target_addr)
4017 struct target_ipc_perm *target_ip;
4018 struct target_semid64_ds *target_sd;
4020 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4021 return -TARGET_EFAULT;
4022 target_ip = &(target_sd->sem_perm);
4023 host_ip->__key = tswap32(target_ip->__key);
4024 host_ip->uid = tswap32(target_ip->uid);
4025 host_ip->gid = tswap32(target_ip->gid);
4026 host_ip->cuid = tswap32(target_ip->cuid);
4027 host_ip->cgid = tswap32(target_ip->cgid);
4028 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4029 host_ip->mode = tswap32(target_ip->mode);
4030 #else
4031 host_ip->mode = tswap16(target_ip->mode);
4032 #endif
4033 #if defined(TARGET_PPC)
4034 host_ip->__seq = tswap32(target_ip->__seq);
4035 #else
4036 host_ip->__seq = tswap16(target_ip->__seq);
4037 #endif
4038 unlock_user_struct(target_sd, target_addr, 0);
4039 return 0;
4042 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
4043 struct ipc_perm *host_ip)
4045 struct target_ipc_perm *target_ip;
4046 struct target_semid64_ds *target_sd;
4048 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4049 return -TARGET_EFAULT;
4050 target_ip = &(target_sd->sem_perm);
4051 target_ip->__key = tswap32(host_ip->__key);
4052 target_ip->uid = tswap32(host_ip->uid);
4053 target_ip->gid = tswap32(host_ip->gid);
4054 target_ip->cuid = tswap32(host_ip->cuid);
4055 target_ip->cgid = tswap32(host_ip->cgid);
4056 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4057 target_ip->mode = tswap32(host_ip->mode);
4058 #else
4059 target_ip->mode = tswap16(host_ip->mode);
4060 #endif
4061 #if defined(TARGET_PPC)
4062 target_ip->__seq = tswap32(host_ip->__seq);
4063 #else
4064 target_ip->__seq = tswap16(host_ip->__seq);
4065 #endif
4066 unlock_user_struct(target_sd, target_addr, 1);
4067 return 0;
4070 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
4071 abi_ulong target_addr)
4073 struct target_semid64_ds *target_sd;
4075 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4076 return -TARGET_EFAULT;
4077 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
4078 return -TARGET_EFAULT;
4079 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
4080 host_sd->sem_otime = tswapal(target_sd->sem_otime);
4081 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
4082 unlock_user_struct(target_sd, target_addr, 0);
4083 return 0;
4086 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
4087 struct semid_ds *host_sd)
4089 struct target_semid64_ds *target_sd;
4091 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4092 return -TARGET_EFAULT;
4093 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
4094 return -TARGET_EFAULT;
4095 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
4096 target_sd->sem_otime = tswapal(host_sd->sem_otime);
4097 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
4098 unlock_user_struct(target_sd, target_addr, 1);
4099 return 0;
/* Guest-ABI layout of struct seminfo (semctl IPC_INFO/SEM_INFO). */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
4115 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
4116 struct seminfo *host_seminfo)
4118 struct target_seminfo *target_seminfo;
4119 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
4120 return -TARGET_EFAULT;
4121 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
4122 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
4123 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
4124 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
4125 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
4126 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
4127 __put_user(host_seminfo->semume, &target_seminfo->semume);
4128 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
4129 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
4130 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
4131 unlock_user_struct(target_seminfo, target_addr, 1);
4132 return 0;
/* Host-side semctl() argument union (glibc requires the caller to
 * define it). */
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};
4142 union target_semun {
4143 int val;
4144 abi_ulong buf;
4145 abi_ulong array;
4146 abi_ulong __buf;
4149 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
4150 abi_ulong target_addr)
4152 int nsems;
4153 unsigned short *array;
4154 union semun semun;
4155 struct semid_ds semid_ds;
4156 int i, ret;
4158 semun.buf = &semid_ds;
4160 ret = semctl(semid, 0, IPC_STAT, semun);
4161 if (ret == -1)
4162 return get_errno(ret);
4164 nsems = semid_ds.sem_nsems;
4166 *host_array = g_try_new(unsigned short, nsems);
4167 if (!*host_array) {
4168 return -TARGET_ENOMEM;
4170 array = lock_user(VERIFY_READ, target_addr,
4171 nsems*sizeof(unsigned short), 1);
4172 if (!array) {
4173 g_free(*host_array);
4174 return -TARGET_EFAULT;
4177 for(i=0; i<nsems; i++) {
4178 __get_user((*host_array)[i], &array[i]);
4180 unlock_user(array, target_addr, 0);
4182 return 0;
4185 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4186 unsigned short **host_array)
4188 int nsems;
4189 unsigned short *array;
4190 union semun semun;
4191 struct semid_ds semid_ds;
4192 int i, ret;
4194 semun.buf = &semid_ds;
4196 ret = semctl(semid, 0, IPC_STAT, semun);
4197 if (ret == -1)
4198 return get_errno(ret);
4200 nsems = semid_ds.sem_nsems;
4202 array = lock_user(VERIFY_WRITE, target_addr,
4203 nsems*sizeof(unsigned short), 0);
4204 if (!array)
4205 return -TARGET_EFAULT;
4207 for(i=0; i<nsems; i++) {
4208 __put_user((*host_array)[i], &array[i]);
4210 g_free(*host_array);
4211 unlock_user(array, target_addr, 1);
4213 return 0;
4216 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4217 abi_ulong target_arg)
4219 union target_semun target_su = { .buf = target_arg };
4220 union semun arg;
4221 struct semid_ds dsarg;
4222 unsigned short *array = NULL;
4223 struct seminfo seminfo;
4224 abi_long ret = -TARGET_EINVAL;
4225 abi_long err;
4226 cmd &= 0xff;
4228 switch( cmd ) {
4229 case GETVAL:
4230 case SETVAL:
4231 /* In 64 bit cross-endian situations, we will erroneously pick up
4232 * the wrong half of the union for the "val" element. To rectify
4233 * this, the entire 8-byte structure is byteswapped, followed by
4234 * a swap of the 4 byte val field. In other cases, the data is
4235 * already in proper host byte order. */
4236 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4237 target_su.buf = tswapal(target_su.buf);
4238 arg.val = tswap32(target_su.val);
4239 } else {
4240 arg.val = target_su.val;
4242 ret = get_errno(semctl(semid, semnum, cmd, arg));
4243 break;
4244 case GETALL:
4245 case SETALL:
4246 err = target_to_host_semarray(semid, &array, target_su.array);
4247 if (err)
4248 return err;
4249 arg.array = array;
4250 ret = get_errno(semctl(semid, semnum, cmd, arg));
4251 err = host_to_target_semarray(semid, target_su.array, &array);
4252 if (err)
4253 return err;
4254 break;
4255 case IPC_STAT:
4256 case IPC_SET:
4257 case SEM_STAT:
4258 err = target_to_host_semid_ds(&dsarg, target_su.buf);
4259 if (err)
4260 return err;
4261 arg.buf = &dsarg;
4262 ret = get_errno(semctl(semid, semnum, cmd, arg));
4263 err = host_to_target_semid_ds(target_su.buf, &dsarg);
4264 if (err)
4265 return err;
4266 break;
4267 case IPC_INFO:
4268 case SEM_INFO:
4269 arg.__buf = &seminfo;
4270 ret = get_errno(semctl(semid, semnum, cmd, arg));
4271 err = host_to_target_seminfo(target_su.__buf, &seminfo);
4272 if (err)
4273 return err;
4274 break;
4275 case IPC_RMID:
4276 case GETPID:
4277 case GETNCNT:
4278 case GETZCNT:
4279 ret = get_errno(semctl(semid, semnum, cmd, NULL));
4280 break;
4283 return ret;
/* Guest-ABI layout of struct sembuf (semop operation descriptor). */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
4292 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4293 abi_ulong target_addr,
4294 unsigned nsops)
4296 struct target_sembuf *target_sembuf;
4297 int i;
4299 target_sembuf = lock_user(VERIFY_READ, target_addr,
4300 nsops*sizeof(struct target_sembuf), 1);
4301 if (!target_sembuf)
4302 return -TARGET_EFAULT;
4304 for(i=0; i<nsops; i++) {
4305 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4306 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4307 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4310 unlock_user(target_sembuf, target_addr, 0);
4312 return 0;
4315 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
4317 struct sembuf sops[nsops];
4319 if (target_to_host_sembuf(sops, ptr, nsops))
4320 return -TARGET_EFAULT;
4322 return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
4325 struct target_msqid_ds
4327 struct target_ipc_perm msg_perm;
4328 abi_ulong msg_stime;
4329 #if TARGET_ABI_BITS == 32
4330 abi_ulong __unused1;
4331 #endif
4332 abi_ulong msg_rtime;
4333 #if TARGET_ABI_BITS == 32
4334 abi_ulong __unused2;
4335 #endif
4336 abi_ulong msg_ctime;
4337 #if TARGET_ABI_BITS == 32
4338 abi_ulong __unused3;
4339 #endif
4340 abi_ulong __msg_cbytes;
4341 abi_ulong msg_qnum;
4342 abi_ulong msg_qbytes;
4343 abi_ulong msg_lspid;
4344 abi_ulong msg_lrpid;
4345 abi_ulong __unused4;
4346 abi_ulong __unused5;
4349 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4350 abi_ulong target_addr)
4352 struct target_msqid_ds *target_md;
4354 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4355 return -TARGET_EFAULT;
4356 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4357 return -TARGET_EFAULT;
4358 host_md->msg_stime = tswapal(target_md->msg_stime);
4359 host_md->msg_rtime = tswapal(target_md->msg_rtime);
4360 host_md->msg_ctime = tswapal(target_md->msg_ctime);
4361 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4362 host_md->msg_qnum = tswapal(target_md->msg_qnum);
4363 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4364 host_md->msg_lspid = tswapal(target_md->msg_lspid);
4365 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4366 unlock_user_struct(target_md, target_addr, 0);
4367 return 0;
4370 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4371 struct msqid_ds *host_md)
4373 struct target_msqid_ds *target_md;
4375 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4376 return -TARGET_EFAULT;
4377 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4378 return -TARGET_EFAULT;
4379 target_md->msg_stime = tswapal(host_md->msg_stime);
4380 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4381 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4382 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4383 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4384 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4385 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4386 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4387 unlock_user_struct(target_md, target_addr, 1);
4388 return 0;
/* Guest-ABI layout of struct msginfo (msgctl IPC_INFO/MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4402 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4403 struct msginfo *host_msginfo)
4405 struct target_msginfo *target_msginfo;
4406 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4407 return -TARGET_EFAULT;
4408 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4409 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4410 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4411 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4412 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4413 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4414 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4415 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4416 unlock_user_struct(target_msginfo, target_addr, 1);
4417 return 0;
4420 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4422 struct msqid_ds dsarg;
4423 struct msginfo msginfo;
4424 abi_long ret = -TARGET_EINVAL;
4426 cmd &= 0xff;
4428 switch (cmd) {
4429 case IPC_STAT:
4430 case IPC_SET:
4431 case MSG_STAT:
4432 if (target_to_host_msqid_ds(&dsarg,ptr))
4433 return -TARGET_EFAULT;
4434 ret = get_errno(msgctl(msgid, cmd, &dsarg));
4435 if (host_to_target_msqid_ds(ptr,&dsarg))
4436 return -TARGET_EFAULT;
4437 break;
4438 case IPC_RMID:
4439 ret = get_errno(msgctl(msgid, cmd, NULL));
4440 break;
4441 case IPC_INFO:
4442 case MSG_INFO:
4443 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4444 if (host_to_target_msginfo(ptr, &msginfo))
4445 return -TARGET_EFAULT;
4446 break;
4449 return ret;
/* Guest view of struct msgbuf: a message type tag followed by the
 * variable-length message text.  mtext is declared [1] but used as a
 * flexible array — the real buffer extends past the struct. */
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};
4457 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4458 ssize_t msgsz, int msgflg)
4460 struct target_msgbuf *target_mb;
4461 struct msgbuf *host_mb;
4462 abi_long ret = 0;
4464 if (msgsz < 0) {
4465 return -TARGET_EINVAL;
4468 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4469 return -TARGET_EFAULT;
4470 host_mb = g_try_malloc(msgsz + sizeof(long));
4471 if (!host_mb) {
4472 unlock_user_struct(target_mb, msgp, 0);
4473 return -TARGET_ENOMEM;
4475 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4476 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4477 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4478 g_free(host_mb);
4479 unlock_user_struct(target_mb, msgp, 0);
4481 return ret;
/* Emulate msgrcv(2): receive into a host-side bounce buffer, then copy
 * the received bytes and the (byte-swapped) mtype back into the guest's
 * target_msgbuf at msgp.  msgsz is the capacity of the guest mtext[].
 * Returns the number of bytes received or a negative target errno. */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    /* Host bounce buffer: mtype word plus up to msgsz bytes of text. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));

    if (ret > 0) {
        /* Lock exactly the bytes actually received, at the guest address
         * of the mtext field, so the write-back is bounds-checked. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
4527 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4528 abi_ulong target_addr)
4530 struct target_shmid_ds *target_sd;
4532 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4533 return -TARGET_EFAULT;
4534 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4535 return -TARGET_EFAULT;
4536 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4537 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4538 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4539 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4540 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4541 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4542 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4543 unlock_user_struct(target_sd, target_addr, 0);
4544 return 0;
4547 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4548 struct shmid_ds *host_sd)
4550 struct target_shmid_ds *target_sd;
4552 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4553 return -TARGET_EFAULT;
4554 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4555 return -TARGET_EFAULT;
4556 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4557 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4558 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4559 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4560 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4561 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4562 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4563 unlock_user_struct(target_sd, target_addr, 1);
4564 return 0;
/* Guest-side layout of struct shminfo, written back to the guest by
 * host_to_target_shminfo() for shmctl(IPC_INFO). */
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};
4575 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4576 struct shminfo *host_shminfo)
4578 struct target_shminfo *target_shminfo;
4579 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4580 return -TARGET_EFAULT;
4581 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4582 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4583 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4584 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4585 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4586 unlock_user_struct(target_shminfo, target_addr, 1);
4587 return 0;
/* Guest-side layout of struct shm_info, written back to the guest by
 * host_to_target_shm_info() for shmctl(SHM_INFO). */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
4599 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4600 struct shm_info *host_shm_info)
4602 struct target_shm_info *target_shm_info;
4603 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4604 return -TARGET_EFAULT;
4605 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4606 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4607 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4608 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4609 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4610 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4611 unlock_user_struct(target_shm_info, target_addr, 1);
4612 return 0;
4615 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4617 struct shmid_ds dsarg;
4618 struct shminfo shminfo;
4619 struct shm_info shm_info;
4620 abi_long ret = -TARGET_EINVAL;
4622 cmd &= 0xff;
4624 switch(cmd) {
4625 case IPC_STAT:
4626 case IPC_SET:
4627 case SHM_STAT:
4628 if (target_to_host_shmid_ds(&dsarg, buf))
4629 return -TARGET_EFAULT;
4630 ret = get_errno(shmctl(shmid, cmd, &dsarg));
4631 if (host_to_target_shmid_ds(buf, &dsarg))
4632 return -TARGET_EFAULT;
4633 break;
4634 case IPC_INFO:
4635 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4636 if (host_to_target_shminfo(buf, &shminfo))
4637 return -TARGET_EFAULT;
4638 break;
4639 case SHM_INFO:
4640 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4641 if (host_to_target_shm_info(buf, &shm_info))
4642 return -TARGET_EFAULT;
4643 break;
4644 case IPC_RMID:
4645 case SHM_LOCK:
4646 case SHM_UNLOCK:
4647 ret = get_errno(shmctl(shmid, cmd, NULL));
4648 break;
4651 return ret;
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    /* Default: the target's SHMLBA equals its page size (see above). */
    return TARGET_PAGE_SIZE;
}
#endif
/* Emulate shmat(2): attach the segment in the host address space at a
 * location consistent with the guest memory layout, register the guest
 * pages, and record the region so do_shmdt() can undo it later.
 * Returns the guest attach address or a negative target errno. */
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;
    abi_ulong shmlba;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    /* Enforce the target's SHMLBA alignment on the requested address:
     * round down with SHM_RND, otherwise reject. */
    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }

    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* No address requested: pick a free range in the guest address
         * space ourselves and force the host mapping there. */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    /* Mark the attached range as valid guest memory, read-only when the
     * segment was attached with SHM_RDONLY. */
    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* Remember the region so a later shmdt() can clear the page flags. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
4740 static inline abi_long do_shmdt(abi_ulong shmaddr)
4742 int i;
4744 for (i = 0; i < N_SHM_REGIONS; ++i) {
4745 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4746 shm_regions[i].in_use = false;
4747 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4748 break;
4752 return get_errno(shmdt(g2h(shmaddr)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    /* The ipc() multiplexer encodes an interface version in the top
     * 16 bits of the call number; the low 16 bits select the operation. */
    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-style msgrcv: ptr points at a kludge structure that
                 * bundles the buffer pointer and the message type. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* The attach address is returned through *third. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* iBCS2 emulator entry point: not supported. */
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
/* kernel structure types definitions */
/* syscall_types.h is expanded twice: first to build an enum of STRUCT_*
 * identifiers, then to emit one argtype descriptor array per structure
 * for the thunk conversion code. */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL
typedef struct IOCTLEntry IOCTLEntry;

/* Custom conversion hook for ioctls the generic thunk path can't handle. */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

/* One entry of the ioctl translation table (populated from ioctls.h). */
struct IOCTLEntry {
    int target_cmd;             /* ioctl number as the guest issues it */
    unsigned int host_cmd;      /* corresponding host ioctl number */
    const char *name;           /* for logging/diagnostics */
    int access;                 /* IOC_R / IOC_W / IOC_RW */
    do_ioctl_fn *do_ioctl;      /* custom handler, or 0 for generic path */
    const argtype arg_type[5];  /* thunk description of the argument */
};
/* Data-flow direction of an ioctl argument, from the guest's viewpoint. */
#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

/* Upper bound on the size of a converted ioctl argument structure. */
#define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))

/* Custom handler for FS_IOC_FIEMAP: converts the variable-length
 * fiemap request/response between guest and host layouts. */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    /* Convert the fixed-size header the guest passed in. */
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4988 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4989 int fd, int cmd, abi_long arg)
4991 const argtype *arg_type = ie->arg_type;
4992 int target_size;
4993 void *argptr;
4994 int ret;
4995 struct ifconf *host_ifconf;
4996 uint32_t outbufsz;
4997 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4998 int target_ifreq_size;
4999 int nb_ifreq;
5000 int free_buf = 0;
5001 int i;
5002 int target_ifc_len;
5003 abi_long target_ifc_buf;
5004 int host_ifc_len;
5005 char *host_ifc_buf;
5007 assert(arg_type[0] == TYPE_PTR);
5008 assert(ie->access == IOC_RW);
5010 arg_type++;
5011 target_size = thunk_type_size(arg_type, 0);
5013 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5014 if (!argptr)
5015 return -TARGET_EFAULT;
5016 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5017 unlock_user(argptr, arg, 0);
5019 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
5020 target_ifc_len = host_ifconf->ifc_len;
5021 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
5023 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
5024 nb_ifreq = target_ifc_len / target_ifreq_size;
5025 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
5027 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
5028 if (outbufsz > MAX_STRUCT_SIZE) {
5029 /* We can't fit all the extents into the fixed size buffer.
5030 * Allocate one that is large enough and use it instead.
5032 host_ifconf = malloc(outbufsz);
5033 if (!host_ifconf) {
5034 return -TARGET_ENOMEM;
5036 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
5037 free_buf = 1;
5039 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
5041 host_ifconf->ifc_len = host_ifc_len;
5042 host_ifconf->ifc_buf = host_ifc_buf;
5044 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
5045 if (!is_error(ret)) {
5046 /* convert host ifc_len to target ifc_len */
5048 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
5049 target_ifc_len = nb_ifreq * target_ifreq_size;
5050 host_ifconf->ifc_len = target_ifc_len;
5052 /* restore target ifc_buf */
5054 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
5056 /* copy struct ifconf to target user */
5058 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5059 if (!argptr)
5060 return -TARGET_EFAULT;
5061 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
5062 unlock_user(argptr, arg, target_size);
5064 /* copy ifreq[] to target user */
5066 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
5067 for (i = 0; i < nb_ifreq ; i++) {
5068 thunk_convert(argptr + i * target_ifreq_size,
5069 host_ifc_buf + i * sizeof(struct ifreq),
5070 ifreq_arg_type, THUNK_TARGET);
5072 unlock_user(argptr, target_ifc_buf, target_ifc_len);
5075 if (free_buf) {
5076 free(host_ifconf);
5079 return ret;
/* Custom handler for device-mapper ioctls.  The argument is a struct
 * dm_ioctl header whose data_start/data_size fields describe a
 * variable-length, command-specific payload following it, so both the
 * input and the output payload need per-command conversion. */
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    /* Convert the fixed dm_ioctl header from the guest. */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        ret = -TARGET_EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    /* Convert the command-specific input payload into host_data. */
    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }

    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        memcpy(host_data, argptr, guest_data_size);
        /* First 8 bytes are the target sector; swap it explicitly. */
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        /* Payload is a sequence of dm_target_spec structs, each followed
         * by a parameter string; the 'next' field links them together. */
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        /* NOTE(review): this lock_user() result is not NULL-checked before
         * the switch below dereferences it — looks like a latent EFAULT
         * handling gap; confirm against upstream. */
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        /* Convert the command-specific output payload back to the guest. */
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void*)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char*)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char*)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void*)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            void *hdata = (void*)host_dm + host_dm->data_start;
            int count = *(uint32_t*)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t*)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void*)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        /* Finally convert the (possibly updated) header back as well. */
        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}
5312 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5313 int cmd, abi_long arg)
5315 void *argptr;
5316 int target_size;
5317 const argtype *arg_type = ie->arg_type;
5318 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5319 abi_long ret;
5321 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5322 struct blkpg_partition host_part;
5324 /* Read and convert blkpg */
5325 arg_type++;
5326 target_size = thunk_type_size(arg_type, 0);
5327 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5328 if (!argptr) {
5329 ret = -TARGET_EFAULT;
5330 goto out;
5332 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5333 unlock_user(argptr, arg, 0);
5335 switch (host_blkpg->op) {
5336 case BLKPG_ADD_PARTITION:
5337 case BLKPG_DEL_PARTITION:
5338 /* payload is struct blkpg_partition */
5339 break;
5340 default:
5341 /* Unknown opcode */
5342 ret = -TARGET_EINVAL;
5343 goto out;
5346 /* Read and convert blkpg->data */
5347 arg = (abi_long)(uintptr_t)host_blkpg->data;
5348 target_size = thunk_type_size(part_arg_type, 0);
5349 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5350 if (!argptr) {
5351 ret = -TARGET_EFAULT;
5352 goto out;
5354 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5355 unlock_user(argptr, arg, 0);
5357 /* Swizzle the data pointer to our local copy and call! */
5358 host_blkpg->data = &host_part;
5359 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5361 out:
5362 return ret;
5365 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5366 int fd, int cmd, abi_long arg)
5368 const argtype *arg_type = ie->arg_type;
5369 const StructEntry *se;
5370 const argtype *field_types;
5371 const int *dst_offsets, *src_offsets;
5372 int target_size;
5373 void *argptr;
5374 abi_ulong *target_rt_dev_ptr;
5375 unsigned long *host_rt_dev_ptr;
5376 abi_long ret;
5377 int i;
5379 assert(ie->access == IOC_W);
5380 assert(*arg_type == TYPE_PTR);
5381 arg_type++;
5382 assert(*arg_type == TYPE_STRUCT);
5383 target_size = thunk_type_size(arg_type, 0);
5384 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5385 if (!argptr) {
5386 return -TARGET_EFAULT;
5388 arg_type++;
5389 assert(*arg_type == (int)STRUCT_rtentry);
5390 se = struct_entries + *arg_type++;
5391 assert(se->convert[0] == NULL);
5392 /* convert struct here to be able to catch rt_dev string */
5393 field_types = se->field_types;
5394 dst_offsets = se->field_offsets[THUNK_HOST];
5395 src_offsets = se->field_offsets[THUNK_TARGET];
5396 for (i = 0; i < se->nb_fields; i++) {
5397 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5398 assert(*field_types == TYPE_PTRVOID);
5399 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5400 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5401 if (*target_rt_dev_ptr != 0) {
5402 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5403 tswapal(*target_rt_dev_ptr));
5404 if (!*host_rt_dev_ptr) {
5405 unlock_user(argptr, arg, 0);
5406 return -TARGET_EFAULT;
5408 } else {
5409 *host_rt_dev_ptr = 0;
5411 field_types++;
5412 continue;
5414 field_types = thunk_convert(buf_temp + dst_offsets[i],
5415 argptr + src_offsets[i],
5416 field_types, THUNK_HOST);
5418 unlock_user(argptr, arg, 0);
5420 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5421 if (*host_rt_dev_ptr != 0) {
5422 unlock_user((void *)*host_rt_dev_ptr,
5423 *target_rt_dev_ptr, 0);
5425 return ret;
5428 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5429 int fd, int cmd, abi_long arg)
5431 int sig = target_to_host_signal(arg);
5432 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
/* The ioctl translation table, generated from ioctls.h.  Entries with a
 * dofn get a custom handler; the rest go through the generic thunk path
 * in do_ioctl().  The table is terminated by a zero target_cmd. */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
#include "ioctls.h"
    { 0, 0, },
};
/* ??? Implement proper locking for ioctls. */
/* do_ioctl() Must return target values and target errnos. */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear search of the translation table for the guest command. */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    /* Entries with a custom handler bypass the generic conversion. */
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* Pass the value straight through. */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        /* Argument is a structure: bounce it through buf_temp, converting
         * in the direction(s) indicated by ie->access. */
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
/* Target <-> host translation table for the termios input-mode flags
 * (c_iflag); each row is { target mask, target bits, host mask, host bits }. */
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
        { 0, 0, 0, 0 }
};
/* Target <-> host translation table for the termios output-mode flags
 * (c_oflag), including the multi-bit delay fields (NLDLY, CRDLY, ...). */
static const bitmask_transtbl oflag_tbl[] = {
	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
	{ 0, 0, 0, 0 }
};
/* termios c_cflag (control mode) translation between target and host.
 * CBAUD (baud rate) and CSIZE (character size) are multi-bit fields
 * enumerated value by value; the remaining flags are single bits. */
static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
    { 0, 0, 0, 0 }
};
/* termios c_lflag (local mode) translation between target and host;
 * all entries are single-bit flags (mask == bits). */
static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { 0, 0, 0, 0 }
};
/* Convert a guest struct target_termios (target byte order) into a host
 * struct host_termios.  The four flag words are translated bit-by-bit
 * through the tables above; the control-character array is remapped
 * entry by entry because the TARGET_V* indices need not match the
 * host's V* indices. */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    /* Zero everything first: host slots without a target counterpart
     * must not contain stack garbage. */
    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
/* Convert a host struct host_termios back into the guest's
 * struct target_termios (target byte order).  Inverse of
 * target_to_host_termios(). */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    /* Zero first so target slots with no host counterpart are defined. */
    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
/* Thunk descriptor for struct termios: the two conversion callbacks
 * (listed as host_to_target, target_to_host) plus the {target, host}
 * size and alignment pairs used by the generic ioctl argument
 * conversion machinery. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};
5707 static bitmask_transtbl mmap_flags_tbl[] = {
5708 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5709 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5710 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5711 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
5712 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
5713 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
5714 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
5715 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5716 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
5717 MAP_NORESERVE },
5718 { 0, 0, 0, 0 }
#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;

/* modify_ldt(func == 0): copy the emulated LDT out to guest memory at
 * @ptr.  Returns the number of bytes copied (capped at @bytecount),
 * 0 if no LDT has been allocated yet, or -TARGET_EFAULT. */
static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this be byteswapped? */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}
/* XXX: add locking support */
/* modify_ldt(func == 1 or 0x11): install one descriptor from the
 * guest's struct user_desc at @ptr into the emulated LDT, allocating
 * the LDT on first use.  @oldmode selects the legacy modify_ldt ABI
 * (no 'useable' bit, "empty" segments rejected).  The descriptor
 * encoding mirrors the Linux kernel's write_ldt(). */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the packed flags word (same bit layout as struct user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
5837 /* specific and weird i386 syscalls */
5838 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5839 unsigned long bytecount)
5841 abi_long ret;
5843 switch (func) {
5844 case 0:
5845 ret = read_ldt(ptr, bytecount);
5846 break;
5847 case 1:
5848 ret = write_ldt(env, ptr, bytecount, 1);
5849 break;
5850 case 0x11:
5851 ret = write_ldt(env, ptr, bytecount, 0);
5852 break;
5853 default:
5854 ret = -TARGET_ENOSYS;
5855 break;
5857 return ret;
#if defined(TARGET_I386) && defined(TARGET_ABI32)
/* set_thread_area: install a TLS descriptor from the guest's
 * struct user_desc at @ptr into the GDT.  An entry_number of -1 asks us
 * to pick a free TLS slot, which is written back to the guest.
 * Descriptor encoding mirrors the Linux kernel. */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* Pick a free TLS slot and report the choice back to the guest.
         * If none is free, entry_number stays -1 and fails the range
         * check below. */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* Unpack the packed flags word (same bit layout as struct user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
/* get_thread_area: read the TLS descriptor selected by the guest's
 * entry_number back out of the GDT and re-encode it as a
 * struct user_desc for the guest. */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Decode the descriptor attribute bits (inverse of the encoding in
     * do_set_thread_area). */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
#endif /* TARGET_I386 && TARGET_ABI32 */
#ifndef TARGET_ABI32
/* x86-64 arch_prctl: get or set the FS/GS segment base for the guest.
 * Returns 0 on success, -TARGET_EFAULT if the GET address is unwritable,
 * or -TARGET_EINVAL for an unknown code. */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    abi_long ret = 0;
    abi_ulong val;
    int idx;

    switch(code) {
    case TARGET_ARCH_SET_GS:
    case TARGET_ARCH_SET_FS:
        if (code == TARGET_ARCH_SET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        /* Load a null selector, then set the base directly. */
        cpu_x86_load_seg(env, idx, 0);
        env->segs[idx].base = addr;
        break;
    case TARGET_ARCH_GET_GS:
    case TARGET_ARCH_GET_FS:
        if (code == TARGET_ARCH_GET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        val = env->segs[idx].base;
        if (put_user(val, addr, abi_ulong))
            ret = -TARGET_EFAULT;
        break;
    default:
        ret = -TARGET_EINVAL;
        break;
    }
    return ret;
}
#endif

#endif /* defined(TARGET_I386) */
/* Stack size requested for threads created via clone(CLONE_VM). */
#define NEW_STACK_SIZE 0x40000

/* Held by do_fork() around thread setup; clone_func() takes and drops
 * it so the child does not enter the CPU loop before the parent has
 * finished initializing. */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;

/* Handshake data passed from do_fork() to clone_func(). */
typedef struct {
    CPUArchState *env;
    pthread_mutex_t mutex;      /* protects cond */
    pthread_cond_t cond;        /* signalled once the child is set up */
    pthread_t thread;
    uint32_t tid;               /* host TID, filled in by the child */
    abi_ulong child_tidptr;     /* guest addrs to store the TID at, or 0 */
    abi_ulong parent_tidptr;
    sigset_t sigmask;           /* parent's mask, restored in the child */
} new_thread_info;
/* Start routine for guest threads created by do_fork(CLONE_VM).
 * Publishes the new host TID to the requested guest addresses, restores
 * the signal mask, wakes the waiting parent, then waits for the parent
 * to release clone_lock before entering the guest CPU loop. */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = gettid();
    cpu->host_tid = info->tid;
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions).
   CLONE_VM creates a new guest thread backed by a host pthread;
   anything else is treated as a fork(), with vfork() downgraded to a
   plain fork (CLONE_VFORK|CLONE_VM stripped). */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_cpu = ENV_GET_CPU(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            rcu_after_fork();
            cpu_clone_regs(env, newsp);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            fork_end(0);
        }
    }
    return ret;
}
/* warning : doesn't handle linux specific flags... */
/* Map a target fcntl command number to the host's.  On 32-bit ABIs the
 * plain lock commands are routed to the host's 64-bit variants so large
 * offsets survive the round trip.  Unknown commands yield
 * -TARGET_EINVAL. */
static int target_to_host_fcntl_cmd(int cmd)
{
    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
        return cmd;
    case TARGET_F_GETLK:
        return F_GETLK64;
    case TARGET_F_SETLK:
        return F_SETLK64;
    case TARGET_F_SETLKW:
        return F_SETLKW64;
    case TARGET_F_GETOWN:
        return F_GETOWN;
    case TARGET_F_SETOWN:
        return F_SETOWN;
    case TARGET_F_GETSIG:
        return F_GETSIG;
    case TARGET_F_SETSIG:
        return F_SETSIG;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        return F_GETLK64;
    case TARGET_F_SETLK64:
        return F_SETLK64;
    case TARGET_F_SETLKW64:
        return F_SETLKW64;
#endif
    case TARGET_F_SETLEASE:
        return F_SETLEASE;
    case TARGET_F_GETLEASE:
        return F_GETLEASE;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        return F_DUPFD_CLOEXEC;
#endif
    case TARGET_F_NOTIFY:
        return F_NOTIFY;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        return F_GETOWN_EX;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        return F_SETOWN_EX;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        return F_SETPIPE_SZ;
    case TARGET_F_GETPIPE_SZ:
        return F_GETPIPE_SZ;
#endif
    default:
        return -TARGET_EINVAL;
    }
    /* not reached: the default case above returns */
    return -TARGET_EINVAL;
}
/* Lock-type translation for struct flock's l_type.  TRANSTBL_CONVERT
 * pairs TARGET_F_xxx with F_xxx using all-ones masks, so the bitmask
 * helpers effectively perform an exact-value match. */
#define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
static const bitmask_transtbl flock_tbl[] = {
    TRANSTBL_CONVERT(F_RDLCK),
    TRANSTBL_CONVERT(F_WRLCK),
    TRANSTBL_CONVERT(F_UNLCK),
    TRANSTBL_CONVERT(F_EXLCK),
    TRANSTBL_CONVERT(F_SHLCK),
    { 0, 0, 0, 0 }
};
/* Read a 32-bit struct flock from guest memory into a host flock64,
 * translating l_type through flock_tbl.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_from_user_flock(struct flock64 *fl,
                                            abi_ulong target_flock_addr)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}
/* Write a host flock64 back to a 32-bit struct flock in guest memory,
 * translating l_type through flock_tbl.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
/* Converter function types: callers can select the appropriate flock64
 * converter pair (generic vs ARM EABI) at run time. */
typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);

#if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/* ARM EABI lays struct flock64 out differently from the generic target
 * layout (see target_eabi_flock64), hence this dedicated converter pair. */
static inline abi_long copy_from_user_eabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_eabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_eabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_eabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
#endif
/* Read a guest struct flock64 into a host flock64, translating l_type
 * through flock_tbl.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_from_user_flock64(struct flock64 *fl,
                                              abi_ulong target_flock_addr)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}
/* Write a host flock64 back to a guest struct flock64, translating
 * l_type through flock_tbl.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
/* Implement the fcntl syscall.  Lock structures, owner info and status
 * flags are converted between target and host layouts; commands whose
 * argument is a plain integer are passed straight through.  Returns the
 * host result or a target errno. */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            /* Copy the (possibly updated) lock description back out. */
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            /* The returned O_* flag word needs translating too. */
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
        /* Integer-argument commands: no conversion needed. */
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        /* Unknown but mapped commands are tried verbatim. */
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
#ifdef USE_UID16

/* The target ABI carries only 16-bit uids/gids while the host uses
 * 32 bits.  Values that do not fit are clamped to the overflow id
 * 65534, matching the kernel's legacy 16-bit id syscalls. */
static inline int high2lowuid(int uid)
{
    return uid > 65535 ? 65534 : uid;
}

static inline int high2lowgid(int gid)
{
    return gid > 65535 ? 65534 : gid;
}

/* Widen a 16-bit id, preserving the special "unchanged" value -1. */
static inline int low2highuid(int uid)
{
    return (int16_t)uid == -1 ? -1 : uid;
}

static inline int low2highgid(int gid)
{
    return (int16_t)gid == -1 ? -1 : gid;
}

static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */

/* 32-bit ids on both sides: every conversion is the identity. */
static inline int high2lowuid(int uid)
{
    return uid;
}

static inline int high2lowgid(int gid)
{
    return gid;
}

static inline int low2highuid(int uid)
{
    return uid;
}

static inline int low2highgid(int gid)
{
    return gid;
}

static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

/* Raw per-thread wrappers; calling the libc functions instead would
 * broadcast the id change to every thread in the process. */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
/*
 * One-time initialization of the syscall emulation layer:
 *  - registers every struct layout used by the thunk conversion code,
 *  - builds the target-to-host errno table as the inverse of the
 *    host-to-target table,
 *  - patches the size field of ioctl request numbers whose payload size
 *    can only be computed at runtime from the thunk type description.
 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);

/* Pull in every STRUCT()/STRUCT_SPECIAL() declaration and register it
 * with the thunk layer; the macros are scoped to this one include. */
#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            /* Only pointer arguments have a thunk-computable size. */
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            /* Replace the all-ones placeholder with the real size. */
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
#if TARGET_ABI_BITS == 32
/*
 * Reassemble a 64-bit offset that a 32-bit guest ABI passed as two
 * register-sized halves; which half holds the high word depends on the
 * guest's endianness.
 */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    uint64_t hi = word0, lo = word1;
#else
    uint64_t hi = word1, lo = word0;
#endif
    return (hi << 32) | lo;
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABI: the offset already arrives whole in the first argument. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/*
 * truncate64: on ABIs that require 64-bit values in aligned register
 * pairs, the two offset halves arrive one argument slot later.
 */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    abi_long w0 = arg2, w1 = arg3;

    if (regpairs_aligned(cpu_env)) {
        w0 = arg3;
        w1 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(w0, w1)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/*
 * ftruncate64: same register-pair alignment handling as truncate64,
 * but the first argument is a file descriptor.
 */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    abi_long w0 = arg2, w1 = arg3;

    if (regpairs_aligned(cpu_env)) {
        w0 = arg3;
        w1 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(w0, w1)));
}
#endif
6712 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
6713 abi_ulong target_addr)
6715 struct target_timespec *target_ts;
6717 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
6718 return -TARGET_EFAULT;
6719 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
6720 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6721 unlock_user_struct(target_ts, target_addr, 0);
6722 return 0;
6725 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
6726 struct timespec *host_ts)
6728 struct target_timespec *target_ts;
6730 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
6731 return -TARGET_EFAULT;
6732 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
6733 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6734 unlock_user_struct(target_ts, target_addr, 1);
6735 return 0;
6738 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6739 abi_ulong target_addr)
6741 struct target_itimerspec *target_itspec;
6743 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6744 return -TARGET_EFAULT;
6747 host_itspec->it_interval.tv_sec =
6748 tswapal(target_itspec->it_interval.tv_sec);
6749 host_itspec->it_interval.tv_nsec =
6750 tswapal(target_itspec->it_interval.tv_nsec);
6751 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6752 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6754 unlock_user_struct(target_itspec, target_addr, 1);
6755 return 0;
6758 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6759 struct itimerspec *host_its)
6761 struct target_itimerspec *target_itspec;
6763 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6764 return -TARGET_EFAULT;
6767 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6768 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6770 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6771 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6773 unlock_user_struct(target_itspec, target_addr, 0);
6774 return 0;
/*
 * Copy an adjtimex(2) "struct timex" from guest memory into host format.
 * Only the fields present in both layouts are transferred, one by one.
 * Returns 0 on success, -TARGET_EFAULT if the guest address is bad.
 */
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    /* Read-only conversion: nothing to copy back on unlock. */
    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
/*
 * Write a host adjtimex(2) "struct timex" out to guest memory,
 * field by field (the mirror of target_to_host_timex above).
 * Returns 0 on success, -TARGET_EFAULT if the guest address is bad.
 */
static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    /* Write path: copy=1 pushes the converted struct back to the guest. */
    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
6848 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6849 abi_ulong target_addr)
6851 struct target_sigevent *target_sevp;
6853 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6854 return -TARGET_EFAULT;
6857 /* This union is awkward on 64 bit systems because it has a 32 bit
6858 * integer and a pointer in it; we follow the conversion approach
6859 * used for handling sigval types in signal.c so the guest should get
6860 * the correct value back even if we did a 64 bit byteswap and it's
6861 * using the 32 bit integer.
6863 host_sevp->sigev_value.sival_ptr =
6864 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6865 host_sevp->sigev_signo =
6866 target_to_host_signal(tswap32(target_sevp->sigev_signo));
6867 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6868 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6870 unlock_user_struct(target_sevp, target_addr, 1);
6871 return 0;
#if defined(TARGET_NR_mlockall)
/* Translate the guest's mlockall() flag bits into host MCL_* bits. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int host_flags = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        host_flags |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        host_flags |= MCL_FUTURE;
    }
    return host_flags;
}
#endif
/*
 * Fill the guest's stat64 structure (or plain target_stat, when the
 * target has no separate 64-bit layout) at TARGET_ADDR from the host
 * "struct stat".  Returns 0 on success, -TARGET_EFAULT on a bad
 * guest address.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    /* 32-bit ARM EABI processes use their own padded stat64 layout. */
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Some targets carry the inode in a second, differently-sized
         * field as well; fill both. */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
6952 /* ??? Using host futex calls even when target atomic operations
6953 are not really atomic probably breaks things. However implementing
6954 futexes locally would make futexes shared between multiple processes
6955 tricky. However they're probably useless because guest atomic
6956 operations won't work either. */
6957 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6958 target_ulong uaddr2, int val3)
6960 struct timespec ts, *pts;
6961 int base_op;
6963 /* ??? We assume FUTEX_* constants are the same on both host
6964 and target. */
6965 #ifdef FUTEX_CMD_MASK
6966 base_op = op & FUTEX_CMD_MASK;
6967 #else
6968 base_op = op;
6969 #endif
6970 switch (base_op) {
6971 case FUTEX_WAIT:
6972 case FUTEX_WAIT_BITSET:
6973 if (timeout) {
6974 pts = &ts;
6975 target_to_host_timespec(pts, timeout);
6976 } else {
6977 pts = NULL;
6979 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6980 pts, NULL, val3));
6981 case FUTEX_WAKE:
6982 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6983 case FUTEX_FD:
6984 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6985 case FUTEX_REQUEUE:
6986 case FUTEX_CMP_REQUEUE:
6987 case FUTEX_WAKE_OP:
6988 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6989 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6990 But the prototype takes a `struct timespec *'; insert casts
6991 to satisfy the compiler. We do not need to tswap TIMEOUT
6992 since it's not compared to guest memory. */
6993 pts = (struct timespec *)(uintptr_t) timeout;
6994 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6995 g2h(uaddr2),
6996 (base_op == FUTEX_CMP_REQUEUE
6997 ? tswap32(val3)
6998 : val3)));
6999 default:
7000 return -TARGET_ENOSYS;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * name_to_handle_at(2): resolve PATHNAME to an opaque file handle and
 * write it (plus the mount id) back to guest memory.
 * HANDLE points at a guest struct file_handle whose handle_bytes field
 * gives the space available for the opaque data.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* handle_bytes is the first field of the guest structure. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    /* NOTE(review): size comes straight from the guest; a huge value
     * could wrap total_size — confirm the kernel bounds handle_bytes. */
    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    /* Copy the opaque blob verbatim, then byteswap the two header fields. */
    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * open_by_handle_at(2): read the guest's file_handle, convert its
 * header to host byte order and hand it to the host kernel.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    abi_long ret;
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;

    /* handle_bytes is the first field of the guest structure. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }
    total_size = sizeof(struct file_handle) + size;

    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    /* Duplicate the opaque handle, fixing up the two header fields. */
    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
7091 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7093 /* signalfd siginfo conversion */
/*
 * Convert one signalfd_siginfo record from host to target format.
 * The only caller converts in place (tinfo == info), but every source
 * field is read through INFO so the function is also correct for
 * distinct buffers — the original read tinfo->ssi_signo/ssi_code/
 * ssi_errno before writing them, which is only defined when the two
 * pointers alias.
 */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    if (info->ssi_signo == SIGBUS &&
        (info->ssi_code == BUS_MCEERR_AR ||
         info->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    tinfo->ssi_errno = tswap32(info->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
7133 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
7135 int i;
7137 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
7138 host_to_target_signalfd_siginfo(buf + i, buf + i);
7141 return len;
7144 static TargetFdTrans target_signalfd_trans = {
7145 .host_to_target_data = host_to_target_data_signalfd,
7148 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7150 int host_flags;
7151 target_sigset_t *target_mask;
7152 sigset_t host_mask;
7153 abi_long ret;
7155 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7156 return -TARGET_EINVAL;
7158 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7159 return -TARGET_EFAULT;
7162 target_to_host_sigset(&host_mask, target_mask);
7164 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7166 ret = get_errno(signalfd(fd, &host_mask, host_flags));
7167 if (ret >= 0) {
7168 fd_trans_register(ret, &target_signalfd_trans);
7171 unlock_user_struct(target_mask, mask, 0);
7173 return ret;
7175 #endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    int sig;

    if (WIFSIGNALED(status)) {
        /* Terminating signal lives in the low 7 bits of the status. */
        sig = host_to_target_signal(WTERMSIG(status));
        return sig | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal is reported in bits 8..15. */
        sig = host_to_target_signal(WSTOPSIG(status));
        return (sig << 8) | (status & 0xff);
    }
    return status;
}
/*
 * Back the guest's /proc/self/cmdline with the host file, minus the
 * first NUL-terminated word (the path to the qemu binary itself).
 * Streams the host file into FD in 128-byte chunks.
 * Returns 0 on success, -1 (with errno set) on read/write failure.
 */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            /* Preserve the read() errno across close(). */
            int e = errno;
            fd_orig = close(fd_orig);
            errno = e;
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
            /* If no NUL in this chunk, the whole chunk still belongs to
             * the first word and is discarded on the next iteration. */
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                int e = errno;
                close(fd_orig);
                errno = e;
                return -1;
            }
        }
    }

    return close(fd_orig);
}
7241 static int open_self_maps(void *cpu_env, int fd)
7243 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7244 TaskState *ts = cpu->opaque;
7245 FILE *fp;
7246 char *line = NULL;
7247 size_t len = 0;
7248 ssize_t read;
7250 fp = fopen("/proc/self/maps", "r");
7251 if (fp == NULL) {
7252 return -1;
7255 while ((read = getline(&line, &len, fp)) != -1) {
7256 int fields, dev_maj, dev_min, inode;
7257 uint64_t min, max, offset;
7258 char flag_r, flag_w, flag_x, flag_p;
7259 char path[512] = "";
7260 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7261 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7262 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
7264 if ((fields < 10) || (fields > 11)) {
7265 continue;
7267 if (h2g_valid(min)) {
7268 int flags = page_get_flags(h2g(min));
7269 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
7270 if (page_check_range(h2g(min), max - min, flags) == -1) {
7271 continue;
7273 if (h2g(min) == ts->info->stack_limit) {
7274 pstrcpy(path, sizeof(path), " [stack]");
7276 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
7277 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7278 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7279 flag_x, flag_p, offset, dev_maj, dev_min, inode,
7280 path[0] ? " " : "", path);
7284 free(line);
7285 fclose(fp);
7287 return 0;
7290 static int open_self_stat(void *cpu_env, int fd)
7292 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7293 TaskState *ts = cpu->opaque;
7294 abi_ulong start_stack = ts->info->start_stack;
7295 int i;
7297 for (i = 0; i < 44; i++) {
7298 char buf[128];
7299 int len;
7300 uint64_t val = 0;
7302 if (i == 0) {
7303 /* pid */
7304 val = getpid();
7305 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7306 } else if (i == 1) {
7307 /* app name */
7308 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
7309 } else if (i == 27) {
7310 /* stack bottom */
7311 val = start_stack;
7312 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7313 } else {
7314 /* for the rest, there is MasterCard */
7315 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
7318 len = strlen(buf);
7319 if (write(fd, buf, len) != len) {
7320 return -1;
7324 return 0;
/*
 * Back the guest's /proc/self/auxv: copy the saved auxiliary vector
 * from the guest stack into FD, then rewind FD to the start.
 * Always returns 0 (lock or write failures are silently ignored).
 */
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        /* NOTE(review): ptr/len were advanced by the loop, so this unlock
         * covers a shorter span than was locked — harmless for a
         * read-only lock, but worth confirming. */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
/*
 * Return 1 iff FILENAME names the /proc entry ENTRY of the current
 * process, i.e. "/proc/self/<entry>" or "/proc/<our pid>/<entry>";
 * 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (strncmp(filename, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    filename += strlen("/proc/");

    if (strncmp(filename, "self/", strlen("self/")) == 0) {
        filename += strlen("self/");
    } else if (*filename >= '1' && *filename <= '9') {
        /* Numeric pid: it only counts if it is our own pid. */
        char myself[80];
        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(filename, myself, strlen(myself)) != 0) {
            return 0;
        }
        filename += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(filename, entry) == 0;
}
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-path comparison helper for the fake_open table. */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}

/*
 * Back the guest's /proc/net/route when host and guest endianness
 * differ: copy the file with the 32-bit address fields byteswapped.
 * Returns 0 on success, -1 if the host file cannot be opened or is empty.
 */
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    read = getline(&line, &len, fp);
    if (read == -1) {
        /* Empty or unreadable file: the original passed the NULL/stale
         * line buffer straight to dprintf here. */
        free(line);
        fclose(fp);
        return -1;
    }
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
               iface, &dest, &gw, &flags, &refcnt, &use, &metric,
               &mask, &mtu, &window, &irtt);
        /* Only the three address words need byteswapping. */
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif
/*
 * openat(2) with /proc interception: requests for the current process's
 * /proc entries that QEMU must fake (maps, stat, auxv, cmdline, and —
 * on cross-endian setups — /proc/net/route) are answered from an
 * unlinked temporary file filled by the matching generator; /proc/self/exe
 * is redirected to the guest binary; everything else goes to the host.
 */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;                     /* entry (or full path) to match */
        int (*fill)(void *cpu_env, int fd);       /* writes the faked content into fd */
        int (*cmp)(const char *s1, const char *s2); /* how to match pathname */
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    if (is_proc_myself(pathname, "exe")) {
        /* Prefer the fd the kernel gave us for the guest executable. */
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* Unlink immediately: the fd keeps the file alive. */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        /* Rewind so the guest reads from the beginning. */
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
7485 #define TIMER_MAGIC 0x0caf0000
7486 #define TIMER_MAGIC_MASK 0xffff0000
7488 /* Convert QEMU provided timer ID back to internal 16bit index format */
7489 static target_timer_t get_timer_id(abi_long arg)
7491 target_timer_t timerid = arg;
7493 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7494 return -TARGET_EINVAL;
7497 timerid &= 0xffff;
7499 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7500 return -TARGET_EINVAL;
7503 return timerid;
7506 /* do_syscall() should always have a single exit point at the end so
7507 that actions, such as logging of syscall results, can be performed.
7508 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7509 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
7510 abi_long arg2, abi_long arg3, abi_long arg4,
7511 abi_long arg5, abi_long arg6, abi_long arg7,
7512 abi_long arg8)
7514 CPUState *cpu = ENV_GET_CPU(cpu_env);
7515 abi_long ret;
7516 struct stat st;
7517 struct statfs stfs;
7518 void *p;
7520 #if defined(DEBUG_ERESTARTSYS)
7521 /* Debug-only code for exercising the syscall-restart code paths
7522 * in the per-architecture cpu main loops: restart every syscall
7523 * the guest makes once before letting it through.
7526 static int flag;
7528 flag = !flag;
7529 if (flag) {
7530 return -TARGET_ERESTARTSYS;
7533 #endif
7535 #ifdef DEBUG
7536 gemu_log("syscall %d", num);
7537 #endif
7538 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
7539 if(do_strace)
7540 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
7542 switch(num) {
7543 case TARGET_NR_exit:
7544 /* In old applications this may be used to implement _exit(2).
7545 However in threaded applictions it is used for thread termination,
7546 and _exit_group is used for application termination.
7547 Do thread termination if we have more then one thread. */
7549 if (block_signals()) {
7550 ret = -TARGET_ERESTARTSYS;
7551 break;
7554 cpu_list_lock();
7556 if (CPU_NEXT(first_cpu)) {
7557 TaskState *ts;
7559 /* Remove the CPU from the list. */
7560 QTAILQ_REMOVE(&cpus, cpu, node);
7562 cpu_list_unlock();
7564 ts = cpu->opaque;
7565 if (ts->child_tidptr) {
7566 put_user_u32(0, ts->child_tidptr);
7567 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7568 NULL, NULL, 0);
7570 thread_cpu = NULL;
7571 object_unref(OBJECT(cpu));
7572 g_free(ts);
7573 rcu_unregister_thread();
7574 pthread_exit(NULL);
7577 cpu_list_unlock();
7578 #ifdef TARGET_GPROF
7579 _mcleanup();
7580 #endif
7581 gdb_exit(cpu_env, arg1);
7582 _exit(arg1);
7583 ret = 0; /* avoid warning */
7584 break;
7585 case TARGET_NR_read:
7586 if (arg3 == 0)
7587 ret = 0;
7588 else {
7589 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7590 goto efault;
7591 ret = get_errno(safe_read(arg1, p, arg3));
7592 if (ret >= 0 &&
7593 fd_trans_host_to_target_data(arg1)) {
7594 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7596 unlock_user(p, arg2, ret);
7598 break;
7599 case TARGET_NR_write:
7600 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7601 goto efault;
7602 ret = get_errno(safe_write(arg1, p, arg3));
7603 unlock_user(p, arg2, 0);
7604 break;
7605 #ifdef TARGET_NR_open
7606 case TARGET_NR_open:
7607 if (!(p = lock_user_string(arg1)))
7608 goto efault;
7609 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7610 target_to_host_bitmask(arg2, fcntl_flags_tbl),
7611 arg3));
7612 fd_trans_unregister(ret);
7613 unlock_user(p, arg1, 0);
7614 break;
7615 #endif
7616 case TARGET_NR_openat:
7617 if (!(p = lock_user_string(arg2)))
7618 goto efault;
7619 ret = get_errno(do_openat(cpu_env, arg1, p,
7620 target_to_host_bitmask(arg3, fcntl_flags_tbl),
7621 arg4));
7622 fd_trans_unregister(ret);
7623 unlock_user(p, arg2, 0);
7624 break;
7625 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7626 case TARGET_NR_name_to_handle_at:
7627 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7628 break;
7629 #endif
7630 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7631 case TARGET_NR_open_by_handle_at:
7632 ret = do_open_by_handle_at(arg1, arg2, arg3);
7633 fd_trans_unregister(ret);
7634 break;
7635 #endif
7636 case TARGET_NR_close:
7637 fd_trans_unregister(arg1);
7638 ret = get_errno(close(arg1));
7639 break;
7640 case TARGET_NR_brk:
7641 ret = do_brk(arg1);
7642 break;
7643 #ifdef TARGET_NR_fork
7644 case TARGET_NR_fork:
7645 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
7646 break;
7647 #endif
7648 #ifdef TARGET_NR_waitpid
7649 case TARGET_NR_waitpid:
7651 int status;
7652 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7653 if (!is_error(ret) && arg2 && ret
7654 && put_user_s32(host_to_target_waitstatus(status), arg2))
7655 goto efault;
7657 break;
7658 #endif
7659 #ifdef TARGET_NR_waitid
7660 case TARGET_NR_waitid:
7662 siginfo_t info;
7663 info.si_pid = 0;
7664 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7665 if (!is_error(ret) && arg3 && info.si_pid != 0) {
7666 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7667 goto efault;
7668 host_to_target_siginfo(p, &info);
7669 unlock_user(p, arg3, sizeof(target_siginfo_t));
7672 break;
7673 #endif
7674 #ifdef TARGET_NR_creat /* not on alpha */
7675 case TARGET_NR_creat:
7676 if (!(p = lock_user_string(arg1)))
7677 goto efault;
7678 ret = get_errno(creat(p, arg2));
7679 fd_trans_unregister(ret);
7680 unlock_user(p, arg1, 0);
7681 break;
7682 #endif
7683 #ifdef TARGET_NR_link
7684 case TARGET_NR_link:
7686 void * p2;
7687 p = lock_user_string(arg1);
7688 p2 = lock_user_string(arg2);
7689 if (!p || !p2)
7690 ret = -TARGET_EFAULT;
7691 else
7692 ret = get_errno(link(p, p2));
7693 unlock_user(p2, arg2, 0);
7694 unlock_user(p, arg1, 0);
7696 break;
7697 #endif
7698 #if defined(TARGET_NR_linkat)
7699 case TARGET_NR_linkat:
7701 void * p2 = NULL;
7702 if (!arg2 || !arg4)
7703 goto efault;
7704 p = lock_user_string(arg2);
7705 p2 = lock_user_string(arg4);
7706 if (!p || !p2)
7707 ret = -TARGET_EFAULT;
7708 else
7709 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7710 unlock_user(p, arg2, 0);
7711 unlock_user(p2, arg4, 0);
7713 break;
7714 #endif
7715 #ifdef TARGET_NR_unlink
7716 case TARGET_NR_unlink:
7717 if (!(p = lock_user_string(arg1)))
7718 goto efault;
7719 ret = get_errno(unlink(p));
7720 unlock_user(p, arg1, 0);
7721 break;
7722 #endif
7723 #if defined(TARGET_NR_unlinkat)
7724 case TARGET_NR_unlinkat:
7725 if (!(p = lock_user_string(arg2)))
7726 goto efault;
7727 ret = get_errno(unlinkat(arg1, p, arg3));
7728 unlock_user(p, arg2, 0);
7729 break;
7730 #endif
7731 case TARGET_NR_execve:
7733 char **argp, **envp;
7734 int argc, envc;
7735 abi_ulong gp;
7736 abi_ulong guest_argp;
7737 abi_ulong guest_envp;
7738 abi_ulong addr;
7739 char **q;
7740 int total_size = 0;
7742 argc = 0;
7743 guest_argp = arg2;
7744 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7745 if (get_user_ual(addr, gp))
7746 goto efault;
7747 if (!addr)
7748 break;
7749 argc++;
7751 envc = 0;
7752 guest_envp = arg3;
7753 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7754 if (get_user_ual(addr, gp))
7755 goto efault;
7756 if (!addr)
7757 break;
7758 envc++;
7761 argp = alloca((argc + 1) * sizeof(void *));
7762 envp = alloca((envc + 1) * sizeof(void *));
7764 for (gp = guest_argp, q = argp; gp;
7765 gp += sizeof(abi_ulong), q++) {
7766 if (get_user_ual(addr, gp))
7767 goto execve_efault;
7768 if (!addr)
7769 break;
7770 if (!(*q = lock_user_string(addr)))
7771 goto execve_efault;
7772 total_size += strlen(*q) + 1;
7774 *q = NULL;
7776 for (gp = guest_envp, q = envp; gp;
7777 gp += sizeof(abi_ulong), q++) {
7778 if (get_user_ual(addr, gp))
7779 goto execve_efault;
7780 if (!addr)
7781 break;
7782 if (!(*q = lock_user_string(addr)))
7783 goto execve_efault;
7784 total_size += strlen(*q) + 1;
7786 *q = NULL;
7788 if (!(p = lock_user_string(arg1)))
7789 goto execve_efault;
7790 /* Although execve() is not an interruptible syscall it is
7791 * a special case where we must use the safe_syscall wrapper:
7792 * if we allow a signal to happen before we make the host
7793 * syscall then we will 'lose' it, because at the point of
7794 * execve the process leaves QEMU's control. So we use the
7795 * safe syscall wrapper to ensure that we either take the
7796 * signal as a guest signal, or else it does not happen
7797 * before the execve completes and makes it the other
7798 * program's problem.
7800 ret = get_errno(safe_execve(p, argp, envp));
7801 unlock_user(p, arg1, 0);
7803 goto execve_end;
7805 execve_efault:
7806 ret = -TARGET_EFAULT;
7808 execve_end:
7809 for (gp = guest_argp, q = argp; *q;
7810 gp += sizeof(abi_ulong), q++) {
7811 if (get_user_ual(addr, gp)
7812 || !addr)
7813 break;
7814 unlock_user(*q, addr, 0);
7816 for (gp = guest_envp, q = envp; *q;
7817 gp += sizeof(abi_ulong), q++) {
7818 if (get_user_ual(addr, gp)
7819 || !addr)
7820 break;
7821 unlock_user(*q, addr, 0);
7824 break;
7825 case TARGET_NR_chdir:
7826 if (!(p = lock_user_string(arg1)))
7827 goto efault;
7828 ret = get_errno(chdir(p));
7829 unlock_user(p, arg1, 0);
7830 break;
7831 #ifdef TARGET_NR_time
7832 case TARGET_NR_time:
7834 time_t host_time;
7835 ret = get_errno(time(&host_time));
7836 if (!is_error(ret)
7837 && arg1
7838 && put_user_sal(host_time, arg1))
7839 goto efault;
7841 break;
7842 #endif
7843 #ifdef TARGET_NR_mknod
7844 case TARGET_NR_mknod:
7845 if (!(p = lock_user_string(arg1)))
7846 goto efault;
7847 ret = get_errno(mknod(p, arg2, arg3));
7848 unlock_user(p, arg1, 0);
7849 break;
7850 #endif
7851 #if defined(TARGET_NR_mknodat)
7852 case TARGET_NR_mknodat:
7853 if (!(p = lock_user_string(arg2)))
7854 goto efault;
7855 ret = get_errno(mknodat(arg1, p, arg3, arg4));
7856 unlock_user(p, arg2, 0);
7857 break;
7858 #endif
7859 #ifdef TARGET_NR_chmod
7860 case TARGET_NR_chmod:
7861 if (!(p = lock_user_string(arg1)))
7862 goto efault;
7863 ret = get_errno(chmod(p, arg2));
7864 unlock_user(p, arg1, 0);
7865 break;
7866 #endif
7867 #ifdef TARGET_NR_break
7868 case TARGET_NR_break:
7869 goto unimplemented;
7870 #endif
7871 #ifdef TARGET_NR_oldstat
7872 case TARGET_NR_oldstat:
7873 goto unimplemented;
7874 #endif
7875 case TARGET_NR_lseek:
7876 ret = get_errno(lseek(arg1, arg2, arg3));
7877 break;
7878 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7879 /* Alpha specific */
7880 case TARGET_NR_getxpid:
7881 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7882 ret = get_errno(getpid());
7883 break;
7884 #endif
7885 #ifdef TARGET_NR_getpid
7886 case TARGET_NR_getpid:
7887 ret = get_errno(getpid());
7888 break;
7889 #endif
7890 case TARGET_NR_mount:
7892 /* need to look at the data field */
7893 void *p2, *p3;
7895 if (arg1) {
7896 p = lock_user_string(arg1);
7897 if (!p) {
7898 goto efault;
7900 } else {
7901 p = NULL;
7904 p2 = lock_user_string(arg2);
7905 if (!p2) {
7906 if (arg1) {
7907 unlock_user(p, arg1, 0);
7909 goto efault;
7912 if (arg3) {
7913 p3 = lock_user_string(arg3);
7914 if (!p3) {
7915 if (arg1) {
7916 unlock_user(p, arg1, 0);
7918 unlock_user(p2, arg2, 0);
7919 goto efault;
7921 } else {
7922 p3 = NULL;
7925 /* FIXME - arg5 should be locked, but it isn't clear how to
7926 * do that since it's not guaranteed to be a NULL-terminated
7927 * string.
7929 if (!arg5) {
7930 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7931 } else {
7932 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7934 ret = get_errno(ret);
7936 if (arg1) {
7937 unlock_user(p, arg1, 0);
7939 unlock_user(p2, arg2, 0);
7940 if (arg3) {
7941 unlock_user(p3, arg3, 0);
7944 break;
7945 #ifdef TARGET_NR_umount
7946 case TARGET_NR_umount:
7947 if (!(p = lock_user_string(arg1)))
7948 goto efault;
7949 ret = get_errno(umount(p));
7950 unlock_user(p, arg1, 0);
7951 break;
7952 #endif
7953 #ifdef TARGET_NR_stime /* not on alpha */
7954 case TARGET_NR_stime:
7956 time_t host_time;
7957 if (get_user_sal(host_time, arg1))
7958 goto efault;
7959 ret = get_errno(stime(&host_time));
7961 break;
7962 #endif
7963 case TARGET_NR_ptrace:
7964 goto unimplemented;
7965 #ifdef TARGET_NR_alarm /* not on alpha */
7966 case TARGET_NR_alarm:
7967 ret = alarm(arg1);
7968 break;
7969 #endif
7970 #ifdef TARGET_NR_oldfstat
7971 case TARGET_NR_oldfstat:
7972 goto unimplemented;
7973 #endif
7974 #ifdef TARGET_NR_pause /* not on alpha */
7975 case TARGET_NR_pause:
7976 if (!block_signals()) {
7977 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7979 ret = -TARGET_EINTR;
7980 break;
7981 #endif
7982 #ifdef TARGET_NR_utime
7983 case TARGET_NR_utime:
7985 struct utimbuf tbuf, *host_tbuf;
7986 struct target_utimbuf *target_tbuf;
7987 if (arg2) {
7988 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7989 goto efault;
7990 tbuf.actime = tswapal(target_tbuf->actime);
7991 tbuf.modtime = tswapal(target_tbuf->modtime);
7992 unlock_user_struct(target_tbuf, arg2, 0);
7993 host_tbuf = &tbuf;
7994 } else {
7995 host_tbuf = NULL;
7997 if (!(p = lock_user_string(arg1)))
7998 goto efault;
7999 ret = get_errno(utime(p, host_tbuf));
8000 unlock_user(p, arg1, 0);
8002 break;
8003 #endif
8004 #ifdef TARGET_NR_utimes
8005 case TARGET_NR_utimes:
8007 struct timeval *tvp, tv[2];
8008 if (arg2) {
8009 if (copy_from_user_timeval(&tv[0], arg2)
8010 || copy_from_user_timeval(&tv[1],
8011 arg2 + sizeof(struct target_timeval)))
8012 goto efault;
8013 tvp = tv;
8014 } else {
8015 tvp = NULL;
8017 if (!(p = lock_user_string(arg1)))
8018 goto efault;
8019 ret = get_errno(utimes(p, tvp));
8020 unlock_user(p, arg1, 0);
8022 break;
8023 #endif
8024 #if defined(TARGET_NR_futimesat)
8025 case TARGET_NR_futimesat:
8027 struct timeval *tvp, tv[2];
8028 if (arg3) {
8029 if (copy_from_user_timeval(&tv[0], arg3)
8030 || copy_from_user_timeval(&tv[1],
8031 arg3 + sizeof(struct target_timeval)))
8032 goto efault;
8033 tvp = tv;
8034 } else {
8035 tvp = NULL;
8037 if (!(p = lock_user_string(arg2)))
8038 goto efault;
8039 ret = get_errno(futimesat(arg1, path(p), tvp));
8040 unlock_user(p, arg2, 0);
8042 break;
8043 #endif
8044 #ifdef TARGET_NR_stty
8045 case TARGET_NR_stty:
8046 goto unimplemented;
8047 #endif
8048 #ifdef TARGET_NR_gtty
8049 case TARGET_NR_gtty:
8050 goto unimplemented;
8051 #endif
8052 #ifdef TARGET_NR_access
8053 case TARGET_NR_access:
8054 if (!(p = lock_user_string(arg1)))
8055 goto efault;
8056 ret = get_errno(access(path(p), arg2));
8057 unlock_user(p, arg1, 0);
8058 break;
8059 #endif
8060 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8061 case TARGET_NR_faccessat:
8062 if (!(p = lock_user_string(arg2)))
8063 goto efault;
8064 ret = get_errno(faccessat(arg1, p, arg3, 0));
8065 unlock_user(p, arg2, 0);
8066 break;
8067 #endif
8068 #ifdef TARGET_NR_nice /* not on alpha */
8069 case TARGET_NR_nice:
8070 ret = get_errno(nice(arg1));
8071 break;
8072 #endif
8073 #ifdef TARGET_NR_ftime
8074 case TARGET_NR_ftime:
8075 goto unimplemented;
8076 #endif
8077 case TARGET_NR_sync:
8078 sync();
8079 ret = 0;
8080 break;
8081 case TARGET_NR_kill:
8082 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8083 break;
8084 #ifdef TARGET_NR_rename
8085 case TARGET_NR_rename:
8087 void *p2;
8088 p = lock_user_string(arg1);
8089 p2 = lock_user_string(arg2);
8090 if (!p || !p2)
8091 ret = -TARGET_EFAULT;
8092 else
8093 ret = get_errno(rename(p, p2));
8094 unlock_user(p2, arg2, 0);
8095 unlock_user(p, arg1, 0);
8097 break;
8098 #endif
8099 #if defined(TARGET_NR_renameat)
8100 case TARGET_NR_renameat:
8102 void *p2;
8103 p = lock_user_string(arg2);
8104 p2 = lock_user_string(arg4);
8105 if (!p || !p2)
8106 ret = -TARGET_EFAULT;
8107 else
8108 ret = get_errno(renameat(arg1, p, arg3, p2));
8109 unlock_user(p2, arg4, 0);
8110 unlock_user(p, arg2, 0);
8112 break;
8113 #endif
8114 #ifdef TARGET_NR_mkdir
8115 case TARGET_NR_mkdir:
8116 if (!(p = lock_user_string(arg1)))
8117 goto efault;
8118 ret = get_errno(mkdir(p, arg2));
8119 unlock_user(p, arg1, 0);
8120 break;
8121 #endif
8122 #if defined(TARGET_NR_mkdirat)
8123 case TARGET_NR_mkdirat:
8124 if (!(p = lock_user_string(arg2)))
8125 goto efault;
8126 ret = get_errno(mkdirat(arg1, p, arg3));
8127 unlock_user(p, arg2, 0);
8128 break;
8129 #endif
8130 #ifdef TARGET_NR_rmdir
8131 case TARGET_NR_rmdir:
8132 if (!(p = lock_user_string(arg1)))
8133 goto efault;
8134 ret = get_errno(rmdir(p));
8135 unlock_user(p, arg1, 0);
8136 break;
8137 #endif
8138 case TARGET_NR_dup:
8139 ret = get_errno(dup(arg1));
8140 if (ret >= 0) {
8141 fd_trans_dup(arg1, ret);
8143 break;
8144 #ifdef TARGET_NR_pipe
8145 case TARGET_NR_pipe:
8146 ret = do_pipe(cpu_env, arg1, 0, 0);
8147 break;
8148 #endif
8149 #ifdef TARGET_NR_pipe2
8150 case TARGET_NR_pipe2:
8151 ret = do_pipe(cpu_env, arg1,
8152 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8153 break;
8154 #endif
8155 case TARGET_NR_times:
8157 struct target_tms *tmsp;
8158 struct tms tms;
8159 ret = get_errno(times(&tms));
8160 if (arg1) {
8161 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8162 if (!tmsp)
8163 goto efault;
8164 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8165 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8166 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8167 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8169 if (!is_error(ret))
8170 ret = host_to_target_clock_t(ret);
8172 break;
8173 #ifdef TARGET_NR_prof
8174 case TARGET_NR_prof:
8175 goto unimplemented;
8176 #endif
8177 #ifdef TARGET_NR_signal
8178 case TARGET_NR_signal:
8179 goto unimplemented;
8180 #endif
8181 case TARGET_NR_acct:
8182 if (arg1 == 0) {
8183 ret = get_errno(acct(NULL));
8184 } else {
8185 if (!(p = lock_user_string(arg1)))
8186 goto efault;
8187 ret = get_errno(acct(path(p)));
8188 unlock_user(p, arg1, 0);
8190 break;
8191 #ifdef TARGET_NR_umount2
8192 case TARGET_NR_umount2:
8193 if (!(p = lock_user_string(arg1)))
8194 goto efault;
8195 ret = get_errno(umount2(p, arg2));
8196 unlock_user(p, arg1, 0);
8197 break;
8198 #endif
8199 #ifdef TARGET_NR_lock
8200 case TARGET_NR_lock:
8201 goto unimplemented;
8202 #endif
8203 case TARGET_NR_ioctl:
8204 ret = do_ioctl(arg1, arg2, arg3);
8205 break;
8206 case TARGET_NR_fcntl:
8207 ret = do_fcntl(arg1, arg2, arg3);
8208 break;
8209 #ifdef TARGET_NR_mpx
8210 case TARGET_NR_mpx:
8211 goto unimplemented;
8212 #endif
8213 case TARGET_NR_setpgid:
8214 ret = get_errno(setpgid(arg1, arg2));
8215 break;
8216 #ifdef TARGET_NR_ulimit
8217 case TARGET_NR_ulimit:
8218 goto unimplemented;
8219 #endif
8220 #ifdef TARGET_NR_oldolduname
8221 case TARGET_NR_oldolduname:
8222 goto unimplemented;
8223 #endif
8224 case TARGET_NR_umask:
8225 ret = get_errno(umask(arg1));
8226 break;
8227 case TARGET_NR_chroot:
8228 if (!(p = lock_user_string(arg1)))
8229 goto efault;
8230 ret = get_errno(chroot(p));
8231 unlock_user(p, arg1, 0);
8232 break;
8233 #ifdef TARGET_NR_ustat
8234 case TARGET_NR_ustat:
8235 goto unimplemented;
8236 #endif
8237 #ifdef TARGET_NR_dup2
8238 case TARGET_NR_dup2:
8239 ret = get_errno(dup2(arg1, arg2));
8240 if (ret >= 0) {
8241 fd_trans_dup(arg1, arg2);
8243 break;
8244 #endif
8245 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8246 case TARGET_NR_dup3:
8247 ret = get_errno(dup3(arg1, arg2, arg3));
8248 if (ret >= 0) {
8249 fd_trans_dup(arg1, arg2);
8251 break;
8252 #endif
8253 #ifdef TARGET_NR_getppid /* not on alpha */
8254 case TARGET_NR_getppid:
8255 ret = get_errno(getppid());
8256 break;
8257 #endif
8258 #ifdef TARGET_NR_getpgrp
8259 case TARGET_NR_getpgrp:
8260 ret = get_errno(getpgrp());
8261 break;
8262 #endif
8263 case TARGET_NR_setsid:
8264 ret = get_errno(setsid());
8265 break;
8266 #ifdef TARGET_NR_sigaction
8267 case TARGET_NR_sigaction:
8269 #if defined(TARGET_ALPHA)
8270 struct target_sigaction act, oact, *pact = 0;
8271 struct target_old_sigaction *old_act;
8272 if (arg2) {
8273 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8274 goto efault;
8275 act._sa_handler = old_act->_sa_handler;
8276 target_siginitset(&act.sa_mask, old_act->sa_mask);
8277 act.sa_flags = old_act->sa_flags;
8278 act.sa_restorer = 0;
8279 unlock_user_struct(old_act, arg2, 0);
8280 pact = &act;
8282 ret = get_errno(do_sigaction(arg1, pact, &oact));
8283 if (!is_error(ret) && arg3) {
8284 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8285 goto efault;
8286 old_act->_sa_handler = oact._sa_handler;
8287 old_act->sa_mask = oact.sa_mask.sig[0];
8288 old_act->sa_flags = oact.sa_flags;
8289 unlock_user_struct(old_act, arg3, 1);
8291 #elif defined(TARGET_MIPS)
8292 struct target_sigaction act, oact, *pact, *old_act;
8294 if (arg2) {
8295 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8296 goto efault;
8297 act._sa_handler = old_act->_sa_handler;
8298 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8299 act.sa_flags = old_act->sa_flags;
8300 unlock_user_struct(old_act, arg2, 0);
8301 pact = &act;
8302 } else {
8303 pact = NULL;
8306 ret = get_errno(do_sigaction(arg1, pact, &oact));
8308 if (!is_error(ret) && arg3) {
8309 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8310 goto efault;
8311 old_act->_sa_handler = oact._sa_handler;
8312 old_act->sa_flags = oact.sa_flags;
8313 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8314 old_act->sa_mask.sig[1] = 0;
8315 old_act->sa_mask.sig[2] = 0;
8316 old_act->sa_mask.sig[3] = 0;
8317 unlock_user_struct(old_act, arg3, 1);
8319 #else
8320 struct target_old_sigaction *old_act;
8321 struct target_sigaction act, oact, *pact;
8322 if (arg2) {
8323 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8324 goto efault;
8325 act._sa_handler = old_act->_sa_handler;
8326 target_siginitset(&act.sa_mask, old_act->sa_mask);
8327 act.sa_flags = old_act->sa_flags;
8328 act.sa_restorer = old_act->sa_restorer;
8329 unlock_user_struct(old_act, arg2, 0);
8330 pact = &act;
8331 } else {
8332 pact = NULL;
8334 ret = get_errno(do_sigaction(arg1, pact, &oact));
8335 if (!is_error(ret) && arg3) {
8336 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8337 goto efault;
8338 old_act->_sa_handler = oact._sa_handler;
8339 old_act->sa_mask = oact.sa_mask.sig[0];
8340 old_act->sa_flags = oact.sa_flags;
8341 old_act->sa_restorer = oact.sa_restorer;
8342 unlock_user_struct(old_act, arg3, 1);
8344 #endif
8346 break;
8347 #endif
8348 case TARGET_NR_rt_sigaction:
8350 #if defined(TARGET_ALPHA)
8351 struct target_sigaction act, oact, *pact = 0;
8352 struct target_rt_sigaction *rt_act;
8354 if (arg4 != sizeof(target_sigset_t)) {
8355 ret = -TARGET_EINVAL;
8356 break;
8358 if (arg2) {
8359 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8360 goto efault;
8361 act._sa_handler = rt_act->_sa_handler;
8362 act.sa_mask = rt_act->sa_mask;
8363 act.sa_flags = rt_act->sa_flags;
8364 act.sa_restorer = arg5;
8365 unlock_user_struct(rt_act, arg2, 0);
8366 pact = &act;
8368 ret = get_errno(do_sigaction(arg1, pact, &oact));
8369 if (!is_error(ret) && arg3) {
8370 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8371 goto efault;
8372 rt_act->_sa_handler = oact._sa_handler;
8373 rt_act->sa_mask = oact.sa_mask;
8374 rt_act->sa_flags = oact.sa_flags;
8375 unlock_user_struct(rt_act, arg3, 1);
8377 #else
8378 struct target_sigaction *act;
8379 struct target_sigaction *oact;
8381 if (arg4 != sizeof(target_sigset_t)) {
8382 ret = -TARGET_EINVAL;
8383 break;
8385 if (arg2) {
8386 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
8387 goto efault;
8388 } else
8389 act = NULL;
8390 if (arg3) {
8391 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8392 ret = -TARGET_EFAULT;
8393 goto rt_sigaction_fail;
8395 } else
8396 oact = NULL;
8397 ret = get_errno(do_sigaction(arg1, act, oact));
8398 rt_sigaction_fail:
8399 if (act)
8400 unlock_user_struct(act, arg2, 0);
8401 if (oact)
8402 unlock_user_struct(oact, arg3, 1);
8403 #endif
8405 break;
8406 #ifdef TARGET_NR_sgetmask /* not on alpha */
8407 case TARGET_NR_sgetmask:
8409 sigset_t cur_set;
8410 abi_ulong target_set;
8411 ret = do_sigprocmask(0, NULL, &cur_set);
8412 if (!ret) {
8413 host_to_target_old_sigset(&target_set, &cur_set);
8414 ret = target_set;
8417 break;
8418 #endif
8419 #ifdef TARGET_NR_ssetmask /* not on alpha */
8420 case TARGET_NR_ssetmask:
8422 sigset_t set, oset, cur_set;
8423 abi_ulong target_set = arg1;
8424 /* We only have one word of the new mask so we must read
8425 * the rest of it with do_sigprocmask() and OR in this word.
8426 * We are guaranteed that a do_sigprocmask() that only queries
8427 * the signal mask will not fail.
8429 ret = do_sigprocmask(0, NULL, &cur_set);
8430 assert(!ret);
8431 target_to_host_old_sigset(&set, &target_set);
8432 sigorset(&set, &set, &cur_set);
8433 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8434 if (!ret) {
8435 host_to_target_old_sigset(&target_set, &oset);
8436 ret = target_set;
8439 break;
8440 #endif
8441 #ifdef TARGET_NR_sigprocmask
8442 case TARGET_NR_sigprocmask:
8444 #if defined(TARGET_ALPHA)
8445 sigset_t set, oldset;
8446 abi_ulong mask;
8447 int how;
8449 switch (arg1) {
8450 case TARGET_SIG_BLOCK:
8451 how = SIG_BLOCK;
8452 break;
8453 case TARGET_SIG_UNBLOCK:
8454 how = SIG_UNBLOCK;
8455 break;
8456 case TARGET_SIG_SETMASK:
8457 how = SIG_SETMASK;
8458 break;
8459 default:
8460 ret = -TARGET_EINVAL;
8461 goto fail;
8463 mask = arg2;
8464 target_to_host_old_sigset(&set, &mask);
8466 ret = do_sigprocmask(how, &set, &oldset);
8467 if (!is_error(ret)) {
8468 host_to_target_old_sigset(&mask, &oldset);
8469 ret = mask;
8470 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8472 #else
8473 sigset_t set, oldset, *set_ptr;
8474 int how;
8476 if (arg2) {
8477 switch (arg1) {
8478 case TARGET_SIG_BLOCK:
8479 how = SIG_BLOCK;
8480 break;
8481 case TARGET_SIG_UNBLOCK:
8482 how = SIG_UNBLOCK;
8483 break;
8484 case TARGET_SIG_SETMASK:
8485 how = SIG_SETMASK;
8486 break;
8487 default:
8488 ret = -TARGET_EINVAL;
8489 goto fail;
8491 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8492 goto efault;
8493 target_to_host_old_sigset(&set, p);
8494 unlock_user(p, arg2, 0);
8495 set_ptr = &set;
8496 } else {
8497 how = 0;
8498 set_ptr = NULL;
8500 ret = do_sigprocmask(how, set_ptr, &oldset);
8501 if (!is_error(ret) && arg3) {
8502 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8503 goto efault;
8504 host_to_target_old_sigset(p, &oldset);
8505 unlock_user(p, arg3, sizeof(target_sigset_t));
8507 #endif
8509 break;
8510 #endif
8511 case TARGET_NR_rt_sigprocmask:
8513 int how = arg1;
8514 sigset_t set, oldset, *set_ptr;
8516 if (arg4 != sizeof(target_sigset_t)) {
8517 ret = -TARGET_EINVAL;
8518 break;
8521 if (arg2) {
8522 switch(how) {
8523 case TARGET_SIG_BLOCK:
8524 how = SIG_BLOCK;
8525 break;
8526 case TARGET_SIG_UNBLOCK:
8527 how = SIG_UNBLOCK;
8528 break;
8529 case TARGET_SIG_SETMASK:
8530 how = SIG_SETMASK;
8531 break;
8532 default:
8533 ret = -TARGET_EINVAL;
8534 goto fail;
8536 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8537 goto efault;
8538 target_to_host_sigset(&set, p);
8539 unlock_user(p, arg2, 0);
8540 set_ptr = &set;
8541 } else {
8542 how = 0;
8543 set_ptr = NULL;
8545 ret = do_sigprocmask(how, set_ptr, &oldset);
8546 if (!is_error(ret) && arg3) {
8547 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8548 goto efault;
8549 host_to_target_sigset(p, &oldset);
8550 unlock_user(p, arg3, sizeof(target_sigset_t));
8553 break;
8554 #ifdef TARGET_NR_sigpending
8555 case TARGET_NR_sigpending:
8557 sigset_t set;
8558 ret = get_errno(sigpending(&set));
8559 if (!is_error(ret)) {
8560 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8561 goto efault;
8562 host_to_target_old_sigset(p, &set);
8563 unlock_user(p, arg1, sizeof(target_sigset_t));
8566 break;
8567 #endif
8568 case TARGET_NR_rt_sigpending:
8570 sigset_t set;
8572 /* Yes, this check is >, not != like most. We follow the kernel's
8573 * logic and it does it like this because it implements
8574 * NR_sigpending through the same code path, and in that case
8575 * the old_sigset_t is smaller in size.
8577 if (arg2 > sizeof(target_sigset_t)) {
8578 ret = -TARGET_EINVAL;
8579 break;
8582 ret = get_errno(sigpending(&set));
8583 if (!is_error(ret)) {
8584 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8585 goto efault;
8586 host_to_target_sigset(p, &set);
8587 unlock_user(p, arg1, sizeof(target_sigset_t));
8590 break;
8591 #ifdef TARGET_NR_sigsuspend
8592 case TARGET_NR_sigsuspend:
8594 TaskState *ts = cpu->opaque;
8595 #if defined(TARGET_ALPHA)
8596 abi_ulong mask = arg1;
8597 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8598 #else
8599 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8600 goto efault;
8601 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8602 unlock_user(p, arg1, 0);
8603 #endif
8604 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8605 SIGSET_T_SIZE));
8606 if (ret != -TARGET_ERESTARTSYS) {
8607 ts->in_sigsuspend = 1;
8610 break;
8611 #endif
8612 case TARGET_NR_rt_sigsuspend:
8614 TaskState *ts = cpu->opaque;
8616 if (arg2 != sizeof(target_sigset_t)) {
8617 ret = -TARGET_EINVAL;
8618 break;
8620 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8621 goto efault;
8622 target_to_host_sigset(&ts->sigsuspend_mask, p);
8623 unlock_user(p, arg1, 0);
8624 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8625 SIGSET_T_SIZE));
8626 if (ret != -TARGET_ERESTARTSYS) {
8627 ts->in_sigsuspend = 1;
8630 break;
8631 case TARGET_NR_rt_sigtimedwait:
8633 sigset_t set;
8634 struct timespec uts, *puts;
8635 siginfo_t uinfo;
8637 if (arg4 != sizeof(target_sigset_t)) {
8638 ret = -TARGET_EINVAL;
8639 break;
8642 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8643 goto efault;
8644 target_to_host_sigset(&set, p);
8645 unlock_user(p, arg1, 0);
8646 if (arg3) {
8647 puts = &uts;
8648 target_to_host_timespec(puts, arg3);
8649 } else {
8650 puts = NULL;
8652 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8653 SIGSET_T_SIZE));
8654 if (!is_error(ret)) {
8655 if (arg2) {
8656 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8658 if (!p) {
8659 goto efault;
8661 host_to_target_siginfo(p, &uinfo);
8662 unlock_user(p, arg2, sizeof(target_siginfo_t));
8664 ret = host_to_target_signal(ret);
8667 break;
8668 case TARGET_NR_rt_sigqueueinfo:
8670 siginfo_t uinfo;
8672 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8673 if (!p) {
8674 goto efault;
8676 target_to_host_siginfo(&uinfo, p);
8677 unlock_user(p, arg1, 0);
8678 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8680 break;
8681 #ifdef TARGET_NR_sigreturn
8682 case TARGET_NR_sigreturn:
8683 if (block_signals()) {
8684 ret = -TARGET_ERESTARTSYS;
8685 } else {
8686 ret = do_sigreturn(cpu_env);
8688 break;
8689 #endif
8690 case TARGET_NR_rt_sigreturn:
8691 if (block_signals()) {
8692 ret = -TARGET_ERESTARTSYS;
8693 } else {
8694 ret = do_rt_sigreturn(cpu_env);
8696 break;
8697 case TARGET_NR_sethostname:
8698 if (!(p = lock_user_string(arg1)))
8699 goto efault;
8700 ret = get_errno(sethostname(p, arg2));
8701 unlock_user(p, arg1, 0);
8702 break;
8703 case TARGET_NR_setrlimit:
8705 int resource = target_to_host_resource(arg1);
8706 struct target_rlimit *target_rlim;
8707 struct rlimit rlim;
8708 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8709 goto efault;
8710 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8711 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8712 unlock_user_struct(target_rlim, arg2, 0);
8713 ret = get_errno(setrlimit(resource, &rlim));
8715 break;
8716 case TARGET_NR_getrlimit:
8718 int resource = target_to_host_resource(arg1);
8719 struct target_rlimit *target_rlim;
8720 struct rlimit rlim;
8722 ret = get_errno(getrlimit(resource, &rlim));
8723 if (!is_error(ret)) {
8724 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8725 goto efault;
8726 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8727 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8728 unlock_user_struct(target_rlim, arg2, 1);
8731 break;
8732 case TARGET_NR_getrusage:
8734 struct rusage rusage;
8735 ret = get_errno(getrusage(arg1, &rusage));
8736 if (!is_error(ret)) {
8737 ret = host_to_target_rusage(arg2, &rusage);
8740 break;
8741 case TARGET_NR_gettimeofday:
8743 struct timeval tv;
8744 ret = get_errno(gettimeofday(&tv, NULL));
8745 if (!is_error(ret)) {
8746 if (copy_to_user_timeval(arg1, &tv))
8747 goto efault;
8750 break;
8751 case TARGET_NR_settimeofday:
8753 struct timeval tv, *ptv = NULL;
8754 struct timezone tz, *ptz = NULL;
8756 if (arg1) {
8757 if (copy_from_user_timeval(&tv, arg1)) {
8758 goto efault;
8760 ptv = &tv;
8763 if (arg2) {
8764 if (copy_from_user_timezone(&tz, arg2)) {
8765 goto efault;
8767 ptz = &tz;
8770 ret = get_errno(settimeofday(ptv, ptz));
8772 break;
8773 #if defined(TARGET_NR_select)
8774 case TARGET_NR_select:
8775 #if defined(TARGET_WANT_NI_OLD_SELECT)
8776 /* some architectures used to have old_select here
8777 * but now ENOSYS it.
8779 ret = -TARGET_ENOSYS;
8780 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8781 ret = do_old_select(arg1);
8782 #else
8783 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8784 #endif
8785 break;
8786 #endif
8787 #ifdef TARGET_NR_pselect6
8788 case TARGET_NR_pselect6:
8790 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8791 fd_set rfds, wfds, efds;
8792 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8793 struct timespec ts, *ts_ptr;
8796 * The 6th arg is actually two args smashed together,
8797 * so we cannot use the C library.
8799 sigset_t set;
8800 struct {
8801 sigset_t *set;
8802 size_t size;
8803 } sig, *sig_ptr;
8805 abi_ulong arg_sigset, arg_sigsize, *arg7;
8806 target_sigset_t *target_sigset;
8808 n = arg1;
8809 rfd_addr = arg2;
8810 wfd_addr = arg3;
8811 efd_addr = arg4;
8812 ts_addr = arg5;
8814 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8815 if (ret) {
8816 goto fail;
8818 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8819 if (ret) {
8820 goto fail;
8822 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8823 if (ret) {
8824 goto fail;
8828 * This takes a timespec, and not a timeval, so we cannot
8829 * use the do_select() helper ...
8831 if (ts_addr) {
8832 if (target_to_host_timespec(&ts, ts_addr)) {
8833 goto efault;
8835 ts_ptr = &ts;
8836 } else {
8837 ts_ptr = NULL;
8840 /* Extract the two packed args for the sigset */
8841 if (arg6) {
8842 sig_ptr = &sig;
8843 sig.size = SIGSET_T_SIZE;
8845 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8846 if (!arg7) {
8847 goto efault;
8849 arg_sigset = tswapal(arg7[0]);
8850 arg_sigsize = tswapal(arg7[1]);
8851 unlock_user(arg7, arg6, 0);
8853 if (arg_sigset) {
8854 sig.set = &set;
8855 if (arg_sigsize != sizeof(*target_sigset)) {
8856 /* Like the kernel, we enforce correct size sigsets */
8857 ret = -TARGET_EINVAL;
8858 goto fail;
8860 target_sigset = lock_user(VERIFY_READ, arg_sigset,
8861 sizeof(*target_sigset), 1);
8862 if (!target_sigset) {
8863 goto efault;
8865 target_to_host_sigset(&set, target_sigset);
8866 unlock_user(target_sigset, arg_sigset, 0);
8867 } else {
8868 sig.set = NULL;
8870 } else {
8871 sig_ptr = NULL;
8874 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8875 ts_ptr, sig_ptr));
8877 if (!is_error(ret)) {
8878 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8879 goto efault;
8880 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8881 goto efault;
8882 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8883 goto efault;
8885 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8886 goto efault;
8889 break;
8890 #endif
8891 #ifdef TARGET_NR_symlink
8892 case TARGET_NR_symlink:
8894 void *p2;
8895 p = lock_user_string(arg1);
8896 p2 = lock_user_string(arg2);
8897 if (!p || !p2)
8898 ret = -TARGET_EFAULT;
8899 else
8900 ret = get_errno(symlink(p, p2));
8901 unlock_user(p2, arg2, 0);
8902 unlock_user(p, arg1, 0);
8904 break;
8905 #endif
8906 #if defined(TARGET_NR_symlinkat)
8907 case TARGET_NR_symlinkat:
8909 void *p2;
8910 p = lock_user_string(arg1);
8911 p2 = lock_user_string(arg3);
8912 if (!p || !p2)
8913 ret = -TARGET_EFAULT;
8914 else
8915 ret = get_errno(symlinkat(p, arg2, p2));
8916 unlock_user(p2, arg3, 0);
8917 unlock_user(p, arg1, 0);
8919 break;
8920 #endif
8921 #ifdef TARGET_NR_oldlstat
8922 case TARGET_NR_oldlstat:
8923 goto unimplemented;
8924 #endif
8925 #ifdef TARGET_NR_readlink
8926 case TARGET_NR_readlink:
8928 void *p2;
8929 p = lock_user_string(arg1);
8930 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8931 if (!p || !p2) {
8932 ret = -TARGET_EFAULT;
8933 } else if (!arg3) {
8934 /* Short circuit this for the magic exe check. */
8935 ret = -TARGET_EINVAL;
8936 } else if (is_proc_myself((const char *)p, "exe")) {
8937 char real[PATH_MAX], *temp;
8938 temp = realpath(exec_path, real);
8939 /* Return value is # of bytes that we wrote to the buffer. */
8940 if (temp == NULL) {
8941 ret = get_errno(-1);
8942 } else {
8943 /* Don't worry about sign mismatch as earlier mapping
8944 * logic would have thrown a bad address error. */
8945 ret = MIN(strlen(real), arg3);
8946 /* We cannot NUL terminate the string. */
8947 memcpy(p2, real, ret);
8949 } else {
8950 ret = get_errno(readlink(path(p), p2, arg3));
8952 unlock_user(p2, arg2, ret);
8953 unlock_user(p, arg1, 0);
8955 break;
8956 #endif
8957 #if defined(TARGET_NR_readlinkat)
8958 case TARGET_NR_readlinkat:
8960 void *p2;
8961 p = lock_user_string(arg2);
8962 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8963 if (!p || !p2) {
8964 ret = -TARGET_EFAULT;
8965 } else if (is_proc_myself((const char *)p, "exe")) {
8966 char real[PATH_MAX], *temp;
8967 temp = realpath(exec_path, real);
8968 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
8969 snprintf((char *)p2, arg4, "%s", real);
8970 } else {
8971 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8973 unlock_user(p2, arg3, ret);
8974 unlock_user(p, arg2, 0);
8976 break;
8977 #endif
8978 #ifdef TARGET_NR_uselib
8979 case TARGET_NR_uselib:
8980 goto unimplemented;
8981 #endif
8982 #ifdef TARGET_NR_swapon
8983 case TARGET_NR_swapon:
8984 if (!(p = lock_user_string(arg1)))
8985 goto efault;
8986 ret = get_errno(swapon(p, arg2));
8987 unlock_user(p, arg1, 0);
8988 break;
8989 #endif
8990 case TARGET_NR_reboot:
8991 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8992 /* arg4 must be ignored in all other cases */
8993 p = lock_user_string(arg4);
8994 if (!p) {
8995 goto efault;
8997 ret = get_errno(reboot(arg1, arg2, arg3, p));
8998 unlock_user(p, arg4, 0);
8999 } else {
9000 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9002 break;
9003 #ifdef TARGET_NR_readdir
9004 case TARGET_NR_readdir:
9005 goto unimplemented;
9006 #endif
9007 #ifdef TARGET_NR_mmap
9008 case TARGET_NR_mmap:
9009 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9010 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9011 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9012 || defined(TARGET_S390X)
9014 abi_ulong *v;
9015 abi_ulong v1, v2, v3, v4, v5, v6;
9016 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9017 goto efault;
9018 v1 = tswapal(v[0]);
9019 v2 = tswapal(v[1]);
9020 v3 = tswapal(v[2]);
9021 v4 = tswapal(v[3]);
9022 v5 = tswapal(v[4]);
9023 v6 = tswapal(v[5]);
9024 unlock_user(v, arg1, 0);
9025 ret = get_errno(target_mmap(v1, v2, v3,
9026 target_to_host_bitmask(v4, mmap_flags_tbl),
9027 v5, v6));
9029 #else
9030 ret = get_errno(target_mmap(arg1, arg2, arg3,
9031 target_to_host_bitmask(arg4, mmap_flags_tbl),
9032 arg5,
9033 arg6));
9034 #endif
9035 break;
9036 #endif
9037 #ifdef TARGET_NR_mmap2
9038 case TARGET_NR_mmap2:
9039 #ifndef MMAP_SHIFT
9040 #define MMAP_SHIFT 12
9041 #endif
9042 ret = get_errno(target_mmap(arg1, arg2, arg3,
9043 target_to_host_bitmask(arg4, mmap_flags_tbl),
9044 arg5,
9045 arg6 << MMAP_SHIFT));
9046 break;
9047 #endif
9048 case TARGET_NR_munmap:
9049 ret = get_errno(target_munmap(arg1, arg2));
9050 break;
9051 case TARGET_NR_mprotect:
9053 TaskState *ts = cpu->opaque;
9054 /* Special hack to detect libc making the stack executable. */
9055 if ((arg3 & PROT_GROWSDOWN)
9056 && arg1 >= ts->info->stack_limit
9057 && arg1 <= ts->info->start_stack) {
9058 arg3 &= ~PROT_GROWSDOWN;
9059 arg2 = arg2 + arg1 - ts->info->stack_limit;
9060 arg1 = ts->info->stack_limit;
9063 ret = get_errno(target_mprotect(arg1, arg2, arg3));
9064 break;
9065 #ifdef TARGET_NR_mremap
9066 case TARGET_NR_mremap:
9067 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9068 break;
9069 #endif
9070 /* ??? msync/mlock/munlock are broken for softmmu. */
9071 #ifdef TARGET_NR_msync
9072 case TARGET_NR_msync:
9073 ret = get_errno(msync(g2h(arg1), arg2, arg3));
9074 break;
9075 #endif
9076 #ifdef TARGET_NR_mlock
9077 case TARGET_NR_mlock:
9078 ret = get_errno(mlock(g2h(arg1), arg2));
9079 break;
9080 #endif
9081 #ifdef TARGET_NR_munlock
9082 case TARGET_NR_munlock:
9083 ret = get_errno(munlock(g2h(arg1), arg2));
9084 break;
9085 #endif
9086 #ifdef TARGET_NR_mlockall
9087 case TARGET_NR_mlockall:
9088 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9089 break;
9090 #endif
9091 #ifdef TARGET_NR_munlockall
9092 case TARGET_NR_munlockall:
9093 ret = get_errno(munlockall());
9094 break;
9095 #endif
9096 case TARGET_NR_truncate:
9097 if (!(p = lock_user_string(arg1)))
9098 goto efault;
9099 ret = get_errno(truncate(p, arg2));
9100 unlock_user(p, arg1, 0);
9101 break;
9102 case TARGET_NR_ftruncate:
9103 ret = get_errno(ftruncate(arg1, arg2));
9104 break;
9105 case TARGET_NR_fchmod:
9106 ret = get_errno(fchmod(arg1, arg2));
9107 break;
9108 #if defined(TARGET_NR_fchmodat)
9109 case TARGET_NR_fchmodat:
9110 if (!(p = lock_user_string(arg2)))
9111 goto efault;
9112 ret = get_errno(fchmodat(arg1, p, arg3, 0));
9113 unlock_user(p, arg2, 0);
9114 break;
9115 #endif
9116 case TARGET_NR_getpriority:
9117 /* Note that negative values are valid for getpriority, so we must
9118 differentiate based on errno settings. */
9119 errno = 0;
9120 ret = getpriority(arg1, arg2);
9121 if (ret == -1 && errno != 0) {
9122 ret = -host_to_target_errno(errno);
9123 break;
9125 #ifdef TARGET_ALPHA
9126 /* Return value is the unbiased priority. Signal no error. */
9127 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9128 #else
9129 /* Return value is a biased priority to avoid negative numbers. */
9130 ret = 20 - ret;
9131 #endif
9132 break;
9133 case TARGET_NR_setpriority:
9134 ret = get_errno(setpriority(arg1, arg2, arg3));
9135 break;
9136 #ifdef TARGET_NR_profil
9137 case TARGET_NR_profil:
9138 goto unimplemented;
9139 #endif
9140 case TARGET_NR_statfs:
9141 if (!(p = lock_user_string(arg1)))
9142 goto efault;
9143 ret = get_errno(statfs(path(p), &stfs));
9144 unlock_user(p, arg1, 0);
9145 convert_statfs:
9146 if (!is_error(ret)) {
9147 struct target_statfs *target_stfs;
9149 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9150 goto efault;
9151 __put_user(stfs.f_type, &target_stfs->f_type);
9152 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9153 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9154 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9155 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9156 __put_user(stfs.f_files, &target_stfs->f_files);
9157 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9158 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9159 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9160 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9161 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9162 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9163 unlock_user_struct(target_stfs, arg2, 1);
9165 break;
9166 case TARGET_NR_fstatfs:
9167 ret = get_errno(fstatfs(arg1, &stfs));
9168 goto convert_statfs;
9169 #ifdef TARGET_NR_statfs64
9170 case TARGET_NR_statfs64:
9171 if (!(p = lock_user_string(arg1)))
9172 goto efault;
9173 ret = get_errno(statfs(path(p), &stfs));
9174 unlock_user(p, arg1, 0);
9175 convert_statfs64:
9176 if (!is_error(ret)) {
9177 struct target_statfs64 *target_stfs;
9179 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9180 goto efault;
9181 __put_user(stfs.f_type, &target_stfs->f_type);
9182 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9183 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9184 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9185 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9186 __put_user(stfs.f_files, &target_stfs->f_files);
9187 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9188 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9189 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9190 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9191 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9192 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9193 unlock_user_struct(target_stfs, arg3, 1);
9195 break;
9196 case TARGET_NR_fstatfs64:
9197 ret = get_errno(fstatfs(arg1, &stfs));
9198 goto convert_statfs64;
9199 #endif
9200 #ifdef TARGET_NR_ioperm
9201 case TARGET_NR_ioperm:
9202 goto unimplemented;
9203 #endif
9204 #ifdef TARGET_NR_socketcall
9205 case TARGET_NR_socketcall:
9206 ret = do_socketcall(arg1, arg2);
9207 break;
9208 #endif
9209 #ifdef TARGET_NR_accept
9210 case TARGET_NR_accept:
9211 ret = do_accept4(arg1, arg2, arg3, 0);
9212 break;
9213 #endif
9214 #ifdef TARGET_NR_accept4
9215 case TARGET_NR_accept4:
9216 ret = do_accept4(arg1, arg2, arg3, arg4);
9217 break;
9218 #endif
9219 #ifdef TARGET_NR_bind
9220 case TARGET_NR_bind:
9221 ret = do_bind(arg1, arg2, arg3);
9222 break;
9223 #endif
9224 #ifdef TARGET_NR_connect
9225 case TARGET_NR_connect:
9226 ret = do_connect(arg1, arg2, arg3);
9227 break;
9228 #endif
9229 #ifdef TARGET_NR_getpeername
9230 case TARGET_NR_getpeername:
9231 ret = do_getpeername(arg1, arg2, arg3);
9232 break;
9233 #endif
9234 #ifdef TARGET_NR_getsockname
9235 case TARGET_NR_getsockname:
9236 ret = do_getsockname(arg1, arg2, arg3);
9237 break;
9238 #endif
9239 #ifdef TARGET_NR_getsockopt
9240 case TARGET_NR_getsockopt:
9241 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9242 break;
9243 #endif
9244 #ifdef TARGET_NR_listen
9245 case TARGET_NR_listen:
9246 ret = get_errno(listen(arg1, arg2));
9247 break;
9248 #endif
9249 #ifdef TARGET_NR_recv
9250 case TARGET_NR_recv:
9251 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9252 break;
9253 #endif
9254 #ifdef TARGET_NR_recvfrom
9255 case TARGET_NR_recvfrom:
9256 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9257 break;
9258 #endif
9259 #ifdef TARGET_NR_recvmsg
9260 case TARGET_NR_recvmsg:
9261 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
9262 break;
9263 #endif
9264 #ifdef TARGET_NR_send
9265 case TARGET_NR_send:
9266 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9267 break;
9268 #endif
9269 #ifdef TARGET_NR_sendmsg
9270 case TARGET_NR_sendmsg:
9271 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
9272 break;
9273 #endif
9274 #ifdef TARGET_NR_sendmmsg
9275 case TARGET_NR_sendmmsg:
9276 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9277 break;
9278 case TARGET_NR_recvmmsg:
9279 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9280 break;
9281 #endif
9282 #ifdef TARGET_NR_sendto
9283 case TARGET_NR_sendto:
9284 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9285 break;
9286 #endif
9287 #ifdef TARGET_NR_shutdown
9288 case TARGET_NR_shutdown:
9289 ret = get_errno(shutdown(arg1, arg2));
9290 break;
9291 #endif
9292 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9293 case TARGET_NR_getrandom:
9294 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9295 if (!p) {
9296 goto efault;
9298 ret = get_errno(getrandom(p, arg2, arg3));
9299 unlock_user(p, arg1, ret);
9300 break;
9301 #endif
9302 #ifdef TARGET_NR_socket
9303 case TARGET_NR_socket:
9304 ret = do_socket(arg1, arg2, arg3);
9305 fd_trans_unregister(ret);
9306 break;
9307 #endif
9308 #ifdef TARGET_NR_socketpair
9309 case TARGET_NR_socketpair:
9310 ret = do_socketpair(arg1, arg2, arg3, arg4);
9311 break;
9312 #endif
9313 #ifdef TARGET_NR_setsockopt
9314 case TARGET_NR_setsockopt:
9315 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9316 break;
9317 #endif
9319 case TARGET_NR_syslog:
9320 if (!(p = lock_user_string(arg2)))
9321 goto efault;
9322 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9323 unlock_user(p, arg2, 0);
9324 break;
9326 case TARGET_NR_setitimer:
9328 struct itimerval value, ovalue, *pvalue;
9330 if (arg2) {
9331 pvalue = &value;
9332 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9333 || copy_from_user_timeval(&pvalue->it_value,
9334 arg2 + sizeof(struct target_timeval)))
9335 goto efault;
9336 } else {
9337 pvalue = NULL;
9339 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9340 if (!is_error(ret) && arg3) {
9341 if (copy_to_user_timeval(arg3,
9342 &ovalue.it_interval)
9343 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9344 &ovalue.it_value))
9345 goto efault;
9348 break;
9349 case TARGET_NR_getitimer:
9351 struct itimerval value;
9353 ret = get_errno(getitimer(arg1, &value));
9354 if (!is_error(ret) && arg2) {
9355 if (copy_to_user_timeval(arg2,
9356 &value.it_interval)
9357 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9358 &value.it_value))
9359 goto efault;
9362 break;
9363 #ifdef TARGET_NR_stat
9364 case TARGET_NR_stat:
9365 if (!(p = lock_user_string(arg1)))
9366 goto efault;
9367 ret = get_errno(stat(path(p), &st));
9368 unlock_user(p, arg1, 0);
9369 goto do_stat;
9370 #endif
9371 #ifdef TARGET_NR_lstat
9372 case TARGET_NR_lstat:
9373 if (!(p = lock_user_string(arg1)))
9374 goto efault;
9375 ret = get_errno(lstat(path(p), &st));
9376 unlock_user(p, arg1, 0);
9377 goto do_stat;
9378 #endif
9379 case TARGET_NR_fstat:
9381 ret = get_errno(fstat(arg1, &st));
9382 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9383 do_stat:
9384 #endif
9385 if (!is_error(ret)) {
9386 struct target_stat *target_st;
9388 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9389 goto efault;
9390 memset(target_st, 0, sizeof(*target_st));
9391 __put_user(st.st_dev, &target_st->st_dev);
9392 __put_user(st.st_ino, &target_st->st_ino);
9393 __put_user(st.st_mode, &target_st->st_mode);
9394 __put_user(st.st_uid, &target_st->st_uid);
9395 __put_user(st.st_gid, &target_st->st_gid);
9396 __put_user(st.st_nlink, &target_st->st_nlink);
9397 __put_user(st.st_rdev, &target_st->st_rdev);
9398 __put_user(st.st_size, &target_st->st_size);
9399 __put_user(st.st_blksize, &target_st->st_blksize);
9400 __put_user(st.st_blocks, &target_st->st_blocks);
9401 __put_user(st.st_atime, &target_st->target_st_atime);
9402 __put_user(st.st_mtime, &target_st->target_st_mtime);
9403 __put_user(st.st_ctime, &target_st->target_st_ctime);
9404 unlock_user_struct(target_st, arg2, 1);
9407 break;
9408 #ifdef TARGET_NR_olduname
9409 case TARGET_NR_olduname:
9410 goto unimplemented;
9411 #endif
9412 #ifdef TARGET_NR_iopl
9413 case TARGET_NR_iopl:
9414 goto unimplemented;
9415 #endif
9416 case TARGET_NR_vhangup:
9417 ret = get_errno(vhangup());
9418 break;
9419 #ifdef TARGET_NR_idle
9420 case TARGET_NR_idle:
9421 goto unimplemented;
9422 #endif
9423 #ifdef TARGET_NR_syscall
9424 case TARGET_NR_syscall:
9425 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9426 arg6, arg7, arg8, 0);
9427 break;
9428 #endif
9429 case TARGET_NR_wait4:
9431 int status;
9432 abi_long status_ptr = arg2;
9433 struct rusage rusage, *rusage_ptr;
9434 abi_ulong target_rusage = arg4;
9435 abi_long rusage_err;
9436 if (target_rusage)
9437 rusage_ptr = &rusage;
9438 else
9439 rusage_ptr = NULL;
9440 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9441 if (!is_error(ret)) {
9442 if (status_ptr && ret) {
9443 status = host_to_target_waitstatus(status);
9444 if (put_user_s32(status, status_ptr))
9445 goto efault;
9447 if (target_rusage) {
9448 rusage_err = host_to_target_rusage(target_rusage, &rusage);
9449 if (rusage_err) {
9450 ret = rusage_err;
9455 break;
9456 #ifdef TARGET_NR_swapoff
9457 case TARGET_NR_swapoff:
9458 if (!(p = lock_user_string(arg1)))
9459 goto efault;
9460 ret = get_errno(swapoff(p));
9461 unlock_user(p, arg1, 0);
9462 break;
9463 #endif
9464 case TARGET_NR_sysinfo:
9466 struct target_sysinfo *target_value;
9467 struct sysinfo value;
9468 ret = get_errno(sysinfo(&value));
9469 if (!is_error(ret) && arg1)
9471 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9472 goto efault;
9473 __put_user(value.uptime, &target_value->uptime);
9474 __put_user(value.loads[0], &target_value->loads[0]);
9475 __put_user(value.loads[1], &target_value->loads[1]);
9476 __put_user(value.loads[2], &target_value->loads[2]);
9477 __put_user(value.totalram, &target_value->totalram);
9478 __put_user(value.freeram, &target_value->freeram);
9479 __put_user(value.sharedram, &target_value->sharedram);
9480 __put_user(value.bufferram, &target_value->bufferram);
9481 __put_user(value.totalswap, &target_value->totalswap);
9482 __put_user(value.freeswap, &target_value->freeswap);
9483 __put_user(value.procs, &target_value->procs);
9484 __put_user(value.totalhigh, &target_value->totalhigh);
9485 __put_user(value.freehigh, &target_value->freehigh);
9486 __put_user(value.mem_unit, &target_value->mem_unit);
9487 unlock_user_struct(target_value, arg1, 1);
9490 break;
9491 #ifdef TARGET_NR_ipc
9492 case TARGET_NR_ipc:
9493 ret = do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9494 break;
9495 #endif
9496 #ifdef TARGET_NR_semget
9497 case TARGET_NR_semget:
9498 ret = get_errno(semget(arg1, arg2, arg3));
9499 break;
9500 #endif
9501 #ifdef TARGET_NR_semop
9502 case TARGET_NR_semop:
9503 ret = do_semop(arg1, arg2, arg3);
9504 break;
9505 #endif
9506 #ifdef TARGET_NR_semctl
9507 case TARGET_NR_semctl:
9508 ret = do_semctl(arg1, arg2, arg3, arg4);
9509 break;
9510 #endif
9511 #ifdef TARGET_NR_msgctl
9512 case TARGET_NR_msgctl:
9513 ret = do_msgctl(arg1, arg2, arg3);
9514 break;
9515 #endif
9516 #ifdef TARGET_NR_msgget
9517 case TARGET_NR_msgget:
9518 ret = get_errno(msgget(arg1, arg2));
9519 break;
9520 #endif
9521 #ifdef TARGET_NR_msgrcv
9522 case TARGET_NR_msgrcv:
9523 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9524 break;
9525 #endif
9526 #ifdef TARGET_NR_msgsnd
9527 case TARGET_NR_msgsnd:
9528 ret = do_msgsnd(arg1, arg2, arg3, arg4);
9529 break;
9530 #endif
9531 #ifdef TARGET_NR_shmget
9532 case TARGET_NR_shmget:
9533 ret = get_errno(shmget(arg1, arg2, arg3));
9534 break;
9535 #endif
9536 #ifdef TARGET_NR_shmctl
9537 case TARGET_NR_shmctl:
9538 ret = do_shmctl(arg1, arg2, arg3);
9539 break;
9540 #endif
9541 #ifdef TARGET_NR_shmat
9542 case TARGET_NR_shmat:
9543 ret = do_shmat(cpu_env, arg1, arg2, arg3);
9544 break;
9545 #endif
9546 #ifdef TARGET_NR_shmdt
9547 case TARGET_NR_shmdt:
9548 ret = do_shmdt(arg1);
9549 break;
9550 #endif
9551 case TARGET_NR_fsync:
9552 ret = get_errno(fsync(arg1));
9553 break;
9554 case TARGET_NR_clone:
9555 /* Linux manages to have three different orderings for its
9556 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9557 * match the kernel's CONFIG_CLONE_* settings.
9558 * Microblaze is further special in that it uses a sixth
9559 * implicit argument to clone for the TLS pointer.
9561 #if defined(TARGET_MICROBLAZE)
9562 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9563 #elif defined(TARGET_CLONE_BACKWARDS)
9564 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9565 #elif defined(TARGET_CLONE_BACKWARDS2)
9566 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9567 #else
9568 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9569 #endif
9570 break;
9571 #ifdef __NR_exit_group
9572 /* new thread calls */
9573 case TARGET_NR_exit_group:
9574 #ifdef TARGET_GPROF
9575 _mcleanup();
9576 #endif
9577 gdb_exit(cpu_env, arg1);
9578 ret = get_errno(exit_group(arg1));
9579 break;
9580 #endif
9581 case TARGET_NR_setdomainname:
9582 if (!(p = lock_user_string(arg1)))
9583 goto efault;
9584 ret = get_errno(setdomainname(p, arg2));
9585 unlock_user(p, arg1, 0);
9586 break;
9587 case TARGET_NR_uname:
9588 /* no need to transcode because we use the linux syscall */
9590 struct new_utsname * buf;
9592 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9593 goto efault;
9594 ret = get_errno(sys_uname(buf));
9595 if (!is_error(ret)) {
9596 /* Overwrite the native machine name with whatever is being
9597 emulated. */
9598 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
9599 /* Allow the user to override the reported release. */
9600 if (qemu_uname_release && *qemu_uname_release) {
9601 g_strlcpy(buf->release, qemu_uname_release,
9602 sizeof(buf->release));
9605 unlock_user_struct(buf, arg1, 1);
9607 break;
9608 #ifdef TARGET_I386
9609 case TARGET_NR_modify_ldt:
9610 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
9611 break;
9612 #if !defined(TARGET_X86_64)
9613 case TARGET_NR_vm86old:
9614 goto unimplemented;
9615 case TARGET_NR_vm86:
9616 ret = do_vm86(cpu_env, arg1, arg2);
9617 break;
9618 #endif
9619 #endif
9620 case TARGET_NR_adjtimex:
9622 struct timex host_buf;
9624 if (target_to_host_timex(&host_buf, arg1) != 0) {
9625 goto efault;
9627 ret = get_errno(adjtimex(&host_buf));
9628 if (!is_error(ret)) {
9629 if (host_to_target_timex(arg1, &host_buf) != 0) {
9630 goto efault;
9634 break;
9635 #ifdef TARGET_NR_create_module
9636 case TARGET_NR_create_module:
9637 #endif
9638 case TARGET_NR_init_module:
9639 case TARGET_NR_delete_module:
9640 #ifdef TARGET_NR_get_kernel_syms
9641 case TARGET_NR_get_kernel_syms:
9642 #endif
9643 goto unimplemented;
9644 case TARGET_NR_quotactl:
9645 goto unimplemented;
9646 case TARGET_NR_getpgid:
9647 ret = get_errno(getpgid(arg1));
9648 break;
9649 case TARGET_NR_fchdir:
9650 ret = get_errno(fchdir(arg1));
9651 break;
9652 #ifdef TARGET_NR_bdflush /* not on x86_64 */
9653 case TARGET_NR_bdflush:
9654 goto unimplemented;
9655 #endif
9656 #ifdef TARGET_NR_sysfs
9657 case TARGET_NR_sysfs:
9658 goto unimplemented;
9659 #endif
9660 case TARGET_NR_personality:
9661 ret = get_errno(personality(arg1));
9662 break;
9663 #ifdef TARGET_NR_afs_syscall
9664 case TARGET_NR_afs_syscall:
9665 goto unimplemented;
9666 #endif
9667 #ifdef TARGET_NR__llseek /* Not on alpha */
9668 case TARGET_NR__llseek:
9670 int64_t res;
9671 #if !defined(__NR_llseek)
9672 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9673 if (res == -1) {
9674 ret = get_errno(res);
9675 } else {
9676 ret = 0;
9678 #else
9679 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9680 #endif
9681 if ((ret == 0) && put_user_s64(res, arg4)) {
9682 goto efault;
9685 break;
9686 #endif
9687 #ifdef TARGET_NR_getdents
9688 case TARGET_NR_getdents:
9689 #ifdef __NR_getdents
9690 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9692 struct target_dirent *target_dirp;
9693 struct linux_dirent *dirp;
9694 abi_long count = arg3;
9696 dirp = g_try_malloc(count);
9697 if (!dirp) {
9698 ret = -TARGET_ENOMEM;
9699 goto fail;
9702 ret = get_errno(sys_getdents(arg1, dirp, count));
9703 if (!is_error(ret)) {
9704 struct linux_dirent *de;
9705 struct target_dirent *tde;
9706 int len = ret;
9707 int reclen, treclen;
9708 int count1, tnamelen;
9710 count1 = 0;
9711 de = dirp;
9712 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9713 goto efault;
9714 tde = target_dirp;
9715 while (len > 0) {
9716 reclen = de->d_reclen;
9717 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9718 assert(tnamelen >= 0);
9719 treclen = tnamelen + offsetof(struct target_dirent, d_name);
9720 assert(count1 + treclen <= count);
9721 tde->d_reclen = tswap16(treclen);
9722 tde->d_ino = tswapal(de->d_ino);
9723 tde->d_off = tswapal(de->d_off);
9724 memcpy(tde->d_name, de->d_name, tnamelen);
9725 de = (struct linux_dirent *)((char *)de + reclen);
9726 len -= reclen;
9727 tde = (struct target_dirent *)((char *)tde + treclen);
9728 count1 += treclen;
9730 ret = count1;
9731 unlock_user(target_dirp, arg2, ret);
9733 g_free(dirp);
9735 #else
9737 struct linux_dirent *dirp;
9738 abi_long count = arg3;
9740 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9741 goto efault;
9742 ret = get_errno(sys_getdents(arg1, dirp, count));
9743 if (!is_error(ret)) {
9744 struct linux_dirent *de;
9745 int len = ret;
9746 int reclen;
9747 de = dirp;
9748 while (len > 0) {
9749 reclen = de->d_reclen;
9750 if (reclen > len)
9751 break;
9752 de->d_reclen = tswap16(reclen);
9753 tswapls(&de->d_ino);
9754 tswapls(&de->d_off);
9755 de = (struct linux_dirent *)((char *)de + reclen);
9756 len -= reclen;
9759 unlock_user(dirp, arg2, ret);
9761 #endif
9762 #else
9763 /* Implement getdents in terms of getdents64 */
9765 struct linux_dirent64 *dirp;
9766 abi_long count = arg3;
9768 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9769 if (!dirp) {
9770 goto efault;
9772 ret = get_errno(sys_getdents64(arg1, dirp, count));
9773 if (!is_error(ret)) {
9774 /* Convert the dirent64 structs to target dirent. We do this
9775 * in-place, since we can guarantee that a target_dirent is no
9776 * larger than a dirent64; however this means we have to be
9777 * careful to read everything before writing in the new format.
9779 struct linux_dirent64 *de;
9780 struct target_dirent *tde;
9781 int len = ret;
9782 int tlen = 0;
9784 de = dirp;
9785 tde = (struct target_dirent *)dirp;
9786 while (len > 0) {
9787 int namelen, treclen;
9788 int reclen = de->d_reclen;
9789 uint64_t ino = de->d_ino;
9790 int64_t off = de->d_off;
9791 uint8_t type = de->d_type;
9793 namelen = strlen(de->d_name);
9794 treclen = offsetof(struct target_dirent, d_name)
9795 + namelen + 2;
9796 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9798 memmove(tde->d_name, de->d_name, namelen + 1);
9799 tde->d_ino = tswapal(ino);
9800 tde->d_off = tswapal(off);
9801 tde->d_reclen = tswap16(treclen);
9802 /* The target_dirent type is in what was formerly a padding
9803 * byte at the end of the structure:
9805 *(((char *)tde) + treclen - 1) = type;
9807 de = (struct linux_dirent64 *)((char *)de + reclen);
9808 tde = (struct target_dirent *)((char *)tde + treclen);
9809 len -= reclen;
9810 tlen += treclen;
9812 ret = tlen;
9814 unlock_user(dirp, arg2, ret);
9816 #endif
9817 break;
9818 #endif /* TARGET_NR_getdents */
9819 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9820 case TARGET_NR_getdents64:
9822 struct linux_dirent64 *dirp;
9823 abi_long count = arg3;
9824 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9825 goto efault;
9826 ret = get_errno(sys_getdents64(arg1, dirp, count));
9827 if (!is_error(ret)) {
9828 struct linux_dirent64 *de;
9829 int len = ret;
9830 int reclen;
9831 de = dirp;
9832 while (len > 0) {
9833 reclen = de->d_reclen;
9834 if (reclen > len)
9835 break;
9836 de->d_reclen = tswap16(reclen);
9837 tswap64s((uint64_t *)&de->d_ino);
9838 tswap64s((uint64_t *)&de->d_off);
9839 de = (struct linux_dirent64 *)((char *)de + reclen);
9840 len -= reclen;
9843 unlock_user(dirp, arg2, ret);
9845 break;
9846 #endif /* TARGET_NR_getdents64 */
9847 #if defined(TARGET_NR__newselect)
9848 case TARGET_NR__newselect:
9849 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9850 break;
9851 #endif
9852 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9853 # ifdef TARGET_NR_poll
9854 case TARGET_NR_poll:
9855 # endif
9856 # ifdef TARGET_NR_ppoll
9857 case TARGET_NR_ppoll:
9858 # endif
9860 struct target_pollfd *target_pfd;
9861 unsigned int nfds = arg2;
9862 struct pollfd *pfd;
9863 unsigned int i;
9865 pfd = NULL;
9866 target_pfd = NULL;
9867 if (nfds) {
9868 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9869 ret = -TARGET_EINVAL;
9870 break;
9873 target_pfd = lock_user(VERIFY_WRITE, arg1,
9874 sizeof(struct target_pollfd) * nfds, 1);
9875 if (!target_pfd) {
9876 goto efault;
9879 pfd = alloca(sizeof(struct pollfd) * nfds);
9880 for (i = 0; i < nfds; i++) {
9881 pfd[i].fd = tswap32(target_pfd[i].fd);
9882 pfd[i].events = tswap16(target_pfd[i].events);
9886 switch (num) {
9887 # ifdef TARGET_NR_ppoll
9888 case TARGET_NR_ppoll:
9890 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9891 target_sigset_t *target_set;
9892 sigset_t _set, *set = &_set;
9894 if (arg3) {
9895 if (target_to_host_timespec(timeout_ts, arg3)) {
9896 unlock_user(target_pfd, arg1, 0);
9897 goto efault;
9899 } else {
9900 timeout_ts = NULL;
9903 if (arg4) {
9904 if (arg5 != sizeof(target_sigset_t)) {
9905 unlock_user(target_pfd, arg1, 0);
9906 ret = -TARGET_EINVAL;
9907 break;
9910 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9911 if (!target_set) {
9912 unlock_user(target_pfd, arg1, 0);
9913 goto efault;
9915 target_to_host_sigset(set, target_set);
9916 } else {
9917 set = NULL;
9920 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9921 set, SIGSET_T_SIZE));
9923 if (!is_error(ret) && arg3) {
9924 host_to_target_timespec(arg3, timeout_ts);
9926 if (arg4) {
9927 unlock_user(target_set, arg4, 0);
9929 break;
9931 # endif
9932 # ifdef TARGET_NR_poll
9933 case TARGET_NR_poll:
9935 struct timespec ts, *pts;
9937 if (arg3 >= 0) {
9938 /* Convert ms to secs, ns */
9939 ts.tv_sec = arg3 / 1000;
9940 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9941 pts = &ts;
9942 } else {
9943 /* -ve poll() timeout means "infinite" */
9944 pts = NULL;
9946 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9947 break;
9949 # endif
9950 default:
9951 g_assert_not_reached();
9954 if (!is_error(ret)) {
9955 for(i = 0; i < nfds; i++) {
9956 target_pfd[i].revents = tswap16(pfd[i].revents);
9959 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9961 break;
9962 #endif
9963 case TARGET_NR_flock:
9964 /* NOTE: the flock constant seems to be the same for every
9965 Linux platform */
9966 ret = get_errno(safe_flock(arg1, arg2));
9967 break;
9968 case TARGET_NR_readv:
9970 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9971 if (vec != NULL) {
9972 ret = get_errno(safe_readv(arg1, vec, arg3));
9973 unlock_iovec(vec, arg2, arg3, 1);
9974 } else {
9975 ret = -host_to_target_errno(errno);
9978 break;
9979 case TARGET_NR_writev:
9981 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9982 if (vec != NULL) {
9983 ret = get_errno(safe_writev(arg1, vec, arg3));
9984 unlock_iovec(vec, arg2, arg3, 0);
9985 } else {
9986 ret = -host_to_target_errno(errno);
9989 break;
9990 case TARGET_NR_getsid:
9991 ret = get_errno(getsid(arg1));
9992 break;
9993 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9994 case TARGET_NR_fdatasync:
9995 ret = get_errno(fdatasync(arg1));
9996 break;
9997 #endif
9998 #ifdef TARGET_NR__sysctl
9999 case TARGET_NR__sysctl:
10000 /* We don't implement this, but ENOTDIR is always a safe
10001 return value. */
10002 ret = -TARGET_ENOTDIR;
10003 break;
10004 #endif
10005 case TARGET_NR_sched_getaffinity:
10007 unsigned int mask_size;
10008 unsigned long *mask;
10011 * sched_getaffinity needs multiples of ulong, so need to take
10012 * care of mismatches between target ulong and host ulong sizes.
10014 if (arg2 & (sizeof(abi_ulong) - 1)) {
10015 ret = -TARGET_EINVAL;
10016 break;
10018 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10020 mask = alloca(mask_size);
10021 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10023 if (!is_error(ret)) {
10024 if (ret > arg2) {
10025 /* More data returned than the caller's buffer will fit.
10026 * This only happens if sizeof(abi_long) < sizeof(long)
10027 * and the caller passed us a buffer holding an odd number
10028 * of abi_longs. If the host kernel is actually using the
10029 * extra 4 bytes then fail EINVAL; otherwise we can just
10030 * ignore them and only copy the interesting part.
10032 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10033 if (numcpus > arg2 * 8) {
10034 ret = -TARGET_EINVAL;
10035 break;
10037 ret = arg2;
10040 if (copy_to_user(arg3, mask, ret)) {
10041 goto efault;
10045 break;
10046 case TARGET_NR_sched_setaffinity:
10048 unsigned int mask_size;
10049 unsigned long *mask;
10052 * sched_setaffinity needs multiples of ulong, so need to take
10053 * care of mismatches between target ulong and host ulong sizes.
10055 if (arg2 & (sizeof(abi_ulong) - 1)) {
10056 ret = -TARGET_EINVAL;
10057 break;
10059 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10061 mask = alloca(mask_size);
10062 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
10063 goto efault;
10065 memcpy(mask, p, arg2);
10066 unlock_user_struct(p, arg2, 0);
10068 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10070 break;
10071 case TARGET_NR_sched_setparam:
10073 struct sched_param *target_schp;
10074 struct sched_param schp;
10076 if (arg2 == 0) {
10077 return -TARGET_EINVAL;
10079 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10080 goto efault;
10081 schp.sched_priority = tswap32(target_schp->sched_priority);
10082 unlock_user_struct(target_schp, arg2, 0);
10083 ret = get_errno(sched_setparam(arg1, &schp));
10085 break;
10086 case TARGET_NR_sched_getparam:
10088 struct sched_param *target_schp;
10089 struct sched_param schp;
10091 if (arg2 == 0) {
10092 return -TARGET_EINVAL;
10094 ret = get_errno(sched_getparam(arg1, &schp));
10095 if (!is_error(ret)) {
10096 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10097 goto efault;
10098 target_schp->sched_priority = tswap32(schp.sched_priority);
10099 unlock_user_struct(target_schp, arg2, 1);
10102 break;
10103 case TARGET_NR_sched_setscheduler:
10105 struct sched_param *target_schp;
10106 struct sched_param schp;
10107 if (arg3 == 0) {
10108 return -TARGET_EINVAL;
10110 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10111 goto efault;
10112 schp.sched_priority = tswap32(target_schp->sched_priority);
10113 unlock_user_struct(target_schp, arg3, 0);
10114 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
10116 break;
10117 case TARGET_NR_sched_getscheduler:
10118 ret = get_errno(sched_getscheduler(arg1));
10119 break;
10120 case TARGET_NR_sched_yield:
10121 ret = get_errno(sched_yield());
10122 break;
10123 case TARGET_NR_sched_get_priority_max:
10124 ret = get_errno(sched_get_priority_max(arg1));
10125 break;
10126 case TARGET_NR_sched_get_priority_min:
10127 ret = get_errno(sched_get_priority_min(arg1));
10128 break;
10129 case TARGET_NR_sched_rr_get_interval:
10131 struct timespec ts;
10132 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10133 if (!is_error(ret)) {
10134 ret = host_to_target_timespec(arg2, &ts);
10137 break;
10138 case TARGET_NR_nanosleep:
10140 struct timespec req, rem;
10141 target_to_host_timespec(&req, arg1);
10142 ret = get_errno(safe_nanosleep(&req, &rem));
10143 if (is_error(ret) && arg2) {
10144 host_to_target_timespec(arg2, &rem);
10147 break;
10148 #ifdef TARGET_NR_query_module
10149 case TARGET_NR_query_module:
10150 goto unimplemented;
10151 #endif
10152 #ifdef TARGET_NR_nfsservctl
10153 case TARGET_NR_nfsservctl:
10154 goto unimplemented;
10155 #endif
10156 case TARGET_NR_prctl:
10157 switch (arg1) {
10158 case PR_GET_PDEATHSIG:
10160 int deathsig;
10161 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10162 if (!is_error(ret) && arg2
10163 && put_user_ual(deathsig, arg2)) {
10164 goto efault;
10166 break;
10168 #ifdef PR_GET_NAME
10169 case PR_GET_NAME:
10171 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10172 if (!name) {
10173 goto efault;
10175 ret = get_errno(prctl(arg1, (unsigned long)name,
10176 arg3, arg4, arg5));
10177 unlock_user(name, arg2, 16);
10178 break;
10180 case PR_SET_NAME:
10182 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10183 if (!name) {
10184 goto efault;
10186 ret = get_errno(prctl(arg1, (unsigned long)name,
10187 arg3, arg4, arg5));
10188 unlock_user(name, arg2, 0);
10189 break;
10191 #endif
10192 default:
10193 /* Most prctl options have no pointer arguments */
10194 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10195 break;
10197 break;
10198 #ifdef TARGET_NR_arch_prctl
10199 case TARGET_NR_arch_prctl:
10200 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10201 ret = do_arch_prctl(cpu_env, arg1, arg2);
10202 break;
10203 #else
10204 goto unimplemented;
10205 #endif
10206 #endif
10207 #ifdef TARGET_NR_pread64
10208 case TARGET_NR_pread64:
10209 if (regpairs_aligned(cpu_env)) {
10210 arg4 = arg5;
10211 arg5 = arg6;
10213 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
10214 goto efault;
10215 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10216 unlock_user(p, arg2, ret);
10217 break;
10218 case TARGET_NR_pwrite64:
10219 if (regpairs_aligned(cpu_env)) {
10220 arg4 = arg5;
10221 arg5 = arg6;
10223 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
10224 goto efault;
10225 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10226 unlock_user(p, arg2, 0);
10227 break;
10228 #endif
10229 case TARGET_NR_getcwd:
10230 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10231 goto efault;
10232 ret = get_errno(sys_getcwd1(p, arg2));
10233 unlock_user(p, arg1, ret);
10234 break;
10235 case TARGET_NR_capget:
10236 case TARGET_NR_capset:
10238 struct target_user_cap_header *target_header;
10239 struct target_user_cap_data *target_data = NULL;
10240 struct __user_cap_header_struct header;
10241 struct __user_cap_data_struct data[2];
10242 struct __user_cap_data_struct *dataptr = NULL;
10243 int i, target_datalen;
10244 int data_items = 1;
10246 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10247 goto efault;
10249 header.version = tswap32(target_header->version);
10250 header.pid = tswap32(target_header->pid);
10252 if (header.version != _LINUX_CAPABILITY_VERSION) {
10253 /* Version 2 and up takes pointer to two user_data structs */
10254 data_items = 2;
10257 target_datalen = sizeof(*target_data) * data_items;
10259 if (arg2) {
10260 if (num == TARGET_NR_capget) {
10261 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10262 } else {
10263 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10265 if (!target_data) {
10266 unlock_user_struct(target_header, arg1, 0);
10267 goto efault;
10270 if (num == TARGET_NR_capset) {
10271 for (i = 0; i < data_items; i++) {
10272 data[i].effective = tswap32(target_data[i].effective);
10273 data[i].permitted = tswap32(target_data[i].permitted);
10274 data[i].inheritable = tswap32(target_data[i].inheritable);
10278 dataptr = data;
10281 if (num == TARGET_NR_capget) {
10282 ret = get_errno(capget(&header, dataptr));
10283 } else {
10284 ret = get_errno(capset(&header, dataptr));
10287 /* The kernel always updates version for both capget and capset */
10288 target_header->version = tswap32(header.version);
10289 unlock_user_struct(target_header, arg1, 1);
10291 if (arg2) {
10292 if (num == TARGET_NR_capget) {
10293 for (i = 0; i < data_items; i++) {
10294 target_data[i].effective = tswap32(data[i].effective);
10295 target_data[i].permitted = tswap32(data[i].permitted);
10296 target_data[i].inheritable = tswap32(data[i].inheritable);
10298 unlock_user(target_data, arg2, target_datalen);
10299 } else {
10300 unlock_user(target_data, arg2, 0);
10303 break;
10305 case TARGET_NR_sigaltstack:
10306 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
10307 break;
10309 #ifdef CONFIG_SENDFILE
10310 case TARGET_NR_sendfile:
10312 off_t *offp = NULL;
10313 off_t off;
10314 if (arg3) {
10315 ret = get_user_sal(off, arg3);
10316 if (is_error(ret)) {
10317 break;
10319 offp = &off;
10321 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10322 if (!is_error(ret) && arg3) {
10323 abi_long ret2 = put_user_sal(off, arg3);
10324 if (is_error(ret2)) {
10325 ret = ret2;
10328 break;
10330 #ifdef TARGET_NR_sendfile64
10331 case TARGET_NR_sendfile64:
10333 off_t *offp = NULL;
10334 off_t off;
10335 if (arg3) {
10336 ret = get_user_s64(off, arg3);
10337 if (is_error(ret)) {
10338 break;
10340 offp = &off;
10342 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10343 if (!is_error(ret) && arg3) {
10344 abi_long ret2 = put_user_s64(off, arg3);
10345 if (is_error(ret2)) {
10346 ret = ret2;
10349 break;
10351 #endif
10352 #else
10353 case TARGET_NR_sendfile:
10354 #ifdef TARGET_NR_sendfile64
10355 case TARGET_NR_sendfile64:
10356 #endif
10357 goto unimplemented;
10358 #endif
10360 #ifdef TARGET_NR_getpmsg
10361 case TARGET_NR_getpmsg:
10362 goto unimplemented;
10363 #endif
10364 #ifdef TARGET_NR_putpmsg
10365 case TARGET_NR_putpmsg:
10366 goto unimplemented;
10367 #endif
10368 #ifdef TARGET_NR_vfork
10369 case TARGET_NR_vfork:
10370 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
10371 0, 0, 0, 0));
10372 break;
10373 #endif
10374 #ifdef TARGET_NR_ugetrlimit
10375 case TARGET_NR_ugetrlimit:
10377 struct rlimit rlim;
10378 int resource = target_to_host_resource(arg1);
10379 ret = get_errno(getrlimit(resource, &rlim));
10380 if (!is_error(ret)) {
10381 struct target_rlimit *target_rlim;
10382 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10383 goto efault;
10384 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10385 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10386 unlock_user_struct(target_rlim, arg2, 1);
10388 break;
10390 #endif
10391 #ifdef TARGET_NR_truncate64
10392 case TARGET_NR_truncate64:
10393 if (!(p = lock_user_string(arg1)))
10394 goto efault;
10395 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10396 unlock_user(p, arg1, 0);
10397 break;
10398 #endif
10399 #ifdef TARGET_NR_ftruncate64
10400 case TARGET_NR_ftruncate64:
10401 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10402 break;
10403 #endif
10404 #ifdef TARGET_NR_stat64
10405 case TARGET_NR_stat64:
10406 if (!(p = lock_user_string(arg1)))
10407 goto efault;
10408 ret = get_errno(stat(path(p), &st));
10409 unlock_user(p, arg1, 0);
10410 if (!is_error(ret))
10411 ret = host_to_target_stat64(cpu_env, arg2, &st);
10412 break;
10413 #endif
10414 #ifdef TARGET_NR_lstat64
10415 case TARGET_NR_lstat64:
10416 if (!(p = lock_user_string(arg1)))
10417 goto efault;
10418 ret = get_errno(lstat(path(p), &st));
10419 unlock_user(p, arg1, 0);
10420 if (!is_error(ret))
10421 ret = host_to_target_stat64(cpu_env, arg2, &st);
10422 break;
10423 #endif
10424 #ifdef TARGET_NR_fstat64
10425 case TARGET_NR_fstat64:
10426 ret = get_errno(fstat(arg1, &st));
10427 if (!is_error(ret))
10428 ret = host_to_target_stat64(cpu_env, arg2, &st);
10429 break;
10430 #endif
10431 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10432 #ifdef TARGET_NR_fstatat64
10433 case TARGET_NR_fstatat64:
10434 #endif
10435 #ifdef TARGET_NR_newfstatat
10436 case TARGET_NR_newfstatat:
10437 #endif
10438 if (!(p = lock_user_string(arg2)))
10439 goto efault;
10440 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10441 if (!is_error(ret))
10442 ret = host_to_target_stat64(cpu_env, arg3, &st);
10443 break;
10444 #endif
10445 #ifdef TARGET_NR_lchown
10446 case TARGET_NR_lchown:
10447 if (!(p = lock_user_string(arg1)))
10448 goto efault;
10449 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10450 unlock_user(p, arg1, 0);
10451 break;
10452 #endif
10453 #ifdef TARGET_NR_getuid
10454 case TARGET_NR_getuid:
10455 ret = get_errno(high2lowuid(getuid()));
10456 break;
10457 #endif
10458 #ifdef TARGET_NR_getgid
10459 case TARGET_NR_getgid:
10460 ret = get_errno(high2lowgid(getgid()));
10461 break;
10462 #endif
10463 #ifdef TARGET_NR_geteuid
10464 case TARGET_NR_geteuid:
10465 ret = get_errno(high2lowuid(geteuid()));
10466 break;
10467 #endif
10468 #ifdef TARGET_NR_getegid
10469 case TARGET_NR_getegid:
10470 ret = get_errno(high2lowgid(getegid()));
10471 break;
10472 #endif
10473 case TARGET_NR_setreuid:
10474 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10475 break;
10476 case TARGET_NR_setregid:
10477 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10478 break;
10479 case TARGET_NR_getgroups:
10481 int gidsetsize = arg1;
10482 target_id *target_grouplist;
10483 gid_t *grouplist;
10484 int i;
10486 grouplist = alloca(gidsetsize * sizeof(gid_t));
10487 ret = get_errno(getgroups(gidsetsize, grouplist));
10488 if (gidsetsize == 0)
10489 break;
10490 if (!is_error(ret)) {
10491 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10492 if (!target_grouplist)
10493 goto efault;
10494 for(i = 0;i < ret; i++)
10495 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10496 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10499 break;
10500 case TARGET_NR_setgroups:
10502 int gidsetsize = arg1;
10503 target_id *target_grouplist;
10504 gid_t *grouplist = NULL;
10505 int i;
10506 if (gidsetsize) {
10507 grouplist = alloca(gidsetsize * sizeof(gid_t));
10508 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10509 if (!target_grouplist) {
10510 ret = -TARGET_EFAULT;
10511 goto fail;
10513 for (i = 0; i < gidsetsize; i++) {
10514 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10516 unlock_user(target_grouplist, arg2, 0);
10518 ret = get_errno(setgroups(gidsetsize, grouplist));
10520 break;
10521 case TARGET_NR_fchown:
10522 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10523 break;
10524 #if defined(TARGET_NR_fchownat)
10525 case TARGET_NR_fchownat:
10526 if (!(p = lock_user_string(arg2)))
10527 goto efault;
10528 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10529 low2highgid(arg4), arg5));
10530 unlock_user(p, arg2, 0);
10531 break;
10532 #endif
10533 #ifdef TARGET_NR_setresuid
10534 case TARGET_NR_setresuid:
10535 ret = get_errno(sys_setresuid(low2highuid(arg1),
10536 low2highuid(arg2),
10537 low2highuid(arg3)));
10538 break;
10539 #endif
10540 #ifdef TARGET_NR_getresuid
10541 case TARGET_NR_getresuid:
10543 uid_t ruid, euid, suid;
10544 ret = get_errno(getresuid(&ruid, &euid, &suid));
10545 if (!is_error(ret)) {
10546 if (put_user_id(high2lowuid(ruid), arg1)
10547 || put_user_id(high2lowuid(euid), arg2)
10548 || put_user_id(high2lowuid(suid), arg3))
10549 goto efault;
10552 break;
10553 #endif
10554 #ifdef TARGET_NR_getresgid
10555 case TARGET_NR_setresgid:
10556 ret = get_errno(sys_setresgid(low2highgid(arg1),
10557 low2highgid(arg2),
10558 low2highgid(arg3)));
10559 break;
10560 #endif
10561 #ifdef TARGET_NR_getresgid
10562 case TARGET_NR_getresgid:
10564 gid_t rgid, egid, sgid;
10565 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10566 if (!is_error(ret)) {
10567 if (put_user_id(high2lowgid(rgid), arg1)
10568 || put_user_id(high2lowgid(egid), arg2)
10569 || put_user_id(high2lowgid(sgid), arg3))
10570 goto efault;
10573 break;
10574 #endif
10575 #ifdef TARGET_NR_chown
10576 case TARGET_NR_chown:
10577 if (!(p = lock_user_string(arg1)))
10578 goto efault;
10579 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10580 unlock_user(p, arg1, 0);
10581 break;
10582 #endif
10583 case TARGET_NR_setuid:
10584 ret = get_errno(sys_setuid(low2highuid(arg1)));
10585 break;
10586 case TARGET_NR_setgid:
10587 ret = get_errno(sys_setgid(low2highgid(arg1)));
10588 break;
10589 case TARGET_NR_setfsuid:
10590 ret = get_errno(setfsuid(arg1));
10591 break;
10592 case TARGET_NR_setfsgid:
10593 ret = get_errno(setfsgid(arg1));
10594 break;
10596 #ifdef TARGET_NR_lchown32
10597 case TARGET_NR_lchown32:
10598 if (!(p = lock_user_string(arg1)))
10599 goto efault;
10600 ret = get_errno(lchown(p, arg2, arg3));
10601 unlock_user(p, arg1, 0);
10602 break;
10603 #endif
10604 #ifdef TARGET_NR_getuid32
10605 case TARGET_NR_getuid32:
10606 ret = get_errno(getuid());
10607 break;
10608 #endif
10610 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10611 /* Alpha specific */
10612 case TARGET_NR_getxuid:
10614 uid_t euid;
10615 euid=geteuid();
10616 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
10618 ret = get_errno(getuid());
10619 break;
10620 #endif
10621 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10622 /* Alpha specific */
10623 case TARGET_NR_getxgid:
10625 uid_t egid;
10626 egid=getegid();
10627 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
10629 ret = get_errno(getgid());
10630 break;
10631 #endif
10632 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10633 /* Alpha specific */
10634 case TARGET_NR_osf_getsysinfo:
10635 ret = -TARGET_EOPNOTSUPP;
10636 switch (arg1) {
10637 case TARGET_GSI_IEEE_FP_CONTROL:
10639 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
10641 /* Copied from linux ieee_fpcr_to_swcr. */
10642 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
10643 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
10644 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
10645 | SWCR_TRAP_ENABLE_DZE
10646 | SWCR_TRAP_ENABLE_OVF);
10647 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
10648 | SWCR_TRAP_ENABLE_INE);
10649 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
10650 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
10652 if (put_user_u64 (swcr, arg2))
10653 goto efault;
10654 ret = 0;
10656 break;
10658 /* case GSI_IEEE_STATE_AT_SIGNAL:
10659 -- Not implemented in linux kernel.
10660 case GSI_UACPROC:
10661 -- Retrieves current unaligned access state; not much used.
10662 case GSI_PROC_TYPE:
10663 -- Retrieves implver information; surely not used.
10664 case GSI_GET_HWRPB:
10665 -- Grabs a copy of the HWRPB; surely not used.
10668 break;
10669 #endif
10670 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10671 /* Alpha specific */
10672 case TARGET_NR_osf_setsysinfo:
10673 ret = -TARGET_EOPNOTSUPP;
10674 switch (arg1) {
10675 case TARGET_SSI_IEEE_FP_CONTROL:
10677 uint64_t swcr, fpcr, orig_fpcr;
10679 if (get_user_u64 (swcr, arg2)) {
10680 goto efault;
10682 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10683 fpcr = orig_fpcr & FPCR_DYN_MASK;
10685 /* Copied from linux ieee_swcr_to_fpcr. */
10686 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
10687 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
10688 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
10689 | SWCR_TRAP_ENABLE_DZE
10690 | SWCR_TRAP_ENABLE_OVF)) << 48;
10691 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
10692 | SWCR_TRAP_ENABLE_INE)) << 57;
10693 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
10694 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
10696 cpu_alpha_store_fpcr(cpu_env, fpcr);
10697 ret = 0;
10699 break;
10701 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10703 uint64_t exc, fpcr, orig_fpcr;
10704 int si_code;
10706 if (get_user_u64(exc, arg2)) {
10707 goto efault;
10710 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10712 /* We only add to the exception status here. */
10713 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
10715 cpu_alpha_store_fpcr(cpu_env, fpcr);
10716 ret = 0;
10718 /* Old exceptions are not signaled. */
10719 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
10721 /* If any exceptions set by this call,
10722 and are unmasked, send a signal. */
10723 si_code = 0;
10724 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
10725 si_code = TARGET_FPE_FLTRES;
10727 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
10728 si_code = TARGET_FPE_FLTUND;
10730 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
10731 si_code = TARGET_FPE_FLTOVF;
10733 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
10734 si_code = TARGET_FPE_FLTDIV;
10736 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
10737 si_code = TARGET_FPE_FLTINV;
10739 if (si_code != 0) {
10740 target_siginfo_t info;
10741 info.si_signo = SIGFPE;
10742 info.si_errno = 0;
10743 info.si_code = si_code;
10744 info._sifields._sigfault._addr
10745 = ((CPUArchState *)cpu_env)->pc;
10746 queue_signal((CPUArchState *)cpu_env, info.si_signo,
10747 QEMU_SI_FAULT, &info);
10750 break;
10752 /* case SSI_NVPAIRS:
10753 -- Used with SSIN_UACPROC to enable unaligned accesses.
10754 case SSI_IEEE_STATE_AT_SIGNAL:
10755 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10756 -- Not implemented in linux kernel
10759 break;
10760 #endif
10761 #ifdef TARGET_NR_osf_sigprocmask
10762 /* Alpha specific. */
10763 case TARGET_NR_osf_sigprocmask:
10765 abi_ulong mask;
10766 int how;
10767 sigset_t set, oldset;
10769 switch(arg1) {
10770 case TARGET_SIG_BLOCK:
10771 how = SIG_BLOCK;
10772 break;
10773 case TARGET_SIG_UNBLOCK:
10774 how = SIG_UNBLOCK;
10775 break;
10776 case TARGET_SIG_SETMASK:
10777 how = SIG_SETMASK;
10778 break;
10779 default:
10780 ret = -TARGET_EINVAL;
10781 goto fail;
10783 mask = arg2;
10784 target_to_host_old_sigset(&set, &mask);
10785 ret = do_sigprocmask(how, &set, &oldset);
10786 if (!ret) {
10787 host_to_target_old_sigset(&mask, &oldset);
10788 ret = mask;
10791 break;
10792 #endif
10794 #ifdef TARGET_NR_getgid32
10795 case TARGET_NR_getgid32:
10796 ret = get_errno(getgid());
10797 break;
10798 #endif
10799 #ifdef TARGET_NR_geteuid32
10800 case TARGET_NR_geteuid32:
10801 ret = get_errno(geteuid());
10802 break;
10803 #endif
10804 #ifdef TARGET_NR_getegid32
10805 case TARGET_NR_getegid32:
10806 ret = get_errno(getegid());
10807 break;
10808 #endif
10809 #ifdef TARGET_NR_setreuid32
10810 case TARGET_NR_setreuid32:
10811 ret = get_errno(setreuid(arg1, arg2));
10812 break;
10813 #endif
10814 #ifdef TARGET_NR_setregid32
10815 case TARGET_NR_setregid32:
10816 ret = get_errno(setregid(arg1, arg2));
10817 break;
10818 #endif
10819 #ifdef TARGET_NR_getgroups32
10820 case TARGET_NR_getgroups32:
10822 int gidsetsize = arg1;
10823 uint32_t *target_grouplist;
10824 gid_t *grouplist;
10825 int i;
10827 grouplist = alloca(gidsetsize * sizeof(gid_t));
10828 ret = get_errno(getgroups(gidsetsize, grouplist));
10829 if (gidsetsize == 0)
10830 break;
10831 if (!is_error(ret)) {
10832 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10833 if (!target_grouplist) {
10834 ret = -TARGET_EFAULT;
10835 goto fail;
10837 for(i = 0;i < ret; i++)
10838 target_grouplist[i] = tswap32(grouplist[i]);
10839 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10842 break;
10843 #endif
10844 #ifdef TARGET_NR_setgroups32
10845 case TARGET_NR_setgroups32:
10847 int gidsetsize = arg1;
10848 uint32_t *target_grouplist;
10849 gid_t *grouplist;
10850 int i;
10852 grouplist = alloca(gidsetsize * sizeof(gid_t));
10853 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10854 if (!target_grouplist) {
10855 ret = -TARGET_EFAULT;
10856 goto fail;
10858 for(i = 0;i < gidsetsize; i++)
10859 grouplist[i] = tswap32(target_grouplist[i]);
10860 unlock_user(target_grouplist, arg2, 0);
10861 ret = get_errno(setgroups(gidsetsize, grouplist));
10863 break;
10864 #endif
10865 #ifdef TARGET_NR_fchown32
10866 case TARGET_NR_fchown32:
10867 ret = get_errno(fchown(arg1, arg2, arg3));
10868 break;
10869 #endif
10870 #ifdef TARGET_NR_setresuid32
10871 case TARGET_NR_setresuid32:
10872 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
10873 break;
10874 #endif
10875 #ifdef TARGET_NR_getresuid32
10876 case TARGET_NR_getresuid32:
10878 uid_t ruid, euid, suid;
10879 ret = get_errno(getresuid(&ruid, &euid, &suid));
10880 if (!is_error(ret)) {
10881 if (put_user_u32(ruid, arg1)
10882 || put_user_u32(euid, arg2)
10883 || put_user_u32(suid, arg3))
10884 goto efault;
10887 break;
10888 #endif
10889 #ifdef TARGET_NR_setresgid32
10890 case TARGET_NR_setresgid32:
10891 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
10892 break;
10893 #endif
10894 #ifdef TARGET_NR_getresgid32
10895 case TARGET_NR_getresgid32:
10897 gid_t rgid, egid, sgid;
10898 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10899 if (!is_error(ret)) {
10900 if (put_user_u32(rgid, arg1)
10901 || put_user_u32(egid, arg2)
10902 || put_user_u32(sgid, arg3))
10903 goto efault;
10906 break;
10907 #endif
10908 #ifdef TARGET_NR_chown32
10909 case TARGET_NR_chown32:
10910 if (!(p = lock_user_string(arg1)))
10911 goto efault;
10912 ret = get_errno(chown(p, arg2, arg3));
10913 unlock_user(p, arg1, 0);
10914 break;
10915 #endif
10916 #ifdef TARGET_NR_setuid32
10917 case TARGET_NR_setuid32:
10918 ret = get_errno(sys_setuid(arg1));
10919 break;
10920 #endif
10921 #ifdef TARGET_NR_setgid32
10922 case TARGET_NR_setgid32:
10923 ret = get_errno(sys_setgid(arg1));
10924 break;
10925 #endif
10926 #ifdef TARGET_NR_setfsuid32
10927 case TARGET_NR_setfsuid32:
10928 ret = get_errno(setfsuid(arg1));
10929 break;
10930 #endif
10931 #ifdef TARGET_NR_setfsgid32
10932 case TARGET_NR_setfsgid32:
10933 ret = get_errno(setfsgid(arg1));
10934 break;
10935 #endif
10937 case TARGET_NR_pivot_root:
10938 goto unimplemented;
10939 #ifdef TARGET_NR_mincore
10940 case TARGET_NR_mincore:
10942 void *a;
10943 ret = -TARGET_EFAULT;
10944 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
10945 goto efault;
10946 if (!(p = lock_user_string(arg3)))
10947 goto mincore_fail;
10948 ret = get_errno(mincore(a, arg2, p));
10949 unlock_user(p, arg3, ret);
10950 mincore_fail:
10951 unlock_user(a, arg1, 0);
10953 break;
10954 #endif
10955 #ifdef TARGET_NR_arm_fadvise64_64
10956 case TARGET_NR_arm_fadvise64_64:
10957 /* arm_fadvise64_64 looks like fadvise64_64 but
10958 * with different argument order: fd, advice, offset, len
10959 * rather than the usual fd, offset, len, advice.
10960 * Note that offset and len are both 64-bit so appear as
10961 * pairs of 32-bit registers.
10963 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10964 target_offset64(arg5, arg6), arg2);
10965 ret = -host_to_target_errno(ret);
10966 break;
10967 #endif
10969 #if TARGET_ABI_BITS == 32
10971 #ifdef TARGET_NR_fadvise64_64
10972 case TARGET_NR_fadvise64_64:
10973 /* 6 args: fd, offset (high, low), len (high, low), advice */
10974 if (regpairs_aligned(cpu_env)) {
10975 /* offset is in (3,4), len in (5,6) and advice in 7 */
10976 arg2 = arg3;
10977 arg3 = arg4;
10978 arg4 = arg5;
10979 arg5 = arg6;
10980 arg6 = arg7;
10982 ret = -host_to_target_errno(posix_fadvise(arg1,
10983 target_offset64(arg2, arg3),
10984 target_offset64(arg4, arg5),
10985 arg6));
10986 break;
10987 #endif
10989 #ifdef TARGET_NR_fadvise64
10990 case TARGET_NR_fadvise64:
10991 /* 5 args: fd, offset (high, low), len, advice */
10992 if (regpairs_aligned(cpu_env)) {
10993 /* offset is in (3,4), len in 5 and advice in 6 */
10994 arg2 = arg3;
10995 arg3 = arg4;
10996 arg4 = arg5;
10997 arg5 = arg6;
10999 ret = -host_to_target_errno(posix_fadvise(arg1,
11000 target_offset64(arg2, arg3),
11001 arg4, arg5));
11002 break;
11003 #endif
11005 #else /* not a 32-bit ABI */
11006 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11007 #ifdef TARGET_NR_fadvise64_64
11008 case TARGET_NR_fadvise64_64:
11009 #endif
11010 #ifdef TARGET_NR_fadvise64
11011 case TARGET_NR_fadvise64:
11012 #endif
11013 #ifdef TARGET_S390X
11014 switch (arg4) {
11015 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11016 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11017 case 6: arg4 = POSIX_FADV_DONTNEED; break;
11018 case 7: arg4 = POSIX_FADV_NOREUSE; break;
11019 default: break;
11021 #endif
11022 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11023 break;
11024 #endif
11025 #endif /* end of 64-bit ABI fadvise handling */
11027 #ifdef TARGET_NR_madvise
11028 case TARGET_NR_madvise:
11029 /* A straight passthrough may not be safe because qemu sometimes
11030 turns private file-backed mappings into anonymous mappings.
11031 This will break MADV_DONTNEED.
11032 This is a hint, so ignoring and returning success is ok. */
11033 ret = get_errno(0);
11034 break;
11035 #endif
11036 #if TARGET_ABI_BITS == 32
11037 case TARGET_NR_fcntl64:
11039 int cmd;
11040 struct flock64 fl;
11041 from_flock64_fn *copyfrom = copy_from_user_flock64;
11042 to_flock64_fn *copyto = copy_to_user_flock64;
11044 #ifdef TARGET_ARM
11045 if (((CPUARMState *)cpu_env)->eabi) {
11046 copyfrom = copy_from_user_eabi_flock64;
11047 copyto = copy_to_user_eabi_flock64;
11049 #endif
11051 cmd = target_to_host_fcntl_cmd(arg2);
11052 if (cmd == -TARGET_EINVAL) {
11053 ret = cmd;
11054 break;
11057 switch(arg2) {
11058 case TARGET_F_GETLK64:
11059 ret = copyfrom(&fl, arg3);
11060 if (ret) {
11061 break;
11063 ret = get_errno(fcntl(arg1, cmd, &fl));
11064 if (ret == 0) {
11065 ret = copyto(arg3, &fl);
11067 break;
11069 case TARGET_F_SETLK64:
11070 case TARGET_F_SETLKW64:
11071 ret = copyfrom(&fl, arg3);
11072 if (ret) {
11073 break;
11075 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11076 break;
11077 default:
11078 ret = do_fcntl(arg1, arg2, arg3);
11079 break;
11081 break;
11083 #endif
11084 #ifdef TARGET_NR_cacheflush
11085 case TARGET_NR_cacheflush:
11086 /* self-modifying code is handled automatically, so nothing needed */
11087 ret = 0;
11088 break;
11089 #endif
11090 #ifdef TARGET_NR_security
11091 case TARGET_NR_security:
11092 goto unimplemented;
11093 #endif
11094 #ifdef TARGET_NR_getpagesize
11095 case TARGET_NR_getpagesize:
11096 ret = TARGET_PAGE_SIZE;
11097 break;
11098 #endif
11099 case TARGET_NR_gettid:
11100 ret = get_errno(gettid());
11101 break;
11102 #ifdef TARGET_NR_readahead
11103 case TARGET_NR_readahead:
11104 #if TARGET_ABI_BITS == 32
11105 if (regpairs_aligned(cpu_env)) {
11106 arg2 = arg3;
11107 arg3 = arg4;
11108 arg4 = arg5;
11110 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
11111 #else
11112 ret = get_errno(readahead(arg1, arg2, arg3));
11113 #endif
11114 break;
11115 #endif
11116 #ifdef CONFIG_ATTR
11117 #ifdef TARGET_NR_setxattr
11118 case TARGET_NR_listxattr:
11119 case TARGET_NR_llistxattr:
11121 void *p, *b = 0;
11122 if (arg2) {
11123 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11124 if (!b) {
11125 ret = -TARGET_EFAULT;
11126 break;
11129 p = lock_user_string(arg1);
11130 if (p) {
11131 if (num == TARGET_NR_listxattr) {
11132 ret = get_errno(listxattr(p, b, arg3));
11133 } else {
11134 ret = get_errno(llistxattr(p, b, arg3));
11136 } else {
11137 ret = -TARGET_EFAULT;
11139 unlock_user(p, arg1, 0);
11140 unlock_user(b, arg2, arg3);
11141 break;
11143 case TARGET_NR_flistxattr:
11145 void *b = 0;
11146 if (arg2) {
11147 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11148 if (!b) {
11149 ret = -TARGET_EFAULT;
11150 break;
11153 ret = get_errno(flistxattr(arg1, b, arg3));
11154 unlock_user(b, arg2, arg3);
11155 break;
11157 case TARGET_NR_setxattr:
11158 case TARGET_NR_lsetxattr:
11160 void *p, *n, *v = 0;
11161 if (arg3) {
11162 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11163 if (!v) {
11164 ret = -TARGET_EFAULT;
11165 break;
11168 p = lock_user_string(arg1);
11169 n = lock_user_string(arg2);
11170 if (p && n) {
11171 if (num == TARGET_NR_setxattr) {
11172 ret = get_errno(setxattr(p, n, v, arg4, arg5));
11173 } else {
11174 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11176 } else {
11177 ret = -TARGET_EFAULT;
11179 unlock_user(p, arg1, 0);
11180 unlock_user(n, arg2, 0);
11181 unlock_user(v, arg3, 0);
11183 break;
11184 case TARGET_NR_fsetxattr:
11186 void *n, *v = 0;
11187 if (arg3) {
11188 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11189 if (!v) {
11190 ret = -TARGET_EFAULT;
11191 break;
11194 n = lock_user_string(arg2);
11195 if (n) {
11196 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11197 } else {
11198 ret = -TARGET_EFAULT;
11200 unlock_user(n, arg2, 0);
11201 unlock_user(v, arg3, 0);
11203 break;
11204 case TARGET_NR_getxattr:
11205 case TARGET_NR_lgetxattr:
11207 void *p, *n, *v = 0;
11208 if (arg3) {
11209 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11210 if (!v) {
11211 ret = -TARGET_EFAULT;
11212 break;
11215 p = lock_user_string(arg1);
11216 n = lock_user_string(arg2);
11217 if (p && n) {
11218 if (num == TARGET_NR_getxattr) {
11219 ret = get_errno(getxattr(p, n, v, arg4));
11220 } else {
11221 ret = get_errno(lgetxattr(p, n, v, arg4));
11223 } else {
11224 ret = -TARGET_EFAULT;
11226 unlock_user(p, arg1, 0);
11227 unlock_user(n, arg2, 0);
11228 unlock_user(v, arg3, arg4);
11230 break;
11231 case TARGET_NR_fgetxattr:
11233 void *n, *v = 0;
11234 if (arg3) {
11235 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11236 if (!v) {
11237 ret = -TARGET_EFAULT;
11238 break;
11241 n = lock_user_string(arg2);
11242 if (n) {
11243 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11244 } else {
11245 ret = -TARGET_EFAULT;
11247 unlock_user(n, arg2, 0);
11248 unlock_user(v, arg3, arg4);
11250 break;
11251 case TARGET_NR_removexattr:
11252 case TARGET_NR_lremovexattr:
11254 void *p, *n;
11255 p = lock_user_string(arg1);
11256 n = lock_user_string(arg2);
11257 if (p && n) {
11258 if (num == TARGET_NR_removexattr) {
11259 ret = get_errno(removexattr(p, n));
11260 } else {
11261 ret = get_errno(lremovexattr(p, n));
11263 } else {
11264 ret = -TARGET_EFAULT;
11266 unlock_user(p, arg1, 0);
11267 unlock_user(n, arg2, 0);
11269 break;
11270 case TARGET_NR_fremovexattr:
11272 void *n;
11273 n = lock_user_string(arg2);
11274 if (n) {
11275 ret = get_errno(fremovexattr(arg1, n));
11276 } else {
11277 ret = -TARGET_EFAULT;
11279 unlock_user(n, arg2, 0);
11281 break;
11282 #endif
11283 #endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
case TARGET_NR_set_thread_area:
    /* set_thread_area: per-architecture TLS pointer setup.  Each target
     * stores the thread pointer in its own CPU state field. */
#if defined(TARGET_MIPS)
    ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
    ret = 0;
    break;
#elif defined(TARGET_CRIS)
    /* CRIS requires the low byte to be zero. */
    if (arg1 & 0xff)
        ret = -TARGET_EINVAL;
    else {
        ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
        ret = 0;
    }
    break;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
    ret = do_set_thread_area(cpu_env, arg1);
    break;
#elif defined(TARGET_M68K)
    {
        TaskState *ts = cpu->opaque;
        ts->tp_value = arg1;
        ret = 0;
        break;
    }
#else
    goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
    ret = do_get_thread_area(cpu_env, arg1);
    break;
#elif defined(TARGET_M68K)
    {
        TaskState *ts = cpu->opaque;
        ret = ts->tp_value;
        break;
    }
#else
    goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_getdomainname
case TARGET_NR_getdomainname:
    /* Not implemented: return ENOSYS without logging a warning. */
    goto unimplemented_nowarn;
#endif
#ifdef TARGET_NR_clock_gettime
case TARGET_NR_clock_gettime:
    {
        /* clock_gettime(clockid, *tp): query a clock and copy the
         * result back to the guest. */
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            /* Fix: a failed copy-out (bad guest pointer) was silently
             * ignored; the kernel returns EFAULT here. */
            if (host_to_target_timespec(arg2, &ts)) {
                goto efault;
            }
        }
    }
    break;
#endif
#ifdef TARGET_NR_clock_getres:
case TARGET_NR_clock_getres:
    {
        /* clock_getres(clockid, *res): res may legitimately be NULL,
         * in which case only the clockid is validated. */
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret) && arg2) {
            /* Fix: propagate EFAULT for a bad (non-NULL) res pointer
             * instead of dropping the conversion failure. */
            if (host_to_target_timespec(arg2, &ts)) {
                goto efault;
            }
        }
    }
    break;
#endif
#ifdef TARGET_NR_clock_nanosleep
case TARGET_NR_clock_nanosleep:
    {
        /* clock_nanosleep(clockid, flags, *request, *remain): sleep on a
         * specific clock; 'remain' (arg4) receives the unslept time. */
        struct timespec ts;
        /* Fix: a bad request pointer must yield EFAULT, not a sleep on
         * an uninitialised timespec. */
        if (target_to_host_timespec(&ts, arg3)) {
            goto efault;
        }
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        if (arg4) {
            /* Fix: check the copy-out of the remaining time as well. */
            if (host_to_target_timespec(arg4, &ts)) {
                goto efault;
            }
        }

#if defined(TARGET_PPC)
        /* clock_nanosleep is odd in that it returns positive errno values.
         * On PPC, CR0 bit 3 should be set in such a situation. */
        if (ret && ret != -TARGET_ERESTARTSYS) {
            ((CPUPPCState *)cpu_env)->crf[0] |= 1;
        }
#endif
        break;
    }
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
case TARGET_NR_set_tid_address:
    /* Convert the guest address with g2h() so the host kernel operates
     * directly on guest memory for the clear_child_tid word. */
    ret = get_errno(set_tid_address((int *)g2h(arg1)));
    break;
#endif

case TARGET_NR_tkill:
    /* Translate the guest signal number to the host numbering. */
    ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
    break;

case TARGET_NR_tgkill:
    ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
                    target_to_host_signal(arg3)));
    break;
#ifdef TARGET_NR_set_robust_list
case TARGET_NR_set_robust_list:
case TARGET_NR_get_robust_list:
    /* The ABI for supporting robust futexes has userspace pass
     * the kernel a pointer to a linked list which is updated by
     * userspace after the syscall; the list is walked by the kernel
     * when the thread exits. Since the linked list in QEMU guest
     * memory isn't a valid linked list for the host and we have
     * no way to reliably intercept the thread-death event, we can't
     * support these. Silently return ENOSYS so that guest userspace
     * falls back to a non-robust futex implementation (which should
     * be OK except in the corner case of the guest crashing while
     * holding a mutex that is shared with another process via
     * shared memory).
     */
    goto unimplemented_nowarn;
#endif
#if defined(TARGET_NR_utimensat)
case TARGET_NR_utimensat:
    {
        /* utimensat(dirfd, pathname, times[2], flags): set file
         * timestamps; times == NULL means "set both to now". */
        struct timespec *tsp, ts[2];
        if (!arg3) {
            tsp = NULL;
        } else {
            /* Fix: a bad 'times' pointer must fail with EFAULT instead
             * of silently using uninitialised timespecs. */
            if (target_to_host_timespec(ts, arg3)) {
                goto efault;
            }
            if (target_to_host_timespec(ts + 1,
                                        arg3 + sizeof(struct target_timespec))) {
                goto efault;
            }
            tsp = ts;
        }
        if (!arg2)
            ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
        else {
            if (!(p = lock_user_string(arg2))) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
            ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
            unlock_user(p, arg2, 0);
        }
    }
    break;
#endif
11432 case TARGET_NR_futex:
11433 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11434 break;
11435 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11436 case TARGET_NR_inotify_init:
11437 ret = get_errno(sys_inotify_init());
11438 break;
11439 #endif
11440 #ifdef CONFIG_INOTIFY1
11441 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11442 case TARGET_NR_inotify_init1:
11443 ret = get_errno(sys_inotify_init1(arg1));
11444 break;
11445 #endif
11446 #endif
11447 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11448 case TARGET_NR_inotify_add_watch:
11449 p = lock_user_string(arg2);
11450 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11451 unlock_user(p, arg2, 0);
11452 break;
11453 #endif
11454 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11455 case TARGET_NR_inotify_rm_watch:
11456 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
11457 break;
11458 #endif
11460 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11461 case TARGET_NR_mq_open:
11463 struct mq_attr posix_mq_attr;
11464 int host_flags;
11466 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11467 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11468 goto efault;
11470 p = lock_user_string(arg1 - 1);
11471 if (!p) {
11472 goto efault;
11474 ret = get_errno(mq_open(p, host_flags, arg3, &posix_mq_attr));
11475 unlock_user (p, arg1, 0);
11477 break;
case TARGET_NR_mq_unlink:
    /* mq_unlink(name): remove a message queue by name.
     * NOTE(review): 'arg1 - 1' mirrors the mq_open case — confirm the
     * off-by-one name address against the target ABI. */
    p = lock_user_string(arg1 - 1);
    if (!p) {
        ret = -TARGET_EFAULT;
        break;
    }
    ret = get_errno(mq_unlink(p));
    unlock_user (p, arg1, 0);
    break;
11489 case TARGET_NR_mq_timedsend:
11491 struct timespec ts;
11493 p = lock_user (VERIFY_READ, arg2, arg3, 1);
11494 if (arg5 != 0) {
11495 target_to_host_timespec(&ts, arg5);
11496 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11497 host_to_target_timespec(arg5, &ts);
11498 } else {
11499 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11501 unlock_user (p, arg2, arg3);
11503 break;
11505 case TARGET_NR_mq_timedreceive:
11507 struct timespec ts;
11508 unsigned int prio;
11510 p = lock_user (VERIFY_READ, arg2, arg3, 1);
11511 if (arg5 != 0) {
11512 target_to_host_timespec(&ts, arg5);
11513 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11514 &prio, &ts));
11515 host_to_target_timespec(arg5, &ts);
11516 } else {
11517 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11518 &prio, NULL));
11520 unlock_user (p, arg2, arg3);
11521 if (arg4 != 0)
11522 put_user_u32(prio, arg4);
11524 break;
11526 /* Not implemented for now... */
11527 /* case TARGET_NR_mq_notify: */
11528 /* break; */
11530 case TARGET_NR_mq_getsetattr:
11532 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11533 ret = 0;
11534 if (arg3 != 0) {
11535 ret = mq_getattr(arg1, &posix_mq_attr_out);
11536 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11538 if (arg2 != 0) {
11539 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11540 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
11544 break;
11545 #endif
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
case TARGET_NR_tee:
    {
        /* tee(fd_in, fd_out, len, flags): duplicate pipe content. */
        ret = get_errno(tee(arg1,arg2,arg3,arg4));
    }
    break;
#endif
#ifdef TARGET_NR_splice
case TARGET_NR_splice:
    {
        /* splice(fd_in, *off_in, fd_out, *off_out, len, flags):
         * either offset pointer may be NULL (0). */
        loff_t loff_in, loff_out;
        loff_t *ploff_in = NULL, *ploff_out = NULL;
        if (arg2) {
            if (get_user_u64(loff_in, arg2)) {
                goto efault;
            }
            ploff_in = &loff_in;
        }
        if (arg4) {
            if (get_user_u64(loff_out, arg4)) {
                goto efault;
            }
            ploff_out = &loff_out;
        }
        ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
        /* Write the (possibly advanced) offsets back to the guest.
         * NOTE(review): done even when splice failed — presumably
         * harmless since the host leaves them untouched; confirm. */
        if (arg2) {
            if (put_user_u64(loff_in, arg2)) {
                goto efault;
            }
        }
        if (arg4) {
            if (put_user_u64(loff_out, arg4)) {
                goto efault;
            }
        }
    }
    break;
#endif
#ifdef TARGET_NR_vmsplice
case TARGET_NR_vmsplice:
    {
        /* vmsplice(fd, iov, nr_segs, flags): splice user iovecs. */
        struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
        if (vec != NULL) {
            ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
            unlock_iovec(vec, arg2, arg3, 0);
        } else {
            /* lock_iovec sets host errno on failure. */
            ret = -host_to_target_errno(errno);
        }
    }
    break;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
case TARGET_NR_eventfd:
    ret = get_errno(eventfd(arg1, 0));
    /* A fresh eventfd needs no fd translator; drop any stale one. */
    fd_trans_unregister(ret);
    break;
#endif
#if defined(TARGET_NR_eventfd2)
case TARGET_NR_eventfd2:
    {
        /* eventfd2: translate the NONBLOCK/CLOEXEC flag bits from the
         * target encoding to the host encoding. */
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        fd_trans_unregister(ret);
        break;
    }
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
case TARGET_NR_fallocate:
    /* On 32-bit ABIs the two 64-bit offset/len values arrive split
     * across register pairs and must be reassembled. */
#if TARGET_ABI_BITS == 32
    ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                              target_offset64(arg5, arg6)));
#else
    ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
    break;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
    /* MIPS o32 inserts an alignment pad, shifting the 64-bit pairs. */
    ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                    target_offset64(arg5, arg6), arg7));
#else
    ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                    target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
    ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
    break;
#endif
#if defined(TARGET_NR_sync_file_range2)
case TARGET_NR_sync_file_range2:
    /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
    ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                    target_offset64(arg5, arg6), arg2));
#else
    ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
    break;
#endif
#endif
#if defined(TARGET_NR_signalfd4)
case TARGET_NR_signalfd4:
    /* signalfd4(fd, mask, sizemask, flags): helper handles the sigset
     * translation; arg3 (sizemask) is validated there. */
    ret = do_signalfd4(arg1, arg2, arg4);
    break;
#endif
#if defined(TARGET_NR_signalfd)
case TARGET_NR_signalfd:
    /* Legacy signalfd == signalfd4 with flags = 0. */
    ret = do_signalfd4(arg1, arg2, 0);
    break;
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
case TARGET_NR_epoll_create:
    ret = get_errno(epoll_create(arg1));
    break;
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
case TARGET_NR_epoll_create1:
    ret = get_errno(epoll_create1(arg1));
    break;
#endif
#if defined(TARGET_NR_epoll_ctl)
case TARGET_NR_epoll_ctl:
    {
        /* epoll_ctl(epfd, op, fd, event): event (arg4) may be NULL,
         * e.g. for EPOLL_CTL_DEL. */
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            struct target_epoll_event *target_ep;
            if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                goto efault;
            }
            ep.events = tswap32(target_ep->events);
            /* The epoll_data_t union is just opaque data to the kernel,
             * so we transfer all 64 bits across and need not worry what
             * actual data type it is.
             */
            ep.data.u64 = tswap64(target_ep->data.u64);
            unlock_user_struct(target_ep, arg4, 0);
            epp = &ep;
        }
        ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
        break;
    }
#endif
#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
case TARGET_NR_epoll_pwait:
#endif
    {
        /* epoll_wait / epoll_pwait share one implementation; both are
         * funnelled through safe_epoll_pwait(), with pwait additionally
         * translating the guest sigmask. */
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        /* Bound maxevents so the alloca() below stays small. */
        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            ret = -TARGET_EINVAL;
            break;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            goto efault;
        }

        ep = alloca(maxevents * sizeof(struct epoll_event));

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                if (arg6 != sizeof(target_sigset_t)) {
                    /* NOTE(review): this inner break leaves ret < 0 and
                     * still reaches the unlock_user below with a negative
                     * length — confirm unlock_user tolerates that. */
                    ret = -TARGET_EINVAL;
                    break;
                }

                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    unlock_user(target_ep, arg2, 0);
                    goto efault;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            /* Copy the ready events back, byte-swapping as needed. */
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
        }
        unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
        break;
    }
#endif
#endif
#ifdef TARGET_NR_prlimit64
case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);
        if (arg3) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                goto efault;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        /* Old limits are only copied back on success and when requested. */
        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                goto efault;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        break;
    }
#endif
#ifdef TARGET_NR_gethostname
case TARGET_NR_gethostname:
    {
        /* gethostname(name, len): write the host name into the guest
         * buffer arg1 of length arg2. */
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        break;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
case TARGET_NR_atomic_cmpxchg_32:
    {
        /* Kernel-assisted 32-bit compare-and-swap (m68k-style):
         * if *arg6 == arg2 then *arg6 = arg1; returns the old value.
         * should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            /* Bad address: deliver SIGSEGV to the guest. */
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo,
                         QEMU_SI_FAULT, &info);
            ret = 0xdeadbeef;
            break;
        }
        if (mem_value == arg2)
            put_user_u32(arg1, arg6);
        ret = mem_value;
        break;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
case TARGET_NR_atomic_barrier:
    {
        /* Like the kernel implementation and the qemu arm barrier, no-op this? */
        ret = 0;
        break;
    }
#endif
#ifdef TARGET_NR_timer_create
case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        /* Host timer handles live in g_posix_timers[]; the guest gets
         * back an index tagged with TIMER_MAGIC. */
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers  + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    break;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    goto efault;
                }
            }
        }
        break;
    }
#endif
#ifdef TARGET_NR_timer_settime
case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            target_to_host_timespec(&hspec_new.it_interval, arg3);
            target_to_host_itimerspec(&hspec_new, arg3);
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            /* Fix: the previous timer setting must be copied back through
             * old_value (arg4) — the old code wrote it through arg2, the
             * flags argument, and did so even when old_value was NULL. */
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                goto efault;
            }
        }
        break;
    }
#endif
#ifdef TARGET_NR_timer_gettime
case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        break;
    }
#endif
#ifdef TARGET_NR_timer_getoverrun
case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        /* Fix: removed the bogus fd_trans_unregister(ret) here — the
         * return value is an overrun count, not a file descriptor, and
         * unregistering it could drop the fd translator of an unrelated
         * descriptor whose number happens to equal the count. */
        break;
    }
#endif
#ifdef TARGET_NR_timer_delete
case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            /* Free the slot for reuse by next_free_host_timer(). */
            g_posix_timers[timerid] = 0;
        }
        break;
    }
#endif
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
case TARGET_NR_timerfd_create:
    /* timerfd_create(clockid, flags): translate flag bits via the
     * generic fcntl flags table. */
    ret = get_errno(timerfd_create(arg1,
                    target_to_host_bitmask(arg2, fcntl_flags_tbl)));
    break;
#endif

#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
case TARGET_NR_timerfd_gettime:
    {
        /* timerfd_gettime(fd, *curr_value). */
        struct itimerspec its_curr;

        ret = get_errno(timerfd_gettime(arg1, &its_curr));

        if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
            goto efault;
        }
    }
    break;
#endif

#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
case TARGET_NR_timerfd_settime:
    {
        /* timerfd_settime(fd, flags, *new_value, *old_value):
         * new_value may be NULL; old_value is copied back if given. */
        struct itimerspec its_new, its_old, *p_new;

        if (arg3) {
            if (target_to_host_itimerspec(&its_new, arg3)) {
                goto efault;
            }
            p_new = &its_new;
        } else {
            p_new = NULL;
        }

        ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

        if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
            goto efault;
        }
    }
    break;
#endif
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
case TARGET_NR_ioprio_get:
    /* ioprio_get(which, who): values pass through unmodified. */
    ret = get_errno(ioprio_get(arg1, arg2));
    break;
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
case TARGET_NR_ioprio_set:
    ret = get_errno(ioprio_set(arg1, arg2, arg3));
    break;
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
case TARGET_NR_setns:
    ret = get_errno(setns(arg1, arg2));
    break;
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
case TARGET_NR_unshare:
    ret = get_errno(unshare(arg1));
    break;
#endif
default:
unimplemented:
    /* Unknown or unhandled syscall: warn once per invocation. */
    gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
unimplemented_nowarn:
#endif
    ret = -TARGET_ENOSYS;
    break;
}
fail:
    /* Common exit path: optional strace/trace logging, then return the
     * (possibly negative-errno) result to the caller. */
#ifdef DEBUG
    gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
#endif
    if(do_strace)
        print_syscall_ret(num, ret);
    trace_guest_user_syscall_ret(cpu, num, ret);
    return ret;
efault:
    /* Shared bad-guest-pointer exit. */
    ret = -TARGET_EFAULT;
    goto fail;