linux-user: Handle TARGET_MAP_STACK and TARGET_MAP_HUGETLB
[qemu/ar7.git] / linux-user / syscall.c
blob8047bf3aacad4338960772c83c26d68743efccd7
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #ifdef __ia64__
40 int __clone2(int (*fn)(void *), void *child_stack_base,
41 size_t stack_size, int flags, void *arg, ...);
42 #endif
43 #include <sys/socket.h>
44 #include <sys/un.h>
45 #include <sys/uio.h>
46 #include <poll.h>
47 #include <sys/times.h>
48 #include <sys/shm.h>
49 #include <sys/sem.h>
50 #include <sys/statfs.h>
51 #include <time.h>
52 #include <utime.h>
53 #include <sys/sysinfo.h>
54 #include <sys/signalfd.h>
55 //#include <sys/user.h>
56 #include <netinet/ip.h>
57 #include <netinet/tcp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/errqueue.h>
62 #include <linux/random.h>
63 #include "qemu-common.h"
64 #ifdef CONFIG_TIMERFD
65 #include <sys/timerfd.h>
66 #endif
67 #ifdef TARGET_GPROF
68 #include <sys/gmon.h>
69 #endif
70 #ifdef CONFIG_EVENTFD
71 #include <sys/eventfd.h>
72 #endif
73 #ifdef CONFIG_EPOLL
74 #include <sys/epoll.h>
75 #endif
76 #ifdef CONFIG_ATTR
77 #include "qemu/xattr.h"
78 #endif
79 #ifdef CONFIG_SENDFILE
80 #include <sys/sendfile.h>
81 #endif
83 #define termios host_termios
84 #define winsize host_winsize
85 #define termio host_termio
86 #define sgttyb host_sgttyb /* same as target */
87 #define tchars host_tchars /* same as target */
88 #define ltchars host_ltchars /* same as target */
90 #include <linux/termios.h>
91 #include <linux/unistd.h>
92 #include <linux/cdrom.h>
93 #include <linux/hdreg.h>
94 #include <linux/soundcard.h>
95 #include <linux/kd.h>
96 #include <linux/mtio.h>
97 #include <linux/fs.h>
98 #if defined(CONFIG_FIEMAP)
99 #include <linux/fiemap.h>
100 #endif
101 #include <linux/fb.h>
102 #include <linux/vt.h>
103 #include <linux/dm-ioctl.h>
104 #include <linux/reboot.h>
105 #include <linux/route.h>
106 #include <linux/filter.h>
107 #include <linux/blkpg.h>
108 #include <netpacket/packet.h>
109 #include <linux/netlink.h>
110 #ifdef CONFIG_RTNETLINK
111 #include <linux/rtnetlink.h>
112 #include <linux/if_bridge.h>
113 #endif
114 #include <linux/audit.h>
115 #include "linux_loop.h"
116 #include "uname.h"
118 #include "qemu.h"
120 #ifndef CLONE_IO
121 #define CLONE_IO 0x80000000 /* Clone io context */
122 #endif
124 /* We can't directly call the host clone syscall, because this will
125 * badly confuse libc (breaking mutexes, for example). So we must
126 * divide clone flags into:
127 * * flag combinations that look like pthread_create()
128 * * flag combinations that look like fork()
129 * * flags we can implement within QEMU itself
130 * * flags we can't support and will return an error for
132 /* For thread creation, all these flags must be present; for
133 * fork, none must be present.
135 #define CLONE_THREAD_FLAGS \
136 (CLONE_VM | CLONE_FS | CLONE_FILES | \
137 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
139 /* These flags are ignored:
140 * CLONE_DETACHED is now ignored by the kernel;
141 * CLONE_IO is just an optimisation hint to the I/O scheduler
143 #define CLONE_IGNORED_FLAGS \
144 (CLONE_DETACHED | CLONE_IO)
146 /* Flags for fork which we can implement within QEMU itself */
147 #define CLONE_OPTIONAL_FORK_FLAGS \
148 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
149 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
151 /* Flags for thread creation which we can implement within QEMU itself */
152 #define CLONE_OPTIONAL_THREAD_FLAGS \
153 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
154 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
156 #define CLONE_INVALID_FORK_FLAGS \
157 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
159 #define CLONE_INVALID_THREAD_FLAGS \
160 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
161 CLONE_IGNORED_FLAGS))
163 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
164 * have almost all been allocated. We cannot support any of
165 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
166 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
167 * The checks against the invalid thread masks above will catch these.
168 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
171 //#define DEBUG
172 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
173 * once. This exercises the codepaths for restart.
175 //#define DEBUG_ERESTARTSYS
177 //#include <linux/msdos_fs.h>
178 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
179 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
181 #undef _syscall0
182 #undef _syscall1
183 #undef _syscall2
184 #undef _syscall3
185 #undef _syscall4
186 #undef _syscall5
187 #undef _syscall6
189 #define _syscall0(type,name) \
190 static type name (void) \
192 return syscall(__NR_##name); \
195 #define _syscall1(type,name,type1,arg1) \
196 static type name (type1 arg1) \
198 return syscall(__NR_##name, arg1); \
201 #define _syscall2(type,name,type1,arg1,type2,arg2) \
202 static type name (type1 arg1,type2 arg2) \
204 return syscall(__NR_##name, arg1, arg2); \
207 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
208 static type name (type1 arg1,type2 arg2,type3 arg3) \
210 return syscall(__NR_##name, arg1, arg2, arg3); \
213 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
214 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
216 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
219 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
220 type5,arg5) \
221 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
223 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
227 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
228 type5,arg5,type6,arg6) \
229 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
230 type6 arg6) \
232 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
236 #define __NR_sys_uname __NR_uname
237 #define __NR_sys_getcwd1 __NR_getcwd
238 #define __NR_sys_getdents __NR_getdents
239 #define __NR_sys_getdents64 __NR_getdents64
240 #define __NR_sys_getpriority __NR_getpriority
241 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
242 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
243 #define __NR_sys_syslog __NR_syslog
244 #define __NR_sys_futex __NR_futex
245 #define __NR_sys_inotify_init __NR_inotify_init
246 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
247 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
249 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
250 defined(__s390x__)
251 #define __NR__llseek __NR_lseek
252 #endif
254 /* Newer kernel ports have llseek() instead of _llseek() */
255 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
256 #define TARGET_NR__llseek TARGET_NR_llseek
257 #endif
#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* Fallback for hosts whose kernel headers lack __NR_gettid.  Like the
 * real wrapper above, failure is reported as a negative HOST errno. */
static int gettid(void)
{
    return -ENOSYS;
}
#endif
268 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
269 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
270 #endif
271 #if !defined(__NR_getdents) || \
272 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
273 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
274 #endif
275 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
276 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
277 loff_t *, res, uint, wh);
278 #endif
279 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
280 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
281 siginfo_t *, uinfo)
282 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
283 #ifdef __NR_exit_group
284 _syscall1(int,exit_group,int,error_code)
285 #endif
286 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
287 _syscall1(int,set_tid_address,int *,tidptr)
288 #endif
289 #if defined(TARGET_NR_futex) && defined(__NR_futex)
290 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
291 const struct timespec *,timeout,int *,uaddr2,int,val3)
292 #endif
293 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
294 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
295 unsigned long *, user_mask_ptr);
296 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
297 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
298 unsigned long *, user_mask_ptr);
299 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
300 void *, arg);
301 _syscall2(int, capget, struct __user_cap_header_struct *, header,
302 struct __user_cap_data_struct *, data);
303 _syscall2(int, capset, struct __user_cap_header_struct *, header,
304 struct __user_cap_data_struct *, data);
305 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
306 _syscall2(int, ioprio_get, int, which, int, who)
307 #endif
308 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
309 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
310 #endif
311 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
312 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
313 #endif
315 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
316 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
317 unsigned long, idx1, unsigned long, idx2)
318 #endif
320 static bitmask_transtbl fcntl_flags_tbl[] = {
321 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
322 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
323 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
324 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
325 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
326 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
327 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
328 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
329 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
330 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
331 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
332 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
333 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
334 #if defined(O_DIRECT)
335 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
336 #endif
337 #if defined(O_NOATIME)
338 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
339 #endif
340 #if defined(O_CLOEXEC)
341 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
342 #endif
343 #if defined(O_PATH)
344 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
345 #endif
346 #if defined(O_TMPFILE)
347 { TARGET_O_TMPFILE, TARGET_O_TMPFILE, O_TMPFILE, O_TMPFILE },
348 #endif
349 /* Don't terminate the list prematurely on 64-bit host+guest. */
350 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
351 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
352 #endif
353 { 0, 0, 0, 0 }
356 enum {
357 QEMU_IFLA_BR_UNSPEC,
358 QEMU_IFLA_BR_FORWARD_DELAY,
359 QEMU_IFLA_BR_HELLO_TIME,
360 QEMU_IFLA_BR_MAX_AGE,
361 QEMU_IFLA_BR_AGEING_TIME,
362 QEMU_IFLA_BR_STP_STATE,
363 QEMU_IFLA_BR_PRIORITY,
364 QEMU_IFLA_BR_VLAN_FILTERING,
365 QEMU_IFLA_BR_VLAN_PROTOCOL,
366 QEMU_IFLA_BR_GROUP_FWD_MASK,
367 QEMU_IFLA_BR_ROOT_ID,
368 QEMU_IFLA_BR_BRIDGE_ID,
369 QEMU_IFLA_BR_ROOT_PORT,
370 QEMU_IFLA_BR_ROOT_PATH_COST,
371 QEMU_IFLA_BR_TOPOLOGY_CHANGE,
372 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
373 QEMU_IFLA_BR_HELLO_TIMER,
374 QEMU_IFLA_BR_TCN_TIMER,
375 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
376 QEMU_IFLA_BR_GC_TIMER,
377 QEMU_IFLA_BR_GROUP_ADDR,
378 QEMU_IFLA_BR_FDB_FLUSH,
379 QEMU_IFLA_BR_MCAST_ROUTER,
380 QEMU_IFLA_BR_MCAST_SNOOPING,
381 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
382 QEMU_IFLA_BR_MCAST_QUERIER,
383 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
384 QEMU_IFLA_BR_MCAST_HASH_MAX,
385 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
386 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
387 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
388 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
389 QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
390 QEMU_IFLA_BR_MCAST_QUERY_INTVL,
391 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
392 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
393 QEMU_IFLA_BR_NF_CALL_IPTABLES,
394 QEMU_IFLA_BR_NF_CALL_IP6TABLES,
395 QEMU_IFLA_BR_NF_CALL_ARPTABLES,
396 QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
397 QEMU_IFLA_BR_PAD,
398 QEMU_IFLA_BR_VLAN_STATS_ENABLED,
399 QEMU_IFLA_BR_MCAST_STATS_ENABLED,
400 QEMU___IFLA_BR_MAX,
403 enum {
404 QEMU_IFLA_UNSPEC,
405 QEMU_IFLA_ADDRESS,
406 QEMU_IFLA_BROADCAST,
407 QEMU_IFLA_IFNAME,
408 QEMU_IFLA_MTU,
409 QEMU_IFLA_LINK,
410 QEMU_IFLA_QDISC,
411 QEMU_IFLA_STATS,
412 QEMU_IFLA_COST,
413 QEMU_IFLA_PRIORITY,
414 QEMU_IFLA_MASTER,
415 QEMU_IFLA_WIRELESS,
416 QEMU_IFLA_PROTINFO,
417 QEMU_IFLA_TXQLEN,
418 QEMU_IFLA_MAP,
419 QEMU_IFLA_WEIGHT,
420 QEMU_IFLA_OPERSTATE,
421 QEMU_IFLA_LINKMODE,
422 QEMU_IFLA_LINKINFO,
423 QEMU_IFLA_NET_NS_PID,
424 QEMU_IFLA_IFALIAS,
425 QEMU_IFLA_NUM_VF,
426 QEMU_IFLA_VFINFO_LIST,
427 QEMU_IFLA_STATS64,
428 QEMU_IFLA_VF_PORTS,
429 QEMU_IFLA_PORT_SELF,
430 QEMU_IFLA_AF_SPEC,
431 QEMU_IFLA_GROUP,
432 QEMU_IFLA_NET_NS_FD,
433 QEMU_IFLA_EXT_MASK,
434 QEMU_IFLA_PROMISCUITY,
435 QEMU_IFLA_NUM_TX_QUEUES,
436 QEMU_IFLA_NUM_RX_QUEUES,
437 QEMU_IFLA_CARRIER,
438 QEMU_IFLA_PHYS_PORT_ID,
439 QEMU_IFLA_CARRIER_CHANGES,
440 QEMU_IFLA_PHYS_SWITCH_ID,
441 QEMU_IFLA_LINK_NETNSID,
442 QEMU_IFLA_PHYS_PORT_NAME,
443 QEMU_IFLA_PROTO_DOWN,
444 QEMU_IFLA_GSO_MAX_SEGS,
445 QEMU_IFLA_GSO_MAX_SIZE,
446 QEMU_IFLA_PAD,
447 QEMU_IFLA_XDP,
448 QEMU___IFLA_MAX
451 enum {
452 QEMU_IFLA_BRPORT_UNSPEC,
453 QEMU_IFLA_BRPORT_STATE,
454 QEMU_IFLA_BRPORT_PRIORITY,
455 QEMU_IFLA_BRPORT_COST,
456 QEMU_IFLA_BRPORT_MODE,
457 QEMU_IFLA_BRPORT_GUARD,
458 QEMU_IFLA_BRPORT_PROTECT,
459 QEMU_IFLA_BRPORT_FAST_LEAVE,
460 QEMU_IFLA_BRPORT_LEARNING,
461 QEMU_IFLA_BRPORT_UNICAST_FLOOD,
462 QEMU_IFLA_BRPORT_PROXYARP,
463 QEMU_IFLA_BRPORT_LEARNING_SYNC,
464 QEMU_IFLA_BRPORT_PROXYARP_WIFI,
465 QEMU_IFLA_BRPORT_ROOT_ID,
466 QEMU_IFLA_BRPORT_BRIDGE_ID,
467 QEMU_IFLA_BRPORT_DESIGNATED_PORT,
468 QEMU_IFLA_BRPORT_DESIGNATED_COST,
469 QEMU_IFLA_BRPORT_ID,
470 QEMU_IFLA_BRPORT_NO,
471 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
472 QEMU_IFLA_BRPORT_CONFIG_PENDING,
473 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
474 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
475 QEMU_IFLA_BRPORT_HOLD_TIMER,
476 QEMU_IFLA_BRPORT_FLUSH,
477 QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
478 QEMU_IFLA_BRPORT_PAD,
479 QEMU___IFLA_BRPORT_MAX
482 enum {
483 QEMU_IFLA_INFO_UNSPEC,
484 QEMU_IFLA_INFO_KIND,
485 QEMU_IFLA_INFO_DATA,
486 QEMU_IFLA_INFO_XSTATS,
487 QEMU_IFLA_INFO_SLAVE_KIND,
488 QEMU_IFLA_INFO_SLAVE_DATA,
489 QEMU___IFLA_INFO_MAX,
492 enum {
493 QEMU_IFLA_INET_UNSPEC,
494 QEMU_IFLA_INET_CONF,
495 QEMU___IFLA_INET_MAX,
498 enum {
499 QEMU_IFLA_INET6_UNSPEC,
500 QEMU_IFLA_INET6_FLAGS,
501 QEMU_IFLA_INET6_CONF,
502 QEMU_IFLA_INET6_STATS,
503 QEMU_IFLA_INET6_MCAST,
504 QEMU_IFLA_INET6_CACHEINFO,
505 QEMU_IFLA_INET6_ICMP6STATS,
506 QEMU_IFLA_INET6_TOKEN,
507 QEMU_IFLA_INET6_ADDR_GEN_MODE,
508 QEMU___IFLA_INET6_MAX
511 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
512 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
513 typedef struct TargetFdTrans {
514 TargetFdDataFunc host_to_target_data;
515 TargetFdDataFunc target_to_host_data;
516 TargetFdAddrFunc target_to_host_addr;
517 } TargetFdTrans;
519 static TargetFdTrans **target_fd_trans;
521 static unsigned int target_fd_max;
523 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
525 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
526 return target_fd_trans[fd]->target_to_host_data;
528 return NULL;
531 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
533 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
534 return target_fd_trans[fd]->host_to_target_data;
536 return NULL;
539 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
541 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
542 return target_fd_trans[fd]->target_to_host_addr;
544 return NULL;
547 static void fd_trans_register(int fd, TargetFdTrans *trans)
549 unsigned int oldmax;
551 if (fd >= target_fd_max) {
552 oldmax = target_fd_max;
553 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
554 target_fd_trans = g_renew(TargetFdTrans *,
555 target_fd_trans, target_fd_max);
556 memset((void *)(target_fd_trans + oldmax), 0,
557 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
559 target_fd_trans[fd] = trans;
562 static void fd_trans_unregister(int fd)
564 if (fd >= 0 && fd < target_fd_max) {
565 target_fd_trans[fd] = NULL;
569 static void fd_trans_dup(int oldfd, int newfd)
571 fd_trans_unregister(newfd);
572 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
573 fd_trans_register(newfd, target_fd_trans[oldfd]);
/*
 * getcwd() wrapper following the kernel syscall convention: on success
 * return the path length INCLUDING the trailing NUL byte; on failure
 * return -1 with errno already set by getcwd().
 */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        return -1;
    }
    return strlen(buf) + 1;
}
#ifdef TARGET_NR_utimensat
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int, sys_utimensat, int, dirfd, const char *, pathname,
          const struct timespec *, tsp, int, flags)
#else
/* Host kernel headers predate utimensat: report ENOSYS so the guest
 * sees the same failure an old kernel would give it. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrappers so the dispatch code has uniform sys_* entry points. */
static int sys_inotify_init(void)
{
    return inotify_init();
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying
 * syscall, so define the kernel's fixed 64-bit layout explicitly. */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};

_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, };

/*
 * Claim the first unused slot in g_posix_timers and return its index,
 * or -1 when every slot is busy.  The slot is reserved by storing a
 * dummy non-zero handle; the caller overwrites it with the real timer.
 * FIXME: Does finding the next free slot require a lock?
 */
static inline int next_free_host_timer(void)
{
    int slot;

    for (slot = 0; slot < ARRAY_SIZE(g_posix_timers); slot++) {
        if (g_posix_timers[slot] == 0) {
            g_posix_timers[slot] = (timer_t) 1;
            return slot;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env)
{
    return ((CPUARMState *)cpu_env)->eabi == 1;
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
688 #define ERRNO_TABLE_SIZE 1200
690 /* target_to_host_errno_table[] is initialized from
691 * host_to_target_errno_table[] in syscall_init(). */
692 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
696 * This list is the union of errno values overridden in asm-<arch>/errno.h
697 * minus the errnos that are not actually generic to all archs.
699 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
700 [EAGAIN] = TARGET_EAGAIN,
701 [EIDRM] = TARGET_EIDRM,
702 [ECHRNG] = TARGET_ECHRNG,
703 [EL2NSYNC] = TARGET_EL2NSYNC,
704 [EL3HLT] = TARGET_EL3HLT,
705 [EL3RST] = TARGET_EL3RST,
706 [ELNRNG] = TARGET_ELNRNG,
707 [EUNATCH] = TARGET_EUNATCH,
708 [ENOCSI] = TARGET_ENOCSI,
709 [EL2HLT] = TARGET_EL2HLT,
710 [EDEADLK] = TARGET_EDEADLK,
711 [ENOLCK] = TARGET_ENOLCK,
712 [EBADE] = TARGET_EBADE,
713 [EBADR] = TARGET_EBADR,
714 [EXFULL] = TARGET_EXFULL,
715 [ENOANO] = TARGET_ENOANO,
716 [EBADRQC] = TARGET_EBADRQC,
717 [EBADSLT] = TARGET_EBADSLT,
718 [EBFONT] = TARGET_EBFONT,
719 [ENOSTR] = TARGET_ENOSTR,
720 [ENODATA] = TARGET_ENODATA,
721 [ETIME] = TARGET_ETIME,
722 [ENOSR] = TARGET_ENOSR,
723 [ENONET] = TARGET_ENONET,
724 [ENOPKG] = TARGET_ENOPKG,
725 [EREMOTE] = TARGET_EREMOTE,
726 [ENOLINK] = TARGET_ENOLINK,
727 [EADV] = TARGET_EADV,
728 [ESRMNT] = TARGET_ESRMNT,
729 [ECOMM] = TARGET_ECOMM,
730 [EPROTO] = TARGET_EPROTO,
731 [EDOTDOT] = TARGET_EDOTDOT,
732 [EMULTIHOP] = TARGET_EMULTIHOP,
733 [EBADMSG] = TARGET_EBADMSG,
734 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
735 [EOVERFLOW] = TARGET_EOVERFLOW,
736 [ENOTUNIQ] = TARGET_ENOTUNIQ,
737 [EBADFD] = TARGET_EBADFD,
738 [EREMCHG] = TARGET_EREMCHG,
739 [ELIBACC] = TARGET_ELIBACC,
740 [ELIBBAD] = TARGET_ELIBBAD,
741 [ELIBSCN] = TARGET_ELIBSCN,
742 [ELIBMAX] = TARGET_ELIBMAX,
743 [ELIBEXEC] = TARGET_ELIBEXEC,
744 [EILSEQ] = TARGET_EILSEQ,
745 [ENOSYS] = TARGET_ENOSYS,
746 [ELOOP] = TARGET_ELOOP,
747 [ERESTART] = TARGET_ERESTART,
748 [ESTRPIPE] = TARGET_ESTRPIPE,
749 [ENOTEMPTY] = TARGET_ENOTEMPTY,
750 [EUSERS] = TARGET_EUSERS,
751 [ENOTSOCK] = TARGET_ENOTSOCK,
752 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
753 [EMSGSIZE] = TARGET_EMSGSIZE,
754 [EPROTOTYPE] = TARGET_EPROTOTYPE,
755 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
756 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
757 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
758 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
759 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
760 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
761 [EADDRINUSE] = TARGET_EADDRINUSE,
762 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
763 [ENETDOWN] = TARGET_ENETDOWN,
764 [ENETUNREACH] = TARGET_ENETUNREACH,
765 [ENETRESET] = TARGET_ENETRESET,
766 [ECONNABORTED] = TARGET_ECONNABORTED,
767 [ECONNRESET] = TARGET_ECONNRESET,
768 [ENOBUFS] = TARGET_ENOBUFS,
769 [EISCONN] = TARGET_EISCONN,
770 [ENOTCONN] = TARGET_ENOTCONN,
771 [EUCLEAN] = TARGET_EUCLEAN,
772 [ENOTNAM] = TARGET_ENOTNAM,
773 [ENAVAIL] = TARGET_ENAVAIL,
774 [EISNAM] = TARGET_EISNAM,
775 [EREMOTEIO] = TARGET_EREMOTEIO,
776 [EDQUOT] = TARGET_EDQUOT,
777 [ESHUTDOWN] = TARGET_ESHUTDOWN,
778 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
779 [ETIMEDOUT] = TARGET_ETIMEDOUT,
780 [ECONNREFUSED] = TARGET_ECONNREFUSED,
781 [EHOSTDOWN] = TARGET_EHOSTDOWN,
782 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
783 [EALREADY] = TARGET_EALREADY,
784 [EINPROGRESS] = TARGET_EINPROGRESS,
785 [ESTALE] = TARGET_ESTALE,
786 [ECANCELED] = TARGET_ECANCELED,
787 [ENOMEDIUM] = TARGET_ENOMEDIUM,
788 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
789 #ifdef ENOKEY
790 [ENOKEY] = TARGET_ENOKEY,
791 #endif
792 #ifdef EKEYEXPIRED
793 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
794 #endif
795 #ifdef EKEYREVOKED
796 [EKEYREVOKED] = TARGET_EKEYREVOKED,
797 #endif
798 #ifdef EKEYREJECTED
799 [EKEYREJECTED] = TARGET_EKEYREJECTED,
800 #endif
801 #ifdef EOWNERDEAD
802 [EOWNERDEAD] = TARGET_EOWNERDEAD,
803 #endif
804 #ifdef ENOTRECOVERABLE
805 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
806 #endif
807 #ifdef ENOMSG
808 [ENOMSG] = TARGET_ENOMSG,
809 #endif
810 #ifdef ERKFILL
811 [ERFKILL] = TARGET_ERFKILL,
812 #endif
813 #ifdef EHWPOISON
814 [EHWPOISON] = TARGET_EHWPOISON,
815 #endif
818 static inline int host_to_target_errno(int err)
820 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
821 host_to_target_errno_table[err]) {
822 return host_to_target_errno_table[err];
824 return err;
827 static inline int target_to_host_errno(int err)
829 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
830 target_to_host_errno_table[err]) {
831 return target_to_host_errno_table[err];
833 return err;
836 static inline abi_long get_errno(abi_long ret)
838 if (ret == -1)
839 return -host_to_target_errno(errno);
840 else
841 return ret;
844 static inline int is_error(abi_long ret)
846 return (abi_ulong)ret >= (abi_ulong)(-4096);
849 const char *target_strerror(int err)
851 if (err == TARGET_ERESTARTSYS) {
852 return "To be restarted";
854 if (err == TARGET_QEMU_ESIGRETURN) {
855 return "Successful exit from sigreturn";
858 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
859 return NULL;
861 return strerror(target_to_host_errno(err));
864 #define safe_syscall0(type, name) \
865 static type safe_##name(void) \
867 return safe_syscall(__NR_##name); \
870 #define safe_syscall1(type, name, type1, arg1) \
871 static type safe_##name(type1 arg1) \
873 return safe_syscall(__NR_##name, arg1); \
876 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
877 static type safe_##name(type1 arg1, type2 arg2) \
879 return safe_syscall(__NR_##name, arg1, arg2); \
882 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
883 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
885 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
888 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
889 type4, arg4) \
890 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
892 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
895 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
896 type4, arg4, type5, arg5) \
897 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
898 type5 arg5) \
900 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
903 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
904 type4, arg4, type5, arg5, type6, arg6) \
905 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
906 type5 arg5, type6 arg6) \
908 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
911 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
912 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
913 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
914 int, flags, mode_t, mode)
915 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
916 struct rusage *, rusage)
917 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
918 int, options, struct rusage *, rusage)
919 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
920 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
921 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
922 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
923 struct timespec *, tsp, const sigset_t *, sigmask,
924 size_t, sigsetsize)
925 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
926 int, maxevents, int, timeout, const sigset_t *, sigmask,
927 size_t, sigsetsize)
928 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
929 const struct timespec *,timeout,int *,uaddr2,int,val3)
930 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
931 safe_syscall2(int, kill, pid_t, pid, int, sig)
932 safe_syscall2(int, tkill, int, tid, int, sig)
933 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
934 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
935 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
936 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
937 unsigned long, pos_l, unsigned long, pos_h)
938 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
939 unsigned long, pos_l, unsigned long, pos_h)
940 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
941 socklen_t, addrlen)
942 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
943 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
944 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
945 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
946 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
947 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
948 safe_syscall2(int, flock, int, fd, int, operation)
949 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
950 const struct timespec *, uts, size_t, sigsetsize)
951 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
952 int, flags)
953 safe_syscall2(int, nanosleep, const struct timespec *, req,
954 struct timespec *, rem)
955 #ifdef TARGET_NR_clock_nanosleep
956 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
957 const struct timespec *, req, struct timespec *, rem)
958 #endif
/* SysV IPC: either the host has individual msgsnd/msgrcv/semtimedop syscalls,
 * or everything is multiplexed through the single ipc(2) syscall. */
959 #ifdef __NR_msgsnd
960 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
961 int, flags)
962 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
963 long, msgtype, int, flags)
964 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
965 unsigned, nsops, const struct timespec *, timeout)
966 #else
967 /* This host kernel architecture uses a single ipc syscall; fake up
968 * wrappers for the sub-operations to hide this implementation detail.
969 * Annoyingly we can't include linux/ipc.h to get the constant definitions
970 * for the call parameter because some structs in there conflict with the
971 * sys/ipc.h ones. So we just define them here, and rely on them being
972 * the same for all host architectures.
974 #define Q_SEMTIMEDOP 4
975 #define Q_MSGSND 11
976 #define Q_MSGRCV 12
977 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
979 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
980 void *, ptr, long, fifth)
981 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
983 return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
/* msgrcv uses the "version 1" calling convention (type passed directly). */
985 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
987 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
989 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
990 const struct timespec *timeout)
992 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
993 (long)timeout);
995 #endif
/* POSIX message queue wrappers, only when both target and host support them. */
996 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
997 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
998 size_t, len, unsigned, prio, const struct timespec *, timeout)
999 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
1000 size_t, len, unsigned *, prio, const struct timespec *, timeout)
1001 #endif
1002 /* We do ioctl like this rather than via safe_syscall3 to preserve the
1003 * "third argument might be integer or pointer or not present" behaviour of
1004 * the libc function.
1006 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
1007 /* Similarly for fcntl. Note that callers must always:
1008 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
1009 * use the flock64 struct rather than unsuffixed flock
1010 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
1012 #ifdef __NR_fcntl64
1013 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
1014 #else
1015 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
1016 #endif
1018 static inline int host_to_target_sock_type(int host_type)
1020 int target_type;
1022 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
1023 case SOCK_DGRAM:
1024 target_type = TARGET_SOCK_DGRAM;
1025 break;
1026 case SOCK_STREAM:
1027 target_type = TARGET_SOCK_STREAM;
1028 break;
1029 default:
1030 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
1031 break;
1034 #if defined(SOCK_CLOEXEC)
1035 if (host_type & SOCK_CLOEXEC) {
1036 target_type |= TARGET_SOCK_CLOEXEC;
1038 #endif
1040 #if defined(SOCK_NONBLOCK)
1041 if (host_type & SOCK_NONBLOCK) {
1042 target_type |= TARGET_SOCK_NONBLOCK;
1044 #endif
1046 return target_type;
/* Guest program-break bookkeeping, maintained by target_set_brk()/do_brk():
 * target_brk is the current break, target_original_brk the initial one, and
 * brk_page the end of the highest host page reserved for the guest heap. */
1049 static abi_ulong target_brk;
1050 static abi_ulong target_original_brk;
1051 static abi_ulong brk_page;
1053 void target_set_brk(abi_ulong new_brk)
1055 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
1056 brk_page = HOST_PAGE_ALIGN(target_brk);
/* Debug tracing for do_brk(); swap the two definitions to enable it. */
1059 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
1060 #define DEBUGF_BRK(message, args...)
1062 /* do_brk() must return target values and target errnos. */
1063 abi_long do_brk(abi_ulong new_brk)
1065 abi_long mapped_addr;
1066 abi_ulong new_alloc_size;
1068 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
/* brk(0) is the conventional "query current break" form. */
1070 if (!new_brk) {
1071 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
1072 return target_brk;
/* Refuse to shrink below the initial break; report the unchanged value. */
1074 if (new_brk < target_original_brk) {
1075 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
1076 target_brk);
1077 return target_brk;
1080 /* If the new brk is less than the highest page reserved to the
1081 * target heap allocation, set it and we're almost done... */
1082 if (new_brk <= brk_page) {
1083 /* Heap contents are initialized to zero, as for anonymous
1084 * mapped pages. */
1085 if (new_brk > target_brk) {
1086 memset(g2h(target_brk), 0, new_brk - target_brk);
1088 target_brk = new_brk;
1089 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
1090 return target_brk;
1093 /* We need to allocate more memory after the brk... Note that
1094 * we don't use MAP_FIXED because that will map over the top of
1095 * any existing mapping (like the one with the host libc or qemu
1096 * itself); instead we treat "mapped but at wrong address" as
1097 * a failure and unmap again.
1099 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
1100 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
1101 PROT_READ|PROT_WRITE,
1102 MAP_ANON|MAP_PRIVATE, 0, 0));
1104 if (mapped_addr == brk_page) {
1105 /* Heap contents are initialized to zero, as for anonymous
1106 * mapped pages. Technically the new pages are already
1107 * initialized to zero since they *are* anonymous mapped
1108 * pages, however we have to take care with the contents that
1109 * come from the remaining part of the previous page: it may
1110 * contains garbage data due to a previous heap usage (grown
1111 * then shrunken). */
1112 memset(g2h(target_brk), 0, brk_page - target_brk);
1114 target_brk = new_brk;
1115 brk_page = HOST_PAGE_ALIGN(target_brk);
1116 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
1117 target_brk);
1118 return target_brk;
1119 } else if (mapped_addr != -1) {
1120 /* Mapped but at wrong address, meaning there wasn't actually
1121 * enough space for this brk.
1123 target_munmap(mapped_addr, new_alloc_size);
1124 mapped_addr = -1;
1125 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
1127 else {
1128 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
1131 #if defined(TARGET_ALPHA)
1132 /* We (partially) emulate OSF/1 on Alpha, which requires we
1133 return a proper errno, not an unchanged brk value. */
1134 return -TARGET_ENOMEM;
1135 #endif
1136 /* For everything else, return the previous break. */
1137 return target_brk;
1140 static inline abi_long copy_from_user_fdset(fd_set *fds,
1141 abi_ulong target_fds_addr,
1142 int n)
1144 int i, nw, j, k;
1145 abi_ulong b, *target_fds;
1147 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1148 if (!(target_fds = lock_user(VERIFY_READ,
1149 target_fds_addr,
1150 sizeof(abi_ulong) * nw,
1151 1)))
1152 return -TARGET_EFAULT;
1154 FD_ZERO(fds);
1155 k = 0;
1156 for (i = 0; i < nw; i++) {
1157 /* grab the abi_ulong */
1158 __get_user(b, &target_fds[i]);
1159 for (j = 0; j < TARGET_ABI_BITS; j++) {
1160 /* check the bit inside the abi_ulong */
1161 if ((b >> j) & 1)
1162 FD_SET(k, fds);
1163 k++;
1167 unlock_user(target_fds, target_fds_addr, 0);
1169 return 0;
1172 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1173 abi_ulong target_fds_addr,
1174 int n)
1176 if (target_fds_addr) {
1177 if (copy_from_user_fdset(fds, target_fds_addr, n))
1178 return -TARGET_EFAULT;
1179 *fds_ptr = fds;
1180 } else {
1181 *fds_ptr = NULL;
1183 return 0;
1186 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1187 const fd_set *fds,
1188 int n)
1190 int i, nw, j, k;
1191 abi_long v;
1192 abi_ulong *target_fds;
1194 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1195 if (!(target_fds = lock_user(VERIFY_WRITE,
1196 target_fds_addr,
1197 sizeof(abi_ulong) * nw,
1198 0)))
1199 return -TARGET_EFAULT;
1201 k = 0;
1202 for (i = 0; i < nw; i++) {
1203 v = 0;
1204 for (j = 0; j < TARGET_ABI_BITS; j++) {
1205 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1206 k++;
1208 __put_user(v, &target_fds[i]);
1211 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1213 return 0;
/* Host kernel clock tick rate used for clock_t conversion below.
 * NOTE(review): assumes 1024 on Alpha and 100 elsewhere -- the historical
 * Linux defaults; confirm if new host architectures are added. */
1216 #if defined(__alpha__)
1217 #define HOST_HZ 1024
1218 #else
1219 #define HOST_HZ 100
1220 #endif
1222 static inline abi_long host_to_target_clock_t(long ticks)
1224 #if HOST_HZ == TARGET_HZ
1225 return ticks;
1226 #else
1227 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1228 #endif
1231 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1232 const struct rusage *rusage)
1234 struct target_rusage *target_rusage;
1236 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1237 return -TARGET_EFAULT;
1238 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1239 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1240 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1241 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1242 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1243 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1244 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1245 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1246 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1247 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1248 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1249 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1250 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1251 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1252 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1253 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1254 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1255 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1256 unlock_user_struct(target_rusage, target_addr, 1);
1258 return 0;
1261 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1263 abi_ulong target_rlim_swap;
1264 rlim_t result;
1266 target_rlim_swap = tswapal(target_rlim);
1267 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1268 return RLIM_INFINITY;
1270 result = target_rlim_swap;
1271 if (target_rlim_swap != (rlim_t)result)
1272 return RLIM_INFINITY;
1274 return result;
1277 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1279 abi_ulong target_rlim_swap;
1280 abi_ulong result;
1282 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1283 target_rlim_swap = TARGET_RLIM_INFINITY;
1284 else
1285 target_rlim_swap = rlim;
1286 result = tswapal(target_rlim_swap);
1288 return result;
1291 static inline int target_to_host_resource(int code)
1293 switch (code) {
1294 case TARGET_RLIMIT_AS:
1295 return RLIMIT_AS;
1296 case TARGET_RLIMIT_CORE:
1297 return RLIMIT_CORE;
1298 case TARGET_RLIMIT_CPU:
1299 return RLIMIT_CPU;
1300 case TARGET_RLIMIT_DATA:
1301 return RLIMIT_DATA;
1302 case TARGET_RLIMIT_FSIZE:
1303 return RLIMIT_FSIZE;
1304 case TARGET_RLIMIT_LOCKS:
1305 return RLIMIT_LOCKS;
1306 case TARGET_RLIMIT_MEMLOCK:
1307 return RLIMIT_MEMLOCK;
1308 case TARGET_RLIMIT_MSGQUEUE:
1309 return RLIMIT_MSGQUEUE;
1310 case TARGET_RLIMIT_NICE:
1311 return RLIMIT_NICE;
1312 case TARGET_RLIMIT_NOFILE:
1313 return RLIMIT_NOFILE;
1314 case TARGET_RLIMIT_NPROC:
1315 return RLIMIT_NPROC;
1316 case TARGET_RLIMIT_RSS:
1317 return RLIMIT_RSS;
1318 case TARGET_RLIMIT_RTPRIO:
1319 return RLIMIT_RTPRIO;
1320 case TARGET_RLIMIT_SIGPENDING:
1321 return RLIMIT_SIGPENDING;
1322 case TARGET_RLIMIT_STACK:
1323 return RLIMIT_STACK;
1324 default:
1325 return code;
1329 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1330 abi_ulong target_tv_addr)
1332 struct target_timeval *target_tv;
1334 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1335 return -TARGET_EFAULT;
1337 __get_user(tv->tv_sec, &target_tv->tv_sec);
1338 __get_user(tv->tv_usec, &target_tv->tv_usec);
1340 unlock_user_struct(target_tv, target_tv_addr, 0);
1342 return 0;
1345 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1346 const struct timeval *tv)
1348 struct target_timeval *target_tv;
1350 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1351 return -TARGET_EFAULT;
1353 __put_user(tv->tv_sec, &target_tv->tv_sec);
1354 __put_user(tv->tv_usec, &target_tv->tv_usec);
1356 unlock_user_struct(target_tv, target_tv_addr, 1);
1358 return 0;
1361 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1362 abi_ulong target_tz_addr)
1364 struct target_timezone *target_tz;
1366 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1367 return -TARGET_EFAULT;
1370 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1371 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1373 unlock_user_struct(target_tz, target_tz_addr, 0);
1375 return 0;
/* POSIX mq_attr conversion helpers, compiled only with mq support. */
1378 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1379 #include <mqueue.h>
1381 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1382 abi_ulong target_mq_attr_addr)
1384 struct target_mq_attr *target_mq_attr;
1386 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1387 target_mq_attr_addr, 1))
1388 return -TARGET_EFAULT;
1390 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1391 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1392 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1393 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1395 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1397 return 0;
1400 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1401 const struct mq_attr *attr)
1403 struct target_mq_attr *target_mq_attr;
1405 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1406 target_mq_attr_addr, 0))
1407 return -TARGET_EFAULT;
1409 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1410 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1411 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1412 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1414 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1416 return 0;
1418 #endif
1420 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1421 /* do_select() must return target values and target errnos. */
1422 static abi_long do_select(int n,
1423 abi_ulong rfd_addr, abi_ulong wfd_addr,
1424 abi_ulong efd_addr, abi_ulong target_tv_addr)
1426 fd_set rfds, wfds, efds;
1427 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1428 struct timeval tv;
1429 struct timespec ts, *ts_ptr;
1430 abi_long ret;
/* Pull the three guest fd sets in; a zero address means "no set". */
1432 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1433 if (ret) {
1434 return ret;
1436 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1437 if (ret) {
1438 return ret;
1440 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1441 if (ret) {
1442 return ret;
/* select()'s timeval is converted to a timespec for pselect6. */
1445 if (target_tv_addr) {
1446 if (copy_from_user_timeval(&tv, target_tv_addr))
1447 return -TARGET_EFAULT;
1448 ts.tv_sec = tv.tv_sec;
1449 ts.tv_nsec = tv.tv_usec * 1000;
1450 ts_ptr = &ts;
1451 } else {
1452 ts_ptr = NULL;
1455 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1456 ts_ptr, NULL));
/* On success, write back the (possibly modified) sets and timeout. */
1458 if (!is_error(ret)) {
1459 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1460 return -TARGET_EFAULT;
1461 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1462 return -TARGET_EFAULT;
1463 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1464 return -TARGET_EFAULT;
1466 if (target_tv_addr) {
1467 tv.tv_sec = ts.tv_sec;
1468 tv.tv_usec = ts.tv_nsec / 1000;
1469 if (copy_to_user_timeval(target_tv_addr, &tv)) {
1470 return -TARGET_EFAULT;
1475 return ret;
/* The old select syscall variant passes a single pointer to a struct
 * holding all five arguments; unpack it and forward to do_select(). */
1478 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1479 static abi_long do_old_select(abi_ulong arg1)
1481 struct target_sel_arg_struct *sel;
1482 abi_ulong inp, outp, exp, tvp;
1483 long nsel;
1485 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1486 return -TARGET_EFAULT;
1489 nsel = tswapal(sel->n);
1490 inp = tswapal(sel->inp);
1491 outp = tswapal(sel->outp);
1492 exp = tswapal(sel->exp);
1493 tvp = tswapal(sel->tvp);
1495 unlock_user_struct(sel, arg1, 0);
1497 return do_select(nsel, inp, outp, exp, tvp);
1499 #endif
1500 #endif
/* Host pipe2() if available; otherwise flags cannot be honoured, so
 * report ENOSYS (host errno; caller converts). */
1502 static abi_long do_pipe2(int host_pipe[], int flags)
1504 #ifdef CONFIG_PIPE2
1505 return pipe2(host_pipe, flags);
1506 #else
1507 return -ENOSYS;
1508 #endif
/* Create a pipe for the guest.  Several targets return the second fd in a
 * register for the original pipe syscall rather than through memory. */
1511 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1512 int flags, int is_pipe2)
1514 int host_pipe[2];
1515 abi_long ret;
1516 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1518 if (is_error(ret))
1519 return get_errno(ret);
1521 /* Several targets have special calling conventions for the original
1522 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1523 if (!is_pipe2) {
1524 #if defined(TARGET_ALPHA)
1525 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1526 return host_pipe[0];
1527 #elif defined(TARGET_MIPS)
1528 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1529 return host_pipe[0];
1530 #elif defined(TARGET_SH4)
1531 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1532 return host_pipe[0];
1533 #elif defined(TARGET_SPARC)
1534 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1535 return host_pipe[0];
1536 #endif
/* Default: write both fds back to guest memory at pipedes. */
1539 if (put_user_s32(host_pipe[0], pipedes)
1540 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1541 return -TARGET_EFAULT;
1542 return get_errno(ret);
1545 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1546 abi_ulong target_addr,
1547 socklen_t len)
1549 struct target_ip_mreqn *target_smreqn;
1551 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1552 if (!target_smreqn)
1553 return -TARGET_EFAULT;
1554 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1555 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1556 if (len == sizeof(struct target_ip_mreqn))
1557 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1558 unlock_user(target_smreqn, target_addr, 0);
1560 return 0;
/* Convert a guest sockaddr into the host representation, including the
 * AF_UNIX sun_path length fixup and AF_NETLINK/AF_PACKET field swaps. */
1563 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1564 abi_ulong target_addr,
1565 socklen_t len)
1567 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1568 sa_family_t sa_family;
1569 struct target_sockaddr *target_saddr;
/* Some fd types (registered in fd_trans) carry their own converter. */
1571 if (fd_trans_target_to_host_addr(fd)) {
1572 return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1575 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1576 if (!target_saddr)
1577 return -TARGET_EFAULT;
1579 sa_family = tswap16(target_saddr->sa_family);
1581 /* Oops. The caller might send a incomplete sun_path; sun_path
1582 * must be terminated by \0 (see the manual page), but
1583 * unfortunately it is quite common to specify sockaddr_un
1584 * length as "strlen(x->sun_path)" while it should be
1585 * "strlen(...) + 1". We'll fix that here if needed.
1586 * Linux kernel has a similar feature.
1589 if (sa_family == AF_UNIX) {
1590 if (len < unix_maxlen && len > 0) {
1591 char *cp = (char*)target_saddr;
1593 if ( cp[len-1] && !cp[len] )
1594 len++;
1596 if (len > unix_maxlen)
1597 len = unix_maxlen;
1600 memcpy(addr, target_saddr, len);
1601 addr->sa_family = sa_family;
/* Netlink and packet sockaddrs contain multi-byte fields to swap. */
1602 if (sa_family == AF_NETLINK) {
1603 struct sockaddr_nl *nladdr;
1605 nladdr = (struct sockaddr_nl *)addr;
1606 nladdr->nl_pid = tswap32(nladdr->nl_pid);
1607 nladdr->nl_groups = tswap32(nladdr->nl_groups);
1608 } else if (sa_family == AF_PACKET) {
1609 struct target_sockaddr_ll *lladdr;
1611 lladdr = (struct target_sockaddr_ll *)addr;
1612 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1613 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1615 unlock_user(target_saddr, target_addr, 0);
1617 return 0;
/* Convert a host sockaddr out to the guest, swapping family-specific
 * multi-byte fields where the guest buffer is large enough to hold them. */
1620 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1621 struct sockaddr *addr,
1622 socklen_t len)
1624 struct target_sockaddr *target_saddr;
1626 if (len == 0) {
1627 return 0;
1629 assert(addr);
1631 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1632 if (!target_saddr)
1633 return -TARGET_EFAULT;
1634 memcpy(target_saddr, addr, len);
/* Only swap sa_family if the buffer actually covers that field. */
1635 if (len >= offsetof(struct target_sockaddr, sa_family) +
1636 sizeof(target_saddr->sa_family)) {
1637 target_saddr->sa_family = tswap16(addr->sa_family);
1639 if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1640 struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1641 target_nl->nl_pid = tswap32(target_nl->nl_pid);
1642 target_nl->nl_groups = tswap32(target_nl->nl_groups);
1643 } else if (addr->sa_family == AF_PACKET) {
1644 struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1645 target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1646 target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1647 } else if (addr->sa_family == AF_INET6 &&
1648 len >= sizeof(struct target_sockaddr_in6)) {
1649 struct target_sockaddr_in6 *target_in6 =
1650 (struct target_sockaddr_in6 *)target_saddr;
1651 target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1653 unlock_user(target_saddr, target_addr, len);
1655 return 0;
/* Convert guest control messages (ancillary data) attached to a sendmsg
 * into the pre-allocated host buffer described by msgh. */
1658 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1659 struct target_msghdr *target_msgh)
1661 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1662 abi_long msg_controllen;
1663 abi_ulong target_cmsg_addr;
1664 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1665 socklen_t space = 0;
1667 msg_controllen = tswapal(target_msgh->msg_controllen);
1668 if (msg_controllen < sizeof (struct target_cmsghdr))
1669 goto the_end;
1670 target_cmsg_addr = tswapal(target_msgh->msg_control);
1671 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1672 target_cmsg_start = target_cmsg;
1673 if (!target_cmsg)
1674 return -TARGET_EFAULT;
1676 while (cmsg && target_cmsg) {
1677 void *data = CMSG_DATA(cmsg);
1678 void *target_data = TARGET_CMSG_DATA(target_cmsg);
/* Payload length excludes the (aligned) target cmsg header. */
1680 int len = tswapal(target_cmsg->cmsg_len)
1681 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1683 space += CMSG_SPACE(len);
1684 if (space > msgh->msg_controllen) {
1685 space -= CMSG_SPACE(len);
1686 /* This is a QEMU bug, since we allocated the payload
1687 * area ourselves (unlike overflow in host-to-target
1688 * conversion, which is just the guest giving us a buffer
1689 * that's too small). It can't happen for the payload types
1690 * we currently support; if it becomes an issue in future
1691 * we would need to improve our allocation strategy to
1692 * something more intelligent than "twice the size of the
1693 * target buffer we're reading from".
1695 gemu_log("Host cmsg overflow\n");
1696 break;
1699 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1700 cmsg->cmsg_level = SOL_SOCKET;
1701 } else {
1702 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1704 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1705 cmsg->cmsg_len = CMSG_LEN(len);
/* SCM_RIGHTS carries an fd array; SCM_CREDENTIALS a ucred struct. */
1707 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1708 int *fd = (int *)data;
1709 int *target_fd = (int *)target_data;
1710 int i, numfds = len / sizeof(int);
1712 for (i = 0; i < numfds; i++) {
1713 __get_user(fd[i], target_fd + i);
1715 } else if (cmsg->cmsg_level == SOL_SOCKET
1716 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1717 struct ucred *cred = (struct ucred *)data;
1718 struct target_ucred *target_cred =
1719 (struct target_ucred *)target_data;
1721 __get_user(cred->pid, &target_cred->pid);
1722 __get_user(cred->uid, &target_cred->uid);
1723 __get_user(cred->gid, &target_cred->gid);
1724 } else {
1725 gemu_log("Unsupported ancillary data: %d/%d\n",
1726 cmsg->cmsg_level, cmsg->cmsg_type);
1727 memcpy(data, target_data, len);
1730 cmsg = CMSG_NXTHDR(msgh, cmsg);
1731 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1732 target_cmsg_start);
1734 unlock_user(target_cmsg, target_cmsg_addr, 0);
1735 the_end:
1736 msgh->msg_controllen = space;
1737 return 0;
/* Convert host control messages received via recvmsg back into the guest
 * buffer, truncating (and setting MSG_CTRUNC) when the guest buffer is
 * too small. */
1740 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1741 struct msghdr *msgh)
1743 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1744 abi_long msg_controllen;
1745 abi_ulong target_cmsg_addr;
1746 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1747 socklen_t space = 0;
1749 msg_controllen = tswapal(target_msgh->msg_controllen);
1750 if (msg_controllen < sizeof (struct target_cmsghdr))
1751 goto the_end;
1752 target_cmsg_addr = tswapal(target_msgh->msg_control);
1753 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1754 target_cmsg_start = target_cmsg;
1755 if (!target_cmsg)
1756 return -TARGET_EFAULT;
1758 while (cmsg && target_cmsg) {
1759 void *data = CMSG_DATA(cmsg);
1760 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1762 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1763 int tgt_len, tgt_space;
1765 /* We never copy a half-header but may copy half-data;
1766 * this is Linux's behaviour in put_cmsg(). Note that
1767 * truncation here is a guest problem (which we report
1768 * to the guest via the CTRUNC bit), unlike truncation
1769 * in target_to_host_cmsg, which is a QEMU bug.
1771 if (msg_controllen < sizeof(struct cmsghdr)) {
1772 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1773 break;
1776 if (cmsg->cmsg_level == SOL_SOCKET) {
1777 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1778 } else {
1779 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1781 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1783 tgt_len = TARGET_CMSG_LEN(len);
1785 /* Payload types which need a different size of payload on
1786 * the target must adjust tgt_len here.
1788 switch (cmsg->cmsg_level) {
1789 case SOL_SOCKET:
1790 switch (cmsg->cmsg_type) {
1791 case SO_TIMESTAMP:
1792 tgt_len = sizeof(struct target_timeval);
1793 break;
1794 default:
1795 break;
1797 default:
1798 break;
/* Clamp to what remains of the guest control buffer. */
1801 if (msg_controllen < tgt_len) {
1802 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1803 tgt_len = msg_controllen;
1806 /* We must now copy-and-convert len bytes of payload
1807 * into tgt_len bytes of destination space. Bear in mind
1808 * that in both source and destination we may be dealing
1809 * with a truncated value!
1811 switch (cmsg->cmsg_level) {
1812 case SOL_SOCKET:
1813 switch (cmsg->cmsg_type) {
1814 case SCM_RIGHTS:
1816 int *fd = (int *)data;
1817 int *target_fd = (int *)target_data;
1818 int i, numfds = tgt_len / sizeof(int);
1820 for (i = 0; i < numfds; i++) {
1821 __put_user(fd[i], target_fd + i);
1823 break;
1825 case SO_TIMESTAMP:
1827 struct timeval *tv = (struct timeval *)data;
1828 struct target_timeval *target_tv =
1829 (struct target_timeval *)target_data;
1831 if (len != sizeof(struct timeval) ||
1832 tgt_len != sizeof(struct target_timeval)) {
1833 goto unimplemented;
1836 /* copy struct timeval to target */
1837 __put_user(tv->tv_sec, &target_tv->tv_sec);
1838 __put_user(tv->tv_usec, &target_tv->tv_usec);
1839 break;
1841 case SCM_CREDENTIALS:
1843 struct ucred *cred = (struct ucred *)data;
1844 struct target_ucred *target_cred =
1845 (struct target_ucred *)target_data;
1847 __put_user(cred->pid, &target_cred->pid);
1848 __put_user(cred->uid, &target_cred->uid);
1849 __put_user(cred->gid, &target_cred->gid);
1850 break;
1852 default:
1853 goto unimplemented;
1855 break;
1857 case SOL_IP:
1858 switch (cmsg->cmsg_type) {
1859 case IP_TTL:
1861 uint32_t *v = (uint32_t *)data;
1862 uint32_t *t_int = (uint32_t *)target_data;
1864 __put_user(*v, t_int);
1865 break;
1867 case IP_RECVERR:
1869 struct errhdr_t {
1870 struct sock_extended_err ee;
1871 struct sockaddr_in offender;
1873 struct errhdr_t *errh = (struct errhdr_t *)data;
1874 struct errhdr_t *target_errh =
1875 (struct errhdr_t *)target_data;
1877 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1878 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1879 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1880 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1881 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1882 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1883 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1884 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1885 (void *) &errh->offender, sizeof(errh->offender));
1886 break;
1888 default:
1889 goto unimplemented;
1891 break;
1893 case SOL_IPV6:
1894 switch (cmsg->cmsg_type) {
1895 case IPV6_HOPLIMIT:
1897 uint32_t *v = (uint32_t *)data;
1898 uint32_t *t_int = (uint32_t *)target_data;
1900 __put_user(*v, t_int);
1901 break;
1903 case IPV6_RECVERR:
1905 struct errhdr6_t {
1906 struct sock_extended_err ee;
1907 struct sockaddr_in6 offender;
1909 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1910 struct errhdr6_t *target_errh =
1911 (struct errhdr6_t *)target_data;
1913 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1914 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1915 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1916 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1917 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1918 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1919 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1920 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1921 (void *) &errh->offender, sizeof(errh->offender));
1922 break;
1924 default:
1925 goto unimplemented;
1927 break;
1929 default:
1930 unimplemented:
1931 gemu_log("Unsupported ancillary data: %d/%d\n",
1932 cmsg->cmsg_level, cmsg->cmsg_type);
1933 memcpy(target_data, data, MIN(len, tgt_len));
1934 if (tgt_len > len) {
1935 memset(target_data + len, 0, tgt_len - len);
1939 target_cmsg->cmsg_len = tswapal(tgt_len);
1940 tgt_space = TARGET_CMSG_SPACE(len);
1941 if (msg_controllen < tgt_space) {
1942 tgt_space = msg_controllen;
1944 msg_controllen -= tgt_space;
1945 space += tgt_space;
1946 cmsg = CMSG_NXTHDR(msgh, cmsg);
1947 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1948 target_cmsg_start);
1950 unlock_user(target_cmsg, target_cmsg_addr, space);
1951 the_end:
1952 target_msgh->msg_controllen = tswapal(space);
1953 return 0;
/* Byte-swap every field of a netlink message header in place. */
1956 static void tswap_nlmsghdr(struct nlmsghdr *nlh)
1958 nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
1959 nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
1960 nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
1961 nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
1962 nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
/* Walk a buffer of host-order netlink messages, converting each payload
 * via the callback (while still host-order) and then swapping the header. */
1965 static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
1966 size_t len,
1967 abi_long (*host_to_target_nlmsg)
1968 (struct nlmsghdr *))
1970 uint32_t nlmsg_len;
1971 abi_long ret;
1973 while (len > sizeof(struct nlmsghdr)) {
/* Save the host-order length before the header gets swapped. */
1975 nlmsg_len = nlh->nlmsg_len;
1976 if (nlmsg_len < sizeof(struct nlmsghdr) ||
1977 nlmsg_len > len) {
1978 break;
1981 switch (nlh->nlmsg_type) {
1982 case NLMSG_DONE:
1983 tswap_nlmsghdr(nlh);
1984 return 0;
1985 case NLMSG_NOOP:
1986 break;
1987 case NLMSG_ERROR:
1989 struct nlmsgerr *e = NLMSG_DATA(nlh);
1990 e->error = tswap32(e->error);
1991 tswap_nlmsghdr(&e->msg);
1992 tswap_nlmsghdr(nlh);
1993 return 0;
1995 default:
1996 ret = host_to_target_nlmsg(nlh);
1997 if (ret < 0) {
1998 tswap_nlmsghdr(nlh);
1999 return ret;
2001 break;
2003 tswap_nlmsghdr(nlh);
2004 len -= NLMSG_ALIGN(nlmsg_len);
2005 nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
2007 return 0;
/* Walk a buffer of guest-order netlink messages: swap each header to host
 * order first, then convert the payload via the callback. */
2010 static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
2011 size_t len,
2012 abi_long (*target_to_host_nlmsg)
2013 (struct nlmsghdr *))
2015 int ret;
2017 while (len > sizeof(struct nlmsghdr)) {
/* Length is still guest-order here, hence the tswap32 in the check. */
2018 if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
2019 tswap32(nlh->nlmsg_len) > len) {
2020 break;
2022 tswap_nlmsghdr(nlh);
2023 switch (nlh->nlmsg_type) {
2024 case NLMSG_DONE:
2025 return 0;
2026 case NLMSG_NOOP:
2027 break;
2028 case NLMSG_ERROR:
2030 struct nlmsgerr *e = NLMSG_DATA(nlh);
2031 e->error = tswap32(e->error);
2032 tswap_nlmsghdr(&e->msg);
2033 return 0;
2035 default:
2036 ret = target_to_host_nlmsg(nlh);
2037 if (ret < 0) {
2038 return ret;
2041 len -= NLMSG_ALIGN(nlh->nlmsg_len);
2042 nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
2044 return 0;
2047 #ifdef CONFIG_RTNETLINK
/* Walk host-order nlattrs: convert each payload via the callback first
 * (so it sees host-order data), then swap the attribute header. */
2048 static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
2049 size_t len, void *context,
2050 abi_long (*host_to_target_nlattr)
2051 (struct nlattr *,
2052 void *context))
2054 unsigned short nla_len;
2055 abi_long ret;
2057 while (len > sizeof(struct nlattr)) {
2058 nla_len = nlattr->nla_len;
2059 if (nla_len < sizeof(struct nlattr) ||
2060 nla_len > len) {
2061 break;
2063 ret = host_to_target_nlattr(nlattr, context);
2064 nlattr->nla_len = tswap16(nlattr->nla_len);
2065 nlattr->nla_type = tswap16(nlattr->nla_type);
2066 if (ret < 0) {
2067 return ret;
2069 len -= NLA_ALIGN(nla_len);
2070 nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
2072 return 0;
/* As host_to_target_for_each_nlattr(), but for struct rtattr chains. */
2075 static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
2076 size_t len,
2077 abi_long (*host_to_target_rtattr)
2078 (struct rtattr *))
2080 unsigned short rta_len;
2081 abi_long ret;
2083 while (len > sizeof(struct rtattr)) {
2084 rta_len = rtattr->rta_len;
2085 if (rta_len < sizeof(struct rtattr) ||
2086 rta_len > len) {
2087 break;
2089 ret = host_to_target_rtattr(rtattr);
/* Swap the header after the callback, which needs host-order fields. */
2090 rtattr->rta_len = tswap16(rtattr->rta_len);
2091 rtattr->rta_type = tswap16(rtattr->rta_type);
2092 if (ret < 0) {
2093 return ret;
2095 len -= RTA_ALIGN(rta_len);
2096 rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
2098 return 0;
2101 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
2103 static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
2104 void *context)
2106 uint16_t *u16;
2107 uint32_t *u32;
2108 uint64_t *u64;
2110 switch (nlattr->nla_type) {
2111 /* no data */
2112 case QEMU_IFLA_BR_FDB_FLUSH:
2113 break;
2114 /* binary */
2115 case QEMU_IFLA_BR_GROUP_ADDR:
2116 break;
2117 /* uint8_t */
2118 case QEMU_IFLA_BR_VLAN_FILTERING:
2119 case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
2120 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
2121 case QEMU_IFLA_BR_MCAST_ROUTER:
2122 case QEMU_IFLA_BR_MCAST_SNOOPING:
2123 case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
2124 case QEMU_IFLA_BR_MCAST_QUERIER:
2125 case QEMU_IFLA_BR_NF_CALL_IPTABLES:
2126 case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
2127 case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
2128 break;
2129 /* uint16_t */
2130 case QEMU_IFLA_BR_PRIORITY:
2131 case QEMU_IFLA_BR_VLAN_PROTOCOL:
2132 case QEMU_IFLA_BR_GROUP_FWD_MASK:
2133 case QEMU_IFLA_BR_ROOT_PORT:
2134 case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
2135 u16 = NLA_DATA(nlattr);
2136 *u16 = tswap16(*u16);
2137 break;
2138 /* uint32_t */
2139 case QEMU_IFLA_BR_FORWARD_DELAY:
2140 case QEMU_IFLA_BR_HELLO_TIME:
2141 case QEMU_IFLA_BR_MAX_AGE:
2142 case QEMU_IFLA_BR_AGEING_TIME:
2143 case QEMU_IFLA_BR_STP_STATE:
2144 case QEMU_IFLA_BR_ROOT_PATH_COST:
2145 case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
2146 case QEMU_IFLA_BR_MCAST_HASH_MAX:
2147 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
2148 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
2149 u32 = NLA_DATA(nlattr);
2150 *u32 = tswap32(*u32);
2151 break;
2152 /* uint64_t */
2153 case QEMU_IFLA_BR_HELLO_TIMER:
2154 case QEMU_IFLA_BR_TCN_TIMER:
2155 case QEMU_IFLA_BR_GC_TIMER:
2156 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
2157 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
2158 case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
2159 case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
2160 case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
2161 case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
2162 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
2163 u64 = NLA_DATA(nlattr);
2164 *u64 = tswap64(*u64);
2165 break;
2166 /* ifla_bridge_id: uin8_t[] */
2167 case QEMU_IFLA_BR_ROOT_ID:
2168 case QEMU_IFLA_BR_BRIDGE_ID:
2169 break;
2170 default:
2171 gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
2172 break;
2174 return 0;
2177 static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
2178 void *context)
2180 uint16_t *u16;
2181 uint32_t *u32;
2182 uint64_t *u64;
2184 switch (nlattr->nla_type) {
2185 /* uint8_t */
2186 case QEMU_IFLA_BRPORT_STATE:
2187 case QEMU_IFLA_BRPORT_MODE:
2188 case QEMU_IFLA_BRPORT_GUARD:
2189 case QEMU_IFLA_BRPORT_PROTECT:
2190 case QEMU_IFLA_BRPORT_FAST_LEAVE:
2191 case QEMU_IFLA_BRPORT_LEARNING:
2192 case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
2193 case QEMU_IFLA_BRPORT_PROXYARP:
2194 case QEMU_IFLA_BRPORT_LEARNING_SYNC:
2195 case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
2196 case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
2197 case QEMU_IFLA_BRPORT_CONFIG_PENDING:
2198 case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
2199 break;
2200 /* uint16_t */
2201 case QEMU_IFLA_BRPORT_PRIORITY:
2202 case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
2203 case QEMU_IFLA_BRPORT_DESIGNATED_COST:
2204 case QEMU_IFLA_BRPORT_ID:
2205 case QEMU_IFLA_BRPORT_NO:
2206 u16 = NLA_DATA(nlattr);
2207 *u16 = tswap16(*u16);
2208 break;
2209 /* uin32_t */
2210 case QEMU_IFLA_BRPORT_COST:
2211 u32 = NLA_DATA(nlattr);
2212 *u32 = tswap32(*u32);
2213 break;
2214 /* uint64_t */
2215 case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
2216 case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
2217 case QEMU_IFLA_BRPORT_HOLD_TIMER:
2218 u64 = NLA_DATA(nlattr);
2219 *u64 = tswap64(*u64);
2220 break;
2221 /* ifla_bridge_id: uint8_t[] */
2222 case QEMU_IFLA_BRPORT_ROOT_ID:
2223 case QEMU_IFLA_BRPORT_BRIDGE_ID:
2224 break;
2225 default:
2226 gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
2227 break;
2229 return 0;
/* State shared while parsing an IFLA_LINKINFO nest: remembers the
 * IFLA_INFO_KIND / IFLA_INFO_SLAVE_KIND strings so the matching
 * *_DATA attributes can be interpreted.
 */
struct linkinfo_context {
    int len;            /* length of 'name' payload */
    char *name;         /* driver kind string (e.g. "bridge") */
    int slave_len;      /* length of 'slave_name' payload */
    char *slave_name;   /* slave kind string */
};
2239 static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
2240 void *context)
2242 struct linkinfo_context *li_context = context;
2244 switch (nlattr->nla_type) {
2245 /* string */
2246 case QEMU_IFLA_INFO_KIND:
2247 li_context->name = NLA_DATA(nlattr);
2248 li_context->len = nlattr->nla_len - NLA_HDRLEN;
2249 break;
2250 case QEMU_IFLA_INFO_SLAVE_KIND:
2251 li_context->slave_name = NLA_DATA(nlattr);
2252 li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
2253 break;
2254 /* stats */
2255 case QEMU_IFLA_INFO_XSTATS:
2256 /* FIXME: only used by CAN */
2257 break;
2258 /* nested */
2259 case QEMU_IFLA_INFO_DATA:
2260 if (strncmp(li_context->name, "bridge",
2261 li_context->len) == 0) {
2262 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2263 nlattr->nla_len,
2264 NULL,
2265 host_to_target_data_bridge_nlattr);
2266 } else {
2267 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
2269 break;
2270 case QEMU_IFLA_INFO_SLAVE_DATA:
2271 if (strncmp(li_context->slave_name, "bridge",
2272 li_context->slave_len) == 0) {
2273 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2274 nlattr->nla_len,
2275 NULL,
2276 host_to_target_slave_data_bridge_nlattr);
2277 } else {
2278 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2279 li_context->slave_name);
2281 break;
2282 default:
2283 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
2284 break;
2287 return 0;
2290 static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
2291 void *context)
2293 uint32_t *u32;
2294 int i;
2296 switch (nlattr->nla_type) {
2297 case QEMU_IFLA_INET_CONF:
2298 u32 = NLA_DATA(nlattr);
2299 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2300 i++) {
2301 u32[i] = tswap32(u32[i]);
2303 break;
2304 default:
2305 gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
2307 return 0;
2310 static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
2311 void *context)
2313 uint32_t *u32;
2314 uint64_t *u64;
2315 struct ifla_cacheinfo *ci;
2316 int i;
2318 switch (nlattr->nla_type) {
2319 /* binaries */
2320 case QEMU_IFLA_INET6_TOKEN:
2321 break;
2322 /* uint8_t */
2323 case QEMU_IFLA_INET6_ADDR_GEN_MODE:
2324 break;
2325 /* uint32_t */
2326 case QEMU_IFLA_INET6_FLAGS:
2327 u32 = NLA_DATA(nlattr);
2328 *u32 = tswap32(*u32);
2329 break;
2330 /* uint32_t[] */
2331 case QEMU_IFLA_INET6_CONF:
2332 u32 = NLA_DATA(nlattr);
2333 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2334 i++) {
2335 u32[i] = tswap32(u32[i]);
2337 break;
2338 /* ifla_cacheinfo */
2339 case QEMU_IFLA_INET6_CACHEINFO:
2340 ci = NLA_DATA(nlattr);
2341 ci->max_reasm_len = tswap32(ci->max_reasm_len);
2342 ci->tstamp = tswap32(ci->tstamp);
2343 ci->reachable_time = tswap32(ci->reachable_time);
2344 ci->retrans_time = tswap32(ci->retrans_time);
2345 break;
2346 /* uint64_t[] */
2347 case QEMU_IFLA_INET6_STATS:
2348 case QEMU_IFLA_INET6_ICMP6STATS:
2349 u64 = NLA_DATA(nlattr);
2350 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
2351 i++) {
2352 u64[i] = tswap64(u64[i]);
2354 break;
2355 default:
2356 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
2358 return 0;
2361 static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
2362 void *context)
2364 switch (nlattr->nla_type) {
2365 case AF_INET:
2366 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2367 NULL,
2368 host_to_target_data_inet_nlattr);
2369 case AF_INET6:
2370 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2371 NULL,
2372 host_to_target_data_inet6_nlattr);
2373 default:
2374 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
2375 break;
2377 return 0;
2380 static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
2382 uint32_t *u32;
2383 struct rtnl_link_stats *st;
2384 struct rtnl_link_stats64 *st64;
2385 struct rtnl_link_ifmap *map;
2386 struct linkinfo_context li_context;
2388 switch (rtattr->rta_type) {
2389 /* binary stream */
2390 case QEMU_IFLA_ADDRESS:
2391 case QEMU_IFLA_BROADCAST:
2392 /* string */
2393 case QEMU_IFLA_IFNAME:
2394 case QEMU_IFLA_QDISC:
2395 break;
2396 /* uin8_t */
2397 case QEMU_IFLA_OPERSTATE:
2398 case QEMU_IFLA_LINKMODE:
2399 case QEMU_IFLA_CARRIER:
2400 case QEMU_IFLA_PROTO_DOWN:
2401 break;
2402 /* uint32_t */
2403 case QEMU_IFLA_MTU:
2404 case QEMU_IFLA_LINK:
2405 case QEMU_IFLA_WEIGHT:
2406 case QEMU_IFLA_TXQLEN:
2407 case QEMU_IFLA_CARRIER_CHANGES:
2408 case QEMU_IFLA_NUM_RX_QUEUES:
2409 case QEMU_IFLA_NUM_TX_QUEUES:
2410 case QEMU_IFLA_PROMISCUITY:
2411 case QEMU_IFLA_EXT_MASK:
2412 case QEMU_IFLA_LINK_NETNSID:
2413 case QEMU_IFLA_GROUP:
2414 case QEMU_IFLA_MASTER:
2415 case QEMU_IFLA_NUM_VF:
2416 case QEMU_IFLA_GSO_MAX_SEGS:
2417 case QEMU_IFLA_GSO_MAX_SIZE:
2418 u32 = RTA_DATA(rtattr);
2419 *u32 = tswap32(*u32);
2420 break;
2421 /* struct rtnl_link_stats */
2422 case QEMU_IFLA_STATS:
2423 st = RTA_DATA(rtattr);
2424 st->rx_packets = tswap32(st->rx_packets);
2425 st->tx_packets = tswap32(st->tx_packets);
2426 st->rx_bytes = tswap32(st->rx_bytes);
2427 st->tx_bytes = tswap32(st->tx_bytes);
2428 st->rx_errors = tswap32(st->rx_errors);
2429 st->tx_errors = tswap32(st->tx_errors);
2430 st->rx_dropped = tswap32(st->rx_dropped);
2431 st->tx_dropped = tswap32(st->tx_dropped);
2432 st->multicast = tswap32(st->multicast);
2433 st->collisions = tswap32(st->collisions);
2435 /* detailed rx_errors: */
2436 st->rx_length_errors = tswap32(st->rx_length_errors);
2437 st->rx_over_errors = tswap32(st->rx_over_errors);
2438 st->rx_crc_errors = tswap32(st->rx_crc_errors);
2439 st->rx_frame_errors = tswap32(st->rx_frame_errors);
2440 st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
2441 st->rx_missed_errors = tswap32(st->rx_missed_errors);
2443 /* detailed tx_errors */
2444 st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
2445 st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
2446 st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
2447 st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
2448 st->tx_window_errors = tswap32(st->tx_window_errors);
2450 /* for cslip etc */
2451 st->rx_compressed = tswap32(st->rx_compressed);
2452 st->tx_compressed = tswap32(st->tx_compressed);
2453 break;
2454 /* struct rtnl_link_stats64 */
2455 case QEMU_IFLA_STATS64:
2456 st64 = RTA_DATA(rtattr);
2457 st64->rx_packets = tswap64(st64->rx_packets);
2458 st64->tx_packets = tswap64(st64->tx_packets);
2459 st64->rx_bytes = tswap64(st64->rx_bytes);
2460 st64->tx_bytes = tswap64(st64->tx_bytes);
2461 st64->rx_errors = tswap64(st64->rx_errors);
2462 st64->tx_errors = tswap64(st64->tx_errors);
2463 st64->rx_dropped = tswap64(st64->rx_dropped);
2464 st64->tx_dropped = tswap64(st64->tx_dropped);
2465 st64->multicast = tswap64(st64->multicast);
2466 st64->collisions = tswap64(st64->collisions);
2468 /* detailed rx_errors: */
2469 st64->rx_length_errors = tswap64(st64->rx_length_errors);
2470 st64->rx_over_errors = tswap64(st64->rx_over_errors);
2471 st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
2472 st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
2473 st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
2474 st64->rx_missed_errors = tswap64(st64->rx_missed_errors);
2476 /* detailed tx_errors */
2477 st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
2478 st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
2479 st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
2480 st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
2481 st64->tx_window_errors = tswap64(st64->tx_window_errors);
2483 /* for cslip etc */
2484 st64->rx_compressed = tswap64(st64->rx_compressed);
2485 st64->tx_compressed = tswap64(st64->tx_compressed);
2486 break;
2487 /* struct rtnl_link_ifmap */
2488 case QEMU_IFLA_MAP:
2489 map = RTA_DATA(rtattr);
2490 map->mem_start = tswap64(map->mem_start);
2491 map->mem_end = tswap64(map->mem_end);
2492 map->base_addr = tswap64(map->base_addr);
2493 map->irq = tswap16(map->irq);
2494 break;
2495 /* nested */
2496 case QEMU_IFLA_LINKINFO:
2497 memset(&li_context, 0, sizeof(li_context));
2498 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2499 &li_context,
2500 host_to_target_data_linkinfo_nlattr);
2501 case QEMU_IFLA_AF_SPEC:
2502 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2503 NULL,
2504 host_to_target_data_spec_nlattr);
2505 default:
2506 gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
2507 break;
2509 return 0;
2512 static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
2514 uint32_t *u32;
2515 struct ifa_cacheinfo *ci;
2517 switch (rtattr->rta_type) {
2518 /* binary: depends on family type */
2519 case IFA_ADDRESS:
2520 case IFA_LOCAL:
2521 break;
2522 /* string */
2523 case IFA_LABEL:
2524 break;
2525 /* u32 */
2526 case IFA_FLAGS:
2527 case IFA_BROADCAST:
2528 u32 = RTA_DATA(rtattr);
2529 *u32 = tswap32(*u32);
2530 break;
2531 /* struct ifa_cacheinfo */
2532 case IFA_CACHEINFO:
2533 ci = RTA_DATA(rtattr);
2534 ci->ifa_prefered = tswap32(ci->ifa_prefered);
2535 ci->ifa_valid = tswap32(ci->ifa_valid);
2536 ci->cstamp = tswap32(ci->cstamp);
2537 ci->tstamp = tswap32(ci->tstamp);
2538 break;
2539 default:
2540 gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
2541 break;
2543 return 0;
2546 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
2548 uint32_t *u32;
2549 switch (rtattr->rta_type) {
2550 /* binary: depends on family type */
2551 case RTA_GATEWAY:
2552 case RTA_DST:
2553 case RTA_PREFSRC:
2554 break;
2555 /* u32 */
2556 case RTA_PRIORITY:
2557 case RTA_TABLE:
2558 case RTA_OIF:
2559 u32 = RTA_DATA(rtattr);
2560 *u32 = tswap32(*u32);
2561 break;
2562 default:
2563 gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
2564 break;
2566 return 0;
2569 static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
2570 uint32_t rtattr_len)
2572 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2573 host_to_target_data_link_rtattr);
2576 static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
2577 uint32_t rtattr_len)
2579 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2580 host_to_target_data_addr_rtattr);
2583 static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
2584 uint32_t rtattr_len)
2586 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2587 host_to_target_data_route_rtattr);
2590 static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
2592 uint32_t nlmsg_len;
2593 struct ifinfomsg *ifi;
2594 struct ifaddrmsg *ifa;
2595 struct rtmsg *rtm;
2597 nlmsg_len = nlh->nlmsg_len;
2598 switch (nlh->nlmsg_type) {
2599 case RTM_NEWLINK:
2600 case RTM_DELLINK:
2601 case RTM_GETLINK:
2602 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2603 ifi = NLMSG_DATA(nlh);
2604 ifi->ifi_type = tswap16(ifi->ifi_type);
2605 ifi->ifi_index = tswap32(ifi->ifi_index);
2606 ifi->ifi_flags = tswap32(ifi->ifi_flags);
2607 ifi->ifi_change = tswap32(ifi->ifi_change);
2608 host_to_target_link_rtattr(IFLA_RTA(ifi),
2609 nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
2611 break;
2612 case RTM_NEWADDR:
2613 case RTM_DELADDR:
2614 case RTM_GETADDR:
2615 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2616 ifa = NLMSG_DATA(nlh);
2617 ifa->ifa_index = tswap32(ifa->ifa_index);
2618 host_to_target_addr_rtattr(IFA_RTA(ifa),
2619 nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
2621 break;
2622 case RTM_NEWROUTE:
2623 case RTM_DELROUTE:
2624 case RTM_GETROUTE:
2625 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2626 rtm = NLMSG_DATA(nlh);
2627 rtm->rtm_flags = tswap32(rtm->rtm_flags);
2628 host_to_target_route_rtattr(RTM_RTA(rtm),
2629 nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
2631 break;
2632 default:
2633 return -TARGET_EINVAL;
2635 return 0;
2638 static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
2639 size_t len)
2641 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
2644 static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
2645 size_t len,
2646 abi_long (*target_to_host_rtattr)
2647 (struct rtattr *))
2649 abi_long ret;
2651 while (len >= sizeof(struct rtattr)) {
2652 if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
2653 tswap16(rtattr->rta_len) > len) {
2654 break;
2656 rtattr->rta_len = tswap16(rtattr->rta_len);
2657 rtattr->rta_type = tswap16(rtattr->rta_type);
2658 ret = target_to_host_rtattr(rtattr);
2659 if (ret < 0) {
2660 return ret;
2662 len -= RTA_ALIGN(rtattr->rta_len);
2663 rtattr = (struct rtattr *)(((char *)rtattr) +
2664 RTA_ALIGN(rtattr->rta_len));
2666 return 0;
2669 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2671 switch (rtattr->rta_type) {
2672 default:
2673 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
2674 break;
2676 return 0;
2679 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2681 switch (rtattr->rta_type) {
2682 /* binary: depends on family type */
2683 case IFA_LOCAL:
2684 case IFA_ADDRESS:
2685 break;
2686 default:
2687 gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
2688 break;
2690 return 0;
2693 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2695 uint32_t *u32;
2696 switch (rtattr->rta_type) {
2697 /* binary: depends on family type */
2698 case RTA_DST:
2699 case RTA_SRC:
2700 case RTA_GATEWAY:
2701 break;
2702 /* u32 */
2703 case RTA_PRIORITY:
2704 case RTA_OIF:
2705 u32 = RTA_DATA(rtattr);
2706 *u32 = tswap32(*u32);
2707 break;
2708 default:
2709 gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
2710 break;
2712 return 0;
2715 static void target_to_host_link_rtattr(struct rtattr *rtattr,
2716 uint32_t rtattr_len)
2718 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2719 target_to_host_data_link_rtattr);
2722 static void target_to_host_addr_rtattr(struct rtattr *rtattr,
2723 uint32_t rtattr_len)
2725 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2726 target_to_host_data_addr_rtattr);
2729 static void target_to_host_route_rtattr(struct rtattr *rtattr,
2730 uint32_t rtattr_len)
2732 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2733 target_to_host_data_route_rtattr);
2736 static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
2738 struct ifinfomsg *ifi;
2739 struct ifaddrmsg *ifa;
2740 struct rtmsg *rtm;
2742 switch (nlh->nlmsg_type) {
2743 case RTM_GETLINK:
2744 break;
2745 case RTM_NEWLINK:
2746 case RTM_DELLINK:
2747 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2748 ifi = NLMSG_DATA(nlh);
2749 ifi->ifi_type = tswap16(ifi->ifi_type);
2750 ifi->ifi_index = tswap32(ifi->ifi_index);
2751 ifi->ifi_flags = tswap32(ifi->ifi_flags);
2752 ifi->ifi_change = tswap32(ifi->ifi_change);
2753 target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
2754 NLMSG_LENGTH(sizeof(*ifi)));
2756 break;
2757 case RTM_GETADDR:
2758 case RTM_NEWADDR:
2759 case RTM_DELADDR:
2760 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2761 ifa = NLMSG_DATA(nlh);
2762 ifa->ifa_index = tswap32(ifa->ifa_index);
2763 target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
2764 NLMSG_LENGTH(sizeof(*ifa)));
2766 break;
2767 case RTM_GETROUTE:
2768 break;
2769 case RTM_NEWROUTE:
2770 case RTM_DELROUTE:
2771 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2772 rtm = NLMSG_DATA(nlh);
2773 rtm->rtm_flags = tswap32(rtm->rtm_flags);
2774 target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
2775 NLMSG_LENGTH(sizeof(*rtm)));
2777 break;
2778 default:
2779 return -TARGET_EOPNOTSUPP;
2781 return 0;
2784 static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
2786 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
2788 #endif /* CONFIG_RTNETLINK */
2790 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2792 switch (nlh->nlmsg_type) {
2793 default:
2794 gemu_log("Unknown host audit message type %d\n",
2795 nlh->nlmsg_type);
2796 return -TARGET_EINVAL;
2798 return 0;
2801 static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
2802 size_t len)
2804 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
2807 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2809 switch (nlh->nlmsg_type) {
2810 case AUDIT_USER:
2811 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2812 case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2813 break;
2814 default:
2815 gemu_log("Unknown target audit message type %d\n",
2816 nlh->nlmsg_type);
2817 return -TARGET_EINVAL;
2820 return 0;
2823 static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
2825 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
2828 /* do_setsockopt() Must return target values and target errnos. */
2829 static abi_long do_setsockopt(int sockfd, int level, int optname,
2830 abi_ulong optval_addr, socklen_t optlen)
2832 abi_long ret;
2833 int val;
2834 struct ip_mreqn *ip_mreq;
2835 struct ip_mreq_source *ip_mreq_source;
2837 switch(level) {
2838 case SOL_TCP:
2839 /* TCP options all take an 'int' value. */
2840 if (optlen < sizeof(uint32_t))
2841 return -TARGET_EINVAL;
2843 if (get_user_u32(val, optval_addr))
2844 return -TARGET_EFAULT;
2845 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2846 break;
2847 case SOL_IP:
2848 switch(optname) {
2849 case IP_TOS:
2850 case IP_TTL:
2851 case IP_HDRINCL:
2852 case IP_ROUTER_ALERT:
2853 case IP_RECVOPTS:
2854 case IP_RETOPTS:
2855 case IP_PKTINFO:
2856 case IP_MTU_DISCOVER:
2857 case IP_RECVERR:
2858 case IP_RECVTTL:
2859 case IP_RECVTOS:
2860 #ifdef IP_FREEBIND
2861 case IP_FREEBIND:
2862 #endif
2863 case IP_MULTICAST_TTL:
2864 case IP_MULTICAST_LOOP:
2865 val = 0;
2866 if (optlen >= sizeof(uint32_t)) {
2867 if (get_user_u32(val, optval_addr))
2868 return -TARGET_EFAULT;
2869 } else if (optlen >= 1) {
2870 if (get_user_u8(val, optval_addr))
2871 return -TARGET_EFAULT;
2873 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2874 break;
2875 case IP_ADD_MEMBERSHIP:
2876 case IP_DROP_MEMBERSHIP:
2877 if (optlen < sizeof (struct target_ip_mreq) ||
2878 optlen > sizeof (struct target_ip_mreqn))
2879 return -TARGET_EINVAL;
2881 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2882 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2883 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2884 break;
2886 case IP_BLOCK_SOURCE:
2887 case IP_UNBLOCK_SOURCE:
2888 case IP_ADD_SOURCE_MEMBERSHIP:
2889 case IP_DROP_SOURCE_MEMBERSHIP:
2890 if (optlen != sizeof (struct target_ip_mreq_source))
2891 return -TARGET_EINVAL;
2893 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2894 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2895 unlock_user (ip_mreq_source, optval_addr, 0);
2896 break;
2898 default:
2899 goto unimplemented;
2901 break;
2902 case SOL_IPV6:
2903 switch (optname) {
2904 case IPV6_MTU_DISCOVER:
2905 case IPV6_MTU:
2906 case IPV6_V6ONLY:
2907 case IPV6_RECVPKTINFO:
2908 case IPV6_UNICAST_HOPS:
2909 case IPV6_RECVERR:
2910 case IPV6_RECVHOPLIMIT:
2911 case IPV6_2292HOPLIMIT:
2912 case IPV6_CHECKSUM:
2913 val = 0;
2914 if (optlen < sizeof(uint32_t)) {
2915 return -TARGET_EINVAL;
2917 if (get_user_u32(val, optval_addr)) {
2918 return -TARGET_EFAULT;
2920 ret = get_errno(setsockopt(sockfd, level, optname,
2921 &val, sizeof(val)));
2922 break;
2923 case IPV6_PKTINFO:
2925 struct in6_pktinfo pki;
2927 if (optlen < sizeof(pki)) {
2928 return -TARGET_EINVAL;
2931 if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2932 return -TARGET_EFAULT;
2935 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2937 ret = get_errno(setsockopt(sockfd, level, optname,
2938 &pki, sizeof(pki)));
2939 break;
2941 default:
2942 goto unimplemented;
2944 break;
2945 case SOL_ICMPV6:
2946 switch (optname) {
2947 case ICMPV6_FILTER:
2949 struct icmp6_filter icmp6f;
2951 if (optlen > sizeof(icmp6f)) {
2952 optlen = sizeof(icmp6f);
2955 if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2956 return -TARGET_EFAULT;
2959 for (val = 0; val < 8; val++) {
2960 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2963 ret = get_errno(setsockopt(sockfd, level, optname,
2964 &icmp6f, optlen));
2965 break;
2967 default:
2968 goto unimplemented;
2970 break;
2971 case SOL_RAW:
2972 switch (optname) {
2973 case ICMP_FILTER:
2974 case IPV6_CHECKSUM:
2975 /* those take an u32 value */
2976 if (optlen < sizeof(uint32_t)) {
2977 return -TARGET_EINVAL;
2980 if (get_user_u32(val, optval_addr)) {
2981 return -TARGET_EFAULT;
2983 ret = get_errno(setsockopt(sockfd, level, optname,
2984 &val, sizeof(val)));
2985 break;
2987 default:
2988 goto unimplemented;
2990 break;
2991 case TARGET_SOL_SOCKET:
2992 switch (optname) {
2993 case TARGET_SO_RCVTIMEO:
2995 struct timeval tv;
2997 optname = SO_RCVTIMEO;
2999 set_timeout:
3000 if (optlen != sizeof(struct target_timeval)) {
3001 return -TARGET_EINVAL;
3004 if (copy_from_user_timeval(&tv, optval_addr)) {
3005 return -TARGET_EFAULT;
3008 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3009 &tv, sizeof(tv)));
3010 return ret;
3012 case TARGET_SO_SNDTIMEO:
3013 optname = SO_SNDTIMEO;
3014 goto set_timeout;
3015 case TARGET_SO_ATTACH_FILTER:
3017 struct target_sock_fprog *tfprog;
3018 struct target_sock_filter *tfilter;
3019 struct sock_fprog fprog;
3020 struct sock_filter *filter;
3021 int i;
3023 if (optlen != sizeof(*tfprog)) {
3024 return -TARGET_EINVAL;
3026 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
3027 return -TARGET_EFAULT;
3029 if (!lock_user_struct(VERIFY_READ, tfilter,
3030 tswapal(tfprog->filter), 0)) {
3031 unlock_user_struct(tfprog, optval_addr, 1);
3032 return -TARGET_EFAULT;
3035 fprog.len = tswap16(tfprog->len);
3036 filter = g_try_new(struct sock_filter, fprog.len);
3037 if (filter == NULL) {
3038 unlock_user_struct(tfilter, tfprog->filter, 1);
3039 unlock_user_struct(tfprog, optval_addr, 1);
3040 return -TARGET_ENOMEM;
3042 for (i = 0; i < fprog.len; i++) {
3043 filter[i].code = tswap16(tfilter[i].code);
3044 filter[i].jt = tfilter[i].jt;
3045 filter[i].jf = tfilter[i].jf;
3046 filter[i].k = tswap32(tfilter[i].k);
3048 fprog.filter = filter;
3050 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
3051 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
3052 g_free(filter);
3054 unlock_user_struct(tfilter, tfprog->filter, 1);
3055 unlock_user_struct(tfprog, optval_addr, 1);
3056 return ret;
3058 case TARGET_SO_BINDTODEVICE:
3060 char *dev_ifname, *addr_ifname;
3062 if (optlen > IFNAMSIZ - 1) {
3063 optlen = IFNAMSIZ - 1;
3065 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
3066 if (!dev_ifname) {
3067 return -TARGET_EFAULT;
3069 optname = SO_BINDTODEVICE;
3070 addr_ifname = alloca(IFNAMSIZ);
3071 memcpy(addr_ifname, dev_ifname, optlen);
3072 addr_ifname[optlen] = 0;
3073 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3074 addr_ifname, optlen));
3075 unlock_user (dev_ifname, optval_addr, 0);
3076 return ret;
3078 /* Options with 'int' argument. */
3079 case TARGET_SO_DEBUG:
3080 optname = SO_DEBUG;
3081 break;
3082 case TARGET_SO_REUSEADDR:
3083 optname = SO_REUSEADDR;
3084 break;
3085 case TARGET_SO_TYPE:
3086 optname = SO_TYPE;
3087 break;
3088 case TARGET_SO_ERROR:
3089 optname = SO_ERROR;
3090 break;
3091 case TARGET_SO_DONTROUTE:
3092 optname = SO_DONTROUTE;
3093 break;
3094 case TARGET_SO_BROADCAST:
3095 optname = SO_BROADCAST;
3096 break;
3097 case TARGET_SO_SNDBUF:
3098 optname = SO_SNDBUF;
3099 break;
3100 case TARGET_SO_SNDBUFFORCE:
3101 optname = SO_SNDBUFFORCE;
3102 break;
3103 case TARGET_SO_RCVBUF:
3104 optname = SO_RCVBUF;
3105 break;
3106 case TARGET_SO_RCVBUFFORCE:
3107 optname = SO_RCVBUFFORCE;
3108 break;
3109 case TARGET_SO_KEEPALIVE:
3110 optname = SO_KEEPALIVE;
3111 break;
3112 case TARGET_SO_OOBINLINE:
3113 optname = SO_OOBINLINE;
3114 break;
3115 case TARGET_SO_NO_CHECK:
3116 optname = SO_NO_CHECK;
3117 break;
3118 case TARGET_SO_PRIORITY:
3119 optname = SO_PRIORITY;
3120 break;
3121 #ifdef SO_BSDCOMPAT
3122 case TARGET_SO_BSDCOMPAT:
3123 optname = SO_BSDCOMPAT;
3124 break;
3125 #endif
3126 case TARGET_SO_PASSCRED:
3127 optname = SO_PASSCRED;
3128 break;
3129 case TARGET_SO_PASSSEC:
3130 optname = SO_PASSSEC;
3131 break;
3132 case TARGET_SO_TIMESTAMP:
3133 optname = SO_TIMESTAMP;
3134 break;
3135 case TARGET_SO_RCVLOWAT:
3136 optname = SO_RCVLOWAT;
3137 break;
3138 default:
3139 goto unimplemented;
3141 if (optlen < sizeof(uint32_t))
3142 return -TARGET_EINVAL;
3144 if (get_user_u32(val, optval_addr))
3145 return -TARGET_EFAULT;
3146 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
3147 break;
3148 default:
3149 unimplemented:
3150 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
3151 ret = -TARGET_ENOPROTOOPT;
3153 return ret;
3156 /* do_getsockopt() Must return target values and target errnos. */
3157 static abi_long do_getsockopt(int sockfd, int level, int optname,
3158 abi_ulong optval_addr, abi_ulong optlen)
3160 abi_long ret;
3161 int len, val;
3162 socklen_t lv;
3164 switch(level) {
3165 case TARGET_SOL_SOCKET:
3166 level = SOL_SOCKET;
3167 switch (optname) {
3168 /* These don't just return a single integer */
3169 case TARGET_SO_LINGER:
3170 case TARGET_SO_RCVTIMEO:
3171 case TARGET_SO_SNDTIMEO:
3172 case TARGET_SO_PEERNAME:
3173 goto unimplemented;
3174 case TARGET_SO_PEERCRED: {
3175 struct ucred cr;
3176 socklen_t crlen;
3177 struct target_ucred *tcr;
3179 if (get_user_u32(len, optlen)) {
3180 return -TARGET_EFAULT;
3182 if (len < 0) {
3183 return -TARGET_EINVAL;
3186 crlen = sizeof(cr);
3187 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
3188 &cr, &crlen));
3189 if (ret < 0) {
3190 return ret;
3192 if (len > crlen) {
3193 len = crlen;
3195 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
3196 return -TARGET_EFAULT;
3198 __put_user(cr.pid, &tcr->pid);
3199 __put_user(cr.uid, &tcr->uid);
3200 __put_user(cr.gid, &tcr->gid);
3201 unlock_user_struct(tcr, optval_addr, 1);
3202 if (put_user_u32(len, optlen)) {
3203 return -TARGET_EFAULT;
3205 break;
3207 /* Options with 'int' argument. */
3208 case TARGET_SO_DEBUG:
3209 optname = SO_DEBUG;
3210 goto int_case;
3211 case TARGET_SO_REUSEADDR:
3212 optname = SO_REUSEADDR;
3213 goto int_case;
3214 case TARGET_SO_TYPE:
3215 optname = SO_TYPE;
3216 goto int_case;
3217 case TARGET_SO_ERROR:
3218 optname = SO_ERROR;
3219 goto int_case;
3220 case TARGET_SO_DONTROUTE:
3221 optname = SO_DONTROUTE;
3222 goto int_case;
3223 case TARGET_SO_BROADCAST:
3224 optname = SO_BROADCAST;
3225 goto int_case;
3226 case TARGET_SO_SNDBUF:
3227 optname = SO_SNDBUF;
3228 goto int_case;
3229 case TARGET_SO_RCVBUF:
3230 optname = SO_RCVBUF;
3231 goto int_case;
3232 case TARGET_SO_KEEPALIVE:
3233 optname = SO_KEEPALIVE;
3234 goto int_case;
3235 case TARGET_SO_OOBINLINE:
3236 optname = SO_OOBINLINE;
3237 goto int_case;
3238 case TARGET_SO_NO_CHECK:
3239 optname = SO_NO_CHECK;
3240 goto int_case;
3241 case TARGET_SO_PRIORITY:
3242 optname = SO_PRIORITY;
3243 goto int_case;
3244 #ifdef SO_BSDCOMPAT
3245 case TARGET_SO_BSDCOMPAT:
3246 optname = SO_BSDCOMPAT;
3247 goto int_case;
3248 #endif
3249 case TARGET_SO_PASSCRED:
3250 optname = SO_PASSCRED;
3251 goto int_case;
3252 case TARGET_SO_TIMESTAMP:
3253 optname = SO_TIMESTAMP;
3254 goto int_case;
3255 case TARGET_SO_RCVLOWAT:
3256 optname = SO_RCVLOWAT;
3257 goto int_case;
3258 case TARGET_SO_ACCEPTCONN:
3259 optname = SO_ACCEPTCONN;
3260 goto int_case;
3261 default:
3262 goto int_case;
3264 break;
3265 case SOL_TCP:
3266 /* TCP options all take an 'int' value. */
3267 int_case:
3268 if (get_user_u32(len, optlen))
3269 return -TARGET_EFAULT;
3270 if (len < 0)
3271 return -TARGET_EINVAL;
3272 lv = sizeof(lv);
3273 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3274 if (ret < 0)
3275 return ret;
3276 if (optname == SO_TYPE) {
3277 val = host_to_target_sock_type(val);
3279 if (len > lv)
3280 len = lv;
3281 if (len == 4) {
3282 if (put_user_u32(val, optval_addr))
3283 return -TARGET_EFAULT;
3284 } else {
3285 if (put_user_u8(val, optval_addr))
3286 return -TARGET_EFAULT;
3288 if (put_user_u32(len, optlen))
3289 return -TARGET_EFAULT;
3290 break;
3291 case SOL_IP:
3292 switch(optname) {
3293 case IP_TOS:
3294 case IP_TTL:
3295 case IP_HDRINCL:
3296 case IP_ROUTER_ALERT:
3297 case IP_RECVOPTS:
3298 case IP_RETOPTS:
3299 case IP_PKTINFO:
3300 case IP_MTU_DISCOVER:
3301 case IP_RECVERR:
3302 case IP_RECVTOS:
3303 #ifdef IP_FREEBIND
3304 case IP_FREEBIND:
3305 #endif
3306 case IP_MULTICAST_TTL:
3307 case IP_MULTICAST_LOOP:
3308 if (get_user_u32(len, optlen))
3309 return -TARGET_EFAULT;
3310 if (len < 0)
3311 return -TARGET_EINVAL;
3312 lv = sizeof(lv);
3313 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3314 if (ret < 0)
3315 return ret;
3316 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
3317 len = 1;
3318 if (put_user_u32(len, optlen)
3319 || put_user_u8(val, optval_addr))
3320 return -TARGET_EFAULT;
3321 } else {
3322 if (len > sizeof(int))
3323 len = sizeof(int);
3324 if (put_user_u32(len, optlen)
3325 || put_user_u32(val, optval_addr))
3326 return -TARGET_EFAULT;
3328 break;
3329 default:
3330 ret = -TARGET_ENOPROTOOPT;
3331 break;
3333 break;
3334 default:
3335 unimplemented:
3336 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3337 level, optname);
3338 ret = -TARGET_EOPNOTSUPP;
3339 break;
3341 return ret;
/*
 * lock_iovec: translate a guest iovec array at target_addr into a host
 * struct iovec array, locking each guest buffer into host memory.
 *
 * type: VERIFY_READ or VERIFY_WRITE, forwarded to lock_user().
 * copy: nonzero to copy guest data into the host buffer on lock.
 *
 * Returns a g_try_new0'd vector (released via unlock_iovec()), or NULL
 * with errno set on failure.  A bad buffer pointer after the first entry
 * is NOT an error: the remaining entries get zero length so the syscall
 * performs a partial transfer, matching kernel semantics.
 */
3344 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3345 abi_ulong count, int copy)
3347 struct target_iovec *target_vec;
3348 struct iovec *vec;
3349 abi_ulong total_len, max_len;
3350 int i;
3351 int err = 0;
3352 bool bad_address = false;
/* Empty vector is legal: return NULL but clear errno so the caller can
 * tell it apart from a failure. */
3354 if (count == 0) {
3355 errno = 0;
3356 return NULL;
3358 if (count > IOV_MAX) {
3359 errno = EINVAL;
3360 return NULL;
3363 vec = g_try_new0(struct iovec, count);
3364 if (vec == NULL) {
3365 errno = ENOMEM;
3366 return NULL;
3369 target_vec = lock_user(VERIFY_READ, target_addr,
3370 count * sizeof(struct target_iovec), 1);
3371 if (target_vec == NULL) {
3372 err = EFAULT;
3373 goto fail2;
3376 /* ??? If host page size > target page size, this will result in a
3377 value larger than what we can actually support. */
3378 max_len = 0x7fffffff & TARGET_PAGE_MASK;
3379 total_len = 0;
3381 for (i = 0; i < count; i++) {
3382 abi_ulong base = tswapal(target_vec[i].iov_base);
3383 abi_long len = tswapal(target_vec[i].iov_len);
3385 if (len < 0) {
3386 err = EINVAL;
3387 goto fail;
3388 } else if (len == 0) {
3389 /* Zero length pointer is ignored. */
3390 vec[i].iov_base = 0;
3391 } else {
3392 vec[i].iov_base = lock_user(type, base, len, copy);
3393 /* If the first buffer pointer is bad, this is a fault. But
3394 * subsequent bad buffers will result in a partial write; this
3395 * is realized by filling the vector with null pointers and
3396 * zero lengths. */
3397 if (!vec[i].iov_base) {
3398 if (i == 0) {
3399 err = EFAULT;
3400 goto fail;
3401 } else {
3402 bad_address = true;
3405 if (bad_address) {
3406 len = 0;
/* Clamp so the running total never exceeds max_len. */
3408 if (len > max_len - total_len) {
3409 len = max_len - total_len;
3412 vec[i].iov_len = len;
3413 total_len += len;
3416 unlock_user(target_vec, target_addr, 0);
3417 return vec;
/* Unwind: unlock every buffer locked so far (entry i itself was not
 * locked when we jump here), then fall through to free the vector. */
3419 fail:
3420 while (--i >= 0) {
3421 if (tswapal(target_vec[i].iov_len) > 0) {
3422 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3425 unlock_user(target_vec, target_addr, 0);
3426 fail2:
3427 g_free(vec);
3428 errno = err;
3429 return NULL;
/*
 * unlock_iovec: release a vector obtained from lock_iovec().  Re-reads the
 * guest iovec array to recover each entry's guest base address, unlocking
 * each locked buffer (copying data back to the guest when copy != 0), then
 * frees the host vector.  Stops at the first negative length, mirroring
 * the failure point of lock_iovec().
 */
3432 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3433 abi_ulong count, int copy)
3435 struct target_iovec *target_vec;
3436 int i;
3438 target_vec = lock_user(VERIFY_READ, target_addr,
3439 count * sizeof(struct target_iovec), 1);
3440 if (target_vec) {
3441 for (i = 0; i < count; i++) {
3442 abi_ulong base = tswapal(target_vec[i].iov_base);
3443 abi_long len = tswapal(target_vec[i].iov_len);
3444 if (len < 0) {
3445 break;
/* copy selects whether vec[i].iov_len bytes are written back. */
3447 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3449 unlock_user(target_vec, target_addr, 0);
3452 g_free(vec);
3455 static inline int target_to_host_sock_type(int *type)
3457 int host_type = 0;
3458 int target_type = *type;
3460 switch (target_type & TARGET_SOCK_TYPE_MASK) {
3461 case TARGET_SOCK_DGRAM:
3462 host_type = SOCK_DGRAM;
3463 break;
3464 case TARGET_SOCK_STREAM:
3465 host_type = SOCK_STREAM;
3466 break;
3467 default:
3468 host_type = target_type & TARGET_SOCK_TYPE_MASK;
3469 break;
3471 if (target_type & TARGET_SOCK_CLOEXEC) {
3472 #if defined(SOCK_CLOEXEC)
3473 host_type |= SOCK_CLOEXEC;
3474 #else
3475 return -TARGET_EINVAL;
3476 #endif
3478 if (target_type & TARGET_SOCK_NONBLOCK) {
3479 #if defined(SOCK_NONBLOCK)
3480 host_type |= SOCK_NONBLOCK;
3481 #elif !defined(O_NONBLOCK)
3482 return -TARGET_EINVAL;
3483 #endif
3485 *type = host_type;
3486 return 0;
3489 /* Try to emulate socket type flags after socket creation. */
3490 static int sock_flags_fixup(int fd, int target_type)
3492 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3493 if (target_type & TARGET_SOCK_NONBLOCK) {
3494 int flags = fcntl(fd, F_GETFL);
3495 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3496 close(fd);
3497 return -TARGET_EINVAL;
3500 #endif
3501 return fd;
3504 static abi_long packet_target_to_host_sockaddr(void *host_addr,
3505 abi_ulong target_addr,
3506 socklen_t len)
3508 struct sockaddr *addr = host_addr;
3509 struct target_sockaddr *target_saddr;
3511 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
3512 if (!target_saddr) {
3513 return -TARGET_EFAULT;
3516 memcpy(addr, target_saddr, len);
3517 addr->sa_family = tswap16(target_saddr->sa_family);
3518 /* spkt_protocol is big-endian */
3520 unlock_user(target_saddr, target_addr, 0);
3521 return 0;
/* fd translator registered for SOCK_PACKET sockets: only the sockaddr
 * needs conversion, data payloads pass through untouched. */
3524 static TargetFdTrans target_packet_trans = {
3525 .target_to_host_addr = packet_target_to_host_sockaddr,
3528 #ifdef CONFIG_RTNETLINK
3529 static abi_long netlink_route_target_to_host(void *buf, size_t len)
3531 abi_long ret;
3533 ret = target_to_host_nlmsg_route(buf, len);
3534 if (ret < 0) {
3535 return ret;
3538 return len;
3541 static abi_long netlink_route_host_to_target(void *buf, size_t len)
3543 abi_long ret;
3545 ret = host_to_target_nlmsg_route(buf, len);
3546 if (ret < 0) {
3547 return ret;
3550 return len;
/* fd translator registered for NETLINK_ROUTE sockets: byteswap messages
 * in both directions. */
3553 static TargetFdTrans target_netlink_route_trans = {
3554 .target_to_host_data = netlink_route_target_to_host,
3555 .host_to_target_data = netlink_route_host_to_target,
3557 #endif /* CONFIG_RTNETLINK */
3559 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
3561 abi_long ret;
3563 ret = target_to_host_nlmsg_audit(buf, len);
3564 if (ret < 0) {
3565 return ret;
3568 return len;
3571 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
3573 abi_long ret;
3575 ret = host_to_target_nlmsg_audit(buf, len);
3576 if (ret < 0) {
3577 return ret;
3580 return len;
/* fd translator registered for NETLINK_AUDIT sockets: byteswap messages
 * in both directions. */
3583 static TargetFdTrans target_netlink_audit_trans = {
3584 .target_to_host_data = netlink_audit_target_to_host,
3585 .host_to_target_data = netlink_audit_host_to_target,
3588 /* do_socket() Must return target values and target errnos. */
3589 static abi_long do_socket(int domain, int type, int protocol)
/* Keep the untranslated type: sock_flags_fixup() needs the guest flag
 * bits after target_to_host_sock_type() rewrites 'type'. */
3591 int target_type = type;
3592 int ret;
3594 ret = target_to_host_sock_type(&type);
3595 if (ret) {
3596 return ret;
/* Only the netlink protocols we can translate are allowed through. */
3599 if (domain == PF_NETLINK && !(
3600 #ifdef CONFIG_RTNETLINK
3601 protocol == NETLINK_ROUTE ||
3602 #endif
3603 protocol == NETLINK_KOBJECT_UEVENT ||
3604 protocol == NETLINK_AUDIT)) {
3605 return -EPFNOSUPPORT;
/* Packet sockets take a big-endian protocol number. */
3608 if (domain == AF_PACKET ||
3609 (domain == AF_INET && type == SOCK_PACKET)) {
3610 protocol = tswap16(protocol);
3613 ret = get_errno(socket(domain, type, protocol));
3614 if (ret >= 0) {
3615 ret = sock_flags_fixup(ret, target_type);
3616 if (type == SOCK_PACKET) {
3617 /* Manage an obsolete case :
3618 * if socket type is SOCK_PACKET, bind by name
3620 fd_trans_register(ret, &target_packet_trans);
3621 } else if (domain == PF_NETLINK) {
3622 switch (protocol) {
3623 #ifdef CONFIG_RTNETLINK
3624 case NETLINK_ROUTE:
3625 fd_trans_register(ret, &target_netlink_route_trans);
3626 break;
3627 #endif
3628 case NETLINK_KOBJECT_UEVENT:
3629 /* nothing to do: messages are strings */
3630 break;
3631 case NETLINK_AUDIT:
3632 fd_trans_register(ret, &target_netlink_audit_trans);
3633 break;
3634 default:
/* Unreachable: the protocol filter above rejected everything else. */
3635 g_assert_not_reached();
3639 return ret;
3642 /* do_bind() Must return target values and target errnos. */
3643 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3644 socklen_t addrlen)
3646 void *addr;
3647 abi_long ret;
3649 if ((int)addrlen < 0) {
3650 return -TARGET_EINVAL;
3653 addr = alloca(addrlen+1);
3655 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3656 if (ret)
3657 return ret;
3659 return get_errno(bind(sockfd, addr, addrlen));
3662 /* do_connect() Must return target values and target errnos. */
3663 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3664 socklen_t addrlen)
3666 void *addr;
3667 abi_long ret;
3669 if ((int)addrlen < 0) {
3670 return -TARGET_EINVAL;
3673 addr = alloca(addrlen+1);
3675 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3676 if (ret)
3677 return ret;
3679 return get_errno(safe_connect(sockfd, addr, addrlen));
3682 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
/* Core of sendmsg/recvmsg emulation: msgp is the already-locked guest
 * msghdr.  send != 0 selects sendmsg, otherwise recvmsg.  Converts the
 * name, control and iovec parts between guest and host representations
 * around the host syscall. */
3683 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3684 int flags, int send)
3686 abi_long ret, len;
3687 struct msghdr msg;
3688 abi_ulong count;
3689 struct iovec *vec;
3690 abi_ulong target_vec;
3692 if (msgp->msg_name) {
3693 msg.msg_namelen = tswap32(msgp->msg_namelen);
3694 msg.msg_name = alloca(msg.msg_namelen+1);
3695 ret = target_to_host_sockaddr(fd, msg.msg_name,
3696 tswapal(msgp->msg_name),
3697 msg.msg_namelen);
3698 if (ret == -TARGET_EFAULT) {
3699 /* For connected sockets msg_name and msg_namelen must
3700 * be ignored, so returning EFAULT immediately is wrong.
3701 * Instead, pass a bad msg_name to the host kernel, and
3702 * let it decide whether to return EFAULT or not.
3704 msg.msg_name = (void *)-1;
3705 } else if (ret) {
3706 goto out2;
3708 } else {
3709 msg.msg_name = NULL;
3710 msg.msg_namelen = 0;
/* Host cmsg headers can be larger than the guest's; double the buffer
 * so the converted control data is guaranteed to fit. */
3712 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3713 msg.msg_control = alloca(msg.msg_controllen);
3714 msg.msg_flags = tswap32(msgp->msg_flags);
3716 count = tswapal(msgp->msg_iovlen);
3717 target_vec = tswapal(msgp->msg_iov);
3719 if (count > IOV_MAX) {
3720 /* sendrcvmsg returns a different errno for this condition than
3721 * readv/writev, so we must catch it here before lock_iovec() does.
3723 ret = -TARGET_EMSGSIZE;
3724 goto out2;
3727 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3728 target_vec, count, send);
3729 if (vec == NULL) {
3730 ret = -host_to_target_errno(errno);
3731 goto out2;
3733 msg.msg_iovlen = count;
3734 msg.msg_iov = vec;
3736 if (send) {
3737 if (fd_trans_target_to_host_data(fd)) {
/* A registered fd translator only sees the first iov entry; copy it
 * so the guest's buffer is not modified in place. */
3738 void *host_msg;
3740 host_msg = g_malloc(msg.msg_iov->iov_len);
3741 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3742 ret = fd_trans_target_to_host_data(fd)(host_msg,
3743 msg.msg_iov->iov_len);
3744 if (ret >= 0) {
3745 msg.msg_iov->iov_base = host_msg;
3746 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3748 g_free(host_msg);
3749 } else {
3750 ret = target_to_host_cmsg(&msg, msgp);
3751 if (ret == 0) {
3752 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3755 } else {
3756 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3757 if (!is_error(ret)) {
3758 len = ret;
3759 if (fd_trans_host_to_target_data(fd)) {
3760 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3761 len);
3762 } else {
3763 ret = host_to_target_cmsg(msgp, &msg);
3765 if (!is_error(ret)) {
3766 msgp->msg_namelen = tswap32(msg.msg_namelen);
3767 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3768 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3769 msg.msg_name, msg.msg_namelen);
3770 if (ret) {
3771 goto out;
/* On success, return the number of bytes received. */
3775 ret = len;
3780 out:
3781 unlock_iovec(vec, target_vec, count, !send);
3782 out2:
3783 return ret;
3786 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3787 int flags, int send)
3789 abi_long ret;
3790 struct target_msghdr *msgp;
3792 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3793 msgp,
3794 target_msg,
3795 send ? 1 : 0)) {
3796 return -TARGET_EFAULT;
3798 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3799 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3800 return ret;
3803 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3804 * so it might not have this *mmsg-specific flag either.
3806 #ifndef MSG_WAITFORONE
3807 #define MSG_WAITFORONE 0x10000
3808 #endif
/* Emulate sendmmsg/recvmmsg as a loop of single sendmsg/recvmsg calls on
 * the guest mmsghdr array.  Returns the number of datagrams transferred,
 * or the error if none were. */
3810 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3811 unsigned int vlen, unsigned int flags,
3812 int send)
3814 struct target_mmsghdr *mmsgp;
3815 abi_long ret = 0;
3816 int i;
/* The kernel silently clamps vlen the same way. */
3818 if (vlen > UIO_MAXIOV) {
3819 vlen = UIO_MAXIOV;
3822 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3823 if (!mmsgp) {
3824 return -TARGET_EFAULT;
3827 for (i = 0; i < vlen; i++) {
3828 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3829 if (is_error(ret)) {
3830 break;
3832 mmsgp[i].msg_len = tswap32(ret);
3833 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3834 if (flags & MSG_WAITFORONE) {
3835 flags |= MSG_DONTWAIT;
/* Only the i entries actually processed are copied back. */
3839 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3841 /* Return number of datagrams sent if we sent any at all;
3842 * otherwise return the error.
3844 if (i) {
3845 return i;
3847 return ret;
3850 /* do_accept4() Must return target values and target errnos. */
/* Shared implementation of accept and accept4; flags are guest fcntl
 * bits and are translated to host values.  target_addr == 0 means the
 * caller does not want the peer address. */
3851 static abi_long do_accept4(int fd, abi_ulong target_addr,
3852 abi_ulong target_addrlen_addr, int flags)
3854 socklen_t addrlen;
3855 void *addr;
3856 abi_long ret;
3857 int host_flags;
3859 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3861 if (target_addr == 0) {
3862 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3865 /* linux returns EINVAL if addrlen pointer is invalid */
3866 if (get_user_u32(addrlen, target_addrlen_addr))
3867 return -TARGET_EINVAL;
3869 if ((int)addrlen < 0) {
3870 return -TARGET_EINVAL;
3873 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3874 return -TARGET_EINVAL;
3876 addr = alloca(addrlen);
3878 ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
3879 if (!is_error(ret)) {
3880 host_to_target_sockaddr(target_addr, addr, addrlen);
3881 if (put_user_u32(addrlen, target_addrlen_addr))
3882 ret = -TARGET_EFAULT;
3884 return ret;
3887 /* do_getpeername() Must return target values and target errnos. */
3888 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3889 abi_ulong target_addrlen_addr)
3891 socklen_t addrlen;
3892 void *addr;
3893 abi_long ret;
3895 if (get_user_u32(addrlen, target_addrlen_addr))
3896 return -TARGET_EFAULT;
3898 if ((int)addrlen < 0) {
3899 return -TARGET_EINVAL;
3902 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3903 return -TARGET_EFAULT;
3905 addr = alloca(addrlen);
3907 ret = get_errno(getpeername(fd, addr, &addrlen));
3908 if (!is_error(ret)) {
3909 host_to_target_sockaddr(target_addr, addr, addrlen);
3910 if (put_user_u32(addrlen, target_addrlen_addr))
3911 ret = -TARGET_EFAULT;
3913 return ret;
3916 /* do_getsockname() Must return target values and target errnos. */
3917 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3918 abi_ulong target_addrlen_addr)
3920 socklen_t addrlen;
3921 void *addr;
3922 abi_long ret;
3924 if (get_user_u32(addrlen, target_addrlen_addr))
3925 return -TARGET_EFAULT;
3927 if ((int)addrlen < 0) {
3928 return -TARGET_EINVAL;
3931 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3932 return -TARGET_EFAULT;
3934 addr = alloca(addrlen);
3936 ret = get_errno(getsockname(fd, addr, &addrlen));
3937 if (!is_error(ret)) {
3938 host_to_target_sockaddr(target_addr, addr, addrlen);
3939 if (put_user_u32(addrlen, target_addrlen_addr))
3940 ret = -TARGET_EFAULT;
3942 return ret;
3945 /* do_socketpair() Must return target values and target errnos. */
3946 static abi_long do_socketpair(int domain, int type, int protocol,
3947 abi_ulong target_tab_addr)
3949 int tab[2];
3950 abi_long ret;
3952 target_to_host_sock_type(&type);
3954 ret = get_errno(socketpair(domain, type, protocol, tab));
3955 if (!is_error(ret)) {
3956 if (put_user_s32(tab[0], target_tab_addr)
3957 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3958 ret = -TARGET_EFAULT;
3960 return ret;
3963 /* do_sendto() Must return target values and target errnos. */
/* Emulates send (target_addr == 0) and sendto.  When an fd translator is
 * registered, the guest data is copied so the translator can rewrite it
 * without touching guest memory. */
3964 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3965 abi_ulong target_addr, socklen_t addrlen)
3967 void *addr;
3968 void *host_msg;
/* Non-NULL copy_msg means host_msg is a g_malloc'd copy to free later. */
3969 void *copy_msg = NULL;
3970 abi_long ret;
3972 if ((int)addrlen < 0) {
3973 return -TARGET_EINVAL;
3976 host_msg = lock_user(VERIFY_READ, msg, len, 1);
3977 if (!host_msg)
3978 return -TARGET_EFAULT;
3979 if (fd_trans_target_to_host_data(fd)) {
3980 copy_msg = host_msg;
3981 host_msg = g_malloc(len);
3982 memcpy(host_msg, copy_msg, len);
3983 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3984 if (ret < 0) {
3985 goto fail;
3988 if (target_addr) {
3989 addr = alloca(addrlen+1);
3990 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3991 if (ret) {
3992 goto fail;
3994 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3995 } else {
3996 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
/* Common exit: free the translated copy (if any) and restore host_msg so
 * the original locked guest buffer is the one unlocked. */
3998 fail:
3999 if (copy_msg) {
4000 g_free(host_msg);
4001 host_msg = copy_msg;
4003 unlock_user(host_msg, msg, 0);
4004 return ret;
4007 /* do_recvfrom() Must return target values and target errnos. */
/* Emulates recv (target_addr == 0) and recvfrom.  On success the received
 * bytes are copied back to the guest buffer and, when requested, the
 * source address and its length are written back too. */
4008 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
4009 abi_ulong target_addr,
4010 abi_ulong target_addrlen)
4012 socklen_t addrlen;
4013 void *addr;
4014 void *host_msg;
4015 abi_long ret;
4017 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
4018 if (!host_msg)
4019 return -TARGET_EFAULT;
4020 if (target_addr) {
4021 if (get_user_u32(addrlen, target_addrlen)) {
4022 ret = -TARGET_EFAULT;
4023 goto fail;
4025 if ((int)addrlen < 0) {
4026 ret = -TARGET_EINVAL;
4027 goto fail;
4029 addr = alloca(addrlen);
4030 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
4031 addr, &addrlen));
4032 } else {
4033 addr = NULL; /* To keep compiler quiet. */
4034 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
4036 if (!is_error(ret)) {
/* A registered fd translator rewrites the payload in place first. */
4037 if (fd_trans_host_to_target_data(fd)) {
4038 ret = fd_trans_host_to_target_data(fd)(host_msg, ret);
4040 if (target_addr) {
4041 host_to_target_sockaddr(target_addr, addr, addrlen);
4042 if (put_user_u32(addrlen, target_addrlen)) {
4043 ret = -TARGET_EFAULT;
4044 goto fail;
/* Success: unlock with copy-back of the received bytes. */
4047 unlock_user(host_msg, msg, len);
4048 } else {
4049 fail:
4050 unlock_user(host_msg, msg, 0);
4052 return ret;
4055 #ifdef TARGET_NR_socketcall
4056 /* do_socketcall() must return target values and target errnos. */
/* Demultiplexer for the legacy socketcall(2) syscall: num selects the
 * operation, vptr points to its packed argument array in guest memory. */
4057 static abi_long do_socketcall(int num, abi_ulong vptr)
4059 static const unsigned nargs[] = { /* number of arguments per operation */
4060 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */
4061 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */
4062 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */
4063 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */
4064 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */
4065 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
4066 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
4067 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */
4068 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */
4069 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */
4070 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */
4071 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */
4072 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */
4073 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
4074 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
4075 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */
4076 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */
4077 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */
4078 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */
4079 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */
4081 abi_long a[6]; /* max 6 args */
4082 unsigned i;
4084 /* check the range of the first argument num */
4085 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
4086 if (num < 1 || num > TARGET_SYS_SENDMMSG) {
4087 return -TARGET_EINVAL;
4089 /* ensure we have space for args */
4090 if (nargs[num] > ARRAY_SIZE(a)) {
4091 return -TARGET_EINVAL;
4093 /* collect the arguments in a[] according to nargs[] */
4094 for (i = 0; i < nargs[num]; ++i) {
4095 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
4096 return -TARGET_EFAULT;
4099 /* now when we have the args, invoke the appropriate underlying function */
4100 switch (num) {
4101 case TARGET_SYS_SOCKET: /* domain, type, protocol */
4102 return do_socket(a[0], a[1], a[2]);
4103 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
4104 return do_bind(a[0], a[1], a[2]);
4105 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
4106 return do_connect(a[0], a[1], a[2]);
4107 case TARGET_SYS_LISTEN: /* sockfd, backlog */
4108 return get_errno(listen(a[0], a[1]));
4109 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
4110 return do_accept4(a[0], a[1], a[2], 0);
4111 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
4112 return do_getsockname(a[0], a[1], a[2]);
4113 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
4114 return do_getpeername(a[0], a[1], a[2]);
4115 case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
4116 return do_socketpair(a[0], a[1], a[2], a[3]);
4117 case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
4118 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
4119 case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
4120 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
4121 case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
4122 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
4123 case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
4124 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
4125 case TARGET_SYS_SHUTDOWN: /* sockfd, how */
4126 return get_errno(shutdown(a[0], a[1]));
4127 case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
4128 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
4129 case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
4130 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
4131 case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
4132 return do_sendrecvmsg(a[0], a[1], a[2], 1);
4133 case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
4134 return do_sendrecvmsg(a[0], a[1], a[2], 0);
4135 case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
4136 return do_accept4(a[0], a[1], a[2], a[3]);
4137 case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
4138 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
4139 case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
4140 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
4141 default:
4142 gemu_log("Unsupported socketcall: %d\n", num);
4143 return -TARGET_EINVAL;
4146 #endif
4148 #define N_SHM_REGIONS 32
/* Fixed-size table tracking guest shmat() mappings so shmdt() can find
 * the size of the region to unmap. */
4150 static struct shm_region {
4151 abi_ulong start;
4152 abi_ulong size;
4153 bool in_use;
4154 } shm_regions[N_SHM_REGIONS];
4156 #ifndef TARGET_SEMID64_DS
4157 /* asm-generic version of this struct */
/* Guest-ABI layout of semid64_ds; field order and padding mirror the
 * kernel's asm-generic definition, so do not reorder. */
4158 struct target_semid64_ds
4160 struct target_ipc_perm sem_perm;
4161 abi_ulong sem_otime;
4162 #if TARGET_ABI_BITS == 32
4163 abi_ulong __unused1;
4164 #endif
4165 abi_ulong sem_ctime;
4166 #if TARGET_ABI_BITS == 32
4167 abi_ulong __unused2;
4168 #endif
4169 abi_ulong sem_nsems;
4170 abi_ulong __unused3;
4171 abi_ulong __unused4;
4173 #endif
/* Copy the ipc_perm embedded in a guest semid64_ds at target_addr into
 * host_ip, byte-swapping each field.  mode/__seq widths differ per
 * target ABI, hence the per-architecture swaps.  Returns 0 or
 * -TARGET_EFAULT. */
4175 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
4176 abi_ulong target_addr)
4178 struct target_ipc_perm *target_ip;
4179 struct target_semid64_ds *target_sd;
4181 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4182 return -TARGET_EFAULT;
4183 target_ip = &(target_sd->sem_perm);
4184 host_ip->__key = tswap32(target_ip->__key);
4185 host_ip->uid = tswap32(target_ip->uid);
4186 host_ip->gid = tswap32(target_ip->gid);
4187 host_ip->cuid = tswap32(target_ip->cuid);
4188 host_ip->cgid = tswap32(target_ip->cgid);
4189 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4190 host_ip->mode = tswap32(target_ip->mode);
4191 #else
4192 host_ip->mode = tswap16(target_ip->mode);
4193 #endif
4194 #if defined(TARGET_PPC)
4195 host_ip->__seq = tswap32(target_ip->__seq);
4196 #else
4197 host_ip->__seq = tswap16(target_ip->__seq);
4198 #endif
4199 unlock_user_struct(target_sd, target_addr, 0);
4200 return 0;
/* Inverse of target_to_host_ipc_perm(): write host_ip into the ipc_perm
 * embedded in the guest semid64_ds at target_addr.  Returns 0 or
 * -TARGET_EFAULT. */
4203 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
4204 struct ipc_perm *host_ip)
4206 struct target_ipc_perm *target_ip;
4207 struct target_semid64_ds *target_sd;
4209 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4210 return -TARGET_EFAULT;
4211 target_ip = &(target_sd->sem_perm);
4212 target_ip->__key = tswap32(host_ip->__key);
4213 target_ip->uid = tswap32(host_ip->uid);
4214 target_ip->gid = tswap32(host_ip->gid);
4215 target_ip->cuid = tswap32(host_ip->cuid);
4216 target_ip->cgid = tswap32(host_ip->cgid);
4217 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4218 target_ip->mode = tswap32(host_ip->mode);
4219 #else
4220 target_ip->mode = tswap16(host_ip->mode);
4221 #endif
4222 #if defined(TARGET_PPC)
4223 target_ip->__seq = tswap32(host_ip->__seq);
4224 #else
4225 target_ip->__seq = tswap16(host_ip->__seq);
4226 #endif
4227 unlock_user_struct(target_sd, target_addr, 1);
4228 return 0;
/* Convert a guest semid64_ds at target_addr into host_sd (perm struct
 * plus times and semaphore count).  Returns 0 or -TARGET_EFAULT. */
4231 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
4232 abi_ulong target_addr)
4234 struct target_semid64_ds *target_sd;
4236 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4237 return -TARGET_EFAULT;
4238 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
4239 return -TARGET_EFAULT;
4240 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
4241 host_sd->sem_otime = tswapal(target_sd->sem_otime);
4242 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
4243 unlock_user_struct(target_sd, target_addr, 0);
4244 return 0;
/* Inverse of target_to_host_semid_ds(): write host_sd into the guest
 * semid64_ds at target_addr.  Returns 0 or -TARGET_EFAULT. */
4247 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
4248 struct semid_ds *host_sd)
4250 struct target_semid64_ds *target_sd;
4252 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4253 return -TARGET_EFAULT;
4254 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
4255 return -TARGET_EFAULT;
4256 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
4257 target_sd->sem_otime = tswapal(host_sd->sem_otime);
4258 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
4259 unlock_user_struct(target_sd, target_addr, 1);
4260 return 0;
/* Guest-ABI layout of struct seminfo (semctl IPC_INFO/SEM_INFO). */
4263 struct target_seminfo {
4264 int semmap;
4265 int semmni;
4266 int semmns;
4267 int semmnu;
4268 int semmsl;
4269 int semopm;
4270 int semume;
4271 int semusz;
4272 int semvmx;
4273 int semaem;
/* Write a host struct seminfo to the guest at target_addr, swapping each
 * int field.  Returns 0 or -TARGET_EFAULT. */
4276 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
4277 struct seminfo *host_seminfo)
4279 struct target_seminfo *target_seminfo;
4280 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
4281 return -TARGET_EFAULT;
4282 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
4283 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
4284 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
4285 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
4286 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
4287 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
4288 __put_user(host_seminfo->semume, &target_seminfo->semume);
4289 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
4290 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
4291 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
4292 unlock_user_struct(target_seminfo, target_addr, 1);
4293 return 0;
/* Host-side semctl argument union (glibc does not define this). */
4296 union semun {
4297 int val;
4298 struct semid_ds *buf;
4299 unsigned short *array;
4300 struct seminfo *__buf;
/* Guest-side semctl argument union: pointers are guest addresses. */
4303 union target_semun {
4304 int val;
4305 abi_ulong buf;
4306 abi_ulong array;
4307 abi_ulong __buf;
/* Allocate *host_array and fill it from the guest semaphore-value array
 * at target_addr (for SETALL).  The element count is queried from the
 * kernel via IPC_STAT.  On success the caller owns *host_array.  Returns
 * 0, or a negative target errno (nothing left allocated on failure). */
4310 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
4311 abi_ulong target_addr)
4313 int nsems;
4314 unsigned short *array;
4315 union semun semun;
4316 struct semid_ds semid_ds;
4317 int i, ret;
4319 semun.buf = &semid_ds;
4321 ret = semctl(semid, 0, IPC_STAT, semun);
4322 if (ret == -1)
4323 return get_errno(ret);
4325 nsems = semid_ds.sem_nsems;
4327 *host_array = g_try_new(unsigned short, nsems);
4328 if (!*host_array) {
4329 return -TARGET_ENOMEM;
4331 array = lock_user(VERIFY_READ, target_addr,
4332 nsems*sizeof(unsigned short), 1);
4333 if (!array) {
4334 g_free(*host_array);
4335 return -TARGET_EFAULT;
4338 for(i=0; i<nsems; i++) {
4339 __get_user((*host_array)[i], &array[i]);
4341 unlock_user(array, target_addr, 0);
4343 return 0;
4346 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4347 unsigned short **host_array)
4349 int nsems;
4350 unsigned short *array;
4351 union semun semun;
4352 struct semid_ds semid_ds;
4353 int i, ret;
4355 semun.buf = &semid_ds;
4357 ret = semctl(semid, 0, IPC_STAT, semun);
4358 if (ret == -1)
4359 return get_errno(ret);
4361 nsems = semid_ds.sem_nsems;
4363 array = lock_user(VERIFY_WRITE, target_addr,
4364 nsems*sizeof(unsigned short), 0);
4365 if (!array)
4366 return -TARGET_EFAULT;
4368 for(i=0; i<nsems; i++) {
4369 __put_user((*host_array)[i], &array[i]);
4371 g_free(*host_array);
4372 unlock_user(array, target_addr, 1);
4374 return 0;
/* Emulate semctl(2): translate the guest's semun argument for each
 * command class, call the host semctl, and convert results back.
 * Returns the host result or a negative target errno. */
4377 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4378 abi_ulong target_arg)
4380 union target_semun target_su = { .buf = target_arg };
4381 union semun arg;
4382 struct semid_ds dsarg;
4383 unsigned short *array = NULL;
4384 struct seminfo seminfo;
4385 abi_long ret = -TARGET_EINVAL;
4386 abi_long err;
/* Strip IPC_64 and similar flag bits from the command. */
4387 cmd &= 0xff;
4389 switch( cmd ) {
4390 case GETVAL:
4391 case SETVAL:
4392 /* In 64 bit cross-endian situations, we will erroneously pick up
4393 * the wrong half of the union for the "val" element. To rectify
4394 * this, the entire 8-byte structure is byteswapped, followed by
4395 * a swap of the 4 byte val field. In other cases, the data is
4396 * already in proper host byte order. */
4397 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4398 target_su.buf = tswapal(target_su.buf);
4399 arg.val = tswap32(target_su.val);
4400 } else {
4401 arg.val = target_su.val;
4403 ret = get_errno(semctl(semid, semnum, cmd, arg));
4404 break;
4405 case GETALL:
4406 case SETALL:
4407 err = target_to_host_semarray(semid, &array, target_su.array);
4408 if (err)
4409 return err;
4410 arg.array = array;
4411 ret = get_errno(semctl(semid, semnum, cmd, arg));
4412 err = host_to_target_semarray(semid, target_su.array, &array);
4413 if (err)
4414 return err;
4415 break;
4416 case IPC_STAT:
4417 case IPC_SET:
4418 case SEM_STAT:
4419 err = target_to_host_semid_ds(&dsarg, target_su.buf);
4420 if (err)
4421 return err;
4422 arg.buf = &dsarg;
4423 ret = get_errno(semctl(semid, semnum, cmd, arg));
4424 err = host_to_target_semid_ds(target_su.buf, &dsarg);
4425 if (err)
4426 return err;
4427 break;
4428 case IPC_INFO:
4429 case SEM_INFO:
4430 arg.__buf = &seminfo;
4431 ret = get_errno(semctl(semid, semnum, cmd, arg));
4432 err = host_to_target_seminfo(target_su.__buf, &seminfo);
4433 if (err)
4434 return err;
4435 break;
/* Commands below take no semun argument at all. */
4436 case IPC_RMID:
4437 case GETPID:
4438 case GETNCNT:
4439 case GETZCNT:
4440 ret = get_errno(semctl(semid, semnum, cmd, NULL));
4441 break;
4444 return ret;
/* Guest-layout sembuf; converted field-by-field by target_to_host_sembuf(). */
struct target_sembuf {
    unsigned short sem_num;  /* semaphore index within the set */
    short sem_op;            /* operation (add/subtract/wait-for-zero) */
    short sem_flg;           /* flags such as IPC_NOWAIT, SEM_UNDO */
};
4453 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4454 abi_ulong target_addr,
4455 unsigned nsops)
4457 struct target_sembuf *target_sembuf;
4458 int i;
4460 target_sembuf = lock_user(VERIFY_READ, target_addr,
4461 nsops*sizeof(struct target_sembuf), 1);
4462 if (!target_sembuf)
4463 return -TARGET_EFAULT;
4465 for(i=0; i<nsops; i++) {
4466 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4467 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4468 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4471 unlock_user(target_sembuf, target_addr, 0);
4473 return 0;
/* Emulate semop(2): convert the guest sembuf array at ptr and issue the
 * interruption-safe host semtimedop with no timeout.
 * NOTE(review): nsops comes straight from the guest and sizes a VLA here;
 * presumably the host kernel's SEMOPM limit rejects oversized requests
 * before this matters, but a huge nsops could overflow the stack first —
 * confirm and consider an explicit bound.
 */
static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
{
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
}
/* Guest-layout msqid_ds.  On 32-bit ABIs each time field is padded to
 * 64 bits with an unused word, hence the conditional members. */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;        /* time of last msgsnd() */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;        /* time of last msgrcv() */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;        /* time of last change */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;     /* current number of bytes on queue */
    abi_ulong msg_qnum;         /* number of messages on queue */
    abi_ulong msg_qbytes;       /* max number of bytes allowed on queue */
    abi_ulong msg_lspid;        /* pid of last msgsnd() */
    abi_ulong msg_lrpid;        /* pid of last msgrcv() */
    abi_ulong __unused4;
    abi_ulong __unused5;
};
4510 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4511 abi_ulong target_addr)
4513 struct target_msqid_ds *target_md;
4515 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4516 return -TARGET_EFAULT;
4517 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4518 return -TARGET_EFAULT;
4519 host_md->msg_stime = tswapal(target_md->msg_stime);
4520 host_md->msg_rtime = tswapal(target_md->msg_rtime);
4521 host_md->msg_ctime = tswapal(target_md->msg_ctime);
4522 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4523 host_md->msg_qnum = tswapal(target_md->msg_qnum);
4524 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4525 host_md->msg_lspid = tswapal(target_md->msg_lspid);
4526 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4527 unlock_user_struct(target_md, target_addr, 0);
4528 return 0;
4531 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4532 struct msqid_ds *host_md)
4534 struct target_msqid_ds *target_md;
4536 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4537 return -TARGET_EFAULT;
4538 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4539 return -TARGET_EFAULT;
4540 target_md->msg_stime = tswapal(host_md->msg_stime);
4541 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4542 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4543 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4544 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4545 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4546 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4547 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4548 unlock_user_struct(target_md, target_addr, 1);
4549 return 0;
/* Guest-layout msginfo, returned by msgctl(IPC_INFO/MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;                 /* max size of a message */
    int msgmnb;                 /* default max bytes on a queue */
    int msgmni;                 /* max number of message queues */
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4563 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4564 struct msginfo *host_msginfo)
4566 struct target_msginfo *target_msginfo;
4567 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4568 return -TARGET_EFAULT;
4569 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4570 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4571 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4572 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4573 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4574 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4575 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4576 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4577 unlock_user_struct(target_msginfo, target_addr, 1);
4578 return 0;
/* Emulate msgctl(2), converting msqid_ds / msginfo between guest and
 * host layouts depending on the command.
 * Returns the host result or a -TARGET_* errno. */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    /* Strip IPC_64-style flag bits. */
    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        /* Commands exchanging a msqid_ds with the guest. */
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* The kernel overloads the msqid_ds argument as a msginfo
         * pointer for these commands; the cast mirrors that. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
/* Guest-layout msgbuf header.  mtext[1] is the pre-C99 idiom for a
 * trailing variable-length payload; the real text length is the msgsz
 * passed to msgsnd/msgrcv. */
struct target_msgbuf {
    abi_long mtype;     /* message type, must be > 0 */
    char mtext[1];      /* message payload (variable length) */
};
/* Emulate msgsnd(2): copy the guest message (mtype + msgsz payload
 * bytes) into a host msgbuf and send it via the interruption-safe
 * wrapper.  Returns 0 or a -TARGET_* errno.
 *
 * NOTE(review): only sizeof(*target_mb) is locked, yet msgsz bytes are
 * read from target_mb->mtext — this assumes guest memory is directly
 * addressable beyond the locked struct; confirm against the lock_user
 * implementation.  Also, msgsz + sizeof(long) could in principle
 * overflow on a 32-bit host for a hostile msgsz — verify upstream
 * handling.
 */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* Host buffer holds the (host-sized) mtype plus the payload. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
/* Emulate msgrcv(2): receive into a host msgbuf, then copy the mtype
 * and the received payload back out to the guest buffer.
 * Returns the number of payload bytes received or a -TARGET_* errno. */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    /* Host buffer holds the (host-sized) mtype plus msgsz payload bytes. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));

    if (ret > 0) {
        /* ret is the payload byte count; the guest payload lives right
         * after the abi_ulong mtype field, so lock and copy it
         * separately at its exact length. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
4688 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4689 abi_ulong target_addr)
4691 struct target_shmid_ds *target_sd;
4693 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4694 return -TARGET_EFAULT;
4695 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4696 return -TARGET_EFAULT;
4697 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4698 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4699 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4700 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4701 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4702 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4703 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4704 unlock_user_struct(target_sd, target_addr, 0);
4705 return 0;
4708 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4709 struct shmid_ds *host_sd)
4711 struct target_shmid_ds *target_sd;
4713 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4714 return -TARGET_EFAULT;
4715 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4716 return -TARGET_EFAULT;
4717 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4718 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4719 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4720 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4721 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4722 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4723 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4724 unlock_user_struct(target_sd, target_addr, 1);
4725 return 0;
/* Guest-layout shminfo, returned by shmctl(IPC_INFO). */
struct target_shminfo {
    abi_ulong shmmax;   /* max segment size */
    abi_ulong shmmin;   /* min segment size */
    abi_ulong shmmni;   /* max number of segments */
    abi_ulong shmseg;   /* max segments per process */
    abi_ulong shmall;   /* max total shared memory (pages) */
};
4736 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4737 struct shminfo *host_shminfo)
4739 struct target_shminfo *target_shminfo;
4740 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4741 return -TARGET_EFAULT;
4742 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4743 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4744 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4745 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4746 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4747 unlock_user_struct(target_shminfo, target_addr, 1);
4748 return 0;
/* Guest-layout shm_info, returned by shmctl(SHM_INFO). */
struct target_shm_info {
    int used_ids;               /* number of existing segments */
    abi_ulong shm_tot;          /* total allocated shm (pages) */
    abi_ulong shm_rss;          /* resident shm (pages) */
    abi_ulong shm_swp;          /* swapped shm (pages) */
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
4760 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4761 struct shm_info *host_shm_info)
4763 struct target_shm_info *target_shm_info;
4764 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4765 return -TARGET_EFAULT;
4766 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4767 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4768 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4769 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4770 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4771 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4772 unlock_user_struct(target_shm_info, target_addr, 1);
4773 return 0;
/* Emulate shmctl(2), converting shmid_ds / shminfo / shm_info between
 * guest and host layouts depending on the command.
 * Returns the host result or a -TARGET_* errno. */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    /* Strip IPC_64-style flag bits. */
    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        /* Commands exchanging a shmid_ds with the guest. */
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* Kernel overloads the shmid_ds argument for the *_INFO
         * commands; the casts mirror that convention. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        /* These commands take no data argument. */
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    /* Default: shared-memory attach addresses are page-aligned. */
    return TARGET_PAGE_SIZE;
}
#endif
/* Emulate shmat(2): attach the segment into the guest address space,
 * update QEMU's page flags, and record the mapping in shm_regions[]
 * so do_shmdt() can later clear it.
 * Returns the guest attach address or a -TARGET_* errno. */
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;
    abi_ulong shmlba;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    /* Enforce the target's SHMLBA alignment; SHM_RND rounds down,
     * otherwise a misaligned address is rejected (as the kernel does). */
    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }

    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* No address requested: find a free guest VA range first. */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP: mmap_find_vma may have left a placeholder
             * mapping at that address. */
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    /* Make the attached range visible to the guest with the right
     * protection. */
    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* Remember the region so do_shmdt() knows its size.  If the table
     * is full the attach still succeeds but won't be tracked. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
4901 static inline abi_long do_shmdt(abi_ulong shmaddr)
4903 int i;
4905 for (i = 0; i < N_SHM_REGIONS; ++i) {
4906 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4907 shm_regions[i].in_use = false;
4908 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4909 break;
4913 return get_errno(shmdt(g2h(shmaddr)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos.
 *
 * Dispatcher for the multiplexed ipc(2) syscall used by some targets:
 * the low 16 bits of 'call' select the operation, the high 16 bits
 * carry an ABI version that changes how some arguments are passed. */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old ABI: ptr points at a kludge struct bundling the
                 * msgbuf pointer and the message type. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            /* New ABI: msgtyp is passed directly in 'fifth'. */
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* The attach address is returned through *third. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* iBCS2 emulator entry point: not supported. */
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
/* kernel structure types definitions */
/* First expansion of syscall_types.h: build an enum of STRUCT_<name>
 * identifiers, used as indices into the thunk struct tables. */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Second expansion: emit a thunk type-description array for each
 * regular structure; "special" structs get hand-written converters. */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL
typedef struct IOCTLEntry IOCTLEntry;

/* Signature of a custom ioctl handler for requests whose argument
 * can't be marshalled by the generic thunk machinery. */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

struct IOCTLEntry {
    int target_cmd;            /* guest ioctl request number */
    unsigned int host_cmd;     /* corresponding host request number */
    const char *name;          /* request name, for logging */
    int access;                /* IOC_R / IOC_W / IOC_RW data direction */
    do_ioctl_fn *do_ioctl;     /* custom handler, or 0 for generic thunk */
    const argtype arg_type[5]; /* thunk description of the argument */
};

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

/* Upper bound for thunk-converted structures held in buf_temp. */
#define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

/* Custom handler for FS_IOC_FIEMAP: marshals the header plus the
 * variable-length extent array in both directions. */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
5149 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
5150 int fd, int cmd, abi_long arg)
5152 const argtype *arg_type = ie->arg_type;
5153 int target_size;
5154 void *argptr;
5155 int ret;
5156 struct ifconf *host_ifconf;
5157 uint32_t outbufsz;
5158 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
5159 int target_ifreq_size;
5160 int nb_ifreq;
5161 int free_buf = 0;
5162 int i;
5163 int target_ifc_len;
5164 abi_long target_ifc_buf;
5165 int host_ifc_len;
5166 char *host_ifc_buf;
5168 assert(arg_type[0] == TYPE_PTR);
5169 assert(ie->access == IOC_RW);
5171 arg_type++;
5172 target_size = thunk_type_size(arg_type, 0);
5174 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5175 if (!argptr)
5176 return -TARGET_EFAULT;
5177 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5178 unlock_user(argptr, arg, 0);
5180 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
5181 target_ifc_len = host_ifconf->ifc_len;
5182 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
5184 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
5185 nb_ifreq = target_ifc_len / target_ifreq_size;
5186 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
5188 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
5189 if (outbufsz > MAX_STRUCT_SIZE) {
5190 /* We can't fit all the extents into the fixed size buffer.
5191 * Allocate one that is large enough and use it instead.
5193 host_ifconf = malloc(outbufsz);
5194 if (!host_ifconf) {
5195 return -TARGET_ENOMEM;
5197 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
5198 free_buf = 1;
5200 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
5202 host_ifconf->ifc_len = host_ifc_len;
5203 host_ifconf->ifc_buf = host_ifc_buf;
5205 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
5206 if (!is_error(ret)) {
5207 /* convert host ifc_len to target ifc_len */
5209 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
5210 target_ifc_len = nb_ifreq * target_ifreq_size;
5211 host_ifconf->ifc_len = target_ifc_len;
5213 /* restore target ifc_buf */
5215 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
5217 /* copy struct ifconf to target user */
5219 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5220 if (!argptr)
5221 return -TARGET_EFAULT;
5222 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
5223 unlock_user(argptr, arg, target_size);
5225 /* copy ifreq[] to target user */
5227 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
5228 for (i = 0; i < nb_ifreq ; i++) {
5229 thunk_convert(argptr + i * target_ifreq_size,
5230 host_ifc_buf + i * sizeof(struct ifreq),
5231 ifreq_arg_type, THUNK_TARGET);
5233 unlock_user(argptr, target_ifc_buf, target_ifc_len);
5236 if (free_buf) {
5237 free(host_ifconf);
5240 return ret;
/* Custom handler for device-mapper ioctls (DM_*): the argument is a
 * struct dm_ioctl followed by a command-specific variable-length data
 * area located at data_start.  Input data is converted guest->host,
 * the host ioctl is issued into an enlarged buffer, and the result
 * data is converted back, re-packing the target-layout records. */
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        ret = -TARGET_EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }

    /* Convert the command-specific input payload guest -> host. */
    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        /* Leading 64-bit sector number needs byteswapping. */
        memcpy(host_data, argptr, guest_data_size);
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        /* Walk the chain of dm_target_spec records, converting each
         * and recomputing 'next' offsets for the host layout. */
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        /* NOTE(review): this lock_user() result is used unchecked by
         * the cases below — presumably guarded by the earlier read
         * lock of the same range; confirm. */
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        /* Convert the command-specific result payload host -> guest. */
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            /* Re-pack the variable-length dm_name_list chain. */
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void*)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            /* Re-pack the dm_target_spec records plus their trailing
             * parameter strings. */
            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char*)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char*)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void*)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            /* Count word followed by an array of 64-bit dev_t values. */
            void *hdata = (void*)host_dm + host_dm->data_start;
            int count = *(uint32_t*)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t*)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            /* Re-pack the dm_target_versions chain. */
            struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void*)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        /* Finally copy the (possibly flag-updated) header back out. */
        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}
/* Custom handler for BLKPG: the argument struct embeds a pointer to a
 * struct blkpg_partition payload, which must be converted separately
 * and swizzled to a host-local copy before the host ioctl. */
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
5526 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5527 int fd, int cmd, abi_long arg)
5529 const argtype *arg_type = ie->arg_type;
5530 const StructEntry *se;
5531 const argtype *field_types;
5532 const int *dst_offsets, *src_offsets;
5533 int target_size;
5534 void *argptr;
5535 abi_ulong *target_rt_dev_ptr;
5536 unsigned long *host_rt_dev_ptr;
5537 abi_long ret;
5538 int i;
5540 assert(ie->access == IOC_W);
5541 assert(*arg_type == TYPE_PTR);
5542 arg_type++;
5543 assert(*arg_type == TYPE_STRUCT);
5544 target_size = thunk_type_size(arg_type, 0);
5545 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5546 if (!argptr) {
5547 return -TARGET_EFAULT;
5549 arg_type++;
5550 assert(*arg_type == (int)STRUCT_rtentry);
5551 se = struct_entries + *arg_type++;
5552 assert(se->convert[0] == NULL);
5553 /* convert struct here to be able to catch rt_dev string */
5554 field_types = se->field_types;
5555 dst_offsets = se->field_offsets[THUNK_HOST];
5556 src_offsets = se->field_offsets[THUNK_TARGET];
5557 for (i = 0; i < se->nb_fields; i++) {
5558 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5559 assert(*field_types == TYPE_PTRVOID);
5560 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5561 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5562 if (*target_rt_dev_ptr != 0) {
5563 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5564 tswapal(*target_rt_dev_ptr));
5565 if (!*host_rt_dev_ptr) {
5566 unlock_user(argptr, arg, 0);
5567 return -TARGET_EFAULT;
5569 } else {
5570 *host_rt_dev_ptr = 0;
5572 field_types++;
5573 continue;
5575 field_types = thunk_convert(buf_temp + dst_offsets[i],
5576 argptr + src_offsets[i],
5577 field_types, THUNK_HOST);
5579 unlock_user(argptr, arg, 0);
5581 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5582 if (*host_rt_dev_ptr != 0) {
5583 unlock_user((void *)*host_rt_dev_ptr,
5584 *target_rt_dev_ptr, 0);
5586 return ret;
5589 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5590 int fd, int cmd, abi_long arg)
5592 int sig = target_to_host_signal(arg);
5593 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
/* Translation table for all supported ioctls, expanded from ioctls.h.
   IOCTL maps a command through the generic thunk machinery,
   IOCTL_SPECIAL routes it to a dedicated do_* handler, and
   IOCTL_IGNORE records a known-but-unimplemented command (host_cmd 0).
   The table is terminated by a zero target_cmd entry.  */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, },
};
/* ??? Implement proper locking for ioctls.  */
/* do_ioctl() Must return target values and target errnos. */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear scan of the translation table; a zero target_cmd
       terminates it. */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    if (ie->do_ioctl) {
        /* Entry registered via IOCTL_SPECIAL: delegate entirely.  */
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux.  */
        return -TARGET_ENOSYS;
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* Scalar or opaque pointer argument: pass through unchanged.  */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* Kernel fills the struct: run first, then convert the
               result out to guest memory on success.  */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* Kernel reads the struct: convert guest data in, then run.  */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* Both directions: convert in, run, convert back on success.  */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
/* termios c_iflag translation: { target_mask, target_bits, host_mask,
   host_bits } per entry; zero-terminated.  */
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { 0, 0, 0, 0 }
};
/* termios c_oflag translation.  The multi-bit delay fields (NLDLY, CRDLY,
   TABDLY, ...) get one entry per possible value under the same mask.  */
static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
    { 0, 0, 0, 0 }
};
/* termios c_cflag translation: one entry per baud rate under the CBAUD
   mask, one per character size under CSIZE, plus the single-bit flags.  */
static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
    { 0, 0, 0, 0 }
};
/* termios c_lflag translation (local modes); zero-terminated.  */
static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { 0, 0, 0, 0 }
};
5798 static void target_to_host_termios (void *dst, const void *src)
5800 struct host_termios *host = dst;
5801 const struct target_termios *target = src;
5803 host->c_iflag =
5804 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5805 host->c_oflag =
5806 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5807 host->c_cflag =
5808 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5809 host->c_lflag =
5810 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5811 host->c_line = target->c_line;
5813 memset(host->c_cc, 0, sizeof(host->c_cc));
5814 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5815 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5816 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5817 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5818 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5819 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5820 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5821 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5822 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5823 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5824 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5825 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5826 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5827 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5828 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5829 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5830 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5833 static void host_to_target_termios (void *dst, const void *src)
5835 struct target_termios *target = dst;
5836 const struct host_termios *host = src;
5838 target->c_iflag =
5839 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5840 target->c_oflag =
5841 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5842 target->c_cflag =
5843 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5844 target->c_lflag =
5845 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5846 target->c_line = host->c_line;
5848 memset(target->c_cc, 0, sizeof(target->c_cc));
5849 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5850 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5851 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5852 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5853 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5854 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5855 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5856 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5857 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5858 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5859 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5860 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5861 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5862 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5863 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5864 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5865 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
/* Thunk descriptor for termios structs: uses the custom converters above
   instead of the generic field-by-field machinery.  convert[0] runs
   host->target, convert[1] target->host.  */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};
/* mmap() flag translation: { target_mask, target_bits, host_mask,
   host_bits }.  Target flags without an entry here are rejected by the
   caller rather than silently dropped.  */
static bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host.  */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { 0, 0, 0, 0 }
};
#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
/* Lazily allocated by write_ldt(); read_ldt() returns 0 entries until
   then.  */
static uint8_t *ldt_table;
5902 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5904 int size;
5905 void *p;
5907 if (!ldt_table)
5908 return 0;
5909 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5910 if (size > bytecount)
5911 size = bytecount;
5912 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5913 if (!p)
5914 return -TARGET_EFAULT;
5915 /* ??? Should this by byteswapped? */
5916 memcpy(p, ldt_table, size);
5917 unlock_user(p, ptr, size);
5918 return size;
/* XXX: add locking support */
/* Install or clear one LDT entry from a guest struct
   target_modify_ldt_ldt_s at 'ptr'.  'oldmode' selects the legacy
   modify_ldt(1) semantics (no 'useable' bit, no code-and-data check).
   Allocates the shared LDT on first use.  Returns 0 or a target errno.  */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the user_desc-style flags word (same bit layout as the
       Linux kernel's struct user_desc).  */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the descriptor words exactly as the hardware expects them.  */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
6013 /* specific and weird i386 syscalls */
6014 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6015 unsigned long bytecount)
6017 abi_long ret;
6019 switch (func) {
6020 case 0:
6021 ret = read_ldt(ptr, bytecount);
6022 break;
6023 case 1:
6024 ret = write_ldt(env, ptr, bytecount, 1);
6025 break;
6026 case 0x11:
6027 ret = write_ldt(env, ptr, bytecount, 0);
6028 break;
6029 default:
6030 ret = -TARGET_ENOSYS;
6031 break;
6033 return ret;
6036 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/* set_thread_area(2): install a TLS descriptor into the emulated GDT.
   If the guest passes entry_number == -1, pick the first free TLS slot
   and write the chosen index back to the guest struct.  Mirrors the
   kernel's descriptor-packing code.  Returns 0 or a target errno.  */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* Guest asked us to allocate a slot: scan the TLS range for a
           free (zero) GDT entry.  */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* Unpack the user_desc-style flags word (kernel bit layout).  */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the descriptor words exactly as the hardware expects them.  */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
/* get_thread_area(2): read a TLS descriptor from the emulated GDT and
   unpack it back into the guest's struct target_modify_ldt_ldt_s.
   The inverse of do_set_thread_area().  Returns 0 or a target errno.  */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Unpack the hardware descriptor bits; note read_exec_only and
       seg_not_present are stored inverted in the descriptor.  */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    /* Reassemble the user_desc-style flags word and the split
       base/limit fields.  */
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
6168 #endif /* TARGET_I386 && TARGET_ABI32 */
6170 #ifndef TARGET_ABI32
6171 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6173 abi_long ret = 0;
6174 abi_ulong val;
6175 int idx;
6177 switch(code) {
6178 case TARGET_ARCH_SET_GS:
6179 case TARGET_ARCH_SET_FS:
6180 if (code == TARGET_ARCH_SET_GS)
6181 idx = R_GS;
6182 else
6183 idx = R_FS;
6184 cpu_x86_load_seg(env, idx, 0);
6185 env->segs[idx].base = addr;
6186 break;
6187 case TARGET_ARCH_GET_GS:
6188 case TARGET_ARCH_GET_FS:
6189 if (code == TARGET_ARCH_GET_GS)
6190 idx = R_GS;
6191 else
6192 idx = R_FS;
6193 val = env->segs[idx].base;
6194 if (put_user(val, addr, abi_ulong))
6195 ret = -TARGET_EFAULT;
6196 break;
6197 default:
6198 ret = -TARGET_EINVAL;
6199 break;
6201 return ret;
6203 #endif
6205 #endif /* defined(TARGET_I386) */
/* Host stack size for threads created by do_fork(CLONE_VM).  */
#define NEW_STACK_SIZE 0x40000

/* Held across thread creation so guest thread setup appears atomic;
   also makes the child wait in clone_func() until the parent finishes.  */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Parameter block passed from do_fork() to the clone_func() thread.  */
typedef struct {
    CPUArchState *env;
    pthread_mutex_t mutex;      /* protects the cond handshake below */
    pthread_cond_t cond;        /* signalled once the child is initialized */
    pthread_t thread;
    uint32_t tid;               /* child's tid, filled in by the child */
    abi_ulong child_tidptr;     /* guest address for CLONE_CHILD_SETTID */
    abi_ulong parent_tidptr;    /* guest address for CLONE_PARENT_SETTID */
    sigset_t sigmask;           /* signal mask to restore in the child */
} new_thread_info;
/* Entry point of a new guest thread created by do_fork(CLONE_VM):
   registers with RCU/TCG, publishes its tid, restores signals, signals
   readiness to the parent, then enters the CPU loop (never returns).  */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = gettid();
    task_settid(ts);
    /* Store the tid where CLONE_CHILD_SETTID/CLONE_PARENT_SETTID asked.  */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
/* Implements clone(2)/fork(2)/vfork(2): CLONE_VM becomes a host pthread
   sharing this process's guest memory; otherwise a host fork().  vfork
   is emulated as plain fork.  */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_cpu = ENV_GET_CPU(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        /* NOTE(review): the three pthread_attr_* return values below
           overwrite each other and are never checked.  */
        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        /* If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         */
        if (!parallel_cpus) {
            parallel_cpus = true;
            tb_flush(cpu);
        }

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs(env, newsp);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            fork_end(0);
        }
    }
    return ret;
}
/* warning : doesn't handle linux specific flags... */
/* Map a guest fcntl command number to the host's.  Lock commands are
   always routed to the 64-bit host variants.  Returns -TARGET_EINVAL
   for commands we do not translate.  */
static int target_to_host_fcntl_cmd(int cmd)
{
    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
        return cmd;
    case TARGET_F_GETLK:
        return F_GETLK64;
    case TARGET_F_SETLK:
        return F_SETLK64;
    case TARGET_F_SETLKW:
        return F_SETLKW64;
    case TARGET_F_GETOWN:
        return F_GETOWN;
    case TARGET_F_SETOWN:
        return F_SETOWN;
    case TARGET_F_GETSIG:
        return F_GETSIG;
    case TARGET_F_SETSIG:
        return F_SETSIG;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        return F_GETLK64;
    case TARGET_F_SETLK64:
        return F_SETLK64;
    case TARGET_F_SETLKW64:
        return F_SETLKW64;
#endif
    case TARGET_F_SETLEASE:
        return F_SETLEASE;
    case TARGET_F_GETLEASE:
        return F_GETLEASE;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        return F_DUPFD_CLOEXEC;
#endif
    case TARGET_F_NOTIFY:
        return F_NOTIFY;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        return F_GETOWN_EX;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        return F_SETOWN_EX;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        return F_SETPIPE_SZ;
    case TARGET_F_GETPIPE_SZ:
        return F_GETPIPE_SZ;
#endif
    default:
        return -TARGET_EINVAL;
    }
    /* Unreachable: every path above returns.  */
    return -TARGET_EINVAL;
}
/* flock l_type translation table: -1 masks mean "exact value match"
   rather than a bitmask test.  */
#define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
static const bitmask_transtbl flock_tbl[] = {
    TRANSTBL_CONVERT(F_RDLCK),
    TRANSTBL_CONVERT(F_WRLCK),
    TRANSTBL_CONVERT(F_UNLCK),
    TRANSTBL_CONVERT(F_EXLCK),
    TRANSTBL_CONVERT(F_SHLCK),
    { 0, 0, 0, 0 }
};
/* Read a guest struct target_flock into a host struct flock64,
   translating l_type through flock_tbl.  Returns 0 or -TARGET_EFAULT.  */
static inline abi_long copy_from_user_flock(struct flock64 *fl,
                                            abi_ulong target_flock_addr)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}
/* Write a host struct flock64 back to a guest struct target_flock,
   translating l_type through flock_tbl.  Returns 0 or -TARGET_EFAULT.  */
static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
/* Function types for the flock64 copy helpers, so the fcntl64 path can
   select the plain or ARM-EABI variant at runtime.  */
typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6509 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/* ARM EABI variant: struct target_eabi_flock64 has different alignment
   than the plain target_flock64, hence the separate copy helper.  */
static inline abi_long copy_from_user_eabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_eabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}
/* ARM EABI variant of copy_to_user_flock64 (see note on the _from_
   helper above regarding struct alignment).  */
static inline abi_long copy_to_user_eabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_eabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
6549 #endif
6551 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6552 abi_ulong target_flock_addr)
6554 struct target_flock64 *target_fl;
6555 short l_type;
6557 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6558 return -TARGET_EFAULT;
6561 __get_user(l_type, &target_fl->l_type);
6562 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6563 __get_user(fl->l_whence, &target_fl->l_whence);
6564 __get_user(fl->l_start, &target_fl->l_start);
6565 __get_user(fl->l_len, &target_fl->l_len);
6566 __get_user(fl->l_pid, &target_fl->l_pid);
6567 unlock_user_struct(target_fl, target_flock_addr, 0);
6568 return 0;
6571 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6572 const struct flock64 *fl)
6574 struct target_flock64 *target_fl;
6575 short l_type;
6577 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6578 return -TARGET_EFAULT;
6581 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6582 __put_user(l_type, &target_fl->l_type);
6583 __put_user(fl->l_whence, &target_fl->l_whence);
6584 __put_user(fl->l_start, &target_fl->l_start);
6585 __put_user(fl->l_len, &target_fl->l_len);
6586 __put_user(fl->l_pid, &target_fl->l_pid);
6587 unlock_user_struct(target_fl, target_flock_addr, 1);
6588 return 0;
/* Implement the guest fcntl()/fcntl64() command set on a host fd.
 * Lock descriptions, owner structs and status flag bitmasks are
 * translated between guest and host layouts; commands that need no
 * conversion are passed straight through.
 * Returns the (converted) host result or a -TARGET_* errno. */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            /* On success the kernel filled in fl64; report it back. */
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            /* O_* status flags differ between targets; map them. */
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
        /* Plain integer argument, no conversion needed. */
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        /* NOTE(review): unknown commands are passed through with the
         * *guest* cmd value rather than host_cmd — confirm intended. */
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
#ifdef USE_UID16

/* Clamp a 32-bit uid to the 16-bit ABI range; anything that does not
 * fit becomes the kernel's overflow id 65534 ("nobody"). */
static inline int high2lowuid(int uid)
{
    return uid > 65535 ? 65534 : uid;
}

/* Same clamping for group ids. */
static inline int high2lowgid(int gid)
{
    return gid > 65535 ? 65534 : gid;
}

/* Widen a 16-bit uid, preserving the special -1 ("no change") value
 * which would otherwise widen to 65535. */
static inline int low2highuid(int uid)
{
    return (int16_t)uid == -1 ? -1 : uid;
}

/* Same widening rule for group ids. */
static inline int low2highgid(int gid)
{
    return (int16_t)gid == -1 ? -1 : gid;
}

/* Ids are 16-bit quantities on the wire for UID16 targets. */
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */

/* 32-bit uid ABI: all of these are identity mappings. */
static inline int high2lowuid(int uid)
{
    return uid;
}

static inline int high2lowgid(int gid)
{
    return gid;
}

static inline int low2highuid(int uid)
{
    return uid;
}

static inline int low2highgid(int gid)
{
    return gid;
}

static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

/* Thread-local raw syscall stubs built from the __NR_sys_* selections
 * above; deliberately bypass the libc wrappers (see comment above). */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
/* One-time initialization of the syscall layer: register the thunk
 * struct descriptions, build the errno reverse-mapping table, and patch
 * ioctl numbers whose size field depends on the target struct layout. */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        /* A size field of all-ones marks "fill in from the thunk type". */
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
#if TARGET_ABI_BITS == 32
/* Reassemble a 64-bit syscall argument that arrived as two 32-bit
 * registers; which register holds the high half depends on the
 * target's endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABI: the value already fits in one register; word1 is unused. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64: on 32-bit ABIs the 64-bit length is passed as a register
 * pair; some targets align that pair to an even register index, which
 * shifts the argument slots up by one (regpairs_aligned). */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair alignment handling as
 * target_truncate64() above, but operating on an fd. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
6895 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
6896 abi_ulong target_addr)
6898 struct target_timespec *target_ts;
6900 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
6901 return -TARGET_EFAULT;
6902 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
6903 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6904 unlock_user_struct(target_ts, target_addr, 0);
6905 return 0;
6908 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
6909 struct timespec *host_ts)
6911 struct target_timespec *target_ts;
6913 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
6914 return -TARGET_EFAULT;
6915 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
6916 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6917 unlock_user_struct(target_ts, target_addr, 1);
6918 return 0;
6921 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6922 abi_ulong target_addr)
6924 struct target_itimerspec *target_itspec;
6926 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6927 return -TARGET_EFAULT;
6930 host_itspec->it_interval.tv_sec =
6931 tswapal(target_itspec->it_interval.tv_sec);
6932 host_itspec->it_interval.tv_nsec =
6933 tswapal(target_itspec->it_interval.tv_nsec);
6934 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6935 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6937 unlock_user_struct(target_itspec, target_addr, 1);
6938 return 0;
6941 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6942 struct itimerspec *host_its)
6944 struct target_itimerspec *target_itspec;
6946 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6947 return -TARGET_EFAULT;
6950 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6951 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6953 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6954 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6956 unlock_user_struct(target_itspec, target_addr, 0);
6957 return 0;
/* Read a guest struct timex (adjtimex/clock_adjtime parameter block)
 * into host form. Straight field-by-field copy; all members copied
 * here are integral. Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
/* Write a host struct timex back to guest memory; mirror of
 * target_to_host_timex() above. Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
7031 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7032 abi_ulong target_addr)
7034 struct target_sigevent *target_sevp;
7036 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7037 return -TARGET_EFAULT;
7040 /* This union is awkward on 64 bit systems because it has a 32 bit
7041 * integer and a pointer in it; we follow the conversion approach
7042 * used for handling sigval types in signal.c so the guest should get
7043 * the correct value back even if we did a 64 bit byteswap and it's
7044 * using the 32 bit integer.
7046 host_sevp->sigev_value.sival_ptr =
7047 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7048 host_sevp->sigev_signo =
7049 target_to_host_signal(tswap32(target_sevp->sigev_signo));
7050 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7051 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7053 unlock_user_struct(target_sevp, target_addr, 1);
7054 return 0;
#if defined(TARGET_NR_mlockall)
/* Translate the guest's mlockall() flag bits into the host's MCL_*
 * bits; unknown guest bits are silently dropped. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int host_flags = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        host_flags |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        host_flags |= MCL_FUTURE;
    }

    return host_flags;
}
#endif
/* Copy a host struct stat into the guest's stat64 layout at
 * @target_addr. ARM/ABI32 guests get the EABI variant when the CPU is
 * in EABI mode; everyone else gets target_stat64 (or target_stat if the
 * target has no separate 64-bit layout). Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        /* Zero first so padding and unset fields are deterministic. */
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Some targets carry the inode in a second, differently-sized
         * field as well; keep both in sync. */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
7135 /* ??? Using host futex calls even when target atomic operations
7136 are not really atomic probably breaks things. However implementing
7137 futexes locally would make futexes shared between multiple processes
7138 tricky. However they're probably useless because guest atomic
7139 operations won't work either. */
7140 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7141 target_ulong uaddr2, int val3)
7143 struct timespec ts, *pts;
7144 int base_op;
7146 /* ??? We assume FUTEX_* constants are the same on both host
7147 and target. */
7148 #ifdef FUTEX_CMD_MASK
7149 base_op = op & FUTEX_CMD_MASK;
7150 #else
7151 base_op = op;
7152 #endif
7153 switch (base_op) {
7154 case FUTEX_WAIT:
7155 case FUTEX_WAIT_BITSET:
7156 if (timeout) {
7157 pts = &ts;
7158 target_to_host_timespec(pts, timeout);
7159 } else {
7160 pts = NULL;
7162 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
7163 pts, NULL, val3));
7164 case FUTEX_WAKE:
7165 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7166 case FUTEX_FD:
7167 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7168 case FUTEX_REQUEUE:
7169 case FUTEX_CMP_REQUEUE:
7170 case FUTEX_WAKE_OP:
7171 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7172 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7173 But the prototype takes a `struct timespec *'; insert casts
7174 to satisfy the compiler. We do not need to tswap TIMEOUT
7175 since it's not compared to guest memory. */
7176 pts = (struct timespec *)(uintptr_t) timeout;
7177 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
7178 g2h(uaddr2),
7179 (base_op == FUTEX_CMP_REQUEUE
7180 ? tswap32(val3)
7181 : val3)));
7182 default:
7183 return -TARGET_ENOSYS;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Implement name_to_handle_at(): read the guest's handle_bytes, call
 * the host syscall into a bounce buffer, then copy the (opaque) handle
 * and the mount id back to guest memory. */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* handle_bytes is the first field of the guest file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    /* NOTE(review): size comes from the guest unvalidated, so
     * total_size can wrap/be huge — consider clamping to MAX_HANDLE_SZ. */
    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Implement open_by_handle_at(): duplicate the guest file_handle into
 * a host buffer (swapping handle_bytes/handle_type), call the host
 * syscall and return its result. */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes is the first field of the guest file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    /* NOTE(review): size is guest-controlled and unvalidated here, as
     * in do_name_to_handle_at() — consider clamping. */
    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
7274 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7276 /* signalfd siginfo conversion */
/* Convert one signalfd_siginfo record from host to guest form.
 * In the only caller (host_to_target_data_signalfd) tinfo and info
 * point at the SAME record, so this is an in-place conversion: fields
 * read through either pointer before being stored are still in host
 * byte order. */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    /* Checked before the swaps below, so these reads see host order. */
    if (tinfo->ssi_signo == SIGBUS &&
        (tinfo->ssi_code == BUS_MCEERR_AR ||
         tinfo->ssi_code == BUS_MCEERR_AO)) {
        /* ssi_addr_lsb lives immediately after ssi_addr in the kernel
         * layout; address it via pointer arithmetic. */
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
/* Convert a buffer of signalfd_siginfo records (as read from the host
 * signalfd) to guest byte order, in place. Assumes len is a multiple
 * of sizeof(struct signalfd_siginfo) — TODO confirm; signalfd reads
 * return whole records. */
static abi_long host_to_target_data_signalfd(void *buf, size_t len)
{
    int i;

    for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
        host_to_target_signalfd_siginfo(buf + i, buf + i);
    }

    return len;
}

/* Read-side translator attached to signalfd file descriptors. */
static TargetFdTrans target_signalfd_trans = {
    .host_to_target_data = host_to_target_data_signalfd,
};
/* Implement signalfd4(): validate the flag set, convert the guest
 * sigset, and register a read-side byteswap translator for the new fd.
 * Returns the fd or a -TARGET_* errno. */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    /* Kernel accepts only SFD_NONBLOCK|SFD_CLOEXEC here. */
    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        /* Data read from this fd must be byteswapped for the guest. */
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
7358 #endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same. */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Terminating signal occupies the low 7 bits; translate just
         * those and keep the core-dump/extra bits untouched. */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8-15; the 0x7f stop marker stays
         * in the low byte. */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
/* Emulate /proc/self/cmdline: write the saved guest argv strings, each
 * including its terminating NUL, into fd. Returns 0 on success, -1 on
 * a short write. */
static int open_self_cmdline(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
    int i;

    for (i = 0; i < bprm->argc; i++) {
        size_t len = strlen(bprm->argv[i]) + 1;

        if (write(fd, bprm->argv[i], len) != len) {
            return -1;
        }
    }

    return 0;
}
7391 static int open_self_maps(void *cpu_env, int fd)
7393 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7394 TaskState *ts = cpu->opaque;
7395 FILE *fp;
7396 char *line = NULL;
7397 size_t len = 0;
7398 ssize_t read;
7400 fp = fopen("/proc/self/maps", "r");
7401 if (fp == NULL) {
7402 return -1;
7405 while ((read = getline(&line, &len, fp)) != -1) {
7406 int fields, dev_maj, dev_min, inode;
7407 uint64_t min, max, offset;
7408 char flag_r, flag_w, flag_x, flag_p;
7409 char path[512] = "";
7410 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7411 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7412 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
7414 if ((fields < 10) || (fields > 11)) {
7415 continue;
7417 if (h2g_valid(min)) {
7418 int flags = page_get_flags(h2g(min));
7419 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
7420 if (page_check_range(h2g(min), max - min, flags) == -1) {
7421 continue;
7423 if (h2g(min) == ts->info->stack_limit) {
7424 pstrcpy(path, sizeof(path), " [stack]");
7426 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
7427 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7428 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7429 flag_x, flag_p, offset, dev_maj, dev_min, inode,
7430 path[0] ? " " : "", path);
7434 free(line);
7435 fclose(fp);
7437 return 0;
/* Emulate /proc/self/stat: emit real values for the pid (field 0),
 * comm (field 1) and start-of-stack (field 27); every other field is
 * written as 0 since there is no meaningful guest-side value.
 * Returns 0 on success, -1 on a short write. */
static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong start_stack = ts->info->start_stack;
    int i;

    /* /proc/<pid>/stat has 44 space-separated fields. */
    for (i = 0; i < 44; i++) {
        char buf[128];
        int len;
        uint64_t val = 0;

        if (i == 0) {
            /* pid */
            val = getpid();
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else if (i == 1) {
            /* app name */
            snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
        } else if (i == 27) {
            /* stack bottom */
            val = start_stack;
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else {
            /* for the rest, there is MasterCard */
            snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
        }

        len = strlen(buf);
        if (write(fd, buf, len) != len) {
            return -1;
        }
    }

    return 0;
}
/* Emulate /proc/self/auxv: dump the auxiliary vector that was placed
 * on the guest stack at exec time. Best effort — an unreadable auxv
 * yields an empty file and still returns 0. */
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        /* NOTE(review): ptr has been advanced past the written bytes
         * and len decremented by then — confirm unlock_user tolerates
         * the adjusted pointer/length pair here. */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
/*
 * Return 1 if @filename names the /proc entry @entry for the current
 * process, i.e. "/proc/self/<entry>" or "/proc/<our-pid>/<entry>";
 * return 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *rest = filename;

    if (strncmp(rest, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    rest += strlen("/proc/");

    if (strncmp(rest, "self/", strlen("self/")) == 0) {
        rest += strlen("self/");
    } else if (*rest >= '1' && *rest <= '9') {
        /* A numeric directory only counts if it is our own pid. */
        char pid_prefix[80];

        snprintf(pid_prefix, sizeof(pid_prefix), "%d/", getpid());
        if (strncmp(rest, pid_prefix, strlen(pid_prefix)) != 0) {
            return 0;
        }
        rest += strlen(pid_prefix);
    } else {
        return 0;
    }

    return strcmp(rest, entry) == 0;
}
7531 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-path matcher used for fake /proc entries registered with an
 * absolute filename (cf. is_proc_myself for per-process entries). */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
7537 static int open_net_route(void *cpu_env, int fd)
7539 FILE *fp;
7540 char *line = NULL;
7541 size_t len = 0;
7542 ssize_t read;
7544 fp = fopen("/proc/net/route", "r");
7545 if (fp == NULL) {
7546 return -1;
7549 /* read header */
7551 read = getline(&line, &len, fp);
7552 dprintf(fd, "%s", line);
7554 /* read routes */
7556 while ((read = getline(&line, &len, fp)) != -1) {
7557 char iface[16];
7558 uint32_t dest, gw, mask;
7559 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7560 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7561 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7562 &mask, &mtu, &window, &irtt);
7563 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7564 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7565 metric, tswap32(mask), mtu, window, irtt);
7568 free(line);
7569 fclose(fp);
7571 return 0;
7573 #endif
/* openat() with QEMU interposition: /proc paths that must reflect the
 * guest (maps, stat, auxv, cmdline, exe, and /proc/net/route on
 * cross-endian builds) are redirected to generated content; everything
 * else goes to the host via safe_openat(). */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;
        int (*fill)(void *cpu_env, int fd);
        int (*cmp)(const char *s1, const char *s2);
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    if (is_proc_myself(pathname, "exe")) {
        /* Prefer the fd the loader received from execve(), if any. */
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* Unlink immediately: the open fd keeps the file alive. */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            /* Preserve the fill callback's errno across close(). */
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
/* Guest-visible POSIX timer ids carry a magic tag in the upper 16 bits
 * so stale/forged ids can be rejected; the low 16 bits index
 * g_posix_timers[]. */
#define TIMER_MAGIC 0x0caf0000
#define TIMER_MAGIC_MASK 0xffff0000

/* Convert QEMU provided timer ID back to internal 16bit index format */
static target_timer_t get_timer_id(abi_long arg)
{
    target_timer_t timerid = arg;

    /* Reject ids that do not carry the magic tag. */
    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;
    }

    timerid &= 0xffff;

    /* Reject indexes beyond the timer table. */
    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
        return -TARGET_EINVAL;
    }

    return timerid;
}
7656 static abi_long swap_data_eventfd(void *buf, size_t len)
7658 uint64_t *counter = buf;
7659 int i;
7661 if (len < sizeof(uint64_t)) {
7662 return -EINVAL;
7665 for (i = 0; i < len; i += sizeof(uint64_t)) {
7666 *counter = tswap64(*counter);
7667 counter++;
7670 return len;
7673 static TargetFdTrans target_eventfd_trans = {
7674 .host_to_target_data = swap_data_eventfd,
7675 .target_to_host_data = swap_data_eventfd,
#if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
    (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
     defined(__NR_inotify_init1))
/* Byte-swap every inotify_event record in buf (a buffer of len bytes as
 * returned by a host read() on an inotify fd) into target byte order.
 * Each record is a fixed struct inotify_event header followed by ev->len
 * bytes of (unswapped) name data.
 *
 * Returns len unchanged.
 */
static abi_long host_to_target_data_inotify(void *buf, size_t len)
{
    struct inotify_event *ev;
    size_t i = 0;
    uint32_t name_len;

    /* Only step into a record whose full header fits in the buffer: the
     * previous "i < len" condition used a signed int index and would have
     * dereferenced a truncated trailing header out of bounds.  name_len
     * must be read before ev->len is swapped, as it also advances i.
     */
    while (i + sizeof(struct inotify_event) <= len) {
        ev = (struct inotify_event *)((char *)buf + i);
        name_len = ev->len;

        ev->wd = tswap32(ev->wd);
        ev->mask = tswap32(ev->mask);
        ev->cookie = tswap32(ev->cookie);
        ev->len = tswap32(name_len);

        i += sizeof(struct inotify_event) + name_len;
    }

    return len;
}

/* fd translator for inotify fds: records read from the host are swapped
 * into target byte order; nothing is written to an inotify fd.
 */
static TargetFdTrans target_inotify_trans = {
    .host_to_target_data = host_to_target_data_inotify,
};
#endif
7705 /* do_syscall() should always have a single exit point at the end so
7706 that actions, such as logging of syscall results, can be performed.
7707 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7708 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
7709 abi_long arg2, abi_long arg3, abi_long arg4,
7710 abi_long arg5, abi_long arg6, abi_long arg7,
7711 abi_long arg8)
7713 CPUState *cpu = ENV_GET_CPU(cpu_env);
7714 abi_long ret;
7715 struct stat st;
7716 struct statfs stfs;
7717 void *p;
7719 #if defined(DEBUG_ERESTARTSYS)
7720 /* Debug-only code for exercising the syscall-restart code paths
7721 * in the per-architecture cpu main loops: restart every syscall
7722 * the guest makes once before letting it through.
7725 static int flag;
7727 flag = !flag;
7728 if (flag) {
7729 return -TARGET_ERESTARTSYS;
7732 #endif
7734 #ifdef DEBUG
7735 gemu_log("syscall %d", num);
7736 #endif
7737 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
7738 if(do_strace)
7739 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
7741 switch(num) {
7742 case TARGET_NR_exit:
7743 /* In old applications this may be used to implement _exit(2).
7744 However in threaded applictions it is used for thread termination,
7745 and _exit_group is used for application termination.
7746 Do thread termination if we have more then one thread. */
7748 if (block_signals()) {
7749 ret = -TARGET_ERESTARTSYS;
7750 break;
7753 cpu_list_lock();
7755 if (CPU_NEXT(first_cpu)) {
7756 TaskState *ts;
7758 /* Remove the CPU from the list. */
7759 QTAILQ_REMOVE(&cpus, cpu, node);
7761 cpu_list_unlock();
7763 ts = cpu->opaque;
7764 if (ts->child_tidptr) {
7765 put_user_u32(0, ts->child_tidptr);
7766 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7767 NULL, NULL, 0);
7769 thread_cpu = NULL;
7770 object_unref(OBJECT(cpu));
7771 g_free(ts);
7772 rcu_unregister_thread();
7773 pthread_exit(NULL);
7776 cpu_list_unlock();
7777 #ifdef TARGET_GPROF
7778 _mcleanup();
7779 #endif
7780 gdb_exit(cpu_env, arg1);
7781 _exit(arg1);
7782 ret = 0; /* avoid warning */
7783 break;
7784 case TARGET_NR_read:
7785 if (arg3 == 0)
7786 ret = 0;
7787 else {
7788 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7789 goto efault;
7790 ret = get_errno(safe_read(arg1, p, arg3));
7791 if (ret >= 0 &&
7792 fd_trans_host_to_target_data(arg1)) {
7793 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7795 unlock_user(p, arg2, ret);
7797 break;
7798 case TARGET_NR_write:
7799 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7800 goto efault;
7801 if (fd_trans_target_to_host_data(arg1)) {
7802 void *copy = g_malloc(arg3);
7803 memcpy(copy, p, arg3);
7804 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7805 if (ret >= 0) {
7806 ret = get_errno(safe_write(arg1, copy, ret));
7808 g_free(copy);
7809 } else {
7810 ret = get_errno(safe_write(arg1, p, arg3));
7812 unlock_user(p, arg2, 0);
7813 break;
7814 #ifdef TARGET_NR_open
7815 case TARGET_NR_open:
7816 if (!(p = lock_user_string(arg1)))
7817 goto efault;
7818 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7819 target_to_host_bitmask(arg2, fcntl_flags_tbl),
7820 arg3));
7821 fd_trans_unregister(ret);
7822 unlock_user(p, arg1, 0);
7823 break;
7824 #endif
7825 case TARGET_NR_openat:
7826 if (!(p = lock_user_string(arg2)))
7827 goto efault;
7828 ret = get_errno(do_openat(cpu_env, arg1, p,
7829 target_to_host_bitmask(arg3, fcntl_flags_tbl),
7830 arg4));
7831 fd_trans_unregister(ret);
7832 unlock_user(p, arg2, 0);
7833 break;
7834 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7835 case TARGET_NR_name_to_handle_at:
7836 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7837 break;
7838 #endif
7839 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7840 case TARGET_NR_open_by_handle_at:
7841 ret = do_open_by_handle_at(arg1, arg2, arg3);
7842 fd_trans_unregister(ret);
7843 break;
7844 #endif
7845 case TARGET_NR_close:
7846 fd_trans_unregister(arg1);
7847 ret = get_errno(close(arg1));
7848 break;
7849 case TARGET_NR_brk:
7850 ret = do_brk(arg1);
7851 break;
7852 #ifdef TARGET_NR_fork
7853 case TARGET_NR_fork:
7854 ret = get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7855 break;
7856 #endif
7857 #ifdef TARGET_NR_waitpid
7858 case TARGET_NR_waitpid:
7860 int status;
7861 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7862 if (!is_error(ret) && arg2 && ret
7863 && put_user_s32(host_to_target_waitstatus(status), arg2))
7864 goto efault;
7866 break;
7867 #endif
7868 #ifdef TARGET_NR_waitid
7869 case TARGET_NR_waitid:
7871 siginfo_t info;
7872 info.si_pid = 0;
7873 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7874 if (!is_error(ret) && arg3 && info.si_pid != 0) {
7875 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7876 goto efault;
7877 host_to_target_siginfo(p, &info);
7878 unlock_user(p, arg3, sizeof(target_siginfo_t));
7881 break;
7882 #endif
7883 #ifdef TARGET_NR_creat /* not on alpha */
7884 case TARGET_NR_creat:
7885 if (!(p = lock_user_string(arg1)))
7886 goto efault;
7887 ret = get_errno(creat(p, arg2));
7888 fd_trans_unregister(ret);
7889 unlock_user(p, arg1, 0);
7890 break;
7891 #endif
7892 #ifdef TARGET_NR_link
7893 case TARGET_NR_link:
7895 void * p2;
7896 p = lock_user_string(arg1);
7897 p2 = lock_user_string(arg2);
7898 if (!p || !p2)
7899 ret = -TARGET_EFAULT;
7900 else
7901 ret = get_errno(link(p, p2));
7902 unlock_user(p2, arg2, 0);
7903 unlock_user(p, arg1, 0);
7905 break;
7906 #endif
7907 #if defined(TARGET_NR_linkat)
7908 case TARGET_NR_linkat:
7910 void * p2 = NULL;
7911 if (!arg2 || !arg4)
7912 goto efault;
7913 p = lock_user_string(arg2);
7914 p2 = lock_user_string(arg4);
7915 if (!p || !p2)
7916 ret = -TARGET_EFAULT;
7917 else
7918 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7919 unlock_user(p, arg2, 0);
7920 unlock_user(p2, arg4, 0);
7922 break;
7923 #endif
7924 #ifdef TARGET_NR_unlink
7925 case TARGET_NR_unlink:
7926 if (!(p = lock_user_string(arg1)))
7927 goto efault;
7928 ret = get_errno(unlink(p));
7929 unlock_user(p, arg1, 0);
7930 break;
7931 #endif
7932 #if defined(TARGET_NR_unlinkat)
7933 case TARGET_NR_unlinkat:
7934 if (!(p = lock_user_string(arg2)))
7935 goto efault;
7936 ret = get_errno(unlinkat(arg1, p, arg3));
7937 unlock_user(p, arg2, 0);
7938 break;
7939 #endif
7940 case TARGET_NR_execve:
7942 char **argp, **envp;
7943 int argc, envc;
7944 abi_ulong gp;
7945 abi_ulong guest_argp;
7946 abi_ulong guest_envp;
7947 abi_ulong addr;
7948 char **q;
7949 int total_size = 0;
7951 argc = 0;
7952 guest_argp = arg2;
7953 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7954 if (get_user_ual(addr, gp))
7955 goto efault;
7956 if (!addr)
7957 break;
7958 argc++;
7960 envc = 0;
7961 guest_envp = arg3;
7962 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7963 if (get_user_ual(addr, gp))
7964 goto efault;
7965 if (!addr)
7966 break;
7967 envc++;
7970 argp = g_new0(char *, argc + 1);
7971 envp = g_new0(char *, envc + 1);
7973 for (gp = guest_argp, q = argp; gp;
7974 gp += sizeof(abi_ulong), q++) {
7975 if (get_user_ual(addr, gp))
7976 goto execve_efault;
7977 if (!addr)
7978 break;
7979 if (!(*q = lock_user_string(addr)))
7980 goto execve_efault;
7981 total_size += strlen(*q) + 1;
7983 *q = NULL;
7985 for (gp = guest_envp, q = envp; gp;
7986 gp += sizeof(abi_ulong), q++) {
7987 if (get_user_ual(addr, gp))
7988 goto execve_efault;
7989 if (!addr)
7990 break;
7991 if (!(*q = lock_user_string(addr)))
7992 goto execve_efault;
7993 total_size += strlen(*q) + 1;
7995 *q = NULL;
7997 if (!(p = lock_user_string(arg1)))
7998 goto execve_efault;
7999 /* Although execve() is not an interruptible syscall it is
8000 * a special case where we must use the safe_syscall wrapper:
8001 * if we allow a signal to happen before we make the host
8002 * syscall then we will 'lose' it, because at the point of
8003 * execve the process leaves QEMU's control. So we use the
8004 * safe syscall wrapper to ensure that we either take the
8005 * signal as a guest signal, or else it does not happen
8006 * before the execve completes and makes it the other
8007 * program's problem.
8009 ret = get_errno(safe_execve(p, argp, envp));
8010 unlock_user(p, arg1, 0);
8012 goto execve_end;
8014 execve_efault:
8015 ret = -TARGET_EFAULT;
8017 execve_end:
8018 for (gp = guest_argp, q = argp; *q;
8019 gp += sizeof(abi_ulong), q++) {
8020 if (get_user_ual(addr, gp)
8021 || !addr)
8022 break;
8023 unlock_user(*q, addr, 0);
8025 for (gp = guest_envp, q = envp; *q;
8026 gp += sizeof(abi_ulong), q++) {
8027 if (get_user_ual(addr, gp)
8028 || !addr)
8029 break;
8030 unlock_user(*q, addr, 0);
8033 g_free(argp);
8034 g_free(envp);
8036 break;
8037 case TARGET_NR_chdir:
8038 if (!(p = lock_user_string(arg1)))
8039 goto efault;
8040 ret = get_errno(chdir(p));
8041 unlock_user(p, arg1, 0);
8042 break;
8043 #ifdef TARGET_NR_time
8044 case TARGET_NR_time:
8046 time_t host_time;
8047 ret = get_errno(time(&host_time));
8048 if (!is_error(ret)
8049 && arg1
8050 && put_user_sal(host_time, arg1))
8051 goto efault;
8053 break;
8054 #endif
8055 #ifdef TARGET_NR_mknod
8056 case TARGET_NR_mknod:
8057 if (!(p = lock_user_string(arg1)))
8058 goto efault;
8059 ret = get_errno(mknod(p, arg2, arg3));
8060 unlock_user(p, arg1, 0);
8061 break;
8062 #endif
8063 #if defined(TARGET_NR_mknodat)
8064 case TARGET_NR_mknodat:
8065 if (!(p = lock_user_string(arg2)))
8066 goto efault;
8067 ret = get_errno(mknodat(arg1, p, arg3, arg4));
8068 unlock_user(p, arg2, 0);
8069 break;
8070 #endif
8071 #ifdef TARGET_NR_chmod
8072 case TARGET_NR_chmod:
8073 if (!(p = lock_user_string(arg1)))
8074 goto efault;
8075 ret = get_errno(chmod(p, arg2));
8076 unlock_user(p, arg1, 0);
8077 break;
8078 #endif
8079 #ifdef TARGET_NR_break
8080 case TARGET_NR_break:
8081 goto unimplemented;
8082 #endif
8083 #ifdef TARGET_NR_oldstat
8084 case TARGET_NR_oldstat:
8085 goto unimplemented;
8086 #endif
8087 case TARGET_NR_lseek:
8088 ret = get_errno(lseek(arg1, arg2, arg3));
8089 break;
8090 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8091 /* Alpha specific */
8092 case TARGET_NR_getxpid:
8093 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8094 ret = get_errno(getpid());
8095 break;
8096 #endif
8097 #ifdef TARGET_NR_getpid
8098 case TARGET_NR_getpid:
8099 ret = get_errno(getpid());
8100 break;
8101 #endif
8102 case TARGET_NR_mount:
8104 /* need to look at the data field */
8105 void *p2, *p3;
8107 if (arg1) {
8108 p = lock_user_string(arg1);
8109 if (!p) {
8110 goto efault;
8112 } else {
8113 p = NULL;
8116 p2 = lock_user_string(arg2);
8117 if (!p2) {
8118 if (arg1) {
8119 unlock_user(p, arg1, 0);
8121 goto efault;
8124 if (arg3) {
8125 p3 = lock_user_string(arg3);
8126 if (!p3) {
8127 if (arg1) {
8128 unlock_user(p, arg1, 0);
8130 unlock_user(p2, arg2, 0);
8131 goto efault;
8133 } else {
8134 p3 = NULL;
8137 /* FIXME - arg5 should be locked, but it isn't clear how to
8138 * do that since it's not guaranteed to be a NULL-terminated
8139 * string.
8141 if (!arg5) {
8142 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8143 } else {
8144 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8146 ret = get_errno(ret);
8148 if (arg1) {
8149 unlock_user(p, arg1, 0);
8151 unlock_user(p2, arg2, 0);
8152 if (arg3) {
8153 unlock_user(p3, arg3, 0);
8156 break;
8157 #ifdef TARGET_NR_umount
8158 case TARGET_NR_umount:
8159 if (!(p = lock_user_string(arg1)))
8160 goto efault;
8161 ret = get_errno(umount(p));
8162 unlock_user(p, arg1, 0);
8163 break;
8164 #endif
8165 #ifdef TARGET_NR_stime /* not on alpha */
8166 case TARGET_NR_stime:
8168 time_t host_time;
8169 if (get_user_sal(host_time, arg1))
8170 goto efault;
8171 ret = get_errno(stime(&host_time));
8173 break;
8174 #endif
8175 case TARGET_NR_ptrace:
8176 goto unimplemented;
8177 #ifdef TARGET_NR_alarm /* not on alpha */
8178 case TARGET_NR_alarm:
8179 ret = alarm(arg1);
8180 break;
8181 #endif
8182 #ifdef TARGET_NR_oldfstat
8183 case TARGET_NR_oldfstat:
8184 goto unimplemented;
8185 #endif
8186 #ifdef TARGET_NR_pause /* not on alpha */
8187 case TARGET_NR_pause:
8188 if (!block_signals()) {
8189 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8191 ret = -TARGET_EINTR;
8192 break;
8193 #endif
8194 #ifdef TARGET_NR_utime
8195 case TARGET_NR_utime:
8197 struct utimbuf tbuf, *host_tbuf;
8198 struct target_utimbuf *target_tbuf;
8199 if (arg2) {
8200 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8201 goto efault;
8202 tbuf.actime = tswapal(target_tbuf->actime);
8203 tbuf.modtime = tswapal(target_tbuf->modtime);
8204 unlock_user_struct(target_tbuf, arg2, 0);
8205 host_tbuf = &tbuf;
8206 } else {
8207 host_tbuf = NULL;
8209 if (!(p = lock_user_string(arg1)))
8210 goto efault;
8211 ret = get_errno(utime(p, host_tbuf));
8212 unlock_user(p, arg1, 0);
8214 break;
8215 #endif
8216 #ifdef TARGET_NR_utimes
8217 case TARGET_NR_utimes:
8219 struct timeval *tvp, tv[2];
8220 if (arg2) {
8221 if (copy_from_user_timeval(&tv[0], arg2)
8222 || copy_from_user_timeval(&tv[1],
8223 arg2 + sizeof(struct target_timeval)))
8224 goto efault;
8225 tvp = tv;
8226 } else {
8227 tvp = NULL;
8229 if (!(p = lock_user_string(arg1)))
8230 goto efault;
8231 ret = get_errno(utimes(p, tvp));
8232 unlock_user(p, arg1, 0);
8234 break;
8235 #endif
8236 #if defined(TARGET_NR_futimesat)
8237 case TARGET_NR_futimesat:
8239 struct timeval *tvp, tv[2];
8240 if (arg3) {
8241 if (copy_from_user_timeval(&tv[0], arg3)
8242 || copy_from_user_timeval(&tv[1],
8243 arg3 + sizeof(struct target_timeval)))
8244 goto efault;
8245 tvp = tv;
8246 } else {
8247 tvp = NULL;
8249 if (!(p = lock_user_string(arg2)))
8250 goto efault;
8251 ret = get_errno(futimesat(arg1, path(p), tvp));
8252 unlock_user(p, arg2, 0);
8254 break;
8255 #endif
8256 #ifdef TARGET_NR_stty
8257 case TARGET_NR_stty:
8258 goto unimplemented;
8259 #endif
8260 #ifdef TARGET_NR_gtty
8261 case TARGET_NR_gtty:
8262 goto unimplemented;
8263 #endif
8264 #ifdef TARGET_NR_access
8265 case TARGET_NR_access:
8266 if (!(p = lock_user_string(arg1)))
8267 goto efault;
8268 ret = get_errno(access(path(p), arg2));
8269 unlock_user(p, arg1, 0);
8270 break;
8271 #endif
8272 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8273 case TARGET_NR_faccessat:
8274 if (!(p = lock_user_string(arg2)))
8275 goto efault;
8276 ret = get_errno(faccessat(arg1, p, arg3, 0));
8277 unlock_user(p, arg2, 0);
8278 break;
8279 #endif
8280 #ifdef TARGET_NR_nice /* not on alpha */
8281 case TARGET_NR_nice:
8282 ret = get_errno(nice(arg1));
8283 break;
8284 #endif
8285 #ifdef TARGET_NR_ftime
8286 case TARGET_NR_ftime:
8287 goto unimplemented;
8288 #endif
8289 case TARGET_NR_sync:
8290 sync();
8291 ret = 0;
8292 break;
8293 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8294 case TARGET_NR_syncfs:
8295 ret = get_errno(syncfs(arg1));
8296 break;
8297 #endif
8298 case TARGET_NR_kill:
8299 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8300 break;
8301 #ifdef TARGET_NR_rename
8302 case TARGET_NR_rename:
8304 void *p2;
8305 p = lock_user_string(arg1);
8306 p2 = lock_user_string(arg2);
8307 if (!p || !p2)
8308 ret = -TARGET_EFAULT;
8309 else
8310 ret = get_errno(rename(p, p2));
8311 unlock_user(p2, arg2, 0);
8312 unlock_user(p, arg1, 0);
8314 break;
8315 #endif
8316 #if defined(TARGET_NR_renameat)
8317 case TARGET_NR_renameat:
8319 void *p2;
8320 p = lock_user_string(arg2);
8321 p2 = lock_user_string(arg4);
8322 if (!p || !p2)
8323 ret = -TARGET_EFAULT;
8324 else
8325 ret = get_errno(renameat(arg1, p, arg3, p2));
8326 unlock_user(p2, arg4, 0);
8327 unlock_user(p, arg2, 0);
8329 break;
8330 #endif
8331 #ifdef TARGET_NR_mkdir
8332 case TARGET_NR_mkdir:
8333 if (!(p = lock_user_string(arg1)))
8334 goto efault;
8335 ret = get_errno(mkdir(p, arg2));
8336 unlock_user(p, arg1, 0);
8337 break;
8338 #endif
8339 #if defined(TARGET_NR_mkdirat)
8340 case TARGET_NR_mkdirat:
8341 if (!(p = lock_user_string(arg2)))
8342 goto efault;
8343 ret = get_errno(mkdirat(arg1, p, arg3));
8344 unlock_user(p, arg2, 0);
8345 break;
8346 #endif
8347 #ifdef TARGET_NR_rmdir
8348 case TARGET_NR_rmdir:
8349 if (!(p = lock_user_string(arg1)))
8350 goto efault;
8351 ret = get_errno(rmdir(p));
8352 unlock_user(p, arg1, 0);
8353 break;
8354 #endif
8355 case TARGET_NR_dup:
8356 ret = get_errno(dup(arg1));
8357 if (ret >= 0) {
8358 fd_trans_dup(arg1, ret);
8360 break;
8361 #ifdef TARGET_NR_pipe
8362 case TARGET_NR_pipe:
8363 ret = do_pipe(cpu_env, arg1, 0, 0);
8364 break;
8365 #endif
8366 #ifdef TARGET_NR_pipe2
8367 case TARGET_NR_pipe2:
8368 ret = do_pipe(cpu_env, arg1,
8369 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8370 break;
8371 #endif
8372 case TARGET_NR_times:
8374 struct target_tms *tmsp;
8375 struct tms tms;
8376 ret = get_errno(times(&tms));
8377 if (arg1) {
8378 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8379 if (!tmsp)
8380 goto efault;
8381 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8382 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8383 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8384 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8386 if (!is_error(ret))
8387 ret = host_to_target_clock_t(ret);
8389 break;
8390 #ifdef TARGET_NR_prof
8391 case TARGET_NR_prof:
8392 goto unimplemented;
8393 #endif
8394 #ifdef TARGET_NR_signal
8395 case TARGET_NR_signal:
8396 goto unimplemented;
8397 #endif
8398 case TARGET_NR_acct:
8399 if (arg1 == 0) {
8400 ret = get_errno(acct(NULL));
8401 } else {
8402 if (!(p = lock_user_string(arg1)))
8403 goto efault;
8404 ret = get_errno(acct(path(p)));
8405 unlock_user(p, arg1, 0);
8407 break;
8408 #ifdef TARGET_NR_umount2
8409 case TARGET_NR_umount2:
8410 if (!(p = lock_user_string(arg1)))
8411 goto efault;
8412 ret = get_errno(umount2(p, arg2));
8413 unlock_user(p, arg1, 0);
8414 break;
8415 #endif
8416 #ifdef TARGET_NR_lock
8417 case TARGET_NR_lock:
8418 goto unimplemented;
8419 #endif
8420 case TARGET_NR_ioctl:
8421 ret = do_ioctl(arg1, arg2, arg3);
8422 break;
8423 case TARGET_NR_fcntl:
8424 ret = do_fcntl(arg1, arg2, arg3);
8425 break;
8426 #ifdef TARGET_NR_mpx
8427 case TARGET_NR_mpx:
8428 goto unimplemented;
8429 #endif
8430 case TARGET_NR_setpgid:
8431 ret = get_errno(setpgid(arg1, arg2));
8432 break;
8433 #ifdef TARGET_NR_ulimit
8434 case TARGET_NR_ulimit:
8435 goto unimplemented;
8436 #endif
8437 #ifdef TARGET_NR_oldolduname
8438 case TARGET_NR_oldolduname:
8439 goto unimplemented;
8440 #endif
8441 case TARGET_NR_umask:
8442 ret = get_errno(umask(arg1));
8443 break;
8444 case TARGET_NR_chroot:
8445 if (!(p = lock_user_string(arg1)))
8446 goto efault;
8447 ret = get_errno(chroot(p));
8448 unlock_user(p, arg1, 0);
8449 break;
8450 #ifdef TARGET_NR_ustat
8451 case TARGET_NR_ustat:
8452 goto unimplemented;
8453 #endif
8454 #ifdef TARGET_NR_dup2
8455 case TARGET_NR_dup2:
8456 ret = get_errno(dup2(arg1, arg2));
8457 if (ret >= 0) {
8458 fd_trans_dup(arg1, arg2);
8460 break;
8461 #endif
8462 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8463 case TARGET_NR_dup3:
8464 ret = get_errno(dup3(arg1, arg2, arg3));
8465 if (ret >= 0) {
8466 fd_trans_dup(arg1, arg2);
8468 break;
8469 #endif
8470 #ifdef TARGET_NR_getppid /* not on alpha */
8471 case TARGET_NR_getppid:
8472 ret = get_errno(getppid());
8473 break;
8474 #endif
8475 #ifdef TARGET_NR_getpgrp
8476 case TARGET_NR_getpgrp:
8477 ret = get_errno(getpgrp());
8478 break;
8479 #endif
8480 case TARGET_NR_setsid:
8481 ret = get_errno(setsid());
8482 break;
8483 #ifdef TARGET_NR_sigaction
8484 case TARGET_NR_sigaction:
8486 #if defined(TARGET_ALPHA)
8487 struct target_sigaction act, oact, *pact = 0;
8488 struct target_old_sigaction *old_act;
8489 if (arg2) {
8490 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8491 goto efault;
8492 act._sa_handler = old_act->_sa_handler;
8493 target_siginitset(&act.sa_mask, old_act->sa_mask);
8494 act.sa_flags = old_act->sa_flags;
8495 act.sa_restorer = 0;
8496 unlock_user_struct(old_act, arg2, 0);
8497 pact = &act;
8499 ret = get_errno(do_sigaction(arg1, pact, &oact));
8500 if (!is_error(ret) && arg3) {
8501 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8502 goto efault;
8503 old_act->_sa_handler = oact._sa_handler;
8504 old_act->sa_mask = oact.sa_mask.sig[0];
8505 old_act->sa_flags = oact.sa_flags;
8506 unlock_user_struct(old_act, arg3, 1);
8508 #elif defined(TARGET_MIPS)
8509 struct target_sigaction act, oact, *pact, *old_act;
8511 if (arg2) {
8512 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8513 goto efault;
8514 act._sa_handler = old_act->_sa_handler;
8515 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8516 act.sa_flags = old_act->sa_flags;
8517 unlock_user_struct(old_act, arg2, 0);
8518 pact = &act;
8519 } else {
8520 pact = NULL;
8523 ret = get_errno(do_sigaction(arg1, pact, &oact));
8525 if (!is_error(ret) && arg3) {
8526 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8527 goto efault;
8528 old_act->_sa_handler = oact._sa_handler;
8529 old_act->sa_flags = oact.sa_flags;
8530 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8531 old_act->sa_mask.sig[1] = 0;
8532 old_act->sa_mask.sig[2] = 0;
8533 old_act->sa_mask.sig[3] = 0;
8534 unlock_user_struct(old_act, arg3, 1);
8536 #else
8537 struct target_old_sigaction *old_act;
8538 struct target_sigaction act, oact, *pact;
8539 if (arg2) {
8540 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8541 goto efault;
8542 act._sa_handler = old_act->_sa_handler;
8543 target_siginitset(&act.sa_mask, old_act->sa_mask);
8544 act.sa_flags = old_act->sa_flags;
8545 act.sa_restorer = old_act->sa_restorer;
8546 unlock_user_struct(old_act, arg2, 0);
8547 pact = &act;
8548 } else {
8549 pact = NULL;
8551 ret = get_errno(do_sigaction(arg1, pact, &oact));
8552 if (!is_error(ret) && arg3) {
8553 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8554 goto efault;
8555 old_act->_sa_handler = oact._sa_handler;
8556 old_act->sa_mask = oact.sa_mask.sig[0];
8557 old_act->sa_flags = oact.sa_flags;
8558 old_act->sa_restorer = oact.sa_restorer;
8559 unlock_user_struct(old_act, arg3, 1);
8561 #endif
8563 break;
8564 #endif
8565 case TARGET_NR_rt_sigaction:
8567 #if defined(TARGET_ALPHA)
8568 struct target_sigaction act, oact, *pact = 0;
8569 struct target_rt_sigaction *rt_act;
8571 if (arg4 != sizeof(target_sigset_t)) {
8572 ret = -TARGET_EINVAL;
8573 break;
8575 if (arg2) {
8576 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8577 goto efault;
8578 act._sa_handler = rt_act->_sa_handler;
8579 act.sa_mask = rt_act->sa_mask;
8580 act.sa_flags = rt_act->sa_flags;
8581 act.sa_restorer = arg5;
8582 unlock_user_struct(rt_act, arg2, 0);
8583 pact = &act;
8585 ret = get_errno(do_sigaction(arg1, pact, &oact));
8586 if (!is_error(ret) && arg3) {
8587 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8588 goto efault;
8589 rt_act->_sa_handler = oact._sa_handler;
8590 rt_act->sa_mask = oact.sa_mask;
8591 rt_act->sa_flags = oact.sa_flags;
8592 unlock_user_struct(rt_act, arg3, 1);
8594 #else
8595 struct target_sigaction *act;
8596 struct target_sigaction *oact;
8598 if (arg4 != sizeof(target_sigset_t)) {
8599 ret = -TARGET_EINVAL;
8600 break;
8602 if (arg2) {
8603 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
8604 goto efault;
8605 } else
8606 act = NULL;
8607 if (arg3) {
8608 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8609 ret = -TARGET_EFAULT;
8610 goto rt_sigaction_fail;
8612 } else
8613 oact = NULL;
8614 ret = get_errno(do_sigaction(arg1, act, oact));
8615 rt_sigaction_fail:
8616 if (act)
8617 unlock_user_struct(act, arg2, 0);
8618 if (oact)
8619 unlock_user_struct(oact, arg3, 1);
8620 #endif
8622 break;
8623 #ifdef TARGET_NR_sgetmask /* not on alpha */
8624 case TARGET_NR_sgetmask:
8626 sigset_t cur_set;
8627 abi_ulong target_set;
8628 ret = do_sigprocmask(0, NULL, &cur_set);
8629 if (!ret) {
8630 host_to_target_old_sigset(&target_set, &cur_set);
8631 ret = target_set;
8634 break;
8635 #endif
8636 #ifdef TARGET_NR_ssetmask /* not on alpha */
8637 case TARGET_NR_ssetmask:
8639 sigset_t set, oset;
8640 abi_ulong target_set = arg1;
8641 target_to_host_old_sigset(&set, &target_set);
8642 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8643 if (!ret) {
8644 host_to_target_old_sigset(&target_set, &oset);
8645 ret = target_set;
8648 break;
8649 #endif
8650 #ifdef TARGET_NR_sigprocmask
8651 case TARGET_NR_sigprocmask:
8653 #if defined(TARGET_ALPHA)
8654 sigset_t set, oldset;
8655 abi_ulong mask;
8656 int how;
8658 switch (arg1) {
8659 case TARGET_SIG_BLOCK:
8660 how = SIG_BLOCK;
8661 break;
8662 case TARGET_SIG_UNBLOCK:
8663 how = SIG_UNBLOCK;
8664 break;
8665 case TARGET_SIG_SETMASK:
8666 how = SIG_SETMASK;
8667 break;
8668 default:
8669 ret = -TARGET_EINVAL;
8670 goto fail;
8672 mask = arg2;
8673 target_to_host_old_sigset(&set, &mask);
8675 ret = do_sigprocmask(how, &set, &oldset);
8676 if (!is_error(ret)) {
8677 host_to_target_old_sigset(&mask, &oldset);
8678 ret = mask;
8679 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8681 #else
8682 sigset_t set, oldset, *set_ptr;
8683 int how;
8685 if (arg2) {
8686 switch (arg1) {
8687 case TARGET_SIG_BLOCK:
8688 how = SIG_BLOCK;
8689 break;
8690 case TARGET_SIG_UNBLOCK:
8691 how = SIG_UNBLOCK;
8692 break;
8693 case TARGET_SIG_SETMASK:
8694 how = SIG_SETMASK;
8695 break;
8696 default:
8697 ret = -TARGET_EINVAL;
8698 goto fail;
8700 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8701 goto efault;
8702 target_to_host_old_sigset(&set, p);
8703 unlock_user(p, arg2, 0);
8704 set_ptr = &set;
8705 } else {
8706 how = 0;
8707 set_ptr = NULL;
8709 ret = do_sigprocmask(how, set_ptr, &oldset);
8710 if (!is_error(ret) && arg3) {
8711 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8712 goto efault;
8713 host_to_target_old_sigset(p, &oldset);
8714 unlock_user(p, arg3, sizeof(target_sigset_t));
8716 #endif
8718 break;
8719 #endif
8720 case TARGET_NR_rt_sigprocmask:
8722 int how = arg1;
8723 sigset_t set, oldset, *set_ptr;
8725 if (arg4 != sizeof(target_sigset_t)) {
8726 ret = -TARGET_EINVAL;
8727 break;
8730 if (arg2) {
8731 switch(how) {
8732 case TARGET_SIG_BLOCK:
8733 how = SIG_BLOCK;
8734 break;
8735 case TARGET_SIG_UNBLOCK:
8736 how = SIG_UNBLOCK;
8737 break;
8738 case TARGET_SIG_SETMASK:
8739 how = SIG_SETMASK;
8740 break;
8741 default:
8742 ret = -TARGET_EINVAL;
8743 goto fail;
8745 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8746 goto efault;
8747 target_to_host_sigset(&set, p);
8748 unlock_user(p, arg2, 0);
8749 set_ptr = &set;
8750 } else {
8751 how = 0;
8752 set_ptr = NULL;
8754 ret = do_sigprocmask(how, set_ptr, &oldset);
8755 if (!is_error(ret) && arg3) {
8756 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8757 goto efault;
8758 host_to_target_sigset(p, &oldset);
8759 unlock_user(p, arg3, sizeof(target_sigset_t));
8762 break;
8763 #ifdef TARGET_NR_sigpending
8764 case TARGET_NR_sigpending:
8766 sigset_t set;
8767 ret = get_errno(sigpending(&set));
8768 if (!is_error(ret)) {
8769 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8770 goto efault;
8771 host_to_target_old_sigset(p, &set);
8772 unlock_user(p, arg1, sizeof(target_sigset_t));
8775 break;
8776 #endif
8777 case TARGET_NR_rt_sigpending:
8779 sigset_t set;
8781 /* Yes, this check is >, not != like most. We follow the kernel's
8782 * logic and it does it like this because it implements
8783 * NR_sigpending through the same code path, and in that case
8784 * the old_sigset_t is smaller in size.
8786 if (arg2 > sizeof(target_sigset_t)) {
8787 ret = -TARGET_EINVAL;
8788 break;
8791 ret = get_errno(sigpending(&set));
8792 if (!is_error(ret)) {
8793 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8794 goto efault;
8795 host_to_target_sigset(p, &set);
8796 unlock_user(p, arg1, sizeof(target_sigset_t));
8799 break;
8800 #ifdef TARGET_NR_sigsuspend
8801 case TARGET_NR_sigsuspend:
8803 TaskState *ts = cpu->opaque;
8804 #if defined(TARGET_ALPHA)
8805 abi_ulong mask = arg1;
8806 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8807 #else
8808 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8809 goto efault;
8810 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8811 unlock_user(p, arg1, 0);
8812 #endif
8813 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8814 SIGSET_T_SIZE));
8815 if (ret != -TARGET_ERESTARTSYS) {
8816 ts->in_sigsuspend = 1;
8819 break;
8820 #endif
8821 case TARGET_NR_rt_sigsuspend:
8823 TaskState *ts = cpu->opaque;
8825 if (arg2 != sizeof(target_sigset_t)) {
8826 ret = -TARGET_EINVAL;
8827 break;
8829 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8830 goto efault;
8831 target_to_host_sigset(&ts->sigsuspend_mask, p);
8832 unlock_user(p, arg1, 0);
8833 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8834 SIGSET_T_SIZE));
8835 if (ret != -TARGET_ERESTARTSYS) {
8836 ts->in_sigsuspend = 1;
8839 break;
8840 case TARGET_NR_rt_sigtimedwait:
8842 sigset_t set;
8843 struct timespec uts, *puts;
8844 siginfo_t uinfo;
8846 if (arg4 != sizeof(target_sigset_t)) {
8847 ret = -TARGET_EINVAL;
8848 break;
8851 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8852 goto efault;
8853 target_to_host_sigset(&set, p);
8854 unlock_user(p, arg1, 0);
8855 if (arg3) {
8856 puts = &uts;
8857 target_to_host_timespec(puts, arg3);
8858 } else {
8859 puts = NULL;
8861 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8862 SIGSET_T_SIZE));
8863 if (!is_error(ret)) {
8864 if (arg2) {
8865 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8867 if (!p) {
8868 goto efault;
8870 host_to_target_siginfo(p, &uinfo);
8871 unlock_user(p, arg2, sizeof(target_siginfo_t));
8873 ret = host_to_target_signal(ret);
8876 break;
8877 case TARGET_NR_rt_sigqueueinfo:
8879 siginfo_t uinfo;
8881 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8882 if (!p) {
8883 goto efault;
8885 target_to_host_siginfo(&uinfo, p);
8886 unlock_user(p, arg3, 0);
8887 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8889 break;
8890 case TARGET_NR_rt_tgsigqueueinfo:
8892 siginfo_t uinfo;
8894 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8895 if (!p) {
8896 goto efault;
8898 target_to_host_siginfo(&uinfo, p);
8899 unlock_user(p, arg4, 0);
8900 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8902 break;
8903 #ifdef TARGET_NR_sigreturn
8904 case TARGET_NR_sigreturn:
8905 if (block_signals()) {
8906 ret = -TARGET_ERESTARTSYS;
8907 } else {
8908 ret = do_sigreturn(cpu_env);
8910 break;
8911 #endif
8912 case TARGET_NR_rt_sigreturn:
8913 if (block_signals()) {
8914 ret = -TARGET_ERESTARTSYS;
8915 } else {
8916 ret = do_rt_sigreturn(cpu_env);
8918 break;
8919 case TARGET_NR_sethostname:
8920 if (!(p = lock_user_string(arg1)))
8921 goto efault;
8922 ret = get_errno(sethostname(p, arg2));
8923 unlock_user(p, arg1, 0);
8924 break;
8925 case TARGET_NR_setrlimit:
8927 int resource = target_to_host_resource(arg1);
8928 struct target_rlimit *target_rlim;
8929 struct rlimit rlim;
8930 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8931 goto efault;
8932 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8933 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8934 unlock_user_struct(target_rlim, arg2, 0);
8935 ret = get_errno(setrlimit(resource, &rlim));
8937 break;
8938 case TARGET_NR_getrlimit:
8940 int resource = target_to_host_resource(arg1);
8941 struct target_rlimit *target_rlim;
8942 struct rlimit rlim;
8944 ret = get_errno(getrlimit(resource, &rlim));
8945 if (!is_error(ret)) {
8946 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8947 goto efault;
8948 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8949 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8950 unlock_user_struct(target_rlim, arg2, 1);
8953 break;
8954 case TARGET_NR_getrusage:
8956 struct rusage rusage;
8957 ret = get_errno(getrusage(arg1, &rusage));
8958 if (!is_error(ret)) {
8959 ret = host_to_target_rusage(arg2, &rusage);
8962 break;
8963 case TARGET_NR_gettimeofday:
8965 struct timeval tv;
8966 ret = get_errno(gettimeofday(&tv, NULL));
8967 if (!is_error(ret)) {
8968 if (copy_to_user_timeval(arg1, &tv))
8969 goto efault;
8972 break;
8973 case TARGET_NR_settimeofday:
8975 struct timeval tv, *ptv = NULL;
8976 struct timezone tz, *ptz = NULL;
8978 if (arg1) {
8979 if (copy_from_user_timeval(&tv, arg1)) {
8980 goto efault;
8982 ptv = &tv;
8985 if (arg2) {
8986 if (copy_from_user_timezone(&tz, arg2)) {
8987 goto efault;
8989 ptz = &tz;
8992 ret = get_errno(settimeofday(ptv, ptz));
8994 break;
8995 #if defined(TARGET_NR_select)
8996 case TARGET_NR_select:
8997 #if defined(TARGET_WANT_NI_OLD_SELECT)
8998 /* some architectures used to have old_select here
8999 * but now ENOSYS it.
9001 ret = -TARGET_ENOSYS;
9002 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9003 ret = do_old_select(arg1);
9004 #else
9005 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9006 #endif
9007 break;
9008 #endif
9009 #ifdef TARGET_NR_pselect6
9010 case TARGET_NR_pselect6:
9012 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
9013 fd_set rfds, wfds, efds;
9014 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
9015 struct timespec ts, *ts_ptr;
9018 * The 6th arg is actually two args smashed together,
9019 * so we cannot use the C library.
9021 sigset_t set;
9022 struct {
9023 sigset_t *set;
9024 size_t size;
9025 } sig, *sig_ptr;
9027 abi_ulong arg_sigset, arg_sigsize, *arg7;
9028 target_sigset_t *target_sigset;
9030 n = arg1;
9031 rfd_addr = arg2;
9032 wfd_addr = arg3;
9033 efd_addr = arg4;
9034 ts_addr = arg5;
9036 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
9037 if (ret) {
9038 goto fail;
9040 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
9041 if (ret) {
9042 goto fail;
9044 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
9045 if (ret) {
9046 goto fail;
9050 * This takes a timespec, and not a timeval, so we cannot
9051 * use the do_select() helper ...
9053 if (ts_addr) {
9054 if (target_to_host_timespec(&ts, ts_addr)) {
9055 goto efault;
9057 ts_ptr = &ts;
9058 } else {
9059 ts_ptr = NULL;
9062 /* Extract the two packed args for the sigset */
9063 if (arg6) {
9064 sig_ptr = &sig;
9065 sig.size = SIGSET_T_SIZE;
9067 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
9068 if (!arg7) {
9069 goto efault;
9071 arg_sigset = tswapal(arg7[0]);
9072 arg_sigsize = tswapal(arg7[1]);
9073 unlock_user(arg7, arg6, 0);
9075 if (arg_sigset) {
9076 sig.set = &set;
9077 if (arg_sigsize != sizeof(*target_sigset)) {
9078 /* Like the kernel, we enforce correct size sigsets */
9079 ret = -TARGET_EINVAL;
9080 goto fail;
9082 target_sigset = lock_user(VERIFY_READ, arg_sigset,
9083 sizeof(*target_sigset), 1);
9084 if (!target_sigset) {
9085 goto efault;
9087 target_to_host_sigset(&set, target_sigset);
9088 unlock_user(target_sigset, arg_sigset, 0);
9089 } else {
9090 sig.set = NULL;
9092 } else {
9093 sig_ptr = NULL;
9096 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
9097 ts_ptr, sig_ptr));
9099 if (!is_error(ret)) {
9100 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
9101 goto efault;
9102 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
9103 goto efault;
9104 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
9105 goto efault;
9107 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
9108 goto efault;
9111 break;
9112 #endif
9113 #ifdef TARGET_NR_symlink
9114 case TARGET_NR_symlink:
9116 void *p2;
9117 p = lock_user_string(arg1);
9118 p2 = lock_user_string(arg2);
9119 if (!p || !p2)
9120 ret = -TARGET_EFAULT;
9121 else
9122 ret = get_errno(symlink(p, p2));
9123 unlock_user(p2, arg2, 0);
9124 unlock_user(p, arg1, 0);
9126 break;
9127 #endif
9128 #if defined(TARGET_NR_symlinkat)
9129 case TARGET_NR_symlinkat:
9131 void *p2;
9132 p = lock_user_string(arg1);
9133 p2 = lock_user_string(arg3);
9134 if (!p || !p2)
9135 ret = -TARGET_EFAULT;
9136 else
9137 ret = get_errno(symlinkat(p, arg2, p2));
9138 unlock_user(p2, arg3, 0);
9139 unlock_user(p, arg1, 0);
9141 break;
9142 #endif
9143 #ifdef TARGET_NR_oldlstat
9144 case TARGET_NR_oldlstat:
9145 goto unimplemented;
9146 #endif
9147 #ifdef TARGET_NR_readlink
9148 case TARGET_NR_readlink:
9150 void *p2;
9151 p = lock_user_string(arg1);
9152 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9153 if (!p || !p2) {
9154 ret = -TARGET_EFAULT;
9155 } else if (!arg3) {
9156 /* Short circuit this for the magic exe check. */
9157 ret = -TARGET_EINVAL;
9158 } else if (is_proc_myself((const char *)p, "exe")) {
9159 char real[PATH_MAX], *temp;
9160 temp = realpath(exec_path, real);
9161 /* Return value is # of bytes that we wrote to the buffer. */
9162 if (temp == NULL) {
9163 ret = get_errno(-1);
9164 } else {
9165 /* Don't worry about sign mismatch as earlier mapping
9166 * logic would have thrown a bad address error. */
9167 ret = MIN(strlen(real), arg3);
9168 /* We cannot NUL terminate the string. */
9169 memcpy(p2, real, ret);
9171 } else {
9172 ret = get_errno(readlink(path(p), p2, arg3));
9174 unlock_user(p2, arg2, ret);
9175 unlock_user(p, arg1, 0);
9177 break;
9178 #endif
9179 #if defined(TARGET_NR_readlinkat)
9180 case TARGET_NR_readlinkat:
9182 void *p2;
9183 p = lock_user_string(arg2);
9184 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9185 if (!p || !p2) {
9186 ret = -TARGET_EFAULT;
9187 } else if (is_proc_myself((const char *)p, "exe")) {
9188 char real[PATH_MAX], *temp;
9189 temp = realpath(exec_path, real);
9190 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
9191 snprintf((char *)p2, arg4, "%s", real);
9192 } else {
9193 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9195 unlock_user(p2, arg3, ret);
9196 unlock_user(p, arg2, 0);
9198 break;
9199 #endif
9200 #ifdef TARGET_NR_uselib
9201 case TARGET_NR_uselib:
9202 goto unimplemented;
9203 #endif
9204 #ifdef TARGET_NR_swapon
9205 case TARGET_NR_swapon:
9206 if (!(p = lock_user_string(arg1)))
9207 goto efault;
9208 ret = get_errno(swapon(p, arg2));
9209 unlock_user(p, arg1, 0);
9210 break;
9211 #endif
9212 case TARGET_NR_reboot:
9213 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9214 /* arg4 must be ignored in all other cases */
9215 p = lock_user_string(arg4);
9216 if (!p) {
9217 goto efault;
9219 ret = get_errno(reboot(arg1, arg2, arg3, p));
9220 unlock_user(p, arg4, 0);
9221 } else {
9222 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9224 break;
9225 #ifdef TARGET_NR_readdir
9226 case TARGET_NR_readdir:
9227 goto unimplemented;
9228 #endif
9229 #ifdef TARGET_NR_mmap
9230 case TARGET_NR_mmap:
9231 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9232 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9233 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9234 || defined(TARGET_S390X)
9236 abi_ulong *v;
9237 abi_ulong v1, v2, v3, v4, v5, v6;
9238 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9239 goto efault;
9240 v1 = tswapal(v[0]);
9241 v2 = tswapal(v[1]);
9242 v3 = tswapal(v[2]);
9243 v4 = tswapal(v[3]);
9244 v5 = tswapal(v[4]);
9245 v6 = tswapal(v[5]);
9246 unlock_user(v, arg1, 0);
9247 ret = get_errno(target_mmap(v1, v2, v3,
9248 target_to_host_bitmask(v4, mmap_flags_tbl),
9249 v5, v6));
9251 #else
9252 ret = get_errno(target_mmap(arg1, arg2, arg3,
9253 target_to_host_bitmask(arg4, mmap_flags_tbl),
9254 arg5,
9255 arg6));
9256 #endif
9257 break;
9258 #endif
9259 #ifdef TARGET_NR_mmap2
9260 case TARGET_NR_mmap2:
9261 #ifndef MMAP_SHIFT
9262 #define MMAP_SHIFT 12
9263 #endif
9264 ret = get_errno(target_mmap(arg1, arg2, arg3,
9265 target_to_host_bitmask(arg4, mmap_flags_tbl),
9266 arg5,
9267 arg6 << MMAP_SHIFT));
9268 break;
9269 #endif
9270 case TARGET_NR_munmap:
9271 ret = get_errno(target_munmap(arg1, arg2));
9272 break;
9273 case TARGET_NR_mprotect:
9275 TaskState *ts = cpu->opaque;
9276 /* Special hack to detect libc making the stack executable. */
9277 if ((arg3 & PROT_GROWSDOWN)
9278 && arg1 >= ts->info->stack_limit
9279 && arg1 <= ts->info->start_stack) {
9280 arg3 &= ~PROT_GROWSDOWN;
9281 arg2 = arg2 + arg1 - ts->info->stack_limit;
9282 arg1 = ts->info->stack_limit;
9285 ret = get_errno(target_mprotect(arg1, arg2, arg3));
9286 break;
9287 #ifdef TARGET_NR_mremap
9288 case TARGET_NR_mremap:
9289 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9290 break;
9291 #endif
9292 /* ??? msync/mlock/munlock are broken for softmmu. */
9293 #ifdef TARGET_NR_msync
9294 case TARGET_NR_msync:
9295 ret = get_errno(msync(g2h(arg1), arg2, arg3));
9296 break;
9297 #endif
9298 #ifdef TARGET_NR_mlock
9299 case TARGET_NR_mlock:
9300 ret = get_errno(mlock(g2h(arg1), arg2));
9301 break;
9302 #endif
9303 #ifdef TARGET_NR_munlock
9304 case TARGET_NR_munlock:
9305 ret = get_errno(munlock(g2h(arg1), arg2));
9306 break;
9307 #endif
9308 #ifdef TARGET_NR_mlockall
9309 case TARGET_NR_mlockall:
9310 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9311 break;
9312 #endif
9313 #ifdef TARGET_NR_munlockall
9314 case TARGET_NR_munlockall:
9315 ret = get_errno(munlockall());
9316 break;
9317 #endif
9318 case TARGET_NR_truncate:
9319 if (!(p = lock_user_string(arg1)))
9320 goto efault;
9321 ret = get_errno(truncate(p, arg2));
9322 unlock_user(p, arg1, 0);
9323 break;
9324 case TARGET_NR_ftruncate:
9325 ret = get_errno(ftruncate(arg1, arg2));
9326 break;
9327 case TARGET_NR_fchmod:
9328 ret = get_errno(fchmod(arg1, arg2));
9329 break;
9330 #if defined(TARGET_NR_fchmodat)
9331 case TARGET_NR_fchmodat:
9332 if (!(p = lock_user_string(arg2)))
9333 goto efault;
9334 ret = get_errno(fchmodat(arg1, p, arg3, 0));
9335 unlock_user(p, arg2, 0);
9336 break;
9337 #endif
9338 case TARGET_NR_getpriority:
9339 /* Note that negative values are valid for getpriority, so we must
9340 differentiate based on errno settings. */
9341 errno = 0;
9342 ret = getpriority(arg1, arg2);
9343 if (ret == -1 && errno != 0) {
9344 ret = -host_to_target_errno(errno);
9345 break;
9347 #ifdef TARGET_ALPHA
9348 /* Return value is the unbiased priority. Signal no error. */
9349 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9350 #else
9351 /* Return value is a biased priority to avoid negative numbers. */
9352 ret = 20 - ret;
9353 #endif
9354 break;
9355 case TARGET_NR_setpriority:
9356 ret = get_errno(setpriority(arg1, arg2, arg3));
9357 break;
9358 #ifdef TARGET_NR_profil
9359 case TARGET_NR_profil:
9360 goto unimplemented;
9361 #endif
9362 case TARGET_NR_statfs:
9363 if (!(p = lock_user_string(arg1)))
9364 goto efault;
9365 ret = get_errno(statfs(path(p), &stfs));
9366 unlock_user(p, arg1, 0);
9367 convert_statfs:
9368 if (!is_error(ret)) {
9369 struct target_statfs *target_stfs;
9371 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9372 goto efault;
9373 __put_user(stfs.f_type, &target_stfs->f_type);
9374 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9375 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9376 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9377 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9378 __put_user(stfs.f_files, &target_stfs->f_files);
9379 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9380 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9381 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9382 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9383 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9384 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9385 unlock_user_struct(target_stfs, arg2, 1);
9387 break;
9388 case TARGET_NR_fstatfs:
9389 ret = get_errno(fstatfs(arg1, &stfs));
9390 goto convert_statfs;
9391 #ifdef TARGET_NR_statfs64
9392 case TARGET_NR_statfs64:
9393 if (!(p = lock_user_string(arg1)))
9394 goto efault;
9395 ret = get_errno(statfs(path(p), &stfs));
9396 unlock_user(p, arg1, 0);
9397 convert_statfs64:
9398 if (!is_error(ret)) {
9399 struct target_statfs64 *target_stfs;
9401 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9402 goto efault;
9403 __put_user(stfs.f_type, &target_stfs->f_type);
9404 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9405 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9406 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9407 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9408 __put_user(stfs.f_files, &target_stfs->f_files);
9409 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9410 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9411 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9412 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9413 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9414 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9415 unlock_user_struct(target_stfs, arg3, 1);
9417 break;
9418 case TARGET_NR_fstatfs64:
9419 ret = get_errno(fstatfs(arg1, &stfs));
9420 goto convert_statfs64;
9421 #endif
9422 #ifdef TARGET_NR_ioperm
9423 case TARGET_NR_ioperm:
9424 goto unimplemented;
9425 #endif
9426 #ifdef TARGET_NR_socketcall
9427 case TARGET_NR_socketcall:
9428 ret = do_socketcall(arg1, arg2);
9429 break;
9430 #endif
9431 #ifdef TARGET_NR_accept
9432 case TARGET_NR_accept:
9433 ret = do_accept4(arg1, arg2, arg3, 0);
9434 break;
9435 #endif
9436 #ifdef TARGET_NR_accept4
9437 case TARGET_NR_accept4:
9438 ret = do_accept4(arg1, arg2, arg3, arg4);
9439 break;
9440 #endif
9441 #ifdef TARGET_NR_bind
9442 case TARGET_NR_bind:
9443 ret = do_bind(arg1, arg2, arg3);
9444 break;
9445 #endif
9446 #ifdef TARGET_NR_connect
9447 case TARGET_NR_connect:
9448 ret = do_connect(arg1, arg2, arg3);
9449 break;
9450 #endif
9451 #ifdef TARGET_NR_getpeername
9452 case TARGET_NR_getpeername:
9453 ret = do_getpeername(arg1, arg2, arg3);
9454 break;
9455 #endif
9456 #ifdef TARGET_NR_getsockname
9457 case TARGET_NR_getsockname:
9458 ret = do_getsockname(arg1, arg2, arg3);
9459 break;
9460 #endif
9461 #ifdef TARGET_NR_getsockopt
9462 case TARGET_NR_getsockopt:
9463 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9464 break;
9465 #endif
9466 #ifdef TARGET_NR_listen
9467 case TARGET_NR_listen:
9468 ret = get_errno(listen(arg1, arg2));
9469 break;
9470 #endif
9471 #ifdef TARGET_NR_recv
9472 case TARGET_NR_recv:
9473 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9474 break;
9475 #endif
9476 #ifdef TARGET_NR_recvfrom
9477 case TARGET_NR_recvfrom:
9478 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9479 break;
9480 #endif
9481 #ifdef TARGET_NR_recvmsg
9482 case TARGET_NR_recvmsg:
9483 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
9484 break;
9485 #endif
9486 #ifdef TARGET_NR_send
9487 case TARGET_NR_send:
9488 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9489 break;
9490 #endif
9491 #ifdef TARGET_NR_sendmsg
9492 case TARGET_NR_sendmsg:
9493 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
9494 break;
9495 #endif
9496 #ifdef TARGET_NR_sendmmsg
9497 case TARGET_NR_sendmmsg:
9498 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9499 break;
9500 case TARGET_NR_recvmmsg:
9501 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9502 break;
9503 #endif
9504 #ifdef TARGET_NR_sendto
9505 case TARGET_NR_sendto:
9506 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9507 break;
9508 #endif
9509 #ifdef TARGET_NR_shutdown
9510 case TARGET_NR_shutdown:
9511 ret = get_errno(shutdown(arg1, arg2));
9512 break;
9513 #endif
9514 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9515 case TARGET_NR_getrandom:
9516 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9517 if (!p) {
9518 goto efault;
9520 ret = get_errno(getrandom(p, arg2, arg3));
9521 unlock_user(p, arg1, ret);
9522 break;
9523 #endif
9524 #ifdef TARGET_NR_socket
9525 case TARGET_NR_socket:
9526 ret = do_socket(arg1, arg2, arg3);
9527 break;
9528 #endif
9529 #ifdef TARGET_NR_socketpair
9530 case TARGET_NR_socketpair:
9531 ret = do_socketpair(arg1, arg2, arg3, arg4);
9532 break;
9533 #endif
9534 #ifdef TARGET_NR_setsockopt
9535 case TARGET_NR_setsockopt:
9536 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9537 break;
9538 #endif
9539 #if defined(TARGET_NR_syslog)
9540 case TARGET_NR_syslog:
9542 int len = arg2;
9544 switch (arg1) {
9545 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
9546 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
9547 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
9548 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
9549 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
9550 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9551 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
9552 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
9554 ret = get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9556 break;
9557 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
9558 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
9559 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
9561 ret = -TARGET_EINVAL;
9562 if (len < 0) {
9563 goto fail;
9565 ret = 0;
9566 if (len == 0) {
9567 break;
9569 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9570 if (!p) {
9571 ret = -TARGET_EFAULT;
9572 goto fail;
9574 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9575 unlock_user(p, arg2, arg3);
9577 break;
9578 default:
9579 ret = -EINVAL;
9580 break;
9583 break;
9584 #endif
9585 case TARGET_NR_setitimer:
9587 struct itimerval value, ovalue, *pvalue;
9589 if (arg2) {
9590 pvalue = &value;
9591 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9592 || copy_from_user_timeval(&pvalue->it_value,
9593 arg2 + sizeof(struct target_timeval)))
9594 goto efault;
9595 } else {
9596 pvalue = NULL;
9598 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9599 if (!is_error(ret) && arg3) {
9600 if (copy_to_user_timeval(arg3,
9601 &ovalue.it_interval)
9602 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9603 &ovalue.it_value))
9604 goto efault;
9607 break;
9608 case TARGET_NR_getitimer:
9610 struct itimerval value;
9612 ret = get_errno(getitimer(arg1, &value));
9613 if (!is_error(ret) && arg2) {
9614 if (copy_to_user_timeval(arg2,
9615 &value.it_interval)
9616 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9617 &value.it_value))
9618 goto efault;
9621 break;
9622 #ifdef TARGET_NR_stat
9623 case TARGET_NR_stat:
9624 if (!(p = lock_user_string(arg1)))
9625 goto efault;
9626 ret = get_errno(stat(path(p), &st));
9627 unlock_user(p, arg1, 0);
9628 goto do_stat;
9629 #endif
9630 #ifdef TARGET_NR_lstat
9631 case TARGET_NR_lstat:
9632 if (!(p = lock_user_string(arg1)))
9633 goto efault;
9634 ret = get_errno(lstat(path(p), &st));
9635 unlock_user(p, arg1, 0);
9636 goto do_stat;
9637 #endif
9638 case TARGET_NR_fstat:
9640 ret = get_errno(fstat(arg1, &st));
9641 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9642 do_stat:
9643 #endif
9644 if (!is_error(ret)) {
9645 struct target_stat *target_st;
9647 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9648 goto efault;
9649 memset(target_st, 0, sizeof(*target_st));
9650 __put_user(st.st_dev, &target_st->st_dev);
9651 __put_user(st.st_ino, &target_st->st_ino);
9652 __put_user(st.st_mode, &target_st->st_mode);
9653 __put_user(st.st_uid, &target_st->st_uid);
9654 __put_user(st.st_gid, &target_st->st_gid);
9655 __put_user(st.st_nlink, &target_st->st_nlink);
9656 __put_user(st.st_rdev, &target_st->st_rdev);
9657 __put_user(st.st_size, &target_st->st_size);
9658 __put_user(st.st_blksize, &target_st->st_blksize);
9659 __put_user(st.st_blocks, &target_st->st_blocks);
9660 __put_user(st.st_atime, &target_st->target_st_atime);
9661 __put_user(st.st_mtime, &target_st->target_st_mtime);
9662 __put_user(st.st_ctime, &target_st->target_st_ctime);
9663 unlock_user_struct(target_st, arg2, 1);
9666 break;
9667 #ifdef TARGET_NR_olduname
9668 case TARGET_NR_olduname:
9669 goto unimplemented;
9670 #endif
9671 #ifdef TARGET_NR_iopl
9672 case TARGET_NR_iopl:
9673 goto unimplemented;
9674 #endif
9675 case TARGET_NR_vhangup:
9676 ret = get_errno(vhangup());
9677 break;
9678 #ifdef TARGET_NR_idle
9679 case TARGET_NR_idle:
9680 goto unimplemented;
9681 #endif
9682 #ifdef TARGET_NR_syscall
9683 case TARGET_NR_syscall:
9684 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9685 arg6, arg7, arg8, 0);
9686 break;
9687 #endif
9688 case TARGET_NR_wait4:
9690 int status;
9691 abi_long status_ptr = arg2;
9692 struct rusage rusage, *rusage_ptr;
9693 abi_ulong target_rusage = arg4;
9694 abi_long rusage_err;
9695 if (target_rusage)
9696 rusage_ptr = &rusage;
9697 else
9698 rusage_ptr = NULL;
9699 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9700 if (!is_error(ret)) {
9701 if (status_ptr && ret) {
9702 status = host_to_target_waitstatus(status);
9703 if (put_user_s32(status, status_ptr))
9704 goto efault;
9706 if (target_rusage) {
9707 rusage_err = host_to_target_rusage(target_rusage, &rusage);
9708 if (rusage_err) {
9709 ret = rusage_err;
9714 break;
9715 #ifdef TARGET_NR_swapoff
9716 case TARGET_NR_swapoff:
9717 if (!(p = lock_user_string(arg1)))
9718 goto efault;
9719 ret = get_errno(swapoff(p));
9720 unlock_user(p, arg1, 0);
9721 break;
9722 #endif
9723 case TARGET_NR_sysinfo:
9725 struct target_sysinfo *target_value;
9726 struct sysinfo value;
9727 ret = get_errno(sysinfo(&value));
9728 if (!is_error(ret) && arg1)
9730 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9731 goto efault;
9732 __put_user(value.uptime, &target_value->uptime);
9733 __put_user(value.loads[0], &target_value->loads[0]);
9734 __put_user(value.loads[1], &target_value->loads[1]);
9735 __put_user(value.loads[2], &target_value->loads[2]);
9736 __put_user(value.totalram, &target_value->totalram);
9737 __put_user(value.freeram, &target_value->freeram);
9738 __put_user(value.sharedram, &target_value->sharedram);
9739 __put_user(value.bufferram, &target_value->bufferram);
9740 __put_user(value.totalswap, &target_value->totalswap);
9741 __put_user(value.freeswap, &target_value->freeswap);
9742 __put_user(value.procs, &target_value->procs);
9743 __put_user(value.totalhigh, &target_value->totalhigh);
9744 __put_user(value.freehigh, &target_value->freehigh);
9745 __put_user(value.mem_unit, &target_value->mem_unit);
9746 unlock_user_struct(target_value, arg1, 1);
9749 break;
9750 #ifdef TARGET_NR_ipc
9751 case TARGET_NR_ipc:
9752 ret = do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9753 break;
9754 #endif
9755 #ifdef TARGET_NR_semget
9756 case TARGET_NR_semget:
9757 ret = get_errno(semget(arg1, arg2, arg3));
9758 break;
9759 #endif
9760 #ifdef TARGET_NR_semop
9761 case TARGET_NR_semop:
9762 ret = do_semop(arg1, arg2, arg3);
9763 break;
9764 #endif
9765 #ifdef TARGET_NR_semctl
9766 case TARGET_NR_semctl:
9767 ret = do_semctl(arg1, arg2, arg3, arg4);
9768 break;
9769 #endif
9770 #ifdef TARGET_NR_msgctl
9771 case TARGET_NR_msgctl:
9772 ret = do_msgctl(arg1, arg2, arg3);
9773 break;
9774 #endif
9775 #ifdef TARGET_NR_msgget
9776 case TARGET_NR_msgget:
9777 ret = get_errno(msgget(arg1, arg2));
9778 break;
9779 #endif
9780 #ifdef TARGET_NR_msgrcv
9781 case TARGET_NR_msgrcv:
9782 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9783 break;
9784 #endif
9785 #ifdef TARGET_NR_msgsnd
9786 case TARGET_NR_msgsnd:
9787 ret = do_msgsnd(arg1, arg2, arg3, arg4);
9788 break;
9789 #endif
9790 #ifdef TARGET_NR_shmget
9791 case TARGET_NR_shmget:
9792 ret = get_errno(shmget(arg1, arg2, arg3));
9793 break;
9794 #endif
9795 #ifdef TARGET_NR_shmctl
9796 case TARGET_NR_shmctl:
9797 ret = do_shmctl(arg1, arg2, arg3);
9798 break;
9799 #endif
9800 #ifdef TARGET_NR_shmat
9801 case TARGET_NR_shmat:
9802 ret = do_shmat(cpu_env, arg1, arg2, arg3);
9803 break;
9804 #endif
9805 #ifdef TARGET_NR_shmdt
9806 case TARGET_NR_shmdt:
9807 ret = do_shmdt(arg1);
9808 break;
9809 #endif
9810 case TARGET_NR_fsync:
9811 ret = get_errno(fsync(arg1));
9812 break;
9813 case TARGET_NR_clone:
9814 /* Linux manages to have three different orderings for its
9815 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9816 * match the kernel's CONFIG_CLONE_* settings.
9817 * Microblaze is further special in that it uses a sixth
9818 * implicit argument to clone for the TLS pointer.
9820 #if defined(TARGET_MICROBLAZE)
9821 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9822 #elif defined(TARGET_CLONE_BACKWARDS)
9823 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9824 #elif defined(TARGET_CLONE_BACKWARDS2)
9825 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9826 #else
9827 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9828 #endif
9829 break;
9830 #ifdef __NR_exit_group
9831 /* new thread calls */
9832 case TARGET_NR_exit_group:
9833 #ifdef TARGET_GPROF
9834 _mcleanup();
9835 #endif
9836 gdb_exit(cpu_env, arg1);
9837 ret = get_errno(exit_group(arg1));
9838 break;
9839 #endif
9840 case TARGET_NR_setdomainname:
9841 if (!(p = lock_user_string(arg1)))
9842 goto efault;
9843 ret = get_errno(setdomainname(p, arg2));
9844 unlock_user(p, arg1, 0);
9845 break;
9846 case TARGET_NR_uname:
9847 /* no need to transcode because we use the linux syscall */
9849 struct new_utsname * buf;
9851 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9852 goto efault;
9853 ret = get_errno(sys_uname(buf));
9854 if (!is_error(ret)) {
9855 /* Overwrite the native machine name with whatever is being
9856 emulated. */
9857 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
9858 /* Allow the user to override the reported release. */
9859 if (qemu_uname_release && *qemu_uname_release) {
9860 g_strlcpy(buf->release, qemu_uname_release,
9861 sizeof(buf->release));
9864 unlock_user_struct(buf, arg1, 1);
9866 break;
9867 #ifdef TARGET_I386
9868 case TARGET_NR_modify_ldt:
9869 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
9870 break;
9871 #if !defined(TARGET_X86_64)
9872 case TARGET_NR_vm86old:
9873 goto unimplemented;
9874 case TARGET_NR_vm86:
9875 ret = do_vm86(cpu_env, arg1, arg2);
9876 break;
9877 #endif
9878 #endif
9879 case TARGET_NR_adjtimex:
9881 struct timex host_buf;
9883 if (target_to_host_timex(&host_buf, arg1) != 0) {
9884 goto efault;
9886 ret = get_errno(adjtimex(&host_buf));
9887 if (!is_error(ret)) {
9888 if (host_to_target_timex(arg1, &host_buf) != 0) {
9889 goto efault;
9893 break;
9894 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9895 case TARGET_NR_clock_adjtime:
9897 struct timex htx, *phtx = &htx;
9899 if (target_to_host_timex(phtx, arg2) != 0) {
9900 goto efault;
9902 ret = get_errno(clock_adjtime(arg1, phtx));
9903 if (!is_error(ret) && phtx) {
9904 if (host_to_target_timex(arg2, phtx) != 0) {
9905 goto efault;
9909 break;
9910 #endif
9911 #ifdef TARGET_NR_create_module
9912 case TARGET_NR_create_module:
9913 #endif
9914 case TARGET_NR_init_module:
9915 case TARGET_NR_delete_module:
9916 #ifdef TARGET_NR_get_kernel_syms
9917 case TARGET_NR_get_kernel_syms:
9918 #endif
9919 goto unimplemented;
9920 case TARGET_NR_quotactl:
9921 goto unimplemented;
9922 case TARGET_NR_getpgid:
9923 ret = get_errno(getpgid(arg1));
9924 break;
9925 case TARGET_NR_fchdir:
9926 ret = get_errno(fchdir(arg1));
9927 break;
9928 #ifdef TARGET_NR_bdflush /* not on x86_64 */
9929 case TARGET_NR_bdflush:
9930 goto unimplemented;
9931 #endif
9932 #ifdef TARGET_NR_sysfs
9933 case TARGET_NR_sysfs:
9934 goto unimplemented;
9935 #endif
9936 case TARGET_NR_personality:
9937 ret = get_errno(personality(arg1));
9938 break;
9939 #ifdef TARGET_NR_afs_syscall
9940 case TARGET_NR_afs_syscall:
9941 goto unimplemented;
9942 #endif
9943 #ifdef TARGET_NR__llseek /* Not on alpha */
9944 case TARGET_NR__llseek:
9946 int64_t res;
9947 #if !defined(__NR_llseek)
9948 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9949 if (res == -1) {
9950 ret = get_errno(res);
9951 } else {
9952 ret = 0;
9954 #else
9955 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9956 #endif
9957 if ((ret == 0) && put_user_s64(res, arg4)) {
9958 goto efault;
9961 break;
9962 #endif
9963 #ifdef TARGET_NR_getdents
9964 case TARGET_NR_getdents:
9965 #ifdef __NR_getdents
9966 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9968 struct target_dirent *target_dirp;
9969 struct linux_dirent *dirp;
9970 abi_long count = arg3;
9972 dirp = g_try_malloc(count);
9973 if (!dirp) {
9974 ret = -TARGET_ENOMEM;
9975 goto fail;
9978 ret = get_errno(sys_getdents(arg1, dirp, count));
9979 if (!is_error(ret)) {
9980 struct linux_dirent *de;
9981 struct target_dirent *tde;
9982 int len = ret;
9983 int reclen, treclen;
9984 int count1, tnamelen;
9986 count1 = 0;
9987 de = dirp;
9988 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9989 goto efault;
9990 tde = target_dirp;
9991 while (len > 0) {
9992 reclen = de->d_reclen;
9993 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9994 assert(tnamelen >= 0);
9995 treclen = tnamelen + offsetof(struct target_dirent, d_name);
9996 assert(count1 + treclen <= count);
9997 tde->d_reclen = tswap16(treclen);
9998 tde->d_ino = tswapal(de->d_ino);
9999 tde->d_off = tswapal(de->d_off);
10000 memcpy(tde->d_name, de->d_name, tnamelen);
10001 de = (struct linux_dirent *)((char *)de + reclen);
10002 len -= reclen;
10003 tde = (struct target_dirent *)((char *)tde + treclen);
10004 count1 += treclen;
10006 ret = count1;
10007 unlock_user(target_dirp, arg2, ret);
10009 g_free(dirp);
10011 #else
10013 struct linux_dirent *dirp;
10014 abi_long count = arg3;
10016 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10017 goto efault;
10018 ret = get_errno(sys_getdents(arg1, dirp, count));
10019 if (!is_error(ret)) {
10020 struct linux_dirent *de;
10021 int len = ret;
10022 int reclen;
10023 de = dirp;
10024 while (len > 0) {
10025 reclen = de->d_reclen;
10026 if (reclen > len)
10027 break;
10028 de->d_reclen = tswap16(reclen);
10029 tswapls(&de->d_ino);
10030 tswapls(&de->d_off);
10031 de = (struct linux_dirent *)((char *)de + reclen);
10032 len -= reclen;
10035 unlock_user(dirp, arg2, ret);
10037 #endif
10038 #else
10039 /* Implement getdents in terms of getdents64 */
10041 struct linux_dirent64 *dirp;
10042 abi_long count = arg3;
10044 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10045 if (!dirp) {
10046 goto efault;
10048 ret = get_errno(sys_getdents64(arg1, dirp, count));
10049 if (!is_error(ret)) {
10050 /* Convert the dirent64 structs to target dirent. We do this
10051 * in-place, since we can guarantee that a target_dirent is no
10052 * larger than a dirent64; however this means we have to be
10053 * careful to read everything before writing in the new format.
10055 struct linux_dirent64 *de;
10056 struct target_dirent *tde;
10057 int len = ret;
10058 int tlen = 0;
10060 de = dirp;
10061 tde = (struct target_dirent *)dirp;
10062 while (len > 0) {
10063 int namelen, treclen;
10064 int reclen = de->d_reclen;
10065 uint64_t ino = de->d_ino;
10066 int64_t off = de->d_off;
10067 uint8_t type = de->d_type;
10069 namelen = strlen(de->d_name);
10070 treclen = offsetof(struct target_dirent, d_name)
10071 + namelen + 2;
10072 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10074 memmove(tde->d_name, de->d_name, namelen + 1);
10075 tde->d_ino = tswapal(ino);
10076 tde->d_off = tswapal(off);
10077 tde->d_reclen = tswap16(treclen);
10078 /* The target_dirent type is in what was formerly a padding
10079 * byte at the end of the structure:
10081 *(((char *)tde) + treclen - 1) = type;
10083 de = (struct linux_dirent64 *)((char *)de + reclen);
10084 tde = (struct target_dirent *)((char *)tde + treclen);
10085 len -= reclen;
10086 tlen += treclen;
10088 ret = tlen;
10090 unlock_user(dirp, arg2, ret);
10092 #endif
10093 break;
10094 #endif /* TARGET_NR_getdents */
10095 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10096 case TARGET_NR_getdents64:
10098 struct linux_dirent64 *dirp;
10099 abi_long count = arg3;
10100 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10101 goto efault;
10102 ret = get_errno(sys_getdents64(arg1, dirp, count));
10103 if (!is_error(ret)) {
10104 struct linux_dirent64 *de;
10105 int len = ret;
10106 int reclen;
10107 de = dirp;
10108 while (len > 0) {
10109 reclen = de->d_reclen;
10110 if (reclen > len)
10111 break;
10112 de->d_reclen = tswap16(reclen);
10113 tswap64s((uint64_t *)&de->d_ino);
10114 tswap64s((uint64_t *)&de->d_off);
10115 de = (struct linux_dirent64 *)((char *)de + reclen);
10116 len -= reclen;
10119 unlock_user(dirp, arg2, ret);
10121 break;
10122 #endif /* TARGET_NR_getdents64 */
10123 #if defined(TARGET_NR__newselect)
10124 case TARGET_NR__newselect:
10125 ret = do_select(arg1, arg2, arg3, arg4, arg5);
10126 break;
10127 #endif
10128 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10129 # ifdef TARGET_NR_poll
10130 case TARGET_NR_poll:
10131 # endif
10132 # ifdef TARGET_NR_ppoll
10133 case TARGET_NR_ppoll:
10134 # endif
10136 struct target_pollfd *target_pfd;
10137 unsigned int nfds = arg2;
10138 struct pollfd *pfd;
10139 unsigned int i;
10141 pfd = NULL;
10142 target_pfd = NULL;
10143 if (nfds) {
10144 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
10145 ret = -TARGET_EINVAL;
10146 break;
10149 target_pfd = lock_user(VERIFY_WRITE, arg1,
10150 sizeof(struct target_pollfd) * nfds, 1);
10151 if (!target_pfd) {
10152 goto efault;
10155 pfd = alloca(sizeof(struct pollfd) * nfds);
10156 for (i = 0; i < nfds; i++) {
10157 pfd[i].fd = tswap32(target_pfd[i].fd);
10158 pfd[i].events = tswap16(target_pfd[i].events);
10162 switch (num) {
10163 # ifdef TARGET_NR_ppoll
10164 case TARGET_NR_ppoll:
10166 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
10167 target_sigset_t *target_set;
10168 sigset_t _set, *set = &_set;
10170 if (arg3) {
10171 if (target_to_host_timespec(timeout_ts, arg3)) {
10172 unlock_user(target_pfd, arg1, 0);
10173 goto efault;
10175 } else {
10176 timeout_ts = NULL;
10179 if (arg4) {
10180 if (arg5 != sizeof(target_sigset_t)) {
10181 unlock_user(target_pfd, arg1, 0);
10182 ret = -TARGET_EINVAL;
10183 break;
10186 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
10187 if (!target_set) {
10188 unlock_user(target_pfd, arg1, 0);
10189 goto efault;
10191 target_to_host_sigset(set, target_set);
10192 } else {
10193 set = NULL;
10196 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
10197 set, SIGSET_T_SIZE));
10199 if (!is_error(ret) && arg3) {
10200 host_to_target_timespec(arg3, timeout_ts);
10202 if (arg4) {
10203 unlock_user(target_set, arg4, 0);
10205 break;
10207 # endif
10208 # ifdef TARGET_NR_poll
10209 case TARGET_NR_poll:
10211 struct timespec ts, *pts;
10213 if (arg3 >= 0) {
10214 /* Convert ms to secs, ns */
10215 ts.tv_sec = arg3 / 1000;
10216 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10217 pts = &ts;
10218 } else {
10219 /* -ve poll() timeout means "infinite" */
10220 pts = NULL;
10222 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10223 break;
10225 # endif
10226 default:
10227 g_assert_not_reached();
10230 if (!is_error(ret)) {
10231 for(i = 0; i < nfds; i++) {
10232 target_pfd[i].revents = tswap16(pfd[i].revents);
10235 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10237 break;
10238 #endif
10239 case TARGET_NR_flock:
10240 /* NOTE: the flock constant seems to be the same for every
10241 Linux platform */
10242 ret = get_errno(safe_flock(arg1, arg2));
10243 break;
10244 case TARGET_NR_readv:
10246 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10247 if (vec != NULL) {
10248 ret = get_errno(safe_readv(arg1, vec, arg3));
10249 unlock_iovec(vec, arg2, arg3, 1);
10250 } else {
10251 ret = -host_to_target_errno(errno);
10254 break;
10255 case TARGET_NR_writev:
10257 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10258 if (vec != NULL) {
10259 ret = get_errno(safe_writev(arg1, vec, arg3));
10260 unlock_iovec(vec, arg2, arg3, 0);
10261 } else {
10262 ret = -host_to_target_errno(errno);
10265 break;
10266 #if defined(TARGET_NR_preadv)
10267 case TARGET_NR_preadv:
10269 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10270 if (vec != NULL) {
10271 ret = get_errno(safe_preadv(arg1, vec, arg3, arg4, arg5));
10272 unlock_iovec(vec, arg2, arg3, 1);
10273 } else {
10274 ret = -host_to_target_errno(errno);
10277 break;
10278 #endif
10279 #if defined(TARGET_NR_pwritev)
10280 case TARGET_NR_pwritev:
10282 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10283 if (vec != NULL) {
10284 ret = get_errno(safe_pwritev(arg1, vec, arg3, arg4, arg5));
10285 unlock_iovec(vec, arg2, arg3, 0);
10286 } else {
10287 ret = -host_to_target_errno(errno);
10290 break;
10291 #endif
10292 case TARGET_NR_getsid:
10293 ret = get_errno(getsid(arg1));
10294 break;
10295 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10296 case TARGET_NR_fdatasync:
10297 ret = get_errno(fdatasync(arg1));
10298 break;
10299 #endif
10300 #ifdef TARGET_NR__sysctl
10301 case TARGET_NR__sysctl:
10302 /* We don't implement this, but ENOTDIR is always a safe
10303 return value. */
10304 ret = -TARGET_ENOTDIR;
10305 break;
10306 #endif
10307 case TARGET_NR_sched_getaffinity:
10309 unsigned int mask_size;
10310 unsigned long *mask;
10313 * sched_getaffinity needs multiples of ulong, so need to take
10314 * care of mismatches between target ulong and host ulong sizes.
10316 if (arg2 & (sizeof(abi_ulong) - 1)) {
10317 ret = -TARGET_EINVAL;
10318 break;
10320 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10322 mask = alloca(mask_size);
10323 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10325 if (!is_error(ret)) {
10326 if (ret > arg2) {
10327 /* More data returned than the caller's buffer will fit.
10328 * This only happens if sizeof(abi_long) < sizeof(long)
10329 * and the caller passed us a buffer holding an odd number
10330 * of abi_longs. If the host kernel is actually using the
10331 * extra 4 bytes then fail EINVAL; otherwise we can just
10332 * ignore them and only copy the interesting part.
10334 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10335 if (numcpus > arg2 * 8) {
10336 ret = -TARGET_EINVAL;
10337 break;
10339 ret = arg2;
10342 if (copy_to_user(arg3, mask, ret)) {
10343 goto efault;
10347 break;
10348 case TARGET_NR_sched_setaffinity:
10350 unsigned int mask_size;
10351 unsigned long *mask;
10354 * sched_setaffinity needs multiples of ulong, so need to take
10355 * care of mismatches between target ulong and host ulong sizes.
10357 if (arg2 & (sizeof(abi_ulong) - 1)) {
10358 ret = -TARGET_EINVAL;
10359 break;
10361 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10363 mask = alloca(mask_size);
10364 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
10365 goto efault;
10367 memcpy(mask, p, arg2);
10368 unlock_user_struct(p, arg2, 0);
10370 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10372 break;
10373 case TARGET_NR_sched_setparam:
10375 struct sched_param *target_schp;
10376 struct sched_param schp;
10378 if (arg2 == 0) {
10379 return -TARGET_EINVAL;
10381 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10382 goto efault;
10383 schp.sched_priority = tswap32(target_schp->sched_priority);
10384 unlock_user_struct(target_schp, arg2, 0);
10385 ret = get_errno(sched_setparam(arg1, &schp));
10387 break;
10388 case TARGET_NR_sched_getparam:
10390 struct sched_param *target_schp;
10391 struct sched_param schp;
10393 if (arg2 == 0) {
10394 return -TARGET_EINVAL;
10396 ret = get_errno(sched_getparam(arg1, &schp));
10397 if (!is_error(ret)) {
10398 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10399 goto efault;
10400 target_schp->sched_priority = tswap32(schp.sched_priority);
10401 unlock_user_struct(target_schp, arg2, 1);
10404 break;
10405 case TARGET_NR_sched_setscheduler:
10407 struct sched_param *target_schp;
10408 struct sched_param schp;
10409 if (arg3 == 0) {
10410 return -TARGET_EINVAL;
10412 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10413 goto efault;
10414 schp.sched_priority = tswap32(target_schp->sched_priority);
10415 unlock_user_struct(target_schp, arg3, 0);
10416 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
10418 break;
10419 case TARGET_NR_sched_getscheduler:
10420 ret = get_errno(sched_getscheduler(arg1));
10421 break;
10422 case TARGET_NR_sched_yield:
10423 ret = get_errno(sched_yield());
10424 break;
10425 case TARGET_NR_sched_get_priority_max:
10426 ret = get_errno(sched_get_priority_max(arg1));
10427 break;
10428 case TARGET_NR_sched_get_priority_min:
10429 ret = get_errno(sched_get_priority_min(arg1));
10430 break;
10431 case TARGET_NR_sched_rr_get_interval:
10433 struct timespec ts;
10434 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10435 if (!is_error(ret)) {
10436 ret = host_to_target_timespec(arg2, &ts);
10439 break;
10440 case TARGET_NR_nanosleep:
10442 struct timespec req, rem;
10443 target_to_host_timespec(&req, arg1);
10444 ret = get_errno(safe_nanosleep(&req, &rem));
10445 if (is_error(ret) && arg2) {
10446 host_to_target_timespec(arg2, &rem);
10449 break;
10450 #ifdef TARGET_NR_query_module
10451 case TARGET_NR_query_module:
10452 goto unimplemented;
10453 #endif
10454 #ifdef TARGET_NR_nfsservctl
10455 case TARGET_NR_nfsservctl:
10456 goto unimplemented;
10457 #endif
10458 case TARGET_NR_prctl:
10459 switch (arg1) {
10460 case PR_GET_PDEATHSIG:
10462 int deathsig;
10463 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10464 if (!is_error(ret) && arg2
10465 && put_user_ual(deathsig, arg2)) {
10466 goto efault;
10468 break;
10470 #ifdef PR_GET_NAME
10471 case PR_GET_NAME:
10473 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10474 if (!name) {
10475 goto efault;
10477 ret = get_errno(prctl(arg1, (unsigned long)name,
10478 arg3, arg4, arg5));
10479 unlock_user(name, arg2, 16);
10480 break;
10482 case PR_SET_NAME:
10484 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10485 if (!name) {
10486 goto efault;
10488 ret = get_errno(prctl(arg1, (unsigned long)name,
10489 arg3, arg4, arg5));
10490 unlock_user(name, arg2, 0);
10491 break;
10493 #endif
10494 default:
10495 /* Most prctl options have no pointer arguments */
10496 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10497 break;
10499 break;
10500 #ifdef TARGET_NR_arch_prctl
10501 case TARGET_NR_arch_prctl:
10502 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10503 ret = do_arch_prctl(cpu_env, arg1, arg2);
10504 break;
10505 #else
10506 goto unimplemented;
10507 #endif
10508 #endif
10509 #ifdef TARGET_NR_pread64
10510 case TARGET_NR_pread64:
10511 if (regpairs_aligned(cpu_env)) {
10512 arg4 = arg5;
10513 arg5 = arg6;
10515 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
10516 goto efault;
10517 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10518 unlock_user(p, arg2, ret);
10519 break;
10520 case TARGET_NR_pwrite64:
10521 if (regpairs_aligned(cpu_env)) {
10522 arg4 = arg5;
10523 arg5 = arg6;
10525 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
10526 goto efault;
10527 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10528 unlock_user(p, arg2, 0);
10529 break;
10530 #endif
10531 case TARGET_NR_getcwd:
10532 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10533 goto efault;
10534 ret = get_errno(sys_getcwd1(p, arg2));
10535 unlock_user(p, arg1, ret);
10536 break;
10537 case TARGET_NR_capget:
10538 case TARGET_NR_capset:
10540 struct target_user_cap_header *target_header;
10541 struct target_user_cap_data *target_data = NULL;
10542 struct __user_cap_header_struct header;
10543 struct __user_cap_data_struct data[2];
10544 struct __user_cap_data_struct *dataptr = NULL;
10545 int i, target_datalen;
10546 int data_items = 1;
10548 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10549 goto efault;
10551 header.version = tswap32(target_header->version);
10552 header.pid = tswap32(target_header->pid);
10554 if (header.version != _LINUX_CAPABILITY_VERSION) {
10555 /* Version 2 and up takes pointer to two user_data structs */
10556 data_items = 2;
10559 target_datalen = sizeof(*target_data) * data_items;
10561 if (arg2) {
10562 if (num == TARGET_NR_capget) {
10563 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10564 } else {
10565 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10567 if (!target_data) {
10568 unlock_user_struct(target_header, arg1, 0);
10569 goto efault;
10572 if (num == TARGET_NR_capset) {
10573 for (i = 0; i < data_items; i++) {
10574 data[i].effective = tswap32(target_data[i].effective);
10575 data[i].permitted = tswap32(target_data[i].permitted);
10576 data[i].inheritable = tswap32(target_data[i].inheritable);
10580 dataptr = data;
10583 if (num == TARGET_NR_capget) {
10584 ret = get_errno(capget(&header, dataptr));
10585 } else {
10586 ret = get_errno(capset(&header, dataptr));
10589 /* The kernel always updates version for both capget and capset */
10590 target_header->version = tswap32(header.version);
10591 unlock_user_struct(target_header, arg1, 1);
10593 if (arg2) {
10594 if (num == TARGET_NR_capget) {
10595 for (i = 0; i < data_items; i++) {
10596 target_data[i].effective = tswap32(data[i].effective);
10597 target_data[i].permitted = tswap32(data[i].permitted);
10598 target_data[i].inheritable = tswap32(data[i].inheritable);
10600 unlock_user(target_data, arg2, target_datalen);
10601 } else {
10602 unlock_user(target_data, arg2, 0);
10605 break;
10607 case TARGET_NR_sigaltstack:
10608 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
10609 break;
10611 #ifdef CONFIG_SENDFILE
10612 case TARGET_NR_sendfile:
10614 off_t *offp = NULL;
10615 off_t off;
10616 if (arg3) {
10617 ret = get_user_sal(off, arg3);
10618 if (is_error(ret)) {
10619 break;
10621 offp = &off;
10623 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10624 if (!is_error(ret) && arg3) {
10625 abi_long ret2 = put_user_sal(off, arg3);
10626 if (is_error(ret2)) {
10627 ret = ret2;
10630 break;
10632 #ifdef TARGET_NR_sendfile64
10633 case TARGET_NR_sendfile64:
10635 off_t *offp = NULL;
10636 off_t off;
10637 if (arg3) {
10638 ret = get_user_s64(off, arg3);
10639 if (is_error(ret)) {
10640 break;
10642 offp = &off;
10644 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10645 if (!is_error(ret) && arg3) {
10646 abi_long ret2 = put_user_s64(off, arg3);
10647 if (is_error(ret2)) {
10648 ret = ret2;
10651 break;
10653 #endif
10654 #else
10655 case TARGET_NR_sendfile:
10656 #ifdef TARGET_NR_sendfile64
10657 case TARGET_NR_sendfile64:
10658 #endif
10659 goto unimplemented;
10660 #endif
10662 #ifdef TARGET_NR_getpmsg
10663 case TARGET_NR_getpmsg:
10664 goto unimplemented;
10665 #endif
10666 #ifdef TARGET_NR_putpmsg
10667 case TARGET_NR_putpmsg:
10668 goto unimplemented;
10669 #endif
10670 #ifdef TARGET_NR_vfork
10671 case TARGET_NR_vfork:
10672 ret = get_errno(do_fork(cpu_env,
10673 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10674 0, 0, 0, 0));
10675 break;
10676 #endif
10677 #ifdef TARGET_NR_ugetrlimit
10678 case TARGET_NR_ugetrlimit:
10680 struct rlimit rlim;
10681 int resource = target_to_host_resource(arg1);
10682 ret = get_errno(getrlimit(resource, &rlim));
10683 if (!is_error(ret)) {
10684 struct target_rlimit *target_rlim;
10685 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10686 goto efault;
10687 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10688 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10689 unlock_user_struct(target_rlim, arg2, 1);
10691 break;
10693 #endif
10694 #ifdef TARGET_NR_truncate64
10695 case TARGET_NR_truncate64:
10696 if (!(p = lock_user_string(arg1)))
10697 goto efault;
10698 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10699 unlock_user(p, arg1, 0);
10700 break;
10701 #endif
10702 #ifdef TARGET_NR_ftruncate64
10703 case TARGET_NR_ftruncate64:
10704 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10705 break;
10706 #endif
10707 #ifdef TARGET_NR_stat64
10708 case TARGET_NR_stat64:
10709 if (!(p = lock_user_string(arg1)))
10710 goto efault;
10711 ret = get_errno(stat(path(p), &st));
10712 unlock_user(p, arg1, 0);
10713 if (!is_error(ret))
10714 ret = host_to_target_stat64(cpu_env, arg2, &st);
10715 break;
10716 #endif
10717 #ifdef TARGET_NR_lstat64
10718 case TARGET_NR_lstat64:
10719 if (!(p = lock_user_string(arg1)))
10720 goto efault;
10721 ret = get_errno(lstat(path(p), &st));
10722 unlock_user(p, arg1, 0);
10723 if (!is_error(ret))
10724 ret = host_to_target_stat64(cpu_env, arg2, &st);
10725 break;
10726 #endif
10727 #ifdef TARGET_NR_fstat64
10728 case TARGET_NR_fstat64:
10729 ret = get_errno(fstat(arg1, &st));
10730 if (!is_error(ret))
10731 ret = host_to_target_stat64(cpu_env, arg2, &st);
10732 break;
10733 #endif
10734 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10735 #ifdef TARGET_NR_fstatat64
10736 case TARGET_NR_fstatat64:
10737 #endif
10738 #ifdef TARGET_NR_newfstatat
10739 case TARGET_NR_newfstatat:
10740 #endif
10741 if (!(p = lock_user_string(arg2)))
10742 goto efault;
10743 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10744 if (!is_error(ret))
10745 ret = host_to_target_stat64(cpu_env, arg3, &st);
10746 break;
10747 #endif
10748 #ifdef TARGET_NR_lchown
10749 case TARGET_NR_lchown:
10750 if (!(p = lock_user_string(arg1)))
10751 goto efault;
10752 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10753 unlock_user(p, arg1, 0);
10754 break;
10755 #endif
10756 #ifdef TARGET_NR_getuid
10757 case TARGET_NR_getuid:
10758 ret = get_errno(high2lowuid(getuid()));
10759 break;
10760 #endif
10761 #ifdef TARGET_NR_getgid
10762 case TARGET_NR_getgid:
10763 ret = get_errno(high2lowgid(getgid()));
10764 break;
10765 #endif
10766 #ifdef TARGET_NR_geteuid
10767 case TARGET_NR_geteuid:
10768 ret = get_errno(high2lowuid(geteuid()));
10769 break;
10770 #endif
10771 #ifdef TARGET_NR_getegid
10772 case TARGET_NR_getegid:
10773 ret = get_errno(high2lowgid(getegid()));
10774 break;
10775 #endif
10776 case TARGET_NR_setreuid:
10777 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10778 break;
10779 case TARGET_NR_setregid:
10780 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10781 break;
10782 case TARGET_NR_getgroups:
10784 int gidsetsize = arg1;
10785 target_id *target_grouplist;
10786 gid_t *grouplist;
10787 int i;
10789 grouplist = alloca(gidsetsize * sizeof(gid_t));
10790 ret = get_errno(getgroups(gidsetsize, grouplist));
10791 if (gidsetsize == 0)
10792 break;
10793 if (!is_error(ret)) {
10794 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10795 if (!target_grouplist)
10796 goto efault;
10797 for(i = 0;i < ret; i++)
10798 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10799 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10802 break;
10803 case TARGET_NR_setgroups:
10805 int gidsetsize = arg1;
10806 target_id *target_grouplist;
10807 gid_t *grouplist = NULL;
10808 int i;
10809 if (gidsetsize) {
10810 grouplist = alloca(gidsetsize * sizeof(gid_t));
10811 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10812 if (!target_grouplist) {
10813 ret = -TARGET_EFAULT;
10814 goto fail;
10816 for (i = 0; i < gidsetsize; i++) {
10817 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10819 unlock_user(target_grouplist, arg2, 0);
10821 ret = get_errno(setgroups(gidsetsize, grouplist));
10823 break;
10824 case TARGET_NR_fchown:
10825 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10826 break;
10827 #if defined(TARGET_NR_fchownat)
10828 case TARGET_NR_fchownat:
10829 if (!(p = lock_user_string(arg2)))
10830 goto efault;
10831 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10832 low2highgid(arg4), arg5));
10833 unlock_user(p, arg2, 0);
10834 break;
10835 #endif
10836 #ifdef TARGET_NR_setresuid
10837 case TARGET_NR_setresuid:
10838 ret = get_errno(sys_setresuid(low2highuid(arg1),
10839 low2highuid(arg2),
10840 low2highuid(arg3)));
10841 break;
10842 #endif
10843 #ifdef TARGET_NR_getresuid
10844 case TARGET_NR_getresuid:
10846 uid_t ruid, euid, suid;
10847 ret = get_errno(getresuid(&ruid, &euid, &suid));
10848 if (!is_error(ret)) {
10849 if (put_user_id(high2lowuid(ruid), arg1)
10850 || put_user_id(high2lowuid(euid), arg2)
10851 || put_user_id(high2lowuid(suid), arg3))
10852 goto efault;
10855 break;
10856 #endif
10857 #ifdef TARGET_NR_getresgid
10858 case TARGET_NR_setresgid:
10859 ret = get_errno(sys_setresgid(low2highgid(arg1),
10860 low2highgid(arg2),
10861 low2highgid(arg3)));
10862 break;
10863 #endif
10864 #ifdef TARGET_NR_getresgid
10865 case TARGET_NR_getresgid:
10867 gid_t rgid, egid, sgid;
10868 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10869 if (!is_error(ret)) {
10870 if (put_user_id(high2lowgid(rgid), arg1)
10871 || put_user_id(high2lowgid(egid), arg2)
10872 || put_user_id(high2lowgid(sgid), arg3))
10873 goto efault;
10876 break;
10877 #endif
10878 #ifdef TARGET_NR_chown
10879 case TARGET_NR_chown:
10880 if (!(p = lock_user_string(arg1)))
10881 goto efault;
10882 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10883 unlock_user(p, arg1, 0);
10884 break;
10885 #endif
10886 case TARGET_NR_setuid:
10887 ret = get_errno(sys_setuid(low2highuid(arg1)));
10888 break;
10889 case TARGET_NR_setgid:
10890 ret = get_errno(sys_setgid(low2highgid(arg1)));
10891 break;
10892 case TARGET_NR_setfsuid:
10893 ret = get_errno(setfsuid(arg1));
10894 break;
10895 case TARGET_NR_setfsgid:
10896 ret = get_errno(setfsgid(arg1));
10897 break;
10899 #ifdef TARGET_NR_lchown32
10900 case TARGET_NR_lchown32:
10901 if (!(p = lock_user_string(arg1)))
10902 goto efault;
10903 ret = get_errno(lchown(p, arg2, arg3));
10904 unlock_user(p, arg1, 0);
10905 break;
10906 #endif
10907 #ifdef TARGET_NR_getuid32
10908 case TARGET_NR_getuid32:
10909 ret = get_errno(getuid());
10910 break;
10911 #endif
10913 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10914 /* Alpha specific */
10915 case TARGET_NR_getxuid:
10917 uid_t euid;
10918 euid=geteuid();
10919 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
10921 ret = get_errno(getuid());
10922 break;
10923 #endif
10924 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10925 /* Alpha specific */
10926 case TARGET_NR_getxgid:
10928 uid_t egid;
10929 egid=getegid();
10930 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
10932 ret = get_errno(getgid());
10933 break;
10934 #endif
10935 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10936 /* Alpha specific */
10937 case TARGET_NR_osf_getsysinfo:
10938 ret = -TARGET_EOPNOTSUPP;
10939 switch (arg1) {
10940 case TARGET_GSI_IEEE_FP_CONTROL:
10942 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
10944 /* Copied from linux ieee_fpcr_to_swcr. */
10945 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
10946 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
10947 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
10948 | SWCR_TRAP_ENABLE_DZE
10949 | SWCR_TRAP_ENABLE_OVF);
10950 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
10951 | SWCR_TRAP_ENABLE_INE);
10952 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
10953 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
10955 if (put_user_u64 (swcr, arg2))
10956 goto efault;
10957 ret = 0;
10959 break;
10961 /* case GSI_IEEE_STATE_AT_SIGNAL:
10962 -- Not implemented in linux kernel.
10963 case GSI_UACPROC:
10964 -- Retrieves current unaligned access state; not much used.
10965 case GSI_PROC_TYPE:
10966 -- Retrieves implver information; surely not used.
10967 case GSI_GET_HWRPB:
10968 -- Grabs a copy of the HWRPB; surely not used.
10971 break;
10972 #endif
10973 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10974 /* Alpha specific */
10975 case TARGET_NR_osf_setsysinfo:
10976 ret = -TARGET_EOPNOTSUPP;
10977 switch (arg1) {
10978 case TARGET_SSI_IEEE_FP_CONTROL:
10980 uint64_t swcr, fpcr, orig_fpcr;
10982 if (get_user_u64 (swcr, arg2)) {
10983 goto efault;
10985 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10986 fpcr = orig_fpcr & FPCR_DYN_MASK;
10988 /* Copied from linux ieee_swcr_to_fpcr. */
10989 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
10990 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
10991 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
10992 | SWCR_TRAP_ENABLE_DZE
10993 | SWCR_TRAP_ENABLE_OVF)) << 48;
10994 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
10995 | SWCR_TRAP_ENABLE_INE)) << 57;
10996 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
10997 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
10999 cpu_alpha_store_fpcr(cpu_env, fpcr);
11000 ret = 0;
11002 break;
11004 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11006 uint64_t exc, fpcr, orig_fpcr;
11007 int si_code;
11009 if (get_user_u64(exc, arg2)) {
11010 goto efault;
11013 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11015 /* We only add to the exception status here. */
11016 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
11018 cpu_alpha_store_fpcr(cpu_env, fpcr);
11019 ret = 0;
11021 /* Old exceptions are not signaled. */
11022 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
11024 /* If any exceptions set by this call,
11025 and are unmasked, send a signal. */
11026 si_code = 0;
11027 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
11028 si_code = TARGET_FPE_FLTRES;
11030 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
11031 si_code = TARGET_FPE_FLTUND;
11033 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
11034 si_code = TARGET_FPE_FLTOVF;
11036 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
11037 si_code = TARGET_FPE_FLTDIV;
11039 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
11040 si_code = TARGET_FPE_FLTINV;
11042 if (si_code != 0) {
11043 target_siginfo_t info;
11044 info.si_signo = SIGFPE;
11045 info.si_errno = 0;
11046 info.si_code = si_code;
11047 info._sifields._sigfault._addr
11048 = ((CPUArchState *)cpu_env)->pc;
11049 queue_signal((CPUArchState *)cpu_env, info.si_signo,
11050 QEMU_SI_FAULT, &info);
11053 break;
11055 /* case SSI_NVPAIRS:
11056 -- Used with SSIN_UACPROC to enable unaligned accesses.
11057 case SSI_IEEE_STATE_AT_SIGNAL:
11058 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11059 -- Not implemented in linux kernel
11062 break;
11063 #endif
11064 #ifdef TARGET_NR_osf_sigprocmask
11065 /* Alpha specific. */
11066 case TARGET_NR_osf_sigprocmask:
11068 abi_ulong mask;
11069 int how;
11070 sigset_t set, oldset;
11072 switch(arg1) {
11073 case TARGET_SIG_BLOCK:
11074 how = SIG_BLOCK;
11075 break;
11076 case TARGET_SIG_UNBLOCK:
11077 how = SIG_UNBLOCK;
11078 break;
11079 case TARGET_SIG_SETMASK:
11080 how = SIG_SETMASK;
11081 break;
11082 default:
11083 ret = -TARGET_EINVAL;
11084 goto fail;
11086 mask = arg2;
11087 target_to_host_old_sigset(&set, &mask);
11088 ret = do_sigprocmask(how, &set, &oldset);
11089 if (!ret) {
11090 host_to_target_old_sigset(&mask, &oldset);
11091 ret = mask;
11094 break;
11095 #endif
11097 #ifdef TARGET_NR_getgid32
11098 case TARGET_NR_getgid32:
11099 ret = get_errno(getgid());
11100 break;
11101 #endif
11102 #ifdef TARGET_NR_geteuid32
11103 case TARGET_NR_geteuid32:
11104 ret = get_errno(geteuid());
11105 break;
11106 #endif
11107 #ifdef TARGET_NR_getegid32
11108 case TARGET_NR_getegid32:
11109 ret = get_errno(getegid());
11110 break;
11111 #endif
11112 #ifdef TARGET_NR_setreuid32
11113 case TARGET_NR_setreuid32:
11114 ret = get_errno(setreuid(arg1, arg2));
11115 break;
11116 #endif
11117 #ifdef TARGET_NR_setregid32
11118 case TARGET_NR_setregid32:
11119 ret = get_errno(setregid(arg1, arg2));
11120 break;
11121 #endif
11122 #ifdef TARGET_NR_getgroups32
11123 case TARGET_NR_getgroups32:
11125 int gidsetsize = arg1;
11126 uint32_t *target_grouplist;
11127 gid_t *grouplist;
11128 int i;
11130 grouplist = alloca(gidsetsize * sizeof(gid_t));
11131 ret = get_errno(getgroups(gidsetsize, grouplist));
11132 if (gidsetsize == 0)
11133 break;
11134 if (!is_error(ret)) {
11135 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11136 if (!target_grouplist) {
11137 ret = -TARGET_EFAULT;
11138 goto fail;
11140 for(i = 0;i < ret; i++)
11141 target_grouplist[i] = tswap32(grouplist[i]);
11142 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11145 break;
11146 #endif
11147 #ifdef TARGET_NR_setgroups32
11148 case TARGET_NR_setgroups32:
11150 int gidsetsize = arg1;
11151 uint32_t *target_grouplist;
11152 gid_t *grouplist;
11153 int i;
11155 grouplist = alloca(gidsetsize * sizeof(gid_t));
11156 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11157 if (!target_grouplist) {
11158 ret = -TARGET_EFAULT;
11159 goto fail;
11161 for(i = 0;i < gidsetsize; i++)
11162 grouplist[i] = tswap32(target_grouplist[i]);
11163 unlock_user(target_grouplist, arg2, 0);
11164 ret = get_errno(setgroups(gidsetsize, grouplist));
11166 break;
11167 #endif
11168 #ifdef TARGET_NR_fchown32
11169 case TARGET_NR_fchown32:
11170 ret = get_errno(fchown(arg1, arg2, arg3));
11171 break;
11172 #endif
11173 #ifdef TARGET_NR_setresuid32
11174 case TARGET_NR_setresuid32:
11175 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
11176 break;
11177 #endif
11178 #ifdef TARGET_NR_getresuid32
11179 case TARGET_NR_getresuid32:
11181 uid_t ruid, euid, suid;
11182 ret = get_errno(getresuid(&ruid, &euid, &suid));
11183 if (!is_error(ret)) {
11184 if (put_user_u32(ruid, arg1)
11185 || put_user_u32(euid, arg2)
11186 || put_user_u32(suid, arg3))
11187 goto efault;
11190 break;
11191 #endif
11192 #ifdef TARGET_NR_setresgid32
11193 case TARGET_NR_setresgid32:
11194 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
11195 break;
11196 #endif
11197 #ifdef TARGET_NR_getresgid32
11198 case TARGET_NR_getresgid32:
11200 gid_t rgid, egid, sgid;
11201 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11202 if (!is_error(ret)) {
11203 if (put_user_u32(rgid, arg1)
11204 || put_user_u32(egid, arg2)
11205 || put_user_u32(sgid, arg3))
11206 goto efault;
11209 break;
11210 #endif
11211 #ifdef TARGET_NR_chown32
11212 case TARGET_NR_chown32:
11213 if (!(p = lock_user_string(arg1)))
11214 goto efault;
11215 ret = get_errno(chown(p, arg2, arg3));
11216 unlock_user(p, arg1, 0);
11217 break;
11218 #endif
11219 #ifdef TARGET_NR_setuid32
11220 case TARGET_NR_setuid32:
11221 ret = get_errno(sys_setuid(arg1));
11222 break;
11223 #endif
11224 #ifdef TARGET_NR_setgid32
11225 case TARGET_NR_setgid32:
11226 ret = get_errno(sys_setgid(arg1));
11227 break;
11228 #endif
11229 #ifdef TARGET_NR_setfsuid32
11230 case TARGET_NR_setfsuid32:
11231 ret = get_errno(setfsuid(arg1));
11232 break;
11233 #endif
11234 #ifdef TARGET_NR_setfsgid32
11235 case TARGET_NR_setfsgid32:
11236 ret = get_errno(setfsgid(arg1));
11237 break;
11238 #endif
11240 case TARGET_NR_pivot_root:
11241 goto unimplemented;
11242 #ifdef TARGET_NR_mincore
11243 case TARGET_NR_mincore:
11245 void *a;
11246 ret = -TARGET_ENOMEM;
11247 a = lock_user(VERIFY_READ, arg1, arg2, 0);
11248 if (!a) {
11249 goto fail;
11251 ret = -TARGET_EFAULT;
11252 p = lock_user_string(arg3);
11253 if (!p) {
11254 goto mincore_fail;
11256 ret = get_errno(mincore(a, arg2, p));
11257 unlock_user(p, arg3, ret);
11258 mincore_fail:
11259 unlock_user(a, arg1, 0);
11261 break;
11262 #endif
11263 #ifdef TARGET_NR_arm_fadvise64_64
11264 case TARGET_NR_arm_fadvise64_64:
11265 /* arm_fadvise64_64 looks like fadvise64_64 but
11266 * with different argument order: fd, advice, offset, len
11267 * rather than the usual fd, offset, len, advice.
11268 * Note that offset and len are both 64-bit so appear as
11269 * pairs of 32-bit registers.
11271 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11272 target_offset64(arg5, arg6), arg2);
11273 ret = -host_to_target_errno(ret);
11274 break;
11275 #endif
11277 #if TARGET_ABI_BITS == 32
11279 #ifdef TARGET_NR_fadvise64_64
11280 case TARGET_NR_fadvise64_64:
11281 #if defined(TARGET_PPC)
11282 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11283 ret = arg2;
11284 arg2 = arg3;
11285 arg3 = arg4;
11286 arg4 = arg5;
11287 arg5 = arg6;
11288 arg6 = ret;
11289 #else
11290 /* 6 args: fd, offset (high, low), len (high, low), advice */
11291 if (regpairs_aligned(cpu_env)) {
11292 /* offset is in (3,4), len in (5,6) and advice in 7 */
11293 arg2 = arg3;
11294 arg3 = arg4;
11295 arg4 = arg5;
11296 arg5 = arg6;
11297 arg6 = arg7;
11299 #endif
11300 ret = -host_to_target_errno(posix_fadvise(arg1,
11301 target_offset64(arg2, arg3),
11302 target_offset64(arg4, arg5),
11303 arg6));
11304 break;
11305 #endif
11307 #ifdef TARGET_NR_fadvise64
11308 case TARGET_NR_fadvise64:
11309 /* 5 args: fd, offset (high, low), len, advice */
11310 if (regpairs_aligned(cpu_env)) {
11311 /* offset is in (3,4), len in 5 and advice in 6 */
11312 arg2 = arg3;
11313 arg3 = arg4;
11314 arg4 = arg5;
11315 arg5 = arg6;
11317 ret = -host_to_target_errno(posix_fadvise(arg1,
11318 target_offset64(arg2, arg3),
11319 arg4, arg5));
11320 break;
11321 #endif
11323 #else /* not a 32-bit ABI */
11324 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11325 #ifdef TARGET_NR_fadvise64_64
11326 case TARGET_NR_fadvise64_64:
11327 #endif
11328 #ifdef TARGET_NR_fadvise64
11329 case TARGET_NR_fadvise64:
11330 #endif
11331 #ifdef TARGET_S390X
11332 switch (arg4) {
11333 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11334 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11335 case 6: arg4 = POSIX_FADV_DONTNEED; break;
11336 case 7: arg4 = POSIX_FADV_NOREUSE; break;
11337 default: break;
11339 #endif
11340 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11341 break;
11342 #endif
11343 #endif /* end of 64-bit ABI fadvise handling */
11345 #ifdef TARGET_NR_madvise
11346 case TARGET_NR_madvise:
11347 /* A straight passthrough may not be safe because qemu sometimes
11348 turns private file-backed mappings into anonymous mappings.
11349 This will break MADV_DONTNEED.
11350 This is a hint, so ignoring and returning success is ok. */
11351 ret = get_errno(0);
11352 break;
11353 #endif
11354 #if TARGET_ABI_BITS == 32
11355 case TARGET_NR_fcntl64:
11357 int cmd;
11358 struct flock64 fl;
11359 from_flock64_fn *copyfrom = copy_from_user_flock64;
11360 to_flock64_fn *copyto = copy_to_user_flock64;
11362 #ifdef TARGET_ARM
11363 if (((CPUARMState *)cpu_env)->eabi) {
11364 copyfrom = copy_from_user_eabi_flock64;
11365 copyto = copy_to_user_eabi_flock64;
11367 #endif
11369 cmd = target_to_host_fcntl_cmd(arg2);
11370 if (cmd == -TARGET_EINVAL) {
11371 ret = cmd;
11372 break;
11375 switch(arg2) {
11376 case TARGET_F_GETLK64:
11377 ret = copyfrom(&fl, arg3);
11378 if (ret) {
11379 break;
11381 ret = get_errno(fcntl(arg1, cmd, &fl));
11382 if (ret == 0) {
11383 ret = copyto(arg3, &fl);
11385 break;
11387 case TARGET_F_SETLK64:
11388 case TARGET_F_SETLKW64:
11389 ret = copyfrom(&fl, arg3);
11390 if (ret) {
11391 break;
11393 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11394 break;
11395 default:
11396 ret = do_fcntl(arg1, arg2, arg3);
11397 break;
11399 break;
11401 #endif
11402 #ifdef TARGET_NR_cacheflush
11403 case TARGET_NR_cacheflush:
11404 /* self-modifying code is handled automatically, so nothing needed */
11405 ret = 0;
11406 break;
11407 #endif
11408 #ifdef TARGET_NR_security
11409 case TARGET_NR_security:
11410 goto unimplemented;
11411 #endif
11412 #ifdef TARGET_NR_getpagesize
11413 case TARGET_NR_getpagesize:
11414 ret = TARGET_PAGE_SIZE;
11415 break;
11416 #endif
11417 case TARGET_NR_gettid:
11418 ret = get_errno(gettid());
11419 break;
11420 #ifdef TARGET_NR_readahead
11421 case TARGET_NR_readahead:
11422 #if TARGET_ABI_BITS == 32
11423 if (regpairs_aligned(cpu_env)) {
11424 arg2 = arg3;
11425 arg3 = arg4;
11426 arg4 = arg5;
11428 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11429 #else
11430 ret = get_errno(readahead(arg1, arg2, arg3));
11431 #endif
11432 break;
11433 #endif
11434 #ifdef CONFIG_ATTR
11435 #ifdef TARGET_NR_setxattr
11436 case TARGET_NR_listxattr:
11437 case TARGET_NR_llistxattr:
11439 void *p, *b = 0;
11440 if (arg2) {
11441 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11442 if (!b) {
11443 ret = -TARGET_EFAULT;
11444 break;
11447 p = lock_user_string(arg1);
11448 if (p) {
11449 if (num == TARGET_NR_listxattr) {
11450 ret = get_errno(listxattr(p, b, arg3));
11451 } else {
11452 ret = get_errno(llistxattr(p, b, arg3));
11454 } else {
11455 ret = -TARGET_EFAULT;
11457 unlock_user(p, arg1, 0);
11458 unlock_user(b, arg2, arg3);
11459 break;
11461 case TARGET_NR_flistxattr:
11463 void *b = 0;
11464 if (arg2) {
11465 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11466 if (!b) {
11467 ret = -TARGET_EFAULT;
11468 break;
11471 ret = get_errno(flistxattr(arg1, b, arg3));
11472 unlock_user(b, arg2, arg3);
11473 break;
11475 case TARGET_NR_setxattr:
11476 case TARGET_NR_lsetxattr:
11478 void *p, *n, *v = 0;
11479 if (arg3) {
11480 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11481 if (!v) {
11482 ret = -TARGET_EFAULT;
11483 break;
11486 p = lock_user_string(arg1);
11487 n = lock_user_string(arg2);
11488 if (p && n) {
11489 if (num == TARGET_NR_setxattr) {
11490 ret = get_errno(setxattr(p, n, v, arg4, arg5));
11491 } else {
11492 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11494 } else {
11495 ret = -TARGET_EFAULT;
11497 unlock_user(p, arg1, 0);
11498 unlock_user(n, arg2, 0);
11499 unlock_user(v, arg3, 0);
11501 break;
11502 case TARGET_NR_fsetxattr:
11504 void *n, *v = 0;
11505 if (arg3) {
11506 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11507 if (!v) {
11508 ret = -TARGET_EFAULT;
11509 break;
11512 n = lock_user_string(arg2);
11513 if (n) {
11514 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11515 } else {
11516 ret = -TARGET_EFAULT;
11518 unlock_user(n, arg2, 0);
11519 unlock_user(v, arg3, 0);
11521 break;
11522 case TARGET_NR_getxattr:
11523 case TARGET_NR_lgetxattr:
11525 void *p, *n, *v = 0;
11526 if (arg3) {
11527 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11528 if (!v) {
11529 ret = -TARGET_EFAULT;
11530 break;
11533 p = lock_user_string(arg1);
11534 n = lock_user_string(arg2);
11535 if (p && n) {
11536 if (num == TARGET_NR_getxattr) {
11537 ret = get_errno(getxattr(p, n, v, arg4));
11538 } else {
11539 ret = get_errno(lgetxattr(p, n, v, arg4));
11541 } else {
11542 ret = -TARGET_EFAULT;
11544 unlock_user(p, arg1, 0);
11545 unlock_user(n, arg2, 0);
11546 unlock_user(v, arg3, arg4);
11548 break;
11549 case TARGET_NR_fgetxattr:
11551 void *n, *v = 0;
11552 if (arg3) {
11553 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11554 if (!v) {
11555 ret = -TARGET_EFAULT;
11556 break;
11559 n = lock_user_string(arg2);
11560 if (n) {
11561 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11562 } else {
11563 ret = -TARGET_EFAULT;
11565 unlock_user(n, arg2, 0);
11566 unlock_user(v, arg3, arg4);
11568 break;
11569 case TARGET_NR_removexattr:
11570 case TARGET_NR_lremovexattr:
11572 void *p, *n;
11573 p = lock_user_string(arg1);
11574 n = lock_user_string(arg2);
11575 if (p && n) {
11576 if (num == TARGET_NR_removexattr) {
11577 ret = get_errno(removexattr(p, n));
11578 } else {
11579 ret = get_errno(lremovexattr(p, n));
11581 } else {
11582 ret = -TARGET_EFAULT;
11584 unlock_user(p, arg1, 0);
11585 unlock_user(n, arg2, 0);
11587 break;
11588 case TARGET_NR_fremovexattr:
11590 void *n;
11591 n = lock_user_string(arg2);
11592 if (n) {
11593 ret = get_errno(fremovexattr(arg1, n));
11594 } else {
11595 ret = -TARGET_EFAULT;
11597 unlock_user(n, arg2, 0);
11599 break;
11600 #endif
11601 #endif /* CONFIG_ATTR */
11602 #ifdef TARGET_NR_set_thread_area
11603 case TARGET_NR_set_thread_area:
11604 #if defined(TARGET_MIPS)
11605 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11606 ret = 0;
11607 break;
11608 #elif defined(TARGET_CRIS)
11609 if (arg1 & 0xff)
11610 ret = -TARGET_EINVAL;
11611 else {
11612 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11613 ret = 0;
11615 break;
11616 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11617 ret = do_set_thread_area(cpu_env, arg1);
11618 break;
11619 #elif defined(TARGET_M68K)
11621 TaskState *ts = cpu->opaque;
11622 ts->tp_value = arg1;
11623 ret = 0;
11624 break;
11626 #else
11627 goto unimplemented_nowarn;
11628 #endif
11629 #endif
11630 #ifdef TARGET_NR_get_thread_area
11631 case TARGET_NR_get_thread_area:
11632 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11633 ret = do_get_thread_area(cpu_env, arg1);
11634 break;
11635 #elif defined(TARGET_M68K)
11637 TaskState *ts = cpu->opaque;
11638 ret = ts->tp_value;
11639 break;
11641 #else
11642 goto unimplemented_nowarn;
11643 #endif
11644 #endif
11645 #ifdef TARGET_NR_getdomainname
11646 case TARGET_NR_getdomainname:
11647 goto unimplemented_nowarn;
11648 #endif
11650 #ifdef TARGET_NR_clock_gettime
11651 case TARGET_NR_clock_gettime:
11653 struct timespec ts;
11654 ret = get_errno(clock_gettime(arg1, &ts));
11655 if (!is_error(ret)) {
11656 host_to_target_timespec(arg2, &ts);
11658 break;
11660 #endif
11661 #ifdef TARGET_NR_clock_getres
11662 case TARGET_NR_clock_getres:
11664 struct timespec ts;
11665 ret = get_errno(clock_getres(arg1, &ts));
11666 if (!is_error(ret)) {
11667 host_to_target_timespec(arg2, &ts);
11669 break;
11671 #endif
11672 #ifdef TARGET_NR_clock_nanosleep
11673 case TARGET_NR_clock_nanosleep:
11675 struct timespec ts;
11676 target_to_host_timespec(&ts, arg3);
11677 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11678 &ts, arg4 ? &ts : NULL));
11679 if (arg4)
11680 host_to_target_timespec(arg4, &ts);
11682 #if defined(TARGET_PPC)
11683 /* clock_nanosleep is odd in that it returns positive errno values.
11684 * On PPC, CR0 bit 3 should be set in such a situation. */
11685 if (ret && ret != -TARGET_ERESTARTSYS) {
11686 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11688 #endif
11689 break;
11691 #endif
11693 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11694 case TARGET_NR_set_tid_address:
11695 ret = get_errno(set_tid_address((int *)g2h(arg1)));
11696 break;
11697 #endif
11699 case TARGET_NR_tkill:
11700 ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11701 break;
11703 case TARGET_NR_tgkill:
11704 ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
11705 target_to_host_signal(arg3)));
11706 break;
11708 #ifdef TARGET_NR_set_robust_list
11709 case TARGET_NR_set_robust_list:
11710 case TARGET_NR_get_robust_list:
11711 /* The ABI for supporting robust futexes has userspace pass
11712 * the kernel a pointer to a linked list which is updated by
11713 * userspace after the syscall; the list is walked by the kernel
11714 * when the thread exits. Since the linked list in QEMU guest
11715 * memory isn't a valid linked list for the host and we have
11716 * no way to reliably intercept the thread-death event, we can't
11717 * support these. Silently return ENOSYS so that guest userspace
11718 * falls back to a non-robust futex implementation (which should
11719 * be OK except in the corner case of the guest crashing while
11720 * holding a mutex that is shared with another process via
11721 * shared memory).
11723 goto unimplemented_nowarn;
11724 #endif
11726 #if defined(TARGET_NR_utimensat)
11727 case TARGET_NR_utimensat:
11729 struct timespec *tsp, ts[2];
11730 if (!arg3) {
11731 tsp = NULL;
11732 } else {
11733 target_to_host_timespec(ts, arg3);
11734 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11735 tsp = ts;
11737 if (!arg2)
11738 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11739 else {
11740 if (!(p = lock_user_string(arg2))) {
11741 ret = -TARGET_EFAULT;
11742 goto fail;
11744 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11745 unlock_user(p, arg2, 0);
11748 break;
11749 #endif
11750 case TARGET_NR_futex:
11751 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11752 break;
11753 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11754 case TARGET_NR_inotify_init:
11755 ret = get_errno(sys_inotify_init());
11756 if (ret >= 0) {
11757 fd_trans_register(ret, &target_inotify_trans);
11759 break;
11760 #endif
11761 #ifdef CONFIG_INOTIFY1
11762 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11763 case TARGET_NR_inotify_init1:
11764 ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11765 fcntl_flags_tbl)));
11766 if (ret >= 0) {
11767 fd_trans_register(ret, &target_inotify_trans);
11769 break;
11770 #endif
11771 #endif
11772 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11773 case TARGET_NR_inotify_add_watch:
11774 p = lock_user_string(arg2);
11775 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11776 unlock_user(p, arg2, 0);
11777 break;
11778 #endif
11779 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11780 case TARGET_NR_inotify_rm_watch:
11781 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
11782 break;
11783 #endif
11785 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11786 case TARGET_NR_mq_open:
11788 struct mq_attr posix_mq_attr;
11789 struct mq_attr *pposix_mq_attr;
11790 int host_flags;
11792 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11793 pposix_mq_attr = NULL;
11794 if (arg4) {
11795 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11796 goto efault;
11798 pposix_mq_attr = &posix_mq_attr;
11800 p = lock_user_string(arg1 - 1);
11801 if (!p) {
11802 goto efault;
11804 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11805 unlock_user (p, arg1, 0);
11807 break;
11809 case TARGET_NR_mq_unlink:
11810 p = lock_user_string(arg1 - 1);
11811 if (!p) {
11812 ret = -TARGET_EFAULT;
11813 break;
11815 ret = get_errno(mq_unlink(p));
11816 unlock_user (p, arg1, 0);
11817 break;
11819 case TARGET_NR_mq_timedsend:
11821 struct timespec ts;
11823 p = lock_user (VERIFY_READ, arg2, arg3, 1);
11824 if (arg5 != 0) {
11825 target_to_host_timespec(&ts, arg5);
11826 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11827 host_to_target_timespec(arg5, &ts);
11828 } else {
11829 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11831 unlock_user (p, arg2, arg3);
11833 break;
11835 case TARGET_NR_mq_timedreceive:
11837 struct timespec ts;
11838 unsigned int prio;
11840 p = lock_user (VERIFY_READ, arg2, arg3, 1);
11841 if (arg5 != 0) {
11842 target_to_host_timespec(&ts, arg5);
11843 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11844 &prio, &ts));
11845 host_to_target_timespec(arg5, &ts);
11846 } else {
11847 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11848 &prio, NULL));
11850 unlock_user (p, arg2, arg3);
11851 if (arg4 != 0)
11852 put_user_u32(prio, arg4);
11854 break;
11856 /* Not implemented for now... */
11857 /* case TARGET_NR_mq_notify: */
11858 /* break; */
11860 case TARGET_NR_mq_getsetattr:
11862 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11863 ret = 0;
11864 if (arg3 != 0) {
11865 ret = mq_getattr(arg1, &posix_mq_attr_out);
11866 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11868 if (arg2 != 0) {
11869 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11870 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
11874 break;
11875 #endif
11877 #ifdef CONFIG_SPLICE
11878 #ifdef TARGET_NR_tee
11879 case TARGET_NR_tee:
11881 ret = get_errno(tee(arg1,arg2,arg3,arg4));
11883 break;
11884 #endif
11885 #ifdef TARGET_NR_splice
11886 case TARGET_NR_splice:
11888 loff_t loff_in, loff_out;
11889 loff_t *ploff_in = NULL, *ploff_out = NULL;
11890 if (arg2) {
11891 if (get_user_u64(loff_in, arg2)) {
11892 goto efault;
11894 ploff_in = &loff_in;
11896 if (arg4) {
11897 if (get_user_u64(loff_out, arg4)) {
11898 goto efault;
11900 ploff_out = &loff_out;
11902 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11903 if (arg2) {
11904 if (put_user_u64(loff_in, arg2)) {
11905 goto efault;
11908 if (arg4) {
11909 if (put_user_u64(loff_out, arg4)) {
11910 goto efault;
11914 break;
11915 #endif
11916 #ifdef TARGET_NR_vmsplice
11917 case TARGET_NR_vmsplice:
11919 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11920 if (vec != NULL) {
11921 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11922 unlock_iovec(vec, arg2, arg3, 0);
11923 } else {
11924 ret = -host_to_target_errno(errno);
11927 break;
11928 #endif
11929 #endif /* CONFIG_SPLICE */
11930 #ifdef CONFIG_EVENTFD
11931 #if defined(TARGET_NR_eventfd)
11932 case TARGET_NR_eventfd:
11933 ret = get_errno(eventfd(arg1, 0));
11934 if (ret >= 0) {
11935 fd_trans_register(ret, &target_eventfd_trans);
11937 break;
11938 #endif
11939 #if defined(TARGET_NR_eventfd2)
11940 case TARGET_NR_eventfd2:
11942 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11943 if (arg2 & TARGET_O_NONBLOCK) {
11944 host_flags |= O_NONBLOCK;
11946 if (arg2 & TARGET_O_CLOEXEC) {
11947 host_flags |= O_CLOEXEC;
11949 ret = get_errno(eventfd(arg1, host_flags));
11950 if (ret >= 0) {
11951 fd_trans_register(ret, &target_eventfd_trans);
11953 break;
11955 #endif
11956 #endif /* CONFIG_EVENTFD */
11957 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11958 case TARGET_NR_fallocate:
11959 #if TARGET_ABI_BITS == 32
11960 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11961 target_offset64(arg5, arg6)));
11962 #else
11963 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11964 #endif
11965 break;
11966 #endif
11967 #if defined(CONFIG_SYNC_FILE_RANGE)
11968 #if defined(TARGET_NR_sync_file_range)
11969 case TARGET_NR_sync_file_range:
11970 #if TARGET_ABI_BITS == 32
11971 #if defined(TARGET_MIPS)
11972 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11973 target_offset64(arg5, arg6), arg7));
11974 #else
11975 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11976 target_offset64(arg4, arg5), arg6));
11977 #endif /* !TARGET_MIPS */
11978 #else
11979 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11980 #endif
11981 break;
11982 #endif
11983 #if defined(TARGET_NR_sync_file_range2)
11984 case TARGET_NR_sync_file_range2:
11985 /* This is like sync_file_range but the arguments are reordered */
11986 #if TARGET_ABI_BITS == 32
11987 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11988 target_offset64(arg5, arg6), arg2));
11989 #else
11990 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11991 #endif
11992 break;
11993 #endif
11994 #endif
11995 #if defined(TARGET_NR_signalfd4)
11996 case TARGET_NR_signalfd4:
11997 ret = do_signalfd4(arg1, arg2, arg4);
11998 break;
11999 #endif
12000 #if defined(TARGET_NR_signalfd)
12001 case TARGET_NR_signalfd:
12002 ret = do_signalfd4(arg1, arg2, 0);
12003 break;
12004 #endif
12005 #if defined(CONFIG_EPOLL)
12006 #if defined(TARGET_NR_epoll_create)
12007 case TARGET_NR_epoll_create:
12008 ret = get_errno(epoll_create(arg1));
12009 break;
12010 #endif
12011 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12012 case TARGET_NR_epoll_create1:
12013 ret = get_errno(epoll_create1(arg1));
12014 break;
12015 #endif
12016 #if defined(TARGET_NR_epoll_ctl)
12017 case TARGET_NR_epoll_ctl:
12019 struct epoll_event ep;
12020 struct epoll_event *epp = 0;
12021 if (arg4) {
12022 struct target_epoll_event *target_ep;
12023 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12024 goto efault;
12026 ep.events = tswap32(target_ep->events);
12027 /* The epoll_data_t union is just opaque data to the kernel,
12028 * so we transfer all 64 bits across and need not worry what
12029 * actual data type it is.
12031 ep.data.u64 = tswap64(target_ep->data.u64);
12032 unlock_user_struct(target_ep, arg4, 0);
12033 epp = &ep;
12035 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12036 break;
12038 #endif
12040 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12041 #if defined(TARGET_NR_epoll_wait)
12042 case TARGET_NR_epoll_wait:
12043 #endif
12044 #if defined(TARGET_NR_epoll_pwait)
12045 case TARGET_NR_epoll_pwait:
12046 #endif
12048 struct target_epoll_event *target_ep;
12049 struct epoll_event *ep;
12050 int epfd = arg1;
12051 int maxevents = arg3;
12052 int timeout = arg4;
12054 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12055 ret = -TARGET_EINVAL;
12056 break;
12059 target_ep = lock_user(VERIFY_WRITE, arg2,
12060 maxevents * sizeof(struct target_epoll_event), 1);
12061 if (!target_ep) {
12062 goto efault;
12065 ep = g_try_new(struct epoll_event, maxevents);
12066 if (!ep) {
12067 unlock_user(target_ep, arg2, 0);
12068 ret = -TARGET_ENOMEM;
12069 break;
12072 switch (num) {
12073 #if defined(TARGET_NR_epoll_pwait)
12074 case TARGET_NR_epoll_pwait:
12076 target_sigset_t *target_set;
12077 sigset_t _set, *set = &_set;
12079 if (arg5) {
12080 if (arg6 != sizeof(target_sigset_t)) {
12081 ret = -TARGET_EINVAL;
12082 break;
12085 target_set = lock_user(VERIFY_READ, arg5,
12086 sizeof(target_sigset_t), 1);
12087 if (!target_set) {
12088 ret = -TARGET_EFAULT;
12089 break;
12091 target_to_host_sigset(set, target_set);
12092 unlock_user(target_set, arg5, 0);
12093 } else {
12094 set = NULL;
12097 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12098 set, SIGSET_T_SIZE));
12099 break;
12101 #endif
12102 #if defined(TARGET_NR_epoll_wait)
12103 case TARGET_NR_epoll_wait:
12104 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12105 NULL, 0));
12106 break;
12107 #endif
12108 default:
12109 ret = -TARGET_ENOSYS;
12111 if (!is_error(ret)) {
12112 int i;
12113 for (i = 0; i < ret; i++) {
12114 target_ep[i].events = tswap32(ep[i].events);
12115 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12117 unlock_user(target_ep, arg2,
12118 ret * sizeof(struct target_epoll_event));
12119 } else {
12120 unlock_user(target_ep, arg2, 0);
12122 g_free(ep);
12123 break;
12125 #endif
12126 #endif
12127 #ifdef TARGET_NR_prlimit64
12128 case TARGET_NR_prlimit64:
12130 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12131 struct target_rlimit64 *target_rnew, *target_rold;
12132 struct host_rlimit64 rnew, rold, *rnewp = 0;
12133 int resource = target_to_host_resource(arg2);
12134 if (arg3) {
12135 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12136 goto efault;
12138 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12139 rnew.rlim_max = tswap64(target_rnew->rlim_max);
12140 unlock_user_struct(target_rnew, arg3, 0);
12141 rnewp = &rnew;
12144 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12145 if (!is_error(ret) && arg4) {
12146 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12147 goto efault;
12149 target_rold->rlim_cur = tswap64(rold.rlim_cur);
12150 target_rold->rlim_max = tswap64(rold.rlim_max);
12151 unlock_user_struct(target_rold, arg4, 1);
12153 break;
12155 #endif
12156 #ifdef TARGET_NR_gethostname
12157 case TARGET_NR_gethostname:
12159 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12160 if (name) {
12161 ret = get_errno(gethostname(name, arg2));
12162 unlock_user(name, arg1, arg2);
12163 } else {
12164 ret = -TARGET_EFAULT;
12166 break;
12168 #endif
12169 #ifdef TARGET_NR_atomic_cmpxchg_32
12170 case TARGET_NR_atomic_cmpxchg_32:
12172 /* should use start_exclusive from main.c */
12173 abi_ulong mem_value;
12174 if (get_user_u32(mem_value, arg6)) {
12175 target_siginfo_t info;
12176 info.si_signo = SIGSEGV;
12177 info.si_errno = 0;
12178 info.si_code = TARGET_SEGV_MAPERR;
12179 info._sifields._sigfault._addr = arg6;
12180 queue_signal((CPUArchState *)cpu_env, info.si_signo,
12181 QEMU_SI_FAULT, &info);
12182 ret = 0xdeadbeef;
12185 if (mem_value == arg2)
12186 put_user_u32(arg1, arg6);
12187 ret = mem_value;
12188 break;
12190 #endif
12191 #ifdef TARGET_NR_atomic_barrier
12192 case TARGET_NR_atomic_barrier:
12194 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
12195 ret = 0;
12196 break;
12198 #endif
12200 #ifdef TARGET_NR_timer_create
12201 case TARGET_NR_timer_create:
12203 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12205 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12207 int clkid = arg1;
12208 int timer_index = next_free_host_timer();
12210 if (timer_index < 0) {
12211 ret = -TARGET_EAGAIN;
12212 } else {
12213 timer_t *phtimer = g_posix_timers + timer_index;
12215 if (arg2) {
12216 phost_sevp = &host_sevp;
12217 ret = target_to_host_sigevent(phost_sevp, arg2);
12218 if (ret != 0) {
12219 break;
12223 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12224 if (ret) {
12225 phtimer = NULL;
12226 } else {
12227 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12228 goto efault;
12232 break;
12234 #endif
12236 #ifdef TARGET_NR_timer_settime
12237 case TARGET_NR_timer_settime:
12239 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12240 * struct itimerspec * old_value */
12241 target_timer_t timerid = get_timer_id(arg1);
12243 if (timerid < 0) {
12244 ret = timerid;
12245 } else if (arg3 == 0) {
12246 ret = -TARGET_EINVAL;
12247 } else {
12248 timer_t htimer = g_posix_timers[timerid];
12249 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12251 if (target_to_host_itimerspec(&hspec_new, arg3)) {
12252 goto efault;
12254 ret = get_errno(
12255 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12256 if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12257 goto efault;
12260 break;
12262 #endif
12264 #ifdef TARGET_NR_timer_gettime
12265 case TARGET_NR_timer_gettime:
12267 /* args: timer_t timerid, struct itimerspec *curr_value */
12268 target_timer_t timerid = get_timer_id(arg1);
12270 if (timerid < 0) {
12271 ret = timerid;
12272 } else if (!arg2) {
12273 ret = -TARGET_EFAULT;
12274 } else {
12275 timer_t htimer = g_posix_timers[timerid];
12276 struct itimerspec hspec;
12277 ret = get_errno(timer_gettime(htimer, &hspec));
12279 if (host_to_target_itimerspec(arg2, &hspec)) {
12280 ret = -TARGET_EFAULT;
12283 break;
12285 #endif
12287 #ifdef TARGET_NR_timer_getoverrun
12288 case TARGET_NR_timer_getoverrun:
12290 /* args: timer_t timerid */
12291 target_timer_t timerid = get_timer_id(arg1);
12293 if (timerid < 0) {
12294 ret = timerid;
12295 } else {
12296 timer_t htimer = g_posix_timers[timerid];
12297 ret = get_errno(timer_getoverrun(htimer));
12299 fd_trans_unregister(ret);
12300 break;
12302 #endif
12304 #ifdef TARGET_NR_timer_delete
12305 case TARGET_NR_timer_delete:
12307 /* args: timer_t timerid */
12308 target_timer_t timerid = get_timer_id(arg1);
12310 if (timerid < 0) {
12311 ret = timerid;
12312 } else {
12313 timer_t htimer = g_posix_timers[timerid];
12314 ret = get_errno(timer_delete(htimer));
12315 g_posix_timers[timerid] = 0;
12317 break;
12319 #endif
12321 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12322 case TARGET_NR_timerfd_create:
12323 ret = get_errno(timerfd_create(arg1,
12324 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12325 break;
12326 #endif
12328 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12329 case TARGET_NR_timerfd_gettime:
12331 struct itimerspec its_curr;
12333 ret = get_errno(timerfd_gettime(arg1, &its_curr));
12335 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12336 goto efault;
12339 break;
12340 #endif
12342 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12343 case TARGET_NR_timerfd_settime:
12345 struct itimerspec its_new, its_old, *p_new;
12347 if (arg3) {
12348 if (target_to_host_itimerspec(&its_new, arg3)) {
12349 goto efault;
12351 p_new = &its_new;
12352 } else {
12353 p_new = NULL;
12356 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12358 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12359 goto efault;
12362 break;
12363 #endif
12365 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12366 case TARGET_NR_ioprio_get:
12367 ret = get_errno(ioprio_get(arg1, arg2));
12368 break;
12369 #endif
12371 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12372 case TARGET_NR_ioprio_set:
12373 ret = get_errno(ioprio_set(arg1, arg2, arg3));
12374 break;
12375 #endif
12377 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12378 case TARGET_NR_setns:
12379 ret = get_errno(setns(arg1, arg2));
12380 break;
12381 #endif
12382 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12383 case TARGET_NR_unshare:
12384 ret = get_errno(unshare(arg1));
12385 break;
12386 #endif
12387 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12388 case TARGET_NR_kcmp:
12389 ret = get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12390 break;
12391 #endif
12393 default:
12394 unimplemented:
12395 gemu_log("qemu: Unsupported syscall: %d\n", num);
12396 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
12397 unimplemented_nowarn:
12398 #endif
12399 ret = -TARGET_ENOSYS;
12400 break;
12402 fail:
12403 #ifdef DEBUG
12404 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
12405 #endif
12406 if(do_strace)
12407 print_syscall_ret(num, ret);
12408 trace_guest_user_syscall_ret(cpu, num, ret);
12409 return ret;
12410 efault:
12411 ret = -TARGET_EFAULT;
12412 goto fail;