s390x/tcg: wire up pci instructions
[qemu/ar7.git] / linux-user / syscall.c
blobdf1edf0cd34935aa92cae45899e30f69820be2bb
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/uio.h>
42 #include <poll.h>
43 #include <sys/times.h>
44 #include <sys/shm.h>
45 #include <sys/sem.h>
46 #include <sys/statfs.h>
47 #include <time.h>
48 #include <utime.h>
49 #include <sys/sysinfo.h>
50 #include <sys/signalfd.h>
51 //#include <sys/user.h>
52 #include <netinet/ip.h>
53 #include <netinet/tcp.h>
54 #include <linux/wireless.h>
55 #include <linux/icmp.h>
56 #include <linux/icmpv6.h>
57 #include <linux/errqueue.h>
58 #include <linux/random.h>
59 #include "qemu-common.h"
60 #ifdef CONFIG_TIMERFD
61 #include <sys/timerfd.h>
62 #endif
63 #ifdef TARGET_GPROF
64 #include <sys/gmon.h>
65 #endif
66 #ifdef CONFIG_EVENTFD
67 #include <sys/eventfd.h>
68 #endif
69 #ifdef CONFIG_EPOLL
70 #include <sys/epoll.h>
71 #endif
72 #ifdef CONFIG_ATTR
73 #include "qemu/xattr.h"
74 #endif
75 #ifdef CONFIG_SENDFILE
76 #include <sys/sendfile.h>
77 #endif
79 #define termios host_termios
80 #define winsize host_winsize
81 #define termio host_termio
82 #define sgttyb host_sgttyb /* same as target */
83 #define tchars host_tchars /* same as target */
84 #define ltchars host_ltchars /* same as target */
86 #include <linux/termios.h>
87 #include <linux/unistd.h>
88 #include <linux/cdrom.h>
89 #include <linux/hdreg.h>
90 #include <linux/soundcard.h>
91 #include <linux/kd.h>
92 #include <linux/mtio.h>
93 #include <linux/fs.h>
94 #if defined(CONFIG_FIEMAP)
95 #include <linux/fiemap.h>
96 #endif
97 #include <linux/fb.h>
98 #include <linux/vt.h>
99 #include <linux/dm-ioctl.h>
100 #include <linux/reboot.h>
101 #include <linux/route.h>
102 #include <linux/filter.h>
103 #include <linux/blkpg.h>
104 #include <netpacket/packet.h>
105 #include <linux/netlink.h>
106 #ifdef CONFIG_RTNETLINK
107 #include <linux/rtnetlink.h>
108 #include <linux/if_bridge.h>
109 #endif
110 #include <linux/audit.h>
111 #include "linux_loop.h"
112 #include "uname.h"
114 #include "qemu.h"
116 #ifndef CLONE_IO
117 #define CLONE_IO 0x80000000 /* Clone io context */
118 #endif
120 /* We can't directly call the host clone syscall, because this will
121 * badly confuse libc (breaking mutexes, for example). So we must
122 * divide clone flags into:
123 * * flag combinations that look like pthread_create()
124 * * flag combinations that look like fork()
125 * * flags we can implement within QEMU itself
126 * * flags we can't support and will return an error for
128 /* For thread creation, all these flags must be present; for
129 * fork, none must be present.
131 #define CLONE_THREAD_FLAGS \
132 (CLONE_VM | CLONE_FS | CLONE_FILES | \
133 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
135 /* These flags are ignored:
136 * CLONE_DETACHED is now ignored by the kernel;
137 * CLONE_IO is just an optimisation hint to the I/O scheduler
139 #define CLONE_IGNORED_FLAGS \
140 (CLONE_DETACHED | CLONE_IO)
142 /* Flags for fork which we can implement within QEMU itself */
143 #define CLONE_OPTIONAL_FORK_FLAGS \
144 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
145 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
147 /* Flags for thread creation which we can implement within QEMU itself */
148 #define CLONE_OPTIONAL_THREAD_FLAGS \
149 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
150 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
152 #define CLONE_INVALID_FORK_FLAGS \
153 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
155 #define CLONE_INVALID_THREAD_FLAGS \
156 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
157 CLONE_IGNORED_FLAGS))
159 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
160 * have almost all been allocated. We cannot support any of
161 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
162 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
163 * The checks against the invalid thread masks above will catch these.
164 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
167 //#define DEBUG
168 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
169 * once. This exercises the codepaths for restart.
171 //#define DEBUG_ERESTARTSYS
173 //#include <linux/msdos_fs.h>
174 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
175 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
177 #undef _syscall0
178 #undef _syscall1
179 #undef _syscall2
180 #undef _syscall3
181 #undef _syscall4
182 #undef _syscall5
183 #undef _syscall6
185 #define _syscall0(type,name) \
186 static type name (void) \
188 return syscall(__NR_##name); \
191 #define _syscall1(type,name,type1,arg1) \
192 static type name (type1 arg1) \
194 return syscall(__NR_##name, arg1); \
197 #define _syscall2(type,name,type1,arg1,type2,arg2) \
198 static type name (type1 arg1,type2 arg2) \
200 return syscall(__NR_##name, arg1, arg2); \
203 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
204 static type name (type1 arg1,type2 arg2,type3 arg3) \
206 return syscall(__NR_##name, arg1, arg2, arg3); \
209 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
210 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
212 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
215 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
216 type5,arg5) \
217 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
219 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
223 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
224 type5,arg5,type6,arg6) \
225 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
226 type6 arg6) \
228 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
232 #define __NR_sys_uname __NR_uname
233 #define __NR_sys_getcwd1 __NR_getcwd
234 #define __NR_sys_getdents __NR_getdents
235 #define __NR_sys_getdents64 __NR_getdents64
236 #define __NR_sys_getpriority __NR_getpriority
237 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
238 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
239 #define __NR_sys_syslog __NR_syslog
240 #define __NR_sys_futex __NR_futex
241 #define __NR_sys_inotify_init __NR_inotify_init
242 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
243 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
245 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
246 #define __NR__llseek __NR_lseek
247 #endif
249 /* Newer kernel ports have llseek() instead of _llseek() */
250 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
251 #define TARGET_NR__llseek TARGET_NR_llseek
252 #endif
254 #ifdef __NR_gettid
255 _syscall0(int, gettid)
256 #else
257 /* This is a replacement for the host gettid() and must return a host
258 errno. */
259 static int gettid(void) {
260 return -ENOSYS;
262 #endif
263 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
264 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
265 #endif
266 #if !defined(__NR_getdents) || \
267 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
268 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
269 #endif
270 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
271 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
272 loff_t *, res, uint, wh);
273 #endif
274 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
275 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
276 siginfo_t *, uinfo)
277 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
278 #ifdef __NR_exit_group
279 _syscall1(int,exit_group,int,error_code)
280 #endif
281 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
282 _syscall1(int,set_tid_address,int *,tidptr)
283 #endif
284 #if defined(TARGET_NR_futex) && defined(__NR_futex)
285 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
286 const struct timespec *,timeout,int *,uaddr2,int,val3)
287 #endif
288 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
289 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
290 unsigned long *, user_mask_ptr);
291 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
292 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
293 unsigned long *, user_mask_ptr);
294 #define __NR_sys_getcpu __NR_getcpu
295 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
296 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
297 void *, arg);
298 _syscall2(int, capget, struct __user_cap_header_struct *, header,
299 struct __user_cap_data_struct *, data);
300 _syscall2(int, capset, struct __user_cap_header_struct *, header,
301 struct __user_cap_data_struct *, data);
302 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
303 _syscall2(int, ioprio_get, int, which, int, who)
304 #endif
305 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
306 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
307 #endif
308 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
309 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
310 #endif
312 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
313 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
314 unsigned long, idx1, unsigned long, idx2)
315 #endif
317 static bitmask_transtbl fcntl_flags_tbl[] = {
318 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
319 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
320 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
321 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
322 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
323 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
324 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
325 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
326 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
327 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
328 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
329 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
330 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
331 #if defined(O_DIRECT)
332 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
333 #endif
334 #if defined(O_NOATIME)
335 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
336 #endif
337 #if defined(O_CLOEXEC)
338 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
339 #endif
340 #if defined(O_PATH)
341 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
342 #endif
343 #if defined(O_TMPFILE)
344 { TARGET_O_TMPFILE, TARGET_O_TMPFILE, O_TMPFILE, O_TMPFILE },
345 #endif
346 /* Don't terminate the list prematurely on 64-bit host+guest. */
347 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
348 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
349 #endif
350 { 0, 0, 0, 0 }
353 enum {
354 QEMU_IFLA_BR_UNSPEC,
355 QEMU_IFLA_BR_FORWARD_DELAY,
356 QEMU_IFLA_BR_HELLO_TIME,
357 QEMU_IFLA_BR_MAX_AGE,
358 QEMU_IFLA_BR_AGEING_TIME,
359 QEMU_IFLA_BR_STP_STATE,
360 QEMU_IFLA_BR_PRIORITY,
361 QEMU_IFLA_BR_VLAN_FILTERING,
362 QEMU_IFLA_BR_VLAN_PROTOCOL,
363 QEMU_IFLA_BR_GROUP_FWD_MASK,
364 QEMU_IFLA_BR_ROOT_ID,
365 QEMU_IFLA_BR_BRIDGE_ID,
366 QEMU_IFLA_BR_ROOT_PORT,
367 QEMU_IFLA_BR_ROOT_PATH_COST,
368 QEMU_IFLA_BR_TOPOLOGY_CHANGE,
369 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
370 QEMU_IFLA_BR_HELLO_TIMER,
371 QEMU_IFLA_BR_TCN_TIMER,
372 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
373 QEMU_IFLA_BR_GC_TIMER,
374 QEMU_IFLA_BR_GROUP_ADDR,
375 QEMU_IFLA_BR_FDB_FLUSH,
376 QEMU_IFLA_BR_MCAST_ROUTER,
377 QEMU_IFLA_BR_MCAST_SNOOPING,
378 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
379 QEMU_IFLA_BR_MCAST_QUERIER,
380 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
381 QEMU_IFLA_BR_MCAST_HASH_MAX,
382 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
383 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
384 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
385 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
386 QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
387 QEMU_IFLA_BR_MCAST_QUERY_INTVL,
388 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
389 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
390 QEMU_IFLA_BR_NF_CALL_IPTABLES,
391 QEMU_IFLA_BR_NF_CALL_IP6TABLES,
392 QEMU_IFLA_BR_NF_CALL_ARPTABLES,
393 QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
394 QEMU_IFLA_BR_PAD,
395 QEMU_IFLA_BR_VLAN_STATS_ENABLED,
396 QEMU_IFLA_BR_MCAST_STATS_ENABLED,
397 QEMU___IFLA_BR_MAX,
400 enum {
401 QEMU_IFLA_UNSPEC,
402 QEMU_IFLA_ADDRESS,
403 QEMU_IFLA_BROADCAST,
404 QEMU_IFLA_IFNAME,
405 QEMU_IFLA_MTU,
406 QEMU_IFLA_LINK,
407 QEMU_IFLA_QDISC,
408 QEMU_IFLA_STATS,
409 QEMU_IFLA_COST,
410 QEMU_IFLA_PRIORITY,
411 QEMU_IFLA_MASTER,
412 QEMU_IFLA_WIRELESS,
413 QEMU_IFLA_PROTINFO,
414 QEMU_IFLA_TXQLEN,
415 QEMU_IFLA_MAP,
416 QEMU_IFLA_WEIGHT,
417 QEMU_IFLA_OPERSTATE,
418 QEMU_IFLA_LINKMODE,
419 QEMU_IFLA_LINKINFO,
420 QEMU_IFLA_NET_NS_PID,
421 QEMU_IFLA_IFALIAS,
422 QEMU_IFLA_NUM_VF,
423 QEMU_IFLA_VFINFO_LIST,
424 QEMU_IFLA_STATS64,
425 QEMU_IFLA_VF_PORTS,
426 QEMU_IFLA_PORT_SELF,
427 QEMU_IFLA_AF_SPEC,
428 QEMU_IFLA_GROUP,
429 QEMU_IFLA_NET_NS_FD,
430 QEMU_IFLA_EXT_MASK,
431 QEMU_IFLA_PROMISCUITY,
432 QEMU_IFLA_NUM_TX_QUEUES,
433 QEMU_IFLA_NUM_RX_QUEUES,
434 QEMU_IFLA_CARRIER,
435 QEMU_IFLA_PHYS_PORT_ID,
436 QEMU_IFLA_CARRIER_CHANGES,
437 QEMU_IFLA_PHYS_SWITCH_ID,
438 QEMU_IFLA_LINK_NETNSID,
439 QEMU_IFLA_PHYS_PORT_NAME,
440 QEMU_IFLA_PROTO_DOWN,
441 QEMU_IFLA_GSO_MAX_SEGS,
442 QEMU_IFLA_GSO_MAX_SIZE,
443 QEMU_IFLA_PAD,
444 QEMU_IFLA_XDP,
445 QEMU___IFLA_MAX
448 enum {
449 QEMU_IFLA_BRPORT_UNSPEC,
450 QEMU_IFLA_BRPORT_STATE,
451 QEMU_IFLA_BRPORT_PRIORITY,
452 QEMU_IFLA_BRPORT_COST,
453 QEMU_IFLA_BRPORT_MODE,
454 QEMU_IFLA_BRPORT_GUARD,
455 QEMU_IFLA_BRPORT_PROTECT,
456 QEMU_IFLA_BRPORT_FAST_LEAVE,
457 QEMU_IFLA_BRPORT_LEARNING,
458 QEMU_IFLA_BRPORT_UNICAST_FLOOD,
459 QEMU_IFLA_BRPORT_PROXYARP,
460 QEMU_IFLA_BRPORT_LEARNING_SYNC,
461 QEMU_IFLA_BRPORT_PROXYARP_WIFI,
462 QEMU_IFLA_BRPORT_ROOT_ID,
463 QEMU_IFLA_BRPORT_BRIDGE_ID,
464 QEMU_IFLA_BRPORT_DESIGNATED_PORT,
465 QEMU_IFLA_BRPORT_DESIGNATED_COST,
466 QEMU_IFLA_BRPORT_ID,
467 QEMU_IFLA_BRPORT_NO,
468 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
469 QEMU_IFLA_BRPORT_CONFIG_PENDING,
470 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
471 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
472 QEMU_IFLA_BRPORT_HOLD_TIMER,
473 QEMU_IFLA_BRPORT_FLUSH,
474 QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
475 QEMU_IFLA_BRPORT_PAD,
476 QEMU___IFLA_BRPORT_MAX
479 enum {
480 QEMU_IFLA_INFO_UNSPEC,
481 QEMU_IFLA_INFO_KIND,
482 QEMU_IFLA_INFO_DATA,
483 QEMU_IFLA_INFO_XSTATS,
484 QEMU_IFLA_INFO_SLAVE_KIND,
485 QEMU_IFLA_INFO_SLAVE_DATA,
486 QEMU___IFLA_INFO_MAX,
489 enum {
490 QEMU_IFLA_INET_UNSPEC,
491 QEMU_IFLA_INET_CONF,
492 QEMU___IFLA_INET_MAX,
495 enum {
496 QEMU_IFLA_INET6_UNSPEC,
497 QEMU_IFLA_INET6_FLAGS,
498 QEMU_IFLA_INET6_CONF,
499 QEMU_IFLA_INET6_STATS,
500 QEMU_IFLA_INET6_MCAST,
501 QEMU_IFLA_INET6_CACHEINFO,
502 QEMU_IFLA_INET6_ICMP6STATS,
503 QEMU_IFLA_INET6_TOKEN,
504 QEMU_IFLA_INET6_ADDR_GEN_MODE,
505 QEMU___IFLA_INET6_MAX
508 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
509 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
510 typedef struct TargetFdTrans {
511 TargetFdDataFunc host_to_target_data;
512 TargetFdDataFunc target_to_host_data;
513 TargetFdAddrFunc target_to_host_addr;
514 } TargetFdTrans;
516 static TargetFdTrans **target_fd_trans;
518 static unsigned int target_fd_max;
520 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
522 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
523 return target_fd_trans[fd]->target_to_host_data;
525 return NULL;
528 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
530 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
531 return target_fd_trans[fd]->host_to_target_data;
533 return NULL;
536 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
538 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
539 return target_fd_trans[fd]->target_to_host_addr;
541 return NULL;
544 static void fd_trans_register(int fd, TargetFdTrans *trans)
546 unsigned int oldmax;
548 if (fd >= target_fd_max) {
549 oldmax = target_fd_max;
550 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
551 target_fd_trans = g_renew(TargetFdTrans *,
552 target_fd_trans, target_fd_max);
553 memset((void *)(target_fd_trans + oldmax), 0,
554 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
556 target_fd_trans[fd] = trans;
559 static void fd_trans_unregister(int fd)
561 if (fd >= 0 && fd < target_fd_max) {
562 target_fd_trans[fd] = NULL;
566 static void fd_trans_dup(int oldfd, int newfd)
568 fd_trans_unregister(newfd);
569 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
570 fd_trans_register(newfd, target_fd_trans[oldfd]);
/* getcwd(2)-style helper: on success return the length of the path
 * INCLUDING the terminating NUL; on failure return -1 (errno has been
 * set by getcwd()). */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return -1;
    }
    return strlen(buf) + 1;
}
583 #ifdef TARGET_NR_utimensat
584 #if defined(__NR_utimensat)
585 #define __NR_sys_utimensat __NR_utimensat
586 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
587 const struct timespec *,tsp,int,flags)
588 #else
/* Fallback when the host lacks the utimensat syscall: always fail
 * with ENOSYS, mimicking a kernel without the call. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
595 #endif
596 #endif /* TARGET_NR_utimensat */
598 #ifdef TARGET_NR_renameat2
599 #if defined(__NR_renameat2)
600 #define __NR_sys_renameat2 __NR_renameat2
601 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
602 const char *, new, unsigned int, flags)
603 #else
/* Fallback when the host lacks renameat2: a zero flags value is just
 * renameat(); any flag request cannot be honoured here, so report
 * ENOSYS as a missing syscall would. */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags != 0) {
        errno = ENOSYS;
        return -1;
    }
    return renameat(oldfd, old, newfd, new);
}
613 #endif
614 #endif /* TARGET_NR_renameat2 */
616 #ifdef CONFIG_INOTIFY
617 #include <sys/inotify.h>
619 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper so the syscall dispatch code can use a uniform
 * sys_* name for inotify_init(). */
static int sys_inotify_init(void)
{
    return inotify_init();
}
624 #endif
625 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
626 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
628 return (inotify_add_watch(fd, pathname, mask));
630 #endif
631 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
632 static int sys_inotify_rm_watch(int fd, int32_t wd)
634 return (inotify_rm_watch(fd, wd));
636 #endif
637 #ifdef CONFIG_INOTIFY1
638 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
639 static int sys_inotify_init1(int flags)
641 return (inotify_init1(flags));
643 #endif
644 #endif
645 #else
646 /* Userspace can usually survive runtime without inotify */
647 #undef TARGET_NR_inotify_init
648 #undef TARGET_NR_inotify_init1
649 #undef TARGET_NR_inotify_add_watch
650 #undef TARGET_NR_inotify_rm_watch
651 #endif /* CONFIG_INOTIFY */
653 #if defined(TARGET_NR_prlimit64)
654 #ifndef __NR_prlimit64
655 # define __NR_prlimit64 -1
656 #endif
657 #define __NR_sys_prlimit64 __NR_prlimit64
658 /* The glibc rlimit structure may not be that used by the underlying syscall */
659 struct host_rlimit64 {
660 uint64_t rlim_cur;
661 uint64_t rlim_max;
663 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
664 const struct host_rlimit64 *, new_limit,
665 struct host_rlimit64 *, old_limit)
666 #endif
669 #if defined(TARGET_NR_timer_create)
670 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
671 static timer_t g_posix_timers[32] = { 0, } ;
673 static inline int next_free_host_timer(void)
675 int k ;
676 /* FIXME: Does finding the next free slot require a lock? */
677 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
678 if (g_posix_timers[k] == 0) {
679 g_posix_timers[k] = (timer_t) 1;
680 return k;
683 return -1;
685 #endif
687 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
688 #ifdef TARGET_ARM
689 static inline int regpairs_aligned(void *cpu_env, int num)
691 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
693 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
694 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
695 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
696 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
697 * of registers which translates to the same as ARM/MIPS, because we start with
698 * r3 as arg1 */
699 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
700 #elif defined(TARGET_SH4)
701 /* SH4 doesn't align register pairs, except for p{read,write}64 */
702 static inline int regpairs_aligned(void *cpu_env, int num)
704 switch (num) {
705 case TARGET_NR_pread64:
706 case TARGET_NR_pwrite64:
707 return 1;
709 default:
710 return 0;
713 #else
714 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
715 #endif
717 #define ERRNO_TABLE_SIZE 1200
719 /* target_to_host_errno_table[] is initialized from
720 * host_to_target_errno_table[] in syscall_init(). */
721 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
725 * This list is the union of errno values overridden in asm-<arch>/errno.h
726 * minus the errnos that are not actually generic to all archs.
728 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
729 [EAGAIN] = TARGET_EAGAIN,
730 [EIDRM] = TARGET_EIDRM,
731 [ECHRNG] = TARGET_ECHRNG,
732 [EL2NSYNC] = TARGET_EL2NSYNC,
733 [EL3HLT] = TARGET_EL3HLT,
734 [EL3RST] = TARGET_EL3RST,
735 [ELNRNG] = TARGET_ELNRNG,
736 [EUNATCH] = TARGET_EUNATCH,
737 [ENOCSI] = TARGET_ENOCSI,
738 [EL2HLT] = TARGET_EL2HLT,
739 [EDEADLK] = TARGET_EDEADLK,
740 [ENOLCK] = TARGET_ENOLCK,
741 [EBADE] = TARGET_EBADE,
742 [EBADR] = TARGET_EBADR,
743 [EXFULL] = TARGET_EXFULL,
744 [ENOANO] = TARGET_ENOANO,
745 [EBADRQC] = TARGET_EBADRQC,
746 [EBADSLT] = TARGET_EBADSLT,
747 [EBFONT] = TARGET_EBFONT,
748 [ENOSTR] = TARGET_ENOSTR,
749 [ENODATA] = TARGET_ENODATA,
750 [ETIME] = TARGET_ETIME,
751 [ENOSR] = TARGET_ENOSR,
752 [ENONET] = TARGET_ENONET,
753 [ENOPKG] = TARGET_ENOPKG,
754 [EREMOTE] = TARGET_EREMOTE,
755 [ENOLINK] = TARGET_ENOLINK,
756 [EADV] = TARGET_EADV,
757 [ESRMNT] = TARGET_ESRMNT,
758 [ECOMM] = TARGET_ECOMM,
759 [EPROTO] = TARGET_EPROTO,
760 [EDOTDOT] = TARGET_EDOTDOT,
761 [EMULTIHOP] = TARGET_EMULTIHOP,
762 [EBADMSG] = TARGET_EBADMSG,
763 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
764 [EOVERFLOW] = TARGET_EOVERFLOW,
765 [ENOTUNIQ] = TARGET_ENOTUNIQ,
766 [EBADFD] = TARGET_EBADFD,
767 [EREMCHG] = TARGET_EREMCHG,
768 [ELIBACC] = TARGET_ELIBACC,
769 [ELIBBAD] = TARGET_ELIBBAD,
770 [ELIBSCN] = TARGET_ELIBSCN,
771 [ELIBMAX] = TARGET_ELIBMAX,
772 [ELIBEXEC] = TARGET_ELIBEXEC,
773 [EILSEQ] = TARGET_EILSEQ,
774 [ENOSYS] = TARGET_ENOSYS,
775 [ELOOP] = TARGET_ELOOP,
776 [ERESTART] = TARGET_ERESTART,
777 [ESTRPIPE] = TARGET_ESTRPIPE,
778 [ENOTEMPTY] = TARGET_ENOTEMPTY,
779 [EUSERS] = TARGET_EUSERS,
780 [ENOTSOCK] = TARGET_ENOTSOCK,
781 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
782 [EMSGSIZE] = TARGET_EMSGSIZE,
783 [EPROTOTYPE] = TARGET_EPROTOTYPE,
784 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
785 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
786 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
787 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
788 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
789 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
790 [EADDRINUSE] = TARGET_EADDRINUSE,
791 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
792 [ENETDOWN] = TARGET_ENETDOWN,
793 [ENETUNREACH] = TARGET_ENETUNREACH,
794 [ENETRESET] = TARGET_ENETRESET,
795 [ECONNABORTED] = TARGET_ECONNABORTED,
796 [ECONNRESET] = TARGET_ECONNRESET,
797 [ENOBUFS] = TARGET_ENOBUFS,
798 [EISCONN] = TARGET_EISCONN,
799 [ENOTCONN] = TARGET_ENOTCONN,
800 [EUCLEAN] = TARGET_EUCLEAN,
801 [ENOTNAM] = TARGET_ENOTNAM,
802 [ENAVAIL] = TARGET_ENAVAIL,
803 [EISNAM] = TARGET_EISNAM,
804 [EREMOTEIO] = TARGET_EREMOTEIO,
805 [EDQUOT] = TARGET_EDQUOT,
806 [ESHUTDOWN] = TARGET_ESHUTDOWN,
807 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
808 [ETIMEDOUT] = TARGET_ETIMEDOUT,
809 [ECONNREFUSED] = TARGET_ECONNREFUSED,
810 [EHOSTDOWN] = TARGET_EHOSTDOWN,
811 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
812 [EALREADY] = TARGET_EALREADY,
813 [EINPROGRESS] = TARGET_EINPROGRESS,
814 [ESTALE] = TARGET_ESTALE,
815 [ECANCELED] = TARGET_ECANCELED,
816 [ENOMEDIUM] = TARGET_ENOMEDIUM,
817 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
818 #ifdef ENOKEY
819 [ENOKEY] = TARGET_ENOKEY,
820 #endif
821 #ifdef EKEYEXPIRED
822 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
823 #endif
824 #ifdef EKEYREVOKED
825 [EKEYREVOKED] = TARGET_EKEYREVOKED,
826 #endif
827 #ifdef EKEYREJECTED
828 [EKEYREJECTED] = TARGET_EKEYREJECTED,
829 #endif
830 #ifdef EOWNERDEAD
831 [EOWNERDEAD] = TARGET_EOWNERDEAD,
832 #endif
833 #ifdef ENOTRECOVERABLE
834 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
835 #endif
836 #ifdef ENOMSG
837 [ENOMSG] = TARGET_ENOMSG,
838 #endif
839 #ifdef ERKFILL
840 [ERFKILL] = TARGET_ERFKILL,
841 #endif
842 #ifdef EHWPOISON
843 [EHWPOISON] = TARGET_EHWPOISON,
844 #endif
847 static inline int host_to_target_errno(int err)
849 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
850 host_to_target_errno_table[err]) {
851 return host_to_target_errno_table[err];
853 return err;
856 static inline int target_to_host_errno(int err)
858 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
859 target_to_host_errno_table[err]) {
860 return target_to_host_errno_table[err];
862 return err;
865 static inline abi_long get_errno(abi_long ret)
867 if (ret == -1)
868 return -host_to_target_errno(errno);
869 else
870 return ret;
873 static inline int is_error(abi_long ret)
875 return (abi_ulong)ret >= (abi_ulong)(-4096);
878 const char *target_strerror(int err)
880 if (err == TARGET_ERESTARTSYS) {
881 return "To be restarted";
883 if (err == TARGET_QEMU_ESIGRETURN) {
884 return "Successful exit from sigreturn";
887 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
888 return NULL;
890 return strerror(target_to_host_errno(err));
893 #define safe_syscall0(type, name) \
894 static type safe_##name(void) \
896 return safe_syscall(__NR_##name); \
899 #define safe_syscall1(type, name, type1, arg1) \
900 static type safe_##name(type1 arg1) \
902 return safe_syscall(__NR_##name, arg1); \
905 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
906 static type safe_##name(type1 arg1, type2 arg2) \
908 return safe_syscall(__NR_##name, arg1, arg2); \
911 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
912 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
914 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
917 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
918 type4, arg4) \
919 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
921 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
924 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
925 type4, arg4, type5, arg5) \
926 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
927 type5 arg5) \
929 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
932 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
933 type4, arg4, type5, arg5, type6, arg6) \
934 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
935 type5 arg5, type6 arg6) \
937 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
940 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
941 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
942 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
943 int, flags, mode_t, mode)
944 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
945 struct rusage *, rusage)
946 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
947 int, options, struct rusage *, rusage)
948 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
949 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
950 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
951 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
952 struct timespec *, tsp, const sigset_t *, sigmask,
953 size_t, sigsetsize)
954 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
955 int, maxevents, int, timeout, const sigset_t *, sigmask,
956 size_t, sigsetsize)
957 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
958 const struct timespec *,timeout,int *,uaddr2,int,val3)
959 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
960 safe_syscall2(int, kill, pid_t, pid, int, sig)
961 safe_syscall2(int, tkill, int, tid, int, sig)
962 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
963 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
964 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
965 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
966 unsigned long, pos_l, unsigned long, pos_h)
967 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
968 unsigned long, pos_l, unsigned long, pos_h)
969 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
970 socklen_t, addrlen)
971 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
972 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
973 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
974 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
975 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
976 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
977 safe_syscall2(int, flock, int, fd, int, operation)
978 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
979 const struct timespec *, uts, size_t, sigsetsize)
980 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
981 int, flags)
982 safe_syscall2(int, nanosleep, const struct timespec *, req,
983 struct timespec *, rem)
984 #ifdef TARGET_NR_clock_nanosleep
985 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
986 const struct timespec *, req, struct timespec *, rem)
987 #endif
/* Hosts with native SysV IPC syscalls get direct wrappers... */
988 #ifdef __NR_msgsnd
989 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
990 int, flags)
991 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
992 long, msgtype, int, flags)
993 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
994 unsigned, nsops, const struct timespec *, timeout)
995 #else
996 /* This host kernel architecture uses a single ipc syscall; fake up
997 * wrappers for the sub-operations to hide this implementation detail.
998 * Annoyingly we can't include linux/ipc.h to get the constant definitions
999 * for the call parameter because some structs in there conflict with the
1000 * sys/ipc.h ones. So we just define them here, and rely on them being
1001 * the same for all host architectures.
 */
1003 #define Q_SEMTIMEDOP 4
1004 #define Q_MSGSND 11
1005 #define Q_MSGRCV 12
/* Encode the ipc() multiplexer "call" argument: version in the high bits. */
1006 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
1008 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
1009 void *, ptr, long, fifth)
/* msgsnd via the multiplexed ipc syscall. */
1010 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
1012 return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
/* msgrcv via ipc.  NOTE(review): call version 1 is used so msgtype can be
 * passed directly in the fifth slot -- matches the kernel ipc() ABI. */
1014 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
1016 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
/* semtimedop via ipc; the timeout pointer rides in the fifth slot. */
1018 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
1019 const struct timespec *timeout)
1021 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
1022 (long)timeout);
1024 #endif
/* POSIX message queue wrappers, only when both target and host have them. */
1025 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1026 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
1027 size_t, len, unsigned, prio, const struct timespec *, timeout)
1028 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
1029 size_t, len, unsigned *, prio, const struct timespec *, timeout)
1030 #endif
1031 /* We do ioctl like this rather than via safe_syscall3 to preserve the
1032 * "third argument might be integer or pointer or not present" behaviour of
1033 * the libc function.
 */
1035 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
1036 /* Similarly for fcntl. Note that callers must always:
1037 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
1038 * use the flock64 struct rather than unsuffixed flock
1039 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
1041 #ifdef __NR_fcntl64
1042 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
1043 #else
1044 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
1045 #endif
1047 static inline int host_to_target_sock_type(int host_type)
1049 int target_type;
1051 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
1052 case SOCK_DGRAM:
1053 target_type = TARGET_SOCK_DGRAM;
1054 break;
1055 case SOCK_STREAM:
1056 target_type = TARGET_SOCK_STREAM;
1057 break;
1058 default:
1059 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
1060 break;
1063 #if defined(SOCK_CLOEXEC)
1064 if (host_type & SOCK_CLOEXEC) {
1065 target_type |= TARGET_SOCK_CLOEXEC;
1067 #endif
1069 #if defined(SOCK_NONBLOCK)
1070 if (host_type & SOCK_NONBLOCK) {
1071 target_type |= TARGET_SOCK_NONBLOCK;
1073 #endif
1075 return target_type;
/* Guest heap state for the brk() emulation (used by do_brk() below). */
1078 static abi_ulong target_brk;
/* Initial break value; do_brk() refuses to move below this. */
1079 static abi_ulong target_original_brk;
/* First host-page-aligned address past the space already reserved. */
1080 static abi_ulong brk_page;
1082 void target_set_brk(abi_ulong new_brk)
1084 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
1085 brk_page = HOST_PAGE_ALIGN(target_brk);
/* Debug tracing for do_brk(); swap the two definitions to get stderr output. */
1088 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
1089 #define DEBUGF_BRK(message, args...)
1091 /* do_brk() must return target values and target errnos. */
1092 abi_long do_brk(abi_ulong new_brk)
1094 abi_long mapped_addr;
1095 abi_ulong new_alloc_size;
1097 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
/* brk(0) is a query: report the current break unchanged. */
1099 if (!new_brk) {
1100 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
1101 return target_brk;
/* Never move below the initial break; report the current value instead. */
1103 if (new_brk < target_original_brk) {
1104 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
1105 target_brk);
1106 return target_brk;
1109 /* If the new brk is less than the highest page reserved to the
1110 * target heap allocation, set it and we're almost done... */
1111 if (new_brk <= brk_page) {
1112 /* Heap contents are initialized to zero, as for anonymous
1113 * mapped pages. */
1114 if (new_brk > target_brk) {
1115 memset(g2h(target_brk), 0, new_brk - target_brk);
1117 target_brk = new_brk;
1118 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
1119 return target_brk;
1122 /* We need to allocate more memory after the brk... Note that
1123 * we don't use MAP_FIXED because that will map over the top of
1124 * any existing mapping (like the one with the host libc or qemu
1125 * itself); instead we treat "mapped but at wrong address" as
1126 * a failure and unmap again.
 */
1128 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
1129 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
1130 PROT_READ|PROT_WRITE,
1131 MAP_ANON|MAP_PRIVATE, 0, 0));
1133 if (mapped_addr == brk_page) {
1134 /* Heap contents are initialized to zero, as for anonymous
1135 * mapped pages. Technically the new pages are already
1136 * initialized to zero since they *are* anonymous mapped
1137 * pages, however we have to take care with the contents that
1138 * come from the remaining part of the previous page: it may
1139 * contains garbage data due to a previous heap usage (grown
1140 * then shrunken). */
1141 memset(g2h(target_brk), 0, brk_page - target_brk);
1143 target_brk = new_brk;
1144 brk_page = HOST_PAGE_ALIGN(target_brk);
1145 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
1146 target_brk);
1147 return target_brk;
1148 } else if (mapped_addr != -1) {
1149 /* Mapped but at wrong address, meaning there wasn't actually
1150 * enough space for this brk.
 */
1152 target_munmap(mapped_addr, new_alloc_size);
1153 mapped_addr = -1;
1154 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
1156 else {
1157 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
1160 #if defined(TARGET_ALPHA)
1161 /* We (partially) emulate OSF/1 on Alpha, which requires we
1162 return a proper errno, not an unchanged brk value. */
1163 return -TARGET_ENOMEM;
1164 #endif
1165 /* For everything else, return the previous break. */
1166 return target_brk;
/* Convert a guest fd_set (an array of abi_ulong bit-words at
 * target_fds_addr, covering at least n bits) into the host fd_set *fds.
 * Returns 0 on success or -TARGET_EFAULT if guest memory can't be locked.
 */
1169 static inline abi_long copy_from_user_fdset(fd_set *fds,
1170 abi_ulong target_fds_addr,
1171 int n)
1173 int i, nw, j, k;
1174 abi_ulong b, *target_fds;
/* Number of abi_ulong words needed to hold n bits. */
1176 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1177 if (!(target_fds = lock_user(VERIFY_READ,
1178 target_fds_addr,
1179 sizeof(abi_ulong) * nw,
1180 1)))
1181 return -TARGET_EFAULT;
1183 FD_ZERO(fds);
1184 k = 0;
1185 for (i = 0; i < nw; i++) {
1186 /* grab the abi_ulong */
1187 __get_user(b, &target_fds[i]);
1188 for (j = 0; j < TARGET_ABI_BITS; j++) {
1189 /* check the bit inside the abi_ulong */
1190 if ((b >> j) & 1)
1191 FD_SET(k, fds);
1192 k++;
/* NOTE(review): k runs up to nw*TARGET_ABI_BITS, i.e. possibly past n;
 * this relies on the guest leaving bits >= n clear -- confirm. */
1196 unlock_user(target_fds, target_fds_addr, 0);
1198 return 0;
1201 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1202 abi_ulong target_fds_addr,
1203 int n)
1205 if (target_fds_addr) {
1206 if (copy_from_user_fdset(fds, target_fds_addr, n))
1207 return -TARGET_EFAULT;
1208 *fds_ptr = fds;
1209 } else {
1210 *fds_ptr = NULL;
1212 return 0;
/* Convert the host fd_set *fds back into the guest's bit-word layout at
 * target_fds_addr (n bits, rounded up to whole abi_ulong words).
 * Returns 0 on success or -TARGET_EFAULT.
 */
1215 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1216 const fd_set *fds,
1217 int n)
1219 int i, nw, j, k;
1220 abi_long v;
1221 abi_ulong *target_fds;
1223 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1224 if (!(target_fds = lock_user(VERIFY_WRITE,
1225 target_fds_addr,
1226 sizeof(abi_ulong) * nw,
1227 0)))
1228 return -TARGET_EFAULT;
1230 k = 0;
1231 for (i = 0; i < nw; i++) {
1232 v = 0;
/* Pack TARGET_ABI_BITS host fd bits into one guest word. */
1233 for (j = 0; j < TARGET_ABI_BITS; j++) {
1234 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1235 k++;
1237 __put_user(v, &target_fds[i]);
1240 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1242 return 0;
/* Host scheduler-tick frequency used to scale clock_t values. */
1245 #if defined(__alpha__)
1246 #define HOST_HZ 1024
1247 #else
1248 #define HOST_HZ 100
1249 #endif
/* Rescale a host clock_t tick count to the guest's ticks-per-second. */
1251 static inline abi_long host_to_target_clock_t(long ticks)
1253 #if HOST_HZ == TARGET_HZ
1254 return ticks;
1255 #else
/* 64-bit intermediate avoids overflow during the rescale. */
1256 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1257 #endif
/* Copy a host struct rusage into the guest's struct at target_addr,
 * byte-swapping every field.  Returns 0 or -TARGET_EFAULT.
 */
1260 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1261 const struct rusage *rusage)
1263 struct target_rusage *target_rusage;
1265 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1266 return -TARGET_EFAULT;
1267 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1268 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1269 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1270 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1271 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1272 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1273 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1274 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1275 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1276 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1277 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1278 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1279 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1280 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1281 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1282 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1283 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1284 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1285 unlock_user_struct(target_rusage, target_addr, 1);
1287 return 0;
1290 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1292 abi_ulong target_rlim_swap;
1293 rlim_t result;
1295 target_rlim_swap = tswapal(target_rlim);
1296 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1297 return RLIM_INFINITY;
1299 result = target_rlim_swap;
1300 if (target_rlim_swap != (rlim_t)result)
1301 return RLIM_INFINITY;
1303 return result;
1306 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1308 abi_ulong target_rlim_swap;
1309 abi_ulong result;
1311 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1312 target_rlim_swap = TARGET_RLIM_INFINITY;
1313 else
1314 target_rlim_swap = rlim;
1315 result = tswapal(target_rlim_swap);
1317 return result;
1320 static inline int target_to_host_resource(int code)
1322 switch (code) {
1323 case TARGET_RLIMIT_AS:
1324 return RLIMIT_AS;
1325 case TARGET_RLIMIT_CORE:
1326 return RLIMIT_CORE;
1327 case TARGET_RLIMIT_CPU:
1328 return RLIMIT_CPU;
1329 case TARGET_RLIMIT_DATA:
1330 return RLIMIT_DATA;
1331 case TARGET_RLIMIT_FSIZE:
1332 return RLIMIT_FSIZE;
1333 case TARGET_RLIMIT_LOCKS:
1334 return RLIMIT_LOCKS;
1335 case TARGET_RLIMIT_MEMLOCK:
1336 return RLIMIT_MEMLOCK;
1337 case TARGET_RLIMIT_MSGQUEUE:
1338 return RLIMIT_MSGQUEUE;
1339 case TARGET_RLIMIT_NICE:
1340 return RLIMIT_NICE;
1341 case TARGET_RLIMIT_NOFILE:
1342 return RLIMIT_NOFILE;
1343 case TARGET_RLIMIT_NPROC:
1344 return RLIMIT_NPROC;
1345 case TARGET_RLIMIT_RSS:
1346 return RLIMIT_RSS;
1347 case TARGET_RLIMIT_RTPRIO:
1348 return RLIMIT_RTPRIO;
1349 case TARGET_RLIMIT_SIGPENDING:
1350 return RLIMIT_SIGPENDING;
1351 case TARGET_RLIMIT_STACK:
1352 return RLIMIT_STACK;
1353 default:
1354 return code;
/* Read a guest struct timeval into *tv.  Returns 0 or -TARGET_EFAULT. */
1358 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1359 abi_ulong target_tv_addr)
1361 struct target_timeval *target_tv;
1363 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1364 return -TARGET_EFAULT;
1366 __get_user(tv->tv_sec, &target_tv->tv_sec);
1367 __get_user(tv->tv_usec, &target_tv->tv_usec);
1369 unlock_user_struct(target_tv, target_tv_addr, 0);
1371 return 0;
/* Write *tv into a guest struct timeval.  Returns 0 or -TARGET_EFAULT. */
1374 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1375 const struct timeval *tv)
1377 struct target_timeval *target_tv;
1379 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1380 return -TARGET_EFAULT;
1382 __put_user(tv->tv_sec, &target_tv->tv_sec);
1383 __put_user(tv->tv_usec, &target_tv->tv_usec);
1385 unlock_user_struct(target_tv, target_tv_addr, 1);
1387 return 0;
/* Read a guest struct timezone into *tz.  Returns 0 or -TARGET_EFAULT. */
1390 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1391 abi_ulong target_tz_addr)
1393 struct target_timezone *target_tz;
1395 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1396 return -TARGET_EFAULT;
1399 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1400 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1402 unlock_user_struct(target_tz, target_tz_addr, 0);
1404 return 0;
1407 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1408 #include <mqueue.h>
/* Read a guest struct mq_attr into *attr.  Returns 0 or -TARGET_EFAULT. */
1410 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1411 abi_ulong target_mq_attr_addr)
1413 struct target_mq_attr *target_mq_attr;
1415 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1416 target_mq_attr_addr, 1))
1417 return -TARGET_EFAULT;
1419 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1420 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1421 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1422 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1424 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1426 return 0;
/* Write *attr into a guest struct mq_attr.  Returns 0 or -TARGET_EFAULT. */
1429 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1430 const struct mq_attr *attr)
1432 struct target_mq_attr *target_mq_attr;
1434 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1435 target_mq_attr_addr, 0))
1436 return -TARGET_EFAULT;
1438 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1439 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1440 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1441 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1443 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1445 return 0;
1447 #endif
1449 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1450 /* do_select() must return target values and target errnos. */
1451 static abi_long do_select(int n,
1452 abi_ulong rfd_addr, abi_ulong wfd_addr,
1453 abi_ulong efd_addr, abi_ulong target_tv_addr)
1455 fd_set rfds, wfds, efds;
1456 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1457 struct timeval tv;
1458 struct timespec ts, *ts_ptr;
1459 abi_long ret;
/* Pull the three guest fd sets in; a zero address becomes NULL. */
1461 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1462 if (ret) {
1463 return ret;
1465 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1466 if (ret) {
1467 return ret;
1469 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1470 if (ret) {
1471 return ret;
/* select()'s timeval is converted to the timespec pselect6 expects. */
1474 if (target_tv_addr) {
1475 if (copy_from_user_timeval(&tv, target_tv_addr))
1476 return -TARGET_EFAULT;
1477 ts.tv_sec = tv.tv_sec;
1478 ts.tv_nsec = tv.tv_usec * 1000;
1479 ts_ptr = &ts;
1480 } else {
1481 ts_ptr = NULL;
1484 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1485 ts_ptr, NULL));
/* On success write the modified sets and remaining timeout back. */
1487 if (!is_error(ret)) {
1488 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1489 return -TARGET_EFAULT;
1490 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1491 return -TARGET_EFAULT;
1492 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1493 return -TARGET_EFAULT;
1495 if (target_tv_addr) {
1496 tv.tv_sec = ts.tv_sec;
1497 tv.tv_usec = ts.tv_nsec / 1000;
1498 if (copy_to_user_timeval(target_tv_addr, &tv)) {
1499 return -TARGET_EFAULT;
1504 return ret;
1507 #if defined(TARGET_WANT_OLD_SYS_SELECT)
/* Old-style select: all five arguments packed into one guest struct.
 * Unpack (with byte-swapping) and forward to do_select().
 */
1508 static abi_long do_old_select(abi_ulong arg1)
1510 struct target_sel_arg_struct *sel;
1511 abi_ulong inp, outp, exp, tvp;
1512 long nsel;
1514 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1515 return -TARGET_EFAULT;
1518 nsel = tswapal(sel->n);
1519 inp = tswapal(sel->inp);
1520 outp = tswapal(sel->outp);
1521 exp = tswapal(sel->exp);
1522 tvp = tswapal(sel->tvp);
1524 unlock_user_struct(sel, arg1, 0);
1526 return do_select(nsel, inp, outp, exp, tvp);
1528 #endif
1529 #endif
/* Thin pipe2() wrapper; -ENOSYS when the host libc lacks pipe2. */
1531 static abi_long do_pipe2(int host_pipe[], int flags)
1533 #ifdef CONFIG_PIPE2
1534 return pipe2(host_pipe, flags);
1535 #else
1536 return -ENOSYS;
1537 #endif
/* Create a pipe for the guest.  flags != 0 selects pipe2(); is_pipe2
 * distinguishes the pipe2 syscall from plain pipe, which on several
 * targets returns the second fd via a register instead of memory.
 */
1540 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1541 int flags, int is_pipe2)
1543 int host_pipe[2];
1544 abi_long ret;
1545 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1547 if (is_error(ret))
1548 return get_errno(ret);
1550 /* Several targets have special calling conventions for the original
1551 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1552 if (!is_pipe2) {
1553 #if defined(TARGET_ALPHA)
1554 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1555 return host_pipe[0];
1556 #elif defined(TARGET_MIPS)
1557 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1558 return host_pipe[0];
1559 #elif defined(TARGET_SH4)
1560 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1561 return host_pipe[0];
1562 #elif defined(TARGET_SPARC)
1563 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1564 return host_pipe[0];
1565 #endif
/* Default: store both fds into the guest's int array. */
1568 if (put_user_s32(host_pipe[0], pipedes)
1569 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1570 return -TARGET_EFAULT;
1571 return get_errno(ret);
/* Convert a guest ip_mreq/ip_mreqn (len selects which) into the host
 * struct ip_mreqn.  The ifindex is only present in the mreqn form.
 * Returns 0 or -TARGET_EFAULT.
 */
1574 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1575 abi_ulong target_addr,
1576 socklen_t len)
1578 struct target_ip_mreqn *target_smreqn;
1580 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1581 if (!target_smreqn)
1582 return -TARGET_EFAULT;
/* IP addresses stay in network byte order; no swapping needed. */
1583 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1584 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1585 if (len == sizeof(struct target_ip_mreqn))
1586 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1587 unlock_user(target_smreqn, target_addr, 0);
1589 return 0;
/* Convert a guest sockaddr at target_addr into the host *addr buffer,
 * with AF_UNIX path-length fixups and byte-swaps for AF_NETLINK and
 * AF_PACKET fields.  fd-specific translators registered via
 * fd_trans_target_to_host_addr() take precedence.
 * Returns 0 or -TARGET_EFAULT.
 */
1592 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1593 abi_ulong target_addr,
1594 socklen_t len)
1596 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1597 sa_family_t sa_family;
1598 struct target_sockaddr *target_saddr;
1600 if (fd_trans_target_to_host_addr(fd)) {
1601 return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1604 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1605 if (!target_saddr)
1606 return -TARGET_EFAULT;
1608 sa_family = tswap16(target_saddr->sa_family);
1610 /* Oops. The caller might send a incomplete sun_path; sun_path
1611 * must be terminated by \0 (see the manual page), but
1612 * unfortunately it is quite common to specify sockaddr_un
1613 * length as "strlen(x->sun_path)" while it should be
1614 * "strlen(...) + 1". We'll fix that here if needed.
1615 * Linux kernel has a similar feature.
 */
1618 if (sa_family == AF_UNIX) {
1619 if (len < unix_maxlen && len > 0) {
1620 char *cp = (char*)target_saddr;
1622 if ( cp[len-1] && !cp[len] )
1623 len++;
1625 if (len > unix_maxlen)
1626 len = unix_maxlen;
1629 memcpy(addr, target_saddr, len);
1630 addr->sa_family = sa_family;
/* Netlink and packet sockaddrs carry host-endian integer fields. */
1631 if (sa_family == AF_NETLINK) {
1632 struct sockaddr_nl *nladdr;
1634 nladdr = (struct sockaddr_nl *)addr;
1635 nladdr->nl_pid = tswap32(nladdr->nl_pid);
1636 nladdr->nl_groups = tswap32(nladdr->nl_groups);
1637 } else if (sa_family == AF_PACKET) {
1638 struct target_sockaddr_ll *lladdr;
1640 lladdr = (struct target_sockaddr_ll *)addr;
1641 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1642 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1644 unlock_user(target_saddr, target_addr, 0);
1646 return 0;
/* Copy a host sockaddr back into guest memory, swapping the family
 * field and the integer fields of netlink/packet/IPv6 addresses.
 * A zero len is a no-op.  Returns 0 or -TARGET_EFAULT.
 */
1649 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1650 struct sockaddr *addr,
1651 socklen_t len)
1653 struct target_sockaddr *target_saddr;
1655 if (len == 0) {
1656 return 0;
1658 assert(addr);
1660 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1661 if (!target_saddr)
1662 return -TARGET_EFAULT;
1663 memcpy(target_saddr, addr, len);
/* Only swap sa_family if the (possibly truncated) copy includes it. */
1664 if (len >= offsetof(struct target_sockaddr, sa_family) +
1665 sizeof(target_saddr->sa_family)) {
1666 target_saddr->sa_family = tswap16(addr->sa_family);
1668 if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1669 struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1670 target_nl->nl_pid = tswap32(target_nl->nl_pid);
1671 target_nl->nl_groups = tswap32(target_nl->nl_groups);
/* NOTE(review): the AF_PACKET branch views the guest buffer through the
 * *host* struct sockaddr_ll (the reverse direction above uses
 * target_sockaddr_ll) -- verify the guest/host layouts match. */
1672 } else if (addr->sa_family == AF_PACKET) {
1673 struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1674 target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1675 target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1676 } else if (addr->sa_family == AF_INET6 &&
1677 len >= sizeof(struct target_sockaddr_in6)) {
1678 struct target_sockaddr_in6 *target_in6 =
1679 (struct target_sockaddr_in6 *)target_saddr;
1680 target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1682 unlock_user(target_saddr, target_addr, len);
1684 return 0;
/* Convert guest ancillary data (cmsgs) from target_msgh's control
 * buffer into the host representation already allocated in msgh.
 * SCM_RIGHTS fd arrays and SCM_CREDENTIALS are converted field by
 * field; other payload types are byte-copied with a warning.
 * Returns 0 or -TARGET_EFAULT.
 */
1687 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1688 struct target_msghdr *target_msgh)
1690 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1691 abi_long msg_controllen;
1692 abi_ulong target_cmsg_addr;
1693 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1694 socklen_t space = 0;
1696 msg_controllen = tswapal(target_msgh->msg_controllen);
1697 if (msg_controllen < sizeof (struct target_cmsghdr))
1698 goto the_end;
1699 target_cmsg_addr = tswapal(target_msgh->msg_control);
1700 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1701 target_cmsg_start = target_cmsg;
1702 if (!target_cmsg)
1703 return -TARGET_EFAULT;
1705 while (cmsg && target_cmsg) {
1706 void *data = CMSG_DATA(cmsg);
1707 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1709 int len = tswapal(target_cmsg->cmsg_len)
1710 - sizeof(struct target_cmsghdr);
1712 space += CMSG_SPACE(len);
1713 if (space > msgh->msg_controllen) {
1714 space -= CMSG_SPACE(len);
1715 /* This is a QEMU bug, since we allocated the payload
1716 * area ourselves (unlike overflow in host-to-target
1717 * conversion, which is just the guest giving us a buffer
1718 * that's too small). It can't happen for the payload types
1719 * we currently support; if it becomes an issue in future
1720 * we would need to improve our allocation strategy to
1721 * something more intelligent than "twice the size of the
1722 * target buffer we're reading from".
 */
1724 gemu_log("Host cmsg overflow\n");
1725 break;
1728 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1729 cmsg->cmsg_level = SOL_SOCKET;
1730 } else {
1731 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1733 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1734 cmsg->cmsg_len = CMSG_LEN(len);
1736 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1737 int *fd = (int *)data;
1738 int *target_fd = (int *)target_data;
1739 int i, numfds = len / sizeof(int);
1741 for (i = 0; i < numfds; i++) {
1742 __get_user(fd[i], target_fd + i);
1744 } else if (cmsg->cmsg_level == SOL_SOCKET
1745 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1746 struct ucred *cred = (struct ucred *)data;
1747 struct target_ucred *target_cred =
1748 (struct target_ucred *)target_data;
1750 __get_user(cred->pid, &target_cred->pid);
1751 __get_user(cred->uid, &target_cred->uid);
1752 __get_user(cred->gid, &target_cred->gid);
1753 } else {
1754 gemu_log("Unsupported ancillary data: %d/%d\n",
1755 cmsg->cmsg_level, cmsg->cmsg_type);
1756 memcpy(data, target_data, len);
1759 cmsg = CMSG_NXTHDR(msgh, cmsg);
1760 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1761 target_cmsg_start);
1763 unlock_user(target_cmsg, target_cmsg_addr, 0);
1764 the_end:
1765 msgh->msg_controllen = space;
1766 return 0;
1769 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1770 struct msghdr *msgh)
1772 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1773 abi_long msg_controllen;
1774 abi_ulong target_cmsg_addr;
1775 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1776 socklen_t space = 0;
1778 msg_controllen = tswapal(target_msgh->msg_controllen);
1779 if (msg_controllen < sizeof (struct target_cmsghdr))
1780 goto the_end;
1781 target_cmsg_addr = tswapal(target_msgh->msg_control);
1782 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1783 target_cmsg_start = target_cmsg;
1784 if (!target_cmsg)
1785 return -TARGET_EFAULT;
1787 while (cmsg && target_cmsg) {
1788 void *data = CMSG_DATA(cmsg);
1789 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1791 int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1792 int tgt_len, tgt_space;
1794 /* We never copy a half-header but may copy half-data;
1795 * this is Linux's behaviour in put_cmsg(). Note that
1796 * truncation here is a guest problem (which we report
1797 * to the guest via the CTRUNC bit), unlike truncation
1798 * in target_to_host_cmsg, which is a QEMU bug.
1800 if (msg_controllen < sizeof(struct target_cmsghdr)) {
1801 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1802 break;
1805 if (cmsg->cmsg_level == SOL_SOCKET) {
1806 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1807 } else {
1808 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1810 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1812 /* Payload types which need a different size of payload on
1813 * the target must adjust tgt_len here.
1815 switch (cmsg->cmsg_level) {
1816 case SOL_SOCKET:
1817 switch (cmsg->cmsg_type) {
1818 case SO_TIMESTAMP:
1819 tgt_len = sizeof(struct target_timeval);
1820 break;
1821 default:
1822 break;
1824 default:
1825 tgt_len = len;
1826 break;
1829 if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1830 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1831 tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1834 /* We must now copy-and-convert len bytes of payload
1835 * into tgt_len bytes of destination space. Bear in mind
1836 * that in both source and destination we may be dealing
1837 * with a truncated value!
1839 switch (cmsg->cmsg_level) {
1840 case SOL_SOCKET:
1841 switch (cmsg->cmsg_type) {
1842 case SCM_RIGHTS:
1844 int *fd = (int *)data;
1845 int *target_fd = (int *)target_data;
1846 int i, numfds = tgt_len / sizeof(int);
1848 for (i = 0; i < numfds; i++) {
1849 __put_user(fd[i], target_fd + i);
1851 break;
1853 case SO_TIMESTAMP:
1855 struct timeval *tv = (struct timeval *)data;
1856 struct target_timeval *target_tv =
1857 (struct target_timeval *)target_data;
1859 if (len != sizeof(struct timeval) ||
1860 tgt_len != sizeof(struct target_timeval)) {
1861 goto unimplemented;
1864 /* copy struct timeval to target */
1865 __put_user(tv->tv_sec, &target_tv->tv_sec);
1866 __put_user(tv->tv_usec, &target_tv->tv_usec);
1867 break;
1869 case SCM_CREDENTIALS:
1871 struct ucred *cred = (struct ucred *)data;
1872 struct target_ucred *target_cred =
1873 (struct target_ucred *)target_data;
1875 __put_user(cred->pid, &target_cred->pid);
1876 __put_user(cred->uid, &target_cred->uid);
1877 __put_user(cred->gid, &target_cred->gid);
1878 break;
1880 default:
1881 goto unimplemented;
1883 break;
1885 case SOL_IP:
1886 switch (cmsg->cmsg_type) {
1887 case IP_TTL:
1889 uint32_t *v = (uint32_t *)data;
1890 uint32_t *t_int = (uint32_t *)target_data;
1892 if (len != sizeof(uint32_t) ||
1893 tgt_len != sizeof(uint32_t)) {
1894 goto unimplemented;
1896 __put_user(*v, t_int);
1897 break;
1899 case IP_RECVERR:
1901 struct errhdr_t {
1902 struct sock_extended_err ee;
1903 struct sockaddr_in offender;
1905 struct errhdr_t *errh = (struct errhdr_t *)data;
1906 struct errhdr_t *target_errh =
1907 (struct errhdr_t *)target_data;
1909 if (len != sizeof(struct errhdr_t) ||
1910 tgt_len != sizeof(struct errhdr_t)) {
1911 goto unimplemented;
1913 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1914 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1915 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1916 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1917 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1918 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1919 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1920 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1921 (void *) &errh->offender, sizeof(errh->offender));
1922 break;
1924 default:
1925 goto unimplemented;
1927 break;
1929 case SOL_IPV6:
1930 switch (cmsg->cmsg_type) {
1931 case IPV6_HOPLIMIT:
1933 uint32_t *v = (uint32_t *)data;
1934 uint32_t *t_int = (uint32_t *)target_data;
1936 if (len != sizeof(uint32_t) ||
1937 tgt_len != sizeof(uint32_t)) {
1938 goto unimplemented;
1940 __put_user(*v, t_int);
1941 break;
1943 case IPV6_RECVERR:
1945 struct errhdr6_t {
1946 struct sock_extended_err ee;
1947 struct sockaddr_in6 offender;
1949 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1950 struct errhdr6_t *target_errh =
1951 (struct errhdr6_t *)target_data;
1953 if (len != sizeof(struct errhdr6_t) ||
1954 tgt_len != sizeof(struct errhdr6_t)) {
1955 goto unimplemented;
1957 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1958 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1959 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1960 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1961 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1962 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1963 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1964 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1965 (void *) &errh->offender, sizeof(errh->offender));
1966 break;
1968 default:
1969 goto unimplemented;
1971 break;
1973 default:
1974 unimplemented:
1975 gemu_log("Unsupported ancillary data: %d/%d\n",
1976 cmsg->cmsg_level, cmsg->cmsg_type);
1977 memcpy(target_data, data, MIN(len, tgt_len));
1978 if (tgt_len > len) {
1979 memset(target_data + len, 0, tgt_len - len);
1983 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1984 tgt_space = TARGET_CMSG_SPACE(tgt_len);
1985 if (msg_controllen < tgt_space) {
1986 tgt_space = msg_controllen;
1988 msg_controllen -= tgt_space;
1989 space += tgt_space;
1990 cmsg = CMSG_NXTHDR(msgh, cmsg);
1991 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1992 target_cmsg_start);
1994 unlock_user(target_cmsg, target_cmsg_addr, space);
1995 the_end:
1996 target_msgh->msg_controllen = tswapal(space);
1997 return 0;
/* Byte-swap every field of a netlink message header in place. */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
}
/* Walk a buffer of host-endian netlink messages, convert each payload
 * with the supplied callback, then swap the header to guest order.
 * NLMSG_DONE/NLMSG_ERROR terminate the walk.  Headers are read while
 * still in host byte order, so nlmsg_len is used before swapping.
 */
2009 static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
2010 size_t len,
2011 abi_long (*host_to_target_nlmsg)
2012 (struct nlmsghdr *))
2014 uint32_t nlmsg_len;
2015 abi_long ret;
2017 while (len > sizeof(struct nlmsghdr)) {
2019 nlmsg_len = nlh->nlmsg_len;
2020 if (nlmsg_len < sizeof(struct nlmsghdr) ||
2021 nlmsg_len > len) {
2022 break;
2025 switch (nlh->nlmsg_type) {
2026 case NLMSG_DONE:
2027 tswap_nlmsghdr(nlh);
2028 return 0;
2029 case NLMSG_NOOP:
2030 break;
2031 case NLMSG_ERROR:
2033 struct nlmsgerr *e = NLMSG_DATA(nlh);
2034 e->error = tswap32(e->error);
2035 tswap_nlmsghdr(&e->msg);
2036 tswap_nlmsghdr(nlh);
2037 return 0;
2039 default:
2040 ret = host_to_target_nlmsg(nlh);
2041 if (ret < 0) {
2042 tswap_nlmsghdr(nlh);
2043 return ret;
2045 break;
2047 tswap_nlmsghdr(nlh);
2048 len -= NLMSG_ALIGN(nlmsg_len);
2049 nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
2051 return 0;
/* Walk a buffer of guest-endian netlink messages: validate lengths in
 * guest order, swap each header to host order, then convert the payload
 * with the supplied callback.  NLMSG_DONE/NLMSG_ERROR terminate.
 */
2054 static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
2055 size_t len,
2056 abi_long (*target_to_host_nlmsg)
2057 (struct nlmsghdr *))
2059 int ret;
2061 while (len > sizeof(struct nlmsghdr)) {
/* Lengths are still guest-endian here, hence the tswap32 reads. */
2062 if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
2063 tswap32(nlh->nlmsg_len) > len) {
2064 break;
2066 tswap_nlmsghdr(nlh);
2067 switch (nlh->nlmsg_type) {
2068 case NLMSG_DONE:
2069 return 0;
2070 case NLMSG_NOOP:
2071 break;
2072 case NLMSG_ERROR:
2074 struct nlmsgerr *e = NLMSG_DATA(nlh);
2075 e->error = tswap32(e->error);
2076 tswap_nlmsghdr(&e->msg);
2077 return 0;
2079 default:
2080 ret = target_to_host_nlmsg(nlh);
2081 if (ret < 0) {
2082 return ret;
2085 len -= NLMSG_ALIGN(nlh->nlmsg_len);
2086 nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
2088 return 0;
2091 #ifdef CONFIG_RTNETLINK
/*
 * Walk a run of host-byte-order netlink attributes (@len bytes),
 * invoking @host_to_target_nlattr on each one (passing @context through)
 * and then swapping the attribute header to target order.
 * Stops on a malformed length; propagates the first negative callback
 * return, with that attribute's header already swapped.
 */
static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
                                               size_t len, void *context,
                                               abi_long (*host_to_target_nlattr)
                                               (struct nlattr *,
                                                void *context))
{
    unsigned short nla_len;
    abi_long ret;

    while (len > sizeof(struct nlattr)) {
        /* Capture the length before the header gets byte-swapped. */
        nla_len = nlattr->nla_len;
        if (nla_len < sizeof(struct nlattr) ||
            nla_len > len) {
            break;
        }
        /* Convert the payload first: the callback needs the header in
         * host byte order to interpret nla_type/nla_len. */
        ret = host_to_target_nlattr(nlattr, context);
        nlattr->nla_len = tswap16(nlattr->nla_len);
        nlattr->nla_type = tswap16(nlattr->nla_type);
        if (ret < 0) {
            return ret;
        }
        len -= NLA_ALIGN(nla_len);
        nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
    }
    return 0;
}
/*
 * Walk a run of host-byte-order rtnetlink attributes (@len bytes),
 * invoking @host_to_target_rtattr on each payload and then swapping the
 * attribute header to target order.  Structurally the rtattr twin of
 * host_to_target_for_each_nlattr(), minus the context pointer.
 */
static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*host_to_target_rtattr)
                                               (struct rtattr *))
{
    unsigned short rta_len;
    abi_long ret;

    while (len > sizeof(struct rtattr)) {
        /* Capture the length before the header gets byte-swapped. */
        rta_len = rtattr->rta_len;
        if (rta_len < sizeof(struct rtattr) ||
            rta_len > len) {
            break;
        }
        /* Payload first, while the header is still in host order. */
        ret = host_to_target_rtattr(rtattr);
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        if (ret < 0) {
            return ret;
        }
        len -= RTA_ALIGN(rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
    }
    return 0;
}
/*
 * Address of the payload that follows a struct nlattr header.
 * The offset is applied inside the char * cast so the arithmetic is done
 * on char * -- the previous form added NLA_HDRLEN to a void *, which is
 * a GNU extension rather than standard C (same resulting address).
 */
#define NLA_DATA(nla) ((void *)((char *)(nla) + NLA_HDRLEN))
/*
 * Byte-swap the payload of one IFLA_BR_* bridge attribute from host to
 * target order.  @context is unused; it exists to satisfy the
 * host_to_target_for_each_nlattr() callback signature.
 * Unknown attribute types are logged and passed through unmodified.
 */
static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
                                                  void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* no data */
    case QEMU_IFLA_BR_FDB_FLUSH:
        break;
    /* binary */
    case QEMU_IFLA_BR_GROUP_ADDR:
        break;
    /* uint8_t */
    case QEMU_IFLA_BR_VLAN_FILTERING:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
    case QEMU_IFLA_BR_MCAST_ROUTER:
    case QEMU_IFLA_BR_MCAST_SNOOPING:
    case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
    case QEMU_IFLA_BR_MCAST_QUERIER:
    case QEMU_IFLA_BR_NF_CALL_IPTABLES:
    case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
    case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
        break;
    /* uint16_t */
    case QEMU_IFLA_BR_PRIORITY:
    case QEMU_IFLA_BR_VLAN_PROTOCOL:
    case QEMU_IFLA_BR_GROUP_FWD_MASK:
    case QEMU_IFLA_BR_ROOT_PORT:
    case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uint32_t */
    case QEMU_IFLA_BR_FORWARD_DELAY:
    case QEMU_IFLA_BR_HELLO_TIME:
    case QEMU_IFLA_BR_MAX_AGE:
    case QEMU_IFLA_BR_AGEING_TIME:
    case QEMU_IFLA_BR_STP_STATE:
    case QEMU_IFLA_BR_ROOT_PATH_COST:
    case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
    case QEMU_IFLA_BR_MCAST_HASH_MAX:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BR_HELLO_TIMER:
    case QEMU_IFLA_BR_TCN_TIMER:
    case QEMU_IFLA_BR_GC_TIMER:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
    case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uint8_t[] */
    case QEMU_IFLA_BR_ROOT_ID:
    case QEMU_IFLA_BR_BRIDGE_ID:
        break;
    default:
        gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
/*
 * Byte-swap the payload of one IFLA_BRPORT_* bridge-port (slave)
 * attribute from host to target order.  @context is unused (callback
 * signature requirement).  Unknown types are logged and left untouched.
 */
static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
                                                        void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* uint8_t */
    case QEMU_IFLA_BRPORT_STATE:
    case QEMU_IFLA_BRPORT_MODE:
    case QEMU_IFLA_BRPORT_GUARD:
    case QEMU_IFLA_BRPORT_PROTECT:
    case QEMU_IFLA_BRPORT_FAST_LEAVE:
    case QEMU_IFLA_BRPORT_LEARNING:
    case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
    case QEMU_IFLA_BRPORT_PROXYARP:
    case QEMU_IFLA_BRPORT_LEARNING_SYNC:
    case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
    case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
    case QEMU_IFLA_BRPORT_CONFIG_PENDING:
    case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
        break;
    /* uint16_t */
    case QEMU_IFLA_BRPORT_PRIORITY:
    case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
    case QEMU_IFLA_BRPORT_DESIGNATED_COST:
    case QEMU_IFLA_BRPORT_ID:
    case QEMU_IFLA_BRPORT_NO:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uint32_t */
    case QEMU_IFLA_BRPORT_COST:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
    case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
    case QEMU_IFLA_BRPORT_HOLD_TIMER:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uint8_t[] */
    case QEMU_IFLA_BRPORT_ROOT_ID:
    case QEMU_IFLA_BRPORT_BRIDGE_ID:
        break;
    default:
        gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
/*
 * Scratch state threaded through host_to_target_data_linkinfo_nlattr():
 * remembers the IFLA_INFO_KIND / IFLA_INFO_SLAVE_KIND strings seen
 * earlier in the attribute run so the nested IFLA_INFO_DATA /
 * IFLA_INFO_SLAVE_DATA payloads can be decoded per link type.
 */
struct linkinfo_context {
    int len;            /* byte length of the IFLA_INFO_KIND payload */
    char *name;         /* link kind string, e.g. "bridge" */
    int slave_len;      /* byte length of the IFLA_INFO_SLAVE_KIND payload */
    char *slave_name;   /* slave link kind string */
};
2283 static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
2284 void *context)
2286 struct linkinfo_context *li_context = context;
2288 switch (nlattr->nla_type) {
2289 /* string */
2290 case QEMU_IFLA_INFO_KIND:
2291 li_context->name = NLA_DATA(nlattr);
2292 li_context->len = nlattr->nla_len - NLA_HDRLEN;
2293 break;
2294 case QEMU_IFLA_INFO_SLAVE_KIND:
2295 li_context->slave_name = NLA_DATA(nlattr);
2296 li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
2297 break;
2298 /* stats */
2299 case QEMU_IFLA_INFO_XSTATS:
2300 /* FIXME: only used by CAN */
2301 break;
2302 /* nested */
2303 case QEMU_IFLA_INFO_DATA:
2304 if (strncmp(li_context->name, "bridge",
2305 li_context->len) == 0) {
2306 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2307 nlattr->nla_len,
2308 NULL,
2309 host_to_target_data_bridge_nlattr);
2310 } else {
2311 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
2313 break;
2314 case QEMU_IFLA_INFO_SLAVE_DATA:
2315 if (strncmp(li_context->slave_name, "bridge",
2316 li_context->slave_len) == 0) {
2317 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2318 nlattr->nla_len,
2319 NULL,
2320 host_to_target_slave_data_bridge_nlattr);
2321 } else {
2322 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2323 li_context->slave_name);
2325 break;
2326 default:
2327 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
2328 break;
2331 return 0;
2334 static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
2335 void *context)
2337 uint32_t *u32;
2338 int i;
2340 switch (nlattr->nla_type) {
2341 case QEMU_IFLA_INET_CONF:
2342 u32 = NLA_DATA(nlattr);
2343 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2344 i++) {
2345 u32[i] = tswap32(u32[i]);
2347 break;
2348 default:
2349 gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
2351 return 0;
/*
 * Byte-swap one AF_INET6 nested attribute from host to target order.
 * Handles scalar, array and struct ifla_cacheinfo payloads; unknown
 * types are logged and left untouched.  @context is unused.
 */
static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
                                                 void *context)
{
    uint32_t *u32;
    uint64_t *u64;
    struct ifla_cacheinfo *ci;
    int i;

    switch (nlattr->nla_type) {
    /* binaries */
    case QEMU_IFLA_INET6_TOKEN:
        break;
    /* uint8_t */
    case QEMU_IFLA_INET6_ADDR_GEN_MODE:
        break;
    /* uint32_t */
    case QEMU_IFLA_INET6_FLAGS:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint32_t[] */
    case QEMU_IFLA_INET6_CONF:
        u32 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
             i++) {
            u32[i] = tswap32(u32[i]);
        }
        break;
    /* ifla_cacheinfo */
    case QEMU_IFLA_INET6_CACHEINFO:
        ci = NLA_DATA(nlattr);
        ci->max_reasm_len = tswap32(ci->max_reasm_len);
        ci->tstamp = tswap32(ci->tstamp);
        ci->reachable_time = tswap32(ci->reachable_time);
        ci->retrans_time = tswap32(ci->retrans_time);
        break;
    /* uint64_t[] */
    case QEMU_IFLA_INET6_STATS:
    case QEMU_IFLA_INET6_ICMP6STATS:
        u64 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
             i++) {
            u64[i] = tswap64(u64[i]);
        }
        break;
    default:
        gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
    }
    return 0;
}
/*
 * Dispatch one IFLA_AF_SPEC nested attribute to the per-family
 * converter (AF_INET or AF_INET6).  @context is unused.
 *
 * NOTE(review): nla_len passed to the nested walker includes the
 * NLA_HDRLEN header, so the walker's budget overstates the payload by
 * NLA_HDRLEN bytes; the inner per-attribute length checks appear to be
 * what keeps this in bounds in practice -- confirm this is intended.
 */
static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
                                                void *context)
{
    switch (nlattr->nla_type) {
    case AF_INET:
        return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
                                              NULL,
                                             host_to_target_data_inet_nlattr);
    case AF_INET6:
        return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
                                              NULL,
                                             host_to_target_data_inet6_nlattr);
    default:
        gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
/*
 * Byte-swap the payload of one IFLA_* link attribute from host to
 * target order.  Scalar attributes are swapped in place; struct
 * payloads (rtnl_link_stats, rtnl_link_stats64, rtnl_link_ifmap) are
 * swapped field by field; nested attribute blocks (LINKINFO, AF_SPEC)
 * are recursed into.  Unknown types are logged and left untouched.
 */
static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct rtnl_link_stats *st;
    struct rtnl_link_stats64 *st64;
    struct rtnl_link_ifmap *map;
    struct linkinfo_context li_context;

    switch (rtattr->rta_type) {
    /* binary stream */
    case QEMU_IFLA_ADDRESS:
    case QEMU_IFLA_BROADCAST:
    /* string */
    case QEMU_IFLA_IFNAME:
    case QEMU_IFLA_QDISC:
        break;
    /* uint8_t */
    case QEMU_IFLA_OPERSTATE:
    case QEMU_IFLA_LINKMODE:
    case QEMU_IFLA_CARRIER:
    case QEMU_IFLA_PROTO_DOWN:
        break;
    /* uint32_t */
    case QEMU_IFLA_MTU:
    case QEMU_IFLA_LINK:
    case QEMU_IFLA_WEIGHT:
    case QEMU_IFLA_TXQLEN:
    case QEMU_IFLA_CARRIER_CHANGES:
    case QEMU_IFLA_NUM_RX_QUEUES:
    case QEMU_IFLA_NUM_TX_QUEUES:
    case QEMU_IFLA_PROMISCUITY:
    case QEMU_IFLA_EXT_MASK:
    case QEMU_IFLA_LINK_NETNSID:
    case QEMU_IFLA_GROUP:
    case QEMU_IFLA_MASTER:
    case QEMU_IFLA_NUM_VF:
    case QEMU_IFLA_GSO_MAX_SEGS:
    case QEMU_IFLA_GSO_MAX_SIZE:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct rtnl_link_stats */
    case QEMU_IFLA_STATS:
        st = RTA_DATA(rtattr);
        st->rx_packets = tswap32(st->rx_packets);
        st->tx_packets = tswap32(st->tx_packets);
        st->rx_bytes = tswap32(st->rx_bytes);
        st->tx_bytes = tswap32(st->tx_bytes);
        st->rx_errors = tswap32(st->rx_errors);
        st->tx_errors = tswap32(st->tx_errors);
        st->rx_dropped = tswap32(st->rx_dropped);
        st->tx_dropped = tswap32(st->tx_dropped);
        st->multicast = tswap32(st->multicast);
        st->collisions = tswap32(st->collisions);

        /* detailed rx_errors: */
        st->rx_length_errors = tswap32(st->rx_length_errors);
        st->rx_over_errors = tswap32(st->rx_over_errors);
        st->rx_crc_errors = tswap32(st->rx_crc_errors);
        st->rx_frame_errors = tswap32(st->rx_frame_errors);
        st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
        st->rx_missed_errors = tswap32(st->rx_missed_errors);

        /* detailed tx_errors */
        st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
        st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
        st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
        st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
        st->tx_window_errors = tswap32(st->tx_window_errors);

        /* for cslip etc */
        st->rx_compressed = tswap32(st->rx_compressed);
        st->tx_compressed = tswap32(st->tx_compressed);
        break;
    /* struct rtnl_link_stats64 */
    case QEMU_IFLA_STATS64:
        st64 = RTA_DATA(rtattr);
        st64->rx_packets = tswap64(st64->rx_packets);
        st64->tx_packets = tswap64(st64->tx_packets);
        st64->rx_bytes = tswap64(st64->rx_bytes);
        st64->tx_bytes = tswap64(st64->tx_bytes);
        st64->rx_errors = tswap64(st64->rx_errors);
        st64->tx_errors = tswap64(st64->tx_errors);
        st64->rx_dropped = tswap64(st64->rx_dropped);
        st64->tx_dropped = tswap64(st64->tx_dropped);
        st64->multicast = tswap64(st64->multicast);
        st64->collisions = tswap64(st64->collisions);

        /* detailed rx_errors: */
        st64->rx_length_errors = tswap64(st64->rx_length_errors);
        st64->rx_over_errors = tswap64(st64->rx_over_errors);
        st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
        st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
        st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
        st64->rx_missed_errors = tswap64(st64->rx_missed_errors);

        /* detailed tx_errors */
        st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
        st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
        st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
        st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
        st64->tx_window_errors = tswap64(st64->tx_window_errors);

        /* for cslip etc */
        st64->rx_compressed = tswap64(st64->rx_compressed);
        st64->tx_compressed = tswap64(st64->tx_compressed);
        break;
    /* struct rtnl_link_ifmap */
    case QEMU_IFLA_MAP:
        map = RTA_DATA(rtattr);
        map->mem_start = tswap64(map->mem_start);
        map->mem_end = tswap64(map->mem_end);
        map->base_addr = tswap64(map->base_addr);
        map->irq = tswap16(map->irq);
        break;
    /* nested */
    case QEMU_IFLA_LINKINFO:
        /* Fresh context per attribute: the KIND strings recorded in it
         * only make sense within this one LINKINFO block. */
        memset(&li_context, 0, sizeof(li_context));
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              &li_context,
                                           host_to_target_data_linkinfo_nlattr);
    case QEMU_IFLA_AF_SPEC:
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              NULL,
                                             host_to_target_data_spec_nlattr);
    default:
        gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
2556 static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
2558 uint32_t *u32;
2559 struct ifa_cacheinfo *ci;
2561 switch (rtattr->rta_type) {
2562 /* binary: depends on family type */
2563 case IFA_ADDRESS:
2564 case IFA_LOCAL:
2565 break;
2566 /* string */
2567 case IFA_LABEL:
2568 break;
2569 /* u32 */
2570 case IFA_FLAGS:
2571 case IFA_BROADCAST:
2572 u32 = RTA_DATA(rtattr);
2573 *u32 = tswap32(*u32);
2574 break;
2575 /* struct ifa_cacheinfo */
2576 case IFA_CACHEINFO:
2577 ci = RTA_DATA(rtattr);
2578 ci->ifa_prefered = tswap32(ci->ifa_prefered);
2579 ci->ifa_valid = tswap32(ci->ifa_valid);
2580 ci->cstamp = tswap32(ci->cstamp);
2581 ci->tstamp = tswap32(ci->tstamp);
2582 break;
2583 default:
2584 gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
2585 break;
2587 return 0;
2590 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
2592 uint32_t *u32;
2593 switch (rtattr->rta_type) {
2594 /* binary: depends on family type */
2595 case RTA_GATEWAY:
2596 case RTA_DST:
2597 case RTA_PREFSRC:
2598 break;
2599 /* u32 */
2600 case RTA_PRIORITY:
2601 case RTA_TABLE:
2602 case RTA_OIF:
2603 u32 = RTA_DATA(rtattr);
2604 *u32 = tswap32(*u32);
2605 break;
2606 default:
2607 gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
2608 break;
2610 return 0;
/* Convert a run of IFLA_* link attributes from host to target order. */
static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_link_rtattr);
}
/* Convert a run of IFA_* address attributes from host to target order. */
static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_addr_rtattr);
}
/* Convert a run of RTA_* route attributes from host to target order. */
static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
                                            uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_route_rtattr);
}
/*
 * Convert the payload of one NETLINK_ROUTE message from host to target
 * byte order: swap the fixed family header (ifinfomsg / ifaddrmsg /
 * rtmsg), then convert the attribute run that follows it.  Messages of
 * unknown type yield -TARGET_EINVAL.
 */
static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
{
    uint32_t nlmsg_len;
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    /* Cache the length; the header is still in host order here. */
    nlmsg_len = nlh->nlmsg_len;
    switch (nlh->nlmsg_type) {
    case RTM_NEWLINK:
    case RTM_DELLINK:
    case RTM_GETLINK:
        /* Only touch the payload if it is big enough to hold one. */
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            host_to_target_link_rtattr(IFLA_RTA(ifi),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_NEWADDR:
    case RTM_DELADDR:
    case RTM_GETADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            host_to_target_addr_rtattr(IFA_RTA(ifa),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
    case RTM_GETROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            host_to_target_route_rtattr(RTM_RTA(rtm),
                                        nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EINVAL;
    }
    return 0;
}
/* Convert a buffer of NETLINK_ROUTE messages from host to target order. */
static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
}
/*
 * Walk a run of target-byte-order rtnetlink attributes (@len bytes),
 * swap each header to host order and convert the payload with
 * @target_to_host_rtattr.  Stops on a malformed length; propagates the
 * first negative callback return.
 */
static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*target_to_host_rtattr)
                                               (struct rtattr *))
{
    abi_long ret;

    while (len >= sizeof(struct rtattr)) {
        /* rta_len is still in target order, hence the tswap16()s. */
        if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
            tswap16(rtattr->rta_len) > len) {
            break;
        }
        /* Header first: the callback needs nla_type in host order. */
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        ret = target_to_host_rtattr(rtattr);
        if (ret < 0) {
            return ret;
        }
        len -= RTA_ALIGN(rtattr->rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) +
                                   RTA_ALIGN(rtattr->rta_len));
    }
    return 0;
}
2713 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2715 switch (rtattr->rta_type) {
2716 default:
2717 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
2718 break;
2720 return 0;
2723 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2725 switch (rtattr->rta_type) {
2726 /* binary: depends on family type */
2727 case IFA_LOCAL:
2728 case IFA_ADDRESS:
2729 break;
2730 default:
2731 gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
2732 break;
2734 return 0;
2737 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2739 uint32_t *u32;
2740 switch (rtattr->rta_type) {
2741 /* binary: depends on family type */
2742 case RTA_DST:
2743 case RTA_SRC:
2744 case RTA_GATEWAY:
2745 break;
2746 /* u32 */
2747 case RTA_PRIORITY:
2748 case RTA_OIF:
2749 u32 = RTA_DATA(rtattr);
2750 *u32 = tswap32(*u32);
2751 break;
2752 default:
2753 gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
2754 break;
2756 return 0;
/* Convert a run of IFLA_* link attributes from target to host order. */
static void target_to_host_link_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_link_rtattr);
}
/* Convert a run of IFA_* address attributes from target to host order. */
static void target_to_host_addr_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_addr_rtattr);
}
/* Convert a run of RTA_* route attributes from target to host order. */
static void target_to_host_route_rtattr(struct rtattr *rtattr,
                                        uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_route_rtattr);
}
/*
 * Convert the payload of one NETLINK_ROUTE request from target to host
 * byte order.  GET requests with no family header (RTM_GETLINK,
 * RTM_GETROUTE) are passed through; NEW/DEL requests have their fixed
 * header and attribute run swapped.  Unknown message types yield
 * -TARGET_EOPNOTSUPP.  The nlmsghdr itself was already swapped by
 * target_to_host_for_each_nlmsg().
 */
static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
{
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    switch (nlh->nlmsg_type) {
    case RTM_GETLINK:
        break;
    case RTM_NEWLINK:
    case RTM_DELLINK:
        /* Only touch the payload if it is big enough to hold one. */
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_GETADDR:
    case RTM_NEWADDR:
    case RTM_DELADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_GETROUTE:
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
                                        NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EOPNOTSUPP;
    }
    return 0;
}
/* Convert a buffer of NETLINK_ROUTE requests from target to host order. */
static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
}
2832 #endif /* CONFIG_RTNETLINK */
2834 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2836 switch (nlh->nlmsg_type) {
2837 default:
2838 gemu_log("Unknown host audit message type %d\n",
2839 nlh->nlmsg_type);
2840 return -TARGET_EINVAL;
2842 return 0;
/* Convert a buffer of NETLINK_AUDIT messages from host to target order. */
static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
}
2851 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2853 switch (nlh->nlmsg_type) {
2854 case AUDIT_USER:
2855 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2856 case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2857 break;
2858 default:
2859 gemu_log("Unknown target audit message type %d\n",
2860 nlh->nlmsg_type);
2861 return -TARGET_EINVAL;
2864 return 0;
/* Convert a buffer of NETLINK_AUDIT requests from target to host order. */
static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
}
/*
 * Translate a guest setsockopt() call: copy and byte-swap the guest's
 * option payload into host representation, map target option names to
 * host ones where they differ (TARGET_SOL_SOCKET), and issue the host
 * setsockopt().  Must return target values and target errnos.
 */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value. */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTTL:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            /* These accept either a full int or a single byte from the
             * guest; normalise to an int for the host call. */
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            /* Accepts either the short ip_mreq or the longer ip_mreqn. */
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            /* struct ip_mreq_source holds only network-order addresses,
             * so the guest bytes can be passed through directly. */
            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
            /* These all take an 'int' value. */
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        case IPV6_PKTINFO:
        {
            struct in6_pktinfo pki;

            if (optlen < sizeof(pki)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
                return -TARGET_EFAULT;
            }

            /* Only the interface index is host-endian; the address
             * bytes stay in network order. */
            pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &pki, sizeof(pki)));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_ICMPV6:
        switch (optname) {
        case ICMPV6_FILTER:
        {
            struct icmp6_filter icmp6f;

            /* Clamp rather than reject an oversized filter. */
            if (optlen > sizeof(icmp6f)) {
                optlen = sizeof(icmp6f);
            }

            if (copy_from_user(&icmp6f, optval_addr, optlen)) {
                return -TARGET_EFAULT;
            }

            /* The filter is 8 x 32-bit words of type bitmap. */
            for (val = 0; val < 8; val++) {
                icmp6f.data[val] = tswap32(icmp6f.data[val]);
            }

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &icmp6f, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
        case IPV6_CHECKSUM:
            /* those take an u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;

        default:
            goto unimplemented;
        }
        break;
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
                struct timeval tv;

                optname = SO_RCVTIMEO;

set_timeout:
                /* Shared tail for SO_RCVTIMEO / SO_SNDTIMEO. */
                if (optlen != sizeof(struct target_timeval)) {
                    return -TARGET_EINVAL;
                }

                if (copy_from_user_timeval(&tv, optval_addr)) {
                    return -TARGET_EFAULT;
                }

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                &tv, sizeof(tv)));
                return ret;
        }
        case TARGET_SO_SNDTIMEO:
                optname = SO_SNDTIMEO;
                goto set_timeout;
        case TARGET_SO_ATTACH_FILTER:
        {
                struct target_sock_fprog *tfprog;
                struct target_sock_filter *tfilter;
                struct sock_fprog fprog;
                struct sock_filter *filter;
                int i;

                if (optlen != sizeof(*tfprog)) {
                    return -TARGET_EINVAL;
                }
                if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                    return -TARGET_EFAULT;
                }
                if (!lock_user_struct(VERIFY_READ, tfilter,
                                      tswapal(tfprog->filter), 0)) {
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_EFAULT;
                }

                /* Rebuild the BPF program with host byte order. */
                fprog.len = tswap16(tfprog->len);
                filter = g_try_new(struct sock_filter, fprog.len);
                if (filter == NULL) {
                    unlock_user_struct(tfilter, tfprog->filter, 1);
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_ENOMEM;
                }
                for (i = 0; i < fprog.len; i++) {
                    filter[i].code = tswap16(tfilter[i].code);
                    filter[i].jt = tfilter[i].jt;
                    filter[i].jf = tfilter[i].jf;
                    filter[i].k = tswap32(tfilter[i].k);
                }
                fprog.filter = filter;

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                                SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
                g_free(filter);

                /* NOTE(review): tfilter was locked at
                 * tswapal(tfprog->filter) but is unlocked at the
                 * unswapped tfprog->filter -- confirm these addresses
                 * match on cross-endian hosts. */
                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return ret;
        }
        case TARGET_SO_BINDTODEVICE:
        {
                char *dev_ifname, *addr_ifname;

                /* Clamp to the host's interface-name limit. */
                if (optlen > IFNAMSIZ - 1) {
                    optlen = IFNAMSIZ - 1;
                }
                dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
                if (!dev_ifname) {
                    return -TARGET_EFAULT;
                }
                optname = SO_BINDTODEVICE;
                /* Copy so we can guarantee NUL termination. */
                addr_ifname = alloca(IFNAMSIZ);
                memcpy(addr_ifname, dev_ifname, optlen);
                addr_ifname[optlen] = 0;
                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                           addr_ifname, optlen));
                unlock_user (dev_ifname, optval_addr, 0);
                return ret;
        }
        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
                optname = SO_DEBUG;
                break;
        case TARGET_SO_REUSEADDR:
                optname = SO_REUSEADDR;
                break;
        case TARGET_SO_TYPE:
                optname = SO_TYPE;
                break;
        case TARGET_SO_ERROR:
                optname = SO_ERROR;
                break;
        case TARGET_SO_DONTROUTE:
                optname = SO_DONTROUTE;
                break;
        case TARGET_SO_BROADCAST:
                optname = SO_BROADCAST;
                break;
        case TARGET_SO_SNDBUF:
                optname = SO_SNDBUF;
                break;
        case TARGET_SO_SNDBUFFORCE:
                optname = SO_SNDBUFFORCE;
                break;
        case TARGET_SO_RCVBUF:
                optname = SO_RCVBUF;
                break;
        case TARGET_SO_RCVBUFFORCE:
                optname = SO_RCVBUFFORCE;
                break;
        case TARGET_SO_KEEPALIVE:
                optname = SO_KEEPALIVE;
                break;
        case TARGET_SO_OOBINLINE:
                optname = SO_OOBINLINE;
                break;
        case TARGET_SO_NO_CHECK:
                optname = SO_NO_CHECK;
                break;
        case TARGET_SO_PRIORITY:
                optname = SO_PRIORITY;
                break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
                optname = SO_BSDCOMPAT;
                break;
#endif
        case TARGET_SO_PASSCRED:
                optname = SO_PASSCRED;
                break;
        case TARGET_SO_PASSSEC:
                optname = SO_PASSSEC;
                break;
        case TARGET_SO_TIMESTAMP:
                optname = SO_TIMESTAMP;
                break;
        case TARGET_SO_RCVLOWAT:
                optname = SO_RCVLOWAT;
                break;
        default:
            goto unimplemented;
        }
        /* Shared tail for all the plain-int SOL_SOCKET options above. */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
/* do_getsockopt() Must return target values and target errnos.
 *
 * Translates a guest getsockopt(2): maps the target level/optname to the
 * host's, performs the host call, then copies the result back into guest
 * memory with the byte order and size the guest expects.
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            /* struct ucred needs a field-by-field copy-out */
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            /* Copy back at most as many bytes as the guest asked for */
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            /* Unknown options are passed through unchanged */
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* NOTE(review): sizeof(lv) happens to equal sizeof(int) on Linux;
         * presumably intended as sizeof(val) — behavior is the same here. */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            /* SOCK_* numbering differs between targets; translate back */
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            /* Small buffers get a single byte if the value fits in one */
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/* Convert a guest iovec array at target_addr (count entries) into a host
 * struct iovec array, locking each buffer into host memory.
 *
 * type/copy are passed through to lock_user() (VERIFY_READ/VERIFY_WRITE).
 * On success returns a g_malloc'd vector the caller must release with
 * unlock_iovec().  On failure returns NULL with errno set (count==0 is
 * reported as success via errno==0 and a NULL vector).
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* Clamp so the accumulated length never exceeds max_len */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Undo the locks taken so far (entries with len > 0 only) */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
/* Release a host iovec previously built by lock_iovec().
 *
 * Re-reads the guest iovec array to recover each buffer's guest address,
 * unlocks every host buffer (copying data back to the guest when copy is
 * non-zero, i.e. after a read-style operation), then frees the vector.
 */
static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            /* lock_iovec stopped at the first negative length; mirror that */
            if (len < 0) {
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    g_free(vec);
}
3499 static inline int target_to_host_sock_type(int *type)
3501 int host_type = 0;
3502 int target_type = *type;
3504 switch (target_type & TARGET_SOCK_TYPE_MASK) {
3505 case TARGET_SOCK_DGRAM:
3506 host_type = SOCK_DGRAM;
3507 break;
3508 case TARGET_SOCK_STREAM:
3509 host_type = SOCK_STREAM;
3510 break;
3511 default:
3512 host_type = target_type & TARGET_SOCK_TYPE_MASK;
3513 break;
3515 if (target_type & TARGET_SOCK_CLOEXEC) {
3516 #if defined(SOCK_CLOEXEC)
3517 host_type |= SOCK_CLOEXEC;
3518 #else
3519 return -TARGET_EINVAL;
3520 #endif
3522 if (target_type & TARGET_SOCK_NONBLOCK) {
3523 #if defined(SOCK_NONBLOCK)
3524 host_type |= SOCK_NONBLOCK;
3525 #elif !defined(O_NONBLOCK)
3526 return -TARGET_EINVAL;
3527 #endif
3529 *type = host_type;
3530 return 0;
3533 /* Try to emulate socket type flags after socket creation. */
3534 static int sock_flags_fixup(int fd, int target_type)
3536 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3537 if (target_type & TARGET_SOCK_NONBLOCK) {
3538 int flags = fcntl(fd, F_GETFL);
3539 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3540 close(fd);
3541 return -TARGET_EINVAL;
3544 #endif
3545 return fd;
/* Convert a guest sockaddr for AF_PACKET/SOCK_PACKET sockets.
 *
 * Copies the raw bytes and fixes up sa_family byte order; the trailing
 * protocol field is deliberately left untouched because it is defined
 * as big-endian on the wire for both guest and host.
 */
static abi_long packet_target_to_host_sockaddr(void *host_addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    struct sockaddr *addr = host_addr;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr) {
        return -TARGET_EFAULT;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = tswap16(target_saddr->sa_family);
    /* spkt_protocol is big-endian */

    unlock_user(target_saddr, target_addr, 0);
    return 0;
}

/* fd translation hooks installed on SOCK_PACKET sockets by do_socket() */
static TargetFdTrans target_packet_trans = {
    .target_to_host_addr = packet_target_to_host_sockaddr,
};
#ifdef CONFIG_RTNETLINK
/* Translate a guest NETLINK_ROUTE message buffer in place for the host.
 * Returns the (unchanged) length on success, or a negative errno. */
static abi_long netlink_route_target_to_host(void *buf, size_t len)
{
    abi_long ret;

    ret = target_to_host_nlmsg_route(buf, len);
    if (ret < 0) {
        return ret;
    }

    return len;
}

/* Translate a host NETLINK_ROUTE message buffer in place for the guest.
 * Returns the (unchanged) length on success, or a negative errno. */
static abi_long netlink_route_host_to_target(void *buf, size_t len)
{
    abi_long ret;

    ret = host_to_target_nlmsg_route(buf, len);
    if (ret < 0) {
        return ret;
    }

    return len;
}

/* fd translation hooks installed on NETLINK_ROUTE sockets by do_socket() */
static TargetFdTrans target_netlink_route_trans = {
    .target_to_host_data = netlink_route_target_to_host,
    .host_to_target_data = netlink_route_host_to_target,
};
#endif /* CONFIG_RTNETLINK */
/* Translate a guest NETLINK_AUDIT message buffer in place for the host.
 * Returns the (unchanged) length on success, or a negative errno. */
static abi_long netlink_audit_target_to_host(void *buf, size_t len)
{
    abi_long ret;

    ret = target_to_host_nlmsg_audit(buf, len);
    if (ret < 0) {
        return ret;
    }

    return len;
}

/* Translate a host NETLINK_AUDIT message buffer in place for the guest.
 * Returns the (unchanged) length on success, or a negative errno. */
static abi_long netlink_audit_host_to_target(void *buf, size_t len)
{
    abi_long ret;

    ret = host_to_target_nlmsg_audit(buf, len);
    if (ret < 0) {
        return ret;
    }

    return len;
}

/* fd translation hooks installed on NETLINK_AUDIT sockets by do_socket() */
static TargetFdTrans target_netlink_audit_trans = {
    .target_to_host_data = netlink_audit_target_to_host,
    .host_to_target_data = netlink_audit_host_to_target,
};
/* do_socket() Must return target values and target errnos.
 *
 * Emulates socket(2): translates the guest type flags, restricts netlink
 * to the protocols we can translate, creates the host socket, and
 * registers per-fd data/address translation hooks where needed.
 */
static abi_long do_socket(int domain, int type, int protocol)
{
    int target_type = type;
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    /* Only netlink protocols whose messages we can translate are allowed */
    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -EPFNOSUPPORT;
    }

    /* Packet sockets carry the (big-endian) ethertype in 'protocol' */
    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case :
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                /* filtered out by the protocol check above */
                g_assert_not_reached();
            }
        }
    }
    return ret;
}
3686 /* do_bind() Must return target values and target errnos. */
3687 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3688 socklen_t addrlen)
3690 void *addr;
3691 abi_long ret;
3693 if ((int)addrlen < 0) {
3694 return -TARGET_EINVAL;
3697 addr = alloca(addrlen+1);
3699 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3700 if (ret)
3701 return ret;
3703 return get_errno(bind(sockfd, addr, addrlen));
3706 /* do_connect() Must return target values and target errnos. */
3707 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3708 socklen_t addrlen)
3710 void *addr;
3711 abi_long ret;
3713 if ((int)addrlen < 0) {
3714 return -TARGET_EINVAL;
3717 addr = alloca(addrlen+1);
3719 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3720 if (ret)
3721 return ret;
3723 return get_errno(safe_connect(sockfd, addr, addrlen));
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 *
 * Core of sendmsg/recvmsg emulation, operating on an already-locked
 * guest msghdr.  Builds a host msghdr (name, control data, iovec), calls
 * safe_sendmsg/safe_recvmsg, and on receive converts results back to
 * guest format.  'send' selects direction.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* Host cmsgs can be larger than guest ones; double the buffer */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            /* Data needs translating (e.g. netlink): work on a copy of
             * the first iovec only. */
            void *host_msg;

            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                                       len);
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                /* (void *)-1 marks the deliberately-bad name from above */
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                                  msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
3830 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3831 int flags, int send)
3833 abi_long ret;
3834 struct target_msghdr *msgp;
3836 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3837 msgp,
3838 target_msg,
3839 send ? 1 : 0)) {
3840 return -TARGET_EFAULT;
3842 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3843 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3844 return ret;
/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif

/* Emulate sendmmsg/recvmmsg by looping over do_sendrecvmsg_locked().
 * Returns the number of datagrams processed, or a target errno if the
 * very first one failed.
 */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    /* The kernel silently clamps oversized vectors; do the same */
    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
/* do_accept4() Must return target values and target errnos.
 *
 * Emulates accept4(2) (and plain accept via flags==0): translates the
 * SOCK_* flag bits, accepts, and copies the peer address back to the
 * guest if one was requested.
 */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    /* NULL addr means the caller doesn't want the peer address */
    if (target_addr == 0) {
        return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_getpeername() Must return target values and target errnos.
 *
 * Emulates getpeername(2): fetches the peer address into a host buffer
 * then converts and copies it back to guest memory.
 */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getpeername(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_getsockname() Must return target values and target errnos.
 *
 * Emulates getsockname(2): fetches the local address into a host buffer
 * then converts and copies it back to guest memory.
 */
static abi_long do_getsockname(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getsockname(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
3989 /* do_socketpair() Must return target values and target errnos. */
3990 static abi_long do_socketpair(int domain, int type, int protocol,
3991 abi_ulong target_tab_addr)
3993 int tab[2];
3994 abi_long ret;
3996 target_to_host_sock_type(&type);
3998 ret = get_errno(socketpair(domain, type, protocol, tab));
3999 if (!is_error(ret)) {
4000 if (put_user_s32(tab[0], target_tab_addr)
4001 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
4002 ret = -TARGET_EFAULT;
4004 return ret;
/* do_sendto() Must return target values and target errnos.
 *
 * Emulates sendto(2) (and send(2) when target_addr is 0).  If the fd has
 * a registered data translator (e.g. a netlink socket), the payload is
 * translated on a private copy before sending.
 */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        /* Translate on a copy; copy_msg keeps the locked guest buffer */
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    if (copy_msg) {
        /* Free the translated copy and restore the locked buffer */
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
/* do_recvfrom() Must return target values and target errnos.
 *
 * Emulates recvfrom(2) (and recv(2) when target_addr is 0).  Received
 * data may be translated in place by a registered per-fd hook before it
 * is copied back to the guest.
 */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            ret = fd_trans_host_to_target_data(fd)(host_msg, ret);
        }
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* Success: copy received bytes back to the guest */
        unlock_user(host_msg, msg, len);
    } else {
fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos.
 *
 * Emulates the multiplexed socketcall(2) syscall used by some targets:
 * reads nargs[num] arguments from guest memory at vptr, then dispatches
 * to the matching do_* helper.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
#define N_SHM_REGIONS	32

/* Bookkeeping for guest shmat() mappings so shmdt() can find the size
 * of the region to unmap.  Fixed-size table; in_use marks live slots. */
static struct shm_region {
    abi_ulong start;   /* guest address of the attached segment */
    abi_ulong size;    /* size of the segment in bytes */
    bool in_use;
} shm_regions[N_SHM_REGIONS];
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
struct target_semid64_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;  /* padding matching the 32-bit kernel layout */
#endif
    abi_ulong sem_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;  /* padding matching the 32-bit kernel layout */
#endif
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;
};
#endif
/* Copy a guest struct ipc_perm (embedded at the head of a guest
 * semid64_ds at target_addr) into a host struct ipc_perm, byte-swapping
 * each field.  Field widths differ per target for mode and __seq.
 */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/* Copy a host struct ipc_perm back into the guest semid64_ds at
 * target_addr, byte-swapping each field (inverse of
 * target_to_host_ipc_perm).
 */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
/* Convert a guest semid64_ds at target_addr into a host semid_ds
 * (permissions plus the nsems/otime/ctime fields). */
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/* Convert a host semid_ds back into the guest semid64_ds at target_addr
 * (inverse of target_to_host_semid_ds). */
static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
/* Guest-layout mirror of the host struct seminfo (semctl IPC_INFO /
 * SEM_INFO result); all fields are plain ints on both sides. */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
/* Copy a host struct seminfo into guest memory at target_addr,
 * byte-swapping each int field via __put_user. */
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
/* Host-side semctl(2) argument union (glibc requires callers define it) */
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

/* Guest-side view of the same union: pointers are guest addresses */
union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
/* Read a guest SETALL value array into a freshly-allocated host array.
 *
 * Queries the semaphore set's nsems via IPC_STAT to size the array; on
 * success *host_array owns a g_malloc'd buffer the caller must free
 * (host_to_target_semarray() does so on the normal path).
 */
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = g_try_new(unsigned short, nsems);
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array) {
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}
4390 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4391 unsigned short **host_array)
4393 int nsems;
4394 unsigned short *array;
4395 union semun semun;
4396 struct semid_ds semid_ds;
4397 int i, ret;
4399 semun.buf = &semid_ds;
4401 ret = semctl(semid, 0, IPC_STAT, semun);
4402 if (ret == -1)
4403 return get_errno(ret);
4405 nsems = semid_ds.sem_nsems;
4407 array = lock_user(VERIFY_WRITE, target_addr,
4408 nsems*sizeof(unsigned short), 0);
4409 if (!array)
4410 return -TARGET_EFAULT;
4412 for(i=0; i<nsems; i++) {
4413 __put_user((*host_array)[i], &array[i]);
4415 g_free(*host_array);
4416 unlock_user(array, target_addr, 1);
4418 return 0;
/* Emulate semctl(2): decode the guest's union semun argument per
 * command, call the host semctl, and copy results back to the guest.
 * Returns a target errno on failure.
 */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    /* Strip IPC_64 and similar flag bits from the command */
    cmd &= 0xff;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element.  To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        /* Copies the result back to the guest and frees 'array' */
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* Commands that ignore the semun argument entirely */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
/* Guest-ABI layout of struct sembuf: one semaphore operation for semop(2). */
struct target_sembuf {
    unsigned short sem_num;     /* semaphore index within the set */
    short sem_op;               /* <0 wait, 0 wait-for-zero, >0 post */
    short sem_flg;              /* IPC_NOWAIT and/or SEM_UNDO */
};
4497 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4498 abi_ulong target_addr,
4499 unsigned nsops)
4501 struct target_sembuf *target_sembuf;
4502 int i;
4504 target_sembuf = lock_user(VERIFY_READ, target_addr,
4505 nsops*sizeof(struct target_sembuf), 1);
4506 if (!target_sembuf)
4507 return -TARGET_EFAULT;
4509 for(i=0; i<nsops; i++) {
4510 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4511 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4512 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4515 unlock_user(target_sembuf, target_addr, 0);
4517 return 0;
4520 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
4522 struct sembuf sops[nsops];
4524 if (target_to_host_sembuf(sops, ptr, nsops))
4525 return -TARGET_EFAULT;
4527 return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
4530 struct target_msqid_ds
4532 struct target_ipc_perm msg_perm;
4533 abi_ulong msg_stime;
4534 #if TARGET_ABI_BITS == 32
4535 abi_ulong __unused1;
4536 #endif
4537 abi_ulong msg_rtime;
4538 #if TARGET_ABI_BITS == 32
4539 abi_ulong __unused2;
4540 #endif
4541 abi_ulong msg_ctime;
4542 #if TARGET_ABI_BITS == 32
4543 abi_ulong __unused3;
4544 #endif
4545 abi_ulong __msg_cbytes;
4546 abi_ulong msg_qnum;
4547 abi_ulong msg_qbytes;
4548 abi_ulong msg_lspid;
4549 abi_ulong msg_lrpid;
4550 abi_ulong __unused4;
4551 abi_ulong __unused5;
4554 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4555 abi_ulong target_addr)
4557 struct target_msqid_ds *target_md;
4559 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4560 return -TARGET_EFAULT;
4561 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4562 return -TARGET_EFAULT;
4563 host_md->msg_stime = tswapal(target_md->msg_stime);
4564 host_md->msg_rtime = tswapal(target_md->msg_rtime);
4565 host_md->msg_ctime = tswapal(target_md->msg_ctime);
4566 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4567 host_md->msg_qnum = tswapal(target_md->msg_qnum);
4568 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4569 host_md->msg_lspid = tswapal(target_md->msg_lspid);
4570 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4571 unlock_user_struct(target_md, target_addr, 0);
4572 return 0;
4575 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4576 struct msqid_ds *host_md)
4578 struct target_msqid_ds *target_md;
4580 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4581 return -TARGET_EFAULT;
4582 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4583 return -TARGET_EFAULT;
4584 target_md->msg_stime = tswapal(host_md->msg_stime);
4585 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4586 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4587 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4588 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4589 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4590 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4591 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4592 unlock_user_struct(target_md, target_addr, 1);
4593 return 0;
/* Guest-ABI layout of struct msginfo (msgctl IPC_INFO/MSG_INFO output). */
struct target_msginfo {
    int msgpool;                /* size in KiB of message pool */
    int msgmap;                 /* entries in message map */
    int msgmax;                 /* max bytes per message */
    int msgmnb;                 /* default max bytes on queue */
    int msgmni;                 /* max number of message queues */
    int msgssz;                 /* message segment size */
    int msgtql;                 /* max messages system-wide */
    unsigned short int msgseg;  /* max segments system-wide */
};
4607 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4608 struct msginfo *host_msginfo)
4610 struct target_msginfo *target_msginfo;
4611 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4612 return -TARGET_EFAULT;
4613 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4614 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4615 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4616 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4617 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4618 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4619 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4620 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4621 unlock_user_struct(target_msginfo, target_addr, 1);
4622 return 0;
4625 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4627 struct msqid_ds dsarg;
4628 struct msginfo msginfo;
4629 abi_long ret = -TARGET_EINVAL;
4631 cmd &= 0xff;
4633 switch (cmd) {
4634 case IPC_STAT:
4635 case IPC_SET:
4636 case MSG_STAT:
4637 if (target_to_host_msqid_ds(&dsarg,ptr))
4638 return -TARGET_EFAULT;
4639 ret = get_errno(msgctl(msgid, cmd, &dsarg));
4640 if (host_to_target_msqid_ds(ptr,&dsarg))
4641 return -TARGET_EFAULT;
4642 break;
4643 case IPC_RMID:
4644 ret = get_errno(msgctl(msgid, cmd, NULL));
4645 break;
4646 case IPC_INFO:
4647 case MSG_INFO:
4648 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4649 if (host_to_target_msginfo(ptr, &msginfo))
4650 return -TARGET_EFAULT;
4651 break;
4654 return ret;
4657 struct target_msgbuf {
4658 abi_long mtype;
4659 char mtext[1];
4662 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4663 ssize_t msgsz, int msgflg)
4665 struct target_msgbuf *target_mb;
4666 struct msgbuf *host_mb;
4667 abi_long ret = 0;
4669 if (msgsz < 0) {
4670 return -TARGET_EINVAL;
4673 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4674 return -TARGET_EFAULT;
4675 host_mb = g_try_malloc(msgsz + sizeof(long));
4676 if (!host_mb) {
4677 unlock_user_struct(target_mb, msgp, 0);
4678 return -TARGET_ENOMEM;
4680 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4681 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4682 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4683 g_free(host_mb);
4684 unlock_user_struct(target_mb, msgp, 0);
4686 return ret;
4689 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4690 ssize_t msgsz, abi_long msgtyp,
4691 int msgflg)
4693 struct target_msgbuf *target_mb;
4694 char *target_mtext;
4695 struct msgbuf *host_mb;
4696 abi_long ret = 0;
4698 if (msgsz < 0) {
4699 return -TARGET_EINVAL;
4702 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4703 return -TARGET_EFAULT;
4705 host_mb = g_try_malloc(msgsz + sizeof(long));
4706 if (!host_mb) {
4707 ret = -TARGET_ENOMEM;
4708 goto end;
4710 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4712 if (ret > 0) {
4713 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4714 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4715 if (!target_mtext) {
4716 ret = -TARGET_EFAULT;
4717 goto end;
4719 memcpy(target_mb->mtext, host_mb->mtext, ret);
4720 unlock_user(target_mtext, target_mtext_addr, ret);
4723 target_mb->mtype = tswapal(host_mb->mtype);
4725 end:
4726 if (target_mb)
4727 unlock_user_struct(target_mb, msgp, 1);
4728 g_free(host_mb);
4729 return ret;
4732 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4733 abi_ulong target_addr)
4735 struct target_shmid_ds *target_sd;
4737 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4738 return -TARGET_EFAULT;
4739 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4740 return -TARGET_EFAULT;
4741 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4742 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4743 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4744 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4745 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4746 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4747 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4748 unlock_user_struct(target_sd, target_addr, 0);
4749 return 0;
4752 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4753 struct shmid_ds *host_sd)
4755 struct target_shmid_ds *target_sd;
4757 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4758 return -TARGET_EFAULT;
4759 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4760 return -TARGET_EFAULT;
4761 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4762 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4763 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4764 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4765 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4766 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4767 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4768 unlock_user_struct(target_sd, target_addr, 1);
4769 return 0;
4772 struct target_shminfo {
4773 abi_ulong shmmax;
4774 abi_ulong shmmin;
4775 abi_ulong shmmni;
4776 abi_ulong shmseg;
4777 abi_ulong shmall;
4780 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4781 struct shminfo *host_shminfo)
4783 struct target_shminfo *target_shminfo;
4784 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4785 return -TARGET_EFAULT;
4786 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4787 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4788 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4789 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4790 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4791 unlock_user_struct(target_shminfo, target_addr, 1);
4792 return 0;
4795 struct target_shm_info {
4796 int used_ids;
4797 abi_ulong shm_tot;
4798 abi_ulong shm_rss;
4799 abi_ulong shm_swp;
4800 abi_ulong swap_attempts;
4801 abi_ulong swap_successes;
4804 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4805 struct shm_info *host_shm_info)
4807 struct target_shm_info *target_shm_info;
4808 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4809 return -TARGET_EFAULT;
4810 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4811 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4812 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4813 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4814 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4815 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4816 unlock_user_struct(target_shm_info, target_addr, 1);
4817 return 0;
4820 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4822 struct shmid_ds dsarg;
4823 struct shminfo shminfo;
4824 struct shm_info shm_info;
4825 abi_long ret = -TARGET_EINVAL;
4827 cmd &= 0xff;
4829 switch(cmd) {
4830 case IPC_STAT:
4831 case IPC_SET:
4832 case SHM_STAT:
4833 if (target_to_host_shmid_ds(&dsarg, buf))
4834 return -TARGET_EFAULT;
4835 ret = get_errno(shmctl(shmid, cmd, &dsarg));
4836 if (host_to_target_shmid_ds(buf, &dsarg))
4837 return -TARGET_EFAULT;
4838 break;
4839 case IPC_INFO:
4840 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4841 if (host_to_target_shminfo(buf, &shminfo))
4842 return -TARGET_EFAULT;
4843 break;
4844 case SHM_INFO:
4845 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4846 if (host_to_target_shm_info(buf, &shm_info))
4847 return -TARGET_EFAULT;
4848 break;
4849 case IPC_RMID:
4850 case SHM_LOCK:
4851 case SHM_UNLOCK:
4852 ret = get_errno(shmctl(shmid, cmd, NULL));
4853 break;
4856 return ret;
4859 #ifndef TARGET_FORCE_SHMLBA
4860 /* For most architectures, SHMLBA is the same as the page size;
4861 * some architectures have larger values, in which case they should
4862 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4863 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4864 * and defining its own value for SHMLBA.
4866 * The kernel also permits SHMLBA to be set by the architecture to a
4867 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4868 * this means that addresses are rounded to the large size if
4869 * SHM_RND is set but addresses not aligned to that size are not rejected
4870 * as long as they are at least page-aligned. Since the only architecture
4871 * which uses this is ia64 this code doesn't provide for that oddity.
4873 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4875 return TARGET_PAGE_SIZE;
4877 #endif
4879 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4880 int shmid, abi_ulong shmaddr, int shmflg)
4882 abi_long raddr;
4883 void *host_raddr;
4884 struct shmid_ds shm_info;
4885 int i,ret;
4886 abi_ulong shmlba;
4888 /* find out the length of the shared memory segment */
4889 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4890 if (is_error(ret)) {
4891 /* can't get length, bail out */
4892 return ret;
4895 shmlba = target_shmlba(cpu_env);
4897 if (shmaddr & (shmlba - 1)) {
4898 if (shmflg & SHM_RND) {
4899 shmaddr &= ~(shmlba - 1);
4900 } else {
4901 return -TARGET_EINVAL;
4905 mmap_lock();
4907 if (shmaddr)
4908 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4909 else {
4910 abi_ulong mmap_start;
4912 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
4914 if (mmap_start == -1) {
4915 errno = ENOMEM;
4916 host_raddr = (void *)-1;
4917 } else
4918 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4921 if (host_raddr == (void *)-1) {
4922 mmap_unlock();
4923 return get_errno((long)host_raddr);
4925 raddr=h2g((unsigned long)host_raddr);
4927 page_set_flags(raddr, raddr + shm_info.shm_segsz,
4928 PAGE_VALID | PAGE_READ |
4929 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4931 for (i = 0; i < N_SHM_REGIONS; i++) {
4932 if (!shm_regions[i].in_use) {
4933 shm_regions[i].in_use = true;
4934 shm_regions[i].start = raddr;
4935 shm_regions[i].size = shm_info.shm_segsz;
4936 break;
4940 mmap_unlock();
4941 return raddr;
4945 static inline abi_long do_shmdt(abi_ulong shmaddr)
4947 int i;
4949 for (i = 0; i < N_SHM_REGIONS; ++i) {
4950 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4951 shm_regions[i].in_use = false;
4952 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4953 break;
4957 return get_errno(shmdt(g2h(shmaddr)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/*
 * Emulate the multiplexed ipc(2) syscall used by some targets: the call
 * number selects the SysV IPC operation, the top 16 bits carry a version.
 * do_ipc() must return target values and target errnos.
 */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference
         * the ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old ABI: msgp and msgtyp are passed via a kludge struct. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr)) {
                return get_errno(raddr);
            }
            /* The attach address is returned through *third. */
            if (put_user_ual(raddr, third)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case 1:
            /* iBCS2 emulator entry point: not supported. */
            ret = -TARGET_EINVAL;
            break;
        }
        break;

    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms. */
        ret = get_errno(shmget(first, second, third));
        break;

    /* IPC_* and SHM_* command values are the same on all linux platforms. */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;

    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
5067 /* kernel structure types definitions */
5069 #define STRUCT(name, ...) STRUCT_ ## name,
5070 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
5071 enum {
5072 #include "syscall_types.h"
5073 STRUCT_MAX
5075 #undef STRUCT
5076 #undef STRUCT_SPECIAL
5078 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
5079 #define STRUCT_SPECIAL(name)
5080 #include "syscall_types.h"
5081 #undef STRUCT
5082 #undef STRUCT_SPECIAL
5084 typedef struct IOCTLEntry IOCTLEntry;
5086 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
5087 int fd, int cmd, abi_long arg);
5089 struct IOCTLEntry {
5090 int target_cmd;
5091 unsigned int host_cmd;
5092 const char *name;
5093 int access;
5094 do_ioctl_fn *do_ioctl;
5095 const argtype arg_type[5];
5098 #define IOC_R 0x0001
5099 #define IOC_W 0x0002
5100 #define IOC_RW (IOC_R | IOC_W)
5102 #define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))

/*
 * Special-case handler for FS_IOC_FIEMAP.  The argument is a struct
 * fiemap followed by an array of struct fiemap_extent whose size is set
 * in fiemap->fm_extent_count; the kernel fills in the extents, so both
 * directions need conversion and the output may exceed buf_temp.
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;

    /* Convert the fixed-size header from guest to host. */
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof(*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);
    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents,
         * so there are no structs to copy.
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap... */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                /* ...and then all the struct fiemap_extents. */
                p = argptr + target_size_in;
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }

    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
5193 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
5194 int fd, int cmd, abi_long arg)
5196 const argtype *arg_type = ie->arg_type;
5197 int target_size;
5198 void *argptr;
5199 int ret;
5200 struct ifconf *host_ifconf;
5201 uint32_t outbufsz;
5202 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
5203 int target_ifreq_size;
5204 int nb_ifreq;
5205 int free_buf = 0;
5206 int i;
5207 int target_ifc_len;
5208 abi_long target_ifc_buf;
5209 int host_ifc_len;
5210 char *host_ifc_buf;
5212 assert(arg_type[0] == TYPE_PTR);
5213 assert(ie->access == IOC_RW);
5215 arg_type++;
5216 target_size = thunk_type_size(arg_type, 0);
5218 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5219 if (!argptr)
5220 return -TARGET_EFAULT;
5221 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5222 unlock_user(argptr, arg, 0);
5224 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
5225 target_ifc_len = host_ifconf->ifc_len;
5226 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
5228 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
5229 nb_ifreq = target_ifc_len / target_ifreq_size;
5230 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
5232 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
5233 if (outbufsz > MAX_STRUCT_SIZE) {
5234 /* We can't fit all the extents into the fixed size buffer.
5235 * Allocate one that is large enough and use it instead.
5237 host_ifconf = malloc(outbufsz);
5238 if (!host_ifconf) {
5239 return -TARGET_ENOMEM;
5241 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
5242 free_buf = 1;
5244 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
5246 host_ifconf->ifc_len = host_ifc_len;
5247 host_ifconf->ifc_buf = host_ifc_buf;
5249 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
5250 if (!is_error(ret)) {
5251 /* convert host ifc_len to target ifc_len */
5253 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
5254 target_ifc_len = nb_ifreq * target_ifreq_size;
5255 host_ifconf->ifc_len = target_ifc_len;
5257 /* restore target ifc_buf */
5259 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
5261 /* copy struct ifconf to target user */
5263 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5264 if (!argptr)
5265 return -TARGET_EFAULT;
5266 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
5267 unlock_user(argptr, arg, target_size);
5269 /* copy ifreq[] to target user */
5271 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
5272 for (i = 0; i < nb_ifreq ; i++) {
5273 thunk_convert(argptr + i * target_ifreq_size,
5274 host_ifc_buf + i * sizeof(struct ifreq),
5275 ifreq_arg_type, THUNK_TARGET);
5277 unlock_user(argptr, target_ifc_buf, target_ifc_len);
5280 if (free_buf) {
5281 free(host_ifconf);
5284 return ret;
5287 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5288 int cmd, abi_long arg)
5290 void *argptr;
5291 struct dm_ioctl *host_dm;
5292 abi_long guest_data;
5293 uint32_t guest_data_size;
5294 int target_size;
5295 const argtype *arg_type = ie->arg_type;
5296 abi_long ret;
5297 void *big_buf = NULL;
5298 char *host_data;
5300 arg_type++;
5301 target_size = thunk_type_size(arg_type, 0);
5302 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5303 if (!argptr) {
5304 ret = -TARGET_EFAULT;
5305 goto out;
5307 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5308 unlock_user(argptr, arg, 0);
5310 /* buf_temp is too small, so fetch things into a bigger buffer */
5311 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5312 memcpy(big_buf, buf_temp, target_size);
5313 buf_temp = big_buf;
5314 host_dm = big_buf;
5316 guest_data = arg + host_dm->data_start;
5317 if ((guest_data - arg) < 0) {
5318 ret = -TARGET_EINVAL;
5319 goto out;
5321 guest_data_size = host_dm->data_size - host_dm->data_start;
5322 host_data = (char*)host_dm + host_dm->data_start;
5324 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5325 if (!argptr) {
5326 ret = -TARGET_EFAULT;
5327 goto out;
5330 switch (ie->host_cmd) {
5331 case DM_REMOVE_ALL:
5332 case DM_LIST_DEVICES:
5333 case DM_DEV_CREATE:
5334 case DM_DEV_REMOVE:
5335 case DM_DEV_SUSPEND:
5336 case DM_DEV_STATUS:
5337 case DM_DEV_WAIT:
5338 case DM_TABLE_STATUS:
5339 case DM_TABLE_CLEAR:
5340 case DM_TABLE_DEPS:
5341 case DM_LIST_VERSIONS:
5342 /* no input data */
5343 break;
5344 case DM_DEV_RENAME:
5345 case DM_DEV_SET_GEOMETRY:
5346 /* data contains only strings */
5347 memcpy(host_data, argptr, guest_data_size);
5348 break;
5349 case DM_TARGET_MSG:
5350 memcpy(host_data, argptr, guest_data_size);
5351 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5352 break;
5353 case DM_TABLE_LOAD:
5355 void *gspec = argptr;
5356 void *cur_data = host_data;
5357 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5358 int spec_size = thunk_type_size(arg_type, 0);
5359 int i;
5361 for (i = 0; i < host_dm->target_count; i++) {
5362 struct dm_target_spec *spec = cur_data;
5363 uint32_t next;
5364 int slen;
5366 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5367 slen = strlen((char*)gspec + spec_size) + 1;
5368 next = spec->next;
5369 spec->next = sizeof(*spec) + slen;
5370 strcpy((char*)&spec[1], gspec + spec_size);
5371 gspec += next;
5372 cur_data += spec->next;
5374 break;
5376 default:
5377 ret = -TARGET_EINVAL;
5378 unlock_user(argptr, guest_data, 0);
5379 goto out;
5381 unlock_user(argptr, guest_data, 0);
5383 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5384 if (!is_error(ret)) {
5385 guest_data = arg + host_dm->data_start;
5386 guest_data_size = host_dm->data_size - host_dm->data_start;
5387 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5388 switch (ie->host_cmd) {
5389 case DM_REMOVE_ALL:
5390 case DM_DEV_CREATE:
5391 case DM_DEV_REMOVE:
5392 case DM_DEV_RENAME:
5393 case DM_DEV_SUSPEND:
5394 case DM_DEV_STATUS:
5395 case DM_TABLE_LOAD:
5396 case DM_TABLE_CLEAR:
5397 case DM_TARGET_MSG:
5398 case DM_DEV_SET_GEOMETRY:
5399 /* no return data */
5400 break;
5401 case DM_LIST_DEVICES:
5403 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5404 uint32_t remaining_data = guest_data_size;
5405 void *cur_data = argptr;
5406 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5407 int nl_size = 12; /* can't use thunk_size due to alignment */
5409 while (1) {
5410 uint32_t next = nl->next;
5411 if (next) {
5412 nl->next = nl_size + (strlen(nl->name) + 1);
5414 if (remaining_data < nl->next) {
5415 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5416 break;
5418 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5419 strcpy(cur_data + nl_size, nl->name);
5420 cur_data += nl->next;
5421 remaining_data -= nl->next;
5422 if (!next) {
5423 break;
5425 nl = (void*)nl + next;
5427 break;
5429 case DM_DEV_WAIT:
5430 case DM_TABLE_STATUS:
5432 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5433 void *cur_data = argptr;
5434 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5435 int spec_size = thunk_type_size(arg_type, 0);
5436 int i;
5438 for (i = 0; i < host_dm->target_count; i++) {
5439 uint32_t next = spec->next;
5440 int slen = strlen((char*)&spec[1]) + 1;
5441 spec->next = (cur_data - argptr) + spec_size + slen;
5442 if (guest_data_size < spec->next) {
5443 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5444 break;
5446 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5447 strcpy(cur_data + spec_size, (char*)&spec[1]);
5448 cur_data = argptr + spec->next;
5449 spec = (void*)host_dm + host_dm->data_start + next;
5451 break;
5453 case DM_TABLE_DEPS:
5455 void *hdata = (void*)host_dm + host_dm->data_start;
5456 int count = *(uint32_t*)hdata;
5457 uint64_t *hdev = hdata + 8;
5458 uint64_t *gdev = argptr + 8;
5459 int i;
5461 *(uint32_t*)argptr = tswap32(count);
5462 for (i = 0; i < count; i++) {
5463 *gdev = tswap64(*hdev);
5464 gdev++;
5465 hdev++;
5467 break;
5469 case DM_LIST_VERSIONS:
5471 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5472 uint32_t remaining_data = guest_data_size;
5473 void *cur_data = argptr;
5474 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5475 int vers_size = thunk_type_size(arg_type, 0);
5477 while (1) {
5478 uint32_t next = vers->next;
5479 if (next) {
5480 vers->next = vers_size + (strlen(vers->name) + 1);
5482 if (remaining_data < vers->next) {
5483 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5484 break;
5486 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5487 strcpy(cur_data + vers_size, vers->name);
5488 cur_data += vers->next;
5489 remaining_data -= vers->next;
5490 if (!next) {
5491 break;
5493 vers = (void*)vers + next;
5495 break;
5497 default:
5498 unlock_user(argptr, guest_data, 0);
5499 ret = -TARGET_EINVAL;
5500 goto out;
5502 unlock_user(argptr, guest_data, guest_data_size);
5504 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5505 if (!argptr) {
5506 ret = -TARGET_EFAULT;
5507 goto out;
5509 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5510 unlock_user(argptr, arg, target_size);
5512 out:
5513 g_free(big_buf);
5514 return ret;
5517 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5518 int cmd, abi_long arg)
5520 void *argptr;
5521 int target_size;
5522 const argtype *arg_type = ie->arg_type;
5523 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5524 abi_long ret;
5526 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5527 struct blkpg_partition host_part;
5529 /* Read and convert blkpg */
5530 arg_type++;
5531 target_size = thunk_type_size(arg_type, 0);
5532 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5533 if (!argptr) {
5534 ret = -TARGET_EFAULT;
5535 goto out;
5537 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5538 unlock_user(argptr, arg, 0);
5540 switch (host_blkpg->op) {
5541 case BLKPG_ADD_PARTITION:
5542 case BLKPG_DEL_PARTITION:
5543 /* payload is struct blkpg_partition */
5544 break;
5545 default:
5546 /* Unknown opcode */
5547 ret = -TARGET_EINVAL;
5548 goto out;
5551 /* Read and convert blkpg->data */
5552 arg = (abi_long)(uintptr_t)host_blkpg->data;
5553 target_size = thunk_type_size(part_arg_type, 0);
5554 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5555 if (!argptr) {
5556 ret = -TARGET_EFAULT;
5557 goto out;
5559 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5560 unlock_user(argptr, arg, 0);
5562 /* Swizzle the data pointer to our local copy and call! */
5563 host_blkpg->data = &host_part;
5564 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5566 out:
5567 return ret;
5570 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5571 int fd, int cmd, abi_long arg)
5573 const argtype *arg_type = ie->arg_type;
5574 const StructEntry *se;
5575 const argtype *field_types;
5576 const int *dst_offsets, *src_offsets;
5577 int target_size;
5578 void *argptr;
5579 abi_ulong *target_rt_dev_ptr;
5580 unsigned long *host_rt_dev_ptr;
5581 abi_long ret;
5582 int i;
5584 assert(ie->access == IOC_W);
5585 assert(*arg_type == TYPE_PTR);
5586 arg_type++;
5587 assert(*arg_type == TYPE_STRUCT);
5588 target_size = thunk_type_size(arg_type, 0);
5589 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5590 if (!argptr) {
5591 return -TARGET_EFAULT;
5593 arg_type++;
5594 assert(*arg_type == (int)STRUCT_rtentry);
5595 se = struct_entries + *arg_type++;
5596 assert(se->convert[0] == NULL);
5597 /* convert struct here to be able to catch rt_dev string */
5598 field_types = se->field_types;
5599 dst_offsets = se->field_offsets[THUNK_HOST];
5600 src_offsets = se->field_offsets[THUNK_TARGET];
5601 for (i = 0; i < se->nb_fields; i++) {
5602 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5603 assert(*field_types == TYPE_PTRVOID);
5604 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5605 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5606 if (*target_rt_dev_ptr != 0) {
5607 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5608 tswapal(*target_rt_dev_ptr));
5609 if (!*host_rt_dev_ptr) {
5610 unlock_user(argptr, arg, 0);
5611 return -TARGET_EFAULT;
5613 } else {
5614 *host_rt_dev_ptr = 0;
5616 field_types++;
5617 continue;
5619 field_types = thunk_convert(buf_temp + dst_offsets[i],
5620 argptr + src_offsets[i],
5621 field_types, THUNK_HOST);
5623 unlock_user(argptr, arg, 0);
5625 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5626 if (*host_rt_dev_ptr != 0) {
5627 unlock_user((void *)*host_rt_dev_ptr,
5628 *target_rt_dev_ptr, 0);
5630 return ret;
5633 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5634 int fd, int cmd, abi_long arg)
5636 int sig = target_to_host_signal(arg);
5637 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
/* Table of supported ioctls, generated by expanding ioctls.h.  Each entry
 * maps a target ioctl number to the host number plus marshalling info;
 * IOCTL_SPECIAL entries carry a custom handler, IOCTL_IGNORE entries have
 * host_cmd == 0.  The list is terminated by a zeroed sentinel entry. */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, },
};
/* ??? Implement proper locking for ioctls. */
/* do_ioctl() Must return target values and target errnos. */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear search of the generated table; terminated by target_cmd == 0. */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    if (ie->do_ioctl) {
        /* Entry registered via IOCTL_SPECIAL: delegate to custom handler. */
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux. */
        return -TARGET_ENOSYS;
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* Scalar argument: passed through unchanged. */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* Kernel writes: issue ioctl into buf_temp, then convert the
               result out to guest memory. */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* Kernel reads: convert guest data in before issuing. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* Both directions: convert in, issue, convert back out. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
/* termios c_iflag bits: target <-> host translation table. */
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { 0, 0, 0, 0 }
};
/* termios c_oflag bits: target <-> host translation table.  The delay
 * fields (NLDLY/CRDLY/...) are multi-bit, so each value gets its own row
 * with the field mask in the first/third columns. */
static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
    { 0, 0, 0, 0 }
};
/* termios c_cflag bits: target <-> host translation table.  CBAUD and
 * CSIZE are multi-bit fields, translated value-by-value. */
static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
    { 0, 0, 0, 0 }
};
/* termios c_lflag bits: target <-> host translation table. */
static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { 0, 0, 0, 0 }
};
5842 static void target_to_host_termios (void *dst, const void *src)
5844 struct host_termios *host = dst;
5845 const struct target_termios *target = src;
5847 host->c_iflag =
5848 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5849 host->c_oflag =
5850 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5851 host->c_cflag =
5852 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5853 host->c_lflag =
5854 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5855 host->c_line = target->c_line;
5857 memset(host->c_cc, 0, sizeof(host->c_cc));
5858 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5859 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5860 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5861 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5862 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5863 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5864 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5865 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5866 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5867 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5868 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5869 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5870 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5871 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5872 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5873 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5874 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5877 static void host_to_target_termios (void *dst, const void *src)
5879 struct target_termios *target = dst;
5880 const struct host_termios *host = src;
5882 target->c_iflag =
5883 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5884 target->c_oflag =
5885 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5886 target->c_cflag =
5887 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5888 target->c_lflag =
5889 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5890 target->c_line = host->c_line;
5892 memset(target->c_cc, 0, sizeof(target->c_cc));
5893 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5894 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5895 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5896 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5897 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5898 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5899 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5900 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5901 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5902 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5903 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5904 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5905 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5906 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5907 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5908 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5909 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
/* StructEntry describing termios for the thunk layer; .convert[] order is
 * { to-target, to-host }, matching THUNK_TARGET/THUNK_HOST indices. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};
5918 static bitmask_transtbl mmap_flags_tbl[] = {
5919 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5920 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5921 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5922 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5923 MAP_ANONYMOUS, MAP_ANONYMOUS },
5924 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5925 MAP_GROWSDOWN, MAP_GROWSDOWN },
5926 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5927 MAP_DENYWRITE, MAP_DENYWRITE },
5928 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5929 MAP_EXECUTABLE, MAP_EXECUTABLE },
5930 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5931 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5932 MAP_NORESERVE, MAP_NORESERVE },
5933 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5934 /* MAP_STACK had been ignored by the kernel for quite some time.
5935 Recognize it for the target insofar as we do not want to pass
5936 it through to the host. */
5937 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5938 { 0, 0, 0, 0 }
5941 #if defined(TARGET_I386)
/* NOTE: there is really one LDT for all the threads */
/* Host-side copy of the guest's LDT; allocated lazily by write_ldt(). */
static uint8_t *ldt_table;
5946 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5948 int size;
5949 void *p;
5951 if (!ldt_table)
5952 return 0;
5953 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5954 if (size > bytecount)
5955 size = bytecount;
5956 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5957 if (!p)
5958 return -TARGET_EFAULT;
5959 /* ??? Should this by byteswapped? */
5960 memcpy(p, ldt_table, size);
5961 unlock_user(p, ptr, size);
5962 return size;
/* XXX: add locking support */
/* modify_ldt(func=1 or 0x11): install or clear one LDT descriptor from a
 * guest struct target_modify_ldt_ldt_s.  oldmode selects the legacy
 * semantics (func=1): no 'useable' bit, and contents==3 is rejected.
 * The descriptor words are built exactly as the Linux kernel does. */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the packed flags word (same layout as user_desc.flags). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Build the two 32-bit descriptor words (see Intel SDM segment
       descriptor format; 0x7000 = DPL 3 | S). */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
6057 /* specific and weird i386 syscalls */
6058 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6059 unsigned long bytecount)
6061 abi_long ret;
6063 switch (func) {
6064 case 0:
6065 ret = read_ldt(ptr, bytecount);
6066 break;
6067 case 1:
6068 ret = write_ldt(env, ptr, bytecount, 1);
6069 break;
6070 case 0x11:
6071 ret = write_ldt(env, ptr, bytecount, 0);
6072 break;
6073 default:
6074 ret = -TARGET_ENOSYS;
6075 break;
6077 return ret;
6080 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/* set_thread_area: install one TLS descriptor in the GDT from a guest
 * struct target_modify_ldt_ldt_s.  entry_number == -1 asks for the first
 * free TLS slot, whose index is written back to the guest struct. */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* Pick the first unused TLS slot and report it to the guest. */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* Unpack the packed flags word (same layout as user_desc.flags). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Build the two 32-bit descriptor words (0x7000 = DPL 3 | S). */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
/* get_thread_area: read one TLS descriptor out of the GDT and re-pack it
 * into the guest's struct target_modify_ldt_ldt_s (inverse of
 * do_set_thread_area's descriptor encoding). */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Decode the descriptor bits set by do_set_thread_area. */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    /* Re-pack into the user_desc-style flags word. */
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
6212 #endif /* TARGET_I386 && TARGET_ABI32 */
6214 #ifndef TARGET_ABI32
6215 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6217 abi_long ret = 0;
6218 abi_ulong val;
6219 int idx;
6221 switch(code) {
6222 case TARGET_ARCH_SET_GS:
6223 case TARGET_ARCH_SET_FS:
6224 if (code == TARGET_ARCH_SET_GS)
6225 idx = R_GS;
6226 else
6227 idx = R_FS;
6228 cpu_x86_load_seg(env, idx, 0);
6229 env->segs[idx].base = addr;
6230 break;
6231 case TARGET_ARCH_GET_GS:
6232 case TARGET_ARCH_GET_FS:
6233 if (code == TARGET_ARCH_GET_GS)
6234 idx = R_GS;
6235 else
6236 idx = R_FS;
6237 val = env->segs[idx].base;
6238 if (put_user(val, addr, abi_ulong))
6239 ret = -TARGET_EFAULT;
6240 break;
6241 default:
6242 ret = -TARGET_EINVAL;
6243 break;
6245 return ret;
6247 #endif
6249 #endif /* defined(TARGET_I386) */
/* Stack size for threads created via clone(CLONE_VM). */
#define NEW_STACK_SIZE 0x40000

/* Serializes thread creation so setup of a new CPU appears atomic;
 * also held by the parent until the child's TLS state is initialized. */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;

/* Handshake data passed from do_fork() to clone_func(). */
typedef struct {
    CPUArchState *env;       /* CPU state for the new thread */
    pthread_mutex_t mutex;   /* protects cond */
    pthread_cond_t cond;     /* signalled once the child is ready */
    pthread_t thread;
    uint32_t tid;            /* filled in by the child */
    abi_ulong child_tidptr;  /* guest addrs to store the TID into, or 0 */
    abi_ulong parent_tidptr;
    sigset_t sigmask;        /* signal mask the child restores */
} new_thread_info;
/* Start routine for a thread created by do_fork(CLONE_VM): register the
 * new CPU with the runtime, publish its TID, signal the waiting parent,
 * then wait for the parent to finish TLS setup and enter cpu_loop(). */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = gettid();
    task_settid(ts);
    /* Store our TID wherever the clone() caller asked for it. */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
/* Implements clone()/fork()/vfork().  CLONE_VM creates a pthread sharing
 * this address space; otherwise a real fork() is performed.  vfork is
 * emulated with fork. */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        /* Threads must ask for the full canonical flag set and nothing
           we cannot support. */
        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_cpu = ENV_GET_CPU(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        /* NOTE(review): the pthread_attr_* return values below are each
           overwritten without being checked. */
        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        /* If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         */
        if (!parallel_cpus) {
            parallel_cpus = true;
            tb_flush(cpu);
        }

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs(env, newsp);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            fork_end(0);
        }
    }
    return ret;
}
6438 /* warning : doesn't handle linux specific flags... */
6439 static int target_to_host_fcntl_cmd(int cmd)
6441 switch(cmd) {
6442 case TARGET_F_DUPFD:
6443 case TARGET_F_GETFD:
6444 case TARGET_F_SETFD:
6445 case TARGET_F_GETFL:
6446 case TARGET_F_SETFL:
6447 return cmd;
6448 case TARGET_F_GETLK:
6449 return F_GETLK64;
6450 case TARGET_F_SETLK:
6451 return F_SETLK64;
6452 case TARGET_F_SETLKW:
6453 return F_SETLKW64;
6454 case TARGET_F_GETOWN:
6455 return F_GETOWN;
6456 case TARGET_F_SETOWN:
6457 return F_SETOWN;
6458 case TARGET_F_GETSIG:
6459 return F_GETSIG;
6460 case TARGET_F_SETSIG:
6461 return F_SETSIG;
6462 #if TARGET_ABI_BITS == 32
6463 case TARGET_F_GETLK64:
6464 return F_GETLK64;
6465 case TARGET_F_SETLK64:
6466 return F_SETLK64;
6467 case TARGET_F_SETLKW64:
6468 return F_SETLKW64;
6469 #endif
6470 case TARGET_F_SETLEASE:
6471 return F_SETLEASE;
6472 case TARGET_F_GETLEASE:
6473 return F_GETLEASE;
6474 #ifdef F_DUPFD_CLOEXEC
6475 case TARGET_F_DUPFD_CLOEXEC:
6476 return F_DUPFD_CLOEXEC;
6477 #endif
6478 case TARGET_F_NOTIFY:
6479 return F_NOTIFY;
6480 #ifdef F_GETOWN_EX
6481 case TARGET_F_GETOWN_EX:
6482 return F_GETOWN_EX;
6483 #endif
6484 #ifdef F_SETOWN_EX
6485 case TARGET_F_SETOWN_EX:
6486 return F_SETOWN_EX;
6487 #endif
6488 #ifdef F_SETPIPE_SZ
6489 case TARGET_F_SETPIPE_SZ:
6490 return F_SETPIPE_SZ;
6491 case TARGET_F_GETPIPE_SZ:
6492 return F_GETPIPE_SZ;
6493 #endif
6494 default:
6495 return -TARGET_EINVAL;
6497 return -TARGET_EINVAL;
/* flock l_type values: target <-> host translation (value mapping only,
 * hence the -1 "match everything" masks). */
#define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
static const bitmask_transtbl flock_tbl[] = {
    TRANSTBL_CONVERT(F_RDLCK),
    TRANSTBL_CONVERT(F_WRLCK),
    TRANSTBL_CONVERT(F_UNLCK),
    TRANSTBL_CONVERT(F_EXLCK),
    TRANSTBL_CONVERT(F_SHLCK),
    { 0, 0, 0, 0 }
};
6510 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6511 abi_ulong target_flock_addr)
6513 struct target_flock *target_fl;
6514 short l_type;
6516 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6517 return -TARGET_EFAULT;
6520 __get_user(l_type, &target_fl->l_type);
6521 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6522 __get_user(fl->l_whence, &target_fl->l_whence);
6523 __get_user(fl->l_start, &target_fl->l_start);
6524 __get_user(fl->l_len, &target_fl->l_len);
6525 __get_user(fl->l_pid, &target_fl->l_pid);
6526 unlock_user_struct(target_fl, target_flock_addr, 0);
6527 return 0;
6530 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6531 const struct flock64 *fl)
6533 struct target_flock *target_fl;
6534 short l_type;
6536 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6537 return -TARGET_EFAULT;
6540 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6541 __put_user(l_type, &target_fl->l_type);
6542 __put_user(fl->l_whence, &target_fl->l_whence);
6543 __put_user(fl->l_start, &target_fl->l_start);
6544 __put_user(fl->l_len, &target_fl->l_len);
6545 __put_user(fl->l_pid, &target_fl->l_pid);
6546 unlock_user_struct(target_fl, target_flock_addr, 1);
6547 return 0;
/* Converter function types used by the fcntl64 path to pick between the
 * generic and ARM-EABI struct flock64 layouts at runtime. */
typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6553 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6554 static inline abi_long copy_from_user_eabi_flock64(struct flock64 *fl,
6555 abi_ulong target_flock_addr)
6557 struct target_eabi_flock64 *target_fl;
6558 short l_type;
6560 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6561 return -TARGET_EFAULT;
6564 __get_user(l_type, &target_fl->l_type);
6565 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6566 __get_user(fl->l_whence, &target_fl->l_whence);
6567 __get_user(fl->l_start, &target_fl->l_start);
6568 __get_user(fl->l_len, &target_fl->l_len);
6569 __get_user(fl->l_pid, &target_fl->l_pid);
6570 unlock_user_struct(target_fl, target_flock_addr, 0);
6571 return 0;
6574 static inline abi_long copy_to_user_eabi_flock64(abi_ulong target_flock_addr,
6575 const struct flock64 *fl)
6577 struct target_eabi_flock64 *target_fl;
6578 short l_type;
6580 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6581 return -TARGET_EFAULT;
6584 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6585 __put_user(l_type, &target_fl->l_type);
6586 __put_user(fl->l_whence, &target_fl->l_whence);
6587 __put_user(fl->l_start, &target_fl->l_start);
6588 __put_user(fl->l_len, &target_fl->l_len);
6589 __put_user(fl->l_pid, &target_fl->l_pid);
6590 unlock_user_struct(target_fl, target_flock_addr, 1);
6591 return 0;
6593 #endif
6595 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6596 abi_ulong target_flock_addr)
6598 struct target_flock64 *target_fl;
6599 short l_type;
6601 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6602 return -TARGET_EFAULT;
6605 __get_user(l_type, &target_fl->l_type);
6606 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6607 __get_user(fl->l_whence, &target_fl->l_whence);
6608 __get_user(fl->l_start, &target_fl->l_start);
6609 __get_user(fl->l_len, &target_fl->l_len);
6610 __get_user(fl->l_pid, &target_fl->l_pid);
6611 unlock_user_struct(target_fl, target_flock_addr, 0);
6612 return 0;
6615 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6616 const struct flock64 *fl)
6618 struct target_flock64 *target_fl;
6619 short l_type;
6621 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6622 return -TARGET_EFAULT;
6625 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6626 __put_user(l_type, &target_fl->l_type);
6627 __put_user(fl->l_whence, &target_fl->l_whence);
6628 __put_user(fl->l_start, &target_fl->l_start);
6629 __put_user(fl->l_len, &target_fl->l_len);
6630 __put_user(fl->l_pid, &target_fl->l_pid);
6631 unlock_user_struct(target_fl, target_flock_addr, 1);
6632 return 0;
/*
 * Emulate fcntl(2) for the guest: translate the target command to the
 * host command, convert any pointed-to structures in both directions,
 * and issue the host fcntl.  Commands without in-memory arguments are
 * passed through with only flag/value translation.
 * Returns the (errno-converted) host result or -TARGET_E* on a
 * conversion failure.
 */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        /* Lock description is copied in, queried, and copied back out. */
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        /* Returned O_* flags must be converted to target encoding. */
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    /* Integer-argument commands: pass the value straight through. */
    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        /* Unknown command: forward unchanged and hope for the best. */
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
#ifdef USE_UID16

/* Clamp a 32-bit uid into the 16-bit range; overflowing ids map to the
 * conventional "overflow" id 65534. */
static inline int high2lowuid(int uid)
{
    return uid > 65535 ? 65534 : uid;
}

static inline int high2lowgid(int gid)
{
    return gid > 65535 ? 65534 : gid;
}

/* Widen a 16-bit id, preserving the special -1 ("unchanged") value. */
static inline int low2highuid(int uid)
{
    return (int16_t)uid == -1 ? -1 : uid;
}

static inline int low2highgid(int gid)
{
    return (int16_t)gid == -1 ? -1 : gid;
}

static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */

/* 32-bit-uid targets: ids pass through unmodified. */
static inline int high2lowuid(int uid)
{
    return uid;
}

static inline int high2lowgid(int gid)
{
    return gid;
}

static inline int low2highuid(int uid)
{
    return uid;
}

static inline int low2highgid(int gid)
{
    return gid;
}

static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
6809 /* We must do direct syscalls for setting UID/GID, because we want to
6810 * implement the Linux system call semantics of "change only for this thread",
6811 * not the libc/POSIX semantics of "change for all threads in process".
6812 * (See http://ewontfix.com/17/ for more details.)
6813 * We use the 32-bit version of the syscalls if present; if it is not
6814 * then either the host architecture supports 32-bit UIDs natively with
6815 * the standard syscall, or the 16-bit UID is the best we can do.
/* Prefer the 32-bit-uid variants of the set*id syscalls when the host
 * kernel provides them; otherwise fall back to the base syscall number. */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

/* Raw syscall wrappers so the id change affects only the calling thread
 * (see the comment above about Linux vs libc/POSIX semantics). */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
/*
 * One-time emulation setup: register the struct-marshalling thunks,
 * build the target->host errno table from its inverse, and patch the
 * size field of ioctl numbers whose argument size differs between
 * target and host.
 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);
#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            /* An all-ones size marks "compute from the thunk type". */
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                             (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
#if TARGET_ABI_BITS == 32
/*
 * Reassemble a 64-bit file offset from the two 32-bit registers a
 * 32-bit guest passes it in; which register carries the high half
 * depends on the guest's endianness.
 */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
    uint64_t hi, lo;
#ifdef TARGET_WORDS_BIGENDIAN
    hi = word0;
    lo = word1;
#else
    hi = word1;
    lo = word0;
#endif
    return (hi << 32) | lo;
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABIs pass the whole offset in one register; word1 is unused. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/*
 * truncate64 for 32-bit guests: the 64-bit length arrives split across
 * two registers, shifted up by one slot on ABIs that require an
 * even/odd register pair (regpairs_aligned).
 */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    abi_long w0 = arg2, w1 = arg3;

    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        w0 = arg3;
        w1 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(w0, w1)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/*
 * ftruncate64 for 32-bit guests; same register-pair handling as
 * target_truncate64() above but operating on an fd.
 */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    abi_long w0 = arg2, w1 = arg3;

    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        w0 = arg3;
        w1 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(w0, w1)));
}
#endif
6939 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
6940 abi_ulong target_addr)
6942 struct target_timespec *target_ts;
6944 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
6945 return -TARGET_EFAULT;
6946 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
6947 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6948 unlock_user_struct(target_ts, target_addr, 0);
6949 return 0;
6952 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
6953 struct timespec *host_ts)
6955 struct target_timespec *target_ts;
6957 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
6958 return -TARGET_EFAULT;
6959 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
6960 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6961 unlock_user_struct(target_ts, target_addr, 1);
6962 return 0;
6965 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6966 abi_ulong target_addr)
6968 struct target_itimerspec *target_itspec;
6970 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6971 return -TARGET_EFAULT;
6974 host_itspec->it_interval.tv_sec =
6975 tswapal(target_itspec->it_interval.tv_sec);
6976 host_itspec->it_interval.tv_nsec =
6977 tswapal(target_itspec->it_interval.tv_nsec);
6978 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6979 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6981 unlock_user_struct(target_itspec, target_addr, 1);
6982 return 0;
6985 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6986 struct itimerspec *host_its)
6988 struct target_itimerspec *target_itspec;
6990 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6991 return -TARGET_EFAULT;
6994 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6995 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6997 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6998 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
7000 unlock_user_struct(target_itspec, target_addr, 0);
7001 return 0;
/*
 * Read a target "struct timex" (adjtimex/clock_adjtime argument) from
 * guest memory at @target_addr into the host struct @host_tx.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* Field-by-field copy with byte swapping; only the fields declared
     * in target_timex are transferred. */
    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
/*
 * Write the host "struct timex" @host_tx back out to guest memory at
 * @target_addr (the kernel updates the struct on return from
 * adjtimex/clock_adjtime).  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
7075 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7076 abi_ulong target_addr)
7078 struct target_sigevent *target_sevp;
7080 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7081 return -TARGET_EFAULT;
7084 /* This union is awkward on 64 bit systems because it has a 32 bit
7085 * integer and a pointer in it; we follow the conversion approach
7086 * used for handling sigval types in signal.c so the guest should get
7087 * the correct value back even if we did a 64 bit byteswap and it's
7088 * using the 32 bit integer.
7090 host_sevp->sigev_value.sival_ptr =
7091 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7092 host_sevp->sigev_signo =
7093 target_to_host_signal(tswap32(target_sevp->sigev_signo));
7094 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7095 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7097 unlock_user_struct(target_sevp, target_addr, 1);
7098 return 0;
#if defined(TARGET_NR_mlockall)
/* Translate the guest's MCL_* flag bits for mlockall() into host bits. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int host_arg = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        host_arg |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        host_arg |= MCL_FUTURE;
    }
    return host_arg;
}
#endif
/*
 * Convert a host "struct stat" into the guest's 64-bit stat layout at
 * @target_addr.  ARM/ABI32 EABI guests use their own padded layout;
 * everyone else uses target_stat64 (or target_stat when the target has
 * no separate 64-bit variant).  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Some layouts carry the inode twice; fill both copies. */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
7179 /* ??? Using host futex calls even when target atomic operations
7180 are not really atomic probably breaks things. However implementing
7181 futexes locally would make futexes shared between multiple processes
7182 tricky. However they're probably useless because guest atomic
7183 operations won't work either. */
/*
 * Emulate futex(2) for the guest.  The guest address is translated with
 * g2h(); the value/timeout arguments are converted per-operation since
 * the kernel interprets them differently for each FUTEX_* command.
 * Returns the errno-converted host result, or -TARGET_ENOSYS for an
 * unrecognized base operation.
 */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target. */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        /* Timeout is a real timespec here; val is compared against the
         * futex word in guest memory, hence the tswap32. */
        if (timeout) {
            pts = &ts;
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
                         pts, NULL, val3));
    case FUTEX_WAKE:
        return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_FD:
        return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return get_errno(safe_futex(g2h(uaddr), op, val, pts,
                                    g2h(uaddr2),
                                    (base_op == FUTEX_CMP_REQUEUE
                                     ? tswap32(val3)
                                     : val3)));
    default:
        return -TARGET_ENOSYS;
    }
}
7230 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate name_to_handle_at(2).  The guest supplies a file_handle whose
 * handle_bytes field gives the space available for the opaque handle;
 * we mirror it in a host-side buffer, call the host syscall, then copy
 * the (opaque) result back with the two header fields byte-swapped.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* handle_bytes is the first 32-bit field of struct file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
7282 #endif
7284 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate open_by_handle_at(2): duplicate the guest's file_handle into
 * host memory (fixing the byte order of the two header fields), invoke
 * the host syscall with translated open flags, and return the new fd.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes is the first 32-bit field of struct file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
7316 #endif
7318 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7320 /* signalfd siginfo conversion */
/*
 * Convert one signalfd_siginfo record from host to target byte order,
 * translating the signal number as well.
 *
 * NOTE(review): this routine reads tinfo->ssi_signo / ssi_code before
 * any conversion has written them, and swaps ssi_errno through tinfo
 * rather than info.  That is only correct when the caller passes
 * tinfo == info (an in-place conversion), which is what the sole
 * caller below does — confirm before adding other callers.
 */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    /* For memory-error SIGBUS records, the 16 bits following ssi_addr
     * carry the least-significant-bits field; swap it too. */
    if (tinfo->ssi_signo == SIGBUS &&
        (tinfo->ssi_code == BUS_MCEERR_AR ||
         tinfo->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
7360 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
7362 int i;
7364 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
7365 host_to_target_signalfd_siginfo(buf + i, buf + i);
7368 return len;
/* fd translator hooked onto signalfd descriptors: byteswaps the
 * records the guest reads from them. */
static TargetFdTrans target_signalfd_trans = {
    .host_to_target_data = host_to_target_data_signalfd,
};
/*
 * Emulate signalfd4(2): validate and translate the flags, convert the
 * guest's sigset, create the host signalfd, and register the data
 * translator so reads get byte-swapped for the guest.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    /* Kernel rejects anything beyond NONBLOCK|CLOEXEC; mirror that. */
    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
7402 #endif
7404 /* Map host to target signal numbers for the wait family of syscalls.
7405 Assume all other status bits are the same. */
/*
 * Rewrite the signal number embedded in a wait()-family status word
 * from host to target numbering; all other status bits pass through.
 */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Termination signal lives in the low 7 bits. */
        int tsig = host_to_target_signal(WTERMSIG(status));
        return tsig | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8..15. */
        int tsig = host_to_target_signal(WSTOPSIG(status));
        return (tsig << 8) | (status & 0xff);
    }
    return status;
}
7418 static int open_self_cmdline(void *cpu_env, int fd)
7420 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7421 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7422 int i;
7424 for (i = 0; i < bprm->argc; i++) {
7425 size_t len = strlen(bprm->argv[i]) + 1;
7427 if (write(fd, bprm->argv[i], len) != len) {
7428 return -1;
7432 return 0;
7435 static int open_self_maps(void *cpu_env, int fd)
7437 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7438 TaskState *ts = cpu->opaque;
7439 FILE *fp;
7440 char *line = NULL;
7441 size_t len = 0;
7442 ssize_t read;
7444 fp = fopen("/proc/self/maps", "r");
7445 if (fp == NULL) {
7446 return -1;
7449 while ((read = getline(&line, &len, fp)) != -1) {
7450 int fields, dev_maj, dev_min, inode;
7451 uint64_t min, max, offset;
7452 char flag_r, flag_w, flag_x, flag_p;
7453 char path[512] = "";
7454 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7455 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7456 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
7458 if ((fields < 10) || (fields > 11)) {
7459 continue;
7461 if (h2g_valid(min)) {
7462 int flags = page_get_flags(h2g(min));
7463 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
7464 if (page_check_range(h2g(min), max - min, flags) == -1) {
7465 continue;
7467 if (h2g(min) == ts->info->stack_limit) {
7468 pstrcpy(path, sizeof(path), " [stack]");
7470 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
7471 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7472 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7473 flag_x, flag_p, offset, dev_maj, dev_min, inode,
7474 path[0] ? " " : "", path);
7478 free(line);
7479 fclose(fp);
7481 return 0;
7484 static int open_self_stat(void *cpu_env, int fd)
7486 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7487 TaskState *ts = cpu->opaque;
7488 abi_ulong start_stack = ts->info->start_stack;
7489 int i;
7491 for (i = 0; i < 44; i++) {
7492 char buf[128];
7493 int len;
7494 uint64_t val = 0;
7496 if (i == 0) {
7497 /* pid */
7498 val = getpid();
7499 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7500 } else if (i == 1) {
7501 /* app name */
7502 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
7503 } else if (i == 27) {
7504 /* stack bottom */
7505 val = start_stack;
7506 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7507 } else {
7508 /* for the rest, there is MasterCard */
7509 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
7512 len = strlen(buf);
7513 if (write(fd, buf, len) != len) {
7514 return -1;
7518 return 0;
/*
 * Fake /proc/self/auxv: the guest's auxiliary vector was saved on its
 * stack at exec time; copy those bytes into the temp file, then rewind
 * it so the caller reads from the start.
 */
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        /* NOTE(review): ptr and len were advanced/decremented by the
         * loop above, so this unlock does not pass the original buffer
         * start/length; appears harmless for a read-only lock on the
         * common path, but worth confirming. */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
/*
 * Return 1 if @filename names /proc/<this process>/<entry> — i.e.
 * "/proc/self/<entry>" or "/proc/<our pid>/<entry>" — and 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (strncmp(filename, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    filename += strlen("/proc/");

    if (strncmp(filename, "self/", strlen("self/")) == 0) {
        filename += strlen("self/");
    } else if (*filename >= '1' && *filename <= '9') {
        char myself[80];

        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(filename, myself, strlen(myself)) != 0) {
            return 0;
        }
        filename += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(filename, entry) == 0;
}
7575 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-match comparator for fake-open table entries holding full paths. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
7581 static int open_net_route(void *cpu_env, int fd)
7583 FILE *fp;
7584 char *line = NULL;
7585 size_t len = 0;
7586 ssize_t read;
7588 fp = fopen("/proc/net/route", "r");
7589 if (fp == NULL) {
7590 return -1;
7593 /* read header */
7595 read = getline(&line, &len, fp);
7596 dprintf(fd, "%s", line);
7598 /* read routes */
7600 while ((read = getline(&line, &len, fp)) != -1) {
7601 char iface[16];
7602 uint32_t dest, gw, mask;
7603 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7604 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7605 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7606 &mask, &mtu, &window, &irtt);
7607 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7608 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7609 metric, tswap32(mask), mtu, window, irtt);
7612 free(line);
7613 fclose(fp);
7615 return 0;
7617 #endif
/*
 * openat(2) with /proc interception: requests for this process's own
 * /proc files that QEMU must fake (because the host values would
 * describe QEMU, not the guest) are served from a freshly filled,
 * unlinked temp file; everything else goes to the host via
 * safe_openat().
 */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;
        int (*fill)(void *cpu_env, int fd);
        int (*cmp)(const char *s1, const char *s2);
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    /* /proc/self/exe must resolve to the guest binary, not QEMU. */
    if (is_proc_myself(pathname, "exe")) {
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* Unlink immediately: the fd keeps the file alive. */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
7679 #define TIMER_MAGIC 0x0caf0000
7680 #define TIMER_MAGIC_MASK 0xffff0000
7682 /* Convert QEMU provided timer ID back to internal 16bit index format */
7683 static target_timer_t get_timer_id(abi_long arg)
7685 target_timer_t timerid = arg;
7687 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7688 return -TARGET_EINVAL;
7691 timerid &= 0xffff;
7693 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7694 return -TARGET_EINVAL;
7697 return timerid;
7700 static abi_long swap_data_eventfd(void *buf, size_t len)
7702 uint64_t *counter = buf;
7703 int i;
7705 if (len < sizeof(uint64_t)) {
7706 return -EINVAL;
7709 for (i = 0; i < len; i += sizeof(uint64_t)) {
7710 *counter = tswap64(*counter);
7711 counter++;
7714 return len;
/* Fd translator for eventfds: the 8-byte counter is byte swapped in
 * both directions, so the single swap helper serves as both
 * callbacks. */
static TargetFdTrans target_eventfd_trans = {
    .host_to_target_data = swap_data_eventfd,
    .target_to_host_data = swap_data_eventfd,
};
7722 #if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
7723 (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
7724 defined(__NR_inotify_init1))
7725 static abi_long host_to_target_data_inotify(void *buf, size_t len)
7727 struct inotify_event *ev;
7728 int i;
7729 uint32_t name_len;
7731 for (i = 0; i < len; i += sizeof(struct inotify_event) + name_len) {
7732 ev = (struct inotify_event *)((char *)buf + i);
7733 name_len = ev->len;
7735 ev->wd = tswap32(ev->wd);
7736 ev->mask = tswap32(ev->mask);
7737 ev->cookie = tswap32(ev->cookie);
7738 ev->len = tswap32(name_len);
7741 return len;
/* Fd translator for inotify fds: only host-to-target conversion is
 * registered — events flow from the kernel to the guest. */
static TargetFdTrans target_inotify_trans = {
    .host_to_target_data = host_to_target_data_inotify,
};
7747 #endif
7749 static int target_to_host_cpu_mask(unsigned long *host_mask,
7750 size_t host_size,
7751 abi_ulong target_addr,
7752 size_t target_size)
7754 unsigned target_bits = sizeof(abi_ulong) * 8;
7755 unsigned host_bits = sizeof(*host_mask) * 8;
7756 abi_ulong *target_mask;
7757 unsigned i, j;
7759 assert(host_size >= target_size);
7761 target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7762 if (!target_mask) {
7763 return -TARGET_EFAULT;
7765 memset(host_mask, 0, host_size);
7767 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7768 unsigned bit = i * target_bits;
7769 abi_ulong val;
7771 __get_user(val, &target_mask[i]);
7772 for (j = 0; j < target_bits; j++, bit++) {
7773 if (val & (1UL << j)) {
7774 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7779 unlock_user(target_mask, target_addr, 0);
7780 return 0;
7783 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7784 size_t host_size,
7785 abi_ulong target_addr,
7786 size_t target_size)
7788 unsigned target_bits = sizeof(abi_ulong) * 8;
7789 unsigned host_bits = sizeof(*host_mask) * 8;
7790 abi_ulong *target_mask;
7791 unsigned i, j;
7793 assert(host_size >= target_size);
7795 target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7796 if (!target_mask) {
7797 return -TARGET_EFAULT;
7800 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7801 unsigned bit = i * target_bits;
7802 abi_ulong val = 0;
7804 for (j = 0; j < target_bits; j++, bit++) {
7805 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7806 val |= 1UL << j;
7809 __put_user(val, &target_mask[i]);
7812 unlock_user(target_mask, target_addr, target_size);
7813 return 0;
7816 /* do_syscall() should always have a single exit point at the end so
7817 that actions, such as logging of syscall results, can be performed.
7818 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7819 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
7820 abi_long arg2, abi_long arg3, abi_long arg4,
7821 abi_long arg5, abi_long arg6, abi_long arg7,
7822 abi_long arg8)
7824 CPUState *cpu = ENV_GET_CPU(cpu_env);
7825 abi_long ret;
7826 struct stat st;
7827 struct statfs stfs;
7828 void *p;
7830 #if defined(DEBUG_ERESTARTSYS)
7831 /* Debug-only code for exercising the syscall-restart code paths
7832 * in the per-architecture cpu main loops: restart every syscall
7833 * the guest makes once before letting it through.
7836 static int flag;
7838 flag = !flag;
7839 if (flag) {
7840 return -TARGET_ERESTARTSYS;
7843 #endif
7845 #ifdef DEBUG
7846 gemu_log("syscall %d", num);
7847 #endif
7848 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
7849 if(do_strace)
7850 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
7852 switch(num) {
7853 case TARGET_NR_exit:
7854 /* In old applications this may be used to implement _exit(2).
7855 However in threaded applictions it is used for thread termination,
7856 and _exit_group is used for application termination.
7857 Do thread termination if we have more then one thread. */
7859 if (block_signals()) {
7860 ret = -TARGET_ERESTARTSYS;
7861 break;
7864 cpu_list_lock();
7866 if (CPU_NEXT(first_cpu)) {
7867 TaskState *ts;
7869 /* Remove the CPU from the list. */
7870 QTAILQ_REMOVE(&cpus, cpu, node);
7872 cpu_list_unlock();
7874 ts = cpu->opaque;
7875 if (ts->child_tidptr) {
7876 put_user_u32(0, ts->child_tidptr);
7877 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7878 NULL, NULL, 0);
7880 thread_cpu = NULL;
7881 object_unref(OBJECT(cpu));
7882 g_free(ts);
7883 rcu_unregister_thread();
7884 pthread_exit(NULL);
7887 cpu_list_unlock();
7888 #ifdef TARGET_GPROF
7889 _mcleanup();
7890 #endif
7891 gdb_exit(cpu_env, arg1);
7892 _exit(arg1);
7893 ret = 0; /* avoid warning */
7894 break;
7895 case TARGET_NR_read:
7896 if (arg3 == 0)
7897 ret = 0;
7898 else {
7899 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7900 goto efault;
7901 ret = get_errno(safe_read(arg1, p, arg3));
7902 if (ret >= 0 &&
7903 fd_trans_host_to_target_data(arg1)) {
7904 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7906 unlock_user(p, arg2, ret);
7908 break;
7909 case TARGET_NR_write:
7910 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7911 goto efault;
7912 if (fd_trans_target_to_host_data(arg1)) {
7913 void *copy = g_malloc(arg3);
7914 memcpy(copy, p, arg3);
7915 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7916 if (ret >= 0) {
7917 ret = get_errno(safe_write(arg1, copy, ret));
7919 g_free(copy);
7920 } else {
7921 ret = get_errno(safe_write(arg1, p, arg3));
7923 unlock_user(p, arg2, 0);
7924 break;
7925 #ifdef TARGET_NR_open
7926 case TARGET_NR_open:
7927 if (!(p = lock_user_string(arg1)))
7928 goto efault;
7929 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7930 target_to_host_bitmask(arg2, fcntl_flags_tbl),
7931 arg3));
7932 fd_trans_unregister(ret);
7933 unlock_user(p, arg1, 0);
7934 break;
7935 #endif
7936 case TARGET_NR_openat:
7937 if (!(p = lock_user_string(arg2)))
7938 goto efault;
7939 ret = get_errno(do_openat(cpu_env, arg1, p,
7940 target_to_host_bitmask(arg3, fcntl_flags_tbl),
7941 arg4));
7942 fd_trans_unregister(ret);
7943 unlock_user(p, arg2, 0);
7944 break;
7945 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7946 case TARGET_NR_name_to_handle_at:
7947 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7948 break;
7949 #endif
7950 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7951 case TARGET_NR_open_by_handle_at:
7952 ret = do_open_by_handle_at(arg1, arg2, arg3);
7953 fd_trans_unregister(ret);
7954 break;
7955 #endif
7956 case TARGET_NR_close:
7957 fd_trans_unregister(arg1);
7958 ret = get_errno(close(arg1));
7959 break;
7960 case TARGET_NR_brk:
7961 ret = do_brk(arg1);
7962 break;
7963 #ifdef TARGET_NR_fork
7964 case TARGET_NR_fork:
7965 ret = get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7966 break;
7967 #endif
7968 #ifdef TARGET_NR_waitpid
7969 case TARGET_NR_waitpid:
7971 int status;
7972 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7973 if (!is_error(ret) && arg2 && ret
7974 && put_user_s32(host_to_target_waitstatus(status), arg2))
7975 goto efault;
7977 break;
7978 #endif
7979 #ifdef TARGET_NR_waitid
7980 case TARGET_NR_waitid:
7982 siginfo_t info;
7983 info.si_pid = 0;
7984 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7985 if (!is_error(ret) && arg3 && info.si_pid != 0) {
7986 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7987 goto efault;
7988 host_to_target_siginfo(p, &info);
7989 unlock_user(p, arg3, sizeof(target_siginfo_t));
7992 break;
7993 #endif
7994 #ifdef TARGET_NR_creat /* not on alpha */
7995 case TARGET_NR_creat:
7996 if (!(p = lock_user_string(arg1)))
7997 goto efault;
7998 ret = get_errno(creat(p, arg2));
7999 fd_trans_unregister(ret);
8000 unlock_user(p, arg1, 0);
8001 break;
8002 #endif
8003 #ifdef TARGET_NR_link
8004 case TARGET_NR_link:
8006 void * p2;
8007 p = lock_user_string(arg1);
8008 p2 = lock_user_string(arg2);
8009 if (!p || !p2)
8010 ret = -TARGET_EFAULT;
8011 else
8012 ret = get_errno(link(p, p2));
8013 unlock_user(p2, arg2, 0);
8014 unlock_user(p, arg1, 0);
8016 break;
8017 #endif
8018 #if defined(TARGET_NR_linkat)
8019 case TARGET_NR_linkat:
8021 void * p2 = NULL;
8022 if (!arg2 || !arg4)
8023 goto efault;
8024 p = lock_user_string(arg2);
8025 p2 = lock_user_string(arg4);
8026 if (!p || !p2)
8027 ret = -TARGET_EFAULT;
8028 else
8029 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8030 unlock_user(p, arg2, 0);
8031 unlock_user(p2, arg4, 0);
8033 break;
8034 #endif
8035 #ifdef TARGET_NR_unlink
8036 case TARGET_NR_unlink:
8037 if (!(p = lock_user_string(arg1)))
8038 goto efault;
8039 ret = get_errno(unlink(p));
8040 unlock_user(p, arg1, 0);
8041 break;
8042 #endif
8043 #if defined(TARGET_NR_unlinkat)
8044 case TARGET_NR_unlinkat:
8045 if (!(p = lock_user_string(arg2)))
8046 goto efault;
8047 ret = get_errno(unlinkat(arg1, p, arg3));
8048 unlock_user(p, arg2, 0);
8049 break;
8050 #endif
8051 case TARGET_NR_execve:
8053 char **argp, **envp;
8054 int argc, envc;
8055 abi_ulong gp;
8056 abi_ulong guest_argp;
8057 abi_ulong guest_envp;
8058 abi_ulong addr;
8059 char **q;
8060 int total_size = 0;
8062 argc = 0;
8063 guest_argp = arg2;
8064 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8065 if (get_user_ual(addr, gp))
8066 goto efault;
8067 if (!addr)
8068 break;
8069 argc++;
8071 envc = 0;
8072 guest_envp = arg3;
8073 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8074 if (get_user_ual(addr, gp))
8075 goto efault;
8076 if (!addr)
8077 break;
8078 envc++;
8081 argp = g_new0(char *, argc + 1);
8082 envp = g_new0(char *, envc + 1);
8084 for (gp = guest_argp, q = argp; gp;
8085 gp += sizeof(abi_ulong), q++) {
8086 if (get_user_ual(addr, gp))
8087 goto execve_efault;
8088 if (!addr)
8089 break;
8090 if (!(*q = lock_user_string(addr)))
8091 goto execve_efault;
8092 total_size += strlen(*q) + 1;
8094 *q = NULL;
8096 for (gp = guest_envp, q = envp; gp;
8097 gp += sizeof(abi_ulong), q++) {
8098 if (get_user_ual(addr, gp))
8099 goto execve_efault;
8100 if (!addr)
8101 break;
8102 if (!(*q = lock_user_string(addr)))
8103 goto execve_efault;
8104 total_size += strlen(*q) + 1;
8106 *q = NULL;
8108 if (!(p = lock_user_string(arg1)))
8109 goto execve_efault;
8110 /* Although execve() is not an interruptible syscall it is
8111 * a special case where we must use the safe_syscall wrapper:
8112 * if we allow a signal to happen before we make the host
8113 * syscall then we will 'lose' it, because at the point of
8114 * execve the process leaves QEMU's control. So we use the
8115 * safe syscall wrapper to ensure that we either take the
8116 * signal as a guest signal, or else it does not happen
8117 * before the execve completes and makes it the other
8118 * program's problem.
8120 ret = get_errno(safe_execve(p, argp, envp));
8121 unlock_user(p, arg1, 0);
8123 goto execve_end;
8125 execve_efault:
8126 ret = -TARGET_EFAULT;
8128 execve_end:
8129 for (gp = guest_argp, q = argp; *q;
8130 gp += sizeof(abi_ulong), q++) {
8131 if (get_user_ual(addr, gp)
8132 || !addr)
8133 break;
8134 unlock_user(*q, addr, 0);
8136 for (gp = guest_envp, q = envp; *q;
8137 gp += sizeof(abi_ulong), q++) {
8138 if (get_user_ual(addr, gp)
8139 || !addr)
8140 break;
8141 unlock_user(*q, addr, 0);
8144 g_free(argp);
8145 g_free(envp);
8147 break;
8148 case TARGET_NR_chdir:
8149 if (!(p = lock_user_string(arg1)))
8150 goto efault;
8151 ret = get_errno(chdir(p));
8152 unlock_user(p, arg1, 0);
8153 break;
8154 #ifdef TARGET_NR_time
8155 case TARGET_NR_time:
8157 time_t host_time;
8158 ret = get_errno(time(&host_time));
8159 if (!is_error(ret)
8160 && arg1
8161 && put_user_sal(host_time, arg1))
8162 goto efault;
8164 break;
8165 #endif
8166 #ifdef TARGET_NR_mknod
8167 case TARGET_NR_mknod:
8168 if (!(p = lock_user_string(arg1)))
8169 goto efault;
8170 ret = get_errno(mknod(p, arg2, arg3));
8171 unlock_user(p, arg1, 0);
8172 break;
8173 #endif
8174 #if defined(TARGET_NR_mknodat)
8175 case TARGET_NR_mknodat:
8176 if (!(p = lock_user_string(arg2)))
8177 goto efault;
8178 ret = get_errno(mknodat(arg1, p, arg3, arg4));
8179 unlock_user(p, arg2, 0);
8180 break;
8181 #endif
8182 #ifdef TARGET_NR_chmod
8183 case TARGET_NR_chmod:
8184 if (!(p = lock_user_string(arg1)))
8185 goto efault;
8186 ret = get_errno(chmod(p, arg2));
8187 unlock_user(p, arg1, 0);
8188 break;
8189 #endif
8190 #ifdef TARGET_NR_break
8191 case TARGET_NR_break:
8192 goto unimplemented;
8193 #endif
8194 #ifdef TARGET_NR_oldstat
8195 case TARGET_NR_oldstat:
8196 goto unimplemented;
8197 #endif
8198 case TARGET_NR_lseek:
8199 ret = get_errno(lseek(arg1, arg2, arg3));
8200 break;
8201 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8202 /* Alpha specific */
8203 case TARGET_NR_getxpid:
8204 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8205 ret = get_errno(getpid());
8206 break;
8207 #endif
8208 #ifdef TARGET_NR_getpid
8209 case TARGET_NR_getpid:
8210 ret = get_errno(getpid());
8211 break;
8212 #endif
8213 case TARGET_NR_mount:
8215 /* need to look at the data field */
8216 void *p2, *p3;
8218 if (arg1) {
8219 p = lock_user_string(arg1);
8220 if (!p) {
8221 goto efault;
8223 } else {
8224 p = NULL;
8227 p2 = lock_user_string(arg2);
8228 if (!p2) {
8229 if (arg1) {
8230 unlock_user(p, arg1, 0);
8232 goto efault;
8235 if (arg3) {
8236 p3 = lock_user_string(arg3);
8237 if (!p3) {
8238 if (arg1) {
8239 unlock_user(p, arg1, 0);
8241 unlock_user(p2, arg2, 0);
8242 goto efault;
8244 } else {
8245 p3 = NULL;
8248 /* FIXME - arg5 should be locked, but it isn't clear how to
8249 * do that since it's not guaranteed to be a NULL-terminated
8250 * string.
8252 if (!arg5) {
8253 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8254 } else {
8255 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8257 ret = get_errno(ret);
8259 if (arg1) {
8260 unlock_user(p, arg1, 0);
8262 unlock_user(p2, arg2, 0);
8263 if (arg3) {
8264 unlock_user(p3, arg3, 0);
8267 break;
8268 #ifdef TARGET_NR_umount
8269 case TARGET_NR_umount:
8270 if (!(p = lock_user_string(arg1)))
8271 goto efault;
8272 ret = get_errno(umount(p));
8273 unlock_user(p, arg1, 0);
8274 break;
8275 #endif
8276 #ifdef TARGET_NR_stime /* not on alpha */
8277 case TARGET_NR_stime:
8279 time_t host_time;
8280 if (get_user_sal(host_time, arg1))
8281 goto efault;
8282 ret = get_errno(stime(&host_time));
8284 break;
8285 #endif
8286 case TARGET_NR_ptrace:
8287 goto unimplemented;
8288 #ifdef TARGET_NR_alarm /* not on alpha */
8289 case TARGET_NR_alarm:
8290 ret = alarm(arg1);
8291 break;
8292 #endif
8293 #ifdef TARGET_NR_oldfstat
8294 case TARGET_NR_oldfstat:
8295 goto unimplemented;
8296 #endif
8297 #ifdef TARGET_NR_pause /* not on alpha */
8298 case TARGET_NR_pause:
8299 if (!block_signals()) {
8300 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8302 ret = -TARGET_EINTR;
8303 break;
8304 #endif
8305 #ifdef TARGET_NR_utime
8306 case TARGET_NR_utime:
8308 struct utimbuf tbuf, *host_tbuf;
8309 struct target_utimbuf *target_tbuf;
8310 if (arg2) {
8311 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8312 goto efault;
8313 tbuf.actime = tswapal(target_tbuf->actime);
8314 tbuf.modtime = tswapal(target_tbuf->modtime);
8315 unlock_user_struct(target_tbuf, arg2, 0);
8316 host_tbuf = &tbuf;
8317 } else {
8318 host_tbuf = NULL;
8320 if (!(p = lock_user_string(arg1)))
8321 goto efault;
8322 ret = get_errno(utime(p, host_tbuf));
8323 unlock_user(p, arg1, 0);
8325 break;
8326 #endif
8327 #ifdef TARGET_NR_utimes
8328 case TARGET_NR_utimes:
8330 struct timeval *tvp, tv[2];
8331 if (arg2) {
8332 if (copy_from_user_timeval(&tv[0], arg2)
8333 || copy_from_user_timeval(&tv[1],
8334 arg2 + sizeof(struct target_timeval)))
8335 goto efault;
8336 tvp = tv;
8337 } else {
8338 tvp = NULL;
8340 if (!(p = lock_user_string(arg1)))
8341 goto efault;
8342 ret = get_errno(utimes(p, tvp));
8343 unlock_user(p, arg1, 0);
8345 break;
8346 #endif
8347 #if defined(TARGET_NR_futimesat)
8348 case TARGET_NR_futimesat:
8350 struct timeval *tvp, tv[2];
8351 if (arg3) {
8352 if (copy_from_user_timeval(&tv[0], arg3)
8353 || copy_from_user_timeval(&tv[1],
8354 arg3 + sizeof(struct target_timeval)))
8355 goto efault;
8356 tvp = tv;
8357 } else {
8358 tvp = NULL;
8360 if (!(p = lock_user_string(arg2)))
8361 goto efault;
8362 ret = get_errno(futimesat(arg1, path(p), tvp));
8363 unlock_user(p, arg2, 0);
8365 break;
8366 #endif
8367 #ifdef TARGET_NR_stty
8368 case TARGET_NR_stty:
8369 goto unimplemented;
8370 #endif
8371 #ifdef TARGET_NR_gtty
8372 case TARGET_NR_gtty:
8373 goto unimplemented;
8374 #endif
8375 #ifdef TARGET_NR_access
8376 case TARGET_NR_access:
8377 if (!(p = lock_user_string(arg1)))
8378 goto efault;
8379 ret = get_errno(access(path(p), arg2));
8380 unlock_user(p, arg1, 0);
8381 break;
8382 #endif
8383 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8384 case TARGET_NR_faccessat:
8385 if (!(p = lock_user_string(arg2)))
8386 goto efault;
8387 ret = get_errno(faccessat(arg1, p, arg3, 0));
8388 unlock_user(p, arg2, 0);
8389 break;
8390 #endif
8391 #ifdef TARGET_NR_nice /* not on alpha */
8392 case TARGET_NR_nice:
8393 ret = get_errno(nice(arg1));
8394 break;
8395 #endif
8396 #ifdef TARGET_NR_ftime
8397 case TARGET_NR_ftime:
8398 goto unimplemented;
8399 #endif
8400 case TARGET_NR_sync:
8401 sync();
8402 ret = 0;
8403 break;
8404 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8405 case TARGET_NR_syncfs:
8406 ret = get_errno(syncfs(arg1));
8407 break;
8408 #endif
8409 case TARGET_NR_kill:
8410 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8411 break;
8412 #ifdef TARGET_NR_rename
8413 case TARGET_NR_rename:
8415 void *p2;
8416 p = lock_user_string(arg1);
8417 p2 = lock_user_string(arg2);
8418 if (!p || !p2)
8419 ret = -TARGET_EFAULT;
8420 else
8421 ret = get_errno(rename(p, p2));
8422 unlock_user(p2, arg2, 0);
8423 unlock_user(p, arg1, 0);
8425 break;
8426 #endif
8427 #if defined(TARGET_NR_renameat)
8428 case TARGET_NR_renameat:
8430 void *p2;
8431 p = lock_user_string(arg2);
8432 p2 = lock_user_string(arg4);
8433 if (!p || !p2)
8434 ret = -TARGET_EFAULT;
8435 else
8436 ret = get_errno(renameat(arg1, p, arg3, p2));
8437 unlock_user(p2, arg4, 0);
8438 unlock_user(p, arg2, 0);
8440 break;
8441 #endif
8442 #if defined(TARGET_NR_renameat2)
8443 case TARGET_NR_renameat2:
8445 void *p2;
8446 p = lock_user_string(arg2);
8447 p2 = lock_user_string(arg4);
8448 if (!p || !p2) {
8449 ret = -TARGET_EFAULT;
8450 } else {
8451 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8453 unlock_user(p2, arg4, 0);
8454 unlock_user(p, arg2, 0);
8456 break;
8457 #endif
8458 #ifdef TARGET_NR_mkdir
8459 case TARGET_NR_mkdir:
8460 if (!(p = lock_user_string(arg1)))
8461 goto efault;
8462 ret = get_errno(mkdir(p, arg2));
8463 unlock_user(p, arg1, 0);
8464 break;
8465 #endif
8466 #if defined(TARGET_NR_mkdirat)
8467 case TARGET_NR_mkdirat:
8468 if (!(p = lock_user_string(arg2)))
8469 goto efault;
8470 ret = get_errno(mkdirat(arg1, p, arg3));
8471 unlock_user(p, arg2, 0);
8472 break;
8473 #endif
8474 #ifdef TARGET_NR_rmdir
8475 case TARGET_NR_rmdir:
8476 if (!(p = lock_user_string(arg1)))
8477 goto efault;
8478 ret = get_errno(rmdir(p));
8479 unlock_user(p, arg1, 0);
8480 break;
8481 #endif
8482 case TARGET_NR_dup:
8483 ret = get_errno(dup(arg1));
8484 if (ret >= 0) {
8485 fd_trans_dup(arg1, ret);
8487 break;
8488 #ifdef TARGET_NR_pipe
8489 case TARGET_NR_pipe:
8490 ret = do_pipe(cpu_env, arg1, 0, 0);
8491 break;
8492 #endif
8493 #ifdef TARGET_NR_pipe2
8494 case TARGET_NR_pipe2:
8495 ret = do_pipe(cpu_env, arg1,
8496 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8497 break;
8498 #endif
8499 case TARGET_NR_times:
8501 struct target_tms *tmsp;
8502 struct tms tms;
8503 ret = get_errno(times(&tms));
8504 if (arg1) {
8505 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8506 if (!tmsp)
8507 goto efault;
8508 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8509 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8510 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8511 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8513 if (!is_error(ret))
8514 ret = host_to_target_clock_t(ret);
8516 break;
8517 #ifdef TARGET_NR_prof
8518 case TARGET_NR_prof:
8519 goto unimplemented;
8520 #endif
8521 #ifdef TARGET_NR_signal
8522 case TARGET_NR_signal:
8523 goto unimplemented;
8524 #endif
8525 case TARGET_NR_acct:
8526 if (arg1 == 0) {
8527 ret = get_errno(acct(NULL));
8528 } else {
8529 if (!(p = lock_user_string(arg1)))
8530 goto efault;
8531 ret = get_errno(acct(path(p)));
8532 unlock_user(p, arg1, 0);
8534 break;
8535 #ifdef TARGET_NR_umount2
8536 case TARGET_NR_umount2:
8537 if (!(p = lock_user_string(arg1)))
8538 goto efault;
8539 ret = get_errno(umount2(p, arg2));
8540 unlock_user(p, arg1, 0);
8541 break;
8542 #endif
8543 #ifdef TARGET_NR_lock
8544 case TARGET_NR_lock:
8545 goto unimplemented;
8546 #endif
8547 case TARGET_NR_ioctl:
8548 ret = do_ioctl(arg1, arg2, arg3);
8549 break;
8550 case TARGET_NR_fcntl:
8551 ret = do_fcntl(arg1, arg2, arg3);
8552 break;
8553 #ifdef TARGET_NR_mpx
8554 case TARGET_NR_mpx:
8555 goto unimplemented;
8556 #endif
8557 case TARGET_NR_setpgid:
8558 ret = get_errno(setpgid(arg1, arg2));
8559 break;
8560 #ifdef TARGET_NR_ulimit
8561 case TARGET_NR_ulimit:
8562 goto unimplemented;
8563 #endif
8564 #ifdef TARGET_NR_oldolduname
8565 case TARGET_NR_oldolduname:
8566 goto unimplemented;
8567 #endif
8568 case TARGET_NR_umask:
8569 ret = get_errno(umask(arg1));
8570 break;
8571 case TARGET_NR_chroot:
8572 if (!(p = lock_user_string(arg1)))
8573 goto efault;
8574 ret = get_errno(chroot(p));
8575 unlock_user(p, arg1, 0);
8576 break;
8577 #ifdef TARGET_NR_ustat
8578 case TARGET_NR_ustat:
8579 goto unimplemented;
8580 #endif
8581 #ifdef TARGET_NR_dup2
8582 case TARGET_NR_dup2:
8583 ret = get_errno(dup2(arg1, arg2));
8584 if (ret >= 0) {
8585 fd_trans_dup(arg1, arg2);
8587 break;
8588 #endif
8589 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8590 case TARGET_NR_dup3:
8592 int host_flags;
8594 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8595 return -EINVAL;
8597 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8598 ret = get_errno(dup3(arg1, arg2, host_flags));
8599 if (ret >= 0) {
8600 fd_trans_dup(arg1, arg2);
8602 break;
8604 #endif
8605 #ifdef TARGET_NR_getppid /* not on alpha */
8606 case TARGET_NR_getppid:
8607 ret = get_errno(getppid());
8608 break;
8609 #endif
8610 #ifdef TARGET_NR_getpgrp
8611 case TARGET_NR_getpgrp:
8612 ret = get_errno(getpgrp());
8613 break;
8614 #endif
8615 case TARGET_NR_setsid:
8616 ret = get_errno(setsid());
8617 break;
8618 #ifdef TARGET_NR_sigaction
8619 case TARGET_NR_sigaction:
8621 #if defined(TARGET_ALPHA)
8622 struct target_sigaction act, oact, *pact = 0;
8623 struct target_old_sigaction *old_act;
8624 if (arg2) {
8625 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8626 goto efault;
8627 act._sa_handler = old_act->_sa_handler;
8628 target_siginitset(&act.sa_mask, old_act->sa_mask);
8629 act.sa_flags = old_act->sa_flags;
8630 act.sa_restorer = 0;
8631 unlock_user_struct(old_act, arg2, 0);
8632 pact = &act;
8634 ret = get_errno(do_sigaction(arg1, pact, &oact));
8635 if (!is_error(ret) && arg3) {
8636 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8637 goto efault;
8638 old_act->_sa_handler = oact._sa_handler;
8639 old_act->sa_mask = oact.sa_mask.sig[0];
8640 old_act->sa_flags = oact.sa_flags;
8641 unlock_user_struct(old_act, arg3, 1);
8643 #elif defined(TARGET_MIPS)
8644 struct target_sigaction act, oact, *pact, *old_act;
8646 if (arg2) {
8647 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8648 goto efault;
8649 act._sa_handler = old_act->_sa_handler;
8650 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8651 act.sa_flags = old_act->sa_flags;
8652 unlock_user_struct(old_act, arg2, 0);
8653 pact = &act;
8654 } else {
8655 pact = NULL;
8658 ret = get_errno(do_sigaction(arg1, pact, &oact));
8660 if (!is_error(ret) && arg3) {
8661 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8662 goto efault;
8663 old_act->_sa_handler = oact._sa_handler;
8664 old_act->sa_flags = oact.sa_flags;
8665 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8666 old_act->sa_mask.sig[1] = 0;
8667 old_act->sa_mask.sig[2] = 0;
8668 old_act->sa_mask.sig[3] = 0;
8669 unlock_user_struct(old_act, arg3, 1);
8671 #else
8672 struct target_old_sigaction *old_act;
8673 struct target_sigaction act, oact, *pact;
8674 if (arg2) {
8675 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8676 goto efault;
8677 act._sa_handler = old_act->_sa_handler;
8678 target_siginitset(&act.sa_mask, old_act->sa_mask);
8679 act.sa_flags = old_act->sa_flags;
8680 act.sa_restorer = old_act->sa_restorer;
8681 unlock_user_struct(old_act, arg2, 0);
8682 pact = &act;
8683 } else {
8684 pact = NULL;
8686 ret = get_errno(do_sigaction(arg1, pact, &oact));
8687 if (!is_error(ret) && arg3) {
8688 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8689 goto efault;
8690 old_act->_sa_handler = oact._sa_handler;
8691 old_act->sa_mask = oact.sa_mask.sig[0];
8692 old_act->sa_flags = oact.sa_flags;
8693 old_act->sa_restorer = oact.sa_restorer;
8694 unlock_user_struct(old_act, arg3, 1);
8696 #endif
8698 break;
8699 #endif
8700 case TARGET_NR_rt_sigaction:
8702 #if defined(TARGET_ALPHA)
8703 /* For Alpha and SPARC this is a 5 argument syscall, with
8704 * a 'restorer' parameter which must be copied into the
8705 * sa_restorer field of the sigaction struct.
8706 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8707 * and arg5 is the sigsetsize.
8708 * Alpha also has a separate rt_sigaction struct that it uses
8709 * here; SPARC uses the usual sigaction struct.
8711 struct target_rt_sigaction *rt_act;
8712 struct target_sigaction act, oact, *pact = 0;
8714 if (arg4 != sizeof(target_sigset_t)) {
8715 ret = -TARGET_EINVAL;
8716 break;
8718 if (arg2) {
8719 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8720 goto efault;
8721 act._sa_handler = rt_act->_sa_handler;
8722 act.sa_mask = rt_act->sa_mask;
8723 act.sa_flags = rt_act->sa_flags;
8724 act.sa_restorer = arg5;
8725 unlock_user_struct(rt_act, arg2, 0);
8726 pact = &act;
8728 ret = get_errno(do_sigaction(arg1, pact, &oact));
8729 if (!is_error(ret) && arg3) {
8730 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8731 goto efault;
8732 rt_act->_sa_handler = oact._sa_handler;
8733 rt_act->sa_mask = oact.sa_mask;
8734 rt_act->sa_flags = oact.sa_flags;
8735 unlock_user_struct(rt_act, arg3, 1);
8737 #else
8738 #ifdef TARGET_SPARC
8739 target_ulong restorer = arg4;
8740 target_ulong sigsetsize = arg5;
8741 #else
8742 target_ulong sigsetsize = arg4;
8743 #endif
8744 struct target_sigaction *act;
8745 struct target_sigaction *oact;
8747 if (sigsetsize != sizeof(target_sigset_t)) {
8748 ret = -TARGET_EINVAL;
8749 break;
8751 if (arg2) {
8752 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8753 goto efault;
8755 #ifdef TARGET_SPARC
8756 act->sa_restorer = restorer;
8757 #endif
8758 } else {
8759 act = NULL;
8761 if (arg3) {
8762 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8763 ret = -TARGET_EFAULT;
8764 goto rt_sigaction_fail;
8766 } else
8767 oact = NULL;
8768 ret = get_errno(do_sigaction(arg1, act, oact));
8769 rt_sigaction_fail:
8770 if (act)
8771 unlock_user_struct(act, arg2, 0);
8772 if (oact)
8773 unlock_user_struct(oact, arg3, 1);
8774 #endif
8776 break;
8777 #ifdef TARGET_NR_sgetmask /* not on alpha */
8778 case TARGET_NR_sgetmask:
8780 sigset_t cur_set;
8781 abi_ulong target_set;
8782 ret = do_sigprocmask(0, NULL, &cur_set);
8783 if (!ret) {
8784 host_to_target_old_sigset(&target_set, &cur_set);
8785 ret = target_set;
8788 break;
8789 #endif
8790 #ifdef TARGET_NR_ssetmask /* not on alpha */
8791 case TARGET_NR_ssetmask:
8793 sigset_t set, oset;
8794 abi_ulong target_set = arg1;
8795 target_to_host_old_sigset(&set, &target_set);
8796 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8797 if (!ret) {
8798 host_to_target_old_sigset(&target_set, &oset);
8799 ret = target_set;
8802 break;
8803 #endif
8804 #ifdef TARGET_NR_sigprocmask
8805 case TARGET_NR_sigprocmask:
8807 #if defined(TARGET_ALPHA)
8808 sigset_t set, oldset;
8809 abi_ulong mask;
8810 int how;
8812 switch (arg1) {
8813 case TARGET_SIG_BLOCK:
8814 how = SIG_BLOCK;
8815 break;
8816 case TARGET_SIG_UNBLOCK:
8817 how = SIG_UNBLOCK;
8818 break;
8819 case TARGET_SIG_SETMASK:
8820 how = SIG_SETMASK;
8821 break;
8822 default:
8823 ret = -TARGET_EINVAL;
8824 goto fail;
8826 mask = arg2;
8827 target_to_host_old_sigset(&set, &mask);
8829 ret = do_sigprocmask(how, &set, &oldset);
8830 if (!is_error(ret)) {
8831 host_to_target_old_sigset(&mask, &oldset);
8832 ret = mask;
8833 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8835 #else
8836 sigset_t set, oldset, *set_ptr;
8837 int how;
8839 if (arg2) {
8840 switch (arg1) {
8841 case TARGET_SIG_BLOCK:
8842 how = SIG_BLOCK;
8843 break;
8844 case TARGET_SIG_UNBLOCK:
8845 how = SIG_UNBLOCK;
8846 break;
8847 case TARGET_SIG_SETMASK:
8848 how = SIG_SETMASK;
8849 break;
8850 default:
8851 ret = -TARGET_EINVAL;
8852 goto fail;
8854 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8855 goto efault;
8856 target_to_host_old_sigset(&set, p);
8857 unlock_user(p, arg2, 0);
8858 set_ptr = &set;
8859 } else {
8860 how = 0;
8861 set_ptr = NULL;
8863 ret = do_sigprocmask(how, set_ptr, &oldset);
8864 if (!is_error(ret) && arg3) {
8865 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8866 goto efault;
8867 host_to_target_old_sigset(p, &oldset);
8868 unlock_user(p, arg3, sizeof(target_sigset_t));
8870 #endif
8872 break;
8873 #endif
8874 case TARGET_NR_rt_sigprocmask:
8876 int how = arg1;
8877 sigset_t set, oldset, *set_ptr;
8879 if (arg4 != sizeof(target_sigset_t)) {
8880 ret = -TARGET_EINVAL;
8881 break;
8884 if (arg2) {
8885 switch(how) {
8886 case TARGET_SIG_BLOCK:
8887 how = SIG_BLOCK;
8888 break;
8889 case TARGET_SIG_UNBLOCK:
8890 how = SIG_UNBLOCK;
8891 break;
8892 case TARGET_SIG_SETMASK:
8893 how = SIG_SETMASK;
8894 break;
8895 default:
8896 ret = -TARGET_EINVAL;
8897 goto fail;
8899 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8900 goto efault;
8901 target_to_host_sigset(&set, p);
8902 unlock_user(p, arg2, 0);
8903 set_ptr = &set;
8904 } else {
8905 how = 0;
8906 set_ptr = NULL;
8908 ret = do_sigprocmask(how, set_ptr, &oldset);
8909 if (!is_error(ret) && arg3) {
8910 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8911 goto efault;
8912 host_to_target_sigset(p, &oldset);
8913 unlock_user(p, arg3, sizeof(target_sigset_t));
8916 break;
8917 #ifdef TARGET_NR_sigpending
8918 case TARGET_NR_sigpending:
8920 sigset_t set;
8921 ret = get_errno(sigpending(&set));
8922 if (!is_error(ret)) {
8923 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8924 goto efault;
8925 host_to_target_old_sigset(p, &set);
8926 unlock_user(p, arg1, sizeof(target_sigset_t));
8929 break;
8930 #endif
8931 case TARGET_NR_rt_sigpending:
8933 sigset_t set;
8935 /* Yes, this check is >, not != like most. We follow the kernel's
8936 * logic and it does it like this because it implements
8937 * NR_sigpending through the same code path, and in that case
8938 * the old_sigset_t is smaller in size.
8940 if (arg2 > sizeof(target_sigset_t)) {
8941 ret = -TARGET_EINVAL;
8942 break;
8945 ret = get_errno(sigpending(&set));
8946 if (!is_error(ret)) {
8947 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8948 goto efault;
8949 host_to_target_sigset(p, &set);
8950 unlock_user(p, arg1, sizeof(target_sigset_t));
8953 break;
8954 #ifdef TARGET_NR_sigsuspend
8955 case TARGET_NR_sigsuspend:
8957 TaskState *ts = cpu->opaque;
8958 #if defined(TARGET_ALPHA)
8959 abi_ulong mask = arg1;
8960 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8961 #else
8962 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8963 goto efault;
8964 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8965 unlock_user(p, arg1, 0);
8966 #endif
8967 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8968 SIGSET_T_SIZE));
8969 if (ret != -TARGET_ERESTARTSYS) {
8970 ts->in_sigsuspend = 1;
8973 break;
8974 #endif
8975 case TARGET_NR_rt_sigsuspend:
8977 TaskState *ts = cpu->opaque;
8979 if (arg2 != sizeof(target_sigset_t)) {
8980 ret = -TARGET_EINVAL;
8981 break;
8983 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8984 goto efault;
8985 target_to_host_sigset(&ts->sigsuspend_mask, p);
8986 unlock_user(p, arg1, 0);
8987 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8988 SIGSET_T_SIZE));
8989 if (ret != -TARGET_ERESTARTSYS) {
8990 ts->in_sigsuspend = 1;
8993 break;
8994 case TARGET_NR_rt_sigtimedwait:
8996 sigset_t set;
8997 struct timespec uts, *puts;
8998 siginfo_t uinfo;
9000 if (arg4 != sizeof(target_sigset_t)) {
9001 ret = -TARGET_EINVAL;
9002 break;
9005 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9006 goto efault;
9007 target_to_host_sigset(&set, p);
9008 unlock_user(p, arg1, 0);
9009 if (arg3) {
9010 puts = &uts;
9011 target_to_host_timespec(puts, arg3);
9012 } else {
9013 puts = NULL;
9015 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9016 SIGSET_T_SIZE));
9017 if (!is_error(ret)) {
9018 if (arg2) {
9019 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9021 if (!p) {
9022 goto efault;
9024 host_to_target_siginfo(p, &uinfo);
9025 unlock_user(p, arg2, sizeof(target_siginfo_t));
9027 ret = host_to_target_signal(ret);
9030 break;
9031 case TARGET_NR_rt_sigqueueinfo:
9033 siginfo_t uinfo;
9035 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9036 if (!p) {
9037 goto efault;
9039 target_to_host_siginfo(&uinfo, p);
9040 unlock_user(p, arg3, 0);
9041 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9043 break;
9044 case TARGET_NR_rt_tgsigqueueinfo:
9046 siginfo_t uinfo;
9048 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9049 if (!p) {
9050 goto efault;
9052 target_to_host_siginfo(&uinfo, p);
9053 unlock_user(p, arg4, 0);
9054 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9056 break;
9057 #ifdef TARGET_NR_sigreturn
9058 case TARGET_NR_sigreturn:
9059 if (block_signals()) {
9060 ret = -TARGET_ERESTARTSYS;
9061 } else {
9062 ret = do_sigreturn(cpu_env);
9064 break;
9065 #endif
9066 case TARGET_NR_rt_sigreturn:
9067 if (block_signals()) {
9068 ret = -TARGET_ERESTARTSYS;
9069 } else {
9070 ret = do_rt_sigreturn(cpu_env);
9072 break;
9073 case TARGET_NR_sethostname:
9074 if (!(p = lock_user_string(arg1)))
9075 goto efault;
9076 ret = get_errno(sethostname(p, arg2));
9077 unlock_user(p, arg1, 0);
9078 break;
9079 case TARGET_NR_setrlimit:
9081 int resource = target_to_host_resource(arg1);
9082 struct target_rlimit *target_rlim;
9083 struct rlimit rlim;
9084 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9085 goto efault;
9086 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9087 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9088 unlock_user_struct(target_rlim, arg2, 0);
9089 ret = get_errno(setrlimit(resource, &rlim));
9091 break;
9092 case TARGET_NR_getrlimit:
9094 int resource = target_to_host_resource(arg1);
9095 struct target_rlimit *target_rlim;
9096 struct rlimit rlim;
9098 ret = get_errno(getrlimit(resource, &rlim));
9099 if (!is_error(ret)) {
9100 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9101 goto efault;
9102 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9103 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9104 unlock_user_struct(target_rlim, arg2, 1);
9107 break;
9108 case TARGET_NR_getrusage:
9110 struct rusage rusage;
9111 ret = get_errno(getrusage(arg1, &rusage));
9112 if (!is_error(ret)) {
9113 ret = host_to_target_rusage(arg2, &rusage);
9116 break;
9117 case TARGET_NR_gettimeofday:
9119 struct timeval tv;
9120 ret = get_errno(gettimeofday(&tv, NULL));
9121 if (!is_error(ret)) {
9122 if (copy_to_user_timeval(arg1, &tv))
9123 goto efault;
9126 break;
9127 case TARGET_NR_settimeofday:
9129 struct timeval tv, *ptv = NULL;
9130 struct timezone tz, *ptz = NULL;
9132 if (arg1) {
9133 if (copy_from_user_timeval(&tv, arg1)) {
9134 goto efault;
9136 ptv = &tv;
9139 if (arg2) {
9140 if (copy_from_user_timezone(&tz, arg2)) {
9141 goto efault;
9143 ptz = &tz;
9146 ret = get_errno(settimeofday(ptv, ptz));
9148 break;
9149 #if defined(TARGET_NR_select)
9150 case TARGET_NR_select:
9151 #if defined(TARGET_WANT_NI_OLD_SELECT)
9152 /* some architectures used to have old_select here
9153 * but now ENOSYS it.
9155 ret = -TARGET_ENOSYS;
9156 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9157 ret = do_old_select(arg1);
9158 #else
9159 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9160 #endif
9161 break;
9162 #endif
9163 #ifdef TARGET_NR_pselect6
9164 case TARGET_NR_pselect6:
9166 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
9167 fd_set rfds, wfds, efds;
9168 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
9169 struct timespec ts, *ts_ptr;
9172 * The 6th arg is actually two args smashed together,
9173 * so we cannot use the C library.
9175 sigset_t set;
9176 struct {
9177 sigset_t *set;
9178 size_t size;
9179 } sig, *sig_ptr;
9181 abi_ulong arg_sigset, arg_sigsize, *arg7;
9182 target_sigset_t *target_sigset;
9184 n = arg1;
9185 rfd_addr = arg2;
9186 wfd_addr = arg3;
9187 efd_addr = arg4;
9188 ts_addr = arg5;
9190 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
9191 if (ret) {
9192 goto fail;
9194 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
9195 if (ret) {
9196 goto fail;
9198 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
9199 if (ret) {
9200 goto fail;
9204 * This takes a timespec, and not a timeval, so we cannot
9205 * use the do_select() helper ...
9207 if (ts_addr) {
9208 if (target_to_host_timespec(&ts, ts_addr)) {
9209 goto efault;
9211 ts_ptr = &ts;
9212 } else {
9213 ts_ptr = NULL;
9216 /* Extract the two packed args for the sigset */
9217 if (arg6) {
9218 sig_ptr = &sig;
9219 sig.size = SIGSET_T_SIZE;
9221 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
9222 if (!arg7) {
9223 goto efault;
9225 arg_sigset = tswapal(arg7[0]);
9226 arg_sigsize = tswapal(arg7[1]);
9227 unlock_user(arg7, arg6, 0);
9229 if (arg_sigset) {
9230 sig.set = &set;
9231 if (arg_sigsize != sizeof(*target_sigset)) {
9232 /* Like the kernel, we enforce correct size sigsets */
9233 ret = -TARGET_EINVAL;
9234 goto fail;
9236 target_sigset = lock_user(VERIFY_READ, arg_sigset,
9237 sizeof(*target_sigset), 1);
9238 if (!target_sigset) {
9239 goto efault;
9241 target_to_host_sigset(&set, target_sigset);
9242 unlock_user(target_sigset, arg_sigset, 0);
9243 } else {
9244 sig.set = NULL;
9246 } else {
9247 sig_ptr = NULL;
9250 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
9251 ts_ptr, sig_ptr));
9253 if (!is_error(ret)) {
9254 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
9255 goto efault;
9256 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
9257 goto efault;
9258 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
9259 goto efault;
9261 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
9262 goto efault;
9265 break;
9266 #endif
9267 #ifdef TARGET_NR_symlink
9268 case TARGET_NR_symlink:
9270 void *p2;
9271 p = lock_user_string(arg1);
9272 p2 = lock_user_string(arg2);
9273 if (!p || !p2)
9274 ret = -TARGET_EFAULT;
9275 else
9276 ret = get_errno(symlink(p, p2));
9277 unlock_user(p2, arg2, 0);
9278 unlock_user(p, arg1, 0);
9280 break;
9281 #endif
9282 #if defined(TARGET_NR_symlinkat)
9283 case TARGET_NR_symlinkat:
9285 void *p2;
9286 p = lock_user_string(arg1);
9287 p2 = lock_user_string(arg3);
9288 if (!p || !p2)
9289 ret = -TARGET_EFAULT;
9290 else
9291 ret = get_errno(symlinkat(p, arg2, p2));
9292 unlock_user(p2, arg3, 0);
9293 unlock_user(p, arg1, 0);
9295 break;
9296 #endif
9297 #ifdef TARGET_NR_oldlstat
9298 case TARGET_NR_oldlstat:
9299 goto unimplemented;
9300 #endif
9301 #ifdef TARGET_NR_readlink
9302 case TARGET_NR_readlink:
9304 void *p2;
9305 p = lock_user_string(arg1);
9306 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9307 if (!p || !p2) {
9308 ret = -TARGET_EFAULT;
9309 } else if (!arg3) {
9310 /* Short circuit this for the magic exe check. */
9311 ret = -TARGET_EINVAL;
9312 } else if (is_proc_myself((const char *)p, "exe")) {
9313 char real[PATH_MAX], *temp;
9314 temp = realpath(exec_path, real);
9315 /* Return value is # of bytes that we wrote to the buffer. */
9316 if (temp == NULL) {
9317 ret = get_errno(-1);
9318 } else {
9319 /* Don't worry about sign mismatch as earlier mapping
9320 * logic would have thrown a bad address error. */
9321 ret = MIN(strlen(real), arg3);
9322 /* We cannot NUL terminate the string. */
9323 memcpy(p2, real, ret);
9325 } else {
9326 ret = get_errno(readlink(path(p), p2, arg3));
9328 unlock_user(p2, arg2, ret);
9329 unlock_user(p, arg1, 0);
9331 break;
9332 #endif
9333 #if defined(TARGET_NR_readlinkat)
9334 case TARGET_NR_readlinkat:
9336 void *p2;
9337 p = lock_user_string(arg2);
9338 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9339 if (!p || !p2) {
9340 ret = -TARGET_EFAULT;
9341 } else if (is_proc_myself((const char *)p, "exe")) {
9342 char real[PATH_MAX], *temp;
9343 temp = realpath(exec_path, real);
9344 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
9345 snprintf((char *)p2, arg4, "%s", real);
9346 } else {
9347 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9349 unlock_user(p2, arg3, ret);
9350 unlock_user(p, arg2, 0);
9352 break;
9353 #endif
9354 #ifdef TARGET_NR_uselib
9355 case TARGET_NR_uselib:
9356 goto unimplemented;
9357 #endif
9358 #ifdef TARGET_NR_swapon
9359 case TARGET_NR_swapon:
9360 if (!(p = lock_user_string(arg1)))
9361 goto efault;
9362 ret = get_errno(swapon(p, arg2));
9363 unlock_user(p, arg1, 0);
9364 break;
9365 #endif
9366 case TARGET_NR_reboot:
9367 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9368 /* arg4 must be ignored in all other cases */
9369 p = lock_user_string(arg4);
9370 if (!p) {
9371 goto efault;
9373 ret = get_errno(reboot(arg1, arg2, arg3, p));
9374 unlock_user(p, arg4, 0);
9375 } else {
9376 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9378 break;
9379 #ifdef TARGET_NR_readdir
9380 case TARGET_NR_readdir:
9381 goto unimplemented;
9382 #endif
9383 #ifdef TARGET_NR_mmap
9384 case TARGET_NR_mmap:
9385 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9386 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9387 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9388 || defined(TARGET_S390X)
9390 abi_ulong *v;
9391 abi_ulong v1, v2, v3, v4, v5, v6;
9392 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9393 goto efault;
9394 v1 = tswapal(v[0]);
9395 v2 = tswapal(v[1]);
9396 v3 = tswapal(v[2]);
9397 v4 = tswapal(v[3]);
9398 v5 = tswapal(v[4]);
9399 v6 = tswapal(v[5]);
9400 unlock_user(v, arg1, 0);
9401 ret = get_errno(target_mmap(v1, v2, v3,
9402 target_to_host_bitmask(v4, mmap_flags_tbl),
9403 v5, v6));
9405 #else
9406 ret = get_errno(target_mmap(arg1, arg2, arg3,
9407 target_to_host_bitmask(arg4, mmap_flags_tbl),
9408 arg5,
9409 arg6));
9410 #endif
9411 break;
9412 #endif
9413 #ifdef TARGET_NR_mmap2
9414 case TARGET_NR_mmap2:
9415 #ifndef MMAP_SHIFT
9416 #define MMAP_SHIFT 12
9417 #endif
9418 ret = get_errno(target_mmap(arg1, arg2, arg3,
9419 target_to_host_bitmask(arg4, mmap_flags_tbl),
9420 arg5,
9421 arg6 << MMAP_SHIFT));
9422 break;
9423 #endif
9424 case TARGET_NR_munmap:
9425 ret = get_errno(target_munmap(arg1, arg2));
9426 break;
9427 case TARGET_NR_mprotect:
9429 TaskState *ts = cpu->opaque;
9430 /* Special hack to detect libc making the stack executable. */
9431 if ((arg3 & PROT_GROWSDOWN)
9432 && arg1 >= ts->info->stack_limit
9433 && arg1 <= ts->info->start_stack) {
9434 arg3 &= ~PROT_GROWSDOWN;
9435 arg2 = arg2 + arg1 - ts->info->stack_limit;
9436 arg1 = ts->info->stack_limit;
9439 ret = get_errno(target_mprotect(arg1, arg2, arg3));
9440 break;
9441 #ifdef TARGET_NR_mremap
9442 case TARGET_NR_mremap:
9443 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9444 break;
9445 #endif
9446 /* ??? msync/mlock/munlock are broken for softmmu. */
9447 #ifdef TARGET_NR_msync
9448 case TARGET_NR_msync:
9449 ret = get_errno(msync(g2h(arg1), arg2, arg3));
9450 break;
9451 #endif
9452 #ifdef TARGET_NR_mlock
9453 case TARGET_NR_mlock:
9454 ret = get_errno(mlock(g2h(arg1), arg2));
9455 break;
9456 #endif
9457 #ifdef TARGET_NR_munlock
9458 case TARGET_NR_munlock:
9459 ret = get_errno(munlock(g2h(arg1), arg2));
9460 break;
9461 #endif
9462 #ifdef TARGET_NR_mlockall
9463 case TARGET_NR_mlockall:
9464 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9465 break;
9466 #endif
9467 #ifdef TARGET_NR_munlockall
9468 case TARGET_NR_munlockall:
9469 ret = get_errno(munlockall());
9470 break;
9471 #endif
9472 case TARGET_NR_truncate:
9473 if (!(p = lock_user_string(arg1)))
9474 goto efault;
9475 ret = get_errno(truncate(p, arg2));
9476 unlock_user(p, arg1, 0);
9477 break;
9478 case TARGET_NR_ftruncate:
9479 ret = get_errno(ftruncate(arg1, arg2));
9480 break;
9481 case TARGET_NR_fchmod:
9482 ret = get_errno(fchmod(arg1, arg2));
9483 break;
9484 #if defined(TARGET_NR_fchmodat)
9485 case TARGET_NR_fchmodat:
9486 if (!(p = lock_user_string(arg2)))
9487 goto efault;
9488 ret = get_errno(fchmodat(arg1, p, arg3, 0));
9489 unlock_user(p, arg2, 0);
9490 break;
9491 #endif
9492 case TARGET_NR_getpriority:
9493 /* Note that negative values are valid for getpriority, so we must
9494 differentiate based on errno settings. */
9495 errno = 0;
9496 ret = getpriority(arg1, arg2);
9497 if (ret == -1 && errno != 0) {
9498 ret = -host_to_target_errno(errno);
9499 break;
9501 #ifdef TARGET_ALPHA
9502 /* Return value is the unbiased priority. Signal no error. */
9503 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9504 #else
9505 /* Return value is a biased priority to avoid negative numbers. */
9506 ret = 20 - ret;
9507 #endif
9508 break;
9509 case TARGET_NR_setpriority:
9510 ret = get_errno(setpriority(arg1, arg2, arg3));
9511 break;
9512 #ifdef TARGET_NR_profil
9513 case TARGET_NR_profil:
9514 goto unimplemented;
9515 #endif
9516 case TARGET_NR_statfs:
9517 if (!(p = lock_user_string(arg1)))
9518 goto efault;
9519 ret = get_errno(statfs(path(p), &stfs));
9520 unlock_user(p, arg1, 0);
9521 convert_statfs:
9522 if (!is_error(ret)) {
9523 struct target_statfs *target_stfs;
9525 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9526 goto efault;
9527 __put_user(stfs.f_type, &target_stfs->f_type);
9528 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9529 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9530 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9531 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9532 __put_user(stfs.f_files, &target_stfs->f_files);
9533 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9534 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9535 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9536 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9537 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9538 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9539 unlock_user_struct(target_stfs, arg2, 1);
9541 break;
9542 case TARGET_NR_fstatfs:
9543 ret = get_errno(fstatfs(arg1, &stfs));
9544 goto convert_statfs;
9545 #ifdef TARGET_NR_statfs64
9546 case TARGET_NR_statfs64:
9547 if (!(p = lock_user_string(arg1)))
9548 goto efault;
9549 ret = get_errno(statfs(path(p), &stfs));
9550 unlock_user(p, arg1, 0);
9551 convert_statfs64:
9552 if (!is_error(ret)) {
9553 struct target_statfs64 *target_stfs;
9555 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9556 goto efault;
9557 __put_user(stfs.f_type, &target_stfs->f_type);
9558 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9559 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9560 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9561 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9562 __put_user(stfs.f_files, &target_stfs->f_files);
9563 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9564 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9565 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9566 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9567 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9568 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9569 unlock_user_struct(target_stfs, arg3, 1);
9571 break;
9572 case TARGET_NR_fstatfs64:
9573 ret = get_errno(fstatfs(arg1, &stfs));
9574 goto convert_statfs64;
9575 #endif
9576 #ifdef TARGET_NR_ioperm
9577 case TARGET_NR_ioperm:
9578 goto unimplemented;
9579 #endif
9580 #ifdef TARGET_NR_socketcall
9581 case TARGET_NR_socketcall:
9582 ret = do_socketcall(arg1, arg2);
9583 break;
9584 #endif
9585 #ifdef TARGET_NR_accept
9586 case TARGET_NR_accept:
9587 ret = do_accept4(arg1, arg2, arg3, 0);
9588 break;
9589 #endif
9590 #ifdef TARGET_NR_accept4
9591 case TARGET_NR_accept4:
9592 ret = do_accept4(arg1, arg2, arg3, arg4);
9593 break;
9594 #endif
9595 #ifdef TARGET_NR_bind
9596 case TARGET_NR_bind:
9597 ret = do_bind(arg1, arg2, arg3);
9598 break;
9599 #endif
9600 #ifdef TARGET_NR_connect
9601 case TARGET_NR_connect:
9602 ret = do_connect(arg1, arg2, arg3);
9603 break;
9604 #endif
9605 #ifdef TARGET_NR_getpeername
9606 case TARGET_NR_getpeername:
9607 ret = do_getpeername(arg1, arg2, arg3);
9608 break;
9609 #endif
9610 #ifdef TARGET_NR_getsockname
9611 case TARGET_NR_getsockname:
9612 ret = do_getsockname(arg1, arg2, arg3);
9613 break;
9614 #endif
9615 #ifdef TARGET_NR_getsockopt
9616 case TARGET_NR_getsockopt:
9617 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9618 break;
9619 #endif
9620 #ifdef TARGET_NR_listen
9621 case TARGET_NR_listen:
9622 ret = get_errno(listen(arg1, arg2));
9623 break;
9624 #endif
9625 #ifdef TARGET_NR_recv
9626 case TARGET_NR_recv:
9627 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9628 break;
9629 #endif
9630 #ifdef TARGET_NR_recvfrom
9631 case TARGET_NR_recvfrom:
9632 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9633 break;
9634 #endif
9635 #ifdef TARGET_NR_recvmsg
9636 case TARGET_NR_recvmsg:
9637 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
9638 break;
9639 #endif
9640 #ifdef TARGET_NR_send
9641 case TARGET_NR_send:
9642 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9643 break;
9644 #endif
9645 #ifdef TARGET_NR_sendmsg
9646 case TARGET_NR_sendmsg:
9647 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
9648 break;
9649 #endif
9650 #ifdef TARGET_NR_sendmmsg
9651 case TARGET_NR_sendmmsg:
9652 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9653 break;
9654 case TARGET_NR_recvmmsg:
9655 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9656 break;
9657 #endif
9658 #ifdef TARGET_NR_sendto
9659 case TARGET_NR_sendto:
9660 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9661 break;
9662 #endif
9663 #ifdef TARGET_NR_shutdown
9664 case TARGET_NR_shutdown:
9665 ret = get_errno(shutdown(arg1, arg2));
9666 break;
9667 #endif
9668 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9669 case TARGET_NR_getrandom:
9670 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9671 if (!p) {
9672 goto efault;
9674 ret = get_errno(getrandom(p, arg2, arg3));
9675 unlock_user(p, arg1, ret);
9676 break;
9677 #endif
9678 #ifdef TARGET_NR_socket
9679 case TARGET_NR_socket:
9680 ret = do_socket(arg1, arg2, arg3);
9681 break;
9682 #endif
9683 #ifdef TARGET_NR_socketpair
9684 case TARGET_NR_socketpair:
9685 ret = do_socketpair(arg1, arg2, arg3, arg4);
9686 break;
9687 #endif
9688 #ifdef TARGET_NR_setsockopt
9689 case TARGET_NR_setsockopt:
9690 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9691 break;
9692 #endif
9693 #if defined(TARGET_NR_syslog)
9694 case TARGET_NR_syslog:
9696 int len = arg2;
9698 switch (arg1) {
9699 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
9700 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
9701 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
9702 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
9703 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
9704 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9705 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
9706 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
9708 ret = get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9710 break;
9711 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
9712 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
9713 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
9715 ret = -TARGET_EINVAL;
9716 if (len < 0) {
9717 goto fail;
9719 ret = 0;
9720 if (len == 0) {
9721 break;
9723 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9724 if (!p) {
9725 ret = -TARGET_EFAULT;
9726 goto fail;
9728 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9729 unlock_user(p, arg2, arg3);
9731 break;
9732 default:
9733 ret = -EINVAL;
9734 break;
9737 break;
9738 #endif
9739 case TARGET_NR_setitimer:
9741 struct itimerval value, ovalue, *pvalue;
9743 if (arg2) {
9744 pvalue = &value;
9745 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9746 || copy_from_user_timeval(&pvalue->it_value,
9747 arg2 + sizeof(struct target_timeval)))
9748 goto efault;
9749 } else {
9750 pvalue = NULL;
9752 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9753 if (!is_error(ret) && arg3) {
9754 if (copy_to_user_timeval(arg3,
9755 &ovalue.it_interval)
9756 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9757 &ovalue.it_value))
9758 goto efault;
9761 break;
9762 case TARGET_NR_getitimer:
9764 struct itimerval value;
9766 ret = get_errno(getitimer(arg1, &value));
9767 if (!is_error(ret) && arg2) {
9768 if (copy_to_user_timeval(arg2,
9769 &value.it_interval)
9770 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9771 &value.it_value))
9772 goto efault;
9775 break;
9776 #ifdef TARGET_NR_stat
9777 case TARGET_NR_stat:
9778 if (!(p = lock_user_string(arg1)))
9779 goto efault;
9780 ret = get_errno(stat(path(p), &st));
9781 unlock_user(p, arg1, 0);
9782 goto do_stat;
9783 #endif
9784 #ifdef TARGET_NR_lstat
9785 case TARGET_NR_lstat:
9786 if (!(p = lock_user_string(arg1)))
9787 goto efault;
9788 ret = get_errno(lstat(path(p), &st));
9789 unlock_user(p, arg1, 0);
9790 goto do_stat;
9791 #endif
9792 case TARGET_NR_fstat:
9794 ret = get_errno(fstat(arg1, &st));
9795 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9796 do_stat:
9797 #endif
9798 if (!is_error(ret)) {
9799 struct target_stat *target_st;
9801 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9802 goto efault;
9803 memset(target_st, 0, sizeof(*target_st));
9804 __put_user(st.st_dev, &target_st->st_dev);
9805 __put_user(st.st_ino, &target_st->st_ino);
9806 __put_user(st.st_mode, &target_st->st_mode);
9807 __put_user(st.st_uid, &target_st->st_uid);
9808 __put_user(st.st_gid, &target_st->st_gid);
9809 __put_user(st.st_nlink, &target_st->st_nlink);
9810 __put_user(st.st_rdev, &target_st->st_rdev);
9811 __put_user(st.st_size, &target_st->st_size);
9812 __put_user(st.st_blksize, &target_st->st_blksize);
9813 __put_user(st.st_blocks, &target_st->st_blocks);
9814 __put_user(st.st_atime, &target_st->target_st_atime);
9815 __put_user(st.st_mtime, &target_st->target_st_mtime);
9816 __put_user(st.st_ctime, &target_st->target_st_ctime);
9817 unlock_user_struct(target_st, arg2, 1);
9820 break;
9821 #ifdef TARGET_NR_olduname
9822 case TARGET_NR_olduname:
9823 goto unimplemented;
9824 #endif
9825 #ifdef TARGET_NR_iopl
9826 case TARGET_NR_iopl:
9827 goto unimplemented;
9828 #endif
9829 case TARGET_NR_vhangup:
9830 ret = get_errno(vhangup());
9831 break;
9832 #ifdef TARGET_NR_idle
9833 case TARGET_NR_idle:
9834 goto unimplemented;
9835 #endif
9836 #ifdef TARGET_NR_syscall
9837 case TARGET_NR_syscall:
9838 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9839 arg6, arg7, arg8, 0);
9840 break;
9841 #endif
9842 case TARGET_NR_wait4:
9844 int status;
9845 abi_long status_ptr = arg2;
9846 struct rusage rusage, *rusage_ptr;
9847 abi_ulong target_rusage = arg4;
9848 abi_long rusage_err;
9849 if (target_rusage)
9850 rusage_ptr = &rusage;
9851 else
9852 rusage_ptr = NULL;
9853 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9854 if (!is_error(ret)) {
9855 if (status_ptr && ret) {
9856 status = host_to_target_waitstatus(status);
9857 if (put_user_s32(status, status_ptr))
9858 goto efault;
9860 if (target_rusage) {
9861 rusage_err = host_to_target_rusage(target_rusage, &rusage);
9862 if (rusage_err) {
9863 ret = rusage_err;
9868 break;
9869 #ifdef TARGET_NR_swapoff
9870 case TARGET_NR_swapoff:
9871 if (!(p = lock_user_string(arg1)))
9872 goto efault;
9873 ret = get_errno(swapoff(p));
9874 unlock_user(p, arg1, 0);
9875 break;
9876 #endif
9877 case TARGET_NR_sysinfo:
9879 struct target_sysinfo *target_value;
9880 struct sysinfo value;
9881 ret = get_errno(sysinfo(&value));
9882 if (!is_error(ret) && arg1)
9884 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9885 goto efault;
9886 __put_user(value.uptime, &target_value->uptime);
9887 __put_user(value.loads[0], &target_value->loads[0]);
9888 __put_user(value.loads[1], &target_value->loads[1]);
9889 __put_user(value.loads[2], &target_value->loads[2]);
9890 __put_user(value.totalram, &target_value->totalram);
9891 __put_user(value.freeram, &target_value->freeram);
9892 __put_user(value.sharedram, &target_value->sharedram);
9893 __put_user(value.bufferram, &target_value->bufferram);
9894 __put_user(value.totalswap, &target_value->totalswap);
9895 __put_user(value.freeswap, &target_value->freeswap);
9896 __put_user(value.procs, &target_value->procs);
9897 __put_user(value.totalhigh, &target_value->totalhigh);
9898 __put_user(value.freehigh, &target_value->freehigh);
9899 __put_user(value.mem_unit, &target_value->mem_unit);
9900 unlock_user_struct(target_value, arg1, 1);
9903 break;
9904 #ifdef TARGET_NR_ipc
9905 case TARGET_NR_ipc:
9906 ret = do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9907 break;
9908 #endif
9909 #ifdef TARGET_NR_semget
9910 case TARGET_NR_semget:
9911 ret = get_errno(semget(arg1, arg2, arg3));
9912 break;
9913 #endif
9914 #ifdef TARGET_NR_semop
9915 case TARGET_NR_semop:
9916 ret = do_semop(arg1, arg2, arg3);
9917 break;
9918 #endif
9919 #ifdef TARGET_NR_semctl
9920 case TARGET_NR_semctl:
9921 ret = do_semctl(arg1, arg2, arg3, arg4);
9922 break;
9923 #endif
9924 #ifdef TARGET_NR_msgctl
9925 case TARGET_NR_msgctl:
9926 ret = do_msgctl(arg1, arg2, arg3);
9927 break;
9928 #endif
9929 #ifdef TARGET_NR_msgget
9930 case TARGET_NR_msgget:
9931 ret = get_errno(msgget(arg1, arg2));
9932 break;
9933 #endif
9934 #ifdef TARGET_NR_msgrcv
9935 case TARGET_NR_msgrcv:
9936 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9937 break;
9938 #endif
9939 #ifdef TARGET_NR_msgsnd
9940 case TARGET_NR_msgsnd:
9941 ret = do_msgsnd(arg1, arg2, arg3, arg4);
9942 break;
9943 #endif
9944 #ifdef TARGET_NR_shmget
9945 case TARGET_NR_shmget:
9946 ret = get_errno(shmget(arg1, arg2, arg3));
9947 break;
9948 #endif
9949 #ifdef TARGET_NR_shmctl
9950 case TARGET_NR_shmctl:
9951 ret = do_shmctl(arg1, arg2, arg3);
9952 break;
9953 #endif
9954 #ifdef TARGET_NR_shmat
9955 case TARGET_NR_shmat:
9956 ret = do_shmat(cpu_env, arg1, arg2, arg3);
9957 break;
9958 #endif
9959 #ifdef TARGET_NR_shmdt
9960 case TARGET_NR_shmdt:
9961 ret = do_shmdt(arg1);
9962 break;
9963 #endif
9964 case TARGET_NR_fsync:
9965 ret = get_errno(fsync(arg1));
9966 break;
9967 case TARGET_NR_clone:
9968 /* Linux manages to have three different orderings for its
9969 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9970 * match the kernel's CONFIG_CLONE_* settings.
9971 * Microblaze is further special in that it uses a sixth
9972 * implicit argument to clone for the TLS pointer.
9974 #if defined(TARGET_MICROBLAZE)
9975 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9976 #elif defined(TARGET_CLONE_BACKWARDS)
9977 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9978 #elif defined(TARGET_CLONE_BACKWARDS2)
9979 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9980 #else
9981 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9982 #endif
9983 break;
9984 #ifdef __NR_exit_group
9985 /* new thread calls */
9986 case TARGET_NR_exit_group:
9987 #ifdef TARGET_GPROF
9988 _mcleanup();
9989 #endif
9990 gdb_exit(cpu_env, arg1);
9991 ret = get_errno(exit_group(arg1));
9992 break;
9993 #endif
9994 case TARGET_NR_setdomainname:
9995 if (!(p = lock_user_string(arg1)))
9996 goto efault;
9997 ret = get_errno(setdomainname(p, arg2));
9998 unlock_user(p, arg1, 0);
9999 break;
10000 case TARGET_NR_uname:
10001 /* no need to transcode because we use the linux syscall */
10003 struct new_utsname * buf;
10005 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10006 goto efault;
10007 ret = get_errno(sys_uname(buf));
10008 if (!is_error(ret)) {
10009 /* Overwrite the native machine name with whatever is being
10010 emulated. */
10011 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
10012 /* Allow the user to override the reported release. */
10013 if (qemu_uname_release && *qemu_uname_release) {
10014 g_strlcpy(buf->release, qemu_uname_release,
10015 sizeof(buf->release));
10018 unlock_user_struct(buf, arg1, 1);
10020 break;
10021 #ifdef TARGET_I386
10022 case TARGET_NR_modify_ldt:
10023 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
10024 break;
10025 #if !defined(TARGET_X86_64)
10026 case TARGET_NR_vm86old:
10027 goto unimplemented;
10028 case TARGET_NR_vm86:
10029 ret = do_vm86(cpu_env, arg1, arg2);
10030 break;
10031 #endif
10032 #endif
10033 case TARGET_NR_adjtimex:
10035 struct timex host_buf;
10037 if (target_to_host_timex(&host_buf, arg1) != 0) {
10038 goto efault;
10040 ret = get_errno(adjtimex(&host_buf));
10041 if (!is_error(ret)) {
10042 if (host_to_target_timex(arg1, &host_buf) != 0) {
10043 goto efault;
10047 break;
10048 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10049 case TARGET_NR_clock_adjtime:
10051 struct timex htx, *phtx = &htx;
10053 if (target_to_host_timex(phtx, arg2) != 0) {
10054 goto efault;
10056 ret = get_errno(clock_adjtime(arg1, phtx));
10057 if (!is_error(ret) && phtx) {
10058 if (host_to_target_timex(arg2, phtx) != 0) {
10059 goto efault;
10063 break;
10064 #endif
10065 #ifdef TARGET_NR_create_module
10066 case TARGET_NR_create_module:
10067 #endif
10068 case TARGET_NR_init_module:
10069 case TARGET_NR_delete_module:
10070 #ifdef TARGET_NR_get_kernel_syms
10071 case TARGET_NR_get_kernel_syms:
10072 #endif
10073 goto unimplemented;
10074 case TARGET_NR_quotactl:
10075 goto unimplemented;
10076 case TARGET_NR_getpgid:
10077 ret = get_errno(getpgid(arg1));
10078 break;
10079 case TARGET_NR_fchdir:
10080 ret = get_errno(fchdir(arg1));
10081 break;
10082 #ifdef TARGET_NR_bdflush /* not on x86_64 */
10083 case TARGET_NR_bdflush:
10084 goto unimplemented;
10085 #endif
10086 #ifdef TARGET_NR_sysfs
10087 case TARGET_NR_sysfs:
10088 goto unimplemented;
10089 #endif
10090 case TARGET_NR_personality:
10091 ret = get_errno(personality(arg1));
10092 break;
10093 #ifdef TARGET_NR_afs_syscall
10094 case TARGET_NR_afs_syscall:
10095 goto unimplemented;
10096 #endif
10097 #ifdef TARGET_NR__llseek /* Not on alpha */
10098 case TARGET_NR__llseek:
10100 int64_t res;
10101 #if !defined(__NR_llseek)
10102 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10103 if (res == -1) {
10104 ret = get_errno(res);
10105 } else {
10106 ret = 0;
10108 #else
10109 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10110 #endif
10111 if ((ret == 0) && put_user_s64(res, arg4)) {
10112 goto efault;
10115 break;
10116 #endif
10117 #ifdef TARGET_NR_getdents
10118 case TARGET_NR_getdents:
10119 #ifdef __NR_getdents
10120 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10122 struct target_dirent *target_dirp;
10123 struct linux_dirent *dirp;
10124 abi_long count = arg3;
10126 dirp = g_try_malloc(count);
10127 if (!dirp) {
10128 ret = -TARGET_ENOMEM;
10129 goto fail;
10132 ret = get_errno(sys_getdents(arg1, dirp, count));
10133 if (!is_error(ret)) {
10134 struct linux_dirent *de;
10135 struct target_dirent *tde;
10136 int len = ret;
10137 int reclen, treclen;
10138 int count1, tnamelen;
10140 count1 = 0;
10141 de = dirp;
10142 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10143 goto efault;
10144 tde = target_dirp;
10145 while (len > 0) {
10146 reclen = de->d_reclen;
10147 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10148 assert(tnamelen >= 0);
10149 treclen = tnamelen + offsetof(struct target_dirent, d_name);
10150 assert(count1 + treclen <= count);
10151 tde->d_reclen = tswap16(treclen);
10152 tde->d_ino = tswapal(de->d_ino);
10153 tde->d_off = tswapal(de->d_off);
10154 memcpy(tde->d_name, de->d_name, tnamelen);
10155 de = (struct linux_dirent *)((char *)de + reclen);
10156 len -= reclen;
10157 tde = (struct target_dirent *)((char *)tde + treclen);
10158 count1 += treclen;
10160 ret = count1;
10161 unlock_user(target_dirp, arg2, ret);
10163 g_free(dirp);
10165 #else
10167 struct linux_dirent *dirp;
10168 abi_long count = arg3;
10170 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10171 goto efault;
10172 ret = get_errno(sys_getdents(arg1, dirp, count));
10173 if (!is_error(ret)) {
10174 struct linux_dirent *de;
10175 int len = ret;
10176 int reclen;
10177 de = dirp;
10178 while (len > 0) {
10179 reclen = de->d_reclen;
10180 if (reclen > len)
10181 break;
10182 de->d_reclen = tswap16(reclen);
10183 tswapls(&de->d_ino);
10184 tswapls(&de->d_off);
10185 de = (struct linux_dirent *)((char *)de + reclen);
10186 len -= reclen;
10189 unlock_user(dirp, arg2, ret);
10191 #endif
10192 #else
10193 /* Implement getdents in terms of getdents64 */
10195 struct linux_dirent64 *dirp;
10196 abi_long count = arg3;
10198 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10199 if (!dirp) {
10200 goto efault;
10202 ret = get_errno(sys_getdents64(arg1, dirp, count));
10203 if (!is_error(ret)) {
10204 /* Convert the dirent64 structs to target dirent. We do this
10205 * in-place, since we can guarantee that a target_dirent is no
10206 * larger than a dirent64; however this means we have to be
10207 * careful to read everything before writing in the new format.
10209 struct linux_dirent64 *de;
10210 struct target_dirent *tde;
10211 int len = ret;
10212 int tlen = 0;
10214 de = dirp;
10215 tde = (struct target_dirent *)dirp;
10216 while (len > 0) {
10217 int namelen, treclen;
10218 int reclen = de->d_reclen;
10219 uint64_t ino = de->d_ino;
10220 int64_t off = de->d_off;
10221 uint8_t type = de->d_type;
10223 namelen = strlen(de->d_name);
10224 treclen = offsetof(struct target_dirent, d_name)
10225 + namelen + 2;
10226 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10228 memmove(tde->d_name, de->d_name, namelen + 1);
10229 tde->d_ino = tswapal(ino);
10230 tde->d_off = tswapal(off);
10231 tde->d_reclen = tswap16(treclen);
10232 /* The target_dirent type is in what was formerly a padding
10233 * byte at the end of the structure:
10235 *(((char *)tde) + treclen - 1) = type;
10237 de = (struct linux_dirent64 *)((char *)de + reclen);
10238 tde = (struct target_dirent *)((char *)tde + treclen);
10239 len -= reclen;
10240 tlen += treclen;
10242 ret = tlen;
10244 unlock_user(dirp, arg2, ret);
10246 #endif
10247 break;
10248 #endif /* TARGET_NR_getdents */
10249 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10250 case TARGET_NR_getdents64:
10252 struct linux_dirent64 *dirp;
10253 abi_long count = arg3;
10254 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10255 goto efault;
10256 ret = get_errno(sys_getdents64(arg1, dirp, count));
10257 if (!is_error(ret)) {
10258 struct linux_dirent64 *de;
10259 int len = ret;
10260 int reclen;
10261 de = dirp;
10262 while (len > 0) {
10263 reclen = de->d_reclen;
10264 if (reclen > len)
10265 break;
10266 de->d_reclen = tswap16(reclen);
10267 tswap64s((uint64_t *)&de->d_ino);
10268 tswap64s((uint64_t *)&de->d_off);
10269 de = (struct linux_dirent64 *)((char *)de + reclen);
10270 len -= reclen;
10273 unlock_user(dirp, arg2, ret);
10275 break;
10276 #endif /* TARGET_NR_getdents64 */
10277 #if defined(TARGET_NR__newselect)
10278 case TARGET_NR__newselect:
10279 ret = do_select(arg1, arg2, arg3, arg4, arg5);
10280 break;
10281 #endif
10282 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10283 # ifdef TARGET_NR_poll
10284 case TARGET_NR_poll:
10285 # endif
10286 # ifdef TARGET_NR_ppoll
10287 case TARGET_NR_ppoll:
10288 # endif
10290 struct target_pollfd *target_pfd;
10291 unsigned int nfds = arg2;
10292 struct pollfd *pfd;
10293 unsigned int i;
10295 pfd = NULL;
10296 target_pfd = NULL;
10297 if (nfds) {
10298 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
10299 ret = -TARGET_EINVAL;
10300 break;
10303 target_pfd = lock_user(VERIFY_WRITE, arg1,
10304 sizeof(struct target_pollfd) * nfds, 1);
10305 if (!target_pfd) {
10306 goto efault;
10309 pfd = alloca(sizeof(struct pollfd) * nfds);
10310 for (i = 0; i < nfds; i++) {
10311 pfd[i].fd = tswap32(target_pfd[i].fd);
10312 pfd[i].events = tswap16(target_pfd[i].events);
10316 switch (num) {
10317 # ifdef TARGET_NR_ppoll
10318 case TARGET_NR_ppoll:
10320 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
10321 target_sigset_t *target_set;
10322 sigset_t _set, *set = &_set;
10324 if (arg3) {
10325 if (target_to_host_timespec(timeout_ts, arg3)) {
10326 unlock_user(target_pfd, arg1, 0);
10327 goto efault;
10329 } else {
10330 timeout_ts = NULL;
10333 if (arg4) {
10334 if (arg5 != sizeof(target_sigset_t)) {
10335 unlock_user(target_pfd, arg1, 0);
10336 ret = -TARGET_EINVAL;
10337 break;
10340 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
10341 if (!target_set) {
10342 unlock_user(target_pfd, arg1, 0);
10343 goto efault;
10345 target_to_host_sigset(set, target_set);
10346 } else {
10347 set = NULL;
10350 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
10351 set, SIGSET_T_SIZE));
10353 if (!is_error(ret) && arg3) {
10354 host_to_target_timespec(arg3, timeout_ts);
10356 if (arg4) {
10357 unlock_user(target_set, arg4, 0);
10359 break;
10361 # endif
10362 # ifdef TARGET_NR_poll
10363 case TARGET_NR_poll:
10365 struct timespec ts, *pts;
10367 if (arg3 >= 0) {
10368 /* Convert ms to secs, ns */
10369 ts.tv_sec = arg3 / 1000;
10370 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10371 pts = &ts;
10372 } else {
10373 /* -ve poll() timeout means "infinite" */
10374 pts = NULL;
10376 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10377 break;
10379 # endif
10380 default:
10381 g_assert_not_reached();
10384 if (!is_error(ret)) {
10385 for(i = 0; i < nfds; i++) {
10386 target_pfd[i].revents = tswap16(pfd[i].revents);
10389 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10391 break;
10392 #endif
10393 case TARGET_NR_flock:
10394 /* NOTE: the flock constant seems to be the same for every
10395 Linux platform */
10396 ret = get_errno(safe_flock(arg1, arg2));
10397 break;
10398 case TARGET_NR_readv:
10400 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10401 if (vec != NULL) {
10402 ret = get_errno(safe_readv(arg1, vec, arg3));
10403 unlock_iovec(vec, arg2, arg3, 1);
10404 } else {
10405 ret = -host_to_target_errno(errno);
10408 break;
10409 case TARGET_NR_writev:
10411 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10412 if (vec != NULL) {
10413 ret = get_errno(safe_writev(arg1, vec, arg3));
10414 unlock_iovec(vec, arg2, arg3, 0);
10415 } else {
10416 ret = -host_to_target_errno(errno);
10419 break;
10420 #if defined(TARGET_NR_preadv)
10421 case TARGET_NR_preadv:
10423 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10424 if (vec != NULL) {
10425 ret = get_errno(safe_preadv(arg1, vec, arg3, arg4, arg5));
10426 unlock_iovec(vec, arg2, arg3, 1);
10427 } else {
10428 ret = -host_to_target_errno(errno);
10431 break;
10432 #endif
10433 #if defined(TARGET_NR_pwritev)
10434 case TARGET_NR_pwritev:
10436 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10437 if (vec != NULL) {
10438 ret = get_errno(safe_pwritev(arg1, vec, arg3, arg4, arg5));
10439 unlock_iovec(vec, arg2, arg3, 0);
10440 } else {
10441 ret = -host_to_target_errno(errno);
10444 break;
10445 #endif
10446 case TARGET_NR_getsid:
10447 ret = get_errno(getsid(arg1));
10448 break;
10449 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10450 case TARGET_NR_fdatasync:
10451 ret = get_errno(fdatasync(arg1));
10452 break;
10453 #endif
10454 #ifdef TARGET_NR__sysctl
10455 case TARGET_NR__sysctl:
10456 /* We don't implement this, but ENOTDIR is always a safe
10457 return value. */
10458 ret = -TARGET_ENOTDIR;
10459 break;
10460 #endif
10461 case TARGET_NR_sched_getaffinity:
10463 unsigned int mask_size;
10464 unsigned long *mask;
10467 * sched_getaffinity needs multiples of ulong, so need to take
10468 * care of mismatches between target ulong and host ulong sizes.
10470 if (arg2 & (sizeof(abi_ulong) - 1)) {
10471 ret = -TARGET_EINVAL;
10472 break;
10474 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10476 mask = alloca(mask_size);
10477 memset(mask, 0, mask_size);
10478 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10480 if (!is_error(ret)) {
10481 if (ret > arg2) {
10482 /* More data returned than the caller's buffer will fit.
10483 * This only happens if sizeof(abi_long) < sizeof(long)
10484 * and the caller passed us a buffer holding an odd number
10485 * of abi_longs. If the host kernel is actually using the
10486 * extra 4 bytes then fail EINVAL; otherwise we can just
10487 * ignore them and only copy the interesting part.
10489 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10490 if (numcpus > arg2 * 8) {
10491 ret = -TARGET_EINVAL;
10492 break;
10494 ret = arg2;
10497 ret = host_to_target_cpu_mask(mask, mask_size, arg3, arg2);
10500 break;
10501 case TARGET_NR_sched_setaffinity:
10503 unsigned int mask_size;
10504 unsigned long *mask;
10507 * sched_setaffinity needs multiples of ulong, so need to take
10508 * care of mismatches between target ulong and host ulong sizes.
10510 if (arg2 & (sizeof(abi_ulong) - 1)) {
10511 ret = -TARGET_EINVAL;
10512 break;
10514 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10515 mask = alloca(mask_size);
10517 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10518 if (ret) {
10519 break;
10522 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10524 break;
10525 case TARGET_NR_getcpu:
10527 unsigned cpu, node;
10528 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10529 arg2 ? &node : NULL,
10530 NULL));
10531 if (is_error(ret)) {
10532 goto fail;
10534 if (arg1 && put_user_u32(cpu, arg1)) {
10535 goto efault;
10537 if (arg2 && put_user_u32(node, arg2)) {
10538 goto efault;
10541 break;
10542 case TARGET_NR_sched_setparam:
10544 struct sched_param *target_schp;
10545 struct sched_param schp;
10547 if (arg2 == 0) {
10548 return -TARGET_EINVAL;
10550 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10551 goto efault;
10552 schp.sched_priority = tswap32(target_schp->sched_priority);
10553 unlock_user_struct(target_schp, arg2, 0);
10554 ret = get_errno(sched_setparam(arg1, &schp));
10556 break;
10557 case TARGET_NR_sched_getparam:
10559 struct sched_param *target_schp;
10560 struct sched_param schp;
10562 if (arg2 == 0) {
10563 return -TARGET_EINVAL;
10565 ret = get_errno(sched_getparam(arg1, &schp));
10566 if (!is_error(ret)) {
10567 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10568 goto efault;
10569 target_schp->sched_priority = tswap32(schp.sched_priority);
10570 unlock_user_struct(target_schp, arg2, 1);
10573 break;
10574 case TARGET_NR_sched_setscheduler:
10576 struct sched_param *target_schp;
10577 struct sched_param schp;
10578 if (arg3 == 0) {
10579 return -TARGET_EINVAL;
10581 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10582 goto efault;
10583 schp.sched_priority = tswap32(target_schp->sched_priority);
10584 unlock_user_struct(target_schp, arg3, 0);
10585 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
10587 break;
10588 case TARGET_NR_sched_getscheduler:
10589 ret = get_errno(sched_getscheduler(arg1));
10590 break;
10591 case TARGET_NR_sched_yield:
10592 ret = get_errno(sched_yield());
10593 break;
10594 case TARGET_NR_sched_get_priority_max:
10595 ret = get_errno(sched_get_priority_max(arg1));
10596 break;
10597 case TARGET_NR_sched_get_priority_min:
10598 ret = get_errno(sched_get_priority_min(arg1));
10599 break;
10600 case TARGET_NR_sched_rr_get_interval:
10602 struct timespec ts;
10603 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10604 if (!is_error(ret)) {
10605 ret = host_to_target_timespec(arg2, &ts);
10608 break;
10609 case TARGET_NR_nanosleep:
10611 struct timespec req, rem;
10612 target_to_host_timespec(&req, arg1);
10613 ret = get_errno(safe_nanosleep(&req, &rem));
10614 if (is_error(ret) && arg2) {
10615 host_to_target_timespec(arg2, &rem);
10618 break;
10619 #ifdef TARGET_NR_query_module
10620 case TARGET_NR_query_module:
10621 goto unimplemented;
10622 #endif
10623 #ifdef TARGET_NR_nfsservctl
10624 case TARGET_NR_nfsservctl:
10625 goto unimplemented;
10626 #endif
10627 case TARGET_NR_prctl:
10628 switch (arg1) {
10629 case PR_GET_PDEATHSIG:
10631 int deathsig;
10632 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10633 if (!is_error(ret) && arg2
10634 && put_user_ual(deathsig, arg2)) {
10635 goto efault;
10637 break;
10639 #ifdef PR_GET_NAME
10640 case PR_GET_NAME:
10642 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10643 if (!name) {
10644 goto efault;
10646 ret = get_errno(prctl(arg1, (unsigned long)name,
10647 arg3, arg4, arg5));
10648 unlock_user(name, arg2, 16);
10649 break;
10651 case PR_SET_NAME:
10653 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10654 if (!name) {
10655 goto efault;
10657 ret = get_errno(prctl(arg1, (unsigned long)name,
10658 arg3, arg4, arg5));
10659 unlock_user(name, arg2, 0);
10660 break;
10662 #endif
10663 case PR_GET_SECCOMP:
10664 case PR_SET_SECCOMP:
10665 /* Disable seccomp to prevent the target disabling syscalls we
10666 * need. */
10667 ret = -TARGET_EINVAL;
10668 break;
10669 default:
10670 /* Most prctl options have no pointer arguments */
10671 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10672 break;
10674 break;
10675 #ifdef TARGET_NR_arch_prctl
10676 case TARGET_NR_arch_prctl:
10677 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10678 ret = do_arch_prctl(cpu_env, arg1, arg2);
10679 break;
10680 #else
10681 goto unimplemented;
10682 #endif
10683 #endif
10684 #ifdef TARGET_NR_pread64
10685 case TARGET_NR_pread64:
10686 if (regpairs_aligned(cpu_env, num)) {
10687 arg4 = arg5;
10688 arg5 = arg6;
10690 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
10691 goto efault;
10692 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10693 unlock_user(p, arg2, ret);
10694 break;
10695 case TARGET_NR_pwrite64:
10696 if (regpairs_aligned(cpu_env, num)) {
10697 arg4 = arg5;
10698 arg5 = arg6;
10700 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
10701 goto efault;
10702 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10703 unlock_user(p, arg2, 0);
10704 break;
10705 #endif
10706 case TARGET_NR_getcwd:
10707 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10708 goto efault;
10709 ret = get_errno(sys_getcwd1(p, arg2));
10710 unlock_user(p, arg1, ret);
10711 break;
10712 case TARGET_NR_capget:
10713 case TARGET_NR_capset:
10715 struct target_user_cap_header *target_header;
10716 struct target_user_cap_data *target_data = NULL;
10717 struct __user_cap_header_struct header;
10718 struct __user_cap_data_struct data[2];
10719 struct __user_cap_data_struct *dataptr = NULL;
10720 int i, target_datalen;
10721 int data_items = 1;
10723 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10724 goto efault;
10726 header.version = tswap32(target_header->version);
10727 header.pid = tswap32(target_header->pid);
10729 if (header.version != _LINUX_CAPABILITY_VERSION) {
10730 /* Version 2 and up takes pointer to two user_data structs */
10731 data_items = 2;
10734 target_datalen = sizeof(*target_data) * data_items;
10736 if (arg2) {
10737 if (num == TARGET_NR_capget) {
10738 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10739 } else {
10740 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10742 if (!target_data) {
10743 unlock_user_struct(target_header, arg1, 0);
10744 goto efault;
10747 if (num == TARGET_NR_capset) {
10748 for (i = 0; i < data_items; i++) {
10749 data[i].effective = tswap32(target_data[i].effective);
10750 data[i].permitted = tswap32(target_data[i].permitted);
10751 data[i].inheritable = tswap32(target_data[i].inheritable);
10755 dataptr = data;
10758 if (num == TARGET_NR_capget) {
10759 ret = get_errno(capget(&header, dataptr));
10760 } else {
10761 ret = get_errno(capset(&header, dataptr));
10764 /* The kernel always updates version for both capget and capset */
10765 target_header->version = tswap32(header.version);
10766 unlock_user_struct(target_header, arg1, 1);
10768 if (arg2) {
10769 if (num == TARGET_NR_capget) {
10770 for (i = 0; i < data_items; i++) {
10771 target_data[i].effective = tswap32(data[i].effective);
10772 target_data[i].permitted = tswap32(data[i].permitted);
10773 target_data[i].inheritable = tswap32(data[i].inheritable);
10775 unlock_user(target_data, arg2, target_datalen);
10776 } else {
10777 unlock_user(target_data, arg2, 0);
10780 break;
10782 case TARGET_NR_sigaltstack:
10783 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
10784 break;
10786 #ifdef CONFIG_SENDFILE
10787 case TARGET_NR_sendfile:
10789 off_t *offp = NULL;
10790 off_t off;
10791 if (arg3) {
10792 ret = get_user_sal(off, arg3);
10793 if (is_error(ret)) {
10794 break;
10796 offp = &off;
10798 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10799 if (!is_error(ret) && arg3) {
10800 abi_long ret2 = put_user_sal(off, arg3);
10801 if (is_error(ret2)) {
10802 ret = ret2;
10805 break;
10807 #ifdef TARGET_NR_sendfile64
10808 case TARGET_NR_sendfile64:
10810 off_t *offp = NULL;
10811 off_t off;
10812 if (arg3) {
10813 ret = get_user_s64(off, arg3);
10814 if (is_error(ret)) {
10815 break;
10817 offp = &off;
10819 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10820 if (!is_error(ret) && arg3) {
10821 abi_long ret2 = put_user_s64(off, arg3);
10822 if (is_error(ret2)) {
10823 ret = ret2;
10826 break;
10828 #endif
10829 #else
10830 case TARGET_NR_sendfile:
10831 #ifdef TARGET_NR_sendfile64
10832 case TARGET_NR_sendfile64:
10833 #endif
10834 goto unimplemented;
10835 #endif
10837 #ifdef TARGET_NR_getpmsg
10838 case TARGET_NR_getpmsg:
10839 goto unimplemented;
10840 #endif
10841 #ifdef TARGET_NR_putpmsg
10842 case TARGET_NR_putpmsg:
10843 goto unimplemented;
10844 #endif
10845 #ifdef TARGET_NR_vfork
10846 case TARGET_NR_vfork:
10847 ret = get_errno(do_fork(cpu_env,
10848 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10849 0, 0, 0, 0));
10850 break;
10851 #endif
10852 #ifdef TARGET_NR_ugetrlimit
10853 case TARGET_NR_ugetrlimit:
10855 struct rlimit rlim;
10856 int resource = target_to_host_resource(arg1);
10857 ret = get_errno(getrlimit(resource, &rlim));
10858 if (!is_error(ret)) {
10859 struct target_rlimit *target_rlim;
10860 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10861 goto efault;
10862 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10863 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10864 unlock_user_struct(target_rlim, arg2, 1);
10866 break;
10868 #endif
10869 #ifdef TARGET_NR_truncate64
10870 case TARGET_NR_truncate64:
10871 if (!(p = lock_user_string(arg1)))
10872 goto efault;
10873 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10874 unlock_user(p, arg1, 0);
10875 break;
10876 #endif
10877 #ifdef TARGET_NR_ftruncate64
10878 case TARGET_NR_ftruncate64:
10879 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10880 break;
10881 #endif
10882 #ifdef TARGET_NR_stat64
10883 case TARGET_NR_stat64:
10884 if (!(p = lock_user_string(arg1)))
10885 goto efault;
10886 ret = get_errno(stat(path(p), &st));
10887 unlock_user(p, arg1, 0);
10888 if (!is_error(ret))
10889 ret = host_to_target_stat64(cpu_env, arg2, &st);
10890 break;
10891 #endif
10892 #ifdef TARGET_NR_lstat64
10893 case TARGET_NR_lstat64:
10894 if (!(p = lock_user_string(arg1)))
10895 goto efault;
10896 ret = get_errno(lstat(path(p), &st));
10897 unlock_user(p, arg1, 0);
10898 if (!is_error(ret))
10899 ret = host_to_target_stat64(cpu_env, arg2, &st);
10900 break;
10901 #endif
10902 #ifdef TARGET_NR_fstat64
10903 case TARGET_NR_fstat64:
10904 ret = get_errno(fstat(arg1, &st));
10905 if (!is_error(ret))
10906 ret = host_to_target_stat64(cpu_env, arg2, &st);
10907 break;
10908 #endif
10909 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10910 #ifdef TARGET_NR_fstatat64
10911 case TARGET_NR_fstatat64:
10912 #endif
10913 #ifdef TARGET_NR_newfstatat
10914 case TARGET_NR_newfstatat:
10915 #endif
10916 if (!(p = lock_user_string(arg2)))
10917 goto efault;
10918 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10919 if (!is_error(ret))
10920 ret = host_to_target_stat64(cpu_env, arg3, &st);
10921 break;
10922 #endif
10923 #ifdef TARGET_NR_lchown
10924 case TARGET_NR_lchown:
10925 if (!(p = lock_user_string(arg1)))
10926 goto efault;
10927 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10928 unlock_user(p, arg1, 0);
10929 break;
10930 #endif
10931 #ifdef TARGET_NR_getuid
10932 case TARGET_NR_getuid:
10933 ret = get_errno(high2lowuid(getuid()));
10934 break;
10935 #endif
10936 #ifdef TARGET_NR_getgid
10937 case TARGET_NR_getgid:
10938 ret = get_errno(high2lowgid(getgid()));
10939 break;
10940 #endif
10941 #ifdef TARGET_NR_geteuid
10942 case TARGET_NR_geteuid:
10943 ret = get_errno(high2lowuid(geteuid()));
10944 break;
10945 #endif
10946 #ifdef TARGET_NR_getegid
10947 case TARGET_NR_getegid:
10948 ret = get_errno(high2lowgid(getegid()));
10949 break;
10950 #endif
10951 case TARGET_NR_setreuid:
10952 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10953 break;
10954 case TARGET_NR_setregid:
10955 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10956 break;
10957 case TARGET_NR_getgroups:
10959 int gidsetsize = arg1;
10960 target_id *target_grouplist;
10961 gid_t *grouplist;
10962 int i;
10964 grouplist = alloca(gidsetsize * sizeof(gid_t));
10965 ret = get_errno(getgroups(gidsetsize, grouplist));
10966 if (gidsetsize == 0)
10967 break;
10968 if (!is_error(ret)) {
10969 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10970 if (!target_grouplist)
10971 goto efault;
10972 for(i = 0;i < ret; i++)
10973 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10974 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10977 break;
10978 case TARGET_NR_setgroups:
10980 int gidsetsize = arg1;
10981 target_id *target_grouplist;
10982 gid_t *grouplist = NULL;
10983 int i;
10984 if (gidsetsize) {
10985 grouplist = alloca(gidsetsize * sizeof(gid_t));
10986 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10987 if (!target_grouplist) {
10988 ret = -TARGET_EFAULT;
10989 goto fail;
10991 for (i = 0; i < gidsetsize; i++) {
10992 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10994 unlock_user(target_grouplist, arg2, 0);
10996 ret = get_errno(setgroups(gidsetsize, grouplist));
10998 break;
10999 case TARGET_NR_fchown:
11000 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11001 break;
11002 #if defined(TARGET_NR_fchownat)
11003 case TARGET_NR_fchownat:
11004 if (!(p = lock_user_string(arg2)))
11005 goto efault;
11006 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11007 low2highgid(arg4), arg5));
11008 unlock_user(p, arg2, 0);
11009 break;
11010 #endif
11011 #ifdef TARGET_NR_setresuid
11012 case TARGET_NR_setresuid:
11013 ret = get_errno(sys_setresuid(low2highuid(arg1),
11014 low2highuid(arg2),
11015 low2highuid(arg3)));
11016 break;
11017 #endif
11018 #ifdef TARGET_NR_getresuid
11019 case TARGET_NR_getresuid:
11021 uid_t ruid, euid, suid;
11022 ret = get_errno(getresuid(&ruid, &euid, &suid));
11023 if (!is_error(ret)) {
11024 if (put_user_id(high2lowuid(ruid), arg1)
11025 || put_user_id(high2lowuid(euid), arg2)
11026 || put_user_id(high2lowuid(suid), arg3))
11027 goto efault;
11030 break;
11031 #endif
11032 #ifdef TARGET_NR_getresgid
11033 case TARGET_NR_setresgid:
11034 ret = get_errno(sys_setresgid(low2highgid(arg1),
11035 low2highgid(arg2),
11036 low2highgid(arg3)));
11037 break;
11038 #endif
11039 #ifdef TARGET_NR_getresgid
11040 case TARGET_NR_getresgid:
11042 gid_t rgid, egid, sgid;
11043 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11044 if (!is_error(ret)) {
11045 if (put_user_id(high2lowgid(rgid), arg1)
11046 || put_user_id(high2lowgid(egid), arg2)
11047 || put_user_id(high2lowgid(sgid), arg3))
11048 goto efault;
11051 break;
11052 #endif
11053 #ifdef TARGET_NR_chown
11054 case TARGET_NR_chown:
11055 if (!(p = lock_user_string(arg1)))
11056 goto efault;
11057 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11058 unlock_user(p, arg1, 0);
11059 break;
11060 #endif
11061 case TARGET_NR_setuid:
11062 ret = get_errno(sys_setuid(low2highuid(arg1)));
11063 break;
11064 case TARGET_NR_setgid:
11065 ret = get_errno(sys_setgid(low2highgid(arg1)));
11066 break;
11067 case TARGET_NR_setfsuid:
11068 ret = get_errno(setfsuid(arg1));
11069 break;
11070 case TARGET_NR_setfsgid:
11071 ret = get_errno(setfsgid(arg1));
11072 break;
11074 #ifdef TARGET_NR_lchown32
11075 case TARGET_NR_lchown32:
11076 if (!(p = lock_user_string(arg1)))
11077 goto efault;
11078 ret = get_errno(lchown(p, arg2, arg3));
11079 unlock_user(p, arg1, 0);
11080 break;
11081 #endif
11082 #ifdef TARGET_NR_getuid32
11083 case TARGET_NR_getuid32:
11084 ret = get_errno(getuid());
11085 break;
11086 #endif
11088 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11089 /* Alpha specific */
11090 case TARGET_NR_getxuid:
11092 uid_t euid;
11093 euid=geteuid();
11094 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
11096 ret = get_errno(getuid());
11097 break;
11098 #endif
11099 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11100 /* Alpha specific */
11101 case TARGET_NR_getxgid:
11103 uid_t egid;
11104 egid=getegid();
11105 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
11107 ret = get_errno(getgid());
11108 break;
11109 #endif
11110 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11111 /* Alpha specific */
11112 case TARGET_NR_osf_getsysinfo:
11113 ret = -TARGET_EOPNOTSUPP;
11114 switch (arg1) {
11115 case TARGET_GSI_IEEE_FP_CONTROL:
11117 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
11119 /* Copied from linux ieee_fpcr_to_swcr. */
11120 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
11121 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
11122 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
11123 | SWCR_TRAP_ENABLE_DZE
11124 | SWCR_TRAP_ENABLE_OVF);
11125 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
11126 | SWCR_TRAP_ENABLE_INE);
11127 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
11128 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
11130 if (put_user_u64 (swcr, arg2))
11131 goto efault;
11132 ret = 0;
11134 break;
11136 /* case GSI_IEEE_STATE_AT_SIGNAL:
11137 -- Not implemented in linux kernel.
11138 case GSI_UACPROC:
11139 -- Retrieves current unaligned access state; not much used.
11140 case GSI_PROC_TYPE:
11141 -- Retrieves implver information; surely not used.
11142 case GSI_GET_HWRPB:
11143 -- Grabs a copy of the HWRPB; surely not used.
11146 break;
11147 #endif
11148 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11149 /* Alpha specific */
11150 case TARGET_NR_osf_setsysinfo:
11151 ret = -TARGET_EOPNOTSUPP;
11152 switch (arg1) {
11153 case TARGET_SSI_IEEE_FP_CONTROL:
11155 uint64_t swcr, fpcr, orig_fpcr;
11157 if (get_user_u64 (swcr, arg2)) {
11158 goto efault;
11160 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11161 fpcr = orig_fpcr & FPCR_DYN_MASK;
11163 /* Copied from linux ieee_swcr_to_fpcr. */
11164 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
11165 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
11166 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
11167 | SWCR_TRAP_ENABLE_DZE
11168 | SWCR_TRAP_ENABLE_OVF)) << 48;
11169 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
11170 | SWCR_TRAP_ENABLE_INE)) << 57;
11171 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
11172 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
11174 cpu_alpha_store_fpcr(cpu_env, fpcr);
11175 ret = 0;
11177 break;
11179 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11181 uint64_t exc, fpcr, orig_fpcr;
11182 int si_code;
11184 if (get_user_u64(exc, arg2)) {
11185 goto efault;
11188 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11190 /* We only add to the exception status here. */
11191 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
11193 cpu_alpha_store_fpcr(cpu_env, fpcr);
11194 ret = 0;
11196 /* Old exceptions are not signaled. */
11197 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
11199 /* If any exceptions set by this call,
11200 and are unmasked, send a signal. */
11201 si_code = 0;
11202 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
11203 si_code = TARGET_FPE_FLTRES;
11205 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
11206 si_code = TARGET_FPE_FLTUND;
11208 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
11209 si_code = TARGET_FPE_FLTOVF;
11211 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
11212 si_code = TARGET_FPE_FLTDIV;
11214 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
11215 si_code = TARGET_FPE_FLTINV;
11217 if (si_code != 0) {
11218 target_siginfo_t info;
11219 info.si_signo = SIGFPE;
11220 info.si_errno = 0;
11221 info.si_code = si_code;
11222 info._sifields._sigfault._addr
11223 = ((CPUArchState *)cpu_env)->pc;
11224 queue_signal((CPUArchState *)cpu_env, info.si_signo,
11225 QEMU_SI_FAULT, &info);
11228 break;
11230 /* case SSI_NVPAIRS:
11231 -- Used with SSIN_UACPROC to enable unaligned accesses.
11232 case SSI_IEEE_STATE_AT_SIGNAL:
11233 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11234 -- Not implemented in linux kernel
11237 break;
11238 #endif
11239 #ifdef TARGET_NR_osf_sigprocmask
11240 /* Alpha specific. */
11241 case TARGET_NR_osf_sigprocmask:
11243 abi_ulong mask;
11244 int how;
11245 sigset_t set, oldset;
11247 switch(arg1) {
11248 case TARGET_SIG_BLOCK:
11249 how = SIG_BLOCK;
11250 break;
11251 case TARGET_SIG_UNBLOCK:
11252 how = SIG_UNBLOCK;
11253 break;
11254 case TARGET_SIG_SETMASK:
11255 how = SIG_SETMASK;
11256 break;
11257 default:
11258 ret = -TARGET_EINVAL;
11259 goto fail;
11261 mask = arg2;
11262 target_to_host_old_sigset(&set, &mask);
11263 ret = do_sigprocmask(how, &set, &oldset);
11264 if (!ret) {
11265 host_to_target_old_sigset(&mask, &oldset);
11266 ret = mask;
11269 break;
11270 #endif
11272 #ifdef TARGET_NR_getgid32
11273 case TARGET_NR_getgid32:
11274 ret = get_errno(getgid());
11275 break;
11276 #endif
11277 #ifdef TARGET_NR_geteuid32
11278 case TARGET_NR_geteuid32:
11279 ret = get_errno(geteuid());
11280 break;
11281 #endif
11282 #ifdef TARGET_NR_getegid32
11283 case TARGET_NR_getegid32:
11284 ret = get_errno(getegid());
11285 break;
11286 #endif
11287 #ifdef TARGET_NR_setreuid32
11288 case TARGET_NR_setreuid32:
11289 ret = get_errno(setreuid(arg1, arg2));
11290 break;
11291 #endif
11292 #ifdef TARGET_NR_setregid32
11293 case TARGET_NR_setregid32:
11294 ret = get_errno(setregid(arg1, arg2));
11295 break;
11296 #endif
11297 #ifdef TARGET_NR_getgroups32
11298 case TARGET_NR_getgroups32:
11300 int gidsetsize = arg1;
11301 uint32_t *target_grouplist;
11302 gid_t *grouplist;
11303 int i;
11305 grouplist = alloca(gidsetsize * sizeof(gid_t));
11306 ret = get_errno(getgroups(gidsetsize, grouplist));
11307 if (gidsetsize == 0)
11308 break;
11309 if (!is_error(ret)) {
11310 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11311 if (!target_grouplist) {
11312 ret = -TARGET_EFAULT;
11313 goto fail;
11315 for(i = 0;i < ret; i++)
11316 target_grouplist[i] = tswap32(grouplist[i]);
11317 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11320 break;
11321 #endif
11322 #ifdef TARGET_NR_setgroups32
11323 case TARGET_NR_setgroups32:
11325 int gidsetsize = arg1;
11326 uint32_t *target_grouplist;
11327 gid_t *grouplist;
11328 int i;
11330 grouplist = alloca(gidsetsize * sizeof(gid_t));
11331 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11332 if (!target_grouplist) {
11333 ret = -TARGET_EFAULT;
11334 goto fail;
11336 for(i = 0;i < gidsetsize; i++)
11337 grouplist[i] = tswap32(target_grouplist[i]);
11338 unlock_user(target_grouplist, arg2, 0);
11339 ret = get_errno(setgroups(gidsetsize, grouplist));
11341 break;
11342 #endif
11343 #ifdef TARGET_NR_fchown32
11344 case TARGET_NR_fchown32:
11345 ret = get_errno(fchown(arg1, arg2, arg3));
11346 break;
11347 #endif
11348 #ifdef TARGET_NR_setresuid32
11349 case TARGET_NR_setresuid32:
11350 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
11351 break;
11352 #endif
11353 #ifdef TARGET_NR_getresuid32
11354 case TARGET_NR_getresuid32:
11356 uid_t ruid, euid, suid;
11357 ret = get_errno(getresuid(&ruid, &euid, &suid));
11358 if (!is_error(ret)) {
11359 if (put_user_u32(ruid, arg1)
11360 || put_user_u32(euid, arg2)
11361 || put_user_u32(suid, arg3))
11362 goto efault;
11365 break;
11366 #endif
11367 #ifdef TARGET_NR_setresgid32
11368 case TARGET_NR_setresgid32:
11369 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
11370 break;
11371 #endif
11372 #ifdef TARGET_NR_getresgid32
11373 case TARGET_NR_getresgid32:
11375 gid_t rgid, egid, sgid;
11376 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11377 if (!is_error(ret)) {
11378 if (put_user_u32(rgid, arg1)
11379 || put_user_u32(egid, arg2)
11380 || put_user_u32(sgid, arg3))
11381 goto efault;
11384 break;
11385 #endif
11386 #ifdef TARGET_NR_chown32
11387 case TARGET_NR_chown32:
11388 if (!(p = lock_user_string(arg1)))
11389 goto efault;
11390 ret = get_errno(chown(p, arg2, arg3));
11391 unlock_user(p, arg1, 0);
11392 break;
11393 #endif
11394 #ifdef TARGET_NR_setuid32
11395 case TARGET_NR_setuid32:
11396 ret = get_errno(sys_setuid(arg1));
11397 break;
11398 #endif
11399 #ifdef TARGET_NR_setgid32
11400 case TARGET_NR_setgid32:
11401 ret = get_errno(sys_setgid(arg1));
11402 break;
11403 #endif
11404 #ifdef TARGET_NR_setfsuid32
11405 case TARGET_NR_setfsuid32:
11406 ret = get_errno(setfsuid(arg1));
11407 break;
11408 #endif
11409 #ifdef TARGET_NR_setfsgid32
11410 case TARGET_NR_setfsgid32:
11411 ret = get_errno(setfsgid(arg1));
11412 break;
11413 #endif
11415 case TARGET_NR_pivot_root:
11416 goto unimplemented;
11417 #ifdef TARGET_NR_mincore
11418 case TARGET_NR_mincore:
11420 void *a;
11421 ret = -TARGET_ENOMEM;
11422 a = lock_user(VERIFY_READ, arg1, arg2, 0);
11423 if (!a) {
11424 goto fail;
11426 ret = -TARGET_EFAULT;
11427 p = lock_user_string(arg3);
11428 if (!p) {
11429 goto mincore_fail;
11431 ret = get_errno(mincore(a, arg2, p));
11432 unlock_user(p, arg3, ret);
11433 mincore_fail:
11434 unlock_user(a, arg1, 0);
11436 break;
11437 #endif
11438 #ifdef TARGET_NR_arm_fadvise64_64
11439 case TARGET_NR_arm_fadvise64_64:
11440 /* arm_fadvise64_64 looks like fadvise64_64 but
11441 * with different argument order: fd, advice, offset, len
11442 * rather than the usual fd, offset, len, advice.
11443 * Note that offset and len are both 64-bit so appear as
11444 * pairs of 32-bit registers.
11446 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11447 target_offset64(arg5, arg6), arg2);
11448 ret = -host_to_target_errno(ret);
11449 break;
11450 #endif
11452 #if TARGET_ABI_BITS == 32
11454 #ifdef TARGET_NR_fadvise64_64
11455 case TARGET_NR_fadvise64_64:
11456 #if defined(TARGET_PPC)
11457 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11458 ret = arg2;
11459 arg2 = arg3;
11460 arg3 = arg4;
11461 arg4 = arg5;
11462 arg5 = arg6;
11463 arg6 = ret;
11464 #else
11465 /* 6 args: fd, offset (high, low), len (high, low), advice */
11466 if (regpairs_aligned(cpu_env, num)) {
11467 /* offset is in (3,4), len in (5,6) and advice in 7 */
11468 arg2 = arg3;
11469 arg3 = arg4;
11470 arg4 = arg5;
11471 arg5 = arg6;
11472 arg6 = arg7;
11474 #endif
11475 ret = -host_to_target_errno(posix_fadvise(arg1,
11476 target_offset64(arg2, arg3),
11477 target_offset64(arg4, arg5),
11478 arg6));
11479 break;
11480 #endif
11482 #ifdef TARGET_NR_fadvise64
11483 case TARGET_NR_fadvise64:
11484 /* 5 args: fd, offset (high, low), len, advice */
11485 if (regpairs_aligned(cpu_env, num)) {
11486 /* offset is in (3,4), len in 5 and advice in 6 */
11487 arg2 = arg3;
11488 arg3 = arg4;
11489 arg4 = arg5;
11490 arg5 = arg6;
11492 ret = -host_to_target_errno(posix_fadvise(arg1,
11493 target_offset64(arg2, arg3),
11494 arg4, arg5));
11495 break;
11496 #endif
11498 #else /* not a 32-bit ABI */
11499 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11500 #ifdef TARGET_NR_fadvise64_64
11501 case TARGET_NR_fadvise64_64:
11502 #endif
11503 #ifdef TARGET_NR_fadvise64
11504 case TARGET_NR_fadvise64:
11505 #endif
11506 #ifdef TARGET_S390X
11507 switch (arg4) {
11508 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11509 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11510 case 6: arg4 = POSIX_FADV_DONTNEED; break;
11511 case 7: arg4 = POSIX_FADV_NOREUSE; break;
11512 default: break;
11514 #endif
11515 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11516 break;
11517 #endif
11518 #endif /* end of 64-bit ABI fadvise handling */
11520 #ifdef TARGET_NR_madvise
11521 case TARGET_NR_madvise:
11522 /* A straight passthrough may not be safe because qemu sometimes
11523 turns private file-backed mappings into anonymous mappings.
11524 This will break MADV_DONTNEED.
11525 This is a hint, so ignoring and returning success is ok. */
11526 ret = get_errno(0);
11527 break;
11528 #endif
11529 #if TARGET_ABI_BITS == 32
11530 case TARGET_NR_fcntl64:
11532 int cmd;
11533 struct flock64 fl;
11534 from_flock64_fn *copyfrom = copy_from_user_flock64;
11535 to_flock64_fn *copyto = copy_to_user_flock64;
11537 #ifdef TARGET_ARM
11538 if (((CPUARMState *)cpu_env)->eabi) {
11539 copyfrom = copy_from_user_eabi_flock64;
11540 copyto = copy_to_user_eabi_flock64;
11542 #endif
11544 cmd = target_to_host_fcntl_cmd(arg2);
11545 if (cmd == -TARGET_EINVAL) {
11546 ret = cmd;
11547 break;
11550 switch(arg2) {
11551 case TARGET_F_GETLK64:
11552 ret = copyfrom(&fl, arg3);
11553 if (ret) {
11554 break;
11556 ret = get_errno(fcntl(arg1, cmd, &fl));
11557 if (ret == 0) {
11558 ret = copyto(arg3, &fl);
11560 break;
11562 case TARGET_F_SETLK64:
11563 case TARGET_F_SETLKW64:
11564 ret = copyfrom(&fl, arg3);
11565 if (ret) {
11566 break;
11568 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11569 break;
11570 default:
11571 ret = do_fcntl(arg1, arg2, arg3);
11572 break;
11574 break;
11576 #endif
11577 #ifdef TARGET_NR_cacheflush
11578 case TARGET_NR_cacheflush:
11579 /* self-modifying code is handled automatically, so nothing needed */
11580 ret = 0;
11581 break;
11582 #endif
11583 #ifdef TARGET_NR_security
11584 case TARGET_NR_security:
11585 goto unimplemented;
11586 #endif
11587 #ifdef TARGET_NR_getpagesize
11588 case TARGET_NR_getpagesize:
11589 ret = TARGET_PAGE_SIZE;
11590 break;
11591 #endif
11592 case TARGET_NR_gettid:
11593 ret = get_errno(gettid());
11594 break;
11595 #ifdef TARGET_NR_readahead
11596 case TARGET_NR_readahead:
11597 #if TARGET_ABI_BITS == 32
11598 if (regpairs_aligned(cpu_env, num)) {
11599 arg2 = arg3;
11600 arg3 = arg4;
11601 arg4 = arg5;
11603 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11604 #else
11605 ret = get_errno(readahead(arg1, arg2, arg3));
11606 #endif
11607 break;
11608 #endif
11609 #ifdef CONFIG_ATTR
11610 #ifdef TARGET_NR_setxattr
11611 case TARGET_NR_listxattr:
11612 case TARGET_NR_llistxattr:
11614 void *p, *b = 0;
11615 if (arg2) {
11616 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11617 if (!b) {
11618 ret = -TARGET_EFAULT;
11619 break;
11622 p = lock_user_string(arg1);
11623 if (p) {
11624 if (num == TARGET_NR_listxattr) {
11625 ret = get_errno(listxattr(p, b, arg3));
11626 } else {
11627 ret = get_errno(llistxattr(p, b, arg3));
11629 } else {
11630 ret = -TARGET_EFAULT;
11632 unlock_user(p, arg1, 0);
11633 unlock_user(b, arg2, arg3);
11634 break;
11636 case TARGET_NR_flistxattr:
11638 void *b = 0;
11639 if (arg2) {
11640 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11641 if (!b) {
11642 ret = -TARGET_EFAULT;
11643 break;
11646 ret = get_errno(flistxattr(arg1, b, arg3));
11647 unlock_user(b, arg2, arg3);
11648 break;
11650 case TARGET_NR_setxattr:
11651 case TARGET_NR_lsetxattr:
11653 void *p, *n, *v = 0;
11654 if (arg3) {
11655 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11656 if (!v) {
11657 ret = -TARGET_EFAULT;
11658 break;
11661 p = lock_user_string(arg1);
11662 n = lock_user_string(arg2);
11663 if (p && n) {
11664 if (num == TARGET_NR_setxattr) {
11665 ret = get_errno(setxattr(p, n, v, arg4, arg5));
11666 } else {
11667 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11669 } else {
11670 ret = -TARGET_EFAULT;
11672 unlock_user(p, arg1, 0);
11673 unlock_user(n, arg2, 0);
11674 unlock_user(v, arg3, 0);
11676 break;
11677 case TARGET_NR_fsetxattr:
11679 void *n, *v = 0;
11680 if (arg3) {
11681 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11682 if (!v) {
11683 ret = -TARGET_EFAULT;
11684 break;
11687 n = lock_user_string(arg2);
11688 if (n) {
11689 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11690 } else {
11691 ret = -TARGET_EFAULT;
11693 unlock_user(n, arg2, 0);
11694 unlock_user(v, arg3, 0);
11696 break;
11697 case TARGET_NR_getxattr:
11698 case TARGET_NR_lgetxattr:
11700 void *p, *n, *v = 0;
11701 if (arg3) {
11702 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11703 if (!v) {
11704 ret = -TARGET_EFAULT;
11705 break;
11708 p = lock_user_string(arg1);
11709 n = lock_user_string(arg2);
11710 if (p && n) {
11711 if (num == TARGET_NR_getxattr) {
11712 ret = get_errno(getxattr(p, n, v, arg4));
11713 } else {
11714 ret = get_errno(lgetxattr(p, n, v, arg4));
11716 } else {
11717 ret = -TARGET_EFAULT;
11719 unlock_user(p, arg1, 0);
11720 unlock_user(n, arg2, 0);
11721 unlock_user(v, arg3, arg4);
11723 break;
11724 case TARGET_NR_fgetxattr:
11726 void *n, *v = 0;
11727 if (arg3) {
11728 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11729 if (!v) {
11730 ret = -TARGET_EFAULT;
11731 break;
11734 n = lock_user_string(arg2);
11735 if (n) {
11736 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11737 } else {
11738 ret = -TARGET_EFAULT;
11740 unlock_user(n, arg2, 0);
11741 unlock_user(v, arg3, arg4);
11743 break;
11744 case TARGET_NR_removexattr:
11745 case TARGET_NR_lremovexattr:
11747 void *p, *n;
11748 p = lock_user_string(arg1);
11749 n = lock_user_string(arg2);
11750 if (p && n) {
11751 if (num == TARGET_NR_removexattr) {
11752 ret = get_errno(removexattr(p, n));
11753 } else {
11754 ret = get_errno(lremovexattr(p, n));
11756 } else {
11757 ret = -TARGET_EFAULT;
11759 unlock_user(p, arg1, 0);
11760 unlock_user(n, arg2, 0);
11762 break;
11763 case TARGET_NR_fremovexattr:
11765 void *n;
11766 n = lock_user_string(arg2);
11767 if (n) {
11768 ret = get_errno(fremovexattr(arg1, n));
11769 } else {
11770 ret = -TARGET_EFAULT;
11772 unlock_user(n, arg2, 0);
11774 break;
11775 #endif
11776 #endif /* CONFIG_ATTR */
11777 #ifdef TARGET_NR_set_thread_area
11778 case TARGET_NR_set_thread_area:
11779 #if defined(TARGET_MIPS)
11780 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11781 ret = 0;
11782 break;
11783 #elif defined(TARGET_CRIS)
11784 if (arg1 & 0xff)
11785 ret = -TARGET_EINVAL;
11786 else {
11787 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11788 ret = 0;
11790 break;
11791 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11792 ret = do_set_thread_area(cpu_env, arg1);
11793 break;
11794 #elif defined(TARGET_M68K)
11796 TaskState *ts = cpu->opaque;
11797 ts->tp_value = arg1;
11798 ret = 0;
11799 break;
11801 #else
11802 goto unimplemented_nowarn;
11803 #endif
11804 #endif
11805 #ifdef TARGET_NR_get_thread_area
11806 case TARGET_NR_get_thread_area:
11807 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11808 ret = do_get_thread_area(cpu_env, arg1);
11809 break;
11810 #elif defined(TARGET_M68K)
11812 TaskState *ts = cpu->opaque;
11813 ret = ts->tp_value;
11814 break;
11816 #else
11817 goto unimplemented_nowarn;
11818 #endif
11819 #endif
11820 #ifdef TARGET_NR_getdomainname
11821 case TARGET_NR_getdomainname:
11822 goto unimplemented_nowarn;
11823 #endif
11825 #ifdef TARGET_NR_clock_gettime
11826 case TARGET_NR_clock_gettime:
11828 struct timespec ts;
11829 ret = get_errno(clock_gettime(arg1, &ts));
11830 if (!is_error(ret)) {
11831 host_to_target_timespec(arg2, &ts);
11833 break;
11835 #endif
11836 #ifdef TARGET_NR_clock_getres
11837 case TARGET_NR_clock_getres:
11839 struct timespec ts;
11840 ret = get_errno(clock_getres(arg1, &ts));
11841 if (!is_error(ret)) {
11842 host_to_target_timespec(arg2, &ts);
11844 break;
11846 #endif
11847 #ifdef TARGET_NR_clock_nanosleep
11848 case TARGET_NR_clock_nanosleep:
11850 struct timespec ts;
11851 target_to_host_timespec(&ts, arg3);
11852 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11853 &ts, arg4 ? &ts : NULL));
11854 if (arg4)
11855 host_to_target_timespec(arg4, &ts);
11857 #if defined(TARGET_PPC)
11858 /* clock_nanosleep is odd in that it returns positive errno values.
11859 * On PPC, CR0 bit 3 should be set in such a situation. */
11860 if (ret && ret != -TARGET_ERESTARTSYS) {
11861 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11863 #endif
11864 break;
11866 #endif
11868 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11869 case TARGET_NR_set_tid_address:
11870 ret = get_errno(set_tid_address((int *)g2h(arg1)));
11871 break;
11872 #endif
11874 case TARGET_NR_tkill:
11875 ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11876 break;
11878 case TARGET_NR_tgkill:
11879 ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
11880 target_to_host_signal(arg3)));
11881 break;
11883 #ifdef TARGET_NR_set_robust_list
11884 case TARGET_NR_set_robust_list:
11885 case TARGET_NR_get_robust_list:
11886 /* The ABI for supporting robust futexes has userspace pass
11887 * the kernel a pointer to a linked list which is updated by
11888 * userspace after the syscall; the list is walked by the kernel
11889 * when the thread exits. Since the linked list in QEMU guest
11890 * memory isn't a valid linked list for the host and we have
11891 * no way to reliably intercept the thread-death event, we can't
11892 * support these. Silently return ENOSYS so that guest userspace
11893 * falls back to a non-robust futex implementation (which should
11894 * be OK except in the corner case of the guest crashing while
11895 * holding a mutex that is shared with another process via
11896 * shared memory).
11898 goto unimplemented_nowarn;
11899 #endif
11901 #if defined(TARGET_NR_utimensat)
11902 case TARGET_NR_utimensat:
11904 struct timespec *tsp, ts[2];
11905 if (!arg3) {
11906 tsp = NULL;
11907 } else {
11908 target_to_host_timespec(ts, arg3);
11909 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11910 tsp = ts;
11912 if (!arg2)
11913 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11914 else {
11915 if (!(p = lock_user_string(arg2))) {
11916 ret = -TARGET_EFAULT;
11917 goto fail;
11919 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11920 unlock_user(p, arg2, 0);
11923 break;
11924 #endif
11925 case TARGET_NR_futex:
11926 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11927 break;
11928 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11929 case TARGET_NR_inotify_init:
11930 ret = get_errno(sys_inotify_init());
11931 if (ret >= 0) {
11932 fd_trans_register(ret, &target_inotify_trans);
11934 break;
11935 #endif
11936 #ifdef CONFIG_INOTIFY1
11937 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11938 case TARGET_NR_inotify_init1:
11939 ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11940 fcntl_flags_tbl)));
11941 if (ret >= 0) {
11942 fd_trans_register(ret, &target_inotify_trans);
11944 break;
11945 #endif
11946 #endif
11947 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11948 case TARGET_NR_inotify_add_watch:
11949 p = lock_user_string(arg2);
11950 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11951 unlock_user(p, arg2, 0);
11952 break;
11953 #endif
11954 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11955 case TARGET_NR_inotify_rm_watch:
11956 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
11957 break;
11958 #endif
11960 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11961 case TARGET_NR_mq_open:
11963 struct mq_attr posix_mq_attr;
11964 struct mq_attr *pposix_mq_attr;
11965 int host_flags;
11967 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11968 pposix_mq_attr = NULL;
11969 if (arg4) {
11970 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11971 goto efault;
11973 pposix_mq_attr = &posix_mq_attr;
11975 p = lock_user_string(arg1 - 1);
11976 if (!p) {
11977 goto efault;
11979 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11980 unlock_user (p, arg1, 0);
11982 break;
11984 case TARGET_NR_mq_unlink:
11985 p = lock_user_string(arg1 - 1);
11986 if (!p) {
11987 ret = -TARGET_EFAULT;
11988 break;
11990 ret = get_errno(mq_unlink(p));
11991 unlock_user (p, arg1, 0);
11992 break;
11994 case TARGET_NR_mq_timedsend:
11996 struct timespec ts;
11998 p = lock_user (VERIFY_READ, arg2, arg3, 1);
11999 if (arg5 != 0) {
12000 target_to_host_timespec(&ts, arg5);
12001 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12002 host_to_target_timespec(arg5, &ts);
12003 } else {
12004 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12006 unlock_user (p, arg2, arg3);
12008 break;
12010 case TARGET_NR_mq_timedreceive:
12012 struct timespec ts;
12013 unsigned int prio;
12015 p = lock_user (VERIFY_READ, arg2, arg3, 1);
12016 if (arg5 != 0) {
12017 target_to_host_timespec(&ts, arg5);
12018 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12019 &prio, &ts));
12020 host_to_target_timespec(arg5, &ts);
12021 } else {
12022 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12023 &prio, NULL));
12025 unlock_user (p, arg2, arg3);
12026 if (arg4 != 0)
12027 put_user_u32(prio, arg4);
12029 break;
12031 /* Not implemented for now... */
12032 /* case TARGET_NR_mq_notify: */
12033 /* break; */
12035 case TARGET_NR_mq_getsetattr:
12037 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12038 ret = 0;
12039 if (arg3 != 0) {
12040 ret = mq_getattr(arg1, &posix_mq_attr_out);
12041 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12043 if (arg2 != 0) {
12044 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12045 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
12049 break;
12050 #endif
12052 #ifdef CONFIG_SPLICE
12053 #ifdef TARGET_NR_tee
12054 case TARGET_NR_tee:
12056 ret = get_errno(tee(arg1,arg2,arg3,arg4));
12058 break;
12059 #endif
12060 #ifdef TARGET_NR_splice
12061 case TARGET_NR_splice:
12063 loff_t loff_in, loff_out;
12064 loff_t *ploff_in = NULL, *ploff_out = NULL;
12065 if (arg2) {
12066 if (get_user_u64(loff_in, arg2)) {
12067 goto efault;
12069 ploff_in = &loff_in;
12071 if (arg4) {
12072 if (get_user_u64(loff_out, arg4)) {
12073 goto efault;
12075 ploff_out = &loff_out;
12077 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12078 if (arg2) {
12079 if (put_user_u64(loff_in, arg2)) {
12080 goto efault;
12083 if (arg4) {
12084 if (put_user_u64(loff_out, arg4)) {
12085 goto efault;
12089 break;
12090 #endif
12091 #ifdef TARGET_NR_vmsplice
12092 case TARGET_NR_vmsplice:
12094 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12095 if (vec != NULL) {
12096 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12097 unlock_iovec(vec, arg2, arg3, 0);
12098 } else {
12099 ret = -host_to_target_errno(errno);
12102 break;
12103 #endif
12104 #endif /* CONFIG_SPLICE */
12105 #ifdef CONFIG_EVENTFD
12106 #if defined(TARGET_NR_eventfd)
12107 case TARGET_NR_eventfd:
12108 ret = get_errno(eventfd(arg1, 0));
12109 if (ret >= 0) {
12110 fd_trans_register(ret, &target_eventfd_trans);
12112 break;
12113 #endif
12114 #if defined(TARGET_NR_eventfd2)
12115 case TARGET_NR_eventfd2:
12117 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
12118 if (arg2 & TARGET_O_NONBLOCK) {
12119 host_flags |= O_NONBLOCK;
12121 if (arg2 & TARGET_O_CLOEXEC) {
12122 host_flags |= O_CLOEXEC;
12124 ret = get_errno(eventfd(arg1, host_flags));
12125 if (ret >= 0) {
12126 fd_trans_register(ret, &target_eventfd_trans);
12128 break;
12130 #endif
12131 #endif /* CONFIG_EVENTFD */
12132 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12133 case TARGET_NR_fallocate:
12134 #if TARGET_ABI_BITS == 32
12135 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12136 target_offset64(arg5, arg6)));
12137 #else
12138 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12139 #endif
12140 break;
12141 #endif
12142 #if defined(CONFIG_SYNC_FILE_RANGE)
12143 #if defined(TARGET_NR_sync_file_range)
12144 case TARGET_NR_sync_file_range:
12145 #if TARGET_ABI_BITS == 32
12146 #if defined(TARGET_MIPS)
12147 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12148 target_offset64(arg5, arg6), arg7));
12149 #else
12150 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12151 target_offset64(arg4, arg5), arg6));
12152 #endif /* !TARGET_MIPS */
12153 #else
12154 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12155 #endif
12156 break;
12157 #endif
12158 #if defined(TARGET_NR_sync_file_range2)
12159 case TARGET_NR_sync_file_range2:
12160 /* This is like sync_file_range but the arguments are reordered */
12161 #if TARGET_ABI_BITS == 32
12162 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12163 target_offset64(arg5, arg6), arg2));
12164 #else
12165 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12166 #endif
12167 break;
12168 #endif
12169 #endif
12170 #if defined(TARGET_NR_signalfd4)
12171 case TARGET_NR_signalfd4:
12172 ret = do_signalfd4(arg1, arg2, arg4);
12173 break;
12174 #endif
12175 #if defined(TARGET_NR_signalfd)
12176 case TARGET_NR_signalfd:
12177 ret = do_signalfd4(arg1, arg2, 0);
12178 break;
12179 #endif
12180 #if defined(CONFIG_EPOLL)
12181 #if defined(TARGET_NR_epoll_create)
12182 case TARGET_NR_epoll_create:
12183 ret = get_errno(epoll_create(arg1));
12184 break;
12185 #endif
12186 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12187 case TARGET_NR_epoll_create1:
12188 ret = get_errno(epoll_create1(arg1));
12189 break;
12190 #endif
12191 #if defined(TARGET_NR_epoll_ctl)
12192 case TARGET_NR_epoll_ctl:
12194 struct epoll_event ep;
12195 struct epoll_event *epp = 0;
12196 if (arg4) {
12197 struct target_epoll_event *target_ep;
12198 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12199 goto efault;
12201 ep.events = tswap32(target_ep->events);
12202 /* The epoll_data_t union is just opaque data to the kernel,
12203 * so we transfer all 64 bits across and need not worry what
12204 * actual data type it is.
12206 ep.data.u64 = tswap64(target_ep->data.u64);
12207 unlock_user_struct(target_ep, arg4, 0);
12208 epp = &ep;
12210 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12211 break;
12213 #endif
12215 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12216 #if defined(TARGET_NR_epoll_wait)
12217 case TARGET_NR_epoll_wait:
12218 #endif
12219 #if defined(TARGET_NR_epoll_pwait)
12220 case TARGET_NR_epoll_pwait:
12221 #endif
12223 struct target_epoll_event *target_ep;
12224 struct epoll_event *ep;
12225 int epfd = arg1;
12226 int maxevents = arg3;
12227 int timeout = arg4;
12229 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12230 ret = -TARGET_EINVAL;
12231 break;
12234 target_ep = lock_user(VERIFY_WRITE, arg2,
12235 maxevents * sizeof(struct target_epoll_event), 1);
12236 if (!target_ep) {
12237 goto efault;
12240 ep = g_try_new(struct epoll_event, maxevents);
12241 if (!ep) {
12242 unlock_user(target_ep, arg2, 0);
12243 ret = -TARGET_ENOMEM;
12244 break;
12247 switch (num) {
12248 #if defined(TARGET_NR_epoll_pwait)
12249 case TARGET_NR_epoll_pwait:
12251 target_sigset_t *target_set;
12252 sigset_t _set, *set = &_set;
12254 if (arg5) {
12255 if (arg6 != sizeof(target_sigset_t)) {
12256 ret = -TARGET_EINVAL;
12257 break;
12260 target_set = lock_user(VERIFY_READ, arg5,
12261 sizeof(target_sigset_t), 1);
12262 if (!target_set) {
12263 ret = -TARGET_EFAULT;
12264 break;
12266 target_to_host_sigset(set, target_set);
12267 unlock_user(target_set, arg5, 0);
12268 } else {
12269 set = NULL;
12272 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12273 set, SIGSET_T_SIZE));
12274 break;
12276 #endif
12277 #if defined(TARGET_NR_epoll_wait)
12278 case TARGET_NR_epoll_wait:
12279 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12280 NULL, 0));
12281 break;
12282 #endif
12283 default:
12284 ret = -TARGET_ENOSYS;
12286 if (!is_error(ret)) {
12287 int i;
12288 for (i = 0; i < ret; i++) {
12289 target_ep[i].events = tswap32(ep[i].events);
12290 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12292 unlock_user(target_ep, arg2,
12293 ret * sizeof(struct target_epoll_event));
12294 } else {
12295 unlock_user(target_ep, arg2, 0);
12297 g_free(ep);
12298 break;
12300 #endif
12301 #endif
12302 #ifdef TARGET_NR_prlimit64
12303 case TARGET_NR_prlimit64:
12305 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12306 struct target_rlimit64 *target_rnew, *target_rold;
12307 struct host_rlimit64 rnew, rold, *rnewp = 0;
12308 int resource = target_to_host_resource(arg2);
12309 if (arg3) {
12310 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12311 goto efault;
12313 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12314 rnew.rlim_max = tswap64(target_rnew->rlim_max);
12315 unlock_user_struct(target_rnew, arg3, 0);
12316 rnewp = &rnew;
12319 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12320 if (!is_error(ret) && arg4) {
12321 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12322 goto efault;
12324 target_rold->rlim_cur = tswap64(rold.rlim_cur);
12325 target_rold->rlim_max = tswap64(rold.rlim_max);
12326 unlock_user_struct(target_rold, arg4, 1);
12328 break;
12330 #endif
12331 #ifdef TARGET_NR_gethostname
12332 case TARGET_NR_gethostname:
12334 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12335 if (name) {
12336 ret = get_errno(gethostname(name, arg2));
12337 unlock_user(name, arg1, arg2);
12338 } else {
12339 ret = -TARGET_EFAULT;
12341 break;
12343 #endif
12344 #ifdef TARGET_NR_atomic_cmpxchg_32
12345 case TARGET_NR_atomic_cmpxchg_32:
12347 /* should use start_exclusive from main.c */
12348 abi_ulong mem_value;
12349 if (get_user_u32(mem_value, arg6)) {
12350 target_siginfo_t info;
12351 info.si_signo = SIGSEGV;
12352 info.si_errno = 0;
12353 info.si_code = TARGET_SEGV_MAPERR;
12354 info._sifields._sigfault._addr = arg6;
12355 queue_signal((CPUArchState *)cpu_env, info.si_signo,
12356 QEMU_SI_FAULT, &info);
12357 ret = 0xdeadbeef;
12360 if (mem_value == arg2)
12361 put_user_u32(arg1, arg6);
12362 ret = mem_value;
12363 break;
12365 #endif
12366 #ifdef TARGET_NR_atomic_barrier
12367 case TARGET_NR_atomic_barrier:
12369 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
12370 ret = 0;
12371 break;
12373 #endif
12375 #ifdef TARGET_NR_timer_create
12376 case TARGET_NR_timer_create:
12378 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12380 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12382 int clkid = arg1;
12383 int timer_index = next_free_host_timer();
12385 if (timer_index < 0) {
12386 ret = -TARGET_EAGAIN;
12387 } else {
12388 timer_t *phtimer = g_posix_timers + timer_index;
12390 if (arg2) {
12391 phost_sevp = &host_sevp;
12392 ret = target_to_host_sigevent(phost_sevp, arg2);
12393 if (ret != 0) {
12394 break;
12398 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12399 if (ret) {
12400 phtimer = NULL;
12401 } else {
12402 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12403 goto efault;
12407 break;
12409 #endif
12411 #ifdef TARGET_NR_timer_settime
/*
 * NOTE(review): interior fragment of the do_syscall() switch. The leading
 * integers on each line look like web-blob line numbers, and the brace-only
 * lines (e.g. 12413, 12417, 12425) appear to have been dropped by the
 * extraction — verify against the pristine source tree before editing.
 */
/* Guest timer_settime: map the guest timer id to a host POSIX timer,
 * convert the guest itimerspec to host layout, arm the timer, and copy
 * the previous setting back to the guest if requested. */
12412 case TARGET_NR_timer_settime:
12414 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12415 * struct itimerspec * old_value */
12416 target_timer_t timerid = get_timer_id(arg1);
/* get_timer_id() returns a negative TARGET_E* code for an invalid guest id */
12418 if (timerid < 0) {
12419 ret = timerid;
12420 } else if (arg3 == 0) {
/* a NULL new_value is rejected up front instead of being passed through */
12421 ret = -TARGET_EINVAL;
12422 } else {
12423 timer_t htimer = g_posix_timers[timerid];
12424 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12426 if (target_to_host_itimerspec(&hspec_new, arg3)) {
12427 goto efault;
12429 ret = get_errno(
12430 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
/* old_value (arg4) is optional; copied back only when non-NULL */
12431 if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12432 goto efault;
12435 break;
12437 #endif
12439 #ifdef TARGET_NR_timer_gettime
/* Guest timer_gettime: read the host timer's current setting and convert
 * it back to the guest itimerspec layout. */
12440 case TARGET_NR_timer_gettime:
12442 /* args: timer_t timerid, struct itimerspec *curr_value */
12443 target_timer_t timerid = get_timer_id(arg1);
12445 if (timerid < 0) {
12446 ret = timerid;
12447 } else if (!arg2) {
12448 ret = -TARGET_EFAULT;
12449 } else {
12450 timer_t htimer = g_posix_timers[timerid];
12451 struct itimerspec hspec;
12452 ret = get_errno(timer_gettime(htimer, &hspec));
12454 if (host_to_target_itimerspec(arg2, &hspec)) {
12455 ret = -TARGET_EFAULT;
12458 break;
12460 #endif
12462 #ifdef TARGET_NR_timer_getoverrun
/* Guest timer_getoverrun: forward to the host and return the overrun count. */
12463 case TARGET_NR_timer_getoverrun:
12465 /* args: timer_t timerid */
12466 target_timer_t timerid = get_timer_id(arg1);
12468 if (timerid < 0) {
12469 ret = timerid;
12470 } else {
12471 timer_t htimer = g_posix_timers[timerid];
12472 ret = get_errno(timer_getoverrun(htimer));
/* NOTE(review): fd_trans_unregister() is being handed an overrun count,
 * not a file descriptor — looks out of place; confirm intent upstream. */
12474 fd_trans_unregister(ret);
12475 break;
12477 #endif
12479 #ifdef TARGET_NR_timer_delete
/* Guest timer_delete: dispose of the host timer and clear the slot in
 * g_posix_timers so the guest-visible id can be reused. */
12480 case TARGET_NR_timer_delete:
12482 /* args: timer_t timerid */
12483 target_timer_t timerid = get_timer_id(arg1);
12485 if (timerid < 0) {
12486 ret = timerid;
12487 } else {
12488 timer_t htimer = g_posix_timers[timerid];
12489 ret = get_errno(timer_delete(htimer));
12490 g_posix_timers[timerid] = 0;
12492 break;
12494 #endif
12496 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
/* timerfd_create: flags go through fcntl_flags_tbl so guest O_NONBLOCK /
 * O_CLOEXEC-style bits are remapped to the host encoding. */
12497 case TARGET_NR_timerfd_create:
12498 ret = get_errno(timerfd_create(arg1,
12499 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12500 break;
12501 #endif
12503 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12504 case TARGET_NR_timerfd_gettime:
12506 struct itimerspec its_curr;
12508 ret = get_errno(timerfd_gettime(arg1, &its_curr));
/* curr_value (arg2) is copied out only when the guest pointer is non-NULL */
12510 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12511 goto efault;
12514 break;
12515 #endif
12517 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12518 case TARGET_NR_timerfd_settime:
12520 struct itimerspec its_new, its_old, *p_new;
12522 if (arg3) {
12523 if (target_to_host_itimerspec(&its_new, arg3)) {
12524 goto efault;
12526 p_new = &its_new;
12527 } else {
/* NULL new_value is forwarded as-is; the host syscall rejects it */
12528 p_new = NULL;
12531 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12533 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12534 goto efault;
12537 break;
12538 #endif
/* Thin pass-through wrappers below: each is guarded on both the target
 * syscall number existing and the host providing the call at build time. */
12540 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12541 case TARGET_NR_ioprio_get:
12542 ret = get_errno(ioprio_get(arg1, arg2));
12543 break;
12544 #endif
12546 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12547 case TARGET_NR_ioprio_set:
12548 ret = get_errno(ioprio_set(arg1, arg2, arg3));
12549 break;
12550 #endif
12552 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12553 case TARGET_NR_setns:
12554 ret = get_errno(setns(arg1, arg2));
12555 break;
12556 #endif
12557 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12558 case TARGET_NR_unshare:
12559 ret = get_errno(unshare(arg1));
12560 break;
12561 #endif
12562 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12563 case TARGET_NR_kcmp:
12564 ret = get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12565 break;
12566 #endif
/* Any syscall number without a case above lands here and is logged as
 * unsupported; a few call sites jump to unimplemented_nowarn to return
 * ENOSYS without the log message. */
12568 default:
12569 unimplemented:
12570 gemu_log("qemu: Unsupported syscall: %d\n", num);
12571 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
12572 unimplemented_nowarn:
12573 #endif
12574 ret = -TARGET_ENOSYS;
12575 break;
/* Common exit path: optionally log/strace the result, emit the trace
 * event, and return ret (a value or a negative TARGET_E* code). */
12577 fail:
12578 #ifdef DEBUG
12579 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
12580 #endif
12581 if(do_strace)
12582 print_syscall_ret(num, ret);
12583 trace_guest_user_syscall_ret(cpu, num, ret);
12584 return ret;
/* Shared error path for failed guest-memory accesses */
12585 efault:
12586 ret = -TARGET_EFAULT;
12587 goto fail;