linux-user: Check for bad event numbers in epoll_wait
[qemu.git] / linux-user / syscall.c
blobeecccbb25c516fd342d4ca1bd0b3755bd9a45023
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #ifdef __ia64__
39 int __clone2(int (*fn)(void *), void *child_stack_base,
40 size_t stack_size, int flags, void *arg, ...);
41 #endif
42 #include <sys/socket.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <sys/poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef TARGET_GPROF
63 #include <sys/gmon.h>
64 #endif
65 #ifdef CONFIG_EVENTFD
66 #include <sys/eventfd.h>
67 #endif
68 #ifdef CONFIG_EPOLL
69 #include <sys/epoll.h>
70 #endif
71 #ifdef CONFIG_ATTR
72 #include "qemu/xattr.h"
73 #endif
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
76 #endif
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #include <linux/vt.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #include <linux/if_bridge.h>
108 #endif
109 #include <linux/audit.h>
110 #include "linux_loop.h"
111 #include "uname.h"
113 #include "qemu.h"
115 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
116 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
118 //#define DEBUG
119 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
120 * once. This exercises the codepaths for restart.
122 //#define DEBUG_ERESTARTSYS
124 //#include <linux/msdos_fs.h>
125 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
126 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
128 #undef _syscall0
129 #undef _syscall1
130 #undef _syscall2
131 #undef _syscall3
132 #undef _syscall4
133 #undef _syscall5
134 #undef _syscall6
136 #define _syscall0(type,name) \
137 static type name (void) \
139 return syscall(__NR_##name); \
142 #define _syscall1(type,name,type1,arg1) \
143 static type name (type1 arg1) \
145 return syscall(__NR_##name, arg1); \
148 #define _syscall2(type,name,type1,arg1,type2,arg2) \
149 static type name (type1 arg1,type2 arg2) \
151 return syscall(__NR_##name, arg1, arg2); \
154 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
155 static type name (type1 arg1,type2 arg2,type3 arg3) \
157 return syscall(__NR_##name, arg1, arg2, arg3); \
160 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
161 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
163 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
166 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
167 type5,arg5) \
168 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
170 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
174 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
175 type5,arg5,type6,arg6) \
176 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
177 type6 arg6) \
179 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
183 #define __NR_sys_uname __NR_uname
184 #define __NR_sys_getcwd1 __NR_getcwd
185 #define __NR_sys_getdents __NR_getdents
186 #define __NR_sys_getdents64 __NR_getdents64
187 #define __NR_sys_getpriority __NR_getpriority
188 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
189 #define __NR_sys_syslog __NR_syslog
190 #define __NR_sys_futex __NR_futex
191 #define __NR_sys_inotify_init __NR_inotify_init
192 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
193 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
195 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
196 defined(__s390x__)
197 #define __NR__llseek __NR_lseek
198 #endif
200 /* Newer kernel ports have llseek() instead of _llseek() */
201 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
202 #define TARGET_NR__llseek TARGET_NR_llseek
203 #endif
205 #ifdef __NR_gettid
206 _syscall0(int, gettid)
207 #else
/* Fallback used when the host kernel has no __NR_gettid: behave like
 * a failing host syscall and return a host errno value. */
static int gettid(void) {
    return -ENOSYS;
}
213 #endif
214 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
215 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
216 #endif
217 #if !defined(__NR_getdents) || \
218 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
219 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
220 #endif
221 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
222 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
223 loff_t *, res, uint, wh);
224 #endif
225 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
226 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
227 #ifdef __NR_exit_group
228 _syscall1(int,exit_group,int,error_code)
229 #endif
230 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
231 _syscall1(int,set_tid_address,int *,tidptr)
232 #endif
233 #if defined(TARGET_NR_futex) && defined(__NR_futex)
234 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
235 const struct timespec *,timeout,int *,uaddr2,int,val3)
236 #endif
237 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
238 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
239 unsigned long *, user_mask_ptr);
240 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
241 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
242 unsigned long *, user_mask_ptr);
243 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
244 void *, arg);
245 _syscall2(int, capget, struct __user_cap_header_struct *, header,
246 struct __user_cap_data_struct *, data);
247 _syscall2(int, capset, struct __user_cap_header_struct *, header,
248 struct __user_cap_data_struct *, data);
249 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
250 _syscall2(int, ioprio_get, int, which, int, who)
251 #endif
252 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
253 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
254 #endif
255 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
256 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
257 #endif
259 static bitmask_transtbl fcntl_flags_tbl[] = {
260 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
261 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
262 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
263 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
264 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
265 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
266 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
267 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
268 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
269 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
270 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
271 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
272 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
273 #if defined(O_DIRECT)
274 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
275 #endif
276 #if defined(O_NOATIME)
277 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
278 #endif
279 #if defined(O_CLOEXEC)
280 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
281 #endif
282 #if defined(O_PATH)
283 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
284 #endif
285 /* Don't terminate the list prematurely on 64-bit host+guest. */
286 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
287 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
288 #endif
289 { 0, 0, 0, 0 }
292 enum {
293 QEMU_IFLA_BR_UNSPEC,
294 QEMU_IFLA_BR_FORWARD_DELAY,
295 QEMU_IFLA_BR_HELLO_TIME,
296 QEMU_IFLA_BR_MAX_AGE,
297 QEMU_IFLA_BR_AGEING_TIME,
298 QEMU_IFLA_BR_STP_STATE,
299 QEMU_IFLA_BR_PRIORITY,
300 QEMU_IFLA_BR_VLAN_FILTERING,
301 QEMU_IFLA_BR_VLAN_PROTOCOL,
302 QEMU_IFLA_BR_GROUP_FWD_MASK,
303 QEMU_IFLA_BR_ROOT_ID,
304 QEMU_IFLA_BR_BRIDGE_ID,
305 QEMU_IFLA_BR_ROOT_PORT,
306 QEMU_IFLA_BR_ROOT_PATH_COST,
307 QEMU_IFLA_BR_TOPOLOGY_CHANGE,
308 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
309 QEMU_IFLA_BR_HELLO_TIMER,
310 QEMU_IFLA_BR_TCN_TIMER,
311 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
312 QEMU_IFLA_BR_GC_TIMER,
313 QEMU_IFLA_BR_GROUP_ADDR,
314 QEMU_IFLA_BR_FDB_FLUSH,
315 QEMU_IFLA_BR_MCAST_ROUTER,
316 QEMU_IFLA_BR_MCAST_SNOOPING,
317 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
318 QEMU_IFLA_BR_MCAST_QUERIER,
319 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
320 QEMU_IFLA_BR_MCAST_HASH_MAX,
321 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
322 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
323 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
324 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
325 QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
326 QEMU_IFLA_BR_MCAST_QUERY_INTVL,
327 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
328 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
329 QEMU_IFLA_BR_NF_CALL_IPTABLES,
330 QEMU_IFLA_BR_NF_CALL_IP6TABLES,
331 QEMU_IFLA_BR_NF_CALL_ARPTABLES,
332 QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
333 QEMU_IFLA_BR_PAD,
334 QEMU_IFLA_BR_VLAN_STATS_ENABLED,
335 QEMU_IFLA_BR_MCAST_STATS_ENABLED,
336 QEMU___IFLA_BR_MAX,
339 enum {
340 QEMU_IFLA_UNSPEC,
341 QEMU_IFLA_ADDRESS,
342 QEMU_IFLA_BROADCAST,
343 QEMU_IFLA_IFNAME,
344 QEMU_IFLA_MTU,
345 QEMU_IFLA_LINK,
346 QEMU_IFLA_QDISC,
347 QEMU_IFLA_STATS,
348 QEMU_IFLA_COST,
349 QEMU_IFLA_PRIORITY,
350 QEMU_IFLA_MASTER,
351 QEMU_IFLA_WIRELESS,
352 QEMU_IFLA_PROTINFO,
353 QEMU_IFLA_TXQLEN,
354 QEMU_IFLA_MAP,
355 QEMU_IFLA_WEIGHT,
356 QEMU_IFLA_OPERSTATE,
357 QEMU_IFLA_LINKMODE,
358 QEMU_IFLA_LINKINFO,
359 QEMU_IFLA_NET_NS_PID,
360 QEMU_IFLA_IFALIAS,
361 QEMU_IFLA_NUM_VF,
362 QEMU_IFLA_VFINFO_LIST,
363 QEMU_IFLA_STATS64,
364 QEMU_IFLA_VF_PORTS,
365 QEMU_IFLA_PORT_SELF,
366 QEMU_IFLA_AF_SPEC,
367 QEMU_IFLA_GROUP,
368 QEMU_IFLA_NET_NS_FD,
369 QEMU_IFLA_EXT_MASK,
370 QEMU_IFLA_PROMISCUITY,
371 QEMU_IFLA_NUM_TX_QUEUES,
372 QEMU_IFLA_NUM_RX_QUEUES,
373 QEMU_IFLA_CARRIER,
374 QEMU_IFLA_PHYS_PORT_ID,
375 QEMU_IFLA_CARRIER_CHANGES,
376 QEMU_IFLA_PHYS_SWITCH_ID,
377 QEMU_IFLA_LINK_NETNSID,
378 QEMU_IFLA_PHYS_PORT_NAME,
379 QEMU_IFLA_PROTO_DOWN,
380 QEMU_IFLA_GSO_MAX_SEGS,
381 QEMU_IFLA_GSO_MAX_SIZE,
382 QEMU_IFLA_PAD,
383 QEMU_IFLA_XDP,
384 QEMU___IFLA_MAX
387 enum {
388 QEMU_IFLA_BRPORT_UNSPEC,
389 QEMU_IFLA_BRPORT_STATE,
390 QEMU_IFLA_BRPORT_PRIORITY,
391 QEMU_IFLA_BRPORT_COST,
392 QEMU_IFLA_BRPORT_MODE,
393 QEMU_IFLA_BRPORT_GUARD,
394 QEMU_IFLA_BRPORT_PROTECT,
395 QEMU_IFLA_BRPORT_FAST_LEAVE,
396 QEMU_IFLA_BRPORT_LEARNING,
397 QEMU_IFLA_BRPORT_UNICAST_FLOOD,
398 QEMU_IFLA_BRPORT_PROXYARP,
399 QEMU_IFLA_BRPORT_LEARNING_SYNC,
400 QEMU_IFLA_BRPORT_PROXYARP_WIFI,
401 QEMU_IFLA_BRPORT_ROOT_ID,
402 QEMU_IFLA_BRPORT_BRIDGE_ID,
403 QEMU_IFLA_BRPORT_DESIGNATED_PORT,
404 QEMU_IFLA_BRPORT_DESIGNATED_COST,
405 QEMU_IFLA_BRPORT_ID,
406 QEMU_IFLA_BRPORT_NO,
407 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
408 QEMU_IFLA_BRPORT_CONFIG_PENDING,
409 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
410 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
411 QEMU_IFLA_BRPORT_HOLD_TIMER,
412 QEMU_IFLA_BRPORT_FLUSH,
413 QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
414 QEMU_IFLA_BRPORT_PAD,
415 QEMU___IFLA_BRPORT_MAX
418 enum {
419 QEMU_IFLA_INFO_UNSPEC,
420 QEMU_IFLA_INFO_KIND,
421 QEMU_IFLA_INFO_DATA,
422 QEMU_IFLA_INFO_XSTATS,
423 QEMU_IFLA_INFO_SLAVE_KIND,
424 QEMU_IFLA_INFO_SLAVE_DATA,
425 QEMU___IFLA_INFO_MAX,
428 enum {
429 QEMU_IFLA_INET_UNSPEC,
430 QEMU_IFLA_INET_CONF,
431 QEMU___IFLA_INET_MAX,
434 enum {
435 QEMU_IFLA_INET6_UNSPEC,
436 QEMU_IFLA_INET6_FLAGS,
437 QEMU_IFLA_INET6_CONF,
438 QEMU_IFLA_INET6_STATS,
439 QEMU_IFLA_INET6_MCAST,
440 QEMU_IFLA_INET6_CACHEINFO,
441 QEMU_IFLA_INET6_ICMP6STATS,
442 QEMU_IFLA_INET6_TOKEN,
443 QEMU_IFLA_INET6_ADDR_GEN_MODE,
444 QEMU___IFLA_INET6_MAX
447 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
448 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
449 typedef struct TargetFdTrans {
450 TargetFdDataFunc host_to_target_data;
451 TargetFdDataFunc target_to_host_data;
452 TargetFdAddrFunc target_to_host_addr;
453 } TargetFdTrans;
455 static TargetFdTrans **target_fd_trans;
457 static unsigned int target_fd_max;
459 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
461 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
462 return target_fd_trans[fd]->target_to_host_data;
464 return NULL;
467 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
469 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
470 return target_fd_trans[fd]->host_to_target_data;
472 return NULL;
475 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
477 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
478 return target_fd_trans[fd]->target_to_host_addr;
480 return NULL;
483 static void fd_trans_register(int fd, TargetFdTrans *trans)
485 unsigned int oldmax;
487 if (fd >= target_fd_max) {
488 oldmax = target_fd_max;
489 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
490 target_fd_trans = g_renew(TargetFdTrans *,
491 target_fd_trans, target_fd_max);
492 memset((void *)(target_fd_trans + oldmax), 0,
493 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
495 target_fd_trans[fd] = trans;
498 static void fd_trans_unregister(int fd)
500 if (fd >= 0 && fd < target_fd_max) {
501 target_fd_trans[fd] = NULL;
505 static void fd_trans_dup(int oldfd, int newfd)
507 fd_trans_unregister(newfd);
508 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
509 fd_trans_register(newfd, target_fd_trans[oldfd]);
/* Like getcwd(), but mirror the kernel syscall's convention of
 * returning the path length including the trailing NUL on success,
 * and -1 (with errno set by getcwd) on failure. */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return -1;
    }
    return strlen(buf) + 1;
}
522 #ifdef TARGET_NR_utimensat
523 #if defined(__NR_utimensat)
524 #define __NR_sys_utimensat __NR_utimensat
525 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
526 const struct timespec *,tsp,int,flags)
527 #else
/* Fallback when the host kernel lacks utimensat(): fail with ENOSYS
 * exactly as a missing syscall would. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
534 #endif
535 #endif /* TARGET_NR_utimensat */
537 #ifdef CONFIG_INOTIFY
538 #include <sys/inotify.h>
540 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper so the syscall dispatch table has a uniform entry
 * point for inotify_init. */
static int sys_inotify_init(void)
{
    return inotify_init();
}
545 #endif
546 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin wrapper around inotify_add_watch() for the dispatch table. */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
551 #endif
552 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin wrapper around inotify_rm_watch() for the dispatch table. */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
557 #endif
558 #ifdef CONFIG_INOTIFY1
559 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin wrapper around inotify_init1() for the dispatch table. */
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
564 #endif
565 #endif
566 #else
567 /* Userspace can usually survive runtime without inotify */
568 #undef TARGET_NR_inotify_init
569 #undef TARGET_NR_inotify_init1
570 #undef TARGET_NR_inotify_add_watch
571 #undef TARGET_NR_inotify_rm_watch
572 #endif /* CONFIG_INOTIFY */
574 #if defined(TARGET_NR_prlimit64)
575 #ifndef __NR_prlimit64
576 # define __NR_prlimit64 -1
577 #endif
578 #define __NR_sys_prlimit64 __NR_prlimit64
579 /* The glibc rlimit structure may not be that used by the underlying syscall */
580 struct host_rlimit64 {
581 uint64_t rlim_cur;
582 uint64_t rlim_max;
584 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
585 const struct host_rlimit64 *, new_limit,
586 struct host_rlimit64 *, old_limit)
587 #endif
590 #if defined(TARGET_NR_timer_create)
591 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
592 static timer_t g_posix_timers[32] = { 0, } ;
594 static inline int next_free_host_timer(void)
596 int k ;
597 /* FIXME: Does finding the next free slot require a lock? */
598 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
599 if (g_posix_timers[k] == 0) {
600 g_posix_timers[k] = (timer_t) 1;
601 return k;
604 return -1;
606 #endif
608 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
609 #ifdef TARGET_ARM
610 static inline int regpairs_aligned(void *cpu_env) {
611 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
613 #elif defined(TARGET_MIPS)
614 static inline int regpairs_aligned(void *cpu_env) { return 1; }
615 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
616 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
617 * of registers which translates to the same as ARM/MIPS, because we start with
618 * r3 as arg1 */
619 static inline int regpairs_aligned(void *cpu_env) { return 1; }
620 #else
621 static inline int regpairs_aligned(void *cpu_env) { return 0; }
622 #endif
624 #define ERRNO_TABLE_SIZE 1200
626 /* target_to_host_errno_table[] is initialized from
627 * host_to_target_errno_table[] in syscall_init(). */
628 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
632 * This list is the union of errno values overridden in asm-<arch>/errno.h
633 * minus the errnos that are not actually generic to all archs.
635 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
636 [EAGAIN] = TARGET_EAGAIN,
637 [EIDRM] = TARGET_EIDRM,
638 [ECHRNG] = TARGET_ECHRNG,
639 [EL2NSYNC] = TARGET_EL2NSYNC,
640 [EL3HLT] = TARGET_EL3HLT,
641 [EL3RST] = TARGET_EL3RST,
642 [ELNRNG] = TARGET_ELNRNG,
643 [EUNATCH] = TARGET_EUNATCH,
644 [ENOCSI] = TARGET_ENOCSI,
645 [EL2HLT] = TARGET_EL2HLT,
646 [EDEADLK] = TARGET_EDEADLK,
647 [ENOLCK] = TARGET_ENOLCK,
648 [EBADE] = TARGET_EBADE,
649 [EBADR] = TARGET_EBADR,
650 [EXFULL] = TARGET_EXFULL,
651 [ENOANO] = TARGET_ENOANO,
652 [EBADRQC] = TARGET_EBADRQC,
653 [EBADSLT] = TARGET_EBADSLT,
654 [EBFONT] = TARGET_EBFONT,
655 [ENOSTR] = TARGET_ENOSTR,
656 [ENODATA] = TARGET_ENODATA,
657 [ETIME] = TARGET_ETIME,
658 [ENOSR] = TARGET_ENOSR,
659 [ENONET] = TARGET_ENONET,
660 [ENOPKG] = TARGET_ENOPKG,
661 [EREMOTE] = TARGET_EREMOTE,
662 [ENOLINK] = TARGET_ENOLINK,
663 [EADV] = TARGET_EADV,
664 [ESRMNT] = TARGET_ESRMNT,
665 [ECOMM] = TARGET_ECOMM,
666 [EPROTO] = TARGET_EPROTO,
667 [EDOTDOT] = TARGET_EDOTDOT,
668 [EMULTIHOP] = TARGET_EMULTIHOP,
669 [EBADMSG] = TARGET_EBADMSG,
670 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
671 [EOVERFLOW] = TARGET_EOVERFLOW,
672 [ENOTUNIQ] = TARGET_ENOTUNIQ,
673 [EBADFD] = TARGET_EBADFD,
674 [EREMCHG] = TARGET_EREMCHG,
675 [ELIBACC] = TARGET_ELIBACC,
676 [ELIBBAD] = TARGET_ELIBBAD,
677 [ELIBSCN] = TARGET_ELIBSCN,
678 [ELIBMAX] = TARGET_ELIBMAX,
679 [ELIBEXEC] = TARGET_ELIBEXEC,
680 [EILSEQ] = TARGET_EILSEQ,
681 [ENOSYS] = TARGET_ENOSYS,
682 [ELOOP] = TARGET_ELOOP,
683 [ERESTART] = TARGET_ERESTART,
684 [ESTRPIPE] = TARGET_ESTRPIPE,
685 [ENOTEMPTY] = TARGET_ENOTEMPTY,
686 [EUSERS] = TARGET_EUSERS,
687 [ENOTSOCK] = TARGET_ENOTSOCK,
688 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
689 [EMSGSIZE] = TARGET_EMSGSIZE,
690 [EPROTOTYPE] = TARGET_EPROTOTYPE,
691 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
692 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
693 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
694 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
695 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
696 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
697 [EADDRINUSE] = TARGET_EADDRINUSE,
698 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
699 [ENETDOWN] = TARGET_ENETDOWN,
700 [ENETUNREACH] = TARGET_ENETUNREACH,
701 [ENETRESET] = TARGET_ENETRESET,
702 [ECONNABORTED] = TARGET_ECONNABORTED,
703 [ECONNRESET] = TARGET_ECONNRESET,
704 [ENOBUFS] = TARGET_ENOBUFS,
705 [EISCONN] = TARGET_EISCONN,
706 [ENOTCONN] = TARGET_ENOTCONN,
707 [EUCLEAN] = TARGET_EUCLEAN,
708 [ENOTNAM] = TARGET_ENOTNAM,
709 [ENAVAIL] = TARGET_ENAVAIL,
710 [EISNAM] = TARGET_EISNAM,
711 [EREMOTEIO] = TARGET_EREMOTEIO,
712 [ESHUTDOWN] = TARGET_ESHUTDOWN,
713 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
714 [ETIMEDOUT] = TARGET_ETIMEDOUT,
715 [ECONNREFUSED] = TARGET_ECONNREFUSED,
716 [EHOSTDOWN] = TARGET_EHOSTDOWN,
717 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
718 [EALREADY] = TARGET_EALREADY,
719 [EINPROGRESS] = TARGET_EINPROGRESS,
720 [ESTALE] = TARGET_ESTALE,
721 [ECANCELED] = TARGET_ECANCELED,
722 [ENOMEDIUM] = TARGET_ENOMEDIUM,
723 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
724 #ifdef ENOKEY
725 [ENOKEY] = TARGET_ENOKEY,
726 #endif
727 #ifdef EKEYEXPIRED
728 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
729 #endif
730 #ifdef EKEYREVOKED
731 [EKEYREVOKED] = TARGET_EKEYREVOKED,
732 #endif
733 #ifdef EKEYREJECTED
734 [EKEYREJECTED] = TARGET_EKEYREJECTED,
735 #endif
736 #ifdef EOWNERDEAD
737 [EOWNERDEAD] = TARGET_EOWNERDEAD,
738 #endif
739 #ifdef ENOTRECOVERABLE
740 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
741 #endif
744 static inline int host_to_target_errno(int err)
746 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
747 host_to_target_errno_table[err]) {
748 return host_to_target_errno_table[err];
750 return err;
753 static inline int target_to_host_errno(int err)
755 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
756 target_to_host_errno_table[err]) {
757 return target_to_host_errno_table[err];
759 return err;
762 static inline abi_long get_errno(abi_long ret)
764 if (ret == -1)
765 return -host_to_target_errno(errno);
766 else
767 return ret;
770 static inline int is_error(abi_long ret)
772 return (abi_ulong)ret >= (abi_ulong)(-4096);
775 const char *target_strerror(int err)
777 if (err == TARGET_ERESTARTSYS) {
778 return "To be restarted";
780 if (err == TARGET_QEMU_ESIGRETURN) {
781 return "Successful exit from sigreturn";
784 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
785 return NULL;
787 return strerror(target_to_host_errno(err));
790 #define safe_syscall0(type, name) \
791 static type safe_##name(void) \
793 return safe_syscall(__NR_##name); \
796 #define safe_syscall1(type, name, type1, arg1) \
797 static type safe_##name(type1 arg1) \
799 return safe_syscall(__NR_##name, arg1); \
802 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
803 static type safe_##name(type1 arg1, type2 arg2) \
805 return safe_syscall(__NR_##name, arg1, arg2); \
808 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
809 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
811 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
814 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
815 type4, arg4) \
816 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
818 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
821 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
822 type4, arg4, type5, arg5) \
823 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
824 type5 arg5) \
826 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
829 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
830 type4, arg4, type5, arg5, type6, arg6) \
831 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
832 type5 arg5, type6 arg6) \
834 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
837 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
838 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
839 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
840 int, flags, mode_t, mode)
841 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
842 struct rusage *, rusage)
843 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
844 int, options, struct rusage *, rusage)
845 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
846 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
847 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
848 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
849 struct timespec *, tsp, const sigset_t *, sigmask,
850 size_t, sigsetsize)
851 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
852 int, maxevents, int, timeout, const sigset_t *, sigmask,
853 size_t, sigsetsize)
854 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
855 const struct timespec *,timeout,int *,uaddr2,int,val3)
856 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
857 safe_syscall2(int, kill, pid_t, pid, int, sig)
858 safe_syscall2(int, tkill, int, tid, int, sig)
859 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
860 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
861 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
862 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
863 socklen_t, addrlen)
864 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
865 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
866 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
867 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
868 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
869 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
870 safe_syscall2(int, flock, int, fd, int, operation)
871 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
872 const struct timespec *, uts, size_t, sigsetsize)
873 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
874 int, flags)
875 safe_syscall2(int, nanosleep, const struct timespec *, req,
876 struct timespec *, rem)
877 #ifdef TARGET_NR_clock_nanosleep
878 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
879 const struct timespec *, req, struct timespec *, rem)
880 #endif
881 #ifdef __NR_msgsnd
882 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
883 int, flags)
884 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
885 long, msgtype, int, flags)
886 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
887 unsigned, nsops, const struct timespec *, timeout)
888 #else
889 /* This host kernel architecture uses a single ipc syscall; fake up
890 * wrappers for the sub-operations to hide this implementation detail.
891 * Annoyingly we can't include linux/ipc.h to get the constant definitions
892 * for the call parameter because some structs in there conflict with the
893 * sys/ipc.h ones. So we just define them here, and rely on them being
894 * the same for all host architectures.
896 #define Q_SEMTIMEDOP 4
897 #define Q_MSGSND 11
898 #define Q_MSGRCV 12
899 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
901 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
902 void *, ptr, long, fifth)
903 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
905 return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
907 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
909 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
911 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
912 const struct timespec *timeout)
914 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
915 (long)timeout);
917 #endif
918 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
919 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
920 size_t, len, unsigned, prio, const struct timespec *, timeout)
921 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
922 size_t, len, unsigned *, prio, const struct timespec *, timeout)
923 #endif
924 /* We do ioctl like this rather than via safe_syscall3 to preserve the
925 * "third argument might be integer or pointer or not present" behaviour of
926 * the libc function.
928 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
929 /* Similarly for fcntl. Note that callers must always:
930 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
931 * use the flock64 struct rather than unsuffixed flock
932 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
934 #ifdef __NR_fcntl64
935 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
936 #else
937 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
938 #endif
940 static inline int host_to_target_sock_type(int host_type)
942 int target_type;
944 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
945 case SOCK_DGRAM:
946 target_type = TARGET_SOCK_DGRAM;
947 break;
948 case SOCK_STREAM:
949 target_type = TARGET_SOCK_STREAM;
950 break;
951 default:
952 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
953 break;
956 #if defined(SOCK_CLOEXEC)
957 if (host_type & SOCK_CLOEXEC) {
958 target_type |= TARGET_SOCK_CLOEXEC;
960 #endif
962 #if defined(SOCK_NONBLOCK)
963 if (host_type & SOCK_NONBLOCK) {
964 target_type |= TARGET_SOCK_NONBLOCK;
966 #endif
968 return target_type;
971 static abi_ulong target_brk;
972 static abi_ulong target_original_brk;
973 static abi_ulong brk_page;
975 void target_set_brk(abi_ulong new_brk)
977 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
978 brk_page = HOST_PAGE_ALIGN(target_brk);
981 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
982 #define DEBUGF_BRK(message, args...)
984 /* do_brk() must return target values and target errnos. */
985 abi_long do_brk(abi_ulong new_brk)
987 abi_long mapped_addr;
988 abi_ulong new_alloc_size;
990 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
992 if (!new_brk) {
993 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
994 return target_brk;
996 if (new_brk < target_original_brk) {
997 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
998 target_brk);
999 return target_brk;
1002 /* If the new brk is less than the highest page reserved to the
1003 * target heap allocation, set it and we're almost done... */
1004 if (new_brk <= brk_page) {
1005 /* Heap contents are initialized to zero, as for anonymous
1006 * mapped pages. */
1007 if (new_brk > target_brk) {
1008 memset(g2h(target_brk), 0, new_brk - target_brk);
1010 target_brk = new_brk;
1011 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
1012 return target_brk;
1015 /* We need to allocate more memory after the brk... Note that
1016 * we don't use MAP_FIXED because that will map over the top of
1017 * any existing mapping (like the one with the host libc or qemu
1018 * itself); instead we treat "mapped but at wrong address" as
1019 * a failure and unmap again.
1021 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
1022 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
1023 PROT_READ|PROT_WRITE,
1024 MAP_ANON|MAP_PRIVATE, 0, 0));
1026 if (mapped_addr == brk_page) {
1027 /* Heap contents are initialized to zero, as for anonymous
1028 * mapped pages. Technically the new pages are already
1029 * initialized to zero since they *are* anonymous mapped
1030 * pages, however we have to take care with the contents that
1031 * come from the remaining part of the previous page: it may
1032 * contains garbage data due to a previous heap usage (grown
1033 * then shrunken). */
1034 memset(g2h(target_brk), 0, brk_page - target_brk);
1036 target_brk = new_brk;
1037 brk_page = HOST_PAGE_ALIGN(target_brk);
1038 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
1039 target_brk);
1040 return target_brk;
1041 } else if (mapped_addr != -1) {
1042 /* Mapped but at wrong address, meaning there wasn't actually
1043 * enough space for this brk.
1045 target_munmap(mapped_addr, new_alloc_size);
1046 mapped_addr = -1;
1047 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
1049 else {
1050 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
1053 #if defined(TARGET_ALPHA)
1054 /* We (partially) emulate OSF/1 on Alpha, which requires we
1055 return a proper errno, not an unchanged brk value. */
1056 return -TARGET_ENOMEM;
1057 #endif
1058 /* For everything else, return the previous break. */
1059 return target_brk;
1062 static inline abi_long copy_from_user_fdset(fd_set *fds,
1063 abi_ulong target_fds_addr,
1064 int n)
1066 int i, nw, j, k;
1067 abi_ulong b, *target_fds;
1069 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1070 if (!(target_fds = lock_user(VERIFY_READ,
1071 target_fds_addr,
1072 sizeof(abi_ulong) * nw,
1073 1)))
1074 return -TARGET_EFAULT;
1076 FD_ZERO(fds);
1077 k = 0;
1078 for (i = 0; i < nw; i++) {
1079 /* grab the abi_ulong */
1080 __get_user(b, &target_fds[i]);
1081 for (j = 0; j < TARGET_ABI_BITS; j++) {
1082 /* check the bit inside the abi_ulong */
1083 if ((b >> j) & 1)
1084 FD_SET(k, fds);
1085 k++;
1089 unlock_user(target_fds, target_fds_addr, 0);
1091 return 0;
1094 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1095 abi_ulong target_fds_addr,
1096 int n)
1098 if (target_fds_addr) {
1099 if (copy_from_user_fdset(fds, target_fds_addr, n))
1100 return -TARGET_EFAULT;
1101 *fds_ptr = fds;
1102 } else {
1103 *fds_ptr = NULL;
1105 return 0;
1108 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1109 const fd_set *fds,
1110 int n)
1112 int i, nw, j, k;
1113 abi_long v;
1114 abi_ulong *target_fds;
1116 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1117 if (!(target_fds = lock_user(VERIFY_WRITE,
1118 target_fds_addr,
1119 sizeof(abi_ulong) * nw,
1120 0)))
1121 return -TARGET_EFAULT;
1123 k = 0;
1124 for (i = 0; i < nw; i++) {
1125 v = 0;
1126 for (j = 0; j < TARGET_ABI_BITS; j++) {
1127 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1128 k++;
1130 __put_user(v, &target_fds[i]);
1133 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1135 return 0;
1138 #if defined(__alpha__)
1139 #define HOST_HZ 1024
1140 #else
1141 #define HOST_HZ 100
1142 #endif
1144 static inline abi_long host_to_target_clock_t(long ticks)
1146 #if HOST_HZ == TARGET_HZ
1147 return ticks;
1148 #else
1149 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1150 #endif
1153 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1154 const struct rusage *rusage)
1156 struct target_rusage *target_rusage;
1158 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1159 return -TARGET_EFAULT;
1160 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1161 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1162 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1163 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1164 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1165 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1166 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1167 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1168 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1169 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1170 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1171 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1172 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1173 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1174 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1175 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1176 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1177 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1178 unlock_user_struct(target_rusage, target_addr, 1);
1180 return 0;
1183 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1185 abi_ulong target_rlim_swap;
1186 rlim_t result;
1188 target_rlim_swap = tswapal(target_rlim);
1189 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1190 return RLIM_INFINITY;
1192 result = target_rlim_swap;
1193 if (target_rlim_swap != (rlim_t)result)
1194 return RLIM_INFINITY;
1196 return result;
1199 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1201 abi_ulong target_rlim_swap;
1202 abi_ulong result;
1204 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1205 target_rlim_swap = TARGET_RLIM_INFINITY;
1206 else
1207 target_rlim_swap = rlim;
1208 result = tswapal(target_rlim_swap);
1210 return result;
1213 static inline int target_to_host_resource(int code)
1215 switch (code) {
1216 case TARGET_RLIMIT_AS:
1217 return RLIMIT_AS;
1218 case TARGET_RLIMIT_CORE:
1219 return RLIMIT_CORE;
1220 case TARGET_RLIMIT_CPU:
1221 return RLIMIT_CPU;
1222 case TARGET_RLIMIT_DATA:
1223 return RLIMIT_DATA;
1224 case TARGET_RLIMIT_FSIZE:
1225 return RLIMIT_FSIZE;
1226 case TARGET_RLIMIT_LOCKS:
1227 return RLIMIT_LOCKS;
1228 case TARGET_RLIMIT_MEMLOCK:
1229 return RLIMIT_MEMLOCK;
1230 case TARGET_RLIMIT_MSGQUEUE:
1231 return RLIMIT_MSGQUEUE;
1232 case TARGET_RLIMIT_NICE:
1233 return RLIMIT_NICE;
1234 case TARGET_RLIMIT_NOFILE:
1235 return RLIMIT_NOFILE;
1236 case TARGET_RLIMIT_NPROC:
1237 return RLIMIT_NPROC;
1238 case TARGET_RLIMIT_RSS:
1239 return RLIMIT_RSS;
1240 case TARGET_RLIMIT_RTPRIO:
1241 return RLIMIT_RTPRIO;
1242 case TARGET_RLIMIT_SIGPENDING:
1243 return RLIMIT_SIGPENDING;
1244 case TARGET_RLIMIT_STACK:
1245 return RLIMIT_STACK;
1246 default:
1247 return code;
1251 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1252 abi_ulong target_tv_addr)
1254 struct target_timeval *target_tv;
1256 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1257 return -TARGET_EFAULT;
1259 __get_user(tv->tv_sec, &target_tv->tv_sec);
1260 __get_user(tv->tv_usec, &target_tv->tv_usec);
1262 unlock_user_struct(target_tv, target_tv_addr, 0);
1264 return 0;
1267 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1268 const struct timeval *tv)
1270 struct target_timeval *target_tv;
1272 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1273 return -TARGET_EFAULT;
1275 __put_user(tv->tv_sec, &target_tv->tv_sec);
1276 __put_user(tv->tv_usec, &target_tv->tv_usec);
1278 unlock_user_struct(target_tv, target_tv_addr, 1);
1280 return 0;
1283 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1284 abi_ulong target_tz_addr)
1286 struct target_timezone *target_tz;
1288 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1289 return -TARGET_EFAULT;
1292 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1293 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1295 unlock_user_struct(target_tz, target_tz_addr, 0);
1297 return 0;
1300 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1301 #include <mqueue.h>
1303 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1304 abi_ulong target_mq_attr_addr)
1306 struct target_mq_attr *target_mq_attr;
1308 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1309 target_mq_attr_addr, 1))
1310 return -TARGET_EFAULT;
1312 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1313 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1314 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1315 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1317 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1319 return 0;
1322 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1323 const struct mq_attr *attr)
1325 struct target_mq_attr *target_mq_attr;
1327 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1328 target_mq_attr_addr, 0))
1329 return -TARGET_EFAULT;
1331 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1332 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1333 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1334 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1336 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1338 return 0;
1340 #endif
1342 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1343 /* do_select() must return target values and target errnos. */
1344 static abi_long do_select(int n,
1345 abi_ulong rfd_addr, abi_ulong wfd_addr,
1346 abi_ulong efd_addr, abi_ulong target_tv_addr)
1348 fd_set rfds, wfds, efds;
1349 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1350 struct timeval tv;
1351 struct timespec ts, *ts_ptr;
1352 abi_long ret;
1354 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1355 if (ret) {
1356 return ret;
1358 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1359 if (ret) {
1360 return ret;
1362 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1363 if (ret) {
1364 return ret;
1367 if (target_tv_addr) {
1368 if (copy_from_user_timeval(&tv, target_tv_addr))
1369 return -TARGET_EFAULT;
1370 ts.tv_sec = tv.tv_sec;
1371 ts.tv_nsec = tv.tv_usec * 1000;
1372 ts_ptr = &ts;
1373 } else {
1374 ts_ptr = NULL;
1377 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1378 ts_ptr, NULL));
1380 if (!is_error(ret)) {
1381 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1382 return -TARGET_EFAULT;
1383 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1384 return -TARGET_EFAULT;
1385 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1386 return -TARGET_EFAULT;
1388 if (target_tv_addr) {
1389 tv.tv_sec = ts.tv_sec;
1390 tv.tv_usec = ts.tv_nsec / 1000;
1391 if (copy_to_user_timeval(target_tv_addr, &tv)) {
1392 return -TARGET_EFAULT;
1397 return ret;
1399 #endif
1401 static abi_long do_pipe2(int host_pipe[], int flags)
1403 #ifdef CONFIG_PIPE2
1404 return pipe2(host_pipe, flags);
1405 #else
1406 return -ENOSYS;
1407 #endif
1410 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1411 int flags, int is_pipe2)
1413 int host_pipe[2];
1414 abi_long ret;
1415 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1417 if (is_error(ret))
1418 return get_errno(ret);
1420 /* Several targets have special calling conventions for the original
1421 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1422 if (!is_pipe2) {
1423 #if defined(TARGET_ALPHA)
1424 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1425 return host_pipe[0];
1426 #elif defined(TARGET_MIPS)
1427 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1428 return host_pipe[0];
1429 #elif defined(TARGET_SH4)
1430 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1431 return host_pipe[0];
1432 #elif defined(TARGET_SPARC)
1433 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1434 return host_pipe[0];
1435 #endif
1438 if (put_user_s32(host_pipe[0], pipedes)
1439 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1440 return -TARGET_EFAULT;
1441 return get_errno(ret);
1444 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1445 abi_ulong target_addr,
1446 socklen_t len)
1448 struct target_ip_mreqn *target_smreqn;
1450 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1451 if (!target_smreqn)
1452 return -TARGET_EFAULT;
1453 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1454 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1455 if (len == sizeof(struct target_ip_mreqn))
1456 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1457 unlock_user(target_smreqn, target_addr, 0);
1459 return 0;
1462 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1463 abi_ulong target_addr,
1464 socklen_t len)
1466 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1467 sa_family_t sa_family;
1468 struct target_sockaddr *target_saddr;
1470 if (fd_trans_target_to_host_addr(fd)) {
1471 return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1474 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1475 if (!target_saddr)
1476 return -TARGET_EFAULT;
1478 sa_family = tswap16(target_saddr->sa_family);
1480 /* Oops. The caller might send a incomplete sun_path; sun_path
1481 * must be terminated by \0 (see the manual page), but
1482 * unfortunately it is quite common to specify sockaddr_un
1483 * length as "strlen(x->sun_path)" while it should be
1484 * "strlen(...) + 1". We'll fix that here if needed.
1485 * Linux kernel has a similar feature.
1488 if (sa_family == AF_UNIX) {
1489 if (len < unix_maxlen && len > 0) {
1490 char *cp = (char*)target_saddr;
1492 if ( cp[len-1] && !cp[len] )
1493 len++;
1495 if (len > unix_maxlen)
1496 len = unix_maxlen;
1499 memcpy(addr, target_saddr, len);
1500 addr->sa_family = sa_family;
1501 if (sa_family == AF_NETLINK) {
1502 struct sockaddr_nl *nladdr;
1504 nladdr = (struct sockaddr_nl *)addr;
1505 nladdr->nl_pid = tswap32(nladdr->nl_pid);
1506 nladdr->nl_groups = tswap32(nladdr->nl_groups);
1507 } else if (sa_family == AF_PACKET) {
1508 struct target_sockaddr_ll *lladdr;
1510 lladdr = (struct target_sockaddr_ll *)addr;
1511 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1512 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1514 unlock_user(target_saddr, target_addr, 0);
1516 return 0;
1519 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1520 struct sockaddr *addr,
1521 socklen_t len)
1523 struct target_sockaddr *target_saddr;
1525 if (len == 0) {
1526 return 0;
1529 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1530 if (!target_saddr)
1531 return -TARGET_EFAULT;
1532 memcpy(target_saddr, addr, len);
1533 if (len >= offsetof(struct target_sockaddr, sa_family) +
1534 sizeof(target_saddr->sa_family)) {
1535 target_saddr->sa_family = tswap16(addr->sa_family);
1537 if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1538 struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1539 target_nl->nl_pid = tswap32(target_nl->nl_pid);
1540 target_nl->nl_groups = tswap32(target_nl->nl_groups);
1541 } else if (addr->sa_family == AF_PACKET) {
1542 struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1543 target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1544 target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1546 unlock_user(target_saddr, target_addr, len);
1548 return 0;
1551 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1552 struct target_msghdr *target_msgh)
1554 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1555 abi_long msg_controllen;
1556 abi_ulong target_cmsg_addr;
1557 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1558 socklen_t space = 0;
1560 msg_controllen = tswapal(target_msgh->msg_controllen);
1561 if (msg_controllen < sizeof (struct target_cmsghdr))
1562 goto the_end;
1563 target_cmsg_addr = tswapal(target_msgh->msg_control);
1564 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1565 target_cmsg_start = target_cmsg;
1566 if (!target_cmsg)
1567 return -TARGET_EFAULT;
1569 while (cmsg && target_cmsg) {
1570 void *data = CMSG_DATA(cmsg);
1571 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1573 int len = tswapal(target_cmsg->cmsg_len)
1574 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1576 space += CMSG_SPACE(len);
1577 if (space > msgh->msg_controllen) {
1578 space -= CMSG_SPACE(len);
1579 /* This is a QEMU bug, since we allocated the payload
1580 * area ourselves (unlike overflow in host-to-target
1581 * conversion, which is just the guest giving us a buffer
1582 * that's too small). It can't happen for the payload types
1583 * we currently support; if it becomes an issue in future
1584 * we would need to improve our allocation strategy to
1585 * something more intelligent than "twice the size of the
1586 * target buffer we're reading from".
1588 gemu_log("Host cmsg overflow\n");
1589 break;
1592 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1593 cmsg->cmsg_level = SOL_SOCKET;
1594 } else {
1595 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1597 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1598 cmsg->cmsg_len = CMSG_LEN(len);
1600 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1601 int *fd = (int *)data;
1602 int *target_fd = (int *)target_data;
1603 int i, numfds = len / sizeof(int);
1605 for (i = 0; i < numfds; i++) {
1606 __get_user(fd[i], target_fd + i);
1608 } else if (cmsg->cmsg_level == SOL_SOCKET
1609 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1610 struct ucred *cred = (struct ucred *)data;
1611 struct target_ucred *target_cred =
1612 (struct target_ucred *)target_data;
1614 __get_user(cred->pid, &target_cred->pid);
1615 __get_user(cred->uid, &target_cred->uid);
1616 __get_user(cred->gid, &target_cred->gid);
1617 } else {
1618 gemu_log("Unsupported ancillary data: %d/%d\n",
1619 cmsg->cmsg_level, cmsg->cmsg_type);
1620 memcpy(data, target_data, len);
1623 cmsg = CMSG_NXTHDR(msgh, cmsg);
1624 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1625 target_cmsg_start);
1627 unlock_user(target_cmsg, target_cmsg_addr, 0);
1628 the_end:
1629 msgh->msg_controllen = space;
1630 return 0;
1633 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1634 struct msghdr *msgh)
1636 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1637 abi_long msg_controllen;
1638 abi_ulong target_cmsg_addr;
1639 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1640 socklen_t space = 0;
1642 msg_controllen = tswapal(target_msgh->msg_controllen);
1643 if (msg_controllen < sizeof (struct target_cmsghdr))
1644 goto the_end;
1645 target_cmsg_addr = tswapal(target_msgh->msg_control);
1646 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1647 target_cmsg_start = target_cmsg;
1648 if (!target_cmsg)
1649 return -TARGET_EFAULT;
1651 while (cmsg && target_cmsg) {
1652 void *data = CMSG_DATA(cmsg);
1653 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1655 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1656 int tgt_len, tgt_space;
1658 /* We never copy a half-header but may copy half-data;
1659 * this is Linux's behaviour in put_cmsg(). Note that
1660 * truncation here is a guest problem (which we report
1661 * to the guest via the CTRUNC bit), unlike truncation
1662 * in target_to_host_cmsg, which is a QEMU bug.
1664 if (msg_controllen < sizeof(struct cmsghdr)) {
1665 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1666 break;
1669 if (cmsg->cmsg_level == SOL_SOCKET) {
1670 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1671 } else {
1672 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1674 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1676 tgt_len = TARGET_CMSG_LEN(len);
1678 /* Payload types which need a different size of payload on
1679 * the target must adjust tgt_len here.
1681 switch (cmsg->cmsg_level) {
1682 case SOL_SOCKET:
1683 switch (cmsg->cmsg_type) {
1684 case SO_TIMESTAMP:
1685 tgt_len = sizeof(struct target_timeval);
1686 break;
1687 default:
1688 break;
1690 default:
1691 break;
1694 if (msg_controllen < tgt_len) {
1695 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1696 tgt_len = msg_controllen;
1699 /* We must now copy-and-convert len bytes of payload
1700 * into tgt_len bytes of destination space. Bear in mind
1701 * that in both source and destination we may be dealing
1702 * with a truncated value!
1704 switch (cmsg->cmsg_level) {
1705 case SOL_SOCKET:
1706 switch (cmsg->cmsg_type) {
1707 case SCM_RIGHTS:
1709 int *fd = (int *)data;
1710 int *target_fd = (int *)target_data;
1711 int i, numfds = tgt_len / sizeof(int);
1713 for (i = 0; i < numfds; i++) {
1714 __put_user(fd[i], target_fd + i);
1716 break;
1718 case SO_TIMESTAMP:
1720 struct timeval *tv = (struct timeval *)data;
1721 struct target_timeval *target_tv =
1722 (struct target_timeval *)target_data;
1724 if (len != sizeof(struct timeval) ||
1725 tgt_len != sizeof(struct target_timeval)) {
1726 goto unimplemented;
1729 /* copy struct timeval to target */
1730 __put_user(tv->tv_sec, &target_tv->tv_sec);
1731 __put_user(tv->tv_usec, &target_tv->tv_usec);
1732 break;
1734 case SCM_CREDENTIALS:
1736 struct ucred *cred = (struct ucred *)data;
1737 struct target_ucred *target_cred =
1738 (struct target_ucred *)target_data;
1740 __put_user(cred->pid, &target_cred->pid);
1741 __put_user(cred->uid, &target_cred->uid);
1742 __put_user(cred->gid, &target_cred->gid);
1743 break;
1745 default:
1746 goto unimplemented;
1748 break;
1750 default:
1751 unimplemented:
1752 gemu_log("Unsupported ancillary data: %d/%d\n",
1753 cmsg->cmsg_level, cmsg->cmsg_type);
1754 memcpy(target_data, data, MIN(len, tgt_len));
1755 if (tgt_len > len) {
1756 memset(target_data + len, 0, tgt_len - len);
1760 target_cmsg->cmsg_len = tswapal(tgt_len);
1761 tgt_space = TARGET_CMSG_SPACE(len);
1762 if (msg_controllen < tgt_space) {
1763 tgt_space = msg_controllen;
1765 msg_controllen -= tgt_space;
1766 space += tgt_space;
1767 cmsg = CMSG_NXTHDR(msgh, cmsg);
1768 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1769 target_cmsg_start);
1771 unlock_user(target_cmsg, target_cmsg_addr, space);
1772 the_end:
1773 target_msgh->msg_controllen = tswapal(space);
1774 return 0;
/* Byte-swap every field of a netlink message header in place. */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
}
1786 static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
1787 size_t len,
1788 abi_long (*host_to_target_nlmsg)
1789 (struct nlmsghdr *))
1791 uint32_t nlmsg_len;
1792 abi_long ret;
1794 while (len > sizeof(struct nlmsghdr)) {
1796 nlmsg_len = nlh->nlmsg_len;
1797 if (nlmsg_len < sizeof(struct nlmsghdr) ||
1798 nlmsg_len > len) {
1799 break;
1802 switch (nlh->nlmsg_type) {
1803 case NLMSG_DONE:
1804 tswap_nlmsghdr(nlh);
1805 return 0;
1806 case NLMSG_NOOP:
1807 break;
1808 case NLMSG_ERROR:
1810 struct nlmsgerr *e = NLMSG_DATA(nlh);
1811 e->error = tswap32(e->error);
1812 tswap_nlmsghdr(&e->msg);
1813 tswap_nlmsghdr(nlh);
1814 return 0;
1816 default:
1817 ret = host_to_target_nlmsg(nlh);
1818 if (ret < 0) {
1819 tswap_nlmsghdr(nlh);
1820 return ret;
1822 break;
1824 tswap_nlmsghdr(nlh);
1825 len -= NLMSG_ALIGN(nlmsg_len);
1826 nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
1828 return 0;
1831 static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
1832 size_t len,
1833 abi_long (*target_to_host_nlmsg)
1834 (struct nlmsghdr *))
1836 int ret;
1838 while (len > sizeof(struct nlmsghdr)) {
1839 if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
1840 tswap32(nlh->nlmsg_len) > len) {
1841 break;
1843 tswap_nlmsghdr(nlh);
1844 switch (nlh->nlmsg_type) {
1845 case NLMSG_DONE:
1846 return 0;
1847 case NLMSG_NOOP:
1848 break;
1849 case NLMSG_ERROR:
1851 struct nlmsgerr *e = NLMSG_DATA(nlh);
1852 e->error = tswap32(e->error);
1853 tswap_nlmsghdr(&e->msg);
1854 return 0;
1856 default:
1857 ret = target_to_host_nlmsg(nlh);
1858 if (ret < 0) {
1859 return ret;
1862 len -= NLMSG_ALIGN(nlh->nlmsg_len);
1863 nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
1865 return 0;
1868 #ifdef CONFIG_RTNETLINK
1869 static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
1870 size_t len, void *context,
1871 abi_long (*host_to_target_nlattr)
1872 (struct nlattr *,
1873 void *context))
1875 unsigned short nla_len;
1876 abi_long ret;
1878 while (len > sizeof(struct nlattr)) {
1879 nla_len = nlattr->nla_len;
1880 if (nla_len < sizeof(struct nlattr) ||
1881 nla_len > len) {
1882 break;
1884 ret = host_to_target_nlattr(nlattr, context);
1885 nlattr->nla_len = tswap16(nlattr->nla_len);
1886 nlattr->nla_type = tswap16(nlattr->nla_type);
1887 if (ret < 0) {
1888 return ret;
1890 len -= NLA_ALIGN(nla_len);
1891 nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
1893 return 0;
1896 static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
1897 size_t len,
1898 abi_long (*host_to_target_rtattr)
1899 (struct rtattr *))
1901 unsigned short rta_len;
1902 abi_long ret;
1904 while (len > sizeof(struct rtattr)) {
1905 rta_len = rtattr->rta_len;
1906 if (rta_len < sizeof(struct rtattr) ||
1907 rta_len > len) {
1908 break;
1910 ret = host_to_target_rtattr(rtattr);
1911 rtattr->rta_len = tswap16(rtattr->rta_len);
1912 rtattr->rta_type = tswap16(rtattr->rta_type);
1913 if (ret < 0) {
1914 return ret;
1916 len -= RTA_ALIGN(rta_len);
1917 rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
1919 return 0;
/* Pointer to the payload of a netlink attribute.  The arithmetic must be
 * done on a (char *) and the result cast afterwards: the previous form,
 * ((void *)((char *)(nla)) + NLA_HDRLEN), added an integer to a void *,
 * which is a GNU extension rather than standard C.
 */
#define NLA_DATA(nla) ((void *)((char *)(nla) + NLA_HDRLEN))
1924 static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
1925 void *context)
1927 uint16_t *u16;
1928 uint32_t *u32;
1929 uint64_t *u64;
1931 switch (nlattr->nla_type) {
1932 /* no data */
1933 case QEMU_IFLA_BR_FDB_FLUSH:
1934 break;
1935 /* binary */
1936 case QEMU_IFLA_BR_GROUP_ADDR:
1937 break;
1938 /* uint8_t */
1939 case QEMU_IFLA_BR_VLAN_FILTERING:
1940 case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
1941 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
1942 case QEMU_IFLA_BR_MCAST_ROUTER:
1943 case QEMU_IFLA_BR_MCAST_SNOOPING:
1944 case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
1945 case QEMU_IFLA_BR_MCAST_QUERIER:
1946 case QEMU_IFLA_BR_NF_CALL_IPTABLES:
1947 case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
1948 case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
1949 break;
1950 /* uint16_t */
1951 case QEMU_IFLA_BR_PRIORITY:
1952 case QEMU_IFLA_BR_VLAN_PROTOCOL:
1953 case QEMU_IFLA_BR_GROUP_FWD_MASK:
1954 case QEMU_IFLA_BR_ROOT_PORT:
1955 case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
1956 u16 = NLA_DATA(nlattr);
1957 *u16 = tswap16(*u16);
1958 break;
1959 /* uint32_t */
1960 case QEMU_IFLA_BR_FORWARD_DELAY:
1961 case QEMU_IFLA_BR_HELLO_TIME:
1962 case QEMU_IFLA_BR_MAX_AGE:
1963 case QEMU_IFLA_BR_AGEING_TIME:
1964 case QEMU_IFLA_BR_STP_STATE:
1965 case QEMU_IFLA_BR_ROOT_PATH_COST:
1966 case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
1967 case QEMU_IFLA_BR_MCAST_HASH_MAX:
1968 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
1969 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
1970 u32 = NLA_DATA(nlattr);
1971 *u32 = tswap32(*u32);
1972 break;
1973 /* uint64_t */
1974 case QEMU_IFLA_BR_HELLO_TIMER:
1975 case QEMU_IFLA_BR_TCN_TIMER:
1976 case QEMU_IFLA_BR_GC_TIMER:
1977 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
1978 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
1979 case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
1980 case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
1981 case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
1982 case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
1983 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
1984 u64 = NLA_DATA(nlattr);
1985 *u64 = tswap64(*u64);
1986 break;
1987 /* ifla_bridge_id: uin8_t[] */
1988 case QEMU_IFLA_BR_ROOT_ID:
1989 case QEMU_IFLA_BR_BRIDGE_ID:
1990 break;
1991 default:
1992 gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
1993 break;
1995 return 0;
1998 static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
1999 void *context)
2001 uint16_t *u16;
2002 uint32_t *u32;
2003 uint64_t *u64;
2005 switch (nlattr->nla_type) {
2006 /* uint8_t */
2007 case QEMU_IFLA_BRPORT_STATE:
2008 case QEMU_IFLA_BRPORT_MODE:
2009 case QEMU_IFLA_BRPORT_GUARD:
2010 case QEMU_IFLA_BRPORT_PROTECT:
2011 case QEMU_IFLA_BRPORT_FAST_LEAVE:
2012 case QEMU_IFLA_BRPORT_LEARNING:
2013 case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
2014 case QEMU_IFLA_BRPORT_PROXYARP:
2015 case QEMU_IFLA_BRPORT_LEARNING_SYNC:
2016 case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
2017 case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
2018 case QEMU_IFLA_BRPORT_CONFIG_PENDING:
2019 case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
2020 break;
2021 /* uint16_t */
2022 case QEMU_IFLA_BRPORT_PRIORITY:
2023 case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
2024 case QEMU_IFLA_BRPORT_DESIGNATED_COST:
2025 case QEMU_IFLA_BRPORT_ID:
2026 case QEMU_IFLA_BRPORT_NO:
2027 u16 = NLA_DATA(nlattr);
2028 *u16 = tswap16(*u16);
2029 break;
2030 /* uin32_t */
2031 case QEMU_IFLA_BRPORT_COST:
2032 u32 = NLA_DATA(nlattr);
2033 *u32 = tswap32(*u32);
2034 break;
2035 /* uint64_t */
2036 case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
2037 case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
2038 case QEMU_IFLA_BRPORT_HOLD_TIMER:
2039 u64 = NLA_DATA(nlattr);
2040 *u64 = tswap64(*u64);
2041 break;
2042 /* ifla_bridge_id: uint8_t[] */
2043 case QEMU_IFLA_BRPORT_ROOT_ID:
2044 case QEMU_IFLA_BRPORT_BRIDGE_ID:
2045 break;
2046 default:
2047 gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
2048 break;
2050 return 0;
/* Remembers the IFLA_INFO_KIND / IFLA_INFO_SLAVE_KIND strings seen while
 * walking an IFLA_LINKINFO nest, so that the corresponding DATA nests can
 * be dispatched to the right converter.
 */
struct linkinfo_context {
    int len;          /* length of name, excluding the nlattr header */
    char *name;       /* IFLA_INFO_KIND payload (e.g. "bridge") */
    int slave_len;    /* length of slave_name */
    char *slave_name; /* IFLA_INFO_SLAVE_KIND payload */
};
2060 static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
2061 void *context)
2063 struct linkinfo_context *li_context = context;
2065 switch (nlattr->nla_type) {
2066 /* string */
2067 case QEMU_IFLA_INFO_KIND:
2068 li_context->name = NLA_DATA(nlattr);
2069 li_context->len = nlattr->nla_len - NLA_HDRLEN;
2070 break;
2071 case QEMU_IFLA_INFO_SLAVE_KIND:
2072 li_context->slave_name = NLA_DATA(nlattr);
2073 li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
2074 break;
2075 /* stats */
2076 case QEMU_IFLA_INFO_XSTATS:
2077 /* FIXME: only used by CAN */
2078 break;
2079 /* nested */
2080 case QEMU_IFLA_INFO_DATA:
2081 if (strncmp(li_context->name, "bridge",
2082 li_context->len) == 0) {
2083 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2084 nlattr->nla_len,
2085 NULL,
2086 host_to_target_data_bridge_nlattr);
2087 } else {
2088 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
2090 break;
2091 case QEMU_IFLA_INFO_SLAVE_DATA:
2092 if (strncmp(li_context->slave_name, "bridge",
2093 li_context->slave_len) == 0) {
2094 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2095 nlattr->nla_len,
2096 NULL,
2097 host_to_target_slave_data_bridge_nlattr);
2098 } else {
2099 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2100 li_context->slave_name);
2102 break;
2103 default:
2104 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
2105 break;
2108 return 0;
/*
 * Byte-swap one AF_INET sub-attribute of IFLA_AF_SPEC (host -> target).
 * Only IFLA_INET_CONF (an array of u32 sysctl values) needs swapping;
 * unknown types are logged and passed through unmodified.
 */
static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
                                                void *context)
{
    uint32_t *u32;
    int i;

    switch (nlattr->nla_type) {
    case QEMU_IFLA_INET_CONF:
        u32 = NLA_DATA(nlattr);
        /* swap each element of the u32 array in place */
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
             i++) {
            u32[i] = tswap32(u32[i]);
        }
        break;
    default:
        gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
    }
    return 0;
}
/*
 * Byte-swap one AF_INET6 sub-attribute of IFLA_AF_SPEC (host -> target).
 * Attributes are grouped by payload layout: raw binaries and u8s need no
 * swap; u32/u64 scalars and arrays, and the ifla_cacheinfo struct, are
 * swapped field by field in place.
 */
static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
                                                 void *context)
{
    uint32_t *u32;
    uint64_t *u64;
    struct ifla_cacheinfo *ci;
    int i;

    switch (nlattr->nla_type) {
    /* binaries */
    case QEMU_IFLA_INET6_TOKEN:
        break;
    /* uint8_t */
    case QEMU_IFLA_INET6_ADDR_GEN_MODE:
        break;
    /* uint32_t */
    case QEMU_IFLA_INET6_FLAGS:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint32_t[] */
    case QEMU_IFLA_INET6_CONF:
        u32 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
             i++) {
            u32[i] = tswap32(u32[i]);
        }
        break;
    /* ifla_cacheinfo */
    case QEMU_IFLA_INET6_CACHEINFO:
        ci = NLA_DATA(nlattr);
        ci->max_reasm_len = tswap32(ci->max_reasm_len);
        ci->tstamp = tswap32(ci->tstamp);
        ci->reachable_time = tswap32(ci->reachable_time);
        ci->retrans_time = tswap32(ci->retrans_time);
        break;
    /* uint64_t[] */
    case QEMU_IFLA_INET6_STATS:
    case QEMU_IFLA_INET6_ICMP6STATS:
        u64 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
             i++) {
            u64[i] = tswap64(u64[i]);
        }
        break;
    default:
        gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
    }
    return 0;
}
/*
 * Dispatch one IFLA_AF_SPEC sub-attribute (keyed by address family) to
 * the matching per-family nested converter.  Unknown families are logged
 * and left unmodified.
 */
static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
                                                void *context)
{
    switch (nlattr->nla_type) {
    case AF_INET:
        return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
                                              NULL,
                                             host_to_target_data_inet_nlattr);
    case AF_INET6:
        return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
                                              NULL,
                                             host_to_target_data_inet6_nlattr);
    default:
        gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
/*
 * Byte-swap one RTM_*LINK rtattr payload from host to target order.
 * The big switch groups attribute types by payload layout (raw bytes,
 * strings, u8, u32, the two stats structs, rtnl_link_ifmap) and recurses
 * into the nested LINKINFO / AF_SPEC containers.  Unknown types are
 * logged and passed through unswapped.
 */
static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct rtnl_link_stats *st;
    struct rtnl_link_stats64 *st64;
    struct rtnl_link_ifmap *map;
    struct linkinfo_context li_context;

    switch (rtattr->rta_type) {
    /* binary stream */
    case QEMU_IFLA_ADDRESS:
    case QEMU_IFLA_BROADCAST:
    /* string */
    case QEMU_IFLA_IFNAME:
    case QEMU_IFLA_QDISC:
        break;
    /* uin8_t */
    case QEMU_IFLA_OPERSTATE:
    case QEMU_IFLA_LINKMODE:
    case QEMU_IFLA_CARRIER:
    case QEMU_IFLA_PROTO_DOWN:
        break;
    /* uint32_t */
    case QEMU_IFLA_MTU:
    case QEMU_IFLA_LINK:
    case QEMU_IFLA_WEIGHT:
    case QEMU_IFLA_TXQLEN:
    case QEMU_IFLA_CARRIER_CHANGES:
    case QEMU_IFLA_NUM_RX_QUEUES:
    case QEMU_IFLA_NUM_TX_QUEUES:
    case QEMU_IFLA_PROMISCUITY:
    case QEMU_IFLA_EXT_MASK:
    case QEMU_IFLA_LINK_NETNSID:
    case QEMU_IFLA_GROUP:
    case QEMU_IFLA_MASTER:
    case QEMU_IFLA_NUM_VF:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct rtnl_link_stats */
    case QEMU_IFLA_STATS:
        st = RTA_DATA(rtattr);
        st->rx_packets = tswap32(st->rx_packets);
        st->tx_packets = tswap32(st->tx_packets);
        st->rx_bytes = tswap32(st->rx_bytes);
        st->tx_bytes = tswap32(st->tx_bytes);
        st->rx_errors = tswap32(st->rx_errors);
        st->tx_errors = tswap32(st->tx_errors);
        st->rx_dropped = tswap32(st->rx_dropped);
        st->tx_dropped = tswap32(st->tx_dropped);
        st->multicast = tswap32(st->multicast);
        st->collisions = tswap32(st->collisions);

        /* detailed rx_errors: */
        st->rx_length_errors = tswap32(st->rx_length_errors);
        st->rx_over_errors = tswap32(st->rx_over_errors);
        st->rx_crc_errors = tswap32(st->rx_crc_errors);
        st->rx_frame_errors = tswap32(st->rx_frame_errors);
        st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
        st->rx_missed_errors = tswap32(st->rx_missed_errors);

        /* detailed tx_errors */
        st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
        st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
        st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
        st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
        st->tx_window_errors = tswap32(st->tx_window_errors);

        /* for cslip etc */
        st->rx_compressed = tswap32(st->rx_compressed);
        st->tx_compressed = tswap32(st->tx_compressed);
        break;
    /* struct rtnl_link_stats64 */
    case QEMU_IFLA_STATS64:
        st64 = RTA_DATA(rtattr);
        st64->rx_packets = tswap64(st64->rx_packets);
        st64->tx_packets = tswap64(st64->tx_packets);
        st64->rx_bytes = tswap64(st64->rx_bytes);
        st64->tx_bytes = tswap64(st64->tx_bytes);
        st64->rx_errors = tswap64(st64->rx_errors);
        st64->tx_errors = tswap64(st64->tx_errors);
        st64->rx_dropped = tswap64(st64->rx_dropped);
        st64->tx_dropped = tswap64(st64->tx_dropped);
        st64->multicast = tswap64(st64->multicast);
        st64->collisions = tswap64(st64->collisions);

        /* detailed rx_errors: */
        st64->rx_length_errors = tswap64(st64->rx_length_errors);
        st64->rx_over_errors = tswap64(st64->rx_over_errors);
        st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
        st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
        st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
        st64->rx_missed_errors = tswap64(st64->rx_missed_errors);

        /* detailed tx_errors */
        st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
        st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
        st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
        st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
        st64->tx_window_errors = tswap64(st64->tx_window_errors);

        /* for cslip etc */
        st64->rx_compressed = tswap64(st64->rx_compressed);
        st64->tx_compressed = tswap64(st64->tx_compressed);
        break;
    /* struct rtnl_link_ifmap */
    case QEMU_IFLA_MAP:
        map = RTA_DATA(rtattr);
        map->mem_start = tswap64(map->mem_start);
        map->mem_end = tswap64(map->mem_end);
        map->base_addr = tswap64(map->base_addr);
        map->irq = tswap16(map->irq);
        break;
    /* nested */
    case QEMU_IFLA_LINKINFO:
        memset(&li_context, 0, sizeof(li_context));
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              &li_context,
                                           host_to_target_data_linkinfo_nlattr);
    case QEMU_IFLA_AF_SPEC:
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              NULL,
                                             host_to_target_data_spec_nlattr);
    default:
        gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/*
 * Byte-swap one RTM_*ADDR rtattr payload from host to target order.
 * Raw addresses and strings pass through; u32 fields and the
 * ifa_cacheinfo struct are swapped in place.
 */
static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct ifa_cacheinfo *ci;

    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case IFA_ADDRESS:
    case IFA_LOCAL:
        break;
    /* string */
    case IFA_LABEL:
        break;
    /* u32 */
    case IFA_FLAGS:
    case IFA_BROADCAST:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct ifa_cacheinfo */
    case IFA_CACHEINFO:
        ci = RTA_DATA(rtattr);
        ci->ifa_prefered = tswap32(ci->ifa_prefered);
        ci->ifa_valid = tswap32(ci->ifa_valid);
        ci->cstamp = tswap32(ci->cstamp);
        ci->tstamp = tswap32(ci->tstamp);
        break;
    default:
        gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/*
 * Byte-swap one RTM_*ROUTE rtattr payload from host to target order.
 * Address-valued attributes stay as raw bytes; u32 scalars are swapped.
 */
static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case RTA_GATEWAY:
    case RTA_DST:
    case RTA_PREFSRC:
        break;
    /* u32 */
    case RTA_PRIORITY:
    case RTA_TABLE:
    case RTA_OIF:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    default:
        gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/* Walk all rtattrs of an RTM_*LINK message, swapping each (host -> target). */
static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_link_rtattr);
}
/* Walk all rtattrs of an RTM_*ADDR message, swapping each (host -> target). */
static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_addr_rtattr);
}
/* Walk all rtattrs of an RTM_*ROUTE message, swapping each (host -> target). */
static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
                                            uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_route_rtattr);
}
/*
 * Convert the payload of one rtnetlink message from host to target byte
 * order: swap the fixed-size family header (ifinfomsg / ifaddrmsg /
 * rtmsg), then the trailing rtattr list.  The nlmsg header itself is
 * handled by the generic nlmsg walker.  Returns -TARGET_EINVAL for
 * message types we do not know how to convert.
 */
static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
{
    uint32_t nlmsg_len;
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    /* capture the (still host-order) length before any swapping */
    nlmsg_len = nlh->nlmsg_len;
    switch (nlh->nlmsg_type) {
    case RTM_NEWLINK:
    case RTM_DELLINK:
    case RTM_GETLINK:
        /* only touch the payload if it is actually present */
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            host_to_target_link_rtattr(IFLA_RTA(ifi),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_NEWADDR:
    case RTM_DELADDR:
    case RTM_GETADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            host_to_target_addr_rtattr(IFA_RTA(ifa),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
    case RTM_GETROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            host_to_target_route_rtattr(RTM_RTA(rtm),
                                        nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EINVAL;
    }
    return 0;
}
/* Convert a whole buffer of rtnetlink messages from host to target order. */
static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
}
/*
 * Walk a target-byte-order rtattr chain, converting each header to host
 * order in place and invoking the supplied per-attribute callback.
 * Malformed lengths (shorter than an rtattr header, or running past the
 * buffer) terminate the walk silently.  Note the length fields must be
 * validated via tswap16() BEFORE the in-place swap is committed.
 */
static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*target_to_host_rtattr)
                                                        (struct rtattr *))
{
    abi_long ret;

    while (len >= sizeof(struct rtattr)) {
        if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
            tswap16(rtattr->rta_len) > len) {
            break;
        }
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        ret = target_to_host_rtattr(rtattr);
        if (ret < 0) {
            return ret;
        }
        /* advance by the aligned (now host-order) attribute length */
        len -= RTA_ALIGN(rtattr->rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) +
                 RTA_ALIGN(rtattr->rta_len));
    }
    return 0;
}
2488 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2490 switch (rtattr->rta_type) {
2491 default:
2492 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
2493 break;
2495 return 0;
2498 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2500 switch (rtattr->rta_type) {
2501 /* binary: depends on family type */
2502 case IFA_LOCAL:
2503 case IFA_ADDRESS:
2504 break;
2505 default:
2506 gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
2507 break;
2509 return 0;
/*
 * Convert one RTM_*ROUTE rtattr from target to host order.  Address
 * attributes are raw bytes; only the u32 RTA_OIF needs swapping.
 */
static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case RTA_DST:
    case RTA_SRC:
    case RTA_GATEWAY:
        break;
    /* u32 */
    case RTA_OIF:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    default:
        gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/* Walk all rtattrs of an outgoing RTM_*LINK message (target -> host). */
static void target_to_host_link_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_link_rtattr);
}
/* Walk all rtattrs of an outgoing RTM_*ADDR message (target -> host). */
static void target_to_host_addr_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_addr_rtattr);
}
/* Walk all rtattrs of an outgoing RTM_*ROUTE message (target -> host). */
static void target_to_host_route_rtattr(struct rtattr *rtattr,
                                        uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_route_rtattr);
}
/*
 * Convert the payload of one outgoing rtnetlink message from target to
 * host byte order before it is handed to the host kernel.  GET requests
 * carry no payload that needs conversion; NEW/DEL requests get their
 * family header and rtattr chain swapped.  Unknown message types are
 * rejected with -TARGET_EOPNOTSUPP rather than sent through raw.
 */
static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
{
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    switch (nlh->nlmsg_type) {
    case RTM_GETLINK:
        break;
    case RTM_NEWLINK:
    case RTM_DELLINK:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_GETADDR:
    case RTM_NEWADDR:
    case RTM_DELADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_GETROUTE:
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
                                        NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EOPNOTSUPP;
    }
    return 0;
}
/* Convert a whole buffer of outgoing rtnetlink messages (target -> host). */
static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
}
2606 #endif /* CONFIG_RTNETLINK */
/*
 * Convert one incoming NETLINK_AUDIT message payload (host -> target).
 * No audit message type is understood yet, so every message is reported
 * and rejected with -TARGET_EINVAL.
 */
static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
{
    switch (nlh->nlmsg_type) {
    default:
        gemu_log("Unknown host audit message type %d\n",
                 nlh->nlmsg_type);
        return -TARGET_EINVAL;
    }
    return 0;
}
/* Convert a whole buffer of incoming NETLINK_AUDIT messages (host -> target). */
static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
}
/*
 * Convert one outgoing NETLINK_AUDIT message payload (target -> host).
 * User messages (AUDIT_USER and the two user-message ranges) are opaque
 * strings needing no conversion; anything else is rejected.
 */
static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
{
    switch (nlh->nlmsg_type) {
    case AUDIT_USER:
    case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
    case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
        break;
    default:
        gemu_log("Unknown target audit message type %d\n",
                 nlh->nlmsg_type);
        return -TARGET_EINVAL;
    }

    return 0;
}
/* Convert a whole buffer of outgoing NETLINK_AUDIT messages (target -> host). */
static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
}
2646 /* do_setsockopt() Must return target values and target errnos. */
2647 static abi_long do_setsockopt(int sockfd, int level, int optname,
2648 abi_ulong optval_addr, socklen_t optlen)
2650 abi_long ret;
2651 int val;
2652 struct ip_mreqn *ip_mreq;
2653 struct ip_mreq_source *ip_mreq_source;
2655 switch(level) {
2656 case SOL_TCP:
2657 /* TCP options all take an 'int' value. */
2658 if (optlen < sizeof(uint32_t))
2659 return -TARGET_EINVAL;
2661 if (get_user_u32(val, optval_addr))
2662 return -TARGET_EFAULT;
2663 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2664 break;
2665 case SOL_IP:
2666 switch(optname) {
2667 case IP_TOS:
2668 case IP_TTL:
2669 case IP_HDRINCL:
2670 case IP_ROUTER_ALERT:
2671 case IP_RECVOPTS:
2672 case IP_RETOPTS:
2673 case IP_PKTINFO:
2674 case IP_MTU_DISCOVER:
2675 case IP_RECVERR:
2676 case IP_RECVTOS:
2677 #ifdef IP_FREEBIND
2678 case IP_FREEBIND:
2679 #endif
2680 case IP_MULTICAST_TTL:
2681 case IP_MULTICAST_LOOP:
2682 val = 0;
2683 if (optlen >= sizeof(uint32_t)) {
2684 if (get_user_u32(val, optval_addr))
2685 return -TARGET_EFAULT;
2686 } else if (optlen >= 1) {
2687 if (get_user_u8(val, optval_addr))
2688 return -TARGET_EFAULT;
2690 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2691 break;
2692 case IP_ADD_MEMBERSHIP:
2693 case IP_DROP_MEMBERSHIP:
2694 if (optlen < sizeof (struct target_ip_mreq) ||
2695 optlen > sizeof (struct target_ip_mreqn))
2696 return -TARGET_EINVAL;
2698 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2699 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2700 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2701 break;
2703 case IP_BLOCK_SOURCE:
2704 case IP_UNBLOCK_SOURCE:
2705 case IP_ADD_SOURCE_MEMBERSHIP:
2706 case IP_DROP_SOURCE_MEMBERSHIP:
2707 if (optlen != sizeof (struct target_ip_mreq_source))
2708 return -TARGET_EINVAL;
2710 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2711 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2712 unlock_user (ip_mreq_source, optval_addr, 0);
2713 break;
2715 default:
2716 goto unimplemented;
2718 break;
2719 case SOL_IPV6:
2720 switch (optname) {
2721 case IPV6_MTU_DISCOVER:
2722 case IPV6_MTU:
2723 case IPV6_V6ONLY:
2724 case IPV6_RECVPKTINFO:
2725 val = 0;
2726 if (optlen < sizeof(uint32_t)) {
2727 return -TARGET_EINVAL;
2729 if (get_user_u32(val, optval_addr)) {
2730 return -TARGET_EFAULT;
2732 ret = get_errno(setsockopt(sockfd, level, optname,
2733 &val, sizeof(val)));
2734 break;
2735 default:
2736 goto unimplemented;
2738 break;
2739 case SOL_RAW:
2740 switch (optname) {
2741 case ICMP_FILTER:
2742 /* struct icmp_filter takes an u32 value */
2743 if (optlen < sizeof(uint32_t)) {
2744 return -TARGET_EINVAL;
2747 if (get_user_u32(val, optval_addr)) {
2748 return -TARGET_EFAULT;
2750 ret = get_errno(setsockopt(sockfd, level, optname,
2751 &val, sizeof(val)));
2752 break;
2754 default:
2755 goto unimplemented;
2757 break;
2758 case TARGET_SOL_SOCKET:
2759 switch (optname) {
2760 case TARGET_SO_RCVTIMEO:
2762 struct timeval tv;
2764 optname = SO_RCVTIMEO;
2766 set_timeout:
2767 if (optlen != sizeof(struct target_timeval)) {
2768 return -TARGET_EINVAL;
2771 if (copy_from_user_timeval(&tv, optval_addr)) {
2772 return -TARGET_EFAULT;
2775 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2776 &tv, sizeof(tv)));
2777 return ret;
2779 case TARGET_SO_SNDTIMEO:
2780 optname = SO_SNDTIMEO;
2781 goto set_timeout;
2782 case TARGET_SO_ATTACH_FILTER:
2784 struct target_sock_fprog *tfprog;
2785 struct target_sock_filter *tfilter;
2786 struct sock_fprog fprog;
2787 struct sock_filter *filter;
2788 int i;
2790 if (optlen != sizeof(*tfprog)) {
2791 return -TARGET_EINVAL;
2793 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2794 return -TARGET_EFAULT;
2796 if (!lock_user_struct(VERIFY_READ, tfilter,
2797 tswapal(tfprog->filter), 0)) {
2798 unlock_user_struct(tfprog, optval_addr, 1);
2799 return -TARGET_EFAULT;
2802 fprog.len = tswap16(tfprog->len);
2803 filter = g_try_new(struct sock_filter, fprog.len);
2804 if (filter == NULL) {
2805 unlock_user_struct(tfilter, tfprog->filter, 1);
2806 unlock_user_struct(tfprog, optval_addr, 1);
2807 return -TARGET_ENOMEM;
2809 for (i = 0; i < fprog.len; i++) {
2810 filter[i].code = tswap16(tfilter[i].code);
2811 filter[i].jt = tfilter[i].jt;
2812 filter[i].jf = tfilter[i].jf;
2813 filter[i].k = tswap32(tfilter[i].k);
2815 fprog.filter = filter;
2817 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2818 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2819 g_free(filter);
2821 unlock_user_struct(tfilter, tfprog->filter, 1);
2822 unlock_user_struct(tfprog, optval_addr, 1);
2823 return ret;
2825 case TARGET_SO_BINDTODEVICE:
2827 char *dev_ifname, *addr_ifname;
2829 if (optlen > IFNAMSIZ - 1) {
2830 optlen = IFNAMSIZ - 1;
2832 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2833 if (!dev_ifname) {
2834 return -TARGET_EFAULT;
2836 optname = SO_BINDTODEVICE;
2837 addr_ifname = alloca(IFNAMSIZ);
2838 memcpy(addr_ifname, dev_ifname, optlen);
2839 addr_ifname[optlen] = 0;
2840 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2841 addr_ifname, optlen));
2842 unlock_user (dev_ifname, optval_addr, 0);
2843 return ret;
2845 /* Options with 'int' argument. */
2846 case TARGET_SO_DEBUG:
2847 optname = SO_DEBUG;
2848 break;
2849 case TARGET_SO_REUSEADDR:
2850 optname = SO_REUSEADDR;
2851 break;
2852 case TARGET_SO_TYPE:
2853 optname = SO_TYPE;
2854 break;
2855 case TARGET_SO_ERROR:
2856 optname = SO_ERROR;
2857 break;
2858 case TARGET_SO_DONTROUTE:
2859 optname = SO_DONTROUTE;
2860 break;
2861 case TARGET_SO_BROADCAST:
2862 optname = SO_BROADCAST;
2863 break;
2864 case TARGET_SO_SNDBUF:
2865 optname = SO_SNDBUF;
2866 break;
2867 case TARGET_SO_SNDBUFFORCE:
2868 optname = SO_SNDBUFFORCE;
2869 break;
2870 case TARGET_SO_RCVBUF:
2871 optname = SO_RCVBUF;
2872 break;
2873 case TARGET_SO_RCVBUFFORCE:
2874 optname = SO_RCVBUFFORCE;
2875 break;
2876 case TARGET_SO_KEEPALIVE:
2877 optname = SO_KEEPALIVE;
2878 break;
2879 case TARGET_SO_OOBINLINE:
2880 optname = SO_OOBINLINE;
2881 break;
2882 case TARGET_SO_NO_CHECK:
2883 optname = SO_NO_CHECK;
2884 break;
2885 case TARGET_SO_PRIORITY:
2886 optname = SO_PRIORITY;
2887 break;
2888 #ifdef SO_BSDCOMPAT
2889 case TARGET_SO_BSDCOMPAT:
2890 optname = SO_BSDCOMPAT;
2891 break;
2892 #endif
2893 case TARGET_SO_PASSCRED:
2894 optname = SO_PASSCRED;
2895 break;
2896 case TARGET_SO_PASSSEC:
2897 optname = SO_PASSSEC;
2898 break;
2899 case TARGET_SO_TIMESTAMP:
2900 optname = SO_TIMESTAMP;
2901 break;
2902 case TARGET_SO_RCVLOWAT:
2903 optname = SO_RCVLOWAT;
2904 break;
2905 break;
2906 default:
2907 goto unimplemented;
2909 if (optlen < sizeof(uint32_t))
2910 return -TARGET_EINVAL;
2912 if (get_user_u32(val, optval_addr))
2913 return -TARGET_EFAULT;
2914 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2915 break;
2916 default:
2917 unimplemented:
2918 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2919 ret = -TARGET_ENOPROTOOPT;
2921 return ret;
/* do_getsockopt() Must return target values and target errnos.
 *
 * Emulates the guest getsockopt(2).  TARGET_SOL_SOCKET options are
 * remapped to host SO_* names and mostly funnel into the shared
 * 'int_case' tail, which reads the guest's length word, fetches the host
 * value and writes back either 1 or 4 bytes depending on the requested
 * length.  SO_PEERCRED is converted field by field; SOL_IP options also
 * support single-byte reads for small values.
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            /* copy each field with target byte-swapping */
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            /* the socket type constant must be mapped back to target values */
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                /* small buffer and a byte-sized value: write one byte */
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/*
 * Build a host struct iovec array from a target iovec array at
 * 'target_addr', locking each referenced guest buffer into host memory.
 * On failure returns NULL with errno set (errno == 0 for count == 0).
 *
 * Partial-write semantics: a bad buffer address is only a hard EFAULT
 * for the FIRST entry; later bad entries, and everything after them,
 * are recorded as zero-length NULL iovecs so the caller performs a
 * partial transfer, matching kernel behaviour.  The total length is
 * also clamped to max_len.
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* unwind: unlock every buffer locked so far (entry i itself failed) */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
/*
 * Release an iovec array built by lock_iovec(): unlock each guest
 * buffer (copying data back to the guest when 'copy' is set) and free
 * the host-side vector.  The target iovec array is re-read to recover
 * the original guest addresses and lengths.
 */
static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            if (len < 0) {
                /* lock_iovec() stopped here, so we do too */
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    g_free(vec);
}
/*
 * Translate a target socket type (base type plus TARGET_SOCK_CLOEXEC /
 * TARGET_SOCK_NONBLOCK flags) into the host encoding, in place.
 * Returns 0 on success or -TARGET_EINVAL when the host cannot express a
 * requested flag at all (no SOCK_CLOEXEC, or neither SOCK_NONBLOCK nor
 * O_NONBLOCK; the O_NONBLOCK fallback itself is applied later by
 * sock_flags_fixup()).
 */
static inline int target_to_host_sock_type(int *type)
{
    int host_type = 0;
    int target_type = *type;

    switch (target_type & TARGET_SOCK_TYPE_MASK) {
    case TARGET_SOCK_DGRAM:
        host_type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        host_type = SOCK_STREAM;
        break;
    default:
        /* other types share their numbering with the host */
        host_type = target_type & TARGET_SOCK_TYPE_MASK;
        break;
    }
    if (target_type & TARGET_SOCK_CLOEXEC) {
#if defined(SOCK_CLOEXEC)
        host_type |= SOCK_CLOEXEC;
#else
        return -TARGET_EINVAL;
#endif
    }
    if (target_type & TARGET_SOCK_NONBLOCK) {
#if defined(SOCK_NONBLOCK)
        host_type |= SOCK_NONBLOCK;
#elif !defined(O_NONBLOCK)
        return -TARGET_EINVAL;
#endif
    }
    *type = host_type;
    return 0;
}
/* Try to emulate socket type flags after socket creation.
 *
 * On hosts without SOCK_NONBLOCK, apply O_NONBLOCK via fcntl() instead.
 * Returns the fd on success; on fcntl failure the fd is closed and
 * -TARGET_EINVAL returned.
 */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
/*
 * Convert a target sockaddr for an (obsolete) SOCK_PACKET socket to the
 * host layout: the body is copied verbatim and only sa_family is
 * byte-swapped; spkt_protocol is deliberately left alone because it is
 * big-endian on the wire for all hosts.
 */
static abi_long packet_target_to_host_sockaddr(void *host_addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    struct sockaddr *addr = host_addr;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr) {
        return -TARGET_EFAULT;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = tswap16(target_saddr->sa_family);
    /* spkt_protocol is big-endian */

    unlock_user(target_saddr, target_addr, 0);
    return 0;
}
/* fd translator for SOCK_PACKET sockets: only the sockaddr needs conversion */
static TargetFdTrans target_packet_trans = {
    .target_to_host_addr = packet_target_to_host_sockaddr,
};
3296 #ifdef CONFIG_RTNETLINK
3297 static abi_long netlink_route_target_to_host(void *buf, size_t len)
3299 abi_long ret;
3301 ret = target_to_host_nlmsg_route(buf, len);
3302 if (ret < 0) {
3303 return ret;
3306 return len;
3309 static abi_long netlink_route_host_to_target(void *buf, size_t len)
3311 abi_long ret;
3313 ret = host_to_target_nlmsg_route(buf, len);
3314 if (ret < 0) {
3315 return ret;
3318 return len;
/* fd translator for NETLINK_ROUTE sockets: swap messages in both directions */
static TargetFdTrans target_netlink_route_trans = {
    .target_to_host_data = netlink_route_target_to_host,
    .host_to_target_data = netlink_route_host_to_target,
};
3327 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
3329 abi_long ret;
3331 ret = target_to_host_nlmsg_audit(buf, len);
3332 if (ret < 0) {
3333 return ret;
3336 return len;
3339 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
3341 abi_long ret;
3343 ret = host_to_target_nlmsg_audit(buf, len);
3344 if (ret < 0) {
3345 return ret;
3348 return len;
3351 static TargetFdTrans target_netlink_audit_trans = {
3352 .target_to_host_data = netlink_audit_target_to_host,
3353 .host_to_target_data = netlink_audit_host_to_target,
3356 /* do_socket() Must return target values and target errnos. */
3357 static abi_long do_socket(int domain, int type, int protocol)
3359 int target_type = type;
3360 int ret;
3362 ret = target_to_host_sock_type(&type);
3363 if (ret) {
3364 return ret;
3367 if (domain == PF_NETLINK && !(
3368 #ifdef CONFIG_RTNETLINK
3369 protocol == NETLINK_ROUTE ||
3370 #endif
3371 protocol == NETLINK_KOBJECT_UEVENT ||
3372 protocol == NETLINK_AUDIT)) {
3373 return -EPFNOSUPPORT;
3376 if (domain == AF_PACKET ||
3377 (domain == AF_INET && type == SOCK_PACKET)) {
3378 protocol = tswap16(protocol);
3381 ret = get_errno(socket(domain, type, protocol));
3382 if (ret >= 0) {
3383 ret = sock_flags_fixup(ret, target_type);
3384 if (type == SOCK_PACKET) {
3385 /* Manage an obsolete case :
3386 * if socket type is SOCK_PACKET, bind by name
3388 fd_trans_register(ret, &target_packet_trans);
3389 } else if (domain == PF_NETLINK) {
3390 switch (protocol) {
3391 #ifdef CONFIG_RTNETLINK
3392 case NETLINK_ROUTE:
3393 fd_trans_register(ret, &target_netlink_route_trans);
3394 break;
3395 #endif
3396 case NETLINK_KOBJECT_UEVENT:
3397 /* nothing to do: messages are strings */
3398 break;
3399 case NETLINK_AUDIT:
3400 fd_trans_register(ret, &target_netlink_audit_trans);
3401 break;
3402 default:
3403 g_assert_not_reached();
3407 return ret;
3410 /* do_bind() Must return target values and target errnos. */
3411 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3412 socklen_t addrlen)
3414 void *addr;
3415 abi_long ret;
3417 if ((int)addrlen < 0) {
3418 return -TARGET_EINVAL;
3421 addr = alloca(addrlen+1);
3423 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3424 if (ret)
3425 return ret;
3427 return get_errno(bind(sockfd, addr, addrlen));
3430 /* do_connect() Must return target values and target errnos. */
3431 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3432 socklen_t addrlen)
3434 void *addr;
3435 abi_long ret;
3437 if ((int)addrlen < 0) {
3438 return -TARGET_EINVAL;
3441 addr = alloca(addrlen+1);
3443 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3444 if (ret)
3445 return ret;
3447 return get_errno(safe_connect(sockfd, addr, addrlen));
3450 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3451 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3452 int flags, int send)
3454 abi_long ret, len;
3455 struct msghdr msg;
3456 abi_ulong count;
3457 struct iovec *vec;
3458 abi_ulong target_vec;
3460 if (msgp->msg_name) {
3461 msg.msg_namelen = tswap32(msgp->msg_namelen);
3462 msg.msg_name = alloca(msg.msg_namelen+1);
3463 ret = target_to_host_sockaddr(fd, msg.msg_name,
3464 tswapal(msgp->msg_name),
3465 msg.msg_namelen);
3466 if (ret == -TARGET_EFAULT) {
3467 /* For connected sockets msg_name and msg_namelen must
3468 * be ignored, so returning EFAULT immediately is wrong.
3469 * Instead, pass a bad msg_name to the host kernel, and
3470 * let it decide whether to return EFAULT or not.
3472 msg.msg_name = (void *)-1;
3473 } else if (ret) {
3474 goto out2;
3476 } else {
3477 msg.msg_name = NULL;
3478 msg.msg_namelen = 0;
3480 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3481 msg.msg_control = alloca(msg.msg_controllen);
3482 msg.msg_flags = tswap32(msgp->msg_flags);
3484 count = tswapal(msgp->msg_iovlen);
3485 target_vec = tswapal(msgp->msg_iov);
3487 if (count > IOV_MAX) {
3488 /* sendrcvmsg returns a different errno for this condition than
3489 * readv/writev, so we must catch it here before lock_iovec() does.
3491 ret = -TARGET_EMSGSIZE;
3492 goto out2;
3495 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3496 target_vec, count, send);
3497 if (vec == NULL) {
3498 ret = -host_to_target_errno(errno);
3499 goto out2;
3501 msg.msg_iovlen = count;
3502 msg.msg_iov = vec;
3504 if (send) {
3505 if (fd_trans_target_to_host_data(fd)) {
3506 void *host_msg;
3508 host_msg = g_malloc(msg.msg_iov->iov_len);
3509 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3510 ret = fd_trans_target_to_host_data(fd)(host_msg,
3511 msg.msg_iov->iov_len);
3512 if (ret >= 0) {
3513 msg.msg_iov->iov_base = host_msg;
3514 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3516 g_free(host_msg);
3517 } else {
3518 ret = target_to_host_cmsg(&msg, msgp);
3519 if (ret == 0) {
3520 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3523 } else {
3524 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3525 if (!is_error(ret)) {
3526 len = ret;
3527 if (fd_trans_host_to_target_data(fd)) {
3528 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3529 len);
3530 } else {
3531 ret = host_to_target_cmsg(msgp, &msg);
3533 if (!is_error(ret)) {
3534 msgp->msg_namelen = tswap32(msg.msg_namelen);
3535 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3536 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3537 msg.msg_name, msg.msg_namelen);
3538 if (ret) {
3539 goto out;
3543 ret = len;
3548 out:
3549 unlock_iovec(vec, target_vec, count, !send);
3550 out2:
3551 return ret;
3554 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3555 int flags, int send)
3557 abi_long ret;
3558 struct target_msghdr *msgp;
3560 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3561 msgp,
3562 target_msg,
3563 send ? 1 : 0)) {
3564 return -TARGET_EFAULT;
3566 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3567 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3568 return ret;
/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif
3578 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3579 unsigned int vlen, unsigned int flags,
3580 int send)
3582 struct target_mmsghdr *mmsgp;
3583 abi_long ret = 0;
3584 int i;
3586 if (vlen > UIO_MAXIOV) {
3587 vlen = UIO_MAXIOV;
3590 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3591 if (!mmsgp) {
3592 return -TARGET_EFAULT;
3595 for (i = 0; i < vlen; i++) {
3596 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3597 if (is_error(ret)) {
3598 break;
3600 mmsgp[i].msg_len = tswap32(ret);
3601 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3602 if (flags & MSG_WAITFORONE) {
3603 flags |= MSG_DONTWAIT;
3607 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3609 /* Return number of datagrams sent if we sent any at all;
3610 * otherwise return the error.
3612 if (i) {
3613 return i;
3615 return ret;
3618 /* do_accept4() Must return target values and target errnos. */
3619 static abi_long do_accept4(int fd, abi_ulong target_addr,
3620 abi_ulong target_addrlen_addr, int flags)
3622 socklen_t addrlen;
3623 void *addr;
3624 abi_long ret;
3625 int host_flags;
3627 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3629 if (target_addr == 0) {
3630 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3633 /* linux returns EINVAL if addrlen pointer is invalid */
3634 if (get_user_u32(addrlen, target_addrlen_addr))
3635 return -TARGET_EINVAL;
3637 if ((int)addrlen < 0) {
3638 return -TARGET_EINVAL;
3641 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3642 return -TARGET_EINVAL;
3644 addr = alloca(addrlen);
3646 ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
3647 if (!is_error(ret)) {
3648 host_to_target_sockaddr(target_addr, addr, addrlen);
3649 if (put_user_u32(addrlen, target_addrlen_addr))
3650 ret = -TARGET_EFAULT;
3652 return ret;
3655 /* do_getpeername() Must return target values and target errnos. */
3656 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3657 abi_ulong target_addrlen_addr)
3659 socklen_t addrlen;
3660 void *addr;
3661 abi_long ret;
3663 if (get_user_u32(addrlen, target_addrlen_addr))
3664 return -TARGET_EFAULT;
3666 if ((int)addrlen < 0) {
3667 return -TARGET_EINVAL;
3670 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3671 return -TARGET_EFAULT;
3673 addr = alloca(addrlen);
3675 ret = get_errno(getpeername(fd, addr, &addrlen));
3676 if (!is_error(ret)) {
3677 host_to_target_sockaddr(target_addr, addr, addrlen);
3678 if (put_user_u32(addrlen, target_addrlen_addr))
3679 ret = -TARGET_EFAULT;
3681 return ret;
3684 /* do_getsockname() Must return target values and target errnos. */
3685 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3686 abi_ulong target_addrlen_addr)
3688 socklen_t addrlen;
3689 void *addr;
3690 abi_long ret;
3692 if (get_user_u32(addrlen, target_addrlen_addr))
3693 return -TARGET_EFAULT;
3695 if ((int)addrlen < 0) {
3696 return -TARGET_EINVAL;
3699 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3700 return -TARGET_EFAULT;
3702 addr = alloca(addrlen);
3704 ret = get_errno(getsockname(fd, addr, &addrlen));
3705 if (!is_error(ret)) {
3706 host_to_target_sockaddr(target_addr, addr, addrlen);
3707 if (put_user_u32(addrlen, target_addrlen_addr))
3708 ret = -TARGET_EFAULT;
3710 return ret;
3713 /* do_socketpair() Must return target values and target errnos. */
3714 static abi_long do_socketpair(int domain, int type, int protocol,
3715 abi_ulong target_tab_addr)
3717 int tab[2];
3718 abi_long ret;
3720 target_to_host_sock_type(&type);
3722 ret = get_errno(socketpair(domain, type, protocol, tab));
3723 if (!is_error(ret)) {
3724 if (put_user_s32(tab[0], target_tab_addr)
3725 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3726 ret = -TARGET_EFAULT;
3728 return ret;
3731 /* do_sendto() Must return target values and target errnos. */
3732 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3733 abi_ulong target_addr, socklen_t addrlen)
3735 void *addr;
3736 void *host_msg;
3737 void *copy_msg = NULL;
3738 abi_long ret;
3740 if ((int)addrlen < 0) {
3741 return -TARGET_EINVAL;
3744 host_msg = lock_user(VERIFY_READ, msg, len, 1);
3745 if (!host_msg)
3746 return -TARGET_EFAULT;
3747 if (fd_trans_target_to_host_data(fd)) {
3748 copy_msg = host_msg;
3749 host_msg = g_malloc(len);
3750 memcpy(host_msg, copy_msg, len);
3751 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3752 if (ret < 0) {
3753 goto fail;
3756 if (target_addr) {
3757 addr = alloca(addrlen+1);
3758 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3759 if (ret) {
3760 goto fail;
3762 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3763 } else {
3764 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3766 fail:
3767 if (copy_msg) {
3768 g_free(host_msg);
3769 host_msg = copy_msg;
3771 unlock_user(host_msg, msg, 0);
3772 return ret;
3775 /* do_recvfrom() Must return target values and target errnos. */
3776 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3777 abi_ulong target_addr,
3778 abi_ulong target_addrlen)
3780 socklen_t addrlen;
3781 void *addr;
3782 void *host_msg;
3783 abi_long ret;
3785 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3786 if (!host_msg)
3787 return -TARGET_EFAULT;
3788 if (target_addr) {
3789 if (get_user_u32(addrlen, target_addrlen)) {
3790 ret = -TARGET_EFAULT;
3791 goto fail;
3793 if ((int)addrlen < 0) {
3794 ret = -TARGET_EINVAL;
3795 goto fail;
3797 addr = alloca(addrlen);
3798 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3799 addr, &addrlen));
3800 } else {
3801 addr = NULL; /* To keep compiler quiet. */
3802 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3804 if (!is_error(ret)) {
3805 if (fd_trans_host_to_target_data(fd)) {
3806 ret = fd_trans_host_to_target_data(fd)(host_msg, ret);
3808 if (target_addr) {
3809 host_to_target_sockaddr(target_addr, addr, addrlen);
3810 if (put_user_u32(addrlen, target_addrlen)) {
3811 ret = -TARGET_EFAULT;
3812 goto fail;
3815 unlock_user(host_msg, msg, len);
3816 } else {
3817 fail:
3818 unlock_user(host_msg, msg, 0);
3820 return ret;
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos.
 *
 * Multiplexed socket syscall: fetch the per-call argument count from
 * ac[], read the arguments from guest memory at vptr, then dispatch
 * to the matching do_* helper.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned ac[] = { /* number of arguments per call */
        [SOCKOP_socket] = 3,      /* domain, type, protocol */
        [SOCKOP_bind] = 3,        /* sockfd, addr, addrlen */
        [SOCKOP_connect] = 3,     /* sockfd, addr, addrlen */
        [SOCKOP_listen] = 2,      /* sockfd, backlog */
        [SOCKOP_accept] = 3,      /* sockfd, addr, addrlen */
        [SOCKOP_accept4] = 4,     /* sockfd, addr, addrlen, flags */
        [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_socketpair] = 4,  /* domain, type, protocol, tab */
        [SOCKOP_send] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_recv] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_sendto] = 6,      /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_recvfrom] = 6,    /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_shutdown] = 2,    /* sockfd, how */
        [SOCKOP_sendmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_recvmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_sendmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_recvmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_setsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
        [SOCKOP_getsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
    };
    abi_long a[6]; /* max 6 args */

    /* first, collect the arguments in a[] according to ac[] */
    if (num >= 0 && num < ARRAY_SIZE(ac)) {
        unsigned i;
        assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
        for (i = 0; i < ac[num]; ++i) {
            if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* now when we have the args, actually handle the call */
    switch (num) {
    case SOCKOP_socket: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case SOCKOP_bind: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case SOCKOP_connect: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case SOCKOP_listen: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case SOCKOP_accept: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case SOCKOP_getsockname: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case SOCKOP_getpeername: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case SOCKOP_socketpair: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case SOCKOP_send: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_recv: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_shutdown: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case SOCKOP_sendmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case SOCKOP_recvmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_ENOSYS;
    }
}
#endif
3911 #define N_SHM_REGIONS 32
3913 static struct shm_region {
3914 abi_ulong start;
3915 abi_ulong size;
3916 bool in_use;
3917 } shm_regions[N_SHM_REGIONS];
3919 #ifndef TARGET_SEMID64_DS
3920 /* asm-generic version of this struct */
3921 struct target_semid64_ds
3923 struct target_ipc_perm sem_perm;
3924 abi_ulong sem_otime;
3925 #if TARGET_ABI_BITS == 32
3926 abi_ulong __unused1;
3927 #endif
3928 abi_ulong sem_ctime;
3929 #if TARGET_ABI_BITS == 32
3930 abi_ulong __unused2;
3931 #endif
3932 abi_ulong sem_nsems;
3933 abi_ulong __unused3;
3934 abi_ulong __unused4;
3936 #endif
3938 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3939 abi_ulong target_addr)
3941 struct target_ipc_perm *target_ip;
3942 struct target_semid64_ds *target_sd;
3944 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3945 return -TARGET_EFAULT;
3946 target_ip = &(target_sd->sem_perm);
3947 host_ip->__key = tswap32(target_ip->__key);
3948 host_ip->uid = tswap32(target_ip->uid);
3949 host_ip->gid = tswap32(target_ip->gid);
3950 host_ip->cuid = tswap32(target_ip->cuid);
3951 host_ip->cgid = tswap32(target_ip->cgid);
3952 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3953 host_ip->mode = tswap32(target_ip->mode);
3954 #else
3955 host_ip->mode = tswap16(target_ip->mode);
3956 #endif
3957 #if defined(TARGET_PPC)
3958 host_ip->__seq = tswap32(target_ip->__seq);
3959 #else
3960 host_ip->__seq = tswap16(target_ip->__seq);
3961 #endif
3962 unlock_user_struct(target_sd, target_addr, 0);
3963 return 0;
3966 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3967 struct ipc_perm *host_ip)
3969 struct target_ipc_perm *target_ip;
3970 struct target_semid64_ds *target_sd;
3972 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3973 return -TARGET_EFAULT;
3974 target_ip = &(target_sd->sem_perm);
3975 target_ip->__key = tswap32(host_ip->__key);
3976 target_ip->uid = tswap32(host_ip->uid);
3977 target_ip->gid = tswap32(host_ip->gid);
3978 target_ip->cuid = tswap32(host_ip->cuid);
3979 target_ip->cgid = tswap32(host_ip->cgid);
3980 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3981 target_ip->mode = tswap32(host_ip->mode);
3982 #else
3983 target_ip->mode = tswap16(host_ip->mode);
3984 #endif
3985 #if defined(TARGET_PPC)
3986 target_ip->__seq = tswap32(host_ip->__seq);
3987 #else
3988 target_ip->__seq = tswap16(host_ip->__seq);
3989 #endif
3990 unlock_user_struct(target_sd, target_addr, 1);
3991 return 0;
3994 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3995 abi_ulong target_addr)
3997 struct target_semid64_ds *target_sd;
3999 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4000 return -TARGET_EFAULT;
4001 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
4002 return -TARGET_EFAULT;
4003 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
4004 host_sd->sem_otime = tswapal(target_sd->sem_otime);
4005 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
4006 unlock_user_struct(target_sd, target_addr, 0);
4007 return 0;
4010 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
4011 struct semid_ds *host_sd)
4013 struct target_semid64_ds *target_sd;
4015 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4016 return -TARGET_EFAULT;
4017 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
4018 return -TARGET_EFAULT;
4019 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
4020 target_sd->sem_otime = tswapal(host_sd->sem_otime);
4021 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
4022 unlock_user_struct(target_sd, target_addr, 1);
4023 return 0;
/* Guest-side layout of struct seminfo (semctl IPC_INFO/SEM_INFO). */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
4039 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
4040 struct seminfo *host_seminfo)
4042 struct target_seminfo *target_seminfo;
4043 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
4044 return -TARGET_EFAULT;
4045 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
4046 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
4047 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
4048 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
4049 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
4050 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
4051 __put_user(host_seminfo->semume, &target_seminfo->semume);
4052 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
4053 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
4054 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
4055 unlock_user_struct(target_seminfo, target_addr, 1);
4056 return 0;
4059 union semun {
4060 int val;
4061 struct semid_ds *buf;
4062 unsigned short *array;
4063 struct seminfo *__buf;
4066 union target_semun {
4067 int val;
4068 abi_ulong buf;
4069 abi_ulong array;
4070 abi_ulong __buf;
4073 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
4074 abi_ulong target_addr)
4076 int nsems;
4077 unsigned short *array;
4078 union semun semun;
4079 struct semid_ds semid_ds;
4080 int i, ret;
4082 semun.buf = &semid_ds;
4084 ret = semctl(semid, 0, IPC_STAT, semun);
4085 if (ret == -1)
4086 return get_errno(ret);
4088 nsems = semid_ds.sem_nsems;
4090 *host_array = g_try_new(unsigned short, nsems);
4091 if (!*host_array) {
4092 return -TARGET_ENOMEM;
4094 array = lock_user(VERIFY_READ, target_addr,
4095 nsems*sizeof(unsigned short), 1);
4096 if (!array) {
4097 g_free(*host_array);
4098 return -TARGET_EFAULT;
4101 for(i=0; i<nsems; i++) {
4102 __get_user((*host_array)[i], &array[i]);
4104 unlock_user(array, target_addr, 0);
4106 return 0;
4109 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4110 unsigned short **host_array)
4112 int nsems;
4113 unsigned short *array;
4114 union semun semun;
4115 struct semid_ds semid_ds;
4116 int i, ret;
4118 semun.buf = &semid_ds;
4120 ret = semctl(semid, 0, IPC_STAT, semun);
4121 if (ret == -1)
4122 return get_errno(ret);
4124 nsems = semid_ds.sem_nsems;
4126 array = lock_user(VERIFY_WRITE, target_addr,
4127 nsems*sizeof(unsigned short), 0);
4128 if (!array)
4129 return -TARGET_EFAULT;
4131 for(i=0; i<nsems; i++) {
4132 __put_user((*host_array)[i], &array[i]);
4134 g_free(*host_array);
4135 unlock_user(array, target_addr, 1);
4137 return 0;
4140 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4141 abi_ulong target_arg)
4143 union target_semun target_su = { .buf = target_arg };
4144 union semun arg;
4145 struct semid_ds dsarg;
4146 unsigned short *array = NULL;
4147 struct seminfo seminfo;
4148 abi_long ret = -TARGET_EINVAL;
4149 abi_long err;
4150 cmd &= 0xff;
4152 switch( cmd ) {
4153 case GETVAL:
4154 case SETVAL:
4155 /* In 64 bit cross-endian situations, we will erroneously pick up
4156 * the wrong half of the union for the "val" element. To rectify
4157 * this, the entire 8-byte structure is byteswapped, followed by
4158 * a swap of the 4 byte val field. In other cases, the data is
4159 * already in proper host byte order. */
4160 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4161 target_su.buf = tswapal(target_su.buf);
4162 arg.val = tswap32(target_su.val);
4163 } else {
4164 arg.val = target_su.val;
4166 ret = get_errno(semctl(semid, semnum, cmd, arg));
4167 break;
4168 case GETALL:
4169 case SETALL:
4170 err = target_to_host_semarray(semid, &array, target_su.array);
4171 if (err)
4172 return err;
4173 arg.array = array;
4174 ret = get_errno(semctl(semid, semnum, cmd, arg));
4175 err = host_to_target_semarray(semid, target_su.array, &array);
4176 if (err)
4177 return err;
4178 break;
4179 case IPC_STAT:
4180 case IPC_SET:
4181 case SEM_STAT:
4182 err = target_to_host_semid_ds(&dsarg, target_su.buf);
4183 if (err)
4184 return err;
4185 arg.buf = &dsarg;
4186 ret = get_errno(semctl(semid, semnum, cmd, arg));
4187 err = host_to_target_semid_ds(target_su.buf, &dsarg);
4188 if (err)
4189 return err;
4190 break;
4191 case IPC_INFO:
4192 case SEM_INFO:
4193 arg.__buf = &seminfo;
4194 ret = get_errno(semctl(semid, semnum, cmd, arg));
4195 err = host_to_target_seminfo(target_su.__buf, &seminfo);
4196 if (err)
4197 return err;
4198 break;
4199 case IPC_RMID:
4200 case GETPID:
4201 case GETNCNT:
4202 case GETZCNT:
4203 ret = get_errno(semctl(semid, semnum, cmd, NULL));
4204 break;
4207 return ret;
/* Guest-side layout of struct sembuf (semop operations). */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
4216 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4217 abi_ulong target_addr,
4218 unsigned nsops)
4220 struct target_sembuf *target_sembuf;
4221 int i;
4223 target_sembuf = lock_user(VERIFY_READ, target_addr,
4224 nsops*sizeof(struct target_sembuf), 1);
4225 if (!target_sembuf)
4226 return -TARGET_EFAULT;
4228 for(i=0; i<nsops; i++) {
4229 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4230 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4231 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4234 unlock_user(target_sembuf, target_addr, 0);
4236 return 0;
4239 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
4241 struct sembuf sops[nsops];
4243 if (target_to_host_sembuf(sops, ptr, nsops))
4244 return -TARGET_EFAULT;
4246 return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
4249 struct target_msqid_ds
4251 struct target_ipc_perm msg_perm;
4252 abi_ulong msg_stime;
4253 #if TARGET_ABI_BITS == 32
4254 abi_ulong __unused1;
4255 #endif
4256 abi_ulong msg_rtime;
4257 #if TARGET_ABI_BITS == 32
4258 abi_ulong __unused2;
4259 #endif
4260 abi_ulong msg_ctime;
4261 #if TARGET_ABI_BITS == 32
4262 abi_ulong __unused3;
4263 #endif
4264 abi_ulong __msg_cbytes;
4265 abi_ulong msg_qnum;
4266 abi_ulong msg_qbytes;
4267 abi_ulong msg_lspid;
4268 abi_ulong msg_lrpid;
4269 abi_ulong __unused4;
4270 abi_ulong __unused5;
4273 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4274 abi_ulong target_addr)
4276 struct target_msqid_ds *target_md;
4278 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4279 return -TARGET_EFAULT;
4280 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4281 return -TARGET_EFAULT;
4282 host_md->msg_stime = tswapal(target_md->msg_stime);
4283 host_md->msg_rtime = tswapal(target_md->msg_rtime);
4284 host_md->msg_ctime = tswapal(target_md->msg_ctime);
4285 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4286 host_md->msg_qnum = tswapal(target_md->msg_qnum);
4287 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4288 host_md->msg_lspid = tswapal(target_md->msg_lspid);
4289 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4290 unlock_user_struct(target_md, target_addr, 0);
4291 return 0;
4294 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4295 struct msqid_ds *host_md)
4297 struct target_msqid_ds *target_md;
4299 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4300 return -TARGET_EFAULT;
4301 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4302 return -TARGET_EFAULT;
4303 target_md->msg_stime = tswapal(host_md->msg_stime);
4304 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4305 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4306 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4307 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4308 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4309 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4310 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4311 unlock_user_struct(target_md, target_addr, 1);
4312 return 0;
/* Guest-side layout of struct msginfo (msgctl IPC_INFO/MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4326 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4327 struct msginfo *host_msginfo)
4329 struct target_msginfo *target_msginfo;
4330 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4331 return -TARGET_EFAULT;
4332 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4333 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4334 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4335 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4336 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4337 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4338 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4339 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4340 unlock_user_struct(target_msginfo, target_addr, 1);
4341 return 0;
4344 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4346 struct msqid_ds dsarg;
4347 struct msginfo msginfo;
4348 abi_long ret = -TARGET_EINVAL;
4350 cmd &= 0xff;
4352 switch (cmd) {
4353 case IPC_STAT:
4354 case IPC_SET:
4355 case MSG_STAT:
4356 if (target_to_host_msqid_ds(&dsarg,ptr))
4357 return -TARGET_EFAULT;
4358 ret = get_errno(msgctl(msgid, cmd, &dsarg));
4359 if (host_to_target_msqid_ds(ptr,&dsarg))
4360 return -TARGET_EFAULT;
4361 break;
4362 case IPC_RMID:
4363 ret = get_errno(msgctl(msgid, cmd, NULL));
4364 break;
4365 case IPC_INFO:
4366 case MSG_INFO:
4367 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4368 if (host_to_target_msginfo(ptr, &msginfo))
4369 return -TARGET_EFAULT;
4370 break;
4373 return ret;
4376 struct target_msgbuf {
4377 abi_long mtype;
4378 char mtext[1];
4381 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4382 ssize_t msgsz, int msgflg)
4384 struct target_msgbuf *target_mb;
4385 struct msgbuf *host_mb;
4386 abi_long ret = 0;
4388 if (msgsz < 0) {
4389 return -TARGET_EINVAL;
4392 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4393 return -TARGET_EFAULT;
4394 host_mb = g_try_malloc(msgsz + sizeof(long));
4395 if (!host_mb) {
4396 unlock_user_struct(target_mb, msgp, 0);
4397 return -TARGET_ENOMEM;
4399 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4400 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4401 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4402 g_free(host_mb);
4403 unlock_user_struct(target_mb, msgp, 0);
4405 return ret;
4408 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4409 ssize_t msgsz, abi_long msgtyp,
4410 int msgflg)
4412 struct target_msgbuf *target_mb;
4413 char *target_mtext;
4414 struct msgbuf *host_mb;
4415 abi_long ret = 0;
4417 if (msgsz < 0) {
4418 return -TARGET_EINVAL;
4421 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4422 return -TARGET_EFAULT;
4424 host_mb = g_try_malloc(msgsz + sizeof(long));
4425 if (!host_mb) {
4426 ret = -TARGET_ENOMEM;
4427 goto end;
4429 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4431 if (ret > 0) {
4432 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4433 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4434 if (!target_mtext) {
4435 ret = -TARGET_EFAULT;
4436 goto end;
4438 memcpy(target_mb->mtext, host_mb->mtext, ret);
4439 unlock_user(target_mtext, target_mtext_addr, ret);
4442 target_mb->mtype = tswapal(host_mb->mtype);
4444 end:
4445 if (target_mb)
4446 unlock_user_struct(target_mb, msgp, 1);
4447 g_free(host_mb);
4448 return ret;
4451 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4452 abi_ulong target_addr)
4454 struct target_shmid_ds *target_sd;
4456 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4457 return -TARGET_EFAULT;
4458 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4459 return -TARGET_EFAULT;
4460 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4461 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4462 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4463 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4464 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4465 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4466 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4467 unlock_user_struct(target_sd, target_addr, 0);
4468 return 0;
4471 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4472 struct shmid_ds *host_sd)
4474 struct target_shmid_ds *target_sd;
4476 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4477 return -TARGET_EFAULT;
4478 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4479 return -TARGET_EFAULT;
4480 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4481 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4482 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4483 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4484 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4485 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4486 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4487 unlock_user_struct(target_sd, target_addr, 1);
4488 return 0;
/* Target-ABI layout of the shminfo structure filled in for
 * shmctl(IPC_INFO).  Field order must match the guest kernel ABI. */
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};
4499 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4500 struct shminfo *host_shminfo)
4502 struct target_shminfo *target_shminfo;
4503 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4504 return -TARGET_EFAULT;
4505 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4506 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4507 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4508 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4509 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4510 unlock_user_struct(target_shminfo, target_addr, 1);
4511 return 0;
/* Target-ABI layout of the shm_info structure filled in for
 * shmctl(SHM_INFO).  Field order must match the guest kernel ABI. */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
4523 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4524 struct shm_info *host_shm_info)
4526 struct target_shm_info *target_shm_info;
4527 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4528 return -TARGET_EFAULT;
4529 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4530 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4531 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4532 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4533 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4534 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4535 unlock_user_struct(target_shm_info, target_addr, 1);
4536 return 0;
4539 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4541 struct shmid_ds dsarg;
4542 struct shminfo shminfo;
4543 struct shm_info shm_info;
4544 abi_long ret = -TARGET_EINVAL;
4546 cmd &= 0xff;
4548 switch(cmd) {
4549 case IPC_STAT:
4550 case IPC_SET:
4551 case SHM_STAT:
4552 if (target_to_host_shmid_ds(&dsarg, buf))
4553 return -TARGET_EFAULT;
4554 ret = get_errno(shmctl(shmid, cmd, &dsarg));
4555 if (host_to_target_shmid_ds(buf, &dsarg))
4556 return -TARGET_EFAULT;
4557 break;
4558 case IPC_INFO:
4559 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4560 if (host_to_target_shminfo(buf, &shminfo))
4561 return -TARGET_EFAULT;
4562 break;
4563 case SHM_INFO:
4564 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4565 if (host_to_target_shm_info(buf, &shm_info))
4566 return -TARGET_EFAULT;
4567 break;
4568 case IPC_RMID:
4569 case SHM_LOCK:
4570 case SHM_UNLOCK:
4571 ret = get_errno(shmctl(shmid, cmd, NULL));
4572 break;
4575 return ret;
/* Emulate shmat(2): attach a SysV shared memory segment into the guest
 * address space and record it in shm_regions[] so do_shmdt() can later
 * clear the page flags.  Returns the guest attach address or a target
 * errno.  Must be called with the segment length discoverable via
 * shmctl(IPC_STAT).
 */
static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    /* hold the mmap lock: we are updating the guest memory map */
    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* no address requested: pick a free guest VMA ourselves */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP because mmap_find_vma may hand back a range we
             * have already reserved in the host */
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    /* mark the attached range valid (and writable unless SHM_RDONLY) */
    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* remember the region so do_shmdt() can undo the page flags;
     * NOTE: if all slots are in use the attach is silently not recorded */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
4632 static inline abi_long do_shmdt(abi_ulong shmaddr)
4634 int i;
4636 for (i = 0; i < N_SHM_REGIONS; ++i) {
4637 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4638 shm_regions[i].in_use = false;
4639 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4640 break;
4644 return get_errno(shmdt(g2h(shmaddr)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
/* Demultiplexer for the legacy ipc(2) syscall: 'call' selects the
 * actual SysV IPC operation (with an ABI version in the high 16 bits)
 * and the remaining arguments are interpreted per-operation. */
static abi_long do_ipc(unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* old-style ABI: ptr points at a {msgp, msgtyp} pair */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* the attach address is returned via *third, not the
             * syscall result */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
/* kernel structure types definitions */
/* X-macro machinery: syscall_types.h is included twice.  The first
 * pass turns each STRUCT()/STRUCT_SPECIAL() entry into a STRUCT_<name>
 * enum constant; the second pass emits the thunk type-description
 * array struct_<name>_def[] for each regular STRUCT() entry. */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL
typedef struct IOCTLEntry IOCTLEntry;

/* Handler signature for ioctls that need custom marshalling beyond the
 * generic thunk conversion in do_ioctl(). */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

/* One entry in the ioctl translation table (generated from ioctls.h). */
struct IOCTLEntry {
    int target_cmd;          /* ioctl request number in the target ABI */
    unsigned int host_cmd;   /* corresponding host request number */
    const char *name;        /* for logging */
    int access;              /* IOC_R / IOC_W / IOC_RW data direction */
    do_ioctl_fn *do_ioctl;   /* non-NULL: custom handler overrides thunking */
    const argtype arg_type[5];  /* thunk description of the argument */
};

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

/* upper bound for the generic bounce buffer used by do_ioctl() */
#define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

/* Custom handler for FS_IOC_FIEMAP: the argument is a variable-length
 * struct (header plus fm_extent_count extents), so the generic
 * fixed-size thunking in do_ioctl() cannot handle it. */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4879 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4880 int fd, int cmd, abi_long arg)
4882 const argtype *arg_type = ie->arg_type;
4883 int target_size;
4884 void *argptr;
4885 int ret;
4886 struct ifconf *host_ifconf;
4887 uint32_t outbufsz;
4888 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4889 int target_ifreq_size;
4890 int nb_ifreq;
4891 int free_buf = 0;
4892 int i;
4893 int target_ifc_len;
4894 abi_long target_ifc_buf;
4895 int host_ifc_len;
4896 char *host_ifc_buf;
4898 assert(arg_type[0] == TYPE_PTR);
4899 assert(ie->access == IOC_RW);
4901 arg_type++;
4902 target_size = thunk_type_size(arg_type, 0);
4904 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4905 if (!argptr)
4906 return -TARGET_EFAULT;
4907 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4908 unlock_user(argptr, arg, 0);
4910 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4911 target_ifc_len = host_ifconf->ifc_len;
4912 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4914 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4915 nb_ifreq = target_ifc_len / target_ifreq_size;
4916 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4918 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4919 if (outbufsz > MAX_STRUCT_SIZE) {
4920 /* We can't fit all the extents into the fixed size buffer.
4921 * Allocate one that is large enough and use it instead.
4923 host_ifconf = malloc(outbufsz);
4924 if (!host_ifconf) {
4925 return -TARGET_ENOMEM;
4927 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4928 free_buf = 1;
4930 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
4932 host_ifconf->ifc_len = host_ifc_len;
4933 host_ifconf->ifc_buf = host_ifc_buf;
4935 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4936 if (!is_error(ret)) {
4937 /* convert host ifc_len to target ifc_len */
4939 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4940 target_ifc_len = nb_ifreq * target_ifreq_size;
4941 host_ifconf->ifc_len = target_ifc_len;
4943 /* restore target ifc_buf */
4945 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4947 /* copy struct ifconf to target user */
4949 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4950 if (!argptr)
4951 return -TARGET_EFAULT;
4952 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4953 unlock_user(argptr, arg, target_size);
4955 /* copy ifreq[] to target user */
4957 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4958 for (i = 0; i < nb_ifreq ; i++) {
4959 thunk_convert(argptr + i * target_ifreq_size,
4960 host_ifc_buf + i * sizeof(struct ifreq),
4961 ifreq_arg_type, THUNK_TARGET);
4963 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4966 if (free_buf) {
4967 free(host_ifconf);
4970 return ret;
4973 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4974 int cmd, abi_long arg)
4976 void *argptr;
4977 struct dm_ioctl *host_dm;
4978 abi_long guest_data;
4979 uint32_t guest_data_size;
4980 int target_size;
4981 const argtype *arg_type = ie->arg_type;
4982 abi_long ret;
4983 void *big_buf = NULL;
4984 char *host_data;
4986 arg_type++;
4987 target_size = thunk_type_size(arg_type, 0);
4988 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4989 if (!argptr) {
4990 ret = -TARGET_EFAULT;
4991 goto out;
4993 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4994 unlock_user(argptr, arg, 0);
4996 /* buf_temp is too small, so fetch things into a bigger buffer */
4997 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4998 memcpy(big_buf, buf_temp, target_size);
4999 buf_temp = big_buf;
5000 host_dm = big_buf;
5002 guest_data = arg + host_dm->data_start;
5003 if ((guest_data - arg) < 0) {
5004 ret = -EINVAL;
5005 goto out;
5007 guest_data_size = host_dm->data_size - host_dm->data_start;
5008 host_data = (char*)host_dm + host_dm->data_start;
5010 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5011 switch (ie->host_cmd) {
5012 case DM_REMOVE_ALL:
5013 case DM_LIST_DEVICES:
5014 case DM_DEV_CREATE:
5015 case DM_DEV_REMOVE:
5016 case DM_DEV_SUSPEND:
5017 case DM_DEV_STATUS:
5018 case DM_DEV_WAIT:
5019 case DM_TABLE_STATUS:
5020 case DM_TABLE_CLEAR:
5021 case DM_TABLE_DEPS:
5022 case DM_LIST_VERSIONS:
5023 /* no input data */
5024 break;
5025 case DM_DEV_RENAME:
5026 case DM_DEV_SET_GEOMETRY:
5027 /* data contains only strings */
5028 memcpy(host_data, argptr, guest_data_size);
5029 break;
5030 case DM_TARGET_MSG:
5031 memcpy(host_data, argptr, guest_data_size);
5032 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5033 break;
5034 case DM_TABLE_LOAD:
5036 void *gspec = argptr;
5037 void *cur_data = host_data;
5038 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5039 int spec_size = thunk_type_size(arg_type, 0);
5040 int i;
5042 for (i = 0; i < host_dm->target_count; i++) {
5043 struct dm_target_spec *spec = cur_data;
5044 uint32_t next;
5045 int slen;
5047 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5048 slen = strlen((char*)gspec + spec_size) + 1;
5049 next = spec->next;
5050 spec->next = sizeof(*spec) + slen;
5051 strcpy((char*)&spec[1], gspec + spec_size);
5052 gspec += next;
5053 cur_data += spec->next;
5055 break;
5057 default:
5058 ret = -TARGET_EINVAL;
5059 unlock_user(argptr, guest_data, 0);
5060 goto out;
5062 unlock_user(argptr, guest_data, 0);
5064 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5065 if (!is_error(ret)) {
5066 guest_data = arg + host_dm->data_start;
5067 guest_data_size = host_dm->data_size - host_dm->data_start;
5068 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5069 switch (ie->host_cmd) {
5070 case DM_REMOVE_ALL:
5071 case DM_DEV_CREATE:
5072 case DM_DEV_REMOVE:
5073 case DM_DEV_RENAME:
5074 case DM_DEV_SUSPEND:
5075 case DM_DEV_STATUS:
5076 case DM_TABLE_LOAD:
5077 case DM_TABLE_CLEAR:
5078 case DM_TARGET_MSG:
5079 case DM_DEV_SET_GEOMETRY:
5080 /* no return data */
5081 break;
5082 case DM_LIST_DEVICES:
5084 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5085 uint32_t remaining_data = guest_data_size;
5086 void *cur_data = argptr;
5087 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5088 int nl_size = 12; /* can't use thunk_size due to alignment */
5090 while (1) {
5091 uint32_t next = nl->next;
5092 if (next) {
5093 nl->next = nl_size + (strlen(nl->name) + 1);
5095 if (remaining_data < nl->next) {
5096 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5097 break;
5099 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5100 strcpy(cur_data + nl_size, nl->name);
5101 cur_data += nl->next;
5102 remaining_data -= nl->next;
5103 if (!next) {
5104 break;
5106 nl = (void*)nl + next;
5108 break;
5110 case DM_DEV_WAIT:
5111 case DM_TABLE_STATUS:
5113 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5114 void *cur_data = argptr;
5115 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5116 int spec_size = thunk_type_size(arg_type, 0);
5117 int i;
5119 for (i = 0; i < host_dm->target_count; i++) {
5120 uint32_t next = spec->next;
5121 int slen = strlen((char*)&spec[1]) + 1;
5122 spec->next = (cur_data - argptr) + spec_size + slen;
5123 if (guest_data_size < spec->next) {
5124 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5125 break;
5127 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5128 strcpy(cur_data + spec_size, (char*)&spec[1]);
5129 cur_data = argptr + spec->next;
5130 spec = (void*)host_dm + host_dm->data_start + next;
5132 break;
5134 case DM_TABLE_DEPS:
5136 void *hdata = (void*)host_dm + host_dm->data_start;
5137 int count = *(uint32_t*)hdata;
5138 uint64_t *hdev = hdata + 8;
5139 uint64_t *gdev = argptr + 8;
5140 int i;
5142 *(uint32_t*)argptr = tswap32(count);
5143 for (i = 0; i < count; i++) {
5144 *gdev = tswap64(*hdev);
5145 gdev++;
5146 hdev++;
5148 break;
5150 case DM_LIST_VERSIONS:
5152 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5153 uint32_t remaining_data = guest_data_size;
5154 void *cur_data = argptr;
5155 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5156 int vers_size = thunk_type_size(arg_type, 0);
5158 while (1) {
5159 uint32_t next = vers->next;
5160 if (next) {
5161 vers->next = vers_size + (strlen(vers->name) + 1);
5163 if (remaining_data < vers->next) {
5164 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5165 break;
5167 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5168 strcpy(cur_data + vers_size, vers->name);
5169 cur_data += vers->next;
5170 remaining_data -= vers->next;
5171 if (!next) {
5172 break;
5174 vers = (void*)vers + next;
5176 break;
5178 default:
5179 unlock_user(argptr, guest_data, 0);
5180 ret = -TARGET_EINVAL;
5181 goto out;
5183 unlock_user(argptr, guest_data, guest_data_size);
5185 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5186 if (!argptr) {
5187 ret = -TARGET_EFAULT;
5188 goto out;
5190 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5191 unlock_user(argptr, arg, target_size);
5193 out:
5194 g_free(big_buf);
5195 return ret;
5198 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5199 int cmd, abi_long arg)
5201 void *argptr;
5202 int target_size;
5203 const argtype *arg_type = ie->arg_type;
5204 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5205 abi_long ret;
5207 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5208 struct blkpg_partition host_part;
5210 /* Read and convert blkpg */
5211 arg_type++;
5212 target_size = thunk_type_size(arg_type, 0);
5213 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5214 if (!argptr) {
5215 ret = -TARGET_EFAULT;
5216 goto out;
5218 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5219 unlock_user(argptr, arg, 0);
5221 switch (host_blkpg->op) {
5222 case BLKPG_ADD_PARTITION:
5223 case BLKPG_DEL_PARTITION:
5224 /* payload is struct blkpg_partition */
5225 break;
5226 default:
5227 /* Unknown opcode */
5228 ret = -TARGET_EINVAL;
5229 goto out;
5232 /* Read and convert blkpg->data */
5233 arg = (abi_long)(uintptr_t)host_blkpg->data;
5234 target_size = thunk_type_size(part_arg_type, 0);
5235 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5236 if (!argptr) {
5237 ret = -TARGET_EFAULT;
5238 goto out;
5240 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5241 unlock_user(argptr, arg, 0);
5243 /* Swizzle the data pointer to our local copy and call! */
5244 host_blkpg->data = &host_part;
5245 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5247 out:
5248 return ret;
5251 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5252 int fd, int cmd, abi_long arg)
5254 const argtype *arg_type = ie->arg_type;
5255 const StructEntry *se;
5256 const argtype *field_types;
5257 const int *dst_offsets, *src_offsets;
5258 int target_size;
5259 void *argptr;
5260 abi_ulong *target_rt_dev_ptr;
5261 unsigned long *host_rt_dev_ptr;
5262 abi_long ret;
5263 int i;
5265 assert(ie->access == IOC_W);
5266 assert(*arg_type == TYPE_PTR);
5267 arg_type++;
5268 assert(*arg_type == TYPE_STRUCT);
5269 target_size = thunk_type_size(arg_type, 0);
5270 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5271 if (!argptr) {
5272 return -TARGET_EFAULT;
5274 arg_type++;
5275 assert(*arg_type == (int)STRUCT_rtentry);
5276 se = struct_entries + *arg_type++;
5277 assert(se->convert[0] == NULL);
5278 /* convert struct here to be able to catch rt_dev string */
5279 field_types = se->field_types;
5280 dst_offsets = se->field_offsets[THUNK_HOST];
5281 src_offsets = se->field_offsets[THUNK_TARGET];
5282 for (i = 0; i < se->nb_fields; i++) {
5283 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5284 assert(*field_types == TYPE_PTRVOID);
5285 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5286 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5287 if (*target_rt_dev_ptr != 0) {
5288 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5289 tswapal(*target_rt_dev_ptr));
5290 if (!*host_rt_dev_ptr) {
5291 unlock_user(argptr, arg, 0);
5292 return -TARGET_EFAULT;
5294 } else {
5295 *host_rt_dev_ptr = 0;
5297 field_types++;
5298 continue;
5300 field_types = thunk_convert(buf_temp + dst_offsets[i],
5301 argptr + src_offsets[i],
5302 field_types, THUNK_HOST);
5304 unlock_user(argptr, arg, 0);
5306 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5307 if (*host_rt_dev_ptr != 0) {
5308 unlock_user((void *)*host_rt_dev_ptr,
5309 *target_rt_dev_ptr, 0);
5311 return ret;
5314 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5315 int fd, int cmd, abi_long arg)
5317 int sig = target_to_host_signal(arg);
5318 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
/* The ioctl translation table, generated from ioctls.h.  IOCTL()
 * entries use the generic thunk path in do_ioctl(); IOCTL_SPECIAL()
 * entries supply a custom do_ioctl_fn handler.  Terminated by a
 * zero target_cmd sentinel. */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
#include "ioctls.h"
    { 0, 0, },
};
/* ??? Implement proper locking for ioctls.  */
/* do_ioctl() Must return target values and target errnos. */
/* Generic ioctl dispatcher: look the request up in ioctl_entries[],
 * delegate to a custom handler if one is registered, otherwise
 * marshal the argument according to its thunk type description and
 * declared access direction. */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* linear scan of the (sentinel-terminated) translation table */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    if (ie->do_ioctl) {
        /* this request needs custom marshalling */
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* argument passed through by value */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* kernel writes: convert host buffer back to target on success */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* kernel reads: convert target buffer to host before the call */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* kernel reads and writes: convert both ways */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
5417 static const bitmask_transtbl iflag_tbl[] = {
5418 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5419 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5420 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5421 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5422 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5423 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5424 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5425 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5426 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5427 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5428 { TARGET_IXON, TARGET_IXON, IXON, IXON },
5429 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5430 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5431 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5432 { 0, 0, 0, 0 }
5435 static const bitmask_transtbl oflag_tbl[] = {
5436 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5437 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5438 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5439 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5440 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5441 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5442 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5443 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5444 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5445 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5446 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5447 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5448 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5449 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5450 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5451 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5452 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5453 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5454 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5455 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5456 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5457 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5458 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5459 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5460 { 0, 0, 0, 0 }
5463 static const bitmask_transtbl cflag_tbl[] = {
5464 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5465 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5466 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5467 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5468 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5469 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5470 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5471 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5472 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5473 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5474 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5475 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5476 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5477 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5478 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5479 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5480 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5481 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5482 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5483 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5484 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5485 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5486 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5487 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5488 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5489 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5490 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5491 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5492 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5493 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5494 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5495 { 0, 0, 0, 0 }
5498 static const bitmask_transtbl lflag_tbl[] = {
5499 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5500 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5501 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5502 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5503 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5504 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5505 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5506 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5507 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5508 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5509 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5510 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5511 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5512 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5513 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5514 { 0, 0, 0, 0 }
/* Convert a guest struct target_termios into a host struct
 * host_termios: translate the four flag words through the bitmask
 * tables above and remap the control-character array indices. */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    /* c_cc index values differ between target and host; copy each
     * entry explicitly and leave unmapped slots zeroed */
    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
/* Inverse of target_to_host_termios: convert a host struct termios at
 * *src into the guest representation at *dst.  */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    /* Translate bits first, then store target-endian. */
    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    /* Target c_cc slots with no host counterpart stay zeroed. */
    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
/* Thunk descriptor for struct termios: conversion callbacks plus the
 * size/alignment of the target and host layouts respectively.  */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};
5593 static bitmask_transtbl mmap_flags_tbl[] = {
5594 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5595 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5596 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5597 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
5598 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
5599 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
5600 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
5601 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5602 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
5603 MAP_NORESERVE },
5604 { 0, 0, 0, 0 }
#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;

/* modify_ldt(func == 0): copy up to bytecount bytes of the emulated LDT
 * out to guest address ptr.  Returns the number of bytes copied, 0 when
 * no LDT has been allocated yet, or -TARGET_EFAULT on a bad pointer.  */
static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this by byteswapped? */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}
/* XXX: add locking support */
/* modify_ldt(func == 1 or 0x11): install one LDT descriptor from the
 * guest's struct user_desc at ptr.  oldmode selects the legacy entry
 * encoding (no "useable" bit).  The descriptor packing below mirrors
 * the Linux kernel's fill_ldt()/modify_ldt implementation.  */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the flags word (same layout as the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    /* contents == 3 would be a 64-bit code segment; only valid in the
     * new interface and only when marked not-present.  */
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0		&&
             read_exec_only == 1	&&
             seg_32bit == 0		&&
             limit_in_pages == 0	&&
             seg_not_present == 1	&&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the two 32-bit halves of the x86 segment descriptor. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
5723 /* specific and weird i386 syscalls */
5724 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5725 unsigned long bytecount)
5727 abi_long ret;
5729 switch (func) {
5730 case 0:
5731 ret = read_ldt(ptr, bytecount);
5732 break;
5733 case 1:
5734 ret = write_ldt(env, ptr, bytecount, 1);
5735 break;
5736 case 0x11:
5737 ret = write_ldt(env, ptr, bytecount, 0);
5738 break;
5739 default:
5740 ret = -TARGET_ENOSYS;
5741 break;
5743 return ret;
5746 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/* set_thread_area(2): install a TLS descriptor into the emulated GDT.
 * An entry_number of -1 asks us to pick a free TLS slot and report it
 * back to the guest.  Descriptor packing mirrors the Linux kernel.  */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    /* -1 means "allocate a free TLS slot"; write the chosen slot back. */
    if (ldt_info.entry_number == -1) {
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* Unpack the flags word (same layout as the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the two 32-bit halves of the x86 segment descriptor. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
/* get_thread_area(2): read back a TLS descriptor from the emulated GDT
 * and unpack it into the guest's struct user_desc at ptr.  Inverse of
 * the packing done in do_set_thread_area().  */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);
    
    /* Extract the individual descriptor fields from the high word. */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    /* Re-pack into the user_desc flags layout. */
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
    base_addr = (entry_1 >> 16) | 
        (entry_2 & 0xff000000) | 
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
5878 #endif /* TARGET_I386 && TARGET_ABI32 */
5880 #ifndef TARGET_ABI32
/* x86-64 arch_prctl(2): get or set the FS/GS segment base.  For SET,
 * addr is the new base value; for GET, addr is a guest pointer the
 * current base is stored through.  */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    abi_long ret = 0;
    abi_ulong val;
    int idx;

    switch(code) {
    case TARGET_ARCH_SET_GS:
    case TARGET_ARCH_SET_FS:
        if (code == TARGET_ARCH_SET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        /* Load a null selector, then set the base explicitly. */
        cpu_x86_load_seg(env, idx, 0);
        env->segs[idx].base = addr;
        break;
    case TARGET_ARCH_GET_GS:
    case TARGET_ARCH_GET_FS:
        if (code == TARGET_ARCH_GET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        val = env->segs[idx].base;
        if (put_user(val, addr, abi_ulong))
            ret = -TARGET_EFAULT;
        break;
    default:
        ret = -TARGET_EINVAL;
        break;
    }
    return ret;
}
5913 #endif
5915 #endif /* defined(TARGET_I386) */
/* Stack size for threads created via clone(CLONE_VM). */
#define NEW_STACK_SIZE 0x40000


static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Hand-off state between do_fork() and the child thread in clone_func():
 * the parent fills in env and the tid pointers, the child reports its
 * tid and signals cond once it is initialized.  */
typedef struct {
    CPUArchState *env;
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    pthread_t thread;
    uint32_t tid;
    abi_ulong child_tidptr;
    abi_ulong parent_tidptr;
    sigset_t sigmask;     /* parent's signal mask, restored in the child */
} new_thread_info;
/* Entry point of a new guest thread created by do_fork(CLONE_VM):
 * publish the tid, unblock signals, signal the waiting parent, then
 * enter the CPU emulation loop.  Never returns.  */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = gettid();
    cpu->host_tid = info->tid;
    task_settid(ts);
    /* Store the tid where CLONE_CHILD_SETTID/CLONE_PARENT_SETTID asked. */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finshed initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
5965 /* do_fork() Must return host values and target errnos (unlike most
5966 do_*() functions). */
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions).
   Implements clone(2): with CLONE_VM a new host thread is created via
   pthreads; otherwise the request is treated as a fork().  vfork is
   emulated as a plain fork.  */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    unsigned int nptl_flags;
    sigset_t sigmask;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        ts = g_new0(TaskState, 1);
        init_task_state(ts);
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_cpu = ENV_GET_CPU(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;
        /* Keep the NPTL flags for ourselves but don't pass them on. */
        nptl_flags = flags;
        flags &= ~CLONE_NPTL_FLAGS2;

        if (nptl_flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (nptl_flags & CLONE_SETTLS)
            cpu_set_tls (new_env, newtls);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (nptl_flags & CLONE_CHILD_SETTID)
            info.child_tidptr = child_tidptr;
        if (nptl_flags & CLONE_PARENT_SETTID)
            info.parent_tidptr = parent_tidptr;

        /* NOTE(review): the attr-call return values are overwritten by
         * each subsequent assignment; only pthread_create's is used.  */
        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(ret, parent_tidptr);
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            rcu_after_fork();
            cpu_clone_regs(env, newsp);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            fork_end(0);
        }
    }
    return ret;
}
6087 /* warning : doesn't handle linux specific flags... */
/* warning : doesn't handle linux specific flags... */
/* Map a target fcntl(2) command number to the host's.  Lock commands
 * are mapped to the host *64 variants so a struct flock64 can be used
 * uniformly.  Returns -TARGET_EINVAL for unknown commands.  */
static int target_to_host_fcntl_cmd(int cmd)
{
    switch(cmd) {
	case TARGET_F_DUPFD:
	case TARGET_F_GETFD:
	case TARGET_F_SETFD:
	case TARGET_F_GETFL:
	case TARGET_F_SETFL:
            return cmd;
        case TARGET_F_GETLK:
            return F_GETLK64;
        case TARGET_F_SETLK:
            return F_SETLK64;
        case TARGET_F_SETLKW:
            return F_SETLKW64;
	case TARGET_F_GETOWN:
	    return F_GETOWN;
	case TARGET_F_SETOWN:
	    return F_SETOWN;
	case TARGET_F_GETSIG:
	    return F_GETSIG;
	case TARGET_F_SETSIG:
	    return F_SETSIG;
#if TARGET_ABI_BITS == 32
        case TARGET_F_GETLK64:
	    return F_GETLK64;
	case TARGET_F_SETLK64:
	    return F_SETLK64;
	case TARGET_F_SETLKW64:
	    return F_SETLKW64;
#endif
        case TARGET_F_SETLEASE:
            return F_SETLEASE;
        case TARGET_F_GETLEASE:
            return F_GETLEASE;
#ifdef F_DUPFD_CLOEXEC
        case TARGET_F_DUPFD_CLOEXEC:
            return F_DUPFD_CLOEXEC;
#endif
        case TARGET_F_NOTIFY:
            return F_NOTIFY;
#ifdef F_GETOWN_EX
        case TARGET_F_GETOWN_EX:
            return F_GETOWN_EX;
#endif
#ifdef F_SETOWN_EX
        case TARGET_F_SETOWN_EX:
            return F_SETOWN_EX;
#endif
#ifdef F_SETPIPE_SZ
        case TARGET_F_SETPIPE_SZ:
            return F_SETPIPE_SZ;
        case TARGET_F_GETPIPE_SZ:
            return F_GETPIPE_SZ;
#endif
        default:
            return -TARGET_EINVAL;
    }
    /* Unreachable: every switch case above returns. */
    return -TARGET_EINVAL;
}
/* Translation table for flock l_type values; the -1 masks select all
 * bits, so each entry maps one exact target value to one host value. */
#define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
static const bitmask_transtbl flock_tbl[] = {
    TRANSTBL_CONVERT(F_RDLCK),
    TRANSTBL_CONVERT(F_WRLCK),
    TRANSTBL_CONVERT(F_UNLCK),
    TRANSTBL_CONVERT(F_EXLCK),
    TRANSTBL_CONVERT(F_SHLCK),
    { 0, 0, 0, 0 }
};
/* Read a guest struct flock and convert it into a host struct flock64.
 * Returns 0 or -TARGET_EFAULT on a bad guest pointer.  */
static inline abi_long copy_from_user_flock(struct flock64 *fl,
                                            abi_ulong target_flock_addr)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}
/* Convert a host struct flock64 and write it to a guest struct flock.
 * Returns 0 or -TARGET_EFAULT on a bad guest pointer.  */
static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
6199 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6200 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6202 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/* ARM-EABI variant: read a guest struct flock64 with EABI alignment
 * into a host struct flock64.  */
static inline abi_long copy_from_user_eabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_eabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}
/* ARM-EABI variant: write a host struct flock64 out to a guest struct
 * flock64 with EABI alignment.  */
static inline abi_long copy_to_user_eabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_eabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
6242 #endif
/* Read a guest struct flock64 into a host struct flock64.  */
static inline abi_long copy_from_user_flock64(struct flock64 *fl,
                                              abi_ulong target_flock_addr)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}
/* Write a host struct flock64 out to a guest struct flock64.  */
static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
/* Emulate fcntl(2): translate the command and its argument (flock
 * structs, flag bitmasks, f_owner_ex) to host form, issue the host
 * fcntl, and convert results back.  Returns a target errno.  */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
	    return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            /* Kernel fills in the lock description; copy it back. */
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            /* Open-file flags differ between ABIs; translate them. */
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    /* Commands whose argument is a plain integer pass through. */
    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
6392 #ifdef USE_UID16
/* Clamp a 32-bit uid into the 16-bit range for the legacy *uid16
 * syscalls; out-of-range values become the overflow uid 65534.  */
static inline int high2lowuid(int uid)
{
    return (uid > 65535) ? 65534 : uid;
}
/* Clamp a 32-bit gid into the 16-bit range for the legacy *gid16
 * syscalls; out-of-range values become the overflow gid 65534.  */
static inline int high2lowgid(int gid)
{
    return (gid > 65535) ? 65534 : gid;
}
/* Widen a 16-bit uid from a *uid16 syscall: the 16-bit "no change"
 * sentinel (all low bits set) maps to -1, everything else passes
 * through unchanged.  */
static inline int low2highuid(int uid)
{
    if ((uid & 0xffff) == 0xffff) {
        return -1;
    }
    return uid;
}
/* Widen a 16-bit gid from a *gid16 syscall: the 16-bit "no change"
 * sentinel (all low bits set) maps to -1, everything else passes
 * through unchanged.  */
static inline int low2highgid(int gid)
{
    if ((gid & 0xffff) == 0xffff) {
        return -1;
    }
    return gid;
}
/* Byte-swap a 16-bit uid/gid between host and target order (uid16 ABI). */
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6432 #else /* !USE_UID16 */
/* 32-bit uid/gid ABI: no narrowing is needed, so these are identities. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
/* Byte-swap a 32-bit uid/gid between host and target order. */
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6456 #endif /* USE_UID16 */
6458 /* We must do direct syscalls for setting UID/GID, because we want to
6459 * implement the Linux system call semantics of "change only for this thread",
6460 * not the libc/POSIX semantics of "change for all threads in process".
6461 * (See http://ewontfix.com/17/ for more details.)
6462 * We use the 32-bit version of the syscalls if present; if it is not
6463 * then either the host architecture supports 32-bit UIDs natively with
6464 * the standard syscall, or the 16-bit UID is the best we can do.
6466 #ifdef __NR_setuid32
6467 #define __NR_sys_setuid __NR_setuid32
6468 #else
6469 #define __NR_sys_setuid __NR_setuid
6470 #endif
6471 #ifdef __NR_setgid32
6472 #define __NR_sys_setgid __NR_setgid32
6473 #else
6474 #define __NR_sys_setgid __NR_setgid
6475 #endif
6476 #ifdef __NR_setresuid32
6477 #define __NR_sys_setresuid __NR_setresuid32
6478 #else
6479 #define __NR_sys_setresuid __NR_setresuid
6480 #endif
6481 #ifdef __NR_setresgid32
6482 #define __NR_sys_setresgid __NR_setresgid32
6483 #else
6484 #define __NR_sys_setresgid __NR_setresgid
6485 #endif
/* Direct syscall wrappers for the set*id family; see the comment above
 * for why the libc wrappers (which affect all threads) cannot be used. */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
/* One-time setup for the syscall layer: register all thunk struct
 * descriptors, build the reverse errno table, and patch each ioctl
 * entry whose size field is the all-ones placeholder with the real
 * target struct size.  */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
#if TARGET_ABI_BITS == 32
/* Reassemble a 64-bit file offset passed as a register pair; which word
 * holds the high half depends on the target's endianness.  */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABI: the offset already fits in one register; word1 unused. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
6560 #ifdef TARGET_NR_truncate64
/* truncate64: rebuild the 64-bit length from the register pair in
 * arg2/arg3, skipping the alignment padding register on ABIs that
 * require 64-bit register pairs to start on an even register.  */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
6572 #endif
6574 #ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair handling as target_truncate64 but
 * operating on an already-open fd.  */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
6586 #endif
/* Read a guest struct timespec into a host one.  Returns 0 or
 * -TARGET_EFAULT on a bad guest pointer.  */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
6601 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
6602 struct timespec *host_ts)
6604 struct target_timespec *target_ts;
6606 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
6607 return -TARGET_EFAULT;
6608 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
6609 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6610 unlock_user_struct(target_ts, target_addr, 1);
6611 return 0;
6614 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6615 abi_ulong target_addr)
6617 struct target_itimerspec *target_itspec;
6619 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6620 return -TARGET_EFAULT;
6623 host_itspec->it_interval.tv_sec =
6624 tswapal(target_itspec->it_interval.tv_sec);
6625 host_itspec->it_interval.tv_nsec =
6626 tswapal(target_itspec->it_interval.tv_nsec);
6627 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6628 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6630 unlock_user_struct(target_itspec, target_addr, 1);
6631 return 0;
6634 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6635 struct itimerspec *host_its)
6637 struct target_itimerspec *target_itspec;
6639 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6640 return -TARGET_EFAULT;
6643 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6644 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6646 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6647 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6649 unlock_user_struct(target_itspec, target_addr, 0);
6650 return 0;
6653 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6654 abi_ulong target_addr)
6656 struct target_sigevent *target_sevp;
6658 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6659 return -TARGET_EFAULT;
6662 /* This union is awkward on 64 bit systems because it has a 32 bit
6663 * integer and a pointer in it; we follow the conversion approach
6664 * used for handling sigval types in signal.c so the guest should get
6665 * the correct value back even if we did a 64 bit byteswap and it's
6666 * using the 32 bit integer.
6668 host_sevp->sigev_value.sival_ptr =
6669 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6670 host_sevp->sigev_signo =
6671 target_to_host_signal(tswap32(target_sevp->sigev_signo));
6672 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6673 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6675 unlock_user_struct(target_sevp, target_addr, 1);
6676 return 0;
#if defined(TARGET_NR_mlockall)
/* Translate the guest's MCL_* flag bits into the host's values. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int host_flags = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        host_flags |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        host_flags |= MCL_FUTURE;
    }
    return host_flags;
}
#endif
/*
 * Copy *host_st out to a guest stat64-style struct at target_addr,
 * choosing the layout the guest expects (ARM EABI, target_stat64, or
 * plain target_stat).  Returns 0 on success, -TARGET_EFAULT if the
 * guest address is unwritable.
 */
6694 static inline abi_long host_to_target_stat64(void *cpu_env,
6695 abi_ulong target_addr,
6696 struct stat *host_st)
/* ARM EABI guests use their own 64-bit stat layout. */
6698 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6699 if (((CPUARMState *)cpu_env)->eabi) {
6700 struct target_eabi_stat64 *target_st;
6702 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6703 return -TARGET_EFAULT;
/* Zero first so padding and fields we do not fill are defined. */
6704 memset(target_st, 0, sizeof(struct target_eabi_stat64));
6705 __put_user(host_st->st_dev, &target_st->st_dev);
6706 __put_user(host_st->st_ino, &target_st->st_ino);
/* Some layouts carry the real inode in a second __st_ino field. */
6707 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6708 __put_user(host_st->st_ino, &target_st->__st_ino);
6709 #endif
6710 __put_user(host_st->st_mode, &target_st->st_mode);
6711 __put_user(host_st->st_nlink, &target_st->st_nlink);
6712 __put_user(host_st->st_uid, &target_st->st_uid);
6713 __put_user(host_st->st_gid, &target_st->st_gid);
6714 __put_user(host_st->st_rdev, &target_st->st_rdev);
6715 __put_user(host_st->st_size, &target_st->st_size);
6716 __put_user(host_st->st_blksize, &target_st->st_blksize);
6717 __put_user(host_st->st_blocks, &target_st->st_blocks);
6718 __put_user(host_st->st_atime, &target_st->target_st_atime);
6719 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6720 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6721 unlock_user_struct(target_st, target_addr, 1);
6722 } else
6723 #endif
/* Generic path: the guest's 64-bit layout if it has one, else the
 * plain target_stat layout. */
6725 #if defined(TARGET_HAS_STRUCT_STAT64)
6726 struct target_stat64 *target_st;
6727 #else
6728 struct target_stat *target_st;
6729 #endif
6731 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6732 return -TARGET_EFAULT;
6733 memset(target_st, 0, sizeof(*target_st));
6734 __put_user(host_st->st_dev, &target_st->st_dev);
6735 __put_user(host_st->st_ino, &target_st->st_ino);
6736 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6737 __put_user(host_st->st_ino, &target_st->__st_ino);
6738 #endif
6739 __put_user(host_st->st_mode, &target_st->st_mode);
6740 __put_user(host_st->st_nlink, &target_st->st_nlink);
6741 __put_user(host_st->st_uid, &target_st->st_uid);
6742 __put_user(host_st->st_gid, &target_st->st_gid);
6743 __put_user(host_st->st_rdev, &target_st->st_rdev);
6744 /* XXX: better use of kernel struct */
6745 __put_user(host_st->st_size, &target_st->st_size);
6746 __put_user(host_st->st_blksize, &target_st->st_blksize);
6747 __put_user(host_st->st_blocks, &target_st->st_blocks);
6748 __put_user(host_st->st_atime, &target_st->target_st_atime);
6749 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6750 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6751 unlock_user_struct(target_st, target_addr, 1);
6754 return 0;
/*
 * Forward a guest futex(2) call to the host futex syscall.
 * Unknown base operations return -TARGET_ENOSYS.
 */
6757 /* ??? Using host futex calls even when target atomic operations
6758 are not really atomic probably breaks things. However implementing
6759 futexes locally would make futexes shared between multiple processes
6760 tricky. However they're probably useless because guest atomic
6761 operations won't work either. */
6762 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6763 target_ulong uaddr2, int val3)
6765 struct timespec ts, *pts;
6766 int base_op;
6768 /* ??? We assume FUTEX_* constants are the same on both host
6769 and target. */
6770 #ifdef FUTEX_CMD_MASK
/* Strip modifier bits (e.g. FUTEX_PRIVATE_FLAG) to find the base command. */
6771 base_op = op & FUTEX_CMD_MASK;
6772 #else
6773 base_op = op;
6774 #endif
6775 switch (base_op) {
6776 case FUTEX_WAIT:
6777 case FUTEX_WAIT_BITSET:
6778 if (timeout) {
6779 pts = &ts;
6780 target_to_host_timespec(pts, timeout);
6781 } else {
6782 pts = NULL;
/* val is byteswapped here because the kernel compares it against the
 * futex word in guest memory, which is kept in guest byte order. */
6784 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6785 pts, NULL, val3));
6786 case FUTEX_WAKE:
6787 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6788 case FUTEX_FD:
6789 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6790 case FUTEX_REQUEUE:
6791 case FUTEX_CMP_REQUEUE:
6792 case FUTEX_WAKE_OP:
6793 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6794 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6795 But the prototype takes a `struct timespec *'; insert casts
6796 to satisfy the compiler. We do not need to tswap TIMEOUT
6797 since it's not compared to guest memory. */
6798 pts = (struct timespec *)(uintptr_t) timeout;
/* val3 is compared against the guest-order futex word only for
 * FUTEX_CMP_REQUEUE, hence the conditional swap. */
6799 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6800 g2h(uaddr2),
6801 (base_op == FUTEX_CMP_REQUEUE
6802 ? tswap32(val3)
6803 : val3)));
6804 default:
6805 return -TARGET_ENOSYS;
6808 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Implement name_to_handle_at(2): read the guest-supplied handle_bytes,
 * call the host syscall with a host-side file_handle, then copy the
 * opaque handle and the mount id back to guest memory.
 */
6809 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6810 abi_long handle, abi_long mount_id,
6811 abi_long flags)
6813 struct file_handle *target_fh;
6814 struct file_handle *fh;
6815 int mid = 0;
6816 abi_long ret;
6817 char *name;
6818 unsigned int size, total_size;
/* handle_bytes is the first 32-bit field of struct file_handle. */
6820 if (get_user_s32(size, handle)) {
6821 return -TARGET_EFAULT;
6824 name = lock_user_string(pathname);
6825 if (!name) {
6826 return -TARGET_EFAULT;
/* NOTE(review): size is guest-controlled and unchecked here; the
 * addition could wrap for huge values.  Presumably lock_user() fails
 * first and the kernel rejects handle_bytes > MAX_HANDLE_SZ — confirm. */
6829 total_size = sizeof(struct file_handle) + size;
6830 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6831 if (!target_fh) {
6832 unlock_user(name, pathname, 0);
6833 return -TARGET_EFAULT;
6836 fh = g_malloc0(total_size);
6837 fh->handle_bytes = size;
6839 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6840 unlock_user(name, pathname, 0);
6842 /* man name_to_handle_at(2):
6843 * Other than the use of the handle_bytes field, the caller should treat
6844 * the file_handle structure as an opaque data type
/* Copy the whole opaque handle, then byteswap only the two fields
 * whose layout the ABI defines. */
6847 memcpy(target_fh, fh, total_size);
6848 target_fh->handle_bytes = tswap32(fh->handle_bytes);
6849 target_fh->handle_type = tswap32(fh->handle_type);
6850 g_free(fh);
6851 unlock_user(target_fh, handle, total_size);
6853 if (put_user_s32(mid, mount_id)) {
6854 return -TARGET_EFAULT;
6857 return ret;
6860 #endif
6862 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Implement open_by_handle_at(2): duplicate the guest's file_handle into
 * host memory, fixing up the byte order of the two defined fields, and
 * open it with the translated flags.  Returns the new fd or -errno.
 */
6863 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6864 abi_long flags)
6866 struct file_handle *target_fh;
6867 struct file_handle *fh;
6868 unsigned int size, total_size;
6869 abi_long ret;
/* handle_bytes is the first 32-bit field of struct file_handle. */
6871 if (get_user_s32(size, handle)) {
6872 return -TARGET_EFAULT;
6875 total_size = sizeof(struct file_handle) + size;
6876 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6877 if (!target_fh) {
6878 return -TARGET_EFAULT;
/* Copy the opaque handle wholesale, then overwrite the two fields
 * whose byte order the ABI defines. */
6881 fh = g_memdup(target_fh, total_size);
6882 fh->handle_bytes = size;
6883 fh->handle_type = tswap32(target_fh->handle_type);
6885 ret = get_errno(open_by_handle_at(mount_fd, fh,
6886 target_to_host_bitmask(flags, fcntl_flags_tbl)));
6888 g_free(fh);
6890 unlock_user(target_fh, handle, total_size);
6892 return ret;
6894 #endif
6896 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6898 /* signalfd siginfo conversion */
/*
 * Convert one host signalfd_siginfo record to the guest's layout
 * (byteswapped fields, translated signal number).
 *
 * Note: the only caller converts in place, i.e. tinfo may alias info.
 * All source operands therefore read from *info (and the local 'sig'),
 * never from *tinfo: the previous code read tinfo->ssi_signo,
 * tinfo->ssi_code and tinfo->ssi_errno before/while writing the struct,
 * which was only correct by accident when the two pointers were equal.
 */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    if (info->ssi_signo == SIGBUS &&
        (info->ssi_code == BUS_MCEERR_AR ||
         info->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    tinfo->ssi_errno = tswap32(info->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
6938 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
6940 int i;
6942 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
6943 host_to_target_signalfd_siginfo(buf + i, buf + i);
6946 return len;
/* fd translator registered on signalfd descriptors: every read by the
 * guest has its siginfo records converted from host to guest layout. */
6949 static TargetFdTrans target_signalfd_trans = {
6950 .host_to_target_data = host_to_target_data_signalfd,
/*
 * Implement signalfd4(2): validate the guest flags, convert the guest
 * sigset to host format, create the signalfd and register the data
 * translator on the resulting fd.  Returns the fd or -errno.
 */
6953 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6955 int host_flags;
6956 target_sigset_t *target_mask;
6957 sigset_t host_mask;
6958 abi_long ret;
/* Reject any flag bits other than the two the ABI defines. */
6960 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6961 return -TARGET_EINVAL;
6963 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6964 return -TARGET_EFAULT;
6967 target_to_host_sigset(&host_mask, target_mask);
6969 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6971 ret = get_errno(signalfd(fd, &host_mask, host_flags));
/* On success, make sure data read from this fd is converted to the
 * guest's signalfd_siginfo layout. */
6972 if (ret >= 0) {
6973 fd_trans_register(ret, &target_signalfd_trans);
6976 unlock_user_struct(target_mask, mask, 0);
6978 return ret;
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same. */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        int tsig = host_to_target_signal(WTERMSIG(status));
        return tsig | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        int tsig = host_to_target_signal(WSTOPSIG(status));
        return (tsig << 8) | (status & 0xff);
    }
    return status;
}
/*
 * Fake /proc/self/cmdline for the guest: stream the host file into fd,
 * but drop the first NUL-terminated string (the path of the qemu binary
 * itself) so the guest sees its own argv.  Returns 0 on success, -1 with
 * errno set on failure.
 */
6996 static int open_self_cmdline(void *cpu_env, int fd)
6998 int fd_orig = -1;
6999 bool word_skipped = false;
7001 fd_orig = open("/proc/self/cmdline", O_RDONLY);
7002 if (fd_orig < 0) {
7003 return fd_orig;
7006 while (true) {
7007 ssize_t nb_read;
7008 char buf[128];
7009 char *cp_buf = buf;
7011 nb_read = read(fd_orig, buf, sizeof(buf));
7012 if (nb_read < 0) {
/* Preserve the read() errno across the close(). */
7013 int e = errno;
7014 fd_orig = close(fd_orig);
7015 errno = e;
7016 return -1;
7017 } else if (nb_read == 0) {
7018 break;
/* Until the first NUL is seen, the chunk is (part of) argv[0] and is
 * discarded; a chunk with no NUL is dropped whole and scanning
 * continues with the next read. */
7021 if (!word_skipped) {
7022 /* Skip the first string, which is the path to qemu-*-static
7023 instead of the actual command. */
7024 cp_buf = memchr(buf, 0, nb_read);
7025 if (cp_buf) {
7026 /* Null byte found, skip one string */
7027 cp_buf++;
7028 nb_read -= cp_buf - buf;
7029 word_skipped = true;
7033 if (word_skipped) {
7034 if (write(fd, cp_buf, nb_read) != nb_read) {
7035 int e = errno;
7036 close(fd_orig);
7037 errno = e;
7038 return -1;
7043 return close(fd_orig);
7046 static int open_self_maps(void *cpu_env, int fd)
7048 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7049 TaskState *ts = cpu->opaque;
7050 FILE *fp;
7051 char *line = NULL;
7052 size_t len = 0;
7053 ssize_t read;
7055 fp = fopen("/proc/self/maps", "r");
7056 if (fp == NULL) {
7057 return -1;
7060 while ((read = getline(&line, &len, fp)) != -1) {
7061 int fields, dev_maj, dev_min, inode;
7062 uint64_t min, max, offset;
7063 char flag_r, flag_w, flag_x, flag_p;
7064 char path[512] = "";
7065 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7066 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7067 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
7069 if ((fields < 10) || (fields > 11)) {
7070 continue;
7072 if (h2g_valid(min)) {
7073 int flags = page_get_flags(h2g(min));
7074 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
7075 if (page_check_range(h2g(min), max - min, flags) == -1) {
7076 continue;
7078 if (h2g(min) == ts->info->stack_limit) {
7079 pstrcpy(path, sizeof(path), " [stack]");
7081 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
7082 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7083 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7084 flag_x, flag_p, offset, dev_maj, dev_min, inode,
7085 path[0] ? " " : "", path);
7089 free(line);
7090 fclose(fp);
7092 return 0;
/*
 * Fake /proc/self/stat for the guest: write 44 space-separated fields to
 * fd.  Only field 0 (pid), field 1 (command name) and field 27 (start of
 * stack) carry real values; every other field is reported as 0.
 * Returns 0 on success, -1 on a short write.
 */
7095 static int open_self_stat(void *cpu_env, int fd)
7097 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7098 TaskState *ts = cpu->opaque;
7099 abi_ulong start_stack = ts->info->start_stack;
7100 int i;
7102 for (i = 0; i < 44; i++) {
7103 char buf[128];
7104 int len;
7105 uint64_t val = 0;
7107 if (i == 0) {
7108 /* pid */
7109 val = getpid();
7110 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7111 } else if (i == 1) {
7112 /* app name */
7113 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
7114 } else if (i == 27) {
7115 /* stack bottom */
7116 val = start_stack;
7117 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7118 } else {
7119 /* for the rest, there is MasterCard */
/* Zero-fill the remaining fields; the final one is newline-terminated. */
7120 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
7123 len = strlen(buf);
7124 if (write(fd, buf, len) != len) {
7125 return -1;
7129 return 0;
/*
 * Fake /proc/self/auxv for the guest: copy the saved auxiliary vector
 * out of guest memory into fd, then rewind fd so the guest reads it from
 * the start.  Always returns 0, even if the auxv could not be locked.
 */
7132 static int open_self_auxv(void *cpu_env, int fd)
7134 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7135 TaskState *ts = cpu->opaque;
7136 abi_ulong auxv = ts->info->saved_auxv;
7137 abi_ulong len = ts->info->auxv_len;
7138 char *ptr;
7141 * Auxiliary vector is stored in target process stack.
7142 * read in whole auxv vector and copy it to file
7144 ptr = lock_user(VERIFY_READ, auxv, len, 0);
7145 if (ptr != NULL) {
7146 while (len > 0) {
7147 ssize_t r;
7148 r = write(fd, ptr, len);
7149 if (r <= 0) {
7150 break;
7152 len -= r;
7153 ptr += r;
/* NOTE(review): unlock_user is called with ptr advanced and len
 * decremented by the loop above rather than the values returned by
 * lock_user; presumably harmless since they are only used under
 * DEBUG_REMAP — confirm. */
7155 lseek(fd, 0, SEEK_SET);
7156 unlock_user(ptr, auxv, len);
7159 return 0;
/* Return 1 if FILENAME names ENTRY inside this process's own /proc
 * directory, i.e. "/proc/self/ENTRY" or "/proc/<getpid()>/ENTRY";
 * otherwise return 0. */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (strncmp(filename, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    filename += strlen("/proc/");

    if (strncmp(filename, "self/", strlen("self/")) == 0) {
        filename += strlen("self/");
    } else if (*filename >= '1' && *filename <= '9') {
        char myself[80];

        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(filename, myself, strlen(myself)) != 0) {
            return 0;
        }
        filename += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(filename, entry) == 0;
}
7186 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-match predicate for fake /proc entries given by absolute path. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
7192 static int open_net_route(void *cpu_env, int fd)
7194 FILE *fp;
7195 char *line = NULL;
7196 size_t len = 0;
7197 ssize_t read;
7199 fp = fopen("/proc/net/route", "r");
7200 if (fp == NULL) {
7201 return -1;
7204 /* read header */
7206 read = getline(&line, &len, fp);
7207 dprintf(fd, "%s", line);
7209 /* read routes */
7211 while ((read = getline(&line, &len, fp)) != -1) {
7212 char iface[16];
7213 uint32_t dest, gw, mask;
7214 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7215 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7216 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7217 &mask, &mtu, &window, &irtt);
7218 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7219 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7220 metric, tswap32(mask), mtu, window, irtt);
7223 free(line);
7224 fclose(fp);
7226 return 0;
7228 #endif
/*
 * openat(2) with /proc interception: paths that name this process's own
 * /proc entries (maps, stat, auxv, cmdline — and /proc/net/route on
 * cross-endian builds) are redirected to an unlinked temporary file
 * filled with guest-appropriate content; everything else goes to the
 * host via safe_openat.  Returns an fd or a negative errno.
 */
7230 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7232 struct fake_open {
7233 const char *filename;
7234 int (*fill)(void *cpu_env, int fd);
7235 int (*cmp)(const char *s1, const char *s2);
7237 const struct fake_open *fake_open;
/* Table of intercepted paths; 'cmp' decides how 'filename' is matched
 * (own-/proc-entry match vs. exact path match). */
7238 static const struct fake_open fakes[] = {
7239 { "maps", open_self_maps, is_proc_myself },
7240 { "stat", open_self_stat, is_proc_myself },
7241 { "auxv", open_self_auxv, is_proc_myself },
7242 { "cmdline", open_self_cmdline, is_proc_myself },
7243 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7244 { "/proc/net/route", open_net_route, is_proc },
7245 #endif
7246 { NULL, NULL, NULL }
/* /proc/self/exe must resolve to the guest binary, not qemu itself. */
7249 if (is_proc_myself(pathname, "exe")) {
7250 int execfd = qemu_getauxval(AT_EXECFD);
7251 return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7254 for (fake_open = fakes; fake_open->filename; fake_open++) {
7255 if (fake_open->cmp(pathname, fake_open->filename)) {
7256 break;
7260 if (fake_open->filename) {
7261 const char *tmpdir;
7262 char filename[PATH_MAX];
7263 int fd, r;
7265 /* create temporary file to map stat to */
7266 tmpdir = getenv("TMPDIR");
7267 if (!tmpdir)
7268 tmpdir = "/tmp";
7269 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7270 fd = mkstemp(filename);
7271 if (fd < 0) {
7272 return fd;
/* Unlink immediately: the fd keeps the file alive until closed. */
7274 unlink(filename);
7276 if ((r = fake_open->fill(cpu_env, fd))) {
7277 int e = errno;
7278 close(fd);
7279 errno = e;
7280 return r;
/* Rewind so the guest reads the generated content from the start. */
7282 lseek(fd, 0, SEEK_SET);
7284 return fd;
7287 return safe_openat(dirfd, path(pathname), flags, mode);
7290 #define TIMER_MAGIC 0x0caf0000
7291 #define TIMER_MAGIC_MASK 0xffff0000
7293 /* Convert QEMU provided timer ID back to internal 16bit index format */
7294 static target_timer_t get_timer_id(abi_long arg)
7296 target_timer_t timerid = arg;
7298 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7299 return -TARGET_EINVAL;
7302 timerid &= 0xffff;
7304 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7305 return -TARGET_EINVAL;
7308 return timerid;
7311 /* do_syscall() should always have a single exit point at the end so
7312 that actions, such as logging of syscall results, can be performed.
7313 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7314 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
7315 abi_long arg2, abi_long arg3, abi_long arg4,
7316 abi_long arg5, abi_long arg6, abi_long arg7,
7317 abi_long arg8)
7319 CPUState *cpu = ENV_GET_CPU(cpu_env);
7320 abi_long ret;
7321 struct stat st;
7322 struct statfs stfs;
7323 void *p;
7325 #if defined(DEBUG_ERESTARTSYS)
7326 /* Debug-only code for exercising the syscall-restart code paths
7327 * in the per-architecture cpu main loops: restart every syscall
7328 * the guest makes once before letting it through.
7331 static int flag;
7333 flag = !flag;
7334 if (flag) {
7335 return -TARGET_ERESTARTSYS;
7338 #endif
7340 #ifdef DEBUG
7341 gemu_log("syscall %d", num);
7342 #endif
7343 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
7344 if(do_strace)
7345 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
7347 switch(num) {
7348 case TARGET_NR_exit:
7349 /* In old applications this may be used to implement _exit(2).
7350 However in threaded applictions it is used for thread termination,
7351 and _exit_group is used for application termination.
7352 Do thread termination if we have more then one thread. */
7354 if (block_signals()) {
7355 ret = -TARGET_ERESTARTSYS;
7356 break;
7359 if (CPU_NEXT(first_cpu)) {
7360 TaskState *ts;
7362 cpu_list_lock();
7363 /* Remove the CPU from the list. */
7364 QTAILQ_REMOVE(&cpus, cpu, node);
7365 cpu_list_unlock();
7366 ts = cpu->opaque;
7367 if (ts->child_tidptr) {
7368 put_user_u32(0, ts->child_tidptr);
7369 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7370 NULL, NULL, 0);
7372 thread_cpu = NULL;
7373 object_unref(OBJECT(cpu));
7374 g_free(ts);
7375 rcu_unregister_thread();
7376 pthread_exit(NULL);
7378 #ifdef TARGET_GPROF
7379 _mcleanup();
7380 #endif
7381 gdb_exit(cpu_env, arg1);
7382 _exit(arg1);
7383 ret = 0; /* avoid warning */
7384 break;
7385 case TARGET_NR_read:
7386 if (arg3 == 0)
7387 ret = 0;
7388 else {
7389 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7390 goto efault;
7391 ret = get_errno(safe_read(arg1, p, arg3));
7392 if (ret >= 0 &&
7393 fd_trans_host_to_target_data(arg1)) {
7394 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7396 unlock_user(p, arg2, ret);
7398 break;
7399 case TARGET_NR_write:
7400 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7401 goto efault;
7402 ret = get_errno(safe_write(arg1, p, arg3));
7403 unlock_user(p, arg2, 0);
7404 break;
7405 #ifdef TARGET_NR_open
7406 case TARGET_NR_open:
7407 if (!(p = lock_user_string(arg1)))
7408 goto efault;
7409 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7410 target_to_host_bitmask(arg2, fcntl_flags_tbl),
7411 arg3));
7412 fd_trans_unregister(ret);
7413 unlock_user(p, arg1, 0);
7414 break;
7415 #endif
7416 case TARGET_NR_openat:
7417 if (!(p = lock_user_string(arg2)))
7418 goto efault;
7419 ret = get_errno(do_openat(cpu_env, arg1, p,
7420 target_to_host_bitmask(arg3, fcntl_flags_tbl),
7421 arg4));
7422 fd_trans_unregister(ret);
7423 unlock_user(p, arg2, 0);
7424 break;
7425 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7426 case TARGET_NR_name_to_handle_at:
7427 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7428 break;
7429 #endif
7430 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7431 case TARGET_NR_open_by_handle_at:
7432 ret = do_open_by_handle_at(arg1, arg2, arg3);
7433 fd_trans_unregister(ret);
7434 break;
7435 #endif
7436 case TARGET_NR_close:
7437 fd_trans_unregister(arg1);
7438 ret = get_errno(close(arg1));
7439 break;
7440 case TARGET_NR_brk:
7441 ret = do_brk(arg1);
7442 break;
7443 #ifdef TARGET_NR_fork
7444 case TARGET_NR_fork:
7445 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
7446 break;
7447 #endif
7448 #ifdef TARGET_NR_waitpid
7449 case TARGET_NR_waitpid:
7451 int status;
7452 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7453 if (!is_error(ret) && arg2 && ret
7454 && put_user_s32(host_to_target_waitstatus(status), arg2))
7455 goto efault;
7457 break;
7458 #endif
7459 #ifdef TARGET_NR_waitid
7460 case TARGET_NR_waitid:
7462 siginfo_t info;
7463 info.si_pid = 0;
7464 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7465 if (!is_error(ret) && arg3 && info.si_pid != 0) {
7466 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7467 goto efault;
7468 host_to_target_siginfo(p, &info);
7469 unlock_user(p, arg3, sizeof(target_siginfo_t));
7472 break;
7473 #endif
7474 #ifdef TARGET_NR_creat /* not on alpha */
7475 case TARGET_NR_creat:
7476 if (!(p = lock_user_string(arg1)))
7477 goto efault;
7478 ret = get_errno(creat(p, arg2));
7479 fd_trans_unregister(ret);
7480 unlock_user(p, arg1, 0);
7481 break;
7482 #endif
7483 #ifdef TARGET_NR_link
7484 case TARGET_NR_link:
7486 void * p2;
7487 p = lock_user_string(arg1);
7488 p2 = lock_user_string(arg2);
7489 if (!p || !p2)
7490 ret = -TARGET_EFAULT;
7491 else
7492 ret = get_errno(link(p, p2));
7493 unlock_user(p2, arg2, 0);
7494 unlock_user(p, arg1, 0);
7496 break;
7497 #endif
7498 #if defined(TARGET_NR_linkat)
7499 case TARGET_NR_linkat:
7501 void * p2 = NULL;
7502 if (!arg2 || !arg4)
7503 goto efault;
7504 p = lock_user_string(arg2);
7505 p2 = lock_user_string(arg4);
7506 if (!p || !p2)
7507 ret = -TARGET_EFAULT;
7508 else
7509 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7510 unlock_user(p, arg2, 0);
7511 unlock_user(p2, arg4, 0);
7513 break;
7514 #endif
7515 #ifdef TARGET_NR_unlink
7516 case TARGET_NR_unlink:
7517 if (!(p = lock_user_string(arg1)))
7518 goto efault;
7519 ret = get_errno(unlink(p));
7520 unlock_user(p, arg1, 0);
7521 break;
7522 #endif
7523 #if defined(TARGET_NR_unlinkat)
7524 case TARGET_NR_unlinkat:
7525 if (!(p = lock_user_string(arg2)))
7526 goto efault;
7527 ret = get_errno(unlinkat(arg1, p, arg3));
7528 unlock_user(p, arg2, 0);
7529 break;
7530 #endif
7531 case TARGET_NR_execve:
7533 char **argp, **envp;
7534 int argc, envc;
7535 abi_ulong gp;
7536 abi_ulong guest_argp;
7537 abi_ulong guest_envp;
7538 abi_ulong addr;
7539 char **q;
7540 int total_size = 0;
7542 argc = 0;
7543 guest_argp = arg2;
7544 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7545 if (get_user_ual(addr, gp))
7546 goto efault;
7547 if (!addr)
7548 break;
7549 argc++;
7551 envc = 0;
7552 guest_envp = arg3;
7553 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7554 if (get_user_ual(addr, gp))
7555 goto efault;
7556 if (!addr)
7557 break;
7558 envc++;
7561 argp = alloca((argc + 1) * sizeof(void *));
7562 envp = alloca((envc + 1) * sizeof(void *));
7564 for (gp = guest_argp, q = argp; gp;
7565 gp += sizeof(abi_ulong), q++) {
7566 if (get_user_ual(addr, gp))
7567 goto execve_efault;
7568 if (!addr)
7569 break;
7570 if (!(*q = lock_user_string(addr)))
7571 goto execve_efault;
7572 total_size += strlen(*q) + 1;
7574 *q = NULL;
7576 for (gp = guest_envp, q = envp; gp;
7577 gp += sizeof(abi_ulong), q++) {
7578 if (get_user_ual(addr, gp))
7579 goto execve_efault;
7580 if (!addr)
7581 break;
7582 if (!(*q = lock_user_string(addr)))
7583 goto execve_efault;
7584 total_size += strlen(*q) + 1;
7586 *q = NULL;
7588 if (!(p = lock_user_string(arg1)))
7589 goto execve_efault;
7590 /* Although execve() is not an interruptible syscall it is
7591 * a special case where we must use the safe_syscall wrapper:
7592 * if we allow a signal to happen before we make the host
7593 * syscall then we will 'lose' it, because at the point of
7594 * execve the process leaves QEMU's control. So we use the
7595 * safe syscall wrapper to ensure that we either take the
7596 * signal as a guest signal, or else it does not happen
7597 * before the execve completes and makes it the other
7598 * program's problem.
7600 ret = get_errno(safe_execve(p, argp, envp));
7601 unlock_user(p, arg1, 0);
7603 goto execve_end;
7605 execve_efault:
7606 ret = -TARGET_EFAULT;
7608 execve_end:
7609 for (gp = guest_argp, q = argp; *q;
7610 gp += sizeof(abi_ulong), q++) {
7611 if (get_user_ual(addr, gp)
7612 || !addr)
7613 break;
7614 unlock_user(*q, addr, 0);
7616 for (gp = guest_envp, q = envp; *q;
7617 gp += sizeof(abi_ulong), q++) {
7618 if (get_user_ual(addr, gp)
7619 || !addr)
7620 break;
7621 unlock_user(*q, addr, 0);
7624 break;
7625 case TARGET_NR_chdir:
7626 if (!(p = lock_user_string(arg1)))
7627 goto efault;
7628 ret = get_errno(chdir(p));
7629 unlock_user(p, arg1, 0);
7630 break;
7631 #ifdef TARGET_NR_time
7632 case TARGET_NR_time:
7634 time_t host_time;
7635 ret = get_errno(time(&host_time));
7636 if (!is_error(ret)
7637 && arg1
7638 && put_user_sal(host_time, arg1))
7639 goto efault;
7641 break;
7642 #endif
7643 #ifdef TARGET_NR_mknod
7644 case TARGET_NR_mknod:
7645 if (!(p = lock_user_string(arg1)))
7646 goto efault;
7647 ret = get_errno(mknod(p, arg2, arg3));
7648 unlock_user(p, arg1, 0);
7649 break;
7650 #endif
7651 #if defined(TARGET_NR_mknodat)
7652 case TARGET_NR_mknodat:
7653 if (!(p = lock_user_string(arg2)))
7654 goto efault;
7655 ret = get_errno(mknodat(arg1, p, arg3, arg4));
7656 unlock_user(p, arg2, 0);
7657 break;
7658 #endif
7659 #ifdef TARGET_NR_chmod
7660 case TARGET_NR_chmod:
7661 if (!(p = lock_user_string(arg1)))
7662 goto efault;
7663 ret = get_errno(chmod(p, arg2));
7664 unlock_user(p, arg1, 0);
7665 break;
7666 #endif
7667 #ifdef TARGET_NR_break
7668 case TARGET_NR_break:
7669 goto unimplemented;
7670 #endif
7671 #ifdef TARGET_NR_oldstat
7672 case TARGET_NR_oldstat:
7673 goto unimplemented;
7674 #endif
7675 case TARGET_NR_lseek:
7676 ret = get_errno(lseek(arg1, arg2, arg3));
7677 break;
7678 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7679 /* Alpha specific */
7680 case TARGET_NR_getxpid:
7681 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7682 ret = get_errno(getpid());
7683 break;
7684 #endif
7685 #ifdef TARGET_NR_getpid
7686 case TARGET_NR_getpid:
7687 ret = get_errno(getpid());
7688 break;
7689 #endif
7690 case TARGET_NR_mount:
7692 /* need to look at the data field */
7693 void *p2, *p3;
7695 if (arg1) {
7696 p = lock_user_string(arg1);
7697 if (!p) {
7698 goto efault;
7700 } else {
7701 p = NULL;
7704 p2 = lock_user_string(arg2);
7705 if (!p2) {
7706 if (arg1) {
7707 unlock_user(p, arg1, 0);
7709 goto efault;
7712 if (arg3) {
7713 p3 = lock_user_string(arg3);
7714 if (!p3) {
7715 if (arg1) {
7716 unlock_user(p, arg1, 0);
7718 unlock_user(p2, arg2, 0);
7719 goto efault;
7721 } else {
7722 p3 = NULL;
7725 /* FIXME - arg5 should be locked, but it isn't clear how to
7726 * do that since it's not guaranteed to be a NULL-terminated
7727 * string.
7729 if (!arg5) {
7730 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7731 } else {
7732 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7734 ret = get_errno(ret);
7736 if (arg1) {
7737 unlock_user(p, arg1, 0);
7739 unlock_user(p2, arg2, 0);
7740 if (arg3) {
7741 unlock_user(p3, arg3, 0);
7744 break;
7745 #ifdef TARGET_NR_umount
7746 case TARGET_NR_umount:
7747 if (!(p = lock_user_string(arg1)))
7748 goto efault;
7749 ret = get_errno(umount(p));
7750 unlock_user(p, arg1, 0);
7751 break;
7752 #endif
7753 #ifdef TARGET_NR_stime /* not on alpha */
7754 case TARGET_NR_stime:
7756 time_t host_time;
7757 if (get_user_sal(host_time, arg1))
7758 goto efault;
7759 ret = get_errno(stime(&host_time));
7761 break;
7762 #endif
7763 case TARGET_NR_ptrace:
7764 goto unimplemented;
7765 #ifdef TARGET_NR_alarm /* not on alpha */
7766 case TARGET_NR_alarm:
7767 ret = alarm(arg1);
7768 break;
7769 #endif
7770 #ifdef TARGET_NR_oldfstat
7771 case TARGET_NR_oldfstat:
7772 goto unimplemented;
7773 #endif
7774 #ifdef TARGET_NR_pause /* not on alpha */
7775 case TARGET_NR_pause:
7776 if (!block_signals()) {
7777 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7779 ret = -TARGET_EINTR;
7780 break;
7781 #endif
7782 #ifdef TARGET_NR_utime
7783 case TARGET_NR_utime:
7785 struct utimbuf tbuf, *host_tbuf;
7786 struct target_utimbuf *target_tbuf;
7787 if (arg2) {
7788 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7789 goto efault;
7790 tbuf.actime = tswapal(target_tbuf->actime);
7791 tbuf.modtime = tswapal(target_tbuf->modtime);
7792 unlock_user_struct(target_tbuf, arg2, 0);
7793 host_tbuf = &tbuf;
7794 } else {
7795 host_tbuf = NULL;
7797 if (!(p = lock_user_string(arg1)))
7798 goto efault;
7799 ret = get_errno(utime(p, host_tbuf));
7800 unlock_user(p, arg1, 0);
7802 break;
7803 #endif
7804 #ifdef TARGET_NR_utimes
7805 case TARGET_NR_utimes:
7807 struct timeval *tvp, tv[2];
7808 if (arg2) {
7809 if (copy_from_user_timeval(&tv[0], arg2)
7810 || copy_from_user_timeval(&tv[1],
7811 arg2 + sizeof(struct target_timeval)))
7812 goto efault;
7813 tvp = tv;
7814 } else {
7815 tvp = NULL;
7817 if (!(p = lock_user_string(arg1)))
7818 goto efault;
7819 ret = get_errno(utimes(p, tvp));
7820 unlock_user(p, arg1, 0);
7822 break;
7823 #endif
7824 #if defined(TARGET_NR_futimesat)
7825 case TARGET_NR_futimesat:
7827 struct timeval *tvp, tv[2];
7828 if (arg3) {
7829 if (copy_from_user_timeval(&tv[0], arg3)
7830 || copy_from_user_timeval(&tv[1],
7831 arg3 + sizeof(struct target_timeval)))
7832 goto efault;
7833 tvp = tv;
7834 } else {
7835 tvp = NULL;
7837 if (!(p = lock_user_string(arg2)))
7838 goto efault;
7839 ret = get_errno(futimesat(arg1, path(p), tvp));
7840 unlock_user(p, arg2, 0);
7842 break;
7843 #endif
7844 #ifdef TARGET_NR_stty
7845 case TARGET_NR_stty:
7846 goto unimplemented;
7847 #endif
7848 #ifdef TARGET_NR_gtty
7849 case TARGET_NR_gtty:
7850 goto unimplemented;
7851 #endif
7852 #ifdef TARGET_NR_access
7853 case TARGET_NR_access:
7854 if (!(p = lock_user_string(arg1)))
7855 goto efault;
7856 ret = get_errno(access(path(p), arg2));
7857 unlock_user(p, arg1, 0);
7858 break;
7859 #endif
7860 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7861 case TARGET_NR_faccessat:
7862 if (!(p = lock_user_string(arg2)))
7863 goto efault;
7864 ret = get_errno(faccessat(arg1, p, arg3, 0));
7865 unlock_user(p, arg2, 0);
7866 break;
7867 #endif
7868 #ifdef TARGET_NR_nice /* not on alpha */
7869 case TARGET_NR_nice:
7870 ret = get_errno(nice(arg1));
7871 break;
7872 #endif
7873 #ifdef TARGET_NR_ftime
7874 case TARGET_NR_ftime:
7875 goto unimplemented;
7876 #endif
7877 case TARGET_NR_sync:
7878 sync();
7879 ret = 0;
7880 break;
7881 case TARGET_NR_kill:
7882 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7883 break;
7884 #ifdef TARGET_NR_rename
7885 case TARGET_NR_rename:
7887 void *p2;
7888 p = lock_user_string(arg1);
7889 p2 = lock_user_string(arg2);
7890 if (!p || !p2)
7891 ret = -TARGET_EFAULT;
7892 else
7893 ret = get_errno(rename(p, p2));
7894 unlock_user(p2, arg2, 0);
7895 unlock_user(p, arg1, 0);
7897 break;
7898 #endif
7899 #if defined(TARGET_NR_renameat)
7900 case TARGET_NR_renameat:
7902 void *p2;
7903 p = lock_user_string(arg2);
7904 p2 = lock_user_string(arg4);
7905 if (!p || !p2)
7906 ret = -TARGET_EFAULT;
7907 else
7908 ret = get_errno(renameat(arg1, p, arg3, p2));
7909 unlock_user(p2, arg4, 0);
7910 unlock_user(p, arg2, 0);
7912 break;
7913 #endif
7914 #ifdef TARGET_NR_mkdir
7915 case TARGET_NR_mkdir:
7916 if (!(p = lock_user_string(arg1)))
7917 goto efault;
7918 ret = get_errno(mkdir(p, arg2));
7919 unlock_user(p, arg1, 0);
7920 break;
7921 #endif
7922 #if defined(TARGET_NR_mkdirat)
7923 case TARGET_NR_mkdirat:
7924 if (!(p = lock_user_string(arg2)))
7925 goto efault;
7926 ret = get_errno(mkdirat(arg1, p, arg3));
7927 unlock_user(p, arg2, 0);
7928 break;
7929 #endif
7930 #ifdef TARGET_NR_rmdir
7931 case TARGET_NR_rmdir:
7932 if (!(p = lock_user_string(arg1)))
7933 goto efault;
7934 ret = get_errno(rmdir(p));
7935 unlock_user(p, arg1, 0);
7936 break;
7937 #endif
7938 case TARGET_NR_dup:
7939 ret = get_errno(dup(arg1));
7940 if (ret >= 0) {
7941 fd_trans_dup(arg1, ret);
7943 break;
7944 #ifdef TARGET_NR_pipe
7945 case TARGET_NR_pipe:
7946 ret = do_pipe(cpu_env, arg1, 0, 0);
7947 break;
7948 #endif
7949 #ifdef TARGET_NR_pipe2
7950 case TARGET_NR_pipe2:
7951 ret = do_pipe(cpu_env, arg1,
7952 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7953 break;
7954 #endif
7955 case TARGET_NR_times:
7957 struct target_tms *tmsp;
7958 struct tms tms;
7959 ret = get_errno(times(&tms));
7960 if (arg1) {
7961 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7962 if (!tmsp)
7963 goto efault;
7964 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7965 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7966 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7967 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7969 if (!is_error(ret))
7970 ret = host_to_target_clock_t(ret);
7972 break;
7973 #ifdef TARGET_NR_prof
7974 case TARGET_NR_prof:
7975 goto unimplemented;
7976 #endif
7977 #ifdef TARGET_NR_signal
7978 case TARGET_NR_signal:
7979 goto unimplemented;
7980 #endif
7981 case TARGET_NR_acct:
7982 if (arg1 == 0) {
7983 ret = get_errno(acct(NULL));
7984 } else {
7985 if (!(p = lock_user_string(arg1)))
7986 goto efault;
7987 ret = get_errno(acct(path(p)));
7988 unlock_user(p, arg1, 0);
7990 break;
7991 #ifdef TARGET_NR_umount2
7992 case TARGET_NR_umount2:
7993 if (!(p = lock_user_string(arg1)))
7994 goto efault;
7995 ret = get_errno(umount2(p, arg2));
7996 unlock_user(p, arg1, 0);
7997 break;
7998 #endif
7999 #ifdef TARGET_NR_lock
8000 case TARGET_NR_lock:
8001 goto unimplemented;
8002 #endif
8003 case TARGET_NR_ioctl:
8004 ret = do_ioctl(arg1, arg2, arg3);
8005 break;
8006 case TARGET_NR_fcntl:
8007 ret = do_fcntl(arg1, arg2, arg3);
8008 break;
8009 #ifdef TARGET_NR_mpx
8010 case TARGET_NR_mpx:
8011 goto unimplemented;
8012 #endif
8013 case TARGET_NR_setpgid:
8014 ret = get_errno(setpgid(arg1, arg2));
8015 break;
8016 #ifdef TARGET_NR_ulimit
8017 case TARGET_NR_ulimit:
8018 goto unimplemented;
8019 #endif
8020 #ifdef TARGET_NR_oldolduname
8021 case TARGET_NR_oldolduname:
8022 goto unimplemented;
8023 #endif
8024 case TARGET_NR_umask:
8025 ret = get_errno(umask(arg1));
8026 break;
8027 case TARGET_NR_chroot:
8028 if (!(p = lock_user_string(arg1)))
8029 goto efault;
8030 ret = get_errno(chroot(p));
8031 unlock_user(p, arg1, 0);
8032 break;
8033 #ifdef TARGET_NR_ustat
8034 case TARGET_NR_ustat:
8035 goto unimplemented;
8036 #endif
8037 #ifdef TARGET_NR_dup2
8038 case TARGET_NR_dup2:
8039 ret = get_errno(dup2(arg1, arg2));
8040 if (ret >= 0) {
8041 fd_trans_dup(arg1, arg2);
8043 break;
8044 #endif
8045 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8046 case TARGET_NR_dup3:
8047 ret = get_errno(dup3(arg1, arg2, arg3));
8048 if (ret >= 0) {
8049 fd_trans_dup(arg1, arg2);
8051 break;
8052 #endif
8053 #ifdef TARGET_NR_getppid /* not on alpha */
8054 case TARGET_NR_getppid:
8055 ret = get_errno(getppid());
8056 break;
8057 #endif
8058 #ifdef TARGET_NR_getpgrp
8059 case TARGET_NR_getpgrp:
8060 ret = get_errno(getpgrp());
8061 break;
8062 #endif
8063 case TARGET_NR_setsid:
8064 ret = get_errno(setsid());
8065 break;
8066 #ifdef TARGET_NR_sigaction
8067 case TARGET_NR_sigaction:
8069 #if defined(TARGET_ALPHA)
8070 struct target_sigaction act, oact, *pact = 0;
8071 struct target_old_sigaction *old_act;
8072 if (arg2) {
8073 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8074 goto efault;
8075 act._sa_handler = old_act->_sa_handler;
8076 target_siginitset(&act.sa_mask, old_act->sa_mask);
8077 act.sa_flags = old_act->sa_flags;
8078 act.sa_restorer = 0;
8079 unlock_user_struct(old_act, arg2, 0);
8080 pact = &act;
8082 ret = get_errno(do_sigaction(arg1, pact, &oact));
8083 if (!is_error(ret) && arg3) {
8084 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8085 goto efault;
8086 old_act->_sa_handler = oact._sa_handler;
8087 old_act->sa_mask = oact.sa_mask.sig[0];
8088 old_act->sa_flags = oact.sa_flags;
8089 unlock_user_struct(old_act, arg3, 1);
8091 #elif defined(TARGET_MIPS)
8092 struct target_sigaction act, oact, *pact, *old_act;
8094 if (arg2) {
8095 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8096 goto efault;
8097 act._sa_handler = old_act->_sa_handler;
8098 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8099 act.sa_flags = old_act->sa_flags;
8100 unlock_user_struct(old_act, arg2, 0);
8101 pact = &act;
8102 } else {
8103 pact = NULL;
8106 ret = get_errno(do_sigaction(arg1, pact, &oact));
8108 if (!is_error(ret) && arg3) {
8109 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8110 goto efault;
8111 old_act->_sa_handler = oact._sa_handler;
8112 old_act->sa_flags = oact.sa_flags;
8113 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8114 old_act->sa_mask.sig[1] = 0;
8115 old_act->sa_mask.sig[2] = 0;
8116 old_act->sa_mask.sig[3] = 0;
8117 unlock_user_struct(old_act, arg3, 1);
8119 #else
8120 struct target_old_sigaction *old_act;
8121 struct target_sigaction act, oact, *pact;
8122 if (arg2) {
8123 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8124 goto efault;
8125 act._sa_handler = old_act->_sa_handler;
8126 target_siginitset(&act.sa_mask, old_act->sa_mask);
8127 act.sa_flags = old_act->sa_flags;
8128 act.sa_restorer = old_act->sa_restorer;
8129 unlock_user_struct(old_act, arg2, 0);
8130 pact = &act;
8131 } else {
8132 pact = NULL;
8134 ret = get_errno(do_sigaction(arg1, pact, &oact));
8135 if (!is_error(ret) && arg3) {
8136 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8137 goto efault;
8138 old_act->_sa_handler = oact._sa_handler;
8139 old_act->sa_mask = oact.sa_mask.sig[0];
8140 old_act->sa_flags = oact.sa_flags;
8141 old_act->sa_restorer = oact.sa_restorer;
8142 unlock_user_struct(old_act, arg3, 1);
8144 #endif
8146 break;
8147 #endif
8148 case TARGET_NR_rt_sigaction:
8150 #if defined(TARGET_ALPHA)
8151 struct target_sigaction act, oact, *pact = 0;
8152 struct target_rt_sigaction *rt_act;
8154 if (arg4 != sizeof(target_sigset_t)) {
8155 ret = -TARGET_EINVAL;
8156 break;
8158 if (arg2) {
8159 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8160 goto efault;
8161 act._sa_handler = rt_act->_sa_handler;
8162 act.sa_mask = rt_act->sa_mask;
8163 act.sa_flags = rt_act->sa_flags;
8164 act.sa_restorer = arg5;
8165 unlock_user_struct(rt_act, arg2, 0);
8166 pact = &act;
8168 ret = get_errno(do_sigaction(arg1, pact, &oact));
8169 if (!is_error(ret) && arg3) {
8170 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8171 goto efault;
8172 rt_act->_sa_handler = oact._sa_handler;
8173 rt_act->sa_mask = oact.sa_mask;
8174 rt_act->sa_flags = oact.sa_flags;
8175 unlock_user_struct(rt_act, arg3, 1);
8177 #else
8178 struct target_sigaction *act;
8179 struct target_sigaction *oact;
8181 if (arg4 != sizeof(target_sigset_t)) {
8182 ret = -TARGET_EINVAL;
8183 break;
8185 if (arg2) {
8186 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
8187 goto efault;
8188 } else
8189 act = NULL;
8190 if (arg3) {
8191 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8192 ret = -TARGET_EFAULT;
8193 goto rt_sigaction_fail;
8195 } else
8196 oact = NULL;
8197 ret = get_errno(do_sigaction(arg1, act, oact));
8198 rt_sigaction_fail:
8199 if (act)
8200 unlock_user_struct(act, arg2, 0);
8201 if (oact)
8202 unlock_user_struct(oact, arg3, 1);
8203 #endif
8205 break;
8206 #ifdef TARGET_NR_sgetmask /* not on alpha */
8207 case TARGET_NR_sgetmask:
8209 sigset_t cur_set;
8210 abi_ulong target_set;
8211 ret = do_sigprocmask(0, NULL, &cur_set);
8212 if (!ret) {
8213 host_to_target_old_sigset(&target_set, &cur_set);
8214 ret = target_set;
8217 break;
8218 #endif
8219 #ifdef TARGET_NR_ssetmask /* not on alpha */
8220 case TARGET_NR_ssetmask:
8222 sigset_t set, oset, cur_set;
8223 abi_ulong target_set = arg1;
8224 /* We only have one word of the new mask so we must read
8225 * the rest of it with do_sigprocmask() and OR in this word.
8226 * We are guaranteed that a do_sigprocmask() that only queries
8227 * the signal mask will not fail.
8229 ret = do_sigprocmask(0, NULL, &cur_set);
8230 assert(!ret);
8231 target_to_host_old_sigset(&set, &target_set);
8232 sigorset(&set, &set, &cur_set);
8233 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8234 if (!ret) {
8235 host_to_target_old_sigset(&target_set, &oset);
8236 ret = target_set;
8239 break;
8240 #endif
8241 #ifdef TARGET_NR_sigprocmask
8242 case TARGET_NR_sigprocmask:
8244 #if defined(TARGET_ALPHA)
8245 sigset_t set, oldset;
8246 abi_ulong mask;
8247 int how;
8249 switch (arg1) {
8250 case TARGET_SIG_BLOCK:
8251 how = SIG_BLOCK;
8252 break;
8253 case TARGET_SIG_UNBLOCK:
8254 how = SIG_UNBLOCK;
8255 break;
8256 case TARGET_SIG_SETMASK:
8257 how = SIG_SETMASK;
8258 break;
8259 default:
8260 ret = -TARGET_EINVAL;
8261 goto fail;
8263 mask = arg2;
8264 target_to_host_old_sigset(&set, &mask);
8266 ret = do_sigprocmask(how, &set, &oldset);
8267 if (!is_error(ret)) {
8268 host_to_target_old_sigset(&mask, &oldset);
8269 ret = mask;
8270 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8272 #else
8273 sigset_t set, oldset, *set_ptr;
8274 int how;
8276 if (arg2) {
8277 switch (arg1) {
8278 case TARGET_SIG_BLOCK:
8279 how = SIG_BLOCK;
8280 break;
8281 case TARGET_SIG_UNBLOCK:
8282 how = SIG_UNBLOCK;
8283 break;
8284 case TARGET_SIG_SETMASK:
8285 how = SIG_SETMASK;
8286 break;
8287 default:
8288 ret = -TARGET_EINVAL;
8289 goto fail;
8291 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8292 goto efault;
8293 target_to_host_old_sigset(&set, p);
8294 unlock_user(p, arg2, 0);
8295 set_ptr = &set;
8296 } else {
8297 how = 0;
8298 set_ptr = NULL;
8300 ret = do_sigprocmask(how, set_ptr, &oldset);
8301 if (!is_error(ret) && arg3) {
8302 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8303 goto efault;
8304 host_to_target_old_sigset(p, &oldset);
8305 unlock_user(p, arg3, sizeof(target_sigset_t));
8307 #endif
8309 break;
8310 #endif
8311 case TARGET_NR_rt_sigprocmask:
8313 int how = arg1;
8314 sigset_t set, oldset, *set_ptr;
8316 if (arg4 != sizeof(target_sigset_t)) {
8317 ret = -TARGET_EINVAL;
8318 break;
8321 if (arg2) {
8322 switch(how) {
8323 case TARGET_SIG_BLOCK:
8324 how = SIG_BLOCK;
8325 break;
8326 case TARGET_SIG_UNBLOCK:
8327 how = SIG_UNBLOCK;
8328 break;
8329 case TARGET_SIG_SETMASK:
8330 how = SIG_SETMASK;
8331 break;
8332 default:
8333 ret = -TARGET_EINVAL;
8334 goto fail;
8336 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8337 goto efault;
8338 target_to_host_sigset(&set, p);
8339 unlock_user(p, arg2, 0);
8340 set_ptr = &set;
8341 } else {
8342 how = 0;
8343 set_ptr = NULL;
8345 ret = do_sigprocmask(how, set_ptr, &oldset);
8346 if (!is_error(ret) && arg3) {
8347 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8348 goto efault;
8349 host_to_target_sigset(p, &oldset);
8350 unlock_user(p, arg3, sizeof(target_sigset_t));
8353 break;
8354 #ifdef TARGET_NR_sigpending
8355 case TARGET_NR_sigpending:
8357 sigset_t set;
8358 ret = get_errno(sigpending(&set));
8359 if (!is_error(ret)) {
8360 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8361 goto efault;
8362 host_to_target_old_sigset(p, &set);
8363 unlock_user(p, arg1, sizeof(target_sigset_t));
8366 break;
8367 #endif
8368 case TARGET_NR_rt_sigpending:
8370 sigset_t set;
8372 /* Yes, this check is >, not != like most. We follow the kernel's
8373 * logic and it does it like this because it implements
8374 * NR_sigpending through the same code path, and in that case
8375 * the old_sigset_t is smaller in size.
8377 if (arg2 > sizeof(target_sigset_t)) {
8378 ret = -TARGET_EINVAL;
8379 break;
8382 ret = get_errno(sigpending(&set));
8383 if (!is_error(ret)) {
8384 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8385 goto efault;
8386 host_to_target_sigset(p, &set);
8387 unlock_user(p, arg1, sizeof(target_sigset_t));
8390 break;
8391 #ifdef TARGET_NR_sigsuspend
8392 case TARGET_NR_sigsuspend:
8394 TaskState *ts = cpu->opaque;
8395 #if defined(TARGET_ALPHA)
8396 abi_ulong mask = arg1;
8397 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8398 #else
8399 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8400 goto efault;
8401 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8402 unlock_user(p, arg1, 0);
8403 #endif
8404 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8405 SIGSET_T_SIZE));
8406 if (ret != -TARGET_ERESTARTSYS) {
8407 ts->in_sigsuspend = 1;
8410 break;
8411 #endif
8412 case TARGET_NR_rt_sigsuspend:
8414 TaskState *ts = cpu->opaque;
8416 if (arg2 != sizeof(target_sigset_t)) {
8417 ret = -TARGET_EINVAL;
8418 break;
8420 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8421 goto efault;
8422 target_to_host_sigset(&ts->sigsuspend_mask, p);
8423 unlock_user(p, arg1, 0);
8424 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8425 SIGSET_T_SIZE));
8426 if (ret != -TARGET_ERESTARTSYS) {
8427 ts->in_sigsuspend = 1;
8430 break;
8431 case TARGET_NR_rt_sigtimedwait:
8433 sigset_t set;
8434 struct timespec uts, *puts;
8435 siginfo_t uinfo;
8437 if (arg4 != sizeof(target_sigset_t)) {
8438 ret = -TARGET_EINVAL;
8439 break;
8442 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8443 goto efault;
8444 target_to_host_sigset(&set, p);
8445 unlock_user(p, arg1, 0);
8446 if (arg3) {
8447 puts = &uts;
8448 target_to_host_timespec(puts, arg3);
8449 } else {
8450 puts = NULL;
8452 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8453 SIGSET_T_SIZE));
8454 if (!is_error(ret)) {
8455 if (arg2) {
8456 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8458 if (!p) {
8459 goto efault;
8461 host_to_target_siginfo(p, &uinfo);
8462 unlock_user(p, arg2, sizeof(target_siginfo_t));
8464 ret = host_to_target_signal(ret);
8467 break;
8468 case TARGET_NR_rt_sigqueueinfo:
8470 siginfo_t uinfo;
8472 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8473 if (!p) {
8474 goto efault;
8476 target_to_host_siginfo(&uinfo, p);
8477 unlock_user(p, arg1, 0);
8478 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8480 break;
8481 #ifdef TARGET_NR_sigreturn
8482 case TARGET_NR_sigreturn:
8483 if (block_signals()) {
8484 ret = -TARGET_ERESTARTSYS;
8485 } else {
8486 ret = do_sigreturn(cpu_env);
8488 break;
8489 #endif
8490 case TARGET_NR_rt_sigreturn:
8491 if (block_signals()) {
8492 ret = -TARGET_ERESTARTSYS;
8493 } else {
8494 ret = do_rt_sigreturn(cpu_env);
8496 break;
8497 case TARGET_NR_sethostname:
8498 if (!(p = lock_user_string(arg1)))
8499 goto efault;
8500 ret = get_errno(sethostname(p, arg2));
8501 unlock_user(p, arg1, 0);
8502 break;
8503 case TARGET_NR_setrlimit:
8505 int resource = target_to_host_resource(arg1);
8506 struct target_rlimit *target_rlim;
8507 struct rlimit rlim;
8508 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8509 goto efault;
8510 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8511 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8512 unlock_user_struct(target_rlim, arg2, 0);
8513 ret = get_errno(setrlimit(resource, &rlim));
8515 break;
8516 case TARGET_NR_getrlimit:
8518 int resource = target_to_host_resource(arg1);
8519 struct target_rlimit *target_rlim;
8520 struct rlimit rlim;
8522 ret = get_errno(getrlimit(resource, &rlim));
8523 if (!is_error(ret)) {
8524 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8525 goto efault;
8526 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8527 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8528 unlock_user_struct(target_rlim, arg2, 1);
8531 break;
8532 case TARGET_NR_getrusage:
8534 struct rusage rusage;
8535 ret = get_errno(getrusage(arg1, &rusage));
8536 if (!is_error(ret)) {
8537 ret = host_to_target_rusage(arg2, &rusage);
8540 break;
8541 case TARGET_NR_gettimeofday:
8543 struct timeval tv;
8544 ret = get_errno(gettimeofday(&tv, NULL));
8545 if (!is_error(ret)) {
8546 if (copy_to_user_timeval(arg1, &tv))
8547 goto efault;
8550 break;
8551 case TARGET_NR_settimeofday:
8553 struct timeval tv, *ptv = NULL;
8554 struct timezone tz, *ptz = NULL;
8556 if (arg1) {
8557 if (copy_from_user_timeval(&tv, arg1)) {
8558 goto efault;
8560 ptv = &tv;
8563 if (arg2) {
8564 if (copy_from_user_timezone(&tz, arg2)) {
8565 goto efault;
8567 ptz = &tz;
8570 ret = get_errno(settimeofday(ptv, ptz));
8572 break;
8573 #if defined(TARGET_NR_select)
8574 case TARGET_NR_select:
8575 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
8576 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8577 #else
8579 struct target_sel_arg_struct *sel;
8580 abi_ulong inp, outp, exp, tvp;
8581 long nsel;
8583 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
8584 goto efault;
8585 nsel = tswapal(sel->n);
8586 inp = tswapal(sel->inp);
8587 outp = tswapal(sel->outp);
8588 exp = tswapal(sel->exp);
8589 tvp = tswapal(sel->tvp);
8590 unlock_user_struct(sel, arg1, 0);
8591 ret = do_select(nsel, inp, outp, exp, tvp);
8593 #endif
8594 break;
8595 #endif
8596 #ifdef TARGET_NR_pselect6
8597 case TARGET_NR_pselect6:
8599 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8600 fd_set rfds, wfds, efds;
8601 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8602 struct timespec ts, *ts_ptr;
8605 * The 6th arg is actually two args smashed together,
8606 * so we cannot use the C library.
8608 sigset_t set;
8609 struct {
8610 sigset_t *set;
8611 size_t size;
8612 } sig, *sig_ptr;
8614 abi_ulong arg_sigset, arg_sigsize, *arg7;
8615 target_sigset_t *target_sigset;
8617 n = arg1;
8618 rfd_addr = arg2;
8619 wfd_addr = arg3;
8620 efd_addr = arg4;
8621 ts_addr = arg5;
8623 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8624 if (ret) {
8625 goto fail;
8627 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8628 if (ret) {
8629 goto fail;
8631 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8632 if (ret) {
8633 goto fail;
8637 * This takes a timespec, and not a timeval, so we cannot
8638 * use the do_select() helper ...
8640 if (ts_addr) {
8641 if (target_to_host_timespec(&ts, ts_addr)) {
8642 goto efault;
8644 ts_ptr = &ts;
8645 } else {
8646 ts_ptr = NULL;
8649 /* Extract the two packed args for the sigset */
8650 if (arg6) {
8651 sig_ptr = &sig;
8652 sig.size = SIGSET_T_SIZE;
8654 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8655 if (!arg7) {
8656 goto efault;
8658 arg_sigset = tswapal(arg7[0]);
8659 arg_sigsize = tswapal(arg7[1]);
8660 unlock_user(arg7, arg6, 0);
8662 if (arg_sigset) {
8663 sig.set = &set;
8664 if (arg_sigsize != sizeof(*target_sigset)) {
8665 /* Like the kernel, we enforce correct size sigsets */
8666 ret = -TARGET_EINVAL;
8667 goto fail;
8669 target_sigset = lock_user(VERIFY_READ, arg_sigset,
8670 sizeof(*target_sigset), 1);
8671 if (!target_sigset) {
8672 goto efault;
8674 target_to_host_sigset(&set, target_sigset);
8675 unlock_user(target_sigset, arg_sigset, 0);
8676 } else {
8677 sig.set = NULL;
8679 } else {
8680 sig_ptr = NULL;
8683 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8684 ts_ptr, sig_ptr));
8686 if (!is_error(ret)) {
8687 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8688 goto efault;
8689 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8690 goto efault;
8691 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8692 goto efault;
8694 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8695 goto efault;
8698 break;
8699 #endif
8700 #ifdef TARGET_NR_symlink
8701 case TARGET_NR_symlink:
8703 void *p2;
8704 p = lock_user_string(arg1);
8705 p2 = lock_user_string(arg2);
8706 if (!p || !p2)
8707 ret = -TARGET_EFAULT;
8708 else
8709 ret = get_errno(symlink(p, p2));
8710 unlock_user(p2, arg2, 0);
8711 unlock_user(p, arg1, 0);
8713 break;
8714 #endif
8715 #if defined(TARGET_NR_symlinkat)
8716 case TARGET_NR_symlinkat:
8718 void *p2;
8719 p = lock_user_string(arg1);
8720 p2 = lock_user_string(arg3);
8721 if (!p || !p2)
8722 ret = -TARGET_EFAULT;
8723 else
8724 ret = get_errno(symlinkat(p, arg2, p2));
8725 unlock_user(p2, arg3, 0);
8726 unlock_user(p, arg1, 0);
8728 break;
8729 #endif
8730 #ifdef TARGET_NR_oldlstat
8731 case TARGET_NR_oldlstat:
8732 goto unimplemented;
8733 #endif
8734 #ifdef TARGET_NR_readlink
8735 case TARGET_NR_readlink:
8737 void *p2;
8738 p = lock_user_string(arg1);
8739 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8740 if (!p || !p2) {
8741 ret = -TARGET_EFAULT;
8742 } else if (!arg3) {
8743 /* Short circuit this for the magic exe check. */
8744 ret = -TARGET_EINVAL;
8745 } else if (is_proc_myself((const char *)p, "exe")) {
8746 char real[PATH_MAX], *temp;
8747 temp = realpath(exec_path, real);
8748 /* Return value is # of bytes that we wrote to the buffer. */
8749 if (temp == NULL) {
8750 ret = get_errno(-1);
8751 } else {
8752 /* Don't worry about sign mismatch as earlier mapping
8753 * logic would have thrown a bad address error. */
8754 ret = MIN(strlen(real), arg3);
8755 /* We cannot NUL terminate the string. */
8756 memcpy(p2, real, ret);
8758 } else {
8759 ret = get_errno(readlink(path(p), p2, arg3));
8761 unlock_user(p2, arg2, ret);
8762 unlock_user(p, arg1, 0);
8764 break;
8765 #endif
8766 #if defined(TARGET_NR_readlinkat)
8767 case TARGET_NR_readlinkat:
8769 void *p2;
8770 p = lock_user_string(arg2);
8771 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8772 if (!p || !p2) {
8773 ret = -TARGET_EFAULT;
8774 } else if (is_proc_myself((const char *)p, "exe")) {
8775 char real[PATH_MAX], *temp;
8776 temp = realpath(exec_path, real);
8777 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
8778 snprintf((char *)p2, arg4, "%s", real);
8779 } else {
8780 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8782 unlock_user(p2, arg3, ret);
8783 unlock_user(p, arg2, 0);
8785 break;
8786 #endif
8787 #ifdef TARGET_NR_uselib
8788 case TARGET_NR_uselib:
8789 goto unimplemented;
8790 #endif
8791 #ifdef TARGET_NR_swapon
8792 case TARGET_NR_swapon:
8793 if (!(p = lock_user_string(arg1)))
8794 goto efault;
8795 ret = get_errno(swapon(p, arg2));
8796 unlock_user(p, arg1, 0);
8797 break;
8798 #endif
8799 case TARGET_NR_reboot:
8800 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8801 /* arg4 must be ignored in all other cases */
8802 p = lock_user_string(arg4);
8803 if (!p) {
8804 goto efault;
8806 ret = get_errno(reboot(arg1, arg2, arg3, p));
8807 unlock_user(p, arg4, 0);
8808 } else {
8809 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8811 break;
8812 #ifdef TARGET_NR_readdir
8813 case TARGET_NR_readdir:
8814 goto unimplemented;
8815 #endif
8816 #ifdef TARGET_NR_mmap
8817 case TARGET_NR_mmap:
8818 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8819 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8820 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8821 || defined(TARGET_S390X)
8823 abi_ulong *v;
8824 abi_ulong v1, v2, v3, v4, v5, v6;
8825 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8826 goto efault;
8827 v1 = tswapal(v[0]);
8828 v2 = tswapal(v[1]);
8829 v3 = tswapal(v[2]);
8830 v4 = tswapal(v[3]);
8831 v5 = tswapal(v[4]);
8832 v6 = tswapal(v[5]);
8833 unlock_user(v, arg1, 0);
8834 ret = get_errno(target_mmap(v1, v2, v3,
8835 target_to_host_bitmask(v4, mmap_flags_tbl),
8836 v5, v6));
8838 #else
8839 ret = get_errno(target_mmap(arg1, arg2, arg3,
8840 target_to_host_bitmask(arg4, mmap_flags_tbl),
8841 arg5,
8842 arg6));
8843 #endif
8844 break;
8845 #endif
8846 #ifdef TARGET_NR_mmap2
8847 case TARGET_NR_mmap2:
8848 #ifndef MMAP_SHIFT
8849 #define MMAP_SHIFT 12
8850 #endif
8851 ret = get_errno(target_mmap(arg1, arg2, arg3,
8852 target_to_host_bitmask(arg4, mmap_flags_tbl),
8853 arg5,
8854 arg6 << MMAP_SHIFT));
8855 break;
8856 #endif
8857 case TARGET_NR_munmap:
8858 ret = get_errno(target_munmap(arg1, arg2));
8859 break;
8860 case TARGET_NR_mprotect:
8862 TaskState *ts = cpu->opaque;
8863 /* Special hack to detect libc making the stack executable. */
8864 if ((arg3 & PROT_GROWSDOWN)
8865 && arg1 >= ts->info->stack_limit
8866 && arg1 <= ts->info->start_stack) {
8867 arg3 &= ~PROT_GROWSDOWN;
8868 arg2 = arg2 + arg1 - ts->info->stack_limit;
8869 arg1 = ts->info->stack_limit;
8872 ret = get_errno(target_mprotect(arg1, arg2, arg3));
8873 break;
8874 #ifdef TARGET_NR_mremap
8875 case TARGET_NR_mremap:
8876 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8877 break;
8878 #endif
8879 /* ??? msync/mlock/munlock are broken for softmmu. */
8880 #ifdef TARGET_NR_msync
8881 case TARGET_NR_msync:
8882 ret = get_errno(msync(g2h(arg1), arg2, arg3));
8883 break;
8884 #endif
8885 #ifdef TARGET_NR_mlock
8886 case TARGET_NR_mlock:
8887 ret = get_errno(mlock(g2h(arg1), arg2));
8888 break;
8889 #endif
8890 #ifdef TARGET_NR_munlock
8891 case TARGET_NR_munlock:
8892 ret = get_errno(munlock(g2h(arg1), arg2));
8893 break;
8894 #endif
8895 #ifdef TARGET_NR_mlockall
8896 case TARGET_NR_mlockall:
8897 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8898 break;
8899 #endif
8900 #ifdef TARGET_NR_munlockall
8901 case TARGET_NR_munlockall:
8902 ret = get_errno(munlockall());
8903 break;
8904 #endif
8905 case TARGET_NR_truncate:
8906 if (!(p = lock_user_string(arg1)))
8907 goto efault;
8908 ret = get_errno(truncate(p, arg2));
8909 unlock_user(p, arg1, 0);
8910 break;
8911 case TARGET_NR_ftruncate:
8912 ret = get_errno(ftruncate(arg1, arg2));
8913 break;
8914 case TARGET_NR_fchmod:
8915 ret = get_errno(fchmod(arg1, arg2));
8916 break;
8917 #if defined(TARGET_NR_fchmodat)
8918 case TARGET_NR_fchmodat:
8919 if (!(p = lock_user_string(arg2)))
8920 goto efault;
8921 ret = get_errno(fchmodat(arg1, p, arg3, 0));
8922 unlock_user(p, arg2, 0);
8923 break;
8924 #endif
8925 case TARGET_NR_getpriority:
8926 /* Note that negative values are valid for getpriority, so we must
8927 differentiate based on errno settings. */
8928 errno = 0;
8929 ret = getpriority(arg1, arg2);
8930 if (ret == -1 && errno != 0) {
8931 ret = -host_to_target_errno(errno);
8932 break;
8934 #ifdef TARGET_ALPHA
8935 /* Return value is the unbiased priority. Signal no error. */
8936 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8937 #else
8938 /* Return value is a biased priority to avoid negative numbers. */
8939 ret = 20 - ret;
8940 #endif
8941 break;
8942 case TARGET_NR_setpriority:
8943 ret = get_errno(setpriority(arg1, arg2, arg3));
8944 break;
8945 #ifdef TARGET_NR_profil
8946 case TARGET_NR_profil:
8947 goto unimplemented;
8948 #endif
8949 case TARGET_NR_statfs:
8950 if (!(p = lock_user_string(arg1)))
8951 goto efault;
8952 ret = get_errno(statfs(path(p), &stfs));
8953 unlock_user(p, arg1, 0);
8954 convert_statfs:
8955 if (!is_error(ret)) {
8956 struct target_statfs *target_stfs;
8958 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8959 goto efault;
8960 __put_user(stfs.f_type, &target_stfs->f_type);
8961 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8962 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8963 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8964 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8965 __put_user(stfs.f_files, &target_stfs->f_files);
8966 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8967 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8968 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8969 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8970 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8971 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8972 unlock_user_struct(target_stfs, arg2, 1);
8974 break;
8975 case TARGET_NR_fstatfs:
8976 ret = get_errno(fstatfs(arg1, &stfs));
8977 goto convert_statfs;
8978 #ifdef TARGET_NR_statfs64
8979 case TARGET_NR_statfs64:
8980 if (!(p = lock_user_string(arg1)))
8981 goto efault;
8982 ret = get_errno(statfs(path(p), &stfs));
8983 unlock_user(p, arg1, 0);
8984 convert_statfs64:
8985 if (!is_error(ret)) {
8986 struct target_statfs64 *target_stfs;
8988 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8989 goto efault;
8990 __put_user(stfs.f_type, &target_stfs->f_type);
8991 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8992 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8993 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8994 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8995 __put_user(stfs.f_files, &target_stfs->f_files);
8996 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8997 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8998 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8999 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9000 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9001 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9002 unlock_user_struct(target_stfs, arg3, 1);
9004 break;
9005 case TARGET_NR_fstatfs64:
9006 ret = get_errno(fstatfs(arg1, &stfs));
9007 goto convert_statfs64;
9008 #endif
9009 #ifdef TARGET_NR_ioperm
9010 case TARGET_NR_ioperm:
9011 goto unimplemented;
9012 #endif
9013 #ifdef TARGET_NR_socketcall
9014 case TARGET_NR_socketcall:
9015 ret = do_socketcall(arg1, arg2);
9016 break;
9017 #endif
9018 #ifdef TARGET_NR_accept
9019 case TARGET_NR_accept:
9020 ret = do_accept4(arg1, arg2, arg3, 0);
9021 break;
9022 #endif
9023 #ifdef TARGET_NR_accept4
9024 case TARGET_NR_accept4:
9025 ret = do_accept4(arg1, arg2, arg3, arg4);
9026 break;
9027 #endif
9028 #ifdef TARGET_NR_bind
9029 case TARGET_NR_bind:
9030 ret = do_bind(arg1, arg2, arg3);
9031 break;
9032 #endif
9033 #ifdef TARGET_NR_connect
9034 case TARGET_NR_connect:
9035 ret = do_connect(arg1, arg2, arg3);
9036 break;
9037 #endif
9038 #ifdef TARGET_NR_getpeername
9039 case TARGET_NR_getpeername:
9040 ret = do_getpeername(arg1, arg2, arg3);
9041 break;
9042 #endif
9043 #ifdef TARGET_NR_getsockname
9044 case TARGET_NR_getsockname:
9045 ret = do_getsockname(arg1, arg2, arg3);
9046 break;
9047 #endif
9048 #ifdef TARGET_NR_getsockopt
9049 case TARGET_NR_getsockopt:
9050 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9051 break;
9052 #endif
9053 #ifdef TARGET_NR_listen
9054 case TARGET_NR_listen:
9055 ret = get_errno(listen(arg1, arg2));
9056 break;
9057 #endif
9058 #ifdef TARGET_NR_recv
9059 case TARGET_NR_recv:
9060 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9061 break;
9062 #endif
9063 #ifdef TARGET_NR_recvfrom
9064 case TARGET_NR_recvfrom:
9065 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9066 break;
9067 #endif
9068 #ifdef TARGET_NR_recvmsg
9069 case TARGET_NR_recvmsg:
9070 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
9071 break;
9072 #endif
9073 #ifdef TARGET_NR_send
9074 case TARGET_NR_send:
9075 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9076 break;
9077 #endif
9078 #ifdef TARGET_NR_sendmsg
9079 case TARGET_NR_sendmsg:
9080 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
9081 break;
9082 #endif
9083 #ifdef TARGET_NR_sendmmsg
9084 case TARGET_NR_sendmmsg:
9085 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9086 break;
9087 case TARGET_NR_recvmmsg:
9088 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9089 break;
9090 #endif
9091 #ifdef TARGET_NR_sendto
9092 case TARGET_NR_sendto:
9093 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9094 break;
9095 #endif
9096 #ifdef TARGET_NR_shutdown
9097 case TARGET_NR_shutdown:
9098 ret = get_errno(shutdown(arg1, arg2));
9099 break;
9100 #endif
9101 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9102 case TARGET_NR_getrandom:
9103 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9104 if (!p) {
9105 goto efault;
9107 ret = get_errno(getrandom(p, arg2, arg3));
9108 unlock_user(p, arg1, ret);
9109 break;
9110 #endif
9111 #ifdef TARGET_NR_socket
9112 case TARGET_NR_socket:
9113 ret = do_socket(arg1, arg2, arg3);
9114 fd_trans_unregister(ret);
9115 break;
9116 #endif
9117 #ifdef TARGET_NR_socketpair
9118 case TARGET_NR_socketpair:
9119 ret = do_socketpair(arg1, arg2, arg3, arg4);
9120 break;
9121 #endif
9122 #ifdef TARGET_NR_setsockopt
9123 case TARGET_NR_setsockopt:
9124 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9125 break;
9126 #endif
9128 case TARGET_NR_syslog:
9129 if (!(p = lock_user_string(arg2)))
9130 goto efault;
9131 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9132 unlock_user(p, arg2, 0);
9133 break;
9135 case TARGET_NR_setitimer:
9137 struct itimerval value, ovalue, *pvalue;
9139 if (arg2) {
9140 pvalue = &value;
9141 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9142 || copy_from_user_timeval(&pvalue->it_value,
9143 arg2 + sizeof(struct target_timeval)))
9144 goto efault;
9145 } else {
9146 pvalue = NULL;
9148 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9149 if (!is_error(ret) && arg3) {
9150 if (copy_to_user_timeval(arg3,
9151 &ovalue.it_interval)
9152 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9153 &ovalue.it_value))
9154 goto efault;
9157 break;
9158 case TARGET_NR_getitimer:
9160 struct itimerval value;
9162 ret = get_errno(getitimer(arg1, &value));
9163 if (!is_error(ret) && arg2) {
9164 if (copy_to_user_timeval(arg2,
9165 &value.it_interval)
9166 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9167 &value.it_value))
9168 goto efault;
9171 break;
9172 #ifdef TARGET_NR_stat
9173 case TARGET_NR_stat:
9174 if (!(p = lock_user_string(arg1)))
9175 goto efault;
9176 ret = get_errno(stat(path(p), &st));
9177 unlock_user(p, arg1, 0);
9178 goto do_stat;
9179 #endif
9180 #ifdef TARGET_NR_lstat
9181 case TARGET_NR_lstat:
9182 if (!(p = lock_user_string(arg1)))
9183 goto efault;
9184 ret = get_errno(lstat(path(p), &st));
9185 unlock_user(p, arg1, 0);
9186 goto do_stat;
9187 #endif
9188 case TARGET_NR_fstat:
9190 ret = get_errno(fstat(arg1, &st));
9191 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9192 do_stat:
9193 #endif
9194 if (!is_error(ret)) {
9195 struct target_stat *target_st;
9197 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9198 goto efault;
9199 memset(target_st, 0, sizeof(*target_st));
9200 __put_user(st.st_dev, &target_st->st_dev);
9201 __put_user(st.st_ino, &target_st->st_ino);
9202 __put_user(st.st_mode, &target_st->st_mode);
9203 __put_user(st.st_uid, &target_st->st_uid);
9204 __put_user(st.st_gid, &target_st->st_gid);
9205 __put_user(st.st_nlink, &target_st->st_nlink);
9206 __put_user(st.st_rdev, &target_st->st_rdev);
9207 __put_user(st.st_size, &target_st->st_size);
9208 __put_user(st.st_blksize, &target_st->st_blksize);
9209 __put_user(st.st_blocks, &target_st->st_blocks);
9210 __put_user(st.st_atime, &target_st->target_st_atime);
9211 __put_user(st.st_mtime, &target_st->target_st_mtime);
9212 __put_user(st.st_ctime, &target_st->target_st_ctime);
9213 unlock_user_struct(target_st, arg2, 1);
9216 break;
9217 #ifdef TARGET_NR_olduname
9218 case TARGET_NR_olduname:
9219 goto unimplemented;
9220 #endif
9221 #ifdef TARGET_NR_iopl
9222 case TARGET_NR_iopl:
9223 goto unimplemented;
9224 #endif
9225 case TARGET_NR_vhangup:
9226 ret = get_errno(vhangup());
9227 break;
9228 #ifdef TARGET_NR_idle
9229 case TARGET_NR_idle:
9230 goto unimplemented;
9231 #endif
9232 #ifdef TARGET_NR_syscall
9233 case TARGET_NR_syscall:
9234 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9235 arg6, arg7, arg8, 0);
9236 break;
9237 #endif
9238 case TARGET_NR_wait4:
9240 int status;
9241 abi_long status_ptr = arg2;
9242 struct rusage rusage, *rusage_ptr;
9243 abi_ulong target_rusage = arg4;
9244 abi_long rusage_err;
9245 if (target_rusage)
9246 rusage_ptr = &rusage;
9247 else
9248 rusage_ptr = NULL;
9249 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9250 if (!is_error(ret)) {
9251 if (status_ptr && ret) {
9252 status = host_to_target_waitstatus(status);
9253 if (put_user_s32(status, status_ptr))
9254 goto efault;
9256 if (target_rusage) {
9257 rusage_err = host_to_target_rusage(target_rusage, &rusage);
9258 if (rusage_err) {
9259 ret = rusage_err;
9264 break;
9265 #ifdef TARGET_NR_swapoff
9266 case TARGET_NR_swapoff:
9267 if (!(p = lock_user_string(arg1)))
9268 goto efault;
9269 ret = get_errno(swapoff(p));
9270 unlock_user(p, arg1, 0);
9271 break;
9272 #endif
9273 case TARGET_NR_sysinfo:
9275 struct target_sysinfo *target_value;
9276 struct sysinfo value;
9277 ret = get_errno(sysinfo(&value));
9278 if (!is_error(ret) && arg1)
9280 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9281 goto efault;
9282 __put_user(value.uptime, &target_value->uptime);
9283 __put_user(value.loads[0], &target_value->loads[0]);
9284 __put_user(value.loads[1], &target_value->loads[1]);
9285 __put_user(value.loads[2], &target_value->loads[2]);
9286 __put_user(value.totalram, &target_value->totalram);
9287 __put_user(value.freeram, &target_value->freeram);
9288 __put_user(value.sharedram, &target_value->sharedram);
9289 __put_user(value.bufferram, &target_value->bufferram);
9290 __put_user(value.totalswap, &target_value->totalswap);
9291 __put_user(value.freeswap, &target_value->freeswap);
9292 __put_user(value.procs, &target_value->procs);
9293 __put_user(value.totalhigh, &target_value->totalhigh);
9294 __put_user(value.freehigh, &target_value->freehigh);
9295 __put_user(value.mem_unit, &target_value->mem_unit);
9296 unlock_user_struct(target_value, arg1, 1);
9299 break;
9300 #ifdef TARGET_NR_ipc
9301 case TARGET_NR_ipc:
9302 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
9303 break;
9304 #endif
9305 #ifdef TARGET_NR_semget
9306 case TARGET_NR_semget:
9307 ret = get_errno(semget(arg1, arg2, arg3));
9308 break;
9309 #endif
9310 #ifdef TARGET_NR_semop
9311 case TARGET_NR_semop:
9312 ret = do_semop(arg1, arg2, arg3);
9313 break;
9314 #endif
9315 #ifdef TARGET_NR_semctl
9316 case TARGET_NR_semctl:
9317 ret = do_semctl(arg1, arg2, arg3, arg4);
9318 break;
9319 #endif
9320 #ifdef TARGET_NR_msgctl
9321 case TARGET_NR_msgctl:
9322 ret = do_msgctl(arg1, arg2, arg3);
9323 break;
9324 #endif
9325 #ifdef TARGET_NR_msgget
9326 case TARGET_NR_msgget:
9327 ret = get_errno(msgget(arg1, arg2));
9328 break;
9329 #endif
9330 #ifdef TARGET_NR_msgrcv
9331 case TARGET_NR_msgrcv:
9332 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9333 break;
9334 #endif
9335 #ifdef TARGET_NR_msgsnd
9336 case TARGET_NR_msgsnd:
9337 ret = do_msgsnd(arg1, arg2, arg3, arg4);
9338 break;
9339 #endif
9340 #ifdef TARGET_NR_shmget
9341 case TARGET_NR_shmget:
9342 ret = get_errno(shmget(arg1, arg2, arg3));
9343 break;
9344 #endif
9345 #ifdef TARGET_NR_shmctl
9346 case TARGET_NR_shmctl:
9347 ret = do_shmctl(arg1, arg2, arg3);
9348 break;
9349 #endif
9350 #ifdef TARGET_NR_shmat
9351 case TARGET_NR_shmat:
9352 ret = do_shmat(arg1, arg2, arg3);
9353 break;
9354 #endif
9355 #ifdef TARGET_NR_shmdt
9356 case TARGET_NR_shmdt:
9357 ret = do_shmdt(arg1);
9358 break;
9359 #endif
9360 case TARGET_NR_fsync:
9361 ret = get_errno(fsync(arg1));
9362 break;
9363 case TARGET_NR_clone:
9364 /* Linux manages to have three different orderings for its
9365 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9366 * match the kernel's CONFIG_CLONE_* settings.
9367 * Microblaze is further special in that it uses a sixth
9368 * implicit argument to clone for the TLS pointer.
9370 #if defined(TARGET_MICROBLAZE)
9371 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9372 #elif defined(TARGET_CLONE_BACKWARDS)
9373 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9374 #elif defined(TARGET_CLONE_BACKWARDS2)
9375 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9376 #else
9377 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9378 #endif
9379 break;
9380 #ifdef __NR_exit_group
9381 /* new thread calls */
9382 case TARGET_NR_exit_group:
9383 #ifdef TARGET_GPROF
9384 _mcleanup();
9385 #endif
9386 gdb_exit(cpu_env, arg1);
9387 ret = get_errno(exit_group(arg1));
9388 break;
9389 #endif
9390 case TARGET_NR_setdomainname:
9391 if (!(p = lock_user_string(arg1)))
9392 goto efault;
9393 ret = get_errno(setdomainname(p, arg2));
9394 unlock_user(p, arg1, 0);
9395 break;
9396 case TARGET_NR_uname:
9397 /* no need to transcode because we use the linux syscall */
9399 struct new_utsname * buf;
9401 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9402 goto efault;
9403 ret = get_errno(sys_uname(buf));
9404 if (!is_error(ret)) {
9405 /* Overwrite the native machine name with whatever is being
9406 emulated. */
9407 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
9408 /* Allow the user to override the reported release. */
9409 if (qemu_uname_release && *qemu_uname_release) {
9410 g_strlcpy(buf->release, qemu_uname_release,
9411 sizeof(buf->release));
9414 unlock_user_struct(buf, arg1, 1);
9416 break;
9417 #ifdef TARGET_I386
9418 case TARGET_NR_modify_ldt:
9419 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
9420 break;
9421 #if !defined(TARGET_X86_64)
9422 case TARGET_NR_vm86old:
9423 goto unimplemented;
9424 case TARGET_NR_vm86:
9425 ret = do_vm86(cpu_env, arg1, arg2);
9426 break;
9427 #endif
9428 #endif
9429 case TARGET_NR_adjtimex:
9430 goto unimplemented;
9431 #ifdef TARGET_NR_create_module
9432 case TARGET_NR_create_module:
9433 #endif
9434 case TARGET_NR_init_module:
9435 case TARGET_NR_delete_module:
9436 #ifdef TARGET_NR_get_kernel_syms
9437 case TARGET_NR_get_kernel_syms:
9438 #endif
9439 goto unimplemented;
9440 case TARGET_NR_quotactl:
9441 goto unimplemented;
9442 case TARGET_NR_getpgid:
9443 ret = get_errno(getpgid(arg1));
9444 break;
9445 case TARGET_NR_fchdir:
9446 ret = get_errno(fchdir(arg1));
9447 break;
9448 #ifdef TARGET_NR_bdflush /* not on x86_64 */
9449 case TARGET_NR_bdflush:
9450 goto unimplemented;
9451 #endif
9452 #ifdef TARGET_NR_sysfs
9453 case TARGET_NR_sysfs:
9454 goto unimplemented;
9455 #endif
9456 case TARGET_NR_personality:
9457 ret = get_errno(personality(arg1));
9458 break;
9459 #ifdef TARGET_NR_afs_syscall
9460 case TARGET_NR_afs_syscall:
9461 goto unimplemented;
9462 #endif
9463 #ifdef TARGET_NR__llseek /* Not on alpha */
9464 case TARGET_NR__llseek:
9466 int64_t res;
9467 #if !defined(__NR_llseek)
9468 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9469 if (res == -1) {
9470 ret = get_errno(res);
9471 } else {
9472 ret = 0;
9474 #else
9475 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9476 #endif
9477 if ((ret == 0) && put_user_s64(res, arg4)) {
9478 goto efault;
9481 break;
9482 #endif
9483 #ifdef TARGET_NR_getdents
9484 case TARGET_NR_getdents:
9485 #ifdef __NR_getdents
9486 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9488 struct target_dirent *target_dirp;
9489 struct linux_dirent *dirp;
9490 abi_long count = arg3;
9492 dirp = g_try_malloc(count);
9493 if (!dirp) {
9494 ret = -TARGET_ENOMEM;
9495 goto fail;
9498 ret = get_errno(sys_getdents(arg1, dirp, count));
9499 if (!is_error(ret)) {
9500 struct linux_dirent *de;
9501 struct target_dirent *tde;
9502 int len = ret;
9503 int reclen, treclen;
9504 int count1, tnamelen;
9506 count1 = 0;
9507 de = dirp;
9508 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9509 goto efault;
9510 tde = target_dirp;
9511 while (len > 0) {
9512 reclen = de->d_reclen;
9513 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9514 assert(tnamelen >= 0);
9515 treclen = tnamelen + offsetof(struct target_dirent, d_name);
9516 assert(count1 + treclen <= count);
9517 tde->d_reclen = tswap16(treclen);
9518 tde->d_ino = tswapal(de->d_ino);
9519 tde->d_off = tswapal(de->d_off);
9520 memcpy(tde->d_name, de->d_name, tnamelen);
9521 de = (struct linux_dirent *)((char *)de + reclen);
9522 len -= reclen;
9523 tde = (struct target_dirent *)((char *)tde + treclen);
9524 count1 += treclen;
9526 ret = count1;
9527 unlock_user(target_dirp, arg2, ret);
9529 g_free(dirp);
9531 #else
9533 struct linux_dirent *dirp;
9534 abi_long count = arg3;
9536 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9537 goto efault;
9538 ret = get_errno(sys_getdents(arg1, dirp, count));
9539 if (!is_error(ret)) {
9540 struct linux_dirent *de;
9541 int len = ret;
9542 int reclen;
9543 de = dirp;
9544 while (len > 0) {
9545 reclen = de->d_reclen;
9546 if (reclen > len)
9547 break;
9548 de->d_reclen = tswap16(reclen);
9549 tswapls(&de->d_ino);
9550 tswapls(&de->d_off);
9551 de = (struct linux_dirent *)((char *)de + reclen);
9552 len -= reclen;
9555 unlock_user(dirp, arg2, ret);
9557 #endif
9558 #else
9559 /* Implement getdents in terms of getdents64 */
9561 struct linux_dirent64 *dirp;
9562 abi_long count = arg3;
9564 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9565 if (!dirp) {
9566 goto efault;
9568 ret = get_errno(sys_getdents64(arg1, dirp, count));
9569 if (!is_error(ret)) {
9570 /* Convert the dirent64 structs to target dirent. We do this
9571 * in-place, since we can guarantee that a target_dirent is no
9572 * larger than a dirent64; however this means we have to be
9573 * careful to read everything before writing in the new format.
9575 struct linux_dirent64 *de;
9576 struct target_dirent *tde;
9577 int len = ret;
9578 int tlen = 0;
9580 de = dirp;
9581 tde = (struct target_dirent *)dirp;
9582 while (len > 0) {
9583 int namelen, treclen;
9584 int reclen = de->d_reclen;
9585 uint64_t ino = de->d_ino;
9586 int64_t off = de->d_off;
9587 uint8_t type = de->d_type;
9589 namelen = strlen(de->d_name);
9590 treclen = offsetof(struct target_dirent, d_name)
9591 + namelen + 2;
9592 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9594 memmove(tde->d_name, de->d_name, namelen + 1);
9595 tde->d_ino = tswapal(ino);
9596 tde->d_off = tswapal(off);
9597 tde->d_reclen = tswap16(treclen);
9598 /* The target_dirent type is in what was formerly a padding
9599 * byte at the end of the structure:
9601 *(((char *)tde) + treclen - 1) = type;
9603 de = (struct linux_dirent64 *)((char *)de + reclen);
9604 tde = (struct target_dirent *)((char *)tde + treclen);
9605 len -= reclen;
9606 tlen += treclen;
9608 ret = tlen;
9610 unlock_user(dirp, arg2, ret);
9612 #endif
9613 break;
9614 #endif /* TARGET_NR_getdents */
9615 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9616 case TARGET_NR_getdents64:
9618 struct linux_dirent64 *dirp;
9619 abi_long count = arg3;
9620 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9621 goto efault;
9622 ret = get_errno(sys_getdents64(arg1, dirp, count));
9623 if (!is_error(ret)) {
9624 struct linux_dirent64 *de;
9625 int len = ret;
9626 int reclen;
9627 de = dirp;
9628 while (len > 0) {
9629 reclen = de->d_reclen;
9630 if (reclen > len)
9631 break;
9632 de->d_reclen = tswap16(reclen);
9633 tswap64s((uint64_t *)&de->d_ino);
9634 tswap64s((uint64_t *)&de->d_off);
9635 de = (struct linux_dirent64 *)((char *)de + reclen);
9636 len -= reclen;
9639 unlock_user(dirp, arg2, ret);
9641 break;
9642 #endif /* TARGET_NR_getdents64 */
9643 #if defined(TARGET_NR__newselect)
9644 case TARGET_NR__newselect:
9645 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9646 break;
9647 #endif
9648 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9649 # ifdef TARGET_NR_poll
9650 case TARGET_NR_poll:
9651 # endif
9652 # ifdef TARGET_NR_ppoll
9653 case TARGET_NR_ppoll:
9654 # endif
9656 struct target_pollfd *target_pfd;
9657 unsigned int nfds = arg2;
9658 struct pollfd *pfd;
9659 unsigned int i;
9661 pfd = NULL;
9662 target_pfd = NULL;
9663 if (nfds) {
9664 target_pfd = lock_user(VERIFY_WRITE, arg1,
9665 sizeof(struct target_pollfd) * nfds, 1);
9666 if (!target_pfd) {
9667 goto efault;
9670 pfd = alloca(sizeof(struct pollfd) * nfds);
9671 for (i = 0; i < nfds; i++) {
9672 pfd[i].fd = tswap32(target_pfd[i].fd);
9673 pfd[i].events = tswap16(target_pfd[i].events);
9677 switch (num) {
9678 # ifdef TARGET_NR_ppoll
9679 case TARGET_NR_ppoll:
9681 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9682 target_sigset_t *target_set;
9683 sigset_t _set, *set = &_set;
9685 if (arg3) {
9686 if (target_to_host_timespec(timeout_ts, arg3)) {
9687 unlock_user(target_pfd, arg1, 0);
9688 goto efault;
9690 } else {
9691 timeout_ts = NULL;
9694 if (arg4) {
9695 if (arg5 != sizeof(target_sigset_t)) {
9696 unlock_user(target_pfd, arg1, 0);
9697 ret = -TARGET_EINVAL;
9698 break;
9701 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9702 if (!target_set) {
9703 unlock_user(target_pfd, arg1, 0);
9704 goto efault;
9706 target_to_host_sigset(set, target_set);
9707 } else {
9708 set = NULL;
9711 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9712 set, SIGSET_T_SIZE));
9714 if (!is_error(ret) && arg3) {
9715 host_to_target_timespec(arg3, timeout_ts);
9717 if (arg4) {
9718 unlock_user(target_set, arg4, 0);
9720 break;
9722 # endif
9723 # ifdef TARGET_NR_poll
9724 case TARGET_NR_poll:
9726 struct timespec ts, *pts;
9728 if (arg3 >= 0) {
9729 /* Convert ms to secs, ns */
9730 ts.tv_sec = arg3 / 1000;
9731 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9732 pts = &ts;
9733 } else {
9734 /* -ve poll() timeout means "infinite" */
9735 pts = NULL;
9737 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9738 break;
9740 # endif
9741 default:
9742 g_assert_not_reached();
9745 if (!is_error(ret)) {
9746 for(i = 0; i < nfds; i++) {
9747 target_pfd[i].revents = tswap16(pfd[i].revents);
9750 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9752 break;
9753 #endif
9754 case TARGET_NR_flock:
9755 /* NOTE: the flock constant seems to be the same for every
9756 Linux platform */
9757 ret = get_errno(safe_flock(arg1, arg2));
9758 break;
9759 case TARGET_NR_readv:
9761 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9762 if (vec != NULL) {
9763 ret = get_errno(safe_readv(arg1, vec, arg3));
9764 unlock_iovec(vec, arg2, arg3, 1);
9765 } else {
9766 ret = -host_to_target_errno(errno);
9769 break;
9770 case TARGET_NR_writev:
9772 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9773 if (vec != NULL) {
9774 ret = get_errno(safe_writev(arg1, vec, arg3));
9775 unlock_iovec(vec, arg2, arg3, 0);
9776 } else {
9777 ret = -host_to_target_errno(errno);
9780 break;
9781 case TARGET_NR_getsid:
9782 ret = get_errno(getsid(arg1));
9783 break;
9784 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9785 case TARGET_NR_fdatasync:
9786 ret = get_errno(fdatasync(arg1));
9787 break;
9788 #endif
9789 #ifdef TARGET_NR__sysctl
9790 case TARGET_NR__sysctl:
9791 /* We don't implement this, but ENOTDIR is always a safe
9792 return value. */
9793 ret = -TARGET_ENOTDIR;
9794 break;
9795 #endif
9796 case TARGET_NR_sched_getaffinity:
9798 unsigned int mask_size;
9799 unsigned long *mask;
9802 * sched_getaffinity needs multiples of ulong, so need to take
9803 * care of mismatches between target ulong and host ulong sizes.
9805 if (arg2 & (sizeof(abi_ulong) - 1)) {
9806 ret = -TARGET_EINVAL;
9807 break;
9809 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9811 mask = alloca(mask_size);
9812 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9814 if (!is_error(ret)) {
9815 if (ret > arg2) {
9816 /* More data returned than the caller's buffer will fit.
9817 * This only happens if sizeof(abi_long) < sizeof(long)
9818 * and the caller passed us a buffer holding an odd number
9819 * of abi_longs. If the host kernel is actually using the
9820 * extra 4 bytes then fail EINVAL; otherwise we can just
9821 * ignore them and only copy the interesting part.
9823 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9824 if (numcpus > arg2 * 8) {
9825 ret = -TARGET_EINVAL;
9826 break;
9828 ret = arg2;
9831 if (copy_to_user(arg3, mask, ret)) {
9832 goto efault;
9836 break;
9837 case TARGET_NR_sched_setaffinity:
9839 unsigned int mask_size;
9840 unsigned long *mask;
9843 * sched_setaffinity needs multiples of ulong, so need to take
9844 * care of mismatches between target ulong and host ulong sizes.
9846 if (arg2 & (sizeof(abi_ulong) - 1)) {
9847 ret = -TARGET_EINVAL;
9848 break;
9850 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9852 mask = alloca(mask_size);
9853 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
9854 goto efault;
9856 memcpy(mask, p, arg2);
9857 unlock_user_struct(p, arg2, 0);
9859 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9861 break;
9862 case TARGET_NR_sched_setparam:
9864 struct sched_param *target_schp;
9865 struct sched_param schp;
9867 if (arg2 == 0) {
9868 return -TARGET_EINVAL;
9870 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9871 goto efault;
9872 schp.sched_priority = tswap32(target_schp->sched_priority);
9873 unlock_user_struct(target_schp, arg2, 0);
9874 ret = get_errno(sched_setparam(arg1, &schp));
9876 break;
9877 case TARGET_NR_sched_getparam:
9879 struct sched_param *target_schp;
9880 struct sched_param schp;
9882 if (arg2 == 0) {
9883 return -TARGET_EINVAL;
9885 ret = get_errno(sched_getparam(arg1, &schp));
9886 if (!is_error(ret)) {
9887 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9888 goto efault;
9889 target_schp->sched_priority = tswap32(schp.sched_priority);
9890 unlock_user_struct(target_schp, arg2, 1);
9893 break;
9894 case TARGET_NR_sched_setscheduler:
9896 struct sched_param *target_schp;
9897 struct sched_param schp;
9898 if (arg3 == 0) {
9899 return -TARGET_EINVAL;
9901 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9902 goto efault;
9903 schp.sched_priority = tswap32(target_schp->sched_priority);
9904 unlock_user_struct(target_schp, arg3, 0);
9905 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
9907 break;
9908 case TARGET_NR_sched_getscheduler:
9909 ret = get_errno(sched_getscheduler(arg1));
9910 break;
9911 case TARGET_NR_sched_yield:
9912 ret = get_errno(sched_yield());
9913 break;
9914 case TARGET_NR_sched_get_priority_max:
9915 ret = get_errno(sched_get_priority_max(arg1));
9916 break;
9917 case TARGET_NR_sched_get_priority_min:
9918 ret = get_errno(sched_get_priority_min(arg1));
9919 break;
9920 case TARGET_NR_sched_rr_get_interval:
9922 struct timespec ts;
9923 ret = get_errno(sched_rr_get_interval(arg1, &ts));
9924 if (!is_error(ret)) {
9925 ret = host_to_target_timespec(arg2, &ts);
9928 break;
9929 case TARGET_NR_nanosleep:
9931 struct timespec req, rem;
9932 target_to_host_timespec(&req, arg1);
9933 ret = get_errno(safe_nanosleep(&req, &rem));
9934 if (is_error(ret) && arg2) {
9935 host_to_target_timespec(arg2, &rem);
9938 break;
9939 #ifdef TARGET_NR_query_module
9940 case TARGET_NR_query_module:
9941 goto unimplemented;
9942 #endif
9943 #ifdef TARGET_NR_nfsservctl
9944 case TARGET_NR_nfsservctl:
9945 goto unimplemented;
9946 #endif
9947 case TARGET_NR_prctl:
9948 switch (arg1) {
9949 case PR_GET_PDEATHSIG:
9951 int deathsig;
9952 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9953 if (!is_error(ret) && arg2
9954 && put_user_ual(deathsig, arg2)) {
9955 goto efault;
9957 break;
9959 #ifdef PR_GET_NAME
9960 case PR_GET_NAME:
9962 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9963 if (!name) {
9964 goto efault;
9966 ret = get_errno(prctl(arg1, (unsigned long)name,
9967 arg3, arg4, arg5));
9968 unlock_user(name, arg2, 16);
9969 break;
9971 case PR_SET_NAME:
9973 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9974 if (!name) {
9975 goto efault;
9977 ret = get_errno(prctl(arg1, (unsigned long)name,
9978 arg3, arg4, arg5));
9979 unlock_user(name, arg2, 0);
9980 break;
9982 #endif
9983 default:
9984 /* Most prctl options have no pointer arguments */
9985 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
9986 break;
9988 break;
9989 #ifdef TARGET_NR_arch_prctl
9990 case TARGET_NR_arch_prctl:
9991 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9992 ret = do_arch_prctl(cpu_env, arg1, arg2);
9993 break;
9994 #else
9995 goto unimplemented;
9996 #endif
9997 #endif
9998 #ifdef TARGET_NR_pread64
9999 case TARGET_NR_pread64:
10000 if (regpairs_aligned(cpu_env)) {
10001 arg4 = arg5;
10002 arg5 = arg6;
10004 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
10005 goto efault;
10006 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10007 unlock_user(p, arg2, ret);
10008 break;
10009 case TARGET_NR_pwrite64:
10010 if (regpairs_aligned(cpu_env)) {
10011 arg4 = arg5;
10012 arg5 = arg6;
10014 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
10015 goto efault;
10016 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10017 unlock_user(p, arg2, 0);
10018 break;
10019 #endif
10020 case TARGET_NR_getcwd:
10021 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10022 goto efault;
10023 ret = get_errno(sys_getcwd1(p, arg2));
10024 unlock_user(p, arg1, ret);
10025 break;
10026 case TARGET_NR_capget:
10027 case TARGET_NR_capset:
10029 struct target_user_cap_header *target_header;
10030 struct target_user_cap_data *target_data = NULL;
10031 struct __user_cap_header_struct header;
10032 struct __user_cap_data_struct data[2];
10033 struct __user_cap_data_struct *dataptr = NULL;
10034 int i, target_datalen;
10035 int data_items = 1;
10037 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10038 goto efault;
10040 header.version = tswap32(target_header->version);
10041 header.pid = tswap32(target_header->pid);
10043 if (header.version != _LINUX_CAPABILITY_VERSION) {
10044 /* Version 2 and up takes pointer to two user_data structs */
10045 data_items = 2;
10048 target_datalen = sizeof(*target_data) * data_items;
10050 if (arg2) {
10051 if (num == TARGET_NR_capget) {
10052 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10053 } else {
10054 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10056 if (!target_data) {
10057 unlock_user_struct(target_header, arg1, 0);
10058 goto efault;
10061 if (num == TARGET_NR_capset) {
10062 for (i = 0; i < data_items; i++) {
10063 data[i].effective = tswap32(target_data[i].effective);
10064 data[i].permitted = tswap32(target_data[i].permitted);
10065 data[i].inheritable = tswap32(target_data[i].inheritable);
10069 dataptr = data;
10072 if (num == TARGET_NR_capget) {
10073 ret = get_errno(capget(&header, dataptr));
10074 } else {
10075 ret = get_errno(capset(&header, dataptr));
10078 /* The kernel always updates version for both capget and capset */
10079 target_header->version = tswap32(header.version);
10080 unlock_user_struct(target_header, arg1, 1);
10082 if (arg2) {
10083 if (num == TARGET_NR_capget) {
10084 for (i = 0; i < data_items; i++) {
10085 target_data[i].effective = tswap32(data[i].effective);
10086 target_data[i].permitted = tswap32(data[i].permitted);
10087 target_data[i].inheritable = tswap32(data[i].inheritable);
10089 unlock_user(target_data, arg2, target_datalen);
10090 } else {
10091 unlock_user(target_data, arg2, 0);
10094 break;
10096 case TARGET_NR_sigaltstack:
10097 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
10098 break;
10100 #ifdef CONFIG_SENDFILE
10101 case TARGET_NR_sendfile:
10103 off_t *offp = NULL;
10104 off_t off;
10105 if (arg3) {
10106 ret = get_user_sal(off, arg3);
10107 if (is_error(ret)) {
10108 break;
10110 offp = &off;
10112 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10113 if (!is_error(ret) && arg3) {
10114 abi_long ret2 = put_user_sal(off, arg3);
10115 if (is_error(ret2)) {
10116 ret = ret2;
10119 break;
10121 #ifdef TARGET_NR_sendfile64
10122 case TARGET_NR_sendfile64:
10124 off_t *offp = NULL;
10125 off_t off;
10126 if (arg3) {
10127 ret = get_user_s64(off, arg3);
10128 if (is_error(ret)) {
10129 break;
10131 offp = &off;
10133 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10134 if (!is_error(ret) && arg3) {
10135 abi_long ret2 = put_user_s64(off, arg3);
10136 if (is_error(ret2)) {
10137 ret = ret2;
10140 break;
10142 #endif
10143 #else
10144 case TARGET_NR_sendfile:
10145 #ifdef TARGET_NR_sendfile64
10146 case TARGET_NR_sendfile64:
10147 #endif
10148 goto unimplemented;
10149 #endif
10151 #ifdef TARGET_NR_getpmsg
10152 case TARGET_NR_getpmsg:
10153 goto unimplemented;
10154 #endif
10155 #ifdef TARGET_NR_putpmsg
10156 case TARGET_NR_putpmsg:
10157 goto unimplemented;
10158 #endif
10159 #ifdef TARGET_NR_vfork
10160 case TARGET_NR_vfork:
10161 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
10162 0, 0, 0, 0));
10163 break;
10164 #endif
10165 #ifdef TARGET_NR_ugetrlimit
10166 case TARGET_NR_ugetrlimit:
10168 struct rlimit rlim;
10169 int resource = target_to_host_resource(arg1);
10170 ret = get_errno(getrlimit(resource, &rlim));
10171 if (!is_error(ret)) {
10172 struct target_rlimit *target_rlim;
10173 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10174 goto efault;
10175 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10176 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10177 unlock_user_struct(target_rlim, arg2, 1);
10179 break;
10181 #endif
10182 #ifdef TARGET_NR_truncate64
10183 case TARGET_NR_truncate64:
10184 if (!(p = lock_user_string(arg1)))
10185 goto efault;
10186 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10187 unlock_user(p, arg1, 0);
10188 break;
10189 #endif
10190 #ifdef TARGET_NR_ftruncate64
10191 case TARGET_NR_ftruncate64:
10192 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10193 break;
10194 #endif
10195 #ifdef TARGET_NR_stat64
10196 case TARGET_NR_stat64:
10197 if (!(p = lock_user_string(arg1)))
10198 goto efault;
10199 ret = get_errno(stat(path(p), &st));
10200 unlock_user(p, arg1, 0);
10201 if (!is_error(ret))
10202 ret = host_to_target_stat64(cpu_env, arg2, &st);
10203 break;
10204 #endif
10205 #ifdef TARGET_NR_lstat64
10206 case TARGET_NR_lstat64:
10207 if (!(p = lock_user_string(arg1)))
10208 goto efault;
10209 ret = get_errno(lstat(path(p), &st));
10210 unlock_user(p, arg1, 0);
10211 if (!is_error(ret))
10212 ret = host_to_target_stat64(cpu_env, arg2, &st);
10213 break;
10214 #endif
10215 #ifdef TARGET_NR_fstat64
10216 case TARGET_NR_fstat64:
10217 ret = get_errno(fstat(arg1, &st));
10218 if (!is_error(ret))
10219 ret = host_to_target_stat64(cpu_env, arg2, &st);
10220 break;
10221 #endif
10222 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10223 #ifdef TARGET_NR_fstatat64
10224 case TARGET_NR_fstatat64:
10225 #endif
10226 #ifdef TARGET_NR_newfstatat
10227 case TARGET_NR_newfstatat:
10228 #endif
10229 if (!(p = lock_user_string(arg2)))
10230 goto efault;
10231 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10232 if (!is_error(ret))
10233 ret = host_to_target_stat64(cpu_env, arg3, &st);
10234 break;
10235 #endif
10236 #ifdef TARGET_NR_lchown
10237 case TARGET_NR_lchown:
10238 if (!(p = lock_user_string(arg1)))
10239 goto efault;
10240 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10241 unlock_user(p, arg1, 0);
10242 break;
10243 #endif
10244 #ifdef TARGET_NR_getuid
10245 case TARGET_NR_getuid:
10246 ret = get_errno(high2lowuid(getuid()));
10247 break;
10248 #endif
10249 #ifdef TARGET_NR_getgid
10250 case TARGET_NR_getgid:
10251 ret = get_errno(high2lowgid(getgid()));
10252 break;
10253 #endif
10254 #ifdef TARGET_NR_geteuid
10255 case TARGET_NR_geteuid:
10256 ret = get_errno(high2lowuid(geteuid()));
10257 break;
10258 #endif
10259 #ifdef TARGET_NR_getegid
10260 case TARGET_NR_getegid:
10261 ret = get_errno(high2lowgid(getegid()));
10262 break;
10263 #endif
10264 case TARGET_NR_setreuid:
10265 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10266 break;
10267 case TARGET_NR_setregid:
10268 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10269 break;
10270 case TARGET_NR_getgroups:
10272 int gidsetsize = arg1;
10273 target_id *target_grouplist;
10274 gid_t *grouplist;
10275 int i;
10277 grouplist = alloca(gidsetsize * sizeof(gid_t));
10278 ret = get_errno(getgroups(gidsetsize, grouplist));
10279 if (gidsetsize == 0)
10280 break;
10281 if (!is_error(ret)) {
10282 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10283 if (!target_grouplist)
10284 goto efault;
10285 for(i = 0;i < ret; i++)
10286 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10287 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10290 break;
10291 case TARGET_NR_setgroups:
10293 int gidsetsize = arg1;
10294 target_id *target_grouplist;
10295 gid_t *grouplist = NULL;
10296 int i;
10297 if (gidsetsize) {
10298 grouplist = alloca(gidsetsize * sizeof(gid_t));
10299 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10300 if (!target_grouplist) {
10301 ret = -TARGET_EFAULT;
10302 goto fail;
10304 for (i = 0; i < gidsetsize; i++) {
10305 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10307 unlock_user(target_grouplist, arg2, 0);
10309 ret = get_errno(setgroups(gidsetsize, grouplist));
10311 break;
10312 case TARGET_NR_fchown:
10313 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10314 break;
10315 #if defined(TARGET_NR_fchownat)
10316 case TARGET_NR_fchownat:
10317 if (!(p = lock_user_string(arg2)))
10318 goto efault;
10319 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10320 low2highgid(arg4), arg5));
10321 unlock_user(p, arg2, 0);
10322 break;
10323 #endif
10324 #ifdef TARGET_NR_setresuid
10325 case TARGET_NR_setresuid:
10326 ret = get_errno(sys_setresuid(low2highuid(arg1),
10327 low2highuid(arg2),
10328 low2highuid(arg3)));
10329 break;
10330 #endif
10331 #ifdef TARGET_NR_getresuid
10332 case TARGET_NR_getresuid:
10334 uid_t ruid, euid, suid;
10335 ret = get_errno(getresuid(&ruid, &euid, &suid));
10336 if (!is_error(ret)) {
10337 if (put_user_id(high2lowuid(ruid), arg1)
10338 || put_user_id(high2lowuid(euid), arg2)
10339 || put_user_id(high2lowuid(suid), arg3))
10340 goto efault;
10343 break;
10344 #endif
10345 #ifdef TARGET_NR_getresgid
10346 case TARGET_NR_setresgid:
10347 ret = get_errno(sys_setresgid(low2highgid(arg1),
10348 low2highgid(arg2),
10349 low2highgid(arg3)));
10350 break;
10351 #endif
10352 #ifdef TARGET_NR_getresgid
10353 case TARGET_NR_getresgid:
10355 gid_t rgid, egid, sgid;
10356 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10357 if (!is_error(ret)) {
10358 if (put_user_id(high2lowgid(rgid), arg1)
10359 || put_user_id(high2lowgid(egid), arg2)
10360 || put_user_id(high2lowgid(sgid), arg3))
10361 goto efault;
10364 break;
10365 #endif
10366 #ifdef TARGET_NR_chown
10367 case TARGET_NR_chown:
10368 if (!(p = lock_user_string(arg1)))
10369 goto efault;
10370 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10371 unlock_user(p, arg1, 0);
10372 break;
10373 #endif
10374 case TARGET_NR_setuid:
10375 ret = get_errno(sys_setuid(low2highuid(arg1)));
10376 break;
10377 case TARGET_NR_setgid:
10378 ret = get_errno(sys_setgid(low2highgid(arg1)));
10379 break;
10380 case TARGET_NR_setfsuid:
10381 ret = get_errno(setfsuid(arg1));
10382 break;
10383 case TARGET_NR_setfsgid:
10384 ret = get_errno(setfsgid(arg1));
10385 break;
10387 #ifdef TARGET_NR_lchown32
10388 case TARGET_NR_lchown32:
10389 if (!(p = lock_user_string(arg1)))
10390 goto efault;
10391 ret = get_errno(lchown(p, arg2, arg3));
10392 unlock_user(p, arg1, 0);
10393 break;
10394 #endif
10395 #ifdef TARGET_NR_getuid32
10396 case TARGET_NR_getuid32:
10397 ret = get_errno(getuid());
10398 break;
10399 #endif
10401 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10402 /* Alpha specific */
10403 case TARGET_NR_getxuid:
10405 uid_t euid;
10406 euid=geteuid();
10407 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
10409 ret = get_errno(getuid());
10410 break;
10411 #endif
10412 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10413 /* Alpha specific */
10414 case TARGET_NR_getxgid:
10416 uid_t egid;
10417 egid=getegid();
10418 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
10420 ret = get_errno(getgid());
10421 break;
10422 #endif
10423 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10424 /* Alpha specific */
10425 case TARGET_NR_osf_getsysinfo:
10426 ret = -TARGET_EOPNOTSUPP;
10427 switch (arg1) {
10428 case TARGET_GSI_IEEE_FP_CONTROL:
10430 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
10432 /* Copied from linux ieee_fpcr_to_swcr. */
10433 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
10434 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
10435 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
10436 | SWCR_TRAP_ENABLE_DZE
10437 | SWCR_TRAP_ENABLE_OVF);
10438 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
10439 | SWCR_TRAP_ENABLE_INE);
10440 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
10441 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
10443 if (put_user_u64 (swcr, arg2))
10444 goto efault;
10445 ret = 0;
10447 break;
10449 /* case GSI_IEEE_STATE_AT_SIGNAL:
10450 -- Not implemented in linux kernel.
10451 case GSI_UACPROC:
10452 -- Retrieves current unaligned access state; not much used.
10453 case GSI_PROC_TYPE:
10454 -- Retrieves implver information; surely not used.
10455 case GSI_GET_HWRPB:
10456 -- Grabs a copy of the HWRPB; surely not used.
10459 break;
10460 #endif
10461 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10462 /* Alpha specific */
10463 case TARGET_NR_osf_setsysinfo:
10464 ret = -TARGET_EOPNOTSUPP;
10465 switch (arg1) {
10466 case TARGET_SSI_IEEE_FP_CONTROL:
10468 uint64_t swcr, fpcr, orig_fpcr;
10470 if (get_user_u64 (swcr, arg2)) {
10471 goto efault;
10473 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10474 fpcr = orig_fpcr & FPCR_DYN_MASK;
10476 /* Copied from linux ieee_swcr_to_fpcr. */
10477 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
10478 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
10479 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
10480 | SWCR_TRAP_ENABLE_DZE
10481 | SWCR_TRAP_ENABLE_OVF)) << 48;
10482 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
10483 | SWCR_TRAP_ENABLE_INE)) << 57;
10484 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
10485 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
10487 cpu_alpha_store_fpcr(cpu_env, fpcr);
10488 ret = 0;
10490 break;
10492 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10494 uint64_t exc, fpcr, orig_fpcr;
10495 int si_code;
10497 if (get_user_u64(exc, arg2)) {
10498 goto efault;
10501 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10503 /* We only add to the exception status here. */
10504 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
10506 cpu_alpha_store_fpcr(cpu_env, fpcr);
10507 ret = 0;
10509 /* Old exceptions are not signaled. */
10510 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
10512 /* If any exceptions set by this call,
10513 and are unmasked, send a signal. */
10514 si_code = 0;
10515 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
10516 si_code = TARGET_FPE_FLTRES;
10518 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
10519 si_code = TARGET_FPE_FLTUND;
10521 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
10522 si_code = TARGET_FPE_FLTOVF;
10524 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
10525 si_code = TARGET_FPE_FLTDIV;
10527 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
10528 si_code = TARGET_FPE_FLTINV;
10530 if (si_code != 0) {
10531 target_siginfo_t info;
10532 info.si_signo = SIGFPE;
10533 info.si_errno = 0;
10534 info.si_code = si_code;
10535 info._sifields._sigfault._addr
10536 = ((CPUArchState *)cpu_env)->pc;
10537 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
10540 break;
10542 /* case SSI_NVPAIRS:
10543 -- Used with SSIN_UACPROC to enable unaligned accesses.
10544 case SSI_IEEE_STATE_AT_SIGNAL:
10545 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10546 -- Not implemented in linux kernel
10549 break;
10550 #endif
10551 #ifdef TARGET_NR_osf_sigprocmask
10552 /* Alpha specific. */
10553 case TARGET_NR_osf_sigprocmask:
10555 abi_ulong mask;
10556 int how;
10557 sigset_t set, oldset;
10559 switch(arg1) {
10560 case TARGET_SIG_BLOCK:
10561 how = SIG_BLOCK;
10562 break;
10563 case TARGET_SIG_UNBLOCK:
10564 how = SIG_UNBLOCK;
10565 break;
10566 case TARGET_SIG_SETMASK:
10567 how = SIG_SETMASK;
10568 break;
10569 default:
10570 ret = -TARGET_EINVAL;
10571 goto fail;
10573 mask = arg2;
10574 target_to_host_old_sigset(&set, &mask);
10575 ret = do_sigprocmask(how, &set, &oldset);
10576 if (!ret) {
10577 host_to_target_old_sigset(&mask, &oldset);
10578 ret = mask;
10581 break;
10582 #endif
10584 #ifdef TARGET_NR_getgid32
10585 case TARGET_NR_getgid32:
10586 ret = get_errno(getgid());
10587 break;
10588 #endif
10589 #ifdef TARGET_NR_geteuid32
10590 case TARGET_NR_geteuid32:
10591 ret = get_errno(geteuid());
10592 break;
10593 #endif
10594 #ifdef TARGET_NR_getegid32
10595 case TARGET_NR_getegid32:
10596 ret = get_errno(getegid());
10597 break;
10598 #endif
10599 #ifdef TARGET_NR_setreuid32
10600 case TARGET_NR_setreuid32:
10601 ret = get_errno(setreuid(arg1, arg2));
10602 break;
10603 #endif
10604 #ifdef TARGET_NR_setregid32
10605 case TARGET_NR_setregid32:
10606 ret = get_errno(setregid(arg1, arg2));
10607 break;
10608 #endif
10609 #ifdef TARGET_NR_getgroups32
10610 case TARGET_NR_getgroups32:
10612 int gidsetsize = arg1;
10613 uint32_t *target_grouplist;
10614 gid_t *grouplist;
10615 int i;
10617 grouplist = alloca(gidsetsize * sizeof(gid_t));
10618 ret = get_errno(getgroups(gidsetsize, grouplist));
10619 if (gidsetsize == 0)
10620 break;
10621 if (!is_error(ret)) {
10622 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10623 if (!target_grouplist) {
10624 ret = -TARGET_EFAULT;
10625 goto fail;
10627 for(i = 0;i < ret; i++)
10628 target_grouplist[i] = tswap32(grouplist[i]);
10629 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10632 break;
10633 #endif
10634 #ifdef TARGET_NR_setgroups32
10635 case TARGET_NR_setgroups32:
10637 int gidsetsize = arg1;
10638 uint32_t *target_grouplist;
10639 gid_t *grouplist;
10640 int i;
10642 grouplist = alloca(gidsetsize * sizeof(gid_t));
10643 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10644 if (!target_grouplist) {
10645 ret = -TARGET_EFAULT;
10646 goto fail;
10648 for(i = 0;i < gidsetsize; i++)
10649 grouplist[i] = tswap32(target_grouplist[i]);
10650 unlock_user(target_grouplist, arg2, 0);
10651 ret = get_errno(setgroups(gidsetsize, grouplist));
10653 break;
10654 #endif
10655 #ifdef TARGET_NR_fchown32
10656 case TARGET_NR_fchown32:
10657 ret = get_errno(fchown(arg1, arg2, arg3));
10658 break;
10659 #endif
10660 #ifdef TARGET_NR_setresuid32
10661 case TARGET_NR_setresuid32:
10662 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
10663 break;
10664 #endif
10665 #ifdef TARGET_NR_getresuid32
10666 case TARGET_NR_getresuid32:
10668 uid_t ruid, euid, suid;
10669 ret = get_errno(getresuid(&ruid, &euid, &suid));
10670 if (!is_error(ret)) {
10671 if (put_user_u32(ruid, arg1)
10672 || put_user_u32(euid, arg2)
10673 || put_user_u32(suid, arg3))
10674 goto efault;
10677 break;
10678 #endif
10679 #ifdef TARGET_NR_setresgid32
10680 case TARGET_NR_setresgid32:
10681 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
10682 break;
10683 #endif
10684 #ifdef TARGET_NR_getresgid32
10685 case TARGET_NR_getresgid32:
10687 gid_t rgid, egid, sgid;
10688 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10689 if (!is_error(ret)) {
10690 if (put_user_u32(rgid, arg1)
10691 || put_user_u32(egid, arg2)
10692 || put_user_u32(sgid, arg3))
10693 goto efault;
10696 break;
10697 #endif
10698 #ifdef TARGET_NR_chown32
10699 case TARGET_NR_chown32:
10700 if (!(p = lock_user_string(arg1)))
10701 goto efault;
10702 ret = get_errno(chown(p, arg2, arg3));
10703 unlock_user(p, arg1, 0);
10704 break;
10705 #endif
10706 #ifdef TARGET_NR_setuid32
10707 case TARGET_NR_setuid32:
10708 ret = get_errno(sys_setuid(arg1));
10709 break;
10710 #endif
10711 #ifdef TARGET_NR_setgid32
10712 case TARGET_NR_setgid32:
10713 ret = get_errno(sys_setgid(arg1));
10714 break;
10715 #endif
10716 #ifdef TARGET_NR_setfsuid32
10717 case TARGET_NR_setfsuid32:
10718 ret = get_errno(setfsuid(arg1));
10719 break;
10720 #endif
10721 #ifdef TARGET_NR_setfsgid32
10722 case TARGET_NR_setfsgid32:
10723 ret = get_errno(setfsgid(arg1));
10724 break;
10725 #endif
10727 case TARGET_NR_pivot_root:
10728 goto unimplemented;
10729 #ifdef TARGET_NR_mincore
10730 case TARGET_NR_mincore:
10732 void *a;
10733 ret = -TARGET_EFAULT;
10734 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
10735 goto efault;
10736 if (!(p = lock_user_string(arg3)))
10737 goto mincore_fail;
10738 ret = get_errno(mincore(a, arg2, p));
10739 unlock_user(p, arg3, ret);
10740 mincore_fail:
10741 unlock_user(a, arg1, 0);
10743 break;
10744 #endif
10745 #ifdef TARGET_NR_arm_fadvise64_64
10746 case TARGET_NR_arm_fadvise64_64:
10747 /* arm_fadvise64_64 looks like fadvise64_64 but
10748 * with different argument order: fd, advice, offset, len
10749 * rather than the usual fd, offset, len, advice.
10750 * Note that offset and len are both 64-bit so appear as
10751 * pairs of 32-bit registers.
10753 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10754 target_offset64(arg5, arg6), arg2);
10755 ret = -host_to_target_errno(ret);
10756 break;
10757 #endif
10759 #if TARGET_ABI_BITS == 32
10761 #ifdef TARGET_NR_fadvise64_64
10762 case TARGET_NR_fadvise64_64:
10763 /* 6 args: fd, offset (high, low), len (high, low), advice */
10764 if (regpairs_aligned(cpu_env)) {
10765 /* offset is in (3,4), len in (5,6) and advice in 7 */
10766 arg2 = arg3;
10767 arg3 = arg4;
10768 arg4 = arg5;
10769 arg5 = arg6;
10770 arg6 = arg7;
10772 ret = -host_to_target_errno(posix_fadvise(arg1,
10773 target_offset64(arg2, arg3),
10774 target_offset64(arg4, arg5),
10775 arg6));
10776 break;
10777 #endif
10779 #ifdef TARGET_NR_fadvise64
10780 case TARGET_NR_fadvise64:
10781 /* 5 args: fd, offset (high, low), len, advice */
10782 if (regpairs_aligned(cpu_env)) {
10783 /* offset is in (3,4), len in 5 and advice in 6 */
10784 arg2 = arg3;
10785 arg3 = arg4;
10786 arg4 = arg5;
10787 arg5 = arg6;
10789 ret = -host_to_target_errno(posix_fadvise(arg1,
10790 target_offset64(arg2, arg3),
10791 arg4, arg5));
10792 break;
10793 #endif
10795 #else /* not a 32-bit ABI */
10796 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10797 #ifdef TARGET_NR_fadvise64_64
10798 case TARGET_NR_fadvise64_64:
10799 #endif
10800 #ifdef TARGET_NR_fadvise64
10801 case TARGET_NR_fadvise64:
10802 #endif
10803 #ifdef TARGET_S390X
10804 switch (arg4) {
10805 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10806 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10807 case 6: arg4 = POSIX_FADV_DONTNEED; break;
10808 case 7: arg4 = POSIX_FADV_NOREUSE; break;
10809 default: break;
10811 #endif
10812 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10813 break;
10814 #endif
10815 #endif /* end of 64-bit ABI fadvise handling */
10817 #ifdef TARGET_NR_madvise
10818 case TARGET_NR_madvise:
10819 /* A straight passthrough may not be safe because qemu sometimes
10820 turns private file-backed mappings into anonymous mappings.
10821 This will break MADV_DONTNEED.
10822 This is a hint, so ignoring and returning success is ok. */
10823 ret = get_errno(0);
10824 break;
10825 #endif
10826 #if TARGET_ABI_BITS == 32
10827 case TARGET_NR_fcntl64:
10829 int cmd;
10830 struct flock64 fl;
10831 from_flock64_fn *copyfrom = copy_from_user_flock64;
10832 to_flock64_fn *copyto = copy_to_user_flock64;
10834 #ifdef TARGET_ARM
10835 if (((CPUARMState *)cpu_env)->eabi) {
10836 copyfrom = copy_from_user_eabi_flock64;
10837 copyto = copy_to_user_eabi_flock64;
10839 #endif
10841 cmd = target_to_host_fcntl_cmd(arg2);
10842 if (cmd == -TARGET_EINVAL) {
10843 ret = cmd;
10844 break;
10847 switch(arg2) {
10848 case TARGET_F_GETLK64:
10849 ret = copyfrom(&fl, arg3);
10850 if (ret) {
10851 break;
10853 ret = get_errno(fcntl(arg1, cmd, &fl));
10854 if (ret == 0) {
10855 ret = copyto(arg3, &fl);
10857 break;
10859 case TARGET_F_SETLK64:
10860 case TARGET_F_SETLKW64:
10861 ret = copyfrom(&fl, arg3);
10862 if (ret) {
10863 break;
10865 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10866 break;
10867 default:
10868 ret = do_fcntl(arg1, arg2, arg3);
10869 break;
10871 break;
10873 #endif
10874 #ifdef TARGET_NR_cacheflush
10875 case TARGET_NR_cacheflush:
10876 /* self-modifying code is handled automatically, so nothing needed */
10877 ret = 0;
10878 break;
10879 #endif
10880 #ifdef TARGET_NR_security
10881 case TARGET_NR_security:
10882 goto unimplemented;
10883 #endif
10884 #ifdef TARGET_NR_getpagesize
10885 case TARGET_NR_getpagesize:
10886 ret = TARGET_PAGE_SIZE;
10887 break;
10888 #endif
10889 case TARGET_NR_gettid:
10890 ret = get_errno(gettid());
10891 break;
10892 #ifdef TARGET_NR_readahead
10893 case TARGET_NR_readahead:
10894 #if TARGET_ABI_BITS == 32
10895 if (regpairs_aligned(cpu_env)) {
10896 arg2 = arg3;
10897 arg3 = arg4;
10898 arg4 = arg5;
10900 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
10901 #else
10902 ret = get_errno(readahead(arg1, arg2, arg3));
10903 #endif
10904 break;
10905 #endif
10906 #ifdef CONFIG_ATTR
10907 #ifdef TARGET_NR_setxattr
10908 case TARGET_NR_listxattr:
10909 case TARGET_NR_llistxattr:
10911 void *p, *b = 0;
10912 if (arg2) {
10913 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10914 if (!b) {
10915 ret = -TARGET_EFAULT;
10916 break;
10919 p = lock_user_string(arg1);
10920 if (p) {
10921 if (num == TARGET_NR_listxattr) {
10922 ret = get_errno(listxattr(p, b, arg3));
10923 } else {
10924 ret = get_errno(llistxattr(p, b, arg3));
10926 } else {
10927 ret = -TARGET_EFAULT;
10929 unlock_user(p, arg1, 0);
10930 unlock_user(b, arg2, arg3);
10931 break;
10933 case TARGET_NR_flistxattr:
10935 void *b = 0;
10936 if (arg2) {
10937 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10938 if (!b) {
10939 ret = -TARGET_EFAULT;
10940 break;
10943 ret = get_errno(flistxattr(arg1, b, arg3));
10944 unlock_user(b, arg2, arg3);
10945 break;
10947 case TARGET_NR_setxattr:
10948 case TARGET_NR_lsetxattr:
10950 void *p, *n, *v = 0;
10951 if (arg3) {
10952 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10953 if (!v) {
10954 ret = -TARGET_EFAULT;
10955 break;
10958 p = lock_user_string(arg1);
10959 n = lock_user_string(arg2);
10960 if (p && n) {
10961 if (num == TARGET_NR_setxattr) {
10962 ret = get_errno(setxattr(p, n, v, arg4, arg5));
10963 } else {
10964 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
10966 } else {
10967 ret = -TARGET_EFAULT;
10969 unlock_user(p, arg1, 0);
10970 unlock_user(n, arg2, 0);
10971 unlock_user(v, arg3, 0);
10973 break;
10974 case TARGET_NR_fsetxattr:
10976 void *n, *v = 0;
10977 if (arg3) {
10978 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10979 if (!v) {
10980 ret = -TARGET_EFAULT;
10981 break;
10984 n = lock_user_string(arg2);
10985 if (n) {
10986 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
10987 } else {
10988 ret = -TARGET_EFAULT;
10990 unlock_user(n, arg2, 0);
10991 unlock_user(v, arg3, 0);
10993 break;
10994 case TARGET_NR_getxattr:
10995 case TARGET_NR_lgetxattr:
10997 void *p, *n, *v = 0;
10998 if (arg3) {
10999 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11000 if (!v) {
11001 ret = -TARGET_EFAULT;
11002 break;
11005 p = lock_user_string(arg1);
11006 n = lock_user_string(arg2);
11007 if (p && n) {
11008 if (num == TARGET_NR_getxattr) {
11009 ret = get_errno(getxattr(p, n, v, arg4));
11010 } else {
11011 ret = get_errno(lgetxattr(p, n, v, arg4));
11013 } else {
11014 ret = -TARGET_EFAULT;
11016 unlock_user(p, arg1, 0);
11017 unlock_user(n, arg2, 0);
11018 unlock_user(v, arg3, arg4);
11020 break;
11021 case TARGET_NR_fgetxattr:
11023 void *n, *v = 0;
11024 if (arg3) {
11025 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11026 if (!v) {
11027 ret = -TARGET_EFAULT;
11028 break;
11031 n = lock_user_string(arg2);
11032 if (n) {
11033 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11034 } else {
11035 ret = -TARGET_EFAULT;
11037 unlock_user(n, arg2, 0);
11038 unlock_user(v, arg3, arg4);
11040 break;
11041 case TARGET_NR_removexattr:
11042 case TARGET_NR_lremovexattr:
11044 void *p, *n;
11045 p = lock_user_string(arg1);
11046 n = lock_user_string(arg2);
11047 if (p && n) {
11048 if (num == TARGET_NR_removexattr) {
11049 ret = get_errno(removexattr(p, n));
11050 } else {
11051 ret = get_errno(lremovexattr(p, n));
11053 } else {
11054 ret = -TARGET_EFAULT;
11056 unlock_user(p, arg1, 0);
11057 unlock_user(n, arg2, 0);
11059 break;
11060 case TARGET_NR_fremovexattr:
11062 void *n;
11063 n = lock_user_string(arg2);
11064 if (n) {
11065 ret = get_errno(fremovexattr(arg1, n));
11066 } else {
11067 ret = -TARGET_EFAULT;
11069 unlock_user(n, arg2, 0);
11071 break;
11072 #endif
11073 #endif /* CONFIG_ATTR */
11074 #ifdef TARGET_NR_set_thread_area
11075 case TARGET_NR_set_thread_area:
11076 #if defined(TARGET_MIPS)
11077 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11078 ret = 0;
11079 break;
11080 #elif defined(TARGET_CRIS)
11081 if (arg1 & 0xff)
11082 ret = -TARGET_EINVAL;
11083 else {
11084 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11085 ret = 0;
11087 break;
11088 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11089 ret = do_set_thread_area(cpu_env, arg1);
11090 break;
11091 #elif defined(TARGET_M68K)
11093 TaskState *ts = cpu->opaque;
11094 ts->tp_value = arg1;
11095 ret = 0;
11096 break;
11098 #else
11099 goto unimplemented_nowarn;
11100 #endif
11101 #endif
11102 #ifdef TARGET_NR_get_thread_area
11103 case TARGET_NR_get_thread_area:
11104 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11105 ret = do_get_thread_area(cpu_env, arg1);
11106 break;
11107 #elif defined(TARGET_M68K)
11109 TaskState *ts = cpu->opaque;
11110 ret = ts->tp_value;
11111 break;
11113 #else
11114 goto unimplemented_nowarn;
11115 #endif
11116 #endif
11117 #ifdef TARGET_NR_getdomainname
11118 case TARGET_NR_getdomainname:
11119 goto unimplemented_nowarn;
11120 #endif
11122 #ifdef TARGET_NR_clock_gettime
11123 case TARGET_NR_clock_gettime:
11125 struct timespec ts;
11126 ret = get_errno(clock_gettime(arg1, &ts));
11127 if (!is_error(ret)) {
11128 host_to_target_timespec(arg2, &ts);
11130 break;
11132 #endif
11133 #ifdef TARGET_NR_clock_getres
11134 case TARGET_NR_clock_getres:
11136 struct timespec ts;
11137 ret = get_errno(clock_getres(arg1, &ts));
11138 if (!is_error(ret)) {
11139 host_to_target_timespec(arg2, &ts);
11141 break;
11143 #endif
11144 #ifdef TARGET_NR_clock_nanosleep
11145 case TARGET_NR_clock_nanosleep:
11147 struct timespec ts;
11148 target_to_host_timespec(&ts, arg3);
11149 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11150 &ts, arg4 ? &ts : NULL));
11151 if (arg4)
11152 host_to_target_timespec(arg4, &ts);
11154 #if defined(TARGET_PPC)
11155 /* clock_nanosleep is odd in that it returns positive errno values.
11156 * On PPC, CR0 bit 3 should be set in such a situation. */
11157 if (ret && ret != -TARGET_ERESTARTSYS) {
11158 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11160 #endif
11161 break;
11163 #endif
11165 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11166 case TARGET_NR_set_tid_address:
11167 ret = get_errno(set_tid_address((int *)g2h(arg1)));
11168 break;
11169 #endif
11171 case TARGET_NR_tkill:
11172 ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11173 break;
11175 case TARGET_NR_tgkill:
11176 ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
11177 target_to_host_signal(arg3)));
11178 break;
11180 #ifdef TARGET_NR_set_robust_list
11181 case TARGET_NR_set_robust_list:
11182 case TARGET_NR_get_robust_list:
11183 /* The ABI for supporting robust futexes has userspace pass
11184 * the kernel a pointer to a linked list which is updated by
11185 * userspace after the syscall; the list is walked by the kernel
11186 * when the thread exits. Since the linked list in QEMU guest
11187 * memory isn't a valid linked list for the host and we have
11188 * no way to reliably intercept the thread-death event, we can't
11189 * support these. Silently return ENOSYS so that guest userspace
11190 * falls back to a non-robust futex implementation (which should
11191 * be OK except in the corner case of the guest crashing while
11192 * holding a mutex that is shared with another process via
11193 * shared memory).
11195 goto unimplemented_nowarn;
11196 #endif
11198 #if defined(TARGET_NR_utimensat)
11199 case TARGET_NR_utimensat:
11201 struct timespec *tsp, ts[2];
11202 if (!arg3) {
11203 tsp = NULL;
11204 } else {
11205 target_to_host_timespec(ts, arg3);
11206 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11207 tsp = ts;
11209 if (!arg2)
11210 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11211 else {
11212 if (!(p = lock_user_string(arg2))) {
11213 ret = -TARGET_EFAULT;
11214 goto fail;
11216 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11217 unlock_user(p, arg2, 0);
11220 break;
11221 #endif
11222 case TARGET_NR_futex:
11223 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11224 break;
11225 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11226 case TARGET_NR_inotify_init:
11227 ret = get_errno(sys_inotify_init());
11228 break;
11229 #endif
11230 #ifdef CONFIG_INOTIFY1
11231 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11232 case TARGET_NR_inotify_init1:
11233 ret = get_errno(sys_inotify_init1(arg1));
11234 break;
11235 #endif
11236 #endif
11237 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11238 case TARGET_NR_inotify_add_watch:
11239 p = lock_user_string(arg2);
11240 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11241 unlock_user(p, arg2, 0);
11242 break;
11243 #endif
11244 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11245 case TARGET_NR_inotify_rm_watch:
11246 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
11247 break;
11248 #endif
11250 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11251 case TARGET_NR_mq_open:
11253 struct mq_attr posix_mq_attr, *attrp;
11255 p = lock_user_string(arg1 - 1);
11256 if (arg4 != 0) {
11257 copy_from_user_mq_attr (&posix_mq_attr, arg4);
11258 attrp = &posix_mq_attr;
11259 } else {
11260 attrp = 0;
11262 ret = get_errno(mq_open(p, arg2, arg3, attrp));
11263 unlock_user (p, arg1, 0);
11265 break;
11267 case TARGET_NR_mq_unlink:
11268 p = lock_user_string(arg1 - 1);
11269 ret = get_errno(mq_unlink(p));
11270 unlock_user (p, arg1, 0);
11271 break;
11273 case TARGET_NR_mq_timedsend:
11275 struct timespec ts;
11277 p = lock_user (VERIFY_READ, arg2, arg3, 1);
11278 if (arg5 != 0) {
11279 target_to_host_timespec(&ts, arg5);
11280 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11281 host_to_target_timespec(arg5, &ts);
11282 } else {
11283 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11285 unlock_user (p, arg2, arg3);
11287 break;
11289 case TARGET_NR_mq_timedreceive:
11291 struct timespec ts;
11292 unsigned int prio;
11294 p = lock_user (VERIFY_READ, arg2, arg3, 1);
11295 if (arg5 != 0) {
11296 target_to_host_timespec(&ts, arg5);
11297 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11298 &prio, &ts));
11299 host_to_target_timespec(arg5, &ts);
11300 } else {
11301 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11302 &prio, NULL));
11304 unlock_user (p, arg2, arg3);
11305 if (arg4 != 0)
11306 put_user_u32(prio, arg4);
11308 break;
11310 /* Not implemented for now... */
11311 /* case TARGET_NR_mq_notify: */
11312 /* break; */
11314 case TARGET_NR_mq_getsetattr:
11316 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11317 ret = 0;
11318 if (arg3 != 0) {
11319 ret = mq_getattr(arg1, &posix_mq_attr_out);
11320 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11322 if (arg2 != 0) {
11323 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11324 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
11328 break;
11329 #endif
11331 #ifdef CONFIG_SPLICE
11332 #ifdef TARGET_NR_tee
11333 case TARGET_NR_tee:
11335 ret = get_errno(tee(arg1,arg2,arg3,arg4));
11337 break;
11338 #endif
11339 #ifdef TARGET_NR_splice
11340 case TARGET_NR_splice:
11342 loff_t loff_in, loff_out;
11343 loff_t *ploff_in = NULL, *ploff_out = NULL;
11344 if (arg2) {
11345 if (get_user_u64(loff_in, arg2)) {
11346 goto efault;
11348 ploff_in = &loff_in;
11350 if (arg4) {
11351 if (get_user_u64(loff_out, arg4)) {
11352 goto efault;
11354 ploff_out = &loff_out;
11356 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11357 if (arg2) {
11358 if (put_user_u64(loff_in, arg2)) {
11359 goto efault;
11362 if (arg4) {
11363 if (put_user_u64(loff_out, arg4)) {
11364 goto efault;
11368 break;
11369 #endif
11370 #ifdef TARGET_NR_vmsplice
11371 case TARGET_NR_vmsplice:
11373 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11374 if (vec != NULL) {
11375 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11376 unlock_iovec(vec, arg2, arg3, 0);
11377 } else {
11378 ret = -host_to_target_errno(errno);
11381 break;
11382 #endif
11383 #endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        /* Plain eventfds need no read/write translation. */
        fd_trans_unregister(ret);
        break;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        /* Translate the two flag bits whose values differ between
         * target and host; all other bits pass through unchanged. */
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        fd_trans_unregister(ret);
        break;
    }
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        /* 32-bit ABIs split each 64-bit offset across two registers. */
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        /* MIPS o32 inserts an alignment pad register, shifting the
         * 64-bit halves to arg3..arg6 and the flags to arg7. */
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        break;
#endif
#endif
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        ret = do_signalfd4(arg1, arg2, arg4);
        break;
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        /* Legacy signalfd is signalfd4 with no flags. */
        ret = do_signalfd4(arg1, arg2, 0);
        break;
#endif
11455 #if defined(CONFIG_EPOLL)
11456 #if defined(TARGET_NR_epoll_create)
11457 case TARGET_NR_epoll_create:
11458 ret = get_errno(epoll_create(arg1));
11459 break;
11460 #endif
11461 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11462 case TARGET_NR_epoll_create1:
11463 ret = get_errno(epoll_create1(arg1));
11464 break;
11465 #endif
11466 #if defined(TARGET_NR_epoll_ctl)
11467 case TARGET_NR_epoll_ctl:
11469 struct epoll_event ep;
11470 struct epoll_event *epp = 0;
11471 if (arg4) {
11472 struct target_epoll_event *target_ep;
11473 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11474 goto efault;
11476 ep.events = tswap32(target_ep->events);
11477 /* The epoll_data_t union is just opaque data to the kernel,
11478 * so we transfer all 64 bits across and need not worry what
11479 * actual data type it is.
11481 ep.data.u64 = tswap64(target_ep->data.u64);
11482 unlock_user_struct(target_ep, arg4, 0);
11483 epp = &ep;
11485 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11486 break;
11488 #endif
11490 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11491 #if defined(TARGET_NR_epoll_wait)
11492 case TARGET_NR_epoll_wait:
11493 #endif
11494 #if defined(TARGET_NR_epoll_pwait)
11495 case TARGET_NR_epoll_pwait:
11496 #endif
11498 struct target_epoll_event *target_ep;
11499 struct epoll_event *ep;
11500 int epfd = arg1;
11501 int maxevents = arg3;
11502 int timeout = arg4;
11504 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11505 ret = -TARGET_EINVAL;
11506 break;
11509 target_ep = lock_user(VERIFY_WRITE, arg2,
11510 maxevents * sizeof(struct target_epoll_event), 1);
11511 if (!target_ep) {
11512 goto efault;
11515 ep = alloca(maxevents * sizeof(struct epoll_event));
11517 switch (num) {
11518 #if defined(TARGET_NR_epoll_pwait)
11519 case TARGET_NR_epoll_pwait:
11521 target_sigset_t *target_set;
11522 sigset_t _set, *set = &_set;
11524 if (arg5) {
11525 if (arg6 != sizeof(target_sigset_t)) {
11526 ret = -TARGET_EINVAL;
11527 break;
11530 target_set = lock_user(VERIFY_READ, arg5,
11531 sizeof(target_sigset_t), 1);
11532 if (!target_set) {
11533 unlock_user(target_ep, arg2, 0);
11534 goto efault;
11536 target_to_host_sigset(set, target_set);
11537 unlock_user(target_set, arg5, 0);
11538 } else {
11539 set = NULL;
11542 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11543 set, SIGSET_T_SIZE));
11544 break;
11546 #endif
11547 #if defined(TARGET_NR_epoll_wait)
11548 case TARGET_NR_epoll_wait:
11549 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11550 NULL, 0));
11551 break;
11552 #endif
11553 default:
11554 ret = -TARGET_ENOSYS;
11556 if (!is_error(ret)) {
11557 int i;
11558 for (i = 0; i < ret; i++) {
11559 target_ep[i].events = tswap32(ep[i].events);
11560 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11563 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
11564 break;
11566 #endif
11567 #endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);
        /* New-limit pointer is optional; convert it when present. */
        if (arg3) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                goto efault;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        /* Copy the previous limits back out only on success. */
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                goto efault;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        break;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        /* Let the host write the name directly into guest memory. */
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        break;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
            ret = 0xdeadbeef;
            /* Bug fix: the old code fell through here, comparing and
             * returning the *uninitialized* mem_value (undefined
             * behavior) and clobbering ret.  The guest takes SIGSEGV;
             * stop processing the syscall now. */
            break;
        }
        if (mem_value == arg2)
            put_user_u32(arg1, arg6);
        ret = mem_value;
        break;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
    {
        /* Like the kernel implementation and the qemu arm barrier, no-op this? */
        ret = 0;
        break;
    }
#endif
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        /* Guest timer ids index a fixed-size table of host timers. */
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers  + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    break;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                /* Hand the guest an id tagged with TIMER_MAGIC so a
                 * bogus id can be detected later by get_timer_id(). */
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    goto efault;
                }
            }
        }
        break;
    }
#endif
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            target_to_host_itimerspec(&hspec_new, arg3);
            ret = get_errno(
                timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            /* Bug fix: the previous timer value belongs in the guest's
             * old_value pointer (arg4), not arg2 (the flags argument),
             * and arg4 may legitimately be NULL. */
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                goto efault;
            }
        }
        break;
    }
#endif
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            /* get_timer_id() already returned a target error code. */
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        break;
    }
#endif
#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        /* Bug fix: dropped the stray fd_trans_unregister(ret) here —
         * the return value is an overrun count, not a file descriptor,
         * so unregistering it could clobber an unrelated fd's
         * translation state. */
        break;
    }
#endif
#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            /* Release the table slot for reuse by timer_create. */
            g_posix_timers[timerid] = 0;
        }
        break;
    }
#endif
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        /* Flags use the open()-style encoding; translate via the table. */
        ret = get_errno(timerfd_create(arg1,
                        target_to_host_bitmask(arg2, fcntl_flags_tbl)));
        break;
#endif

#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                goto efault;
            }
        }
        break;
#endif

#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            struct itimerspec its_new, its_old, *p_new;

            /* new_value is optional; NULL leaves the timer disarmed. */
            if (arg3) {
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    goto efault;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                goto efault;
            }
        }
        break;
#endif
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        ret = get_errno(ioprio_get(arg1, arg2));
        break;
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        ret = get_errno(ioprio_set(arg1, arg2, arg3));
        break;
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        ret = get_errno(setns(arg1, arg2));
        break;
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        ret = get_errno(unshare(arg1));
        break;
#endif
11824 default:
11825 unimplemented:
11826 gemu_log("qemu: Unsupported syscall: %d\n", num);
11827 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
11828 unimplemented_nowarn:
11829 #endif
11830 ret = -TARGET_ENOSYS;
11831 break;
11833 fail:
11834 #ifdef DEBUG
11835 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
11836 #endif
11837 if(do_strace)
11838 print_syscall_ret(num, ret);
11839 trace_guest_user_syscall_ret(cpu, num, ret);
11840 return ret;
11841 efault:
11842 ret = -TARGET_EFAULT;
11843 goto fail;