4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
38 #include <sys/timex.h>
40 int __clone2(int (*fn
)(void *), void *child_stack_base
,
41 size_t stack_size
, int flags
, void *arg
, ...);
43 #include <sys/socket.h>
47 #include <sys/times.h>
50 #include <sys/statfs.h>
53 #include <sys/sysinfo.h>
54 #include <sys/signalfd.h>
55 //#include <sys/user.h>
56 #include <netinet/ip.h>
57 #include <netinet/tcp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/errqueue.h>
62 #include <linux/random.h>
63 #include "qemu-common.h"
65 #include <sys/timerfd.h>
71 #include <sys/eventfd.h>
74 #include <sys/epoll.h>
77 #include "qemu/xattr.h"
79 #ifdef CONFIG_SENDFILE
80 #include <sys/sendfile.h>
83 #define termios host_termios
84 #define winsize host_winsize
85 #define termio host_termio
86 #define sgttyb host_sgttyb /* same as target */
87 #define tchars host_tchars /* same as target */
88 #define ltchars host_ltchars /* same as target */
90 #include <linux/termios.h>
91 #include <linux/unistd.h>
92 #include <linux/cdrom.h>
93 #include <linux/hdreg.h>
94 #include <linux/soundcard.h>
96 #include <linux/mtio.h>
98 #if defined(CONFIG_FIEMAP)
99 #include <linux/fiemap.h>
101 #include <linux/fb.h>
102 #include <linux/vt.h>
103 #include <linux/dm-ioctl.h>
104 #include <linux/reboot.h>
105 #include <linux/route.h>
106 #include <linux/filter.h>
107 #include <linux/blkpg.h>
108 #include <netpacket/packet.h>
109 #include <linux/netlink.h>
110 #ifdef CONFIG_RTNETLINK
111 #include <linux/rtnetlink.h>
112 #include <linux/if_bridge.h>
114 #include <linux/audit.h>
115 #include "linux_loop.h"
121 #define CLONE_IO 0x80000000 /* Clone io context */
124 /* We can't directly call the host clone syscall, because this will
125 * badly confuse libc (breaking mutexes, for example). So we must
126 * divide clone flags into:
127 * * flag combinations that look like pthread_create()
128 * * flag combinations that look like fork()
129 * * flags we can implement within QEMU itself
130 * * flags we can't support and will return an error for
132 /* For thread creation, all these flags must be present; for
133 * fork, none must be present.
135 #define CLONE_THREAD_FLAGS \
136 (CLONE_VM | CLONE_FS | CLONE_FILES | \
137 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
139 /* These flags are ignored:
140 * CLONE_DETACHED is now ignored by the kernel;
141 * CLONE_IO is just an optimisation hint to the I/O scheduler
143 #define CLONE_IGNORED_FLAGS \
144 (CLONE_DETACHED | CLONE_IO)
146 /* Flags for fork which we can implement within QEMU itself */
147 #define CLONE_OPTIONAL_FORK_FLAGS \
148 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
149 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
151 /* Flags for thread creation which we can implement within QEMU itself */
152 #define CLONE_OPTIONAL_THREAD_FLAGS \
153 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
154 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
156 #define CLONE_INVALID_FORK_FLAGS \
157 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
159 #define CLONE_INVALID_THREAD_FLAGS \
160 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
161 CLONE_IGNORED_FLAGS))
163 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
164 * have almost all been allocated. We cannot support any of
165 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
166 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
167 * The checks against the invalid thread masks above will catch these.
168 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
172 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
173 * once. This exercises the codepaths for restart.
175 //#define DEBUG_ERESTARTSYS
177 //#include <linux/msdos_fs.h>
178 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
179 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
189 #define _syscall0(type,name) \
190 static type name (void) \
192 return syscall(__NR_##name); \
195 #define _syscall1(type,name,type1,arg1) \
196 static type name (type1 arg1) \
198 return syscall(__NR_##name, arg1); \
201 #define _syscall2(type,name,type1,arg1,type2,arg2) \
202 static type name (type1 arg1,type2 arg2) \
204 return syscall(__NR_##name, arg1, arg2); \
207 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
208 static type name (type1 arg1,type2 arg2,type3 arg3) \
210 return syscall(__NR_##name, arg1, arg2, arg3); \
213 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
214 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
216 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
219 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
221 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
223 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
227 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
228 type5,arg5,type6,arg6) \
229 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
232 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
236 #define __NR_sys_uname __NR_uname
237 #define __NR_sys_getcwd1 __NR_getcwd
238 #define __NR_sys_getdents __NR_getdents
239 #define __NR_sys_getdents64 __NR_getdents64
240 #define __NR_sys_getpriority __NR_getpriority
241 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
242 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
243 #define __NR_sys_syslog __NR_syslog
244 #define __NR_sys_futex __NR_futex
245 #define __NR_sys_inotify_init __NR_inotify_init
246 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
247 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
249 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
251 #define __NR__llseek __NR_lseek
254 /* Newer kernel ports have llseek() instead of _llseek() */
255 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
256 #define TARGET_NR__llseek TARGET_NR_llseek
260 _syscall0(int, gettid
)
262 /* This is a replacement for the host gettid() and must return a host
264 static int gettid(void) {
268 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
269 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
271 #if !defined(__NR_getdents) || \
272 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
273 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
275 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
276 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
277 loff_t
*, res
, uint
, wh
);
279 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
280 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
282 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
283 #ifdef __NR_exit_group
284 _syscall1(int,exit_group
,int,error_code
)
286 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
287 _syscall1(int,set_tid_address
,int *,tidptr
)
289 #if defined(TARGET_NR_futex) && defined(__NR_futex)
290 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
291 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
293 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
294 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
295 unsigned long *, user_mask_ptr
);
296 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
297 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
298 unsigned long *, user_mask_ptr
);
299 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
301 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
302 struct __user_cap_data_struct
*, data
);
303 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
304 struct __user_cap_data_struct
*, data
);
305 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
306 _syscall2(int, ioprio_get
, int, which
, int, who
)
308 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
309 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
311 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
312 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
315 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
316 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
317 unsigned long, idx1
, unsigned long, idx2
)
320 static bitmask_transtbl fcntl_flags_tbl
[] = {
321 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
322 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
323 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
324 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
325 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
326 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
327 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
328 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
329 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
330 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
331 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
332 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
333 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
334 #if defined(O_DIRECT)
335 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
337 #if defined(O_NOATIME)
338 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
340 #if defined(O_CLOEXEC)
341 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
344 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
346 #if defined(O_TMPFILE)
347 { TARGET_O_TMPFILE
, TARGET_O_TMPFILE
, O_TMPFILE
, O_TMPFILE
},
349 /* Don't terminate the list prematurely on 64-bit host+guest. */
350 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
351 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
358 QEMU_IFLA_BR_FORWARD_DELAY
,
359 QEMU_IFLA_BR_HELLO_TIME
,
360 QEMU_IFLA_BR_MAX_AGE
,
361 QEMU_IFLA_BR_AGEING_TIME
,
362 QEMU_IFLA_BR_STP_STATE
,
363 QEMU_IFLA_BR_PRIORITY
,
364 QEMU_IFLA_BR_VLAN_FILTERING
,
365 QEMU_IFLA_BR_VLAN_PROTOCOL
,
366 QEMU_IFLA_BR_GROUP_FWD_MASK
,
367 QEMU_IFLA_BR_ROOT_ID
,
368 QEMU_IFLA_BR_BRIDGE_ID
,
369 QEMU_IFLA_BR_ROOT_PORT
,
370 QEMU_IFLA_BR_ROOT_PATH_COST
,
371 QEMU_IFLA_BR_TOPOLOGY_CHANGE
,
372 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
,
373 QEMU_IFLA_BR_HELLO_TIMER
,
374 QEMU_IFLA_BR_TCN_TIMER
,
375 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
,
376 QEMU_IFLA_BR_GC_TIMER
,
377 QEMU_IFLA_BR_GROUP_ADDR
,
378 QEMU_IFLA_BR_FDB_FLUSH
,
379 QEMU_IFLA_BR_MCAST_ROUTER
,
380 QEMU_IFLA_BR_MCAST_SNOOPING
,
381 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
,
382 QEMU_IFLA_BR_MCAST_QUERIER
,
383 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
,
384 QEMU_IFLA_BR_MCAST_HASH_MAX
,
385 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
,
386 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
,
387 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
,
388 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
,
389 QEMU_IFLA_BR_MCAST_QUERIER_INTVL
,
390 QEMU_IFLA_BR_MCAST_QUERY_INTVL
,
391 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
,
392 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
,
393 QEMU_IFLA_BR_NF_CALL_IPTABLES
,
394 QEMU_IFLA_BR_NF_CALL_IP6TABLES
,
395 QEMU_IFLA_BR_NF_CALL_ARPTABLES
,
396 QEMU_IFLA_BR_VLAN_DEFAULT_PVID
,
398 QEMU_IFLA_BR_VLAN_STATS_ENABLED
,
399 QEMU_IFLA_BR_MCAST_STATS_ENABLED
,
423 QEMU_IFLA_NET_NS_PID
,
426 QEMU_IFLA_VFINFO_LIST
,
434 QEMU_IFLA_PROMISCUITY
,
435 QEMU_IFLA_NUM_TX_QUEUES
,
436 QEMU_IFLA_NUM_RX_QUEUES
,
438 QEMU_IFLA_PHYS_PORT_ID
,
439 QEMU_IFLA_CARRIER_CHANGES
,
440 QEMU_IFLA_PHYS_SWITCH_ID
,
441 QEMU_IFLA_LINK_NETNSID
,
442 QEMU_IFLA_PHYS_PORT_NAME
,
443 QEMU_IFLA_PROTO_DOWN
,
444 QEMU_IFLA_GSO_MAX_SEGS
,
445 QEMU_IFLA_GSO_MAX_SIZE
,
452 QEMU_IFLA_BRPORT_UNSPEC
,
453 QEMU_IFLA_BRPORT_STATE
,
454 QEMU_IFLA_BRPORT_PRIORITY
,
455 QEMU_IFLA_BRPORT_COST
,
456 QEMU_IFLA_BRPORT_MODE
,
457 QEMU_IFLA_BRPORT_GUARD
,
458 QEMU_IFLA_BRPORT_PROTECT
,
459 QEMU_IFLA_BRPORT_FAST_LEAVE
,
460 QEMU_IFLA_BRPORT_LEARNING
,
461 QEMU_IFLA_BRPORT_UNICAST_FLOOD
,
462 QEMU_IFLA_BRPORT_PROXYARP
,
463 QEMU_IFLA_BRPORT_LEARNING_SYNC
,
464 QEMU_IFLA_BRPORT_PROXYARP_WIFI
,
465 QEMU_IFLA_BRPORT_ROOT_ID
,
466 QEMU_IFLA_BRPORT_BRIDGE_ID
,
467 QEMU_IFLA_BRPORT_DESIGNATED_PORT
,
468 QEMU_IFLA_BRPORT_DESIGNATED_COST
,
471 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
,
472 QEMU_IFLA_BRPORT_CONFIG_PENDING
,
473 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
,
474 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
,
475 QEMU_IFLA_BRPORT_HOLD_TIMER
,
476 QEMU_IFLA_BRPORT_FLUSH
,
477 QEMU_IFLA_BRPORT_MULTICAST_ROUTER
,
478 QEMU_IFLA_BRPORT_PAD
,
479 QEMU___IFLA_BRPORT_MAX
483 QEMU_IFLA_INFO_UNSPEC
,
486 QEMU_IFLA_INFO_XSTATS
,
487 QEMU_IFLA_INFO_SLAVE_KIND
,
488 QEMU_IFLA_INFO_SLAVE_DATA
,
489 QEMU___IFLA_INFO_MAX
,
493 QEMU_IFLA_INET_UNSPEC
,
495 QEMU___IFLA_INET_MAX
,
499 QEMU_IFLA_INET6_UNSPEC
,
500 QEMU_IFLA_INET6_FLAGS
,
501 QEMU_IFLA_INET6_CONF
,
502 QEMU_IFLA_INET6_STATS
,
503 QEMU_IFLA_INET6_MCAST
,
504 QEMU_IFLA_INET6_CACHEINFO
,
505 QEMU_IFLA_INET6_ICMP6STATS
,
506 QEMU_IFLA_INET6_TOKEN
,
507 QEMU_IFLA_INET6_ADDR_GEN_MODE
,
508 QEMU___IFLA_INET6_MAX
511 typedef abi_long (*TargetFdDataFunc
)(void *, size_t);
512 typedef abi_long (*TargetFdAddrFunc
)(void *, abi_ulong
, socklen_t
);
513 typedef struct TargetFdTrans
{
514 TargetFdDataFunc host_to_target_data
;
515 TargetFdDataFunc target_to_host_data
;
516 TargetFdAddrFunc target_to_host_addr
;
519 static TargetFdTrans
**target_fd_trans
;
521 static unsigned int target_fd_max
;
523 static TargetFdDataFunc
fd_trans_target_to_host_data(int fd
)
525 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
526 return target_fd_trans
[fd
]->target_to_host_data
;
531 static TargetFdDataFunc
fd_trans_host_to_target_data(int fd
)
533 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
534 return target_fd_trans
[fd
]->host_to_target_data
;
539 static TargetFdAddrFunc
fd_trans_target_to_host_addr(int fd
)
541 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
542 return target_fd_trans
[fd
]->target_to_host_addr
;
547 static void fd_trans_register(int fd
, TargetFdTrans
*trans
)
551 if (fd
>= target_fd_max
) {
552 oldmax
= target_fd_max
;
553 target_fd_max
= ((fd
>> 6) + 1) << 6; /* by slice of 64 entries */
554 target_fd_trans
= g_renew(TargetFdTrans
*,
555 target_fd_trans
, target_fd_max
);
556 memset((void *)(target_fd_trans
+ oldmax
), 0,
557 (target_fd_max
- oldmax
) * sizeof(TargetFdTrans
*));
559 target_fd_trans
[fd
] = trans
;
562 static void fd_trans_unregister(int fd
)
564 if (fd
>= 0 && fd
< target_fd_max
) {
565 target_fd_trans
[fd
] = NULL
;
569 static void fd_trans_dup(int oldfd
, int newfd
)
571 fd_trans_unregister(newfd
);
572 if (oldfd
< target_fd_max
&& target_fd_trans
[oldfd
]) {
573 fd_trans_register(newfd
, target_fd_trans
[oldfd
]);
/* getcwd(2)-style helper: fill @buf with the cwd and return the number of
 * bytes used including the trailing NUL, or -1 on failure (errno set by
 * getcwd()).  The kernel syscall returns the byte count, unlike libc.
 */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return -1;
    }
    return strlen(buf) + 1;
}
#ifdef TARGET_NR_utimensat
#if defined(__NR_utimensat)
/* Call the host utimensat(2) directly rather than through libc. */
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Host kernel headers lack utimensat: report ENOSYS to the guest.
 * NOTE(review): reconstructed fallback body — confirm against upstream. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper so the syscall dispatcher has a uniform sys_* entry point. */
static int sys_inotify_init(void)
{
    return inotify_init();
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
638 #if defined(TARGET_NR_prlimit64)
639 #ifndef __NR_prlimit64
640 # define __NR_prlimit64 -1
642 #define __NR_sys_prlimit64 __NR_prlimit64
643 /* The glibc rlimit structure may not be that used by the underlying syscall */
644 struct host_rlimit64
{
648 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
649 const struct host_rlimit64
*, new_limit
,
650 struct host_rlimit64
*, old_limit
)
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time.
 * A slot holding 0 is free; a busy slot holds the host timer_t (or the
 * placeholder (timer_t)1 while being claimed).
 */
static timer_t g_posix_timers[32] = { 0, } ;

/* Claim the first free timer slot and return its index, or -1 when all
 * 32 slots are busy.
 * NOTE(review): reconstructed tail (return paths) — confirm upstream.
 */
static inline int next_free_host_timer(void)
{
    int k;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env, int num)
{
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_SH4)
/* SH4 doesn't align register pairs, except for p{read,write}64 */
static inline int regpairs_aligned(void *cpu_env, int num)
{
    switch (num) {
    case TARGET_NR_pread64:
    case TARGET_NR_pwrite64:
        return 1;

    default:
        return 0;
    }
}
#else
static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
#endif
702 #define ERRNO_TABLE_SIZE 1200
704 /* target_to_host_errno_table[] is initialized from
705 * host_to_target_errno_table[] in syscall_init(). */
706 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
710 * This list is the union of errno values overridden in asm-<arch>/errno.h
711 * minus the errnos that are not actually generic to all archs.
713 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
714 [EAGAIN
] = TARGET_EAGAIN
,
715 [EIDRM
] = TARGET_EIDRM
,
716 [ECHRNG
] = TARGET_ECHRNG
,
717 [EL2NSYNC
] = TARGET_EL2NSYNC
,
718 [EL3HLT
] = TARGET_EL3HLT
,
719 [EL3RST
] = TARGET_EL3RST
,
720 [ELNRNG
] = TARGET_ELNRNG
,
721 [EUNATCH
] = TARGET_EUNATCH
,
722 [ENOCSI
] = TARGET_ENOCSI
,
723 [EL2HLT
] = TARGET_EL2HLT
,
724 [EDEADLK
] = TARGET_EDEADLK
,
725 [ENOLCK
] = TARGET_ENOLCK
,
726 [EBADE
] = TARGET_EBADE
,
727 [EBADR
] = TARGET_EBADR
,
728 [EXFULL
] = TARGET_EXFULL
,
729 [ENOANO
] = TARGET_ENOANO
,
730 [EBADRQC
] = TARGET_EBADRQC
,
731 [EBADSLT
] = TARGET_EBADSLT
,
732 [EBFONT
] = TARGET_EBFONT
,
733 [ENOSTR
] = TARGET_ENOSTR
,
734 [ENODATA
] = TARGET_ENODATA
,
735 [ETIME
] = TARGET_ETIME
,
736 [ENOSR
] = TARGET_ENOSR
,
737 [ENONET
] = TARGET_ENONET
,
738 [ENOPKG
] = TARGET_ENOPKG
,
739 [EREMOTE
] = TARGET_EREMOTE
,
740 [ENOLINK
] = TARGET_ENOLINK
,
741 [EADV
] = TARGET_EADV
,
742 [ESRMNT
] = TARGET_ESRMNT
,
743 [ECOMM
] = TARGET_ECOMM
,
744 [EPROTO
] = TARGET_EPROTO
,
745 [EDOTDOT
] = TARGET_EDOTDOT
,
746 [EMULTIHOP
] = TARGET_EMULTIHOP
,
747 [EBADMSG
] = TARGET_EBADMSG
,
748 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
749 [EOVERFLOW
] = TARGET_EOVERFLOW
,
750 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
751 [EBADFD
] = TARGET_EBADFD
,
752 [EREMCHG
] = TARGET_EREMCHG
,
753 [ELIBACC
] = TARGET_ELIBACC
,
754 [ELIBBAD
] = TARGET_ELIBBAD
,
755 [ELIBSCN
] = TARGET_ELIBSCN
,
756 [ELIBMAX
] = TARGET_ELIBMAX
,
757 [ELIBEXEC
] = TARGET_ELIBEXEC
,
758 [EILSEQ
] = TARGET_EILSEQ
,
759 [ENOSYS
] = TARGET_ENOSYS
,
760 [ELOOP
] = TARGET_ELOOP
,
761 [ERESTART
] = TARGET_ERESTART
,
762 [ESTRPIPE
] = TARGET_ESTRPIPE
,
763 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
764 [EUSERS
] = TARGET_EUSERS
,
765 [ENOTSOCK
] = TARGET_ENOTSOCK
,
766 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
767 [EMSGSIZE
] = TARGET_EMSGSIZE
,
768 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
769 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
770 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
771 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
772 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
773 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
774 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
775 [EADDRINUSE
] = TARGET_EADDRINUSE
,
776 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
777 [ENETDOWN
] = TARGET_ENETDOWN
,
778 [ENETUNREACH
] = TARGET_ENETUNREACH
,
779 [ENETRESET
] = TARGET_ENETRESET
,
780 [ECONNABORTED
] = TARGET_ECONNABORTED
,
781 [ECONNRESET
] = TARGET_ECONNRESET
,
782 [ENOBUFS
] = TARGET_ENOBUFS
,
783 [EISCONN
] = TARGET_EISCONN
,
784 [ENOTCONN
] = TARGET_ENOTCONN
,
785 [EUCLEAN
] = TARGET_EUCLEAN
,
786 [ENOTNAM
] = TARGET_ENOTNAM
,
787 [ENAVAIL
] = TARGET_ENAVAIL
,
788 [EISNAM
] = TARGET_EISNAM
,
789 [EREMOTEIO
] = TARGET_EREMOTEIO
,
790 [EDQUOT
] = TARGET_EDQUOT
,
791 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
792 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
793 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
794 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
795 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
796 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
797 [EALREADY
] = TARGET_EALREADY
,
798 [EINPROGRESS
] = TARGET_EINPROGRESS
,
799 [ESTALE
] = TARGET_ESTALE
,
800 [ECANCELED
] = TARGET_ECANCELED
,
801 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
802 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
804 [ENOKEY
] = TARGET_ENOKEY
,
807 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
810 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
813 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
816 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
818 #ifdef ENOTRECOVERABLE
819 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
822 [ENOMSG
] = TARGET_ENOMSG
,
825 [ERFKILL
] = TARGET_ERFKILL
,
828 [EHWPOISON
] = TARGET_EHWPOISON
,
832 static inline int host_to_target_errno(int err
)
834 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
835 host_to_target_errno_table
[err
]) {
836 return host_to_target_errno_table
[err
];
841 static inline int target_to_host_errno(int err
)
843 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
844 target_to_host_errno_table
[err
]) {
845 return target_to_host_errno_table
[err
];
850 static inline abi_long
get_errno(abi_long ret
)
853 return -host_to_target_errno(errno
);
858 static inline int is_error(abi_long ret
)
860 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
863 const char *target_strerror(int err
)
865 if (err
== TARGET_ERESTARTSYS
) {
866 return "To be restarted";
868 if (err
== TARGET_QEMU_ESIGRETURN
) {
869 return "Successful exit from sigreturn";
872 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
875 return strerror(target_to_host_errno(err
));
878 #define safe_syscall0(type, name) \
879 static type safe_##name(void) \
881 return safe_syscall(__NR_##name); \
884 #define safe_syscall1(type, name, type1, arg1) \
885 static type safe_##name(type1 arg1) \
887 return safe_syscall(__NR_##name, arg1); \
890 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
891 static type safe_##name(type1 arg1, type2 arg2) \
893 return safe_syscall(__NR_##name, arg1, arg2); \
896 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
897 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
899 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
902 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
904 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
906 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
909 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
910 type4, arg4, type5, arg5) \
911 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
914 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
917 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
918 type4, arg4, type5, arg5, type6, arg6) \
919 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
920 type5 arg5, type6 arg6) \
922 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
925 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
926 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
927 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
928 int, flags
, mode_t
, mode
)
929 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
930 struct rusage
*, rusage
)
931 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
932 int, options
, struct rusage
*, rusage
)
933 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
934 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
935 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
936 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
937 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
939 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
940 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
942 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
943 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
944 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
945 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
946 safe_syscall2(int, tkill
, int, tid
, int, sig
)
947 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
948 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
949 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
950 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
951 unsigned long, pos_l
, unsigned long, pos_h
)
952 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
953 unsigned long, pos_l
, unsigned long, pos_h
)
954 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
956 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
957 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
958 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
959 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
960 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
961 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
962 safe_syscall2(int, flock
, int, fd
, int, operation
)
963 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
964 const struct timespec
*, uts
, size_t, sigsetsize
)
965 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
967 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
968 struct timespec
*, rem
)
969 #ifdef TARGET_NR_clock_nanosleep
970 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
971 const struct timespec
*, req
, struct timespec
*, rem
)
974 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
976 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
977 long, msgtype
, int, flags
)
978 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
979 unsigned, nsops
, const struct timespec
*, timeout
)
981 /* This host kernel architecture uses a single ipc syscall; fake up
982 * wrappers for the sub-operations to hide this implementation detail.
983 * Annoyingly we can't include linux/ipc.h to get the constant definitions
984 * for the call parameter because some structs in there conflict with the
985 * sys/ipc.h ones. So we just define them here, and rely on them being
986 * the same for all host architectures.
988 #define Q_SEMTIMEDOP 4
991 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
993 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
994 void *, ptr
, long, fifth
)
995 static int safe_msgsnd(int msgid
, const void *msgp
, size_t sz
, int flags
)
997 return safe_ipc(Q_IPCCALL(0, Q_MSGSND
), msgid
, sz
, flags
, (void *)msgp
, 0);
999 static int safe_msgrcv(int msgid
, void *msgp
, size_t sz
, long type
, int flags
)
1001 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV
), msgid
, sz
, flags
, msgp
, type
);
1003 static int safe_semtimedop(int semid
, struct sembuf
*tsops
, unsigned nsops
,
1004 const struct timespec
*timeout
)
1006 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP
), semid
, nsops
, 0, tsops
,
1010 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1011 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
1012 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
1013 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
1014 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
1016 /* We do ioctl like this rather than via safe_syscall3 to preserve the
1017 * "third argument might be integer or pointer or not present" behaviour of
1018 * the libc function.
1020 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
1021 /* Similarly for fcntl. Note that callers must always:
1022 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
1023 * use the flock64 struct rather than unsuffixed flock
1024 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
1027 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
1029 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
1032 static inline int host_to_target_sock_type(int host_type
)
1036 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
1038 target_type
= TARGET_SOCK_DGRAM
;
1041 target_type
= TARGET_SOCK_STREAM
;
1044 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
1048 #if defined(SOCK_CLOEXEC)
1049 if (host_type
& SOCK_CLOEXEC
) {
1050 target_type
|= TARGET_SOCK_CLOEXEC
;
1054 #if defined(SOCK_NONBLOCK)
1055 if (host_type
& SOCK_NONBLOCK
) {
1056 target_type
|= TARGET_SOCK_NONBLOCK
;
1063 static abi_ulong target_brk
;
1064 static abi_ulong target_original_brk
;
1065 static abi_ulong brk_page
;
1067 void target_set_brk(abi_ulong new_brk
)
1069 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
1070 brk_page
= HOST_PAGE_ALIGN(target_brk
);
1073 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
1074 #define DEBUGF_BRK(message, args...)
1076 /* do_brk() must return target values and target errnos. */
1077 abi_long
do_brk(abi_ulong new_brk
)
1079 abi_long mapped_addr
;
1080 abi_ulong new_alloc_size
;
1082 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
1085 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
1088 if (new_brk
< target_original_brk
) {
1089 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
1094 /* If the new brk is less than the highest page reserved to the
1095 * target heap allocation, set it and we're almost done... */
1096 if (new_brk
<= brk_page
) {
1097 /* Heap contents are initialized to zero, as for anonymous
1099 if (new_brk
> target_brk
) {
1100 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
1102 target_brk
= new_brk
;
1103 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
1107 /* We need to allocate more memory after the brk... Note that
1108 * we don't use MAP_FIXED because that will map over the top of
1109 * any existing mapping (like the one with the host libc or qemu
1110 * itself); instead we treat "mapped but at wrong address" as
1111 * a failure and unmap again.
1113 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
1114 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
1115 PROT_READ
|PROT_WRITE
,
1116 MAP_ANON
|MAP_PRIVATE
, 0, 0));
1118 if (mapped_addr
== brk_page
) {
1119 /* Heap contents are initialized to zero, as for anonymous
1120 * mapped pages. Technically the new pages are already
1121 * initialized to zero since they *are* anonymous mapped
1122 * pages, however we have to take care with the contents that
1123 * come from the remaining part of the previous page: it may
1124 * contains garbage data due to a previous heap usage (grown
1125 * then shrunken). */
1126 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
1128 target_brk
= new_brk
;
1129 brk_page
= HOST_PAGE_ALIGN(target_brk
);
1130 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
1133 } else if (mapped_addr
!= -1) {
1134 /* Mapped but at wrong address, meaning there wasn't actually
1135 * enough space for this brk.
1137 target_munmap(mapped_addr
, new_alloc_size
);
1139 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
1142 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
1145 #if defined(TARGET_ALPHA)
1146 /* We (partially) emulate OSF/1 on Alpha, which requires we
1147 return a proper errno, not an unchanged brk value. */
1148 return -TARGET_ENOMEM
;
1150 /* For everything else, return the previous break. */
1154 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
1155 abi_ulong target_fds_addr
,
1159 abi_ulong b
, *target_fds
;
1161 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1162 if (!(target_fds
= lock_user(VERIFY_READ
,
1164 sizeof(abi_ulong
) * nw
,
1166 return -TARGET_EFAULT
;
1170 for (i
= 0; i
< nw
; i
++) {
1171 /* grab the abi_ulong */
1172 __get_user(b
, &target_fds
[i
]);
1173 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1174 /* check the bit inside the abi_ulong */
1181 unlock_user(target_fds
, target_fds_addr
, 0);
1186 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
1187 abi_ulong target_fds_addr
,
1190 if (target_fds_addr
) {
1191 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
1192 return -TARGET_EFAULT
;
1200 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
1206 abi_ulong
*target_fds
;
1208 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1209 if (!(target_fds
= lock_user(VERIFY_WRITE
,
1211 sizeof(abi_ulong
) * nw
,
1213 return -TARGET_EFAULT
;
1216 for (i
= 0; i
< nw
; i
++) {
1218 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1219 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1222 __put_user(v
, &target_fds
[i
]);
1225 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
/* Host clock-tick rate used when converting clock_t values for the guest.
 * NOTE(review): the #else arm (HOST_HZ 100 for non-Alpha hosts) was lost in
 * extraction; reconstructed here — confirm against the project history.
 */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif
1236 static inline abi_long
host_to_target_clock_t(long ticks
)
1238 #if HOST_HZ == TARGET_HZ
1241 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1245 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1246 const struct rusage
*rusage
)
1248 struct target_rusage
*target_rusage
;
1250 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1251 return -TARGET_EFAULT
;
1252 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1253 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1254 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1255 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1256 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1257 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1258 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1259 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1260 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1261 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1262 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1263 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1264 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1265 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1266 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1267 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1268 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1269 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1270 unlock_user_struct(target_rusage
, target_addr
, 1);
1275 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1277 abi_ulong target_rlim_swap
;
1280 target_rlim_swap
= tswapal(target_rlim
);
1281 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1282 return RLIM_INFINITY
;
1284 result
= target_rlim_swap
;
1285 if (target_rlim_swap
!= (rlim_t
)result
)
1286 return RLIM_INFINITY
;
1291 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1293 abi_ulong target_rlim_swap
;
1296 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1297 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1299 target_rlim_swap
= rlim
;
1300 result
= tswapal(target_rlim_swap
);
1305 static inline int target_to_host_resource(int code
)
1308 case TARGET_RLIMIT_AS
:
1310 case TARGET_RLIMIT_CORE
:
1312 case TARGET_RLIMIT_CPU
:
1314 case TARGET_RLIMIT_DATA
:
1316 case TARGET_RLIMIT_FSIZE
:
1317 return RLIMIT_FSIZE
;
1318 case TARGET_RLIMIT_LOCKS
:
1319 return RLIMIT_LOCKS
;
1320 case TARGET_RLIMIT_MEMLOCK
:
1321 return RLIMIT_MEMLOCK
;
1322 case TARGET_RLIMIT_MSGQUEUE
:
1323 return RLIMIT_MSGQUEUE
;
1324 case TARGET_RLIMIT_NICE
:
1326 case TARGET_RLIMIT_NOFILE
:
1327 return RLIMIT_NOFILE
;
1328 case TARGET_RLIMIT_NPROC
:
1329 return RLIMIT_NPROC
;
1330 case TARGET_RLIMIT_RSS
:
1332 case TARGET_RLIMIT_RTPRIO
:
1333 return RLIMIT_RTPRIO
;
1334 case TARGET_RLIMIT_SIGPENDING
:
1335 return RLIMIT_SIGPENDING
;
1336 case TARGET_RLIMIT_STACK
:
1337 return RLIMIT_STACK
;
1343 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1344 abi_ulong target_tv_addr
)
1346 struct target_timeval
*target_tv
;
1348 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1349 return -TARGET_EFAULT
;
1351 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1352 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1354 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1359 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1360 const struct timeval
*tv
)
1362 struct target_timeval
*target_tv
;
1364 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1365 return -TARGET_EFAULT
;
1367 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1368 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1370 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1375 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1376 abi_ulong target_tz_addr
)
1378 struct target_timezone
*target_tz
;
1380 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1381 return -TARGET_EFAULT
;
1384 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1385 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1387 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1392 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1395 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1396 abi_ulong target_mq_attr_addr
)
1398 struct target_mq_attr
*target_mq_attr
;
1400 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1401 target_mq_attr_addr
, 1))
1402 return -TARGET_EFAULT
;
1404 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1405 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1406 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1407 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1409 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1414 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1415 const struct mq_attr
*attr
)
1417 struct target_mq_attr
*target_mq_attr
;
1419 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1420 target_mq_attr_addr
, 0))
1421 return -TARGET_EFAULT
;
1423 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1424 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1425 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1426 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1428 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1434 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1435 /* do_select() must return target values and target errnos. */
1436 static abi_long
do_select(int n
,
1437 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1438 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1440 fd_set rfds
, wfds
, efds
;
1441 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1443 struct timespec ts
, *ts_ptr
;
1446 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1450 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1454 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1459 if (target_tv_addr
) {
1460 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1461 return -TARGET_EFAULT
;
1462 ts
.tv_sec
= tv
.tv_sec
;
1463 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1469 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1472 if (!is_error(ret
)) {
1473 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1474 return -TARGET_EFAULT
;
1475 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1476 return -TARGET_EFAULT
;
1477 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1478 return -TARGET_EFAULT
;
1480 if (target_tv_addr
) {
1481 tv
.tv_sec
= ts
.tv_sec
;
1482 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1483 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1484 return -TARGET_EFAULT
;
1492 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1493 static abi_long
do_old_select(abi_ulong arg1
)
1495 struct target_sel_arg_struct
*sel
;
1496 abi_ulong inp
, outp
, exp
, tvp
;
1499 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1)) {
1500 return -TARGET_EFAULT
;
1503 nsel
= tswapal(sel
->n
);
1504 inp
= tswapal(sel
->inp
);
1505 outp
= tswapal(sel
->outp
);
1506 exp
= tswapal(sel
->exp
);
1507 tvp
= tswapal(sel
->tvp
);
1509 unlock_user_struct(sel
, arg1
, 0);
1511 return do_select(nsel
, inp
, outp
, exp
, tvp
);
1516 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1519 return pipe2(host_pipe
, flags
);
1525 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1526 int flags
, int is_pipe2
)
1530 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1533 return get_errno(ret
);
1535 /* Several targets have special calling conventions for the original
1536 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1538 #if defined(TARGET_ALPHA)
1539 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1540 return host_pipe
[0];
1541 #elif defined(TARGET_MIPS)
1542 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1543 return host_pipe
[0];
1544 #elif defined(TARGET_SH4)
1545 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1546 return host_pipe
[0];
1547 #elif defined(TARGET_SPARC)
1548 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1549 return host_pipe
[0];
1553 if (put_user_s32(host_pipe
[0], pipedes
)
1554 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1555 return -TARGET_EFAULT
;
1556 return get_errno(ret
);
1559 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1560 abi_ulong target_addr
,
1563 struct target_ip_mreqn
*target_smreqn
;
1565 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1567 return -TARGET_EFAULT
;
1568 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1569 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1570 if (len
== sizeof(struct target_ip_mreqn
))
1571 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1572 unlock_user(target_smreqn
, target_addr
, 0);
1577 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1578 abi_ulong target_addr
,
1581 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1582 sa_family_t sa_family
;
1583 struct target_sockaddr
*target_saddr
;
1585 if (fd_trans_target_to_host_addr(fd
)) {
1586 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1589 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1591 return -TARGET_EFAULT
;
1593 sa_family
= tswap16(target_saddr
->sa_family
);
1595 /* Oops. The caller might send a incomplete sun_path; sun_path
1596 * must be terminated by \0 (see the manual page), but
1597 * unfortunately it is quite common to specify sockaddr_un
1598 * length as "strlen(x->sun_path)" while it should be
1599 * "strlen(...) + 1". We'll fix that here if needed.
1600 * Linux kernel has a similar feature.
1603 if (sa_family
== AF_UNIX
) {
1604 if (len
< unix_maxlen
&& len
> 0) {
1605 char *cp
= (char*)target_saddr
;
1607 if ( cp
[len
-1] && !cp
[len
] )
1610 if (len
> unix_maxlen
)
1614 memcpy(addr
, target_saddr
, len
);
1615 addr
->sa_family
= sa_family
;
1616 if (sa_family
== AF_NETLINK
) {
1617 struct sockaddr_nl
*nladdr
;
1619 nladdr
= (struct sockaddr_nl
*)addr
;
1620 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1621 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1622 } else if (sa_family
== AF_PACKET
) {
1623 struct target_sockaddr_ll
*lladdr
;
1625 lladdr
= (struct target_sockaddr_ll
*)addr
;
1626 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1627 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1629 unlock_user(target_saddr
, target_addr
, 0);
1634 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1635 struct sockaddr
*addr
,
1638 struct target_sockaddr
*target_saddr
;
1645 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1647 return -TARGET_EFAULT
;
1648 memcpy(target_saddr
, addr
, len
);
1649 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1650 sizeof(target_saddr
->sa_family
)) {
1651 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1653 if (addr
->sa_family
== AF_NETLINK
&& len
>= sizeof(struct sockaddr_nl
)) {
1654 struct sockaddr_nl
*target_nl
= (struct sockaddr_nl
*)target_saddr
;
1655 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1656 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1657 } else if (addr
->sa_family
== AF_PACKET
) {
1658 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1659 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1660 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1661 } else if (addr
->sa_family
== AF_INET6
&&
1662 len
>= sizeof(struct target_sockaddr_in6
)) {
1663 struct target_sockaddr_in6
*target_in6
=
1664 (struct target_sockaddr_in6
*)target_saddr
;
1665 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1667 unlock_user(target_saddr
, target_addr
, len
);
1672 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1673 struct target_msghdr
*target_msgh
)
1675 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1676 abi_long msg_controllen
;
1677 abi_ulong target_cmsg_addr
;
1678 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1679 socklen_t space
= 0;
1681 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1682 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1684 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1685 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1686 target_cmsg_start
= target_cmsg
;
1688 return -TARGET_EFAULT
;
1690 while (cmsg
&& target_cmsg
) {
1691 void *data
= CMSG_DATA(cmsg
);
1692 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1694 int len
= tswapal(target_cmsg
->cmsg_len
)
1695 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1697 space
+= CMSG_SPACE(len
);
1698 if (space
> msgh
->msg_controllen
) {
1699 space
-= CMSG_SPACE(len
);
1700 /* This is a QEMU bug, since we allocated the payload
1701 * area ourselves (unlike overflow in host-to-target
1702 * conversion, which is just the guest giving us a buffer
1703 * that's too small). It can't happen for the payload types
1704 * we currently support; if it becomes an issue in future
1705 * we would need to improve our allocation strategy to
1706 * something more intelligent than "twice the size of the
1707 * target buffer we're reading from".
1709 gemu_log("Host cmsg overflow\n");
1713 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1714 cmsg
->cmsg_level
= SOL_SOCKET
;
1716 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1718 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1719 cmsg
->cmsg_len
= CMSG_LEN(len
);
1721 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1722 int *fd
= (int *)data
;
1723 int *target_fd
= (int *)target_data
;
1724 int i
, numfds
= len
/ sizeof(int);
1726 for (i
= 0; i
< numfds
; i
++) {
1727 __get_user(fd
[i
], target_fd
+ i
);
1729 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1730 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1731 struct ucred
*cred
= (struct ucred
*)data
;
1732 struct target_ucred
*target_cred
=
1733 (struct target_ucred
*)target_data
;
1735 __get_user(cred
->pid
, &target_cred
->pid
);
1736 __get_user(cred
->uid
, &target_cred
->uid
);
1737 __get_user(cred
->gid
, &target_cred
->gid
);
1739 gemu_log("Unsupported ancillary data: %d/%d\n",
1740 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1741 memcpy(data
, target_data
, len
);
1744 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1745 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1748 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1750 msgh
->msg_controllen
= space
;
1754 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1755 struct msghdr
*msgh
)
1757 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1758 abi_long msg_controllen
;
1759 abi_ulong target_cmsg_addr
;
1760 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1761 socklen_t space
= 0;
1763 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1764 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1766 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1767 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1768 target_cmsg_start
= target_cmsg
;
1770 return -TARGET_EFAULT
;
1772 while (cmsg
&& target_cmsg
) {
1773 void *data
= CMSG_DATA(cmsg
);
1774 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1776 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1777 int tgt_len
, tgt_space
;
1779 /* We never copy a half-header but may copy half-data;
1780 * this is Linux's behaviour in put_cmsg(). Note that
1781 * truncation here is a guest problem (which we report
1782 * to the guest via the CTRUNC bit), unlike truncation
1783 * in target_to_host_cmsg, which is a QEMU bug.
1785 if (msg_controllen
< sizeof(struct cmsghdr
)) {
1786 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1790 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1791 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1793 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1795 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1797 tgt_len
= TARGET_CMSG_LEN(len
);
1799 /* Payload types which need a different size of payload on
1800 * the target must adjust tgt_len here.
1802 switch (cmsg
->cmsg_level
) {
1804 switch (cmsg
->cmsg_type
) {
1806 tgt_len
= sizeof(struct target_timeval
);
1815 if (msg_controllen
< tgt_len
) {
1816 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1817 tgt_len
= msg_controllen
;
1820 /* We must now copy-and-convert len bytes of payload
1821 * into tgt_len bytes of destination space. Bear in mind
1822 * that in both source and destination we may be dealing
1823 * with a truncated value!
1825 switch (cmsg
->cmsg_level
) {
1827 switch (cmsg
->cmsg_type
) {
1830 int *fd
= (int *)data
;
1831 int *target_fd
= (int *)target_data
;
1832 int i
, numfds
= tgt_len
/ sizeof(int);
1834 for (i
= 0; i
< numfds
; i
++) {
1835 __put_user(fd
[i
], target_fd
+ i
);
1841 struct timeval
*tv
= (struct timeval
*)data
;
1842 struct target_timeval
*target_tv
=
1843 (struct target_timeval
*)target_data
;
1845 if (len
!= sizeof(struct timeval
) ||
1846 tgt_len
!= sizeof(struct target_timeval
)) {
1850 /* copy struct timeval to target */
1851 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1852 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1855 case SCM_CREDENTIALS
:
1857 struct ucred
*cred
= (struct ucred
*)data
;
1858 struct target_ucred
*target_cred
=
1859 (struct target_ucred
*)target_data
;
1861 __put_user(cred
->pid
, &target_cred
->pid
);
1862 __put_user(cred
->uid
, &target_cred
->uid
);
1863 __put_user(cred
->gid
, &target_cred
->gid
);
1872 switch (cmsg
->cmsg_type
) {
1875 uint32_t *v
= (uint32_t *)data
;
1876 uint32_t *t_int
= (uint32_t *)target_data
;
1878 __put_user(*v
, t_int
);
1884 struct sock_extended_err ee
;
1885 struct sockaddr_in offender
;
1887 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
1888 struct errhdr_t
*target_errh
=
1889 (struct errhdr_t
*)target_data
;
1891 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1892 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1893 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1894 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1895 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1896 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1897 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1898 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1899 (void *) &errh
->offender
, sizeof(errh
->offender
));
1908 switch (cmsg
->cmsg_type
) {
1911 uint32_t *v
= (uint32_t *)data
;
1912 uint32_t *t_int
= (uint32_t *)target_data
;
1914 __put_user(*v
, t_int
);
1920 struct sock_extended_err ee
;
1921 struct sockaddr_in6 offender
;
1923 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
1924 struct errhdr6_t
*target_errh
=
1925 (struct errhdr6_t
*)target_data
;
1927 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1928 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1929 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1930 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1931 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1932 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1933 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1934 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1935 (void *) &errh
->offender
, sizeof(errh
->offender
));
1945 gemu_log("Unsupported ancillary data: %d/%d\n",
1946 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1947 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1948 if (tgt_len
> len
) {
1949 memset(target_data
+ len
, 0, tgt_len
- len
);
1953 target_cmsg
->cmsg_len
= tswapal(tgt_len
);
1954 tgt_space
= TARGET_CMSG_SPACE(len
);
1955 if (msg_controllen
< tgt_space
) {
1956 tgt_space
= msg_controllen
;
1958 msg_controllen
-= tgt_space
;
1960 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1961 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1964 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1966 target_msgh
->msg_controllen
= tswapal(space
);
/* Byte-swap every field of a netlink message header in place.  The swap is
 * symmetric, so the same helper serves both conversion directions.
 */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
}
1979 static abi_long
host_to_target_for_each_nlmsg(struct nlmsghdr
*nlh
,
1981 abi_long (*host_to_target_nlmsg
)
1982 (struct nlmsghdr
*))
1987 while (len
> sizeof(struct nlmsghdr
)) {
1989 nlmsg_len
= nlh
->nlmsg_len
;
1990 if (nlmsg_len
< sizeof(struct nlmsghdr
) ||
1995 switch (nlh
->nlmsg_type
) {
1997 tswap_nlmsghdr(nlh
);
2003 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
2004 e
->error
= tswap32(e
->error
);
2005 tswap_nlmsghdr(&e
->msg
);
2006 tswap_nlmsghdr(nlh
);
2010 ret
= host_to_target_nlmsg(nlh
);
2012 tswap_nlmsghdr(nlh
);
2017 tswap_nlmsghdr(nlh
);
2018 len
-= NLMSG_ALIGN(nlmsg_len
);
2019 nlh
= (struct nlmsghdr
*)(((char*)nlh
) + NLMSG_ALIGN(nlmsg_len
));
2024 static abi_long
target_to_host_for_each_nlmsg(struct nlmsghdr
*nlh
,
2026 abi_long (*target_to_host_nlmsg
)
2027 (struct nlmsghdr
*))
2031 while (len
> sizeof(struct nlmsghdr
)) {
2032 if (tswap32(nlh
->nlmsg_len
) < sizeof(struct nlmsghdr
) ||
2033 tswap32(nlh
->nlmsg_len
) > len
) {
2036 tswap_nlmsghdr(nlh
);
2037 switch (nlh
->nlmsg_type
) {
2044 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
2045 e
->error
= tswap32(e
->error
);
2046 tswap_nlmsghdr(&e
->msg
);
2050 ret
= target_to_host_nlmsg(nlh
);
2055 len
-= NLMSG_ALIGN(nlh
->nlmsg_len
);
2056 nlh
= (struct nlmsghdr
*)(((char *)nlh
) + NLMSG_ALIGN(nlh
->nlmsg_len
));
2061 #ifdef CONFIG_RTNETLINK
2062 static abi_long
host_to_target_for_each_nlattr(struct nlattr
*nlattr
,
2063 size_t len
, void *context
,
2064 abi_long (*host_to_target_nlattr
)
2068 unsigned short nla_len
;
2071 while (len
> sizeof(struct nlattr
)) {
2072 nla_len
= nlattr
->nla_len
;
2073 if (nla_len
< sizeof(struct nlattr
) ||
2077 ret
= host_to_target_nlattr(nlattr
, context
);
2078 nlattr
->nla_len
= tswap16(nlattr
->nla_len
);
2079 nlattr
->nla_type
= tswap16(nlattr
->nla_type
);
2083 len
-= NLA_ALIGN(nla_len
);
2084 nlattr
= (struct nlattr
*)(((char *)nlattr
) + NLA_ALIGN(nla_len
));
2089 static abi_long
host_to_target_for_each_rtattr(struct rtattr
*rtattr
,
2091 abi_long (*host_to_target_rtattr
)
2094 unsigned short rta_len
;
2097 while (len
> sizeof(struct rtattr
)) {
2098 rta_len
= rtattr
->rta_len
;
2099 if (rta_len
< sizeof(struct rtattr
) ||
2103 ret
= host_to_target_rtattr(rtattr
);
2104 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
2105 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
2109 len
-= RTA_ALIGN(rta_len
);
2110 rtattr
= (struct rtattr
*)(((char *)rtattr
) + RTA_ALIGN(rta_len
));
2115 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
2117 static abi_long
host_to_target_data_bridge_nlattr(struct nlattr
*nlattr
,
2124 switch (nlattr
->nla_type
) {
2126 case QEMU_IFLA_BR_FDB_FLUSH
:
2129 case QEMU_IFLA_BR_GROUP_ADDR
:
2132 case QEMU_IFLA_BR_VLAN_FILTERING
:
2133 case QEMU_IFLA_BR_TOPOLOGY_CHANGE
:
2134 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
:
2135 case QEMU_IFLA_BR_MCAST_ROUTER
:
2136 case QEMU_IFLA_BR_MCAST_SNOOPING
:
2137 case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
:
2138 case QEMU_IFLA_BR_MCAST_QUERIER
:
2139 case QEMU_IFLA_BR_NF_CALL_IPTABLES
:
2140 case QEMU_IFLA_BR_NF_CALL_IP6TABLES
:
2141 case QEMU_IFLA_BR_NF_CALL_ARPTABLES
:
2144 case QEMU_IFLA_BR_PRIORITY
:
2145 case QEMU_IFLA_BR_VLAN_PROTOCOL
:
2146 case QEMU_IFLA_BR_GROUP_FWD_MASK
:
2147 case QEMU_IFLA_BR_ROOT_PORT
:
2148 case QEMU_IFLA_BR_VLAN_DEFAULT_PVID
:
2149 u16
= NLA_DATA(nlattr
);
2150 *u16
= tswap16(*u16
);
2153 case QEMU_IFLA_BR_FORWARD_DELAY
:
2154 case QEMU_IFLA_BR_HELLO_TIME
:
2155 case QEMU_IFLA_BR_MAX_AGE
:
2156 case QEMU_IFLA_BR_AGEING_TIME
:
2157 case QEMU_IFLA_BR_STP_STATE
:
2158 case QEMU_IFLA_BR_ROOT_PATH_COST
:
2159 case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
:
2160 case QEMU_IFLA_BR_MCAST_HASH_MAX
:
2161 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
:
2162 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
:
2163 u32
= NLA_DATA(nlattr
);
2164 *u32
= tswap32(*u32
);
2167 case QEMU_IFLA_BR_HELLO_TIMER
:
2168 case QEMU_IFLA_BR_TCN_TIMER
:
2169 case QEMU_IFLA_BR_GC_TIMER
:
2170 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
:
2171 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
:
2172 case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
:
2173 case QEMU_IFLA_BR_MCAST_QUERIER_INTVL
:
2174 case QEMU_IFLA_BR_MCAST_QUERY_INTVL
:
2175 case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
:
2176 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
:
2177 u64
= NLA_DATA(nlattr
);
2178 *u64
= tswap64(*u64
);
2180 /* ifla_bridge_id: uin8_t[] */
2181 case QEMU_IFLA_BR_ROOT_ID
:
2182 case QEMU_IFLA_BR_BRIDGE_ID
:
2185 gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr
->nla_type
);
2191 static abi_long
host_to_target_slave_data_bridge_nlattr(struct nlattr
*nlattr
,
2198 switch (nlattr
->nla_type
) {
2200 case QEMU_IFLA_BRPORT_STATE
:
2201 case QEMU_IFLA_BRPORT_MODE
:
2202 case QEMU_IFLA_BRPORT_GUARD
:
2203 case QEMU_IFLA_BRPORT_PROTECT
:
2204 case QEMU_IFLA_BRPORT_FAST_LEAVE
:
2205 case QEMU_IFLA_BRPORT_LEARNING
:
2206 case QEMU_IFLA_BRPORT_UNICAST_FLOOD
:
2207 case QEMU_IFLA_BRPORT_PROXYARP
:
2208 case QEMU_IFLA_BRPORT_LEARNING_SYNC
:
2209 case QEMU_IFLA_BRPORT_PROXYARP_WIFI
:
2210 case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
:
2211 case QEMU_IFLA_BRPORT_CONFIG_PENDING
:
2212 case QEMU_IFLA_BRPORT_MULTICAST_ROUTER
:
2215 case QEMU_IFLA_BRPORT_PRIORITY
:
2216 case QEMU_IFLA_BRPORT_DESIGNATED_PORT
:
2217 case QEMU_IFLA_BRPORT_DESIGNATED_COST
:
2218 case QEMU_IFLA_BRPORT_ID
:
2219 case QEMU_IFLA_BRPORT_NO
:
2220 u16
= NLA_DATA(nlattr
);
2221 *u16
= tswap16(*u16
);
2224 case QEMU_IFLA_BRPORT_COST
:
2225 u32
= NLA_DATA(nlattr
);
2226 *u32
= tswap32(*u32
);
2229 case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
:
2230 case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
:
2231 case QEMU_IFLA_BRPORT_HOLD_TIMER
:
2232 u64
= NLA_DATA(nlattr
);
2233 *u64
= tswap64(*u64
);
2235 /* ifla_bridge_id: uint8_t[] */
2236 case QEMU_IFLA_BRPORT_ROOT_ID
:
2237 case QEMU_IFLA_BRPORT_BRIDGE_ID
:
2240 gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr
->nla_type
);
/* Carries the link kind/slave-kind strings between the IFLA_INFO_KIND and
 * IFLA_INFO_DATA passes of the linkinfo attribute walk.
 * NOTE(review): member list reconstructed from its uses in
 * host_to_target_data_linkinfo_nlattr — confirm against the project source.
 */
struct linkinfo_context {
    int len;
    char *name;
    int slave_len;
    char *slave_name;
};
2253 static abi_long
host_to_target_data_linkinfo_nlattr(struct nlattr
*nlattr
,
2256 struct linkinfo_context
*li_context
= context
;
2258 switch (nlattr
->nla_type
) {
2260 case QEMU_IFLA_INFO_KIND
:
2261 li_context
->name
= NLA_DATA(nlattr
);
2262 li_context
->len
= nlattr
->nla_len
- NLA_HDRLEN
;
2264 case QEMU_IFLA_INFO_SLAVE_KIND
:
2265 li_context
->slave_name
= NLA_DATA(nlattr
);
2266 li_context
->slave_len
= nlattr
->nla_len
- NLA_HDRLEN
;
2269 case QEMU_IFLA_INFO_XSTATS
:
2270 /* FIXME: only used by CAN */
2273 case QEMU_IFLA_INFO_DATA
:
2274 if (strncmp(li_context
->name
, "bridge",
2275 li_context
->len
) == 0) {
2276 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2279 host_to_target_data_bridge_nlattr
);
2281 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context
->name
);
2284 case QEMU_IFLA_INFO_SLAVE_DATA
:
2285 if (strncmp(li_context
->slave_name
, "bridge",
2286 li_context
->slave_len
) == 0) {
2287 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2290 host_to_target_slave_data_bridge_nlattr
);
2292 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2293 li_context
->slave_name
);
2297 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr
->nla_type
);
2304 static abi_long
host_to_target_data_inet_nlattr(struct nlattr
*nlattr
,
2310 switch (nlattr
->nla_type
) {
2311 case QEMU_IFLA_INET_CONF
:
2312 u32
= NLA_DATA(nlattr
);
2313 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2315 u32
[i
] = tswap32(u32
[i
]);
2319 gemu_log("Unknown host AF_INET type: %d\n", nlattr
->nla_type
);
2324 static abi_long
host_to_target_data_inet6_nlattr(struct nlattr
*nlattr
,
2329 struct ifla_cacheinfo
*ci
;
2332 switch (nlattr
->nla_type
) {
2334 case QEMU_IFLA_INET6_TOKEN
:
2337 case QEMU_IFLA_INET6_ADDR_GEN_MODE
:
2340 case QEMU_IFLA_INET6_FLAGS
:
2341 u32
= NLA_DATA(nlattr
);
2342 *u32
= tswap32(*u32
);
2345 case QEMU_IFLA_INET6_CONF
:
2346 u32
= NLA_DATA(nlattr
);
2347 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2349 u32
[i
] = tswap32(u32
[i
]);
2352 /* ifla_cacheinfo */
2353 case QEMU_IFLA_INET6_CACHEINFO
:
2354 ci
= NLA_DATA(nlattr
);
2355 ci
->max_reasm_len
= tswap32(ci
->max_reasm_len
);
2356 ci
->tstamp
= tswap32(ci
->tstamp
);
2357 ci
->reachable_time
= tswap32(ci
->reachable_time
);
2358 ci
->retrans_time
= tswap32(ci
->retrans_time
);
2361 case QEMU_IFLA_INET6_STATS
:
2362 case QEMU_IFLA_INET6_ICMP6STATS
:
2363 u64
= NLA_DATA(nlattr
);
2364 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u64
);
2366 u64
[i
] = tswap64(u64
[i
]);
2370 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr
->nla_type
);
2375 static abi_long
host_to_target_data_spec_nlattr(struct nlattr
*nlattr
,
2378 switch (nlattr
->nla_type
) {
2380 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2382 host_to_target_data_inet_nlattr
);
2384 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2386 host_to_target_data_inet6_nlattr
);
2388 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr
->nla_type
);
2394 static abi_long
host_to_target_data_link_rtattr(struct rtattr
*rtattr
)
2397 struct rtnl_link_stats
*st
;
2398 struct rtnl_link_stats64
*st64
;
2399 struct rtnl_link_ifmap
*map
;
2400 struct linkinfo_context li_context
;
2402 switch (rtattr
->rta_type
) {
2404 case QEMU_IFLA_ADDRESS
:
2405 case QEMU_IFLA_BROADCAST
:
2407 case QEMU_IFLA_IFNAME
:
2408 case QEMU_IFLA_QDISC
:
2411 case QEMU_IFLA_OPERSTATE
:
2412 case QEMU_IFLA_LINKMODE
:
2413 case QEMU_IFLA_CARRIER
:
2414 case QEMU_IFLA_PROTO_DOWN
:
2418 case QEMU_IFLA_LINK
:
2419 case QEMU_IFLA_WEIGHT
:
2420 case QEMU_IFLA_TXQLEN
:
2421 case QEMU_IFLA_CARRIER_CHANGES
:
2422 case QEMU_IFLA_NUM_RX_QUEUES
:
2423 case QEMU_IFLA_NUM_TX_QUEUES
:
2424 case QEMU_IFLA_PROMISCUITY
:
2425 case QEMU_IFLA_EXT_MASK
:
2426 case QEMU_IFLA_LINK_NETNSID
:
2427 case QEMU_IFLA_GROUP
:
2428 case QEMU_IFLA_MASTER
:
2429 case QEMU_IFLA_NUM_VF
:
2430 case QEMU_IFLA_GSO_MAX_SEGS
:
2431 case QEMU_IFLA_GSO_MAX_SIZE
:
2432 u32
= RTA_DATA(rtattr
);
2433 *u32
= tswap32(*u32
);
2435 /* struct rtnl_link_stats */
2436 case QEMU_IFLA_STATS
:
2437 st
= RTA_DATA(rtattr
);
2438 st
->rx_packets
= tswap32(st
->rx_packets
);
2439 st
->tx_packets
= tswap32(st
->tx_packets
);
2440 st
->rx_bytes
= tswap32(st
->rx_bytes
);
2441 st
->tx_bytes
= tswap32(st
->tx_bytes
);
2442 st
->rx_errors
= tswap32(st
->rx_errors
);
2443 st
->tx_errors
= tswap32(st
->tx_errors
);
2444 st
->rx_dropped
= tswap32(st
->rx_dropped
);
2445 st
->tx_dropped
= tswap32(st
->tx_dropped
);
2446 st
->multicast
= tswap32(st
->multicast
);
2447 st
->collisions
= tswap32(st
->collisions
);
2449 /* detailed rx_errors: */
2450 st
->rx_length_errors
= tswap32(st
->rx_length_errors
);
2451 st
->rx_over_errors
= tswap32(st
->rx_over_errors
);
2452 st
->rx_crc_errors
= tswap32(st
->rx_crc_errors
);
2453 st
->rx_frame_errors
= tswap32(st
->rx_frame_errors
);
2454 st
->rx_fifo_errors
= tswap32(st
->rx_fifo_errors
);
2455 st
->rx_missed_errors
= tswap32(st
->rx_missed_errors
);
2457 /* detailed tx_errors */
2458 st
->tx_aborted_errors
= tswap32(st
->tx_aborted_errors
);
2459 st
->tx_carrier_errors
= tswap32(st
->tx_carrier_errors
);
2460 st
->tx_fifo_errors
= tswap32(st
->tx_fifo_errors
);
2461 st
->tx_heartbeat_errors
= tswap32(st
->tx_heartbeat_errors
);
2462 st
->tx_window_errors
= tswap32(st
->tx_window_errors
);
2465 st
->rx_compressed
= tswap32(st
->rx_compressed
);
2466 st
->tx_compressed
= tswap32(st
->tx_compressed
);
2468 /* struct rtnl_link_stats64 */
2469 case QEMU_IFLA_STATS64
:
2470 st64
= RTA_DATA(rtattr
);
2471 st64
->rx_packets
= tswap64(st64
->rx_packets
);
2472 st64
->tx_packets
= tswap64(st64
->tx_packets
);
2473 st64
->rx_bytes
= tswap64(st64
->rx_bytes
);
2474 st64
->tx_bytes
= tswap64(st64
->tx_bytes
);
2475 st64
->rx_errors
= tswap64(st64
->rx_errors
);
2476 st64
->tx_errors
= tswap64(st64
->tx_errors
);
2477 st64
->rx_dropped
= tswap64(st64
->rx_dropped
);
2478 st64
->tx_dropped
= tswap64(st64
->tx_dropped
);
2479 st64
->multicast
= tswap64(st64
->multicast
);
2480 st64
->collisions
= tswap64(st64
->collisions
);
2482 /* detailed rx_errors: */
2483 st64
->rx_length_errors
= tswap64(st64
->rx_length_errors
);
2484 st64
->rx_over_errors
= tswap64(st64
->rx_over_errors
);
2485 st64
->rx_crc_errors
= tswap64(st64
->rx_crc_errors
);
2486 st64
->rx_frame_errors
= tswap64(st64
->rx_frame_errors
);
2487 st64
->rx_fifo_errors
= tswap64(st64
->rx_fifo_errors
);
2488 st64
->rx_missed_errors
= tswap64(st64
->rx_missed_errors
);
2490 /* detailed tx_errors */
2491 st64
->tx_aborted_errors
= tswap64(st64
->tx_aborted_errors
);
2492 st64
->tx_carrier_errors
= tswap64(st64
->tx_carrier_errors
);
2493 st64
->tx_fifo_errors
= tswap64(st64
->tx_fifo_errors
);
2494 st64
->tx_heartbeat_errors
= tswap64(st64
->tx_heartbeat_errors
);
2495 st64
->tx_window_errors
= tswap64(st64
->tx_window_errors
);
2498 st64
->rx_compressed
= tswap64(st64
->rx_compressed
);
2499 st64
->tx_compressed
= tswap64(st64
->tx_compressed
);
2501 /* struct rtnl_link_ifmap */
2503 map
= RTA_DATA(rtattr
);
2504 map
->mem_start
= tswap64(map
->mem_start
);
2505 map
->mem_end
= tswap64(map
->mem_end
);
2506 map
->base_addr
= tswap64(map
->base_addr
);
2507 map
->irq
= tswap16(map
->irq
);
2510 case QEMU_IFLA_LINKINFO
:
2511 memset(&li_context
, 0, sizeof(li_context
));
2512 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2514 host_to_target_data_linkinfo_nlattr
);
2515 case QEMU_IFLA_AF_SPEC
:
2516 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2518 host_to_target_data_spec_nlattr
);
2520 gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2526 static abi_long
host_to_target_data_addr_rtattr(struct rtattr
*rtattr
)
2529 struct ifa_cacheinfo
*ci
;
2531 switch (rtattr
->rta_type
) {
2532 /* binary: depends on family type */
2542 u32
= RTA_DATA(rtattr
);
2543 *u32
= tswap32(*u32
);
2545 /* struct ifa_cacheinfo */
2547 ci
= RTA_DATA(rtattr
);
2548 ci
->ifa_prefered
= tswap32(ci
->ifa_prefered
);
2549 ci
->ifa_valid
= tswap32(ci
->ifa_valid
);
2550 ci
->cstamp
= tswap32(ci
->cstamp
);
2551 ci
->tstamp
= tswap32(ci
->tstamp
);
2554 gemu_log("Unknown host IFA type: %d\n", rtattr
->rta_type
);
2560 static abi_long
host_to_target_data_route_rtattr(struct rtattr
*rtattr
)
2563 switch (rtattr
->rta_type
) {
2564 /* binary: depends on family type */
2573 u32
= RTA_DATA(rtattr
);
2574 *u32
= tswap32(*u32
);
2577 gemu_log("Unknown host RTA type: %d\n", rtattr
->rta_type
);
2583 static abi_long
host_to_target_link_rtattr(struct rtattr
*rtattr
,
2584 uint32_t rtattr_len
)
2586 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2587 host_to_target_data_link_rtattr
);
2590 static abi_long
host_to_target_addr_rtattr(struct rtattr
*rtattr
,
2591 uint32_t rtattr_len
)
2593 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2594 host_to_target_data_addr_rtattr
);
2597 static abi_long
host_to_target_route_rtattr(struct rtattr
*rtattr
,
2598 uint32_t rtattr_len
)
2600 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2601 host_to_target_data_route_rtattr
);
2604 static abi_long
host_to_target_data_route(struct nlmsghdr
*nlh
)
2607 struct ifinfomsg
*ifi
;
2608 struct ifaddrmsg
*ifa
;
2611 nlmsg_len
= nlh
->nlmsg_len
;
2612 switch (nlh
->nlmsg_type
) {
2616 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2617 ifi
= NLMSG_DATA(nlh
);
2618 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2619 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2620 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2621 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2622 host_to_target_link_rtattr(IFLA_RTA(ifi
),
2623 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifi
)));
2629 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2630 ifa
= NLMSG_DATA(nlh
);
2631 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2632 host_to_target_addr_rtattr(IFA_RTA(ifa
),
2633 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifa
)));
2639 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2640 rtm
= NLMSG_DATA(nlh
);
2641 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2642 host_to_target_route_rtattr(RTM_RTA(rtm
),
2643 nlmsg_len
- NLMSG_LENGTH(sizeof(*rtm
)));
2647 return -TARGET_EINVAL
;
2652 static inline abi_long
host_to_target_nlmsg_route(struct nlmsghdr
*nlh
,
2655 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_route
);
2658 static abi_long
target_to_host_for_each_rtattr(struct rtattr
*rtattr
,
2660 abi_long (*target_to_host_rtattr
)
2665 while (len
>= sizeof(struct rtattr
)) {
2666 if (tswap16(rtattr
->rta_len
) < sizeof(struct rtattr
) ||
2667 tswap16(rtattr
->rta_len
) > len
) {
2670 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
2671 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
2672 ret
= target_to_host_rtattr(rtattr
);
2676 len
-= RTA_ALIGN(rtattr
->rta_len
);
2677 rtattr
= (struct rtattr
*)(((char *)rtattr
) +
2678 RTA_ALIGN(rtattr
->rta_len
));
2683 static abi_long
target_to_host_data_link_rtattr(struct rtattr
*rtattr
)
2685 switch (rtattr
->rta_type
) {
2687 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2693 static abi_long
target_to_host_data_addr_rtattr(struct rtattr
*rtattr
)
2695 switch (rtattr
->rta_type
) {
2696 /* binary: depends on family type */
2701 gemu_log("Unknown target IFA type: %d\n", rtattr
->rta_type
);
2707 static abi_long
target_to_host_data_route_rtattr(struct rtattr
*rtattr
)
2710 switch (rtattr
->rta_type
) {
2711 /* binary: depends on family type */
2719 u32
= RTA_DATA(rtattr
);
2720 *u32
= tswap32(*u32
);
2723 gemu_log("Unknown target RTA type: %d\n", rtattr
->rta_type
);
2729 static void target_to_host_link_rtattr(struct rtattr
*rtattr
,
2730 uint32_t rtattr_len
)
2732 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2733 target_to_host_data_link_rtattr
);
2736 static void target_to_host_addr_rtattr(struct rtattr
*rtattr
,
2737 uint32_t rtattr_len
)
2739 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2740 target_to_host_data_addr_rtattr
);
2743 static void target_to_host_route_rtattr(struct rtattr
*rtattr
,
2744 uint32_t rtattr_len
)
2746 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2747 target_to_host_data_route_rtattr
);
2750 static abi_long
target_to_host_data_route(struct nlmsghdr
*nlh
)
2752 struct ifinfomsg
*ifi
;
2753 struct ifaddrmsg
*ifa
;
2756 switch (nlh
->nlmsg_type
) {
2761 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2762 ifi
= NLMSG_DATA(nlh
);
2763 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2764 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2765 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2766 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2767 target_to_host_link_rtattr(IFLA_RTA(ifi
), nlh
->nlmsg_len
-
2768 NLMSG_LENGTH(sizeof(*ifi
)));
2774 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2775 ifa
= NLMSG_DATA(nlh
);
2776 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2777 target_to_host_addr_rtattr(IFA_RTA(ifa
), nlh
->nlmsg_len
-
2778 NLMSG_LENGTH(sizeof(*ifa
)));
2785 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2786 rtm
= NLMSG_DATA(nlh
);
2787 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2788 target_to_host_route_rtattr(RTM_RTA(rtm
), nlh
->nlmsg_len
-
2789 NLMSG_LENGTH(sizeof(*rtm
)));
2793 return -TARGET_EOPNOTSUPP
;
2798 static abi_long
target_to_host_nlmsg_route(struct nlmsghdr
*nlh
, size_t len
)
2800 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_route
);
2802 #endif /* CONFIG_RTNETLINK */
2804 static abi_long
host_to_target_data_audit(struct nlmsghdr
*nlh
)
2806 switch (nlh
->nlmsg_type
) {
2808 gemu_log("Unknown host audit message type %d\n",
2810 return -TARGET_EINVAL
;
2815 static inline abi_long
host_to_target_nlmsg_audit(struct nlmsghdr
*nlh
,
2818 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_audit
);
2821 static abi_long
target_to_host_data_audit(struct nlmsghdr
*nlh
)
2823 switch (nlh
->nlmsg_type
) {
2825 case AUDIT_FIRST_USER_MSG
... AUDIT_LAST_USER_MSG
:
2826 case AUDIT_FIRST_USER_MSG2
... AUDIT_LAST_USER_MSG2
:
2829 gemu_log("Unknown target audit message type %d\n",
2831 return -TARGET_EINVAL
;
2837 static abi_long
target_to_host_nlmsg_audit(struct nlmsghdr
*nlh
, size_t len
)
2839 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_audit
);
2842 /* do_setsockopt() Must return target values and target errnos. */
2843 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2844 abi_ulong optval_addr
, socklen_t optlen
)
2848 struct ip_mreqn
*ip_mreq
;
2849 struct ip_mreq_source
*ip_mreq_source
;
2853 /* TCP options all take an 'int' value. */
2854 if (optlen
< sizeof(uint32_t))
2855 return -TARGET_EINVAL
;
2857 if (get_user_u32(val
, optval_addr
))
2858 return -TARGET_EFAULT
;
2859 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2866 case IP_ROUTER_ALERT
:
2870 case IP_MTU_DISCOVER
:
2877 case IP_MULTICAST_TTL
:
2878 case IP_MULTICAST_LOOP
:
2880 if (optlen
>= sizeof(uint32_t)) {
2881 if (get_user_u32(val
, optval_addr
))
2882 return -TARGET_EFAULT
;
2883 } else if (optlen
>= 1) {
2884 if (get_user_u8(val
, optval_addr
))
2885 return -TARGET_EFAULT
;
2887 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2889 case IP_ADD_MEMBERSHIP
:
2890 case IP_DROP_MEMBERSHIP
:
2891 if (optlen
< sizeof (struct target_ip_mreq
) ||
2892 optlen
> sizeof (struct target_ip_mreqn
))
2893 return -TARGET_EINVAL
;
2895 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2896 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2897 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2900 case IP_BLOCK_SOURCE
:
2901 case IP_UNBLOCK_SOURCE
:
2902 case IP_ADD_SOURCE_MEMBERSHIP
:
2903 case IP_DROP_SOURCE_MEMBERSHIP
:
2904 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2905 return -TARGET_EINVAL
;
2907 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2908 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2909 unlock_user (ip_mreq_source
, optval_addr
, 0);
2918 case IPV6_MTU_DISCOVER
:
2921 case IPV6_RECVPKTINFO
:
2922 case IPV6_UNICAST_HOPS
:
2924 case IPV6_RECVHOPLIMIT
:
2925 case IPV6_2292HOPLIMIT
:
2928 if (optlen
< sizeof(uint32_t)) {
2929 return -TARGET_EINVAL
;
2931 if (get_user_u32(val
, optval_addr
)) {
2932 return -TARGET_EFAULT
;
2934 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2935 &val
, sizeof(val
)));
2939 struct in6_pktinfo pki
;
2941 if (optlen
< sizeof(pki
)) {
2942 return -TARGET_EINVAL
;
2945 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
2946 return -TARGET_EFAULT
;
2949 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
2951 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2952 &pki
, sizeof(pki
)));
2963 struct icmp6_filter icmp6f
;
2965 if (optlen
> sizeof(icmp6f
)) {
2966 optlen
= sizeof(icmp6f
);
2969 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
2970 return -TARGET_EFAULT
;
2973 for (val
= 0; val
< 8; val
++) {
2974 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
2977 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2989 /* those take an u32 value */
2990 if (optlen
< sizeof(uint32_t)) {
2991 return -TARGET_EINVAL
;
2994 if (get_user_u32(val
, optval_addr
)) {
2995 return -TARGET_EFAULT
;
2997 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2998 &val
, sizeof(val
)));
3005 case TARGET_SOL_SOCKET
:
3007 case TARGET_SO_RCVTIMEO
:
3011 optname
= SO_RCVTIMEO
;
3014 if (optlen
!= sizeof(struct target_timeval
)) {
3015 return -TARGET_EINVAL
;
3018 if (copy_from_user_timeval(&tv
, optval_addr
)) {
3019 return -TARGET_EFAULT
;
3022 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
3026 case TARGET_SO_SNDTIMEO
:
3027 optname
= SO_SNDTIMEO
;
3029 case TARGET_SO_ATTACH_FILTER
:
3031 struct target_sock_fprog
*tfprog
;
3032 struct target_sock_filter
*tfilter
;
3033 struct sock_fprog fprog
;
3034 struct sock_filter
*filter
;
3037 if (optlen
!= sizeof(*tfprog
)) {
3038 return -TARGET_EINVAL
;
3040 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
3041 return -TARGET_EFAULT
;
3043 if (!lock_user_struct(VERIFY_READ
, tfilter
,
3044 tswapal(tfprog
->filter
), 0)) {
3045 unlock_user_struct(tfprog
, optval_addr
, 1);
3046 return -TARGET_EFAULT
;
3049 fprog
.len
= tswap16(tfprog
->len
);
3050 filter
= g_try_new(struct sock_filter
, fprog
.len
);
3051 if (filter
== NULL
) {
3052 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
3053 unlock_user_struct(tfprog
, optval_addr
, 1);
3054 return -TARGET_ENOMEM
;
3056 for (i
= 0; i
< fprog
.len
; i
++) {
3057 filter
[i
].code
= tswap16(tfilter
[i
].code
);
3058 filter
[i
].jt
= tfilter
[i
].jt
;
3059 filter
[i
].jf
= tfilter
[i
].jf
;
3060 filter
[i
].k
= tswap32(tfilter
[i
].k
);
3062 fprog
.filter
= filter
;
3064 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
3065 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
3068 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
3069 unlock_user_struct(tfprog
, optval_addr
, 1);
3072 case TARGET_SO_BINDTODEVICE
:
3074 char *dev_ifname
, *addr_ifname
;
3076 if (optlen
> IFNAMSIZ
- 1) {
3077 optlen
= IFNAMSIZ
- 1;
3079 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
3081 return -TARGET_EFAULT
;
3083 optname
= SO_BINDTODEVICE
;
3084 addr_ifname
= alloca(IFNAMSIZ
);
3085 memcpy(addr_ifname
, dev_ifname
, optlen
);
3086 addr_ifname
[optlen
] = 0;
3087 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
3088 addr_ifname
, optlen
));
3089 unlock_user (dev_ifname
, optval_addr
, 0);
3092 /* Options with 'int' argument. */
3093 case TARGET_SO_DEBUG
:
3096 case TARGET_SO_REUSEADDR
:
3097 optname
= SO_REUSEADDR
;
3099 case TARGET_SO_TYPE
:
3102 case TARGET_SO_ERROR
:
3105 case TARGET_SO_DONTROUTE
:
3106 optname
= SO_DONTROUTE
;
3108 case TARGET_SO_BROADCAST
:
3109 optname
= SO_BROADCAST
;
3111 case TARGET_SO_SNDBUF
:
3112 optname
= SO_SNDBUF
;
3114 case TARGET_SO_SNDBUFFORCE
:
3115 optname
= SO_SNDBUFFORCE
;
3117 case TARGET_SO_RCVBUF
:
3118 optname
= SO_RCVBUF
;
3120 case TARGET_SO_RCVBUFFORCE
:
3121 optname
= SO_RCVBUFFORCE
;
3123 case TARGET_SO_KEEPALIVE
:
3124 optname
= SO_KEEPALIVE
;
3126 case TARGET_SO_OOBINLINE
:
3127 optname
= SO_OOBINLINE
;
3129 case TARGET_SO_NO_CHECK
:
3130 optname
= SO_NO_CHECK
;
3132 case TARGET_SO_PRIORITY
:
3133 optname
= SO_PRIORITY
;
3136 case TARGET_SO_BSDCOMPAT
:
3137 optname
= SO_BSDCOMPAT
;
3140 case TARGET_SO_PASSCRED
:
3141 optname
= SO_PASSCRED
;
3143 case TARGET_SO_PASSSEC
:
3144 optname
= SO_PASSSEC
;
3146 case TARGET_SO_TIMESTAMP
:
3147 optname
= SO_TIMESTAMP
;
3149 case TARGET_SO_RCVLOWAT
:
3150 optname
= SO_RCVLOWAT
;
3155 if (optlen
< sizeof(uint32_t))
3156 return -TARGET_EINVAL
;
3158 if (get_user_u32(val
, optval_addr
))
3159 return -TARGET_EFAULT
;
3160 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
3164 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
3165 ret
= -TARGET_ENOPROTOOPT
;
3170 /* do_getsockopt() Must return target values and target errnos. */
3171 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
3172 abi_ulong optval_addr
, abi_ulong optlen
)
3179 case TARGET_SOL_SOCKET
:
3182 /* These don't just return a single integer */
3183 case TARGET_SO_LINGER
:
3184 case TARGET_SO_RCVTIMEO
:
3185 case TARGET_SO_SNDTIMEO
:
3186 case TARGET_SO_PEERNAME
:
3188 case TARGET_SO_PEERCRED
: {
3191 struct target_ucred
*tcr
;
3193 if (get_user_u32(len
, optlen
)) {
3194 return -TARGET_EFAULT
;
3197 return -TARGET_EINVAL
;
3201 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
3209 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
3210 return -TARGET_EFAULT
;
3212 __put_user(cr
.pid
, &tcr
->pid
);
3213 __put_user(cr
.uid
, &tcr
->uid
);
3214 __put_user(cr
.gid
, &tcr
->gid
);
3215 unlock_user_struct(tcr
, optval_addr
, 1);
3216 if (put_user_u32(len
, optlen
)) {
3217 return -TARGET_EFAULT
;
3221 /* Options with 'int' argument. */
3222 case TARGET_SO_DEBUG
:
3225 case TARGET_SO_REUSEADDR
:
3226 optname
= SO_REUSEADDR
;
3228 case TARGET_SO_TYPE
:
3231 case TARGET_SO_ERROR
:
3234 case TARGET_SO_DONTROUTE
:
3235 optname
= SO_DONTROUTE
;
3237 case TARGET_SO_BROADCAST
:
3238 optname
= SO_BROADCAST
;
3240 case TARGET_SO_SNDBUF
:
3241 optname
= SO_SNDBUF
;
3243 case TARGET_SO_RCVBUF
:
3244 optname
= SO_RCVBUF
;
3246 case TARGET_SO_KEEPALIVE
:
3247 optname
= SO_KEEPALIVE
;
3249 case TARGET_SO_OOBINLINE
:
3250 optname
= SO_OOBINLINE
;
3252 case TARGET_SO_NO_CHECK
:
3253 optname
= SO_NO_CHECK
;
3255 case TARGET_SO_PRIORITY
:
3256 optname
= SO_PRIORITY
;
3259 case TARGET_SO_BSDCOMPAT
:
3260 optname
= SO_BSDCOMPAT
;
3263 case TARGET_SO_PASSCRED
:
3264 optname
= SO_PASSCRED
;
3266 case TARGET_SO_TIMESTAMP
:
3267 optname
= SO_TIMESTAMP
;
3269 case TARGET_SO_RCVLOWAT
:
3270 optname
= SO_RCVLOWAT
;
3272 case TARGET_SO_ACCEPTCONN
:
3273 optname
= SO_ACCEPTCONN
;
3280 /* TCP options all take an 'int' value. */
3282 if (get_user_u32(len
, optlen
))
3283 return -TARGET_EFAULT
;
3285 return -TARGET_EINVAL
;
3287 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3290 if (optname
== SO_TYPE
) {
3291 val
= host_to_target_sock_type(val
);
3296 if (put_user_u32(val
, optval_addr
))
3297 return -TARGET_EFAULT
;
3299 if (put_user_u8(val
, optval_addr
))
3300 return -TARGET_EFAULT
;
3302 if (put_user_u32(len
, optlen
))
3303 return -TARGET_EFAULT
;
3310 case IP_ROUTER_ALERT
:
3314 case IP_MTU_DISCOVER
:
3320 case IP_MULTICAST_TTL
:
3321 case IP_MULTICAST_LOOP
:
3322 if (get_user_u32(len
, optlen
))
3323 return -TARGET_EFAULT
;
3325 return -TARGET_EINVAL
;
3327 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3330 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
3332 if (put_user_u32(len
, optlen
)
3333 || put_user_u8(val
, optval_addr
))
3334 return -TARGET_EFAULT
;
3336 if (len
> sizeof(int))
3338 if (put_user_u32(len
, optlen
)
3339 || put_user_u32(val
, optval_addr
))
3340 return -TARGET_EFAULT
;
3344 ret
= -TARGET_ENOPROTOOPT
;
3350 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3352 ret
= -TARGET_EOPNOTSUPP
;
3358 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
3359 abi_ulong count
, int copy
)
3361 struct target_iovec
*target_vec
;
3363 abi_ulong total_len
, max_len
;
3366 bool bad_address
= false;
3372 if (count
> IOV_MAX
) {
3377 vec
= g_try_new0(struct iovec
, count
);
3383 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3384 count
* sizeof(struct target_iovec
), 1);
3385 if (target_vec
== NULL
) {
3390 /* ??? If host page size > target page size, this will result in a
3391 value larger than what we can actually support. */
3392 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
3395 for (i
= 0; i
< count
; i
++) {
3396 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3397 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3402 } else if (len
== 0) {
3403 /* Zero length pointer is ignored. */
3404 vec
[i
].iov_base
= 0;
3406 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
3407 /* If the first buffer pointer is bad, this is a fault. But
3408 * subsequent bad buffers will result in a partial write; this
3409 * is realized by filling the vector with null pointers and
3411 if (!vec
[i
].iov_base
) {
3422 if (len
> max_len
- total_len
) {
3423 len
= max_len
- total_len
;
3426 vec
[i
].iov_len
= len
;
3430 unlock_user(target_vec
, target_addr
, 0);
3435 if (tswapal(target_vec
[i
].iov_len
) > 0) {
3436 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
3439 unlock_user(target_vec
, target_addr
, 0);
3446 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
3447 abi_ulong count
, int copy
)
3449 struct target_iovec
*target_vec
;
3452 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3453 count
* sizeof(struct target_iovec
), 1);
3455 for (i
= 0; i
< count
; i
++) {
3456 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3457 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3461 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
3463 unlock_user(target_vec
, target_addr
, 0);
3469 static inline int target_to_host_sock_type(int *type
)
3472 int target_type
= *type
;
3474 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
3475 case TARGET_SOCK_DGRAM
:
3476 host_type
= SOCK_DGRAM
;
3478 case TARGET_SOCK_STREAM
:
3479 host_type
= SOCK_STREAM
;
3482 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
3485 if (target_type
& TARGET_SOCK_CLOEXEC
) {
3486 #if defined(SOCK_CLOEXEC)
3487 host_type
|= SOCK_CLOEXEC
;
3489 return -TARGET_EINVAL
;
3492 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3493 #if defined(SOCK_NONBLOCK)
3494 host_type
|= SOCK_NONBLOCK
;
3495 #elif !defined(O_NONBLOCK)
3496 return -TARGET_EINVAL
;
3503 /* Try to emulate socket type flags after socket creation. */
3504 static int sock_flags_fixup(int fd
, int target_type
)
3506 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3507 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3508 int flags
= fcntl(fd
, F_GETFL
);
3509 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
3511 return -TARGET_EINVAL
;
3518 static abi_long
packet_target_to_host_sockaddr(void *host_addr
,
3519 abi_ulong target_addr
,
3522 struct sockaddr
*addr
= host_addr
;
3523 struct target_sockaddr
*target_saddr
;
3525 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
3526 if (!target_saddr
) {
3527 return -TARGET_EFAULT
;
3530 memcpy(addr
, target_saddr
, len
);
3531 addr
->sa_family
= tswap16(target_saddr
->sa_family
);
3532 /* spkt_protocol is big-endian */
3534 unlock_user(target_saddr
, target_addr
, 0);
3538 static TargetFdTrans target_packet_trans
= {
3539 .target_to_host_addr
= packet_target_to_host_sockaddr
,
3542 #ifdef CONFIG_RTNETLINK
3543 static abi_long
netlink_route_target_to_host(void *buf
, size_t len
)
3547 ret
= target_to_host_nlmsg_route(buf
, len
);
3555 static abi_long
netlink_route_host_to_target(void *buf
, size_t len
)
3559 ret
= host_to_target_nlmsg_route(buf
, len
);
3567 static TargetFdTrans target_netlink_route_trans
= {
3568 .target_to_host_data
= netlink_route_target_to_host
,
3569 .host_to_target_data
= netlink_route_host_to_target
,
3571 #endif /* CONFIG_RTNETLINK */
3573 static abi_long
netlink_audit_target_to_host(void *buf
, size_t len
)
3577 ret
= target_to_host_nlmsg_audit(buf
, len
);
3585 static abi_long
netlink_audit_host_to_target(void *buf
, size_t len
)
3589 ret
= host_to_target_nlmsg_audit(buf
, len
);
3597 static TargetFdTrans target_netlink_audit_trans
= {
3598 .target_to_host_data
= netlink_audit_target_to_host
,
3599 .host_to_target_data
= netlink_audit_host_to_target
,
3602 /* do_socket() Must return target values and target errnos. */
3603 static abi_long
do_socket(int domain
, int type
, int protocol
)
3605 int target_type
= type
;
3608 ret
= target_to_host_sock_type(&type
);
3613 if (domain
== PF_NETLINK
&& !(
3614 #ifdef CONFIG_RTNETLINK
3615 protocol
== NETLINK_ROUTE
||
3617 protocol
== NETLINK_KOBJECT_UEVENT
||
3618 protocol
== NETLINK_AUDIT
)) {
3619 return -EPFNOSUPPORT
;
3622 if (domain
== AF_PACKET
||
3623 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3624 protocol
= tswap16(protocol
);
3627 ret
= get_errno(socket(domain
, type
, protocol
));
3629 ret
= sock_flags_fixup(ret
, target_type
);
3630 if (type
== SOCK_PACKET
) {
3631 /* Manage an obsolete case :
3632 * if socket type is SOCK_PACKET, bind by name
3634 fd_trans_register(ret
, &target_packet_trans
);
3635 } else if (domain
== PF_NETLINK
) {
3637 #ifdef CONFIG_RTNETLINK
3639 fd_trans_register(ret
, &target_netlink_route_trans
);
3642 case NETLINK_KOBJECT_UEVENT
:
3643 /* nothing to do: messages are strings */
3646 fd_trans_register(ret
, &target_netlink_audit_trans
);
3649 g_assert_not_reached();
3656 /* do_bind() Must return target values and target errnos. */
3657 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3663 if ((int)addrlen
< 0) {
3664 return -TARGET_EINVAL
;
3667 addr
= alloca(addrlen
+1);
3669 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3673 return get_errno(bind(sockfd
, addr
, addrlen
));
3676 /* do_connect() Must return target values and target errnos. */
3677 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3683 if ((int)addrlen
< 0) {
3684 return -TARGET_EINVAL
;
3687 addr
= alloca(addrlen
+1);
3689 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3693 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3696 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3697 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3698 int flags
, int send
)
3704 abi_ulong target_vec
;
3706 if (msgp
->msg_name
) {
3707 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3708 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3709 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3710 tswapal(msgp
->msg_name
),
3712 if (ret
== -TARGET_EFAULT
) {
3713 /* For connected sockets msg_name and msg_namelen must
3714 * be ignored, so returning EFAULT immediately is wrong.
3715 * Instead, pass a bad msg_name to the host kernel, and
3716 * let it decide whether to return EFAULT or not.
3718 msg
.msg_name
= (void *)-1;
3723 msg
.msg_name
= NULL
;
3724 msg
.msg_namelen
= 0;
3726 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3727 msg
.msg_control
= alloca(msg
.msg_controllen
);
3728 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3730 count
= tswapal(msgp
->msg_iovlen
);
3731 target_vec
= tswapal(msgp
->msg_iov
);
3733 if (count
> IOV_MAX
) {
3734 /* sendrcvmsg returns a different errno for this condition than
3735 * readv/writev, so we must catch it here before lock_iovec() does.
3737 ret
= -TARGET_EMSGSIZE
;
3741 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3742 target_vec
, count
, send
);
3744 ret
= -host_to_target_errno(errno
);
3747 msg
.msg_iovlen
= count
;
3751 if (fd_trans_target_to_host_data(fd
)) {
3754 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3755 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3756 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3757 msg
.msg_iov
->iov_len
);
3759 msg
.msg_iov
->iov_base
= host_msg
;
3760 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3764 ret
= target_to_host_cmsg(&msg
, msgp
);
3766 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3770 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3771 if (!is_error(ret
)) {
3773 if (fd_trans_host_to_target_data(fd
)) {
3774 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3777 ret
= host_to_target_cmsg(msgp
, &msg
);
3779 if (!is_error(ret
)) {
3780 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3781 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3782 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3783 msg
.msg_name
, msg
.msg_namelen
);
3795 unlock_iovec(vec
, target_vec
, count
, !send
);
3800 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3801 int flags
, int send
)
3804 struct target_msghdr
*msgp
;
3806 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3810 return -TARGET_EFAULT
;
3812 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3813 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3817 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3818 * so it might not have this *mmsg-specific flag either.
3820 #ifndef MSG_WAITFORONE
3821 #define MSG_WAITFORONE 0x10000
3824 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3825 unsigned int vlen
, unsigned int flags
,
3828 struct target_mmsghdr
*mmsgp
;
3832 if (vlen
> UIO_MAXIOV
) {
3836 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3838 return -TARGET_EFAULT
;
3841 for (i
= 0; i
< vlen
; i
++) {
3842 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3843 if (is_error(ret
)) {
3846 mmsgp
[i
].msg_len
= tswap32(ret
);
3847 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3848 if (flags
& MSG_WAITFORONE
) {
3849 flags
|= MSG_DONTWAIT
;
3853 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3855 /* Return number of datagrams sent if we sent any at all;
3856 * otherwise return the error.
3864 /* do_accept4() Must return target values and target errnos. */
3865 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3866 abi_ulong target_addrlen_addr
, int flags
)
3873 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3875 if (target_addr
== 0) {
3876 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3879 /* linux returns EINVAL if addrlen pointer is invalid */
3880 if (get_user_u32(addrlen
, target_addrlen_addr
))
3881 return -TARGET_EINVAL
;
3883 if ((int)addrlen
< 0) {
3884 return -TARGET_EINVAL
;
3887 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3888 return -TARGET_EINVAL
;
3890 addr
= alloca(addrlen
);
3892 ret
= get_errno(safe_accept4(fd
, addr
, &addrlen
, host_flags
));
3893 if (!is_error(ret
)) {
3894 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3895 if (put_user_u32(addrlen
, target_addrlen_addr
))
3896 ret
= -TARGET_EFAULT
;
3901 /* do_getpeername() Must return target values and target errnos. */
3902 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3903 abi_ulong target_addrlen_addr
)
3909 if (get_user_u32(addrlen
, target_addrlen_addr
))
3910 return -TARGET_EFAULT
;
3912 if ((int)addrlen
< 0) {
3913 return -TARGET_EINVAL
;
3916 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3917 return -TARGET_EFAULT
;
3919 addr
= alloca(addrlen
);
3921 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
3922 if (!is_error(ret
)) {
3923 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3924 if (put_user_u32(addrlen
, target_addrlen_addr
))
3925 ret
= -TARGET_EFAULT
;
3930 /* do_getsockname() Must return target values and target errnos. */
3931 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3932 abi_ulong target_addrlen_addr
)
3938 if (get_user_u32(addrlen
, target_addrlen_addr
))
3939 return -TARGET_EFAULT
;
3941 if ((int)addrlen
< 0) {
3942 return -TARGET_EINVAL
;
3945 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3946 return -TARGET_EFAULT
;
3948 addr
= alloca(addrlen
);
3950 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
3951 if (!is_error(ret
)) {
3952 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3953 if (put_user_u32(addrlen
, target_addrlen_addr
))
3954 ret
= -TARGET_EFAULT
;
3959 /* do_socketpair() Must return target values and target errnos. */
3960 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3961 abi_ulong target_tab_addr
)
3966 target_to_host_sock_type(&type
);
3968 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3969 if (!is_error(ret
)) {
3970 if (put_user_s32(tab
[0], target_tab_addr
)
3971 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3972 ret
= -TARGET_EFAULT
;
3977 /* do_sendto() Must return target values and target errnos. */
3978 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3979 abi_ulong target_addr
, socklen_t addrlen
)
3983 void *copy_msg
= NULL
;
3986 if ((int)addrlen
< 0) {
3987 return -TARGET_EINVAL
;
3990 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3992 return -TARGET_EFAULT
;
3993 if (fd_trans_target_to_host_data(fd
)) {
3994 copy_msg
= host_msg
;
3995 host_msg
= g_malloc(len
);
3996 memcpy(host_msg
, copy_msg
, len
);
3997 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
4003 addr
= alloca(addrlen
+1);
4004 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
4008 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
4010 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
4015 host_msg
= copy_msg
;
4017 unlock_user(host_msg
, msg
, 0);
4021 /* do_recvfrom() Must return target values and target errnos. */
4022 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
4023 abi_ulong target_addr
,
4024 abi_ulong target_addrlen
)
4031 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
4033 return -TARGET_EFAULT
;
4035 if (get_user_u32(addrlen
, target_addrlen
)) {
4036 ret
= -TARGET_EFAULT
;
4039 if ((int)addrlen
< 0) {
4040 ret
= -TARGET_EINVAL
;
4043 addr
= alloca(addrlen
);
4044 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
4047 addr
= NULL
; /* To keep compiler quiet. */
4048 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
4050 if (!is_error(ret
)) {
4051 if (fd_trans_host_to_target_data(fd
)) {
4052 ret
= fd_trans_host_to_target_data(fd
)(host_msg
, ret
);
4055 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
4056 if (put_user_u32(addrlen
, target_addrlen
)) {
4057 ret
= -TARGET_EFAULT
;
4061 unlock_user(host_msg
, msg
, len
);
4064 unlock_user(host_msg
, msg
, 0);
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
4162 #define N_SHM_REGIONS 32
4164 static struct shm_region
{
4168 } shm_regions
[N_SHM_REGIONS
];
4170 #ifndef TARGET_SEMID64_DS
4171 /* asm-generic version of this struct */
4172 struct target_semid64_ds
4174 struct target_ipc_perm sem_perm
;
4175 abi_ulong sem_otime
;
4176 #if TARGET_ABI_BITS == 32
4177 abi_ulong __unused1
;
4179 abi_ulong sem_ctime
;
4180 #if TARGET_ABI_BITS == 32
4181 abi_ulong __unused2
;
4183 abi_ulong sem_nsems
;
4184 abi_ulong __unused3
;
4185 abi_ulong __unused4
;
4189 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
4190 abi_ulong target_addr
)
4192 struct target_ipc_perm
*target_ip
;
4193 struct target_semid64_ds
*target_sd
;
4195 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4196 return -TARGET_EFAULT
;
4197 target_ip
= &(target_sd
->sem_perm
);
4198 host_ip
->__key
= tswap32(target_ip
->__key
);
4199 host_ip
->uid
= tswap32(target_ip
->uid
);
4200 host_ip
->gid
= tswap32(target_ip
->gid
);
4201 host_ip
->cuid
= tswap32(target_ip
->cuid
);
4202 host_ip
->cgid
= tswap32(target_ip
->cgid
);
4203 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4204 host_ip
->mode
= tswap32(target_ip
->mode
);
4206 host_ip
->mode
= tswap16(target_ip
->mode
);
4208 #if defined(TARGET_PPC)
4209 host_ip
->__seq
= tswap32(target_ip
->__seq
);
4211 host_ip
->__seq
= tswap16(target_ip
->__seq
);
4213 unlock_user_struct(target_sd
, target_addr
, 0);
4217 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
4218 struct ipc_perm
*host_ip
)
4220 struct target_ipc_perm
*target_ip
;
4221 struct target_semid64_ds
*target_sd
;
4223 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4224 return -TARGET_EFAULT
;
4225 target_ip
= &(target_sd
->sem_perm
);
4226 target_ip
->__key
= tswap32(host_ip
->__key
);
4227 target_ip
->uid
= tswap32(host_ip
->uid
);
4228 target_ip
->gid
= tswap32(host_ip
->gid
);
4229 target_ip
->cuid
= tswap32(host_ip
->cuid
);
4230 target_ip
->cgid
= tswap32(host_ip
->cgid
);
4231 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4232 target_ip
->mode
= tswap32(host_ip
->mode
);
4234 target_ip
->mode
= tswap16(host_ip
->mode
);
4236 #if defined(TARGET_PPC)
4237 target_ip
->__seq
= tswap32(host_ip
->__seq
);
4239 target_ip
->__seq
= tswap16(host_ip
->__seq
);
4241 unlock_user_struct(target_sd
, target_addr
, 1);
4245 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
4246 abi_ulong target_addr
)
4248 struct target_semid64_ds
*target_sd
;
4250 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4251 return -TARGET_EFAULT
;
4252 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
4253 return -TARGET_EFAULT
;
4254 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
4255 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
4256 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
4257 unlock_user_struct(target_sd
, target_addr
, 0);
4261 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
4262 struct semid_ds
*host_sd
)
4264 struct target_semid64_ds
*target_sd
;
4266 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4267 return -TARGET_EFAULT
;
4268 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
4269 return -TARGET_EFAULT
;
4270 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
4271 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
4272 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
4273 unlock_user_struct(target_sd
, target_addr
, 1);
/* Guest layout of struct seminfo (semctl IPC_INFO / SEM_INFO).
 * Field set grounded in host_to_target_seminfo() below.
 */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
4290 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
4291 struct seminfo
*host_seminfo
)
4293 struct target_seminfo
*target_seminfo
;
4294 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
4295 return -TARGET_EFAULT
;
4296 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
4297 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
4298 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
4299 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
4300 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
4301 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
4302 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
4303 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
4304 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
4305 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
4306 unlock_user_struct(target_seminfo
, target_addr
, 1);
4312 struct semid_ds
*buf
;
4313 unsigned short *array
;
4314 struct seminfo
*__buf
;
4317 union target_semun
{
4324 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
4325 abi_ulong target_addr
)
4328 unsigned short *array
;
4330 struct semid_ds semid_ds
;
4333 semun
.buf
= &semid_ds
;
4335 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4337 return get_errno(ret
);
4339 nsems
= semid_ds
.sem_nsems
;
4341 *host_array
= g_try_new(unsigned short, nsems
);
4343 return -TARGET_ENOMEM
;
4345 array
= lock_user(VERIFY_READ
, target_addr
,
4346 nsems
*sizeof(unsigned short), 1);
4348 g_free(*host_array
);
4349 return -TARGET_EFAULT
;
4352 for(i
=0; i
<nsems
; i
++) {
4353 __get_user((*host_array
)[i
], &array
[i
]);
4355 unlock_user(array
, target_addr
, 0);
4360 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
4361 unsigned short **host_array
)
4364 unsigned short *array
;
4366 struct semid_ds semid_ds
;
4369 semun
.buf
= &semid_ds
;
4371 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4373 return get_errno(ret
);
4375 nsems
= semid_ds
.sem_nsems
;
4377 array
= lock_user(VERIFY_WRITE
, target_addr
,
4378 nsems
*sizeof(unsigned short), 0);
4380 return -TARGET_EFAULT
;
4382 for(i
=0; i
<nsems
; i
++) {
4383 __put_user((*host_array
)[i
], &array
[i
]);
4385 g_free(*host_array
);
4386 unlock_user(array
, target_addr
, 1);
4391 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
4392 abi_ulong target_arg
)
4394 union target_semun target_su
= { .buf
= target_arg
};
4396 struct semid_ds dsarg
;
4397 unsigned short *array
= NULL
;
4398 struct seminfo seminfo
;
4399 abi_long ret
= -TARGET_EINVAL
;
4406 /* In 64 bit cross-endian situations, we will erroneously pick up
4407 * the wrong half of the union for the "val" element. To rectify
4408 * this, the entire 8-byte structure is byteswapped, followed by
4409 * a swap of the 4 byte val field. In other cases, the data is
4410 * already in proper host byte order. */
4411 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
4412 target_su
.buf
= tswapal(target_su
.buf
);
4413 arg
.val
= tswap32(target_su
.val
);
4415 arg
.val
= target_su
.val
;
4417 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4421 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
4425 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4426 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
4433 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
4437 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4438 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
4444 arg
.__buf
= &seminfo
;
4445 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4446 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
4454 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
/* Guest layout of struct sembuf (semop operation descriptor). */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
4467 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
4468 abi_ulong target_addr
,
4471 struct target_sembuf
*target_sembuf
;
4474 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
4475 nsops
*sizeof(struct target_sembuf
), 1);
4477 return -TARGET_EFAULT
;
4479 for(i
=0; i
<nsops
; i
++) {
4480 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
4481 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
4482 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
4485 unlock_user(target_sembuf
, target_addr
, 0);
4490 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
4492 struct sembuf sops
[nsops
];
4494 if (target_to_host_sembuf(sops
, ptr
, nsops
))
4495 return -TARGET_EFAULT
;
4497 return get_errno(safe_semtimedop(semid
, sops
, nsops
, NULL
));
4500 struct target_msqid_ds
4502 struct target_ipc_perm msg_perm
;
4503 abi_ulong msg_stime
;
4504 #if TARGET_ABI_BITS == 32
4505 abi_ulong __unused1
;
4507 abi_ulong msg_rtime
;
4508 #if TARGET_ABI_BITS == 32
4509 abi_ulong __unused2
;
4511 abi_ulong msg_ctime
;
4512 #if TARGET_ABI_BITS == 32
4513 abi_ulong __unused3
;
4515 abi_ulong __msg_cbytes
;
4517 abi_ulong msg_qbytes
;
4518 abi_ulong msg_lspid
;
4519 abi_ulong msg_lrpid
;
4520 abi_ulong __unused4
;
4521 abi_ulong __unused5
;
4524 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
4525 abi_ulong target_addr
)
4527 struct target_msqid_ds
*target_md
;
4529 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
4530 return -TARGET_EFAULT
;
4531 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
4532 return -TARGET_EFAULT
;
4533 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
4534 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
4535 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
4536 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
4537 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
4538 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
4539 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
4540 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
4541 unlock_user_struct(target_md
, target_addr
, 0);
4545 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
4546 struct msqid_ds
*host_md
)
4548 struct target_msqid_ds
*target_md
;
4550 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
4551 return -TARGET_EFAULT
;
4552 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
4553 return -TARGET_EFAULT
;
4554 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
4555 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
4556 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
4557 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
4558 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
4559 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
4560 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
4561 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
4562 unlock_user_struct(target_md
, target_addr
, 1);
/* Guest layout of struct msginfo (msgctl IPC_INFO / MSG_INFO).
 * Field set grounded in host_to_target_msginfo() below.
 */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4577 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4578 struct msginfo
*host_msginfo
)
4580 struct target_msginfo
*target_msginfo
;
4581 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4582 return -TARGET_EFAULT
;
4583 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4584 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4585 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4586 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4587 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4588 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4589 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4590 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4591 unlock_user_struct(target_msginfo
, target_addr
, 1);
4595 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4597 struct msqid_ds dsarg
;
4598 struct msginfo msginfo
;
4599 abi_long ret
= -TARGET_EINVAL
;
4607 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4608 return -TARGET_EFAULT
;
4609 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4610 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4611 return -TARGET_EFAULT
;
4614 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4618 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4619 if (host_to_target_msginfo(ptr
, &msginfo
))
4620 return -TARGET_EFAULT
;
4627 struct target_msgbuf
{
4632 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4633 ssize_t msgsz
, int msgflg
)
4635 struct target_msgbuf
*target_mb
;
4636 struct msgbuf
*host_mb
;
4640 return -TARGET_EINVAL
;
4643 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4644 return -TARGET_EFAULT
;
4645 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4647 unlock_user_struct(target_mb
, msgp
, 0);
4648 return -TARGET_ENOMEM
;
4650 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4651 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4652 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4654 unlock_user_struct(target_mb
, msgp
, 0);
4659 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4660 ssize_t msgsz
, abi_long msgtyp
,
4663 struct target_msgbuf
*target_mb
;
4665 struct msgbuf
*host_mb
;
4669 return -TARGET_EINVAL
;
4672 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4673 return -TARGET_EFAULT
;
4675 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4677 ret
= -TARGET_ENOMEM
;
4680 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4683 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4684 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4685 if (!target_mtext
) {
4686 ret
= -TARGET_EFAULT
;
4689 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4690 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4693 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4697 unlock_user_struct(target_mb
, msgp
, 1);
4702 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4703 abi_ulong target_addr
)
4705 struct target_shmid_ds
*target_sd
;
4707 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4708 return -TARGET_EFAULT
;
4709 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4710 return -TARGET_EFAULT
;
4711 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4712 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4713 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4714 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4715 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4716 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4717 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4718 unlock_user_struct(target_sd
, target_addr
, 0);
4722 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4723 struct shmid_ds
*host_sd
)
4725 struct target_shmid_ds
*target_sd
;
4727 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4728 return -TARGET_EFAULT
;
4729 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4730 return -TARGET_EFAULT
;
4731 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4732 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4733 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4734 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4735 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4736 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4737 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4738 unlock_user_struct(target_sd
, target_addr
, 1);
4742 struct target_shminfo
{
4750 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4751 struct shminfo
*host_shminfo
)
4753 struct target_shminfo
*target_shminfo
;
4754 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4755 return -TARGET_EFAULT
;
4756 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4757 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4758 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4759 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4760 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4761 unlock_user_struct(target_shminfo
, target_addr
, 1);
4765 struct target_shm_info
{
4770 abi_ulong swap_attempts
;
4771 abi_ulong swap_successes
;
4774 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4775 struct shm_info
*host_shm_info
)
4777 struct target_shm_info
*target_shm_info
;
4778 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4779 return -TARGET_EFAULT
;
4780 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4781 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4782 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4783 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4784 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4785 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4786 unlock_user_struct(target_shm_info
, target_addr
, 1);
4790 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4792 struct shmid_ds dsarg
;
4793 struct shminfo shminfo
;
4794 struct shm_info shm_info
;
4795 abi_long ret
= -TARGET_EINVAL
;
4803 if (target_to_host_shmid_ds(&dsarg
, buf
))
4804 return -TARGET_EFAULT
;
4805 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4806 if (host_to_target_shmid_ds(buf
, &dsarg
))
4807 return -TARGET_EFAULT
;
4810 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4811 if (host_to_target_shminfo(buf
, &shminfo
))
4812 return -TARGET_EFAULT
;
4815 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4816 if (host_to_target_shm_info(buf
, &shm_info
))
4817 return -TARGET_EFAULT
;
4822 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4829 #ifndef TARGET_FORCE_SHMLBA
4830 /* For most architectures, SHMLBA is the same as the page size;
4831 * some architectures have larger values, in which case they should
4832 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4833 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4834 * and defining its own value for SHMLBA.
4836 * The kernel also permits SHMLBA to be set by the architecture to a
4837 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4838 * this means that addresses are rounded to the large size if
4839 * SHM_RND is set but addresses not aligned to that size are not rejected
4840 * as long as they are at least page-aligned. Since the only architecture
4841 * which uses this is ia64 this code doesn't provide for that oddity.
4843 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
4845 return TARGET_PAGE_SIZE
;
4849 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
4850 int shmid
, abi_ulong shmaddr
, int shmflg
)
4854 struct shmid_ds shm_info
;
4858 /* find out the length of the shared memory segment */
4859 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4860 if (is_error(ret
)) {
4861 /* can't get length, bail out */
4865 shmlba
= target_shmlba(cpu_env
);
4867 if (shmaddr
& (shmlba
- 1)) {
4868 if (shmflg
& SHM_RND
) {
4869 shmaddr
&= ~(shmlba
- 1);
4871 return -TARGET_EINVAL
;
4878 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
4880 abi_ulong mmap_start
;
4882 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
4884 if (mmap_start
== -1) {
4886 host_raddr
= (void *)-1;
4888 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
4891 if (host_raddr
== (void *)-1) {
4893 return get_errno((long)host_raddr
);
4895 raddr
=h2g((unsigned long)host_raddr
);
4897 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4898 PAGE_VALID
| PAGE_READ
|
4899 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
4901 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4902 if (!shm_regions
[i
].in_use
) {
4903 shm_regions
[i
].in_use
= true;
4904 shm_regions
[i
].start
= raddr
;
4905 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4915 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4919 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4920 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4921 shm_regions
[i
].in_use
= false;
4922 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4927 return get_errno(shmdt(g2h(shmaddr
)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos.  */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-style callers pass a kludge struct holding both the
                 * buffer pointer and the message type.
                 */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }

        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;

    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;

    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
5037 /* kernel structure types definitions */
5039 #define STRUCT(name, ...) STRUCT_ ## name,
5040 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
5042 #include "syscall_types.h"
5046 #undef STRUCT_SPECIAL
5048 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
5049 #define STRUCT_SPECIAL(name)
5050 #include "syscall_types.h"
5052 #undef STRUCT_SPECIAL
5054 typedef struct IOCTLEntry IOCTLEntry
;
5056 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5057 int fd
, int cmd
, abi_long arg
);
5061 unsigned int host_cmd
;
5064 do_ioctl_fn
*do_ioctl
;
5065 const argtype arg_type
[5];
5068 #define IOC_R 0x0001
5069 #define IOC_W 0x0002
5070 #define IOC_RW (IOC_R | IOC_W)
5072 #define MAX_STRUCT_SIZE 4096
5074 #ifdef CONFIG_FIEMAP
5075 /* So fiemap access checks don't overflow on 32 bit systems.
5076 * This is very slightly smaller than the limit imposed by
5077 * the underlying kernel.
5079 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
5080 / sizeof(struct fiemap_extent))
5082 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5083 int fd
, int cmd
, abi_long arg
)
5085 /* The parameter for this ioctl is a struct fiemap followed
5086 * by an array of struct fiemap_extent whose size is set
5087 * in fiemap->fm_extent_count. The array is filled in by the
5090 int target_size_in
, target_size_out
;
5092 const argtype
*arg_type
= ie
->arg_type
;
5093 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
5096 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
5100 assert(arg_type
[0] == TYPE_PTR
);
5101 assert(ie
->access
== IOC_RW
);
5103 target_size_in
= thunk_type_size(arg_type
, 0);
5104 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
5106 return -TARGET_EFAULT
;
5108 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5109 unlock_user(argptr
, arg
, 0);
5110 fm
= (struct fiemap
*)buf_temp
;
5111 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
5112 return -TARGET_EINVAL
;
5115 outbufsz
= sizeof (*fm
) +
5116 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
5118 if (outbufsz
> MAX_STRUCT_SIZE
) {
5119 /* We can't fit all the extents into the fixed size buffer.
5120 * Allocate one that is large enough and use it instead.
5122 fm
= g_try_malloc(outbufsz
);
5124 return -TARGET_ENOMEM
;
5126 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
5129 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
5130 if (!is_error(ret
)) {
5131 target_size_out
= target_size_in
;
5132 /* An extent_count of 0 means we were only counting the extents
5133 * so there are no structs to copy
5135 if (fm
->fm_extent_count
!= 0) {
5136 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
5138 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
5140 ret
= -TARGET_EFAULT
;
5142 /* Convert the struct fiemap */
5143 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
5144 if (fm
->fm_extent_count
!= 0) {
5145 p
= argptr
+ target_size_in
;
5146 /* ...and then all the struct fiemap_extents */
5147 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
5148 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
5153 unlock_user(argptr
, arg
, target_size_out
);
5163 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5164 int fd
, int cmd
, abi_long arg
)
5166 const argtype
*arg_type
= ie
->arg_type
;
5170 struct ifconf
*host_ifconf
;
5172 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
5173 int target_ifreq_size
;
5178 abi_long target_ifc_buf
;
5182 assert(arg_type
[0] == TYPE_PTR
);
5183 assert(ie
->access
== IOC_RW
);
5186 target_size
= thunk_type_size(arg_type
, 0);
5188 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5190 return -TARGET_EFAULT
;
5191 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5192 unlock_user(argptr
, arg
, 0);
5194 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
5195 target_ifc_len
= host_ifconf
->ifc_len
;
5196 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
5198 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
5199 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
5200 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
5202 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
5203 if (outbufsz
> MAX_STRUCT_SIZE
) {
5204 /* We can't fit all the extents into the fixed size buffer.
5205 * Allocate one that is large enough and use it instead.
5207 host_ifconf
= malloc(outbufsz
);
5209 return -TARGET_ENOMEM
;
5211 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
5214 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
5216 host_ifconf
->ifc_len
= host_ifc_len
;
5217 host_ifconf
->ifc_buf
= host_ifc_buf
;
5219 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
5220 if (!is_error(ret
)) {
5221 /* convert host ifc_len to target ifc_len */
5223 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
5224 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
5225 host_ifconf
->ifc_len
= target_ifc_len
;
5227 /* restore target ifc_buf */
5229 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
5231 /* copy struct ifconf to target user */
5233 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5235 return -TARGET_EFAULT
;
5236 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
5237 unlock_user(argptr
, arg
, target_size
);
5239 /* copy ifreq[] to target user */
5241 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
5242 for (i
= 0; i
< nb_ifreq
; i
++) {
5243 thunk_convert(argptr
+ i
* target_ifreq_size
,
5244 host_ifc_buf
+ i
* sizeof(struct ifreq
),
5245 ifreq_arg_type
, THUNK_TARGET
);
5247 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
5257 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5258 int cmd
, abi_long arg
)
5261 struct dm_ioctl
*host_dm
;
5262 abi_long guest_data
;
5263 uint32_t guest_data_size
;
5265 const argtype
*arg_type
= ie
->arg_type
;
5267 void *big_buf
= NULL
;
5271 target_size
= thunk_type_size(arg_type
, 0);
5272 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5274 ret
= -TARGET_EFAULT
;
5277 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5278 unlock_user(argptr
, arg
, 0);
5280 /* buf_temp is too small, so fetch things into a bigger buffer */
5281 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
5282 memcpy(big_buf
, buf_temp
, target_size
);
5286 guest_data
= arg
+ host_dm
->data_start
;
5287 if ((guest_data
- arg
) < 0) {
5288 ret
= -TARGET_EINVAL
;
5291 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5292 host_data
= (char*)host_dm
+ host_dm
->data_start
;
5294 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
5296 ret
= -TARGET_EFAULT
;
5300 switch (ie
->host_cmd
) {
5302 case DM_LIST_DEVICES
:
5305 case DM_DEV_SUSPEND
:
5308 case DM_TABLE_STATUS
:
5309 case DM_TABLE_CLEAR
:
5311 case DM_LIST_VERSIONS
:
5315 case DM_DEV_SET_GEOMETRY
:
5316 /* data contains only strings */
5317 memcpy(host_data
, argptr
, guest_data_size
);
5320 memcpy(host_data
, argptr
, guest_data_size
);
5321 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
5325 void *gspec
= argptr
;
5326 void *cur_data
= host_data
;
5327 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5328 int spec_size
= thunk_type_size(arg_type
, 0);
5331 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5332 struct dm_target_spec
*spec
= cur_data
;
5336 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
5337 slen
= strlen((char*)gspec
+ spec_size
) + 1;
5339 spec
->next
= sizeof(*spec
) + slen
;
5340 strcpy((char*)&spec
[1], gspec
+ spec_size
);
5342 cur_data
+= spec
->next
;
5347 ret
= -TARGET_EINVAL
;
5348 unlock_user(argptr
, guest_data
, 0);
5351 unlock_user(argptr
, guest_data
, 0);
5353 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5354 if (!is_error(ret
)) {
5355 guest_data
= arg
+ host_dm
->data_start
;
5356 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5357 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
5358 switch (ie
->host_cmd
) {
5363 case DM_DEV_SUSPEND
:
5366 case DM_TABLE_CLEAR
:
5368 case DM_DEV_SET_GEOMETRY
:
5369 /* no return data */
5371 case DM_LIST_DEVICES
:
5373 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
5374 uint32_t remaining_data
= guest_data_size
;
5375 void *cur_data
= argptr
;
5376 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
5377 int nl_size
= 12; /* can't use thunk_size due to alignment */
5380 uint32_t next
= nl
->next
;
5382 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
5384 if (remaining_data
< nl
->next
) {
5385 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5388 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
5389 strcpy(cur_data
+ nl_size
, nl
->name
);
5390 cur_data
+= nl
->next
;
5391 remaining_data
-= nl
->next
;
5395 nl
= (void*)nl
+ next
;
5400 case DM_TABLE_STATUS
:
5402 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5403 void *cur_data
= argptr
;
5404 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5405 int spec_size
= thunk_type_size(arg_type
, 0);
5408 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5409 uint32_t next
= spec
->next
;
5410 int slen
= strlen((char*)&spec
[1]) + 1;
5411 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5412 if (guest_data_size
< spec
->next
) {
5413 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5416 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5417 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5418 cur_data
= argptr
+ spec
->next
;
5419 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5425 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5426 int count
= *(uint32_t*)hdata
;
5427 uint64_t *hdev
= hdata
+ 8;
5428 uint64_t *gdev
= argptr
+ 8;
5431 *(uint32_t*)argptr
= tswap32(count
);
5432 for (i
= 0; i
< count
; i
++) {
5433 *gdev
= tswap64(*hdev
);
5439 case DM_LIST_VERSIONS
:
5441 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5442 uint32_t remaining_data
= guest_data_size
;
5443 void *cur_data
= argptr
;
5444 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5445 int vers_size
= thunk_type_size(arg_type
, 0);
5448 uint32_t next
= vers
->next
;
5450 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5452 if (remaining_data
< vers
->next
) {
5453 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5456 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5457 strcpy(cur_data
+ vers_size
, vers
->name
);
5458 cur_data
+= vers
->next
;
5459 remaining_data
-= vers
->next
;
5463 vers
= (void*)vers
+ next
;
5468 unlock_user(argptr
, guest_data
, 0);
5469 ret
= -TARGET_EINVAL
;
5472 unlock_user(argptr
, guest_data
, guest_data_size
);
5474 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5476 ret
= -TARGET_EFAULT
;
5479 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5480 unlock_user(argptr
, arg
, target_size
);
5487 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5488 int cmd
, abi_long arg
)
5492 const argtype
*arg_type
= ie
->arg_type
;
5493 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5496 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5497 struct blkpg_partition host_part
;
5499 /* Read and convert blkpg */
5501 target_size
= thunk_type_size(arg_type
, 0);
5502 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5504 ret
= -TARGET_EFAULT
;
5507 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5508 unlock_user(argptr
, arg
, 0);
5510 switch (host_blkpg
->op
) {
5511 case BLKPG_ADD_PARTITION
:
5512 case BLKPG_DEL_PARTITION
:
5513 /* payload is struct blkpg_partition */
5516 /* Unknown opcode */
5517 ret
= -TARGET_EINVAL
;
5521 /* Read and convert blkpg->data */
5522 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5523 target_size
= thunk_type_size(part_arg_type
, 0);
5524 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5526 ret
= -TARGET_EFAULT
;
5529 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5530 unlock_user(argptr
, arg
, 0);
5532 /* Swizzle the data pointer to our local copy and call! */
5533 host_blkpg
->data
= &host_part
;
5534 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5540 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5541 int fd
, int cmd
, abi_long arg
)
5543 const argtype
*arg_type
= ie
->arg_type
;
5544 const StructEntry
*se
;
5545 const argtype
*field_types
;
5546 const int *dst_offsets
, *src_offsets
;
5549 abi_ulong
*target_rt_dev_ptr
;
5550 unsigned long *host_rt_dev_ptr
;
5554 assert(ie
->access
== IOC_W
);
5555 assert(*arg_type
== TYPE_PTR
);
5557 assert(*arg_type
== TYPE_STRUCT
);
5558 target_size
= thunk_type_size(arg_type
, 0);
5559 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5561 return -TARGET_EFAULT
;
5564 assert(*arg_type
== (int)STRUCT_rtentry
);
5565 se
= struct_entries
+ *arg_type
++;
5566 assert(se
->convert
[0] == NULL
);
5567 /* convert struct here to be able to catch rt_dev string */
5568 field_types
= se
->field_types
;
5569 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5570 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5571 for (i
= 0; i
< se
->nb_fields
; i
++) {
5572 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5573 assert(*field_types
== TYPE_PTRVOID
);
5574 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5575 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5576 if (*target_rt_dev_ptr
!= 0) {
5577 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5578 tswapal(*target_rt_dev_ptr
));
5579 if (!*host_rt_dev_ptr
) {
5580 unlock_user(argptr
, arg
, 0);
5581 return -TARGET_EFAULT
;
5584 *host_rt_dev_ptr
= 0;
5589 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5590 argptr
+ src_offsets
[i
],
5591 field_types
, THUNK_HOST
);
5593 unlock_user(argptr
, arg
, 0);
5595 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5596 if (*host_rt_dev_ptr
!= 0) {
5597 unlock_user((void *)*host_rt_dev_ptr
,
5598 *target_rt_dev_ptr
, 0);
5603 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5604 int fd
, int cmd
, abi_long arg
)
5606 int sig
= target_to_host_signal(arg
);
5607 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5610 static IOCTLEntry ioctl_entries
[] = {
5611 #define IOCTL(cmd, access, ...) \
5612 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5613 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5614 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5615 #define IOCTL_IGNORE(cmd) \
5616 { TARGET_ ## cmd, 0, #cmd },
5621 /* ??? Implement proper locking for ioctls. */
5622 /* do_ioctl() Must return target values and target errnos. */
5623 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5625 const IOCTLEntry
*ie
;
5626 const argtype
*arg_type
;
5628 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5634 if (ie
->target_cmd
== 0) {
5635 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5636 return -TARGET_ENOSYS
;
5638 if (ie
->target_cmd
== cmd
)
5642 arg_type
= ie
->arg_type
;
5644 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
5647 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5648 } else if (!ie
->host_cmd
) {
5649 /* Some architectures define BSD ioctls in their headers
5650 that are not implemented in Linux. */
5651 return -TARGET_ENOSYS
;
5654 switch(arg_type
[0]) {
5657 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5661 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5665 target_size
= thunk_type_size(arg_type
, 0);
5666 switch(ie
->access
) {
5668 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5669 if (!is_error(ret
)) {
5670 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5672 return -TARGET_EFAULT
;
5673 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5674 unlock_user(argptr
, arg
, target_size
);
5678 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5680 return -TARGET_EFAULT
;
5681 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5682 unlock_user(argptr
, arg
, 0);
5683 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5687 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5689 return -TARGET_EFAULT
;
5690 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5691 unlock_user(argptr
, arg
, 0);
5692 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5693 if (!is_error(ret
)) {
5694 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5696 return -TARGET_EFAULT
;
5697 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5698 unlock_user(argptr
, arg
, target_size
);
5704 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5705 (long)cmd
, arg_type
[0]);
5706 ret
= -TARGET_ENOSYS
;
5712 static const bitmask_transtbl iflag_tbl
[] = {
5713 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5714 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5715 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5716 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5717 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5718 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5719 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5720 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5721 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5722 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5723 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5724 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5725 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5726 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5730 static const bitmask_transtbl oflag_tbl
[] = {
5731 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5732 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5733 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5734 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5735 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5736 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5737 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5738 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5739 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5740 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5741 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5742 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5743 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5744 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5745 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5746 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5747 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5748 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5749 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5750 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5751 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5752 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5753 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5754 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5758 static const bitmask_transtbl cflag_tbl
[] = {
5759 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5760 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5761 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5762 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5763 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5764 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5765 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5766 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5767 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5768 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5769 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5770 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5771 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5772 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5773 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5774 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5775 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5776 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5777 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5778 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5779 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5780 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5781 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5782 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5783 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5784 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5785 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5786 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5787 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5788 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5789 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5793 static const bitmask_transtbl lflag_tbl
[] = {
5794 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5795 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5796 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5797 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5798 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5799 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5800 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5801 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5802 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5803 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5804 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5805 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5806 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5807 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5808 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5812 static void target_to_host_termios (void *dst
, const void *src
)
5814 struct host_termios
*host
= dst
;
5815 const struct target_termios
*target
= src
;
5818 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5820 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5822 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5824 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5825 host
->c_line
= target
->c_line
;
5827 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5828 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5829 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5830 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5831 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5832 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5833 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5834 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5835 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5836 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5837 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5838 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5839 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5840 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5841 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5842 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5843 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5844 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5847 static void host_to_target_termios (void *dst
, const void *src
)
5849 struct target_termios
*target
= dst
;
5850 const struct host_termios
*host
= src
;
5853 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5855 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5857 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5859 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5860 target
->c_line
= host
->c_line
;
5862 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5863 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5864 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5865 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5866 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5867 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5868 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5869 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5870 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5871 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5872 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5873 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5874 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5875 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5876 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5877 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5878 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5879 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
/* Thunk descriptor used by the generic ioctl machinery for termios
 * arguments.  The paired initializers are { target, host } for size and
 * alignment.  NOTE(review): convert[] appears ordered as
 * { to-target, to-host } — confirm against the StructEntry definition
 * in thunk.h before relying on the index meaning.
 */
5882 static const StructEntry struct_termios_def
= {
5883 .convert
= { host_to_target_termios
, target_to_host_termios
},
5884 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5885 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5888 static bitmask_transtbl mmap_flags_tbl
[] = {
5889 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5890 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5891 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5892 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
,
5893 MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5894 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
,
5895 MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5896 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
,
5897 MAP_DENYWRITE
, MAP_DENYWRITE
},
5898 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
,
5899 MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5900 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5901 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
,
5902 MAP_NORESERVE
, MAP_NORESERVE
},
5903 { TARGET_MAP_HUGETLB
, TARGET_MAP_HUGETLB
, MAP_HUGETLB
, MAP_HUGETLB
},
5904 /* MAP_STACK had been ignored by the kernel for quite some time.
5905 Recognize it for the target insofar as we do not want to pass
5906 it through to the host. */
5907 { TARGET_MAP_STACK
, TARGET_MAP_STACK
, 0, 0 },
5911 #if defined(TARGET_I386)
5913 /* NOTE: there is really one LDT for all the threads */
5914 static uint8_t *ldt_table
;
5916 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5923 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5924 if (size
> bytecount
)
5926 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
5928 return -TARGET_EFAULT
;
5929 /* ??? Should this by byteswapped? */
5930 memcpy(p
, ldt_table
, size
);
5931 unlock_user(p
, ptr
, size
);
5935 /* XXX: add locking support */
5936 static abi_long
write_ldt(CPUX86State
*env
,
5937 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
5939 struct target_modify_ldt_ldt_s ldt_info
;
5940 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5941 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5942 int seg_not_present
, useable
, lm
;
5943 uint32_t *lp
, entry_1
, entry_2
;
5945 if (bytecount
!= sizeof(ldt_info
))
5946 return -TARGET_EINVAL
;
5947 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
5948 return -TARGET_EFAULT
;
5949 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5950 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5951 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5952 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5953 unlock_user_struct(target_ldt_info
, ptr
, 0);
5955 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
5956 return -TARGET_EINVAL
;
5957 seg_32bit
= ldt_info
.flags
& 1;
5958 contents
= (ldt_info
.flags
>> 1) & 3;
5959 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5960 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5961 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5962 useable
= (ldt_info
.flags
>> 6) & 1;
5966 lm
= (ldt_info
.flags
>> 7) & 1;
5968 if (contents
== 3) {
5970 return -TARGET_EINVAL
;
5971 if (seg_not_present
== 0)
5972 return -TARGET_EINVAL
;
5974 /* allocate the LDT */
5976 env
->ldt
.base
= target_mmap(0,
5977 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
5978 PROT_READ
|PROT_WRITE
,
5979 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
5980 if (env
->ldt
.base
== -1)
5981 return -TARGET_ENOMEM
;
5982 memset(g2h(env
->ldt
.base
), 0,
5983 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
5984 env
->ldt
.limit
= 0xffff;
5985 ldt_table
= g2h(env
->ldt
.base
);
5988 /* NOTE: same code as Linux kernel */
5989 /* Allow LDTs to be cleared by the user. */
5990 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5993 read_exec_only
== 1 &&
5995 limit_in_pages
== 0 &&
5996 seg_not_present
== 1 &&
6004 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6005 (ldt_info
.limit
& 0x0ffff);
6006 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6007 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6008 (ldt_info
.limit
& 0xf0000) |
6009 ((read_exec_only
^ 1) << 9) |
6011 ((seg_not_present
^ 1) << 15) |
6013 (limit_in_pages
<< 23) |
6017 entry_2
|= (useable
<< 20);
6019 /* Install the new entry ... */
6021 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
6022 lp
[0] = tswap32(entry_1
);
6023 lp
[1] = tswap32(entry_2
);
6027 /* specific and weird i386 syscalls */
6028 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
6029 unsigned long bytecount
)
6035 ret
= read_ldt(ptr
, bytecount
);
6038 ret
= write_ldt(env
, ptr
, bytecount
, 1);
6041 ret
= write_ldt(env
, ptr
, bytecount
, 0);
6044 ret
= -TARGET_ENOSYS
;
6050 #if defined(TARGET_I386) && defined(TARGET_ABI32)
6051 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6053 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
6054 struct target_modify_ldt_ldt_s ldt_info
;
6055 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6056 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6057 int seg_not_present
, useable
, lm
;
6058 uint32_t *lp
, entry_1
, entry_2
;
6061 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6062 if (!target_ldt_info
)
6063 return -TARGET_EFAULT
;
6064 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6065 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6066 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6067 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6068 if (ldt_info
.entry_number
== -1) {
6069 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
6070 if (gdt_table
[i
] == 0) {
6071 ldt_info
.entry_number
= i
;
6072 target_ldt_info
->entry_number
= tswap32(i
);
6077 unlock_user_struct(target_ldt_info
, ptr
, 1);
6079 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
6080 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
6081 return -TARGET_EINVAL
;
6082 seg_32bit
= ldt_info
.flags
& 1;
6083 contents
= (ldt_info
.flags
>> 1) & 3;
6084 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6085 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6086 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6087 useable
= (ldt_info
.flags
>> 6) & 1;
6091 lm
= (ldt_info
.flags
>> 7) & 1;
6094 if (contents
== 3) {
6095 if (seg_not_present
== 0)
6096 return -TARGET_EINVAL
;
6099 /* NOTE: same code as Linux kernel */
6100 /* Allow LDTs to be cleared by the user. */
6101 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6102 if ((contents
== 0 &&
6103 read_exec_only
== 1 &&
6105 limit_in_pages
== 0 &&
6106 seg_not_present
== 1 &&
6114 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6115 (ldt_info
.limit
& 0x0ffff);
6116 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6117 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6118 (ldt_info
.limit
& 0xf0000) |
6119 ((read_exec_only
^ 1) << 9) |
6121 ((seg_not_present
^ 1) << 15) |
6123 (limit_in_pages
<< 23) |
6128 /* Install the new entry ... */
6130 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
6131 lp
[0] = tswap32(entry_1
);
6132 lp
[1] = tswap32(entry_2
);
6136 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6138 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6139 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
6140 uint32_t base_addr
, limit
, flags
;
6141 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
6142 int seg_not_present
, useable
, lm
;
6143 uint32_t *lp
, entry_1
, entry_2
;
6145 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6146 if (!target_ldt_info
)
6147 return -TARGET_EFAULT
;
6148 idx
= tswap32(target_ldt_info
->entry_number
);
6149 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
6150 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
6151 unlock_user_struct(target_ldt_info
, ptr
, 1);
6152 return -TARGET_EINVAL
;
6154 lp
= (uint32_t *)(gdt_table
+ idx
);
6155 entry_1
= tswap32(lp
[0]);
6156 entry_2
= tswap32(lp
[1]);
6158 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
6159 contents
= (entry_2
>> 10) & 3;
6160 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
6161 seg_32bit
= (entry_2
>> 22) & 1;
6162 limit_in_pages
= (entry_2
>> 23) & 1;
6163 useable
= (entry_2
>> 20) & 1;
6167 lm
= (entry_2
>> 21) & 1;
6169 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
6170 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
6171 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
6172 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
6173 base_addr
= (entry_1
>> 16) |
6174 (entry_2
& 0xff000000) |
6175 ((entry_2
& 0xff) << 16);
6176 target_ldt_info
->base_addr
= tswapal(base_addr
);
6177 target_ldt_info
->limit
= tswap32(limit
);
6178 target_ldt_info
->flags
= tswap32(flags
);
6179 unlock_user_struct(target_ldt_info
, ptr
, 1);
6182 #endif /* TARGET_I386 && TARGET_ABI32 */
6184 #ifndef TARGET_ABI32
6185 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6192 case TARGET_ARCH_SET_GS
:
6193 case TARGET_ARCH_SET_FS
:
6194 if (code
== TARGET_ARCH_SET_GS
)
6198 cpu_x86_load_seg(env
, idx
, 0);
6199 env
->segs
[idx
].base
= addr
;
6201 case TARGET_ARCH_GET_GS
:
6202 case TARGET_ARCH_GET_FS
:
6203 if (code
== TARGET_ARCH_GET_GS
)
6207 val
= env
->segs
[idx
].base
;
6208 if (put_user(val
, addr
, abi_ulong
))
6209 ret
= -TARGET_EFAULT
;
6212 ret
= -TARGET_EINVAL
;
6219 #endif /* defined(TARGET_I386) */
6221 #define NEW_STACK_SIZE 0x40000
6224 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
6227 pthread_mutex_t mutex
;
6228 pthread_cond_t cond
;
6231 abi_ulong child_tidptr
;
6232 abi_ulong parent_tidptr
;
6236 static void *clone_func(void *arg
)
6238 new_thread_info
*info
= arg
;
6243 rcu_register_thread();
6244 tcg_register_thread();
6246 cpu
= ENV_GET_CPU(env
);
6248 ts
= (TaskState
*)cpu
->opaque
;
6249 info
->tid
= gettid();
6251 if (info
->child_tidptr
)
6252 put_user_u32(info
->tid
, info
->child_tidptr
);
6253 if (info
->parent_tidptr
)
6254 put_user_u32(info
->tid
, info
->parent_tidptr
);
6255 /* Enable signals. */
6256 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
6257 /* Signal to the parent that we're ready. */
6258 pthread_mutex_lock(&info
->mutex
);
6259 pthread_cond_broadcast(&info
->cond
);
6260 pthread_mutex_unlock(&info
->mutex
);
6261 /* Wait until the parent has finished initializing the tls state. */
6262 pthread_mutex_lock(&clone_lock
);
6263 pthread_mutex_unlock(&clone_lock
);
6269 /* do_fork() Must return host values and target errnos (unlike most
6270 do_*() functions). */
6271 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
6272 abi_ulong parent_tidptr
, target_ulong newtls
,
6273 abi_ulong child_tidptr
)
6275 CPUState
*cpu
= ENV_GET_CPU(env
);
6279 CPUArchState
*new_env
;
6282 flags
&= ~CLONE_IGNORED_FLAGS
;
6284 /* Emulate vfork() with fork() */
6285 if (flags
& CLONE_VFORK
)
6286 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
6288 if (flags
& CLONE_VM
) {
6289 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
6290 new_thread_info info
;
6291 pthread_attr_t attr
;
6293 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
6294 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
6295 return -TARGET_EINVAL
;
6298 ts
= g_new0(TaskState
, 1);
6299 init_task_state(ts
);
6300 /* we create a new CPU instance. */
6301 new_env
= cpu_copy(env
);
6302 /* Init regs that differ from the parent. */
6303 cpu_clone_regs(new_env
, newsp
);
6304 new_cpu
= ENV_GET_CPU(new_env
);
6305 new_cpu
->opaque
= ts
;
6306 ts
->bprm
= parent_ts
->bprm
;
6307 ts
->info
= parent_ts
->info
;
6308 ts
->signal_mask
= parent_ts
->signal_mask
;
6310 if (flags
& CLONE_CHILD_CLEARTID
) {
6311 ts
->child_tidptr
= child_tidptr
;
6314 if (flags
& CLONE_SETTLS
) {
6315 cpu_set_tls (new_env
, newtls
);
6318 /* Grab a mutex so that thread setup appears atomic. */
6319 pthread_mutex_lock(&clone_lock
);
6321 memset(&info
, 0, sizeof(info
));
6322 pthread_mutex_init(&info
.mutex
, NULL
);
6323 pthread_mutex_lock(&info
.mutex
);
6324 pthread_cond_init(&info
.cond
, NULL
);
6326 if (flags
& CLONE_CHILD_SETTID
) {
6327 info
.child_tidptr
= child_tidptr
;
6329 if (flags
& CLONE_PARENT_SETTID
) {
6330 info
.parent_tidptr
= parent_tidptr
;
6333 ret
= pthread_attr_init(&attr
);
6334 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6335 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6336 /* It is not safe to deliver signals until the child has finished
6337 initializing, so temporarily block all signals. */
6338 sigfillset(&sigmask
);
6339 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6341 /* If this is our first additional thread, we need to ensure we
6342 * generate code for parallel execution and flush old translations.
6344 if (!parallel_cpus
) {
6345 parallel_cpus
= true;
6349 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6350 /* TODO: Free new CPU state if thread creation failed. */
6352 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6353 pthread_attr_destroy(&attr
);
6355 /* Wait for the child to initialize. */
6356 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6361 pthread_mutex_unlock(&info
.mutex
);
6362 pthread_cond_destroy(&info
.cond
);
6363 pthread_mutex_destroy(&info
.mutex
);
6364 pthread_mutex_unlock(&clone_lock
);
6366 /* if no CLONE_VM, we consider it is a fork */
6367 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
6368 return -TARGET_EINVAL
;
6371 /* We can't support custom termination signals */
6372 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
6373 return -TARGET_EINVAL
;
6376 if (block_signals()) {
6377 return -TARGET_ERESTARTSYS
;
6383 /* Child Process. */
6384 cpu_clone_regs(env
, newsp
);
6386 /* There is a race condition here. The parent process could
6387 theoretically read the TID in the child process before the child
6388 tid is set. This would require using either ptrace
6389 (not implemented) or having *_tidptr to point at a shared memory
6390 mapping. We can't repeat the spinlock hack used above because
6391 the child process gets its own copy of the lock. */
6392 if (flags
& CLONE_CHILD_SETTID
)
6393 put_user_u32(gettid(), child_tidptr
);
6394 if (flags
& CLONE_PARENT_SETTID
)
6395 put_user_u32(gettid(), parent_tidptr
);
6396 ts
= (TaskState
*)cpu
->opaque
;
6397 if (flags
& CLONE_SETTLS
)
6398 cpu_set_tls (env
, newtls
);
6399 if (flags
& CLONE_CHILD_CLEARTID
)
6400 ts
->child_tidptr
= child_tidptr
;
6408 /* warning : doesn't handle linux specific flags... */
6409 static int target_to_host_fcntl_cmd(int cmd
)
6412 case TARGET_F_DUPFD
:
6413 case TARGET_F_GETFD
:
6414 case TARGET_F_SETFD
:
6415 case TARGET_F_GETFL
:
6416 case TARGET_F_SETFL
:
6418 case TARGET_F_GETLK
:
6420 case TARGET_F_SETLK
:
6422 case TARGET_F_SETLKW
:
6424 case TARGET_F_GETOWN
:
6426 case TARGET_F_SETOWN
:
6428 case TARGET_F_GETSIG
:
6430 case TARGET_F_SETSIG
:
6432 #if TARGET_ABI_BITS == 32
6433 case TARGET_F_GETLK64
:
6435 case TARGET_F_SETLK64
:
6437 case TARGET_F_SETLKW64
:
6440 case TARGET_F_SETLEASE
:
6442 case TARGET_F_GETLEASE
:
6444 #ifdef F_DUPFD_CLOEXEC
6445 case TARGET_F_DUPFD_CLOEXEC
:
6446 return F_DUPFD_CLOEXEC
;
6448 case TARGET_F_NOTIFY
:
6451 case TARGET_F_GETOWN_EX
:
6455 case TARGET_F_SETOWN_EX
:
6459 case TARGET_F_SETPIPE_SZ
:
6460 return F_SETPIPE_SZ
;
6461 case TARGET_F_GETPIPE_SZ
:
6462 return F_GETPIPE_SZ
;
6465 return -TARGET_EINVAL
;
6467 return -TARGET_EINVAL
;
6470 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
6471 static const bitmask_transtbl flock_tbl
[] = {
6472 TRANSTBL_CONVERT(F_RDLCK
),
6473 TRANSTBL_CONVERT(F_WRLCK
),
6474 TRANSTBL_CONVERT(F_UNLCK
),
6475 TRANSTBL_CONVERT(F_EXLCK
),
6476 TRANSTBL_CONVERT(F_SHLCK
),
6480 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6481 abi_ulong target_flock_addr
)
6483 struct target_flock
*target_fl
;
6486 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6487 return -TARGET_EFAULT
;
6490 __get_user(l_type
, &target_fl
->l_type
);
6491 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6492 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6493 __get_user(fl
->l_start
, &target_fl
->l_start
);
6494 __get_user(fl
->l_len
, &target_fl
->l_len
);
6495 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6496 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6500 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6501 const struct flock64
*fl
)
6503 struct target_flock
*target_fl
;
6506 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6507 return -TARGET_EFAULT
;
6510 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6511 __put_user(l_type
, &target_fl
->l_type
);
6512 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6513 __put_user(fl
->l_start
, &target_fl
->l_start
);
6514 __put_user(fl
->l_len
, &target_fl
->l_len
);
6515 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6516 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6520 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6521 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
6523 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6524 static inline abi_long
copy_from_user_eabi_flock64(struct flock64
*fl
,
6525 abi_ulong target_flock_addr
)
6527 struct target_eabi_flock64
*target_fl
;
6530 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6531 return -TARGET_EFAULT
;
6534 __get_user(l_type
, &target_fl
->l_type
);
6535 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6536 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6537 __get_user(fl
->l_start
, &target_fl
->l_start
);
6538 __get_user(fl
->l_len
, &target_fl
->l_len
);
6539 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6540 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6544 static inline abi_long
copy_to_user_eabi_flock64(abi_ulong target_flock_addr
,
6545 const struct flock64
*fl
)
6547 struct target_eabi_flock64
*target_fl
;
6550 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6551 return -TARGET_EFAULT
;
6554 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6555 __put_user(l_type
, &target_fl
->l_type
);
6556 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6557 __put_user(fl
->l_start
, &target_fl
->l_start
);
6558 __put_user(fl
->l_len
, &target_fl
->l_len
);
6559 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6560 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6565 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6566 abi_ulong target_flock_addr
)
6568 struct target_flock64
*target_fl
;
6571 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6572 return -TARGET_EFAULT
;
6575 __get_user(l_type
, &target_fl
->l_type
);
6576 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6577 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6578 __get_user(fl
->l_start
, &target_fl
->l_start
);
6579 __get_user(fl
->l_len
, &target_fl
->l_len
);
6580 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6581 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6585 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6586 const struct flock64
*fl
)
6588 struct target_flock64
*target_fl
;
6591 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6592 return -TARGET_EFAULT
;
6595 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6596 __put_user(l_type
, &target_fl
->l_type
);
6597 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6598 __put_user(fl
->l_start
, &target_fl
->l_start
);
6599 __put_user(fl
->l_len
, &target_fl
->l_len
);
6600 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6601 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6605 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
6607 struct flock64 fl64
;
6609 struct f_owner_ex fox
;
6610 struct target_f_owner_ex
*target_fox
;
6613 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
6615 if (host_cmd
== -TARGET_EINVAL
)
6619 case TARGET_F_GETLK
:
6620 ret
= copy_from_user_flock(&fl64
, arg
);
6624 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6626 ret
= copy_to_user_flock(arg
, &fl64
);
6630 case TARGET_F_SETLK
:
6631 case TARGET_F_SETLKW
:
6632 ret
= copy_from_user_flock(&fl64
, arg
);
6636 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6639 case TARGET_F_GETLK64
:
6640 ret
= copy_from_user_flock64(&fl64
, arg
);
6644 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6646 ret
= copy_to_user_flock64(arg
, &fl64
);
6649 case TARGET_F_SETLK64
:
6650 case TARGET_F_SETLKW64
:
6651 ret
= copy_from_user_flock64(&fl64
, arg
);
6655 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6658 case TARGET_F_GETFL
:
6659 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6661 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
6665 case TARGET_F_SETFL
:
6666 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
6667 target_to_host_bitmask(arg
,
6672 case TARGET_F_GETOWN_EX
:
6673 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6675 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
6676 return -TARGET_EFAULT
;
6677 target_fox
->type
= tswap32(fox
.type
);
6678 target_fox
->pid
= tswap32(fox
.pid
);
6679 unlock_user_struct(target_fox
, arg
, 1);
6685 case TARGET_F_SETOWN_EX
:
6686 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
6687 return -TARGET_EFAULT
;
6688 fox
.type
= tswap32(target_fox
->type
);
6689 fox
.pid
= tswap32(target_fox
->pid
);
6690 unlock_user_struct(target_fox
, arg
, 0);
6691 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6695 case TARGET_F_SETOWN
:
6696 case TARGET_F_GETOWN
:
6697 case TARGET_F_SETSIG
:
6698 case TARGET_F_GETSIG
:
6699 case TARGET_F_SETLEASE
:
6700 case TARGET_F_GETLEASE
:
6701 case TARGET_F_SETPIPE_SZ
:
6702 case TARGET_F_GETPIPE_SZ
:
6703 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6707 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
6715 static inline int high2lowuid(int uid
)
6723 static inline int high2lowgid(int gid
)
6731 static inline int low2highuid(int uid
)
6733 if ((int16_t)uid
== -1)
6739 static inline int low2highgid(int gid
)
6741 if ((int16_t)gid
== -1)
6746 static inline int tswapid(int id
)
6751 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6753 #else /* !USE_UID16 */
6754 static inline int high2lowuid(int uid
)
6758 static inline int high2lowgid(int gid
)
6762 static inline int low2highuid(int uid
)
6766 static inline int low2highgid(int gid
)
6770 static inline int tswapid(int id
)
6775 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6777 #endif /* USE_UID16 */
6779 /* We must do direct syscalls for setting UID/GID, because we want to
6780 * implement the Linux system call semantics of "change only for this thread",
6781 * not the libc/POSIX semantics of "change for all threads in process".
6782 * (See http://ewontfix.com/17/ for more details.)
6783 * We use the 32-bit version of the syscalls if present; if it is not
6784 * then either the host architecture supports 32-bit UIDs natively with
6785 * the standard syscall, or the 16-bit UID is the best we can do.
6787 #ifdef __NR_setuid32
6788 #define __NR_sys_setuid __NR_setuid32
6790 #define __NR_sys_setuid __NR_setuid
6792 #ifdef __NR_setgid32
6793 #define __NR_sys_setgid __NR_setgid32
6795 #define __NR_sys_setgid __NR_setgid
6797 #ifdef __NR_setresuid32
6798 #define __NR_sys_setresuid __NR_setresuid32
6800 #define __NR_sys_setresuid __NR_setresuid
6802 #ifdef __NR_setresgid32
6803 #define __NR_sys_setresgid __NR_setresgid32
6805 #define __NR_sys_setresgid __NR_setresgid
6808 _syscall1(int, sys_setuid
, uid_t
, uid
)
6809 _syscall1(int, sys_setgid
, gid_t
, gid
)
6810 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
6811 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
6813 void syscall_init(void)
6816 const argtype
*arg_type
;
6820 thunk_init(STRUCT_MAX
);
6822 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6823 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6824 #include "syscall_types.h"
6826 #undef STRUCT_SPECIAL
6828 /* Build target_to_host_errno_table[] table from
6829 * host_to_target_errno_table[]. */
6830 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
6831 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
6834 /* we patch the ioctl size if necessary. We rely on the fact that
6835 no ioctl has all the bits at '1' in the size field */
6837 while (ie
->target_cmd
!= 0) {
6838 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
6839 TARGET_IOC_SIZEMASK
) {
6840 arg_type
= ie
->arg_type
;
6841 if (arg_type
[0] != TYPE_PTR
) {
6842 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
6847 size
= thunk_type_size(arg_type
, 0);
6848 ie
->target_cmd
= (ie
->target_cmd
&
6849 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
6850 (size
<< TARGET_IOC_SIZESHIFT
);
6853 /* automatic consistency check if same arch */
6854 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6855 (defined(__x86_64__) && defined(TARGET_X86_64))
6856 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
6857 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6858 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/* Combine a 64-bit value passed in two 32-bit guest registers; which word
 * is the high half depends on guest endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABI: the value already fits in one register; the second is unused. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64: on ABIs that require 64-bit syscall arguments in aligned
 * register pairs, skip the padding register before combining the halves. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair alignment handling as target_truncate64. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2, abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
6909 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
6910 abi_ulong target_addr
)
6912 struct target_timespec
*target_ts
;
6914 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
6915 return -TARGET_EFAULT
;
6916 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6917 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6918 unlock_user_struct(target_ts
, target_addr
, 0);
6922 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
6923 struct timespec
*host_ts
)
6925 struct target_timespec
*target_ts
;
6927 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
6928 return -TARGET_EFAULT
;
6929 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6930 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6931 unlock_user_struct(target_ts
, target_addr
, 1);
6935 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
6936 abi_ulong target_addr
)
6938 struct target_itimerspec
*target_itspec
;
6940 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
6941 return -TARGET_EFAULT
;
6944 host_itspec
->it_interval
.tv_sec
=
6945 tswapal(target_itspec
->it_interval
.tv_sec
);
6946 host_itspec
->it_interval
.tv_nsec
=
6947 tswapal(target_itspec
->it_interval
.tv_nsec
);
6948 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
6949 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
6951 unlock_user_struct(target_itspec
, target_addr
, 1);
6955 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
6956 struct itimerspec
*host_its
)
6958 struct target_itimerspec
*target_itspec
;
6960 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
6961 return -TARGET_EFAULT
;
6964 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
6965 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
6967 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
6968 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
6970 unlock_user_struct(target_itspec
, target_addr
, 0);
6974 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
6975 abi_long target_addr
)
6977 struct target_timex
*target_tx
;
6979 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
6980 return -TARGET_EFAULT
;
6983 __get_user(host_tx
->modes
, &target_tx
->modes
);
6984 __get_user(host_tx
->offset
, &target_tx
->offset
);
6985 __get_user(host_tx
->freq
, &target_tx
->freq
);
6986 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6987 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
6988 __get_user(host_tx
->status
, &target_tx
->status
);
6989 __get_user(host_tx
->constant
, &target_tx
->constant
);
6990 __get_user(host_tx
->precision
, &target_tx
->precision
);
6991 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6992 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6993 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6994 __get_user(host_tx
->tick
, &target_tx
->tick
);
6995 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6996 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
6997 __get_user(host_tx
->shift
, &target_tx
->shift
);
6998 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
6999 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7000 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7001 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7002 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7003 __get_user(host_tx
->tai
, &target_tx
->tai
);
7005 unlock_user_struct(target_tx
, target_addr
, 0);
7009 static inline abi_long
host_to_target_timex(abi_long target_addr
,
7010 struct timex
*host_tx
)
7012 struct target_timex
*target_tx
;
7014 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
7015 return -TARGET_EFAULT
;
7018 __put_user(host_tx
->modes
, &target_tx
->modes
);
7019 __put_user(host_tx
->offset
, &target_tx
->offset
);
7020 __put_user(host_tx
->freq
, &target_tx
->freq
);
7021 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7022 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
7023 __put_user(host_tx
->status
, &target_tx
->status
);
7024 __put_user(host_tx
->constant
, &target_tx
->constant
);
7025 __put_user(host_tx
->precision
, &target_tx
->precision
);
7026 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7027 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
7028 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
7029 __put_user(host_tx
->tick
, &target_tx
->tick
);
7030 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7031 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
7032 __put_user(host_tx
->shift
, &target_tx
->shift
);
7033 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
7034 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7035 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7036 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7037 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7038 __put_user(host_tx
->tai
, &target_tx
->tai
);
7040 unlock_user_struct(target_tx
, target_addr
, 1);
7045 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
7046 abi_ulong target_addr
)
7048 struct target_sigevent
*target_sevp
;
7050 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
7051 return -TARGET_EFAULT
;
7054 /* This union is awkward on 64 bit systems because it has a 32 bit
7055 * integer and a pointer in it; we follow the conversion approach
7056 * used for handling sigval types in signal.c so the guest should get
7057 * the correct value back even if we did a 64 bit byteswap and it's
7058 * using the 32 bit integer.
7060 host_sevp
->sigev_value
.sival_ptr
=
7061 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
7062 host_sevp
->sigev_signo
=
7063 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
7064 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
7065 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
7067 unlock_user_struct(target_sevp
, target_addr
, 1);
#if defined(TARGET_NR_mlockall)
/* Translate the guest mlockall() flag bits to the host's MCL_* values. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }
    return result;
}
#endif
7086 static inline abi_long
host_to_target_stat64(void *cpu_env
,
7087 abi_ulong target_addr
,
7088 struct stat
*host_st
)
7090 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7091 if (((CPUARMState
*)cpu_env
)->eabi
) {
7092 struct target_eabi_stat64
*target_st
;
7094 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
7095 return -TARGET_EFAULT
;
7096 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
7097 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
7098 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
7099 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7100 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
7102 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
7103 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
7104 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
7105 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
7106 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7107 __put_user(host_st
->st_size
, &target_st
->st_size
);
7108 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7109 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7110 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7111 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7112 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7113 unlock_user_struct(target_st
, target_addr
, 1);
7117 #if defined(TARGET_HAS_STRUCT_STAT64)
7118 struct target_stat64
*target_st
;
7120 struct target_stat
*target_st
;
7123 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
7124 return -TARGET_EFAULT
;
7125 memset(target_st
, 0, sizeof(*target_st
));
7126 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
7127 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
7128 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7129 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
7131 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
7132 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
7133 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
7134 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
7135 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7136 /* XXX: better use of kernel struct */
7137 __put_user(host_st
->st_size
, &target_st
->st_size
);
7138 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7139 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7140 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7141 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7142 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7143 unlock_user_struct(target_st
, target_addr
, 1);
7149 /* ??? Using host futex calls even when target atomic operations
7150 are not really atomic probably breaks things. However implementing
7151 futexes locally would make futexes shared between multiple processes
7152 tricky. However they're probably useless because guest atomic
7153 operations won't work either. */
7154 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
7155 target_ulong uaddr2
, int val3
)
7157 struct timespec ts
, *pts
;
7160 /* ??? We assume FUTEX_* constants are the same on both host
7162 #ifdef FUTEX_CMD_MASK
7163 base_op
= op
& FUTEX_CMD_MASK
;
7169 case FUTEX_WAIT_BITSET
:
7172 target_to_host_timespec(pts
, timeout
);
7176 return get_errno(safe_futex(g2h(uaddr
), op
, tswap32(val
),
7179 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
7181 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
7183 case FUTEX_CMP_REQUEUE
:
7185 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7186 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7187 But the prototype takes a `struct timespec *'; insert casts
7188 to satisfy the compiler. We do not need to tswap TIMEOUT
7189 since it's not compared to guest memory. */
7190 pts
= (struct timespec
*)(uintptr_t) timeout
;
7191 return get_errno(safe_futex(g2h(uaddr
), op
, val
, pts
,
7193 (base_op
== FUTEX_CMP_REQUEUE
7197 return -TARGET_ENOSYS
;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): size the guest handle buffer from its
 * handle_bytes field, call the host syscall into a scratch handle, then
 * copy the (opaque) result back with the two header fields byteswapped. */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): duplicate the guest's handle into host
 * memory, fix up the byteswapped header fields, and open it with the
 * translated open flags.
 * NOTE(review): the function tail after the final unlock_user was dropped
 * by the extraction; reconstructed as g_free + return ret. */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
7288 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7290 /* signalfd siginfo conversion */
7293 host_to_target_signalfd_siginfo(struct signalfd_siginfo
*tinfo
,
7294 const struct signalfd_siginfo
*info
)
7296 int sig
= host_to_target_signal(info
->ssi_signo
);
7298 /* linux/signalfd.h defines a ssi_addr_lsb
7299 * not defined in sys/signalfd.h but used by some kernels
7302 #ifdef BUS_MCEERR_AO
7303 if (tinfo
->ssi_signo
== SIGBUS
&&
7304 (tinfo
->ssi_code
== BUS_MCEERR_AR
||
7305 tinfo
->ssi_code
== BUS_MCEERR_AO
)) {
7306 uint16_t *ssi_addr_lsb
= (uint16_t *)(&info
->ssi_addr
+ 1);
7307 uint16_t *tssi_addr_lsb
= (uint16_t *)(&tinfo
->ssi_addr
+ 1);
7308 *tssi_addr_lsb
= tswap16(*ssi_addr_lsb
);
7312 tinfo
->ssi_signo
= tswap32(sig
);
7313 tinfo
->ssi_errno
= tswap32(tinfo
->ssi_errno
);
7314 tinfo
->ssi_code
= tswap32(info
->ssi_code
);
7315 tinfo
->ssi_pid
= tswap32(info
->ssi_pid
);
7316 tinfo
->ssi_uid
= tswap32(info
->ssi_uid
);
7317 tinfo
->ssi_fd
= tswap32(info
->ssi_fd
);
7318 tinfo
->ssi_tid
= tswap32(info
->ssi_tid
);
7319 tinfo
->ssi_band
= tswap32(info
->ssi_band
);
7320 tinfo
->ssi_overrun
= tswap32(info
->ssi_overrun
);
7321 tinfo
->ssi_trapno
= tswap32(info
->ssi_trapno
);
7322 tinfo
->ssi_status
= tswap32(info
->ssi_status
);
7323 tinfo
->ssi_int
= tswap32(info
->ssi_int
);
7324 tinfo
->ssi_ptr
= tswap64(info
->ssi_ptr
);
7325 tinfo
->ssi_utime
= tswap64(info
->ssi_utime
);
7326 tinfo
->ssi_stime
= tswap64(info
->ssi_stime
);
7327 tinfo
->ssi_addr
= tswap64(info
->ssi_addr
);
7330 static abi_long
host_to_target_data_signalfd(void *buf
, size_t len
)
7334 for (i
= 0; i
< len
; i
+= sizeof(struct signalfd_siginfo
)) {
7335 host_to_target_signalfd_siginfo(buf
+ i
, buf
+ i
);
7341 static TargetFdTrans target_signalfd_trans
= {
7342 .host_to_target_data
= host_to_target_data_signalfd
,
7345 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
7348 target_sigset_t
*target_mask
;
7352 if (flags
& ~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
)) {
7353 return -TARGET_EINVAL
;
7355 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
7356 return -TARGET_EFAULT
;
7359 target_to_host_sigset(&host_mask
, target_mask
);
7361 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
7363 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
7365 fd_trans_register(ret
, &target_signalfd_trans
);
7368 unlock_user_struct(target_mask
, mask
, 0);
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Low 7 bits carry the terminating signal; remap just those. */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8-15. */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
7388 static int open_self_cmdline(void *cpu_env
, int fd
)
7390 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7391 struct linux_binprm
*bprm
= ((TaskState
*)cpu
->opaque
)->bprm
;
7394 for (i
= 0; i
< bprm
->argc
; i
++) {
7395 size_t len
= strlen(bprm
->argv
[i
]) + 1;
7397 if (write(fd
, bprm
->argv
[i
], len
) != len
) {
7405 static int open_self_maps(void *cpu_env
, int fd
)
7407 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7408 TaskState
*ts
= cpu
->opaque
;
7414 fp
= fopen("/proc/self/maps", "r");
7419 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7420 int fields
, dev_maj
, dev_min
, inode
;
7421 uint64_t min
, max
, offset
;
7422 char flag_r
, flag_w
, flag_x
, flag_p
;
7423 char path
[512] = "";
7424 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
7425 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
7426 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
7428 if ((fields
< 10) || (fields
> 11)) {
7431 if (h2g_valid(min
)) {
7432 int flags
= page_get_flags(h2g(min
));
7433 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
);
7434 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
7437 if (h2g(min
) == ts
->info
->stack_limit
) {
7438 pstrcpy(path
, sizeof(path
), " [stack]");
7440 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
7441 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
7442 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
7443 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
7444 path
[0] ? " " : "", path
);
7454 static int open_self_stat(void *cpu_env
, int fd
)
7456 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7457 TaskState
*ts
= cpu
->opaque
;
7458 abi_ulong start_stack
= ts
->info
->start_stack
;
7461 for (i
= 0; i
< 44; i
++) {
7469 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7470 } else if (i
== 1) {
7472 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
7473 } else if (i
== 27) {
7476 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7478 /* for the rest, there is MasterCard */
7479 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
7483 if (write(fd
, buf
, len
) != len
) {
7491 static int open_self_auxv(void *cpu_env
, int fd
)
7493 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7494 TaskState
*ts
= cpu
->opaque
;
7495 abi_ulong auxv
= ts
->info
->saved_auxv
;
7496 abi_ulong len
= ts
->info
->auxv_len
;
7500 * Auxiliary vector is stored in target process stack.
7501 * read in whole auxv vector and copy it to file
7503 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
7507 r
= write(fd
, ptr
, len
);
7514 lseek(fd
, 0, SEEK_SET
);
7515 unlock_user(ptr
, auxv
, len
);
/* Return 1 if filename names the given /proc entry for the current
 * process — either via "/proc/self/<entry>" or "/proc/<pid>/<entry>"
 * with our own pid — and 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (strncmp(filename, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    filename += strlen("/proc/");

    if (strncmp(filename, "self/", strlen("self/")) == 0) {
        filename += strlen("self/");
    } else if (*filename >= '1' && *filename <= '9') {
        /* A numeric directory only matches if it is our own pid. */
        char myself[80];
        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(filename, myself, strlen(myself)) != 0) {
            return 0;
        }
        filename += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(filename, entry) == 0;
}
7545 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7546 static int is_proc(const char *filename
, const char *entry
)
7548 return strcmp(filename
, entry
) == 0;
7551 static int open_net_route(void *cpu_env
, int fd
)
7558 fp
= fopen("/proc/net/route", "r");
7565 read
= getline(&line
, &len
, fp
);
7566 dprintf(fd
, "%s", line
);
7570 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7572 uint32_t dest
, gw
, mask
;
7573 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
7574 sscanf(line
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7575 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
7576 &mask
, &mtu
, &window
, &irtt
);
7577 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7578 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
7579 metric
, tswap32(mask
), mtu
, window
, irtt
);
7589 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
7592 const char *filename
;
7593 int (*fill
)(void *cpu_env
, int fd
);
7594 int (*cmp
)(const char *s1
, const char *s2
);
7596 const struct fake_open
*fake_open
;
7597 static const struct fake_open fakes
[] = {
7598 { "maps", open_self_maps
, is_proc_myself
},
7599 { "stat", open_self_stat
, is_proc_myself
},
7600 { "auxv", open_self_auxv
, is_proc_myself
},
7601 { "cmdline", open_self_cmdline
, is_proc_myself
},
7602 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7603 { "/proc/net/route", open_net_route
, is_proc
},
7605 { NULL
, NULL
, NULL
}
7608 if (is_proc_myself(pathname
, "exe")) {
7609 int execfd
= qemu_getauxval(AT_EXECFD
);
7610 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
7613 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
7614 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
7619 if (fake_open
->filename
) {
7621 char filename
[PATH_MAX
];
7624 /* create temporary file to map stat to */
7625 tmpdir
= getenv("TMPDIR");
7628 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
7629 fd
= mkstemp(filename
);
7635 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
7641 lseek(fd
, 0, SEEK_SET
);
7646 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
7649 #define TIMER_MAGIC 0x0caf0000
7650 #define TIMER_MAGIC_MASK 0xffff0000
7652 /* Convert QEMU provided timer ID back to internal 16bit index format */
7653 static target_timer_t
get_timer_id(abi_long arg
)
7655 target_timer_t timerid
= arg
;
7657 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
7658 return -TARGET_EINVAL
;
7663 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
7664 return -TARGET_EINVAL
;
7670 static abi_long
swap_data_eventfd(void *buf
, size_t len
)
7672 uint64_t *counter
= buf
;
7675 if (len
< sizeof(uint64_t)) {
7679 for (i
= 0; i
< len
; i
+= sizeof(uint64_t)) {
7680 *counter
= tswap64(*counter
);
7687 static TargetFdTrans target_eventfd_trans
= {
7688 .host_to_target_data
= swap_data_eventfd
,
7689 .target_to_host_data
= swap_data_eventfd
,
#if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
    (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
     defined(__NR_inotify_init1))
/* Byte-swap, in place, each variable-length inotify_event record in a
 * buffer read from an inotify fd.  The name bytes that follow each
 * header are left untouched.  Returns the byte count.
 */
static abi_long host_to_target_data_inotify(void *buf, size_t len)
{
    struct inotify_event *ev;
    size_t off;
    uint32_t name_len;

    for (off = 0; off < len;
         off += sizeof(struct inotify_event) + name_len) {
        ev = (struct inotify_event *)((char *)buf + off);
        /* Capture the host-order length before swapping it. */
        name_len = ev->len;

        ev->wd = tswap32(ev->wd);
        ev->mask = tswap32(ev->mask);
        ev->cookie = tswap32(ev->cookie);
        ev->len = tswap32(name_len);
    }

    return len;
}

/* fd translator for inotify descriptors: swap event headers on read. */
static TargetFdTrans target_inotify_trans = {
    .host_to_target_data = host_to_target_data_inotify,
};
#endif
7719 /* do_syscall() should always have a single exit point at the end so
7720 that actions, such as logging of syscall results, can be performed.
7721 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7722 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
7723 abi_long arg2
, abi_long arg3
, abi_long arg4
,
7724 abi_long arg5
, abi_long arg6
, abi_long arg7
,
7727 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
7733 #if defined(DEBUG_ERESTARTSYS)
7734 /* Debug-only code for exercising the syscall-restart code paths
7735 * in the per-architecture cpu main loops: restart every syscall
7736 * the guest makes once before letting it through.
7743 return -TARGET_ERESTARTSYS
;
7749 gemu_log("syscall %d", num
);
7751 trace_guest_user_syscall(cpu
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
7753 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7756 case TARGET_NR_exit
:
7757 /* In old applications this may be used to implement _exit(2).
7758 However in threaded applictions it is used for thread termination,
7759 and _exit_group is used for application termination.
7760 Do thread termination if we have more then one thread. */
7762 if (block_signals()) {
7763 ret
= -TARGET_ERESTARTSYS
;
7769 if (CPU_NEXT(first_cpu
)) {
7772 /* Remove the CPU from the list. */
7773 QTAILQ_REMOVE(&cpus
, cpu
, node
);
7778 if (ts
->child_tidptr
) {
7779 put_user_u32(0, ts
->child_tidptr
);
7780 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
7784 object_unref(OBJECT(cpu
));
7786 rcu_unregister_thread();
7794 gdb_exit(cpu_env
, arg1
);
7796 ret
= 0; /* avoid warning */
7798 case TARGET_NR_read
:
7802 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7804 ret
= get_errno(safe_read(arg1
, p
, arg3
));
7806 fd_trans_host_to_target_data(arg1
)) {
7807 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
7809 unlock_user(p
, arg2
, ret
);
7812 case TARGET_NR_write
:
7813 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7815 if (fd_trans_target_to_host_data(arg1
)) {
7816 void *copy
= g_malloc(arg3
);
7817 memcpy(copy
, p
, arg3
);
7818 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
7820 ret
= get_errno(safe_write(arg1
, copy
, ret
));
7824 ret
= get_errno(safe_write(arg1
, p
, arg3
));
7826 unlock_user(p
, arg2
, 0);
7828 #ifdef TARGET_NR_open
7829 case TARGET_NR_open
:
7830 if (!(p
= lock_user_string(arg1
)))
7832 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
7833 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
7835 fd_trans_unregister(ret
);
7836 unlock_user(p
, arg1
, 0);
7839 case TARGET_NR_openat
:
7840 if (!(p
= lock_user_string(arg2
)))
7842 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
7843 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
7845 fd_trans_unregister(ret
);
7846 unlock_user(p
, arg2
, 0);
7848 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7849 case TARGET_NR_name_to_handle_at
:
7850 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
7853 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7854 case TARGET_NR_open_by_handle_at
:
7855 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
7856 fd_trans_unregister(ret
);
7859 case TARGET_NR_close
:
7860 fd_trans_unregister(arg1
);
7861 ret
= get_errno(close(arg1
));
7866 #ifdef TARGET_NR_fork
7867 case TARGET_NR_fork
:
7868 ret
= get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
7871 #ifdef TARGET_NR_waitpid
7872 case TARGET_NR_waitpid
:
7875 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
7876 if (!is_error(ret
) && arg2
&& ret
7877 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
7882 #ifdef TARGET_NR_waitid
7883 case TARGET_NR_waitid
:
7887 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
7888 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
7889 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
7891 host_to_target_siginfo(p
, &info
);
7892 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
7897 #ifdef TARGET_NR_creat /* not on alpha */
7898 case TARGET_NR_creat
:
7899 if (!(p
= lock_user_string(arg1
)))
7901 ret
= get_errno(creat(p
, arg2
));
7902 fd_trans_unregister(ret
);
7903 unlock_user(p
, arg1
, 0);
7906 #ifdef TARGET_NR_link
7907 case TARGET_NR_link
:
7910 p
= lock_user_string(arg1
);
7911 p2
= lock_user_string(arg2
);
7913 ret
= -TARGET_EFAULT
;
7915 ret
= get_errno(link(p
, p2
));
7916 unlock_user(p2
, arg2
, 0);
7917 unlock_user(p
, arg1
, 0);
7921 #if defined(TARGET_NR_linkat)
7922 case TARGET_NR_linkat
:
7927 p
= lock_user_string(arg2
);
7928 p2
= lock_user_string(arg4
);
7930 ret
= -TARGET_EFAULT
;
7932 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
7933 unlock_user(p
, arg2
, 0);
7934 unlock_user(p2
, arg4
, 0);
7938 #ifdef TARGET_NR_unlink
7939 case TARGET_NR_unlink
:
7940 if (!(p
= lock_user_string(arg1
)))
7942 ret
= get_errno(unlink(p
));
7943 unlock_user(p
, arg1
, 0);
7946 #if defined(TARGET_NR_unlinkat)
7947 case TARGET_NR_unlinkat
:
7948 if (!(p
= lock_user_string(arg2
)))
7950 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
7951 unlock_user(p
, arg2
, 0);
7954 case TARGET_NR_execve
:
7956 char **argp
, **envp
;
7959 abi_ulong guest_argp
;
7960 abi_ulong guest_envp
;
7967 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
7968 if (get_user_ual(addr
, gp
))
7976 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
7977 if (get_user_ual(addr
, gp
))
7984 argp
= g_new0(char *, argc
+ 1);
7985 envp
= g_new0(char *, envc
+ 1);
7987 for (gp
= guest_argp
, q
= argp
; gp
;
7988 gp
+= sizeof(abi_ulong
), q
++) {
7989 if (get_user_ual(addr
, gp
))
7993 if (!(*q
= lock_user_string(addr
)))
7995 total_size
+= strlen(*q
) + 1;
7999 for (gp
= guest_envp
, q
= envp
; gp
;
8000 gp
+= sizeof(abi_ulong
), q
++) {
8001 if (get_user_ual(addr
, gp
))
8005 if (!(*q
= lock_user_string(addr
)))
8007 total_size
+= strlen(*q
) + 1;
8011 if (!(p
= lock_user_string(arg1
)))
8013 /* Although execve() is not an interruptible syscall it is
8014 * a special case where we must use the safe_syscall wrapper:
8015 * if we allow a signal to happen before we make the host
8016 * syscall then we will 'lose' it, because at the point of
8017 * execve the process leaves QEMU's control. So we use the
8018 * safe syscall wrapper to ensure that we either take the
8019 * signal as a guest signal, or else it does not happen
8020 * before the execve completes and makes it the other
8021 * program's problem.
8023 ret
= get_errno(safe_execve(p
, argp
, envp
));
8024 unlock_user(p
, arg1
, 0);
8029 ret
= -TARGET_EFAULT
;
8032 for (gp
= guest_argp
, q
= argp
; *q
;
8033 gp
+= sizeof(abi_ulong
), q
++) {
8034 if (get_user_ual(addr
, gp
)
8037 unlock_user(*q
, addr
, 0);
8039 for (gp
= guest_envp
, q
= envp
; *q
;
8040 gp
+= sizeof(abi_ulong
), q
++) {
8041 if (get_user_ual(addr
, gp
)
8044 unlock_user(*q
, addr
, 0);
8051 case TARGET_NR_chdir
:
8052 if (!(p
= lock_user_string(arg1
)))
8054 ret
= get_errno(chdir(p
));
8055 unlock_user(p
, arg1
, 0);
8057 #ifdef TARGET_NR_time
8058 case TARGET_NR_time
:
8061 ret
= get_errno(time(&host_time
));
8064 && put_user_sal(host_time
, arg1
))
8069 #ifdef TARGET_NR_mknod
8070 case TARGET_NR_mknod
:
8071 if (!(p
= lock_user_string(arg1
)))
8073 ret
= get_errno(mknod(p
, arg2
, arg3
));
8074 unlock_user(p
, arg1
, 0);
8077 #if defined(TARGET_NR_mknodat)
8078 case TARGET_NR_mknodat
:
8079 if (!(p
= lock_user_string(arg2
)))
8081 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
8082 unlock_user(p
, arg2
, 0);
8085 #ifdef TARGET_NR_chmod
8086 case TARGET_NR_chmod
:
8087 if (!(p
= lock_user_string(arg1
)))
8089 ret
= get_errno(chmod(p
, arg2
));
8090 unlock_user(p
, arg1
, 0);
8093 #ifdef TARGET_NR_break
8094 case TARGET_NR_break
:
8097 #ifdef TARGET_NR_oldstat
8098 case TARGET_NR_oldstat
:
8101 case TARGET_NR_lseek
:
8102 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
8104 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8105 /* Alpha specific */
8106 case TARGET_NR_getxpid
:
8107 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
8108 ret
= get_errno(getpid());
8111 #ifdef TARGET_NR_getpid
8112 case TARGET_NR_getpid
:
8113 ret
= get_errno(getpid());
8116 case TARGET_NR_mount
:
8118 /* need to look at the data field */
8122 p
= lock_user_string(arg1
);
8130 p2
= lock_user_string(arg2
);
8133 unlock_user(p
, arg1
, 0);
8139 p3
= lock_user_string(arg3
);
8142 unlock_user(p
, arg1
, 0);
8144 unlock_user(p2
, arg2
, 0);
8151 /* FIXME - arg5 should be locked, but it isn't clear how to
8152 * do that since it's not guaranteed to be a NULL-terminated
8156 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
8158 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
8160 ret
= get_errno(ret
);
8163 unlock_user(p
, arg1
, 0);
8165 unlock_user(p2
, arg2
, 0);
8167 unlock_user(p3
, arg3
, 0);
8171 #ifdef TARGET_NR_umount
8172 case TARGET_NR_umount
:
8173 if (!(p
= lock_user_string(arg1
)))
8175 ret
= get_errno(umount(p
));
8176 unlock_user(p
, arg1
, 0);
8179 #ifdef TARGET_NR_stime /* not on alpha */
8180 case TARGET_NR_stime
:
8183 if (get_user_sal(host_time
, arg1
))
8185 ret
= get_errno(stime(&host_time
));
8189 case TARGET_NR_ptrace
:
8191 #ifdef TARGET_NR_alarm /* not on alpha */
8192 case TARGET_NR_alarm
:
8196 #ifdef TARGET_NR_oldfstat
8197 case TARGET_NR_oldfstat
:
8200 #ifdef TARGET_NR_pause /* not on alpha */
8201 case TARGET_NR_pause
:
8202 if (!block_signals()) {
8203 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
8205 ret
= -TARGET_EINTR
;
8208 #ifdef TARGET_NR_utime
8209 case TARGET_NR_utime
:
8211 struct utimbuf tbuf
, *host_tbuf
;
8212 struct target_utimbuf
*target_tbuf
;
8214 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
8216 tbuf
.actime
= tswapal(target_tbuf
->actime
);
8217 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
8218 unlock_user_struct(target_tbuf
, arg2
, 0);
8223 if (!(p
= lock_user_string(arg1
)))
8225 ret
= get_errno(utime(p
, host_tbuf
));
8226 unlock_user(p
, arg1
, 0);
8230 #ifdef TARGET_NR_utimes
8231 case TARGET_NR_utimes
:
8233 struct timeval
*tvp
, tv
[2];
8235 if (copy_from_user_timeval(&tv
[0], arg2
)
8236 || copy_from_user_timeval(&tv
[1],
8237 arg2
+ sizeof(struct target_timeval
)))
8243 if (!(p
= lock_user_string(arg1
)))
8245 ret
= get_errno(utimes(p
, tvp
));
8246 unlock_user(p
, arg1
, 0);
8250 #if defined(TARGET_NR_futimesat)
8251 case TARGET_NR_futimesat
:
8253 struct timeval
*tvp
, tv
[2];
8255 if (copy_from_user_timeval(&tv
[0], arg3
)
8256 || copy_from_user_timeval(&tv
[1],
8257 arg3
+ sizeof(struct target_timeval
)))
8263 if (!(p
= lock_user_string(arg2
)))
8265 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
8266 unlock_user(p
, arg2
, 0);
8270 #ifdef TARGET_NR_stty
8271 case TARGET_NR_stty
:
8274 #ifdef TARGET_NR_gtty
8275 case TARGET_NR_gtty
:
8278 #ifdef TARGET_NR_access
8279 case TARGET_NR_access
:
8280 if (!(p
= lock_user_string(arg1
)))
8282 ret
= get_errno(access(path(p
), arg2
));
8283 unlock_user(p
, arg1
, 0);
8286 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8287 case TARGET_NR_faccessat
:
8288 if (!(p
= lock_user_string(arg2
)))
8290 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
8291 unlock_user(p
, arg2
, 0);
8294 #ifdef TARGET_NR_nice /* not on alpha */
8295 case TARGET_NR_nice
:
8296 ret
= get_errno(nice(arg1
));
8299 #ifdef TARGET_NR_ftime
8300 case TARGET_NR_ftime
:
8303 case TARGET_NR_sync
:
8307 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8308 case TARGET_NR_syncfs
:
8309 ret
= get_errno(syncfs(arg1
));
8312 case TARGET_NR_kill
:
8313 ret
= get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
8315 #ifdef TARGET_NR_rename
8316 case TARGET_NR_rename
:
8319 p
= lock_user_string(arg1
);
8320 p2
= lock_user_string(arg2
);
8322 ret
= -TARGET_EFAULT
;
8324 ret
= get_errno(rename(p
, p2
));
8325 unlock_user(p2
, arg2
, 0);
8326 unlock_user(p
, arg1
, 0);
8330 #if defined(TARGET_NR_renameat)
8331 case TARGET_NR_renameat
:
8334 p
= lock_user_string(arg2
);
8335 p2
= lock_user_string(arg4
);
8337 ret
= -TARGET_EFAULT
;
8339 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
8340 unlock_user(p2
, arg4
, 0);
8341 unlock_user(p
, arg2
, 0);
8345 #ifdef TARGET_NR_mkdir
8346 case TARGET_NR_mkdir
:
8347 if (!(p
= lock_user_string(arg1
)))
8349 ret
= get_errno(mkdir(p
, arg2
));
8350 unlock_user(p
, arg1
, 0);
8353 #if defined(TARGET_NR_mkdirat)
8354 case TARGET_NR_mkdirat
:
8355 if (!(p
= lock_user_string(arg2
)))
8357 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
8358 unlock_user(p
, arg2
, 0);
8361 #ifdef TARGET_NR_rmdir
8362 case TARGET_NR_rmdir
:
8363 if (!(p
= lock_user_string(arg1
)))
8365 ret
= get_errno(rmdir(p
));
8366 unlock_user(p
, arg1
, 0);
8370 ret
= get_errno(dup(arg1
));
8372 fd_trans_dup(arg1
, ret
);
8375 #ifdef TARGET_NR_pipe
8376 case TARGET_NR_pipe
:
8377 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
8380 #ifdef TARGET_NR_pipe2
8381 case TARGET_NR_pipe2
:
8382 ret
= do_pipe(cpu_env
, arg1
,
8383 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
8386 case TARGET_NR_times
:
8388 struct target_tms
*tmsp
;
8390 ret
= get_errno(times(&tms
));
8392 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
8395 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
8396 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
8397 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
8398 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
8401 ret
= host_to_target_clock_t(ret
);
8404 #ifdef TARGET_NR_prof
8405 case TARGET_NR_prof
:
8408 #ifdef TARGET_NR_signal
8409 case TARGET_NR_signal
:
8412 case TARGET_NR_acct
:
8414 ret
= get_errno(acct(NULL
));
8416 if (!(p
= lock_user_string(arg1
)))
8418 ret
= get_errno(acct(path(p
)));
8419 unlock_user(p
, arg1
, 0);
8422 #ifdef TARGET_NR_umount2
8423 case TARGET_NR_umount2
:
8424 if (!(p
= lock_user_string(arg1
)))
8426 ret
= get_errno(umount2(p
, arg2
));
8427 unlock_user(p
, arg1
, 0);
8430 #ifdef TARGET_NR_lock
8431 case TARGET_NR_lock
:
8434 case TARGET_NR_ioctl
:
8435 ret
= do_ioctl(arg1
, arg2
, arg3
);
8437 case TARGET_NR_fcntl
:
8438 ret
= do_fcntl(arg1
, arg2
, arg3
);
8440 #ifdef TARGET_NR_mpx
8444 case TARGET_NR_setpgid
:
8445 ret
= get_errno(setpgid(arg1
, arg2
));
8447 #ifdef TARGET_NR_ulimit
8448 case TARGET_NR_ulimit
:
8451 #ifdef TARGET_NR_oldolduname
8452 case TARGET_NR_oldolduname
:
8455 case TARGET_NR_umask
:
8456 ret
= get_errno(umask(arg1
));
8458 case TARGET_NR_chroot
:
8459 if (!(p
= lock_user_string(arg1
)))
8461 ret
= get_errno(chroot(p
));
8462 unlock_user(p
, arg1
, 0);
8464 #ifdef TARGET_NR_ustat
8465 case TARGET_NR_ustat
:
8468 #ifdef TARGET_NR_dup2
8469 case TARGET_NR_dup2
:
8470 ret
= get_errno(dup2(arg1
, arg2
));
8472 fd_trans_dup(arg1
, arg2
);
8476 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8477 case TARGET_NR_dup3
:
8478 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
8480 fd_trans_dup(arg1
, arg2
);
8484 #ifdef TARGET_NR_getppid /* not on alpha */
8485 case TARGET_NR_getppid
:
8486 ret
= get_errno(getppid());
8489 #ifdef TARGET_NR_getpgrp
8490 case TARGET_NR_getpgrp
:
8491 ret
= get_errno(getpgrp());
8494 case TARGET_NR_setsid
:
8495 ret
= get_errno(setsid());
8497 #ifdef TARGET_NR_sigaction
8498 case TARGET_NR_sigaction
:
8500 #if defined(TARGET_ALPHA)
8501 struct target_sigaction act
, oact
, *pact
= 0;
8502 struct target_old_sigaction
*old_act
;
8504 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8506 act
._sa_handler
= old_act
->_sa_handler
;
8507 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8508 act
.sa_flags
= old_act
->sa_flags
;
8509 act
.sa_restorer
= 0;
8510 unlock_user_struct(old_act
, arg2
, 0);
8513 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8514 if (!is_error(ret
) && arg3
) {
8515 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8517 old_act
->_sa_handler
= oact
._sa_handler
;
8518 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8519 old_act
->sa_flags
= oact
.sa_flags
;
8520 unlock_user_struct(old_act
, arg3
, 1);
8522 #elif defined(TARGET_MIPS)
8523 struct target_sigaction act
, oact
, *pact
, *old_act
;
8526 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8528 act
._sa_handler
= old_act
->_sa_handler
;
8529 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
8530 act
.sa_flags
= old_act
->sa_flags
;
8531 unlock_user_struct(old_act
, arg2
, 0);
8537 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8539 if (!is_error(ret
) && arg3
) {
8540 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8542 old_act
->_sa_handler
= oact
._sa_handler
;
8543 old_act
->sa_flags
= oact
.sa_flags
;
8544 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
8545 old_act
->sa_mask
.sig
[1] = 0;
8546 old_act
->sa_mask
.sig
[2] = 0;
8547 old_act
->sa_mask
.sig
[3] = 0;
8548 unlock_user_struct(old_act
, arg3
, 1);
8551 struct target_old_sigaction
*old_act
;
8552 struct target_sigaction act
, oact
, *pact
;
8554 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8556 act
._sa_handler
= old_act
->_sa_handler
;
8557 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8558 act
.sa_flags
= old_act
->sa_flags
;
8559 act
.sa_restorer
= old_act
->sa_restorer
;
8560 unlock_user_struct(old_act
, arg2
, 0);
8565 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8566 if (!is_error(ret
) && arg3
) {
8567 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8569 old_act
->_sa_handler
= oact
._sa_handler
;
8570 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8571 old_act
->sa_flags
= oact
.sa_flags
;
8572 old_act
->sa_restorer
= oact
.sa_restorer
;
8573 unlock_user_struct(old_act
, arg3
, 1);
8579 case TARGET_NR_rt_sigaction
:
8581 #if defined(TARGET_ALPHA)
8582 /* For Alpha and SPARC this is a 5 argument syscall, with
8583 * a 'restorer' parameter which must be copied into the
8584 * sa_restorer field of the sigaction struct.
8585 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8586 * and arg5 is the sigsetsize.
8587 * Alpha also has a separate rt_sigaction struct that it uses
8588 * here; SPARC uses the usual sigaction struct.
8590 struct target_rt_sigaction
*rt_act
;
8591 struct target_sigaction act
, oact
, *pact
= 0;
8593 if (arg4
!= sizeof(target_sigset_t
)) {
8594 ret
= -TARGET_EINVAL
;
8598 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
8600 act
._sa_handler
= rt_act
->_sa_handler
;
8601 act
.sa_mask
= rt_act
->sa_mask
;
8602 act
.sa_flags
= rt_act
->sa_flags
;
8603 act
.sa_restorer
= arg5
;
8604 unlock_user_struct(rt_act
, arg2
, 0);
8607 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8608 if (!is_error(ret
) && arg3
) {
8609 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
8611 rt_act
->_sa_handler
= oact
._sa_handler
;
8612 rt_act
->sa_mask
= oact
.sa_mask
;
8613 rt_act
->sa_flags
= oact
.sa_flags
;
8614 unlock_user_struct(rt_act
, arg3
, 1);
8618 target_ulong restorer
= arg4
;
8619 target_ulong sigsetsize
= arg5
;
8621 target_ulong sigsetsize
= arg4
;
8623 struct target_sigaction
*act
;
8624 struct target_sigaction
*oact
;
8626 if (sigsetsize
!= sizeof(target_sigset_t
)) {
8627 ret
= -TARGET_EINVAL
;
8631 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1)) {
8635 act
->sa_restorer
= restorer
;
8641 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
8642 ret
= -TARGET_EFAULT
;
8643 goto rt_sigaction_fail
;
8647 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
8650 unlock_user_struct(act
, arg2
, 0);
8652 unlock_user_struct(oact
, arg3
, 1);
8656 #ifdef TARGET_NR_sgetmask /* not on alpha */
8657 case TARGET_NR_sgetmask
:
8660 abi_ulong target_set
;
8661 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8663 host_to_target_old_sigset(&target_set
, &cur_set
);
8669 #ifdef TARGET_NR_ssetmask /* not on alpha */
8670 case TARGET_NR_ssetmask
:
8673 abi_ulong target_set
= arg1
;
8674 target_to_host_old_sigset(&set
, &target_set
);
8675 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
8677 host_to_target_old_sigset(&target_set
, &oset
);
8683 #ifdef TARGET_NR_sigprocmask
8684 case TARGET_NR_sigprocmask
:
8686 #if defined(TARGET_ALPHA)
8687 sigset_t set
, oldset
;
8692 case TARGET_SIG_BLOCK
:
8695 case TARGET_SIG_UNBLOCK
:
8698 case TARGET_SIG_SETMASK
:
8702 ret
= -TARGET_EINVAL
;
8706 target_to_host_old_sigset(&set
, &mask
);
8708 ret
= do_sigprocmask(how
, &set
, &oldset
);
8709 if (!is_error(ret
)) {
8710 host_to_target_old_sigset(&mask
, &oldset
);
8712 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
8715 sigset_t set
, oldset
, *set_ptr
;
8720 case TARGET_SIG_BLOCK
:
8723 case TARGET_SIG_UNBLOCK
:
8726 case TARGET_SIG_SETMASK
:
8730 ret
= -TARGET_EINVAL
;
8733 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8735 target_to_host_old_sigset(&set
, p
);
8736 unlock_user(p
, arg2
, 0);
8742 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8743 if (!is_error(ret
) && arg3
) {
8744 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8746 host_to_target_old_sigset(p
, &oldset
);
8747 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8753 case TARGET_NR_rt_sigprocmask
:
8756 sigset_t set
, oldset
, *set_ptr
;
8758 if (arg4
!= sizeof(target_sigset_t
)) {
8759 ret
= -TARGET_EINVAL
;
8765 case TARGET_SIG_BLOCK
:
8768 case TARGET_SIG_UNBLOCK
:
8771 case TARGET_SIG_SETMASK
:
8775 ret
= -TARGET_EINVAL
;
8778 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8780 target_to_host_sigset(&set
, p
);
8781 unlock_user(p
, arg2
, 0);
8787 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8788 if (!is_error(ret
) && arg3
) {
8789 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8791 host_to_target_sigset(p
, &oldset
);
8792 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8796 #ifdef TARGET_NR_sigpending
8797 case TARGET_NR_sigpending
:
8800 ret
= get_errno(sigpending(&set
));
8801 if (!is_error(ret
)) {
8802 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8804 host_to_target_old_sigset(p
, &set
);
8805 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8810 case TARGET_NR_rt_sigpending
:
8814 /* Yes, this check is >, not != like most. We follow the kernel's
8815 * logic and it does it like this because it implements
8816 * NR_sigpending through the same code path, and in that case
8817 * the old_sigset_t is smaller in size.
8819 if (arg2
> sizeof(target_sigset_t
)) {
8820 ret
= -TARGET_EINVAL
;
8824 ret
= get_errno(sigpending(&set
));
8825 if (!is_error(ret
)) {
8826 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8828 host_to_target_sigset(p
, &set
);
8829 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8833 #ifdef TARGET_NR_sigsuspend
8834 case TARGET_NR_sigsuspend
:
8836 TaskState
*ts
= cpu
->opaque
;
8837 #if defined(TARGET_ALPHA)
8838 abi_ulong mask
= arg1
;
8839 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
8841 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8843 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
8844 unlock_user(p
, arg1
, 0);
8846 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8848 if (ret
!= -TARGET_ERESTARTSYS
) {
8849 ts
->in_sigsuspend
= 1;
8854 case TARGET_NR_rt_sigsuspend
:
8856 TaskState
*ts
= cpu
->opaque
;
8858 if (arg2
!= sizeof(target_sigset_t
)) {
8859 ret
= -TARGET_EINVAL
;
8862 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8864 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
8865 unlock_user(p
, arg1
, 0);
8866 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8868 if (ret
!= -TARGET_ERESTARTSYS
) {
8869 ts
->in_sigsuspend
= 1;
8873 case TARGET_NR_rt_sigtimedwait
:
8876 struct timespec uts
, *puts
;
8879 if (arg4
!= sizeof(target_sigset_t
)) {
8880 ret
= -TARGET_EINVAL
;
8884 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8886 target_to_host_sigset(&set
, p
);
8887 unlock_user(p
, arg1
, 0);
8890 target_to_host_timespec(puts
, arg3
);
8894 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
8896 if (!is_error(ret
)) {
8898 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
8903 host_to_target_siginfo(p
, &uinfo
);
8904 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
8906 ret
= host_to_target_signal(ret
);
8910 case TARGET_NR_rt_sigqueueinfo
:
8914 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
8918 target_to_host_siginfo(&uinfo
, p
);
8919 unlock_user(p
, arg3
, 0);
8920 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
8923 case TARGET_NR_rt_tgsigqueueinfo
:
8927 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
8931 target_to_host_siginfo(&uinfo
, p
);
8932 unlock_user(p
, arg4
, 0);
8933 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, arg3
, &uinfo
));
8936 #ifdef TARGET_NR_sigreturn
8937 case TARGET_NR_sigreturn
:
8938 if (block_signals()) {
8939 ret
= -TARGET_ERESTARTSYS
;
8941 ret
= do_sigreturn(cpu_env
);
8945 case TARGET_NR_rt_sigreturn
:
8946 if (block_signals()) {
8947 ret
= -TARGET_ERESTARTSYS
;
8949 ret
= do_rt_sigreturn(cpu_env
);
8952 case TARGET_NR_sethostname
:
8953 if (!(p
= lock_user_string(arg1
)))
8955 ret
= get_errno(sethostname(p
, arg2
));
8956 unlock_user(p
, arg1
, 0);
8958 case TARGET_NR_setrlimit
:
8960 int resource
= target_to_host_resource(arg1
);
8961 struct target_rlimit
*target_rlim
;
8963 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
8965 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
8966 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
8967 unlock_user_struct(target_rlim
, arg2
, 0);
8968 ret
= get_errno(setrlimit(resource
, &rlim
));
8971 case TARGET_NR_getrlimit
:
8973 int resource
= target_to_host_resource(arg1
);
8974 struct target_rlimit
*target_rlim
;
8977 ret
= get_errno(getrlimit(resource
, &rlim
));
8978 if (!is_error(ret
)) {
8979 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
8981 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
8982 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
8983 unlock_user_struct(target_rlim
, arg2
, 1);
8987 case TARGET_NR_getrusage
:
8989 struct rusage rusage
;
8990 ret
= get_errno(getrusage(arg1
, &rusage
));
8991 if (!is_error(ret
)) {
8992 ret
= host_to_target_rusage(arg2
, &rusage
);
8996 case TARGET_NR_gettimeofday
:
8999 ret
= get_errno(gettimeofday(&tv
, NULL
));
9000 if (!is_error(ret
)) {
9001 if (copy_to_user_timeval(arg1
, &tv
))
9006 case TARGET_NR_settimeofday
:
9008 struct timeval tv
, *ptv
= NULL
;
9009 struct timezone tz
, *ptz
= NULL
;
9012 if (copy_from_user_timeval(&tv
, arg1
)) {
9019 if (copy_from_user_timezone(&tz
, arg2
)) {
9025 ret
= get_errno(settimeofday(ptv
, ptz
));
9028 #if defined(TARGET_NR_select)
9029 case TARGET_NR_select
:
9030 #if defined(TARGET_WANT_NI_OLD_SELECT)
9031 /* some architectures used to have old_select here
9032 * but now ENOSYS it.
9034 ret
= -TARGET_ENOSYS
;
9035 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9036 ret
= do_old_select(arg1
);
9038 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9042 #ifdef TARGET_NR_pselect6
9043 case TARGET_NR_pselect6
:
9045 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
9046 fd_set rfds
, wfds
, efds
;
9047 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
9048 struct timespec ts
, *ts_ptr
;
9051 * The 6th arg is actually two args smashed together,
9052 * so we cannot use the C library.
9060 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
9061 target_sigset_t
*target_sigset
;
9069 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
9073 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
9077 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
9083 * This takes a timespec, and not a timeval, so we cannot
9084 * use the do_select() helper ...
9087 if (target_to_host_timespec(&ts
, ts_addr
)) {
9095 /* Extract the two packed args for the sigset */
9098 sig
.size
= SIGSET_T_SIZE
;
9100 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
9104 arg_sigset
= tswapal(arg7
[0]);
9105 arg_sigsize
= tswapal(arg7
[1]);
9106 unlock_user(arg7
, arg6
, 0);
9110 if (arg_sigsize
!= sizeof(*target_sigset
)) {
9111 /* Like the kernel, we enforce correct size sigsets */
9112 ret
= -TARGET_EINVAL
;
9115 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
9116 sizeof(*target_sigset
), 1);
9117 if (!target_sigset
) {
9120 target_to_host_sigset(&set
, target_sigset
);
9121 unlock_user(target_sigset
, arg_sigset
, 0);
9129 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
9132 if (!is_error(ret
)) {
9133 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
9135 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
9137 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
9140 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
9146 #ifdef TARGET_NR_symlink
9147 case TARGET_NR_symlink
:
9150 p
= lock_user_string(arg1
);
9151 p2
= lock_user_string(arg2
);
9153 ret
= -TARGET_EFAULT
;
9155 ret
= get_errno(symlink(p
, p2
));
9156 unlock_user(p2
, arg2
, 0);
9157 unlock_user(p
, arg1
, 0);
9161 #if defined(TARGET_NR_symlinkat)
9162 case TARGET_NR_symlinkat
:
9165 p
= lock_user_string(arg1
);
9166 p2
= lock_user_string(arg3
);
9168 ret
= -TARGET_EFAULT
;
9170 ret
= get_errno(symlinkat(p
, arg2
, p2
));
9171 unlock_user(p2
, arg3
, 0);
9172 unlock_user(p
, arg1
, 0);
9176 #ifdef TARGET_NR_oldlstat
9177 case TARGET_NR_oldlstat
:
9180 #ifdef TARGET_NR_readlink
9181 case TARGET_NR_readlink
:
9184 p
= lock_user_string(arg1
);
9185 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9187 ret
= -TARGET_EFAULT
;
9189 /* Short circuit this for the magic exe check. */
9190 ret
= -TARGET_EINVAL
;
9191 } else if (is_proc_myself((const char *)p
, "exe")) {
9192 char real
[PATH_MAX
], *temp
;
9193 temp
= realpath(exec_path
, real
);
9194 /* Return value is # of bytes that we wrote to the buffer. */
9196 ret
= get_errno(-1);
9198 /* Don't worry about sign mismatch as earlier mapping
9199 * logic would have thrown a bad address error. */
9200 ret
= MIN(strlen(real
), arg3
);
9201 /* We cannot NUL terminate the string. */
9202 memcpy(p2
, real
, ret
);
9205 ret
= get_errno(readlink(path(p
), p2
, arg3
));
9207 unlock_user(p2
, arg2
, ret
);
9208 unlock_user(p
, arg1
, 0);
9212 #if defined(TARGET_NR_readlinkat)
9213 case TARGET_NR_readlinkat
:
9216 p
= lock_user_string(arg2
);
9217 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9219 ret
= -TARGET_EFAULT
;
9220 } else if (is_proc_myself((const char *)p
, "exe")) {
9221 char real
[PATH_MAX
], *temp
;
9222 temp
= realpath(exec_path
, real
);
9223 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
9224 snprintf((char *)p2
, arg4
, "%s", real
);
9226 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
9228 unlock_user(p2
, arg3
, ret
);
9229 unlock_user(p
, arg2
, 0);
9233 #ifdef TARGET_NR_uselib
9234 case TARGET_NR_uselib
:
9237 #ifdef TARGET_NR_swapon
9238 case TARGET_NR_swapon
:
9239 if (!(p
= lock_user_string(arg1
)))
9241 ret
= get_errno(swapon(p
, arg2
));
9242 unlock_user(p
, arg1
, 0);
9245 case TARGET_NR_reboot
:
9246 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
9247 /* arg4 must be ignored in all other cases */
9248 p
= lock_user_string(arg4
);
9252 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
9253 unlock_user(p
, arg4
, 0);
9255 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
9258 #ifdef TARGET_NR_readdir
9259 case TARGET_NR_readdir
:
9262 #ifdef TARGET_NR_mmap
9263 case TARGET_NR_mmap
:
9264 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9265 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9266 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9267 || defined(TARGET_S390X)
9270 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
9271 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
9279 unlock_user(v
, arg1
, 0);
9280 ret
= get_errno(target_mmap(v1
, v2
, v3
,
9281 target_to_host_bitmask(v4
, mmap_flags_tbl
),
9285 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9286 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9292 #ifdef TARGET_NR_mmap2
9293 case TARGET_NR_mmap2
:
9295 #define MMAP_SHIFT 12
9297 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9298 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9300 arg6
<< MMAP_SHIFT
));
9303 case TARGET_NR_munmap
:
9304 ret
= get_errno(target_munmap(arg1
, arg2
));
9306 case TARGET_NR_mprotect
:
9308 TaskState
*ts
= cpu
->opaque
;
9309 /* Special hack to detect libc making the stack executable. */
9310 if ((arg3
& PROT_GROWSDOWN
)
9311 && arg1
>= ts
->info
->stack_limit
9312 && arg1
<= ts
->info
->start_stack
) {
9313 arg3
&= ~PROT_GROWSDOWN
;
9314 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
9315 arg1
= ts
->info
->stack_limit
;
9318 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
9320 #ifdef TARGET_NR_mremap
9321 case TARGET_NR_mremap
:
9322 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
9325 /* ??? msync/mlock/munlock are broken for softmmu. */
9326 #ifdef TARGET_NR_msync
9327 case TARGET_NR_msync
:
9328 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
9331 #ifdef TARGET_NR_mlock
9332 case TARGET_NR_mlock
:
9333 ret
= get_errno(mlock(g2h(arg1
), arg2
));
9336 #ifdef TARGET_NR_munlock
9337 case TARGET_NR_munlock
:
9338 ret
= get_errno(munlock(g2h(arg1
), arg2
));
9341 #ifdef TARGET_NR_mlockall
9342 case TARGET_NR_mlockall
:
9343 ret
= get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
9346 #ifdef TARGET_NR_munlockall
9347 case TARGET_NR_munlockall
:
9348 ret
= get_errno(munlockall());
9351 case TARGET_NR_truncate
:
9352 if (!(p
= lock_user_string(arg1
)))
9354 ret
= get_errno(truncate(p
, arg2
));
9355 unlock_user(p
, arg1
, 0);
9357 case TARGET_NR_ftruncate
:
9358 ret
= get_errno(ftruncate(arg1
, arg2
));
9360 case TARGET_NR_fchmod
:
9361 ret
= get_errno(fchmod(arg1
, arg2
));
9363 #if defined(TARGET_NR_fchmodat)
9364 case TARGET_NR_fchmodat
:
9365 if (!(p
= lock_user_string(arg2
)))
9367 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
9368 unlock_user(p
, arg2
, 0);
9371 case TARGET_NR_getpriority
:
9372 /* Note that negative values are valid for getpriority, so we must
9373 differentiate based on errno settings. */
9375 ret
= getpriority(arg1
, arg2
);
9376 if (ret
== -1 && errno
!= 0) {
9377 ret
= -host_to_target_errno(errno
);
9381 /* Return value is the unbiased priority. Signal no error. */
9382 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
9384 /* Return value is a biased priority to avoid negative numbers. */
9388 case TARGET_NR_setpriority
:
9389 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
9391 #ifdef TARGET_NR_profil
9392 case TARGET_NR_profil
:
9395 case TARGET_NR_statfs
:
9396 if (!(p
= lock_user_string(arg1
)))
9398 ret
= get_errno(statfs(path(p
), &stfs
));
9399 unlock_user(p
, arg1
, 0);
9401 if (!is_error(ret
)) {
9402 struct target_statfs
*target_stfs
;
9404 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
9406 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9407 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9408 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9409 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9410 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9411 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9412 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9413 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9414 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9415 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9416 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9417 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9418 unlock_user_struct(target_stfs
, arg2
, 1);
9421 case TARGET_NR_fstatfs
:
9422 ret
= get_errno(fstatfs(arg1
, &stfs
));
9423 goto convert_statfs
;
9424 #ifdef TARGET_NR_statfs64
9425 case TARGET_NR_statfs64
:
9426 if (!(p
= lock_user_string(arg1
)))
9428 ret
= get_errno(statfs(path(p
), &stfs
));
9429 unlock_user(p
, arg1
, 0);
9431 if (!is_error(ret
)) {
9432 struct target_statfs64
*target_stfs
;
9434 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
9436 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9437 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9438 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9439 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9440 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9441 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9442 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9443 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9444 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9445 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9446 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9447 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9448 unlock_user_struct(target_stfs
, arg3
, 1);
9451 case TARGET_NR_fstatfs64
:
9452 ret
= get_errno(fstatfs(arg1
, &stfs
));
9453 goto convert_statfs64
;
9455 #ifdef TARGET_NR_ioperm
9456 case TARGET_NR_ioperm
:
9459 #ifdef TARGET_NR_socketcall
9460 case TARGET_NR_socketcall
:
9461 ret
= do_socketcall(arg1
, arg2
);
9464 #ifdef TARGET_NR_accept
9465 case TARGET_NR_accept
:
9466 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
9469 #ifdef TARGET_NR_accept4
9470 case TARGET_NR_accept4
:
9471 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
9474 #ifdef TARGET_NR_bind
9475 case TARGET_NR_bind
:
9476 ret
= do_bind(arg1
, arg2
, arg3
);
9479 #ifdef TARGET_NR_connect
9480 case TARGET_NR_connect
:
9481 ret
= do_connect(arg1
, arg2
, arg3
);
9484 #ifdef TARGET_NR_getpeername
9485 case TARGET_NR_getpeername
:
9486 ret
= do_getpeername(arg1
, arg2
, arg3
);
9489 #ifdef TARGET_NR_getsockname
9490 case TARGET_NR_getsockname
:
9491 ret
= do_getsockname(arg1
, arg2
, arg3
);
9494 #ifdef TARGET_NR_getsockopt
9495 case TARGET_NR_getsockopt
:
9496 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
9499 #ifdef TARGET_NR_listen
9500 case TARGET_NR_listen
:
9501 ret
= get_errno(listen(arg1
, arg2
));
9504 #ifdef TARGET_NR_recv
9505 case TARGET_NR_recv
:
9506 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
9509 #ifdef TARGET_NR_recvfrom
9510 case TARGET_NR_recvfrom
:
9511 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9514 #ifdef TARGET_NR_recvmsg
9515 case TARGET_NR_recvmsg
:
9516 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
9519 #ifdef TARGET_NR_send
9520 case TARGET_NR_send
:
9521 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
9524 #ifdef TARGET_NR_sendmsg
9525 case TARGET_NR_sendmsg
:
9526 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
9529 #ifdef TARGET_NR_sendmmsg
9530 case TARGET_NR_sendmmsg
:
9531 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
9533 case TARGET_NR_recvmmsg
:
9534 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
9537 #ifdef TARGET_NR_sendto
9538 case TARGET_NR_sendto
:
9539 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9542 #ifdef TARGET_NR_shutdown
9543 case TARGET_NR_shutdown
:
9544 ret
= get_errno(shutdown(arg1
, arg2
));
9547 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9548 case TARGET_NR_getrandom
:
9549 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9553 ret
= get_errno(getrandom(p
, arg2
, arg3
));
9554 unlock_user(p
, arg1
, ret
);
9557 #ifdef TARGET_NR_socket
9558 case TARGET_NR_socket
:
9559 ret
= do_socket(arg1
, arg2
, arg3
);
9562 #ifdef TARGET_NR_socketpair
9563 case TARGET_NR_socketpair
:
9564 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
9567 #ifdef TARGET_NR_setsockopt
9568 case TARGET_NR_setsockopt
:
9569 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
9572 #if defined(TARGET_NR_syslog)
9573 case TARGET_NR_syslog
:
9578 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
9579 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
9580 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
9581 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
9582 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
9583 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
9584 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
9585 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
9587 ret
= get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
9590 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
9591 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
9592 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
9594 ret
= -TARGET_EINVAL
;
9602 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9604 ret
= -TARGET_EFAULT
;
9607 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
9608 unlock_user(p
, arg2
, arg3
);
9618 case TARGET_NR_setitimer
:
9620 struct itimerval value
, ovalue
, *pvalue
;
9624 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
9625 || copy_from_user_timeval(&pvalue
->it_value
,
9626 arg2
+ sizeof(struct target_timeval
)))
9631 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
9632 if (!is_error(ret
) && arg3
) {
9633 if (copy_to_user_timeval(arg3
,
9634 &ovalue
.it_interval
)
9635 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
9641 case TARGET_NR_getitimer
:
9643 struct itimerval value
;
9645 ret
= get_errno(getitimer(arg1
, &value
));
9646 if (!is_error(ret
) && arg2
) {
9647 if (copy_to_user_timeval(arg2
,
9649 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
9655 #ifdef TARGET_NR_stat
9656 case TARGET_NR_stat
:
9657 if (!(p
= lock_user_string(arg1
)))
9659 ret
= get_errno(stat(path(p
), &st
));
9660 unlock_user(p
, arg1
, 0);
9663 #ifdef TARGET_NR_lstat
9664 case TARGET_NR_lstat
:
9665 if (!(p
= lock_user_string(arg1
)))
9667 ret
= get_errno(lstat(path(p
), &st
));
9668 unlock_user(p
, arg1
, 0);
9671 case TARGET_NR_fstat
:
9673 ret
= get_errno(fstat(arg1
, &st
));
9674 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9677 if (!is_error(ret
)) {
9678 struct target_stat
*target_st
;
9680 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
9682 memset(target_st
, 0, sizeof(*target_st
));
9683 __put_user(st
.st_dev
, &target_st
->st_dev
);
9684 __put_user(st
.st_ino
, &target_st
->st_ino
);
9685 __put_user(st
.st_mode
, &target_st
->st_mode
);
9686 __put_user(st
.st_uid
, &target_st
->st_uid
);
9687 __put_user(st
.st_gid
, &target_st
->st_gid
);
9688 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
9689 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
9690 __put_user(st
.st_size
, &target_st
->st_size
);
9691 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
9692 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
9693 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
9694 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
9695 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
9696 unlock_user_struct(target_st
, arg2
, 1);
9700 #ifdef TARGET_NR_olduname
9701 case TARGET_NR_olduname
:
9704 #ifdef TARGET_NR_iopl
9705 case TARGET_NR_iopl
:
9708 case TARGET_NR_vhangup
:
9709 ret
= get_errno(vhangup());
9711 #ifdef TARGET_NR_idle
9712 case TARGET_NR_idle
:
9715 #ifdef TARGET_NR_syscall
9716 case TARGET_NR_syscall
:
9717 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
9718 arg6
, arg7
, arg8
, 0);
9721 case TARGET_NR_wait4
:
9724 abi_long status_ptr
= arg2
;
9725 struct rusage rusage
, *rusage_ptr
;
9726 abi_ulong target_rusage
= arg4
;
9727 abi_long rusage_err
;
9729 rusage_ptr
= &rusage
;
9732 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
9733 if (!is_error(ret
)) {
9734 if (status_ptr
&& ret
) {
9735 status
= host_to_target_waitstatus(status
);
9736 if (put_user_s32(status
, status_ptr
))
9739 if (target_rusage
) {
9740 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
9748 #ifdef TARGET_NR_swapoff
9749 case TARGET_NR_swapoff
:
9750 if (!(p
= lock_user_string(arg1
)))
9752 ret
= get_errno(swapoff(p
));
9753 unlock_user(p
, arg1
, 0);
9756 case TARGET_NR_sysinfo
:
9758 struct target_sysinfo
*target_value
;
9759 struct sysinfo value
;
9760 ret
= get_errno(sysinfo(&value
));
9761 if (!is_error(ret
) && arg1
)
9763 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
9765 __put_user(value
.uptime
, &target_value
->uptime
);
9766 __put_user(value
.loads
[0], &target_value
->loads
[0]);
9767 __put_user(value
.loads
[1], &target_value
->loads
[1]);
9768 __put_user(value
.loads
[2], &target_value
->loads
[2]);
9769 __put_user(value
.totalram
, &target_value
->totalram
);
9770 __put_user(value
.freeram
, &target_value
->freeram
);
9771 __put_user(value
.sharedram
, &target_value
->sharedram
);
9772 __put_user(value
.bufferram
, &target_value
->bufferram
);
9773 __put_user(value
.totalswap
, &target_value
->totalswap
);
9774 __put_user(value
.freeswap
, &target_value
->freeswap
);
9775 __put_user(value
.procs
, &target_value
->procs
);
9776 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
9777 __put_user(value
.freehigh
, &target_value
->freehigh
);
9778 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
9779 unlock_user_struct(target_value
, arg1
, 1);
9783 #ifdef TARGET_NR_ipc
9785 ret
= do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9788 #ifdef TARGET_NR_semget
9789 case TARGET_NR_semget
:
9790 ret
= get_errno(semget(arg1
, arg2
, arg3
));
9793 #ifdef TARGET_NR_semop
9794 case TARGET_NR_semop
:
9795 ret
= do_semop(arg1
, arg2
, arg3
);
9798 #ifdef TARGET_NR_semctl
9799 case TARGET_NR_semctl
:
9800 ret
= do_semctl(arg1
, arg2
, arg3
, arg4
);
9803 #ifdef TARGET_NR_msgctl
9804 case TARGET_NR_msgctl
:
9805 ret
= do_msgctl(arg1
, arg2
, arg3
);
9808 #ifdef TARGET_NR_msgget
9809 case TARGET_NR_msgget
:
9810 ret
= get_errno(msgget(arg1
, arg2
));
9813 #ifdef TARGET_NR_msgrcv
9814 case TARGET_NR_msgrcv
:
9815 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
9818 #ifdef TARGET_NR_msgsnd
9819 case TARGET_NR_msgsnd
:
9820 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
9823 #ifdef TARGET_NR_shmget
9824 case TARGET_NR_shmget
:
9825 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
9828 #ifdef TARGET_NR_shmctl
9829 case TARGET_NR_shmctl
:
9830 ret
= do_shmctl(arg1
, arg2
, arg3
);
9833 #ifdef TARGET_NR_shmat
9834 case TARGET_NR_shmat
:
9835 ret
= do_shmat(cpu_env
, arg1
, arg2
, arg3
);
9838 #ifdef TARGET_NR_shmdt
9839 case TARGET_NR_shmdt
:
9840 ret
= do_shmdt(arg1
);
9843 case TARGET_NR_fsync
:
9844 ret
= get_errno(fsync(arg1
));
9846 case TARGET_NR_clone
:
9847 /* Linux manages to have three different orderings for its
9848 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9849 * match the kernel's CONFIG_CLONE_* settings.
9850 * Microblaze is further special in that it uses a sixth
9851 * implicit argument to clone for the TLS pointer.
9853 #if defined(TARGET_MICROBLAZE)
9854 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
9855 #elif defined(TARGET_CLONE_BACKWARDS)
9856 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
9857 #elif defined(TARGET_CLONE_BACKWARDS2)
9858 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
9860 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
9863 #ifdef __NR_exit_group
9864 /* new thread calls */
9865 case TARGET_NR_exit_group
:
9869 gdb_exit(cpu_env
, arg1
);
9870 ret
= get_errno(exit_group(arg1
));
9873 case TARGET_NR_setdomainname
:
9874 if (!(p
= lock_user_string(arg1
)))
9876 ret
= get_errno(setdomainname(p
, arg2
));
9877 unlock_user(p
, arg1
, 0);
9879 case TARGET_NR_uname
:
9880 /* no need to transcode because we use the linux syscall */
9882 struct new_utsname
* buf
;
9884 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
9886 ret
= get_errno(sys_uname(buf
));
9887 if (!is_error(ret
)) {
9888 /* Overwrite the native machine name with whatever is being
9890 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
9891 /* Allow the user to override the reported release. */
9892 if (qemu_uname_release
&& *qemu_uname_release
) {
9893 g_strlcpy(buf
->release
, qemu_uname_release
,
9894 sizeof(buf
->release
));
9897 unlock_user_struct(buf
, arg1
, 1);
9901 case TARGET_NR_modify_ldt
:
9902 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
9904 #if !defined(TARGET_X86_64)
9905 case TARGET_NR_vm86old
:
9907 case TARGET_NR_vm86
:
9908 ret
= do_vm86(cpu_env
, arg1
, arg2
);
9912 case TARGET_NR_adjtimex
:
9914 struct timex host_buf
;
9916 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
9919 ret
= get_errno(adjtimex(&host_buf
));
9920 if (!is_error(ret
)) {
9921 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
9927 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9928 case TARGET_NR_clock_adjtime
:
9930 struct timex htx
, *phtx
= &htx
;
9932 if (target_to_host_timex(phtx
, arg2
) != 0) {
9935 ret
= get_errno(clock_adjtime(arg1
, phtx
));
9936 if (!is_error(ret
) && phtx
) {
9937 if (host_to_target_timex(arg2
, phtx
) != 0) {
9944 #ifdef TARGET_NR_create_module
9945 case TARGET_NR_create_module
:
9947 case TARGET_NR_init_module
:
9948 case TARGET_NR_delete_module
:
9949 #ifdef TARGET_NR_get_kernel_syms
9950 case TARGET_NR_get_kernel_syms
:
9953 case TARGET_NR_quotactl
:
9955 case TARGET_NR_getpgid
:
9956 ret
= get_errno(getpgid(arg1
));
9958 case TARGET_NR_fchdir
:
9959 ret
= get_errno(fchdir(arg1
));
9961 #ifdef TARGET_NR_bdflush /* not on x86_64 */
9962 case TARGET_NR_bdflush
:
9965 #ifdef TARGET_NR_sysfs
9966 case TARGET_NR_sysfs
:
9969 case TARGET_NR_personality
:
9970 ret
= get_errno(personality(arg1
));
9972 #ifdef TARGET_NR_afs_syscall
9973 case TARGET_NR_afs_syscall
:
9976 #ifdef TARGET_NR__llseek /* Not on alpha */
9977 case TARGET_NR__llseek
:
9980 #if !defined(__NR_llseek)
9981 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
9983 ret
= get_errno(res
);
9988 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
9990 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
9996 #ifdef TARGET_NR_getdents
9997 case TARGET_NR_getdents
:
9998 #ifdef __NR_getdents
9999 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10001 struct target_dirent
*target_dirp
;
10002 struct linux_dirent
*dirp
;
10003 abi_long count
= arg3
;
10005 dirp
= g_try_malloc(count
);
10007 ret
= -TARGET_ENOMEM
;
10011 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
10012 if (!is_error(ret
)) {
10013 struct linux_dirent
*de
;
10014 struct target_dirent
*tde
;
10016 int reclen
, treclen
;
10017 int count1
, tnamelen
;
10021 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10025 reclen
= de
->d_reclen
;
10026 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
10027 assert(tnamelen
>= 0);
10028 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
10029 assert(count1
+ treclen
<= count
);
10030 tde
->d_reclen
= tswap16(treclen
);
10031 tde
->d_ino
= tswapal(de
->d_ino
);
10032 tde
->d_off
= tswapal(de
->d_off
);
10033 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
10034 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
10036 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
10040 unlock_user(target_dirp
, arg2
, ret
);
10046 struct linux_dirent
*dirp
;
10047 abi_long count
= arg3
;
10049 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10051 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
10052 if (!is_error(ret
)) {
10053 struct linux_dirent
*de
;
10058 reclen
= de
->d_reclen
;
10061 de
->d_reclen
= tswap16(reclen
);
10062 tswapls(&de
->d_ino
);
10063 tswapls(&de
->d_off
);
10064 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
10068 unlock_user(dirp
, arg2
, ret
);
10072 /* Implement getdents in terms of getdents64 */
10074 struct linux_dirent64
*dirp
;
10075 abi_long count
= arg3
;
10077 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
10081 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10082 if (!is_error(ret
)) {
10083 /* Convert the dirent64 structs to target dirent. We do this
10084 * in-place, since we can guarantee that a target_dirent is no
10085 * larger than a dirent64; however this means we have to be
10086 * careful to read everything before writing in the new format.
10088 struct linux_dirent64
*de
;
10089 struct target_dirent
*tde
;
10094 tde
= (struct target_dirent
*)dirp
;
10096 int namelen
, treclen
;
10097 int reclen
= de
->d_reclen
;
10098 uint64_t ino
= de
->d_ino
;
10099 int64_t off
= de
->d_off
;
10100 uint8_t type
= de
->d_type
;
10102 namelen
= strlen(de
->d_name
);
10103 treclen
= offsetof(struct target_dirent
, d_name
)
10105 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
10107 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
10108 tde
->d_ino
= tswapal(ino
);
10109 tde
->d_off
= tswapal(off
);
10110 tde
->d_reclen
= tswap16(treclen
);
10111 /* The target_dirent type is in what was formerly a padding
10112 * byte at the end of the structure:
10114 *(((char *)tde
) + treclen
- 1) = type
;
10116 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10117 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
10123 unlock_user(dirp
, arg2
, ret
);
10127 #endif /* TARGET_NR_getdents */
10128 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10129 case TARGET_NR_getdents64
:
10131 struct linux_dirent64
*dirp
;
10132 abi_long count
= arg3
;
10133 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10135 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10136 if (!is_error(ret
)) {
10137 struct linux_dirent64
*de
;
10142 reclen
= de
->d_reclen
;
10145 de
->d_reclen
= tswap16(reclen
);
10146 tswap64s((uint64_t *)&de
->d_ino
);
10147 tswap64s((uint64_t *)&de
->d_off
);
10148 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10152 unlock_user(dirp
, arg2
, ret
);
10155 #endif /* TARGET_NR_getdents64 */
10156 #if defined(TARGET_NR__newselect)
10157 case TARGET_NR__newselect
:
10158 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
10161 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10162 # ifdef TARGET_NR_poll
10163 case TARGET_NR_poll
:
10165 # ifdef TARGET_NR_ppoll
10166 case TARGET_NR_ppoll
:
10169 struct target_pollfd
*target_pfd
;
10170 unsigned int nfds
= arg2
;
10171 struct pollfd
*pfd
;
10177 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
10178 ret
= -TARGET_EINVAL
;
10182 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
10183 sizeof(struct target_pollfd
) * nfds
, 1);
10188 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
10189 for (i
= 0; i
< nfds
; i
++) {
10190 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
10191 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
10196 # ifdef TARGET_NR_ppoll
10197 case TARGET_NR_ppoll
:
10199 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
10200 target_sigset_t
*target_set
;
10201 sigset_t _set
, *set
= &_set
;
10204 if (target_to_host_timespec(timeout_ts
, arg3
)) {
10205 unlock_user(target_pfd
, arg1
, 0);
10213 if (arg5
!= sizeof(target_sigset_t
)) {
10214 unlock_user(target_pfd
, arg1
, 0);
10215 ret
= -TARGET_EINVAL
;
10219 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
10221 unlock_user(target_pfd
, arg1
, 0);
10224 target_to_host_sigset(set
, target_set
);
10229 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
10230 set
, SIGSET_T_SIZE
));
10232 if (!is_error(ret
) && arg3
) {
10233 host_to_target_timespec(arg3
, timeout_ts
);
10236 unlock_user(target_set
, arg4
, 0);
10241 # ifdef TARGET_NR_poll
10242 case TARGET_NR_poll
:
10244 struct timespec ts
, *pts
;
10247 /* Convert ms to secs, ns */
10248 ts
.tv_sec
= arg3
/ 1000;
10249 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
10252 /* -ve poll() timeout means "infinite" */
10255 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
10260 g_assert_not_reached();
10263 if (!is_error(ret
)) {
10264 for(i
= 0; i
< nfds
; i
++) {
10265 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
10268 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
10272 case TARGET_NR_flock
:
10273 /* NOTE: the flock constant seems to be the same for every
10275 ret
= get_errno(safe_flock(arg1
, arg2
));
10277 case TARGET_NR_readv
:
10279 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10281 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
10282 unlock_iovec(vec
, arg2
, arg3
, 1);
10284 ret
= -host_to_target_errno(errno
);
10288 case TARGET_NR_writev
:
10290 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10292 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
10293 unlock_iovec(vec
, arg2
, arg3
, 0);
10295 ret
= -host_to_target_errno(errno
);
10299 #if defined(TARGET_NR_preadv)
10300 case TARGET_NR_preadv
:
10302 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10304 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, arg4
, arg5
));
10305 unlock_iovec(vec
, arg2
, arg3
, 1);
10307 ret
= -host_to_target_errno(errno
);
10312 #if defined(TARGET_NR_pwritev)
10313 case TARGET_NR_pwritev
:
10315 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10317 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, arg4
, arg5
));
10318 unlock_iovec(vec
, arg2
, arg3
, 0);
10320 ret
= -host_to_target_errno(errno
);
10325 case TARGET_NR_getsid
:
10326 ret
= get_errno(getsid(arg1
));
10328 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10329 case TARGET_NR_fdatasync
:
10330 ret
= get_errno(fdatasync(arg1
));
10333 #ifdef TARGET_NR__sysctl
10334 case TARGET_NR__sysctl
:
10335 /* We don't implement this, but ENOTDIR is always a safe
10337 ret
= -TARGET_ENOTDIR
;
10340 case TARGET_NR_sched_getaffinity
:
10342 unsigned int mask_size
;
10343 unsigned long *mask
;
10346 * sched_getaffinity needs multiples of ulong, so need to take
10347 * care of mismatches between target ulong and host ulong sizes.
10349 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10350 ret
= -TARGET_EINVAL
;
10353 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10355 mask
= alloca(mask_size
);
10356 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
10358 if (!is_error(ret
)) {
10360 /* More data returned than the caller's buffer will fit.
10361 * This only happens if sizeof(abi_long) < sizeof(long)
10362 * and the caller passed us a buffer holding an odd number
10363 * of abi_longs. If the host kernel is actually using the
10364 * extra 4 bytes then fail EINVAL; otherwise we can just
10365 * ignore them and only copy the interesting part.
10367 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
10368 if (numcpus
> arg2
* 8) {
10369 ret
= -TARGET_EINVAL
;
10375 if (copy_to_user(arg3
, mask
, ret
)) {
10381 case TARGET_NR_sched_setaffinity
:
10383 unsigned int mask_size
;
10384 unsigned long *mask
;
10387 * sched_setaffinity needs multiples of ulong, so need to take
10388 * care of mismatches between target ulong and host ulong sizes.
10390 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10391 ret
= -TARGET_EINVAL
;
10394 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10396 mask
= alloca(mask_size
);
10397 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
10400 memcpy(mask
, p
, arg2
);
10401 unlock_user_struct(p
, arg2
, 0);
10403 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
10406 case TARGET_NR_sched_setparam
:
10408 struct sched_param
*target_schp
;
10409 struct sched_param schp
;
10412 return -TARGET_EINVAL
;
10414 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
10416 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10417 unlock_user_struct(target_schp
, arg2
, 0);
10418 ret
= get_errno(sched_setparam(arg1
, &schp
));
10421 case TARGET_NR_sched_getparam
:
10423 struct sched_param
*target_schp
;
10424 struct sched_param schp
;
10427 return -TARGET_EINVAL
;
10429 ret
= get_errno(sched_getparam(arg1
, &schp
));
10430 if (!is_error(ret
)) {
10431 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
10433 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
10434 unlock_user_struct(target_schp
, arg2
, 1);
10438 case TARGET_NR_sched_setscheduler
:
10440 struct sched_param
*target_schp
;
10441 struct sched_param schp
;
10443 return -TARGET_EINVAL
;
10445 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
10447 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10448 unlock_user_struct(target_schp
, arg3
, 0);
10449 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
10452 case TARGET_NR_sched_getscheduler
:
10453 ret
= get_errno(sched_getscheduler(arg1
));
10455 case TARGET_NR_sched_yield
:
10456 ret
= get_errno(sched_yield());
10458 case TARGET_NR_sched_get_priority_max
:
10459 ret
= get_errno(sched_get_priority_max(arg1
));
10461 case TARGET_NR_sched_get_priority_min
:
10462 ret
= get_errno(sched_get_priority_min(arg1
));
10464 case TARGET_NR_sched_rr_get_interval
:
10466 struct timespec ts
;
10467 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10468 if (!is_error(ret
)) {
10469 ret
= host_to_target_timespec(arg2
, &ts
);
10473 case TARGET_NR_nanosleep
:
10475 struct timespec req
, rem
;
10476 target_to_host_timespec(&req
, arg1
);
10477 ret
= get_errno(safe_nanosleep(&req
, &rem
));
10478 if (is_error(ret
) && arg2
) {
10479 host_to_target_timespec(arg2
, &rem
);
10483 #ifdef TARGET_NR_query_module
10484 case TARGET_NR_query_module
:
10485 goto unimplemented
;
10487 #ifdef TARGET_NR_nfsservctl
10488 case TARGET_NR_nfsservctl
:
10489 goto unimplemented
;
10491 case TARGET_NR_prctl
:
10493 case PR_GET_PDEATHSIG
:
10496 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
10497 if (!is_error(ret
) && arg2
10498 && put_user_ual(deathsig
, arg2
)) {
10506 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
10510 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10511 arg3
, arg4
, arg5
));
10512 unlock_user(name
, arg2
, 16);
10517 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
10521 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10522 arg3
, arg4
, arg5
));
10523 unlock_user(name
, arg2
, 0);
10527 case PR_GET_SECCOMP
:
10528 case PR_SET_SECCOMP
:
10529 /* Disable seccomp to prevent the target disabling syscalls we
10531 ret
= -TARGET_EINVAL
;
10534 /* Most prctl options have no pointer arguments */
10535 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
10539 #ifdef TARGET_NR_arch_prctl
10540 case TARGET_NR_arch_prctl
:
10541 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10542 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
10545 goto unimplemented
;
10548 #ifdef TARGET_NR_pread64
10549 case TARGET_NR_pread64
:
10550 if (regpairs_aligned(cpu_env
, num
)) {
10554 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
10556 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10557 unlock_user(p
, arg2
, ret
);
10559 case TARGET_NR_pwrite64
:
10560 if (regpairs_aligned(cpu_env
, num
)) {
10564 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
10566 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10567 unlock_user(p
, arg2
, 0);
10570 case TARGET_NR_getcwd
:
10571 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
10573 ret
= get_errno(sys_getcwd1(p
, arg2
));
10574 unlock_user(p
, arg1
, ret
);
10576 case TARGET_NR_capget
:
10577 case TARGET_NR_capset
:
10579 struct target_user_cap_header
*target_header
;
10580 struct target_user_cap_data
*target_data
= NULL
;
10581 struct __user_cap_header_struct header
;
10582 struct __user_cap_data_struct data
[2];
10583 struct __user_cap_data_struct
*dataptr
= NULL
;
10584 int i
, target_datalen
;
10585 int data_items
= 1;
10587 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
10590 header
.version
= tswap32(target_header
->version
);
10591 header
.pid
= tswap32(target_header
->pid
);
10593 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
10594 /* Version 2 and up takes pointer to two user_data structs */
10598 target_datalen
= sizeof(*target_data
) * data_items
;
10601 if (num
== TARGET_NR_capget
) {
10602 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
10604 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
10606 if (!target_data
) {
10607 unlock_user_struct(target_header
, arg1
, 0);
10611 if (num
== TARGET_NR_capset
) {
10612 for (i
= 0; i
< data_items
; i
++) {
10613 data
[i
].effective
= tswap32(target_data
[i
].effective
);
10614 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
10615 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
10622 if (num
== TARGET_NR_capget
) {
10623 ret
= get_errno(capget(&header
, dataptr
));
10625 ret
= get_errno(capset(&header
, dataptr
));
10628 /* The kernel always updates version for both capget and capset */
10629 target_header
->version
= tswap32(header
.version
);
10630 unlock_user_struct(target_header
, arg1
, 1);
10633 if (num
== TARGET_NR_capget
) {
10634 for (i
= 0; i
< data_items
; i
++) {
10635 target_data
[i
].effective
= tswap32(data
[i
].effective
);
10636 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
10637 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
10639 unlock_user(target_data
, arg2
, target_datalen
);
10641 unlock_user(target_data
, arg2
, 0);
10646 case TARGET_NR_sigaltstack
:
10647 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
10650 #ifdef CONFIG_SENDFILE
10651 case TARGET_NR_sendfile
:
10653 off_t
*offp
= NULL
;
10656 ret
= get_user_sal(off
, arg3
);
10657 if (is_error(ret
)) {
10662 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10663 if (!is_error(ret
) && arg3
) {
10664 abi_long ret2
= put_user_sal(off
, arg3
);
10665 if (is_error(ret2
)) {
10671 #ifdef TARGET_NR_sendfile64
10672 case TARGET_NR_sendfile64
:
10674 off_t
*offp
= NULL
;
10677 ret
= get_user_s64(off
, arg3
);
10678 if (is_error(ret
)) {
10683 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10684 if (!is_error(ret
) && arg3
) {
10685 abi_long ret2
= put_user_s64(off
, arg3
);
10686 if (is_error(ret2
)) {
10694 case TARGET_NR_sendfile
:
10695 #ifdef TARGET_NR_sendfile64
10696 case TARGET_NR_sendfile64
:
10698 goto unimplemented
;
10701 #ifdef TARGET_NR_getpmsg
10702 case TARGET_NR_getpmsg
:
10703 goto unimplemented
;
10705 #ifdef TARGET_NR_putpmsg
10706 case TARGET_NR_putpmsg
:
10707 goto unimplemented
;
10709 #ifdef TARGET_NR_vfork
10710 case TARGET_NR_vfork
:
10711 ret
= get_errno(do_fork(cpu_env
,
10712 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
10716 #ifdef TARGET_NR_ugetrlimit
10717 case TARGET_NR_ugetrlimit
:
10719 struct rlimit rlim
;
10720 int resource
= target_to_host_resource(arg1
);
10721 ret
= get_errno(getrlimit(resource
, &rlim
));
10722 if (!is_error(ret
)) {
10723 struct target_rlimit
*target_rlim
;
10724 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
10726 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
10727 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
10728 unlock_user_struct(target_rlim
, arg2
, 1);
10733 #ifdef TARGET_NR_truncate64
10734 case TARGET_NR_truncate64
:
10735 if (!(p
= lock_user_string(arg1
)))
10737 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
10738 unlock_user(p
, arg1
, 0);
10741 #ifdef TARGET_NR_ftruncate64
10742 case TARGET_NR_ftruncate64
:
10743 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
10746 #ifdef TARGET_NR_stat64
10747 case TARGET_NR_stat64
:
10748 if (!(p
= lock_user_string(arg1
)))
10750 ret
= get_errno(stat(path(p
), &st
));
10751 unlock_user(p
, arg1
, 0);
10752 if (!is_error(ret
))
10753 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10756 #ifdef TARGET_NR_lstat64
10757 case TARGET_NR_lstat64
:
10758 if (!(p
= lock_user_string(arg1
)))
10760 ret
= get_errno(lstat(path(p
), &st
));
10761 unlock_user(p
, arg1
, 0);
10762 if (!is_error(ret
))
10763 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10766 #ifdef TARGET_NR_fstat64
10767 case TARGET_NR_fstat64
:
10768 ret
= get_errno(fstat(arg1
, &st
));
10769 if (!is_error(ret
))
10770 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10773 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10774 #ifdef TARGET_NR_fstatat64
10775 case TARGET_NR_fstatat64
:
10777 #ifdef TARGET_NR_newfstatat
10778 case TARGET_NR_newfstatat
:
10780 if (!(p
= lock_user_string(arg2
)))
10782 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
10783 if (!is_error(ret
))
10784 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
10787 #ifdef TARGET_NR_lchown
10788 case TARGET_NR_lchown
:
10789 if (!(p
= lock_user_string(arg1
)))
10791 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10792 unlock_user(p
, arg1
, 0);
10795 #ifdef TARGET_NR_getuid
10796 case TARGET_NR_getuid
:
10797 ret
= get_errno(high2lowuid(getuid()));
10800 #ifdef TARGET_NR_getgid
10801 case TARGET_NR_getgid
:
10802 ret
= get_errno(high2lowgid(getgid()));
10805 #ifdef TARGET_NR_geteuid
10806 case TARGET_NR_geteuid
:
10807 ret
= get_errno(high2lowuid(geteuid()));
10810 #ifdef TARGET_NR_getegid
10811 case TARGET_NR_getegid
:
10812 ret
= get_errno(high2lowgid(getegid()));
10815 case TARGET_NR_setreuid
:
10816 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
10818 case TARGET_NR_setregid
:
10819 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
10821 case TARGET_NR_getgroups
:
10823 int gidsetsize
= arg1
;
10824 target_id
*target_grouplist
;
10828 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10829 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10830 if (gidsetsize
== 0)
10832 if (!is_error(ret
)) {
10833 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
10834 if (!target_grouplist
)
10836 for(i
= 0;i
< ret
; i
++)
10837 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
10838 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
10842 case TARGET_NR_setgroups
:
10844 int gidsetsize
= arg1
;
10845 target_id
*target_grouplist
;
10846 gid_t
*grouplist
= NULL
;
10849 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10850 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
10851 if (!target_grouplist
) {
10852 ret
= -TARGET_EFAULT
;
10855 for (i
= 0; i
< gidsetsize
; i
++) {
10856 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
10858 unlock_user(target_grouplist
, arg2
, 0);
10860 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
10863 case TARGET_NR_fchown
:
10864 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
10866 #if defined(TARGET_NR_fchownat)
10867 case TARGET_NR_fchownat
:
10868 if (!(p
= lock_user_string(arg2
)))
10870 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
10871 low2highgid(arg4
), arg5
));
10872 unlock_user(p
, arg2
, 0);
10875 #ifdef TARGET_NR_setresuid
10876 case TARGET_NR_setresuid
:
10877 ret
= get_errno(sys_setresuid(low2highuid(arg1
),
10879 low2highuid(arg3
)));
10882 #ifdef TARGET_NR_getresuid
10883 case TARGET_NR_getresuid
:
10885 uid_t ruid
, euid
, suid
;
10886 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10887 if (!is_error(ret
)) {
10888 if (put_user_id(high2lowuid(ruid
), arg1
)
10889 || put_user_id(high2lowuid(euid
), arg2
)
10890 || put_user_id(high2lowuid(suid
), arg3
))
10896 #ifdef TARGET_NR_getresgid
10897 case TARGET_NR_setresgid
:
10898 ret
= get_errno(sys_setresgid(low2highgid(arg1
),
10900 low2highgid(arg3
)));
10903 #ifdef TARGET_NR_getresgid
10904 case TARGET_NR_getresgid
:
10906 gid_t rgid
, egid
, sgid
;
10907 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10908 if (!is_error(ret
)) {
10909 if (put_user_id(high2lowgid(rgid
), arg1
)
10910 || put_user_id(high2lowgid(egid
), arg2
)
10911 || put_user_id(high2lowgid(sgid
), arg3
))
10917 #ifdef TARGET_NR_chown
10918 case TARGET_NR_chown
:
10919 if (!(p
= lock_user_string(arg1
)))
10921 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10922 unlock_user(p
, arg1
, 0);
10925 case TARGET_NR_setuid
:
10926 ret
= get_errno(sys_setuid(low2highuid(arg1
)));
10928 case TARGET_NR_setgid
:
10929 ret
= get_errno(sys_setgid(low2highgid(arg1
)));
10931 case TARGET_NR_setfsuid
:
10932 ret
= get_errno(setfsuid(arg1
));
10934 case TARGET_NR_setfsgid
:
10935 ret
= get_errno(setfsgid(arg1
));
10938 #ifdef TARGET_NR_lchown32
10939 case TARGET_NR_lchown32
:
10940 if (!(p
= lock_user_string(arg1
)))
10942 ret
= get_errno(lchown(p
, arg2
, arg3
));
10943 unlock_user(p
, arg1
, 0);
10946 #ifdef TARGET_NR_getuid32
10947 case TARGET_NR_getuid32
:
10948 ret
= get_errno(getuid());
10952 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10953 /* Alpha specific */
10954 case TARGET_NR_getxuid
:
10958 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
10960 ret
= get_errno(getuid());
10963 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10964 /* Alpha specific */
10965 case TARGET_NR_getxgid
:
10969 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
10971 ret
= get_errno(getgid());
10974 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10975 /* Alpha specific */
10976 case TARGET_NR_osf_getsysinfo
:
10977 ret
= -TARGET_EOPNOTSUPP
;
10979 case TARGET_GSI_IEEE_FP_CONTROL
:
10981 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
10983 /* Copied from linux ieee_fpcr_to_swcr. */
10984 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
10985 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
10986 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
10987 | SWCR_TRAP_ENABLE_DZE
10988 | SWCR_TRAP_ENABLE_OVF
);
10989 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
10990 | SWCR_TRAP_ENABLE_INE
);
10991 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
10992 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
10994 if (put_user_u64 (swcr
, arg2
))
11000 /* case GSI_IEEE_STATE_AT_SIGNAL:
11001 -- Not implemented in linux kernel.
11003 -- Retrieves current unaligned access state; not much used.
11004 case GSI_PROC_TYPE:
11005 -- Retrieves implver information; surely not used.
11006 case GSI_GET_HWRPB:
11007 -- Grabs a copy of the HWRPB; surely not used.
11012 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11013 /* Alpha specific */
11014 case TARGET_NR_osf_setsysinfo
:
11015 ret
= -TARGET_EOPNOTSUPP
;
11017 case TARGET_SSI_IEEE_FP_CONTROL
:
11019 uint64_t swcr
, fpcr
, orig_fpcr
;
11021 if (get_user_u64 (swcr
, arg2
)) {
11024 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11025 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
11027 /* Copied from linux ieee_swcr_to_fpcr. */
11028 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
11029 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
11030 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
11031 | SWCR_TRAP_ENABLE_DZE
11032 | SWCR_TRAP_ENABLE_OVF
)) << 48;
11033 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
11034 | SWCR_TRAP_ENABLE_INE
)) << 57;
11035 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
11036 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
11038 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11043 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
11045 uint64_t exc
, fpcr
, orig_fpcr
;
11048 if (get_user_u64(exc
, arg2
)) {
11052 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11054 /* We only add to the exception status here. */
11055 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
11057 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11060 /* Old exceptions are not signaled. */
11061 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
11063 /* If any exceptions set by this call,
11064 and are unmasked, send a signal. */
11066 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
11067 si_code
= TARGET_FPE_FLTRES
;
11069 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
11070 si_code
= TARGET_FPE_FLTUND
;
11072 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
11073 si_code
= TARGET_FPE_FLTOVF
;
11075 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
11076 si_code
= TARGET_FPE_FLTDIV
;
11078 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
11079 si_code
= TARGET_FPE_FLTINV
;
11081 if (si_code
!= 0) {
11082 target_siginfo_t info
;
11083 info
.si_signo
= SIGFPE
;
11085 info
.si_code
= si_code
;
11086 info
._sifields
._sigfault
._addr
11087 = ((CPUArchState
*)cpu_env
)->pc
;
11088 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11089 QEMU_SI_FAULT
, &info
);
11094 /* case SSI_NVPAIRS:
11095 -- Used with SSIN_UACPROC to enable unaligned accesses.
11096 case SSI_IEEE_STATE_AT_SIGNAL:
11097 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11098 -- Not implemented in linux kernel
11103 #ifdef TARGET_NR_osf_sigprocmask
11104 /* Alpha specific. */
11105 case TARGET_NR_osf_sigprocmask
:
11109 sigset_t set
, oldset
;
11112 case TARGET_SIG_BLOCK
:
11115 case TARGET_SIG_UNBLOCK
:
11118 case TARGET_SIG_SETMASK
:
11122 ret
= -TARGET_EINVAL
;
11126 target_to_host_old_sigset(&set
, &mask
);
11127 ret
= do_sigprocmask(how
, &set
, &oldset
);
11129 host_to_target_old_sigset(&mask
, &oldset
);
11136 #ifdef TARGET_NR_getgid32
11137 case TARGET_NR_getgid32
:
11138 ret
= get_errno(getgid());
11141 #ifdef TARGET_NR_geteuid32
11142 case TARGET_NR_geteuid32
:
11143 ret
= get_errno(geteuid());
11146 #ifdef TARGET_NR_getegid32
11147 case TARGET_NR_getegid32
:
11148 ret
= get_errno(getegid());
11151 #ifdef TARGET_NR_setreuid32
11152 case TARGET_NR_setreuid32
:
11153 ret
= get_errno(setreuid(arg1
, arg2
));
11156 #ifdef TARGET_NR_setregid32
11157 case TARGET_NR_setregid32
:
11158 ret
= get_errno(setregid(arg1
, arg2
));
11161 #ifdef TARGET_NR_getgroups32
11162 case TARGET_NR_getgroups32
:
11164 int gidsetsize
= arg1
;
11165 uint32_t *target_grouplist
;
11169 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11170 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11171 if (gidsetsize
== 0)
11173 if (!is_error(ret
)) {
11174 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
11175 if (!target_grouplist
) {
11176 ret
= -TARGET_EFAULT
;
11179 for(i
= 0;i
< ret
; i
++)
11180 target_grouplist
[i
] = tswap32(grouplist
[i
]);
11181 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
11186 #ifdef TARGET_NR_setgroups32
11187 case TARGET_NR_setgroups32
:
11189 int gidsetsize
= arg1
;
11190 uint32_t *target_grouplist
;
11194 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11195 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
11196 if (!target_grouplist
) {
11197 ret
= -TARGET_EFAULT
;
11200 for(i
= 0;i
< gidsetsize
; i
++)
11201 grouplist
[i
] = tswap32(target_grouplist
[i
]);
11202 unlock_user(target_grouplist
, arg2
, 0);
11203 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
11207 #ifdef TARGET_NR_fchown32
11208 case TARGET_NR_fchown32
:
11209 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
11212 #ifdef TARGET_NR_setresuid32
11213 case TARGET_NR_setresuid32
:
11214 ret
= get_errno(sys_setresuid(arg1
, arg2
, arg3
));
11217 #ifdef TARGET_NR_getresuid32
11218 case TARGET_NR_getresuid32
:
11220 uid_t ruid
, euid
, suid
;
11221 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11222 if (!is_error(ret
)) {
11223 if (put_user_u32(ruid
, arg1
)
11224 || put_user_u32(euid
, arg2
)
11225 || put_user_u32(suid
, arg3
))
11231 #ifdef TARGET_NR_setresgid32
11232 case TARGET_NR_setresgid32
:
11233 ret
= get_errno(sys_setresgid(arg1
, arg2
, arg3
));
11236 #ifdef TARGET_NR_getresgid32
11237 case TARGET_NR_getresgid32
:
11239 gid_t rgid
, egid
, sgid
;
11240 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11241 if (!is_error(ret
)) {
11242 if (put_user_u32(rgid
, arg1
)
11243 || put_user_u32(egid
, arg2
)
11244 || put_user_u32(sgid
, arg3
))
11250 #ifdef TARGET_NR_chown32
11251 case TARGET_NR_chown32
:
11252 if (!(p
= lock_user_string(arg1
)))
11254 ret
= get_errno(chown(p
, arg2
, arg3
));
11255 unlock_user(p
, arg1
, 0);
11258 #ifdef TARGET_NR_setuid32
11259 case TARGET_NR_setuid32
:
11260 ret
= get_errno(sys_setuid(arg1
));
11263 #ifdef TARGET_NR_setgid32
11264 case TARGET_NR_setgid32
:
11265 ret
= get_errno(sys_setgid(arg1
));
11268 #ifdef TARGET_NR_setfsuid32
11269 case TARGET_NR_setfsuid32
:
11270 ret
= get_errno(setfsuid(arg1
));
11273 #ifdef TARGET_NR_setfsgid32
11274 case TARGET_NR_setfsgid32
:
11275 ret
= get_errno(setfsgid(arg1
));
11279 case TARGET_NR_pivot_root
:
11280 goto unimplemented
;
11281 #ifdef TARGET_NR_mincore
11282 case TARGET_NR_mincore
:
11285 ret
= -TARGET_ENOMEM
;
11286 a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
11290 ret
= -TARGET_EFAULT
;
11291 p
= lock_user_string(arg3
);
11295 ret
= get_errno(mincore(a
, arg2
, p
));
11296 unlock_user(p
, arg3
, ret
);
11298 unlock_user(a
, arg1
, 0);
11302 #ifdef TARGET_NR_arm_fadvise64_64
11303 case TARGET_NR_arm_fadvise64_64
:
11304 /* arm_fadvise64_64 looks like fadvise64_64 but
11305 * with different argument order: fd, advice, offset, len
11306 * rather than the usual fd, offset, len, advice.
11307 * Note that offset and len are both 64-bit so appear as
11308 * pairs of 32-bit registers.
11310 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
11311 target_offset64(arg5
, arg6
), arg2
);
11312 ret
= -host_to_target_errno(ret
);
11316 #if TARGET_ABI_BITS == 32
11318 #ifdef TARGET_NR_fadvise64_64
11319 case TARGET_NR_fadvise64_64
:
11320 #if defined(TARGET_PPC)
11321 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11329 /* 6 args: fd, offset (high, low), len (high, low), advice */
11330 if (regpairs_aligned(cpu_env
, num
)) {
11331 /* offset is in (3,4), len in (5,6) and advice in 7 */
11339 ret
= -host_to_target_errno(posix_fadvise(arg1
,
11340 target_offset64(arg2
, arg3
),
11341 target_offset64(arg4
, arg5
),
11346 #ifdef TARGET_NR_fadvise64
11347 case TARGET_NR_fadvise64
:
11348 /* 5 args: fd, offset (high, low), len, advice */
11349 if (regpairs_aligned(cpu_env
, num
)) {
11350 /* offset is in (3,4), len in 5 and advice in 6 */
11356 ret
= -host_to_target_errno(posix_fadvise(arg1
,
11357 target_offset64(arg2
, arg3
),
11362 #else /* not a 32-bit ABI */
11363 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11364 #ifdef TARGET_NR_fadvise64_64
11365 case TARGET_NR_fadvise64_64
:
11367 #ifdef TARGET_NR_fadvise64
11368 case TARGET_NR_fadvise64
:
11370 #ifdef TARGET_S390X
11372 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
11373 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
11374 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
11375 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
11379 ret
= -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
11382 #endif /* end of 64-bit ABI fadvise handling */
11384 #ifdef TARGET_NR_madvise
11385 case TARGET_NR_madvise
:
11386 /* A straight passthrough may not be safe because qemu sometimes
11387 turns private file-backed mappings into anonymous mappings.
11388 This will break MADV_DONTNEED.
11389 This is a hint, so ignoring and returning success is ok. */
11390 ret
= get_errno(0);
11393 #if TARGET_ABI_BITS == 32
11394 case TARGET_NR_fcntl64
:
11398 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
11399 to_flock64_fn
*copyto
= copy_to_user_flock64
;
11402 if (((CPUARMState
*)cpu_env
)->eabi
) {
11403 copyfrom
= copy_from_user_eabi_flock64
;
11404 copyto
= copy_to_user_eabi_flock64
;
11408 cmd
= target_to_host_fcntl_cmd(arg2
);
11409 if (cmd
== -TARGET_EINVAL
) {
11415 case TARGET_F_GETLK64
:
11416 ret
= copyfrom(&fl
, arg3
);
11420 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
11422 ret
= copyto(arg3
, &fl
);
11426 case TARGET_F_SETLK64
:
11427 case TARGET_F_SETLKW64
:
11428 ret
= copyfrom(&fl
, arg3
);
11432 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11435 ret
= do_fcntl(arg1
, arg2
, arg3
);
11441 #ifdef TARGET_NR_cacheflush
11442 case TARGET_NR_cacheflush
:
11443 /* self-modifying code is handled automatically, so nothing needed */
11447 #ifdef TARGET_NR_security
11448 case TARGET_NR_security
:
11449 goto unimplemented
;
11451 #ifdef TARGET_NR_getpagesize
11452 case TARGET_NR_getpagesize
:
11453 ret
= TARGET_PAGE_SIZE
;
11456 case TARGET_NR_gettid
:
11457 ret
= get_errno(gettid());
11459 #ifdef TARGET_NR_readahead
11460 case TARGET_NR_readahead
:
11461 #if TARGET_ABI_BITS == 32
11462 if (regpairs_aligned(cpu_env
, num
)) {
11467 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
11469 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
11474 #ifdef TARGET_NR_setxattr
11475 case TARGET_NR_listxattr
:
11476 case TARGET_NR_llistxattr
:
11480 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11482 ret
= -TARGET_EFAULT
;
11486 p
= lock_user_string(arg1
);
11488 if (num
== TARGET_NR_listxattr
) {
11489 ret
= get_errno(listxattr(p
, b
, arg3
));
11491 ret
= get_errno(llistxattr(p
, b
, arg3
));
11494 ret
= -TARGET_EFAULT
;
11496 unlock_user(p
, arg1
, 0);
11497 unlock_user(b
, arg2
, arg3
);
11500 case TARGET_NR_flistxattr
:
11504 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11506 ret
= -TARGET_EFAULT
;
11510 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
11511 unlock_user(b
, arg2
, arg3
);
11514 case TARGET_NR_setxattr
:
11515 case TARGET_NR_lsetxattr
:
11517 void *p
, *n
, *v
= 0;
11519 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11521 ret
= -TARGET_EFAULT
;
11525 p
= lock_user_string(arg1
);
11526 n
= lock_user_string(arg2
);
11528 if (num
== TARGET_NR_setxattr
) {
11529 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
11531 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
11534 ret
= -TARGET_EFAULT
;
11536 unlock_user(p
, arg1
, 0);
11537 unlock_user(n
, arg2
, 0);
11538 unlock_user(v
, arg3
, 0);
11541 case TARGET_NR_fsetxattr
:
11545 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11547 ret
= -TARGET_EFAULT
;
11551 n
= lock_user_string(arg2
);
11553 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
11555 ret
= -TARGET_EFAULT
;
11557 unlock_user(n
, arg2
, 0);
11558 unlock_user(v
, arg3
, 0);
11561 case TARGET_NR_getxattr
:
11562 case TARGET_NR_lgetxattr
:
11564 void *p
, *n
, *v
= 0;
11566 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11568 ret
= -TARGET_EFAULT
;
11572 p
= lock_user_string(arg1
);
11573 n
= lock_user_string(arg2
);
11575 if (num
== TARGET_NR_getxattr
) {
11576 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
11578 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
11581 ret
= -TARGET_EFAULT
;
11583 unlock_user(p
, arg1
, 0);
11584 unlock_user(n
, arg2
, 0);
11585 unlock_user(v
, arg3
, arg4
);
11588 case TARGET_NR_fgetxattr
:
11592 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11594 ret
= -TARGET_EFAULT
;
11598 n
= lock_user_string(arg2
);
11600 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
11602 ret
= -TARGET_EFAULT
;
11604 unlock_user(n
, arg2
, 0);
11605 unlock_user(v
, arg3
, arg4
);
11608 case TARGET_NR_removexattr
:
11609 case TARGET_NR_lremovexattr
:
11612 p
= lock_user_string(arg1
);
11613 n
= lock_user_string(arg2
);
11615 if (num
== TARGET_NR_removexattr
) {
11616 ret
= get_errno(removexattr(p
, n
));
11618 ret
= get_errno(lremovexattr(p
, n
));
11621 ret
= -TARGET_EFAULT
;
11623 unlock_user(p
, arg1
, 0);
11624 unlock_user(n
, arg2
, 0);
11627 case TARGET_NR_fremovexattr
:
11630 n
= lock_user_string(arg2
);
11632 ret
= get_errno(fremovexattr(arg1
, n
));
11634 ret
= -TARGET_EFAULT
;
11636 unlock_user(n
, arg2
, 0);
11640 #endif /* CONFIG_ATTR */
11641 #ifdef TARGET_NR_set_thread_area
11642 case TARGET_NR_set_thread_area
:
11643 #if defined(TARGET_MIPS)
11644 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
11647 #elif defined(TARGET_CRIS)
11649 ret
= -TARGET_EINVAL
;
11651 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
11655 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11656 ret
= do_set_thread_area(cpu_env
, arg1
);
11658 #elif defined(TARGET_M68K)
11660 TaskState
*ts
= cpu
->opaque
;
11661 ts
->tp_value
= arg1
;
11666 goto unimplemented_nowarn
;
11669 #ifdef TARGET_NR_get_thread_area
11670 case TARGET_NR_get_thread_area
:
11671 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11672 ret
= do_get_thread_area(cpu_env
, arg1
);
11674 #elif defined(TARGET_M68K)
11676 TaskState
*ts
= cpu
->opaque
;
11677 ret
= ts
->tp_value
;
11681 goto unimplemented_nowarn
;
11684 #ifdef TARGET_NR_getdomainname
11685 case TARGET_NR_getdomainname
:
11686 goto unimplemented_nowarn
;
11689 #ifdef TARGET_NR_clock_gettime
11690 case TARGET_NR_clock_gettime
:
11692 struct timespec ts
;
11693 ret
= get_errno(clock_gettime(arg1
, &ts
));
11694 if (!is_error(ret
)) {
11695 host_to_target_timespec(arg2
, &ts
);
11700 #ifdef TARGET_NR_clock_getres
11701 case TARGET_NR_clock_getres
:
11703 struct timespec ts
;
11704 ret
= get_errno(clock_getres(arg1
, &ts
));
11705 if (!is_error(ret
)) {
11706 host_to_target_timespec(arg2
, &ts
);
11711 #ifdef TARGET_NR_clock_nanosleep
11712 case TARGET_NR_clock_nanosleep
:
11714 struct timespec ts
;
11715 target_to_host_timespec(&ts
, arg3
);
11716 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
11717 &ts
, arg4
? &ts
: NULL
));
11719 host_to_target_timespec(arg4
, &ts
);
11721 #if defined(TARGET_PPC)
11722 /* clock_nanosleep is odd in that it returns positive errno values.
11723 * On PPC, CR0 bit 3 should be set in such a situation. */
11724 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
11725 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
11732 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11733 case TARGET_NR_set_tid_address
:
11734 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
11738 case TARGET_NR_tkill
:
11739 ret
= get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
11742 case TARGET_NR_tgkill
:
11743 ret
= get_errno(safe_tgkill((int)arg1
, (int)arg2
,
11744 target_to_host_signal(arg3
)));
11747 #ifdef TARGET_NR_set_robust_list
11748 case TARGET_NR_set_robust_list
:
11749 case TARGET_NR_get_robust_list
:
11750 /* The ABI for supporting robust futexes has userspace pass
11751 * the kernel a pointer to a linked list which is updated by
11752 * userspace after the syscall; the list is walked by the kernel
11753 * when the thread exits. Since the linked list in QEMU guest
11754 * memory isn't a valid linked list for the host and we have
11755 * no way to reliably intercept the thread-death event, we can't
11756 * support these. Silently return ENOSYS so that guest userspace
11757 * falls back to a non-robust futex implementation (which should
11758 * be OK except in the corner case of the guest crashing while
11759 * holding a mutex that is shared with another process via
11762 goto unimplemented_nowarn
;
11765 #if defined(TARGET_NR_utimensat)
11766 case TARGET_NR_utimensat
:
11768 struct timespec
*tsp
, ts
[2];
11772 target_to_host_timespec(ts
, arg3
);
11773 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
11777 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
11779 if (!(p
= lock_user_string(arg2
))) {
11780 ret
= -TARGET_EFAULT
;
11783 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
11784 unlock_user(p
, arg2
, 0);
11789 case TARGET_NR_futex
:
11790 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11792 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11793 case TARGET_NR_inotify_init
:
11794 ret
= get_errno(sys_inotify_init());
11796 fd_trans_register(ret
, &target_inotify_trans
);
11800 #ifdef CONFIG_INOTIFY1
11801 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11802 case TARGET_NR_inotify_init1
:
11803 ret
= get_errno(sys_inotify_init1(target_to_host_bitmask(arg1
,
11804 fcntl_flags_tbl
)));
11806 fd_trans_register(ret
, &target_inotify_trans
);
11811 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11812 case TARGET_NR_inotify_add_watch
:
11813 p
= lock_user_string(arg2
);
11814 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
11815 unlock_user(p
, arg2
, 0);
11818 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11819 case TARGET_NR_inotify_rm_watch
:
11820 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
11824 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11825 case TARGET_NR_mq_open
:
11827 struct mq_attr posix_mq_attr
;
11828 struct mq_attr
*pposix_mq_attr
;
11831 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
11832 pposix_mq_attr
= NULL
;
11834 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
11837 pposix_mq_attr
= &posix_mq_attr
;
11839 p
= lock_user_string(arg1
- 1);
11843 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
11844 unlock_user (p
, arg1
, 0);
11848 case TARGET_NR_mq_unlink
:
11849 p
= lock_user_string(arg1
- 1);
11851 ret
= -TARGET_EFAULT
;
11854 ret
= get_errno(mq_unlink(p
));
11855 unlock_user (p
, arg1
, 0);
11858 case TARGET_NR_mq_timedsend
:
11860 struct timespec ts
;
11862 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11864 target_to_host_timespec(&ts
, arg5
);
11865 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
11866 host_to_target_timespec(arg5
, &ts
);
11868 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
11870 unlock_user (p
, arg2
, arg3
);
11874 case TARGET_NR_mq_timedreceive
:
11876 struct timespec ts
;
11879 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11881 target_to_host_timespec(&ts
, arg5
);
11882 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11884 host_to_target_timespec(arg5
, &ts
);
11886 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11889 unlock_user (p
, arg2
, arg3
);
11891 put_user_u32(prio
, arg4
);
11895 /* Not implemented for now... */
11896 /* case TARGET_NR_mq_notify: */
11899 case TARGET_NR_mq_getsetattr
:
11901 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
11904 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
11905 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
11908 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
11909 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
11916 #ifdef CONFIG_SPLICE
11917 #ifdef TARGET_NR_tee
11918 case TARGET_NR_tee
:
11920 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
11924 #ifdef TARGET_NR_splice
11925 case TARGET_NR_splice
:
11927 loff_t loff_in
, loff_out
;
11928 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
11930 if (get_user_u64(loff_in
, arg2
)) {
11933 ploff_in
= &loff_in
;
11936 if (get_user_u64(loff_out
, arg4
)) {
11939 ploff_out
= &loff_out
;
11941 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
11943 if (put_user_u64(loff_in
, arg2
)) {
11948 if (put_user_u64(loff_out
, arg4
)) {
11955 #ifdef TARGET_NR_vmsplice
11956 case TARGET_NR_vmsplice
:
11958 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
11960 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
11961 unlock_iovec(vec
, arg2
, arg3
, 0);
11963 ret
= -host_to_target_errno(errno
);
11968 #endif /* CONFIG_SPLICE */
11969 #ifdef CONFIG_EVENTFD
11970 #if defined(TARGET_NR_eventfd)
11971 case TARGET_NR_eventfd
:
11972 ret
= get_errno(eventfd(arg1
, 0));
11974 fd_trans_register(ret
, &target_eventfd_trans
);
11978 #if defined(TARGET_NR_eventfd2)
11979 case TARGET_NR_eventfd2
:
11981 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
11982 if (arg2
& TARGET_O_NONBLOCK
) {
11983 host_flags
|= O_NONBLOCK
;
11985 if (arg2
& TARGET_O_CLOEXEC
) {
11986 host_flags
|= O_CLOEXEC
;
11988 ret
= get_errno(eventfd(arg1
, host_flags
));
11990 fd_trans_register(ret
, &target_eventfd_trans
);
11995 #endif /* CONFIG_EVENTFD */
11996 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11997 case TARGET_NR_fallocate
:
11998 #if TARGET_ABI_BITS == 32
11999 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
12000 target_offset64(arg5
, arg6
)));
12002 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
12006 #if defined(CONFIG_SYNC_FILE_RANGE)
12007 #if defined(TARGET_NR_sync_file_range)
12008 case TARGET_NR_sync_file_range
:
12009 #if TARGET_ABI_BITS == 32
12010 #if defined(TARGET_MIPS)
12011 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12012 target_offset64(arg5
, arg6
), arg7
));
12014 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
12015 target_offset64(arg4
, arg5
), arg6
));
12016 #endif /* !TARGET_MIPS */
12018 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
12022 #if defined(TARGET_NR_sync_file_range2)
12023 case TARGET_NR_sync_file_range2
:
12024 /* This is like sync_file_range but the arguments are reordered */
12025 #if TARGET_ABI_BITS == 32
12026 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12027 target_offset64(arg5
, arg6
), arg2
));
12029 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
12034 #if defined(TARGET_NR_signalfd4)
12035 case TARGET_NR_signalfd4
:
12036 ret
= do_signalfd4(arg1
, arg2
, arg4
);
12039 #if defined(TARGET_NR_signalfd)
12040 case TARGET_NR_signalfd
:
12041 ret
= do_signalfd4(arg1
, arg2
, 0);
12044 #if defined(CONFIG_EPOLL)
12045 #if defined(TARGET_NR_epoll_create)
12046 case TARGET_NR_epoll_create
:
12047 ret
= get_errno(epoll_create(arg1
));
12050 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12051 case TARGET_NR_epoll_create1
:
12052 ret
= get_errno(epoll_create1(arg1
));
12055 #if defined(TARGET_NR_epoll_ctl)
12056 case TARGET_NR_epoll_ctl
:
12058 struct epoll_event ep
;
12059 struct epoll_event
*epp
= 0;
12061 struct target_epoll_event
*target_ep
;
12062 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
12065 ep
.events
= tswap32(target_ep
->events
);
12066 /* The epoll_data_t union is just opaque data to the kernel,
12067 * so we transfer all 64 bits across and need not worry what
12068 * actual data type it is.
12070 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
12071 unlock_user_struct(target_ep
, arg4
, 0);
12074 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
12079 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12080 #if defined(TARGET_NR_epoll_wait)
12081 case TARGET_NR_epoll_wait
:
12083 #if defined(TARGET_NR_epoll_pwait)
12084 case TARGET_NR_epoll_pwait
:
12087 struct target_epoll_event
*target_ep
;
12088 struct epoll_event
*ep
;
12090 int maxevents
= arg3
;
12091 int timeout
= arg4
;
12093 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
12094 ret
= -TARGET_EINVAL
;
12098 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
12099 maxevents
* sizeof(struct target_epoll_event
), 1);
12104 ep
= g_try_new(struct epoll_event
, maxevents
);
12106 unlock_user(target_ep
, arg2
, 0);
12107 ret
= -TARGET_ENOMEM
;
12112 #if defined(TARGET_NR_epoll_pwait)
12113 case TARGET_NR_epoll_pwait
:
12115 target_sigset_t
*target_set
;
12116 sigset_t _set
, *set
= &_set
;
12119 if (arg6
!= sizeof(target_sigset_t
)) {
12120 ret
= -TARGET_EINVAL
;
12124 target_set
= lock_user(VERIFY_READ
, arg5
,
12125 sizeof(target_sigset_t
), 1);
12127 ret
= -TARGET_EFAULT
;
12130 target_to_host_sigset(set
, target_set
);
12131 unlock_user(target_set
, arg5
, 0);
12136 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12137 set
, SIGSET_T_SIZE
));
12141 #if defined(TARGET_NR_epoll_wait)
12142 case TARGET_NR_epoll_wait
:
12143 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12148 ret
= -TARGET_ENOSYS
;
12150 if (!is_error(ret
)) {
12152 for (i
= 0; i
< ret
; i
++) {
12153 target_ep
[i
].events
= tswap32(ep
[i
].events
);
12154 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
12156 unlock_user(target_ep
, arg2
,
12157 ret
* sizeof(struct target_epoll_event
));
12159 unlock_user(target_ep
, arg2
, 0);
12166 #ifdef TARGET_NR_prlimit64
12167 case TARGET_NR_prlimit64
:
12169 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12170 struct target_rlimit64
*target_rnew
, *target_rold
;
12171 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
12172 int resource
= target_to_host_resource(arg2
);
12174 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
12177 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
12178 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
12179 unlock_user_struct(target_rnew
, arg3
, 0);
12183 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
12184 if (!is_error(ret
) && arg4
) {
12185 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
12188 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
12189 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
12190 unlock_user_struct(target_rold
, arg4
, 1);
12195 #ifdef TARGET_NR_gethostname
12196 case TARGET_NR_gethostname
:
12198 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
12200 ret
= get_errno(gethostname(name
, arg2
));
12201 unlock_user(name
, arg1
, arg2
);
12203 ret
= -TARGET_EFAULT
;
12208 #ifdef TARGET_NR_atomic_cmpxchg_32
12209 case TARGET_NR_atomic_cmpxchg_32
:
12211 /* should use start_exclusive from main.c */
12212 abi_ulong mem_value
;
12213 if (get_user_u32(mem_value
, arg6
)) {
12214 target_siginfo_t info
;
12215 info
.si_signo
= SIGSEGV
;
12217 info
.si_code
= TARGET_SEGV_MAPERR
;
12218 info
._sifields
._sigfault
._addr
= arg6
;
12219 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
12220 QEMU_SI_FAULT
, &info
);
12224 if (mem_value
== arg2
)
12225 put_user_u32(arg1
, arg6
);
12230 #ifdef TARGET_NR_atomic_barrier
12231 case TARGET_NR_atomic_barrier
:
12233 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
12239 #ifdef TARGET_NR_timer_create
12240 case TARGET_NR_timer_create
:
12242 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12244 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
12247 int timer_index
= next_free_host_timer();
12249 if (timer_index
< 0) {
12250 ret
= -TARGET_EAGAIN
;
12252 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
12255 phost_sevp
= &host_sevp
;
12256 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
12262 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
12266 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
12275 #ifdef TARGET_NR_timer_settime
12276 case TARGET_NR_timer_settime
:
12278 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12279 * struct itimerspec * old_value */
12280 target_timer_t timerid
= get_timer_id(arg1
);
12284 } else if (arg3
== 0) {
12285 ret
= -TARGET_EINVAL
;
12287 timer_t htimer
= g_posix_timers
[timerid
];
12288 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12290 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
12294 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12295 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
12303 #ifdef TARGET_NR_timer_gettime
12304 case TARGET_NR_timer_gettime
:
12306 /* args: timer_t timerid, struct itimerspec *curr_value */
12307 target_timer_t timerid
= get_timer_id(arg1
);
12311 } else if (!arg2
) {
12312 ret
= -TARGET_EFAULT
;
12314 timer_t htimer
= g_posix_timers
[timerid
];
12315 struct itimerspec hspec
;
12316 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12318 if (host_to_target_itimerspec(arg2
, &hspec
)) {
12319 ret
= -TARGET_EFAULT
;
12326 #ifdef TARGET_NR_timer_getoverrun
12327 case TARGET_NR_timer_getoverrun
:
12329 /* args: timer_t timerid */
12330 target_timer_t timerid
= get_timer_id(arg1
);
12335 timer_t htimer
= g_posix_timers
[timerid
];
12336 ret
= get_errno(timer_getoverrun(htimer
));
12338 fd_trans_unregister(ret
);
12343 #ifdef TARGET_NR_timer_delete
12344 case TARGET_NR_timer_delete
:
12346 /* args: timer_t timerid */
12347 target_timer_t timerid
= get_timer_id(arg1
);
12352 timer_t htimer
= g_posix_timers
[timerid
];
12353 ret
= get_errno(timer_delete(htimer
));
12354 g_posix_timers
[timerid
] = 0;
12360 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12361 case TARGET_NR_timerfd_create
:
12362 ret
= get_errno(timerfd_create(arg1
,
12363 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
12367 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12368 case TARGET_NR_timerfd_gettime
:
12370 struct itimerspec its_curr
;
12372 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
12374 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
12381 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12382 case TARGET_NR_timerfd_settime
:
12384 struct itimerspec its_new
, its_old
, *p_new
;
12387 if (target_to_host_itimerspec(&its_new
, arg3
)) {
12395 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
12397 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
12404 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12405 case TARGET_NR_ioprio_get
:
12406 ret
= get_errno(ioprio_get(arg1
, arg2
));
12410 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12411 case TARGET_NR_ioprio_set
:
12412 ret
= get_errno(ioprio_set(arg1
, arg2
, arg3
));
12416 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12417 case TARGET_NR_setns
:
12418 ret
= get_errno(setns(arg1
, arg2
));
12421 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12422 case TARGET_NR_unshare
:
12423 ret
= get_errno(unshare(arg1
));
12426 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12427 case TARGET_NR_kcmp
:
12428 ret
= get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
12434 gemu_log("qemu: Unsupported syscall: %d\n", num
);
12435 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
12436 unimplemented_nowarn
:
12438 ret
= -TARGET_ENOSYS
;
12443 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
12446 print_syscall_ret(num
, ret
);
12447 trace_guest_user_syscall_ret(cpu
, num
, ret
);
12450 ret
= -TARGET_EFAULT
;