4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
38 #include <sys/timex.h>
/* Prototype for the libc __clone2() entry point, which takes a stack
 * base + stack size pair rather than a single child-stack pointer.
 * NOTE(review): in the full file this declaration is expected to sit
 * under an architecture-specific guard (ia64) — confirm against the
 * unabridged source before relying on it elsewhere. */
40 int __clone2(int (*fn
)(void *), void *child_stack_base
,
41 size_t stack_size
, int flags
, void *arg
, ...);
43 #include <sys/socket.h>
47 #include <sys/times.h>
50 #include <sys/statfs.h>
53 #include <sys/sysinfo.h>
54 #include <sys/signalfd.h>
55 //#include <sys/user.h>
56 #include <netinet/ip.h>
57 #include <netinet/tcp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/errqueue.h>
62 #include <linux/random.h>
63 #include "qemu-common.h"
65 #include <sys/timerfd.h>
71 #include <sys/eventfd.h>
74 #include <sys/epoll.h>
77 #include "qemu/xattr.h"
79 #ifdef CONFIG_SENDFILE
80 #include <sys/sendfile.h>
83 #define termios host_termios
84 #define winsize host_winsize
85 #define termio host_termio
86 #define sgttyb host_sgttyb /* same as target */
87 #define tchars host_tchars /* same as target */
88 #define ltchars host_ltchars /* same as target */
90 #include <linux/termios.h>
91 #include <linux/unistd.h>
92 #include <linux/cdrom.h>
93 #include <linux/hdreg.h>
94 #include <linux/soundcard.h>
96 #include <linux/mtio.h>
98 #if defined(CONFIG_FIEMAP)
99 #include <linux/fiemap.h>
101 #include <linux/fb.h>
102 #include <linux/vt.h>
103 #include <linux/dm-ioctl.h>
104 #include <linux/reboot.h>
105 #include <linux/route.h>
106 #include <linux/filter.h>
107 #include <linux/blkpg.h>
108 #include <netpacket/packet.h>
109 #include <linux/netlink.h>
110 #ifdef CONFIG_RTNETLINK
111 #include <linux/rtnetlink.h>
112 #include <linux/if_bridge.h>
114 #include <linux/audit.h>
115 #include "linux_loop.h"
121 #define CLONE_IO 0x80000000 /* Clone io context */
124 /* We can't directly call the host clone syscall, because this will
125 * badly confuse libc (breaking mutexes, for example). So we must
126 * divide clone flags into:
127 * * flag combinations that look like pthread_create()
128 * * flag combinations that look like fork()
129 * * flags we can implement within QEMU itself
130 * * flags we can't support and will return an error for
132 /* For thread creation, all these flags must be present; for
133 * fork, none must be present.
135 #define CLONE_THREAD_FLAGS \
136 (CLONE_VM | CLONE_FS | CLONE_FILES | \
137 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
139 /* These flags are ignored:
140 * CLONE_DETACHED is now ignored by the kernel;
141 * CLONE_IO is just an optimisation hint to the I/O scheduler
143 #define CLONE_IGNORED_FLAGS \
144 (CLONE_DETACHED | CLONE_IO)
146 /* Flags for fork which we can implement within QEMU itself */
147 #define CLONE_OPTIONAL_FORK_FLAGS \
148 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
149 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
151 /* Flags for thread creation which we can implement within QEMU itself */
152 #define CLONE_OPTIONAL_THREAD_FLAGS \
153 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
154 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
156 #define CLONE_INVALID_FORK_FLAGS \
157 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
159 #define CLONE_INVALID_THREAD_FLAGS \
160 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
161 CLONE_IGNORED_FLAGS))
163 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
164 * have almost all been allocated. We cannot support any of
165 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
166 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
167 * The checks against the invalid thread masks above will catch these.
168 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
172 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
173 * once. This exercises the codepaths for restart.
175 //#define DEBUG_ERESTARTSYS
177 //#include <linux/msdos_fs.h>
/* VFAT directory-listing ioctl numbers, re-declared locally because
 * including <linux/msdos_fs.h> directly is problematic (see the
 * commented-out include above). Both ioctls read a pair of
 * struct linux_dirent records, as encoded in the _IOR() size field. */
178 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
179 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/* _syscallN(type, name, ...): expands to a static wrapper function
 * called "name" that invokes the raw host system call __NR_<name>
 * with N arguments via syscall(2). Presumably used where the host
 * libc exposes no wrapper of its own — confirm per call site.
 * NOTE(review): the brace lines of these macro bodies are not visible
 * in this extract; verify against the full file before editing. */
189 #define _syscall0(type,name) \
190 static type name (void) \
192 return syscall(__NR_##name); \
195 #define _syscall1(type,name,type1,arg1) \
196 static type name (type1 arg1) \
198 return syscall(__NR_##name, arg1); \
201 #define _syscall2(type,name,type1,arg1,type2,arg2) \
202 static type name (type1 arg1,type2 arg2) \
204 return syscall(__NR_##name, arg1, arg2); \
207 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
208 static type name (type1 arg1,type2 arg2,type3 arg3) \
210 return syscall(__NR_##name, arg1, arg2, arg3); \
213 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
214 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
216 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
219 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
221 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
223 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
227 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
228 type5,arg5,type6,arg6) \
229 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
232 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
/* Aliases mapping __NR_sys_<foo> onto the real __NR_<foo> numbers, so
 * the _syscallN() macros above can emit wrapper functions named
 * sys_<foo> (distinct from any identically-named libc functions)
 * while still invoking the correct host syscall number. */
236 #define __NR_sys_uname __NR_uname
237 #define __NR_sys_getcwd1 __NR_getcwd
238 #define __NR_sys_getdents __NR_getdents
239 #define __NR_sys_getdents64 __NR_getdents64
240 #define __NR_sys_getpriority __NR_getpriority
241 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
242 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
243 #define __NR_sys_syslog __NR_syslog
244 #define __NR_sys_futex __NR_futex
245 #define __NR_sys_inotify_init __NR_inotify_init
246 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
247 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
249 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
251 #define __NR__llseek __NR_lseek
254 /* Newer kernel ports have llseek() instead of _llseek() */
255 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
256 #define TARGET_NR__llseek TARGET_NR_llseek
260 _syscall0(int, gettid
)
262 /* This is a replacement for the host gettid() and must return a host
264 static int gettid(void) {
268 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
269 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
271 #if !defined(__NR_getdents) || \
272 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
273 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
275 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
276 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
277 loff_t
*, res
, uint
, wh
);
279 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
280 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
282 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
283 #ifdef __NR_exit_group
284 _syscall1(int,exit_group
,int,error_code
)
286 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
287 _syscall1(int,set_tid_address
,int *,tidptr
)
289 #if defined(TARGET_NR_futex) && defined(__NR_futex)
290 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
291 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
293 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
294 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
295 unsigned long *, user_mask_ptr
);
296 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
297 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
298 unsigned long *, user_mask_ptr
);
299 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
301 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
302 struct __user_cap_data_struct
*, data
);
303 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
304 struct __user_cap_data_struct
*, data
);
305 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
306 _syscall2(int, ioprio_get
, int, which
, int, who
)
308 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
309 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
311 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
312 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
315 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
316 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
317 unsigned long, idx1
, unsigned long, idx2
)
320 static bitmask_transtbl fcntl_flags_tbl
[] = {
321 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
322 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
323 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
324 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
325 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
326 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
327 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
328 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
329 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
330 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
331 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
332 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
333 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
334 #if defined(O_DIRECT)
335 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
337 #if defined(O_NOATIME)
338 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
340 #if defined(O_CLOEXEC)
341 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
344 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
346 #if defined(O_TMPFILE)
347 { TARGET_O_TMPFILE
, TARGET_O_TMPFILE
, O_TMPFILE
, O_TMPFILE
},
349 /* Don't terminate the list prematurely on 64-bit host+guest. */
350 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
351 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
358 QEMU_IFLA_BR_FORWARD_DELAY
,
359 QEMU_IFLA_BR_HELLO_TIME
,
360 QEMU_IFLA_BR_MAX_AGE
,
361 QEMU_IFLA_BR_AGEING_TIME
,
362 QEMU_IFLA_BR_STP_STATE
,
363 QEMU_IFLA_BR_PRIORITY
,
364 QEMU_IFLA_BR_VLAN_FILTERING
,
365 QEMU_IFLA_BR_VLAN_PROTOCOL
,
366 QEMU_IFLA_BR_GROUP_FWD_MASK
,
367 QEMU_IFLA_BR_ROOT_ID
,
368 QEMU_IFLA_BR_BRIDGE_ID
,
369 QEMU_IFLA_BR_ROOT_PORT
,
370 QEMU_IFLA_BR_ROOT_PATH_COST
,
371 QEMU_IFLA_BR_TOPOLOGY_CHANGE
,
372 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
,
373 QEMU_IFLA_BR_HELLO_TIMER
,
374 QEMU_IFLA_BR_TCN_TIMER
,
375 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
,
376 QEMU_IFLA_BR_GC_TIMER
,
377 QEMU_IFLA_BR_GROUP_ADDR
,
378 QEMU_IFLA_BR_FDB_FLUSH
,
379 QEMU_IFLA_BR_MCAST_ROUTER
,
380 QEMU_IFLA_BR_MCAST_SNOOPING
,
381 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
,
382 QEMU_IFLA_BR_MCAST_QUERIER
,
383 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
,
384 QEMU_IFLA_BR_MCAST_HASH_MAX
,
385 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
,
386 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
,
387 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
,
388 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
,
389 QEMU_IFLA_BR_MCAST_QUERIER_INTVL
,
390 QEMU_IFLA_BR_MCAST_QUERY_INTVL
,
391 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
,
392 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
,
393 QEMU_IFLA_BR_NF_CALL_IPTABLES
,
394 QEMU_IFLA_BR_NF_CALL_IP6TABLES
,
395 QEMU_IFLA_BR_NF_CALL_ARPTABLES
,
396 QEMU_IFLA_BR_VLAN_DEFAULT_PVID
,
398 QEMU_IFLA_BR_VLAN_STATS_ENABLED
,
399 QEMU_IFLA_BR_MCAST_STATS_ENABLED
,
423 QEMU_IFLA_NET_NS_PID
,
426 QEMU_IFLA_VFINFO_LIST
,
434 QEMU_IFLA_PROMISCUITY
,
435 QEMU_IFLA_NUM_TX_QUEUES
,
436 QEMU_IFLA_NUM_RX_QUEUES
,
438 QEMU_IFLA_PHYS_PORT_ID
,
439 QEMU_IFLA_CARRIER_CHANGES
,
440 QEMU_IFLA_PHYS_SWITCH_ID
,
441 QEMU_IFLA_LINK_NETNSID
,
442 QEMU_IFLA_PHYS_PORT_NAME
,
443 QEMU_IFLA_PROTO_DOWN
,
444 QEMU_IFLA_GSO_MAX_SEGS
,
445 QEMU_IFLA_GSO_MAX_SIZE
,
452 QEMU_IFLA_BRPORT_UNSPEC
,
453 QEMU_IFLA_BRPORT_STATE
,
454 QEMU_IFLA_BRPORT_PRIORITY
,
455 QEMU_IFLA_BRPORT_COST
,
456 QEMU_IFLA_BRPORT_MODE
,
457 QEMU_IFLA_BRPORT_GUARD
,
458 QEMU_IFLA_BRPORT_PROTECT
,
459 QEMU_IFLA_BRPORT_FAST_LEAVE
,
460 QEMU_IFLA_BRPORT_LEARNING
,
461 QEMU_IFLA_BRPORT_UNICAST_FLOOD
,
462 QEMU_IFLA_BRPORT_PROXYARP
,
463 QEMU_IFLA_BRPORT_LEARNING_SYNC
,
464 QEMU_IFLA_BRPORT_PROXYARP_WIFI
,
465 QEMU_IFLA_BRPORT_ROOT_ID
,
466 QEMU_IFLA_BRPORT_BRIDGE_ID
,
467 QEMU_IFLA_BRPORT_DESIGNATED_PORT
,
468 QEMU_IFLA_BRPORT_DESIGNATED_COST
,
471 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
,
472 QEMU_IFLA_BRPORT_CONFIG_PENDING
,
473 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
,
474 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
,
475 QEMU_IFLA_BRPORT_HOLD_TIMER
,
476 QEMU_IFLA_BRPORT_FLUSH
,
477 QEMU_IFLA_BRPORT_MULTICAST_ROUTER
,
478 QEMU_IFLA_BRPORT_PAD
,
479 QEMU___IFLA_BRPORT_MAX
483 QEMU_IFLA_INFO_UNSPEC
,
486 QEMU_IFLA_INFO_XSTATS
,
487 QEMU_IFLA_INFO_SLAVE_KIND
,
488 QEMU_IFLA_INFO_SLAVE_DATA
,
489 QEMU___IFLA_INFO_MAX
,
493 QEMU_IFLA_INET_UNSPEC
,
495 QEMU___IFLA_INET_MAX
,
499 QEMU_IFLA_INET6_UNSPEC
,
500 QEMU_IFLA_INET6_FLAGS
,
501 QEMU_IFLA_INET6_CONF
,
502 QEMU_IFLA_INET6_STATS
,
503 QEMU_IFLA_INET6_MCAST
,
504 QEMU_IFLA_INET6_CACHEINFO
,
505 QEMU_IFLA_INET6_ICMP6STATS
,
506 QEMU_IFLA_INET6_TOKEN
,
507 QEMU_IFLA_INET6_ADDR_GEN_MODE
,
508 QEMU___IFLA_INET6_MAX
511 typedef abi_long (*TargetFdDataFunc
)(void *, size_t);
512 typedef abi_long (*TargetFdAddrFunc
)(void *, abi_ulong
, socklen_t
);
513 typedef struct TargetFdTrans
{
514 TargetFdDataFunc host_to_target_data
;
515 TargetFdDataFunc target_to_host_data
;
516 TargetFdAddrFunc target_to_host_addr
;
519 static TargetFdTrans
**target_fd_trans
;
521 static unsigned int target_fd_max
;
523 static TargetFdDataFunc
fd_trans_target_to_host_data(int fd
)
525 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
526 return target_fd_trans
[fd
]->target_to_host_data
;
531 static TargetFdDataFunc
fd_trans_host_to_target_data(int fd
)
533 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
534 return target_fd_trans
[fd
]->host_to_target_data
;
539 static TargetFdAddrFunc
fd_trans_target_to_host_addr(int fd
)
541 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
542 return target_fd_trans
[fd
]->target_to_host_addr
;
547 static void fd_trans_register(int fd
, TargetFdTrans
*trans
)
551 if (fd
>= target_fd_max
) {
552 oldmax
= target_fd_max
;
553 target_fd_max
= ((fd
>> 6) + 1) << 6; /* by slice of 64 entries */
554 target_fd_trans
= g_renew(TargetFdTrans
*,
555 target_fd_trans
, target_fd_max
);
556 memset((void *)(target_fd_trans
+ oldmax
), 0,
557 (target_fd_max
- oldmax
) * sizeof(TargetFdTrans
*));
559 target_fd_trans
[fd
] = trans
;
562 static void fd_trans_unregister(int fd
)
564 if (fd
>= 0 && fd
< target_fd_max
) {
565 target_fd_trans
[fd
] = NULL
;
569 static void fd_trans_dup(int oldfd
, int newfd
)
571 fd_trans_unregister(newfd
);
572 if (oldfd
< target_fd_max
&& target_fd_trans
[oldfd
]) {
573 fd_trans_register(newfd
, target_fd_trans
[oldfd
]);
577 static int sys_getcwd1(char *buf
, size_t size
)
579 if (getcwd(buf
, size
) == NULL
) {
580 /* getcwd() sets errno */
583 return strlen(buf
)+1;
586 #ifdef TARGET_NR_utimensat
587 #if defined(__NR_utimensat)
588 #define __NR_sys_utimensat __NR_utimensat
589 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
590 const struct timespec
*,tsp
,int,flags
)
592 static int sys_utimensat(int dirfd
, const char *pathname
,
593 const struct timespec times
[2], int flags
)
599 #endif /* TARGET_NR_utimensat */
601 #ifdef CONFIG_INOTIFY
602 #include <sys/inotify.h>
604 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
605 static int sys_inotify_init(void)
607 return (inotify_init());
610 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
611 static int sys_inotify_add_watch(int fd
,const char *pathname
, int32_t mask
)
613 return (inotify_add_watch(fd
, pathname
, mask
));
616 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
617 static int sys_inotify_rm_watch(int fd
, int32_t wd
)
619 return (inotify_rm_watch(fd
, wd
));
622 #ifdef CONFIG_INOTIFY1
623 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
624 static int sys_inotify_init1(int flags
)
626 return (inotify_init1(flags
));
631 /* Userspace can usually survive runtime without inotify */
632 #undef TARGET_NR_inotify_init
633 #undef TARGET_NR_inotify_init1
634 #undef TARGET_NR_inotify_add_watch
635 #undef TARGET_NR_inotify_rm_watch
636 #endif /* CONFIG_INOTIFY */
638 #if defined(TARGET_NR_prlimit64)
639 #ifndef __NR_prlimit64
640 # define __NR_prlimit64 -1
642 #define __NR_sys_prlimit64 __NR_prlimit64
643 /* The glibc rlimit structure may not be that used by the underlying syscall */
644 struct host_rlimit64
{
648 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
649 const struct host_rlimit64
*, new_limit
,
650 struct host_rlimit64
*, old_limit
)
654 #if defined(TARGET_NR_timer_create)
655 /* Maximum of 32 active POSIX timers allowed at any one time. */
656 static timer_t g_posix_timers
[32] = { 0, } ;
658 static inline int next_free_host_timer(void)
661 /* FIXME: Does finding the next free slot require a lock? */
662 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
663 if (g_posix_timers
[k
] == 0) {
664 g_posix_timers
[k
] = (timer_t
) 1;
672 /* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
674 static inline int regpairs_aligned(void *cpu_env
) {
675 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
677 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
678 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
679 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
680 /* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
681 * of registers which translates to the same as ARM/MIPS, because we start with
683 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
685 static inline int regpairs_aligned(void *cpu_env
) { return 0; }
688 #define ERRNO_TABLE_SIZE 1200
690 /* target_to_host_errno_table[] is initialized from
691 * host_to_target_errno_table[] in syscall_init(). */
692 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
696 * This list is the union of errno values overridden in asm-<arch>/errno.h
697 * minus the errnos that are not actually generic to all archs.
699 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
700 [EAGAIN
] = TARGET_EAGAIN
,
701 [EIDRM
] = TARGET_EIDRM
,
702 [ECHRNG
] = TARGET_ECHRNG
,
703 [EL2NSYNC
] = TARGET_EL2NSYNC
,
704 [EL3HLT
] = TARGET_EL3HLT
,
705 [EL3RST
] = TARGET_EL3RST
,
706 [ELNRNG
] = TARGET_ELNRNG
,
707 [EUNATCH
] = TARGET_EUNATCH
,
708 [ENOCSI
] = TARGET_ENOCSI
,
709 [EL2HLT
] = TARGET_EL2HLT
,
710 [EDEADLK
] = TARGET_EDEADLK
,
711 [ENOLCK
] = TARGET_ENOLCK
,
712 [EBADE
] = TARGET_EBADE
,
713 [EBADR
] = TARGET_EBADR
,
714 [EXFULL
] = TARGET_EXFULL
,
715 [ENOANO
] = TARGET_ENOANO
,
716 [EBADRQC
] = TARGET_EBADRQC
,
717 [EBADSLT
] = TARGET_EBADSLT
,
718 [EBFONT
] = TARGET_EBFONT
,
719 [ENOSTR
] = TARGET_ENOSTR
,
720 [ENODATA
] = TARGET_ENODATA
,
721 [ETIME
] = TARGET_ETIME
,
722 [ENOSR
] = TARGET_ENOSR
,
723 [ENONET
] = TARGET_ENONET
,
724 [ENOPKG
] = TARGET_ENOPKG
,
725 [EREMOTE
] = TARGET_EREMOTE
,
726 [ENOLINK
] = TARGET_ENOLINK
,
727 [EADV
] = TARGET_EADV
,
728 [ESRMNT
] = TARGET_ESRMNT
,
729 [ECOMM
] = TARGET_ECOMM
,
730 [EPROTO
] = TARGET_EPROTO
,
731 [EDOTDOT
] = TARGET_EDOTDOT
,
732 [EMULTIHOP
] = TARGET_EMULTIHOP
,
733 [EBADMSG
] = TARGET_EBADMSG
,
734 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
735 [EOVERFLOW
] = TARGET_EOVERFLOW
,
736 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
737 [EBADFD
] = TARGET_EBADFD
,
738 [EREMCHG
] = TARGET_EREMCHG
,
739 [ELIBACC
] = TARGET_ELIBACC
,
740 [ELIBBAD
] = TARGET_ELIBBAD
,
741 [ELIBSCN
] = TARGET_ELIBSCN
,
742 [ELIBMAX
] = TARGET_ELIBMAX
,
743 [ELIBEXEC
] = TARGET_ELIBEXEC
,
744 [EILSEQ
] = TARGET_EILSEQ
,
745 [ENOSYS
] = TARGET_ENOSYS
,
746 [ELOOP
] = TARGET_ELOOP
,
747 [ERESTART
] = TARGET_ERESTART
,
748 [ESTRPIPE
] = TARGET_ESTRPIPE
,
749 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
750 [EUSERS
] = TARGET_EUSERS
,
751 [ENOTSOCK
] = TARGET_ENOTSOCK
,
752 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
753 [EMSGSIZE
] = TARGET_EMSGSIZE
,
754 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
755 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
756 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
757 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
758 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
759 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
760 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
761 [EADDRINUSE
] = TARGET_EADDRINUSE
,
762 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
763 [ENETDOWN
] = TARGET_ENETDOWN
,
764 [ENETUNREACH
] = TARGET_ENETUNREACH
,
765 [ENETRESET
] = TARGET_ENETRESET
,
766 [ECONNABORTED
] = TARGET_ECONNABORTED
,
767 [ECONNRESET
] = TARGET_ECONNRESET
,
768 [ENOBUFS
] = TARGET_ENOBUFS
,
769 [EISCONN
] = TARGET_EISCONN
,
770 [ENOTCONN
] = TARGET_ENOTCONN
,
771 [EUCLEAN
] = TARGET_EUCLEAN
,
772 [ENOTNAM
] = TARGET_ENOTNAM
,
773 [ENAVAIL
] = TARGET_ENAVAIL
,
774 [EISNAM
] = TARGET_EISNAM
,
775 [EREMOTEIO
] = TARGET_EREMOTEIO
,
776 [EDQUOT
] = TARGET_EDQUOT
,
777 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
778 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
779 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
780 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
781 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
782 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
783 [EALREADY
] = TARGET_EALREADY
,
784 [EINPROGRESS
] = TARGET_EINPROGRESS
,
785 [ESTALE
] = TARGET_ESTALE
,
786 [ECANCELED
] = TARGET_ECANCELED
,
787 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
788 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
790 [ENOKEY
] = TARGET_ENOKEY
,
793 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
796 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
799 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
802 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
804 #ifdef ENOTRECOVERABLE
805 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
808 [ENOMSG
] = TARGET_ENOMSG
,
811 [ERFKILL
] = TARGET_ERFKILL
,
814 [EHWPOISON
] = TARGET_EHWPOISON
,
818 static inline int host_to_target_errno(int err
)
820 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
821 host_to_target_errno_table
[err
]) {
822 return host_to_target_errno_table
[err
];
827 static inline int target_to_host_errno(int err
)
829 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
830 target_to_host_errno_table
[err
]) {
831 return target_to_host_errno_table
[err
];
836 static inline abi_long
get_errno(abi_long ret
)
839 return -host_to_target_errno(errno
);
844 static inline int is_error(abi_long ret
)
846 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
849 const char *target_strerror(int err
)
851 if (err
== TARGET_ERESTARTSYS
) {
852 return "To be restarted";
854 if (err
== TARGET_QEMU_ESIGRETURN
) {
855 return "Successful exit from sigreturn";
858 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
861 return strerror(target_to_host_errno(err
));
/* safe_syscallN(type, name, ...): like _syscallN() above, but the
 * generated safe_<name>() wrappers route the call through
 * safe_syscall() instead of the libc function or raw syscall().
 * NOTE(review): safe_syscall() itself is not visible in this extract;
 * per the DEBUG_ERESTARTSYS comment earlier in the file it presumably
 * cooperates with the guest-signal/syscall-restart machinery —
 * confirm against the full file. The macro bodies' brace lines are
 * also not visible here; verify before editing. */
864 #define safe_syscall0(type, name) \
865 static type safe_##name(void) \
867 return safe_syscall(__NR_##name); \
870 #define safe_syscall1(type, name, type1, arg1) \
871 static type safe_##name(type1 arg1) \
873 return safe_syscall(__NR_##name, arg1); \
876 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
877 static type safe_##name(type1 arg1, type2 arg2) \
879 return safe_syscall(__NR_##name, arg1, arg2); \
882 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
883 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
885 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
888 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
890 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
892 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
895 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
896 type4, arg4, type5, arg5) \
897 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
900 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
903 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
904 type4, arg4, type5, arg5, type6, arg6) \
905 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
906 type5 arg5, type6 arg6) \
908 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
911 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
912 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
913 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
914 int, flags
, mode_t
, mode
)
915 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
916 struct rusage
*, rusage
)
917 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
918 int, options
, struct rusage
*, rusage
)
919 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
920 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
921 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
922 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
923 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
925 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
926 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
928 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
929 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
930 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
931 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
932 safe_syscall2(int, tkill
, int, tid
, int, sig
)
933 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
934 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
935 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
936 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
937 unsigned long, pos_l
, unsigned long, pos_h
)
938 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
939 unsigned long, pos_l
, unsigned long, pos_h
)
940 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
942 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
943 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
944 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
945 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
946 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
947 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
948 safe_syscall2(int, flock
, int, fd
, int, operation
)
949 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
950 const struct timespec
*, uts
, size_t, sigsetsize
)
951 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
953 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
954 struct timespec
*, rem
)
955 #ifdef TARGET_NR_clock_nanosleep
956 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
957 const struct timespec
*, req
, struct timespec
*, rem
)
960 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
962 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
963 long, msgtype
, int, flags
)
964 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
965 unsigned, nsops
, const struct timespec
*, timeout
)
967 /* This host kernel architecture uses a single ipc syscall; fake up
968 * wrappers for the sub-operations to hide this implementation detail.
969 * Annoyingly we can't include linux/ipc.h to get the constant definitions
970 * for the call parameter because some structs in there conflict with the
971 * sys/ipc.h ones. So we just define them here, and rely on them being
972 * the same for all host architectures.
974 #define Q_SEMTIMEDOP 4
977 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
979 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
980 void *, ptr
, long, fifth
)
981 static int safe_msgsnd(int msgid
, const void *msgp
, size_t sz
, int flags
)
983 return safe_ipc(Q_IPCCALL(0, Q_MSGSND
), msgid
, sz
, flags
, (void *)msgp
, 0);
985 static int safe_msgrcv(int msgid
, void *msgp
, size_t sz
, long type
, int flags
)
987 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV
), msgid
, sz
, flags
, msgp
, type
);
989 static int safe_semtimedop(int semid
, struct sembuf
*tsops
, unsigned nsops
,
990 const struct timespec
*timeout
)
992 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP
), semid
, nsops
, 0, tsops
,
996 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
997 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
998 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
999 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
1000 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
1002 /* We do ioctl like this rather than via safe_syscall3 to preserve the
1003 * "third argument might be integer or pointer or not present" behaviour of
1004 * the libc function.
1006 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
1007 /* Similarly for fcntl. Note that callers must always:
1008 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
1009 * use the flock64 struct rather than unsuffixed flock
1010 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
1013 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
1015 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
1018 static inline int host_to_target_sock_type(int host_type
)
1022 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
1024 target_type
= TARGET_SOCK_DGRAM
;
1027 target_type
= TARGET_SOCK_STREAM
;
1030 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
1034 #if defined(SOCK_CLOEXEC)
1035 if (host_type
& SOCK_CLOEXEC
) {
1036 target_type
|= TARGET_SOCK_CLOEXEC
;
1040 #if defined(SOCK_NONBLOCK)
1041 if (host_type
& SOCK_NONBLOCK
) {
1042 target_type
|= TARGET_SOCK_NONBLOCK
;
1049 static abi_ulong target_brk
;
1050 static abi_ulong target_original_brk
;
1051 static abi_ulong brk_page
;
1053 void target_set_brk(abi_ulong new_brk
)
1055 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
1056 brk_page
= HOST_PAGE_ALIGN(target_brk
);
1059 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
1060 #define DEBUGF_BRK(message, args...)
1062 /* do_brk() must return target values and target errnos. */
1063 abi_long
do_brk(abi_ulong new_brk
)
1065 abi_long mapped_addr
;
1066 abi_ulong new_alloc_size
;
1068 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
1071 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
1074 if (new_brk
< target_original_brk
) {
1075 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
1080 /* If the new brk is less than the highest page reserved to the
1081 * target heap allocation, set it and we're almost done... */
1082 if (new_brk
<= brk_page
) {
1083 /* Heap contents are initialized to zero, as for anonymous
1085 if (new_brk
> target_brk
) {
1086 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
1088 target_brk
= new_brk
;
1089 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
1093 /* We need to allocate more memory after the brk... Note that
1094 * we don't use MAP_FIXED because that will map over the top of
1095 * any existing mapping (like the one with the host libc or qemu
1096 * itself); instead we treat "mapped but at wrong address" as
1097 * a failure and unmap again.
1099 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
1100 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
1101 PROT_READ
|PROT_WRITE
,
1102 MAP_ANON
|MAP_PRIVATE
, 0, 0));
1104 if (mapped_addr
== brk_page
) {
1105 /* Heap contents are initialized to zero, as for anonymous
1106 * mapped pages. Technically the new pages are already
1107 * initialized to zero since they *are* anonymous mapped
1108 * pages, however we have to take care with the contents that
1109 * come from the remaining part of the previous page: it may
1110 * contains garbage data due to a previous heap usage (grown
1111 * then shrunken). */
1112 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
1114 target_brk
= new_brk
;
1115 brk_page
= HOST_PAGE_ALIGN(target_brk
);
1116 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
1119 } else if (mapped_addr
!= -1) {
1120 /* Mapped but at wrong address, meaning there wasn't actually
1121 * enough space for this brk.
1123 target_munmap(mapped_addr
, new_alloc_size
);
1125 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
1128 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
1131 #if defined(TARGET_ALPHA)
1132 /* We (partially) emulate OSF/1 on Alpha, which requires we
1133 return a proper errno, not an unchanged brk value. */
1134 return -TARGET_ENOMEM
;
1136 /* For everything else, return the previous break. */
1140 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
1141 abi_ulong target_fds_addr
,
1145 abi_ulong b
, *target_fds
;
1147 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1148 if (!(target_fds
= lock_user(VERIFY_READ
,
1150 sizeof(abi_ulong
) * nw
,
1152 return -TARGET_EFAULT
;
1156 for (i
= 0; i
< nw
; i
++) {
1157 /* grab the abi_ulong */
1158 __get_user(b
, &target_fds
[i
]);
1159 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1160 /* check the bit inside the abi_ulong */
1167 unlock_user(target_fds
, target_fds_addr
, 0);
1172 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
1173 abi_ulong target_fds_addr
,
1176 if (target_fds_addr
) {
1177 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
1178 return -TARGET_EFAULT
;
1186 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
1192 abi_ulong
*target_fds
;
1194 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1195 if (!(target_fds
= lock_user(VERIFY_WRITE
,
1197 sizeof(abi_ulong
) * nw
,
1199 return -TARGET_EFAULT
;
1202 for (i
= 0; i
< nw
; i
++) {
1204 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1205 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1208 __put_user(v
, &target_fds
[i
]);
1211 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
1216 #if defined(__alpha__)
1217 #define HOST_HZ 1024
1222 static inline abi_long
host_to_target_clock_t(long ticks
)
1224 #if HOST_HZ == TARGET_HZ
1227 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1231 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1232 const struct rusage
*rusage
)
1234 struct target_rusage
*target_rusage
;
1236 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1237 return -TARGET_EFAULT
;
1238 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1239 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1240 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1241 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1242 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1243 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1244 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1245 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1246 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1247 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1248 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1249 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1250 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1251 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1252 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1253 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1254 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1255 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1256 unlock_user_struct(target_rusage
, target_addr
, 1);
1261 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1263 abi_ulong target_rlim_swap
;
1266 target_rlim_swap
= tswapal(target_rlim
);
1267 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1268 return RLIM_INFINITY
;
1270 result
= target_rlim_swap
;
1271 if (target_rlim_swap
!= (rlim_t
)result
)
1272 return RLIM_INFINITY
;
1277 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1279 abi_ulong target_rlim_swap
;
1282 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1283 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1285 target_rlim_swap
= rlim
;
1286 result
= tswapal(target_rlim_swap
);
1291 static inline int target_to_host_resource(int code
)
1294 case TARGET_RLIMIT_AS
:
1296 case TARGET_RLIMIT_CORE
:
1298 case TARGET_RLIMIT_CPU
:
1300 case TARGET_RLIMIT_DATA
:
1302 case TARGET_RLIMIT_FSIZE
:
1303 return RLIMIT_FSIZE
;
1304 case TARGET_RLIMIT_LOCKS
:
1305 return RLIMIT_LOCKS
;
1306 case TARGET_RLIMIT_MEMLOCK
:
1307 return RLIMIT_MEMLOCK
;
1308 case TARGET_RLIMIT_MSGQUEUE
:
1309 return RLIMIT_MSGQUEUE
;
1310 case TARGET_RLIMIT_NICE
:
1312 case TARGET_RLIMIT_NOFILE
:
1313 return RLIMIT_NOFILE
;
1314 case TARGET_RLIMIT_NPROC
:
1315 return RLIMIT_NPROC
;
1316 case TARGET_RLIMIT_RSS
:
1318 case TARGET_RLIMIT_RTPRIO
:
1319 return RLIMIT_RTPRIO
;
1320 case TARGET_RLIMIT_SIGPENDING
:
1321 return RLIMIT_SIGPENDING
;
1322 case TARGET_RLIMIT_STACK
:
1323 return RLIMIT_STACK
;
1329 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1330 abi_ulong target_tv_addr
)
1332 struct target_timeval
*target_tv
;
1334 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1335 return -TARGET_EFAULT
;
1337 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1338 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1340 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1345 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1346 const struct timeval
*tv
)
1348 struct target_timeval
*target_tv
;
1350 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1351 return -TARGET_EFAULT
;
1353 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1354 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1356 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1361 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1362 abi_ulong target_tz_addr
)
1364 struct target_timezone
*target_tz
;
1366 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1367 return -TARGET_EFAULT
;
1370 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1371 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1373 unlock_user_struct(target_tz
, target_tz_addr
, 0);
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

/* Read a struct mq_attr from guest memory into *attr.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

/* Write a struct mq_attr out to guest memory.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* Convert the optional timeval into the timespec pselect6 wants. */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* Linux updates the timeout with the remaining time. */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
#endif
#if defined(TARGET_WANT_OLD_SYS_SELECT)
/* The "old" select took a single pointer to a sel_arg_struct holding
 * all five arguments; unpack it and forward to do_select().
 */
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
1502 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1505 return pipe2(host_pipe
, flags
);
1511 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1512 int flags
, int is_pipe2
)
1516 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1519 return get_errno(ret
);
1521 /* Several targets have special calling conventions for the original
1522 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1524 #if defined(TARGET_ALPHA)
1525 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1526 return host_pipe
[0];
1527 #elif defined(TARGET_MIPS)
1528 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1529 return host_pipe
[0];
1530 #elif defined(TARGET_SH4)
1531 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1532 return host_pipe
[0];
1533 #elif defined(TARGET_SPARC)
1534 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1535 return host_pipe
[0];
1539 if (put_user_s32(host_pipe
[0], pipedes
)
1540 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1541 return -TARGET_EFAULT
;
1542 return get_errno(ret
);
1545 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1546 abi_ulong target_addr
,
1549 struct target_ip_mreqn
*target_smreqn
;
1551 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1553 return -TARGET_EFAULT
;
1554 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1555 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1556 if (len
== sizeof(struct target_ip_mreqn
))
1557 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1558 unlock_user(target_smreqn
, target_addr
, 0);
1563 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1564 abi_ulong target_addr
,
1567 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1568 sa_family_t sa_family
;
1569 struct target_sockaddr
*target_saddr
;
1571 if (fd_trans_target_to_host_addr(fd
)) {
1572 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1575 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1577 return -TARGET_EFAULT
;
1579 sa_family
= tswap16(target_saddr
->sa_family
);
1581 /* Oops. The caller might send a incomplete sun_path; sun_path
1582 * must be terminated by \0 (see the manual page), but
1583 * unfortunately it is quite common to specify sockaddr_un
1584 * length as "strlen(x->sun_path)" while it should be
1585 * "strlen(...) + 1". We'll fix that here if needed.
1586 * Linux kernel has a similar feature.
1589 if (sa_family
== AF_UNIX
) {
1590 if (len
< unix_maxlen
&& len
> 0) {
1591 char *cp
= (char*)target_saddr
;
1593 if ( cp
[len
-1] && !cp
[len
] )
1596 if (len
> unix_maxlen
)
1600 memcpy(addr
, target_saddr
, len
);
1601 addr
->sa_family
= sa_family
;
1602 if (sa_family
== AF_NETLINK
) {
1603 struct sockaddr_nl
*nladdr
;
1605 nladdr
= (struct sockaddr_nl
*)addr
;
1606 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1607 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1608 } else if (sa_family
== AF_PACKET
) {
1609 struct target_sockaddr_ll
*lladdr
;
1611 lladdr
= (struct target_sockaddr_ll
*)addr
;
1612 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1613 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1615 unlock_user(target_saddr
, target_addr
, 0);
1620 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1621 struct sockaddr
*addr
,
1624 struct target_sockaddr
*target_saddr
;
1631 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1633 return -TARGET_EFAULT
;
1634 memcpy(target_saddr
, addr
, len
);
1635 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1636 sizeof(target_saddr
->sa_family
)) {
1637 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1639 if (addr
->sa_family
== AF_NETLINK
&& len
>= sizeof(struct sockaddr_nl
)) {
1640 struct sockaddr_nl
*target_nl
= (struct sockaddr_nl
*)target_saddr
;
1641 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1642 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1643 } else if (addr
->sa_family
== AF_PACKET
) {
1644 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1645 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1646 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1647 } else if (addr
->sa_family
== AF_INET6
&&
1648 len
>= sizeof(struct target_sockaddr_in6
)) {
1649 struct target_sockaddr_in6
*target_in6
=
1650 (struct target_sockaddr_in6
*)target_saddr
;
1651 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1653 unlock_user(target_saddr
, target_addr
, len
);
1658 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1659 struct target_msghdr
*target_msgh
)
1661 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1662 abi_long msg_controllen
;
1663 abi_ulong target_cmsg_addr
;
1664 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1665 socklen_t space
= 0;
1667 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1668 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1670 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1671 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1672 target_cmsg_start
= target_cmsg
;
1674 return -TARGET_EFAULT
;
1676 while (cmsg
&& target_cmsg
) {
1677 void *data
= CMSG_DATA(cmsg
);
1678 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1680 int len
= tswapal(target_cmsg
->cmsg_len
)
1681 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1683 space
+= CMSG_SPACE(len
);
1684 if (space
> msgh
->msg_controllen
) {
1685 space
-= CMSG_SPACE(len
);
1686 /* This is a QEMU bug, since we allocated the payload
1687 * area ourselves (unlike overflow in host-to-target
1688 * conversion, which is just the guest giving us a buffer
1689 * that's too small). It can't happen for the payload types
1690 * we currently support; if it becomes an issue in future
1691 * we would need to improve our allocation strategy to
1692 * something more intelligent than "twice the size of the
1693 * target buffer we're reading from".
1695 gemu_log("Host cmsg overflow\n");
1699 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1700 cmsg
->cmsg_level
= SOL_SOCKET
;
1702 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1704 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1705 cmsg
->cmsg_len
= CMSG_LEN(len
);
1707 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1708 int *fd
= (int *)data
;
1709 int *target_fd
= (int *)target_data
;
1710 int i
, numfds
= len
/ sizeof(int);
1712 for (i
= 0; i
< numfds
; i
++) {
1713 __get_user(fd
[i
], target_fd
+ i
);
1715 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1716 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1717 struct ucred
*cred
= (struct ucred
*)data
;
1718 struct target_ucred
*target_cred
=
1719 (struct target_ucred
*)target_data
;
1721 __get_user(cred
->pid
, &target_cred
->pid
);
1722 __get_user(cred
->uid
, &target_cred
->uid
);
1723 __get_user(cred
->gid
, &target_cred
->gid
);
1725 gemu_log("Unsupported ancillary data: %d/%d\n",
1726 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1727 memcpy(data
, target_data
, len
);
1730 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1731 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1734 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1736 msgh
->msg_controllen
= space
;
1740 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1741 struct msghdr
*msgh
)
1743 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1744 abi_long msg_controllen
;
1745 abi_ulong target_cmsg_addr
;
1746 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1747 socklen_t space
= 0;
1749 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1750 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1752 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1753 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1754 target_cmsg_start
= target_cmsg
;
1756 return -TARGET_EFAULT
;
1758 while (cmsg
&& target_cmsg
) {
1759 void *data
= CMSG_DATA(cmsg
);
1760 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1762 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1763 int tgt_len
, tgt_space
;
1765 /* We never copy a half-header but may copy half-data;
1766 * this is Linux's behaviour in put_cmsg(). Note that
1767 * truncation here is a guest problem (which we report
1768 * to the guest via the CTRUNC bit), unlike truncation
1769 * in target_to_host_cmsg, which is a QEMU bug.
1771 if (msg_controllen
< sizeof(struct cmsghdr
)) {
1772 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1776 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1777 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1779 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1781 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1783 tgt_len
= TARGET_CMSG_LEN(len
);
1785 /* Payload types which need a different size of payload on
1786 * the target must adjust tgt_len here.
1788 switch (cmsg
->cmsg_level
) {
1790 switch (cmsg
->cmsg_type
) {
1792 tgt_len
= sizeof(struct target_timeval
);
1801 if (msg_controllen
< tgt_len
) {
1802 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1803 tgt_len
= msg_controllen
;
1806 /* We must now copy-and-convert len bytes of payload
1807 * into tgt_len bytes of destination space. Bear in mind
1808 * that in both source and destination we may be dealing
1809 * with a truncated value!
1811 switch (cmsg
->cmsg_level
) {
1813 switch (cmsg
->cmsg_type
) {
1816 int *fd
= (int *)data
;
1817 int *target_fd
= (int *)target_data
;
1818 int i
, numfds
= tgt_len
/ sizeof(int);
1820 for (i
= 0; i
< numfds
; i
++) {
1821 __put_user(fd
[i
], target_fd
+ i
);
1827 struct timeval
*tv
= (struct timeval
*)data
;
1828 struct target_timeval
*target_tv
=
1829 (struct target_timeval
*)target_data
;
1831 if (len
!= sizeof(struct timeval
) ||
1832 tgt_len
!= sizeof(struct target_timeval
)) {
1836 /* copy struct timeval to target */
1837 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1838 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1841 case SCM_CREDENTIALS
:
1843 struct ucred
*cred
= (struct ucred
*)data
;
1844 struct target_ucred
*target_cred
=
1845 (struct target_ucred
*)target_data
;
1847 __put_user(cred
->pid
, &target_cred
->pid
);
1848 __put_user(cred
->uid
, &target_cred
->uid
);
1849 __put_user(cred
->gid
, &target_cred
->gid
);
1858 switch (cmsg
->cmsg_type
) {
1861 uint32_t *v
= (uint32_t *)data
;
1862 uint32_t *t_int
= (uint32_t *)target_data
;
1864 __put_user(*v
, t_int
);
1870 struct sock_extended_err ee
;
1871 struct sockaddr_in offender
;
1873 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
1874 struct errhdr_t
*target_errh
=
1875 (struct errhdr_t
*)target_data
;
1877 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1878 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1879 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1880 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1881 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1882 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1883 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1884 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1885 (void *) &errh
->offender
, sizeof(errh
->offender
));
1894 switch (cmsg
->cmsg_type
) {
1897 uint32_t *v
= (uint32_t *)data
;
1898 uint32_t *t_int
= (uint32_t *)target_data
;
1900 __put_user(*v
, t_int
);
1906 struct sock_extended_err ee
;
1907 struct sockaddr_in6 offender
;
1909 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
1910 struct errhdr6_t
*target_errh
=
1911 (struct errhdr6_t
*)target_data
;
1913 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1914 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1915 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1916 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1917 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1918 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1919 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1920 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1921 (void *) &errh
->offender
, sizeof(errh
->offender
));
1931 gemu_log("Unsupported ancillary data: %d/%d\n",
1932 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1933 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1934 if (tgt_len
> len
) {
1935 memset(target_data
+ len
, 0, tgt_len
- len
);
1939 target_cmsg
->cmsg_len
= tswapal(tgt_len
);
1940 tgt_space
= TARGET_CMSG_SPACE(len
);
1941 if (msg_controllen
< tgt_space
) {
1942 tgt_space
= msg_controllen
;
1944 msg_controllen
-= tgt_space
;
1946 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1947 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1950 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1952 target_msgh
->msg_controllen
= tswapal(space
);
/* Byte-swap every field of a netlink message header in place. */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
}
1965 static abi_long
host_to_target_for_each_nlmsg(struct nlmsghdr
*nlh
,
1967 abi_long (*host_to_target_nlmsg
)
1968 (struct nlmsghdr
*))
1973 while (len
> sizeof(struct nlmsghdr
)) {
1975 nlmsg_len
= nlh
->nlmsg_len
;
1976 if (nlmsg_len
< sizeof(struct nlmsghdr
) ||
1981 switch (nlh
->nlmsg_type
) {
1983 tswap_nlmsghdr(nlh
);
1989 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
1990 e
->error
= tswap32(e
->error
);
1991 tswap_nlmsghdr(&e
->msg
);
1992 tswap_nlmsghdr(nlh
);
1996 ret
= host_to_target_nlmsg(nlh
);
1998 tswap_nlmsghdr(nlh
);
2003 tswap_nlmsghdr(nlh
);
2004 len
-= NLMSG_ALIGN(nlmsg_len
);
2005 nlh
= (struct nlmsghdr
*)(((char*)nlh
) + NLMSG_ALIGN(nlmsg_len
));
2010 static abi_long
target_to_host_for_each_nlmsg(struct nlmsghdr
*nlh
,
2012 abi_long (*target_to_host_nlmsg
)
2013 (struct nlmsghdr
*))
2017 while (len
> sizeof(struct nlmsghdr
)) {
2018 if (tswap32(nlh
->nlmsg_len
) < sizeof(struct nlmsghdr
) ||
2019 tswap32(nlh
->nlmsg_len
) > len
) {
2022 tswap_nlmsghdr(nlh
);
2023 switch (nlh
->nlmsg_type
) {
2030 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
2031 e
->error
= tswap32(e
->error
);
2032 tswap_nlmsghdr(&e
->msg
);
2036 ret
= target_to_host_nlmsg(nlh
);
2041 len
-= NLMSG_ALIGN(nlh
->nlmsg_len
);
2042 nlh
= (struct nlmsghdr
*)(((char *)nlh
) + NLMSG_ALIGN(nlh
->nlmsg_len
));
2047 #ifdef CONFIG_RTNETLINK
2048 static abi_long
host_to_target_for_each_nlattr(struct nlattr
*nlattr
,
2049 size_t len
, void *context
,
2050 abi_long (*host_to_target_nlattr
)
2054 unsigned short nla_len
;
2057 while (len
> sizeof(struct nlattr
)) {
2058 nla_len
= nlattr
->nla_len
;
2059 if (nla_len
< sizeof(struct nlattr
) ||
2063 ret
= host_to_target_nlattr(nlattr
, context
);
2064 nlattr
->nla_len
= tswap16(nlattr
->nla_len
);
2065 nlattr
->nla_type
= tswap16(nlattr
->nla_type
);
2069 len
-= NLA_ALIGN(nla_len
);
2070 nlattr
= (struct nlattr
*)(((char *)nlattr
) + NLA_ALIGN(nla_len
));
2075 static abi_long
host_to_target_for_each_rtattr(struct rtattr
*rtattr
,
2077 abi_long (*host_to_target_rtattr
)
2080 unsigned short rta_len
;
2083 while (len
> sizeof(struct rtattr
)) {
2084 rta_len
= rtattr
->rta_len
;
2085 if (rta_len
< sizeof(struct rtattr
) ||
2089 ret
= host_to_target_rtattr(rtattr
);
2090 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
2091 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
2095 len
-= RTA_ALIGN(rta_len
);
2096 rtattr
= (struct rtattr
*)(((char *)rtattr
) + RTA_ALIGN(rta_len
));
/* Payload of a netlink attribute (uses GNU void-pointer arithmetic). */
#define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
2103 static abi_long
host_to_target_data_bridge_nlattr(struct nlattr
*nlattr
,
2110 switch (nlattr
->nla_type
) {
2112 case QEMU_IFLA_BR_FDB_FLUSH
:
2115 case QEMU_IFLA_BR_GROUP_ADDR
:
2118 case QEMU_IFLA_BR_VLAN_FILTERING
:
2119 case QEMU_IFLA_BR_TOPOLOGY_CHANGE
:
2120 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
:
2121 case QEMU_IFLA_BR_MCAST_ROUTER
:
2122 case QEMU_IFLA_BR_MCAST_SNOOPING
:
2123 case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
:
2124 case QEMU_IFLA_BR_MCAST_QUERIER
:
2125 case QEMU_IFLA_BR_NF_CALL_IPTABLES
:
2126 case QEMU_IFLA_BR_NF_CALL_IP6TABLES
:
2127 case QEMU_IFLA_BR_NF_CALL_ARPTABLES
:
2130 case QEMU_IFLA_BR_PRIORITY
:
2131 case QEMU_IFLA_BR_VLAN_PROTOCOL
:
2132 case QEMU_IFLA_BR_GROUP_FWD_MASK
:
2133 case QEMU_IFLA_BR_ROOT_PORT
:
2134 case QEMU_IFLA_BR_VLAN_DEFAULT_PVID
:
2135 u16
= NLA_DATA(nlattr
);
2136 *u16
= tswap16(*u16
);
2139 case QEMU_IFLA_BR_FORWARD_DELAY
:
2140 case QEMU_IFLA_BR_HELLO_TIME
:
2141 case QEMU_IFLA_BR_MAX_AGE
:
2142 case QEMU_IFLA_BR_AGEING_TIME
:
2143 case QEMU_IFLA_BR_STP_STATE
:
2144 case QEMU_IFLA_BR_ROOT_PATH_COST
:
2145 case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
:
2146 case QEMU_IFLA_BR_MCAST_HASH_MAX
:
2147 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
:
2148 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
:
2149 u32
= NLA_DATA(nlattr
);
2150 *u32
= tswap32(*u32
);
2153 case QEMU_IFLA_BR_HELLO_TIMER
:
2154 case QEMU_IFLA_BR_TCN_TIMER
:
2155 case QEMU_IFLA_BR_GC_TIMER
:
2156 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
:
2157 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
:
2158 case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
:
2159 case QEMU_IFLA_BR_MCAST_QUERIER_INTVL
:
2160 case QEMU_IFLA_BR_MCAST_QUERY_INTVL
:
2161 case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
:
2162 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
:
2163 u64
= NLA_DATA(nlattr
);
2164 *u64
= tswap64(*u64
);
2166 /* ifla_bridge_id: uin8_t[] */
2167 case QEMU_IFLA_BR_ROOT_ID
:
2168 case QEMU_IFLA_BR_BRIDGE_ID
:
2171 gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr
->nla_type
);
2177 static abi_long
host_to_target_slave_data_bridge_nlattr(struct nlattr
*nlattr
,
2184 switch (nlattr
->nla_type
) {
2186 case QEMU_IFLA_BRPORT_STATE
:
2187 case QEMU_IFLA_BRPORT_MODE
:
2188 case QEMU_IFLA_BRPORT_GUARD
:
2189 case QEMU_IFLA_BRPORT_PROTECT
:
2190 case QEMU_IFLA_BRPORT_FAST_LEAVE
:
2191 case QEMU_IFLA_BRPORT_LEARNING
:
2192 case QEMU_IFLA_BRPORT_UNICAST_FLOOD
:
2193 case QEMU_IFLA_BRPORT_PROXYARP
:
2194 case QEMU_IFLA_BRPORT_LEARNING_SYNC
:
2195 case QEMU_IFLA_BRPORT_PROXYARP_WIFI
:
2196 case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
:
2197 case QEMU_IFLA_BRPORT_CONFIG_PENDING
:
2198 case QEMU_IFLA_BRPORT_MULTICAST_ROUTER
:
2201 case QEMU_IFLA_BRPORT_PRIORITY
:
2202 case QEMU_IFLA_BRPORT_DESIGNATED_PORT
:
2203 case QEMU_IFLA_BRPORT_DESIGNATED_COST
:
2204 case QEMU_IFLA_BRPORT_ID
:
2205 case QEMU_IFLA_BRPORT_NO
:
2206 u16
= NLA_DATA(nlattr
);
2207 *u16
= tswap16(*u16
);
2210 case QEMU_IFLA_BRPORT_COST
:
2211 u32
= NLA_DATA(nlattr
);
2212 *u32
= tswap32(*u32
);
2215 case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
:
2216 case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
:
2217 case QEMU_IFLA_BRPORT_HOLD_TIMER
:
2218 u64
= NLA_DATA(nlattr
);
2219 *u64
= tswap64(*u64
);
2221 /* ifla_bridge_id: uint8_t[] */
2222 case QEMU_IFLA_BRPORT_ROOT_ID
:
2223 case QEMU_IFLA_BRPORT_BRIDGE_ID
:
2226 gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr
->nla_type
);
/* Context carried through IFLA_LINKINFO parsing: the link kind strings and
 * their lengths, used to dispatch the nested DATA/SLAVE_DATA attributes.
 * NOTE(review): the member lines were lost in this chunk; these fields are
 * the ones the visible linkinfo handler below reads — confirm.
 */
struct linkinfo_context {
    int len;
    char *name;
    int slave_len;
    char *slave_name;
};
2239 static abi_long
host_to_target_data_linkinfo_nlattr(struct nlattr
*nlattr
,
2242 struct linkinfo_context
*li_context
= context
;
2244 switch (nlattr
->nla_type
) {
2246 case QEMU_IFLA_INFO_KIND
:
2247 li_context
->name
= NLA_DATA(nlattr
);
2248 li_context
->len
= nlattr
->nla_len
- NLA_HDRLEN
;
2250 case QEMU_IFLA_INFO_SLAVE_KIND
:
2251 li_context
->slave_name
= NLA_DATA(nlattr
);
2252 li_context
->slave_len
= nlattr
->nla_len
- NLA_HDRLEN
;
2255 case QEMU_IFLA_INFO_XSTATS
:
2256 /* FIXME: only used by CAN */
2259 case QEMU_IFLA_INFO_DATA
:
2260 if (strncmp(li_context
->name
, "bridge",
2261 li_context
->len
) == 0) {
2262 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2265 host_to_target_data_bridge_nlattr
);
2267 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context
->name
);
2270 case QEMU_IFLA_INFO_SLAVE_DATA
:
2271 if (strncmp(li_context
->slave_name
, "bridge",
2272 li_context
->slave_len
) == 0) {
2273 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2276 host_to_target_slave_data_bridge_nlattr
);
2278 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2279 li_context
->slave_name
);
2283 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr
->nla_type
);
2290 static abi_long
host_to_target_data_inet_nlattr(struct nlattr
*nlattr
,
2296 switch (nlattr
->nla_type
) {
2297 case QEMU_IFLA_INET_CONF
:
2298 u32
= NLA_DATA(nlattr
);
2299 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2301 u32
[i
] = tswap32(u32
[i
]);
2305 gemu_log("Unknown host AF_INET type: %d\n", nlattr
->nla_type
);
2310 static abi_long
host_to_target_data_inet6_nlattr(struct nlattr
*nlattr
,
2315 struct ifla_cacheinfo
*ci
;
2318 switch (nlattr
->nla_type
) {
2320 case QEMU_IFLA_INET6_TOKEN
:
2323 case QEMU_IFLA_INET6_ADDR_GEN_MODE
:
2326 case QEMU_IFLA_INET6_FLAGS
:
2327 u32
= NLA_DATA(nlattr
);
2328 *u32
= tswap32(*u32
);
2331 case QEMU_IFLA_INET6_CONF
:
2332 u32
= NLA_DATA(nlattr
);
2333 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2335 u32
[i
] = tswap32(u32
[i
]);
2338 /* ifla_cacheinfo */
2339 case QEMU_IFLA_INET6_CACHEINFO
:
2340 ci
= NLA_DATA(nlattr
);
2341 ci
->max_reasm_len
= tswap32(ci
->max_reasm_len
);
2342 ci
->tstamp
= tswap32(ci
->tstamp
);
2343 ci
->reachable_time
= tswap32(ci
->reachable_time
);
2344 ci
->retrans_time
= tswap32(ci
->retrans_time
);
2347 case QEMU_IFLA_INET6_STATS
:
2348 case QEMU_IFLA_INET6_ICMP6STATS
:
2349 u64
= NLA_DATA(nlattr
);
2350 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u64
);
2352 u64
[i
] = tswap64(u64
[i
]);
2356 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr
->nla_type
);
2361 static abi_long
host_to_target_data_spec_nlattr(struct nlattr
*nlattr
,
2364 switch (nlattr
->nla_type
) {
2366 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2368 host_to_target_data_inet_nlattr
);
2370 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2372 host_to_target_data_inet6_nlattr
);
2374 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr
->nla_type
);
2380 static abi_long
host_to_target_data_link_rtattr(struct rtattr
*rtattr
)
2383 struct rtnl_link_stats
*st
;
2384 struct rtnl_link_stats64
*st64
;
2385 struct rtnl_link_ifmap
*map
;
2386 struct linkinfo_context li_context
;
2388 switch (rtattr
->rta_type
) {
2390 case QEMU_IFLA_ADDRESS
:
2391 case QEMU_IFLA_BROADCAST
:
2393 case QEMU_IFLA_IFNAME
:
2394 case QEMU_IFLA_QDISC
:
2397 case QEMU_IFLA_OPERSTATE
:
2398 case QEMU_IFLA_LINKMODE
:
2399 case QEMU_IFLA_CARRIER
:
2400 case QEMU_IFLA_PROTO_DOWN
:
2404 case QEMU_IFLA_LINK
:
2405 case QEMU_IFLA_WEIGHT
:
2406 case QEMU_IFLA_TXQLEN
:
2407 case QEMU_IFLA_CARRIER_CHANGES
:
2408 case QEMU_IFLA_NUM_RX_QUEUES
:
2409 case QEMU_IFLA_NUM_TX_QUEUES
:
2410 case QEMU_IFLA_PROMISCUITY
:
2411 case QEMU_IFLA_EXT_MASK
:
2412 case QEMU_IFLA_LINK_NETNSID
:
2413 case QEMU_IFLA_GROUP
:
2414 case QEMU_IFLA_MASTER
:
2415 case QEMU_IFLA_NUM_VF
:
2416 case QEMU_IFLA_GSO_MAX_SEGS
:
2417 case QEMU_IFLA_GSO_MAX_SIZE
:
2418 u32
= RTA_DATA(rtattr
);
2419 *u32
= tswap32(*u32
);
2421 /* struct rtnl_link_stats */
2422 case QEMU_IFLA_STATS
:
2423 st
= RTA_DATA(rtattr
);
2424 st
->rx_packets
= tswap32(st
->rx_packets
);
2425 st
->tx_packets
= tswap32(st
->tx_packets
);
2426 st
->rx_bytes
= tswap32(st
->rx_bytes
);
2427 st
->tx_bytes
= tswap32(st
->tx_bytes
);
2428 st
->rx_errors
= tswap32(st
->rx_errors
);
2429 st
->tx_errors
= tswap32(st
->tx_errors
);
2430 st
->rx_dropped
= tswap32(st
->rx_dropped
);
2431 st
->tx_dropped
= tswap32(st
->tx_dropped
);
2432 st
->multicast
= tswap32(st
->multicast
);
2433 st
->collisions
= tswap32(st
->collisions
);
2435 /* detailed rx_errors: */
2436 st
->rx_length_errors
= tswap32(st
->rx_length_errors
);
2437 st
->rx_over_errors
= tswap32(st
->rx_over_errors
);
2438 st
->rx_crc_errors
= tswap32(st
->rx_crc_errors
);
2439 st
->rx_frame_errors
= tswap32(st
->rx_frame_errors
);
2440 st
->rx_fifo_errors
= tswap32(st
->rx_fifo_errors
);
2441 st
->rx_missed_errors
= tswap32(st
->rx_missed_errors
);
2443 /* detailed tx_errors */
2444 st
->tx_aborted_errors
= tswap32(st
->tx_aborted_errors
);
2445 st
->tx_carrier_errors
= tswap32(st
->tx_carrier_errors
);
2446 st
->tx_fifo_errors
= tswap32(st
->tx_fifo_errors
);
2447 st
->tx_heartbeat_errors
= tswap32(st
->tx_heartbeat_errors
);
2448 st
->tx_window_errors
= tswap32(st
->tx_window_errors
);
2451 st
->rx_compressed
= tswap32(st
->rx_compressed
);
2452 st
->tx_compressed
= tswap32(st
->tx_compressed
);
2454 /* struct rtnl_link_stats64 */
2455 case QEMU_IFLA_STATS64
:
2456 st64
= RTA_DATA(rtattr
);
2457 st64
->rx_packets
= tswap64(st64
->rx_packets
);
2458 st64
->tx_packets
= tswap64(st64
->tx_packets
);
2459 st64
->rx_bytes
= tswap64(st64
->rx_bytes
);
2460 st64
->tx_bytes
= tswap64(st64
->tx_bytes
);
2461 st64
->rx_errors
= tswap64(st64
->rx_errors
);
2462 st64
->tx_errors
= tswap64(st64
->tx_errors
);
2463 st64
->rx_dropped
= tswap64(st64
->rx_dropped
);
2464 st64
->tx_dropped
= tswap64(st64
->tx_dropped
);
2465 st64
->multicast
= tswap64(st64
->multicast
);
2466 st64
->collisions
= tswap64(st64
->collisions
);
2468 /* detailed rx_errors: */
2469 st64
->rx_length_errors
= tswap64(st64
->rx_length_errors
);
2470 st64
->rx_over_errors
= tswap64(st64
->rx_over_errors
);
2471 st64
->rx_crc_errors
= tswap64(st64
->rx_crc_errors
);
2472 st64
->rx_frame_errors
= tswap64(st64
->rx_frame_errors
);
2473 st64
->rx_fifo_errors
= tswap64(st64
->rx_fifo_errors
);
2474 st64
->rx_missed_errors
= tswap64(st64
->rx_missed_errors
);
2476 /* detailed tx_errors */
2477 st64
->tx_aborted_errors
= tswap64(st64
->tx_aborted_errors
);
2478 st64
->tx_carrier_errors
= tswap64(st64
->tx_carrier_errors
);
2479 st64
->tx_fifo_errors
= tswap64(st64
->tx_fifo_errors
);
2480 st64
->tx_heartbeat_errors
= tswap64(st64
->tx_heartbeat_errors
);
2481 st64
->tx_window_errors
= tswap64(st64
->tx_window_errors
);
2484 st64
->rx_compressed
= tswap64(st64
->rx_compressed
);
2485 st64
->tx_compressed
= tswap64(st64
->tx_compressed
);
2487 /* struct rtnl_link_ifmap */
2489 map
= RTA_DATA(rtattr
);
2490 map
->mem_start
= tswap64(map
->mem_start
);
2491 map
->mem_end
= tswap64(map
->mem_end
);
2492 map
->base_addr
= tswap64(map
->base_addr
);
2493 map
->irq
= tswap16(map
->irq
);
2496 case QEMU_IFLA_LINKINFO
:
2497 memset(&li_context
, 0, sizeof(li_context
));
2498 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2500 host_to_target_data_linkinfo_nlattr
);
2501 case QEMU_IFLA_AF_SPEC
:
2502 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2504 host_to_target_data_spec_nlattr
);
2506 gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2512 static abi_long
host_to_target_data_addr_rtattr(struct rtattr
*rtattr
)
2515 struct ifa_cacheinfo
*ci
;
2517 switch (rtattr
->rta_type
) {
2518 /* binary: depends on family type */
2528 u32
= RTA_DATA(rtattr
);
2529 *u32
= tswap32(*u32
);
2531 /* struct ifa_cacheinfo */
2533 ci
= RTA_DATA(rtattr
);
2534 ci
->ifa_prefered
= tswap32(ci
->ifa_prefered
);
2535 ci
->ifa_valid
= tswap32(ci
->ifa_valid
);
2536 ci
->cstamp
= tswap32(ci
->cstamp
);
2537 ci
->tstamp
= tswap32(ci
->tstamp
);
2540 gemu_log("Unknown host IFA type: %d\n", rtattr
->rta_type
);
2546 static abi_long
host_to_target_data_route_rtattr(struct rtattr
*rtattr
)
2549 switch (rtattr
->rta_type
) {
2550 /* binary: depends on family type */
2559 u32
= RTA_DATA(rtattr
);
2560 *u32
= tswap32(*u32
);
2563 gemu_log("Unknown host RTA type: %d\n", rtattr
->rta_type
);
2569 static abi_long
host_to_target_link_rtattr(struct rtattr
*rtattr
,
2570 uint32_t rtattr_len
)
2572 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2573 host_to_target_data_link_rtattr
);
2576 static abi_long
host_to_target_addr_rtattr(struct rtattr
*rtattr
,
2577 uint32_t rtattr_len
)
2579 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2580 host_to_target_data_addr_rtattr
);
2583 static abi_long
host_to_target_route_rtattr(struct rtattr
*rtattr
,
2584 uint32_t rtattr_len
)
2586 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2587 host_to_target_data_route_rtattr
);
2590 static abi_long
host_to_target_data_route(struct nlmsghdr
*nlh
)
2593 struct ifinfomsg
*ifi
;
2594 struct ifaddrmsg
*ifa
;
2597 nlmsg_len
= nlh
->nlmsg_len
;
2598 switch (nlh
->nlmsg_type
) {
2602 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2603 ifi
= NLMSG_DATA(nlh
);
2604 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2605 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2606 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2607 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2608 host_to_target_link_rtattr(IFLA_RTA(ifi
),
2609 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifi
)));
2615 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2616 ifa
= NLMSG_DATA(nlh
);
2617 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2618 host_to_target_addr_rtattr(IFA_RTA(ifa
),
2619 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifa
)));
2625 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2626 rtm
= NLMSG_DATA(nlh
);
2627 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2628 host_to_target_route_rtattr(RTM_RTA(rtm
),
2629 nlmsg_len
- NLMSG_LENGTH(sizeof(*rtm
)));
2633 return -TARGET_EINVAL
;
2638 static inline abi_long
host_to_target_nlmsg_route(struct nlmsghdr
*nlh
,
2641 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_route
);
2644 static abi_long
target_to_host_for_each_rtattr(struct rtattr
*rtattr
,
2646 abi_long (*target_to_host_rtattr
)
2651 while (len
>= sizeof(struct rtattr
)) {
2652 if (tswap16(rtattr
->rta_len
) < sizeof(struct rtattr
) ||
2653 tswap16(rtattr
->rta_len
) > len
) {
2656 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
2657 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
2658 ret
= target_to_host_rtattr(rtattr
);
2662 len
-= RTA_ALIGN(rtattr
->rta_len
);
2663 rtattr
= (struct rtattr
*)(((char *)rtattr
) +
2664 RTA_ALIGN(rtattr
->rta_len
));
2669 static abi_long
target_to_host_data_link_rtattr(struct rtattr
*rtattr
)
2671 switch (rtattr
->rta_type
) {
2673 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2679 static abi_long
target_to_host_data_addr_rtattr(struct rtattr
*rtattr
)
2681 switch (rtattr
->rta_type
) {
2682 /* binary: depends on family type */
2687 gemu_log("Unknown target IFA type: %d\n", rtattr
->rta_type
);
2693 static abi_long
target_to_host_data_route_rtattr(struct rtattr
*rtattr
)
2696 switch (rtattr
->rta_type
) {
2697 /* binary: depends on family type */
2705 u32
= RTA_DATA(rtattr
);
2706 *u32
= tswap32(*u32
);
2709 gemu_log("Unknown target RTA type: %d\n", rtattr
->rta_type
);
2715 static void target_to_host_link_rtattr(struct rtattr
*rtattr
,
2716 uint32_t rtattr_len
)
2718 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2719 target_to_host_data_link_rtattr
);
2722 static void target_to_host_addr_rtattr(struct rtattr
*rtattr
,
2723 uint32_t rtattr_len
)
2725 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2726 target_to_host_data_addr_rtattr
);
2729 static void target_to_host_route_rtattr(struct rtattr
*rtattr
,
2730 uint32_t rtattr_len
)
2732 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2733 target_to_host_data_route_rtattr
);
2736 static abi_long
target_to_host_data_route(struct nlmsghdr
*nlh
)
2738 struct ifinfomsg
*ifi
;
2739 struct ifaddrmsg
*ifa
;
2742 switch (nlh
->nlmsg_type
) {
2747 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2748 ifi
= NLMSG_DATA(nlh
);
2749 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2750 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2751 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2752 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2753 target_to_host_link_rtattr(IFLA_RTA(ifi
), nlh
->nlmsg_len
-
2754 NLMSG_LENGTH(sizeof(*ifi
)));
2760 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2761 ifa
= NLMSG_DATA(nlh
);
2762 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2763 target_to_host_addr_rtattr(IFA_RTA(ifa
), nlh
->nlmsg_len
-
2764 NLMSG_LENGTH(sizeof(*ifa
)));
2771 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2772 rtm
= NLMSG_DATA(nlh
);
2773 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2774 target_to_host_route_rtattr(RTM_RTA(rtm
), nlh
->nlmsg_len
-
2775 NLMSG_LENGTH(sizeof(*rtm
)));
2779 return -TARGET_EOPNOTSUPP
;
2784 static abi_long
target_to_host_nlmsg_route(struct nlmsghdr
*nlh
, size_t len
)
2786 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_route
);
2788 #endif /* CONFIG_RTNETLINK */
2790 static abi_long
host_to_target_data_audit(struct nlmsghdr
*nlh
)
2792 switch (nlh
->nlmsg_type
) {
2794 gemu_log("Unknown host audit message type %d\n",
2796 return -TARGET_EINVAL
;
2801 static inline abi_long
host_to_target_nlmsg_audit(struct nlmsghdr
*nlh
,
2804 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_audit
);
2807 static abi_long
target_to_host_data_audit(struct nlmsghdr
*nlh
)
2809 switch (nlh
->nlmsg_type
) {
2811 case AUDIT_FIRST_USER_MSG
... AUDIT_LAST_USER_MSG
:
2812 case AUDIT_FIRST_USER_MSG2
... AUDIT_LAST_USER_MSG2
:
2815 gemu_log("Unknown target audit message type %d\n",
2817 return -TARGET_EINVAL
;
2823 static abi_long
target_to_host_nlmsg_audit(struct nlmsghdr
*nlh
, size_t len
)
2825 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_audit
);
2828 /* do_setsockopt() Must return target values and target errnos. */
2829 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2830 abi_ulong optval_addr
, socklen_t optlen
)
2834 struct ip_mreqn
*ip_mreq
;
2835 struct ip_mreq_source
*ip_mreq_source
;
2839 /* TCP options all take an 'int' value. */
2840 if (optlen
< sizeof(uint32_t))
2841 return -TARGET_EINVAL
;
2843 if (get_user_u32(val
, optval_addr
))
2844 return -TARGET_EFAULT
;
2845 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2852 case IP_ROUTER_ALERT
:
2856 case IP_MTU_DISCOVER
:
2863 case IP_MULTICAST_TTL
:
2864 case IP_MULTICAST_LOOP
:
2866 if (optlen
>= sizeof(uint32_t)) {
2867 if (get_user_u32(val
, optval_addr
))
2868 return -TARGET_EFAULT
;
2869 } else if (optlen
>= 1) {
2870 if (get_user_u8(val
, optval_addr
))
2871 return -TARGET_EFAULT
;
2873 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2875 case IP_ADD_MEMBERSHIP
:
2876 case IP_DROP_MEMBERSHIP
:
2877 if (optlen
< sizeof (struct target_ip_mreq
) ||
2878 optlen
> sizeof (struct target_ip_mreqn
))
2879 return -TARGET_EINVAL
;
2881 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2882 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2883 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2886 case IP_BLOCK_SOURCE
:
2887 case IP_UNBLOCK_SOURCE
:
2888 case IP_ADD_SOURCE_MEMBERSHIP
:
2889 case IP_DROP_SOURCE_MEMBERSHIP
:
2890 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2891 return -TARGET_EINVAL
;
2893 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2894 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2895 unlock_user (ip_mreq_source
, optval_addr
, 0);
2904 case IPV6_MTU_DISCOVER
:
2907 case IPV6_RECVPKTINFO
:
2908 case IPV6_UNICAST_HOPS
:
2910 case IPV6_RECVHOPLIMIT
:
2911 case IPV6_2292HOPLIMIT
:
2914 if (optlen
< sizeof(uint32_t)) {
2915 return -TARGET_EINVAL
;
2917 if (get_user_u32(val
, optval_addr
)) {
2918 return -TARGET_EFAULT
;
2920 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2921 &val
, sizeof(val
)));
2925 struct in6_pktinfo pki
;
2927 if (optlen
< sizeof(pki
)) {
2928 return -TARGET_EINVAL
;
2931 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
2932 return -TARGET_EFAULT
;
2935 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
2937 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2938 &pki
, sizeof(pki
)));
2949 struct icmp6_filter icmp6f
;
2951 if (optlen
> sizeof(icmp6f
)) {
2952 optlen
= sizeof(icmp6f
);
2955 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
2956 return -TARGET_EFAULT
;
2959 for (val
= 0; val
< 8; val
++) {
2960 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
2963 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2975 /* those take an u32 value */
2976 if (optlen
< sizeof(uint32_t)) {
2977 return -TARGET_EINVAL
;
2980 if (get_user_u32(val
, optval_addr
)) {
2981 return -TARGET_EFAULT
;
2983 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2984 &val
, sizeof(val
)));
2991 case TARGET_SOL_SOCKET
:
2993 case TARGET_SO_RCVTIMEO
:
2997 optname
= SO_RCVTIMEO
;
3000 if (optlen
!= sizeof(struct target_timeval
)) {
3001 return -TARGET_EINVAL
;
3004 if (copy_from_user_timeval(&tv
, optval_addr
)) {
3005 return -TARGET_EFAULT
;
3008 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
3012 case TARGET_SO_SNDTIMEO
:
3013 optname
= SO_SNDTIMEO
;
3015 case TARGET_SO_ATTACH_FILTER
:
3017 struct target_sock_fprog
*tfprog
;
3018 struct target_sock_filter
*tfilter
;
3019 struct sock_fprog fprog
;
3020 struct sock_filter
*filter
;
3023 if (optlen
!= sizeof(*tfprog
)) {
3024 return -TARGET_EINVAL
;
3026 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
3027 return -TARGET_EFAULT
;
3029 if (!lock_user_struct(VERIFY_READ
, tfilter
,
3030 tswapal(tfprog
->filter
), 0)) {
3031 unlock_user_struct(tfprog
, optval_addr
, 1);
3032 return -TARGET_EFAULT
;
3035 fprog
.len
= tswap16(tfprog
->len
);
3036 filter
= g_try_new(struct sock_filter
, fprog
.len
);
3037 if (filter
== NULL
) {
3038 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
3039 unlock_user_struct(tfprog
, optval_addr
, 1);
3040 return -TARGET_ENOMEM
;
3042 for (i
= 0; i
< fprog
.len
; i
++) {
3043 filter
[i
].code
= tswap16(tfilter
[i
].code
);
3044 filter
[i
].jt
= tfilter
[i
].jt
;
3045 filter
[i
].jf
= tfilter
[i
].jf
;
3046 filter
[i
].k
= tswap32(tfilter
[i
].k
);
3048 fprog
.filter
= filter
;
3050 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
3051 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
3054 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
3055 unlock_user_struct(tfprog
, optval_addr
, 1);
3058 case TARGET_SO_BINDTODEVICE
:
3060 char *dev_ifname
, *addr_ifname
;
3062 if (optlen
> IFNAMSIZ
- 1) {
3063 optlen
= IFNAMSIZ
- 1;
3065 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
3067 return -TARGET_EFAULT
;
3069 optname
= SO_BINDTODEVICE
;
3070 addr_ifname
= alloca(IFNAMSIZ
);
3071 memcpy(addr_ifname
, dev_ifname
, optlen
);
3072 addr_ifname
[optlen
] = 0;
3073 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
3074 addr_ifname
, optlen
));
3075 unlock_user (dev_ifname
, optval_addr
, 0);
3078 /* Options with 'int' argument. */
3079 case TARGET_SO_DEBUG
:
3082 case TARGET_SO_REUSEADDR
:
3083 optname
= SO_REUSEADDR
;
3085 case TARGET_SO_TYPE
:
3088 case TARGET_SO_ERROR
:
3091 case TARGET_SO_DONTROUTE
:
3092 optname
= SO_DONTROUTE
;
3094 case TARGET_SO_BROADCAST
:
3095 optname
= SO_BROADCAST
;
3097 case TARGET_SO_SNDBUF
:
3098 optname
= SO_SNDBUF
;
3100 case TARGET_SO_SNDBUFFORCE
:
3101 optname
= SO_SNDBUFFORCE
;
3103 case TARGET_SO_RCVBUF
:
3104 optname
= SO_RCVBUF
;
3106 case TARGET_SO_RCVBUFFORCE
:
3107 optname
= SO_RCVBUFFORCE
;
3109 case TARGET_SO_KEEPALIVE
:
3110 optname
= SO_KEEPALIVE
;
3112 case TARGET_SO_OOBINLINE
:
3113 optname
= SO_OOBINLINE
;
3115 case TARGET_SO_NO_CHECK
:
3116 optname
= SO_NO_CHECK
;
3118 case TARGET_SO_PRIORITY
:
3119 optname
= SO_PRIORITY
;
3122 case TARGET_SO_BSDCOMPAT
:
3123 optname
= SO_BSDCOMPAT
;
3126 case TARGET_SO_PASSCRED
:
3127 optname
= SO_PASSCRED
;
3129 case TARGET_SO_PASSSEC
:
3130 optname
= SO_PASSSEC
;
3132 case TARGET_SO_TIMESTAMP
:
3133 optname
= SO_TIMESTAMP
;
3135 case TARGET_SO_RCVLOWAT
:
3136 optname
= SO_RCVLOWAT
;
3141 if (optlen
< sizeof(uint32_t))
3142 return -TARGET_EINVAL
;
3144 if (get_user_u32(val
, optval_addr
))
3145 return -TARGET_EFAULT
;
3146 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
3150 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
3151 ret
= -TARGET_ENOPROTOOPT
;
3156 /* do_getsockopt() Must return target values and target errnos. */
3157 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
3158 abi_ulong optval_addr
, abi_ulong optlen
)
3165 case TARGET_SOL_SOCKET
:
3168 /* These don't just return a single integer */
3169 case TARGET_SO_LINGER
:
3170 case TARGET_SO_RCVTIMEO
:
3171 case TARGET_SO_SNDTIMEO
:
3172 case TARGET_SO_PEERNAME
:
3174 case TARGET_SO_PEERCRED
: {
3177 struct target_ucred
*tcr
;
3179 if (get_user_u32(len
, optlen
)) {
3180 return -TARGET_EFAULT
;
3183 return -TARGET_EINVAL
;
3187 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
3195 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
3196 return -TARGET_EFAULT
;
3198 __put_user(cr
.pid
, &tcr
->pid
);
3199 __put_user(cr
.uid
, &tcr
->uid
);
3200 __put_user(cr
.gid
, &tcr
->gid
);
3201 unlock_user_struct(tcr
, optval_addr
, 1);
3202 if (put_user_u32(len
, optlen
)) {
3203 return -TARGET_EFAULT
;
3207 /* Options with 'int' argument. */
3208 case TARGET_SO_DEBUG
:
3211 case TARGET_SO_REUSEADDR
:
3212 optname
= SO_REUSEADDR
;
3214 case TARGET_SO_TYPE
:
3217 case TARGET_SO_ERROR
:
3220 case TARGET_SO_DONTROUTE
:
3221 optname
= SO_DONTROUTE
;
3223 case TARGET_SO_BROADCAST
:
3224 optname
= SO_BROADCAST
;
3226 case TARGET_SO_SNDBUF
:
3227 optname
= SO_SNDBUF
;
3229 case TARGET_SO_RCVBUF
:
3230 optname
= SO_RCVBUF
;
3232 case TARGET_SO_KEEPALIVE
:
3233 optname
= SO_KEEPALIVE
;
3235 case TARGET_SO_OOBINLINE
:
3236 optname
= SO_OOBINLINE
;
3238 case TARGET_SO_NO_CHECK
:
3239 optname
= SO_NO_CHECK
;
3241 case TARGET_SO_PRIORITY
:
3242 optname
= SO_PRIORITY
;
3245 case TARGET_SO_BSDCOMPAT
:
3246 optname
= SO_BSDCOMPAT
;
3249 case TARGET_SO_PASSCRED
:
3250 optname
= SO_PASSCRED
;
3252 case TARGET_SO_TIMESTAMP
:
3253 optname
= SO_TIMESTAMP
;
3255 case TARGET_SO_RCVLOWAT
:
3256 optname
= SO_RCVLOWAT
;
3258 case TARGET_SO_ACCEPTCONN
:
3259 optname
= SO_ACCEPTCONN
;
3266 /* TCP options all take an 'int' value. */
3268 if (get_user_u32(len
, optlen
))
3269 return -TARGET_EFAULT
;
3271 return -TARGET_EINVAL
;
3273 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3276 if (optname
== SO_TYPE
) {
3277 val
= host_to_target_sock_type(val
);
3282 if (put_user_u32(val
, optval_addr
))
3283 return -TARGET_EFAULT
;
3285 if (put_user_u8(val
, optval_addr
))
3286 return -TARGET_EFAULT
;
3288 if (put_user_u32(len
, optlen
))
3289 return -TARGET_EFAULT
;
3296 case IP_ROUTER_ALERT
:
3300 case IP_MTU_DISCOVER
:
3306 case IP_MULTICAST_TTL
:
3307 case IP_MULTICAST_LOOP
:
3308 if (get_user_u32(len
, optlen
))
3309 return -TARGET_EFAULT
;
3311 return -TARGET_EINVAL
;
3313 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3316 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
3318 if (put_user_u32(len
, optlen
)
3319 || put_user_u8(val
, optval_addr
))
3320 return -TARGET_EFAULT
;
3322 if (len
> sizeof(int))
3324 if (put_user_u32(len
, optlen
)
3325 || put_user_u32(val
, optval_addr
))
3326 return -TARGET_EFAULT
;
3330 ret
= -TARGET_ENOPROTOOPT
;
3336 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3338 ret
= -TARGET_EOPNOTSUPP
;
3344 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
3345 abi_ulong count
, int copy
)
3347 struct target_iovec
*target_vec
;
3349 abi_ulong total_len
, max_len
;
3352 bool bad_address
= false;
3358 if (count
> IOV_MAX
) {
3363 vec
= g_try_new0(struct iovec
, count
);
3369 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3370 count
* sizeof(struct target_iovec
), 1);
3371 if (target_vec
== NULL
) {
3376 /* ??? If host page size > target page size, this will result in a
3377 value larger than what we can actually support. */
3378 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
3381 for (i
= 0; i
< count
; i
++) {
3382 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3383 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3388 } else if (len
== 0) {
3389 /* Zero length pointer is ignored. */
3390 vec
[i
].iov_base
= 0;
3392 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
3393 /* If the first buffer pointer is bad, this is a fault. But
3394 * subsequent bad buffers will result in a partial write; this
3395 * is realized by filling the vector with null pointers and
3397 if (!vec
[i
].iov_base
) {
3408 if (len
> max_len
- total_len
) {
3409 len
= max_len
- total_len
;
3412 vec
[i
].iov_len
= len
;
3416 unlock_user(target_vec
, target_addr
, 0);
3421 if (tswapal(target_vec
[i
].iov_len
) > 0) {
3422 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
3425 unlock_user(target_vec
, target_addr
, 0);
3432 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
3433 abi_ulong count
, int copy
)
3435 struct target_iovec
*target_vec
;
3438 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3439 count
* sizeof(struct target_iovec
), 1);
3441 for (i
= 0; i
< count
; i
++) {
3442 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3443 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3447 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
3449 unlock_user(target_vec
, target_addr
, 0);
3455 static inline int target_to_host_sock_type(int *type
)
3458 int target_type
= *type
;
3460 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
3461 case TARGET_SOCK_DGRAM
:
3462 host_type
= SOCK_DGRAM
;
3464 case TARGET_SOCK_STREAM
:
3465 host_type
= SOCK_STREAM
;
3468 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
3471 if (target_type
& TARGET_SOCK_CLOEXEC
) {
3472 #if defined(SOCK_CLOEXEC)
3473 host_type
|= SOCK_CLOEXEC
;
3475 return -TARGET_EINVAL
;
3478 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3479 #if defined(SOCK_NONBLOCK)
3480 host_type
|= SOCK_NONBLOCK
;
3481 #elif !defined(O_NONBLOCK)
3482 return -TARGET_EINVAL
;
3489 /* Try to emulate socket type flags after socket creation. */
3490 static int sock_flags_fixup(int fd
, int target_type
)
3492 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3493 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3494 int flags
= fcntl(fd
, F_GETFL
);
3495 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
3497 return -TARGET_EINVAL
;
3504 static abi_long
packet_target_to_host_sockaddr(void *host_addr
,
3505 abi_ulong target_addr
,
3508 struct sockaddr
*addr
= host_addr
;
3509 struct target_sockaddr
*target_saddr
;
3511 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
3512 if (!target_saddr
) {
3513 return -TARGET_EFAULT
;
3516 memcpy(addr
, target_saddr
, len
);
3517 addr
->sa_family
= tswap16(target_saddr
->sa_family
);
3518 /* spkt_protocol is big-endian */
3520 unlock_user(target_saddr
, target_addr
, 0);
3524 static TargetFdTrans target_packet_trans
= {
3525 .target_to_host_addr
= packet_target_to_host_sockaddr
,
3528 #ifdef CONFIG_RTNETLINK
3529 static abi_long
netlink_route_target_to_host(void *buf
, size_t len
)
3533 ret
= target_to_host_nlmsg_route(buf
, len
);
3541 static abi_long
netlink_route_host_to_target(void *buf
, size_t len
)
3545 ret
= host_to_target_nlmsg_route(buf
, len
);
3553 static TargetFdTrans target_netlink_route_trans
= {
3554 .target_to_host_data
= netlink_route_target_to_host
,
3555 .host_to_target_data
= netlink_route_host_to_target
,
3557 #endif /* CONFIG_RTNETLINK */
3559 static abi_long
netlink_audit_target_to_host(void *buf
, size_t len
)
3563 ret
= target_to_host_nlmsg_audit(buf
, len
);
3571 static abi_long
netlink_audit_host_to_target(void *buf
, size_t len
)
3575 ret
= host_to_target_nlmsg_audit(buf
, len
);
3583 static TargetFdTrans target_netlink_audit_trans
= {
3584 .target_to_host_data
= netlink_audit_target_to_host
,
3585 .host_to_target_data
= netlink_audit_host_to_target
,
3588 /* do_socket() Must return target values and target errnos. */
3589 static abi_long
do_socket(int domain
, int type
, int protocol
)
3591 int target_type
= type
;
3594 ret
= target_to_host_sock_type(&type
);
3599 if (domain
== PF_NETLINK
&& !(
3600 #ifdef CONFIG_RTNETLINK
3601 protocol
== NETLINK_ROUTE
||
3603 protocol
== NETLINK_KOBJECT_UEVENT
||
3604 protocol
== NETLINK_AUDIT
)) {
3605 return -EPFNOSUPPORT
;
3608 if (domain
== AF_PACKET
||
3609 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3610 protocol
= tswap16(protocol
);
3613 ret
= get_errno(socket(domain
, type
, protocol
));
3615 ret
= sock_flags_fixup(ret
, target_type
);
3616 if (type
== SOCK_PACKET
) {
3617 /* Manage an obsolete case :
3618 * if socket type is SOCK_PACKET, bind by name
3620 fd_trans_register(ret
, &target_packet_trans
);
3621 } else if (domain
== PF_NETLINK
) {
3623 #ifdef CONFIG_RTNETLINK
3625 fd_trans_register(ret
, &target_netlink_route_trans
);
3628 case NETLINK_KOBJECT_UEVENT
:
3629 /* nothing to do: messages are strings */
3632 fd_trans_register(ret
, &target_netlink_audit_trans
);
3635 g_assert_not_reached();
3642 /* do_bind() Must return target values and target errnos. */
3643 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3649 if ((int)addrlen
< 0) {
3650 return -TARGET_EINVAL
;
3653 addr
= alloca(addrlen
+1);
3655 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3659 return get_errno(bind(sockfd
, addr
, addrlen
));
3662 /* do_connect() Must return target values and target errnos. */
3663 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3669 if ((int)addrlen
< 0) {
3670 return -TARGET_EINVAL
;
3673 addr
= alloca(addrlen
+1);
3675 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3679 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3682 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3683 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3684 int flags
, int send
)
3690 abi_ulong target_vec
;
3692 if (msgp
->msg_name
) {
3693 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3694 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3695 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3696 tswapal(msgp
->msg_name
),
3698 if (ret
== -TARGET_EFAULT
) {
3699 /* For connected sockets msg_name and msg_namelen must
3700 * be ignored, so returning EFAULT immediately is wrong.
3701 * Instead, pass a bad msg_name to the host kernel, and
3702 * let it decide whether to return EFAULT or not.
3704 msg
.msg_name
= (void *)-1;
3709 msg
.msg_name
= NULL
;
3710 msg
.msg_namelen
= 0;
3712 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3713 msg
.msg_control
= alloca(msg
.msg_controllen
);
3714 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3716 count
= tswapal(msgp
->msg_iovlen
);
3717 target_vec
= tswapal(msgp
->msg_iov
);
3719 if (count
> IOV_MAX
) {
3720 /* sendrcvmsg returns a different errno for this condition than
3721 * readv/writev, so we must catch it here before lock_iovec() does.
3723 ret
= -TARGET_EMSGSIZE
;
3727 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3728 target_vec
, count
, send
);
3730 ret
= -host_to_target_errno(errno
);
3733 msg
.msg_iovlen
= count
;
3737 if (fd_trans_target_to_host_data(fd
)) {
3740 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3741 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3742 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3743 msg
.msg_iov
->iov_len
);
3745 msg
.msg_iov
->iov_base
= host_msg
;
3746 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3750 ret
= target_to_host_cmsg(&msg
, msgp
);
3752 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3756 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3757 if (!is_error(ret
)) {
3759 if (fd_trans_host_to_target_data(fd
)) {
3760 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3763 ret
= host_to_target_cmsg(msgp
, &msg
);
3765 if (!is_error(ret
)) {
3766 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3767 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3768 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3769 msg
.msg_name
, msg
.msg_namelen
);
3781 unlock_iovec(vec
, target_vec
, count
, !send
);
3786 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3787 int flags
, int send
)
3790 struct target_msghdr
*msgp
;
3792 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3796 return -TARGET_EFAULT
;
3798 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3799 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3803 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3804 * so it might not have this *mmsg-specific flag either.
3806 #ifndef MSG_WAITFORONE
3807 #define MSG_WAITFORONE 0x10000
3810 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3811 unsigned int vlen
, unsigned int flags
,
3814 struct target_mmsghdr
*mmsgp
;
3818 if (vlen
> UIO_MAXIOV
) {
3822 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3824 return -TARGET_EFAULT
;
3827 for (i
= 0; i
< vlen
; i
++) {
3828 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3829 if (is_error(ret
)) {
3832 mmsgp
[i
].msg_len
= tswap32(ret
);
3833 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3834 if (flags
& MSG_WAITFORONE
) {
3835 flags
|= MSG_DONTWAIT
;
3839 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3841 /* Return number of datagrams sent if we sent any at all;
3842 * otherwise return the error.
3850 /* do_accept4() Must return target values and target errnos. */
3851 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3852 abi_ulong target_addrlen_addr
, int flags
)
3859 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3861 if (target_addr
== 0) {
3862 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3865 /* linux returns EINVAL if addrlen pointer is invalid */
3866 if (get_user_u32(addrlen
, target_addrlen_addr
))
3867 return -TARGET_EINVAL
;
3869 if ((int)addrlen
< 0) {
3870 return -TARGET_EINVAL
;
3873 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3874 return -TARGET_EINVAL
;
3876 addr
= alloca(addrlen
);
3878 ret
= get_errno(safe_accept4(fd
, addr
, &addrlen
, host_flags
));
3879 if (!is_error(ret
)) {
3880 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3881 if (put_user_u32(addrlen
, target_addrlen_addr
))
3882 ret
= -TARGET_EFAULT
;
3887 /* do_getpeername() Must return target values and target errnos. */
3888 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3889 abi_ulong target_addrlen_addr
)
3895 if (get_user_u32(addrlen
, target_addrlen_addr
))
3896 return -TARGET_EFAULT
;
3898 if ((int)addrlen
< 0) {
3899 return -TARGET_EINVAL
;
3902 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3903 return -TARGET_EFAULT
;
3905 addr
= alloca(addrlen
);
3907 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
3908 if (!is_error(ret
)) {
3909 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3910 if (put_user_u32(addrlen
, target_addrlen_addr
))
3911 ret
= -TARGET_EFAULT
;
3916 /* do_getsockname() Must return target values and target errnos. */
3917 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3918 abi_ulong target_addrlen_addr
)
3924 if (get_user_u32(addrlen
, target_addrlen_addr
))
3925 return -TARGET_EFAULT
;
3927 if ((int)addrlen
< 0) {
3928 return -TARGET_EINVAL
;
3931 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3932 return -TARGET_EFAULT
;
3934 addr
= alloca(addrlen
);
3936 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
3937 if (!is_error(ret
)) {
3938 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3939 if (put_user_u32(addrlen
, target_addrlen_addr
))
3940 ret
= -TARGET_EFAULT
;
3945 /* do_socketpair() Must return target values and target errnos. */
3946 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3947 abi_ulong target_tab_addr
)
3952 target_to_host_sock_type(&type
);
3954 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3955 if (!is_error(ret
)) {
3956 if (put_user_s32(tab
[0], target_tab_addr
)
3957 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3958 ret
= -TARGET_EFAULT
;
3963 /* do_sendto() Must return target values and target errnos. */
3964 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3965 abi_ulong target_addr
, socklen_t addrlen
)
3969 void *copy_msg
= NULL
;
3972 if ((int)addrlen
< 0) {
3973 return -TARGET_EINVAL
;
3976 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3978 return -TARGET_EFAULT
;
3979 if (fd_trans_target_to_host_data(fd
)) {
3980 copy_msg
= host_msg
;
3981 host_msg
= g_malloc(len
);
3982 memcpy(host_msg
, copy_msg
, len
);
3983 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3989 addr
= alloca(addrlen
+1);
3990 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3994 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3996 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
4001 host_msg
= copy_msg
;
4003 unlock_user(host_msg
, msg
, 0);
4007 /* do_recvfrom() Must return target values and target errnos. */
4008 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
4009 abi_ulong target_addr
,
4010 abi_ulong target_addrlen
)
4017 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
4019 return -TARGET_EFAULT
;
4021 if (get_user_u32(addrlen
, target_addrlen
)) {
4022 ret
= -TARGET_EFAULT
;
4025 if ((int)addrlen
< 0) {
4026 ret
= -TARGET_EINVAL
;
4029 addr
= alloca(addrlen
);
4030 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
4033 addr
= NULL
; /* To keep compiler quiet. */
4034 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
4036 if (!is_error(ret
)) {
4037 if (fd_trans_host_to_target_data(fd
)) {
4038 ret
= fd_trans_host_to_target_data(fd
)(host_msg
, ret
);
4041 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
4042 if (put_user_u32(addrlen
, target_addrlen
)) {
4043 ret
= -TARGET_EFAULT
;
4047 unlock_user(host_msg
, msg
, len
);
4050 unlock_user(host_msg
, msg
, 0);
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
4148 #define N_SHM_REGIONS 32
4150 static struct shm_region
{
4154 } shm_regions
[N_SHM_REGIONS
];
4156 #ifndef TARGET_SEMID64_DS
4157 /* asm-generic version of this struct */
4158 struct target_semid64_ds
4160 struct target_ipc_perm sem_perm
;
4161 abi_ulong sem_otime
;
4162 #if TARGET_ABI_BITS == 32
4163 abi_ulong __unused1
;
4165 abi_ulong sem_ctime
;
4166 #if TARGET_ABI_BITS == 32
4167 abi_ulong __unused2
;
4169 abi_ulong sem_nsems
;
4170 abi_ulong __unused3
;
4171 abi_ulong __unused4
;
4175 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
4176 abi_ulong target_addr
)
4178 struct target_ipc_perm
*target_ip
;
4179 struct target_semid64_ds
*target_sd
;
4181 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4182 return -TARGET_EFAULT
;
4183 target_ip
= &(target_sd
->sem_perm
);
4184 host_ip
->__key
= tswap32(target_ip
->__key
);
4185 host_ip
->uid
= tswap32(target_ip
->uid
);
4186 host_ip
->gid
= tswap32(target_ip
->gid
);
4187 host_ip
->cuid
= tswap32(target_ip
->cuid
);
4188 host_ip
->cgid
= tswap32(target_ip
->cgid
);
4189 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4190 host_ip
->mode
= tswap32(target_ip
->mode
);
4192 host_ip
->mode
= tswap16(target_ip
->mode
);
4194 #if defined(TARGET_PPC)
4195 host_ip
->__seq
= tswap32(target_ip
->__seq
);
4197 host_ip
->__seq
= tswap16(target_ip
->__seq
);
4199 unlock_user_struct(target_sd
, target_addr
, 0);
4203 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
4204 struct ipc_perm
*host_ip
)
4206 struct target_ipc_perm
*target_ip
;
4207 struct target_semid64_ds
*target_sd
;
4209 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4210 return -TARGET_EFAULT
;
4211 target_ip
= &(target_sd
->sem_perm
);
4212 target_ip
->__key
= tswap32(host_ip
->__key
);
4213 target_ip
->uid
= tswap32(host_ip
->uid
);
4214 target_ip
->gid
= tswap32(host_ip
->gid
);
4215 target_ip
->cuid
= tswap32(host_ip
->cuid
);
4216 target_ip
->cgid
= tswap32(host_ip
->cgid
);
4217 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4218 target_ip
->mode
= tswap32(host_ip
->mode
);
4220 target_ip
->mode
= tswap16(host_ip
->mode
);
4222 #if defined(TARGET_PPC)
4223 target_ip
->__seq
= tswap32(host_ip
->__seq
);
4225 target_ip
->__seq
= tswap16(host_ip
->__seq
);
4227 unlock_user_struct(target_sd
, target_addr
, 1);
4231 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
4232 abi_ulong target_addr
)
4234 struct target_semid64_ds
*target_sd
;
4236 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4237 return -TARGET_EFAULT
;
4238 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
4239 return -TARGET_EFAULT
;
4240 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
4241 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
4242 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
4243 unlock_user_struct(target_sd
, target_addr
, 0);
4247 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
4248 struct semid_ds
*host_sd
)
4250 struct target_semid64_ds
*target_sd
;
4252 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4253 return -TARGET_EFAULT
;
4254 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
4255 return -TARGET_EFAULT
;
4256 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
4257 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
4258 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
4259 unlock_user_struct(target_sd
, target_addr
, 1);
/* Guest-layout seminfo; field set matches host struct seminfo
 * (see host_to_target_seminfo() below).
 */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
4276 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
4277 struct seminfo
*host_seminfo
)
4279 struct target_seminfo
*target_seminfo
;
4280 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
4281 return -TARGET_EFAULT
;
4282 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
4283 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
4284 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
4285 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
4286 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
4287 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
4288 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
4289 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
4290 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
4291 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
4292 unlock_user_struct(target_seminfo
, target_addr
, 1);
4298 struct semid_ds
*buf
;
4299 unsigned short *array
;
4300 struct seminfo
*__buf
;
4303 union target_semun
{
4310 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
4311 abi_ulong target_addr
)
4314 unsigned short *array
;
4316 struct semid_ds semid_ds
;
4319 semun
.buf
= &semid_ds
;
4321 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4323 return get_errno(ret
);
4325 nsems
= semid_ds
.sem_nsems
;
4327 *host_array
= g_try_new(unsigned short, nsems
);
4329 return -TARGET_ENOMEM
;
4331 array
= lock_user(VERIFY_READ
, target_addr
,
4332 nsems
*sizeof(unsigned short), 1);
4334 g_free(*host_array
);
4335 return -TARGET_EFAULT
;
4338 for(i
=0; i
<nsems
; i
++) {
4339 __get_user((*host_array
)[i
], &array
[i
]);
4341 unlock_user(array
, target_addr
, 0);
4346 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
4347 unsigned short **host_array
)
4350 unsigned short *array
;
4352 struct semid_ds semid_ds
;
4355 semun
.buf
= &semid_ds
;
4357 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4359 return get_errno(ret
);
4361 nsems
= semid_ds
.sem_nsems
;
4363 array
= lock_user(VERIFY_WRITE
, target_addr
,
4364 nsems
*sizeof(unsigned short), 0);
4366 return -TARGET_EFAULT
;
4368 for(i
=0; i
<nsems
; i
++) {
4369 __put_user((*host_array
)[i
], &array
[i
]);
4371 g_free(*host_array
);
4372 unlock_user(array
, target_addr
, 1);
4377 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
4378 abi_ulong target_arg
)
4380 union target_semun target_su
= { .buf
= target_arg
};
4382 struct semid_ds dsarg
;
4383 unsigned short *array
= NULL
;
4384 struct seminfo seminfo
;
4385 abi_long ret
= -TARGET_EINVAL
;
4392 /* In 64 bit cross-endian situations, we will erroneously pick up
4393 * the wrong half of the union for the "val" element. To rectify
4394 * this, the entire 8-byte structure is byteswapped, followed by
4395 * a swap of the 4 byte val field. In other cases, the data is
4396 * already in proper host byte order. */
4397 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
4398 target_su
.buf
= tswapal(target_su
.buf
);
4399 arg
.val
= tswap32(target_su
.val
);
4401 arg
.val
= target_su
.val
;
4403 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4407 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
4411 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4412 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
4419 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
4423 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4424 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
4430 arg
.__buf
= &seminfo
;
4431 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4432 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
4440 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
/* Guest-layout sembuf, element type of the semop(2) operation array. */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
4453 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
4454 abi_ulong target_addr
,
4457 struct target_sembuf
*target_sembuf
;
4460 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
4461 nsops
*sizeof(struct target_sembuf
), 1);
4463 return -TARGET_EFAULT
;
4465 for(i
=0; i
<nsops
; i
++) {
4466 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
4467 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
4468 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
4471 unlock_user(target_sembuf
, target_addr
, 0);
4476 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
4478 struct sembuf sops
[nsops
];
4480 if (target_to_host_sembuf(sops
, ptr
, nsops
))
4481 return -TARGET_EFAULT
;
4483 return get_errno(safe_semtimedop(semid
, sops
, nsops
, NULL
));
4486 struct target_msqid_ds
4488 struct target_ipc_perm msg_perm
;
4489 abi_ulong msg_stime
;
4490 #if TARGET_ABI_BITS == 32
4491 abi_ulong __unused1
;
4493 abi_ulong msg_rtime
;
4494 #if TARGET_ABI_BITS == 32
4495 abi_ulong __unused2
;
4497 abi_ulong msg_ctime
;
4498 #if TARGET_ABI_BITS == 32
4499 abi_ulong __unused3
;
4501 abi_ulong __msg_cbytes
;
4503 abi_ulong msg_qbytes
;
4504 abi_ulong msg_lspid
;
4505 abi_ulong msg_lrpid
;
4506 abi_ulong __unused4
;
4507 abi_ulong __unused5
;
4510 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
4511 abi_ulong target_addr
)
4513 struct target_msqid_ds
*target_md
;
4515 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
4516 return -TARGET_EFAULT
;
4517 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
4518 return -TARGET_EFAULT
;
4519 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
4520 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
4521 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
4522 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
4523 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
4524 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
4525 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
4526 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
4527 unlock_user_struct(target_md
, target_addr
, 0);
4531 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
4532 struct msqid_ds
*host_md
)
4534 struct target_msqid_ds
*target_md
;
4536 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
4537 return -TARGET_EFAULT
;
4538 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
4539 return -TARGET_EFAULT
;
4540 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
4541 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
4542 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
4543 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
4544 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
4545 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
4546 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
4547 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
4548 unlock_user_struct(target_md
, target_addr
, 1);
/* Guest-layout msginfo (IPC_INFO/MSG_INFO result). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4563 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4564 struct msginfo
*host_msginfo
)
4566 struct target_msginfo
*target_msginfo
;
4567 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4568 return -TARGET_EFAULT
;
4569 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4570 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4571 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4572 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4573 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4574 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4575 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4576 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4577 unlock_user_struct(target_msginfo
, target_addr
, 1);
4581 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4583 struct msqid_ds dsarg
;
4584 struct msginfo msginfo
;
4585 abi_long ret
= -TARGET_EINVAL
;
4593 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4594 return -TARGET_EFAULT
;
4595 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4596 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4597 return -TARGET_EFAULT
;
4600 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4604 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4605 if (host_to_target_msginfo(ptr
, &msginfo
))
4606 return -TARGET_EFAULT
;
4613 struct target_msgbuf
{
4618 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4619 ssize_t msgsz
, int msgflg
)
4621 struct target_msgbuf
*target_mb
;
4622 struct msgbuf
*host_mb
;
4626 return -TARGET_EINVAL
;
4629 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4630 return -TARGET_EFAULT
;
4631 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4633 unlock_user_struct(target_mb
, msgp
, 0);
4634 return -TARGET_ENOMEM
;
4636 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4637 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4638 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4640 unlock_user_struct(target_mb
, msgp
, 0);
4645 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4646 ssize_t msgsz
, abi_long msgtyp
,
4649 struct target_msgbuf
*target_mb
;
4651 struct msgbuf
*host_mb
;
4655 return -TARGET_EINVAL
;
4658 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4659 return -TARGET_EFAULT
;
4661 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4663 ret
= -TARGET_ENOMEM
;
4666 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4669 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4670 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4671 if (!target_mtext
) {
4672 ret
= -TARGET_EFAULT
;
4675 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4676 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4679 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4683 unlock_user_struct(target_mb
, msgp
, 1);
4688 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4689 abi_ulong target_addr
)
4691 struct target_shmid_ds
*target_sd
;
4693 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4694 return -TARGET_EFAULT
;
4695 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4696 return -TARGET_EFAULT
;
4697 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4698 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4699 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4700 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4701 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4702 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4703 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4704 unlock_user_struct(target_sd
, target_addr
, 0);
4708 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4709 struct shmid_ds
*host_sd
)
4711 struct target_shmid_ds
*target_sd
;
4713 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4714 return -TARGET_EFAULT
;
4715 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4716 return -TARGET_EFAULT
;
4717 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4718 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4719 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4720 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4721 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4722 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4723 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4724 unlock_user_struct(target_sd
, target_addr
, 1);
4728 struct target_shminfo
{
4736 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4737 struct shminfo
*host_shminfo
)
4739 struct target_shminfo
*target_shminfo
;
4740 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4741 return -TARGET_EFAULT
;
4742 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4743 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4744 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4745 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4746 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4747 unlock_user_struct(target_shminfo
, target_addr
, 1);
4751 struct target_shm_info
{
4756 abi_ulong swap_attempts
;
4757 abi_ulong swap_successes
;
4760 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4761 struct shm_info
*host_shm_info
)
4763 struct target_shm_info
*target_shm_info
;
4764 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4765 return -TARGET_EFAULT
;
4766 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4767 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4768 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4769 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4770 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4771 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4772 unlock_user_struct(target_shm_info
, target_addr
, 1);
4776 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4778 struct shmid_ds dsarg
;
4779 struct shminfo shminfo
;
4780 struct shm_info shm_info
;
4781 abi_long ret
= -TARGET_EINVAL
;
4789 if (target_to_host_shmid_ds(&dsarg
, buf
))
4790 return -TARGET_EFAULT
;
4791 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4792 if (host_to_target_shmid_ds(buf
, &dsarg
))
4793 return -TARGET_EFAULT
;
4796 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4797 if (host_to_target_shminfo(buf
, &shminfo
))
4798 return -TARGET_EFAULT
;
4801 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4802 if (host_to_target_shm_info(buf
, &shm_info
))
4803 return -TARGET_EFAULT
;
4808 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4815 #ifndef TARGET_FORCE_SHMLBA
4816 /* For most architectures, SHMLBA is the same as the page size;
4817 * some architectures have larger values, in which case they should
4818 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4819 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4820 * and defining its own value for SHMLBA.
4822 * The kernel also permits SHMLBA to be set by the architecture to a
4823 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4824 * this means that addresses are rounded to the large size if
4825 * SHM_RND is set but addresses not aligned to that size are not rejected
4826 * as long as they are at least page-aligned. Since the only architecture
4827 * which uses this is ia64 this code doesn't provide for that oddity.
4829 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
4831 return TARGET_PAGE_SIZE
;
4835 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
4836 int shmid
, abi_ulong shmaddr
, int shmflg
)
4840 struct shmid_ds shm_info
;
4844 /* find out the length of the shared memory segment */
4845 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4846 if (is_error(ret
)) {
4847 /* can't get length, bail out */
4851 shmlba
= target_shmlba(cpu_env
);
4853 if (shmaddr
& (shmlba
- 1)) {
4854 if (shmflg
& SHM_RND
) {
4855 shmaddr
&= ~(shmlba
- 1);
4857 return -TARGET_EINVAL
;
4864 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
4866 abi_ulong mmap_start
;
4868 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
4870 if (mmap_start
== -1) {
4872 host_raddr
= (void *)-1;
4874 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
4877 if (host_raddr
== (void *)-1) {
4879 return get_errno((long)host_raddr
);
4881 raddr
=h2g((unsigned long)host_raddr
);
4883 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4884 PAGE_VALID
| PAGE_READ
|
4885 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
4887 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4888 if (!shm_regions
[i
].in_use
) {
4889 shm_regions
[i
].in_use
= true;
4890 shm_regions
[i
].start
= raddr
;
4891 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4901 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4905 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4906 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4907 shm_regions
[i
].in_use
= false;
4908 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4913 return get_errno(shmdt(g2h(shmaddr
)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos.  */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
5023 /* kernel structure types definitions */
5025 #define STRUCT(name, ...) STRUCT_ ## name,
5026 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
5028 #include "syscall_types.h"
5032 #undef STRUCT_SPECIAL
5034 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
5035 #define STRUCT_SPECIAL(name)
5036 #include "syscall_types.h"
5038 #undef STRUCT_SPECIAL
5040 typedef struct IOCTLEntry IOCTLEntry
;
5042 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5043 int fd
, int cmd
, abi_long arg
);
5047 unsigned int host_cmd
;
5050 do_ioctl_fn
*do_ioctl
;
5051 const argtype arg_type
[5];
5054 #define IOC_R 0x0001
5055 #define IOC_W 0x0002
5056 #define IOC_RW (IOC_R | IOC_W)
5058 #define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))

/* Convert and forward FS_IOC_FIEMAP: a struct fiemap followed by an
 * array of struct fiemap_extent whose size is set in fm_extent_count.
 * The extent array is filled in by the host ioctl and converted back.
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
5149 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5150 int fd
, int cmd
, abi_long arg
)
5152 const argtype
*arg_type
= ie
->arg_type
;
5156 struct ifconf
*host_ifconf
;
5158 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
5159 int target_ifreq_size
;
5164 abi_long target_ifc_buf
;
5168 assert(arg_type
[0] == TYPE_PTR
);
5169 assert(ie
->access
== IOC_RW
);
5172 target_size
= thunk_type_size(arg_type
, 0);
5174 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5176 return -TARGET_EFAULT
;
5177 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5178 unlock_user(argptr
, arg
, 0);
5180 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
5181 target_ifc_len
= host_ifconf
->ifc_len
;
5182 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
5184 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
5185 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
5186 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
5188 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
5189 if (outbufsz
> MAX_STRUCT_SIZE
) {
5190 /* We can't fit all the extents into the fixed size buffer.
5191 * Allocate one that is large enough and use it instead.
5193 host_ifconf
= malloc(outbufsz
);
5195 return -TARGET_ENOMEM
;
5197 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
5200 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
5202 host_ifconf
->ifc_len
= host_ifc_len
;
5203 host_ifconf
->ifc_buf
= host_ifc_buf
;
5205 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
5206 if (!is_error(ret
)) {
5207 /* convert host ifc_len to target ifc_len */
5209 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
5210 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
5211 host_ifconf
->ifc_len
= target_ifc_len
;
5213 /* restore target ifc_buf */
5215 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
5217 /* copy struct ifconf to target user */
5219 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5221 return -TARGET_EFAULT
;
5222 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
5223 unlock_user(argptr
, arg
, target_size
);
5225 /* copy ifreq[] to target user */
5227 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
5228 for (i
= 0; i
< nb_ifreq
; i
++) {
5229 thunk_convert(argptr
+ i
* target_ifreq_size
,
5230 host_ifc_buf
+ i
* sizeof(struct ifreq
),
5231 ifreq_arg_type
, THUNK_TARGET
);
5233 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
5243 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5244 int cmd
, abi_long arg
)
5247 struct dm_ioctl
*host_dm
;
5248 abi_long guest_data
;
5249 uint32_t guest_data_size
;
5251 const argtype
*arg_type
= ie
->arg_type
;
5253 void *big_buf
= NULL
;
5257 target_size
= thunk_type_size(arg_type
, 0);
5258 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5260 ret
= -TARGET_EFAULT
;
5263 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5264 unlock_user(argptr
, arg
, 0);
5266 /* buf_temp is too small, so fetch things into a bigger buffer */
5267 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
5268 memcpy(big_buf
, buf_temp
, target_size
);
5272 guest_data
= arg
+ host_dm
->data_start
;
5273 if ((guest_data
- arg
) < 0) {
5274 ret
= -TARGET_EINVAL
;
5277 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5278 host_data
= (char*)host_dm
+ host_dm
->data_start
;
5280 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
5282 ret
= -TARGET_EFAULT
;
5286 switch (ie
->host_cmd
) {
5288 case DM_LIST_DEVICES
:
5291 case DM_DEV_SUSPEND
:
5294 case DM_TABLE_STATUS
:
5295 case DM_TABLE_CLEAR
:
5297 case DM_LIST_VERSIONS
:
5301 case DM_DEV_SET_GEOMETRY
:
5302 /* data contains only strings */
5303 memcpy(host_data
, argptr
, guest_data_size
);
5306 memcpy(host_data
, argptr
, guest_data_size
);
5307 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
5311 void *gspec
= argptr
;
5312 void *cur_data
= host_data
;
5313 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5314 int spec_size
= thunk_type_size(arg_type
, 0);
5317 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5318 struct dm_target_spec
*spec
= cur_data
;
5322 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
5323 slen
= strlen((char*)gspec
+ spec_size
) + 1;
5325 spec
->next
= sizeof(*spec
) + slen
;
5326 strcpy((char*)&spec
[1], gspec
+ spec_size
);
5328 cur_data
+= spec
->next
;
5333 ret
= -TARGET_EINVAL
;
5334 unlock_user(argptr
, guest_data
, 0);
5337 unlock_user(argptr
, guest_data
, 0);
5339 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5340 if (!is_error(ret
)) {
5341 guest_data
= arg
+ host_dm
->data_start
;
5342 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5343 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
5344 switch (ie
->host_cmd
) {
5349 case DM_DEV_SUSPEND
:
5352 case DM_TABLE_CLEAR
:
5354 case DM_DEV_SET_GEOMETRY
:
5355 /* no return data */
5357 case DM_LIST_DEVICES
:
5359 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
5360 uint32_t remaining_data
= guest_data_size
;
5361 void *cur_data
= argptr
;
5362 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
5363 int nl_size
= 12; /* can't use thunk_size due to alignment */
5366 uint32_t next
= nl
->next
;
5368 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
5370 if (remaining_data
< nl
->next
) {
5371 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5374 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
5375 strcpy(cur_data
+ nl_size
, nl
->name
);
5376 cur_data
+= nl
->next
;
5377 remaining_data
-= nl
->next
;
5381 nl
= (void*)nl
+ next
;
5386 case DM_TABLE_STATUS
:
5388 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5389 void *cur_data
= argptr
;
5390 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5391 int spec_size
= thunk_type_size(arg_type
, 0);
5394 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5395 uint32_t next
= spec
->next
;
5396 int slen
= strlen((char*)&spec
[1]) + 1;
5397 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5398 if (guest_data_size
< spec
->next
) {
5399 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5402 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5403 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5404 cur_data
= argptr
+ spec
->next
;
5405 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5411 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5412 int count
= *(uint32_t*)hdata
;
5413 uint64_t *hdev
= hdata
+ 8;
5414 uint64_t *gdev
= argptr
+ 8;
5417 *(uint32_t*)argptr
= tswap32(count
);
5418 for (i
= 0; i
< count
; i
++) {
5419 *gdev
= tswap64(*hdev
);
5425 case DM_LIST_VERSIONS
:
5427 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5428 uint32_t remaining_data
= guest_data_size
;
5429 void *cur_data
= argptr
;
5430 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5431 int vers_size
= thunk_type_size(arg_type
, 0);
5434 uint32_t next
= vers
->next
;
5436 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5438 if (remaining_data
< vers
->next
) {
5439 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5442 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5443 strcpy(cur_data
+ vers_size
, vers
->name
);
5444 cur_data
+= vers
->next
;
5445 remaining_data
-= vers
->next
;
5449 vers
= (void*)vers
+ next
;
5454 unlock_user(argptr
, guest_data
, 0);
5455 ret
= -TARGET_EINVAL
;
5458 unlock_user(argptr
, guest_data
, guest_data_size
);
5460 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5462 ret
= -TARGET_EFAULT
;
5465 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5466 unlock_user(argptr
, arg
, target_size
);
5473 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5474 int cmd
, abi_long arg
)
5478 const argtype
*arg_type
= ie
->arg_type
;
5479 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5482 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5483 struct blkpg_partition host_part
;
5485 /* Read and convert blkpg */
5487 target_size
= thunk_type_size(arg_type
, 0);
5488 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5490 ret
= -TARGET_EFAULT
;
5493 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5494 unlock_user(argptr
, arg
, 0);
5496 switch (host_blkpg
->op
) {
5497 case BLKPG_ADD_PARTITION
:
5498 case BLKPG_DEL_PARTITION
:
5499 /* payload is struct blkpg_partition */
5502 /* Unknown opcode */
5503 ret
= -TARGET_EINVAL
;
5507 /* Read and convert blkpg->data */
5508 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5509 target_size
= thunk_type_size(part_arg_type
, 0);
5510 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5512 ret
= -TARGET_EFAULT
;
5515 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5516 unlock_user(argptr
, arg
, 0);
5518 /* Swizzle the data pointer to our local copy and call! */
5519 host_blkpg
->data
= &host_part
;
5520 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5526 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5527 int fd
, int cmd
, abi_long arg
)
5529 const argtype
*arg_type
= ie
->arg_type
;
5530 const StructEntry
*se
;
5531 const argtype
*field_types
;
5532 const int *dst_offsets
, *src_offsets
;
5535 abi_ulong
*target_rt_dev_ptr
;
5536 unsigned long *host_rt_dev_ptr
;
5540 assert(ie
->access
== IOC_W
);
5541 assert(*arg_type
== TYPE_PTR
);
5543 assert(*arg_type
== TYPE_STRUCT
);
5544 target_size
= thunk_type_size(arg_type
, 0);
5545 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5547 return -TARGET_EFAULT
;
5550 assert(*arg_type
== (int)STRUCT_rtentry
);
5551 se
= struct_entries
+ *arg_type
++;
5552 assert(se
->convert
[0] == NULL
);
5553 /* convert struct here to be able to catch rt_dev string */
5554 field_types
= se
->field_types
;
5555 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5556 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5557 for (i
= 0; i
< se
->nb_fields
; i
++) {
5558 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5559 assert(*field_types
== TYPE_PTRVOID
);
5560 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5561 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5562 if (*target_rt_dev_ptr
!= 0) {
5563 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5564 tswapal(*target_rt_dev_ptr
));
5565 if (!*host_rt_dev_ptr
) {
5566 unlock_user(argptr
, arg
, 0);
5567 return -TARGET_EFAULT
;
5570 *host_rt_dev_ptr
= 0;
5575 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5576 argptr
+ src_offsets
[i
],
5577 field_types
, THUNK_HOST
);
5579 unlock_user(argptr
, arg
, 0);
5581 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5582 if (*host_rt_dev_ptr
!= 0) {
5583 unlock_user((void *)*host_rt_dev_ptr
,
5584 *target_rt_dev_ptr
, 0);
5589 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5590 int fd
, int cmd
, abi_long arg
)
5592 int sig
= target_to_host_signal(arg
);
5593 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5596 static IOCTLEntry ioctl_entries
[] = {
5597 #define IOCTL(cmd, access, ...) \
5598 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5599 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5600 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5601 #define IOCTL_IGNORE(cmd) \
5602 { TARGET_ ## cmd, 0, #cmd },
5607 /* ??? Implement proper locking for ioctls. */
5608 /* do_ioctl() Must return target values and target errnos. */
5609 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5611 const IOCTLEntry
*ie
;
5612 const argtype
*arg_type
;
5614 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5620 if (ie
->target_cmd
== 0) {
5621 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5622 return -TARGET_ENOSYS
;
5624 if (ie
->target_cmd
== cmd
)
5628 arg_type
= ie
->arg_type
;
5630 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
5633 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5634 } else if (!ie
->host_cmd
) {
5635 /* Some architectures define BSD ioctls in their headers
5636 that are not implemented in Linux. */
5637 return -TARGET_ENOSYS
;
5640 switch(arg_type
[0]) {
5643 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5647 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5651 target_size
= thunk_type_size(arg_type
, 0);
5652 switch(ie
->access
) {
5654 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5655 if (!is_error(ret
)) {
5656 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5658 return -TARGET_EFAULT
;
5659 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5660 unlock_user(argptr
, arg
, target_size
);
5664 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5666 return -TARGET_EFAULT
;
5667 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5668 unlock_user(argptr
, arg
, 0);
5669 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5673 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5675 return -TARGET_EFAULT
;
5676 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5677 unlock_user(argptr
, arg
, 0);
5678 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5679 if (!is_error(ret
)) {
5680 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5682 return -TARGET_EFAULT
;
5683 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5684 unlock_user(argptr
, arg
, target_size
);
5690 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5691 (long)cmd
, arg_type
[0]);
5692 ret
= -TARGET_ENOSYS
;
5698 static const bitmask_transtbl iflag_tbl
[] = {
5699 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5700 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5701 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5702 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5703 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5704 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5705 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5706 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5707 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5708 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5709 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5710 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5711 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5712 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5716 static const bitmask_transtbl oflag_tbl
[] = {
5717 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5718 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5719 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5720 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5721 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5722 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5723 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5724 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5725 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5726 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5727 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5728 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5729 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5730 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5731 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5732 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5733 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5734 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5735 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5736 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5737 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5738 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5739 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5740 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5744 static const bitmask_transtbl cflag_tbl
[] = {
5745 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5746 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5747 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5748 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5749 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5750 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5751 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5752 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5753 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5754 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5755 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5756 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5757 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5758 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5759 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5760 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5761 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5762 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5763 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5764 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5765 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5766 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5767 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5768 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5769 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5770 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5771 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5772 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5773 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5774 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5775 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5779 static const bitmask_transtbl lflag_tbl
[] = {
5780 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5781 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5782 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5783 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5784 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5785 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5786 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5787 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5788 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5789 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5790 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5791 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5792 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5793 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5794 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5798 static void target_to_host_termios (void *dst
, const void *src
)
5800 struct host_termios
*host
= dst
;
5801 const struct target_termios
*target
= src
;
5804 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5806 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5808 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5810 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5811 host
->c_line
= target
->c_line
;
5813 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5814 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5815 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5816 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5817 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5818 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5819 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5820 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5821 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5822 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5823 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5824 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5825 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5826 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5827 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5828 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5829 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5830 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5833 static void host_to_target_termios (void *dst
, const void *src
)
5835 struct target_termios
*target
= dst
;
5836 const struct host_termios
*host
= src
;
5839 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5841 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5843 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5845 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5846 target
->c_line
= host
->c_line
;
5848 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5849 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5850 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5851 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5852 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5853 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5854 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5855 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5856 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5857 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5858 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5859 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5860 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5861 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5862 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5863 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5864 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5865 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
5868 static const StructEntry struct_termios_def
= {
5869 .convert
= { host_to_target_termios
, target_to_host_termios
},
5870 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5871 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5874 static bitmask_transtbl mmap_flags_tbl
[] = {
5875 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5876 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5877 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5878 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5879 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5880 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
5881 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5882 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5883 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
, MAP_NORESERVE
,
5888 #if defined(TARGET_I386)
5890 /* NOTE: there is really one LDT for all the threads */
5891 static uint8_t *ldt_table
;
5893 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5900 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5901 if (size
> bytecount
)
5903 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
5905 return -TARGET_EFAULT
;
5906 /* ??? Should this by byteswapped? */
5907 memcpy(p
, ldt_table
, size
);
5908 unlock_user(p
, ptr
, size
);
5912 /* XXX: add locking support */
5913 static abi_long
write_ldt(CPUX86State
*env
,
5914 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
5916 struct target_modify_ldt_ldt_s ldt_info
;
5917 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5918 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5919 int seg_not_present
, useable
, lm
;
5920 uint32_t *lp
, entry_1
, entry_2
;
5922 if (bytecount
!= sizeof(ldt_info
))
5923 return -TARGET_EINVAL
;
5924 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
5925 return -TARGET_EFAULT
;
5926 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5927 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5928 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5929 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5930 unlock_user_struct(target_ldt_info
, ptr
, 0);
5932 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
5933 return -TARGET_EINVAL
;
5934 seg_32bit
= ldt_info
.flags
& 1;
5935 contents
= (ldt_info
.flags
>> 1) & 3;
5936 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5937 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5938 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5939 useable
= (ldt_info
.flags
>> 6) & 1;
5943 lm
= (ldt_info
.flags
>> 7) & 1;
5945 if (contents
== 3) {
5947 return -TARGET_EINVAL
;
5948 if (seg_not_present
== 0)
5949 return -TARGET_EINVAL
;
5951 /* allocate the LDT */
5953 env
->ldt
.base
= target_mmap(0,
5954 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
5955 PROT_READ
|PROT_WRITE
,
5956 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
5957 if (env
->ldt
.base
== -1)
5958 return -TARGET_ENOMEM
;
5959 memset(g2h(env
->ldt
.base
), 0,
5960 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
5961 env
->ldt
.limit
= 0xffff;
5962 ldt_table
= g2h(env
->ldt
.base
);
5965 /* NOTE: same code as Linux kernel */
5966 /* Allow LDTs to be cleared by the user. */
5967 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5970 read_exec_only
== 1 &&
5972 limit_in_pages
== 0 &&
5973 seg_not_present
== 1 &&
5981 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5982 (ldt_info
.limit
& 0x0ffff);
5983 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5984 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5985 (ldt_info
.limit
& 0xf0000) |
5986 ((read_exec_only
^ 1) << 9) |
5988 ((seg_not_present
^ 1) << 15) |
5990 (limit_in_pages
<< 23) |
5994 entry_2
|= (useable
<< 20);
5996 /* Install the new entry ... */
5998 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
5999 lp
[0] = tswap32(entry_1
);
6000 lp
[1] = tswap32(entry_2
);
6004 /* specific and weird i386 syscalls */
6005 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
6006 unsigned long bytecount
)
6012 ret
= read_ldt(ptr
, bytecount
);
6015 ret
= write_ldt(env
, ptr
, bytecount
, 1);
6018 ret
= write_ldt(env
, ptr
, bytecount
, 0);
6021 ret
= -TARGET_ENOSYS
;
6027 #if defined(TARGET_I386) && defined(TARGET_ABI32)
6028 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6030 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
6031 struct target_modify_ldt_ldt_s ldt_info
;
6032 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6033 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6034 int seg_not_present
, useable
, lm
;
6035 uint32_t *lp
, entry_1
, entry_2
;
6038 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6039 if (!target_ldt_info
)
6040 return -TARGET_EFAULT
;
6041 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6042 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6043 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6044 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6045 if (ldt_info
.entry_number
== -1) {
6046 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
6047 if (gdt_table
[i
] == 0) {
6048 ldt_info
.entry_number
= i
;
6049 target_ldt_info
->entry_number
= tswap32(i
);
6054 unlock_user_struct(target_ldt_info
, ptr
, 1);
6056 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
6057 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
6058 return -TARGET_EINVAL
;
6059 seg_32bit
= ldt_info
.flags
& 1;
6060 contents
= (ldt_info
.flags
>> 1) & 3;
6061 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6062 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6063 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6064 useable
= (ldt_info
.flags
>> 6) & 1;
6068 lm
= (ldt_info
.flags
>> 7) & 1;
6071 if (contents
== 3) {
6072 if (seg_not_present
== 0)
6073 return -TARGET_EINVAL
;
6076 /* NOTE: same code as Linux kernel */
6077 /* Allow LDTs to be cleared by the user. */
6078 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6079 if ((contents
== 0 &&
6080 read_exec_only
== 1 &&
6082 limit_in_pages
== 0 &&
6083 seg_not_present
== 1 &&
6091 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6092 (ldt_info
.limit
& 0x0ffff);
6093 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6094 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6095 (ldt_info
.limit
& 0xf0000) |
6096 ((read_exec_only
^ 1) << 9) |
6098 ((seg_not_present
^ 1) << 15) |
6100 (limit_in_pages
<< 23) |
6105 /* Install the new entry ... */
6107 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
6108 lp
[0] = tswap32(entry_1
);
6109 lp
[1] = tswap32(entry_2
);
6113 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6115 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6116 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
6117 uint32_t base_addr
, limit
, flags
;
6118 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
6119 int seg_not_present
, useable
, lm
;
6120 uint32_t *lp
, entry_1
, entry_2
;
6122 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6123 if (!target_ldt_info
)
6124 return -TARGET_EFAULT
;
6125 idx
= tswap32(target_ldt_info
->entry_number
);
6126 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
6127 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
6128 unlock_user_struct(target_ldt_info
, ptr
, 1);
6129 return -TARGET_EINVAL
;
6131 lp
= (uint32_t *)(gdt_table
+ idx
);
6132 entry_1
= tswap32(lp
[0]);
6133 entry_2
= tswap32(lp
[1]);
6135 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
6136 contents
= (entry_2
>> 10) & 3;
6137 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
6138 seg_32bit
= (entry_2
>> 22) & 1;
6139 limit_in_pages
= (entry_2
>> 23) & 1;
6140 useable
= (entry_2
>> 20) & 1;
6144 lm
= (entry_2
>> 21) & 1;
6146 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
6147 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
6148 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
6149 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
6150 base_addr
= (entry_1
>> 16) |
6151 (entry_2
& 0xff000000) |
6152 ((entry_2
& 0xff) << 16);
6153 target_ldt_info
->base_addr
= tswapal(base_addr
);
6154 target_ldt_info
->limit
= tswap32(limit
);
6155 target_ldt_info
->flags
= tswap32(flags
);
6156 unlock_user_struct(target_ldt_info
, ptr
, 1);
6159 #endif /* TARGET_I386 && TARGET_ABI32 */
6161 #ifndef TARGET_ABI32
6162 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6169 case TARGET_ARCH_SET_GS
:
6170 case TARGET_ARCH_SET_FS
:
6171 if (code
== TARGET_ARCH_SET_GS
)
6175 cpu_x86_load_seg(env
, idx
, 0);
6176 env
->segs
[idx
].base
= addr
;
6178 case TARGET_ARCH_GET_GS
:
6179 case TARGET_ARCH_GET_FS
:
6180 if (code
== TARGET_ARCH_GET_GS
)
6184 val
= env
->segs
[idx
].base
;
6185 if (put_user(val
, addr
, abi_ulong
))
6186 ret
= -TARGET_EFAULT
;
6189 ret
= -TARGET_EINVAL
;
6196 #endif /* defined(TARGET_I386) */
6198 #define NEW_STACK_SIZE 0x40000
6201 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
6204 pthread_mutex_t mutex
;
6205 pthread_cond_t cond
;
6208 abi_ulong child_tidptr
;
6209 abi_ulong parent_tidptr
;
6213 static void *clone_func(void *arg
)
6215 new_thread_info
*info
= arg
;
6220 rcu_register_thread();
6221 tcg_register_thread();
6223 cpu
= ENV_GET_CPU(env
);
6225 ts
= (TaskState
*)cpu
->opaque
;
6226 info
->tid
= gettid();
6228 if (info
->child_tidptr
)
6229 put_user_u32(info
->tid
, info
->child_tidptr
);
6230 if (info
->parent_tidptr
)
6231 put_user_u32(info
->tid
, info
->parent_tidptr
);
6232 /* Enable signals. */
6233 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
6234 /* Signal to the parent that we're ready. */
6235 pthread_mutex_lock(&info
->mutex
);
6236 pthread_cond_broadcast(&info
->cond
);
6237 pthread_mutex_unlock(&info
->mutex
);
6238 /* Wait until the parent has finshed initializing the tls state. */
6239 pthread_mutex_lock(&clone_lock
);
6240 pthread_mutex_unlock(&clone_lock
);
6246 /* do_fork() Must return host values and target errnos (unlike most
6247 do_*() functions). */
6248 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
6249 abi_ulong parent_tidptr
, target_ulong newtls
,
6250 abi_ulong child_tidptr
)
6252 CPUState
*cpu
= ENV_GET_CPU(env
);
6256 CPUArchState
*new_env
;
6259 flags
&= ~CLONE_IGNORED_FLAGS
;
6261 /* Emulate vfork() with fork() */
6262 if (flags
& CLONE_VFORK
)
6263 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
6265 if (flags
& CLONE_VM
) {
6266 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
6267 new_thread_info info
;
6268 pthread_attr_t attr
;
6270 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
6271 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
6272 return -TARGET_EINVAL
;
6275 ts
= g_new0(TaskState
, 1);
6276 init_task_state(ts
);
6277 /* we create a new CPU instance. */
6278 new_env
= cpu_copy(env
);
6279 /* Init regs that differ from the parent. */
6280 cpu_clone_regs(new_env
, newsp
);
6281 new_cpu
= ENV_GET_CPU(new_env
);
6282 new_cpu
->opaque
= ts
;
6283 ts
->bprm
= parent_ts
->bprm
;
6284 ts
->info
= parent_ts
->info
;
6285 ts
->signal_mask
= parent_ts
->signal_mask
;
6287 if (flags
& CLONE_CHILD_CLEARTID
) {
6288 ts
->child_tidptr
= child_tidptr
;
6291 if (flags
& CLONE_SETTLS
) {
6292 cpu_set_tls (new_env
, newtls
);
6295 /* Grab a mutex so that thread setup appears atomic. */
6296 pthread_mutex_lock(&clone_lock
);
6298 memset(&info
, 0, sizeof(info
));
6299 pthread_mutex_init(&info
.mutex
, NULL
);
6300 pthread_mutex_lock(&info
.mutex
);
6301 pthread_cond_init(&info
.cond
, NULL
);
6303 if (flags
& CLONE_CHILD_SETTID
) {
6304 info
.child_tidptr
= child_tidptr
;
6306 if (flags
& CLONE_PARENT_SETTID
) {
6307 info
.parent_tidptr
= parent_tidptr
;
6310 ret
= pthread_attr_init(&attr
);
6311 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6312 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6313 /* It is not safe to deliver signals until the child has finished
6314 initializing, so temporarily block all signals. */
6315 sigfillset(&sigmask
);
6316 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6318 /* If this is our first additional thread, we need to ensure we
6319 * generate code for parallel execution and flush old translations.
6321 if (!parallel_cpus
) {
6322 parallel_cpus
= true;
6326 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6327 /* TODO: Free new CPU state if thread creation failed. */
6329 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6330 pthread_attr_destroy(&attr
);
6332 /* Wait for the child to initialize. */
6333 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6338 pthread_mutex_unlock(&info
.mutex
);
6339 pthread_cond_destroy(&info
.cond
);
6340 pthread_mutex_destroy(&info
.mutex
);
6341 pthread_mutex_unlock(&clone_lock
);
6343 /* if no CLONE_VM, we consider it is a fork */
6344 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
6345 return -TARGET_EINVAL
;
6348 /* We can't support custom termination signals */
6349 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
6350 return -TARGET_EINVAL
;
6353 if (block_signals()) {
6354 return -TARGET_ERESTARTSYS
;
6360 /* Child Process. */
6361 cpu_clone_regs(env
, newsp
);
6363 /* There is a race condition here. The parent process could
6364 theoretically read the TID in the child process before the child
6365 tid is set. This would require using either ptrace
6366 (not implemented) or having *_tidptr to point at a shared memory
6367 mapping. We can't repeat the spinlock hack used above because
6368 the child process gets its own copy of the lock. */
6369 if (flags
& CLONE_CHILD_SETTID
)
6370 put_user_u32(gettid(), child_tidptr
);
6371 if (flags
& CLONE_PARENT_SETTID
)
6372 put_user_u32(gettid(), parent_tidptr
);
6373 ts
= (TaskState
*)cpu
->opaque
;
6374 if (flags
& CLONE_SETTLS
)
6375 cpu_set_tls (env
, newtls
);
6376 if (flags
& CLONE_CHILD_CLEARTID
)
6377 ts
->child_tidptr
= child_tidptr
;
6385 /* warning : doesn't handle linux specific flags... */
6386 static int target_to_host_fcntl_cmd(int cmd
)
6389 case TARGET_F_DUPFD
:
6390 case TARGET_F_GETFD
:
6391 case TARGET_F_SETFD
:
6392 case TARGET_F_GETFL
:
6393 case TARGET_F_SETFL
:
6395 case TARGET_F_GETLK
:
6397 case TARGET_F_SETLK
:
6399 case TARGET_F_SETLKW
:
6401 case TARGET_F_GETOWN
:
6403 case TARGET_F_SETOWN
:
6405 case TARGET_F_GETSIG
:
6407 case TARGET_F_SETSIG
:
6409 #if TARGET_ABI_BITS == 32
6410 case TARGET_F_GETLK64
:
6412 case TARGET_F_SETLK64
:
6414 case TARGET_F_SETLKW64
:
6417 case TARGET_F_SETLEASE
:
6419 case TARGET_F_GETLEASE
:
6421 #ifdef F_DUPFD_CLOEXEC
6422 case TARGET_F_DUPFD_CLOEXEC
:
6423 return F_DUPFD_CLOEXEC
;
6425 case TARGET_F_NOTIFY
:
6428 case TARGET_F_GETOWN_EX
:
6432 case TARGET_F_SETOWN_EX
:
6436 case TARGET_F_SETPIPE_SZ
:
6437 return F_SETPIPE_SZ
;
6438 case TARGET_F_GETPIPE_SZ
:
6439 return F_GETPIPE_SZ
;
6442 return -TARGET_EINVAL
;
6444 return -TARGET_EINVAL
;
6447 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
6448 static const bitmask_transtbl flock_tbl
[] = {
6449 TRANSTBL_CONVERT(F_RDLCK
),
6450 TRANSTBL_CONVERT(F_WRLCK
),
6451 TRANSTBL_CONVERT(F_UNLCK
),
6452 TRANSTBL_CONVERT(F_EXLCK
),
6453 TRANSTBL_CONVERT(F_SHLCK
),
6457 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6458 abi_ulong target_flock_addr
)
6460 struct target_flock
*target_fl
;
6463 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6464 return -TARGET_EFAULT
;
6467 __get_user(l_type
, &target_fl
->l_type
);
6468 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6469 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6470 __get_user(fl
->l_start
, &target_fl
->l_start
);
6471 __get_user(fl
->l_len
, &target_fl
->l_len
);
6472 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6473 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6477 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6478 const struct flock64
*fl
)
6480 struct target_flock
*target_fl
;
6483 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6484 return -TARGET_EFAULT
;
6487 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6488 __put_user(l_type
, &target_fl
->l_type
);
6489 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6490 __put_user(fl
->l_start
, &target_fl
->l_start
);
6491 __put_user(fl
->l_len
, &target_fl
->l_len
);
6492 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6493 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6497 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6498 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
6500 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6501 static inline abi_long
copy_from_user_eabi_flock64(struct flock64
*fl
,
6502 abi_ulong target_flock_addr
)
6504 struct target_eabi_flock64
*target_fl
;
6507 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6508 return -TARGET_EFAULT
;
6511 __get_user(l_type
, &target_fl
->l_type
);
6512 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6513 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6514 __get_user(fl
->l_start
, &target_fl
->l_start
);
6515 __get_user(fl
->l_len
, &target_fl
->l_len
);
6516 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6517 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6521 static inline abi_long
copy_to_user_eabi_flock64(abi_ulong target_flock_addr
,
6522 const struct flock64
*fl
)
6524 struct target_eabi_flock64
*target_fl
;
6527 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6528 return -TARGET_EFAULT
;
6531 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6532 __put_user(l_type
, &target_fl
->l_type
);
6533 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6534 __put_user(fl
->l_start
, &target_fl
->l_start
);
6535 __put_user(fl
->l_len
, &target_fl
->l_len
);
6536 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6537 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6542 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6543 abi_ulong target_flock_addr
)
6545 struct target_flock64
*target_fl
;
6548 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6549 return -TARGET_EFAULT
;
6552 __get_user(l_type
, &target_fl
->l_type
);
6553 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6554 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6555 __get_user(fl
->l_start
, &target_fl
->l_start
);
6556 __get_user(fl
->l_len
, &target_fl
->l_len
);
6557 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6558 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6562 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6563 const struct flock64
*fl
)
6565 struct target_flock64
*target_fl
;
6568 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6569 return -TARGET_EFAULT
;
6572 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6573 __put_user(l_type
, &target_fl
->l_type
);
6574 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6575 __put_user(fl
->l_start
, &target_fl
->l_start
);
6576 __put_user(fl
->l_len
, &target_fl
->l_len
);
6577 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6578 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6582 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
6584 struct flock64 fl64
;
6586 struct f_owner_ex fox
;
6587 struct target_f_owner_ex
*target_fox
;
6590 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
6592 if (host_cmd
== -TARGET_EINVAL
)
6596 case TARGET_F_GETLK
:
6597 ret
= copy_from_user_flock(&fl64
, arg
);
6601 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6603 ret
= copy_to_user_flock(arg
, &fl64
);
6607 case TARGET_F_SETLK
:
6608 case TARGET_F_SETLKW
:
6609 ret
= copy_from_user_flock(&fl64
, arg
);
6613 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6616 case TARGET_F_GETLK64
:
6617 ret
= copy_from_user_flock64(&fl64
, arg
);
6621 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6623 ret
= copy_to_user_flock64(arg
, &fl64
);
6626 case TARGET_F_SETLK64
:
6627 case TARGET_F_SETLKW64
:
6628 ret
= copy_from_user_flock64(&fl64
, arg
);
6632 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6635 case TARGET_F_GETFL
:
6636 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6638 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
6642 case TARGET_F_SETFL
:
6643 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
6644 target_to_host_bitmask(arg
,
6649 case TARGET_F_GETOWN_EX
:
6650 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6652 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
6653 return -TARGET_EFAULT
;
6654 target_fox
->type
= tswap32(fox
.type
);
6655 target_fox
->pid
= tswap32(fox
.pid
);
6656 unlock_user_struct(target_fox
, arg
, 1);
6662 case TARGET_F_SETOWN_EX
:
6663 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
6664 return -TARGET_EFAULT
;
6665 fox
.type
= tswap32(target_fox
->type
);
6666 fox
.pid
= tswap32(target_fox
->pid
);
6667 unlock_user_struct(target_fox
, arg
, 0);
6668 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6672 case TARGET_F_SETOWN
:
6673 case TARGET_F_GETOWN
:
6674 case TARGET_F_SETSIG
:
6675 case TARGET_F_GETSIG
:
6676 case TARGET_F_SETLEASE
:
6677 case TARGET_F_GETLEASE
:
6678 case TARGET_F_SETPIPE_SZ
:
6679 case TARGET_F_GETPIPE_SZ
:
6680 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6684 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
6692 static inline int high2lowuid(int uid
)
6700 static inline int high2lowgid(int gid
)
6708 static inline int low2highuid(int uid
)
6710 if ((int16_t)uid
== -1)
6716 static inline int low2highgid(int gid
)
6718 if ((int16_t)gid
== -1)
6723 static inline int tswapid(int id
)
6728 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6730 #else /* !USE_UID16 */
6731 static inline int high2lowuid(int uid
)
6735 static inline int high2lowgid(int gid
)
6739 static inline int low2highuid(int uid
)
6743 static inline int low2highgid(int gid
)
6747 static inline int tswapid(int id
)
6752 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6754 #endif /* USE_UID16 */
6756 /* We must do direct syscalls for setting UID/GID, because we want to
6757 * implement the Linux system call semantics of "change only for this thread",
6758 * not the libc/POSIX semantics of "change for all threads in process".
6759 * (See http://ewontfix.com/17/ for more details.)
6760 * We use the 32-bit version of the syscalls if present; if it is not
6761 * then either the host architecture supports 32-bit UIDs natively with
6762 * the standard syscall, or the 16-bit UID is the best we can do.
6764 #ifdef __NR_setuid32
6765 #define __NR_sys_setuid __NR_setuid32
6767 #define __NR_sys_setuid __NR_setuid
6769 #ifdef __NR_setgid32
6770 #define __NR_sys_setgid __NR_setgid32
6772 #define __NR_sys_setgid __NR_setgid
6774 #ifdef __NR_setresuid32
6775 #define __NR_sys_setresuid __NR_setresuid32
6777 #define __NR_sys_setresuid __NR_setresuid
6779 #ifdef __NR_setresgid32
6780 #define __NR_sys_setresgid __NR_setresgid32
6782 #define __NR_sys_setresgid __NR_setresgid
6785 _syscall1(int, sys_setuid
, uid_t
, uid
)
6786 _syscall1(int, sys_setgid
, gid_t
, gid
)
6787 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
6788 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
6790 void syscall_init(void)
6793 const argtype
*arg_type
;
6797 thunk_init(STRUCT_MAX
);
6799 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6800 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6801 #include "syscall_types.h"
6803 #undef STRUCT_SPECIAL
6805 /* Build target_to_host_errno_table[] table from
6806 * host_to_target_errno_table[]. */
6807 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
6808 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
6811 /* we patch the ioctl size if necessary. We rely on the fact that
6812 no ioctl has all the bits at '1' in the size field */
6814 while (ie
->target_cmd
!= 0) {
6815 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
6816 TARGET_IOC_SIZEMASK
) {
6817 arg_type
= ie
->arg_type
;
6818 if (arg_type
[0] != TYPE_PTR
) {
6819 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
6824 size
= thunk_type_size(arg_type
, 0);
6825 ie
->target_cmd
= (ie
->target_cmd
&
6826 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
6827 (size
<< TARGET_IOC_SIZESHIFT
);
6830 /* automatic consistency check if same arch */
6831 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6832 (defined(__x86_64__) && defined(TARGET_X86_64))
6833 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
6834 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6835 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/* Assemble a 64-bit file offset from the two guest registers that carry
 * its halves; which register holds the high word depends on guest
 * endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* On 64-bit ABIs the offset already fits in one register; the second
 * argument is ignored. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* Implement the guest truncate64 syscall: on ABIs that align 64-bit
 * register pairs, the offset halves are shifted up by one argument slot. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* Implement the guest ftruncate64 syscall; same register-pair alignment
 * handling as target_truncate64. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
6886 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
6887 abi_ulong target_addr
)
6889 struct target_timespec
*target_ts
;
6891 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
6892 return -TARGET_EFAULT
;
6893 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6894 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6895 unlock_user_struct(target_ts
, target_addr
, 0);
6899 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
6900 struct timespec
*host_ts
)
6902 struct target_timespec
*target_ts
;
6904 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
6905 return -TARGET_EFAULT
;
6906 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6907 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6908 unlock_user_struct(target_ts
, target_addr
, 1);
6912 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
6913 abi_ulong target_addr
)
6915 struct target_itimerspec
*target_itspec
;
6917 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
6918 return -TARGET_EFAULT
;
6921 host_itspec
->it_interval
.tv_sec
=
6922 tswapal(target_itspec
->it_interval
.tv_sec
);
6923 host_itspec
->it_interval
.tv_nsec
=
6924 tswapal(target_itspec
->it_interval
.tv_nsec
);
6925 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
6926 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
6928 unlock_user_struct(target_itspec
, target_addr
, 1);
6932 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
6933 struct itimerspec
*host_its
)
6935 struct target_itimerspec
*target_itspec
;
6937 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
6938 return -TARGET_EFAULT
;
6941 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
6942 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
6944 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
6945 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
6947 unlock_user_struct(target_itspec
, target_addr
, 0);
6951 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
6952 abi_long target_addr
)
6954 struct target_timex
*target_tx
;
6956 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
6957 return -TARGET_EFAULT
;
6960 __get_user(host_tx
->modes
, &target_tx
->modes
);
6961 __get_user(host_tx
->offset
, &target_tx
->offset
);
6962 __get_user(host_tx
->freq
, &target_tx
->freq
);
6963 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6964 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
6965 __get_user(host_tx
->status
, &target_tx
->status
);
6966 __get_user(host_tx
->constant
, &target_tx
->constant
);
6967 __get_user(host_tx
->precision
, &target_tx
->precision
);
6968 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6969 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6970 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6971 __get_user(host_tx
->tick
, &target_tx
->tick
);
6972 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6973 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
6974 __get_user(host_tx
->shift
, &target_tx
->shift
);
6975 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
6976 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6977 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6978 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6979 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6980 __get_user(host_tx
->tai
, &target_tx
->tai
);
6982 unlock_user_struct(target_tx
, target_addr
, 0);
6986 static inline abi_long
host_to_target_timex(abi_long target_addr
,
6987 struct timex
*host_tx
)
6989 struct target_timex
*target_tx
;
6991 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
6992 return -TARGET_EFAULT
;
6995 __put_user(host_tx
->modes
, &target_tx
->modes
);
6996 __put_user(host_tx
->offset
, &target_tx
->offset
);
6997 __put_user(host_tx
->freq
, &target_tx
->freq
);
6998 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6999 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
7000 __put_user(host_tx
->status
, &target_tx
->status
);
7001 __put_user(host_tx
->constant
, &target_tx
->constant
);
7002 __put_user(host_tx
->precision
, &target_tx
->precision
);
7003 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7004 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
7005 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
7006 __put_user(host_tx
->tick
, &target_tx
->tick
);
7007 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7008 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
7009 __put_user(host_tx
->shift
, &target_tx
->shift
);
7010 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
7011 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7012 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7013 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7014 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7015 __put_user(host_tx
->tai
, &target_tx
->tai
);
7017 unlock_user_struct(target_tx
, target_addr
, 1);
7022 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
7023 abi_ulong target_addr
)
7025 struct target_sigevent
*target_sevp
;
7027 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
7028 return -TARGET_EFAULT
;
7031 /* This union is awkward on 64 bit systems because it has a 32 bit
7032 * integer and a pointer in it; we follow the conversion approach
7033 * used for handling sigval types in signal.c so the guest should get
7034 * the correct value back even if we did a 64 bit byteswap and it's
7035 * using the 32 bit integer.
7037 host_sevp
->sigev_value
.sival_ptr
=
7038 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
7039 host_sevp
->sigev_signo
=
7040 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
7041 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
7042 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
7044 unlock_user_struct(target_sevp
, target_addr
, 1);
#if defined(TARGET_NR_mlockall)
/* Translate guest mlockall() flag bits to the host MCL_* values. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }
    return result;
}
#endif
7063 static inline abi_long
host_to_target_stat64(void *cpu_env
,
7064 abi_ulong target_addr
,
7065 struct stat
*host_st
)
7067 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7068 if (((CPUARMState
*)cpu_env
)->eabi
) {
7069 struct target_eabi_stat64
*target_st
;
7071 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
7072 return -TARGET_EFAULT
;
7073 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
7074 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
7075 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
7076 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7077 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
7079 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
7080 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
7081 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
7082 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
7083 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7084 __put_user(host_st
->st_size
, &target_st
->st_size
);
7085 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7086 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7087 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7088 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7089 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7090 unlock_user_struct(target_st
, target_addr
, 1);
7094 #if defined(TARGET_HAS_STRUCT_STAT64)
7095 struct target_stat64
*target_st
;
7097 struct target_stat
*target_st
;
7100 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
7101 return -TARGET_EFAULT
;
7102 memset(target_st
, 0, sizeof(*target_st
));
7103 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
7104 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
7105 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7106 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
7108 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
7109 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
7110 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
7111 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
7112 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7113 /* XXX: better use of kernel struct */
7114 __put_user(host_st
->st_size
, &target_st
->st_size
);
7115 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7116 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7117 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7118 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7119 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7120 unlock_user_struct(target_st
, target_addr
, 1);
7126 /* ??? Using host futex calls even when target atomic operations
7127 are not really atomic probably breaks things. However implementing
7128 futexes locally would make futexes shared between multiple processes
7129 tricky. However they're probably useless because guest atomic
7130 operations won't work either. */
7131 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
7132 target_ulong uaddr2
, int val3
)
7134 struct timespec ts
, *pts
;
7137 /* ??? We assume FUTEX_* constants are the same on both host
7139 #ifdef FUTEX_CMD_MASK
7140 base_op
= op
& FUTEX_CMD_MASK
;
7146 case FUTEX_WAIT_BITSET
:
7149 target_to_host_timespec(pts
, timeout
);
7153 return get_errno(safe_futex(g2h(uaddr
), op
, tswap32(val
),
7156 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
7158 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
7160 case FUTEX_CMP_REQUEUE
:
7162 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7163 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7164 But the prototype takes a `struct timespec *'; insert casts
7165 to satisfy the compiler. We do not need to tswap TIMEOUT
7166 since it's not compared to guest memory. */
7167 pts
= (struct timespec
*)(uintptr_t) timeout
;
7168 return get_errno(safe_futex(g2h(uaddr
), op
, val
, pts
,
7170 (base_op
== FUTEX_CMP_REQUEUE
7174 return -TARGET_ENOSYS
;
7177 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7178 static abi_long
do_name_to_handle_at(abi_long dirfd
, abi_long pathname
,
7179 abi_long handle
, abi_long mount_id
,
7182 struct file_handle
*target_fh
;
7183 struct file_handle
*fh
;
7187 unsigned int size
, total_size
;
7189 if (get_user_s32(size
, handle
)) {
7190 return -TARGET_EFAULT
;
7193 name
= lock_user_string(pathname
);
7195 return -TARGET_EFAULT
;
7198 total_size
= sizeof(struct file_handle
) + size
;
7199 target_fh
= lock_user(VERIFY_WRITE
, handle
, total_size
, 0);
7201 unlock_user(name
, pathname
, 0);
7202 return -TARGET_EFAULT
;
7205 fh
= g_malloc0(total_size
);
7206 fh
->handle_bytes
= size
;
7208 ret
= get_errno(name_to_handle_at(dirfd
, path(name
), fh
, &mid
, flags
));
7209 unlock_user(name
, pathname
, 0);
7211 /* man name_to_handle_at(2):
7212 * Other than the use of the handle_bytes field, the caller should treat
7213 * the file_handle structure as an opaque data type
7216 memcpy(target_fh
, fh
, total_size
);
7217 target_fh
->handle_bytes
= tswap32(fh
->handle_bytes
);
7218 target_fh
->handle_type
= tswap32(fh
->handle_type
);
7220 unlock_user(target_fh
, handle
, total_size
);
7222 if (put_user_s32(mid
, mount_id
)) {
7223 return -TARGET_EFAULT
;
7231 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7232 static abi_long
do_open_by_handle_at(abi_long mount_fd
, abi_long handle
,
7235 struct file_handle
*target_fh
;
7236 struct file_handle
*fh
;
7237 unsigned int size
, total_size
;
7240 if (get_user_s32(size
, handle
)) {
7241 return -TARGET_EFAULT
;
7244 total_size
= sizeof(struct file_handle
) + size
;
7245 target_fh
= lock_user(VERIFY_READ
, handle
, total_size
, 1);
7247 return -TARGET_EFAULT
;
7250 fh
= g_memdup(target_fh
, total_size
);
7251 fh
->handle_bytes
= size
;
7252 fh
->handle_type
= tswap32(target_fh
->handle_type
);
7254 ret
= get_errno(open_by_handle_at(mount_fd
, fh
,
7255 target_to_host_bitmask(flags
, fcntl_flags_tbl
)));
7259 unlock_user(target_fh
, handle
, total_size
);
7265 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
/* signalfd siginfo conversion */

/* Convert a struct signalfd_siginfo from host to guest byte order.
 * NOTE: the only caller (host_to_target_data_signalfd) passes the same
 * buffer as both tinfo and info, so this converts in place; the SIGBUS
 * check below reads tinfo before any field has been swapped, which is
 * valid only under that aliasing. */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    if (tinfo->ssi_signo == SIGBUS &&
        (tinfo->ssi_code == BUS_MCEERR_AR ||
         tinfo->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    /* Fix: read ssi_errno from the source (info), not the destination
     * (tinfo), for consistency with every other field.  Behavior is
     * unchanged for the current aliased caller, but the old form broke
     * silently if the two pointers ever differed. */
    tinfo->ssi_errno = tswap32(info->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
7307 static abi_long
host_to_target_data_signalfd(void *buf
, size_t len
)
7311 for (i
= 0; i
< len
; i
+= sizeof(struct signalfd_siginfo
)) {
7312 host_to_target_signalfd_siginfo(buf
+ i
, buf
+ i
);
7318 static TargetFdTrans target_signalfd_trans
= {
7319 .host_to_target_data
= host_to_target_data_signalfd
,
7322 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
7325 target_sigset_t
*target_mask
;
7329 if (flags
& ~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
)) {
7330 return -TARGET_EINVAL
;
7332 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
7333 return -TARGET_EFAULT
;
7336 target_to_host_sigset(&host_mask
, target_mask
);
7338 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
7340 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
7342 fd_trans_register(ret
, &target_signalfd_trans
);
7345 unlock_user_struct(target_mask
, mask
, 0);
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* low 7 bits carry the terminating signal */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* stop signal is in bits 8..15 */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    /* normal exit status needs no translation */
    return status;
}
7365 static int open_self_cmdline(void *cpu_env
, int fd
)
7367 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7368 struct linux_binprm
*bprm
= ((TaskState
*)cpu
->opaque
)->bprm
;
7371 for (i
= 0; i
< bprm
->argc
; i
++) {
7372 size_t len
= strlen(bprm
->argv
[i
]) + 1;
7374 if (write(fd
, bprm
->argv
[i
], len
) != len
) {
7382 static int open_self_maps(void *cpu_env
, int fd
)
7384 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7385 TaskState
*ts
= cpu
->opaque
;
7391 fp
= fopen("/proc/self/maps", "r");
7396 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7397 int fields
, dev_maj
, dev_min
, inode
;
7398 uint64_t min
, max
, offset
;
7399 char flag_r
, flag_w
, flag_x
, flag_p
;
7400 char path
[512] = "";
7401 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
7402 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
7403 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
7405 if ((fields
< 10) || (fields
> 11)) {
7408 if (h2g_valid(min
)) {
7409 int flags
= page_get_flags(h2g(min
));
7410 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
);
7411 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
7414 if (h2g(min
) == ts
->info
->stack_limit
) {
7415 pstrcpy(path
, sizeof(path
), " [stack]");
7417 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
7418 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
7419 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
7420 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
7421 path
[0] ? " " : "", path
);
7431 static int open_self_stat(void *cpu_env
, int fd
)
7433 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7434 TaskState
*ts
= cpu
->opaque
;
7435 abi_ulong start_stack
= ts
->info
->start_stack
;
7438 for (i
= 0; i
< 44; i
++) {
7446 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7447 } else if (i
== 1) {
7449 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
7450 } else if (i
== 27) {
7453 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7455 /* for the rest, there is MasterCard */
7456 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
7460 if (write(fd
, buf
, len
) != len
) {
7468 static int open_self_auxv(void *cpu_env
, int fd
)
7470 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7471 TaskState
*ts
= cpu
->opaque
;
7472 abi_ulong auxv
= ts
->info
->saved_auxv
;
7473 abi_ulong len
= ts
->info
->auxv_len
;
7477 * Auxiliary vector is stored in target process stack.
7478 * read in whole auxv vector and copy it to file
7480 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
7484 r
= write(fd
, ptr
, len
);
7491 lseek(fd
, 0, SEEK_SET
);
7492 unlock_user(ptr
, auxv
, len
);
/*
 * Return 1 if @filename names the entry @entry of this process's /proc
 * directory — i.e. "/proc/self/<entry>" or "/proc/<our pid>/<entry>" —
 * and 0 otherwise.  Used to intercept guest opens of its own proc files.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            /* a numeric pid only matches if it is our own */
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
7522 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-match comparator used in the fake_open table for absolute proc
 * paths (e.g. "/proc/net/route"); counterpart of is_proc_myself.
 */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
7528 static int open_net_route(void *cpu_env
, int fd
)
7535 fp
= fopen("/proc/net/route", "r");
7542 read
= getline(&line
, &len
, fp
);
7543 dprintf(fd
, "%s", line
);
7547 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7549 uint32_t dest
, gw
, mask
;
7550 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
7551 sscanf(line
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7552 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
7553 &mask
, &mtu
, &window
, &irtt
);
7554 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7555 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
7556 metric
, tswap32(mask
), mtu
, window
, irtt
);
7566 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
7569 const char *filename
;
7570 int (*fill
)(void *cpu_env
, int fd
);
7571 int (*cmp
)(const char *s1
, const char *s2
);
7573 const struct fake_open
*fake_open
;
7574 static const struct fake_open fakes
[] = {
7575 { "maps", open_self_maps
, is_proc_myself
},
7576 { "stat", open_self_stat
, is_proc_myself
},
7577 { "auxv", open_self_auxv
, is_proc_myself
},
7578 { "cmdline", open_self_cmdline
, is_proc_myself
},
7579 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7580 { "/proc/net/route", open_net_route
, is_proc
},
7582 { NULL
, NULL
, NULL
}
7585 if (is_proc_myself(pathname
, "exe")) {
7586 int execfd
= qemu_getauxval(AT_EXECFD
);
7587 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
7590 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
7591 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
7596 if (fake_open
->filename
) {
7598 char filename
[PATH_MAX
];
7601 /* create temporary file to map stat to */
7602 tmpdir
= getenv("TMPDIR");
7605 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
7606 fd
= mkstemp(filename
);
7612 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
7618 lseek(fd
, 0, SEEK_SET
);
7623 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
7626 #define TIMER_MAGIC 0x0caf0000
7627 #define TIMER_MAGIC_MASK 0xffff0000
7629 /* Convert QEMU provided timer ID back to internal 16bit index format */
7630 static target_timer_t
get_timer_id(abi_long arg
)
7632 target_timer_t timerid
= arg
;
7634 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
7635 return -TARGET_EINVAL
;
7640 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
7641 return -TARGET_EINVAL
;
7647 static abi_long
swap_data_eventfd(void *buf
, size_t len
)
7649 uint64_t *counter
= buf
;
7652 if (len
< sizeof(uint64_t)) {
7656 for (i
= 0; i
< len
; i
+= sizeof(uint64_t)) {
7657 *counter
= tswap64(*counter
);
7664 static TargetFdTrans target_eventfd_trans
= {
7665 .host_to_target_data
= swap_data_eventfd
,
7666 .target_to_host_data
= swap_data_eventfd
,
7669 #if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
7670 (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
7671 defined(__NR_inotify_init1))
7672 static abi_long
host_to_target_data_inotify(void *buf
, size_t len
)
7674 struct inotify_event
*ev
;
7678 for (i
= 0; i
< len
; i
+= sizeof(struct inotify_event
) + name_len
) {
7679 ev
= (struct inotify_event
*)((char *)buf
+ i
);
7682 ev
->wd
= tswap32(ev
->wd
);
7683 ev
->mask
= tswap32(ev
->mask
);
7684 ev
->cookie
= tswap32(ev
->cookie
);
7685 ev
->len
= tswap32(name_len
);
7691 static TargetFdTrans target_inotify_trans
= {
7692 .host_to_target_data
= host_to_target_data_inotify
,
7696 /* do_syscall() should always have a single exit point at the end so
7697 that actions, such as logging of syscall results, can be performed.
7698 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7699 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
7700 abi_long arg2
, abi_long arg3
, abi_long arg4
,
7701 abi_long arg5
, abi_long arg6
, abi_long arg7
,
7704 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
7710 #if defined(DEBUG_ERESTARTSYS)
7711 /* Debug-only code for exercising the syscall-restart code paths
7712 * in the per-architecture cpu main loops: restart every syscall
7713 * the guest makes once before letting it through.
7720 return -TARGET_ERESTARTSYS
;
7726 gemu_log("syscall %d", num
);
7728 trace_guest_user_syscall(cpu
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
7730 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7733 case TARGET_NR_exit
:
7734 /* In old applications this may be used to implement _exit(2).
7735 However in threaded applictions it is used for thread termination,
7736 and _exit_group is used for application termination.
7737 Do thread termination if we have more then one thread. */
7739 if (block_signals()) {
7740 ret
= -TARGET_ERESTARTSYS
;
7746 if (CPU_NEXT(first_cpu
)) {
7749 /* Remove the CPU from the list. */
7750 QTAILQ_REMOVE(&cpus
, cpu
, node
);
7755 if (ts
->child_tidptr
) {
7756 put_user_u32(0, ts
->child_tidptr
);
7757 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
7761 object_unref(OBJECT(cpu
));
7763 rcu_unregister_thread();
7771 gdb_exit(cpu_env
, arg1
);
7773 ret
= 0; /* avoid warning */
7775 case TARGET_NR_read
:
7779 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7781 ret
= get_errno(safe_read(arg1
, p
, arg3
));
7783 fd_trans_host_to_target_data(arg1
)) {
7784 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
7786 unlock_user(p
, arg2
, ret
);
7789 case TARGET_NR_write
:
7790 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7792 if (fd_trans_target_to_host_data(arg1
)) {
7793 void *copy
= g_malloc(arg3
);
7794 memcpy(copy
, p
, arg3
);
7795 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
7797 ret
= get_errno(safe_write(arg1
, copy
, ret
));
7801 ret
= get_errno(safe_write(arg1
, p
, arg3
));
7803 unlock_user(p
, arg2
, 0);
7805 #ifdef TARGET_NR_open
7806 case TARGET_NR_open
:
7807 if (!(p
= lock_user_string(arg1
)))
7809 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
7810 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
7812 fd_trans_unregister(ret
);
7813 unlock_user(p
, arg1
, 0);
7816 case TARGET_NR_openat
:
7817 if (!(p
= lock_user_string(arg2
)))
7819 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
7820 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
7822 fd_trans_unregister(ret
);
7823 unlock_user(p
, arg2
, 0);
7825 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7826 case TARGET_NR_name_to_handle_at
:
7827 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
7830 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7831 case TARGET_NR_open_by_handle_at
:
7832 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
7833 fd_trans_unregister(ret
);
7836 case TARGET_NR_close
:
7837 fd_trans_unregister(arg1
);
7838 ret
= get_errno(close(arg1
));
7843 #ifdef TARGET_NR_fork
7844 case TARGET_NR_fork
:
7845 ret
= get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
7848 #ifdef TARGET_NR_waitpid
7849 case TARGET_NR_waitpid
:
7852 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
7853 if (!is_error(ret
) && arg2
&& ret
7854 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
7859 #ifdef TARGET_NR_waitid
7860 case TARGET_NR_waitid
:
7864 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
7865 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
7866 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
7868 host_to_target_siginfo(p
, &info
);
7869 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
7874 #ifdef TARGET_NR_creat /* not on alpha */
7875 case TARGET_NR_creat
:
7876 if (!(p
= lock_user_string(arg1
)))
7878 ret
= get_errno(creat(p
, arg2
));
7879 fd_trans_unregister(ret
);
7880 unlock_user(p
, arg1
, 0);
7883 #ifdef TARGET_NR_link
7884 case TARGET_NR_link
:
7887 p
= lock_user_string(arg1
);
7888 p2
= lock_user_string(arg2
);
7890 ret
= -TARGET_EFAULT
;
7892 ret
= get_errno(link(p
, p2
));
7893 unlock_user(p2
, arg2
, 0);
7894 unlock_user(p
, arg1
, 0);
7898 #if defined(TARGET_NR_linkat)
7899 case TARGET_NR_linkat
:
7904 p
= lock_user_string(arg2
);
7905 p2
= lock_user_string(arg4
);
7907 ret
= -TARGET_EFAULT
;
7909 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
7910 unlock_user(p
, arg2
, 0);
7911 unlock_user(p2
, arg4
, 0);
7915 #ifdef TARGET_NR_unlink
7916 case TARGET_NR_unlink
:
7917 if (!(p
= lock_user_string(arg1
)))
7919 ret
= get_errno(unlink(p
));
7920 unlock_user(p
, arg1
, 0);
7923 #if defined(TARGET_NR_unlinkat)
7924 case TARGET_NR_unlinkat
:
7925 if (!(p
= lock_user_string(arg2
)))
7927 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
7928 unlock_user(p
, arg2
, 0);
7931 case TARGET_NR_execve
:
7933 char **argp
, **envp
;
7936 abi_ulong guest_argp
;
7937 abi_ulong guest_envp
;
7944 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
7945 if (get_user_ual(addr
, gp
))
7953 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
7954 if (get_user_ual(addr
, gp
))
7961 argp
= g_new0(char *, argc
+ 1);
7962 envp
= g_new0(char *, envc
+ 1);
7964 for (gp
= guest_argp
, q
= argp
; gp
;
7965 gp
+= sizeof(abi_ulong
), q
++) {
7966 if (get_user_ual(addr
, gp
))
7970 if (!(*q
= lock_user_string(addr
)))
7972 total_size
+= strlen(*q
) + 1;
7976 for (gp
= guest_envp
, q
= envp
; gp
;
7977 gp
+= sizeof(abi_ulong
), q
++) {
7978 if (get_user_ual(addr
, gp
))
7982 if (!(*q
= lock_user_string(addr
)))
7984 total_size
+= strlen(*q
) + 1;
7988 if (!(p
= lock_user_string(arg1
)))
7990 /* Although execve() is not an interruptible syscall it is
7991 * a special case where we must use the safe_syscall wrapper:
7992 * if we allow a signal to happen before we make the host
7993 * syscall then we will 'lose' it, because at the point of
7994 * execve the process leaves QEMU's control. So we use the
7995 * safe syscall wrapper to ensure that we either take the
7996 * signal as a guest signal, or else it does not happen
7997 * before the execve completes and makes it the other
7998 * program's problem.
8000 ret
= get_errno(safe_execve(p
, argp
, envp
));
8001 unlock_user(p
, arg1
, 0);
8006 ret
= -TARGET_EFAULT
;
8009 for (gp
= guest_argp
, q
= argp
; *q
;
8010 gp
+= sizeof(abi_ulong
), q
++) {
8011 if (get_user_ual(addr
, gp
)
8014 unlock_user(*q
, addr
, 0);
8016 for (gp
= guest_envp
, q
= envp
; *q
;
8017 gp
+= sizeof(abi_ulong
), q
++) {
8018 if (get_user_ual(addr
, gp
)
8021 unlock_user(*q
, addr
, 0);
8028 case TARGET_NR_chdir
:
8029 if (!(p
= lock_user_string(arg1
)))
8031 ret
= get_errno(chdir(p
));
8032 unlock_user(p
, arg1
, 0);
8034 #ifdef TARGET_NR_time
8035 case TARGET_NR_time
:
8038 ret
= get_errno(time(&host_time
));
8041 && put_user_sal(host_time
, arg1
))
8046 #ifdef TARGET_NR_mknod
8047 case TARGET_NR_mknod
:
8048 if (!(p
= lock_user_string(arg1
)))
8050 ret
= get_errno(mknod(p
, arg2
, arg3
));
8051 unlock_user(p
, arg1
, 0);
8054 #if defined(TARGET_NR_mknodat)
8055 case TARGET_NR_mknodat
:
8056 if (!(p
= lock_user_string(arg2
)))
8058 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
8059 unlock_user(p
, arg2
, 0);
8062 #ifdef TARGET_NR_chmod
8063 case TARGET_NR_chmod
:
8064 if (!(p
= lock_user_string(arg1
)))
8066 ret
= get_errno(chmod(p
, arg2
));
8067 unlock_user(p
, arg1
, 0);
8070 #ifdef TARGET_NR_break
8071 case TARGET_NR_break
:
8074 #ifdef TARGET_NR_oldstat
8075 case TARGET_NR_oldstat
:
8078 case TARGET_NR_lseek
:
8079 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
8081 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8082 /* Alpha specific */
8083 case TARGET_NR_getxpid
:
8084 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
8085 ret
= get_errno(getpid());
8088 #ifdef TARGET_NR_getpid
8089 case TARGET_NR_getpid
:
8090 ret
= get_errno(getpid());
8093 case TARGET_NR_mount
:
8095 /* need to look at the data field */
8099 p
= lock_user_string(arg1
);
8107 p2
= lock_user_string(arg2
);
8110 unlock_user(p
, arg1
, 0);
8116 p3
= lock_user_string(arg3
);
8119 unlock_user(p
, arg1
, 0);
8121 unlock_user(p2
, arg2
, 0);
8128 /* FIXME - arg5 should be locked, but it isn't clear how to
8129 * do that since it's not guaranteed to be a NULL-terminated
8133 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
8135 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
8137 ret
= get_errno(ret
);
8140 unlock_user(p
, arg1
, 0);
8142 unlock_user(p2
, arg2
, 0);
8144 unlock_user(p3
, arg3
, 0);
8148 #ifdef TARGET_NR_umount
8149 case TARGET_NR_umount
:
8150 if (!(p
= lock_user_string(arg1
)))
8152 ret
= get_errno(umount(p
));
8153 unlock_user(p
, arg1
, 0);
8156 #ifdef TARGET_NR_stime /* not on alpha */
8157 case TARGET_NR_stime
:
8160 if (get_user_sal(host_time
, arg1
))
8162 ret
= get_errno(stime(&host_time
));
8166 case TARGET_NR_ptrace
:
8168 #ifdef TARGET_NR_alarm /* not on alpha */
8169 case TARGET_NR_alarm
:
8173 #ifdef TARGET_NR_oldfstat
8174 case TARGET_NR_oldfstat
:
8177 #ifdef TARGET_NR_pause /* not on alpha */
8178 case TARGET_NR_pause
:
8179 if (!block_signals()) {
8180 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
8182 ret
= -TARGET_EINTR
;
8185 #ifdef TARGET_NR_utime
8186 case TARGET_NR_utime
:
8188 struct utimbuf tbuf
, *host_tbuf
;
8189 struct target_utimbuf
*target_tbuf
;
8191 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
8193 tbuf
.actime
= tswapal(target_tbuf
->actime
);
8194 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
8195 unlock_user_struct(target_tbuf
, arg2
, 0);
8200 if (!(p
= lock_user_string(arg1
)))
8202 ret
= get_errno(utime(p
, host_tbuf
));
8203 unlock_user(p
, arg1
, 0);
8207 #ifdef TARGET_NR_utimes
8208 case TARGET_NR_utimes
:
8210 struct timeval
*tvp
, tv
[2];
8212 if (copy_from_user_timeval(&tv
[0], arg2
)
8213 || copy_from_user_timeval(&tv
[1],
8214 arg2
+ sizeof(struct target_timeval
)))
8220 if (!(p
= lock_user_string(arg1
)))
8222 ret
= get_errno(utimes(p
, tvp
));
8223 unlock_user(p
, arg1
, 0);
8227 #if defined(TARGET_NR_futimesat)
8228 case TARGET_NR_futimesat
:
8230 struct timeval
*tvp
, tv
[2];
8232 if (copy_from_user_timeval(&tv
[0], arg3
)
8233 || copy_from_user_timeval(&tv
[1],
8234 arg3
+ sizeof(struct target_timeval
)))
8240 if (!(p
= lock_user_string(arg2
)))
8242 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
8243 unlock_user(p
, arg2
, 0);
8247 #ifdef TARGET_NR_stty
8248 case TARGET_NR_stty
:
8251 #ifdef TARGET_NR_gtty
8252 case TARGET_NR_gtty
:
8255 #ifdef TARGET_NR_access
8256 case TARGET_NR_access
:
8257 if (!(p
= lock_user_string(arg1
)))
8259 ret
= get_errno(access(path(p
), arg2
));
8260 unlock_user(p
, arg1
, 0);
8263 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8264 case TARGET_NR_faccessat
:
8265 if (!(p
= lock_user_string(arg2
)))
8267 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
8268 unlock_user(p
, arg2
, 0);
8271 #ifdef TARGET_NR_nice /* not on alpha */
8272 case TARGET_NR_nice
:
8273 ret
= get_errno(nice(arg1
));
8276 #ifdef TARGET_NR_ftime
8277 case TARGET_NR_ftime
:
8280 case TARGET_NR_sync
:
8284 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8285 case TARGET_NR_syncfs
:
8286 ret
= get_errno(syncfs(arg1
));
8289 case TARGET_NR_kill
:
8290 ret
= get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
8292 #ifdef TARGET_NR_rename
8293 case TARGET_NR_rename
:
8296 p
= lock_user_string(arg1
);
8297 p2
= lock_user_string(arg2
);
8299 ret
= -TARGET_EFAULT
;
8301 ret
= get_errno(rename(p
, p2
));
8302 unlock_user(p2
, arg2
, 0);
8303 unlock_user(p
, arg1
, 0);
8307 #if defined(TARGET_NR_renameat)
8308 case TARGET_NR_renameat
:
8311 p
= lock_user_string(arg2
);
8312 p2
= lock_user_string(arg4
);
8314 ret
= -TARGET_EFAULT
;
8316 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
8317 unlock_user(p2
, arg4
, 0);
8318 unlock_user(p
, arg2
, 0);
8322 #ifdef TARGET_NR_mkdir
8323 case TARGET_NR_mkdir
:
8324 if (!(p
= lock_user_string(arg1
)))
8326 ret
= get_errno(mkdir(p
, arg2
));
8327 unlock_user(p
, arg1
, 0);
8330 #if defined(TARGET_NR_mkdirat)
8331 case TARGET_NR_mkdirat
:
8332 if (!(p
= lock_user_string(arg2
)))
8334 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
8335 unlock_user(p
, arg2
, 0);
8338 #ifdef TARGET_NR_rmdir
8339 case TARGET_NR_rmdir
:
8340 if (!(p
= lock_user_string(arg1
)))
8342 ret
= get_errno(rmdir(p
));
8343 unlock_user(p
, arg1
, 0);
8347 ret
= get_errno(dup(arg1
));
8349 fd_trans_dup(arg1
, ret
);
8352 #ifdef TARGET_NR_pipe
8353 case TARGET_NR_pipe
:
8354 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
8357 #ifdef TARGET_NR_pipe2
8358 case TARGET_NR_pipe2
:
8359 ret
= do_pipe(cpu_env
, arg1
,
8360 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
8363 case TARGET_NR_times
:
8365 struct target_tms
*tmsp
;
8367 ret
= get_errno(times(&tms
));
8369 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
8372 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
8373 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
8374 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
8375 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
8378 ret
= host_to_target_clock_t(ret
);
8381 #ifdef TARGET_NR_prof
8382 case TARGET_NR_prof
:
8385 #ifdef TARGET_NR_signal
8386 case TARGET_NR_signal
:
8389 case TARGET_NR_acct
:
8391 ret
= get_errno(acct(NULL
));
8393 if (!(p
= lock_user_string(arg1
)))
8395 ret
= get_errno(acct(path(p
)));
8396 unlock_user(p
, arg1
, 0);
8399 #ifdef TARGET_NR_umount2
8400 case TARGET_NR_umount2
:
8401 if (!(p
= lock_user_string(arg1
)))
8403 ret
= get_errno(umount2(p
, arg2
));
8404 unlock_user(p
, arg1
, 0);
8407 #ifdef TARGET_NR_lock
8408 case TARGET_NR_lock
:
8411 case TARGET_NR_ioctl
:
8412 ret
= do_ioctl(arg1
, arg2
, arg3
);
8414 case TARGET_NR_fcntl
:
8415 ret
= do_fcntl(arg1
, arg2
, arg3
);
8417 #ifdef TARGET_NR_mpx
8421 case TARGET_NR_setpgid
:
8422 ret
= get_errno(setpgid(arg1
, arg2
));
8424 #ifdef TARGET_NR_ulimit
8425 case TARGET_NR_ulimit
:
8428 #ifdef TARGET_NR_oldolduname
8429 case TARGET_NR_oldolduname
:
8432 case TARGET_NR_umask
:
8433 ret
= get_errno(umask(arg1
));
8435 case TARGET_NR_chroot
:
8436 if (!(p
= lock_user_string(arg1
)))
8438 ret
= get_errno(chroot(p
));
8439 unlock_user(p
, arg1
, 0);
8441 #ifdef TARGET_NR_ustat
8442 case TARGET_NR_ustat
:
8445 #ifdef TARGET_NR_dup2
8446 case TARGET_NR_dup2
:
8447 ret
= get_errno(dup2(arg1
, arg2
));
8449 fd_trans_dup(arg1
, arg2
);
8453 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8454 case TARGET_NR_dup3
:
8455 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
8457 fd_trans_dup(arg1
, arg2
);
8461 #ifdef TARGET_NR_getppid /* not on alpha */
8462 case TARGET_NR_getppid
:
8463 ret
= get_errno(getppid());
8466 #ifdef TARGET_NR_getpgrp
8467 case TARGET_NR_getpgrp
:
8468 ret
= get_errno(getpgrp());
8471 case TARGET_NR_setsid
:
8472 ret
= get_errno(setsid());
8474 #ifdef TARGET_NR_sigaction
8475 case TARGET_NR_sigaction
:
8477 #if defined(TARGET_ALPHA)
8478 struct target_sigaction act
, oact
, *pact
= 0;
8479 struct target_old_sigaction
*old_act
;
8481 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8483 act
._sa_handler
= old_act
->_sa_handler
;
8484 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8485 act
.sa_flags
= old_act
->sa_flags
;
8486 act
.sa_restorer
= 0;
8487 unlock_user_struct(old_act
, arg2
, 0);
8490 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8491 if (!is_error(ret
) && arg3
) {
8492 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8494 old_act
->_sa_handler
= oact
._sa_handler
;
8495 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8496 old_act
->sa_flags
= oact
.sa_flags
;
8497 unlock_user_struct(old_act
, arg3
, 1);
8499 #elif defined(TARGET_MIPS)
8500 struct target_sigaction act
, oact
, *pact
, *old_act
;
8503 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8505 act
._sa_handler
= old_act
->_sa_handler
;
8506 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
8507 act
.sa_flags
= old_act
->sa_flags
;
8508 unlock_user_struct(old_act
, arg2
, 0);
8514 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8516 if (!is_error(ret
) && arg3
) {
8517 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8519 old_act
->_sa_handler
= oact
._sa_handler
;
8520 old_act
->sa_flags
= oact
.sa_flags
;
8521 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
8522 old_act
->sa_mask
.sig
[1] = 0;
8523 old_act
->sa_mask
.sig
[2] = 0;
8524 old_act
->sa_mask
.sig
[3] = 0;
8525 unlock_user_struct(old_act
, arg3
, 1);
8528 struct target_old_sigaction
*old_act
;
8529 struct target_sigaction act
, oact
, *pact
;
8531 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8533 act
._sa_handler
= old_act
->_sa_handler
;
8534 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8535 act
.sa_flags
= old_act
->sa_flags
;
8536 act
.sa_restorer
= old_act
->sa_restorer
;
8537 unlock_user_struct(old_act
, arg2
, 0);
8542 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8543 if (!is_error(ret
) && arg3
) {
8544 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8546 old_act
->_sa_handler
= oact
._sa_handler
;
8547 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8548 old_act
->sa_flags
= oact
.sa_flags
;
8549 old_act
->sa_restorer
= oact
.sa_restorer
;
8550 unlock_user_struct(old_act
, arg3
, 1);
8556 case TARGET_NR_rt_sigaction
:
8558 #if defined(TARGET_ALPHA)
8559 struct target_sigaction act
, oact
, *pact
= 0;
8560 struct target_rt_sigaction
*rt_act
;
8562 if (arg4
!= sizeof(target_sigset_t
)) {
8563 ret
= -TARGET_EINVAL
;
8567 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
8569 act
._sa_handler
= rt_act
->_sa_handler
;
8570 act
.sa_mask
= rt_act
->sa_mask
;
8571 act
.sa_flags
= rt_act
->sa_flags
;
8572 act
.sa_restorer
= arg5
;
8573 unlock_user_struct(rt_act
, arg2
, 0);
8576 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8577 if (!is_error(ret
) && arg3
) {
8578 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
8580 rt_act
->_sa_handler
= oact
._sa_handler
;
8581 rt_act
->sa_mask
= oact
.sa_mask
;
8582 rt_act
->sa_flags
= oact
.sa_flags
;
8583 unlock_user_struct(rt_act
, arg3
, 1);
8586 struct target_sigaction
*act
;
8587 struct target_sigaction
*oact
;
8589 if (arg4
!= sizeof(target_sigset_t
)) {
8590 ret
= -TARGET_EINVAL
;
8594 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
8599 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
8600 ret
= -TARGET_EFAULT
;
8601 goto rt_sigaction_fail
;
8605 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
8608 unlock_user_struct(act
, arg2
, 0);
8610 unlock_user_struct(oact
, arg3
, 1);
8614 #ifdef TARGET_NR_sgetmask /* not on alpha */
8615 case TARGET_NR_sgetmask
:
8618 abi_ulong target_set
;
8619 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8621 host_to_target_old_sigset(&target_set
, &cur_set
);
8627 #ifdef TARGET_NR_ssetmask /* not on alpha */
8628 case TARGET_NR_ssetmask
:
8631 abi_ulong target_set
= arg1
;
8632 target_to_host_old_sigset(&set
, &target_set
);
8633 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
8635 host_to_target_old_sigset(&target_set
, &oset
);
8641 #ifdef TARGET_NR_sigprocmask
8642 case TARGET_NR_sigprocmask
:
8644 #if defined(TARGET_ALPHA)
8645 sigset_t set
, oldset
;
8650 case TARGET_SIG_BLOCK
:
8653 case TARGET_SIG_UNBLOCK
:
8656 case TARGET_SIG_SETMASK
:
8660 ret
= -TARGET_EINVAL
;
8664 target_to_host_old_sigset(&set
, &mask
);
8666 ret
= do_sigprocmask(how
, &set
, &oldset
);
8667 if (!is_error(ret
)) {
8668 host_to_target_old_sigset(&mask
, &oldset
);
8670 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
8673 sigset_t set
, oldset
, *set_ptr
;
8678 case TARGET_SIG_BLOCK
:
8681 case TARGET_SIG_UNBLOCK
:
8684 case TARGET_SIG_SETMASK
:
8688 ret
= -TARGET_EINVAL
;
8691 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8693 target_to_host_old_sigset(&set
, p
);
8694 unlock_user(p
, arg2
, 0);
8700 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8701 if (!is_error(ret
) && arg3
) {
8702 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8704 host_to_target_old_sigset(p
, &oldset
);
8705 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8711 case TARGET_NR_rt_sigprocmask
:
8714 sigset_t set
, oldset
, *set_ptr
;
8716 if (arg4
!= sizeof(target_sigset_t
)) {
8717 ret
= -TARGET_EINVAL
;
8723 case TARGET_SIG_BLOCK
:
8726 case TARGET_SIG_UNBLOCK
:
8729 case TARGET_SIG_SETMASK
:
8733 ret
= -TARGET_EINVAL
;
8736 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8738 target_to_host_sigset(&set
, p
);
8739 unlock_user(p
, arg2
, 0);
8745 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8746 if (!is_error(ret
) && arg3
) {
8747 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8749 host_to_target_sigset(p
, &oldset
);
8750 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8754 #ifdef TARGET_NR_sigpending
8755 case TARGET_NR_sigpending
:
8758 ret
= get_errno(sigpending(&set
));
8759 if (!is_error(ret
)) {
8760 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8762 host_to_target_old_sigset(p
, &set
);
8763 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8768 case TARGET_NR_rt_sigpending
:
8772 /* Yes, this check is >, not != like most. We follow the kernel's
8773 * logic and it does it like this because it implements
8774 * NR_sigpending through the same code path, and in that case
8775 * the old_sigset_t is smaller in size.
8777 if (arg2
> sizeof(target_sigset_t
)) {
8778 ret
= -TARGET_EINVAL
;
8782 ret
= get_errno(sigpending(&set
));
8783 if (!is_error(ret
)) {
8784 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8786 host_to_target_sigset(p
, &set
);
8787 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8791 #ifdef TARGET_NR_sigsuspend
8792 case TARGET_NR_sigsuspend
:
8794 TaskState
*ts
= cpu
->opaque
;
8795 #if defined(TARGET_ALPHA)
8796 abi_ulong mask
= arg1
;
8797 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
8799 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8801 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
8802 unlock_user(p
, arg1
, 0);
8804 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8806 if (ret
!= -TARGET_ERESTARTSYS
) {
8807 ts
->in_sigsuspend
= 1;
8812 case TARGET_NR_rt_sigsuspend
:
8814 TaskState
*ts
= cpu
->opaque
;
8816 if (arg2
!= sizeof(target_sigset_t
)) {
8817 ret
= -TARGET_EINVAL
;
8820 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8822 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
8823 unlock_user(p
, arg1
, 0);
8824 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8826 if (ret
!= -TARGET_ERESTARTSYS
) {
8827 ts
->in_sigsuspend
= 1;
8831 case TARGET_NR_rt_sigtimedwait
:
8834 struct timespec uts
, *puts
;
8837 if (arg4
!= sizeof(target_sigset_t
)) {
8838 ret
= -TARGET_EINVAL
;
8842 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8844 target_to_host_sigset(&set
, p
);
8845 unlock_user(p
, arg1
, 0);
8848 target_to_host_timespec(puts
, arg3
);
8852 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
8854 if (!is_error(ret
)) {
8856 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
8861 host_to_target_siginfo(p
, &uinfo
);
8862 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
8864 ret
= host_to_target_signal(ret
);
8868 case TARGET_NR_rt_sigqueueinfo
:
8872 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
8876 target_to_host_siginfo(&uinfo
, p
);
8877 unlock_user(p
, arg3
, 0);
8878 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
8881 case TARGET_NR_rt_tgsigqueueinfo
:
8885 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
8889 target_to_host_siginfo(&uinfo
, p
);
8890 unlock_user(p
, arg4
, 0);
8891 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, arg3
, &uinfo
));
8894 #ifdef TARGET_NR_sigreturn
8895 case TARGET_NR_sigreturn
:
8896 if (block_signals()) {
8897 ret
= -TARGET_ERESTARTSYS
;
8899 ret
= do_sigreturn(cpu_env
);
8903 case TARGET_NR_rt_sigreturn
:
8904 if (block_signals()) {
8905 ret
= -TARGET_ERESTARTSYS
;
8907 ret
= do_rt_sigreturn(cpu_env
);
8910 case TARGET_NR_sethostname
:
8911 if (!(p
= lock_user_string(arg1
)))
8913 ret
= get_errno(sethostname(p
, arg2
));
8914 unlock_user(p
, arg1
, 0);
8916 case TARGET_NR_setrlimit
:
8918 int resource
= target_to_host_resource(arg1
);
8919 struct target_rlimit
*target_rlim
;
8921 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
8923 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
8924 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
8925 unlock_user_struct(target_rlim
, arg2
, 0);
8926 ret
= get_errno(setrlimit(resource
, &rlim
));
8929 case TARGET_NR_getrlimit
:
8931 int resource
= target_to_host_resource(arg1
);
8932 struct target_rlimit
*target_rlim
;
8935 ret
= get_errno(getrlimit(resource
, &rlim
));
8936 if (!is_error(ret
)) {
8937 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
8939 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
8940 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
8941 unlock_user_struct(target_rlim
, arg2
, 1);
8945 case TARGET_NR_getrusage
:
8947 struct rusage rusage
;
8948 ret
= get_errno(getrusage(arg1
, &rusage
));
8949 if (!is_error(ret
)) {
8950 ret
= host_to_target_rusage(arg2
, &rusage
);
8954 case TARGET_NR_gettimeofday
:
8957 ret
= get_errno(gettimeofday(&tv
, NULL
));
8958 if (!is_error(ret
)) {
8959 if (copy_to_user_timeval(arg1
, &tv
))
8964 case TARGET_NR_settimeofday
:
8966 struct timeval tv
, *ptv
= NULL
;
8967 struct timezone tz
, *ptz
= NULL
;
8970 if (copy_from_user_timeval(&tv
, arg1
)) {
8977 if (copy_from_user_timezone(&tz
, arg2
)) {
8983 ret
= get_errno(settimeofday(ptv
, ptz
));
8986 #if defined(TARGET_NR_select)
8987 case TARGET_NR_select
:
8988 #if defined(TARGET_WANT_NI_OLD_SELECT)
8989 /* some architectures used to have old_select here
8990 * but now ENOSYS it.
8992 ret
= -TARGET_ENOSYS
;
8993 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8994 ret
= do_old_select(arg1
);
8996 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9000 #ifdef TARGET_NR_pselect6
9001 case TARGET_NR_pselect6
:
9003 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
9004 fd_set rfds
, wfds
, efds
;
9005 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
9006 struct timespec ts
, *ts_ptr
;
9009 * The 6th arg is actually two args smashed together,
9010 * so we cannot use the C library.
9018 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
9019 target_sigset_t
*target_sigset
;
9027 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
9031 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
9035 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
9041 * This takes a timespec, and not a timeval, so we cannot
9042 * use the do_select() helper ...
9045 if (target_to_host_timespec(&ts
, ts_addr
)) {
9053 /* Extract the two packed args for the sigset */
9056 sig
.size
= SIGSET_T_SIZE
;
9058 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
9062 arg_sigset
= tswapal(arg7
[0]);
9063 arg_sigsize
= tswapal(arg7
[1]);
9064 unlock_user(arg7
, arg6
, 0);
9068 if (arg_sigsize
!= sizeof(*target_sigset
)) {
9069 /* Like the kernel, we enforce correct size sigsets */
9070 ret
= -TARGET_EINVAL
;
9073 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
9074 sizeof(*target_sigset
), 1);
9075 if (!target_sigset
) {
9078 target_to_host_sigset(&set
, target_sigset
);
9079 unlock_user(target_sigset
, arg_sigset
, 0);
9087 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
9090 if (!is_error(ret
)) {
9091 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
9093 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
9095 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
9098 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
9104 #ifdef TARGET_NR_symlink
9105 case TARGET_NR_symlink
:
9108 p
= lock_user_string(arg1
);
9109 p2
= lock_user_string(arg2
);
9111 ret
= -TARGET_EFAULT
;
9113 ret
= get_errno(symlink(p
, p2
));
9114 unlock_user(p2
, arg2
, 0);
9115 unlock_user(p
, arg1
, 0);
9119 #if defined(TARGET_NR_symlinkat)
9120 case TARGET_NR_symlinkat
:
9123 p
= lock_user_string(arg1
);
9124 p2
= lock_user_string(arg3
);
9126 ret
= -TARGET_EFAULT
;
9128 ret
= get_errno(symlinkat(p
, arg2
, p2
));
9129 unlock_user(p2
, arg3
, 0);
9130 unlock_user(p
, arg1
, 0);
9134 #ifdef TARGET_NR_oldlstat
9135 case TARGET_NR_oldlstat
:
9138 #ifdef TARGET_NR_readlink
9139 case TARGET_NR_readlink
:
9142 p
= lock_user_string(arg1
);
9143 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9145 ret
= -TARGET_EFAULT
;
9147 /* Short circuit this for the magic exe check. */
9148 ret
= -TARGET_EINVAL
;
9149 } else if (is_proc_myself((const char *)p
, "exe")) {
9150 char real
[PATH_MAX
], *temp
;
9151 temp
= realpath(exec_path
, real
);
9152 /* Return value is # of bytes that we wrote to the buffer. */
9154 ret
= get_errno(-1);
9156 /* Don't worry about sign mismatch as earlier mapping
9157 * logic would have thrown a bad address error. */
9158 ret
= MIN(strlen(real
), arg3
);
9159 /* We cannot NUL terminate the string. */
9160 memcpy(p2
, real
, ret
);
9163 ret
= get_errno(readlink(path(p
), p2
, arg3
));
9165 unlock_user(p2
, arg2
, ret
);
9166 unlock_user(p
, arg1
, 0);
9170 #if defined(TARGET_NR_readlinkat)
9171 case TARGET_NR_readlinkat
:
9174 p
= lock_user_string(arg2
);
9175 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9177 ret
= -TARGET_EFAULT
;
9178 } else if (is_proc_myself((const char *)p
, "exe")) {
9179 char real
[PATH_MAX
], *temp
;
9180 temp
= realpath(exec_path
, real
);
9181 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
9182 snprintf((char *)p2
, arg4
, "%s", real
);
9184 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
9186 unlock_user(p2
, arg3
, ret
);
9187 unlock_user(p
, arg2
, 0);
9191 #ifdef TARGET_NR_uselib
9192 case TARGET_NR_uselib
:
9195 #ifdef TARGET_NR_swapon
9196 case TARGET_NR_swapon
:
9197 if (!(p
= lock_user_string(arg1
)))
9199 ret
= get_errno(swapon(p
, arg2
));
9200 unlock_user(p
, arg1
, 0);
9203 case TARGET_NR_reboot
:
9204 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
9205 /* arg4 must be ignored in all other cases */
9206 p
= lock_user_string(arg4
);
9210 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
9211 unlock_user(p
, arg4
, 0);
9213 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
9216 #ifdef TARGET_NR_readdir
9217 case TARGET_NR_readdir
:
9220 #ifdef TARGET_NR_mmap
9221 case TARGET_NR_mmap
:
9222 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9223 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9224 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9225 || defined(TARGET_S390X)
9228 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
9229 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
9237 unlock_user(v
, arg1
, 0);
9238 ret
= get_errno(target_mmap(v1
, v2
, v3
,
9239 target_to_host_bitmask(v4
, mmap_flags_tbl
),
9243 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9244 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9250 #ifdef TARGET_NR_mmap2
9251 case TARGET_NR_mmap2
:
9253 #define MMAP_SHIFT 12
9255 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9256 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9258 arg6
<< MMAP_SHIFT
));
9261 case TARGET_NR_munmap
:
9262 ret
= get_errno(target_munmap(arg1
, arg2
));
9264 case TARGET_NR_mprotect
:
9266 TaskState
*ts
= cpu
->opaque
;
9267 /* Special hack to detect libc making the stack executable. */
9268 if ((arg3
& PROT_GROWSDOWN
)
9269 && arg1
>= ts
->info
->stack_limit
9270 && arg1
<= ts
->info
->start_stack
) {
9271 arg3
&= ~PROT_GROWSDOWN
;
9272 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
9273 arg1
= ts
->info
->stack_limit
;
9276 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
9278 #ifdef TARGET_NR_mremap
9279 case TARGET_NR_mremap
:
9280 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
9283 /* ??? msync/mlock/munlock are broken for softmmu. */
9284 #ifdef TARGET_NR_msync
9285 case TARGET_NR_msync
:
9286 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
9289 #ifdef TARGET_NR_mlock
9290 case TARGET_NR_mlock
:
9291 ret
= get_errno(mlock(g2h(arg1
), arg2
));
9294 #ifdef TARGET_NR_munlock
9295 case TARGET_NR_munlock
:
9296 ret
= get_errno(munlock(g2h(arg1
), arg2
));
9299 #ifdef TARGET_NR_mlockall
9300 case TARGET_NR_mlockall
:
9301 ret
= get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
9304 #ifdef TARGET_NR_munlockall
9305 case TARGET_NR_munlockall
:
9306 ret
= get_errno(munlockall());
9309 case TARGET_NR_truncate
:
9310 if (!(p
= lock_user_string(arg1
)))
9312 ret
= get_errno(truncate(p
, arg2
));
9313 unlock_user(p
, arg1
, 0);
9315 case TARGET_NR_ftruncate
:
9316 ret
= get_errno(ftruncate(arg1
, arg2
));
9318 case TARGET_NR_fchmod
:
9319 ret
= get_errno(fchmod(arg1
, arg2
));
9321 #if defined(TARGET_NR_fchmodat)
9322 case TARGET_NR_fchmodat
:
9323 if (!(p
= lock_user_string(arg2
)))
9325 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
9326 unlock_user(p
, arg2
, 0);
9329 case TARGET_NR_getpriority
:
9330 /* Note that negative values are valid for getpriority, so we must
9331 differentiate based on errno settings. */
9333 ret
= getpriority(arg1
, arg2
);
9334 if (ret
== -1 && errno
!= 0) {
9335 ret
= -host_to_target_errno(errno
);
9339 /* Return value is the unbiased priority. Signal no error. */
9340 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
9342 /* Return value is a biased priority to avoid negative numbers. */
9346 case TARGET_NR_setpriority
:
9347 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
9349 #ifdef TARGET_NR_profil
9350 case TARGET_NR_profil
:
9353 case TARGET_NR_statfs
:
9354 if (!(p
= lock_user_string(arg1
)))
9356 ret
= get_errno(statfs(path(p
), &stfs
));
9357 unlock_user(p
, arg1
, 0);
9359 if (!is_error(ret
)) {
9360 struct target_statfs
*target_stfs
;
9362 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
9364 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9365 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9366 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9367 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9368 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9369 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9370 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9371 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9372 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9373 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9374 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9375 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9376 unlock_user_struct(target_stfs
, arg2
, 1);
9379 case TARGET_NR_fstatfs
:
9380 ret
= get_errno(fstatfs(arg1
, &stfs
));
9381 goto convert_statfs
;
9382 #ifdef TARGET_NR_statfs64
9383 case TARGET_NR_statfs64
:
9384 if (!(p
= lock_user_string(arg1
)))
9386 ret
= get_errno(statfs(path(p
), &stfs
));
9387 unlock_user(p
, arg1
, 0);
9389 if (!is_error(ret
)) {
9390 struct target_statfs64
*target_stfs
;
9392 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
9394 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9395 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9396 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9397 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9398 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9399 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9400 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9401 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9402 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9403 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9404 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9405 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9406 unlock_user_struct(target_stfs
, arg3
, 1);
9409 case TARGET_NR_fstatfs64
:
9410 ret
= get_errno(fstatfs(arg1
, &stfs
));
9411 goto convert_statfs64
;
9413 #ifdef TARGET_NR_ioperm
9414 case TARGET_NR_ioperm
:
9417 #ifdef TARGET_NR_socketcall
9418 case TARGET_NR_socketcall
:
9419 ret
= do_socketcall(arg1
, arg2
);
9422 #ifdef TARGET_NR_accept
9423 case TARGET_NR_accept
:
9424 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
9427 #ifdef TARGET_NR_accept4
9428 case TARGET_NR_accept4
:
9429 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
9432 #ifdef TARGET_NR_bind
9433 case TARGET_NR_bind
:
9434 ret
= do_bind(arg1
, arg2
, arg3
);
9437 #ifdef TARGET_NR_connect
9438 case TARGET_NR_connect
:
9439 ret
= do_connect(arg1
, arg2
, arg3
);
9442 #ifdef TARGET_NR_getpeername
9443 case TARGET_NR_getpeername
:
9444 ret
= do_getpeername(arg1
, arg2
, arg3
);
9447 #ifdef TARGET_NR_getsockname
9448 case TARGET_NR_getsockname
:
9449 ret
= do_getsockname(arg1
, arg2
, arg3
);
9452 #ifdef TARGET_NR_getsockopt
9453 case TARGET_NR_getsockopt
:
9454 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
9457 #ifdef TARGET_NR_listen
9458 case TARGET_NR_listen
:
9459 ret
= get_errno(listen(arg1
, arg2
));
9462 #ifdef TARGET_NR_recv
9463 case TARGET_NR_recv
:
9464 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
9467 #ifdef TARGET_NR_recvfrom
9468 case TARGET_NR_recvfrom
:
9469 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9472 #ifdef TARGET_NR_recvmsg
9473 case TARGET_NR_recvmsg
:
9474 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
9477 #ifdef TARGET_NR_send
9478 case TARGET_NR_send
:
9479 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
9482 #ifdef TARGET_NR_sendmsg
9483 case TARGET_NR_sendmsg
:
9484 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
9487 #ifdef TARGET_NR_sendmmsg
9488 case TARGET_NR_sendmmsg
:
9489 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
9491 case TARGET_NR_recvmmsg
:
9492 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
9495 #ifdef TARGET_NR_sendto
9496 case TARGET_NR_sendto
:
9497 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9500 #ifdef TARGET_NR_shutdown
9501 case TARGET_NR_shutdown
:
9502 ret
= get_errno(shutdown(arg1
, arg2
));
9505 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9506 case TARGET_NR_getrandom
:
9507 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9511 ret
= get_errno(getrandom(p
, arg2
, arg3
));
9512 unlock_user(p
, arg1
, ret
);
9515 #ifdef TARGET_NR_socket
9516 case TARGET_NR_socket
:
9517 ret
= do_socket(arg1
, arg2
, arg3
);
9520 #ifdef TARGET_NR_socketpair
9521 case TARGET_NR_socketpair
:
9522 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
9525 #ifdef TARGET_NR_setsockopt
9526 case TARGET_NR_setsockopt
:
9527 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
9530 #if defined(TARGET_NR_syslog)
9531 case TARGET_NR_syslog
:
9536 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
9537 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
9538 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
9539 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
9540 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
9541 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
9542 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
9543 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
9545 ret
= get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
9548 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
9549 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
9550 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
9552 ret
= -TARGET_EINVAL
;
9560 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9562 ret
= -TARGET_EFAULT
;
9565 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
9566 unlock_user(p
, arg2
, arg3
);
9576 case TARGET_NR_setitimer
:
9578 struct itimerval value
, ovalue
, *pvalue
;
9582 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
9583 || copy_from_user_timeval(&pvalue
->it_value
,
9584 arg2
+ sizeof(struct target_timeval
)))
9589 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
9590 if (!is_error(ret
) && arg3
) {
9591 if (copy_to_user_timeval(arg3
,
9592 &ovalue
.it_interval
)
9593 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
9599 case TARGET_NR_getitimer
:
9601 struct itimerval value
;
9603 ret
= get_errno(getitimer(arg1
, &value
));
9604 if (!is_error(ret
) && arg2
) {
9605 if (copy_to_user_timeval(arg2
,
9607 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
9613 #ifdef TARGET_NR_stat
9614 case TARGET_NR_stat
:
9615 if (!(p
= lock_user_string(arg1
)))
9617 ret
= get_errno(stat(path(p
), &st
));
9618 unlock_user(p
, arg1
, 0);
9621 #ifdef TARGET_NR_lstat
9622 case TARGET_NR_lstat
:
9623 if (!(p
= lock_user_string(arg1
)))
9625 ret
= get_errno(lstat(path(p
), &st
));
9626 unlock_user(p
, arg1
, 0);
9629 case TARGET_NR_fstat
:
9631 ret
= get_errno(fstat(arg1
, &st
));
9632 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9635 if (!is_error(ret
)) {
9636 struct target_stat
*target_st
;
9638 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
9640 memset(target_st
, 0, sizeof(*target_st
));
9641 __put_user(st
.st_dev
, &target_st
->st_dev
);
9642 __put_user(st
.st_ino
, &target_st
->st_ino
);
9643 __put_user(st
.st_mode
, &target_st
->st_mode
);
9644 __put_user(st
.st_uid
, &target_st
->st_uid
);
9645 __put_user(st
.st_gid
, &target_st
->st_gid
);
9646 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
9647 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
9648 __put_user(st
.st_size
, &target_st
->st_size
);
9649 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
9650 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
9651 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
9652 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
9653 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
9654 unlock_user_struct(target_st
, arg2
, 1);
9658 #ifdef TARGET_NR_olduname
9659 case TARGET_NR_olduname
:
9662 #ifdef TARGET_NR_iopl
9663 case TARGET_NR_iopl
:
9666 case TARGET_NR_vhangup
:
9667 ret
= get_errno(vhangup());
9669 #ifdef TARGET_NR_idle
9670 case TARGET_NR_idle
:
9673 #ifdef TARGET_NR_syscall
9674 case TARGET_NR_syscall
:
9675 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
9676 arg6
, arg7
, arg8
, 0);
9679 case TARGET_NR_wait4
:
9682 abi_long status_ptr
= arg2
;
9683 struct rusage rusage
, *rusage_ptr
;
9684 abi_ulong target_rusage
= arg4
;
9685 abi_long rusage_err
;
9687 rusage_ptr
= &rusage
;
9690 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
9691 if (!is_error(ret
)) {
9692 if (status_ptr
&& ret
) {
9693 status
= host_to_target_waitstatus(status
);
9694 if (put_user_s32(status
, status_ptr
))
9697 if (target_rusage
) {
9698 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
9706 #ifdef TARGET_NR_swapoff
9707 case TARGET_NR_swapoff
:
9708 if (!(p
= lock_user_string(arg1
)))
9710 ret
= get_errno(swapoff(p
));
9711 unlock_user(p
, arg1
, 0);
9714 case TARGET_NR_sysinfo
:
9716 struct target_sysinfo
*target_value
;
9717 struct sysinfo value
;
9718 ret
= get_errno(sysinfo(&value
));
9719 if (!is_error(ret
) && arg1
)
9721 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
9723 __put_user(value
.uptime
, &target_value
->uptime
);
9724 __put_user(value
.loads
[0], &target_value
->loads
[0]);
9725 __put_user(value
.loads
[1], &target_value
->loads
[1]);
9726 __put_user(value
.loads
[2], &target_value
->loads
[2]);
9727 __put_user(value
.totalram
, &target_value
->totalram
);
9728 __put_user(value
.freeram
, &target_value
->freeram
);
9729 __put_user(value
.sharedram
, &target_value
->sharedram
);
9730 __put_user(value
.bufferram
, &target_value
->bufferram
);
9731 __put_user(value
.totalswap
, &target_value
->totalswap
);
9732 __put_user(value
.freeswap
, &target_value
->freeswap
);
9733 __put_user(value
.procs
, &target_value
->procs
);
9734 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
9735 __put_user(value
.freehigh
, &target_value
->freehigh
);
9736 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
9737 unlock_user_struct(target_value
, arg1
, 1);
9741 #ifdef TARGET_NR_ipc
9743 ret
= do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9746 #ifdef TARGET_NR_semget
9747 case TARGET_NR_semget
:
9748 ret
= get_errno(semget(arg1
, arg2
, arg3
));
9751 #ifdef TARGET_NR_semop
9752 case TARGET_NR_semop
:
9753 ret
= do_semop(arg1
, arg2
, arg3
);
9756 #ifdef TARGET_NR_semctl
9757 case TARGET_NR_semctl
:
9758 ret
= do_semctl(arg1
, arg2
, arg3
, arg4
);
9761 #ifdef TARGET_NR_msgctl
9762 case TARGET_NR_msgctl
:
9763 ret
= do_msgctl(arg1
, arg2
, arg3
);
9766 #ifdef TARGET_NR_msgget
9767 case TARGET_NR_msgget
:
9768 ret
= get_errno(msgget(arg1
, arg2
));
9771 #ifdef TARGET_NR_msgrcv
9772 case TARGET_NR_msgrcv
:
9773 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
9776 #ifdef TARGET_NR_msgsnd
9777 case TARGET_NR_msgsnd
:
9778 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
9781 #ifdef TARGET_NR_shmget
9782 case TARGET_NR_shmget
:
9783 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
9786 #ifdef TARGET_NR_shmctl
9787 case TARGET_NR_shmctl
:
9788 ret
= do_shmctl(arg1
, arg2
, arg3
);
9791 #ifdef TARGET_NR_shmat
9792 case TARGET_NR_shmat
:
9793 ret
= do_shmat(cpu_env
, arg1
, arg2
, arg3
);
9796 #ifdef TARGET_NR_shmdt
9797 case TARGET_NR_shmdt
:
9798 ret
= do_shmdt(arg1
);
9801 case TARGET_NR_fsync
:
9802 ret
= get_errno(fsync(arg1
));
9804 case TARGET_NR_clone
:
9805 /* Linux manages to have three different orderings for its
9806 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9807 * match the kernel's CONFIG_CLONE_* settings.
9808 * Microblaze is further special in that it uses a sixth
9809 * implicit argument to clone for the TLS pointer.
9811 #if defined(TARGET_MICROBLAZE)
9812 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
9813 #elif defined(TARGET_CLONE_BACKWARDS)
9814 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
9815 #elif defined(TARGET_CLONE_BACKWARDS2)
9816 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
9818 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
9821 #ifdef __NR_exit_group
9822 /* new thread calls */
9823 case TARGET_NR_exit_group
:
9827 gdb_exit(cpu_env
, arg1
);
9828 ret
= get_errno(exit_group(arg1
));
9831 case TARGET_NR_setdomainname
:
9832 if (!(p
= lock_user_string(arg1
)))
9834 ret
= get_errno(setdomainname(p
, arg2
));
9835 unlock_user(p
, arg1
, 0);
9837 case TARGET_NR_uname
:
9838 /* no need to transcode because we use the linux syscall */
9840 struct new_utsname
* buf
;
9842 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
9844 ret
= get_errno(sys_uname(buf
));
9845 if (!is_error(ret
)) {
9846 /* Overwrite the native machine name with whatever is being
9848 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
9849 /* Allow the user to override the reported release. */
9850 if (qemu_uname_release
&& *qemu_uname_release
) {
9851 g_strlcpy(buf
->release
, qemu_uname_release
,
9852 sizeof(buf
->release
));
9855 unlock_user_struct(buf
, arg1
, 1);
9859 case TARGET_NR_modify_ldt
:
9860 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
9862 #if !defined(TARGET_X86_64)
9863 case TARGET_NR_vm86old
:
9865 case TARGET_NR_vm86
:
9866 ret
= do_vm86(cpu_env
, arg1
, arg2
);
9870 case TARGET_NR_adjtimex
:
9872 struct timex host_buf
;
9874 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
9877 ret
= get_errno(adjtimex(&host_buf
));
9878 if (!is_error(ret
)) {
9879 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
9885 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9886 case TARGET_NR_clock_adjtime
:
9888 struct timex htx
, *phtx
= &htx
;
9890 if (target_to_host_timex(phtx
, arg2
) != 0) {
9893 ret
= get_errno(clock_adjtime(arg1
, phtx
));
9894 if (!is_error(ret
) && phtx
) {
9895 if (host_to_target_timex(arg2
, phtx
) != 0) {
9902 #ifdef TARGET_NR_create_module
9903 case TARGET_NR_create_module
:
9905 case TARGET_NR_init_module
:
9906 case TARGET_NR_delete_module
:
9907 #ifdef TARGET_NR_get_kernel_syms
9908 case TARGET_NR_get_kernel_syms
:
9911 case TARGET_NR_quotactl
:
9913 case TARGET_NR_getpgid
:
9914 ret
= get_errno(getpgid(arg1
));
9916 case TARGET_NR_fchdir
:
9917 ret
= get_errno(fchdir(arg1
));
9919 #ifdef TARGET_NR_bdflush /* not on x86_64 */
9920 case TARGET_NR_bdflush
:
9923 #ifdef TARGET_NR_sysfs
9924 case TARGET_NR_sysfs
:
9927 case TARGET_NR_personality
:
9928 ret
= get_errno(personality(arg1
));
9930 #ifdef TARGET_NR_afs_syscall
9931 case TARGET_NR_afs_syscall
:
9934 #ifdef TARGET_NR__llseek /* Not on alpha */
9935 case TARGET_NR__llseek
:
9938 #if !defined(__NR_llseek)
9939 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
9941 ret
= get_errno(res
);
9946 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
9948 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
9954 #ifdef TARGET_NR_getdents
9955 case TARGET_NR_getdents
:
9956 #ifdef __NR_getdents
9957 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9959 struct target_dirent
*target_dirp
;
9960 struct linux_dirent
*dirp
;
9961 abi_long count
= arg3
;
9963 dirp
= g_try_malloc(count
);
9965 ret
= -TARGET_ENOMEM
;
9969 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9970 if (!is_error(ret
)) {
9971 struct linux_dirent
*de
;
9972 struct target_dirent
*tde
;
9974 int reclen
, treclen
;
9975 int count1
, tnamelen
;
9979 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9983 reclen
= de
->d_reclen
;
9984 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
9985 assert(tnamelen
>= 0);
9986 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
9987 assert(count1
+ treclen
<= count
);
9988 tde
->d_reclen
= tswap16(treclen
);
9989 tde
->d_ino
= tswapal(de
->d_ino
);
9990 tde
->d_off
= tswapal(de
->d_off
);
9991 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
9992 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9994 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9998 unlock_user(target_dirp
, arg2
, ret
);
10004 struct linux_dirent
*dirp
;
10005 abi_long count
= arg3
;
10007 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10009 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
10010 if (!is_error(ret
)) {
10011 struct linux_dirent
*de
;
10016 reclen
= de
->d_reclen
;
10019 de
->d_reclen
= tswap16(reclen
);
10020 tswapls(&de
->d_ino
);
10021 tswapls(&de
->d_off
);
10022 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
10026 unlock_user(dirp
, arg2
, ret
);
10030 /* Implement getdents in terms of getdents64 */
10032 struct linux_dirent64
*dirp
;
10033 abi_long count
= arg3
;
10035 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
10039 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10040 if (!is_error(ret
)) {
10041 /* Convert the dirent64 structs to target dirent. We do this
10042 * in-place, since we can guarantee that a target_dirent is no
10043 * larger than a dirent64; however this means we have to be
10044 * careful to read everything before writing in the new format.
10046 struct linux_dirent64
*de
;
10047 struct target_dirent
*tde
;
10052 tde
= (struct target_dirent
*)dirp
;
10054 int namelen
, treclen
;
10055 int reclen
= de
->d_reclen
;
10056 uint64_t ino
= de
->d_ino
;
10057 int64_t off
= de
->d_off
;
10058 uint8_t type
= de
->d_type
;
10060 namelen
= strlen(de
->d_name
);
10061 treclen
= offsetof(struct target_dirent
, d_name
)
10063 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
10065 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
10066 tde
->d_ino
= tswapal(ino
);
10067 tde
->d_off
= tswapal(off
);
10068 tde
->d_reclen
= tswap16(treclen
);
10069 /* The target_dirent type is in what was formerly a padding
10070 * byte at the end of the structure:
10072 *(((char *)tde
) + treclen
- 1) = type
;
10074 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10075 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
10081 unlock_user(dirp
, arg2
, ret
);
10085 #endif /* TARGET_NR_getdents */
10086 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10087 case TARGET_NR_getdents64
:
10089 struct linux_dirent64
*dirp
;
10090 abi_long count
= arg3
;
10091 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10093 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10094 if (!is_error(ret
)) {
10095 struct linux_dirent64
*de
;
10100 reclen
= de
->d_reclen
;
10103 de
->d_reclen
= tswap16(reclen
);
10104 tswap64s((uint64_t *)&de
->d_ino
);
10105 tswap64s((uint64_t *)&de
->d_off
);
10106 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10110 unlock_user(dirp
, arg2
, ret
);
10113 #endif /* TARGET_NR_getdents64 */
10114 #if defined(TARGET_NR__newselect)
10115 case TARGET_NR__newselect
:
10116 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
10119 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10120 # ifdef TARGET_NR_poll
10121 case TARGET_NR_poll
:
10123 # ifdef TARGET_NR_ppoll
10124 case TARGET_NR_ppoll
:
10127 struct target_pollfd
*target_pfd
;
10128 unsigned int nfds
= arg2
;
10129 struct pollfd
*pfd
;
10135 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
10136 ret
= -TARGET_EINVAL
;
10140 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
10141 sizeof(struct target_pollfd
) * nfds
, 1);
10146 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
10147 for (i
= 0; i
< nfds
; i
++) {
10148 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
10149 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
10154 # ifdef TARGET_NR_ppoll
10155 case TARGET_NR_ppoll
:
10157 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
10158 target_sigset_t
*target_set
;
10159 sigset_t _set
, *set
= &_set
;
10162 if (target_to_host_timespec(timeout_ts
, arg3
)) {
10163 unlock_user(target_pfd
, arg1
, 0);
10171 if (arg5
!= sizeof(target_sigset_t
)) {
10172 unlock_user(target_pfd
, arg1
, 0);
10173 ret
= -TARGET_EINVAL
;
10177 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
10179 unlock_user(target_pfd
, arg1
, 0);
10182 target_to_host_sigset(set
, target_set
);
10187 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
10188 set
, SIGSET_T_SIZE
));
10190 if (!is_error(ret
) && arg3
) {
10191 host_to_target_timespec(arg3
, timeout_ts
);
10194 unlock_user(target_set
, arg4
, 0);
10199 # ifdef TARGET_NR_poll
10200 case TARGET_NR_poll
:
10202 struct timespec ts
, *pts
;
10205 /* Convert ms to secs, ns */
10206 ts
.tv_sec
= arg3
/ 1000;
10207 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
10210 /* -ve poll() timeout means "infinite" */
10213 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
10218 g_assert_not_reached();
10221 if (!is_error(ret
)) {
10222 for(i
= 0; i
< nfds
; i
++) {
10223 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
10226 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
10230 case TARGET_NR_flock
:
10231 /* NOTE: the flock constant seems to be the same for every
10233 ret
= get_errno(safe_flock(arg1
, arg2
));
10235 case TARGET_NR_readv
:
10237 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10239 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
10240 unlock_iovec(vec
, arg2
, arg3
, 1);
10242 ret
= -host_to_target_errno(errno
);
10246 case TARGET_NR_writev
:
10248 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10250 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
10251 unlock_iovec(vec
, arg2
, arg3
, 0);
10253 ret
= -host_to_target_errno(errno
);
10257 #if defined(TARGET_NR_preadv)
10258 case TARGET_NR_preadv
:
10260 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10262 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, arg4
, arg5
));
10263 unlock_iovec(vec
, arg2
, arg3
, 1);
10265 ret
= -host_to_target_errno(errno
);
10270 #if defined(TARGET_NR_pwritev)
10271 case TARGET_NR_pwritev
:
10273 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10275 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, arg4
, arg5
));
10276 unlock_iovec(vec
, arg2
, arg3
, 0);
10278 ret
= -host_to_target_errno(errno
);
10283 case TARGET_NR_getsid
:
10284 ret
= get_errno(getsid(arg1
));
10286 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10287 case TARGET_NR_fdatasync
:
10288 ret
= get_errno(fdatasync(arg1
));
10291 #ifdef TARGET_NR__sysctl
10292 case TARGET_NR__sysctl
:
10293 /* We don't implement this, but ENOTDIR is always a safe
10295 ret
= -TARGET_ENOTDIR
;
10298 case TARGET_NR_sched_getaffinity
:
10300 unsigned int mask_size
;
10301 unsigned long *mask
;
10304 * sched_getaffinity needs multiples of ulong, so need to take
10305 * care of mismatches between target ulong and host ulong sizes.
10307 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10308 ret
= -TARGET_EINVAL
;
10311 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10313 mask
= alloca(mask_size
);
10314 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
10316 if (!is_error(ret
)) {
10318 /* More data returned than the caller's buffer will fit.
10319 * This only happens if sizeof(abi_long) < sizeof(long)
10320 * and the caller passed us a buffer holding an odd number
10321 * of abi_longs. If the host kernel is actually using the
10322 * extra 4 bytes then fail EINVAL; otherwise we can just
10323 * ignore them and only copy the interesting part.
10325 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
10326 if (numcpus
> arg2
* 8) {
10327 ret
= -TARGET_EINVAL
;
10333 if (copy_to_user(arg3
, mask
, ret
)) {
10339 case TARGET_NR_sched_setaffinity
:
10341 unsigned int mask_size
;
10342 unsigned long *mask
;
10345 * sched_setaffinity needs multiples of ulong, so need to take
10346 * care of mismatches between target ulong and host ulong sizes.
10348 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10349 ret
= -TARGET_EINVAL
;
10352 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10354 mask
= alloca(mask_size
);
10355 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
10358 memcpy(mask
, p
, arg2
);
10359 unlock_user_struct(p
, arg2
, 0);
10361 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
10364 case TARGET_NR_sched_setparam
:
10366 struct sched_param
*target_schp
;
10367 struct sched_param schp
;
10370 return -TARGET_EINVAL
;
10372 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
10374 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10375 unlock_user_struct(target_schp
, arg2
, 0);
10376 ret
= get_errno(sched_setparam(arg1
, &schp
));
10379 case TARGET_NR_sched_getparam
:
10381 struct sched_param
*target_schp
;
10382 struct sched_param schp
;
10385 return -TARGET_EINVAL
;
10387 ret
= get_errno(sched_getparam(arg1
, &schp
));
10388 if (!is_error(ret
)) {
10389 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
10391 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
10392 unlock_user_struct(target_schp
, arg2
, 1);
10396 case TARGET_NR_sched_setscheduler
:
10398 struct sched_param
*target_schp
;
10399 struct sched_param schp
;
10401 return -TARGET_EINVAL
;
10403 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
10405 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10406 unlock_user_struct(target_schp
, arg3
, 0);
10407 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
10410 case TARGET_NR_sched_getscheduler
:
10411 ret
= get_errno(sched_getscheduler(arg1
));
10413 case TARGET_NR_sched_yield
:
10414 ret
= get_errno(sched_yield());
10416 case TARGET_NR_sched_get_priority_max
:
10417 ret
= get_errno(sched_get_priority_max(arg1
));
10419 case TARGET_NR_sched_get_priority_min
:
10420 ret
= get_errno(sched_get_priority_min(arg1
));
10422 case TARGET_NR_sched_rr_get_interval
:
10424 struct timespec ts
;
10425 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10426 if (!is_error(ret
)) {
10427 ret
= host_to_target_timespec(arg2
, &ts
);
10431 case TARGET_NR_nanosleep
:
10433 struct timespec req
, rem
;
10434 target_to_host_timespec(&req
, arg1
);
10435 ret
= get_errno(safe_nanosleep(&req
, &rem
));
10436 if (is_error(ret
) && arg2
) {
10437 host_to_target_timespec(arg2
, &rem
);
10441 #ifdef TARGET_NR_query_module
10442 case TARGET_NR_query_module
:
10443 goto unimplemented
;
10445 #ifdef TARGET_NR_nfsservctl
10446 case TARGET_NR_nfsservctl
:
10447 goto unimplemented
;
10449 case TARGET_NR_prctl
:
10451 case PR_GET_PDEATHSIG
:
10454 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
10455 if (!is_error(ret
) && arg2
10456 && put_user_ual(deathsig
, arg2
)) {
10464 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
10468 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10469 arg3
, arg4
, arg5
));
10470 unlock_user(name
, arg2
, 16);
10475 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
10479 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10480 arg3
, arg4
, arg5
));
10481 unlock_user(name
, arg2
, 0);
10486 /* Most prctl options have no pointer arguments */
10487 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
10491 #ifdef TARGET_NR_arch_prctl
10492 case TARGET_NR_arch_prctl
:
10493 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10494 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
10497 goto unimplemented
;
10500 #ifdef TARGET_NR_pread64
10501 case TARGET_NR_pread64
:
10502 if (regpairs_aligned(cpu_env
)) {
10506 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
10508 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10509 unlock_user(p
, arg2
, ret
);
10511 case TARGET_NR_pwrite64
:
10512 if (regpairs_aligned(cpu_env
)) {
10516 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
10518 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10519 unlock_user(p
, arg2
, 0);
10522 case TARGET_NR_getcwd
:
10523 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
10525 ret
= get_errno(sys_getcwd1(p
, arg2
));
10526 unlock_user(p
, arg1
, ret
);
10528 case TARGET_NR_capget
:
10529 case TARGET_NR_capset
:
10531 struct target_user_cap_header
*target_header
;
10532 struct target_user_cap_data
*target_data
= NULL
;
10533 struct __user_cap_header_struct header
;
10534 struct __user_cap_data_struct data
[2];
10535 struct __user_cap_data_struct
*dataptr
= NULL
;
10536 int i
, target_datalen
;
10537 int data_items
= 1;
10539 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
10542 header
.version
= tswap32(target_header
->version
);
10543 header
.pid
= tswap32(target_header
->pid
);
10545 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
10546 /* Version 2 and up takes pointer to two user_data structs */
10550 target_datalen
= sizeof(*target_data
) * data_items
;
10553 if (num
== TARGET_NR_capget
) {
10554 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
10556 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
10558 if (!target_data
) {
10559 unlock_user_struct(target_header
, arg1
, 0);
10563 if (num
== TARGET_NR_capset
) {
10564 for (i
= 0; i
< data_items
; i
++) {
10565 data
[i
].effective
= tswap32(target_data
[i
].effective
);
10566 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
10567 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
10574 if (num
== TARGET_NR_capget
) {
10575 ret
= get_errno(capget(&header
, dataptr
));
10577 ret
= get_errno(capset(&header
, dataptr
));
10580 /* The kernel always updates version for both capget and capset */
10581 target_header
->version
= tswap32(header
.version
);
10582 unlock_user_struct(target_header
, arg1
, 1);
10585 if (num
== TARGET_NR_capget
) {
10586 for (i
= 0; i
< data_items
; i
++) {
10587 target_data
[i
].effective
= tswap32(data
[i
].effective
);
10588 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
10589 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
10591 unlock_user(target_data
, arg2
, target_datalen
);
10593 unlock_user(target_data
, arg2
, 0);
10598 case TARGET_NR_sigaltstack
:
10599 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
10602 #ifdef CONFIG_SENDFILE
10603 case TARGET_NR_sendfile
:
10605 off_t
*offp
= NULL
;
10608 ret
= get_user_sal(off
, arg3
);
10609 if (is_error(ret
)) {
10614 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10615 if (!is_error(ret
) && arg3
) {
10616 abi_long ret2
= put_user_sal(off
, arg3
);
10617 if (is_error(ret2
)) {
10623 #ifdef TARGET_NR_sendfile64
10624 case TARGET_NR_sendfile64
:
10626 off_t
*offp
= NULL
;
10629 ret
= get_user_s64(off
, arg3
);
10630 if (is_error(ret
)) {
10635 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10636 if (!is_error(ret
) && arg3
) {
10637 abi_long ret2
= put_user_s64(off
, arg3
);
10638 if (is_error(ret2
)) {
10646 case TARGET_NR_sendfile
:
10647 #ifdef TARGET_NR_sendfile64
10648 case TARGET_NR_sendfile64
:
10650 goto unimplemented
;
10653 #ifdef TARGET_NR_getpmsg
10654 case TARGET_NR_getpmsg
:
10655 goto unimplemented
;
10657 #ifdef TARGET_NR_putpmsg
10658 case TARGET_NR_putpmsg
:
10659 goto unimplemented
;
10661 #ifdef TARGET_NR_vfork
10662 case TARGET_NR_vfork
:
10663 ret
= get_errno(do_fork(cpu_env
,
10664 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
10668 #ifdef TARGET_NR_ugetrlimit
10669 case TARGET_NR_ugetrlimit
:
10671 struct rlimit rlim
;
10672 int resource
= target_to_host_resource(arg1
);
10673 ret
= get_errno(getrlimit(resource
, &rlim
));
10674 if (!is_error(ret
)) {
10675 struct target_rlimit
*target_rlim
;
10676 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
10678 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
10679 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
10680 unlock_user_struct(target_rlim
, arg2
, 1);
10685 #ifdef TARGET_NR_truncate64
10686 case TARGET_NR_truncate64
:
10687 if (!(p
= lock_user_string(arg1
)))
10689 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
10690 unlock_user(p
, arg1
, 0);
10693 #ifdef TARGET_NR_ftruncate64
10694 case TARGET_NR_ftruncate64
:
10695 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
10698 #ifdef TARGET_NR_stat64
10699 case TARGET_NR_stat64
:
10700 if (!(p
= lock_user_string(arg1
)))
10702 ret
= get_errno(stat(path(p
), &st
));
10703 unlock_user(p
, arg1
, 0);
10704 if (!is_error(ret
))
10705 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10708 #ifdef TARGET_NR_lstat64
10709 case TARGET_NR_lstat64
:
10710 if (!(p
= lock_user_string(arg1
)))
10712 ret
= get_errno(lstat(path(p
), &st
));
10713 unlock_user(p
, arg1
, 0);
10714 if (!is_error(ret
))
10715 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10718 #ifdef TARGET_NR_fstat64
10719 case TARGET_NR_fstat64
:
10720 ret
= get_errno(fstat(arg1
, &st
));
10721 if (!is_error(ret
))
10722 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10725 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10726 #ifdef TARGET_NR_fstatat64
10727 case TARGET_NR_fstatat64
:
10729 #ifdef TARGET_NR_newfstatat
10730 case TARGET_NR_newfstatat
:
10732 if (!(p
= lock_user_string(arg2
)))
10734 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
10735 if (!is_error(ret
))
10736 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
10739 #ifdef TARGET_NR_lchown
10740 case TARGET_NR_lchown
:
10741 if (!(p
= lock_user_string(arg1
)))
10743 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10744 unlock_user(p
, arg1
, 0);
10747 #ifdef TARGET_NR_getuid
10748 case TARGET_NR_getuid
:
10749 ret
= get_errno(high2lowuid(getuid()));
10752 #ifdef TARGET_NR_getgid
10753 case TARGET_NR_getgid
:
10754 ret
= get_errno(high2lowgid(getgid()));
10757 #ifdef TARGET_NR_geteuid
10758 case TARGET_NR_geteuid
:
10759 ret
= get_errno(high2lowuid(geteuid()));
10762 #ifdef TARGET_NR_getegid
10763 case TARGET_NR_getegid
:
10764 ret
= get_errno(high2lowgid(getegid()));
10767 case TARGET_NR_setreuid
:
10768 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
10770 case TARGET_NR_setregid
:
10771 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
10773 case TARGET_NR_getgroups
:
10775 int gidsetsize
= arg1
;
10776 target_id
*target_grouplist
;
10780 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10781 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10782 if (gidsetsize
== 0)
10784 if (!is_error(ret
)) {
10785 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
10786 if (!target_grouplist
)
10788 for(i
= 0;i
< ret
; i
++)
10789 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
10790 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
10794 case TARGET_NR_setgroups
:
10796 int gidsetsize
= arg1
;
10797 target_id
*target_grouplist
;
10798 gid_t
*grouplist
= NULL
;
10801 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10802 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
10803 if (!target_grouplist
) {
10804 ret
= -TARGET_EFAULT
;
10807 for (i
= 0; i
< gidsetsize
; i
++) {
10808 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
10810 unlock_user(target_grouplist
, arg2
, 0);
10812 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
10815 case TARGET_NR_fchown
:
10816 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
10818 #if defined(TARGET_NR_fchownat)
10819 case TARGET_NR_fchownat
:
10820 if (!(p
= lock_user_string(arg2
)))
10822 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
10823 low2highgid(arg4
), arg5
));
10824 unlock_user(p
, arg2
, 0);
10827 #ifdef TARGET_NR_setresuid
10828 case TARGET_NR_setresuid
:
10829 ret
= get_errno(sys_setresuid(low2highuid(arg1
),
10831 low2highuid(arg3
)));
10834 #ifdef TARGET_NR_getresuid
10835 case TARGET_NR_getresuid
:
10837 uid_t ruid
, euid
, suid
;
10838 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10839 if (!is_error(ret
)) {
10840 if (put_user_id(high2lowuid(ruid
), arg1
)
10841 || put_user_id(high2lowuid(euid
), arg2
)
10842 || put_user_id(high2lowuid(suid
), arg3
))
10848 #ifdef TARGET_NR_getresgid
10849 case TARGET_NR_setresgid
:
10850 ret
= get_errno(sys_setresgid(low2highgid(arg1
),
10852 low2highgid(arg3
)));
10855 #ifdef TARGET_NR_getresgid
10856 case TARGET_NR_getresgid
:
10858 gid_t rgid
, egid
, sgid
;
10859 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10860 if (!is_error(ret
)) {
10861 if (put_user_id(high2lowgid(rgid
), arg1
)
10862 || put_user_id(high2lowgid(egid
), arg2
)
10863 || put_user_id(high2lowgid(sgid
), arg3
))
10869 #ifdef TARGET_NR_chown
10870 case TARGET_NR_chown
:
10871 if (!(p
= lock_user_string(arg1
)))
10873 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10874 unlock_user(p
, arg1
, 0);
10877 case TARGET_NR_setuid
:
10878 ret
= get_errno(sys_setuid(low2highuid(arg1
)));
10880 case TARGET_NR_setgid
:
10881 ret
= get_errno(sys_setgid(low2highgid(arg1
)));
10883 case TARGET_NR_setfsuid
:
10884 ret
= get_errno(setfsuid(arg1
));
10886 case TARGET_NR_setfsgid
:
10887 ret
= get_errno(setfsgid(arg1
));
10890 #ifdef TARGET_NR_lchown32
10891 case TARGET_NR_lchown32
:
10892 if (!(p
= lock_user_string(arg1
)))
10894 ret
= get_errno(lchown(p
, arg2
, arg3
));
10895 unlock_user(p
, arg1
, 0);
10898 #ifdef TARGET_NR_getuid32
10899 case TARGET_NR_getuid32
:
10900 ret
= get_errno(getuid());
10904 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10905 /* Alpha specific */
10906 case TARGET_NR_getxuid
:
10910 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
10912 ret
= get_errno(getuid());
10915 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10916 /* Alpha specific */
10917 case TARGET_NR_getxgid
:
10921 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
10923 ret
= get_errno(getgid());
10926 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10927 /* Alpha specific */
10928 case TARGET_NR_osf_getsysinfo
:
10929 ret
= -TARGET_EOPNOTSUPP
;
10931 case TARGET_GSI_IEEE_FP_CONTROL
:
10933 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
10935 /* Copied from linux ieee_fpcr_to_swcr. */
10936 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
10937 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
10938 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
10939 | SWCR_TRAP_ENABLE_DZE
10940 | SWCR_TRAP_ENABLE_OVF
);
10941 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
10942 | SWCR_TRAP_ENABLE_INE
);
10943 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
10944 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
10946 if (put_user_u64 (swcr
, arg2
))
10952 /* case GSI_IEEE_STATE_AT_SIGNAL:
10953 -- Not implemented in linux kernel.
10955 -- Retrieves current unaligned access state; not much used.
10956 case GSI_PROC_TYPE:
10957 -- Retrieves implver information; surely not used.
10958 case GSI_GET_HWRPB:
10959 -- Grabs a copy of the HWRPB; surely not used.
10964 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10965 /* Alpha specific */
10966 case TARGET_NR_osf_setsysinfo
:
10967 ret
= -TARGET_EOPNOTSUPP
;
10969 case TARGET_SSI_IEEE_FP_CONTROL
:
10971 uint64_t swcr
, fpcr
, orig_fpcr
;
10973 if (get_user_u64 (swcr
, arg2
)) {
10976 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10977 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
10979 /* Copied from linux ieee_swcr_to_fpcr. */
10980 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
10981 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
10982 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
10983 | SWCR_TRAP_ENABLE_DZE
10984 | SWCR_TRAP_ENABLE_OVF
)) << 48;
10985 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
10986 | SWCR_TRAP_ENABLE_INE
)) << 57;
10987 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
10988 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
10990 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10995 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
10997 uint64_t exc
, fpcr
, orig_fpcr
;
11000 if (get_user_u64(exc
, arg2
)) {
11004 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11006 /* We only add to the exception status here. */
11007 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
11009 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11012 /* Old exceptions are not signaled. */
11013 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
11015 /* If any exceptions set by this call,
11016 and are unmasked, send a signal. */
11018 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
11019 si_code
= TARGET_FPE_FLTRES
;
11021 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
11022 si_code
= TARGET_FPE_FLTUND
;
11024 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
11025 si_code
= TARGET_FPE_FLTOVF
;
11027 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
11028 si_code
= TARGET_FPE_FLTDIV
;
11030 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
11031 si_code
= TARGET_FPE_FLTINV
;
11033 if (si_code
!= 0) {
11034 target_siginfo_t info
;
11035 info
.si_signo
= SIGFPE
;
11037 info
.si_code
= si_code
;
11038 info
._sifields
._sigfault
._addr
11039 = ((CPUArchState
*)cpu_env
)->pc
;
11040 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11041 QEMU_SI_FAULT
, &info
);
11046 /* case SSI_NVPAIRS:
11047 -- Used with SSIN_UACPROC to enable unaligned accesses.
11048 case SSI_IEEE_STATE_AT_SIGNAL:
11049 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11050 -- Not implemented in linux kernel
11055 #ifdef TARGET_NR_osf_sigprocmask
11056 /* Alpha specific. */
11057 case TARGET_NR_osf_sigprocmask
:
11061 sigset_t set
, oldset
;
11064 case TARGET_SIG_BLOCK
:
11067 case TARGET_SIG_UNBLOCK
:
11070 case TARGET_SIG_SETMASK
:
11074 ret
= -TARGET_EINVAL
;
11078 target_to_host_old_sigset(&set
, &mask
);
11079 ret
= do_sigprocmask(how
, &set
, &oldset
);
11081 host_to_target_old_sigset(&mask
, &oldset
);
11088 #ifdef TARGET_NR_getgid32
11089 case TARGET_NR_getgid32
:
11090 ret
= get_errno(getgid());
11093 #ifdef TARGET_NR_geteuid32
11094 case TARGET_NR_geteuid32
:
11095 ret
= get_errno(geteuid());
11098 #ifdef TARGET_NR_getegid32
11099 case TARGET_NR_getegid32
:
11100 ret
= get_errno(getegid());
11103 #ifdef TARGET_NR_setreuid32
11104 case TARGET_NR_setreuid32
:
11105 ret
= get_errno(setreuid(arg1
, arg2
));
11108 #ifdef TARGET_NR_setregid32
11109 case TARGET_NR_setregid32
:
11110 ret
= get_errno(setregid(arg1
, arg2
));
11113 #ifdef TARGET_NR_getgroups32
11114 case TARGET_NR_getgroups32
:
11116 int gidsetsize
= arg1
;
11117 uint32_t *target_grouplist
;
11121 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11122 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11123 if (gidsetsize
== 0)
11125 if (!is_error(ret
)) {
11126 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
11127 if (!target_grouplist
) {
11128 ret
= -TARGET_EFAULT
;
11131 for(i
= 0;i
< ret
; i
++)
11132 target_grouplist
[i
] = tswap32(grouplist
[i
]);
11133 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
11138 #ifdef TARGET_NR_setgroups32
11139 case TARGET_NR_setgroups32
:
11141 int gidsetsize
= arg1
;
11142 uint32_t *target_grouplist
;
11146 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11147 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
11148 if (!target_grouplist
) {
11149 ret
= -TARGET_EFAULT
;
11152 for(i
= 0;i
< gidsetsize
; i
++)
11153 grouplist
[i
] = tswap32(target_grouplist
[i
]);
11154 unlock_user(target_grouplist
, arg2
, 0);
11155 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
11159 #ifdef TARGET_NR_fchown32
11160 case TARGET_NR_fchown32
:
11161 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
11164 #ifdef TARGET_NR_setresuid32
11165 case TARGET_NR_setresuid32
:
11166 ret
= get_errno(sys_setresuid(arg1
, arg2
, arg3
));
11169 #ifdef TARGET_NR_getresuid32
11170 case TARGET_NR_getresuid32
:
11172 uid_t ruid
, euid
, suid
;
11173 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11174 if (!is_error(ret
)) {
11175 if (put_user_u32(ruid
, arg1
)
11176 || put_user_u32(euid
, arg2
)
11177 || put_user_u32(suid
, arg3
))
11183 #ifdef TARGET_NR_setresgid32
11184 case TARGET_NR_setresgid32
:
11185 ret
= get_errno(sys_setresgid(arg1
, arg2
, arg3
));
11188 #ifdef TARGET_NR_getresgid32
11189 case TARGET_NR_getresgid32
:
11191 gid_t rgid
, egid
, sgid
;
11192 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11193 if (!is_error(ret
)) {
11194 if (put_user_u32(rgid
, arg1
)
11195 || put_user_u32(egid
, arg2
)
11196 || put_user_u32(sgid
, arg3
))
11202 #ifdef TARGET_NR_chown32
11203 case TARGET_NR_chown32
:
11204 if (!(p
= lock_user_string(arg1
)))
11206 ret
= get_errno(chown(p
, arg2
, arg3
));
11207 unlock_user(p
, arg1
, 0);
11210 #ifdef TARGET_NR_setuid32
11211 case TARGET_NR_setuid32
:
11212 ret
= get_errno(sys_setuid(arg1
));
11215 #ifdef TARGET_NR_setgid32
11216 case TARGET_NR_setgid32
:
11217 ret
= get_errno(sys_setgid(arg1
));
11220 #ifdef TARGET_NR_setfsuid32
11221 case TARGET_NR_setfsuid32
:
11222 ret
= get_errno(setfsuid(arg1
));
11225 #ifdef TARGET_NR_setfsgid32
11226 case TARGET_NR_setfsgid32
:
11227 ret
= get_errno(setfsgid(arg1
));
11231 case TARGET_NR_pivot_root
:
11232 goto unimplemented
;
11233 #ifdef TARGET_NR_mincore
11234 case TARGET_NR_mincore
:
11237 ret
= -TARGET_ENOMEM
;
11238 a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
11242 ret
= -TARGET_EFAULT
;
11243 p
= lock_user_string(arg3
);
11247 ret
= get_errno(mincore(a
, arg2
, p
));
11248 unlock_user(p
, arg3
, ret
);
11250 unlock_user(a
, arg1
, 0);
11254 #ifdef TARGET_NR_arm_fadvise64_64
11255 case TARGET_NR_arm_fadvise64_64
:
11256 /* arm_fadvise64_64 looks like fadvise64_64 but
11257 * with different argument order: fd, advice, offset, len
11258 * rather than the usual fd, offset, len, advice.
11259 * Note that offset and len are both 64-bit so appear as
11260 * pairs of 32-bit registers.
11262 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
11263 target_offset64(arg5
, arg6
), arg2
);
11264 ret
= -host_to_target_errno(ret
);
11268 #if TARGET_ABI_BITS == 32
11270 #ifdef TARGET_NR_fadvise64_64
11271 case TARGET_NR_fadvise64_64
:
11272 #if defined(TARGET_PPC)
11273 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11281 /* 6 args: fd, offset (high, low), len (high, low), advice */
11282 if (regpairs_aligned(cpu_env
)) {
11283 /* offset is in (3,4), len in (5,6) and advice in 7 */
11291 ret
= -host_to_target_errno(posix_fadvise(arg1
,
11292 target_offset64(arg2
, arg3
),
11293 target_offset64(arg4
, arg5
),
11298 #ifdef TARGET_NR_fadvise64
11299 case TARGET_NR_fadvise64
:
11300 /* 5 args: fd, offset (high, low), len, advice */
11301 if (regpairs_aligned(cpu_env
)) {
11302 /* offset is in (3,4), len in 5 and advice in 6 */
11308 ret
= -host_to_target_errno(posix_fadvise(arg1
,
11309 target_offset64(arg2
, arg3
),
11314 #else /* not a 32-bit ABI */
11315 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11316 #ifdef TARGET_NR_fadvise64_64
11317 case TARGET_NR_fadvise64_64
:
11319 #ifdef TARGET_NR_fadvise64
11320 case TARGET_NR_fadvise64
:
11322 #ifdef TARGET_S390X
11324 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
11325 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
11326 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
11327 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
11331 ret
= -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
11334 #endif /* end of 64-bit ABI fadvise handling */
11336 #ifdef TARGET_NR_madvise
11337 case TARGET_NR_madvise
:
11338 /* A straight passthrough may not be safe because qemu sometimes
11339 turns private file-backed mappings into anonymous mappings.
11340 This will break MADV_DONTNEED.
11341 This is a hint, so ignoring and returning success is ok. */
11342 ret
= get_errno(0);
11345 #if TARGET_ABI_BITS == 32
11346 case TARGET_NR_fcntl64
:
11350 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
11351 to_flock64_fn
*copyto
= copy_to_user_flock64
;
11354 if (((CPUARMState
*)cpu_env
)->eabi
) {
11355 copyfrom
= copy_from_user_eabi_flock64
;
11356 copyto
= copy_to_user_eabi_flock64
;
11360 cmd
= target_to_host_fcntl_cmd(arg2
);
11361 if (cmd
== -TARGET_EINVAL
) {
11367 case TARGET_F_GETLK64
:
11368 ret
= copyfrom(&fl
, arg3
);
11372 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
11374 ret
= copyto(arg3
, &fl
);
11378 case TARGET_F_SETLK64
:
11379 case TARGET_F_SETLKW64
:
11380 ret
= copyfrom(&fl
, arg3
);
11384 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11387 ret
= do_fcntl(arg1
, arg2
, arg3
);
11393 #ifdef TARGET_NR_cacheflush
11394 case TARGET_NR_cacheflush
:
11395 /* self-modifying code is handled automatically, so nothing needed */
11399 #ifdef TARGET_NR_security
11400 case TARGET_NR_security
:
11401 goto unimplemented
;
11403 #ifdef TARGET_NR_getpagesize
11404 case TARGET_NR_getpagesize
:
11405 ret
= TARGET_PAGE_SIZE
;
11408 case TARGET_NR_gettid
:
11409 ret
= get_errno(gettid());
11411 #ifdef TARGET_NR_readahead
11412 case TARGET_NR_readahead
:
11413 #if TARGET_ABI_BITS == 32
11414 if (regpairs_aligned(cpu_env
)) {
11419 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
11421 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
11426 #ifdef TARGET_NR_setxattr
11427 case TARGET_NR_listxattr
:
11428 case TARGET_NR_llistxattr
:
11432 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11434 ret
= -TARGET_EFAULT
;
11438 p
= lock_user_string(arg1
);
11440 if (num
== TARGET_NR_listxattr
) {
11441 ret
= get_errno(listxattr(p
, b
, arg3
));
11443 ret
= get_errno(llistxattr(p
, b
, arg3
));
11446 ret
= -TARGET_EFAULT
;
11448 unlock_user(p
, arg1
, 0);
11449 unlock_user(b
, arg2
, arg3
);
11452 case TARGET_NR_flistxattr
:
11456 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11458 ret
= -TARGET_EFAULT
;
11462 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
11463 unlock_user(b
, arg2
, arg3
);
11466 case TARGET_NR_setxattr
:
11467 case TARGET_NR_lsetxattr
:
11469 void *p
, *n
, *v
= 0;
11471 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11473 ret
= -TARGET_EFAULT
;
11477 p
= lock_user_string(arg1
);
11478 n
= lock_user_string(arg2
);
11480 if (num
== TARGET_NR_setxattr
) {
11481 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
11483 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
11486 ret
= -TARGET_EFAULT
;
11488 unlock_user(p
, arg1
, 0);
11489 unlock_user(n
, arg2
, 0);
11490 unlock_user(v
, arg3
, 0);
11493 case TARGET_NR_fsetxattr
:
11497 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11499 ret
= -TARGET_EFAULT
;
11503 n
= lock_user_string(arg2
);
11505 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
11507 ret
= -TARGET_EFAULT
;
11509 unlock_user(n
, arg2
, 0);
11510 unlock_user(v
, arg3
, 0);
11513 case TARGET_NR_getxattr
:
11514 case TARGET_NR_lgetxattr
:
11516 void *p
, *n
, *v
= 0;
11518 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11520 ret
= -TARGET_EFAULT
;
11524 p
= lock_user_string(arg1
);
11525 n
= lock_user_string(arg2
);
11527 if (num
== TARGET_NR_getxattr
) {
11528 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
11530 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
11533 ret
= -TARGET_EFAULT
;
11535 unlock_user(p
, arg1
, 0);
11536 unlock_user(n
, arg2
, 0);
11537 unlock_user(v
, arg3
, arg4
);
11540 case TARGET_NR_fgetxattr
:
11544 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11546 ret
= -TARGET_EFAULT
;
11550 n
= lock_user_string(arg2
);
11552 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
11554 ret
= -TARGET_EFAULT
;
11556 unlock_user(n
, arg2
, 0);
11557 unlock_user(v
, arg3
, arg4
);
11560 case TARGET_NR_removexattr
:
11561 case TARGET_NR_lremovexattr
:
11564 p
= lock_user_string(arg1
);
11565 n
= lock_user_string(arg2
);
11567 if (num
== TARGET_NR_removexattr
) {
11568 ret
= get_errno(removexattr(p
, n
));
11570 ret
= get_errno(lremovexattr(p
, n
));
11573 ret
= -TARGET_EFAULT
;
11575 unlock_user(p
, arg1
, 0);
11576 unlock_user(n
, arg2
, 0);
11579 case TARGET_NR_fremovexattr
:
11582 n
= lock_user_string(arg2
);
11584 ret
= get_errno(fremovexattr(arg1
, n
));
11586 ret
= -TARGET_EFAULT
;
11588 unlock_user(n
, arg2
, 0);
11592 #endif /* CONFIG_ATTR */
11593 #ifdef TARGET_NR_set_thread_area
11594 case TARGET_NR_set_thread_area
:
11595 #if defined(TARGET_MIPS)
11596 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
11599 #elif defined(TARGET_CRIS)
11601 ret
= -TARGET_EINVAL
;
11603 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
11607 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11608 ret
= do_set_thread_area(cpu_env
, arg1
);
11610 #elif defined(TARGET_M68K)
11612 TaskState
*ts
= cpu
->opaque
;
11613 ts
->tp_value
= arg1
;
11618 goto unimplemented_nowarn
;
11621 #ifdef TARGET_NR_get_thread_area
11622 case TARGET_NR_get_thread_area
:
11623 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11624 ret
= do_get_thread_area(cpu_env
, arg1
);
11626 #elif defined(TARGET_M68K)
11628 TaskState
*ts
= cpu
->opaque
;
11629 ret
= ts
->tp_value
;
11633 goto unimplemented_nowarn
;
11636 #ifdef TARGET_NR_getdomainname
11637 case TARGET_NR_getdomainname
:
11638 goto unimplemented_nowarn
;
11641 #ifdef TARGET_NR_clock_gettime
11642 case TARGET_NR_clock_gettime
:
11644 struct timespec ts
;
11645 ret
= get_errno(clock_gettime(arg1
, &ts
));
11646 if (!is_error(ret
)) {
11647 host_to_target_timespec(arg2
, &ts
);
11652 #ifdef TARGET_NR_clock_getres
11653 case TARGET_NR_clock_getres
:
11655 struct timespec ts
;
11656 ret
= get_errno(clock_getres(arg1
, &ts
));
11657 if (!is_error(ret
)) {
11658 host_to_target_timespec(arg2
, &ts
);
11663 #ifdef TARGET_NR_clock_nanosleep
11664 case TARGET_NR_clock_nanosleep
:
11666 struct timespec ts
;
11667 target_to_host_timespec(&ts
, arg3
);
11668 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
11669 &ts
, arg4
? &ts
: NULL
));
11671 host_to_target_timespec(arg4
, &ts
);
11673 #if defined(TARGET_PPC)
11674 /* clock_nanosleep is odd in that it returns positive errno values.
11675 * On PPC, CR0 bit 3 should be set in such a situation. */
11676 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
11677 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
11684 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11685 case TARGET_NR_set_tid_address
:
11686 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
11690 case TARGET_NR_tkill
:
11691 ret
= get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
11694 case TARGET_NR_tgkill
:
11695 ret
= get_errno(safe_tgkill((int)arg1
, (int)arg2
,
11696 target_to_host_signal(arg3
)));
11699 #ifdef TARGET_NR_set_robust_list
11700 case TARGET_NR_set_robust_list
:
11701 case TARGET_NR_get_robust_list
:
11702 /* The ABI for supporting robust futexes has userspace pass
11703 * the kernel a pointer to a linked list which is updated by
11704 * userspace after the syscall; the list is walked by the kernel
11705 * when the thread exits. Since the linked list in QEMU guest
11706 * memory isn't a valid linked list for the host and we have
11707 * no way to reliably intercept the thread-death event, we can't
11708 * support these. Silently return ENOSYS so that guest userspace
11709 * falls back to a non-robust futex implementation (which should
11710 * be OK except in the corner case of the guest crashing while
11711 * holding a mutex that is shared with another process via
11714 goto unimplemented_nowarn
;
11717 #if defined(TARGET_NR_utimensat)
11718 case TARGET_NR_utimensat
:
11720 struct timespec
*tsp
, ts
[2];
11724 target_to_host_timespec(ts
, arg3
);
11725 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
11729 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
11731 if (!(p
= lock_user_string(arg2
))) {
11732 ret
= -TARGET_EFAULT
;
11735 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
11736 unlock_user(p
, arg2
, 0);
11741 case TARGET_NR_futex
:
11742 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11744 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11745 case TARGET_NR_inotify_init
:
11746 ret
= get_errno(sys_inotify_init());
11748 fd_trans_register(ret
, &target_inotify_trans
);
11752 #ifdef CONFIG_INOTIFY1
11753 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11754 case TARGET_NR_inotify_init1
:
11755 ret
= get_errno(sys_inotify_init1(target_to_host_bitmask(arg1
,
11756 fcntl_flags_tbl
)));
11758 fd_trans_register(ret
, &target_inotify_trans
);
11763 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11764 case TARGET_NR_inotify_add_watch
:
11765 p
= lock_user_string(arg2
);
11766 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
11767 unlock_user(p
, arg2
, 0);
11770 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11771 case TARGET_NR_inotify_rm_watch
:
11772 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
11776 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11777 case TARGET_NR_mq_open
:
11779 struct mq_attr posix_mq_attr
;
11780 struct mq_attr
*pposix_mq_attr
;
11783 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
11784 pposix_mq_attr
= NULL
;
11786 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
11789 pposix_mq_attr
= &posix_mq_attr
;
11791 p
= lock_user_string(arg1
- 1);
11795 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
11796 unlock_user (p
, arg1
, 0);
11800 case TARGET_NR_mq_unlink
:
11801 p
= lock_user_string(arg1
- 1);
11803 ret
= -TARGET_EFAULT
;
11806 ret
= get_errno(mq_unlink(p
));
11807 unlock_user (p
, arg1
, 0);
11810 case TARGET_NR_mq_timedsend
:
11812 struct timespec ts
;
11814 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11816 target_to_host_timespec(&ts
, arg5
);
11817 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
11818 host_to_target_timespec(arg5
, &ts
);
11820 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
11822 unlock_user (p
, arg2
, arg3
);
11826 case TARGET_NR_mq_timedreceive
:
11828 struct timespec ts
;
11831 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11833 target_to_host_timespec(&ts
, arg5
);
11834 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11836 host_to_target_timespec(arg5
, &ts
);
11838 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11841 unlock_user (p
, arg2
, arg3
);
11843 put_user_u32(prio
, arg4
);
11847 /* Not implemented for now... */
11848 /* case TARGET_NR_mq_notify: */
11851 case TARGET_NR_mq_getsetattr
:
11853 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
11856 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
11857 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
11860 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
11861 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
11868 #ifdef CONFIG_SPLICE
11869 #ifdef TARGET_NR_tee
11870 case TARGET_NR_tee
:
11872 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
11876 #ifdef TARGET_NR_splice
11877 case TARGET_NR_splice
:
11879 loff_t loff_in
, loff_out
;
11880 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
11882 if (get_user_u64(loff_in
, arg2
)) {
11885 ploff_in
= &loff_in
;
11888 if (get_user_u64(loff_out
, arg4
)) {
11891 ploff_out
= &loff_out
;
11893 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
11895 if (put_user_u64(loff_in
, arg2
)) {
11900 if (put_user_u64(loff_out
, arg4
)) {
11907 #ifdef TARGET_NR_vmsplice
11908 case TARGET_NR_vmsplice
:
11910 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
11912 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
11913 unlock_iovec(vec
, arg2
, arg3
, 0);
11915 ret
= -host_to_target_errno(errno
);
11920 #endif /* CONFIG_SPLICE */
11921 #ifdef CONFIG_EVENTFD
11922 #if defined(TARGET_NR_eventfd)
11923 case TARGET_NR_eventfd
:
11924 ret
= get_errno(eventfd(arg1
, 0));
11926 fd_trans_register(ret
, &target_eventfd_trans
);
11930 #if defined(TARGET_NR_eventfd2)
11931 case TARGET_NR_eventfd2
:
11933 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
11934 if (arg2
& TARGET_O_NONBLOCK
) {
11935 host_flags
|= O_NONBLOCK
;
11937 if (arg2
& TARGET_O_CLOEXEC
) {
11938 host_flags
|= O_CLOEXEC
;
11940 ret
= get_errno(eventfd(arg1
, host_flags
));
11942 fd_trans_register(ret
, &target_eventfd_trans
);
11947 #endif /* CONFIG_EVENTFD */
11948 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11949 case TARGET_NR_fallocate
:
11950 #if TARGET_ABI_BITS == 32
11951 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
11952 target_offset64(arg5
, arg6
)));
11954 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
11958 #if defined(CONFIG_SYNC_FILE_RANGE)
11959 #if defined(TARGET_NR_sync_file_range)
11960 case TARGET_NR_sync_file_range
:
11961 #if TARGET_ABI_BITS == 32
11962 #if defined(TARGET_MIPS)
11963 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11964 target_offset64(arg5
, arg6
), arg7
));
11966 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
11967 target_offset64(arg4
, arg5
), arg6
));
11968 #endif /* !TARGET_MIPS */
11970 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
11974 #if defined(TARGET_NR_sync_file_range2)
11975 case TARGET_NR_sync_file_range2
:
11976 /* This is like sync_file_range but the arguments are reordered */
11977 #if TARGET_ABI_BITS == 32
11978 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11979 target_offset64(arg5
, arg6
), arg2
));
11981 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
11986 #if defined(TARGET_NR_signalfd4)
11987 case TARGET_NR_signalfd4
:
11988 ret
= do_signalfd4(arg1
, arg2
, arg4
);
11991 #if defined(TARGET_NR_signalfd)
11992 case TARGET_NR_signalfd
:
11993 ret
= do_signalfd4(arg1
, arg2
, 0);
11996 #if defined(CONFIG_EPOLL)
11997 #if defined(TARGET_NR_epoll_create)
11998 case TARGET_NR_epoll_create
:
11999 ret
= get_errno(epoll_create(arg1
));
12002 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12003 case TARGET_NR_epoll_create1
:
12004 ret
= get_errno(epoll_create1(arg1
));
12007 #if defined(TARGET_NR_epoll_ctl)
12008 case TARGET_NR_epoll_ctl
:
12010 struct epoll_event ep
;
12011 struct epoll_event
*epp
= 0;
12013 struct target_epoll_event
*target_ep
;
12014 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
12017 ep
.events
= tswap32(target_ep
->events
);
12018 /* The epoll_data_t union is just opaque data to the kernel,
12019 * so we transfer all 64 bits across and need not worry what
12020 * actual data type it is.
12022 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
12023 unlock_user_struct(target_ep
, arg4
, 0);
12026 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
12031 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12032 #if defined(TARGET_NR_epoll_wait)
12033 case TARGET_NR_epoll_wait
:
12035 #if defined(TARGET_NR_epoll_pwait)
12036 case TARGET_NR_epoll_pwait
:
12039 struct target_epoll_event
*target_ep
;
12040 struct epoll_event
*ep
;
12042 int maxevents
= arg3
;
12043 int timeout
= arg4
;
12045 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
12046 ret
= -TARGET_EINVAL
;
12050 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
12051 maxevents
* sizeof(struct target_epoll_event
), 1);
12056 ep
= g_try_new(struct epoll_event
, maxevents
);
12058 unlock_user(target_ep
, arg2
, 0);
12059 ret
= -TARGET_ENOMEM
;
12064 #if defined(TARGET_NR_epoll_pwait)
12065 case TARGET_NR_epoll_pwait
:
12067 target_sigset_t
*target_set
;
12068 sigset_t _set
, *set
= &_set
;
12071 if (arg6
!= sizeof(target_sigset_t
)) {
12072 ret
= -TARGET_EINVAL
;
12076 target_set
= lock_user(VERIFY_READ
, arg5
,
12077 sizeof(target_sigset_t
), 1);
12079 ret
= -TARGET_EFAULT
;
12082 target_to_host_sigset(set
, target_set
);
12083 unlock_user(target_set
, arg5
, 0);
12088 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12089 set
, SIGSET_T_SIZE
));
12093 #if defined(TARGET_NR_epoll_wait)
12094 case TARGET_NR_epoll_wait
:
12095 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12100 ret
= -TARGET_ENOSYS
;
12102 if (!is_error(ret
)) {
12104 for (i
= 0; i
< ret
; i
++) {
12105 target_ep
[i
].events
= tswap32(ep
[i
].events
);
12106 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
12108 unlock_user(target_ep
, arg2
,
12109 ret
* sizeof(struct target_epoll_event
));
12111 unlock_user(target_ep
, arg2
, 0);
12118 #ifdef TARGET_NR_prlimit64
12119 case TARGET_NR_prlimit64
:
12121 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12122 struct target_rlimit64
*target_rnew
, *target_rold
;
12123 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
12124 int resource
= target_to_host_resource(arg2
);
12126 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
12129 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
12130 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
12131 unlock_user_struct(target_rnew
, arg3
, 0);
12135 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
12136 if (!is_error(ret
) && arg4
) {
12137 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
12140 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
12141 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
12142 unlock_user_struct(target_rold
, arg4
, 1);
12147 #ifdef TARGET_NR_gethostname
12148 case TARGET_NR_gethostname
:
12150 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
12152 ret
= get_errno(gethostname(name
, arg2
));
12153 unlock_user(name
, arg1
, arg2
);
12155 ret
= -TARGET_EFAULT
;
12160 #ifdef TARGET_NR_atomic_cmpxchg_32
12161 case TARGET_NR_atomic_cmpxchg_32
:
12163 /* should use start_exclusive from main.c */
12164 abi_ulong mem_value
;
12165 if (get_user_u32(mem_value
, arg6
)) {
12166 target_siginfo_t info
;
12167 info
.si_signo
= SIGSEGV
;
12169 info
.si_code
= TARGET_SEGV_MAPERR
;
12170 info
._sifields
._sigfault
._addr
= arg6
;
12171 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
12172 QEMU_SI_FAULT
, &info
);
12176 if (mem_value
== arg2
)
12177 put_user_u32(arg1
, arg6
);
12182 #ifdef TARGET_NR_atomic_barrier
12183 case TARGET_NR_atomic_barrier
:
12185 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
12191 #ifdef TARGET_NR_timer_create
12192 case TARGET_NR_timer_create
:
12194 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12196 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
12199 int timer_index
= next_free_host_timer();
12201 if (timer_index
< 0) {
12202 ret
= -TARGET_EAGAIN
;
12204 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
12207 phost_sevp
= &host_sevp
;
12208 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
12214 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
12218 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
12227 #ifdef TARGET_NR_timer_settime
12228 case TARGET_NR_timer_settime
:
12230 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12231 * struct itimerspec * old_value */
12232 target_timer_t timerid
= get_timer_id(arg1
);
12236 } else if (arg3
== 0) {
12237 ret
= -TARGET_EINVAL
;
12239 timer_t htimer
= g_posix_timers
[timerid
];
12240 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12242 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
12246 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12247 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
12255 #ifdef TARGET_NR_timer_gettime
12256 case TARGET_NR_timer_gettime
:
12258 /* args: timer_t timerid, struct itimerspec *curr_value */
12259 target_timer_t timerid
= get_timer_id(arg1
);
12263 } else if (!arg2
) {
12264 ret
= -TARGET_EFAULT
;
12266 timer_t htimer
= g_posix_timers
[timerid
];
12267 struct itimerspec hspec
;
12268 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12270 if (host_to_target_itimerspec(arg2
, &hspec
)) {
12271 ret
= -TARGET_EFAULT
;
12278 #ifdef TARGET_NR_timer_getoverrun
12279 case TARGET_NR_timer_getoverrun
:
12281 /* args: timer_t timerid */
12282 target_timer_t timerid
= get_timer_id(arg1
);
12287 timer_t htimer
= g_posix_timers
[timerid
];
12288 ret
= get_errno(timer_getoverrun(htimer
));
12290 fd_trans_unregister(ret
);
12295 #ifdef TARGET_NR_timer_delete
12296 case TARGET_NR_timer_delete
:
12298 /* args: timer_t timerid */
12299 target_timer_t timerid
= get_timer_id(arg1
);
12304 timer_t htimer
= g_posix_timers
[timerid
];
12305 ret
= get_errno(timer_delete(htimer
));
12306 g_posix_timers
[timerid
] = 0;
12312 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12313 case TARGET_NR_timerfd_create
:
12314 ret
= get_errno(timerfd_create(arg1
,
12315 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
12319 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12320 case TARGET_NR_timerfd_gettime
:
12322 struct itimerspec its_curr
;
12324 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
12326 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
12333 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12334 case TARGET_NR_timerfd_settime
:
12336 struct itimerspec its_new
, its_old
, *p_new
;
12339 if (target_to_host_itimerspec(&its_new
, arg3
)) {
12347 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
12349 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
12356 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12357 case TARGET_NR_ioprio_get
:
12358 ret
= get_errno(ioprio_get(arg1
, arg2
));
12362 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12363 case TARGET_NR_ioprio_set
:
12364 ret
= get_errno(ioprio_set(arg1
, arg2
, arg3
));
12368 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12369 case TARGET_NR_setns
:
12370 ret
= get_errno(setns(arg1
, arg2
));
12373 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12374 case TARGET_NR_unshare
:
12375 ret
= get_errno(unshare(arg1
));
12378 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12379 case TARGET_NR_kcmp
:
12380 ret
= get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
12386 gemu_log("qemu: Unsupported syscall: %d\n", num
);
12387 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
12388 unimplemented_nowarn
:
12390 ret
= -TARGET_ENOSYS
;
12395 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
12398 print_syscall_ret(num
, ret
);
12399 trace_guest_user_syscall_ret(cpu
, num
, ret
);
12402 ret
= -TARGET_EFAULT
;