/*
 *  memory management system call shims and definitions
 *
 *  Copyright (c) 2013-15 Stacey D. Son
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Copyright (c) 1982, 1986, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#ifndef BSD_USER_BSD_MEM_H
#define BSD_USER_BSD_MEM_H

#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/mman.h>
#include <sys/shm.h>
#include <fcntl.h>

#include "qemu-bsd.h"

extern struct bsd_shm_regions bsd_shm_regions[];

extern abi_ulong target_brk;
extern abi_ulong initial_target_brk;
/* mmap(2) */
static inline abi_long do_bsd_mmap(void *cpu_env, abi_long arg1, abi_long arg2,
    abi_long arg3, abi_long arg4, abi_long arg5, abi_long arg6, abi_long arg7,
    abi_long arg8)
{
    if (regpairs_aligned(cpu_env) != 0) {
        arg6 = arg7;
        arg7 = arg8;
    }

    return get_errno(target_mmap(arg1, arg2, arg3,
                                 target_to_host_bitmask(arg4, mmap_flags_tbl),
                                 arg5, target_arg64(arg6, arg7)));
}
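/*
 * Note on the argument shuffle above (descriptive, not normative): on
 * targets whose calling convention aligns 64-bit syscall arguments to
 * even register pairs, the 64-bit mmap offset arrives one register slot
 * later than usual, so arg6/arg7 are refetched from arg7/arg8 before
 * target_arg64() reassembles the pair into a single host offset value.
 */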
/* munmap(2) */
static inline abi_long do_bsd_munmap(abi_long arg1, abi_long arg2)
{
    return get_errno(target_munmap(arg1, arg2));
}

/* mprotect(2) */
static inline abi_long do_bsd_mprotect(abi_long arg1, abi_long arg2,
        abi_long arg3)
{
    return get_errno(target_mprotect(arg1, arg2, arg3));
}

/* msync(2) */
static inline abi_long do_bsd_msync(abi_long addr, abi_long len, abi_long flags)
{
    if (!guest_range_valid_untagged(addr, len)) {
        /* It seems odd, but POSIX wants this to be ENOMEM */
        return -TARGET_ENOMEM;
    }

    return get_errno(msync(g2h_untagged(addr), len, flags));
}

/* mlock(2) */
static inline abi_long do_bsd_mlock(abi_long arg1, abi_long arg2)
{
    if (!guest_range_valid_untagged(arg1, arg2)) {
        return -TARGET_EINVAL;
    }
    return get_errno(mlock(g2h_untagged(arg1), arg2));
}

/* munlock(2) */
static inline abi_long do_bsd_munlock(abi_long arg1, abi_long arg2)
{
    if (!guest_range_valid_untagged(arg1, arg2)) {
        return -TARGET_EINVAL;
    }
    return get_errno(munlock(g2h_untagged(arg1), arg2));
}

/* mlockall(2) */
static inline abi_long do_bsd_mlockall(abi_long arg1)
{
    return get_errno(mlockall(arg1));
}

/* munlockall(2) */
static inline abi_long do_bsd_munlockall(void)
{
    return get_errno(munlockall());
}
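/*
 * Usage sketch (illustrative only; the real dispatch lives in the
 * bsd-user syscall handler, and the TARGET_FREEBSD_NR_* spelling here
 * is an assumption about that caller, not something this header defines):
 *
 *     case TARGET_FREEBSD_NR_munmap:
 *         ret = do_bsd_munmap(arg1, arg2);
 *         break;
 */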
/* madvise(2) */
static inline abi_long do_bsd_madvise(abi_long arg1, abi_long arg2,
        abi_long arg3)
{
    abi_ulong len;
    int ret = 0;
    abi_long start = arg1;
    abi_long len_in = arg2;
    abi_long advice = arg3;

    if (start & ~TARGET_PAGE_MASK) {
        return -TARGET_EINVAL;
    }
    if (len_in == 0) {
        return 0;
    }
    len = TARGET_PAGE_ALIGN(len_in);
    if (len == 0 || !guest_range_valid_untagged(start, len)) {
        return -TARGET_EINVAL;
    }

    /*
     * Most advice values are hints, so ignoring them and returning success
     * is ok.
     *
     * However, some advice values, such as MADV_DONTNEED, are not hints and
     * need to be emulated.
     *
     * A straight passthrough for those may not be safe because qemu sometimes
     * turns private file-backed mappings into anonymous mappings.
     * If all guest pages have PAGE_PASSTHROUGH set, mappings have the
     * same semantics for the host as for the guest.
     *
     * MADV_DONTNEED is passed through when possible. If passthrough isn't
     * possible, we nevertheless (wrongly!) return success: reporting the
     * failure would break some userspace programs, and emulating the
     * behaviour completely is quite complicated.
     */
    mmap_lock();
    switch (advice) {
    case MADV_DONTNEED:
        if (page_check_range(start, len, PAGE_PASSTHROUGH)) {
            ret = get_errno(madvise(g2h_untagged(start), len, advice));
            if (ret == 0) {
                page_reset_target_data(start, start + len - 1);
            }
        }
        break;
    }
    mmap_unlock();

    return ret;
}
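/*
 * Why the PAGE_PASSTHROUGH check matters (background, assuming the usual
 * madvise semantics): on anonymous memory MADV_DONTNEED makes the pages
 * read back as zeroes, while on file-backed memory they are refilled from
 * the file. Since qemu may have replaced a guest's private file-backed
 * mapping with an anonymous one, passing the call through blindly could
 * hand the guest zeroes where it expects file contents.
 */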
/* minherit(2) */
static inline abi_long do_bsd_minherit(abi_long addr, abi_long len,
        abi_long inherit)
{
    return get_errno(minherit(g2h_untagged(addr), len, inherit));
}

/* mincore(2) */
static inline abi_long do_bsd_mincore(abi_ulong target_addr, abi_ulong len,
        abi_ulong target_vec)
{
    abi_long ret;
    void *p;
    abi_ulong vec_len = DIV_ROUND_UP(len, TARGET_PAGE_SIZE);

    if (!guest_range_valid_untagged(target_addr, len)
        || !page_check_range(target_addr, len, PAGE_VALID)) {
        return -TARGET_EFAULT;
    }

    p = lock_user(VERIFY_WRITE, target_vec, vec_len, 0);
    if (p == NULL) {
        return -TARGET_EFAULT;
    }
    ret = get_errno(mincore(g2h_untagged(target_addr), len, p));
    unlock_user(p, target_vec, vec_len);

    return ret;
}
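/*
 * vec_len sizing, worked through: mincore() reports one status byte per
 * page, so for example a 10000-byte range with 4 KiB target pages needs
 * DIV_ROUND_UP(10000, 4096) == 3 bytes in the guest's vector.
 */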
/* do_obreak() must return target values and target errnos. */
static inline abi_long do_obreak(abi_ulong brk_val)
{
    abi_long mapped_addr;
    abi_ulong new_brk;
    abi_ulong old_brk;

    /* brk pointers are always untagged */

    /* do not allow the break to shrink below the initial brk value */
    if (brk_val < initial_target_brk) {
        return target_brk;
    }

    new_brk = TARGET_PAGE_ALIGN(brk_val);
    old_brk = TARGET_PAGE_ALIGN(target_brk);

    /* new and old target_brk might be on the same page */
    if (new_brk == old_brk) {
        target_brk = brk_val;
        return target_brk;
    }

    /* Release heap if necessary */
    if (new_brk < old_brk) {
        target_munmap(new_brk, old_brk - new_brk);

        target_brk = brk_val;
        return target_brk;
    }

    mapped_addr = target_mmap(old_brk, new_brk - old_brk,
                              PROT_READ | PROT_WRITE,
                              MAP_FIXED | MAP_EXCL | MAP_ANON | MAP_PRIVATE,
                              -1, 0);

    if (mapped_addr == old_brk) {
        target_brk = brk_val;
        return target_brk;
    }

    /* For everything else, return the previous break. */
    return target_brk;
}
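/*
 * Worked example (assuming 4 KiB target pages): growing the break from
 * 0x20100 to 0x23000 aligns old_brk up to 0x21000 and new_brk to 0x23000,
 * so the pages [0x21000, 0x23000) are mapped anonymously; shrinking back
 * munmaps that same aligned range instead. A move within a single page
 * only updates target_brk, since the page is already mapped.
 */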
/* shm_open(2) */
static inline abi_long do_bsd_shm_open(abi_ulong arg1, abi_long arg2,
        abi_long arg3)
{
    int ret;
    void *p;

    if (arg1 == (uintptr_t)SHM_ANON) {
        p = SHM_ANON;
    } else {
        p = lock_user_string(arg1);
        if (p == NULL) {
            return -TARGET_EFAULT;
        }
    }

    ret = get_errno(shm_open(p, target_to_host_bitmask(arg2, fcntl_flags_tbl),
                             arg3));

    if (p != SHM_ANON) {
        unlock_user(p, arg1, 0);
    }

    return ret;
}
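/*
 * SHM_ANON is FreeBSD's sentinel pointer value for an anonymous, unnamed
 * shared-memory object rather than a real path string, which is why it is
 * matched against the raw guest value before any lock_user_string()
 * attempt, and why it is skipped when unlocking.
 */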
/* shm_unlink(2) */
static inline abi_long do_bsd_shm_unlink(abi_ulong arg1)
{
    int ret;
    void *p;

    p = lock_user_string(arg1);
    if (p == NULL) {
        return -TARGET_EFAULT;
    }
    ret = get_errno(shm_unlink(p)); /* XXX path(p)? */
    unlock_user(p, arg1, 0);

    return ret;
}

/* shmget(2) */
static inline abi_long do_bsd_shmget(abi_long arg1, abi_ulong arg2,
        abi_long arg3)
{
    return get_errno(shmget(arg1, arg2, arg3));
}

/* shmctl(2) */
static inline abi_long do_bsd_shmctl(abi_long shmid, abi_long cmd,
        abi_ulong buff)
{
    struct shmid_ds dsarg;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
        if (target_to_host_shmid_ds(&dsarg, buff)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buff, &dsarg)) {
            return -TARGET_EFAULT;
        }
        break;

    case IPC_SET:
        if (target_to_host_shmid_ds(&dsarg, buff)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        break;

    case IPC_RMID:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;

    default:
        ret = -TARGET_EINVAL;
        break;
    }

    return ret;
}
/* shmat(2) */
static inline abi_long do_bsd_shmat(int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_ulong raddr;
    abi_long ret;
    struct shmid_ds shm_info;

    /* Find out the length of the shared memory segment. */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* Can't get the length */
        return ret;
    }

    if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    WITH_MMAP_LOCK_GUARD() {
        void *host_raddr;

        if (shmaddr) {
            host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
        } else {
            abi_ulong mmap_start;

            mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

            if (mmap_start == -1) {
                return -TARGET_ENOMEM;
            }
            host_raddr = shmat(shmid, g2h_untagged(mmap_start),
                               shmflg | SHM_REMAP);
        }

        if (host_raddr == (void *)-1) {
            return get_errno(-1);
        }
        raddr = h2g(host_raddr);

        page_set_flags(raddr, raddr + shm_info.shm_segsz - 1,
                       PAGE_VALID | PAGE_RESET | PAGE_READ |
                       (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));

        for (int i = 0; i < N_BSD_SHM_REGIONS; i++) {
            if (bsd_shm_regions[i].start == 0) {
                bsd_shm_regions[i].start = raddr;
                bsd_shm_regions[i].size = shm_info.shm_segsz;
                break;
            }
        }
    }

    return raddr;
}
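/*
 * Bookkeeping notes: page_set_flags() teaches qemu's guest page tracker
 * that the attached range is now valid and readable (and writable unless
 * SHM_RDONLY was requested), and the bsd_shm_regions[] slot records the
 * segment size so that do_bsd_shmdt() below can find and undo the
 * mapping, since shmdt(2) itself only takes an address.
 */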
/* shmdt(2) */
static inline abi_long do_bsd_shmdt(abi_ulong shmaddr)
{
    abi_long ret;

    WITH_MMAP_LOCK_GUARD() {
        int i;

        for (i = 0; i < N_BSD_SHM_REGIONS; ++i) {
            if (bsd_shm_regions[i].start == shmaddr) {
                break;
            }
        }

        if (i == N_BSD_SHM_REGIONS) {
            return -TARGET_EINVAL;
        }

        ret = get_errno(shmdt(g2h_untagged(shmaddr)));
        if (ret == 0) {
            abi_ulong size = bsd_shm_regions[i].size;

            bsd_shm_regions[i].start = 0;
            page_set_flags(shmaddr, shmaddr + size - 1, 0);
            mmap_reserve(shmaddr, size);
        }
    }

    return ret;
}
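/*
 * The mmap_reserve() call keeps the detached range set aside in qemu's
 * view of the guest address space (relevant when a reserved VA window is
 * in use), so later allocations do not land on addresses the guest has
 * just detached.
 */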
static inline abi_long do_bsd_vadvise(void)
{
    /* See sys_ovadvise() in vm_unix.c */
    return -TARGET_EINVAL;
}

static inline abi_long do_bsd_sbrk(void)
{
    /* see sys_sbrk() in vm_mmap.c */
    return -TARGET_EOPNOTSUPP;
}

static inline abi_long do_bsd_sstk(void)
{
    /* see sys_sstk() in vm_mmap.c */
    return -TARGET_EOPNOTSUPP;
}

#endif /* BSD_USER_BSD_MEM_H */