/* bsd-user/mmap.c */

/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 - 2008 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu.h"

static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;

void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

bool have_mmap_lock(void)
{
    return mmap_lock_count > 0 ? true : false;
}

/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count)
        abort();
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child)
        pthread_mutex_init(&mmap_mutex, NULL);
    else
        pthread_mutex_unlock(&mmap_mutex);
}
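
/*
 * Usage sketch (hypothetical caller, for illustration): the pair above is
 * meant to bracket fork() so the child never inherits a mutex held by
 * another thread:
 *
 *     mmap_fork_start();
 *     pid_t pid = fork();
 *     mmap_fork_end(pid == 0);    // child reinitializes, parent unlocks
 */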

/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

    qemu_log_mask(CPU_LOG_PAGE, "mprotect: start=0x" TARGET_ABI_FMT_lx
                  " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c\n", start, len,
                  prot & PROT_READ ? 'r' : '-',
                  prot & PROT_WRITE ? 'w' : '-',
                  prot & PROT_EXEC ? 'x' : '-');
    if ((start & ~TARGET_PAGE_MASK) != 0)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    if (len == 0)
        return 0;

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h_untagged(host_start),
                       qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = prot;
        for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h_untagged(host_end - qemu_host_page_size),
                       qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h_untagged(host_start), host_end - host_start, prot);
        if (ret != 0)
            goto error;
    }
    page_set_flags(start, start + len, prot | PAGE_VALID);
    mmap_unlock();
    return 0;

error:
    mmap_unlock();
    return ret;
}
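
/*
 * Worked example (assumed sizes, for illustration): on a 16K-page host
 * emulating a 4K-page target, target_mprotect(0x5000, 0x2000, PROT_READ)
 * covers only part of host page [0x4000, 0x8000). host_start = 0x4000 and
 * host_end = 0x8000, so the first fragment loop ORs in page_get_flags() for
 * target page 0x4000 and the second loop for 0x7000; the single host-page
 * mprotect() then carries the union, and the untouched neighbours keep
 * their existing permissions. The "middle" mprotect() is skipped entirely
 * because host_start reaches host_end.
 */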

/*
 * map an incomplete host page
 *
 * mmap_frag can be called with a valid fd, if flags doesn't contain one of
 * MAP_ANON, MAP_STACK, MAP_GUARD. If we need to map a page in those cases, we
 * pass fd == -1. However, if flags contains MAP_GUARD then MAP_ANON cannot be
 * added.
 *
 * * If fd is valid (not -1) we want to map the pages with MAP_ANON.
 * * If flags contains MAP_GUARD we don't want to add MAP_ANON because it
 *   will be rejected. See kern_mmap's enforcing of constraints for MAP_GUARD
 *   in sys/vm/vm_mmap.c.
 * * If flags contains MAP_ANON it doesn't matter if we add it or not.
 * * If flags contains MAP_STACK, mmap adds MAP_ANON when called so doesn't
 *   matter if we add it or not either. See enforcing of constraints for
 *   MAP_STACK in kern_mmap.
 *
 * Don't add MAP_ANON for the flags that use fd == -1 without specifying the
 * flags directly, with the assumption that future flags that require fd == -1
 * will also not require MAP_ANON.
 */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h_untagged(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for (addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one. See also above. */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | ((fd != -1) ? MAP_ANON : 0), -1, 0);
        if (p == MAP_FAILED)
            return -1;
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (fd != -1) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -1;

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* read the corresponding file data */
        if (pread(fd, g2h_untagged(start), end - start, offset) == -1) {
            return -1;
        }

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
        if (prot_new & PROT_WRITE) {
            memset(g2h_untagged(start), 0, end - start);
        }
    }
    return 0;
}
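
/*
 * Example (assumed sizes, 16K host / 4K target, for illustration): mapping a
 * file into target pages [0x5000, 0x7000) touches only part of host page
 * [0x4000, 0x8000). If neighbouring target pages on that host page are
 * already live, a plain mmap() of the whole host page would clobber them, so
 * the caller uses mmap_frag() instead: the host page is made writable if
 * needed, the file bytes are pread() into place, and the page ends up with
 * the union of the old and new protections.
 */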

#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
# define TASK_UNMAPPED_BASE  (1ul << 38)
#else
# define TASK_UNMAPPED_BASE  0x40000000
#endif
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

unsigned long last_brk;
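
/*
 * For illustration: with the constants above, a 64-bit guest on a 64-bit
 * host sees its first kernel-chosen mapping at 1ul << 38 (0x4000000000);
 * any other combination starts at 0x40000000. mmap_next_start then creeps
 * upward as mmap_find_vma() below hands out regions.
 */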

/*
 * Subroutine of mmap_find_vma, used when we have pre-allocated a chunk of guest
 * address space.
 */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                        abi_ulong alignment)
{
    abi_ulong addr;
    abi_ulong end_addr;
    int prot;
    int looped = 0;

    if (size > reserved_va) {
        return (abi_ulong)-1;
    }

    size = HOST_PAGE_ALIGN(size) + alignment;
    end_addr = start + size;
    if (end_addr > reserved_va) {
        end_addr = reserved_va;
    }
    addr = end_addr - qemu_host_page_size;

    while (1) {
        if (addr > end_addr) {
            if (looped) {
                return (abi_ulong)-1;
            }
            end_addr = reserved_va;
            addr = end_addr - qemu_host_page_size;
            looped = 1;
            continue;
        }
        prot = page_get_flags(addr);
        if (prot) {
            end_addr = addr;
        }
        if (end_addr - addr >= size) {
            break;
        }
        addr -= qemu_host_page_size;
    }

    if (start == mmap_next_start) {
        mmap_next_start = addr;
    }
    /* addr is sufficiently low to align it up */
    if (alignment != 0) {
        addr = (addr + alignment) & ~(alignment - 1);
    }
    return addr;
}
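
/*
 * Sketch of the search above (assumed numbers): with reserved_va spanning
 * 1G and all of it still free, the scan starts one host page below end_addr
 * and walks downward; page_get_flags() returns 0 everywhere, end_addr never
 * moves, and the loop exits as soon as end_addr - addr >= size, i.e. at the
 * highest suitably sized gap. Hitting a mapped page resets end_addr to that
 * page, restarting the gap measurement below it; one full wrap without a
 * fit returns the (abi_ulong)-1 sentinel.
 */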

/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 if error.
 */
static abi_ulong mmap_find_vma_aligned(abi_ulong start, abi_ulong size,
                                       abi_ulong alignment)
{
    void *ptr, *prev;
    abi_ulong addr;
    int flags;
    int wrapped, repeat;

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }

    size = HOST_PAGE_ALIGN(size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size,
            (alignment != 0 ? 1 << alignment : 0));
    }

    addr = start;
    wrapped = repeat = 0;
    prev = 0;
    flags = MAP_ANON | MAP_PRIVATE;
    if (alignment != 0) {
        flags |= MAP_ALIGNED(alignment);
    }

    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         * - mmap() with MAP_FIXED flag
         * - mremap() with MREMAP_FIXED flag
         * - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h_untagged(addr), size, PROT_NONE,
                   flags, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /*
         * Count the number of sequential returns of the same address.
         * This is used to modify the search algorithm below.
         */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & ~TARGET_PAGE_MASK) == 0) {
                /* Success. */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target. */
            switch (repeat) {
            case 0:
                /*
                 * Assume the result that the kernel gave us is the
                 * first with enough free space, so start again at the
                 * next higher target page.
                 */
                addr = TARGET_PAGE_ALIGN(addr);
                break;
            case 1:
                /*
                 * Sometimes the kernel decides to perform the allocation
                 * at the top end of memory instead.
                 */
                addr &= TARGET_PAGE_MASK;
                break;
            case 2:
                /* Start over at low memory. */
                addr = 0;
                break;
            default:
                /* Fail. This unaligned block must be the last. */
                addr = -1;
                break;
            }
        } else {
            /*
             * Since the result the kernel gave didn't fit, start
             * again at low memory. If any repetition, fail.
             */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again. */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space. */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /*
             * Don't actually use 0 when wrapping, instead indicate
             * that we'd truly like an allocation in low memory.
             */
            addr = TARGET_PAGE_SIZE;
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}

abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
{
    return mmap_find_vma_aligned(start, size, 0);
}
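
/*
 * Usage sketch (hypothetical caller, for illustration): both finders assume
 * mmap_lock() is held and report failure with the sentinel (abi_ulong)-1
 * rather than through errno:
 *
 *     mmap_lock();
 *     abi_ulong where = mmap_find_vma(0, 16 * TARGET_PAGE_SIZE);
 *     if (where == (abi_ulong)-1) {
 *         ...                      // caller turns the sentinel into ENOMEM
 *     }
 *     mmap_unlock();
 */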

/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, off_t offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;

    mmap_lock();
    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        qemu_log("mmap: start=0x" TARGET_ABI_FMT_lx
                 " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c flags=",
                 start, len,
                 prot & PROT_READ ? 'r' : '-',
                 prot & PROT_WRITE ? 'w' : '-',
                 prot & PROT_EXEC ? 'x' : '-');
        if (flags & MAP_ALIGNMENT_MASK) {
            qemu_log("MAP_ALIGNED(%u) ",
                     (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT);
        }
        if (flags & MAP_GUARD) {
            qemu_log("MAP_GUARD ");
        }
        if (flags & MAP_FIXED) {
            qemu_log("MAP_FIXED ");
        }
        if (flags & MAP_ANON) {
            qemu_log("MAP_ANON ");
        }
        if (flags & MAP_EXCL) {
            qemu_log("MAP_EXCL ");
        }
        if (flags & MAP_PRIVATE) {
            qemu_log("MAP_PRIVATE ");
        }
        if (flags & MAP_SHARED) {
            qemu_log("MAP_SHARED ");
        }
        if (flags & MAP_NOCORE) {
            qemu_log("MAP_NOCORE ");
        }
        if (flags & MAP_STACK) {
            qemu_log("MAP_STACK ");
        }
        qemu_log("fd=%d offset=0x%lx\n", fd, offset);
    }

    if ((flags & MAP_ANON) && fd != -1) {
        errno = EINVAL;
        goto fail;
    }
    if (flags & MAP_STACK) {
        if ((fd != -1) || ((prot & (PROT_READ | PROT_WRITE)) !=
                           (PROT_READ | PROT_WRITE))) {
            errno = EINVAL;
            goto fail;
        }
    }
    if ((flags & MAP_GUARD) && (prot != PROT_NONE || fd != -1 ||
        offset != 0 || (flags & (MAP_SHARED | MAP_PRIVATE |
        /* MAP_PREFAULT | */ /* MAP_PREFAULT not in mman.h */
        MAP_PREFAULT_READ | MAP_ANON | MAP_STACK)) != 0)) {
        errno = EINVAL;
        goto fail;
    }

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    if (len == 0) {
        errno = EINVAL;
        goto fail;
    }

    /* Check for overflows */
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0) {
        errno = ENOMEM;
        goto fail;
    }

    real_start = start & qemu_host_page_mask;
    host_offset = offset & qemu_host_page_mask;

    /*
     * If the user is asking for the kernel to find a location, do that
     * before we truncate the length for mapping files below.
     */
    if (!(flags & MAP_FIXED)) {
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        if ((flags & MAP_ALIGNMENT_MASK) != 0)
            start = mmap_find_vma_aligned(real_start, host_len,
                (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT);
        else
            start = mmap_find_vma(real_start, host_len);
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
    }

    /*
     * When mapping files into a memory area larger than the file, accesses
     * to pages beyond the file size will cause a SIGBUS.
     *
     * For example, if mmapping a file of 100 bytes on a host with 4K pages
     * emulating a target with 8K pages, the target expects to be able to
     * access the first 8K. But the host will trap us on any access beyond
     * 4K.
     *
     * When emulating a target with a larger page size than the host's, we
     * may need to truncate file maps at EOF and add extra anonymous pages
     * up to the target's page boundary.
     */
    if ((qemu_real_host_page_size() < qemu_host_page_size) && fd != -1) {
        struct stat sb;

        if (fstat(fd, &sb) == -1) {
            goto fail;
        }

        /* Are we trying to create a map beyond EOF? */
        if (offset + len > sb.st_size) {
            /*
             * If so, truncate the file map at EOF aligned with
             * the host's real page size. Additional anonymous maps
             * will be created beyond EOF.
             */
            len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
        }
    }
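
    /*
     * Worked example for the truncation above, with the numbers from the
     * comment: a 100-byte file at offset 0, 4K real host pages, 8K target
     * pages. TARGET_PAGE_ALIGN earlier rounded len up to 8K, so
     * offset + len exceeds sb.st_size and len becomes
     * REAL_HOST_PAGE_ALIGN(100 - 0) = 4K: the file map stops at the first
     * real-host-page boundary past EOF.
     */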

    if (!(flags & MAP_FIXED)) {
        unsigned long host_start;
        void *p;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);

        /*
         * Note: we prefer to control the mapping address. It is
         * especially important if qemu_host_page_size >
         * qemu_real_host_page_size.
         */
        p = mmap(g2h_untagged(start), host_len, prot,
                 flags | MAP_FIXED | ((fd != -1) ? MAP_ANON : 0), -1, 0);
        if (p == MAP_FAILED)
            goto fail;
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (fd != -1) {
            p = mmap(g2h_untagged(start), len, prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h_untagged(start), host_len);
                goto fail;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if the requested memory area fits the target address space.
         * It can fail only on a 64-bit host with a 32-bit target.
         * On any other target/host combination, the host mmap() handles
         * this error correctly.
         */
        if (!guest_range_valid_untagged(start, len)) {
            errno = EINVAL;
            goto fail;
        }

        /*
         * Worst case: we cannot map the file because the offset is not
         * aligned, so we read it.
         */
        if (fd != -1 &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /*
             * msync() won't work here, so we return an error if write is
             * possible while it is a shared mapping.
             */
            if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
                (prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANON,
                                  -1, 0);
            if (retaddr == -1)
                goto fail;
            if (pread(fd, g2h_untagged(start), len, offset) == -1) {
                goto fail;
            }
            if (!(prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, prot);
                assert(ret == 0);
            }
            goto the_end;
        }

        /* Reject the mapping if any page within the range is mapped */
        if ((flags & MAP_EXCL) && page_check_range(start, len, 0) < 0) {
            errno = EINVAL;
            goto fail;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                prot, flags, fd, offset);
                if (ret == -1)
                    goto fail;
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            prot, flags, fd, offset);
            if (ret == -1)
                goto fail;
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, end,
                            prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1)
                goto fail;
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;

            if (flags & MAP_ANON)
                offset1 = 0;
            else
                offset1 = offset + real_start - start;
            p = mmap(g2h_untagged(real_start), real_end - real_start,
                     prot, flags, fd, offset1);
            if (p == MAP_FAILED)
                goto fail;
        }
    }

the_end1:
    page_set_flags(start, start + len, prot | PAGE_VALID);
the_end:
#ifdef DEBUG_MMAP
    printf("ret=0x" TARGET_ABI_FMT_lx "\n", start);
    page_dump(stdout);
    printf("\n");
#endif
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}
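
/*
 * Usage sketch (hypothetical values, for illustration): a guest mmap(2) for
 * two anonymous target pages would reach this function as
 *
 *     abi_long addr = target_mmap(0, 2 * TARGET_PAGE_SIZE,
 *                                 PROT_READ | PROT_WRITE,
 *                                 MAP_PRIVATE | MAP_ANON, -1, 0);
 *
 * On success the guest address is returned with its pages already tagged
 * PAGE_VALID via page_set_flags(); on failure -1 is returned with errno set
 * for the syscall layer to translate.
 */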

static void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0) {
            real_start += qemu_host_page_size;
        }
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0) {
            real_end -= qemu_host_page_size;
        }
    }
    if (real_start != real_end) {
        mmap(g2h_untagged(real_start), real_end - real_start, PROT_NONE,
             MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
    }
}
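
/*
 * Note, for illustration: when reserved_va is in effect, "unmapping" must
 * not return pages to the host, or the guest-address reservation would
 * develop holes. The PROT_NONE + MAP_FIXED remap above therefore stands in
 * for munmap(), while the partial-host-page bookkeeping mirrors
 * target_munmap() below.
 */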

int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x" TARGET_ABI_FMT_lx " len=0x"
           TARGET_ABI_FMT_lx "\n",
           start, len);
#endif
    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        return -EINVAL;
    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        if (reserved_va) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h_untagged(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        page_set_flags(start, start + len, 0);
    }
    mmap_unlock();
    return ret;
}
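
/*
 * Worked example (assumed sizes, 16K host / 4K target, for illustration):
 * target_munmap(0x5000, 0x2000) clears target bookkeeping for
 * [0x5000, 0x7000), but because neighbouring target pages on host page
 * [0x4000, 0x8000) are still mapped, prot != 0 on both sides trims
 * real_start and real_end until no host munmap() happens at all. The host
 * page is only released once every target page on it is gone.
 */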

int target_msync(abi_ulong start, abi_ulong len, int flags)
{
    abi_ulong end;

    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    if (end == start)
        return 0;

    start &= qemu_host_page_mask;
    return msync(g2h_untagged(start), end - start, flags);
}
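
/*
 * Example (assumed sizes, 16K host / 4K target, for illustration):
 * target_msync(0x5000, 0x1000, MS_SYNC) rounds start down to the host page
 * boundary, so the host msync() spans [0x4000, 0x6000). Working at
 * host-page granularity can only flush more than the target asked for,
 * never less.
 */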