/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 - 2008 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu-common.h"
static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;
void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

bool have_mmap_lock(void)
{
    return mmap_lock_count > 0;
}
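
/*
 * Illustrative note (not in the original source): the per-thread counter
 * makes the lock recursive, so nested lock/unlock pairs are safe:
 *
 *     mmap_lock();
 *     mmap_lock();      // nested call only bumps mmap_lock_count
 *     ...
 *     mmap_unlock();
 *     mmap_unlock();    // the outermost unlock releases mmap_mutex
 */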
/* Grab lock to make sure things are in a consistent state after fork(). */
void mmap_fork_start(void)
{
    if (mmap_lock_count) {
        abort();
    }
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child) {
        pthread_mutex_init(&mmap_mutex, NULL);
    } else {
        pthread_mutex_unlock(&mmap_mutex);
    }
}
/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

#ifdef DEBUG_MMAP
    printf("mprotect: start=0x" TARGET_ABI_FMT_lx
           " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c\n", start, len,
           prot & PROT_READ ? 'r' : '-',
           prot & PROT_WRITE ? 'w' : '-',
           prot & PROT_EXEC ? 'x' : '-');
#endif

    if ((start & ~TARGET_PAGE_MASK) != 0)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    if (len == 0)
        return 0;

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h_untagged(host_start),
                       qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = prot;
        for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h_untagged(host_end - qemu_host_page_size),
                       qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h_untagged(host_start), host_end - host_start, prot);
        if (ret != 0)
            goto error;
    }
    page_set_flags(start, start + len, prot | PAGE_VALID);
    mmap_unlock();
    return 0;

error:
    mmap_unlock();
    return ret;
}
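
/*
 * Worked example (illustrative, not in the original source): assume 16K
 * host pages and 4K target pages, and target_mprotect(0x5000, 0x8000, prot).
 * The target range is [0x5000, 0xd000), so host_start = 0x4000 and
 * host_end = 0x10000.  The partially covered host pages at [0x4000, 0x8000)
 * and [0xc000, 0x10000) receive 'prot' OR-ed with the existing flags of
 * their uncovered target pages, while the fully covered middle host page
 * at [0x8000, 0xc000) receives 'prot' verbatim.
 */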
/* map an incomplete host page */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h_untagged(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for (addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANON, -1, 0);
        if (p == MAP_FAILED)
            return -1;
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANON)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -1;

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* read the corresponding file data */
        pread(fd, g2h_untagged(start), end - start, offset);

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        /* just update the protection */
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
    }
    return 0;
}
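
/*
 * Illustrative example (not in the original source): with 16K host pages
 * and 4K target pages, mmap_frag(0x4000, 0x5000, 0x6000, ...) fills in the
 * single target page [0x5000, 0x6000) inside the host page [0x4000, 0x8000).
 * If no neighbouring target page is mapped yet, the whole host page is
 * allocated anonymously and the file contents are pread() into it;
 * otherwise the existing host page is temporarily made writable so the
 * data can be read in, and the combined protection is then reinstated.
 */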
#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
# define TASK_UNMAPPED_BASE (1ul << 38)
#else
# define TASK_UNMAPPED_BASE 0x40000000
#endif

abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

unsigned long last_brk;
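
/*
 * Note (illustrative, not in the original source): the search for free
 * guest virtual addresses starts at 256 GiB (1ul << 38) for 64-bit guests
 * on 64-bit hosts, and at 1 GiB (0x40000000) otherwise; mmap_next_start
 * caches where the next search should begin.
 */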
/*
 * Subroutine of mmap_find_vma, used when we have pre-allocated a chunk of guest
 * address space.
 */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                        abi_ulong alignment)
{
    abi_ulong addr;
    abi_ulong end_addr;
    int prot;
    int looped = 0;

    if (size > reserved_va) {
        return (abi_ulong)-1;
    }

    size = HOST_PAGE_ALIGN(size) + alignment;
    end_addr = start + size;
    if (end_addr > reserved_va) {
        end_addr = reserved_va;
    }
    addr = end_addr - qemu_host_page_size;

    while (1) {
        if (addr > end_addr) {
            if (looped) {
                return (abi_ulong)-1;
            }
            end_addr = reserved_va;
            addr = end_addr - qemu_host_page_size;
            looped = 1;
            continue;
        }
        prot = page_get_flags(addr);
        if (prot) {
            end_addr = addr;
        }
        if (end_addr - addr >= size) {
            break;
        }
        addr -= qemu_host_page_size;
    }

    if (start == mmap_next_start) {
        mmap_next_start = addr;
    }
    /* addr is sufficiently low to align it up */
    if (alignment != 0) {
        addr = (addr + alignment) & ~(alignment - 1);
    }
    return addr;
}
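
/*
 * Illustrative example (not in the original source): a request for 0x8000
 * bytes starting at 0x20000000 begins scanning at the host page just below
 * 0x20008000 and walks downward, resetting end_addr whenever it meets a
 * page whose flags are set, until end_addr - addr >= size.  If the scan
 * underflows past address 0, it restarts once from the top of the reserved
 * area before reporting failure.
 */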
/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 if error.
 */
static abi_ulong mmap_find_vma_aligned(abi_ulong start, abi_ulong size,
                                       abi_ulong alignment)
{
    void *ptr, *prev;
    abi_ulong addr;
    int flags;
    int wrapped, repeat;

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }

    size = HOST_PAGE_ALIGN(size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size,
            (alignment != 0 ? 1 << alignment : 0));
    }

    addr = start;
    wrapped = repeat = 0;
    prev = 0;
    flags = MAP_ANONYMOUS | MAP_PRIVATE;
    if (alignment != 0) {
        flags |= MAP_ALIGNED(alignment);
    }

    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         * - mmap() with MAP_FIXED flag
         * - mremap() with MREMAP_FIXED flag
         * - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h_untagged(addr), size, PROT_NONE, flags, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /*
         * Count the number of sequential returns of the same address.
         * This is used to modify the search algorithm below.
         */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & ~TARGET_PAGE_MASK) == 0) {
                /* Success. */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target. */
            switch (repeat) {
            case 0:
                /*
                 * Assume the result that the kernel gave us is the
                 * first with enough free space, so start again at the
                 * next higher target page.
                 */
                addr = TARGET_PAGE_ALIGN(addr);
                break;
            case 1:
                /*
                 * Sometimes the kernel decides to perform the allocation
                 * at the top end of memory instead.
                 */
                addr &= TARGET_PAGE_MASK;
                break;
            case 2:
                /* Start over at low memory. */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last. */
                addr = -1;
                break;
            }
        } else {
            /*
             * Since the result the kernel gave didn't fit, start
             * again at low memory. If any repetition, fail.
             */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again. */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space. */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /*
             * Don't actually use 0 when wrapping, instead indicate
             * that we'd truly like an allocation in low memory.
             */
            addr = TARGET_PAGE_SIZE;
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
{
    return mmap_find_vma_aligned(start, size, 0);
}
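
/*
 * Usage sketch (illustrative, not in the original source): callers hold
 * the mmap lock and pass start == 0 to search from mmap_next_start:
 *
 *     mmap_lock();
 *     abi_ulong guest_addr = mmap_find_vma(0, 0x10000);
 *     if (guest_addr != (abi_ulong)-1) {
 *         // map at g2h_untagged(guest_addr) with MAP_FIXED to claim it
 *     }
 *     mmap_unlock();
 */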
/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, off_t offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;

    mmap_lock();
#ifdef DEBUG_MMAP
    printf("mmap: start=0x" TARGET_ABI_FMT_lx
           " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c flags=",
           start, len,
           prot & PROT_READ ? 'r' : '-',
           prot & PROT_WRITE ? 'w' : '-',
           prot & PROT_EXEC ? 'x' : '-');
    if (flags & MAP_ALIGNMENT_MASK) {
        printf("MAP_ALIGNED(%u) ", (flags & MAP_ALIGNMENT_MASK)
                >> MAP_ALIGNMENT_SHIFT);
    }
    if (flags & MAP_GUARD) {
        printf("MAP_GUARD ");
    }
    if (flags & MAP_FIXED) {
        printf("MAP_FIXED ");
    }
    if (flags & MAP_ANONYMOUS) {
        printf("MAP_ANON ");
    }
    if (flags & MAP_EXCL) {
        printf("MAP_EXCL ");
    }
    if (flags & MAP_PRIVATE) {
        printf("MAP_PRIVATE ");
    }
    if (flags & MAP_SHARED) {
        printf("MAP_SHARED ");
    }
    if (flags & MAP_NOCORE) {
        printf("MAP_NOCORE ");
    }
    if (flags & MAP_STACK) {
        printf("MAP_STACK ");
    }
    printf("fd=%d offset=0x%llx\n", fd, offset);
#endif
    if ((flags & MAP_ANONYMOUS) && fd != -1) {
        errno = EINVAL;
        goto fail;
    }
#ifdef MAP_STACK
    if (flags & MAP_STACK) {
        if ((fd != -1) || ((prot & (PROT_READ | PROT_WRITE)) !=
                    (PROT_READ | PROT_WRITE))) {
            errno = EINVAL;
            goto fail;
        }
    }
#endif /* MAP_STACK */
    if ((flags & MAP_GUARD) && (prot != PROT_NONE || fd != -1 ||
        offset != 0 || (flags & (MAP_SHARED | MAP_PRIVATE |
        /* MAP_PREFAULT | */ /* MAP_PREFAULT not in mman.h */
        MAP_PREFAULT_READ | MAP_ANON | MAP_STACK)) != 0)) {
        errno = EINVAL;
        goto fail;
    }
    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    len = TARGET_PAGE_ALIGN(len);
    if (len == 0) {
        errno = EINVAL;
        goto fail;
    }
    real_start = start & qemu_host_page_mask;
    host_offset = offset & qemu_host_page_mask;
    /*
     * If the user is asking for the kernel to find a location, do that
     * before we truncate the length for mapping files below.
     */
    if (!(flags & MAP_FIXED)) {
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        if ((flags & MAP_ALIGNMENT_MASK) != 0)
            start = mmap_find_vma_aligned(real_start, host_len,
                (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT);
        else
            start = mmap_find_vma(real_start, host_len);
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
    }
    /*
     * When mapping files into a memory area larger than the file, accesses
     * to pages beyond the file size will cause a SIGBUS.
     *
     * For example, if mmaping a file of 100 bytes on a host with 4K pages
     * emulating a target with 8K pages, the target expects to be able to
     * access the first 8K. But the host will trap us on any access beyond
     * 4K.
     *
     * When emulating a target with a larger page-size than the hosts, we
     * may need to truncate file maps at EOF and add extra anonymous pages
     * up to the targets page boundary.
     */
    if ((qemu_real_host_page_size < qemu_host_page_size) && fd != -1) {
        struct stat sb;

        if (fstat(fd, &sb) == -1) {
            goto fail;
        }

        /* Are we trying to create a map beyond EOF? */
        if (offset + len > sb.st_size) {
            /*
             * If so, truncate the file map at eof aligned with
             * the hosts real pagesize. Additional anonymous maps
             * will be created beyond EOF.
             */
            len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
        }
    }
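
    /*
     * Worked example (illustrative, not in the original source): a 100-byte
     * file mapped on a host with 4K real pages while emulating 8K target
     * pages gets its file-backed length truncated to 4K here; the anonymous
     * pages mapped below cover the remaining 4K up to the target page
     * boundary, so the guest can touch the whole 8K page without a SIGBUS.
     */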
    if (!(flags & MAP_FIXED)) {
        unsigned long host_start;
        void *p;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);

        /*
         * Note: we prefer to control the mapping address. It is
         * especially important if qemu_host_page_size >
         * qemu_real_host_page_size
         */
        p = mmap(g2h_untagged(start), host_len, prot,
                 flags | MAP_FIXED | ((fd != -1) ? MAP_ANONYMOUS : 0), -1, 0);
        if (p == MAP_FAILED)
            goto fail;
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (fd != -1) {
            p = mmap(g2h_untagged(start), len, prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h_untagged(start), host_len);
                goto fail;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if requested memory area fits target address space.
         * It can fail only on 64-bit host with 32-bit target.
         * On any other target/host combination the host mmap() handles
         * this error correctly.
         */
#if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
        if ((unsigned long)start + len - 1 > (abi_ulong) -1) {
            errno = EINVAL;
            goto fail;
        }
#endif

        /*
         * worst case: we cannot map the file because the offset is not
         * aligned, so we read it
         */
        if (!(flags & MAP_ANON) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /*
             * msync() won't work here, so we return an error if write is
             * possible while it is a shared mapping
             */
            if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
                (prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANON,
                                  -1, 0);
            if (retaddr == -1)
                goto fail;
            pread(fd, g2h_untagged(start), len, offset);
            if (!(prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, prot);
                if (ret != 0) {
                    start = ret;
                    goto the_end;
                }
            }
            goto the_end;
        }
        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                prot, flags, fd, offset);
                if (ret == -1)
                    goto fail;
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            prot, flags, fd, offset);
            if (ret == -1)
                goto fail;
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, end,
                            prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1)
                goto fail;
            real_end -= qemu_host_page_size;
        }
        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;

            if (flags & MAP_ANON)
                offset1 = 0;
            else
                offset1 = offset + real_start - start;
            p = mmap(g2h_untagged(real_start), real_end - real_start,
                     prot, flags, fd, offset1);
            if (p == MAP_FAILED)
                goto fail;
        }
    }
 the_end1:
    page_set_flags(start, start + len, prot | PAGE_VALID);
 the_end:
#ifdef DEBUG_MMAP
    printf("ret=0x" TARGET_ABI_FMT_lx "\n", start);
    page_dump(stdout);
    printf("\n");
#endif
    tb_invalidate_phys_range(start, start + len);
    mmap_unlock();
    return start;

fail:
    mmap_unlock();
    return -1;
}
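
/*
 * Usage sketch (illustrative, not in the original source): emulating a
 * guest mmap(NULL, 8192, PROT_READ, MAP_PRIVATE, fd, 0) ends up here as
 *
 *     abi_long guest_va = target_mmap(0, 8192, PROT_READ,
 *                                     MAP_PRIVATE, fd, 0);
 *
 * which picks a guest address via mmap_find_vma(), maps fully covered host
 * pages directly, and falls back to mmap_frag() for partially covered ones.
 */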
static void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }
    if (real_start != real_end) {
        mmap(g2h_untagged(real_start), real_end - real_start, PROT_NONE,
             MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE,
             -1, 0);
    }
}
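
/*
 * Note (illustrative, not in the original source): with reserved_va the
 * guest address space is one big pre-reserved host mapping, so "unmapping"
 * must re-protect the range to PROT_NONE rather than munmap() it; boundary
 * host pages that still contain live target pages are left untouched.
 */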
int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x" TARGET_ABI_FMT_lx " len=0x"
           TARGET_ABI_FMT_lx "\n",
           start, len);
#endif
    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        return -EINVAL;
    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        if (reserved_va) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h_untagged(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        page_set_flags(start, start + len, 0);
        tb_invalidate_phys_range(start, start + len);
    }
    mmap_unlock();
    return ret;
}
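
/*
 * Note (illustrative, not in the original source): a boundary host page is
 * only released when none of its target pages remain mapped; otherwise the
 * still-live target pages keep the whole host page alive, and only the
 * page flags for [start, start + len) are cleared.
 */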
int target_msync(abi_ulong start, abi_ulong len, int flags)
{
    abi_ulong end;

    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    if (end == start)
        return 0;

    start &= qemu_host_page_mask;
    return msync(g2h_untagged(start), end - start, flags);
}
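
/*
 * Note (illustrative, not in the original source): 'start' is rounded down
 * to a host page boundary because msync() operates on host pages; syncing
 * the extra target pages at the front of the range is harmless.
 */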