/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include <linux/mman.h>
#include <linux/unistd.h>

#include "qemu.h"
#include "qemu-common.h"
#include "translate-all.h"

//#define DEBUG_MMAP
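/* Modifications of the guest memory map and of the per-page flag
   bookkeeping below are serialized by this mutex. */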
static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;

void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}
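/* Note that the counter makes the lock recursive within a thread: only
   the outermost mmap_lock()/mmap_unlock() pair touches the pthread
   mutex, so the syscall paths may nest lock acquisitions freely. */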
/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count)
        abort();
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child)
        pthread_mutex_init(&mmap_mutex, NULL);
    else
        pthread_mutex_unlock(&mmap_mutex);
}
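/* After fork() only the forking thread survives in the child, so the
   mutex is reinitialized there rather than unlocked; the parent simply
   drops the lock it took in mmap_fork_start(). */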
/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

#ifdef DEBUG_MMAP
    printf("mprotect: start=0x" TARGET_ABI_FMT_lx
           " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c\n", start, len,
           prot & PROT_READ ? 'r' : '-',
           prot & PROT_WRITE ? 'w' : '-',
           prot & PROT_EXEC ? 'x' : '-');
#endif

    if ((start & ~TARGET_PAGE_MASK) != 0)
        return -TARGET_EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -TARGET_EINVAL;
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    if (len == 0)
        return 0;

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h(host_start), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = prot;
        for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h(host_end - qemu_host_page_size),
                       qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h(host_start), host_end - host_start, prot);
        if (ret != 0)
            goto error;
    }
    page_set_flags(start, start + len, prot | PAGE_VALID);
    mmap_unlock();
    return 0;

error:
    mmap_unlock();
    return ret;
}
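/* When TARGET_PAGE_SIZE is smaller than the host page size, one host page
   can back several target pages with different protections and backings.
   mmap_frag() below patches up such partially covered host pages. */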
/* map an incomplete host page */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for (addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return -1;
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -1;

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* read the corresponding file data */
        if (pread(fd, g2h(start), end - start, offset) == -1)
            return -1;

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
        if (prot_new & PROT_WRITE) {
            memset(g2h(start), 0, end - start);
        }
    }
    return 0;
}
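/* Default lowest address at which mmap_find_vma() starts searching when
   the guest supplies no hint; mmap_next_start advances past each
   successful allocation. */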
#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
# define TASK_UNMAPPED_BASE  (1ul << 38)
#elif defined(__CYGWIN__)
/* Cygwin doesn't have a whole lot of address space.  */
# define TASK_UNMAPPED_BASE  0x18000000
#else
# define TASK_UNMAPPED_BASE  0x40000000
#endif

abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

unsigned long last_brk;
/* Subroutine of mmap_find_vma, used when we have pre-allocated a chunk
   of guest address space.  */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size)
{
    abi_ulong addr;
    abi_ulong end_addr;
    int prot;
    int looped = 0;

    if (size > reserved_va) {
        return (abi_ulong)-1;
    }

    size = HOST_PAGE_ALIGN(size);
    end_addr = start + size;
    if (end_addr > reserved_va) {
        end_addr = reserved_va;
    }
    addr = end_addr - qemu_host_page_size;

    while (1) {
        if (addr > end_addr) {
            if (looped) {
                return (abi_ulong)-1;
            }
            end_addr = reserved_va;
            addr = end_addr - qemu_host_page_size;
            looped = 1;
            continue;
        }
        prot = page_get_flags(addr);
        if (prot) {
            end_addr = addr;
        }
        if (addr + size == end_addr) {
            break;
        }
        addr -= qemu_host_page_size;
    }

    if (start == mmap_next_start) {
        mmap_next_start = addr;
    }

    return addr;
}
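/* Note the reserved case never probes the host: the whole guest range was
   mapped up front, so free space is found purely from the page_get_flags()
   bookkeeping, scanning downward from the top of the candidate range. */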
/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 if error.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
{
    void *ptr, *prev;
    abi_ulong addr;
    int wrapped, repeat;

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }

    size = HOST_PAGE_ALIGN(size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size);
    }

    addr = start;
    wrapped = repeat = 0;
    prev = 0;

    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h(addr), size, PROT_NONE,
                   MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /* Count the number of sequential returns of the same address.
           This is used to modify the search algorithm below.  */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & ~TARGET_PAGE_MASK) == 0) {
                /* Success.  */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target.  */
            switch (repeat) {
            case 0:
                /* Assume the result that the kernel gave us is the
                   first with enough free space, so start again at the
                   next higher target page.  */
                addr = TARGET_PAGE_ALIGN(addr);
                break;
            case 1:
                /* Sometimes the kernel decides to perform the allocation
                   at the top end of memory instead.  */
                addr &= TARGET_PAGE_MASK;
                break;
            case 2:
                /* Start over at low memory.  */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last.  */
                addr = -1;
                break;
            }
        } else {
            /* Since the result the kernel gave didn't fit, start
               again at low memory.  If any repetition, fail.  */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again.  */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space.  */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /* Don't actually use 0 when wrapping, instead indicate
               that we'd truly like an allocation in low memory.  */
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                    ? TARGET_PAGE_ALIGN(mmap_min_addr)
                    : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}
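/* Typical use (sketch): target_mmap() below calls
       start = mmap_find_vma(real_start, host_len);
   and, on success, immediately replaces the PROT_NONE reservation with
   the real mapping via MAP_FIXED, as the comment in the loop requires. */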
/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;

    mmap_lock();
#ifdef DEBUG_MMAP
    {
        printf("mmap: start=0x" TARGET_ABI_FMT_lx
               " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c flags=",
               start, len,
               prot & PROT_READ ? 'r' : '-',
               prot & PROT_WRITE ? 'w' : '-',
               prot & PROT_EXEC ? 'x' : '-');
        if (flags & MAP_FIXED)
            printf("MAP_FIXED ");
        if (flags & MAP_ANONYMOUS)
            printf("MAP_ANON ");
        switch (flags & MAP_TYPE) {
        case MAP_PRIVATE:
            printf("MAP_PRIVATE ");
            break;
        case MAP_SHARED:
            printf("MAP_SHARED ");
            break;
        default:
            printf("[MAP_TYPE=0x%x] ", flags & MAP_TYPE);
            break;
        }
        printf("fd=%d offset=" TARGET_ABI_FMT_lx "\n", fd, offset);
    }
#endif
    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        goto the_end;
    real_start = start & qemu_host_page_mask;
    host_offset = offset & qemu_host_page_mask;

    /* If the user is asking for the kernel to find a location, do that
       before we truncate the length for mapping files below.  */
    if (!(flags & MAP_FIXED)) {
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        start = mmap_find_vma(real_start, host_len);
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
    }
    /* When mapping files into a memory area larger than the file, accesses
       to pages beyond the file size will cause a SIGBUS.

       For example, if mmaping a file of 100 bytes on a host with 4K pages
       emulating a target with 8K pages, the target expects to be able to
       access the first 8K. But the host will trap us on any access beyond
       4K.

       When emulating a target with a larger page size than the host's, we
       may need to truncate file maps at EOF and add extra anonymous pages
       up to the target's page boundary.  */

    if ((qemu_real_host_page_size < TARGET_PAGE_SIZE)
        && !(flags & MAP_ANONYMOUS)) {
        struct stat sb;

        if (fstat(fd, &sb) == -1)
            goto fail;

        /* Are we trying to create a map beyond EOF? */
        if (offset + len > sb.st_size) {
            /* If so, truncate the file map at EOF aligned with
               the host's real page size. Additional anonymous maps
               will be created beyond EOF.  */
            len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
        }
    }
    if (!(flags & MAP_FIXED)) {
        unsigned long host_start;
        void *p;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);

        /* Note: we prefer to control the mapping address. It is
           especially important if qemu_host_page_size >
           qemu_real_host_page_size */
        p = mmap(g2h(start), host_len, prot,
                 flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            goto fail;
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (!(flags & MAP_ANONYMOUS)) {
            p = mmap(g2h(start), len, prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h(start), host_len);
                goto fail;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if requested memory area fits target address space
         * It can fail only on 64-bit host with 32-bit target.
         * On any other target/host combination mmap() handles this error
         * correctly.
         */
        if ((unsigned long)start + len - 1 > (abi_ulong) -1) {
            errno = EINVAL;
            goto fail;
        }
        /* worst case: we cannot map the file because the offset is not
           aligned, so we read it */
        if (!(flags & MAP_ANONYMOUS) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /* msync() won't work here, so we return an error if write is
               possible while it is a shared mapping */
            if ((flags & MAP_TYPE) == MAP_SHARED &&
                (prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                                  -1, 0);
            if (retaddr == -1)
                goto fail;
            if (pread(fd, g2h(start), len, offset) == -1)
                goto fail;
            if (!(prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, prot);
                assert(ret == 0);
            }
            goto the_end;
        }
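        /* General case: split the mapping into an unaligned head fragment,
           an unaligned tail fragment, and an aligned middle that can be
           handed to the host mmap() directly. */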
        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                prot, flags, fd, offset);
                if (ret == -1)
                    goto fail;
                goto the_end1;
            }
            ret = mmap_frag(real_start, start,
                            real_start + qemu_host_page_size,
                            prot, flags, fd, offset);
            if (ret == -1)
                goto fail;
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, end,
                            prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1)
                goto fail;
            real_end -= qemu_host_page_size;
        }
        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANONYMOUS)
                offset1 = 0;
            else
                offset1 = offset + real_start - start;
            p = mmap(g2h(real_start), real_end - real_start,
                     prot, flags, fd, offset1);
            if (p == MAP_FAILED)
                goto fail;
        }
    }
 the_end1:
    page_set_flags(start, start + len, prot | PAGE_VALID);
 the_end:
#ifdef DEBUG_MMAP
    printf("ret=0x" TARGET_ABI_FMT_lx "\n", start);
    page_dump(stdout);
    printf("\n");
#endif
    tb_invalidate_phys_range(start, start + len);
    mmap_unlock();
    return start;

fail:
    mmap_unlock();
    return -1;
}
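/* In the reserved_va case the guest address space is backed by one big
   host reservation, so ranges must not be returned to the host with
   munmap(); mmap_reserve() instead remaps them as PROT_NONE anonymous
   memory, keeping the reservation intact for future guest mappings. */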
static void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }
    if (real_start != real_end) {
        mmap(g2h(real_start), real_end - real_start, PROT_NONE,
             MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
             -1, 0);
    }
}
int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x" TARGET_ABI_FMT_lx " len=0x"
           TARGET_ABI_FMT_lx "\n",
           start, len);
#endif
    if (start & ~TARGET_PAGE_MASK)
        return -TARGET_EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        return -TARGET_EINVAL;
    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        if (reserved_va) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        page_set_flags(start, start + len, 0);
        tb_invalidate_phys_range(start, start + len);
    }
    mmap_unlock();
    return ret;
}
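/* For mremap(), moving or shrinking a guest mapping in the reserved_va
   case must likewise hand the vacated range back to the reservation via
   mmap_reserve() rather than leaving a hole in the host map. */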
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    void *host_addr;

    mmap_lock();

    if (flags & MREMAP_FIXED) {
        host_addr = (void *) syscall(__NR_mremap, g2h(old_addr),
                                     old_size, new_size,
                                     flags,
                                     g2h(new_addr));

        if (reserved_va && host_addr != MAP_FAILED) {
            /* If new and old addresses overlap then the above mremap will
               already have failed with EINVAL.  */
            mmap_reserve(old_addr, old_size);
        }
    } else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else {
            host_addr = (void *) syscall(__NR_mremap, g2h(old_addr),
                                         old_size, new_size,
                                         flags | MREMAP_FIXED,
                                         g2h(mmap_start));
            if (reserved_va) {
                mmap_reserve(old_addr, old_size);
            }
        }
    } else {
        int prot = 0;
        if (reserved_va && old_size < new_size) {
            abi_ulong addr;
            for (addr = old_addr + old_size;
                 addr < old_addr + new_size;
                 addr++) {
                prot |= page_get_flags(addr);
            }
        }
        if (prot == 0) {
            host_addr = mremap(g2h(old_addr), old_size, new_size, flags);
            if (host_addr != MAP_FAILED && reserved_va && old_size > new_size) {
                mmap_reserve(old_addr + old_size, new_size - old_size);
            }
        } else {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
        /* Check if address fits target address space */
        if ((unsigned long)host_addr + new_size > (abi_ulong)-1) {
            /* Revert mremap() changes */
            host_addr = mremap(g2h(old_addr), new_size, old_size, flags);
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size, 0);
        page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
    }
    tb_invalidate_phys_range(new_addr, new_addr + new_size);
    mmap_unlock();
    return new_addr;
}
int target_msync(abi_ulong start, abi_ulong len, int flags)
{
    abi_ulong end;

    if (start & ~TARGET_PAGE_MASK)
        return -TARGET_EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -TARGET_EINVAL;
    if (end == start)
        return 0;

    start &= qemu_host_page_mask;
    return msync(g2h(start), end - start, flags);
}