/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu.h"
#include "qemu-common.h"
#include "translate-all.h"

//#define DEBUG_MMAP

static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;

void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

bool have_mmap_lock(void)
{
    return mmap_lock_count > 0 ? true : false;
}

/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count)
        abort();
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child)
        pthread_mutex_init(&mmap_mutex, NULL);
    else
        pthread_mutex_unlock(&mmap_mutex);
}
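
/*
 * Note: mmap_fork_start()/mmap_fork_end() are meant to bracket a fork() so
 * that no thread holds mmap_mutex across the fork and the child either
 * re-initialises or releases it.  (The exact call sites live outside this
 * file, in the linux-user fork handling.)
 */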

/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

#ifdef DEBUG_MMAP
    printf("mprotect: start=0x" TARGET_ABI_FMT_lx
           " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c\n", start, len,
           prot & PROT_READ ? 'r' : '-',
           prot & PROT_WRITE ? 'w' : '-',
           prot & PROT_EXEC ? 'x' : '-');
#endif

    if ((start & ~TARGET_PAGE_MASK) != 0)
        return -TARGET_EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -TARGET_EINVAL;
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    if (len == 0)
        return 0;

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h(host_start), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = prot;
        for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h(host_end - qemu_host_page_size),
                       qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h(host_start), host_end - host_start, prot);
        if (ret != 0)
            goto error;
    }
    page_set_flags(start, start + len, prot | PAGE_VALID);
    mmap_unlock();
    return 0;

error:
    mmap_unlock();
    return ret;
}
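
/*
 * When the target page size is smaller than the host page size, a target
 * mapping may cover only part of a host page.  The helper below fills in
 * such a partial host page: it keeps the protections of the target pages
 * already living in that host page, and either reads the file data into the
 * fragment or zero-fills it for anonymous mappings.
 */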

/* map an incomplete host page */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for (addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return -1;
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -1;

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* read the corresponding file data */
        if (pread(fd, g2h(start), end - start, offset) == -1)
            return -1;

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
        if (prot_new & PROT_WRITE) {
            memset(g2h(start), 0, end - start);
        }
    }
    return 0;
}

#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
# define TASK_UNMAPPED_BASE  (1ul << 38)
#elif defined(__CYGWIN__)
/* Cygwin doesn't have a whole lot of address space.  */
# define TASK_UNMAPPED_BASE  0x18000000
#else
# define TASK_UNMAPPED_BASE  0x40000000
#endif
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

unsigned long last_brk;
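
/*
 * mmap_next_start is only a hint: it records where the previous automatic
 * placement ended so that the next mmap_find_vma() search can start above
 * it instead of rescanning from TASK_UNMAPPED_BASE every time.
 */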

/* Subroutine of mmap_find_vma, used when we have pre-allocated a chunk
   of guest address space.  */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size)
{
    abi_ulong addr;
    abi_ulong end_addr;
    int prot;
    int looped = 0;

    if (size > reserved_va) {
        return (abi_ulong)-1;
    }

    size = HOST_PAGE_ALIGN(size);
    end_addr = start + size;
    if (end_addr > reserved_va) {
        end_addr = reserved_va;
    }
    addr = end_addr - qemu_host_page_size;

    while (1) {
        if (addr > end_addr) {
            if (looped) {
                return (abi_ulong)-1;
            }
            end_addr = reserved_va;
            addr = end_addr - qemu_host_page_size;
            looped = 1;
            continue;
        }
        prot = page_get_flags(addr);
        if (prot) {
            end_addr = addr;
        }
        if (addr + size == end_addr) {
            break;
        }
        addr -= qemu_host_page_size;
    }

    if (start == mmap_next_start) {
        mmap_next_start = addr;
    }

    return addr;
}

/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 if error.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
{
    void *ptr, *prev;
    abi_ulong addr;
    int wrapped, repeat;

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }

    size = HOST_PAGE_ALIGN(size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size);
    }

    addr = start;
    wrapped = repeat = 0;
    prev = 0;

    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h(addr), size, PROT_NONE,
                   MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /* Count the number of sequential returns of the same address.
           This is used to modify the search algorithm below.  */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & ~TARGET_PAGE_MASK) == 0) {
                /* Success.  */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target.  */
            switch (repeat) {
            case 0:
                /* Assume the result that the kernel gave us is the
                   first with enough free space, so start again at the
                   next higher target page.  */
                addr = TARGET_PAGE_ALIGN(addr);
                break;
            case 1:
                /* Sometimes the kernel decides to perform the allocation
                   at the top end of memory instead.  */
                addr &= TARGET_PAGE_MASK;
                break;
            case 2:
                /* Start over at low memory.  */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last.  */
                addr = -1;
                break;
            }
        } else {
            /* Since the result the kernel gave didn't fit, start
               again at low memory.  If any repetition, fail.  */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again.  */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space.  */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /* Don't actually use 0 when wrapping, instead indicate
               that we'd truly like an allocation in low memory.  */
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                    ? TARGET_PAGE_ALIGN(mmap_min_addr)
                    : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}

/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;

    mmap_lock();
#ifdef DEBUG_MMAP
    {
        printf("mmap: start=0x" TARGET_ABI_FMT_lx
               " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c flags=",
               start, len,
               prot & PROT_READ ? 'r' : '-',
               prot & PROT_WRITE ? 'w' : '-',
               prot & PROT_EXEC ? 'x' : '-');
        if (flags & MAP_FIXED)
            printf("MAP_FIXED ");
        if (flags & MAP_ANONYMOUS)
            printf("MAP_ANON ");
        switch (flags & MAP_TYPE) {
        case MAP_PRIVATE:
            printf("MAP_PRIVATE ");
            break;
        case MAP_SHARED:
            printf("MAP_SHARED ");
            break;
        default:
            printf("[MAP_TYPE=0x%x] ", flags & MAP_TYPE);
            break;
        }
        printf("fd=%d offset=" TARGET_ABI_FMT_lx "\n", fd, offset);
    }
#endif

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        goto the_end;
    real_start = start & qemu_host_page_mask;
    host_offset = offset & qemu_host_page_mask;

    /* If the user is asking for the kernel to find a location, do that
       before we truncate the length for mapping files below.  */
    if (!(flags & MAP_FIXED)) {
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        start = mmap_find_vma(real_start, host_len);
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
    }

    /* When mapping files into a memory area larger than the file, accesses
       to pages beyond the file size will cause a SIGBUS.

       For example, if mmaping a file of 100 bytes on a host with 4K pages
       emulating a target with 8K pages, the target expects to be able to
       access the first 8K. But the host will trap us on any access beyond
       4K.

       When emulating a target with a larger page-size than the hosts, we
       may need to truncate file maps at EOF and add extra anonymous pages
       up to the targets page boundary.  */

    if ((qemu_real_host_page_size < TARGET_PAGE_SIZE)
        && !(flags & MAP_ANONYMOUS)) {
        struct stat sb;

        if (fstat(fd, &sb) == -1)
            goto fail;

        /* Are we trying to create a map beyond EOF?  */
        if (offset + len > sb.st_size) {
            /* If so, truncate the file map at eof aligned with
               the hosts real pagesize. Additional anonymous maps
               will be created beyond EOF.  */
            len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
        }
    }

    if (!(flags & MAP_FIXED)) {
        unsigned long host_start;
        void *p;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);

        /* Note: we prefer to control the mapping address. It is
           especially important if qemu_host_page_size >
           qemu_real_host_page_size */
        p = mmap(g2h(start), host_len, prot,
                 flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            goto fail;
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (!(flags & MAP_ANONYMOUS)) {
            p = mmap(g2h(start), len, prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h(start), host_len);
                goto fail;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if requested memory area fits target address space
         * It can fail only on 64-bit host with 32-bit target.
         * On any other target/host host mmap() handles this error correctly.
         */
        if ((unsigned long)start + len - 1 > (abi_ulong) -1) {
            errno = EINVAL;
            goto fail;
        }

        /* worst case: we cannot map the file because the offset is not
           aligned, so we read it */
        if (!(flags & MAP_ANONYMOUS) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /* msync() won't work here, so we return an error if write is
               possible while it is a shared mapping */
            if ((flags & MAP_TYPE) == MAP_SHARED &&
                (prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                                  -1, 0);
            if (retaddr == -1)
                goto fail;
            if (pread(fd, g2h(start), len, offset) == -1)
                goto fail;
            if (!(prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, prot);
                assert(ret == 0);
            }
            goto the_end;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                prot, flags, fd, offset);
                if (ret == -1)
                    goto fail;
                goto the_end1;
            }
            ret = mmap_frag(real_start, start,
                            real_start + qemu_host_page_size,
                            prot, flags, fd, offset);
            if (ret == -1)
                goto fail;
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, end,
                            prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1)
                goto fail;
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANONYMOUS)
                offset1 = 0;
            else
                offset1 = offset + real_start - start;
            p = mmap(g2h(real_start), real_end - real_start,
                     prot, flags, fd, offset1);
            if (p == MAP_FAILED)
                goto fail;
        }
    }
 the_end1:
    page_set_flags(start, start + len, prot | PAGE_VALID);
562 printf("ret=0x" TARGET_ABI_FMT_lx
"\n", start
);
566 tb_invalidate_phys_range(start
, start
+ len
);
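
/*
 * Note: callers (e.g. the guest mmap/mmap2 syscall paths) are expected to
 * pass host-format prot/flags constants here, as stated above, and to use
 * the returned guest address (or -1) directly as the syscall result.
 */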

/*
 * With reserved_va, the guest address space is pre-reserved; instead of
 * unmapping we must re-mmap the range as PROT_NONE so it stays reserved.
 */
static void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }
    if (real_start != real_end) {
        mmap(g2h(real_start), real_end - real_start, PROT_NONE,
             MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
             -1, 0);
    }
}

int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x" TARGET_ABI_FMT_lx " len=0x"
           TARGET_ABI_FMT_lx "\n",
           start, len);
#endif
    if (start & ~TARGET_PAGE_MASK)
        return -TARGET_EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        return -TARGET_EINVAL;

    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        if (reserved_va) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        page_set_flags(start, start + len, 0);
        tb_invalidate_phys_range(start, start + len);
    }
    mmap_unlock();
    return ret;
}

abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    void *host_addr;

    mmap_lock();

    if (flags & MREMAP_FIXED) {
        host_addr = mremap(g2h(old_addr), old_size, new_size,
                           flags, g2h(new_addr));

        if (reserved_va && host_addr != MAP_FAILED) {
            /* If new and old addresses overlap then the above mremap will
               already have failed with EINVAL.  */
            mmap_reserve(old_addr, old_size);
        }
    } else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else {
            host_addr = mremap(g2h(old_addr), old_size, new_size,
                               flags | MREMAP_FIXED, g2h(mmap_start));
            if (reserved_va) {
                mmap_reserve(old_addr, old_size);
            }
        }
    } else {
        int prot = 0;
        if (reserved_va && old_size < new_size) {
            abi_ulong addr;
            for (addr = old_addr + old_size;
                 addr < old_addr + new_size;
                 addr++) {
                prot |= page_get_flags(addr);
            }
        }
        if (prot == 0) {
            host_addr = mremap(g2h(old_addr), old_size, new_size, flags);
            if (host_addr != MAP_FAILED && reserved_va && old_size > new_size) {
                mmap_reserve(old_addr + old_size, new_size - old_size);
            }
        } else {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
        /* Check if address fits target address space */
        if ((unsigned long)host_addr + new_size > (abi_ulong)-1) {
            /* Revert mremap() changes */
            host_addr = mremap(g2h(old_addr), new_size, old_size, flags);
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size, 0);
        page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
    }
    tb_invalidate_phys_range(new_addr, new_addr + new_size);
    mmap_unlock();
    return new_addr;
}

int target_msync(abi_ulong start, abi_ulong len, int flags)
{
    abi_ulong end;

    if (start & ~TARGET_PAGE_MASK)
        return -TARGET_EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -TARGET_EINVAL;
    if (end == start)
        return 0;

    start &= qemu_host_page_mask;
    return msync(g2h(start), end - start, flags);
}