/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <linux/mman.h>
#include <linux/unistd.h>

#include "qemu.h"
#include "qemu-common.h"

//#define DEBUG_MMAP
#if defined(CONFIG_USE_NPTL)
static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;
void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}
void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}
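
/* The counter above makes the lock recursive for a single thread: nested
   mmap_lock()/mmap_unlock() pairs only touch the pthread mutex at the
   outermost level, so e.g. target_mmap() can end up calling
   target_mprotect(), which takes the lock again, without deadlocking. */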
/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count)
        abort();
    pthread_mutex_lock(&mmap_mutex);
}
void mmap_fork_end(int child)
{
    if (child)
        pthread_mutex_init(&mmap_mutex, NULL);
    else
        pthread_mutex_unlock(&mmap_mutex);
}
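
/* Holding mmap_mutex across fork() means the child starts with the mmap
   state consistent. The child cannot portably unlock a mutex that was
   locked by a thread of the parent, so it re-initializes the mutex
   instead, while the parent simply releases it. */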
#else
/* We aren't threadsafe to start with, so no need to worry about locking.  */
void mmap_lock(void)
{
}

void mmap_unlock(void)
{
}
#endif
void *qemu_vmalloc(size_t size)
{
    void *p;
    unsigned long addr;
    mmap_lock();
    /* Use map and mark the pages as used.  */
    p = mmap(NULL, size, PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    addr = (unsigned long)p;
    if (addr == (target_ulong) addr) {
        /* Allocated region overlaps guest address space.
           This may recurse.  */
        page_set_flags(addr & TARGET_PAGE_MASK, TARGET_PAGE_ALIGN(addr + size),
                       PAGE_RESERVED);
    }

    mmap_unlock();
    return p;
}
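
/* Marking the freshly mapped pages PAGE_RESERVED keeps them out of the
   guest's reach: target_mmap() rejects fixed mappings that overlap a
   reserved page (see the PAGE_RESERVED check below). */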
void *qemu_malloc(size_t size)
{
    char * p;
    size += 16;
    p = qemu_vmalloc(size);
    *(size_t *)p = size;
    return p + 16;
}
/* We use map, which is always zero initialized.  */
void * qemu_mallocz(size_t size)
{
    return qemu_malloc(size);
}
void qemu_free(void *ptr)
{
    /* FIXME: We should unmark the reserved pages here.  However this gets
       complicated when one target page spans multiple host pages, so we
       don't bother.  */
    size_t *p;
    p = (size_t *)((char *)ptr - 16);
    munmap(p, *p);
}
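
/* Allocation layout: qemu_malloc() reserves a 16-byte header in front of
   the returned pointer and stores the total mapping size there. E.g.
   qemu_malloc(100) maps 116 bytes, writes 116 into the header and returns
   base + 16; qemu_free() steps back 16 bytes to recover both the base
   address and the length for munmap(). */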
void *qemu_realloc(void *ptr, size_t size)
{
    size_t old_size, copy;
    void *new_ptr;

    if (!ptr)
        return qemu_malloc(size);
    old_size = *(size_t *)((char *)ptr - 16);
    copy = old_size < size ? old_size : size;
    new_ptr = qemu_malloc(size);
    memcpy(new_ptr, ptr, copy);
    qemu_free(ptr);
    return new_ptr;
}
/* NOTE: all the constants are the HOST ones, but addresses are target.  */
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

#ifdef DEBUG_MMAP
    printf("mprotect: start=0x" TARGET_ABI_FMT_lx
           " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c\n", start, len,
           prot & PROT_READ ? 'r' : '-',
           prot & PROT_WRITE ? 'w' : '-',
           prot & PROT_EXEC ? 'x' : '-');
#endif
    if ((start & ~TARGET_PAGE_MASK) != 0)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    if (len == 0)
        return 0;

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for(addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h(host_start), qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = prot;
        for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h(host_end - qemu_host_page_size), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_end -= qemu_host_page_size;
    }
    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h(host_start), host_end - host_start, prot);
        if (ret != 0)
            goto error;
    }
    page_set_flags(start, start + len, prot | PAGE_VALID);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}
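
/* target_mprotect() thus splits the range into at most three pieces: a
   partial host page at the start, whole host pages in the middle and a
   partial host page at the end. For the partial pages the host protection
   must be the union of the flags of every target page sharing that host
   page, so a neighbouring read-only target page keeps PROT_READ even when
   the requested range is made PROT_NONE. */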
/* map an incomplete host page */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h(real_start);
    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for(addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }
    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return -1;
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -1;

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* read the corresponding file data */
        if (pread(fd, g2h(start), end - start, offset) == -1)
            return -1;

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        /* just update the protection */
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
    }
    return 0;
}
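
/* mmap_frag() is the building block for mappings whose ends do not fall on
   host page boundaries (possible when TARGET_PAGE_SIZE is smaller than
   qemu_host_page_size): the shared host page is backed anonymously and the
   file content for the fragment is pread() into it, since mapping only
   part of a host page from the file is impossible. */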
#if defined(__CYGWIN__)
/* Cygwin doesn't have a whole lot of address space.  */
static abi_ulong mmap_next_start = 0x18000000;
#else
static abi_ulong mmap_next_start = 0x40000000;
#endif

unsigned long last_brk;
/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 if error.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
{
    void *ptr;
    abi_ulong addr;
    size = HOST_PAGE_ALIGN(size);
    start &= qemu_host_page_mask;

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0)
        start = mmap_next_start;

    addr = start;

    for(;;) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap((void *)(unsigned long)addr, size, PROT_NONE,
                   MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED)
            return (abi_ulong)-1;
        /* If address fits target address space we've found what we need */
        if ((unsigned long)ptr + size - 1 <= (abi_ulong)-1)
            break;

        /* Unmap and try again with new page */
        munmap(ptr, size);
        addr += qemu_host_page_size;

        /* ENOMEM if we check whole of target address space */
        if (addr == start)
            return (abi_ulong)-1;
    }
    /* Update default start address */
    if (start == mmap_next_start)
        mmap_next_start = (unsigned long)ptr + size;

    return h2g(ptr);
}
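
/* The PROT_NONE/MAP_NORESERVE reservation returned here never has data
   read or written through it; the caller is expected to replace it
   immediately, e.g. target_mmap() remaps it with MAP_FIXED, which
   atomically discards the placeholder. */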
/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;
    unsigned long host_start;

    mmap_lock();
#ifdef DEBUG_MMAP
    {
        printf("mmap: start=0x" TARGET_ABI_FMT_lx
               " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c flags=",
               start, len,
               prot & PROT_READ ? 'r' : '-',
               prot & PROT_WRITE ? 'w' : '-',
               prot & PROT_EXEC ? 'x' : '-');
        if (flags & MAP_FIXED)
            printf("MAP_FIXED ");
        if (flags & MAP_ANONYMOUS)
            printf("MAP_ANON ");
        switch(flags & MAP_TYPE) {
        case MAP_PRIVATE:
            printf("MAP_PRIVATE ");
            break;
        case MAP_SHARED:
            printf("MAP_SHARED ");
            break;
        default:
            printf("[MAP_TYPE=0x%x] ", flags & MAP_TYPE);
            break;
        }
        printf("fd=%d offset=" TARGET_ABI_FMT_lx "\n", fd, offset);
    }
#endif
    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        goto the_end;
    real_start = start & qemu_host_page_mask;
    /* When mapping files into a memory area larger than the file, accesses
       to pages beyond the file size will cause a SIGBUS.

       For example, if mmapping a file of 100 bytes on a host with 4K pages
       emulating a target with 8K pages, the target expects to be able to
       access the first 8K. But the host will trap us on any access beyond
       4K.

       When emulating a target with a larger page-size than the host's, we
       may need to truncate file maps at EOF and add extra anonymous pages
       up to the target's page boundary.  */
    if ((qemu_real_host_page_size < TARGET_PAGE_SIZE)
        && !(flags & MAP_ANONYMOUS)) {
        struct stat sb;

        if (fstat (fd, &sb) == -1)
            goto fail;

        /* Are we trying to create a map beyond EOF?  */
        if (offset + len > sb.st_size) {
            /* If so, truncate the file map at EOF aligned with
               the host's real pagesize. Additional anonymous maps
               will be created beyond EOF.  */
            len = (sb.st_size - offset);
            len += qemu_real_host_page_size - 1;
            len &= ~(qemu_real_host_page_size - 1);
        }
    }
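
    /* Worked example for the comment above: a 100-byte file mapped with 4K
       host pages while emulating 8K target pages gives len = 100 + 4095
       rounded down to a 4K multiple, i.e. 4096, so the file map stops at
       the end of the file's last host page instead of extending into pages
       the host would SIGBUS on. */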
    if (!(flags & MAP_FIXED)) {
        abi_ulong mmap_start;
        void *p;
        host_offset = offset & qemu_host_page_mask;
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        mmap_start = mmap_find_vma(real_start, host_len);
        if (mmap_start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
        /* Note: we prefer to control the mapping address. It is
           especially important if qemu_host_page_size >
           qemu_real_host_page_size */
        p = mmap(g2h(mmap_start),
                 host_len, prot, flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            goto fail;
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (!(flags & MAP_ANONYMOUS)) {
            p = mmap(g2h(mmap_start), len, prot,
                     flags | MAP_FIXED, fd, host_offset);
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
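
        /* Design note: the anonymous MAP_FIXED mapping above, later
           overlaid with the file mapping, lets QEMU choose the address
           itself via mmap_find_vma() while still padding the area out to
           host_len with anonymous pages; host_start is then adjusted so
           that 'start' corresponds to the requested file offset. */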
    } else {
        int flg;
        abi_ulong addr;

        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if requested memory area fits target address space
         * It can fail only on 64-bit host with 32-bit target.
         * On any other target/host host mmap() handles this error correctly.
         */
        if ((unsigned long)start + len - 1 > (abi_ulong) -1) {
            errno = EINVAL;
            goto fail;
        }
        for(addr = real_start; addr < real_end; addr += TARGET_PAGE_SIZE) {
            flg = page_get_flags(addr);
            if (flg & PAGE_RESERVED) {
                errno = ENXIO;
                goto fail;
            }
        }
        /* worst case: we cannot map the file because the offset is not
           aligned, so we read it */
        if (!(flags & MAP_ANONYMOUS) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /* msync() won't work here, so we return an error if write is
               possible while it is a shared mapping */
            if ((flags & MAP_TYPE) == MAP_SHARED &&
                (prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                                  -1, 0);
            if (retaddr == -1)
                goto fail;
            if (pread(fd, g2h(start), len, offset) == -1)
                goto fail;
            if (!(prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, prot);
                if (ret != 0) {
                    start = ret;
                    goto the_end;
                }
            }
            goto the_end;
        }
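
        /* This fallback replaces the file mapping with a private anonymous
           one filled by pread(), which is why writable MAP_SHARED requests
           must be refused above: writes would land in anonymous memory and
           never reach the file, and msync() could not repair that. */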
        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                prot, flags, fd, offset);
                if (ret == -1)
                    goto fail;
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            prot, flags, fd, offset);
            if (ret == -1)
                goto fail;
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, real_end,
                            prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1)
                goto fail;
            real_end -= qemu_host_page_size;
        }
        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANONYMOUS)
                offset1 = 0;
            else
                offset1 = offset + real_start - start;
            p = mmap(g2h(real_start), real_end - real_start,
                     prot, flags, fd, offset1);
            if (p == MAP_FAILED)
                goto fail;
        }
    }
 the_end1:
    page_set_flags(start, start + len, prot | PAGE_VALID);
 the_end:
#ifdef DEBUG_MMAP
    printf("ret=0x" TARGET_ABI_FMT_lx "\n", start);
#endif
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}
int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x" TARGET_ABI_FMT_lx " len=0x"
           TARGET_ABI_FMT_lx "\n",
           start, len);
#endif
    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        return -EINVAL;
    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for(addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }
    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        ret = munmap(g2h(real_start), real_end - real_start);
    }

    if (ret == 0)
        page_set_flags(start, start + len, 0);
    mmap_unlock();
    return ret;
}
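
/* Boundary host pages are only released when no neighbouring target page
   in them is still mapped ('prot' accumulates the neighbours' flags
   above); otherwise the host page is kept and only the page flags of the
   unmapped target range are cleared. */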
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    void *host_addr;

    mmap_lock();

    if (flags & MREMAP_FIXED)
        host_addr = (void *) syscall(__NR_mremap, g2h(old_addr),
                                     old_size, new_size,
                                     flags,
                                     g2h(new_addr));
    else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else
            host_addr = (void *) syscall(__NR_mremap, g2h(old_addr),
                                         old_size, new_size,
                                         flags | MREMAP_FIXED,
                                         g2h(mmap_start));
    } else {
        host_addr = mremap(g2h(old_addr), old_size, new_size, flags);
        /* Check if address fits target address space */
        if ((unsigned long)host_addr + new_size > (abi_ulong)-1) {
            /* Revert mremap() changes */
            host_addr = mremap(g2h(old_addr), new_size, old_size, flags);
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }
    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size, 0);
        page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
    }
    mmap_unlock();
    return new_addr;
}
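
/* MREMAP_MAYMOVE without a fixed destination is handled by choosing a
   guest-visible address with mmap_find_vma() and then forcing the kernel
   to use it via MREMAP_FIXED, so a moved block cannot end up outside the
   guest address space on a 64-bit host. */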
int target_msync(abi_ulong start, abi_ulong len, int flags)
{
    abi_ulong end;

    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    if (end == start)
        return 0;

    start &= qemu_host_page_mask;
    return msync(g2h(start), end - start, flags);
}
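
/* msync() only needs host page granularity, so rounding 'start' down to a
   host page boundary is sufficient; flushing slightly more than the guest
   asked for is harmless. */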