/*
 * mmap support for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
 * MA 02110-1301, USA.
 */
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <errno.h>
#include <pthread.h>
#include <linux/mman.h>
#include <linux/unistd.h>

#include "qemu.h"
#include "qemu-common.h"
pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static int __thread mmap_lock_count;

void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}
/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    /* The child gets a fresh mutex; the parent simply releases the lock
       taken in mmap_fork_start().  */
    if (child)
        pthread_mutex_init(&mmap_mutex, NULL);
    else
        pthread_mutex_unlock(&mmap_mutex);
}
/* We aren't threadsafe to start with, so no need to worry about locking.  */
void mmap_unlock(void)
{
}
void *qemu_vmalloc(size_t size)
{
    void *p;
    unsigned long addr;

    mmap_lock();
    /* Use map and mark the pages as used.  */
    p = mmap(NULL, size, PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    addr = (unsigned long)p;
    if (addr == (target_ulong) addr) {
        /* Allocated region overlaps guest address space.  This may recurse.  */
        page_set_flags(addr & TARGET_PAGE_MASK, TARGET_PAGE_ALIGN(addr + size),
                       PAGE_RESERVED);
    }

    mmap_unlock();
    return p;
}
void *qemu_malloc(size_t size)
{
    char * p;

    /* Reserve 16 bytes in front of the returned pointer to remember the
       allocation size for qemu_free()/qemu_realloc().  */
    size += 16;
    p = qemu_vmalloc(size);
    *(size_t *)p = size;
    return p + 16;
}

/* We use map, which is always zero initialized.  */
void * qemu_mallocz(size_t size)
{
    return qemu_malloc(size);
}
void qemu_free(void *ptr)
{
    size_t *p;

    /* FIXME: We should unmark the reserved pages here.  However this gets
       complicated when one target page spans multiple host pages, so we
       don't bother.  */
    p = (size_t *)((char *)ptr - 16);
    munmap(p, *p);
}
void *qemu_realloc(void *ptr, size_t size)
{
    size_t old_size, copy;
    void *new_ptr;

    if (!ptr)
        return qemu_malloc(size);
    old_size = *(size_t *)((char *)ptr - 16);
    copy = old_size < size ? old_size : size;
    new_ptr = qemu_malloc(size);
    memcpy(new_ptr, ptr, copy);
    qemu_free(ptr);
    return new_ptr;
}
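/* Illustration of the size bookkeeping above (numbers are hypothetical, not
   from the original code): qemu_malloc(100) asks qemu_vmalloc() for 116
   bytes, stores that total in the leading 16-byte header and returns the
   address 16 bytes in.  qemu_free() and qemu_realloc() recover the size with
   *(size_t *)((char *)ptr - 16), which is what the '- 16' arithmetic above
   relies on. */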
/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

#ifdef DEBUG_MMAP
    printf("mprotect: start=0x" TARGET_FMT_lx
           " len=0x" TARGET_FMT_lx " prot=%c%c%c\n", start, len,
           prot & PROT_READ ? 'r' : '-',
           prot & PROT_WRITE ? 'w' : '-',
           prot & PROT_EXEC ? 'x' : '-');
#endif

    if ((start & ~TARGET_PAGE_MASK) != 0)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h(host_start), qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = prot;
        for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h(host_end - qemu_host_page_size), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h(host_start), host_end - host_start, prot);
        if (ret != 0)
            goto error;
    }
    page_set_flags(start, start + len, prot | PAGE_VALID);
    mmap_unlock();
    return 0;

error:
    mmap_unlock();
    return ret;
}
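/* Worked example for the splitting above (hypothetical sizes: 64K host pages,
   4K target pages): target_mprotect(0x11000, 0x2000, PROT_READ) only touches
   part of host page [0x10000, 0x20000).  prot1 collects the flags of the
   other target pages sharing that host page, so the single host mprotect()
   call applies the union of the old and new protections, while
   page_set_flags() records PROT_READ only for [0x11000, 0x13000). */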
/* map an incomplete host page */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for (addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return -1;
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -1;

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* read the corresponding file data */
        pread(fd, g2h(start), end - start, offset);

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        /* just update the protection */
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
    }
    return 0;
}
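/* Sketch of what mmap_frag() handles (hypothetical sizes: 64K host pages,
   4K target pages): mapping a file over only [0x11000, 0x13000) of host page
   [0x10000, 0x20000).  If no other target page in that host page is in use,
   the host page is first created with an anonymous mmap(); for a
   non-anonymous mapping that is not shared-writable, the file bytes are then
   pread() into g2h(0x11000)..g2h(0x13000) and the host page ends up with the
   union of the old and new protections. */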
#if defined(__CYGWIN__)
/* Cygwin doesn't have a whole lot of address space.  */
static abi_ulong mmap_next_start = 0x18000000;
#else
static abi_ulong mmap_next_start = 0x40000000;
#endif

unsigned long last_brk;

/* find a free memory area of size 'size'. The search starts at
   'start'. If 'start' == 0, then a default start address is used.
   Return -1 if error.
*/
/* page_init() marks pages used by the host as reserved to be sure not
   to use them. */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
{
    abi_ulong addr, addr1, addr_start;
    int prot;
    unsigned long new_brk;

    new_brk = (unsigned long)sbrk(0);
    if (last_brk && last_brk < new_brk && last_brk == (target_ulong)last_brk) {
        /* This is a hack to catch the host allocating memory with brk().
           If it uses mmap then we lose.
           FIXME: We really want to avoid the host allocating memory in
           the first place, and maybe leave some slack to avoid switching
           to mmap.  */
        page_set_flags(last_brk & TARGET_PAGE_MASK,
                       TARGET_PAGE_ALIGN(new_brk),
                       PAGE_RESERVED);
    }
    last_brk = new_brk;

    size = HOST_PAGE_ALIGN(size);
    start = start & qemu_host_page_mask;
    addr = start;
    if (addr == 0)
        addr = mmap_next_start;
    addr_start = addr;
    for (;;) {
        prot = 0;
        for (addr1 = addr; addr1 < (addr + size); addr1 += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr1);
        }
        if (prot == 0)
            break;
        addr += qemu_host_page_size;
        /* we found nothing */
        if (addr == addr_start)
            return (abi_ulong)-1;
    }
    if (start == 0)
        mmap_next_start = addr + size;
    return addr;
}
/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;
    abi_ulong addr;
    unsigned long host_start;
    int flg;

    mmap_lock();
#ifdef DEBUG_MMAP
    {
        printf("mmap: start=0x" TARGET_FMT_lx
               " len=0x" TARGET_FMT_lx " prot=%c%c%c flags=",
               start, len,
               prot & PROT_READ ? 'r' : '-',
               prot & PROT_WRITE ? 'w' : '-',
               prot & PROT_EXEC ? 'x' : '-');
        if (flags & MAP_FIXED)
            printf("MAP_FIXED ");
        if (flags & MAP_ANONYMOUS)
            printf("MAP_ANON ");
        switch (flags & MAP_TYPE) {
        case MAP_PRIVATE:
            printf("MAP_PRIVATE ");
            break;
        case MAP_SHARED:
            printf("MAP_SHARED ");
            break;
        default:
            printf("[MAP_TYPE=0x%x] ", flags & MAP_TYPE);
            break;
        }
        printf("fd=%d offset=" TARGET_FMT_lx "\n", fd, offset);
    }
#endif

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    len = TARGET_PAGE_ALIGN(len);
    real_start = start & qemu_host_page_mask;

    /* When mapping files into a memory area larger than the file, accesses
       to pages beyond the file size will cause a SIGBUS.

       For example, if mmaping a file of 100 bytes on a host with 4K pages
       emulating a target with 8K pages, the target expects to be able to
       access the first 8K. But the host will trap us on any access beyond
       4K.

       When emulating a target with a larger page-size than the hosts, we
       may need to truncate file maps at EOF and add extra anonymous pages
       up to the targets page boundary.  */
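    /* Worked example of the truncation below, using the numbers from the
       comment above (100-byte file, offset 0, 4K real host pages):
       len = 100 - 0 = 100, then rounded up to the real host page size:
       (100 + 4095) & ~4095 = 4096.  Only that first 4K is file-backed; the
       extra anonymous pages mentioned above cover the rest of the 8K target
       page. */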
    if ((qemu_real_host_page_size < TARGET_PAGE_SIZE)
        && !(flags & MAP_ANONYMOUS)) {
        struct stat sb;

        if (fstat (fd, &sb) == -1)
            goto fail;

        /* Are we trying to create a map beyond EOF?.  */
        if (offset + len > sb.st_size) {
            /* If so, truncate the file map at eof aligned with
               the hosts real pagesize. Additional anonymous maps
               will be created beyond EOF.  */
            len = (sb.st_size - offset);
            len += qemu_real_host_page_size - 1;
            len &= ~(qemu_real_host_page_size - 1);
        }
    }
    if (!(flags & MAP_FIXED)) {
        abi_ulong mmap_start;
        void *p;

        host_offset = offset & qemu_host_page_mask;
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        mmap_start = mmap_find_vma(real_start, host_len);
        if (mmap_start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
        /* Note: we prefer to control the mapping address. It is
           especially important if qemu_host_page_size >
           qemu_real_host_page_size */
        p = mmap(g2h(mmap_start),
                 host_len, prot, flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            goto fail;
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (!(flags & MAP_ANONYMOUS)) {
            p = mmap(g2h(mmap_start), len, prot,
                     flags | MAP_FIXED, fd, host_offset);
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
        goto the_end1;
    }
    if (start & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }
    end = start + len;
    real_end = HOST_PAGE_ALIGN(end);

    /*
     * Test if requested memory area fits target address space
     * It can fail only on 64-bit host with 32-bit target.
     * On any other target/host host mmap() handles this error correctly.
     */
    if ((unsigned long)start + len - 1 > (abi_ulong) -1) {
        errno = EINVAL;
        goto fail;
    }

    for (addr = real_start; addr < real_end; addr += TARGET_PAGE_SIZE) {
        flg = page_get_flags(addr);
        if (flg & PAGE_RESERVED) {
            errno = ENXIO;
            goto fail;
        }
    }
    /* worst case: we cannot map the file because the offset is not
       aligned, so we read it */
    if (!(flags & MAP_ANONYMOUS) &&
        (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE)) {
            errno = EINVAL;
            goto fail;
        }
        retaddr = target_mmap(start, len, prot | PROT_WRITE,
                              MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                              -1, 0);
        if (retaddr == -1)
            goto fail;
        pread(fd, g2h(start), len, offset);
        if (!(prot & PROT_WRITE)) {
            ret = target_mprotect(start, len, prot);
        }
        goto the_end;
    }
    /* handle the start of the mapping */
    if (start > real_start) {
        if (real_end == real_start + qemu_host_page_size) {
            /* one single host page */
            ret = mmap_frag(real_start, start, end,
                            prot, flags, fd, offset);
            if (ret == -1)
                goto fail;
            goto the_end1;
        }
        ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                        prot, flags, fd, offset);
        if (ret == -1)
            goto fail;
        real_start += qemu_host_page_size;
    }
    /* handle the end of the mapping */
    if (end < real_end) {
        ret = mmap_frag(real_end - qemu_host_page_size,
                        real_end - qemu_host_page_size, real_end,
                        prot, flags, fd,
                        offset + real_end - qemu_host_page_size - start);
        if (ret == -1)
            goto fail;
        real_end -= qemu_host_page_size;
    }

    /* map the middle (easier) */
    if (real_start < real_end) {
        void *p;
        unsigned long offset1;

        if (flags & MAP_ANONYMOUS)
            offset1 = 0;
        else
            offset1 = offset + real_start - start;
        p = mmap(g2h(real_start), real_end - real_start,
                 prot, flags, fd, offset1);
        if (p == MAP_FAILED)
            goto fail;
    }
 the_end1:
    page_set_flags(start, start + len, prot | PAGE_VALID);
 the_end:
#ifdef DEBUG_MMAP
    printf("ret=0x" TARGET_FMT_lx "\n", start);
#endif
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}
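/* Sketch of how target_mmap() decomposes a MAP_FIXED request (hypothetical
   sizes: 64K host pages, 4K target pages): mapping [0x11000, 0x33000) becomes
     mmap_frag(0x10000, 0x11000, 0x20000, ...)   partial first host page
     mmap(g2h(0x20000), 0x10000, ...)            aligned middle
     mmap_frag(0x30000, 0x30000, 0x40000, ...)   partial last host page
   after which page_set_flags() marks the target range PAGE_VALID. */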
int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x%lx len=0x%lx\n", start, len);
#endif
    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        return -EINVAL;
    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        ret = munmap(g2h(real_start), real_end - real_start);
    }

    if (ret == 0)
        page_set_flags(start, start + len, 0);
    mmap_unlock();
    return ret;
}
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    void *host_addr;

    mmap_lock();

    if (flags & MREMAP_FIXED)
        host_addr = (void *) syscall(__NR_mremap, g2h(old_addr),
                                     old_size, new_size,
                                     flags, g2h(new_addr));
    else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else
            host_addr = (void *) syscall(__NR_mremap, g2h(old_addr),
                                         old_size, new_size,
                                         flags | MREMAP_FIXED,
                                         g2h(mmap_start));
    } else {
        host_addr = mremap(g2h(old_addr), old_size, new_size, flags);
        /* Check if address fits target address space */
        if ((unsigned long)host_addr + new_size > (abi_ulong)-1) {
            /* Revert mremap() changes */
            host_addr = mremap(g2h(old_addr), new_size, old_size, flags);
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size, 0);
        page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
    }
    mmap_unlock();
    return new_addr;
}
int target_msync(abi_ulong start, abi_ulong len, int flags)
{
    abi_ulong end;

    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;

    start &= qemu_host_page_mask;
    return msync(g2h(start), end - start, flags);
}