/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 - 2008 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
 *  MA 02110-1301, USA.
 */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <pthread.h>
#include <sys/mman.h>

#include "qemu.h"
#include "qemu-common.h"
#include "bsd-mman.h"

//#define DEBUG_MMAP
#if defined(USE_NPTL)
pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static int __thread mmap_lock_count;

void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count)
        abort();
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child)
        pthread_mutex_init(&mmap_mutex, NULL);
    else
        pthread_mutex_unlock(&mmap_mutex);
}
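
/* Illustrative sketch, not part of the original file: because
 * mmap_lock_count is per-thread (__thread), mmap_lock()/mmap_unlock()
 * nest safely within a single thread; only the outermost pair touches
 * the mutex.  MMAP_EXAMPLES is a hypothetical guard macro used here so
 * the example stays out of a normal build. */
#ifdef MMAP_EXAMPLES
static void example_nested_mmap_lock(void)
{
    mmap_lock();                /* count 0 -> 1: mutex acquired */
    mmap_lock();                /* count 1 -> 2: no second acquire */
    mmap_unlock();              /* count 2 -> 1: mutex still held */
    mmap_unlock();              /* count 1 -> 0: mutex released */
}
#endif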
#else
/* We aren't threadsafe to start with, so no need to worry about locking.  */
void mmap_lock(void)
{
}

void mmap_unlock(void)
{
}
#endif
void *qemu_vmalloc(size_t size)
{
    void *p;
    unsigned long addr;

    mmap_lock();
    /* Use map and mark the pages as used.  */
    p = mmap(NULL, size, PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_ANON, -1, 0);

    addr = (unsigned long)p;
    if (addr == (target_ulong) addr) {
        /* Allocated region overlaps guest address space.
           This may recurse.  */
        page_set_flags(addr & TARGET_PAGE_MASK, TARGET_PAGE_ALIGN(addr + size),
                       PAGE_RESERVED);
    }

    mmap_unlock();
    return p;
}
void *qemu_malloc(size_t size)
{
    char *p;

    size += 16;
    p = qemu_vmalloc(size);
    *(size_t *)p = size;
    return p + 16;
}

/* We use map, which is always zero initialized.  */
void *qemu_mallocz(size_t size)
{
    return qemu_malloc(size);
}

void qemu_free(void *ptr)
{
    /* FIXME: We should unmark the reserved pages here.  However this gets
       complicated when one target page spans multiple host pages, so we
       don't bother.  */
    size_t *p;
    p = (size_t *)((char *)ptr - 16);
    munmap(p, *p);
}
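
/* Illustrative sketch, not part of the original file: the layout behind
 * qemu_malloc()/qemu_free() above.  The mapping is 16 bytes larger than
 * requested; the total size lives at the start, and the caller receives
 * the address just past that header.  Guarded by the hypothetical
 * MMAP_EXAMPLES macro. */
#ifdef MMAP_EXAMPLES
static void example_malloc_header(void)
{
    char *data = qemu_malloc(24);           /* maps 24 + 16 bytes */
    size_t stored = *(size_t *)(data - 16); /* header holds the total: 40 */
    (void)stored;
    qemu_free(data);                        /* munmap()s header + data */
}
#endif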
/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

#ifdef DEBUG_MMAP
    printf("mprotect: start=0x" TARGET_FMT_lx
           " len=0x" TARGET_FMT_lx " prot=%c%c%c\n", start, len,
           prot & PROT_READ ? 'r' : '-',
           prot & PROT_WRITE ? 'w' : '-',
           prot & PROT_EXEC ? 'x' : '-');
#endif

    if ((start & ~TARGET_PAGE_MASK) != 0)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    if (len == 0)
        return 0;

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for(addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h(host_start), qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = prot;
        for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h(host_end - qemu_host_page_size), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h(host_start), host_end - host_start, prot);
        if (ret != 0)
            goto error;
    }
    page_set_flags(start, start + len, prot | PAGE_VALID);
    mmap_unlock();
    return 0;

error:
    mmap_unlock();
    return ret;
}
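
/* Illustrative sketch, not part of the original file: a typical call into
 * target_mprotect().  The start address must be target-page aligned; the
 * edge handling above exists because when qemu_host_page_size is larger
 * than TARGET_PAGE_SIZE, the host can only protect a whole host page, so
 * the flags of the neighbouring target pages are OR-ed back in.  Guarded
 * by the hypothetical MMAP_EXAMPLES macro. */
#ifdef MMAP_EXAMPLES
static int example_make_page_readonly(abi_ulong guest_addr)
{
    return target_mprotect(guest_addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_SIZE, PROT_READ);
}
#endif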
/* map an incomplete host page */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for(addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANON, -1, 0);
        if (p == MAP_FAILED)
            return -1;
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANON)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -1;

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* read the corresponding file data */
        pread(fd, g2h(start), end - start, offset);

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        /* just update the protection */
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
    }
    return 0;
}
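
/* Illustrative sketch, not part of the original file: how a caller maps
 * the leading partial host page of a file mapping with mmap_frag().
 * real_start is the host page containing 'start'; the fragment covers
 * [start, real_start + qemu_host_page_size).  The address value is made
 * up for the example.  Guarded by the hypothetical MMAP_EXAMPLES macro. */
#ifdef MMAP_EXAMPLES
static int example_map_leading_fragment(int fd, abi_ulong offset)
{
    abi_ulong start = 0x40001000;                        /* hypothetical */
    abi_ulong real_start = start & qemu_host_page_mask;  /* host page base */
    return mmap_frag(real_start, start,
                     real_start + qemu_host_page_size,
                     PROT_READ, MAP_PRIVATE, fd, offset);
}
#endif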
#if defined(__CYGWIN__)
/* Cygwin doesn't have a whole lot of address space.  */
static abi_ulong mmap_next_start = 0x18000000;
#else
static abi_ulong mmap_next_start = 0x40000000;
#endif

unsigned long last_brk;
/* find a free memory area of size 'size'. The search starts at
   'start'. If 'start' == 0, then a default start address is used.
   Return -1 if error.
*/
/* page_init() marks pages used by the host as reserved to be sure not
   to use them. */
static abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
{
    abi_ulong addr, addr1, addr_start;
    int prot;
    unsigned long new_brk;

    new_brk = (unsigned long)sbrk(0);
    if (last_brk && last_brk < new_brk && last_brk == (target_ulong)last_brk) {
        /* This is a hack to catch the host allocating memory with brk().
           If it uses mmap then we lose.
           FIXME: We really want to avoid the host allocating memory in
           the first place, and maybe leave some slack to avoid switching
           to mmap.  */
        page_set_flags(last_brk & TARGET_PAGE_MASK,
                       TARGET_PAGE_ALIGN(new_brk),
                       PAGE_RESERVED);
    }
    last_brk = new_brk;

    size = HOST_PAGE_ALIGN(size);
    start = start & qemu_host_page_mask;
    addr = start;
    if (addr == 0)
        addr = mmap_next_start;
    addr_start = addr;
    for(;;) {
        prot = 0;
        for(addr1 = addr; addr1 < (addr + size); addr1 += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr1);
        }
        if (prot == 0)
            break;
        addr += qemu_host_page_size;
        /* we found nothing */
        if (addr == addr_start)
            return (abi_ulong)-1;
    }
    if (start == 0)
        mmap_next_start = addr + size;
    return addr;
}
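
/* Illustrative sketch, not part of the original file: mmap_find_vma()
 * walks the guest page table in host-page steps and gives up once the
 * scan wraps back to its starting point, returning (abi_ulong)-1.  A
 * non-MAP_FIXED mapping probes for space roughly like this.  Guarded by
 * the hypothetical MMAP_EXAMPLES macro. */
#ifdef MMAP_EXAMPLES
static abi_ulong example_probe_for_space(abi_ulong len)
{
    abi_ulong addr = mmap_find_vma(0, HOST_PAGE_ALIGN(len));
    return (addr == (abi_ulong)-1) ? 0 : addr;   /* 0 here: no space */
}
#endif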
/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;
    unsigned long host_start;

    mmap_lock();
#ifdef DEBUG_MMAP
    {
        printf("mmap: start=0x" TARGET_FMT_lx
               " len=0x" TARGET_FMT_lx " prot=%c%c%c flags=",
               start, len,
               prot & PROT_READ ? 'r' : '-',
               prot & PROT_WRITE ? 'w' : '-',
               prot & PROT_EXEC ? 'x' : '-');
        if (flags & MAP_FIXED)
            printf("MAP_FIXED ");
        if (flags & MAP_ANON)
            printf("MAP_ANON ");
        switch(flags & TARGET_BSD_MAP_FLAGMASK) {
        case MAP_PRIVATE:
            printf("MAP_PRIVATE ");
            break;
        case MAP_SHARED:
            printf("MAP_SHARED ");
            break;
        default:
            printf("[MAP_FLAGMASK=0x%x] ", flags & TARGET_BSD_MAP_FLAGMASK);
            break;
        }
        printf("fd=%d offset=" TARGET_FMT_lx "\n", fd, offset);
    }
#endif

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        goto the_end;
    real_start = start & qemu_host_page_mask;

    if (!(flags & MAP_FIXED)) {
        abi_ulong mmap_start;
        void *p;

        host_offset = offset & qemu_host_page_mask;
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        mmap_start = mmap_find_vma(real_start, host_len);
        if (mmap_start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
        /* Note: we prefer to control the mapping address. It is
           especially important if qemu_host_page_size >
           qemu_real_host_page_size */
        p = mmap(g2h(mmap_start),
                 host_len, prot, flags | MAP_FIXED, fd, host_offset);
        if (p == MAP_FAILED)
            goto fail;
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (!(flags & MAP_ANON))
            host_start += offset - host_offset;
        start = h2g(host_start);
    } else {
        int flg;
        target_ulong addr;

        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        for(addr = real_start; addr < real_end; addr += TARGET_PAGE_SIZE) {
            flg = page_get_flags(addr);
            if (flg & PAGE_RESERVED) {
                errno = ENXIO;
                goto fail;
            }
        }

        /* worst case: we cannot map the file because the offset is not
           aligned, so we read it */
        if (!(flags & MAP_ANON) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /* msync() won't work here, so we return an error if write is
               possible while it is a shared mapping */
            if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
                (prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANON,
                                  -1, 0);
            if (retaddr == -1)
                goto fail;
            pread(fd, g2h(start), len, offset);
            if (!(prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, prot);
                if (ret != 0) {
                    start = ret;
                    goto the_end;
                }
            }
            goto the_end;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                prot, flags, fd, offset);
                if (ret == -1)
                    goto fail;
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            prot, flags, fd, offset);
            if (ret == -1)
                goto fail;
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, real_end,
                            prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1)
                goto fail;
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;

            if (flags & MAP_ANON)
                offset1 = 0;
            else
                offset1 = offset + real_start - start;
            p = mmap(g2h(real_start), real_end - real_start,
                     prot, flags, fd, offset1);
            if (p == MAP_FAILED)
                goto fail;
        }
    }
 the_end1:
    page_set_flags(start, start + len, prot | PAGE_VALID);
 the_end:
#ifdef DEBUG_MMAP
    printf("ret=0x" TARGET_FMT_lx "\n", start);
#endif
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}
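
/* Illustrative sketch, not part of the original file: an anonymous,
 * writable guest mapping as the syscall layer might request it.
 * target_mmap() returns the guest start address, or -1 with errno set.
 * Guarded by the hypothetical MMAP_EXAMPLES macro. */
#ifdef MMAP_EXAMPLES
static abi_long example_anonymous_mapping(void)
{
    return target_mmap(0, 2 * TARGET_PAGE_SIZE,
                       PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANON, -1, 0);
}
#endif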
int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x%lx len=0x%lx\n", start, len);
#endif
    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        return -EINVAL;

    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for(addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        ret = munmap(g2h(real_start), real_end - real_start);
    }

    if (ret == 0)
        page_set_flags(start, start + len, 0);
    mmap_unlock();
    return ret;
}
int target_msync(abi_ulong start, abi_ulong len, int flags)
{
    abi_ulong end;

    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    if (end == start)
        return 0;

    start &= qemu_host_page_mask;
    return msync(g2h(start), end - start, flags);
}
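
/* Illustrative sketch, not part of the original file: flushing a guest
 * range back to its file.  target_msync() widens the start down to a
 * host page boundary before calling the host msync().  Guarded by the
 * hypothetical MMAP_EXAMPLES macro. */
#ifdef MMAP_EXAMPLES
static int example_flush_range(abi_ulong guest_addr, abi_ulong len)
{
    return target_msync(guest_addr, len, MS_SYNC);
}
#endif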