/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "trace.h"
#include "exec/log.h"
#include "qemu.h"
#include "user-internals.h"
#include "user-mmap.h"
#include "target_mman.h"

static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;
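
/*
 * The mmap lock is recursive per thread: mmap_lock_count lives in
 * thread-local storage, and only the outermost mmap_lock()/mmap_unlock()
 * pair actually takes or releases the pthread mutex.
 */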
void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

bool have_mmap_lock(void)
{
    return mmap_lock_count > 0 ? true : false;
}

/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count) {
        abort();
    }
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child) {
        pthread_mutex_init(&mmap_mutex, NULL);
    } else {
        pthread_mutex_unlock(&mmap_mutex);
    }
}
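
/*
 * In the child we re-initialize the mutex rather than unlock it: holding
 * it across fork() guarantees no other thread owned it at the moment of
 * the fork, and a fresh init leaves the child with a clean lock state.
 */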

/*
 * Validate target prot bitmask.
 * Return the prot bitmask for the host in *HOST_PROT.
 * Return 0 if the target prot bitmask is invalid, otherwise
 * the internal qemu page_flags (which will include PAGE_VALID).
 */
static int validate_prot_to_pageflags(int *host_prot, int prot)
{
    int valid = PROT_READ | PROT_WRITE | PROT_EXEC | TARGET_PROT_SEM;
    int page_flags = (prot & PAGE_BITS) | PAGE_VALID;

    /*
     * For the host, we need not pass anything except read/write/exec.
     * While PROT_SEM is allowed by all hosts, it is also ignored, so
     * don't bother transforming the guest bit to the host bit.  Any other
     * target-specific prot bits will not be understood by the host
     * and will need to be encoded into page_flags for qemu emulation.
     *
     * Pages that are executable by the guest will never be executed
     * by the host, but the host will need to be able to read them.
     */
    *host_prot = (prot & (PROT_READ | PROT_WRITE))
               | (prot & PROT_EXEC ? PROT_READ : 0);

#ifdef TARGET_AARCH64
    {
        ARMCPU *cpu = ARM_CPU(thread_cpu);

        /*
         * The PROT_BTI bit is only accepted if the cpu supports the feature.
         * Since this is the unusual case, don't bother checking unless
         * the bit has been requested.  If set and valid, record the bit
         * within QEMU's page_flags.
         */
        if ((prot & TARGET_PROT_BTI) && cpu_isar_feature(aa64_bti, cpu)) {
            valid |= TARGET_PROT_BTI;
            page_flags |= PAGE_BTI;
        }
        /* Similarly for the PROT_MTE bit. */
        if ((prot & TARGET_PROT_MTE) && cpu_isar_feature(aa64_mte, cpu)) {
            valid |= TARGET_PROT_MTE;
            page_flags |= PAGE_MTE;
        }
    }
#elif defined(TARGET_HPPA)
    valid |= PROT_GROWSDOWN | PROT_GROWSUP;
#endif

    return prot & ~valid ? 0 : page_flags;
}
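
/*
 * For instance, a guest request for PROT_READ | PROT_EXEC yields host
 * PROT_READ only, since guest code is translated rather than executed
 * directly, while the returned page_flags still record the exec
 * permission for QEMU's software MMU.
 */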

/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret, page_flags, host_prot;

    trace_target_mprotect(start, len, target_prot);

    if ((start & ~TARGET_PAGE_MASK) != 0) {
        return -TARGET_EINVAL;
    }
    page_flags = validate_prot_to_pageflags(&host_prot, target_prot);
    if (!page_flags) {
        return -TARGET_EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (!guest_range_valid_untagged(start, len)) {
        return -TARGET_ENOMEM;
    }
    if (len == 0) {
        return 0;
    }

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = host_prot;
        for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h_untagged(host_start), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = host_prot;
        for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h_untagged(host_end - qemu_host_page_size),
                       qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h_untagged(host_start),
                       host_end - host_start, host_prot);
        if (ret != 0) {
            goto error;
        }
    }

    page_set_flags(start, start + len - 1, page_flags);
    ret = 0;

error:
    mmap_unlock();
    return ret;
}
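
/*
 * The boundary handling above matters when TARGET_PAGE_SIZE is smaller
 * than qemu_host_page_size: a single host page then contains several
 * target pages, so its host protection must be the union of the flags
 * of every target page within it.  Only the fully covered middle range
 * can take host_prot directly.
 */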

/* map an incomplete host page */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h_untagged(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for (addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end) {
            prot1 |= page_get_flags(addr);
        }
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            return -1;
        }
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & MAP_TYPE) == MAP_SHARED && (prot & PROT_WRITE)) {
            return -1;
        }

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE)) {
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);
        }

        /* read the corresponding file data */
        if (pread(fd, g2h_untagged(start), end - start, offset) == -1) {
            return -1;
        }

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE)) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
    } else {
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
        if (prot_new & PROT_WRITE) {
            memset(g2h_untagged(start), 0, end - start);
        }
    }
    return 0;
}
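
/*
 * For example, with 4K target pages on a 64K host page, a guest mapping
 * that covers only part of the host page cannot simply be mmap()ed:
 * mmap_frag() instead pread()s the file data into the existing host
 * page, leaving the neighbouring target pages intact.
 */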

#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
#ifdef TARGET_AARCH64
# define TASK_UNMAPPED_BASE  0x5500000000
#else
# define TASK_UNMAPPED_BASE  (1ul << 38)
#endif
#else
#ifdef TARGET_HPPA
# define TASK_UNMAPPED_BASE  0xfa000000
#else
# define TASK_UNMAPPED_BASE  0x40000000
#endif
#endif
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

unsigned long last_brk;

/* Subroutine of mmap_find_vma, used when we have pre-allocated a chunk
   of guest address space.  */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                        abi_ulong align)
{
    abi_ulong addr, end_addr, incr = qemu_host_page_size;
    int prot;
    bool looped = false;

    if (size > reserved_va) {
        return (abi_ulong)-1;
    }

    /* Note that start and size have already been aligned by mmap_find_vma. */

    end_addr = start + size;
    /*
     * Start at the top of the address space, ignoring the last page.
     * If reserved_va == UINT32_MAX, then end_addr wraps to 0,
     * throwing the rest of the calculations off.
     * TODO: rewrite using last_addr instead.
     * TODO: use the interval tree instead of probing every page.
     */
    if (start > reserved_va - size) {
        end_addr = ((reserved_va - size) & -align) + size;
        looped = true;
    }

    /* Search downward from END_ADDR, checking to see if a page is in use. */
    addr = end_addr;
    while (1) {
        addr -= incr;
        if (addr > end_addr) {
            if (looped) {
                /* Failure.  The entire address space has been searched. */
                return (abi_ulong)-1;
            }
            /* Re-start at the top of the address space (see above). */
            addr = end_addr = ((reserved_va - size) & -align) + size;
            looped = true;
        } else {
            prot = page_get_flags(addr);
            if (prot) {
                /* Page in use.  Restart below this page. */
                addr = end_addr = ((addr - size) & -align) + size;
            } else if (addr && addr + size == end_addr) {
                /* Success!  All pages between ADDR and END_ADDR are free. */
                if (start == mmap_next_start) {
                    mmap_next_start = addr;
                }
                return addr;
            }
        }
    }
}
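
/*
 * With reserved_va the entire guest address space is already reserved on
 * the host, so the search above never calls mmap(); it only consults
 * QEMU's own page flags to find a free, suitably aligned gap.
 */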

/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 if error.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
{
    void *ptr, *prev;
    abi_ulong addr;
    int wrapped, repeat;

    align = MAX(align, qemu_host_page_size);

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }
    start = ROUND_UP(start, align);

    size = HOST_PAGE_ALIGN(size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size, align);
    }

    addr = start;
    wrapped = repeat = 0;
    prev = 0;

    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         * - mmap() with MAP_FIXED flag
         * - mremap() with MREMAP_FIXED flag
         * - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h_untagged(addr), size, PROT_NONE,
                   MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /* Count the number of sequential returns of the same address.
           This is used to modify the search algorithm below. */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & (align - 1)) == 0) {
                /* Success. */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target. */
            switch (repeat) {
            case 0:
                /* Assume the result that the kernel gave us is the
                   first with enough free space, so start again at the
                   next higher target page. */
                addr = ROUND_UP(addr, align);
                break;
            case 1:
                /* Sometimes the kernel decides to perform the allocation
                   at the top end of memory instead. */
                addr &= -align;
                break;
            case 2:
                /* Start over at low memory. */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last. */
                addr = -1;
                break;
            }
        } else {
            /* Since the result the kernel gave didn't fit, start
               again at low memory.  If any repetition, fail. */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again. */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space. */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /* Don't actually use 0 when wrapping, instead indicate
               that we'd truly like an allocation in low memory. */
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                    ? TARGET_PAGE_ALIGN(mmap_min_addr)
                    : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}
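
/*
 * The "repeat" counter above drives an escalating retry strategy when
 * the kernel keeps returning the same unaligned address: first round up
 * to the next aligned address, then truncate downward, then restart at
 * low memory, and finally fail.
 */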

/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len,
              passthrough_start = -1, passthrough_end = -1;
    int page_flags, host_prot;

    mmap_lock();
    trace_target_mmap(start, len, target_prot, flags, fd, offset);

    if (!len) {
        errno = EINVAL;
        goto fail;
    }

    page_flags = validate_prot_to_pageflags(&host_prot, target_prot);
    if (!page_flags) {
        errno = EINVAL;
        goto fail;
    }

    /* Also check for overflows... */
    len = TARGET_PAGE_ALIGN(len);
    if (!len) {
        errno = ENOMEM;
        goto fail;
    }

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    /*
     * If we're mapping shared memory, ensure we generate code for parallel
     * execution and flush old translations.  This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (flags & MAP_SHARED) {
        CPUState *cpu = thread_cpu;
        if (!(cpu->tcg_cflags & CF_PARALLEL)) {
            cpu->tcg_cflags |= CF_PARALLEL;
            tb_flush(cpu);
        }
    }

    real_start = start & qemu_host_page_mask;
    host_offset = offset & qemu_host_page_mask;

    /* If the user is asking for the kernel to find a location, do that
       before we truncate the length for mapping files below. */
    if (!(flags & MAP_FIXED)) {
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        start = mmap_find_vma(real_start, host_len, TARGET_PAGE_SIZE);
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
    }

    /* When mapping files into a memory area larger than the file, accesses
       to pages beyond the file size will cause a SIGBUS.

       For example, if mmapping a file of 100 bytes on a host with 4K pages
       emulating a target with 8K pages, the target expects to be able to
       access the first 8K. But the host will trap us on any access beyond
       4K.

       When emulating a target with a larger page size than the host's, we
       may need to truncate file maps at EOF and add extra anonymous pages
       up to the target's page boundary. */

    if ((qemu_real_host_page_size() < qemu_host_page_size) &&
        !(flags & MAP_ANONYMOUS)) {
        struct stat sb;

        if (fstat(fd, &sb) == -1) {
            goto fail;
        }

        /* Are we trying to create a map beyond EOF? */
        if (offset + len > sb.st_size) {
            /* If so, truncate the file map at EOF aligned with
               the host's real page size.  Additional anonymous maps
               will be created beyond EOF. */
            len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
        }
    }

    if (!(flags & MAP_FIXED)) {
        unsigned long host_start;
        void *p;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);

        /* Note: we prefer to control the mapping address.  It is
           especially important if qemu_host_page_size >
           qemu_real_host_page_size. */
        p = mmap(g2h_untagged(start), host_len, host_prot,
                 flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            goto fail;
        }
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (!(flags & MAP_ANONYMOUS)) {
            p = mmap(g2h_untagged(start), len, host_prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h_untagged(start), host_len);
                goto fail;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
        passthrough_start = start;
        passthrough_end = start + len;
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if requested memory area fits target address space.
         * It can fail only on 64-bit host with 32-bit target.
         * On any other target/host combination, host mmap() handles
         * this error correctly.
         */
        if (end < start || !guest_range_valid_untagged(start, len)) {
            errno = ENOMEM;
            goto fail;
        }

        /* worst case: we cannot map the file because the offset is not
           aligned, so we read it */
        if (!(flags & MAP_ANONYMOUS) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /* msync() won't work here, so we return an error if write is
               possible while it is a shared mapping */
            if ((flags & MAP_TYPE) == MAP_SHARED &&
                (host_prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, target_prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                                  -1, 0);
            if (retaddr == -1) {
                goto fail;
            }
            if (pread(fd, g2h_untagged(start), len, offset) == -1) {
                goto fail;
            }
            if (!(host_prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, target_prot);
                assert(ret == 0);
            }
            goto the_end;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                host_prot, flags, fd, offset);
                if (ret == -1) {
                    goto fail;
                }
                goto the_end1;
            }
            ret = mmap_frag(real_start, start,
                            real_start + qemu_host_page_size,
                            host_prot, flags, fd, offset);
            if (ret == -1) {
                goto fail;
            }
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, end,
                            host_prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1) {
                goto fail;
            }
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;

            if (flags & MAP_ANONYMOUS) {
                offset1 = 0;
            } else {
                offset1 = offset + real_start - start;
            }
            p = mmap(g2h_untagged(real_start), real_end - real_start,
                     host_prot, flags, fd, offset1);
            if (p == MAP_FAILED) {
                goto fail;
            }
            passthrough_start = real_start;
            passthrough_end = real_end;
        }
    }
 the_end1:
    if (flags & MAP_ANONYMOUS) {
        page_flags |= PAGE_ANON;
    }
    page_flags |= PAGE_RESET;
    if (passthrough_start == passthrough_end) {
        page_set_flags(start, start + len - 1, page_flags);
    } else {
        if (start < passthrough_start) {
            page_set_flags(start, passthrough_start - 1, page_flags);
        }
        page_set_flags(passthrough_start, passthrough_end - 1,
                       page_flags | PAGE_PASSTHROUGH);
        if (passthrough_end < start + len) {
            page_set_flags(passthrough_end, start + len - 1, page_flags);
        }
    }
 the_end:
    trace_target_mmap_complete(start);
    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        FILE *f = qemu_log_trylock();
        if (f) {
            fprintf(f, "page layout changed following mmap\n");
            page_dump(f);
            qemu_log_unlock(f);
        }
    }
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}
static void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0) {
            real_start += qemu_host_page_size;
        }
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0) {
            real_end -= qemu_host_page_size;
        }
    }
    if (real_start != real_end) {
        mmap(g2h_untagged(real_start), real_end - real_start, PROT_NONE,
             MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
             -1, 0);
    }
}

int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

    trace_target_munmap(start, len);

    if (start & ~TARGET_PAGE_MASK) {
        return -TARGET_EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0 || !guest_range_valid_untagged(start, len)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0) {
            real_start += qemu_host_page_size;
        }
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0) {
            real_end -= qemu_host_page_size;
        }
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        if (reserved_va) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h_untagged(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        page_set_flags(start, start + len - 1, 0);
    }
    mmap_unlock();
    return ret;
}
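
/*
 * As in target_mprotect(), a host page on the edge of the unmapped range
 * is only released once no other target page within it remains mapped;
 * otherwise only the target-level page flags are cleared.
 */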

abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    void *host_addr;

    if (!guest_range_valid_untagged(old_addr, old_size) ||
        ((flags & MREMAP_FIXED) &&
         !guest_range_valid_untagged(new_addr, new_size)) ||
        ((flags & MREMAP_MAYMOVE) == 0 &&
         !guest_range_valid_untagged(old_addr, new_size))) {
        errno = ENOMEM;
        return -1;
    }

    mmap_lock();

    if (flags & MREMAP_FIXED) {
        host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
                           flags, g2h_untagged(new_addr));

        if (reserved_va && host_addr != MAP_FAILED) {
            /* If new and old addresses overlap then the above mremap will
               already have failed with EINVAL. */
            mmap_reserve(old_addr, old_size);
        }
    } else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size, TARGET_PAGE_SIZE);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else {
            host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
                               flags | MREMAP_FIXED,
                               g2h_untagged(mmap_start));
            if (reserved_va) {
                mmap_reserve(old_addr, old_size);
            }
        }
    } else {
        int prot = 0;
        if (reserved_va && old_size < new_size) {
            abi_ulong addr;
            for (addr = old_addr + old_size;
                 addr < old_addr + new_size;
                 addr++) {
                prot |= page_get_flags(addr);
            }
        }
        if (prot == 0) {
            host_addr = mremap(g2h_untagged(old_addr),
                               old_size, new_size, flags);

            if (host_addr != MAP_FAILED) {
                /* Check if address fits target address space */
                if (!guest_range_valid_untagged(h2g(host_addr), new_size)) {
                    /* Revert mremap() changes */
                    host_addr = mremap(g2h_untagged(old_addr),
                                       new_size, old_size, flags);
                    errno = ENOMEM;
                    host_addr = MAP_FAILED;
                } else if (reserved_va && old_size > new_size) {
                    mmap_reserve(old_addr + old_size, old_size - new_size);
                }
            }
        } else {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size - 1, 0);
        page_set_flags(new_addr, new_addr + new_size - 1,
                       prot | PAGE_VALID | PAGE_RESET);
    }
    mmap_unlock();
    return new_addr;
}
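
/*
 * Check whether an madvise() range can be forwarded to the host
 * unchanged: the range must be host-page aligned and every target page
 * in it must be marked PAGE_PASSTHROUGH, i.e. backed one-to-one by the
 * host mapping QEMU created for it.
 */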
static bool can_passthrough_madvise(abi_ulong start, abi_ulong end)
{
    abi_ulong addr;

    if ((start | end) & ~qemu_host_page_mask) {
        return false;
    }

    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        if (!(page_get_flags(addr) & PAGE_PASSTHROUGH)) {
            return false;
        }
    }

    return true;
}

abi_long target_madvise(abi_ulong start, abi_ulong len_in, int advice)
{
    abi_ulong len, end;
    int ret = 0;

    if (start & ~TARGET_PAGE_MASK) {
        return -TARGET_EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len_in);

    if (len_in && !len) {
        return -TARGET_EINVAL;
    }

    end = start + len;
    if (end < start) {
        return -TARGET_EINVAL;
    }

    if (end == start) {
        return 0;
    }

    if (!guest_range_valid_untagged(start, len)) {
        return -TARGET_EINVAL;
    }

    /* Translate for some architectures which have different MADV_xxx values */
    switch (advice) {
    case TARGET_MADV_DONTNEED:      /* alpha */
        advice = MADV_DONTNEED;
        break;
    case TARGET_MADV_WIPEONFORK:    /* parisc */
        advice = MADV_WIPEONFORK;
        break;
    case TARGET_MADV_KEEPONFORK:    /* parisc */
        advice = MADV_KEEPONFORK;
        break;
    /* we do not care about the other MADV_xxx values yet */
    }

    /*
     * Most advice values are hints, so ignoring and returning success is ok.
     *
     * However, some advice values such as MADV_DONTNEED, MADV_WIPEONFORK and
     * MADV_KEEPONFORK are not hints and need to be emulated.
     *
     * A straight passthrough for those may not be safe because qemu sometimes
     * turns private file-backed mappings into anonymous mappings.
     * can_passthrough_madvise() helps to check if a passthrough is possible by
     * comparing mappings that are known to have the same semantics in the host
     * and the guest.  In this case passthrough is safe.
     *
     * We pass through MADV_WIPEONFORK and MADV_KEEPONFORK if possible and
     * return failure if not.
     *
     * MADV_DONTNEED is passed through as well, if possible.
     * If passthrough isn't possible, we nevertheless (wrongly!) return
     * success, which is broken but some userspace programs fail to work
     * otherwise.  Completely implementing such emulation is quite complicated
     * though.
     */
    mmap_lock();
    switch (advice) {
    case MADV_WIPEONFORK:
    case MADV_KEEPONFORK:
        ret = -EINVAL;
        /* fall through */
    case MADV_DONTNEED:
        if (can_passthrough_madvise(start, end)) {
            ret = get_errno(madvise(g2h_untagged(start), len, advice));
            if ((advice == MADV_DONTNEED) && (ret == 0)) {
                page_reset_target_data(start, start + len - 1);
            }
        }
    }
    mmap_unlock();

    return ret;
}