/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include <sys/shm.h>
#include "trace.h"
#include "exec/log.h"
#include "exec/page-protection.h"
#include "qemu.h"
#include "user-internals.h"
#include "user-mmap.h"
#include "target_mman.h"
#include "qemu/interval-tree.h"

#ifdef TARGET_ARM
#include "target/arm/cpu-features.h"
#endif
static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;

void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}
void mmap_unlock(void)
{
    assert(mmap_lock_count > 0);
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}
bool have_mmap_lock(void)
{
    return mmap_lock_count > 0;
}
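/*
 * The lock is recursive: a thread may take it again before releasing,
 * and only the outermost lock/unlock pair touches the pthread mutex,
 * as the per-thread count above shows.
 */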
/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count) {
        abort();
    }
    pthread_mutex_lock(&mmap_mutex);
}
void mmap_fork_end(int child)
{
    if (child) {
        pthread_mutex_init(&mmap_mutex, NULL);
    } else {
        pthread_mutex_unlock(&mmap_mutex);
    }
}
/* Protected by mmap_lock. */
static IntervalTreeRoot shm_regions;

static void shm_region_add(abi_ptr start, abi_ptr last)
{
    IntervalTreeNode *i = g_new0(IntervalTreeNode, 1);

    i->start = start;
    i->last = last;
    interval_tree_insert(i, &shm_regions);
}
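/* Return the last byte address of the shm region starting at START, or 0. */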
static abi_ptr shm_region_find(abi_ptr start)
{
    IntervalTreeNode *i;

    for (i = interval_tree_iter_first(&shm_regions, start, start); i;
         i = interval_tree_iter_next(i, start, start)) {
        if (i->start == start) {
            return i->last;
        }
    }
    return 0;
}
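/*
 * Remove all shm regions wholly contained within [START, LAST].  A
 * region that merely overlaps the range is deliberately left in place.
 */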
static void shm_region_rm_complete(abi_ptr start, abi_ptr last)
{
    IntervalTreeNode *i, *n;

    for (i = interval_tree_iter_first(&shm_regions, start, last); i; i = n) {
        n = interval_tree_iter_next(i, start, last);
        if (i->start >= start && i->last <= last) {
            interval_tree_remove(i, &shm_regions);
            g_free(i);
        }
    }
}
/*
 * Validate target prot bitmask.
 * Return 0 if the target prot bitmask is invalid, otherwise
 * the internal qemu page_flags (which will include PAGE_VALID).
 */
static int validate_prot_to_pageflags(int prot)
{
    int valid = PROT_READ | PROT_WRITE | PROT_EXEC | TARGET_PROT_SEM;
    int page_flags = (prot & PAGE_RWX) | PAGE_VALID;

#ifdef TARGET_AARCH64
    {
        ARMCPU *cpu = ARM_CPU(thread_cpu);

        /*
         * The PROT_BTI bit is only accepted if the cpu supports the feature.
         * Since this is the unusual case, don't bother checking unless
         * the bit has been requested.  If set and valid, record the bit
         * within QEMU's page_flags.
         */
        if ((prot & TARGET_PROT_BTI) && cpu_isar_feature(aa64_bti, cpu)) {
            valid |= TARGET_PROT_BTI;
            page_flags |= PAGE_BTI;
        }
        /* Similarly for the PROT_MTE bit. */
        if ((prot & TARGET_PROT_MTE) && cpu_isar_feature(aa64_mte, cpu)) {
            valid |= TARGET_PROT_MTE;
            page_flags |= PAGE_MTE;
        }
    }
#elif defined(TARGET_HPPA)
    valid |= PROT_GROWSDOWN | PROT_GROWSUP;
#endif

    return prot & ~valid ? 0 : page_flags;
}
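/*
 * Example: on aarch64, PROT_READ | PROT_EXEC | TARGET_PROT_BTI yields
 * PAGE_READ | PAGE_EXEC | PAGE_BTI | PAGE_VALID when the cpu has BTI,
 * and 0 (rejected as invalid) when it does not, since the bit is then
 * not part of 'valid'.
 */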
/*
 * For the host, we need not pass anything except read/write/exec.
 * While PROT_SEM is allowed by all hosts, it is also ignored, so
 * don't bother transforming guest bit to host bit.  Any other
 * target-specific prot bits will not be understood by the host
 * and will need to be encoded into page_flags for qemu emulation.
 *
 * Pages that are executable by the guest will never be executed
 * by the host, but the host will need to be able to read them.
 */
static int target_to_host_prot(int prot)
{
    return (prot & (PROT_READ | PROT_WRITE)) |
           (prot & PROT_EXEC ? PROT_READ : 0);
}
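/*
 * E.g. a guest PROT_EXEC-only page becomes PROT_READ on the host: the
 * translator reads the guest code, and execution happens only from
 * generated host code.
 */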
/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
{
    int host_page_size = qemu_real_host_page_size();
    abi_ulong starts[3];
    abi_ulong lens[3];
    int prots[3];
    abi_ulong host_start, host_last, last;
    int prot1, ret, page_flags, nranges;

    trace_target_mprotect(start, len, target_prot);

    if ((start & ~TARGET_PAGE_MASK) != 0) {
        return -TARGET_EINVAL;
    }
    page_flags = validate_prot_to_pageflags(target_prot);
    if (!page_flags) {
        return -TARGET_EINVAL;
    }
    if (len == 0) {
        return 0;
    }
    len = TARGET_PAGE_ALIGN(len);
    if (!guest_range_valid_untagged(start, len)) {
        return -TARGET_ENOMEM;
    }

    last = start + len - 1;
    host_start = start & -host_page_size;
    host_last = ROUND_UP(last, host_page_size) - 1;
    nranges = 0;

    mmap_lock();

    if (host_last - host_start < host_page_size) {
        /* Single host page contains all guest pages: sum the prot. */
        prot1 = target_prot;
        for (abi_ulong a = host_start; a < start; a += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(a);
        }
        for (abi_ulong a = last; a < host_last; a += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(a + 1);
        }
        starts[nranges] = host_start;
        lens[nranges] = host_page_size;
        prots[nranges] = prot1;
        nranges++;
    } else {
        if (host_start < start) {
            /* Host page contains more than one guest page: sum the prot. */
            prot1 = target_prot;
            for (abi_ulong a = host_start; a < start; a += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(a);
            }
            /* If the resulting sum differs, create a new range. */
            if (prot1 != target_prot) {
                starts[nranges] = host_start;
                lens[nranges] = host_page_size;
                prots[nranges] = prot1;
                nranges++;
            }
            host_start += host_page_size;
        }

        if (last < host_last) {
            /* Host page contains more than one guest page: sum the prot. */
            prot1 = target_prot;
            for (abi_ulong a = last; a < host_last; a += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(a + 1);
            }
            /* If the resulting sum differs, create a new range. */
            if (prot1 != target_prot) {
                host_last -= host_page_size;
                starts[nranges] = host_last + 1;
                lens[nranges] = host_page_size;
                prots[nranges] = prot1;
                nranges++;
            }
        }

        /* Create a range for the middle, if any remains. */
        if (host_start < host_last) {
            starts[nranges] = host_start;
            lens[nranges] = host_last - host_start + 1;
            prots[nranges] = target_prot;
            nranges++;
        }
    }

    for (int i = 0; i < nranges; ++i) {
        ret = mprotect(g2h_untagged(starts[i]), lens[i],
                       target_to_host_prot(prots[i]));
        if (ret != 0) {
            goto error;
        }
    }

    page_set_flags(start, last, page_flags);
    ret = 0;

 error:
    mmap_unlock();
    return ret;
}
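/*
 * Example with 4k guest pages on a 64k-page host: mprotect of
 * [0x1000, 0x20fff] spans host pages [0x0, 0x2ffff].  The first and
 * last host pages also hold guest pages outside the request, so they
 * get the OR of old and new protections; the middle host page gets
 * target_prot directly.  At most three ranges result, which bounds
 * the starts/lens/prots arrays at [3].
 */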
/*
 * Perform munmap on behalf of the target, with host parameters.
 * If reserved_va, we must replace the memory reservation.
 */
static int do_munmap(void *addr, size_t len)
{
    if (reserved_va) {
        void *ptr = mmap(addr, len, PROT_NONE,
                         MAP_FIXED | MAP_ANONYMOUS
                         | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
        return ptr == addr ? 0 : -1;
    }
    return munmap(addr, len);
}
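/*
 * Note that in the reserved_va case the range is not returned to the
 * host: the guest address space was reserved up front, and a hole here
 * could otherwise be filled by an unrelated host mmap().
 */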
/*
 * Map an incomplete host page.
 *
 * Here be dragons.  This case will not work if there is an existing
 * overlapping host page, which is file mapped, and for which the mapping
 * is beyond the end of the file.  In that case, we will see SIGBUS when
 * trying to write a portion of this page.
 *
 * FIXME: Work around this with a temporary signal handler and longjmp.
 */
static bool mmap_frag(abi_ulong real_start, abi_ulong start, abi_ulong last,
                      int prot, int flags, int fd, off_t offset)
{
    int host_page_size = qemu_real_host_page_size();
    abi_ulong real_last;
    void *host_start;
    int prot_old, prot_new;
    int host_prot_old, host_prot_new;

    if (!(flags & MAP_ANONYMOUS)
        && (flags & MAP_TYPE) == MAP_SHARED
        && (prot & PROT_WRITE)) {
        /*
         * msync() won't work with the partial page, so we return an
         * error if write is possible while it is a shared mapping.
         */
        errno = EINVAL;
        return false;
    }

    real_last = real_start + host_page_size - 1;
    host_start = g2h_untagged(real_start);

    /* Get the protection of the target pages outside the mapping. */
    prot_old = 0;
    for (abi_ulong a = real_start; a < start; a += TARGET_PAGE_SIZE) {
        prot_old |= page_get_flags(a);
    }
    for (abi_ulong a = real_last; a > last; a -= TARGET_PAGE_SIZE) {
        prot_old |= page_get_flags(a);
    }

    if (prot_old == 0) {
        /*
         * Since !(prot_old & PAGE_VALID), there were no guest pages
         * outside of the fragment we need to map.  Allocate a new host
         * page to cover, discarding whatever else may have been present.
         */
        void *p = mmap(host_start, host_page_size,
                       target_to_host_prot(prot),
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p != host_start) {
            if (p != MAP_FAILED) {
                do_munmap(p, host_page_size);
                errno = EEXIST;
            }
            return false;
        }
        prot_old = prot;
    }
    prot_new = prot | prot_old;

    host_prot_old = target_to_host_prot(prot_old);
    host_prot_new = target_to_host_prot(prot_new);

    /* Adjust protection to be able to write. */
    if (!(host_prot_old & PROT_WRITE)) {
        host_prot_old |= PROT_WRITE;
        mprotect(host_start, host_page_size, host_prot_old);
    }

    /* Read or zero the new guest pages. */
    if (flags & MAP_ANONYMOUS) {
        memset(g2h_untagged(start), 0, last - start + 1);
    } else {
        if (pread(fd, g2h_untagged(start), last - start + 1, offset) == -1) {
            return false;
        }
    }

    /* Put final protection */
    if (host_prot_new != host_prot_old) {
        mprotect(host_start, host_page_size, host_prot_new);
    }
    return true;
}
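/*
 * Example, 64k host pages with 4k guest pages: a file mapping that
 * begins at guest address 0x11000 makes [0x11000, 0x1ffff] a fragment
 * of host page [0x10000, 0x1ffff].  Valid guest pages already within
 * [0x10000, 0x10fff] keep their protection, and the new contents are
 * pread() into place, since the host cannot map a file at a sub-page
 * boundary.
 */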
abi_ulong task_unmapped_base;
abi_ulong elf_et_dyn_base;
abi_ulong mmap_next_start;
/*
 * Subroutine of mmap_find_vma, used when we have pre-allocated
 * a chunk of guest address space.
 */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                        abi_ulong align)
{
    target_ulong ret;

    ret = page_find_range_empty(start, reserved_va, size, align);
    if (ret == -1 && start > mmap_min_addr) {
        /* Restart at the beginning of the address space. */
        ret = page_find_range_empty(mmap_min_addr, start - 1, size, align);
    }

    return ret;
}
/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 if error.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
{
    int host_page_size = qemu_real_host_page_size();
    void *ptr, *prev;
    abi_ulong addr;
    int wrapped, repeat;

    align = MAX(align, host_page_size);

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= -host_page_size;
    }
    start = ROUND_UP(start, align);
    size = ROUND_UP(size, host_page_size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size, align);
    }

    addr = start;
    wrapped = repeat = 0;
    prev = 0;

    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h_untagged(addr), size, PROT_NONE,
                   MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /*
         * Count the number of sequential returns of the same address.
         * This is used to modify the search algorithm below.
         */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & (align - 1)) == 0) {
                /* Success. */
                if (start == mmap_next_start && addr >= task_unmapped_base) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target. */
            switch (repeat) {
            case 0:
                /*
                 * Assume the result that the kernel gave us is the
                 * first with enough free space, so start again at the
                 * next higher target page.
                 */
                addr = ROUND_UP(addr, align);
                break;
            case 1:
                /*
                 * Sometimes the kernel decides to perform the allocation
                 * at the top end of memory instead.
                 */
                addr &= -align;
                break;
            case 2:
                /* Start over at low memory. */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last. */
                addr = -1;
                break;
            }
        } else {
            /*
             * Since the result the kernel gave didn't fit, start
             * again at low memory.  If any repetition, fail.
             */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again. */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space. */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /*
             * Don't actually use 0 when wrapping, instead indicate
             * that we'd truly like an allocation in low memory.
             */
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                    ? TARGET_PAGE_ALIGN(mmap_min_addr)
                    : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}
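/*
 * The PROT_NONE probe above doubles as the reservation handed back to
 * the caller; as the loop comment notes, it is replaced later by
 * mmap()/mremap()/shmat() with their respective FIXED/REMAP flags.
 */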
/*
 * Record a successful mmap within the user-exec interval tree.
 */
static abi_long mmap_end(abi_ulong start, abi_ulong last,
                         abi_ulong passthrough_start,
                         abi_ulong passthrough_last,
                         int flags, int page_flags)
{
    if (flags & MAP_ANONYMOUS) {
        page_flags |= PAGE_ANON;
    }
    page_flags |= PAGE_RESET;
    if (passthrough_start > passthrough_last) {
        page_set_flags(start, last, page_flags);
    } else {
        if (start < passthrough_start) {
            page_set_flags(start, passthrough_start - 1, page_flags);
        }
        page_set_flags(passthrough_start, passthrough_last,
                       page_flags | PAGE_PASSTHROUGH);
        if (passthrough_last < last) {
            page_set_flags(passthrough_last + 1, last, page_flags);
        }
    }
    shm_region_rm_complete(start, last);
    trace_target_mmap_complete(start);
    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        FILE *f = qemu_log_trylock();
        if (f) {
            fprintf(f, "page layout changed following mmap\n");
            page_dump(f);
            qemu_log_unlock(f);
        }
    }
    return start;
}
/*
 * Special case host page size == target page size,
 * where there are no edge conditions.
 */
static abi_long mmap_h_eq_g(abi_ulong start, abi_ulong len,
                            int host_prot, int flags, int page_flags,
                            int fd, off_t offset)
{
    void *p, *want_p = g2h_untagged(start);
    abi_ulong last;

    p = mmap(want_p, len, host_prot, flags, fd, offset);
    if (p == MAP_FAILED) {
        return -1;
    }
    /* If the host kernel does not support MAP_FIXED_NOREPLACE, emulate. */
    if ((flags & MAP_FIXED_NOREPLACE) && p != want_p) {
        do_munmap(p, len);
        errno = EEXIST;
        return -1;
    }

    last = start + len - 1;
    return mmap_end(start, last, start, last, flags, page_flags);
}
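/*
 * The check above also covers hosts whose kernel predates
 * MAP_FIXED_NOREPLACE (added in Linux 4.17): such kernels ignore the
 * unknown flag and may map elsewhere, so "mapped, but not at the
 * requested address" is converted to the EEXIST a new kernel would
 * have returned.
 */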
/*
 * Special case host page size < target page size.
 *
 * The two special cases are increased guest alignment, and mapping
 * past the end of a file.
 *
 * When mapping files into a memory area larger than the file,
 * accesses to pages beyond the file size will cause a SIGBUS.
 *
 * For example, if mmapping a file of 100 bytes on a host with 4K
 * pages emulating a target with 8K pages, the target expects to
 * be able to access the first 8K. But the host will trap us on
 * any access beyond 4K.
 *
 * When emulating a target with a larger page-size than the host's,
 * we may need to truncate file maps at EOF and add extra anonymous
 * pages up to the target's page boundary.
 *
 * This workaround only works for files that do not change.
 * If the file is later extended (e.g. ftruncate), the SIGBUS
 * vanishes and the proper behaviour is that changes within the
 * anon page should be reflected in the file.
 *
 * However, this case is rather common with executable images,
 * so the workaround is important for even trivial tests, whereas
 * the mmap of a file being extended is less common.
 */
static abi_long mmap_h_lt_g(abi_ulong start, abi_ulong len, int host_prot,
                            int mmap_flags, int page_flags, int fd,
                            off_t offset, int host_page_size)
{
    void *p, *want_p = g2h_untagged(start);
    off_t fileend_adj = 0;
    int flags = mmap_flags;
    abi_ulong last, pass_last;

    if (!(flags & MAP_ANONYMOUS)) {
        struct stat sb;

        if (fstat(fd, &sb) == -1) {
            return -1;
        }
        if (offset >= sb.st_size) {
            /*
             * The entire map is beyond the end of the file.
             * Transform it to an anonymous mapping.
             */
            flags |= MAP_ANONYMOUS;
            fd = -1;
            offset = 0;
        } else if (offset + len > sb.st_size) {
            /*
             * A portion of the map is beyond the end of the file.
             * Truncate the file portion of the allocation.
             */
            fileend_adj = offset + len - sb.st_size;
        }
    }

    if (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE)) {
        if (fileend_adj) {
            p = mmap(want_p, len, host_prot, flags | MAP_ANONYMOUS, -1, 0);
        } else {
            p = mmap(want_p, len, host_prot, flags, fd, offset);
        }
        if (p != want_p) {
            if (p != MAP_FAILED) {
                /* Host does not support MAP_FIXED_NOREPLACE: emulate. */
                do_munmap(p, len);
                errno = EEXIST;
            }
            return -1;
        }

        if (fileend_adj) {
            void *t = mmap(p, len - fileend_adj, host_prot,
                           (flags & ~MAP_FIXED_NOREPLACE) | MAP_FIXED,
                           fd, offset);

            if (t == MAP_FAILED) {
                int save_errno = errno;

                /*
                 * We failed a map over the top of the successful anonymous
                 * mapping above. The only failure mode is running out of VMAs,
                 * and there's nothing that we can do to detect that earlier.
                 * If we have replaced an existing mapping with MAP_FIXED,
                 * then we cannot properly recover.  It's a coin toss whether
                 * it would be better to exit or continue here.
                 */
                if (!(flags & MAP_FIXED_NOREPLACE) &&
                    !page_check_range_empty(start, start + len - 1)) {
                    qemu_log("QEMU target_mmap late failure: %s",
                             strerror(save_errno));
                }

                do_munmap(want_p, len);
                errno = save_errno;
                return -1;
            }
        }
    } else {
        size_t host_len, part_len;

        /*
         * Take care to align the host memory.  Perform a larger anonymous
         * allocation and extract the aligned portion.  Remap the file on
         * top of that.
         */
        host_len = len + TARGET_PAGE_SIZE - host_page_size;
        p = mmap(want_p, host_len, host_prot, flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            return -1;
        }

        part_len = (uintptr_t)p & (TARGET_PAGE_SIZE - 1);
        if (part_len) {
            part_len = TARGET_PAGE_SIZE - part_len;
            do_munmap(p, part_len);
            p += part_len;
            host_len -= part_len;
        }
        if (len < host_len) {
            do_munmap(p + len, host_len - len);
        }

        if (!(flags & MAP_ANONYMOUS)) {
            void *t = mmap(p, len - fileend_adj, host_prot,
                           flags | MAP_FIXED, fd, offset);

            if (t == MAP_FAILED) {
                int save_errno = errno;
                do_munmap(p, len);
                errno = save_errno;
                return -1;
            }
        }

        start = h2g(p);
    }

    last = start + len - 1;
    if (fileend_adj) {
        pass_last = ROUND_UP(last - fileend_adj, host_page_size) - 1;
    } else {
        pass_last = last;
    }
    return mmap_end(start, last, start, pass_last, mmap_flags, page_flags);
}
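/*
 * Continuing the example in the comment above (100-byte file, 4k host
 * pages, 8k target pages): fileend_adj is 0x2000 - 100, so only the
 * first 4k host page is mapped from the file and marked passthrough;
 * the second 4k stays anonymous, giving the guest its full 8k page.
 */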
/*
 * Special case host page size > target page size.
 *
 * The two special cases are address and file offsets that are valid
 * for the guest that cannot be directly represented by the host.
 */
static abi_long mmap_h_gt_g(abi_ulong start, abi_ulong len,
                            int target_prot, int host_prot,
                            int flags, int page_flags, int fd,
                            off_t offset, int host_page_size)
{
    void *p, *want_p = g2h_untagged(start);
    off_t host_offset = offset & -host_page_size;
    abi_ulong last, real_start, real_last;
    bool misaligned_offset = false;
    size_t host_len;

    if (!(flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
        /*
         * Adjust the offset to something representable on the host.
         */
        host_len = len + offset - host_offset;
        p = mmap(want_p, host_len, host_prot, flags, fd, host_offset);
        if (p == MAP_FAILED) {
            return -1;
        }

        /* Update start to the file position at offset. */
        p += offset - host_offset;
        start = h2g(p);

        last = start + len - 1;
        return mmap_end(start, last, start, last, flags, page_flags);
    }

    if (!(flags & MAP_ANONYMOUS)) {
        misaligned_offset = (start ^ offset) & (host_page_size - 1);

        /*
         * The fallback for misalignment is a private mapping + read.
         * This carries none of the semantics required of MAP_SHARED.
         */
        if (misaligned_offset && (flags & MAP_TYPE) != MAP_PRIVATE) {
            errno = EINVAL;
            return -1;
        }
    }

    last = start + len - 1;
    real_start = start & -host_page_size;
    real_last = ROUND_UP(last, host_page_size) - 1;

    /*
     * Handle the start and end of the mapping.
     */
    if (real_start < start) {
        abi_ulong real_page_last = real_start + host_page_size - 1;
        if (last <= real_page_last) {
            /* Entire allocation a subset of one host page. */
            if (!mmap_frag(real_start, start, last, target_prot,
                           flags, fd, offset)) {
                return -1;
            }
            return mmap_end(start, last, -1, 0, flags, page_flags);
        }

        if (!mmap_frag(real_start, start, real_page_last, target_prot,
                       flags, fd, offset)) {
            return -1;
        }
        real_start = real_page_last + 1;
    }

    if (last < real_last) {
        abi_ulong real_page_start = real_last - host_page_size + 1;
        if (!mmap_frag(real_page_start, real_page_start, last,
                       target_prot, flags, fd,
                       offset + real_page_start - start)) {
            return -1;
        }
        real_last = real_page_start - 1;
    }

    if (real_start > real_last) {
        return mmap_end(start, last, -1, 0, flags, page_flags);
    }

    /*
     * Handle the middle of the mapping.
     */
    host_len = real_last - real_start + 1;
    want_p += real_start - start;

    if (flags & MAP_ANONYMOUS) {
        p = mmap(want_p, host_len, host_prot, flags, -1, 0);
    } else if (!misaligned_offset) {
        p = mmap(want_p, host_len, host_prot, flags, fd,
                 offset + real_start - start);
    } else {
        p = mmap(want_p, host_len, host_prot | PROT_WRITE,
                 flags | MAP_ANONYMOUS, -1, 0);
    }
    if (p != want_p) {
        if (p != MAP_FAILED) {
            do_munmap(p, host_len);
            errno = EEXIST;
        }
        return -1;
    }

    if (misaligned_offset) {
        /* TODO: The read could be short. */
        if (pread(fd, p, host_len, offset + real_start - start) != host_len) {
            do_munmap(p, host_len);
            return -1;
        }
        if (!(host_prot & PROT_WRITE)) {
            mprotect(p, host_len, host_prot);
        }
    }

    return mmap_end(start, last, -1, 0, flags, page_flags);
}
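/*
 * A misaligned offset above means start and offset differ modulo the
 * host page size, e.g. a 4k-page guest requesting file offset 0x1000
 * at guest address 0x2000 on a 64k-page host.  No host mmap() can
 * produce that layout, so the content is pread() into an anonymous
 * private mapping instead.
 */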
static abi_long target_mmap__locked(abi_ulong start, abi_ulong len,
                                    int target_prot, int flags, int page_flags,
                                    int fd, off_t offset)
{
    int host_page_size = qemu_real_host_page_size();
    int host_prot;

    /*
     * For reserved_va, we are in full control of the allocation.
     * Find a suitable hole and convert to MAP_FIXED.
     */
    if (reserved_va) {
        if (flags & MAP_FIXED_NOREPLACE) {
            /* Validate that the chosen range is empty. */
            if (!page_check_range_empty(start, start + len - 1)) {
                errno = EEXIST;
                return -1;
            }
            flags = (flags & ~MAP_FIXED_NOREPLACE) | MAP_FIXED;
        } else if (!(flags & MAP_FIXED)) {
            abi_ulong real_start = start & -host_page_size;
            off_t host_offset = offset & -host_page_size;
            size_t real_len = len + offset - host_offset;
            abi_ulong align = MAX(host_page_size, TARGET_PAGE_SIZE);

            start = mmap_find_vma(real_start, real_len, align);
            if (start == (abi_ulong)-1) {
                errno = ENOMEM;
                return -1;
            }
            start += offset - host_offset;
            flags |= MAP_FIXED;
        }
    }

    host_prot = target_to_host_prot(target_prot);

    if (host_page_size == TARGET_PAGE_SIZE) {
        return mmap_h_eq_g(start, len, host_prot, flags,
                           page_flags, fd, offset);
    } else if (host_page_size < TARGET_PAGE_SIZE) {
        return mmap_h_lt_g(start, len, host_prot, flags,
                           page_flags, fd, offset, host_page_size);
    } else {
        return mmap_h_gt_g(start, len, target_prot, host_prot, flags,
                           page_flags, fd, offset, host_page_size);
    }
}
/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
                     int flags, int fd, off_t offset)
{
    abi_long ret;
    int page_flags;

    trace_target_mmap(start, len, target_prot, flags, fd, offset);

    if (!len) {
        errno = EINVAL;
        return -1;
    }

    page_flags = validate_prot_to_pageflags(target_prot);
    if (!page_flags) {
        errno = EINVAL;
        return -1;
    }

    /* Also check for overflows... */
    len = TARGET_PAGE_ALIGN(len);
    if (!len || len != (size_t)len) {
        errno = ENOMEM;
        return -1;
    }

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        return -1;
    }
    if (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE)) {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            return -1;
        }
        if (!guest_range_valid_untagged(start, len)) {
            errno = ENOMEM;
            return -1;
        }
    }

    mmap_lock();

    ret = target_mmap__locked(start, len, target_prot, flags,
                              page_flags, fd, offset);

    mmap_unlock();

    /*
     * If we're mapping shared memory, ensure we generate code for parallel
     * execution and flush old translations.  This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (ret != -1 && (flags & MAP_TYPE) != MAP_PRIVATE) {
        CPUState *cpu = thread_cpu;
        if (!tcg_cflags_has(cpu, CF_PARALLEL)) {
            tcg_cflags_set(cpu, CF_PARALLEL);
            tb_flush(cpu);
        }
    }

    return ret;
}
static int mmap_reserve_or_unmap(abi_ulong start, abi_ulong len)
{
    int host_page_size = qemu_real_host_page_size();
    abi_ulong real_start;
    abi_ulong real_last;
    abi_ulong real_len;
    abi_ulong last;
    abi_ulong a;
    void *host_start;
    int prot;

    last = start + len - 1;
    real_start = start & -host_page_size;
    real_last = ROUND_UP(last, host_page_size) - 1;

    /*
     * If guest pages remain on the first or last host pages,
     * adjust the deallocation to retain those guest pages.
     * The single page special case is required for the last page,
     * lest real_start overflow to zero.
     */
    if (real_last - real_start < host_page_size) {
        prot = 0;
        for (a = real_start; a < start; a += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(a);
        }
        for (a = last; a < real_last; a += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(a + 1);
        }
        if (prot != 0) {
            return 0;
        }
    } else {
        for (prot = 0, a = real_start; a < start; a += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(a);
        }
        if (prot != 0) {
            real_start += host_page_size;
        }

        for (prot = 0, a = last; a < real_last; a += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(a + 1);
        }
        if (prot != 0) {
            real_last -= host_page_size;
        }

        if (real_last < real_start) {
            return 0;
        }
    }

    real_len = real_last - real_start + 1;
    host_start = g2h_untagged(real_start);

    return do_munmap(host_start, real_len);
}
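/*
 * Example with 4k guest pages on a 64k-page host: unmapping the guest
 * range [0x3000, 0x4fff] cannot release host page [0x0, 0xffff] while
 * other guest pages within it remain valid, so the host page is kept
 * and only the guest page flags are cleared by the caller.
 */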
int target_munmap(abi_ulong start, abi_ulong len)
{
    int ret;

    trace_target_munmap(start, len);

    if (start & ~TARGET_PAGE_MASK) {
        return -TARGET_EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0 || !guest_range_valid_untagged(start, len)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();
    ret = mmap_reserve_or_unmap(start, len);
    if (likely(ret == 0)) {
        page_set_flags(start, start + len - 1, 0);
        shm_region_rm_complete(start, start + len - 1);
    }
    mmap_unlock();

    return ret;
}
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    void *host_addr;

    if (!guest_range_valid_untagged(old_addr, old_size) ||
        ((flags & MREMAP_FIXED) &&
         !guest_range_valid_untagged(new_addr, new_size)) ||
        ((flags & MREMAP_MAYMOVE) == 0 &&
         !guest_range_valid_untagged(old_addr, new_size))) {
        errno = ENOMEM;
        return -1;
    }

    mmap_lock();

    if (flags & MREMAP_FIXED) {
        host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
                           flags, g2h_untagged(new_addr));

        if (reserved_va && host_addr != MAP_FAILED) {
            /*
             * If new and old addresses overlap then the above mremap will
             * already have failed with EINVAL.
             */
            mmap_reserve_or_unmap(old_addr, old_size);
        }
    } else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size, TARGET_PAGE_SIZE);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else {
            host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
                               flags | MREMAP_FIXED,
                               g2h_untagged(mmap_start));
            if (reserved_va) {
                mmap_reserve_or_unmap(old_addr, old_size);
            }
        }
    } else {
        int page_flags = 0;
        if (reserved_va && old_size < new_size) {
            abi_ulong addr;
            for (addr = old_addr + old_size;
                 addr < old_addr + new_size;
                 addr++) {
                page_flags |= page_get_flags(addr);
            }
        }
        if (page_flags == 0) {
            host_addr = mremap(g2h_untagged(old_addr),
                               old_size, new_size, flags);

            if (host_addr != MAP_FAILED) {
                /* Check if address fits target address space */
                if (!guest_range_valid_untagged(h2g(host_addr), new_size)) {
                    /* Revert mremap() changes */
                    host_addr = mremap(g2h_untagged(old_addr),
                                       new_size, old_size, flags);
                    errno = ENOMEM;
                    host_addr = MAP_FAILED;
                } else if (reserved_va && old_size > new_size) {
                    mmap_reserve_or_unmap(old_addr + new_size,
                                          old_size - new_size);
                }
            }
        } else {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size - 1, 0);
        shm_region_rm_complete(old_addr, old_addr + old_size - 1);
        page_set_flags(new_addr, new_addr + new_size - 1,
                       prot | PAGE_VALID | PAGE_RESET);
        shm_region_rm_complete(new_addr, new_addr + new_size - 1);
    }
    mmap_unlock();
    return new_addr;
}
abi_long target_madvise(abi_ulong start, abi_ulong len_in, int advice)
{
    abi_ulong len;
    int ret = 0;

    if (start & ~TARGET_PAGE_MASK) {
        return -TARGET_EINVAL;
    }
    if (len_in == 0) {
        return 0;
    }
    len = TARGET_PAGE_ALIGN(len_in);
    if (len == 0 || !guest_range_valid_untagged(start, len)) {
        return -TARGET_EINVAL;
    }

    /* Translate for some architectures which have different MADV_xxx values */
    switch (advice) {
    case TARGET_MADV_DONTNEED:      /* alpha */
        advice = MADV_DONTNEED;
        break;
    case TARGET_MADV_WIPEONFORK:    /* parisc */
        advice = MADV_WIPEONFORK;
        break;
    case TARGET_MADV_KEEPONFORK:    /* parisc */
        advice = MADV_KEEPONFORK;
        break;
    /* we do not care about the other MADV_xxx values yet */
    }

    /*
     * Most advice values are hints, so ignoring and returning success is ok.
     *
     * However, some advice values such as MADV_DONTNEED, MADV_WIPEONFORK and
     * MADV_KEEPONFORK are not hints and need to be emulated.
     *
     * A straight passthrough for those may not be safe because qemu sometimes
     * turns private file-backed mappings into anonymous mappings.
     * If all guest pages have PAGE_PASSTHROUGH set, mappings have the
     * same semantics for the host as for the guest.
     *
     * We pass through MADV_WIPEONFORK and MADV_KEEPONFORK if possible and
     * return failure if not.
     *
     * MADV_DONTNEED is passed through as well, if possible.
     * If passthrough isn't possible, we nevertheless (wrongly!) return
     * success, which is broken but some userspace programs fail to work
     * otherwise. Completely implementing such emulation is quite complicated
     * though.
     */
    mmap_lock();
    switch (advice) {
    case MADV_WIPEONFORK:
    case MADV_KEEPONFORK:
        ret = -EINVAL;
        /* fall through */
    case MADV_DONTNEED:
        if (page_check_range(start, len, PAGE_PASSTHROUGH)) {
            ret = get_errno(madvise(g2h_untagged(start), len, advice));
            if ((advice == MADV_DONTNEED) && (ret == 0)) {
                page_reset_target_data(start, start + len - 1);
            }
        }
    }
    mmap_unlock();

    return ret;
}
#ifndef TARGET_FORCE_SHMLBA
/*
 * For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned.  Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif

#if defined(__arm__) || defined(__mips__) || defined(__sparc__)
#define HOST_FORCE_SHMLBA 1
#else
#define HOST_FORCE_SHMLBA 0
#endif
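/*
 * On arm, mips and sparc hosts the kernel enforces SHMLBA for cache
 * aliasing reasons, so shmat() addresses must satisfy both the guest
 * and the host constraint; target_shmat() below combines them as
 * m_shmlba = MAX(t_shmlba, h_shmlba).
 */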
abi_ulong target_shmat(CPUArchState *cpu_env, int shmid,
                       abi_ulong shmaddr, int shmflg)
{
    CPUState *cpu = env_cpu(cpu_env);
    struct shmid_ds shm_info;
    int ret;
    int h_pagesize;
    int t_shmlba, h_shmlba, m_shmlba;
    size_t t_len, h_len, m_len;

    /* shmat pointers are always untagged */

    /*
     * Because we can't use host shmat() unless the address is sufficiently
     * aligned for the host, we'll need to check both.
     * TODO: Could be fixed with softmmu.
     */
    t_shmlba = target_shmlba(cpu_env);
    h_pagesize = qemu_real_host_page_size();
    h_shmlba = (HOST_FORCE_SHMLBA ? SHMLBA : h_pagesize);
    m_shmlba = MAX(t_shmlba, h_shmlba);

    if (shmaddr) {
        if (shmaddr & (m_shmlba - 1)) {
            if (shmflg & SHM_RND) {
                /*
                 * The guest is allowing the kernel to round the address.
                 * Assume that the guest is ok with us rounding to the
                 * host required alignment too.  Anyway if we don't, we'll
                 * get an error from the kernel.
                 */
                shmaddr &= ~(m_shmlba - 1);
                if (shmaddr == 0 && (shmflg & SHM_REMAP)) {
                    return -TARGET_EINVAL;
                }
            } else {
                int require = TARGET_PAGE_SIZE;
#ifdef TARGET_FORCE_SHMLBA
                require = t_shmlba;
#endif
                /*
                 * Include host required alignment, as otherwise we cannot
                 * use host shmat at all.
                 */
                require = MAX(require, h_shmlba);
                if (shmaddr & (require - 1)) {
                    return -TARGET_EINVAL;
                }
            }
        }
    } else {
        if (shmflg & SHM_REMAP) {
            return -TARGET_EINVAL;
        }
    }
    /* All rounding now manually concluded. */
    shmflg &= ~SHM_RND;

    /* Find out the length of the shared memory segment. */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    t_len = TARGET_PAGE_ALIGN(shm_info.shm_segsz);
    h_len = ROUND_UP(shm_info.shm_segsz, h_pagesize);
    m_len = MAX(t_len, h_len);

    if (!guest_range_valid_untagged(shmaddr, m_len)) {
        return -TARGET_EINVAL;
    }

    WITH_MMAP_LOCK_GUARD() {
        bool mapped = false;
        void *want, *test;
        abi_ulong last;

        if (!shmaddr) {
            shmaddr = mmap_find_vma(0, m_len, m_shmlba);
            if (shmaddr == -1) {
                return -TARGET_ENOMEM;
            }
            mapped = !reserved_va;
        } else if (shmflg & SHM_REMAP) {
            /*
             * If host page size > target page size, the host shmat may map
             * more memory than the guest expects.  Reject a mapping that
             * would replace memory in the unexpected gap.
             * TODO: Could be fixed with softmmu.
             */
            if (t_len < h_len &&
                !page_check_range_empty(shmaddr + t_len,
                                        shmaddr + h_len - 1)) {
                return -TARGET_EINVAL;
            }
        } else {
            if (!page_check_range_empty(shmaddr, shmaddr + m_len - 1)) {
                return -TARGET_EINVAL;
            }
        }

        /* All placement is now complete. */
        want = (void *)g2h_untagged(shmaddr);

        /*
         * Map anonymous pages across the entire range, then remap with
         * the shared memory.  This is required for a number of corner
         * cases for which host and guest page sizes differ.
         */
        if (h_len != t_len) {
            int mmap_p = PROT_READ | (shmflg & SHM_RDONLY ? 0 : PROT_WRITE);
            int mmap_f = MAP_PRIVATE | MAP_ANONYMOUS
                       | (reserved_va || mapped || (shmflg & SHM_REMAP)
                          ? MAP_FIXED : MAP_FIXED_NOREPLACE);

            test = mmap(want, m_len, mmap_p, mmap_f, -1, 0);
            if (unlikely(test != want)) {
                /* shmat returns EINVAL not EEXIST like mmap. */
                ret = (test == MAP_FAILED && errno != EEXIST
                       ? get_errno(-1) : -TARGET_EINVAL);
                if (mapped) {
                    do_munmap(want, m_len);
                }
                return ret;
            }
            mapped = true;
        }

        if (reserved_va || mapped) {
            shmflg |= SHM_REMAP;
        }
        test = shmat(shmid, want, shmflg);
        if (test == MAP_FAILED) {
            ret = get_errno(-1);
            if (mapped) {
                do_munmap(want, m_len);
            }
            return ret;
        }
        assert(test == want);

        last = shmaddr + m_len - 1;
        page_set_flags(shmaddr, last,
                       PAGE_VALID | PAGE_RESET | PAGE_READ |
                       (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE) |
                       (shmflg & SHM_EXEC ? PAGE_EXEC : 0));

        shm_region_rm_complete(shmaddr, last);
        shm_region_add(shmaddr, last);
    }

    /*
     * We're mapping shared memory, so ensure we generate code for parallel
     * execution and flush old translations.  This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (!tcg_cflags_has(cpu, CF_PARALLEL)) {
        tcg_cflags_set(cpu, CF_PARALLEL);
        tb_flush(cpu);
    }

    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        FILE *f = qemu_log_trylock();
        if (f) {
            fprintf(f, "page layout changed following shmat\n");
            page_dump(f);
            qemu_log_unlock(f);
        }
    }
    return shmaddr;
}
abi_long target_shmdt(abi_ulong shmaddr)
{
    abi_long rv;

    /* shmdt pointers are always untagged */

    WITH_MMAP_LOCK_GUARD() {
        abi_ulong last = shm_region_find(shmaddr);
        if (last == 0) {
            return -TARGET_EINVAL;
        }

        rv = get_errno(shmdt(g2h_untagged(shmaddr)));
        if (rv == 0) {
            abi_ulong size = last - shmaddr + 1;

            page_set_flags(shmaddr, last, 0);
            shm_region_rm_complete(shmaddr, last);
            mmap_reserve_or_unmap(shmaddr, size);
        }
    }
    return rv;
}