/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "trace.h"
#include "exec/log.h"
#include "qemu.h"

static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;

void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

bool have_mmap_lock(void)
{
    return mmap_lock_count > 0 ? true : false;
}

/* Grab lock to make sure things are in a consistent state after fork(). */
void mmap_fork_start(void)
{
    if (mmap_lock_count) {
        abort();
    }
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child) {
        pthread_mutex_init(&mmap_mutex, NULL);
    } else {
        pthread_mutex_unlock(&mmap_mutex);
    }
}
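
/*
 * Usage note: mmap_lock_count makes the lock recursive per thread, so
 * nested critical sections are safe, e.g.:
 *
 *     mmap_lock();      count 0 -> 1, mutex acquired
 *     mmap_lock();      count 1 -> 2, mutex untouched
 *     mmap_unlock();    count 2 -> 1, mutex still held
 *     mmap_unlock();    count 1 -> 0, mutex released
 */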

/*
 * Validate target prot bitmask.
 * Return the prot bitmask for the host in *HOST_PROT.
 * Return 0 if the target prot bitmask is invalid, otherwise
 * the internal qemu page_flags (which will include PAGE_VALID).
 */
static int validate_prot_to_pageflags(int *host_prot, int prot)
{
    int valid = PROT_READ | PROT_WRITE | PROT_EXEC | TARGET_PROT_SEM;
    int page_flags = (prot & PAGE_BITS) | PAGE_VALID;

    /*
     * For the host, we need not pass anything except read/write/exec.
     * While PROT_SEM is allowed by all hosts, it is also ignored, so
     * don't bother transforming guest bit to host bit.  Any other
     * target-specific prot bits will not be understood by the host
     * and will need to be encoded into page_flags for qemu emulation.
     *
     * Pages that are executable by the guest will never be executed
     * by the host, but the host will need to be able to read them.
     */
    *host_prot = (prot & (PROT_READ | PROT_WRITE))
               | (prot & PROT_EXEC ? PROT_READ : 0);

#ifdef TARGET_AARCH64
    /*
     * The PROT_BTI bit is only accepted if the cpu supports the feature.
     * Since this is the unusual case, don't bother checking unless
     * the bit has been requested.  If set and valid, record the bit
     * within QEMU's page_flags.
     */
    if (prot & TARGET_PROT_BTI) {
        ARMCPU *cpu = ARM_CPU(thread_cpu);
        if (cpu_isar_feature(aa64_bti, cpu)) {
            valid |= TARGET_PROT_BTI;
            page_flags |= PAGE_BTI;
        }
    }
#endif

    return prot & ~valid ? 0 : page_flags;
}
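
/*
 * Worked example (illustrative values): a guest request of
 * PROT_READ | PROT_EXEC yields *host_prot = PROT_READ, since pages the
 * guest executes are only ever read by the host, and the function
 * returns PAGE_READ | PAGE_EXEC | PAGE_VALID.  A prot containing any
 * bit outside read/write/exec/TARGET_PROT_SEM (plus TARGET_PROT_BTI on
 * aarch64 with BTI) fails the "prot & ~valid" test and returns 0.
 */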

/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret, page_flags, host_prot;

    trace_target_mprotect(start, len, target_prot);

    if ((start & ~TARGET_PAGE_MASK) != 0) {
        return -TARGET_EINVAL;
    }
    page_flags = validate_prot_to_pageflags(&host_prot, target_prot);
    if (!page_flags) {
        return -TARGET_EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (!guest_range_valid_untagged(start, len)) {
        return -TARGET_ENOMEM;
    }
    if (len == 0) {
        return 0;
    }

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = host_prot;
        for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h_untagged(host_start), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = host_prot;
        for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h_untagged(host_end - qemu_host_page_size),
                       qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h_untagged(host_start),
                       host_end - host_start, host_prot);
        if (ret != 0) {
            goto error;
        }
    }
    page_set_flags(start, start + len, page_flags);
    mmap_unlock();
    return 0;

error:
    mmap_unlock();
    return ret;
}
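
/*
 * Worked example (illustrative values): with 4K target pages on a 64K
 * host page, target_mprotect(0x411000, 0x2000, PROT_READ) covers only
 * part of the host page at 0x410000.  The flags of the sibling target
 * pages within that host page are OR-ed into prot1 before the host
 * mprotect() call so their permissions survive, and page_set_flags()
 * then records the new protection for just the requested target range.
 */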

/* map an incomplete host page */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h_untagged(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for (addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end) {
            prot1 |= page_get_flags(addr);
        }
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            return -1;
        }
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & MAP_TYPE) == MAP_SHARED && (prot & PROT_WRITE)) {
            return -1;
        }

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE)) {
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);
        }

        /* read the corresponding file data */
        if (pread(fd, g2h_untagged(start), end - start, offset) == -1) {
            return -1;
        }

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE)) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
    } else {
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
        if (prot_new & PROT_WRITE) {
            memset(g2h_untagged(start), 0, end - start);
        }
    }
    return 0;
}
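
/*
 * Worked example (illustrative values): mapping a file at target address
 * 0x401000 with 4K target pages and a 64K host page makes [0x401000,
 * end) a fragment of the host page at 0x400000.  If sibling target
 * pages are already mapped there, mmap_frag() keeps the existing host
 * page, temporarily adds PROT_WRITE when needed, pread()s the file
 * bytes into [start, end), and finally applies the combined protection.
 */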

#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
#ifdef TARGET_AARCH64
# define TASK_UNMAPPED_BASE  0x5500000000
#else
# define TASK_UNMAPPED_BASE  (1ul << 38)
#endif
#else
# define TASK_UNMAPPED_BASE  0x40000000
#endif
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

unsigned long last_brk;

/* Subroutine of mmap_find_vma, used when we have pre-allocated a chunk
   of guest address space.  */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                        abi_ulong align)
{
    abi_ulong addr, end_addr, incr = qemu_host_page_size;
    int prot;
    bool looped = false;

    if (size > reserved_va) {
        return (abi_ulong)-1;
    }

    /* Note that start and size have already been aligned by mmap_find_vma. */

    end_addr = start + size;
    if (start > reserved_va - size) {
        /* Start at the top of the address space.  */
        end_addr = ((reserved_va - size) & -align) + size;
        looped = true;
    }

    /* Search downward from END_ADDR, checking to see if a page is in use.  */
    addr = end_addr;
    while (1) {
        addr -= incr;
        if (addr > end_addr) {
            if (looped) {
                /* Failure.  The entire address space has been searched.  */
                return (abi_ulong)-1;
            }
            /* Re-start at the top of the address space.  */
            addr = end_addr = ((reserved_va - size) & -align) + size;
            looped = true;
        } else {
            prot = page_get_flags(addr);
            if (prot) {
                /* Page in use.  Restart below this page.  */
                addr = end_addr = ((addr - size) & -align) + size;
            } else if (addr && addr + size == end_addr) {
                /* Success!  All pages between ADDR and END_ADDR are free.  */
                if (start == mmap_next_start) {
                    mmap_next_start = addr;
                }
                return addr;
            }
        }
    }
}
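
/*
 * Worked example (illustrative values): with reserved_va = 0x7fffffff,
 * size = 0x10000 and align = 0x10000, the loop walks downward one host
 * page at a time.  Hitting a used page at ADDR restarts the candidate
 * window just below it, at ((ADDR - size) & -align) + size, and the
 * search succeeds once a whole aligned window of `size` bytes ending
 * at END_ADDR is seen with no page flags set.
 */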

/*
 * Find and reserve a free memory area of size 'size'.  The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 if error.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
{
    void *ptr, *prev;
    abi_ulong addr;
    int wrapped, repeat;

    align = MAX(align, qemu_host_page_size);

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }
    start = ROUND_UP(start, align);

    size = HOST_PAGE_ALIGN(size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size, align);
    }

    addr = start;
    wrapped = repeat = 0;
    prev = 0;

    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h_untagged(addr), size, PROT_NONE,
                   MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /* Count the number of sequential returns of the same address.
           This is used to modify the search algorithm below. */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & (align - 1)) == 0) {
                /* Success.  */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target.  */
            switch (repeat) {
            case 0:
                /* Assume the result that the kernel gave us is the
                   first with enough free space, so start again at the
                   next higher target page.  */
                addr = ROUND_UP(addr, align);
                break;
            case 1:
                /* Sometimes the kernel decides to perform the allocation
                   at the top end of memory instead.  */
                addr &= -align;
                break;
            case 2:
                /* Start over at low memory.  */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last.  */
                addr = -1;
                break;
            }
        } else {
            /* Since the result the kernel gave didn't fit, start
               again at low memory.  If any repetition, fail.  */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again.  */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space.  */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /* Don't actually use 0 when wrapping, instead indicate
               that we'd truly like an allocation in low memory.  */
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                    ? TARGET_PAGE_ALIGN(mmap_min_addr)
                    : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}
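
#if 0
/*
 * Illustrative caller sketch (hypothetical helper, not part of this
 * file): the PROT_NONE reservation left behind by mmap_find_vma() must
 * be replaced immediately with a MAP_FIXED mapping, as required by the
 * comment inside the search loop above.
 */
static abi_ulong example_alloc_host_page(void)
{
    abi_ulong addr = mmap_find_vma(0, qemu_host_page_size,
                                   qemu_host_page_size);
    if (addr != (abi_ulong)-1) {
        mmap(g2h_untagged(addr), qemu_host_page_size,
             PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
    }
    return addr;
}
#endif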

/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;
    int page_flags, host_prot;

    mmap_lock();
    trace_target_mmap(start, len, target_prot, flags, fd, offset);

    if (!len) {
        errno = EINVAL;
        goto fail;
    }

    page_flags = validate_prot_to_pageflags(&host_prot, target_prot);
    if (!page_flags) {
        errno = EINVAL;
        goto fail;
    }

    /* Also check for overflows... */
    len = TARGET_PAGE_ALIGN(len);
    if (!len) {
        errno = ENOMEM;
        goto fail;
    }

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    real_start = start & qemu_host_page_mask;
    host_offset = offset & qemu_host_page_mask;

    /* If the user is asking for the kernel to find a location, do that
       before we truncate the length for mapping files below.  */
    if (!(flags & MAP_FIXED)) {
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        start = mmap_find_vma(real_start, host_len, TARGET_PAGE_SIZE);
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
    }

    /* When mapping files into a memory area larger than the file, accesses
       to pages beyond the file size will cause a SIGBUS.

       For example, if mmapping a file of 100 bytes on a host with 4K
       pages emulating a target with 8K pages, the target expects to be
       able to access the first 8K.  But the host will trap us on any
       access beyond 4K.

       When emulating a target with a larger page size than the host's,
       we may need to truncate file maps at EOF and add extra anonymous
       pages up to the target's page boundary.  */

    if ((qemu_real_host_page_size < qemu_host_page_size) &&
        !(flags & MAP_ANONYMOUS)) {
        struct stat sb;

        if (fstat(fd, &sb) == -1) {
            goto fail;
        }

        /* Are we trying to create a map beyond EOF? */
        if (offset + len > sb.st_size) {
            /* If so, truncate the file map at EOF aligned with
               the host's real page size.  Additional anonymous maps
               will be created beyond EOF.  */
            len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
        }
    }

    if (!(flags & MAP_FIXED)) {
        unsigned long host_start;
        void *p;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);

        /* Note: we prefer to control the mapping address.  It is
           especially important if qemu_host_page_size >
           qemu_real_host_page_size. */
        p = mmap(g2h_untagged(start), host_len, host_prot,
                 flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            goto fail;
        }
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (!(flags & MAP_ANONYMOUS)) {
            p = mmap(g2h_untagged(start), len, host_prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h_untagged(start), host_len);
                goto fail;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if requested memory area fits target address space.
         * It can fail only on 64-bit host with 32-bit target.
         * On any other target/host combination, host mmap() handles
         * this error correctly.
         */
        if (end < start || !guest_range_valid_untagged(start, len)) {
            errno = ENOMEM;
            goto fail;
        }

        /* worst case: we cannot map the file because the offset is not
           aligned, so we read it */
        if (!(flags & MAP_ANONYMOUS) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /* msync() won't work here, so we return an error if write is
               possible while it is a shared mapping */
            if ((flags & MAP_TYPE) == MAP_SHARED &&
                (host_prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, target_prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                                  -1, 0);
            if (retaddr == -1) {
                goto fail;
            }
            if (pread(fd, g2h_untagged(start), len, offset) == -1) {
                goto fail;
            }
            if (!(host_prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, target_prot);
                assert(ret == 0);
            }
            goto the_end;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                host_prot, flags, fd, offset);
                if (ret == -1) {
                    goto fail;
                }
                goto the_end1;
            }
            ret = mmap_frag(real_start, start,
                            real_start + qemu_host_page_size,
                            host_prot, flags, fd, offset);
            if (ret == -1) {
                goto fail;
            }
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, end,
                            host_prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1) {
                goto fail;
            }
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANONYMOUS) {
                offset1 = 0;
            } else {
                offset1 = offset + real_start - start;
            }
            p = mmap(g2h_untagged(real_start), real_end - real_start,
                     host_prot, flags, fd, offset1);
            if (p == MAP_FAILED) {
                goto fail;
            }
        }
    }
 the_end1:
    if (flags & MAP_ANONYMOUS) {
        page_flags |= PAGE_ANON;
    }
    page_flags |= PAGE_RESET;
    page_set_flags(start, start + len, page_flags);
 the_end:
    trace_target_mmap_complete(start);
    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        log_page_dump(__func__);
    }
    tb_invalidate_phys_range(start, start + len);
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}
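
#if 0
/*
 * Illustrative use (hypothetical helper and values, not part of this
 * file): an anonymous guest mapping, as the mmap(2) syscall emulation
 * would issue it.  On failure, -1 is returned with errno set for the
 * caller to translate into a target errno.
 */
static abi_long example_anonymous_map(void)
{
    return target_mmap(0, 16384, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
}
#endif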

/*
 * With reserved_va, the guest address space is backed by one large host
 * reservation, so releasing a range means re-covering it with fresh
 * PROT_NONE, MAP_NORESERVE anonymous memory rather than munmap()ing it,
 * which would let the host reuse the hole.  Boundary host pages that
 * still contain valid guest pages are left untouched.
 */
static void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0) {
            real_start += qemu_host_page_size;
        }
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0) {
            real_end -= qemu_host_page_size;
        }
    }
    if (real_start != real_end) {
        mmap(g2h_untagged(real_start), real_end - real_start, PROT_NONE,
             MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
             -1, 0);
    }
}

int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

    trace_target_munmap(start, len);

    if (start & ~TARGET_PAGE_MASK) {
        return -TARGET_EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0 || !guest_range_valid_untagged(start, len)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0) {
            real_start += qemu_host_page_size;
        }
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0) {
            real_end -= qemu_host_page_size;
        }
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        if (reserved_va) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h_untagged(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        page_set_flags(start, start + len, 0);
        tb_invalidate_phys_range(start, start + len);
    }
    mmap_unlock();
    return ret;
}

abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    void *host_addr;

    if (!guest_range_valid_untagged(old_addr, old_size) ||
        ((flags & MREMAP_FIXED) &&
         !guest_range_valid_untagged(new_addr, new_size)) ||
        ((flags & MREMAP_MAYMOVE) == 0 &&
         !guest_range_valid_untagged(old_addr, new_size))) {
        errno = ENOMEM;
        return -1;
    }

    mmap_lock();

    if (flags & MREMAP_FIXED) {
        host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
                           flags, g2h_untagged(new_addr));

        if (reserved_va && host_addr != MAP_FAILED) {
            /* If new and old addresses overlap then the above mremap will
               already have failed with EINVAL.  */
            mmap_reserve(old_addr, old_size);
        }
    } else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size, TARGET_PAGE_SIZE);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else {
            host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
                               flags | MREMAP_FIXED,
                               g2h_untagged(mmap_start));
            if (reserved_va) {
                mmap_reserve(old_addr, old_size);
            }
        }
    } else {
        int prot = 0;
        if (reserved_va && old_size < new_size) {
            abi_ulong addr;
            for (addr = old_addr + old_size;
                 addr < old_addr + new_size;
                 addr++) {
                prot |= page_get_flags(addr);
            }
        }
        if (prot == 0) {
            host_addr = mremap(g2h_untagged(old_addr),
                               old_size, new_size, flags);

            if (host_addr != MAP_FAILED) {
                /* Check if address fits target address space */
                if (!guest_range_valid_untagged(h2g(host_addr), new_size)) {
                    /* Revert mremap() changes */
                    host_addr = mremap(g2h_untagged(old_addr),
                                       new_size, old_size, flags);
                    errno = ENOMEM;
                    host_addr = MAP_FAILED;
                } else if (reserved_va && old_size > new_size) {
                    /* shrunk in place: re-reserve the released tail */
                    mmap_reserve(old_addr + new_size, old_size - new_size);
                }
            }
        } else {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size, 0);
        page_set_flags(new_addr, new_addr + new_size,
                       prot | PAGE_VALID | PAGE_RESET);
    }
    tb_invalidate_phys_range(new_addr, new_addr + new_size);
    mmap_unlock();
    return new_addr;
}
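
#if 0
/*
 * Illustrative use (hypothetical helper and values, not part of this
 * file): grow a 16K guest mapping to 32K, allowing it to move, as the
 * mremap(2) syscall emulation would.  Returns the possibly relocated
 * guest address, or -1 with errno set.
 */
static abi_long example_grow_mapping(abi_ulong old_addr)
{
    return target_mremap(old_addr, 16384, 32768, MREMAP_MAYMOVE, 0);
}
#endif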