/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <linux/mman.h>
#include <linux/unistd.h>

#include "qemu.h"
#include "qemu-common.h"

//#define DEBUG_MMAP
#if defined(CONFIG_USE_NPTL)
static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static int __thread mmap_lock_count;
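/* The lock is recursive per thread: only the outermost
   mmap_lock()/mmap_unlock() pair in a thread actually takes or
   releases the mutex, so callers that already hold the lock can
   safely call into code that locks again. */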
void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}
/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count)
        abort();
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child)
        pthread_mutex_init(&mmap_mutex, NULL);
    else
        pthread_mutex_unlock(&mmap_mutex);
}
#else
/* We aren't threadsafe to start with, so no need to worry about locking.  */
void mmap_lock(void)
{
}

void mmap_unlock(void)
{
}
#endif
void *qemu_vmalloc(size_t size)
{
    void *p;
    unsigned long addr;
    mmap_lock();
    /* Use map and mark the pages as used.  */
    p = mmap(NULL, size, PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    addr = (unsigned long)p;
    if (addr == (target_ulong) addr) {
        /* Allocated region overlaps guest address space.
           This may recurse.  */
        page_set_flags(addr & TARGET_PAGE_MASK, TARGET_PAGE_ALIGN(addr + size),
                       PAGE_RESERVED);
    }

    mmap_unlock();
    return p;
}
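/* The allocator below stores the allocation size in a 16-byte header
   in front of the returned block (16 bytes rather than sizeof(size_t)
   so the payload stays well aligned); qemu_free() and qemu_realloc()
   read it back from ptr - 16. */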
void *qemu_malloc(size_t size)
{
    char *p;
    size += 16;
    p = qemu_vmalloc(size);
    *(size_t *)p = size;
    return p + 16;
}
/* We use map, which is always zero initialized.  */
void *qemu_mallocz(size_t size)
{
    return qemu_malloc(size);
}
void qemu_free(void *ptr)
{
    /* FIXME: We should unmark the reserved pages here.  However this gets
       complicated when one target page spans multiple host pages, so we
       don't bother.  */
    size_t *p;
    p = (size_t *)((char *)ptr - 16);
    munmap(p, *p);
}
void *qemu_realloc(void *ptr, size_t size)
{
    size_t old_size, copy;
    void *new_ptr;

    if (!ptr)
        return qemu_malloc(size);
    old_size = *(size_t *)((char *)ptr - 16);
    copy = old_size < size ? old_size : size;
    new_ptr = qemu_malloc(size);
    memcpy(new_ptr, ptr, copy);
    qemu_free(ptr);
    return new_ptr;
}
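/* Everything below deals with the same basic mismatch: the target's
   page size (TARGET_PAGE_SIZE) and the host's (qemu_host_page_size)
   may differ, so a single host page can back several target pages
   with different target-level protections. */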
/* NOTE: all the constants are the HOST ones, but addresses are target.  */
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

#ifdef DEBUG_MMAP
    printf("mprotect: start=0x" TARGET_ABI_FMT_lx
           " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c\n", start, len,
           prot & PROT_READ ? 'r' : '-',
           prot & PROT_WRITE ? 'w' : '-',
           prot & PROT_EXEC ? 'x' : '-');
#endif

    if ((start & ~TARGET_PAGE_MASK) != 0)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    if (len == 0)
        return 0;

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for(addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h(host_start), qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = prot;
        for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h(host_end - qemu_host_page_size), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h(host_start), host_end - host_start, prot);
        if (ret != 0)
            goto error;
    }
    page_set_flags(start, start + len, prot | PAGE_VALID);
    mmap_unlock();
    return 0;

error:
    mmap_unlock();
    return ret;
}
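/* When a new mapping covers only part of a host page, that host page
   cannot simply be mmap'd over: the neighbouring target pages sharing
   it may already be in use.  mmap_frag() therefore reuses (or
   allocates) the host page, pulls in file data with pread() where the
   file cannot be mapped directly, and installs the union of the old
   and new protections. */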
/* map an incomplete host page */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for(addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return -1;
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -EINVAL;

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* read the corresponding file data */
        pread(fd, g2h(start), end - start, offset);

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        /* just update the protection */
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
    }
    return 0;
}
#if defined(__CYGWIN__)
/* Cygwin doesn't have a whole lot of address space.  */
static abi_ulong mmap_next_start = 0x18000000;
#else
static abi_ulong mmap_next_start = 0x40000000;
#endif

unsigned long last_brk;
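/* mmap_next_start is only a hint: mmap_find_vma() starts its search
   there when the caller passes start == 0, and advances it past each
   successful reservation. */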
/*
 * Find and reserve a free memory area of size 'size'.  The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 on error.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
{
    void *ptr;
    abi_ulong addr;

    size = HOST_PAGE_ALIGN(size);
    start &= qemu_host_page_mask;

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0)
        start = mmap_next_start;

    addr = start;

    for(;;) {
        /*
         * Reserve the needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap((void *)(unsigned long)addr, size, PROT_NONE,
                   MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED)
            return (abi_ulong)-1;

        /* If the address fits the target address space we've found what we need */
        if ((unsigned long)ptr + size - 1 <= (abi_ulong)-1)
            break;

        /* Unmap and try again with a new page */
        munmap(ptr, size);
        addr += qemu_host_page_size;

        /* ENOMEM once we have wrapped around and checked the whole
           target address space */
        if (addr == start)
            return (abi_ulong)-1;
    }

    /* Update the default start address */
    if (start == mmap_next_start)
        mmap_next_start = (unsigned long)ptr + size;

    return h2g(ptr);
}
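/* Note that the area returned above is still reserved with PROT_NONE
   host pages: the caller is expected to replace the reservation with
   a real mapping (e.g. mmap() with MAP_FIXED) or to munmap() it. */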
/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;
    unsigned long host_start;

    mmap_lock();
#ifdef DEBUG_MMAP
    {
        printf("mmap: start=0x" TARGET_ABI_FMT_lx
               " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c flags=",
               start, len,
               prot & PROT_READ ? 'r' : '-',
               prot & PROT_WRITE ? 'w' : '-',
               prot & PROT_EXEC ? 'x' : '-');
        if (flags & MAP_FIXED)
            printf("MAP_FIXED ");
        if (flags & MAP_ANONYMOUS)
            printf("MAP_ANON ");
        switch(flags & MAP_TYPE) {
        case MAP_PRIVATE:
            printf("MAP_PRIVATE ");
            break;
        case MAP_SHARED:
            printf("MAP_SHARED ");
            break;
        default:
            printf("[MAP_TYPE=0x%x] ", flags & MAP_TYPE);
            break;
        }
        printf("fd=%d offset=" TARGET_ABI_FMT_lx "\n", fd, offset);
    }
#endif
    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        goto the_end;
    real_start = start & qemu_host_page_mask;

    /* When mapping files into a memory area larger than the file, accesses
       to pages beyond the file size will cause a SIGBUS.

       For example, if mmaping a file of 100 bytes on a host with 4K pages
       emulating a target with 8K pages, the target expects to be able to
       access the first 8K. But the host will trap us on any access beyond
       4K.

       When emulating a target with a larger page size than the host's, we
       may need to truncate file maps at EOF and add extra anonymous pages
       up to the target's page boundary.  */

    if ((qemu_real_host_page_size < TARGET_PAGE_SIZE)
        && !(flags & MAP_ANONYMOUS)) {
        struct stat sb;

        if (fstat(fd, &sb) == -1)
            goto fail;

        /* Are we trying to create a map beyond EOF?  */
        if (offset + len > sb.st_size) {
            /* If so, truncate the file map at EOF aligned with
               the host's real page size.  Additional anonymous maps
               will be created beyond EOF.  */
            len = (sb.st_size - offset);
            len += qemu_real_host_page_size - 1;
            len &= ~(qemu_real_host_page_size - 1);
        }
    }
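    /* Continuing the 100-byte-file example above: with offset 0 and 4K
       host pages, len is clamped to 100 and then rounded up to 4096, so
       the file-backed part of the map stops at the first host page
       boundary at or beyond EOF. */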
    if (!(flags & MAP_FIXED)) {
        abi_ulong mmap_start;
        void *p;
        host_offset = offset & qemu_host_page_mask;
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        mmap_start = mmap_find_vma(real_start, host_len);
        if (mmap_start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
        /* Note: we prefer to control the mapping address.  It is
           especially important if qemu_host_page_size >
           qemu_real_host_page_size */
        p = mmap(g2h(mmap_start),
                 host_len, prot, flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            goto fail;
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (!(flags & MAP_ANONYMOUS)) {
            p = mmap(g2h(mmap_start), len, prot,
                     flags | MAP_FIXED, fd, host_offset);
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        int flg;
        target_ulong addr;

        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if the requested memory area fits the target address space.
         * It can fail only on a 64-bit host with a 32-bit target.
         * On any other target/host combination the host mmap() handles
         * this error correctly.
         */
        if ((unsigned long)start + len - 1 > (abi_ulong) -1) {
            errno = EINVAL;
            goto fail;
        }

        for(addr = real_start; addr < real_end; addr += TARGET_PAGE_SIZE) {
            flg = page_get_flags(addr);
            if (flg & PAGE_RESERVED) {
                errno = ENXIO;
                goto fail;
            }
        }

        /* worst case: we cannot map the file because the offset is not
           aligned, so we read it */
        if (!(flags & MAP_ANONYMOUS) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /* msync() won't work here, so we return an error if write is
               possible while it is a shared mapping */
            if ((flags & MAP_TYPE) == MAP_SHARED &&
                (prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                                  -1, 0);
            if (retaddr == -1)
                goto fail;
            pread(fd, g2h(start), len, offset);
            if (!(prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, prot);
                if (ret != 0) {
                    start = ret;
                    goto the_end;
                }
            }
            goto the_end;
        }
        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                prot, flags, fd, offset);
                if (ret == -1)
                    goto fail;
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            prot, flags, fd, offset);
            if (ret == -1)
                goto fail;
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, real_end,
                            prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1)
                goto fail;
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANONYMOUS)
                offset1 = 0;
            else
                offset1 = offset + real_start - start;
            p = mmap(g2h(real_start), real_end - real_start,
                     prot, flags, fd, offset1);
            if (p == MAP_FAILED)
                goto fail;
        }
    }
 the_end1:
    page_set_flags(start, start + len, prot | PAGE_VALID);
 the_end:
#ifdef DEBUG_MMAP
    printf("ret=0x" TARGET_ABI_FMT_lx "\n", start);
    page_dump(stdout);
    printf("\n");
#endif
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}
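/* Unmapping mirrors the mapping logic: host pages at either edge of
   the range that still back other, live target pages are kept, and
   only the fully covered middle is actually munmap'd. */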
int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x" TARGET_ABI_FMT_lx " len=0x"
           TARGET_ABI_FMT_lx "\n",
           start, len);
#endif
    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        return -EINVAL;
    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for(addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        ret = munmap(g2h(real_start), real_end - real_start);
    }

    if (ret == 0)
        page_set_flags(start, start + len, 0);
    mmap_unlock();
    return ret;
}
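/* The MREMAP_FIXED paths below invoke the mremap syscall directly so
   that the fifth (new address) argument can be passed explicitly; the
   plain-flags case goes through the ordinary libc wrapper. */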
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    void *host_addr;

    mmap_lock();

    if (flags & MREMAP_FIXED)
        host_addr = (void *) syscall(__NR_mremap, g2h(old_addr),
                                     old_size, new_size,
                                     flags,
                                     new_addr);
    else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else
            host_addr = (void *) syscall(__NR_mremap, g2h(old_addr),
                                         old_size, new_size,
                                         flags | MREMAP_FIXED,
                                         g2h(mmap_start));
    } else {
        host_addr = mremap(g2h(old_addr), old_size, new_size, flags);
        /* Check if the address fits the target address space */
        if ((unsigned long)host_addr + new_size > (abi_ulong)-1) {
            /* Revert mremap() changes */
            host_addr = mremap(g2h(old_addr), new_size, old_size, flags);
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size, 0);
        page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
    }
    mmap_unlock();
    return new_addr;
}
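/* msync() needs no page-size juggling beyond alignment: start is
   rounded down to a host page boundary because the host requires a
   page-aligned address. */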
int target_msync(abi_ulong start, abi_ulong len, int flags)
{
    abi_ulong end;

    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    if (end == start)
        return 0;

    start &= qemu_host_page_mask;
    return msync(g2h(start), end - start, flags);
}