/*
 * mmap support for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <sys/mman.h>

#include "qemu.h"
#include "qemu-common.h"
//#define DEBUG_MMAP

#if defined(USE_NPTL)
pthread_mutex_t mmap_mutex;
static int __thread mmap_lock_count;

void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count)
        abort();
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child)
        pthread_mutex_init(&mmap_mutex, NULL);
    else
        pthread_mutex_unlock(&mmap_mutex);
}
#else
/* We aren't threadsafe to start with, so no need to worry about locking.  */
void mmap_lock(void)
{
}

void mmap_unlock(void)
{
}
#endif
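
/* Allocator wrappers used while running in user mode.  The idea, as can be
   inferred from the code below: host allocations are done with mmap(), and
   any region that happens to land inside the guest's address range is marked
   PAGE_RESERVED so that mmap_find_vma() will not later hand it out to the
   guest. */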
void *qemu_vmalloc(size_t size)
{
    void *p;
    unsigned long addr;
    mmap_lock();
    /* Use map and mark the pages as used.  */
    p = mmap(NULL, size, PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    addr = (unsigned long)p;
    if (addr == (target_ulong) addr) {
        /* Allocated region overlaps guest address space.
           This may recurse.  */
        page_set_flags(addr & TARGET_PAGE_MASK, TARGET_PAGE_ALIGN(addr + size),
                       PAGE_RESERVED);
    }

    mmap_unlock();
    return p;
}
void *qemu_malloc(size_t size)
{
    char * p;
    size += 16;
    p = qemu_vmalloc(size);
    *(size_t *)p = size;
    return p + 16;
}

/* We use map, which is always zero initialized.  */
void * qemu_mallocz(size_t size)
{
    return qemu_malloc(size);
}

void qemu_free(void *ptr)
{
    /* FIXME: We should unmark the reserved pages here.  However this gets
       complicated when one target page spans multiple host pages, so we
       don't bother.  */
    size_t *p;
    p = (size_t *)((char *)ptr - 16);
    munmap(p, *p);
}
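
/* The helpers below must cope with a host page size larger than the target
   page size: several target pages then share one host page, and the host
   protection applied to that page has to be the union of the flags of every
   target page on it (hence the prot1 |= page_get_flags(addr) loops). */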
/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

#ifdef DEBUG_MMAP
    printf("mprotect: start=0x" TARGET_FMT_lx
           " len=0x" TARGET_FMT_lx " prot=%c%c%c\n", start, len,
           prot & PROT_READ ? 'r' : '-',
           prot & PROT_WRITE ? 'w' : '-',
           prot & PROT_EXEC ? 'x' : '-');
#endif

    if ((start & ~TARGET_PAGE_MASK) != 0)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    if (len == 0)
        return 0;

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for(addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h(host_start), qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = prot;
        for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h(host_end - qemu_host_page_size), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h(host_start), host_end - host_start, prot);
        if (ret != 0)
            goto error;
    }
    page_set_flags(start, start + len, prot | PAGE_VALID);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}
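
/* mmap_frag() handles a mapping whose start or end falls in the middle of a
   host page.  The other target pages on that host page may already be in
   use, so the host page cannot simply be remapped; instead the file data
   for the fragment is read in with pread() after temporarily making the
   page writable, and the requested protection is applied afterwards. */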
/* map an incomplete host page */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for(addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return -1;
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -EINVAL;

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* read the corresponding file data */
        pread(fd, g2h(start), end - start, offset);

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        /* just update the protection */
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
    }
    return 0;
}
#if defined(__CYGWIN__)
/* Cygwin doesn't have a whole lot of address space.  */
static abi_ulong mmap_next_start = 0x18000000;
#else
static abi_ulong mmap_next_start = 0x40000000;
#endif

unsigned long last_brk;

/* find a free memory area of size 'size'. The search starts at
   'start'. If 'start' == 0, then a default start address is used.
   Return -1 if error.
*/
/* page_init() marks pages used by the host as reserved to be sure not
   to use them. */
static abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
{
    abi_ulong addr, addr1, addr_start;
    int prot;
    unsigned long new_brk;

    new_brk = (unsigned long)sbrk(0);
    if (last_brk && last_brk < new_brk && last_brk == (target_ulong)last_brk) {
        /* This is a hack to catch the host allocating memory with brk().
           If it uses mmap then we lose.
           FIXME: We really want to avoid the host allocating memory in
           the first place, and maybe leave some slack to avoid switching
           to mmap.  */
        page_set_flags(last_brk & TARGET_PAGE_MASK,
                       TARGET_PAGE_ALIGN(new_brk),
                       PAGE_RESERVED);
    }
    last_brk = new_brk;

    size = HOST_PAGE_ALIGN(size);
    start = start & qemu_host_page_mask;
    addr = start;
    if (addr == 0)
        addr = mmap_next_start;
    addr_start = addr;
    for(;;) {
        prot = 0;
        for(addr1 = addr; addr1 < (addr + size); addr1 += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr1);
        }
        if (prot == 0)
            break;
        addr += qemu_host_page_size;
        /* we found nothing */
        if (addr == addr_start)
            return (abi_ulong)-1;
    }
    if (start == 0)
        mmap_next_start = addr + size;
    return addr;
}
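
/* target_mmap() implements the guest-visible mmap.  Roughly, as the code
   below is structured: without MAP_FIXED a suitable guest range is picked
   with mmap_find_vma() and mapped in one go at host-page granularity; with
   MAP_FIXED the mapping is split into an unaligned start fragment, an
   aligned middle handled by a direct host mmap(), and an unaligned end
   fragment.  If the file offset and the start address are not congruent
   modulo the host page size, the file cannot be mapped at all and is read
   into an anonymous mapping instead. */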
/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;
    unsigned long host_start;

    mmap_lock();
#ifdef DEBUG_MMAP
    {
        printf("mmap: start=0x" TARGET_FMT_lx
               " len=0x" TARGET_FMT_lx " prot=%c%c%c flags=",
               start, len,
               prot & PROT_READ ? 'r' : '-',
               prot & PROT_WRITE ? 'w' : '-',
               prot & PROT_EXEC ? 'x' : '-');
        if (flags & MAP_FIXED)
            printf("MAP_FIXED ");
        if (flags & MAP_ANONYMOUS)
            printf("MAP_ANON ");
        switch(flags & MAP_TYPE) {
        case MAP_PRIVATE:
            printf("MAP_PRIVATE ");
            break;
        case MAP_SHARED:
            printf("MAP_SHARED ");
            break;
        default:
            printf("[MAP_TYPE=0x%x] ", flags & MAP_TYPE);
            break;
        }
        printf("fd=%d offset=" TARGET_FMT_lx "\n", fd, offset);
    }
#endif

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        goto the_end;
    real_start = start & qemu_host_page_mask;

    if (!(flags & MAP_FIXED)) {
        abi_ulong mmap_start;
        void *p;
        host_offset = offset & qemu_host_page_mask;
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        mmap_start = mmap_find_vma(real_start, host_len);
        if (mmap_start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
        /* Note: we prefer to control the mapping address. It is
           especially important if qemu_host_page_size >
           qemu_real_host_page_size */
        p = mmap(g2h(mmap_start),
                 host_len, prot, flags | MAP_FIXED, fd, host_offset);
        if (p == MAP_FAILED)
            goto fail;
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (!(flags & MAP_ANONYMOUS))
            host_start += offset - host_offset;
        start = h2g(host_start);
    } else {
        int flg;
        target_ulong addr;

        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if requested memory area fits target address space
         * It can fail only on 64-bit host with 32-bit target.
         * On any other target/host host mmap() handles this error correctly.
         */
        if ((unsigned long)start + len - 1 > (abi_ulong) -1) {
            errno = EINVAL;
            goto fail;
        }

        for(addr = real_start; addr < real_end; addr += TARGET_PAGE_SIZE) {
            flg = page_get_flags(addr);
            if (flg & PAGE_RESERVED) {
                errno = ENXIO;
                goto fail;
            }
        }

        /* worst case: we cannot map the file because the offset is not
           aligned, so we read it */
        if (!(flags & MAP_ANONYMOUS) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /* msync() won't work here, so we return an error if write is
               possible while it is a shared mapping */
            if ((flags & MAP_TYPE) == MAP_SHARED &&
                (prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                                  -1, 0);
            if (retaddr == -1)
                goto fail;
            pread(fd, g2h(start), len, offset);
            if (!(prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, prot);
                if (ret != 0) {
                    start = ret;
                    goto the_end;
                }
            }
            goto the_end;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                prot, flags, fd, offset);
                if (ret == -1)
                    goto fail;
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            prot, flags, fd, offset);
            if (ret == -1)
                goto fail;
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, real_end,
                            prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1)
                goto fail;
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANONYMOUS)
                offset1 = 0;
            else
                offset1 = offset + real_start - start;
            p = mmap(g2h(real_start), real_end - real_start,
                     prot, flags, fd, offset1);
            if (p == MAP_FAILED)
                goto fail;
        }
    }
the_end1:
    page_set_flags(start, start + len, prot | PAGE_VALID);
the_end:
#ifdef DEBUG_MMAP
    printf("ret=0x" TARGET_FMT_lx "\n", start);
    page_dump(stdout);
    printf("\n");
#endif
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}
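
/* target_munmap(): host pages that are only partially covered by the request
   are kept mapped when another target page on them is still valid; only the
   fully covered middle range is actually munmap()ed, and the guest page
   flags are cleared on success. */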
int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x%lx len=0x%lx\n", start, len);
#endif
    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        return -EINVAL;
    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for(addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        ret = munmap(g2h(real_start), real_end - real_start);
    }

    if (ret == 0)
        page_set_flags(start, start + len, 0);
    mmap_unlock();
    return ret;
}
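
/* target_mremap(): when the host provides MREMAP_FIXED, a MREMAP_MAYMOVE
   request is redirected to an address obtained from mmap_find_vma() so the
   resized mapping stays inside the guest address range; otherwise the plain
   mremap() result is checked against the target address space and reverted
   if it does not fit. */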
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    void *host_addr;

    mmap_lock();

#if defined(MREMAP_FIXED)
    if (flags & MREMAP_FIXED)
        host_addr = mremap(g2h(old_addr), old_size, new_size,
                           flags, new_addr);
    else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else
            host_addr = mremap(g2h(old_addr), old_size, new_size,
                               flags | MREMAP_FIXED, g2h(mmap_start));
    } else
#endif
    {
        host_addr = mremap(g2h(old_addr), old_size, new_size, flags);
        /* Check if address fits target address space */
        if ((unsigned long)host_addr + new_size > (abi_ulong)-1) {
            /* Revert mremap() changes */
            host_addr = mremap(g2h(old_addr), new_size, old_size, flags);
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size, 0);
        page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
    }
    mmap_unlock();
    return new_addr;
}
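
/* target_msync(): the start address is rounded down to a host page boundary
   since the host msync() works on host pages; unaligned or wrapping ranges
   are rejected with -EINVAL, matching the checks done elsewhere in this
   file. */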
int target_msync(abi_ulong start, abi_ulong len, int flags)
{
    abi_ulong end;

    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    if (end == start)
        return 0;

    start &= qemu_host_page_mask;
    return msync(g2h(start), end - start, flags);
}