linux-user/mmap.c
/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
 *  MA 02110-1301, USA.
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <sys/mman.h>
#include <linux/mman.h>
#include <linux/unistd.h>

#include "qemu.h"
#include "qemu-common.h"

//#define DEBUG_MMAP
#if defined(USE_NPTL)
pthread_mutex_t mmap_mutex;
static int __thread mmap_lock_count;

void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count)
        abort();
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child)
        pthread_mutex_init(&mmap_mutex, NULL);
    else
        pthread_mutex_unlock(&mmap_mutex);
}
#else
/* We aren't threadsafe to start with, so no need to worry about locking.  */
void mmap_lock(void)
{
}

void mmap_unlock(void)
{
}
#endif
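/*
 * Note on the locking scheme above: mmap_lock_count lives in thread-local
 * storage, so mmap_lock()/mmap_unlock() pairs nest within a single thread.
 * The mutex is only taken on the outermost lock and released on the
 * outermost unlock, which lets code paths that already hold the lock
 * (e.g. the recursion noted in qemu_vmalloc below) call mmap_lock() again
 * without deadlocking.
 */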
void *qemu_vmalloc(size_t size)
{
    void *p;
    unsigned long addr;
    mmap_lock();
    /* Use map and mark the pages as used.  */
    p = mmap(NULL, size, PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    addr = (unsigned long)p;
    if (addr == (target_ulong) addr) {
        /* Allocated region overlaps guest address space.
           This may recurse.  */
        page_set_flags(addr & TARGET_PAGE_MASK, TARGET_PAGE_ALIGN(addr + size),
                       PAGE_RESERVED);
    }

    mmap_unlock();
    return p;
}
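/*
 * The allocator below carries the block size in a 16-byte header placed in
 * front of the returned pointer: qemu_malloc() stores the total size at the
 * start of the mapping and hands out the address 16 bytes in, and
 * qemu_free() walks back to the header to learn how much to munmap().
 * 16 bytes keeps the returned pointer suitably aligned for any C type.
 */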
void *qemu_malloc(size_t size)
{
    char * p;
    size += 16;
    p = qemu_vmalloc(size);
    *(size_t *)p = size;
    return p + 16;
}
/* We use map, which is always zero initialized.  */
void * qemu_mallocz(size_t size)
{
    return qemu_malloc(size);
}
void qemu_free(void *ptr)
{
    /* FIXME: We should unmark the reserved pages here.  However this gets
       complicated when one target page spans multiple host pages, so we
       don't bother.  */
    size_t *p;
    p = (size_t *)((char *)ptr - 16);
    munmap(p, *p);
}
void *qemu_realloc(void *ptr, size_t size)
{
    size_t old_size, copy;
    void *new_ptr;

    old_size = *(size_t *)((char *)ptr - 16);
    copy = old_size < size ? old_size : size;
    new_ptr = qemu_malloc(size);
    memcpy(new_ptr, ptr, copy);
    qemu_free(ptr);
    return new_ptr;
}
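/*
 * The mapping primitives below all deal with the same mismatch: the target
 * page size (TARGET_PAGE_SIZE) can be smaller than the host page size,
 * e.g. a 4K-page guest running on a host with 64K pages.  A single host
 * page may then hold several target pages with different protections, so
 * partial host pages are handled by OR-ing the requested protection with
 * the flags of the neighbouring target pages before calling the host
 * mprotect()/mmap().
 */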
/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

#ifdef DEBUG_MMAP
    printf("mprotect: start=0x" TARGET_FMT_lx
           " len=0x" TARGET_FMT_lx " prot=%c%c%c\n", start, len,
           prot & PROT_READ ? 'r' : '-',
           prot & PROT_WRITE ? 'w' : '-',
           prot & PROT_EXEC ? 'x' : '-');
#endif

    if ((start & ~TARGET_PAGE_MASK) != 0)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    if (len == 0)
        return 0;

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for(addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h(host_start), qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = prot;
        for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h(host_end - qemu_host_page_size), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h(host_start), host_end - host_start, prot);
        if (ret != 0)
            goto error;
    }
    page_set_flags(start, start + len, prot | PAGE_VALID);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}
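/*
 * mmap_frag() below covers the case where the target range [start, end)
 * occupies only part of one host page:
 *
 *   real_start                              real_start + host_page_size
 *   | other target pages |##### start..end #####| other target pages |
 *
 * The existing target pages sharing the host page must keep working, so
 * the host page is mapped (or re-protected) with the union of the old and
 * new protections, and file-backed data is pread() into place when the
 * page cannot simply be mapped from the file.
 */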
/* map an incomplete host page */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for(addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return -1;
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -EINVAL;

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* read the corresponding file data */
        pread(fd, g2h(start), end - start, offset);

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        /* just update the protection */
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
    }
    return 0;
}
#if defined(__CYGWIN__)
/* Cygwin doesn't have a whole lot of address space.  */
static abi_ulong mmap_next_start = 0x18000000;
#else
static abi_ulong mmap_next_start = 0x40000000;
#endif

unsigned long last_brk;

/* find a free memory area of size 'size'. The search starts at
   'start'. If 'start' == 0, then a default start address is used.
   Return -1 if error.
*/
/* page_init() marks pages used by the host as reserved to be sure not
   to use them. */
static abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
{
    abi_ulong addr, addr1, addr_start;
    int prot;
    unsigned long new_brk;

    new_brk = (unsigned long)sbrk(0);
    if (last_brk && last_brk < new_brk && last_brk == (target_ulong)last_brk) {
        /* This is a hack to catch the host allocating memory with brk().
           If it uses mmap then we lose.
           FIXME: We really want to avoid the host allocating memory in
           the first place, and maybe leave some slack to avoid switching
           to mmap.  */
        page_set_flags(last_brk & TARGET_PAGE_MASK,
                       TARGET_PAGE_ALIGN(new_brk),
                       PAGE_RESERVED);
    }
    last_brk = new_brk;

    size = HOST_PAGE_ALIGN(size);
    start = start & qemu_host_page_mask;
    addr = start;
    if (addr == 0)
        addr = mmap_next_start;
    addr_start = addr;
    for(;;) {
        prot = 0;
        for(addr1 = addr; addr1 < (addr + size); addr1 += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr1);
        }
        if (prot == 0)
            break;
        addr += qemu_host_page_size;
        /* we found nothing */
        if (addr == addr_start)
            return (abi_ulong)-1;
    }
    if (start == 0)
        mmap_next_start = addr + size;
    return addr;
}
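/*
 * target_mmap() below distinguishes three situations: without MAP_FIXED, a
 * free guest region is picked with mmap_find_vma() and mapped at a host
 * address of our choosing; with MAP_FIXED and a file offset whose
 * misalignment differs from that of the start address, the file cannot be
 * mapped directly, so an anonymous mapping is created and the data is
 * pread() into it; otherwise the host mmap() is used directly, with
 * mmap_frag() patching up partially covered host pages at either end.
 */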
/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;
    unsigned long host_start;

    mmap_lock();
#ifdef DEBUG_MMAP
    {
        printf("mmap: start=0x" TARGET_FMT_lx
               " len=0x" TARGET_FMT_lx " prot=%c%c%c flags=",
               start, len,
               prot & PROT_READ ? 'r' : '-',
               prot & PROT_WRITE ? 'w' : '-',
               prot & PROT_EXEC ? 'x' : '-');
        if (flags & MAP_FIXED)
            printf("MAP_FIXED ");
        if (flags & MAP_ANONYMOUS)
            printf("MAP_ANON ");
        switch(flags & MAP_TYPE) {
        case MAP_PRIVATE:
            printf("MAP_PRIVATE ");
            break;
        case MAP_SHARED:
            printf("MAP_SHARED ");
            break;
        default:
            printf("[MAP_TYPE=0x%x] ", flags & MAP_TYPE);
            break;
        }
        printf("fd=%d offset=" TARGET_FMT_lx "\n", fd, offset);
    }
#endif
    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        goto the_end;
    real_start = start & qemu_host_page_mask;

    if (!(flags & MAP_FIXED)) {
        abi_ulong mmap_start;
        void *p;
        host_offset = offset & qemu_host_page_mask;
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        mmap_start = mmap_find_vma(real_start, host_len);
        if (mmap_start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
        /* Note: we prefer to control the mapping address. It is
           especially important if qemu_host_page_size >
           qemu_real_host_page_size */
        p = mmap(g2h(mmap_start),
                 host_len, prot, flags | MAP_FIXED, fd, host_offset);
        if (p == MAP_FAILED)
            goto fail;
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (!(flags & MAP_ANONYMOUS))
            host_start += offset - host_offset;
        start = h2g(host_start);
    } else {
        int flg;
        target_ulong addr;

        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if the requested memory area fits the target address space.
         * It can fail only on a 64-bit host with a 32-bit target; on any
         * other target/host combination the host mmap() handles this
         * error correctly.
         */
        if ((unsigned long)start + len - 1 > (abi_ulong) -1) {
            errno = EINVAL;
            goto fail;
        }

        for(addr = real_start; addr < real_end; addr += TARGET_PAGE_SIZE) {
            flg = page_get_flags(addr);
            if (flg & PAGE_RESERVED) {
                errno = ENXIO;
                goto fail;
            }
        }

        /* worst case: we cannot map the file because the offset is not
           aligned, so we read it */
        if (!(flags & MAP_ANONYMOUS) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /* msync() won't work here, so we return an error if write is
               possible while it is a shared mapping */
            if ((flags & MAP_TYPE) == MAP_SHARED &&
                (prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                                  -1, 0);
            if (retaddr == -1)
                goto fail;
            pread(fd, g2h(start), len, offset);
            if (!(prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, prot);
                if (ret != 0) {
                    start = ret;
                    goto the_end;
                }
            }
            goto the_end;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                prot, flags, fd, offset);
                if (ret == -1)
                    goto fail;
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            prot, flags, fd, offset);
            if (ret == -1)
                goto fail;
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, real_end,
                            prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1)
                goto fail;
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANONYMOUS)
                offset1 = 0;
            else
                offset1 = offset + real_start - start;
            p = mmap(g2h(real_start), real_end - real_start,
                     prot, flags, fd, offset1);
            if (p == MAP_FAILED)
                goto fail;
        }
    }
 the_end1:
    page_set_flags(start, start + len, prot | PAGE_VALID);
 the_end:
#ifdef DEBUG_MMAP
    printf("ret=0x" TARGET_FMT_lx "\n", start);
    page_dump(stdout);
    printf("\n");
#endif
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}
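/*
 * Usage sketch (illustrative only, not the verbatim call site): the guest's
 * mmap/mmap2 syscalls are expected to funnel through target_mmap() from the
 * syscall dispatcher in linux-user/syscall.c, roughly:
 *
 *     ret = get_errno(target_mmap(arg1, arg2, arg3,
 *                                 target_to_host_bitmask(arg4, mmap_flags_tbl),
 *                                 arg5, arg6));
 *
 * where the guest's MAP_* flag bits are first translated to host values.
 */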
int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x%lx len=0x%lx\n", start, len);
#endif
    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        return -EINVAL;
    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for(addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        ret = munmap(g2h(real_start), real_end - real_start);
    }

    if (ret == 0)
        page_set_flags(start, start + len, 0);
    mmap_unlock();
    return ret;
}
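/*
 * target_mremap() handles three cases: with MREMAP_FIXED the caller's new
 * address is passed straight to the host mremap() via the raw syscall; with
 * MREMAP_MAYMOVE but no fixed address, a destination is picked with
 * mmap_find_vma() and forced with MREMAP_FIXED; otherwise the mapping may
 * only be resized in place, and the result is checked (and reverted) if it
 * would fall outside the guest address space.
 */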
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    void *host_addr;

    mmap_lock();

    if (flags & MREMAP_FIXED)
        host_addr = (void *) syscall(__NR_mremap, g2h(old_addr),
                                     old_size, new_size,
                                     flags,
                                     new_addr);
    else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else
            host_addr = (void *) syscall(__NR_mremap, g2h(old_addr),
                                         old_size, new_size,
                                         flags | MREMAP_FIXED,
                                         g2h(mmap_start));
    } else {
        host_addr = mremap(g2h(old_addr), old_size, new_size, flags);
        /* Check if address fits target address space */
        if ((unsigned long)host_addr + new_size > (abi_ulong)-1) {
            /* Revert mremap() changes */
            host_addr = mremap(g2h(old_addr), new_size, old_size, flags);
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size, 0);
        page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
    }
    mmap_unlock();
    return new_addr;
}
int target_msync(abi_ulong start, abi_ulong len, int flags)
{
    abi_ulong end;

    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    if (end == start)
        return 0;

    start &= qemu_host_page_mask;
    return msync(g2h(start), end - start, flags);
}