/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 - 2008 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
 *  MA 02110-1301, USA.
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <sys/mman.h>

#include "qemu.h"
#include "qemu-common.h"
#include "bsd-mman.h"

//#define DEBUG_MMAP
#if defined(USE_NPTL)
pthread_mutex_t mmap_mutex;
static int __thread mmap_lock_count;

void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count)
        abort();
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child)
        pthread_mutex_init(&mmap_mutex, NULL);
    else
        pthread_mutex_unlock(&mmap_mutex);
}
#else
/* We aren't threadsafe to start with, so no need to worry about locking.  */
void mmap_lock(void)
{
}

void mmap_unlock(void)
{
}
#endif
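/* qemu_vmalloc() below backs allocations with anonymous mmap() and, when the
   returned host region happens to fit inside the guest address range, marks
   the covered target pages PAGE_RESERVED so that mmap_find_vma() will not
   hand the same range out to the guest. */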
void *qemu_vmalloc(size_t size)
{
    void *p;
    unsigned long addr;
    mmap_lock();
    /* Use map and mark the pages as used. */
    p = mmap(NULL, size, PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_ANON, -1, 0);

    addr = (unsigned long)p;
    if (addr == (target_ulong) addr) {
        /* Allocated region overlaps guest address space.
           This may recurse.  */
        page_set_flags(addr & TARGET_PAGE_MASK, TARGET_PAGE_ALIGN(addr + size),
                       PAGE_RESERVED);
    }

    mmap_unlock();
    return p;
}
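/* The allocators below prepend a 16-byte header that stores the total
   allocation size, so qemu_free() and qemu_realloc() can recover the length
   from the pointer alone:

     [ size_t size | padding to 16 bytes ][ user data ... ]
     ^ start of mmap()ed region            ^ pointer returned to the caller */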
void *qemu_malloc(size_t size)
{
    char * p;
    size += 16;
    p = qemu_vmalloc(size);
    *(size_t *)p = size;
    return p + 16;
}
/* We use map, which is always zero initialized.  */
void * qemu_mallocz(size_t size)
{
    return qemu_malloc(size);
}
void qemu_free(void *ptr)
{
    /* FIXME: We should unmark the reserved pages here.  However this gets
       complicated when one target page spans multiple host pages, so we
       don't bother.  */
    size_t *p;
    p = (size_t *)((char *)ptr - 16);
    munmap(p, *p);
}
void *qemu_realloc(void *ptr, size_t size)
{
    size_t old_size, copy;
    void *new_ptr;

    if (!ptr)
        return qemu_malloc(size);
    old_size = *(size_t *)((char *)ptr - 16);
    copy = old_size < size ? old_size : size;
    new_ptr = qemu_malloc(size);
    memcpy(new_ptr, ptr, copy);
    qemu_free(ptr);
    return new_ptr;
}
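/* Typical use of the allocator trio defined above (illustrative only):

     char *buf = qemu_malloc(64);      // page-backed, size header hidden
     buf = qemu_realloc(buf, 4096);    // old contents carried over
     qemu_free(buf);                   // munmap()s the whole allocation      */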
/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

#ifdef DEBUG_MMAP
    printf("mprotect: start=0x" TARGET_FMT_lx
           " len=0x" TARGET_FMT_lx " prot=%c%c%c\n", start, len,
           prot & PROT_READ ? 'r' : '-',
           prot & PROT_WRITE ? 'w' : '-',
           prot & PROT_EXEC ? 'x' : '-');
#endif

    if ((start & ~TARGET_PAGE_MASK) != 0)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    if (len == 0)
        return 0;

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for(addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h(host_start), qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = prot;
        for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h(host_end - qemu_host_page_size), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h(host_start), host_end - host_start, prot);
        if (ret != 0)
            goto error;
    }
    page_set_flags(start, start + len, prot | PAGE_VALID);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}
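/* When qemu_host_page_size is larger than TARGET_PAGE_SIZE, a single host
   page can back several guest pages with different protections or contents.
   For example (sizes are illustrative): with 4 KiB guest pages on an 8 KiB
   host page, mapping guest page 0x1000-0x1fff touches only half of the host
   page 0x0000-0x1fff. mmap_frag() below fills in such partial host pages
   while preserving whatever the neighbouring guest pages already hold. */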
/* map an incomplete host page */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for(addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANON, -1, 0);
        if (p == MAP_FAILED)
            return -1;
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANON)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -EINVAL;

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* read the corresponding file data */
        pread(fd, g2h(start), end - start, offset);

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        /* just update the protection */
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
    }
    return 0;
}
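/* Starting point for mmap_find_vma()'s search when the guest does not request
   a specific address; it is bumped past each successful placement so that
   consecutive searches do not rescan the same region. */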
#if defined(__CYGWIN__)
/* Cygwin doesn't have a whole lot of address space.  */
static abi_ulong mmap_next_start = 0x18000000;
#else
static abi_ulong mmap_next_start = 0x40000000;
#endif

unsigned long last_brk;
/* find a free memory area of size 'size'. The search starts at
   'start'. If 'start' == 0, then a default start address is used.
   Return -1 if error.
*/
/* page_init() marks pages used by the host as reserved to be sure not
   to use them. */
static abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
{
    abi_ulong addr, addr1, addr_start;
    int prot;
    unsigned long new_brk;

    new_brk = (unsigned long)sbrk(0);
    if (last_brk && last_brk < new_brk && last_brk == (target_ulong)last_brk) {
        /* This is a hack to catch the host allocating memory with brk().
           If it uses mmap then we lose.
           FIXME: We really want to avoid the host allocating memory in
           the first place, and maybe leave some slack to avoid switching
           to mmap.  */
        page_set_flags(last_brk & TARGET_PAGE_MASK,
                       TARGET_PAGE_ALIGN(new_brk),
                       PAGE_RESERVED);
    }
    last_brk = new_brk;

    size = HOST_PAGE_ALIGN(size);
    start = start & qemu_host_page_mask;
    addr = start;
    if (addr == 0)
        addr = mmap_next_start;
    addr_start = addr;
    for(;;) {
        prot = 0;
        for(addr1 = addr; addr1 < (addr + size); addr1 += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr1);
        }
        if (prot == 0)
            break;
        addr += qemu_host_page_size;
        /* we found nothing */
        if (addr == addr_start)
            return (abi_ulong)-1;
    }
    if (start == 0)
        mmap_next_start = addr + size;
    return addr;
}
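/* target_mmap() handles a guest mmap() request in two ways: without MAP_FIXED
   it lets mmap_find_vma() pick a free guest range and maps the whole
   host-page-aligned region there in one go; with MAP_FIXED it validates the
   requested range, falls back to pread() when the file offset and the start
   address are not congruent modulo the host page size, and otherwise maps the
   region as a leading fragment, an aligned middle, and a trailing fragment. */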
/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;
    unsigned long host_start;

    mmap_lock();
#ifdef DEBUG_MMAP
    {
        printf("mmap: start=0x" TARGET_FMT_lx
               " len=0x" TARGET_FMT_lx " prot=%c%c%c flags=",
               start, len,
               prot & PROT_READ ? 'r' : '-',
               prot & PROT_WRITE ? 'w' : '-',
               prot & PROT_EXEC ? 'x' : '-');
        if (flags & MAP_FIXED)
            printf("MAP_FIXED ");
        if (flags & MAP_ANON)
            printf("MAP_ANON ");
        switch(flags & TARGET_BSD_MAP_FLAGMASK) {
        case MAP_PRIVATE:
            printf("MAP_PRIVATE ");
            break;
        case MAP_SHARED:
            printf("MAP_SHARED ");
            break;
        default:
            printf("[MAP_FLAGMASK=0x%x] ", flags & TARGET_BSD_MAP_FLAGMASK);
            break;
        }
        printf("fd=%d offset=" TARGET_FMT_lx "\n", fd, offset);
    }
#endif
    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        goto the_end;
    real_start = start & qemu_host_page_mask;

    if (!(flags & MAP_FIXED)) {
        abi_ulong mmap_start;
        void *p;
        host_offset = offset & qemu_host_page_mask;
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        mmap_start = mmap_find_vma(real_start, host_len);
        if (mmap_start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
        /* Note: we prefer to control the mapping address. It is
           especially important if qemu_host_page_size >
           qemu_real_host_page_size */
        p = mmap(g2h(mmap_start),
                 host_len, prot, flags | MAP_FIXED, fd, host_offset);
        if (p == MAP_FAILED)
            goto fail;
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (!(flags & MAP_ANON))
            host_start += offset - host_offset;
        start = h2g(host_start);
    } else {
        int flg;
        target_ulong addr;

        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        for(addr = real_start; addr < real_end; addr += TARGET_PAGE_SIZE) {
            flg = page_get_flags(addr);
            if (flg & PAGE_RESERVED) {
                errno = ENXIO;
                goto fail;
            }
        }

        /* worst case: we cannot map the file because the offset is not
           aligned, so we read it */
        if (!(flags & MAP_ANON) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /* msync() won't work here, so we return an error if write is
               possible while it is a shared mapping */
            if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
                (prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANON,
                                  -1, 0);
            if (retaddr == -1)
                goto fail;
            pread(fd, g2h(start), len, offset);
            if (!(prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, prot);
                if (ret != 0) {
                    start = ret;
                    goto the_end;
                }
            }
            goto the_end;
        }
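        /* Only the host-page-aligned middle of the request can be mapped
           directly; the first and last host pages may already be partially
           occupied by neighbouring guest mappings, so they go through
           mmap_frag(), which preserves the contents and protection of the
           guest pages that are not part of this request. */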
        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                prot, flags, fd, offset);
                if (ret == -1)
                    goto fail;
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            prot, flags, fd, offset);
            if (ret == -1)
                goto fail;
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, real_end,
                            prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1)
                goto fail;
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANON)
                offset1 = 0;
            else
                offset1 = offset + real_start - start;
            p = mmap(g2h(real_start), real_end - real_start,
                     prot, flags, fd, offset1);
            if (p == MAP_FAILED)
                goto fail;
        }
    }
 the_end1:
    page_set_flags(start, start + len, prot | PAGE_VALID);
 the_end:
#ifdef DEBUG_MMAP
    printf("ret=0x" TARGET_FMT_lx "\n", start);
    page_dump(stdout);
    printf("\n");
#endif
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}
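/* Illustrative guest-side effect (values are made up): a guest call
   mmap(NULL, 8192, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0) reaches
   target_mmap() with start == 0 and no MAP_FIXED, so mmap_find_vma() picks a
   free range at or above mmap_next_start and that guest address is returned. */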
int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x%lx len=0x%lx\n", start, len);
#endif
    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        return -EINVAL;
    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for(addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        ret = munmap(g2h(real_start), real_end - real_start);
    }

    if (ret == 0)
        page_set_flags(start, start + len, 0);
    mmap_unlock();
    return ret;
}
int target_msync(abi_ulong start, abi_ulong len, int flags)
{
    abi_ulong end;

    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    if (end == start)
        return 0;

    start &= qemu_host_page_mask;
    return msync(g2h(start), end - start, flags);
}