/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
 *  MA 02110-1301, USA.
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <sys/mman.h>

#include "qemu.h"

//#define DEBUG_MMAP
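
/*
 * A target page (TARGET_PAGE_SIZE) can be smaller than a host page
 * (qemu_host_page_size), so a guest mmap/mprotect/munmap request may only
 * partially cover the host pages at either end of its range.  The helpers
 * below therefore split each request into an unaligned head fragment, an
 * aligned middle that is handed to the host kernel directly, and an
 * unaligned tail fragment, and they track per-target-page protections with
 * page_set_flags()/page_get_flags() so that neighbouring target pages that
 * share a host page keep their own permissions.
 */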
/* NOTE: all the constants are the HOST ones */
int target_mprotect(unsigned long start, unsigned long len, int prot)
{
    unsigned long end, host_start, host_end, addr;
    int prot1, ret;

#ifdef DEBUG_MMAP
    printf("mprotect: start=0x%lx len=0x%lx prot=%c%c%c\n", start, len,
           prot & PROT_READ ? 'r' : '-',
           prot & PROT_WRITE ? 'w' : '-',
           prot & PROT_EXEC ? 'x' : '-');
#endif

    if ((start & ~TARGET_PAGE_MASK) != 0)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
        return -EINVAL;
    if (len == 0)
        return 0;

    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for(addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect((void *)host_start, qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0)
            return ret;
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = prot;
        for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect((void *)(host_end - qemu_host_page_size), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0)
            return ret;
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect((void *)host_start, host_end - host_start, prot);
        if (ret != 0)
            return ret;
    }
    page_set_flags(start, start + len, prot | PAGE_VALID);
    return 0;
}
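
/*
 * Worked example (illustrative numbers only, assuming TARGET_PAGE_SIZE is
 * 4K and qemu_host_page_size is 16K): target_mprotect(0x5000, 0x4000,
 * PROT_READ) covers target pages 0x5000..0x8fff.  The head host page
 * 0x4000..0x7fff is re-protected with PROT_READ OR'ed with the recorded
 * flags of the untouched target page at 0x4000, the tail host page
 * 0x8000..0xbfff with PROT_READ OR'ed with the flags of 0x9000..0xbfff,
 * and no aligned middle remains; page_set_flags() then records
 * PROT_READ | PAGE_VALID for 0x5000..0x8fff only.
 */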
/* map an incomplete host page */
int mmap_frag(unsigned long host_start,
              unsigned long start, unsigned long end,
              int prot, int flags, int fd, unsigned long offset)
{
    unsigned long host_end, ret, addr;
    int prot1, prot_new;

    host_end = host_start + qemu_host_page_size;

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for(addr = host_start; addr < host_end; addr++) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        ret = (long)mmap((void *)host_start, qemu_host_page_size, prot,
                         flags | MAP_ANONYMOUS, -1, 0);
        if (ret == -1)
            return ret;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
#ifndef __APPLE__
        if ((flags & MAP_TYPE) == MAP_SHARED &&
#else
        if ((flags & MAP_SHARED) &&
#endif
            (prot & PROT_WRITE))
            return -EINVAL;

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE))
            mprotect((void *)host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* read the corresponding file data */
        pread(fd, (void *)start, end - start, offset);

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect((void *)host_start, qemu_host_page_size, prot_new);
    } else {
        /* just update the protection */
        if (prot_new != prot1) {
            mprotect((void *)host_start, qemu_host_page_size, prot_new);
        }
    }
    return 0;
}
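
/*
 * mmap_frag() is only called for the host page that contains the head or
 * the tail of a target mapping.  If none of the target pages inside that
 * host page are currently mapped (prot1 == 0), the whole host page is first
 * allocated anonymously; file-backed requests are then satisfied by
 * pread()ing the data into the requested range rather than by mapping the
 * file, which is why writable shared file mappings have to be rejected.
 */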
/* NOTE: all the constants are the HOST ones */
long target_mmap(unsigned long start, unsigned long len, int prot,
                 int flags, int fd, unsigned long offset)
{
    unsigned long ret, end, host_start, host_end, retaddr, host_offset, host_len;
#if defined(__alpha__) || defined(__sparc__) || defined(__x86_64__)
    static unsigned long last_start = 0x40000000;
#endif

#ifdef DEBUG_MMAP
    {
        printf("mmap: start=0x%lx len=0x%lx prot=%c%c%c flags=",
               start, len,
               prot & PROT_READ ? 'r' : '-',
               prot & PROT_WRITE ? 'w' : '-',
               prot & PROT_EXEC ? 'x' : '-');
        if (flags & MAP_FIXED)
            printf("MAP_FIXED ");
        if (flags & MAP_ANONYMOUS)
            printf("MAP_ANON ");
#ifndef MAP_TYPE
# define MAP_TYPE 0x3
#endif
        switch(flags & MAP_TYPE) {
        case MAP_PRIVATE:
            printf("MAP_PRIVATE ");
            break;
        case MAP_SHARED:
            printf("MAP_SHARED ");
            break;
        default:
            printf("[MAP_TYPE=0x%x] ", flags & MAP_TYPE);
            break;
        }
        printf("fd=%d offset=%lx\n", fd, offset);
    }
#endif

    if (offset & ~TARGET_PAGE_MASK)
        return -EINVAL;

    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        return start;
    host_start = start & qemu_host_page_mask;

    if (!(flags & MAP_FIXED)) {
#if defined(__alpha__) || defined(__sparc__) || defined(__x86_64__)
        /* tell the kernel to search at the same place as i386 */
        if (host_start == 0) {
            host_start = last_start;
            last_start += HOST_PAGE_ALIGN(len);
        }
#endif
        if (qemu_host_page_size != qemu_real_host_page_size) {
            /* NOTE: this code is only for debugging with '-p' option */
            /* reserve a memory area */
            host_len = HOST_PAGE_ALIGN(len) + qemu_host_page_size - TARGET_PAGE_SIZE;
            host_start = (long)mmap((void *)host_start, host_len, PROT_NONE,
                                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (host_start == -1)
                return host_start;
            host_end = host_start + host_len;
            start = HOST_PAGE_ALIGN(host_start);
            end = start + HOST_PAGE_ALIGN(len);
            if (start > host_start)
                munmap((void *)host_start, start - host_start);
            if (end < host_end)
                munmap((void *)end, host_end - end);
            /* use it as a fixed mapping */
            flags |= MAP_FIXED;
        } else {
            /* if not fixed, no need to do anything */
            host_offset = offset & qemu_host_page_mask;
            host_len = len + offset - host_offset;
            start = (long)mmap((void *)host_start, host_len,
                               prot, flags, fd, host_offset);
            if (start == -1)
                return start;
            /* update start so that it points to the file position at 'offset' */
            if (!(flags & MAP_ANONYMOUS))
                start += offset - host_offset;
            goto the_end1;
        }
    }

    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    end = start + len;
    host_end = HOST_PAGE_ALIGN(end);

    /* worst case: we cannot map the file because the offset is not
       aligned, so we read it */
    if (!(flags & MAP_ANONYMOUS) &&
        (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
#ifndef __APPLE__
        if ((flags & MAP_TYPE) == MAP_SHARED &&
#else
        if ((flags & MAP_SHARED) &&
#endif
            (prot & PROT_WRITE))
            return -EINVAL;
        retaddr = target_mmap(start, len, prot | PROT_WRITE,
                              MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                              -1, 0);
        if (retaddr == -1)
            return retaddr;
        pread(fd, (void *)start, len, offset);
        if (!(prot & PROT_WRITE)) {
            ret = target_mprotect(start, len, prot);
            if (ret != 0)
                return ret;
        }
        goto the_end;
    }

    /* handle the start of the mapping */
    if (start > host_start) {
        if (host_end == host_start + qemu_host_page_size) {
            /* one single host page */
            ret = mmap_frag(host_start, start, end,
                            prot, flags, fd, offset);
            if (ret == -1)
                return ret;
            goto the_end1;
        }
        ret = mmap_frag(host_start, start, host_start + qemu_host_page_size,
                        prot, flags, fd, offset);
        if (ret == -1)
            return ret;
        host_start += qemu_host_page_size;
    }
    /* handle the end of the mapping */
    if (end < host_end) {
        ret = mmap_frag(host_end - qemu_host_page_size,
                        host_end - qemu_host_page_size, host_end,
                        prot, flags, fd,
                        offset + host_end - qemu_host_page_size - start);
        if (ret == -1)
            return ret;
        host_end -= qemu_host_page_size;
    }

    /* map the middle (easier) */
    if (host_start < host_end) {
        unsigned long offset1;
        if (flags & MAP_ANONYMOUS)
            offset1 = 0;
        else
            offset1 = offset + host_start - start;
        ret = (long)mmap((void *)host_start, host_end - host_start,
                         prot, flags, fd, offset1);
        if (ret == -1)
            return ret;
    }
 the_end1:
    page_set_flags(start, start + len, prot | PAGE_VALID);
 the_end:
#ifdef DEBUG_MMAP
    printf("target_mmap: ret=0x%lx\n", (long)start);
    page_dump(stdout);
    printf("\n");
#endif
    return start;
}
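
/*
 * Illustration (hypothetical numbers, assuming 4K target pages on 16K host
 * pages): a MAP_FIXED file mapping of 0xa000 bytes at target address 0x5000
 * with a compatibly aligned file offset is built in three steps: mmap_frag()
 * for the head host page 0x4000..0x7fff, one direct host mmap() for the
 * aligned middle 0x8000..0xbfff, and mmap_frag() for the tail host page
 * 0xc000..0xffff.  If the file offset and the target address are not equally
 * aligned within a host page, the whole range is instead mapped anonymously
 * and filled with pread() (the "worst case" path above).
 */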
int target_munmap(unsigned long start, unsigned long len)
{
    unsigned long end, host_start, host_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x%lx len=0x%lx\n", start, len);
#endif
    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        return -EINVAL;
    end = start + len;
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);

    if (start > host_start) {
        /* handle host page containing start */
        prot = 0;
        for(addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = host_end;
        }
        if (prot != 0)
            host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot = 0;
        for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            host_end -= qemu_host_page_size;
    }

    /* unmap what we can */
    if (host_start < host_end) {
        ret = munmap((void *)host_start, host_end - host_start);
        if (ret != 0)
            return ret;
    }
    page_set_flags(start, start + len, 0);
    return 0;
}
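
/*
 * A host page is only really munmap()ed when none of the target pages it
 * contains remain valid: if the head or tail host page still holds live
 * target pages (prot != 0 above), it stays mapped and only the flags of the
 * unmapped target range are cleared with page_set_flags().
 */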
/* XXX: currently, we only handle MAP_ANONYMOUS and not MAP_FIXED
   blocks which have been allocated starting on a host page */
long target_mremap(unsigned long old_addr, unsigned long old_size,
                   unsigned long new_size, unsigned long flags,
                   unsigned long new_addr)
{
#ifndef __APPLE__
    int prot;

    /* XXX: use 5 args syscall */
    new_addr = (long)mremap((void *)old_addr, old_size, new_size, flags);
    if (new_addr == -1)
        return new_addr;
    prot = page_get_flags(old_addr);
    page_set_flags(old_addr, old_addr + old_size, 0);
    page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
    return new_addr;
#else
    qerror("target_mremap: unsupported\n");
    return -1;           /* report failure if qerror() returns */
#endif
}
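
/*
 * The mremap()-based path is only compiled on non-Apple hosts; on Darwin,
 * where mremap(2) is not available, the request is simply rejected through
 * qerror().
 */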
int target_msync(unsigned long start, unsigned long len, int flags)
{
    unsigned long end;

    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    if (end == start)
        return 0;

    start &= qemu_host_page_mask;
    return msync((void *)start, end - start, flags);
}