/*
 * Wine memory mappings support
 *
 * Copyright 2000, 2004 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */
#include "config.h"
#include "wine/port.h"

#include <assert.h>
#include <ctype.h>
#include <fcntl.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif
#ifdef HAVE_STDINT_H
# include <stdint.h>
#endif

#include "wine/library.h"
#include "wine/list.h"
#ifdef HAVE_MMAP

struct reserved_area
{
    struct list entry;
    void       *base;
    size_t      size;
};

static struct list reserved_areas = LIST_INIT(reserved_areas);
static const unsigned int granularity_mask = 0xffff;  /* reserved areas have 64k granularity */
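
/* Minimal sketch of the 64k rounding this mask implies; the helpers below
 * are hypothetical, kept disabled, and not part of this file's interface. */
#if 0
static inline char *round_down_64k( char *p )
{
    return (char *)((unsigned long)p & ~(unsigned long)granularity_mask);
}
static inline char *round_up_64k( char *p )
{
    return round_down_64k( p + granularity_mask );
}
#endif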

#ifndef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif
#ifndef MAP_PRIVATE
#define MAP_PRIVATE 0
#endif
#ifndef MAP_ANON
#define MAP_ANON 0
#endif

static inline int get_fdzero(void)
{
    static int fd = -1;

    if (MAP_ANON == 0 && fd == -1)
    {
        if ((fd = open( "/dev/zero", O_RDONLY )) == -1)
        {
            perror( "/dev/zero: open" );
            exit(1);
        }
    }
    return fd;
}
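
/* Minimal sketch of what get_fdzero() unifies: with a real MAP_ANON the fd
 * argument is ignored, and where MAP_ANON was defined to 0 above the same
 * call becomes a private mapping of /dev/zero.  anon_page_example is a
 * hypothetical helper, kept disabled. */
#if 0
static void *anon_page_example( size_t size )
{
    return mmap( NULL, size, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANON, get_fdzero(), 0 );
}
#endif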

#if (defined(__svr4__) || defined(__NetBSD__)) && !defined(MAP_TRYFIXED)
/***********************************************************************
 *           try_mmap_fixed
 *
 * The purpose of this routine is to emulate the behaviour of
 * the Linux mmap() routine if a non-NULL address is passed,
 * but the MAP_FIXED flag is not set.  Linux in this case tries
 * to place the mapping at the specified address, *unless* the
 * range is already in use.  Solaris, however, completely ignores
 * the address argument in this case.
 *
 * As Wine code occasionally relies on the Linux behaviour, e.g. to
 * be able to map non-relocatable PE executables to their proper
 * start addresses, or to map the DOS memory to 0, this routine
 * emulates the Linux behaviour by checking whether the desired
 * address range is still available, and placing the mapping there
 * using MAP_FIXED if so.
 */
static int try_mmap_fixed (void *addr, size_t len, int prot, int flags,
                           int fildes, off_t off)
{
    char * volatile result = NULL;
    int pagesize = getpagesize();
    pid_t pid;

    /* We only try to map to a fixed address if
       addr is non-NULL and properly aligned,
       and MAP_FIXED isn't already specified. */

    if ( !addr )
        return 0;
    if ( (uintptr_t)addr & (pagesize-1) )
        return 0;
    if ( flags & MAP_FIXED )
        return 0;

    /* We use vfork() to freeze all threads of the
       current process.  This allows us to check without
       race condition whether the desired memory range is
       already in use.  Note that because vfork() shares
       the address spaces between parent and child, we
       can actually perform the mapping in the child. */

    if ( (pid = vfork()) == -1 )
    {
        perror("try_mmap_fixed: vfork");
        exit(1);
    }
    if ( pid == 0 )
    {
        int i;
        char vec;

        /* We call mincore() for every page in the desired range.
           If any of these calls succeeds, the page is already
           mapped and we must fail. */
        for ( i = 0; i < len; i += pagesize )
            if ( mincore( (caddr_t)addr + i, pagesize, &vec ) != -1 )
                _exit(1);

        /* Perform the mapping with MAP_FIXED set.  This is safe
           now, as none of the pages is currently in use. */
        result = mmap( addr, len, prot, flags | MAP_FIXED, fildes, off );
        if ( result == addr )
            _exit(0);

        if ( result != (void *) -1 ) /* This should never happen ... */
            munmap( result, len );

        _exit(1);
    }

    /* vfork() lets the parent continue only after the child
       has exited.  Furthermore, Wine sets SIGCHLD to SIG_IGN,
       so we don't need to wait for the child. */

    return result == addr;
}

#elif defined(__APPLE__)

#include <mach/mach_init.h>
#include <mach/vm_map.h>

/*
 * On Darwin, we can use the Mach call vm_allocate to allocate
 * anonymous memory at the specified address, and then use mmap with
 * MAP_FIXED to replace the mapping.
 */
static int try_mmap_fixed (void *addr, size_t len, int prot, int flags,
                           int fildes, off_t off)
{
    vm_address_t result = (vm_address_t)addr;

    if (!vm_allocate(mach_task_self(),&result,len,0))
    {
        if (mmap( (void *)result, len, prot, flags | MAP_FIXED, fildes, off ) != MAP_FAILED)
            return 1;
        vm_deallocate(mach_task_self(),result,len);
    }
    return 0;
}

#endif  /* (__svr4__ || __NetBSD__) && !MAP_TRYFIXED */

/***********************************************************************
 *           wine_anon_mmap
 *
 * Portable wrapper for anonymous mmaps
 */
void *wine_anon_mmap( void *start, size_t size, int prot, int flags )
{
#ifdef MAP_SHARED
    flags &= ~MAP_SHARED;
#endif

    /* Linux EINVAL's on us if we don't pass MAP_PRIVATE to an anon mmap */
    flags |= MAP_PRIVATE | MAP_ANON;

    if (!(flags & MAP_FIXED))
    {
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
        /* Even FreeBSD 5.3 does not properly support NULL here. */
        if( start == NULL ) start = (void *)0x110000;
#endif

#ifdef MAP_TRYFIXED
        /* If available, this will attempt a fixed mapping in-kernel */
        flags |= MAP_TRYFIXED;
#elif defined(__svr4__) || defined(__NetBSD__) || defined(__APPLE__)
        if ( try_mmap_fixed( start, size, prot, flags, get_fdzero(), 0 ) )
            return start;
#endif
    }
    return mmap( start, size, prot, flags, get_fdzero(), 0 );
}
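
/* Hypothetical usage, kept disabled: request 64k of anonymous read-write
 * memory near a preferred address; without MAP_FIXED the hint is dropped
 * if that range is already occupied. */
#if 0
static void *anon_example(void)
{
    return wine_anon_mmap( (void *)0x400000, 0x10000, PROT_READ | PROT_WRITE, 0 );
}
#endif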

/***********************************************************************
 *           mmap_reserve
 *
 * mmap wrapper used for reservations, only maps the specified address
 */
static inline int mmap_reserve( void *addr, size_t size )
{
    void *ptr;
    int flags = MAP_PRIVATE | MAP_ANON | MAP_NORESERVE;

#ifdef MAP_TRYFIXED
    flags |= MAP_TRYFIXED;
#elif defined(__APPLE__)
    return try_mmap_fixed( addr, size, PROT_NONE, flags, get_fdzero(), 0 );
#endif
    ptr = mmap( addr, size, PROT_NONE, flags, get_fdzero(), 0 );
    if (ptr != addr && ptr != (void *)-1) munmap( ptr, size );
    return (ptr == addr);
}

/***********************************************************************
 *           reserve_area
 *
 * Reserve as much memory as possible in the given area.
 */
static inline void reserve_area( void *addr, void *end )
{
    size_t size = (char *)end - (char *)addr;

#if (defined(__svr4__) || defined(__NetBSD__)) && !defined(MAP_TRYFIXED)
    /* try_mmap_fixed is inefficient when using vfork, so we need a different algorithm here */
    /* we assume no other thread is running at this point */
    size_t i, pagesize = getpagesize();
    char vec;

    while (size)
    {
        for (i = 0; i < size; i += pagesize)
            if (mincore( (caddr_t)addr + i, pagesize, &vec ) != -1) break;

        i &= ~granularity_mask;
        if (i && mmap( addr, i, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                       get_fdzero(), 0 ) != (void *)-1)
            wine_mmap_add_reserved_area( addr, i );

        i += granularity_mask + 1;
        if ((char *)addr + i < (char *)addr) break;  /* overflow */
        addr = (char *)addr + i;
        if (addr >= end) break;
        size = (char *)end - (char *)addr;
    }
#else
    if (!size) return;

    if (mmap_reserve( addr, size ))
    {
        wine_mmap_add_reserved_area( addr, size );
        return;
    }
    if (size > granularity_mask + 1)
    {
        size_t new_size = (size / 2) & ~granularity_mask;
        reserve_area( addr, (char *)addr + new_size );
        reserve_area( (char *)addr + new_size, end );
    }
#endif
}
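
/* Worked example of the split-and-retry strategy above: if some 64k block
 * inside [addr, end) is busy, mmap_reserve() on the whole range fails, so
 * the range is halved at a 64k-aligned boundary and each half is retried;
 * the recursion bottoms out once a piece is a single 64k granule, leaving
 * the busy granules unreserved and everything else registered. */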

/***********************************************************************
 *           reserve_malloc_space
 *
 * Solaris malloc is not smart enough to obtain space through mmap(), so try to make
 * sure that there is some available sbrk() space before we reserve other things.
 */
static inline void reserve_malloc_space( size_t size )
{
#ifdef __sun
    size_t i, count = size / 1024;
    void **ptrs = malloc( count * sizeof(ptrs[0]) );

    if (!ptrs) return;

    for (i = 0; i < count; i++) if (!(ptrs[i] = malloc( 1024 ))) break;
    if (i--)  /* free everything except the last one */
        while (i) free( ptrs[--i] );
    free( ptrs );
#endif
}

/***********************************************************************
 *           reserve_dos_area
 *
 * Reserve the DOS area (0x00000000-0x00110000).
 */
static inline void reserve_dos_area(void)
{
    const size_t page_size = getpagesize();
    const size_t dos_area_size = 0x110000;
    void *ptr;

    /* first page has to be handled specially */
    ptr = wine_anon_mmap( (void *)page_size, dos_area_size - page_size, PROT_NONE, MAP_NORESERVE );
    if (ptr != (void *)page_size)
    {
        if (ptr != (void *)-1) munmap( ptr, dos_area_size - page_size );
        return;
    }
    /* now add first page with MAP_FIXED */
    wine_anon_mmap( NULL, page_size, PROT_NONE, MAP_NORESERVE|MAP_FIXED );
    wine_mmap_add_reserved_area( NULL, dos_area_size );
}

/***********************************************************************
 *           mmap_init
 */
void mmap_init(void)
{
#ifdef __i386__
    struct reserved_area *area;
    struct list *ptr;
    char stack;
    char * const stack_ptr = &stack;
    char *user_space_limit = (char *)0x7ffe0000;

    reserve_malloc_space( 8 * 1024 * 1024 );

    if (!list_head( &reserved_areas ))
    {
        /* if we don't have a preloader, try to reserve some space below 2Gb */
        reserve_area( (void *)0x00110000, (void *)0x40000000 );
    }

    /* check for a reserved area starting at the user space limit */
    /* to avoid wasting time trying to allocate it again */
    LIST_FOR_EACH( ptr, &reserved_areas )
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if ((char *)area->base > user_space_limit) break;
        if ((char *)area->base + area->size > user_space_limit)
        {
            user_space_limit = (char *)area->base + area->size;
            break;
        }
    }

    if (stack_ptr >= user_space_limit)
    {
        char *end = 0;
        char *base = stack_ptr - ((unsigned int)stack_ptr & granularity_mask) - (granularity_mask + 1);
        if (base > user_space_limit) reserve_area( user_space_limit, base );
        base = stack_ptr - ((unsigned int)stack_ptr & granularity_mask) + (granularity_mask + 1);
#if defined(linux) || defined(__FreeBSD__) || defined (__FreeBSD_kernel__)
        /* Heuristic: assume the stack is near the end of the address */
        /* space, this avoids a lot of futile allocation attempts */
        end = (char *)(((unsigned long)base + 0x0fffffff) & 0xf0000000);
#endif
        reserve_area( base, end );
    }
    else reserve_area( user_space_limit, 0 );

    /* reserve the DOS area if not already done */

    ptr = list_head( &reserved_areas );
    if (ptr)
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if (!area->base) return;  /* already reserved */
    }
    reserve_dos_area();

#elif defined(__x86_64__)

    if (!list_head( &reserved_areas ))
    {
        /* if we don't have a preloader, try to reserve the space now */
        reserve_area( (void *)0x000000010000, (void *)0x000068000000 );
        reserve_area( (void *)0x00007ff00000, (void *)0x00007fff0000 );
        reserve_area( (void *)0x7ffffe000000, (void *)0x7fffffff0000 );
    }

#endif
}

/***********************************************************************
 *           wine_mmap_add_reserved_area
 *
 * Add an address range to the list of reserved areas.
 * Caller must have made sure the range is not used by anything else.
 *
 * Note: the reserved areas functions are not reentrant, caller is
 * responsible for proper locking.
 */
void wine_mmap_add_reserved_area( void *addr, size_t size )
{
    struct reserved_area *area;
    struct list *ptr;

    if (!((char *)addr + size)) size--;  /* avoid wrap-around */

    LIST_FOR_EACH( ptr, &reserved_areas )
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if (area->base > addr)
        {
            /* try to merge with the next one */
            if ((char *)addr + size == (char *)area->base)
            {
                area->base = addr;
                area->size += size;
                return;
            }
            break;
        }
        else if ((char *)area->base + area->size == (char *)addr)
        {
            /* merge with the previous one */
            area->size += size;

            /* try to merge with the next one too */
            if ((ptr = list_next( &reserved_areas, ptr )))
            {
                struct reserved_area *next = LIST_ENTRY( ptr, struct reserved_area, entry );
                if ((char *)addr + size == (char *)next->base)
                {
                    area->size += next->size;
                    list_remove( &next->entry );
                    free( next );
                }
            }
            return;
        }
    }

    if ((area = malloc( sizeof(*area) )))
    {
        area->base = addr;
        area->size = size;
        list_add_before( ptr, &area->entry );
    }
}
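
/* Hypothetical usage, kept disabled: grab a range by hand, then register it
 * so that adjacent entries in the sorted list coalesce automatically. */
#if 0
static void add_area_example(void)
{
    void *base = wine_anon_mmap( NULL, 0x20000, PROT_NONE, MAP_NORESERVE );
    if (base != (void *)-1) wine_mmap_add_reserved_area( base, 0x20000 );
}
#endif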

/***********************************************************************
 *           wine_mmap_remove_reserved_area
 *
 * Remove an address range from the list of reserved areas.
 * If 'unmap' is non-zero the range is unmapped too.
 *
 * Note: the reserved areas functions are not reentrant, caller is
 * responsible for proper locking.
 */
void wine_mmap_remove_reserved_area( void *addr, size_t size, int unmap )
{
    struct reserved_area *area;
    struct list *ptr;

    if (!((char *)addr + size)) size--;  /* avoid wrap-around */

    ptr = list_head( &reserved_areas );
    /* find the first area covering address */
    while (ptr)
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if ((char *)area->base >= (char *)addr + size) break;  /* outside the range */
        if ((char *)area->base + area->size > (char *)addr)  /* overlaps range */
        {
            if (area->base >= addr)
            {
                if ((char *)area->base + area->size > (char *)addr + size)
                {
                    /* range overlaps beginning of area only -> shrink area */
                    if (unmap) munmap( area->base, (char *)addr + size - (char *)area->base );
                    area->size -= (char *)addr + size - (char *)area->base;
                    area->base = (char *)addr + size;
                    break;
                }
                else
                {
                    /* range contains the whole area -> remove area completely */
                    ptr = list_next( &reserved_areas, ptr );
                    if (unmap) munmap( area->base, area->size );
                    list_remove( &area->entry );
                    free( area );
                    continue;
                }
            }
            else
            {
                if ((char *)area->base + area->size > (char *)addr + size)
                {
                    /* range is in the middle of area -> split area in two */
                    struct reserved_area *new_area = malloc( sizeof(*new_area) );
                    if (new_area)
                    {
                        new_area->base = (char *)addr + size;
                        new_area->size = (char *)area->base + area->size - (char *)new_area->base;
                        list_add_after( ptr, &new_area->entry );
                    }
                    else size = (char *)area->base + area->size - (char *)addr;
                    area->size = (char *)addr - (char *)area->base;
                    if (unmap) munmap( addr, size );
                    break;
                }
                else
                {
                    /* range overlaps end of area only -> shrink area */
                    if (unmap) munmap( addr, (char *)area->base + area->size - (char *)addr );
                    area->size = (char *)addr - (char *)area->base;
                }
            }
        }
        ptr = list_next( &reserved_areas, ptr );
    }
}
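
/* Hypothetical usage, kept disabled: give back the first 64k of a reserved
 * range; a non-zero 'unmap' also returns the pages to the system, and the
 * list entry is shrunk, split or removed as needed. */
#if 0
static void remove_area_example( void *base )
{
    wine_mmap_remove_reserved_area( base, 0x10000, 1 );
}
#endif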

/***********************************************************************
 *           wine_mmap_is_in_reserved_area
 *
 * Check if the specified range is included in a reserved area.
 * Returns 1 if range is fully included, 0 if range is not included
 * at all, and -1 if it is only partially included.
 *
 * Note: the reserved areas functions are not reentrant, caller is
 * responsible for proper locking.
 */
int wine_mmap_is_in_reserved_area( void *addr, size_t size )
{
    struct reserved_area *area;
    struct list *ptr;

    LIST_FOR_EACH( ptr, &reserved_areas )
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if (area->base > addr) break;
        if ((char *)area->base + area->size <= (char *)addr) continue;
        /* area must contain block completely */
        if ((char *)area->base + area->size < (char *)addr + size) return -1;
        return 1;
    }
    return 0;
}
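
/* Hypothetical mapping of the three return values, kept disabled. */
#if 0
static const char *check_example( void *p, size_t len )
{
    switch (wine_mmap_is_in_reserved_area( p, len ))
    {
    case 1:  return "fully inside a reserved area";
    case 0:  return "not reserved at all";
    default: return "partially reserved";  /* -1 */
    }
}
#endif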

/***********************************************************************
 *           wine_mmap_enum_reserved_areas
 *
 * Enumerate the list of reserved areas, sorted by addresses.
 * If enum_func returns a non-zero value, enumeration is stopped and the value is returned.
 *
 * Note: the reserved areas functions are not reentrant, caller is
 * responsible for proper locking.
 */
int wine_mmap_enum_reserved_areas( int (*enum_func)(void *base, size_t size, void *arg), void *arg,
                                   int top_down )
{
    int ret = 0;
    struct list *ptr;

    if (top_down)
    {
        for (ptr = reserved_areas.prev; ptr != &reserved_areas; ptr = ptr->prev)
        {
            struct reserved_area *area = LIST_ENTRY( ptr, struct reserved_area, entry );
            if ((ret = enum_func( area->base, area->size, arg ))) break;
        }
    }
    else
    {
        for (ptr = reserved_areas.next; ptr != &reserved_areas; ptr = ptr->next)
        {
            struct reserved_area *area = LIST_ENTRY( ptr, struct reserved_area, entry );
            if ((ret = enum_func( area->base, area->size, arg ))) break;
        }
    }
    return ret;
}
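
/* Hypothetical callback, kept disabled: print every reserved range
 * bottom-up; returning non-zero from the callback stops the walk early. */
#if 0
static int dump_area( void *base, size_t size, void *arg )
{
    fprintf( (FILE *)arg, "%p-%p\n", base, (char *)base + size );
    return 0;
}
/* call as: wine_mmap_enum_reserved_areas( dump_area, stderr, 0 ); */
#endif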

#else /* HAVE_MMAP */

void mmap_init(void)
{
}

#endif