/*
 * Wine memory mappings support
 *
 * Copyright 2000, 2004 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include "config.h"
#include "wine/port.h"

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#ifdef HAVE_SYS_WAIT_H
#include <sys/wait.h>
#endif
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif
#ifdef HAVE_STDINT_H
# include <stdint.h>
#endif

#include "wine/library.h"
#include "wine/list.h"

#ifdef HAVE_MMAP

struct reserved_area
{
    struct list entry;
    void       *base;
    size_t      size;
};

static struct list reserved_areas = LIST_INIT(reserved_areas);
static const unsigned int granularity_mask = 0xffff;  /* reserved areas have 64k granularity */

#ifndef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif
#ifndef MAP_PRIVATE
#define MAP_PRIVATE 0
#endif
#ifndef MAP_ANON
#define MAP_ANON 0
#endif
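
/* Return a file descriptor for /dev/zero, used to emulate anonymous
 * mappings on platforms where MAP_ANON is not available.  On platforms
 * that do have MAP_ANON this never opens anything and returns -1. */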
static inline int get_fdzero(void)
{
    static int fd = -1;

    if (MAP_ANON == 0 && fd == -1)
    {
        if ((fd = open( "/dev/zero", O_RDONLY )) == -1)
        {
            perror( "/dev/zero: open" );
            exit(1);
        }
    }
    return fd;
}

#if (defined(__svr4__) || defined(__NetBSD__)) && !defined(MAP_TRYFIXED)
/***********************************************************************
 *             try_mmap_fixed
 *
 * The purpose of this routine is to emulate the behaviour of
 * the Linux mmap() routine if a non-NULL address is passed,
 * but the MAP_FIXED flag is not set.  Linux in this case tries
 * to place the mapping at the specified address, *unless* the
 * range is already in use.  Solaris, however, completely ignores
 * the address argument in this case.
 *
 * As Wine code occasionally relies on the Linux behaviour, e.g. to
 * be able to map non-relocatable PE executables to their proper
 * start addresses, or to map the DOS memory to 0, this routine
 * emulates the Linux behaviour by checking whether the desired
 * address range is still available, and placing the mapping there
 * using MAP_FIXED if so.
 */
static int try_mmap_fixed (void *addr, size_t len, int prot, int flags,
                           int fildes, off_t off)
{
    char * volatile result = NULL;
    const size_t pagesize = sysconf( _SC_PAGESIZE );
    pid_t pid, wret;

    /* We only try to map to a fixed address if
       addr is non-NULL and properly aligned,
       and MAP_FIXED isn't already specified. */

    if ( !addr )
        return 0;
    if ( (uintptr_t)addr & (pagesize-1) )
        return 0;
    if ( flags & MAP_FIXED )
        return 0;

    /* We use vfork() to freeze all threads of the
       current process.  This allows us to check without
       race condition whether the desired memory range is
       already in use.  Note that because vfork() shares
       the address spaces between parent and child, we
       can actually perform the mapping in the child. */

    if ( (pid = vfork()) == -1 )
    {
        perror("try_mmap_fixed: vfork");
        exit(1);
    }
    if ( pid == 0 )
    {
        size_t i;
        char vec;

        /* We call mincore() for every page in the desired range.
           If any of these calls succeeds, the page is already
           mapped and we must fail. */
        for ( i = 0; i < len; i += pagesize )
            if ( mincore( (caddr_t)addr + i, pagesize, &vec ) != -1 )
                _exit(1);

        /* Perform the mapping with MAP_FIXED set.  This is safe
           now, as none of the pages is currently in use. */
        result = mmap( addr, len, prot, flags | MAP_FIXED, fildes, off );
        if ( result == addr )
            _exit(0);

        if ( result != (void *) -1 ) /* This should never happen ... */
            munmap( result, len );

        _exit(1);
    }

    /* reap the child */
    do {
        wret = waitpid(pid, NULL, 0);
    } while (wret < 0 && errno == EINTR);

    return result == addr;
}

#elif defined(__APPLE__)

#include <mach/mach_init.h>
#include <mach/vm_map.h>

/*
 * On Darwin, we can use the Mach call vm_allocate to allocate
 * anonymous memory at the specified address, and then use mmap with
 * MAP_FIXED to replace the mapping.
 */
static int try_mmap_fixed (void *addr, size_t len, int prot, int flags,
                           int fildes, off_t off)
{
    vm_address_t result = (vm_address_t)addr;

    if (!vm_allocate(mach_task_self(),&result,len,0))
    {
        if (mmap( (void *)result, len, prot, flags | MAP_FIXED, fildes, off ) != MAP_FAILED)
            return 1;
        vm_deallocate(mach_task_self(),result,len);
    }
    return 0;
}
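
/* Note: the last vm_allocate() argument is 0 (i.e. VM_FLAGS_FIXED, not
 * VM_FLAGS_ANYWHERE), so the call fails instead of relocating when any
 * part of the range is already in use; that is what makes this
 * reserve-then-replace sequence race-free within the task. */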

#endif  /* (__svr4__ || __NetBSD__) && !MAP_TRYFIXED */

/***********************************************************************
 *           wine_anon_mmap
 *
 * Portable wrapper for anonymous mmaps
 */
void *wine_anon_mmap( void *start, size_t size, int prot, int flags )
{
#ifdef MAP_SHARED
    flags &= ~MAP_SHARED;
#endif

    /* Linux EINVAL's on us if we don't pass MAP_PRIVATE to an anon mmap */
    flags |= MAP_PRIVATE | MAP_ANON;

    if (!(flags & MAP_FIXED))
    {
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
        /* Even FreeBSD 5.3 does not properly support NULL here. */
        if( start == NULL ) start = (void *)0x110000;
#endif

#ifdef MAP_TRYFIXED
        /* If available, this will attempt a fixed mapping in-kernel */
        flags |= MAP_TRYFIXED;
#elif defined(__svr4__) || defined(__NetBSD__) || defined(__APPLE__)
        if ( try_mmap_fixed( start, size, prot, flags, get_fdzero(), 0 ) )
            return start;
#endif
    }
    return mmap( start, size, prot, flags, get_fdzero(), 0 );
}
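
/* Usage sketch (hypothetical caller, not part of this file):
 *
 *     void *ptr = wine_anon_mmap( (void *)0x110000, 0x10000,
 *                                 PROT_READ | PROT_WRITE, 0 );
 *     if (ptr == (void *)-1) perror( "wine_anon_mmap" );
 *
 * Without MAP_FIXED the start address is only a hint, so callers that
 * need an exact placement must compare the returned pointer against
 * the requested address. */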

/***********************************************************************
 *           mmap_reserve
 *
 * mmap wrapper used for reservations, only maps the specified address
 */
static inline int mmap_reserve( void *addr, size_t size )
{
    void *ptr;
    int flags = MAP_PRIVATE | MAP_ANON | MAP_NORESERVE;

#ifdef MAP_TRYFIXED
    flags |= MAP_TRYFIXED;
#elif defined(__APPLE__)
    return try_mmap_fixed( addr, size, PROT_NONE, flags, get_fdzero(), 0 );
#endif
    ptr = mmap( addr, size, PROT_NONE, flags, get_fdzero(), 0 );
    if (ptr != addr && ptr != (void *)-1) munmap( ptr, size );
    return (ptr == addr);
}

/***********************************************************************
 *           reserve_area
 *
 * Reserve as much memory as possible in the given area.
 */
static inline void reserve_area( void *addr, void *end )
{
    size_t size = (char *)end - (char *)addr;

#if (defined(__svr4__) || defined(__NetBSD__)) && !defined(MAP_TRYFIXED)
    /* try_mmap_fixed is inefficient when using vfork, so we need a different algorithm here */
    /* we assume no other thread is running at this point */
    size_t i, pagesize = sysconf( _SC_PAGESIZE );
    char vec;

    while (size)
    {
        for (i = 0; i < size; i += pagesize)
            if (mincore( (caddr_t)addr + i, pagesize, &vec ) != -1) break;

        i &= ~granularity_mask;
        if (i && mmap( addr, i, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                       get_fdzero(), 0 ) != (void *)-1)
            wine_mmap_add_reserved_area( addr, i );

        i += granularity_mask + 1;
        if ((char *)addr + i < (char *)addr) break;  /* overflow */
        addr = (char *)addr + i;
        if (addr >= end) break;
        size = (char *)end - (char *)addr;
    }
#else
    if (!size) return;

    if (mmap_reserve( addr, size ))
    {
        wine_mmap_add_reserved_area( addr, size );
        return;
    }

    size = (size / 2) & ~granularity_mask;
    if (size)
    {
        reserve_area( addr, (char *)addr + size );
        reserve_area( (char *)addr + size, end );
    }
#endif
}
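
/* The generic path above is a binary split: when the whole range cannot
 * be reserved in one call, each half (rounded to the 64k granularity) is
 * retried recursively, so existing mappings only punch minimal holes. */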

/***********************************************************************
 *           reserve_malloc_space
 *
 * Solaris malloc is not smart enough to obtain space through mmap(), so try to make
 * sure that there is some available sbrk() space before we reserve other things.
 */
static inline void reserve_malloc_space( size_t size )
{
#ifdef __sun
    size_t i, count = size / 1024;
    void **ptrs = malloc( count * sizeof(ptrs[0]) );

    if (!ptrs) return;

    for (i = 0; i < count; i++) if (!(ptrs[i] = malloc( 1024 ))) break;
    if (i--)  /* free everything except the last one */
        while (i) free( ptrs[--i] );
    free( ptrs );
#endif
}

/***********************************************************************
 *           reserve_dos_area
 *
 * Reserve the DOS area (0x00000000-0x00110000).
 */
static inline void reserve_dos_area(void)
{
    const size_t first_page = 0x1000;
    const size_t dos_area_size = 0x110000;
    void *ptr;

    /* first page has to be handled specially */
    ptr = wine_anon_mmap( (void *)first_page, dos_area_size - first_page, PROT_NONE, MAP_NORESERVE );
    if (ptr != (void *)first_page)
    {
        if (ptr != (void *)-1) munmap( ptr, dos_area_size - first_page );
        return;
    }
    /* now add first page with MAP_FIXED */
    wine_anon_mmap( NULL, first_page, PROT_NONE, MAP_NORESERVE|MAP_FIXED );
    wine_mmap_add_reserved_area( NULL, dos_area_size );
}
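
/* Note: the bulk of the DOS area is reserved first with a non-fixed
 * mapping, because mmap( NULL, ... ) without MAP_FIXED means "no
 * preference"; page zero is only forced with MAP_FIXED once the rest
 * of the area is known to be ours. */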

/***********************************************************************
 *           mmap_init
 */
void mmap_init(void)
{
#ifdef __i386__
    struct reserved_area *area;
    struct list *ptr;
    char stack;
    char * const stack_ptr = &stack;
    char *user_space_limit = (char *)0x7ffe0000;

    reserve_malloc_space( 8 * 1024 * 1024 );

    if (!list_head( &reserved_areas ))
    {
        /* if we don't have a preloader, try to reserve some space below 2Gb */
        reserve_area( (void *)0x00110000, (void *)0x40000000 );
    }

    /* check for a reserved area starting at the user space limit */
    /* to avoid wasting time trying to allocate it again */
    LIST_FOR_EACH( ptr, &reserved_areas )
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if ((char *)area->base > user_space_limit) break;
        if ((char *)area->base + area->size > user_space_limit)
        {
            user_space_limit = (char *)area->base + area->size;
            break;
        }
    }

    if (stack_ptr >= user_space_limit)
    {
        char *end = 0;
        char *base = stack_ptr - ((unsigned int)stack_ptr & granularity_mask) - (granularity_mask + 1);
        if (base > user_space_limit) reserve_area( user_space_limit, base );
        base = stack_ptr - ((unsigned int)stack_ptr & granularity_mask) + (granularity_mask + 1);
#if defined(linux) || defined(__FreeBSD__) || defined (__FreeBSD_kernel__) || defined(__DragonFly__)
        /* Heuristic: assume the stack is near the end of the address */
        /* space, this avoids a lot of futile allocation attempts */
        end = (char *)(((unsigned long)base + 0x0fffffff) & 0xf0000000);
#endif
        reserve_area( base, end );
    }
    else reserve_area( user_space_limit, 0 );

    /* reserve the DOS area if not already done */

    ptr = list_head( &reserved_areas );
    if (ptr)
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if (!area->base) return;  /* already reserved */
    }
    reserve_dos_area();

#elif defined(__x86_64__)

    if (!list_head( &reserved_areas ))
    {
        /* if we don't have a preloader, try to reserve the space now */
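        /* These ranges appear to mirror what the 64-bit preloader keeps
         * free: space for PE images below 2Gb, the region just under the
         * 2Gb boundary, and the top of the 47-bit user address space. */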
        reserve_area( (void *)0x000000010000, (void *)0x000068000000 );
        reserve_area( (void *)0x00007ff00000, (void *)0x00007fff0000 );
        reserve_area( (void *)0x7ffffe000000, (void *)0x7fffffff0000 );
    }

#endif
}

/***********************************************************************
 *           wine_mmap_add_reserved_area
 *
 * Add an address range to the list of reserved areas.
 * Caller must have made sure the range is not used by anything else.
 *
 * Note: the reserved areas functions are not reentrant, caller is
 * responsible for proper locking.
 */
void wine_mmap_add_reserved_area( void *addr, size_t size )
{
    struct reserved_area *area;
    struct list *ptr;

    if (!((char *)addr + size)) size--;  /* avoid wrap-around */

    LIST_FOR_EACH( ptr, &reserved_areas )
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if (area->base > addr)
        {
            /* try to merge with the next one */
            if ((char *)addr + size == (char *)area->base)
            {
                area->base = addr;
                area->size += size;
                return;
            }
            break;
        }
        else if ((char *)area->base + area->size == (char *)addr)
        {
            /* merge with the previous one */
            area->size += size;

            /* try to merge with the next one too */
            if ((ptr = list_next( &reserved_areas, ptr )))
            {
                struct reserved_area *next = LIST_ENTRY( ptr, struct reserved_area, entry );
                if ((char *)addr + size == (char *)next->base)
                {
                    area->size += next->size;
                    list_remove( &next->entry );
                    free( next );
                }
            }
            return;
        }
    }

    if ((area = malloc( sizeof(*area) )))
    {
        area->base = addr;
        area->size = size;
        list_add_before( ptr, &area->entry );
    }
}
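
/* The list stays sorted by base address and adjoining ranges are merged,
 * which is what allows the lookup and removal functions below to stop
 * scanning early. */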

/***********************************************************************
 *           wine_mmap_remove_reserved_area
 *
 * Remove an address range from the list of reserved areas.
 * If 'unmap' is non-zero the range is unmapped too.
 *
 * Note: the reserved areas functions are not reentrant, caller is
 * responsible for proper locking.
 */
void wine_mmap_remove_reserved_area( void *addr, size_t size, int unmap )
{
    struct reserved_area *area;
    struct list *ptr;

    if (!((char *)addr + size)) size--;  /* avoid wrap-around */

    ptr = list_head( &reserved_areas );
    /* find the first area covering address */
    while (ptr)
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if ((char *)area->base >= (char *)addr + size) break;  /* outside the range */
        if ((char *)area->base + area->size > (char *)addr)  /* overlaps range */
        {
            if (area->base >= addr)
            {
                if ((char *)area->base + area->size > (char *)addr + size)
                {
                    /* range overlaps beginning of area only -> shrink area */
                    if (unmap) munmap( area->base, (char *)addr + size - (char *)area->base );
                    area->size -= (char *)addr + size - (char *)area->base;
                    area->base = (char *)addr + size;
                    break;
                }
                else
                {
                    /* range contains the whole area -> remove area completely */
                    ptr = list_next( &reserved_areas, ptr );
                    if (unmap) munmap( area->base, area->size );
                    list_remove( &area->entry );
                    free( area );
                    continue;
                }
            }
            else
            {
                if ((char *)area->base + area->size > (char *)addr + size)
                {
                    /* range is in the middle of area -> split area in two */
                    struct reserved_area *new_area = malloc( sizeof(*new_area) );
                    if (new_area)
                    {
                        new_area->base = (char *)addr + size;
                        new_area->size = (char *)area->base + area->size - (char *)new_area->base;
                        list_add_after( ptr, &new_area->entry );
                    }
                    else size = (char *)area->base + area->size - (char *)addr;
                    area->size = (char *)addr - (char *)area->base;
                    if (unmap) munmap( addr, size );
                    break;
                }
                else
                {
                    /* range overlaps end of area only -> shrink area */
                    if (unmap) munmap( addr, (char *)area->base + area->size - (char *)addr );
                    area->size = (char *)addr - (char *)area->base;
                }
            }
        }
        ptr = list_next( &reserved_areas, ptr );
    }
}
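
/* The scan above distinguishes four overlap cases: the range clips the
 * start of an area, covers it entirely, splits it in two, or clips its
 * end; the loop only continues when the remaining part of the range can
 * still reach a later area in the sorted list. */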

/***********************************************************************
 *           wine_mmap_is_in_reserved_area
 *
 * Check if the specified range is included in a reserved area.
 * Returns 1 if range is fully included, 0 if range is not included
 * at all, and -1 if it is only partially included.
 *
 * Note: the reserved areas functions are not reentrant, caller is
 * responsible for proper locking.
 */
int wine_mmap_is_in_reserved_area( void *addr, size_t size )
{
    struct reserved_area *area;
    struct list *ptr;

    LIST_FOR_EACH( ptr, &reserved_areas )
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if (area->base > addr) break;
        if ((char *)area->base + area->size <= (char *)addr) continue;
        /* area must contain block completely */
        if ((char *)area->base + area->size < (char *)addr + size) return -1;
        return 1;
    }
    return 0;
}
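
/* Note that a range that starts below an area and runs into it reports 0
 * rather than -1: the scan breaks at the first area whose base is above
 * addr, so only overlaps beginning inside an area count as partial. */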

/***********************************************************************
 *           wine_mmap_enum_reserved_areas
 *
 * Enumerate the list of reserved areas, sorted by addresses.
 * If enum_func returns a non-zero value, enumeration is stopped and the value is returned.
 *
 * Note: the reserved areas functions are not reentrant, caller is
 * responsible for proper locking.
 */
int wine_mmap_enum_reserved_areas( int (*enum_func)(void *base, size_t size, void *arg), void *arg,
                                   int top_down )
{
    int ret = 0;
    struct list *ptr;

    if (top_down)
    {
        for (ptr = reserved_areas.prev; ptr != &reserved_areas; ptr = ptr->prev)
        {
            struct reserved_area *area = LIST_ENTRY( ptr, struct reserved_area, entry );
            if ((ret = enum_func( area->base, area->size, arg ))) break;
        }
    }
    else
    {
        for (ptr = reserved_areas.next; ptr != &reserved_areas; ptr = ptr->next)
        {
            struct reserved_area *area = LIST_ENTRY( ptr, struct reserved_area, entry );
            if ((ret = enum_func( area->base, area->size, arg ))) break;
        }
    }
    return ret;
}
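
/* Usage sketch (hypothetical callback, not part of this file):
 *
 *     static int dump_area( void *base, size_t size, void *arg )
 *     {
 *         fprintf( (FILE *)arg, "%p-%p\n", base, (char *)base + size );
 *         return 0;   (returning non-zero stops the enumeration)
 *     }
 *
 *     wine_mmap_enum_reserved_areas( dump_area, stderr, 0 );
 */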

#else /* HAVE_MMAP */