/*
 * Wine memory mappings support
 *
 * Copyright 2000, 2004 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */
#include "config.h"
#include "wine/port.h"

#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#ifdef HAVE_SYS_WAIT_H
#include <sys/wait.h>
#endif
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif
#ifdef HAVE_STDINT_H
# include <stdint.h>
#endif

#include "wine/library.h"
#include "wine/list.h"
#ifdef HAVE_MMAP

struct reserved_area
{
    struct list entry;
    void       *base;
    size_t      size;
};

static struct list reserved_areas = LIST_INIT(reserved_areas);
static const unsigned int granularity_mask = 0xffff;  /* reserved areas have 64k granularity */

#ifndef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif
#ifndef MAP_PRIVATE
#define MAP_PRIVATE 0
#endif
#ifndef MAP_ANON
#define MAP_ANON 0
#endif
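
/* Illustration (a sketch, not code used below): with 64k granularity the
 * mask can round an address down or up to a reservation boundary:
 *
 *   size_t down = (size_t)addr & ~granularity_mask;
 *   size_t up   = ((size_t)addr + granularity_mask) & ~granularity_mask;
 *
 * The reservation code below uses the same kind of arithmetic when it
 * aligns ranges and advances through the address space. */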

static inline int get_fdzero(void)
{
    static int fd = -1;

    if (MAP_ANON == 0 && fd == -1)
    {
        if ((fd = open( "/dev/zero", O_RDONLY )) == -1)
        {
            perror( "/dev/zero: open" );
            exit(1);
        }
    }
    return fd;
}

#if (defined(__svr4__) || defined(__NetBSD__)) && !defined(MAP_TRYFIXED)
/***********************************************************************
 *		try_mmap_fixed
 *
 * The purpose of this routine is to emulate the behaviour of
 * the Linux mmap() routine if a non-NULL address is passed,
 * but the MAP_FIXED flag is not set.  Linux in this case tries
 * to place the mapping at the specified address, *unless* the
 * range is already in use.  Solaris, however, completely ignores
 * the address argument in this case.
 *
 * As Wine code occasionally relies on the Linux behaviour, e.g. to
 * be able to map non-relocatable PE executables to their proper
 * start addresses, or to map the DOS memory to 0, this routine
 * emulates the Linux behaviour by checking whether the desired
 * address range is still available, and placing the mapping there
 * using MAP_FIXED if so.
 */
static int try_mmap_fixed (void *addr, size_t len, int prot, int flags,
                           int fildes, off_t off)
{
    char * volatile result = NULL;
    int pagesize = getpagesize();
    pid_t pid, wret;

    /* We only try to map to a fixed address if
       addr is non-NULL and properly aligned,
       and MAP_FIXED isn't already specified. */

    if ( !addr )
        return 0;
    if ( (uintptr_t)addr & (pagesize-1) )
        return 0;
    if ( flags & MAP_FIXED )
        return 0;

    /* We use vfork() to freeze all threads of the
       current process.  This allows us to check without
       race condition whether the desired memory range is
       already in use.  Note that because vfork() shares
       the address spaces between parent and child, we
       can actually perform the mapping in the child. */

    if ( (pid = vfork()) == -1 )
    {
        perror("try_mmap_fixed: vfork");
        exit(1);
    }
    if ( pid == 0 )
    {
        int i;
        char vec;

        /* We call mincore() for every page in the desired range.
           If any of these calls succeeds, the page is already
           mapped and we must fail. */
        for ( i = 0; i < len; i += pagesize )
            if ( mincore( (caddr_t)addr + i, pagesize, &vec ) != -1 )
                _exit(1);

        /* Perform the mapping with MAP_FIXED set.  This is safe
           now, as none of the pages is currently in use. */
        result = mmap( addr, len, prot, flags | MAP_FIXED, fildes, off );
        if ( result == addr )
            _exit(0);

        if ( result != (void *) -1 ) /* This should never happen ... */
            munmap( result, len );

        _exit(1);
    }

    /* reap child */
    do
    {
        wret = waitpid(pid, NULL, 0);
    } while (wret < 0 && errno == EINTR);

    return result == addr;
}

#elif defined(__APPLE__)

#include <mach/mach_init.h>
#include <mach/vm_map.h>

/*
 * On Darwin, we can use the Mach call vm_allocate to allocate
 * anonymous memory at the specified address, and then use mmap with
 * MAP_FIXED to replace the mapping.
 */
static int try_mmap_fixed (void *addr, size_t len, int prot, int flags,
                           int fildes, off_t off)
{
    vm_address_t result = (vm_address_t)addr;

    if (!vm_allocate(mach_task_self(),&result,len,0))
    {
        if (mmap( (void *)result, len, prot, flags | MAP_FIXED, fildes, off ) != MAP_FAILED)
            return 1;
        vm_deallocate(mach_task_self(),result,len);
    }
    return 0;
}

#endif  /* (__svr4__ || __NetBSD__) && !MAP_TRYFIXED */

/***********************************************************************
 *		wine_anon_mmap
 *
 * Portable wrapper for anonymous mmaps
 */
void *wine_anon_mmap( void *start, size_t size, int prot, int flags )
{
#ifdef MAP_SHARED
    flags &= ~MAP_SHARED;
#endif

    /* Linux EINVAL's on us if we don't pass MAP_PRIVATE to an anon mmap */
    flags |= MAP_PRIVATE | MAP_ANON;

    if (!(flags & MAP_FIXED))
    {
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
        /* Even FreeBSD 5.3 does not properly support NULL here. */
        if( start == NULL ) start = (void *)0x110000;
#endif

#ifdef MAP_TRYFIXED
        /* If available, this will attempt a fixed mapping in-kernel */
        flags |= MAP_TRYFIXED;
#elif defined(__svr4__) || defined(__NetBSD__) || defined(__APPLE__)
        if ( try_mmap_fixed( start, size, prot, flags, get_fdzero(), 0 ) )
            return start;
#endif
    }
    return mmap( start, size, prot, flags, get_fdzero(), 0 );
}
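
/* Usage sketch (illustrative only, not called from this file): a caller
 * that wants an anonymous, inaccessible mapping near a preferred address,
 * without forcing it, could write:
 *
 *   void *ptr = wine_anon_mmap( (void *)0x110000, 0x10000, PROT_NONE, 0 );
 *   if (ptr == (void *)-1) perror( "wine_anon_mmap" );
 *
 * Passing MAP_FIXED in flags instead makes the mapping land at the exact
 * address, replacing whatever was mapped there before. */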

/***********************************************************************
 *		mmap_reserve
 *
 * mmap wrapper used for reservations, only maps the specified address
 */
static inline int mmap_reserve( void *addr, size_t size )
{
    void *ptr;
    int flags = MAP_PRIVATE | MAP_ANON | MAP_NORESERVE;

#ifdef MAP_TRYFIXED
    flags |= MAP_TRYFIXED;
#elif defined(__APPLE__)
    return try_mmap_fixed( addr, size, PROT_NONE, flags, get_fdzero(), 0 );
#endif
    ptr = mmap( addr, size, PROT_NONE, flags, get_fdzero(), 0 );
    if (ptr != addr && ptr != (void *)-1) munmap( ptr, size );
    return (ptr == addr);
}

/***********************************************************************
 *		reserve_area
 *
 * Reserve as much memory as possible in the given area.
 */
static inline void reserve_area( void *addr, void *end )
{
    size_t size = (char *)end - (char *)addr;

#if (defined(__svr4__) || defined(__NetBSD__)) && !defined(MAP_TRYFIXED)
    /* try_mmap_fixed is inefficient when using vfork, so we need a different algorithm here */
    /* we assume no other thread is running at this point */
    size_t i, pagesize = getpagesize();
    char vec;

    while (size)
    {
        for (i = 0; i < size; i += pagesize)
            if (mincore( (caddr_t)addr + i, pagesize, &vec ) != -1) break;

        i &= ~granularity_mask;
        if (i && mmap( addr, i, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                       get_fdzero(), 0 ) != (void *)-1)
            wine_mmap_add_reserved_area( addr, i );

        i += granularity_mask + 1;
        if ((char *)addr + i < (char *)addr) break;  /* overflow */
        addr = (char *)addr + i;
        if (addr >= end) break;
        size = (char *)end - (char *)addr;
    }
#else
    if (!size) return;
    if (mmap_reserve( addr, size ))
    {
        wine_mmap_add_reserved_area( addr, size );
        return;
    }
    size = (size / 2) & ~granularity_mask;
    if (size)
    {
        reserve_area( addr, (char *)addr + size );
        reserve_area( (char *)addr + size, end );
    }
#endif
}
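
/* For instance, the initialization code below calls
 *
 *   reserve_area( (void *)0x00110000, (void *)0x40000000 );
 *
 * when no preloader has populated the list; a range that cannot be reserved
 * in one piece is split at a 64k boundary and retried on both halves, so
 * only the parts that are actually free end up in the reserved area list. */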

/***********************************************************************
 *           reserve_malloc_space
 *
 * Solaris malloc is not smart enough to obtain space through mmap(), so try to make
 * sure that there is some available sbrk() space before we reserve other things.
 */
static inline void reserve_malloc_space( size_t size )
{
#ifdef __sun
    size_t i, count = size / 1024;
    void **ptrs = malloc( count * sizeof(ptrs[0]) );

    if (!ptrs) return;

    for (i = 0; i < count; i++) if (!(ptrs[i] = malloc( 1024 ))) break;
    if (i--)  /* free everything except the last one */
        while (i) free( ptrs[--i] );
    free( ptrs );
#endif
}

/***********************************************************************
 *           reserve_dos_area
 *
 * Reserve the DOS area (0x00000000-0x00110000).
 */
static inline void reserve_dos_area(void)
{
    const size_t page_size = getpagesize();
    const size_t dos_area_size = 0x110000;
    void *ptr;

    /* first page has to be handled specially */
    ptr = wine_anon_mmap( (void *)page_size, dos_area_size - page_size, PROT_NONE, MAP_NORESERVE );
    if (ptr != (void *)page_size)
    {
        if (ptr != (void *)-1) munmap( ptr, dos_area_size - page_size );
        return;
    }
    /* now add first page with MAP_FIXED */
    wine_anon_mmap( NULL, page_size, PROT_NONE, MAP_NORESERVE|MAP_FIXED );
    wine_mmap_add_reserved_area( NULL, dos_area_size );
}

/***********************************************************************
 *           mmap_init
 */
void mmap_init(void)
{
#ifdef __i386__
    struct reserved_area *area;
    struct list *ptr;
    char stack;
    char * const stack_ptr = &stack;
    char *user_space_limit = (char *)0x7ffe0000;

    reserve_malloc_space( 8 * 1024 * 1024 );

    if (!list_head( &reserved_areas ))
    {
        /* if we don't have a preloader, try to reserve some space below 2Gb */
        reserve_area( (void *)0x00110000, (void *)0x40000000 );
    }

    /* check for a reserved area starting at the user space limit */
    /* to avoid wasting time trying to allocate it again */
    LIST_FOR_EACH( ptr, &reserved_areas )
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if ((char *)area->base > user_space_limit) break;
        if ((char *)area->base + area->size > user_space_limit)
        {
            user_space_limit = (char *)area->base + area->size;
            break;
        }
    }

    if (stack_ptr >= user_space_limit)
    {
        char *end = 0;
        char *base = stack_ptr - ((unsigned int)stack_ptr & granularity_mask) - (granularity_mask + 1);
        if (base > user_space_limit) reserve_area( user_space_limit, base );
        base = stack_ptr - ((unsigned int)stack_ptr & granularity_mask) + (granularity_mask + 1);
#if defined(linux) || defined(__FreeBSD__) || defined (__FreeBSD_kernel__)
        /* Heuristic: assume the stack is near the end of the address */
        /* space, this avoids a lot of futile allocation attempts */
        end = (char *)(((unsigned long)base + 0x0fffffff) & 0xf0000000);
#endif
        reserve_area( base, end );
    }
    else reserve_area( user_space_limit, 0 );

    /* reserve the DOS area if not already done */

    ptr = list_head( &reserved_areas );
    if (ptr)
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if (!area->base) return;  /* already reserved */
    }
    reserve_dos_area();

#elif defined(__x86_64__)

    if (!list_head( &reserved_areas ))
    {
        /* if we don't have a preloader, try to reserve the space now */
        reserve_area( (void *)0x000000010000, (void *)0x000068000000 );
        reserve_area( (void *)0x00007ff00000, (void *)0x00007fff0000 );
        reserve_area( (void *)0x7ffffe000000, (void *)0x7fffffff0000 );
    }

#endif
}

/***********************************************************************
 *           wine_mmap_add_reserved_area
 *
 * Add an address range to the list of reserved areas.
 * Caller must have made sure the range is not used by anything else.
 *
 * Note: the reserved areas functions are not reentrant, caller is
 * responsible for proper locking.
 */
void wine_mmap_add_reserved_area( void *addr, size_t size )
{
    struct reserved_area *area;
    struct list *ptr;

    if (!((char *)addr + size)) size--;  /* avoid wrap-around */

    LIST_FOR_EACH( ptr, &reserved_areas )
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if (area->base > addr)
        {
            /* try to merge with the next one */
            if ((char *)addr + size == (char *)area->base)
            {
                area->base = addr;
                area->size += size;
                return;
            }
            break;
        }
        else if ((char *)area->base + area->size == (char *)addr)
        {
            /* merge with the previous one */
            area->size += size;

            /* try to merge with the next one too */
            if ((ptr = list_next( &reserved_areas, ptr )))
            {
                struct reserved_area *next = LIST_ENTRY( ptr, struct reserved_area, entry );
                if ((char *)addr + size == (char *)next->base)
                {
                    area->size += next->size;
                    list_remove( &next->entry );
                    free( next );
                }
            }
            return;
        }
    }

    if ((area = malloc( sizeof(*area) )))
    {
        area->base = addr;
        area->size = size;
        list_add_before( ptr, &area->entry );
    }
}

/***********************************************************************
 *           wine_mmap_remove_reserved_area
 *
 * Remove an address range from the list of reserved areas.
 * If 'unmap' is non-zero the range is unmapped too.
 *
 * Note: the reserved areas functions are not reentrant, caller is
 * responsible for proper locking.
 */
void wine_mmap_remove_reserved_area( void *addr, size_t size, int unmap )
{
    struct reserved_area *area;
    struct list *ptr;

    if (!((char *)addr + size)) size--;  /* avoid wrap-around */

    ptr = list_head( &reserved_areas );
    /* find the first area covering address */
    while (ptr)
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if ((char *)area->base >= (char *)addr + size) break;  /* outside the range */
        if ((char *)area->base + area->size > (char *)addr)  /* overlaps range */
        {
            if (area->base >= addr)
            {
                if ((char *)area->base + area->size > (char *)addr + size)
                {
                    /* range overlaps beginning of area only -> shrink area */
                    if (unmap) munmap( area->base, (char *)addr + size - (char *)area->base );
                    area->size -= (char *)addr + size - (char *)area->base;
                    area->base = (char *)addr + size;
                    break;
                }
                else
                {
                    /* range contains the whole area -> remove area completely */
                    ptr = list_next( &reserved_areas, ptr );
                    if (unmap) munmap( area->base, area->size );
                    list_remove( &area->entry );
                    free( area );
                    continue;
                }
            }
            else
            {
                if ((char *)area->base + area->size > (char *)addr + size)
                {
                    /* range is in the middle of area -> split area in two */
                    struct reserved_area *new_area = malloc( sizeof(*new_area) );
                    if (new_area)
                    {
                        new_area->base = (char *)addr + size;
                        new_area->size = (char *)area->base + area->size - (char *)new_area->base;
                        list_add_after( ptr, &new_area->entry );
                    }
                    else size = (char *)area->base + area->size - (char *)addr;
                    area->size = (char *)addr - (char *)area->base;
                    if (unmap) munmap( addr, size );
                    break;
                }
                else
                {
                    /* range overlaps end of area only -> shrink area */
                    if (unmap) munmap( addr, (char *)area->base + area->size - (char *)addr );
                    area->size = (char *)addr - (char *)area->base;
                }
            }
        }
        ptr = list_next( &reserved_areas, ptr );
    }
}

/***********************************************************************
 *           wine_mmap_is_in_reserved_area
 *
 * Check if the specified range is included in a reserved area.
 * Returns 1 if range is fully included, 0 if range is not included
 * at all, and -1 if it is only partially included.
 *
 * Note: the reserved areas functions are not reentrant, caller is
 * responsible for proper locking.
 */
int wine_mmap_is_in_reserved_area( void *addr, size_t size )
{
    struct reserved_area *area;
    struct list *ptr;

    LIST_FOR_EACH( ptr, &reserved_areas )
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if (area->base > addr) break;
        if ((char *)area->base + area->size <= (char *)addr) continue;
        /* area must contain block completely */
        if ((char *)area->base + area->size < (char *)addr + size) return -1;
        return 1;
    }
    return 0;
}
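
/* Usage sketch (illustrative only): callers treat the result as a tri-state
 * value, for example:
 *
 *   int res = wine_mmap_is_in_reserved_area( addr, size );
 *   if (res == 1)        range lies entirely inside a reserved area
 *   else if (res == -1)  range only partially overlaps a reserved area
 *   else                 range does not touch any reserved area
 */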

/***********************************************************************
 *           wine_mmap_enum_reserved_areas
 *
 * Enumerate the list of reserved areas, sorted by addresses.
 * If enum_func returns a non-zero value, enumeration is stopped and the value is returned.
 *
 * Note: the reserved areas functions are not reentrant, caller is
 * responsible for proper locking.
 */
int wine_mmap_enum_reserved_areas( int (*enum_func)(void *base, size_t size, void *arg), void *arg,
                                   int top_down )
{
    int ret = 0;
    struct list *ptr;

    if (top_down)
    {
        for (ptr = reserved_areas.prev; ptr != &reserved_areas; ptr = ptr->prev)
        {
            struct reserved_area *area = LIST_ENTRY( ptr, struct reserved_area, entry );
            if ((ret = enum_func( area->base, area->size, arg ))) break;
        }
    }
    else
    {
        for (ptr = reserved_areas.next; ptr != &reserved_areas; ptr = ptr->next)
        {
            struct reserved_area *area = LIST_ENTRY( ptr, struct reserved_area, entry );
            if ((ret = enum_func( area->base, area->size, arg ))) break;
        }
    }
    return ret;
}
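
/* Usage sketch (illustrative only): a callback that prints every reserved
 * range, enumerated bottom-up, could look like this (dump_area is a
 * hypothetical name, not part of this file):
 *
 *   static int dump_area( void *base, size_t size, void *arg )
 *   {
 *       fprintf( stderr, "reserved %p-%p\n", base, (char *)base + size );
 *       return 0;
 *   }
 *
 *   wine_mmap_enum_reserved_areas( dump_area, NULL, 0 );
 *
 * Returning non-zero from the callback stops the enumeration and becomes
 * the return value of wine_mmap_enum_reserved_areas(). */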

#else  /* HAVE_MMAP */