/*
 * Wine memory mappings support
 *
 * Copyright 2000, 2004 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include "config.h"
#include "wine/port.h"

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif
#ifdef HAVE_STDINT_H
# include <stdint.h>
#endif

#include "wine/library.h"
#include "wine/list.h"

#ifdef HAVE_MMAP

struct reserved_area
{
    struct list entry;
    void       *base;
    size_t      size;
};

static struct list reserved_areas = LIST_INIT(reserved_areas);
static const unsigned int granularity_mask = 0xffff;  /* reserved areas have 64k granularity */
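
/* Example: with granularity_mask == 0xffff, (addr & ~granularity_mask) rounds an
 * address down to a 64k boundary, and ((addr + granularity_mask) & ~granularity_mask)
 * rounds it up, e.g. 0x12345 -> 0x10000 resp. 0x20000. */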

#ifndef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif
#ifndef MAP_ANON
#define MAP_ANON 0
#endif

static inline int get_fdzero(void)
{
    static int fd = -1;

    if (MAP_ANON == 0 && fd == -1)
    {
        if ((fd = open( "/dev/zero", O_RDONLY )) == -1)
        {
            perror( "/dev/zero: open" );
            exit(1);
        }
    }
    return fd;
}
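
/* Note: on platforms where MAP_ANON is 0, anonymous mappings are emulated by
 * mapping the /dev/zero descriptor returned by get_fdzero(), e.g.
 * mmap( NULL, size, prot, MAP_PRIVATE, get_fdzero(), 0 ). */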

#if (defined(__svr4__) || defined(__NetBSD__)) && !defined(MAP_TRYFIXED)
/***********************************************************************
 *             try_mmap_fixed
 *
 * The purpose of this routine is to emulate the behaviour of
 * the Linux mmap() routine if a non-NULL address is passed,
 * but the MAP_FIXED flag is not set.  Linux in this case tries
 * to place the mapping at the specified address, *unless* the
 * range is already in use.  Solaris, however, completely ignores
 * the address argument in this case.
 *
 * As Wine code occasionally relies on the Linux behaviour, e.g. to
 * be able to map non-relocatable PE executables to their proper
 * start addresses, or to map the DOS memory to 0, this routine
 * emulates the Linux behaviour by checking whether the desired
 * address range is still available, and placing the mapping there
 * using MAP_FIXED if so.
 */
static int try_mmap_fixed (void *addr, size_t len, int prot, int flags,
                           int fildes, off_t off)
{
    char * volatile result = NULL;
    int pagesize = getpagesize();
    pid_t pid;

    /* We only try to map to a fixed address if
       addr is non-NULL and properly aligned,
       and MAP_FIXED isn't already specified. */

    if ( !addr )
        return 0;
    if ( (uintptr_t)addr & (pagesize-1) )
        return 0;
    if ( flags & MAP_FIXED )
        return 0;

    /* We use vfork() to freeze all threads of the
       current process.  This allows us to check without
       race condition whether the desired memory range is
       already in use.  Note that because vfork() shares
       the address spaces between parent and child, we
       can actually perform the mapping in the child. */

    if ( (pid = vfork()) == -1 )
    {
        perror("try_mmap_fixed: vfork");
        exit(1);
    }
    if ( pid == 0 )
    {
        int i;
        char vec;

        /* We call mincore() for every page in the desired range.
           If any of these calls succeeds, the page is already
           mapped and we must fail. */
        for ( i = 0; i < len; i += pagesize )
            if ( mincore( (caddr_t)addr + i, pagesize, &vec ) != -1 )
                _exit(1);

        /* Perform the mapping with MAP_FIXED set.  This is safe
           now, as none of the pages is currently in use. */
        result = mmap( addr, len, prot, flags | MAP_FIXED, fildes, off );
        if ( result == addr )
            _exit(0);

        if ( result != (void *) -1 ) /* This should never happen ... */
            munmap( result, len );

        _exit(1);
    }

    /* vfork() lets the parent continue only after the child
       has exited.  Furthermore, Wine sets SIGCHLD to SIG_IGN,
       so we don't need to wait for the child. */

    return result == addr;
}
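
/* Hypothetical example: try_mmap_fixed( (void *)0x110000, 0x1000, PROT_READ,
 * MAP_PRIVATE, get_fdzero(), 0 ) returns non-zero only if the page at 0x110000
 * was free and has now been mapped there. */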

#elif defined(__APPLE__)

#include <mach/mach_init.h>
#include <mach/vm_map.h>

/*
 * On Darwin, we can use the Mach call vm_allocate to allocate
 * anonymous memory at the specified address, and then use mmap with
 * MAP_FIXED to replace the mapping.
 */
static int try_mmap_fixed (void *addr, size_t len, int prot, int flags,
                           int fildes, off_t off)
{
    vm_address_t result = (vm_address_t)addr;

    if (!vm_allocate(mach_task_self(),&result,len,0))
    {
        if (mmap( (void *)result, len, prot, flags | MAP_FIXED, fildes, off ) != MAP_FAILED)
            return 1;
        vm_deallocate(mach_task_self(),result,len);
    }
    return 0;
}
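
/* Note: vm_allocate() without VM_FLAGS_ANYWHERE fails if any page of the target
 * range is already in use, giving the same non-destructive "try fixed" semantics
 * as the vfork()-based version above. */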

#endif  /* (__svr4__ || __NetBSD__) && !MAP_TRYFIXED */


/***********************************************************************
 *		wine_anon_mmap
 *
 * Portable wrapper for anonymous mmaps
 */
void *wine_anon_mmap( void *start, size_t size, int prot, int flags )
{
#ifdef MAP_SHARED
    flags &= ~MAP_SHARED;
#endif

    /* Linux EINVAL's on us if we don't pass MAP_PRIVATE to an anon mmap */
    flags |= MAP_PRIVATE | MAP_ANON;

    if (!(flags & MAP_FIXED))
    {
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
        /* Even FreeBSD 5.3 does not properly support NULL here. */
        if( start == NULL ) start = (void *)0x110000;
#endif

#ifdef MAP_TRYFIXED
        /* If available, this will attempt a fixed mapping in-kernel */
        flags |= MAP_TRYFIXED;
#elif defined(__svr4__) || defined(__NetBSD__) || defined(__APPLE__)
        if ( try_mmap_fixed( start, size, prot, flags, get_fdzero(), 0 ) )
            return start;
#endif
    }
    return mmap( start, size, prot, flags, get_fdzero(), 0 );
}
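
/* Hypothetical usage sketch:
 *
 *     void *ptr = wine_anon_mmap( (void *)0x400000, 0x10000, PROT_READ | PROT_WRITE, 0 );
 *     if (ptr == (void *)-1) ...             // mapping failed
 *     else if (ptr != (void *)0x400000) ...  // mapped, but not at the hinted address
 *
 * Without MAP_FIXED the address is only a hint, so callers must check the result. */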


/***********************************************************************
 *           mmap_reserve
 *
 * mmap wrapper used for reservations, only maps the specified address
 */
static inline int mmap_reserve( void *addr, size_t size )
{
    void *ptr;
    int flags = MAP_PRIVATE | MAP_ANON | MAP_NORESERVE;

#ifdef MAP_TRYFIXED
    flags |= MAP_TRYFIXED;
#elif defined(__APPLE__)
    return try_mmap_fixed( addr, size, PROT_NONE, flags, get_fdzero(), 0 );
#endif
    ptr = mmap( addr, size, PROT_NONE, flags, get_fdzero(), 0 );
    if (ptr != addr && ptr != (void *)-1) munmap( ptr, size );
    return (ptr == addr);
}
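
/* Example: mmap_reserve( (void *)0x60000000, 0x10000 ) succeeds only if the
 * PROT_NONE mapping was placed exactly at 0x60000000; a mapping that the kernel
 * placed anywhere else is immediately unmapped again. */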


/***********************************************************************
 *           reserve_area
 *
 * Reserve as much memory as possible in the given area.
 */
#if defined(__i386__) && !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__)  /* commented out until FreeBSD gets fixed */
static void reserve_area( void *addr, void *end )
{
    size_t size = (char *)end - (char *)addr;

#if (defined(__svr4__) || defined(__NetBSD__)) && !defined(MAP_TRYFIXED)
    /* try_mmap_fixed is inefficient when using vfork, so we need a different algorithm here */
    /* we assume no other thread is running at this point */
    size_t i, pagesize = getpagesize();
    char vec;

    while (size)
    {
        for (i = 0; i < size; i += pagesize)
            if (mincore( (caddr_t)addr + i, pagesize, &vec ) != -1) break;

        i &= ~granularity_mask;
        if (i && mmap( addr, i, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                       get_fdzero(), 0 ) != (void *)-1)
            wine_mmap_add_reserved_area( addr, i );

        i += granularity_mask + 1;
        if ((char *)addr + i < (char *)addr) break;  /* overflow */
        addr = (char *)addr + i;
        if (addr >= end) break;
        size = (char *)end - (char *)addr;
    }
#else
    if (!size) return;

    if (mmap_reserve( addr, size ))
    {
        wine_mmap_add_reserved_area( addr, size );
        return;
    }
    if (size > granularity_mask + 1)
    {
        size_t new_size = (size / 2) & ~granularity_mask;
        reserve_area( addr, (char *)addr + new_size );
        reserve_area( (char *)addr + new_size, end );
    }
#endif
}
#endif  /* __i386__ */
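
/* Note: when a reservation fails, the range is split into two 64k-aligned halves
 * that are retried separately, so every still-free fragment of the original
 * range ends up reserved even if parts of it are already in use. */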


/***********************************************************************
 *           reserve_dos_area
 *
 * Reserve the DOS area (0x00000000-0x00110000).
 */
static void reserve_dos_area(void)
{
    const size_t page_size = getpagesize();
    const size_t dos_area_size = 0x110000;
    void *ptr;

    /* first page has to be handled specially */
    ptr = wine_anon_mmap( (void *)page_size, dos_area_size - page_size, PROT_NONE, MAP_NORESERVE );
    if (ptr != (void *)page_size)
    {
        if (ptr != (void *)-1) munmap( ptr, dos_area_size - page_size );
        return;
    }
    /* now add first page with MAP_FIXED */
    wine_anon_mmap( NULL, page_size, PROT_NONE, MAP_NORESERVE|MAP_FIXED );
    wine_mmap_add_reserved_area( NULL, dos_area_size );
}


/***********************************************************************
 *           mmap_init
 */
void mmap_init(void)
{
    struct reserved_area *area;
    struct list *ptr;
#if defined(__i386__) && !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__)  /* commented out until FreeBSD gets fixed */
    char stack;
    char * const stack_ptr = &stack;
    char *user_space_limit = (char *)0x7ffe0000;

    /* check for a reserved area starting at the user space limit */
    /* to avoid wasting time trying to allocate it again */
    LIST_FOR_EACH( ptr, &reserved_areas )
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if ((char *)area->base > user_space_limit) break;
        if ((char *)area->base + area->size > user_space_limit)
        {
            user_space_limit = (char *)area->base + area->size;
            break;
        }
    }

    if (stack_ptr >= user_space_limit)
    {
        char *end = 0;
        char *base = stack_ptr - ((unsigned int)stack_ptr & granularity_mask) - (granularity_mask + 1);
        if (base > user_space_limit) reserve_area( user_space_limit, base );
        base = stack_ptr - ((unsigned int)stack_ptr & granularity_mask) + (granularity_mask + 1);
#ifdef linux
        /* Linux heuristic: assume the stack is near the end of the address */
        /* space, this avoids a lot of futile allocation attempts */
        end = (char *)(((unsigned long)base + 0x0fffffff) & 0xf0000000);
#endif
        reserve_area( base, end );
    }
    else reserve_area( user_space_limit, 0 );
#endif /* __i386__ */

    /* reserve the DOS area if not already done */

    ptr = list_head( &reserved_areas );
    if (ptr)
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if (!area->base) return;  /* already reserved */
    }
    reserve_dos_area();
}

#else /* HAVE_MMAP */

void *wine_anon_mmap( void *start, size_t size, int prot, int flags )
{
    return (void *)-1;
}

static inline int munmap( void *ptr, size_t size )
{
    return 0;
}

void mmap_init(void)
{
}

#endif  /* HAVE_MMAP */


/***********************************************************************
 *           wine_mmap_add_reserved_area
 *
 * Add an address range to the list of reserved areas.
 * Caller must have made sure the range is not used by anything else.
 *
 * Note: the reserved areas functions are not reentrant, caller is
 * responsible for proper locking.
 */
void wine_mmap_add_reserved_area( void *addr, size_t size )
{
    struct reserved_area *area;
    struct list *ptr;

    if (!((char *)addr + size)) size--;  /* avoid wrap-around */

    LIST_FOR_EACH( ptr, &reserved_areas )
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if (area->base > addr)
        {
            /* try to merge with the next one */
            if ((char *)addr + size == (char *)area->base)
            {
                area->base = addr;
                area->size += size;
                return;
            }
            break;
        }
        else if ((char *)area->base + area->size == (char *)addr)
        {
            /* merge with the previous one */
            area->size += size;

            /* try to merge with the next one too */
            if ((ptr = list_next( &reserved_areas, ptr )))
            {
                struct reserved_area *next = LIST_ENTRY( ptr, struct reserved_area, entry );
                if ((char *)addr + size == (char *)next->base)
                {
                    area->size += next->size;
                    list_remove( &next->entry );
                    free( next );
                }
            }
            return;
        }
    }

    if ((area = malloc( sizeof(*area) )))
    {
        area->base = addr;
        area->size = size;
        list_add_before( ptr, &area->entry );
    }
}
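
/* Example: adding 64k at 0x200000 and then 64k at 0x210000 leaves a single
 * merged area of 128k at 0x200000, since the second range starts exactly where
 * the first one ends. */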


/***********************************************************************
 *           wine_mmap_remove_reserved_area
 *
 * Remove an address range from the list of reserved areas.
 * If 'unmap' is non-zero the range is unmapped too.
 *
 * Note: the reserved areas functions are not reentrant, caller is
 * responsible for proper locking.
 */
void wine_mmap_remove_reserved_area( void *addr, size_t size, int unmap )
{
    struct reserved_area *area;
    struct list *ptr;

    if (!((char *)addr + size)) size--;  /* avoid wrap-around */

    ptr = list_head( &reserved_areas );
    /* find the first area covering address */
    while (ptr)
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if ((char *)area->base >= (char *)addr + size) break;  /* outside the range */
        if ((char *)area->base + area->size > (char *)addr)  /* overlaps range */
        {
            if (area->base >= addr)
            {
                if ((char *)area->base + area->size > (char *)addr + size)
                {
                    /* range overlaps beginning of area only -> shrink area */
                    if (unmap) munmap( area->base, (char *)addr + size - (char *)area->base );
                    area->size -= (char *)addr + size - (char *)area->base;
                    area->base = (char *)addr + size;
                    break;
                }
                else
                {
                    /* range contains the whole area -> remove area completely */
                    ptr = list_next( &reserved_areas, ptr );
                    if (unmap) munmap( area->base, area->size );
                    list_remove( &area->entry );
                    free( area );
                    continue;
                }
            }
            else
            {
                if ((char *)area->base + area->size > (char *)addr + size)
                {
                    /* range is in the middle of area -> split area in two */
                    struct reserved_area *new_area = malloc( sizeof(*new_area) );
                    if (new_area)
                    {
                        new_area->base = (char *)addr + size;
                        new_area->size = (char *)area->base + area->size - (char *)new_area->base;
                        list_add_after( ptr, &new_area->entry );
                    }
                    else size = (char *)area->base + area->size - (char *)addr;
                    area->size = (char *)addr - (char *)area->base;
                    if (unmap) munmap( addr, size );
                    break;
                }
                else
                {
                    /* range overlaps end of area only -> shrink area */
                    if (unmap) munmap( addr, (char *)area->base + area->size - (char *)addr );
                    area->size = (char *)addr - (char *)area->base;
                }
            }
        }
        ptr = list_next( &reserved_areas, ptr );
    }
}
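
/* Example: removing [0x210000,0x220000) from an area covering [0x200000,0x230000)
 * takes the "middle" branch above and leaves two areas, [0x200000,0x210000)
 * and [0x220000,0x230000). */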


/***********************************************************************
 *           wine_mmap_is_in_reserved_area
 *
 * Check if the specified range is included in a reserved area.
 * Returns 1 if range is fully included, 0 if range is not included
 * at all, and -1 if it is only partially included.
 *
 * Note: the reserved areas functions are not reentrant, caller is
 * responsible for proper locking.
 */
int wine_mmap_is_in_reserved_area( void *addr, size_t size )
{
    struct reserved_area *area;
    struct list *ptr;

    LIST_FOR_EACH( ptr, &reserved_areas )
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if (area->base > addr) break;
        if ((char *)area->base + area->size <= (char *)addr) continue;
        /* area must contain block completely */
        if ((char *)area->base + area->size < (char *)addr + size) return -1;
        return 1;
    }
    return 0;
}
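
/* Example: with a single reserved area [0x200000,0x220000), a query for
 * (0x210000, 0x10000) returns 1, (0x210000, 0x20000) returns -1, and
 * (0x230000, 0x1000) returns 0. */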


/***********************************************************************
 *           wine_mmap_enum_reserved_areas
 *
 * Enumerate the list of reserved areas, sorted by addresses.
 * If enum_func returns a non-zero value, enumeration is stopped and the value is returned.
 *
 * Note: the reserved areas functions are not reentrant, caller is
 * responsible for proper locking.
 */
int wine_mmap_enum_reserved_areas( int (*enum_func)(void *base, size_t size, void *arg), void *arg,
                                   int top_down )
{
    int ret = 0;
    struct list *ptr;

    if (top_down)
    {
        for (ptr = reserved_areas.prev; ptr != &reserved_areas; ptr = ptr->prev)
        {
            struct reserved_area *area = LIST_ENTRY( ptr, struct reserved_area, entry );
            if ((ret = enum_func( area->base, area->size, arg ))) break;
        }
    }
    else
    {
        for (ptr = reserved_areas.next; ptr != &reserved_areas; ptr = ptr->next)
        {
            struct reserved_area *area = LIST_ENTRY( ptr, struct reserved_area, entry );
            if ((ret = enum_func( area->base, area->size, arg ))) break;
        }
    }
    return ret;
}
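
/* Hypothetical enum_func callback that dumps all reserved areas:
 *
 *     static int dump_area( void *base, size_t size, void *arg )
 *     {
 *         fprintf( stderr, "%p-%p\n", base, (char *)base + size );
 *         return 0;   // keep enumerating
 *     }
 *
 *     ... wine_mmap_enum_reserved_areas( dump_area, NULL, 0 ); ...
 */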