/*
 * Wine memory mappings support
 *
 * Copyright 2000, 2004 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */
#include "config.h"
#include "wine/port.h"

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#ifdef HAVE_SYS_WAIT_H
#include <sys/wait.h>
#endif
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif
#ifdef HAVE_STDINT_H
# include <stdint.h>
#endif

#include "wine/library.h"
#include "wine/list.h"
#ifndef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif
#ifndef MAP_PRIVATE
#define MAP_PRIVATE 0
#endif
#ifndef MAP_ANON
#define MAP_ANON 0
#endif
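/***********************************************************************
 *           get_fdzero
 *
 * Return the file descriptor to use for anonymous mappings: -1 when
 * MAP_ANON is available, otherwise a lazily opened descriptor for
 * /dev/zero (on platforms where MAP_ANON is defined as 0 above).
 */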
static inline int get_fdzero(void)
{
    static int fd = -1;

    if (MAP_ANON == 0 && fd == -1)
    {
        if ((fd = open( "/dev/zero", O_RDONLY )) == -1)
        {
            perror( "/dev/zero: open" );
            exit(1);
        }
    }
    return fd;
}
#if (defined(__svr4__) || defined(__NetBSD__)) && !defined(MAP_TRYFIXED)
/***********************************************************************
 *           try_mmap_fixed
 *
 * The purpose of this routine is to emulate the behaviour of
 * the Linux mmap() routine if a non-NULL address is passed,
 * but the MAP_FIXED flag is not set.  Linux in this case tries
 * to place the mapping at the specified address, *unless* the
 * range is already in use.  Solaris, however, completely ignores
 * the address argument in this case.
 *
 * As Wine code occasionally relies on the Linux behaviour, e.g. to
 * be able to map non-relocatable PE executables to their proper
 * start addresses, or to map the DOS memory to 0, this routine
 * emulates the Linux behaviour by checking whether the desired
 * address range is still available, and placing the mapping there
 * using MAP_FIXED if so.
 */
static int try_mmap_fixed (void *addr, size_t len, int prot, int flags,
                           int fildes, off_t off)
{
    char * volatile result = NULL;
    const size_t pagesize = sysconf( _SC_PAGESIZE );
    pid_t pid, wret;

    /* We only try to map to a fixed address if
       addr is non-NULL and properly aligned,
       and MAP_FIXED isn't already specified. */

    if ( !addr )
        return 0;
    if ( (uintptr_t)addr & (pagesize-1) )
        return 0;
    if ( flags & MAP_FIXED )
        return 0;

    /* We use vfork() to freeze all threads of the
       current process.  This allows us to check without
       race condition whether the desired memory range is
       already in use.  Note that because vfork() shares
       the address spaces between parent and child, we
       can actually perform the mapping in the child. */
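    /* (Because the address space is shared, the child's store to the
       volatile "result" below is still visible to the parent after
       waitpid() returns.) */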
    if ( (pid = vfork()) == -1 )
    {
        perror("try_mmap_fixed: vfork");
        exit(1);
    }
    if ( pid == 0 )
    {
        size_t i;
        char vec;

        /* We call mincore() for every page in the desired range.
           If any of these calls succeeds, the page is already
           mapped and we must fail. */
        for ( i = 0; i < len; i += pagesize )
            if ( mincore( (caddr_t)addr + i, pagesize, &vec ) != -1 )
                _exit(1);

        /* Perform the mapping with MAP_FIXED set.  This is safe
           now, as none of the pages is currently in use. */
        result = mmap( addr, len, prot, flags | MAP_FIXED, fildes, off );
        if ( result == addr )
            _exit(0);

        if ( result != (void *) -1 ) /* This should never happen ... */
            munmap( result, len );

        _exit(1);
    }

    /* reap child */
    do {
        wret = waitpid(pid, NULL, 0);
    } while (wret < 0 && errno == EINTR);

    return result == addr;
}
#elif defined(__APPLE__)

#include <mach/mach_init.h>
#include <mach/mach_vm.h>

/*
 * On Darwin, we can use the Mach call mach_vm_map to allocate
 * anonymous memory at the specified address and then, if necessary, use
 * mmap with MAP_FIXED to replace the mapping.
 */
static int try_mmap_fixed (void *addr, size_t len, int prot, int flags,
                           int fildes, off_t off)
{
    mach_vm_address_t result = (mach_vm_address_t)addr;
    int vm_flags = VM_FLAGS_FIXED;

    if (flags & MAP_NOCACHE)
        vm_flags |= VM_FLAGS_NO_CACHE;
    if (!mach_vm_map( mach_task_self(), &result, len, 0, vm_flags, MEMORY_OBJECT_NULL,
                      0, 0, prot, VM_PROT_ALL, VM_INHERIT_COPY ))
    {
        flags |= MAP_FIXED;
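        /* If the request is itself a plain anonymous private fixed mapping,
           the Mach allocation above already satisfies it; otherwise replace
           it with the real mapping using mmap() with MAP_FIXED. */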
        if (((flags & ~(MAP_NORESERVE | MAP_NOCACHE)) == (MAP_ANON | MAP_FIXED | MAP_PRIVATE)) ||
            mmap( (void *)result, len, prot, flags, fildes, off ) != MAP_FAILED)
            return 1;
        mach_vm_deallocate(mach_task_self(),result,len);
    }
    return 0;
}
#endif  /* (__svr4__ || __NetBSD__) && !MAP_TRYFIXED */
/***********************************************************************
 *           wine_anon_mmap
 *
 * Portable wrapper for anonymous mmaps
 */
void *wine_anon_mmap( void *start, size_t size, int prot, int flags )
{
#ifdef MAP_SHARED
    flags &= ~MAP_SHARED;
#endif

    /* Linux EINVAL's on us if we don't pass MAP_PRIVATE to an anon mmap */
    flags |= MAP_PRIVATE | MAP_ANON;

    if (!(flags & MAP_FIXED))
    {
#ifdef MAP_TRYFIXED
        /* If available, this will attempt a fixed mapping in-kernel */
        flags |= MAP_TRYFIXED;
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
        if ( start && mmap( start, size, prot, flags | MAP_FIXED | MAP_EXCL,
                            get_fdzero(), 0 ) != MAP_FAILED )
            return start;
#elif defined(__svr4__) || defined(__NetBSD__) || defined(__APPLE__)
        if ( try_mmap_fixed( start, size, prot, flags, get_fdzero(), 0 ) )
            return start;
#endif

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
        /* Even FreeBSD 5.3 does not properly support NULL here. */
        if( start == NULL ) start = (void *)0x110000;
#endif
    }
    return mmap( start, size, prot, flags, get_fdzero(), 0 );
}
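/* Example (illustrative only, not part of the file): reserve 64k of
 * inaccessible address space, preferring but not requiring the given
 * location:
 *
 *     void *ptr = wine_anon_mmap( (void *)0x110000, 0x10000,
 *                                 PROT_NONE, MAP_NORESERVE );
 *     if (ptr == (void *)-1) perror( "wine_anon_mmap" );
 *
 * Without MAP_FIXED the kernel may place the mapping elsewhere, so the
 * returned address must always be checked. */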
#ifdef __ASM_OBSOLETE

struct reserved_area
{
    struct list entry;
    void       *base;
    size_t      size;
};

static struct list reserved_areas = LIST_INIT(reserved_areas);

static const unsigned int granularity_mask = 0xffff;  /* reserved areas have 64k granularity */
void wine_mmap_add_reserved_area_obsolete( void *addr, size_t size );
#ifdef __APPLE__

/***********************************************************************
 *           reserve_area
 *
 * Reserve as much memory as possible in the given area.
 */
static inline void reserve_area( void *addr, void *end )
{
#ifdef __i386__
    static const mach_vm_address_t max_address = VM_MAX_ADDRESS;
#else
    static const mach_vm_address_t max_address = MACH_VM_MAX_ADDRESS;
#endif
    mach_vm_address_t address = (mach_vm_address_t)addr;
    mach_vm_address_t end_address = (mach_vm_address_t)end;

    if (!end_address || max_address < end_address)
        end_address = max_address;
    while (address < end_address)
    {
        mach_vm_address_t hole_address = address;
        kern_return_t ret;
        mach_vm_size_t size;
        vm_region_basic_info_data_64_t info;
        mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
        mach_port_t dummy_object_name = MACH_PORT_NULL;

        /* find the mapped region at or above the current address. */
        ret = mach_vm_region(mach_task_self(), &address, &size, VM_REGION_BASIC_INFO_64,
                             (vm_region_info_t)&info, &count, &dummy_object_name);
        if (ret != KERN_SUCCESS)
        {
            address = max_address;
            size = 0;
        }

        if (end_address < address)
            address = end_address;
        if (hole_address < address)
        {
            /* found a hole, attempt to reserve it. */
            size_t hole_size = address - hole_address;
            mach_vm_address_t alloc_address = hole_address;

            ret = mach_vm_map( mach_task_self(), &alloc_address, hole_size, 0, VM_FLAGS_FIXED,
                               MEMORY_OBJECT_NULL, 0, 0, PROT_NONE, VM_PROT_ALL, VM_INHERIT_COPY );
            if (!ret)
                wine_mmap_add_reserved_area_obsolete( (void*)hole_address, hole_size );
            else if (ret == KERN_NO_SPACE)
            {
                /* something filled (part of) the hole before we could.
                   go back and look again. */
                address = hole_address;
            }
        }

        address += size;
    }
}

#else
/***********************************************************************
 *           mmap_reserve
 *
 * mmap wrapper used for reservations, only maps the specified address
 */
static inline int mmap_reserve( void *addr, size_t size )
{
    void *ptr;
    int flags = MAP_PRIVATE | MAP_ANON | MAP_NORESERVE;

#ifdef MAP_TRYFIXED
    flags |= MAP_TRYFIXED;
#elif defined(__APPLE__)
    return try_mmap_fixed( addr, size, PROT_NONE, flags, get_fdzero(), 0 );
#endif
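    /* Map without MAP_FIXED: the kernel may place the mapping elsewhere,
       in which case it is unmapped again below, so a reservation attempt
       never clobbers an existing mapping. */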
    ptr = mmap( addr, size, PROT_NONE, flags, get_fdzero(), 0 );
    if (ptr != addr && ptr != (void *)-1) munmap( ptr, size );
    return (ptr == addr);
}
/***********************************************************************
 *           reserve_area
 *
 * Reserve as much memory as possible in the given area.
 */
static inline void reserve_area( void *addr, void *end )
{
    size_t size = (char *)end - (char *)addr;
#if (defined(__svr4__) || defined(__NetBSD__)) && !defined(MAP_TRYFIXED)
    /* try_mmap_fixed is inefficient when using vfork, so we need a different algorithm here */
    /* we assume no other thread is running at this point */
    size_t i, pagesize = sysconf( _SC_PAGESIZE );
    char vec;

    while (size)
    {
        for (i = 0; i < size; i += pagesize)
            if (mincore( (caddr_t)addr + i, pagesize, &vec ) != -1) break;

        i &= ~granularity_mask;
        if (i && mmap( addr, i, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                       get_fdzero(), 0 ) != (void *)-1)
            wine_mmap_add_reserved_area_obsolete( addr, i );

        i += granularity_mask + 1;
        if ((char *)addr + i < (char *)addr) break;  /* overflow */
        addr = (char *)addr + i;
        if (addr >= end) break;
        size = (char *)end - (char *)addr;
    }
#else
    if (!size) return;

    if (mmap_reserve( addr, size ))
    {
        wine_mmap_add_reserved_area_obsolete( addr, size );
        return;
    }
    size = (size / 2) & ~granularity_mask;
    if (size)
    {
        reserve_area( addr, (char *)addr + size );
        reserve_area( (char *)addr + size, end );
    }
#endif
}
#endif  /* __APPLE__ */
/***********************************************************************
 *           reserve_malloc_space
 *
 * Solaris malloc is not smart enough to obtain space through mmap(), so try to make
 * sure that there is some available sbrk() space before we reserve other things.
 */
static inline void reserve_malloc_space( size_t size )
{
#ifdef __sun
    size_t i, count = size / 1024;
    void **ptrs = malloc( count * sizeof(ptrs[0]) );

    if (!ptrs) return;

    for (i = 0; i < count; i++) if (!(ptrs[i] = malloc( 1024 ))) break;
    if (i--)  /* free everything except the last one */
        while (i) free( ptrs[--i] );
    free( ptrs );
#endif
}
/***********************************************************************
 *           reserve_dos_area
 *
 * Reserve the DOS area (0x00000000-0x00110000).
 */
static inline void reserve_dos_area(void)
{
    const size_t first_page = 0x1000;
    const size_t dos_area_size = 0x110000;
    void *ptr;

    /* first page has to be handled specially: a NULL start address means
       "map anywhere", so page zero can only be requested with MAP_FIXED */
    ptr = wine_anon_mmap( (void *)first_page, dos_area_size - first_page, PROT_NONE, MAP_NORESERVE );
    if (ptr != (void *)first_page)
    {
        if (ptr != (void *)-1) munmap( ptr, dos_area_size - first_page );
        return;
    }
    /* now add first page with MAP_FIXED */
    wine_anon_mmap( NULL, first_page, PROT_NONE, MAP_NORESERVE|MAP_FIXED );
    wine_mmap_add_reserved_area_obsolete( NULL, dos_area_size );
}
/***********************************************************************
 *           mmap_init
 */
void mmap_init(void)
{
#ifdef __i386__
    struct reserved_area *area;
    struct list *ptr;
#ifndef __APPLE__
    char stack;
    char * const stack_ptr = &stack;
#endif
    char *user_space_limit = (char *)0x7ffe0000;

    reserve_malloc_space( 8 * 1024 * 1024 );
    if (!list_head( &reserved_areas ))
    {
        /* if we don't have a preloader, try to reserve some space below 2Gb */
        reserve_area( (void *)0x00110000, (void *)0x40000000 );
    }

    /* check for a reserved area starting at the user space limit */
    /* to avoid wasting time trying to allocate it again */
    LIST_FOR_EACH( ptr, &reserved_areas )
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if ((char *)area->base > user_space_limit) break;
        if ((char *)area->base + area->size > user_space_limit)
        {
            user_space_limit = (char *)area->base + area->size;
            break;
        }
    }
#ifndef __APPLE__
    if (stack_ptr >= user_space_limit)
    {
        char *end = 0;
        char *base = stack_ptr - ((unsigned int)stack_ptr & granularity_mask) - (granularity_mask + 1);
        if (base > user_space_limit) reserve_area( user_space_limit, base );
        base = stack_ptr - ((unsigned int)stack_ptr & granularity_mask) + (granularity_mask + 1);
#if defined(linux) || defined(__FreeBSD__) || defined (__FreeBSD_kernel__) || defined(__DragonFly__)
        /* Heuristic: assume the stack is near the end of the address */
        /* space, this avoids a lot of futile allocation attempts */
        end = (char *)(((unsigned long)base + 0x0fffffff) & 0xf0000000);
#endif
        reserve_area( base, end );
    }
    else
#endif
        reserve_area( user_space_limit, 0 );
    /* reserve the DOS area if not already done */

    ptr = list_head( &reserved_areas );
    if (ptr)
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if (!area->base) return;  /* already reserved */
    }
    reserve_dos_area();
#elif defined(__x86_64__) || defined(__aarch64__)

    if (!list_head( &reserved_areas ))
    {
        /* if we don't have a preloader, try to reserve the space now */
        reserve_area( (void *)0x000000010000, (void *)0x000068000000 );
        reserve_area( (void *)0x00007ff00000, (void *)0x00007fff0000 );
        reserve_area( (void *)0x7ffffe000000, (void *)0x7fffffff0000 );
    }

#endif
}
/***********************************************************************
 *           wine_mmap_add_reserved_area
 *
 * Add an address range to the list of reserved areas.
 * Caller must have made sure the range is not used by anything else.
 *
 * Note: the reserved areas functions are not reentrant, caller is
 * responsible for proper locking.
 */
void wine_mmap_add_reserved_area_obsolete( void *addr, size_t size )
{
    struct reserved_area *area;
    struct list *ptr;

    if (!((char *)addr + size)) size--;  /* avoid wrap-around */

    LIST_FOR_EACH( ptr, &reserved_areas )
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if (area->base > addr)
        {
            /* try to merge with the next one */
            if ((char *)addr + size == (char *)area->base)
            {
                area->base = addr;
                area->size += size;
                return;
            }
            break;
        }
        else if ((char *)area->base + area->size == (char *)addr)
        {
            /* merge with the previous one */
            area->size += size;

            /* try to merge with the next one too */
            if ((ptr = list_next( &reserved_areas, ptr )))
            {
                struct reserved_area *next = LIST_ENTRY( ptr, struct reserved_area, entry );
                if ((char *)addr + size == (char *)next->base)
                {
                    area->size += next->size;
                    list_remove( &next->entry );
                    free( next );
                }
            }
            return;
        }
    }

    if ((area = malloc( sizeof(*area) )))
    {
        area->base = addr;
        area->size = size;
        list_add_before( ptr, &area->entry );
    }
}
/***********************************************************************
 *           wine_mmap_remove_reserved_area
 *
 * Remove an address range from the list of reserved areas.
 * If 'unmap' is non-zero the range is unmapped too.
 *
 * Note: the reserved areas functions are not reentrant, caller is
 * responsible for proper locking.
 */
void wine_mmap_remove_reserved_area_obsolete( void *addr, size_t size, int unmap )
{
    struct reserved_area *area;
    struct list *ptr;
    if (!((char *)addr + size)) size--;  /* avoid wrap-around */

    ptr = list_head( &reserved_areas );
    /* find the first area covering address */
    while (ptr)
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if ((char *)area->base >= (char *)addr + size) break;  /* outside the range */
        if ((char *)area->base + area->size > (char *)addr)  /* overlaps range */
        {
            if (area->base >= addr)
            {
                if ((char *)area->base + area->size > (char *)addr + size)
                {
                    /* range overlaps beginning of area only -> shrink area */
                    if (unmap) munmap( area->base, (char *)addr + size - (char *)area->base );
                    area->size -= (char *)addr + size - (char *)area->base;
                    area->base = (char *)addr + size;
                    break;
                }
                else
                {
                    /* range contains the whole area -> remove area completely */
                    ptr = list_next( &reserved_areas, ptr );
                    if (unmap) munmap( area->base, area->size );
                    list_remove( &area->entry );
                    free( area );
                    continue;
                }
            }
            else
            {
                if ((char *)area->base + area->size > (char *)addr + size)
                {
                    /* range is in the middle of area -> split area in two */
                    struct reserved_area *new_area = malloc( sizeof(*new_area) );
                    if (new_area)
                    {
                        new_area->base = (char *)addr + size;
                        new_area->size = (char *)area->base + area->size - (char *)new_area->base;
                        list_add_after( ptr, &new_area->entry );
                    }
                    else size = (char *)area->base + area->size - (char *)addr;
                    area->size = (char *)addr - (char *)area->base;
                    if (unmap) munmap( addr, size );
                    break;
                }
                else
                {
                    /* range overlaps end of area only -> shrink area */
                    if (unmap) munmap( addr, (char *)area->base + area->size - (char *)addr );
                    area->size = (char *)addr - (char *)area->base;
                }
            }
        }
        ptr = list_next( &reserved_areas, ptr );
    }
}
/***********************************************************************
 *           wine_mmap_is_in_reserved_area
 *
 * Check if the specified range is included in a reserved area.
 * Returns 1 if range is fully included, 0 if range is not included
 * at all, and -1 if it is only partially included.
 *
 * Note: the reserved areas functions are not reentrant, caller is
 * responsible for proper locking.
 */
int wine_mmap_is_in_reserved_area_obsolete( void *addr, size_t size )
{
    struct reserved_area *area;
    struct list *ptr;
    LIST_FOR_EACH( ptr, &reserved_areas )
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if (area->base > addr) break;
        if ((char *)area->base + area->size <= (char *)addr) continue;
        /* area must contain block completely */
        if ((char *)area->base + area->size < (char *)addr + size) return -1;
        return 1;
    }
    return 0;
}
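/* Example (illustrative only): callers typically treat anything other than
 * full inclusion as unusable:
 *
 *     if (wine_mmap_is_in_reserved_area_obsolete( addr, size ) != 1)
 *         return 0;
 */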
/***********************************************************************
 *           wine_mmap_enum_reserved_areas
 *
 * Enumerate the list of reserved areas, sorted by addresses.
 * If enum_func returns a non-zero value, enumeration is stopped and the value is returned.
 *
 * Note: the reserved areas functions are not reentrant, caller is
 * responsible for proper locking.
 */
int wine_mmap_enum_reserved_areas_obsolete( int (*enum_func)(void *base, size_t size, void *arg), void *arg,
                                            int top_down )
{
    int ret = 0;
    struct list *ptr;

    if (top_down)
    {
        for (ptr = reserved_areas.prev; ptr != &reserved_areas; ptr = ptr->prev)
        {
            struct reserved_area *area = LIST_ENTRY( ptr, struct reserved_area, entry );
            if ((ret = enum_func( area->base, area->size, arg ))) break;
        }
    }
    else
    {
        for (ptr = reserved_areas.next; ptr != &reserved_areas; ptr = ptr->next)
        {
            struct reserved_area *area = LIST_ENTRY( ptr, struct reserved_area, entry );
            if ((ret = enum_func( area->base, area->size, arg ))) break;
        }
    }
    return ret;
}
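/* Example (illustrative only): print all reserved areas in ascending
 * address order; a non-zero return from the callback stops the walk:
 *
 *     static int dump_area( void *base, size_t size, void *arg )
 *     {
 *         fprintf( stderr, "%p-%p\n", base, (char *)base + size );
 *         return 0;
 *     }
 *
 *     wine_mmap_enum_reserved_areas_obsolete( dump_area, NULL, 0 );
 */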
__ASM_OBSOLETE(wine_mmap_add_reserved_area);
__ASM_OBSOLETE(wine_mmap_remove_reserved_area);
__ASM_OBSOLETE(wine_mmap_is_in_reserved_area);
__ASM_OBSOLETE(wine_mmap_enum_reserved_areas);
#endif  /* __ASM_OBSOLETE */