libs/wine/mmap.c
/*
 * Wine memory mappings support
 *
 * Copyright 2000, 2004 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */
#include "config.h"
#include "wine/port.h"

#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#ifdef HAVE_SYS_WAIT_H
#include <sys/wait.h>
#endif
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif
#ifdef HAVE_STDINT_H
# include <stdint.h>
#endif

#include "wine/library.h"
#include "wine/list.h"
struct reserved_area
{
    struct list entry;
    void       *base;
    size_t      size;
};

static struct list reserved_areas = LIST_INIT(reserved_areas);
#ifndef __APPLE__
static const unsigned int granularity_mask = 0xffff;  /* reserved areas have 64k granularity */
#endif
#ifndef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif
#ifndef MAP_PRIVATE
#define MAP_PRIVATE 0
#endif
#ifndef MAP_ANON
#define MAP_ANON 0
#endif
static inline int get_fdzero(void)
{
    static int fd = -1;

    if (MAP_ANON == 0 && fd == -1)
    {
        if ((fd = open( "/dev/zero", O_RDONLY )) == -1)
        {
            perror( "/dev/zero: open" );
            exit(1);
        }
    }
    return fd;
}
#if (defined(__svr4__) || defined(__NetBSD__)) && !defined(MAP_TRYFIXED)

/***********************************************************************
 *             try_mmap_fixed
 *
 * The purpose of this routine is to emulate the behaviour of
 * the Linux mmap() routine if a non-NULL address is passed,
 * but the MAP_FIXED flag is not set.  Linux in this case tries
 * to place the mapping at the specified address, *unless* the
 * range is already in use.  Solaris, however, completely ignores
 * the address argument in this case.
 *
 * As Wine code occasionally relies on the Linux behaviour, e.g. to
 * be able to map non-relocatable PE executables to their proper
 * start addresses, or to map the DOS memory to 0, this routine
 * emulates the Linux behaviour by checking whether the desired
 * address range is still available, and placing the mapping there
 * using MAP_FIXED if so.
 */
static int try_mmap_fixed (void *addr, size_t len, int prot, int flags,
                           int fildes, off_t off)
{
    char * volatile result = NULL;
    const size_t pagesize = sysconf( _SC_PAGESIZE );
    pid_t pid, wret;

    /* We only try to map to a fixed address if
       addr is non-NULL and properly aligned,
       and MAP_FIXED isn't already specified. */

    if ( !addr )
        return 0;
    if ( (uintptr_t)addr & (pagesize-1) )
        return 0;
    if ( flags & MAP_FIXED )
        return 0;

    /* We use vfork() to freeze all threads of the
       current process.  This allows us to check without
       race condition whether the desired memory range is
       already in use.  Note that because vfork() shares
       the address spaces between parent and child, we
       can actually perform the mapping in the child. */

    if ( (pid = vfork()) == -1 )
    {
        perror("try_mmap_fixed: vfork");
        exit(1);
    }
    if ( pid == 0 )
    {
        int i;
        char vec;

        /* We call mincore() for every page in the desired range.
           If any of these calls succeeds, the page is already
           mapped and we must fail. */
        for ( i = 0; i < len; i += pagesize )
            if ( mincore( (caddr_t)addr + i, pagesize, &vec ) != -1 )
                _exit(1);

        /* Perform the mapping with MAP_FIXED set.  This is safe
           now, as none of the pages is currently in use. */
        result = mmap( addr, len, prot, flags | MAP_FIXED, fildes, off );
        if ( result == addr )
            _exit(0);

        if ( result != (void *) -1 ) /* This should never happen ... */
            munmap( result, len );

        _exit(1);
    }

    /* reap child */
    do {
        wret = waitpid(pid, NULL, 0);
    } while (wret < 0 && errno == EINTR);

    return result == addr;
}
#elif defined(__APPLE__)

#include <mach/mach_init.h>
#include <mach/mach_vm.h>

/*
 * On Darwin, we can use the Mach call mach_vm_map to allocate
 * anonymous memory at the specified address and then, if necessary, use
 * mmap with MAP_FIXED to replace the mapping.
 */
static int try_mmap_fixed (void *addr, size_t len, int prot, int flags,
                           int fildes, off_t off)
{
    mach_vm_address_t result = (mach_vm_address_t)addr;
    int vm_flags = VM_FLAGS_FIXED;

    if (flags & MAP_NOCACHE)
        vm_flags |= VM_FLAGS_NO_CACHE;
    if (!mach_vm_map( mach_task_self(), &result, len, 0, vm_flags, MEMORY_OBJECT_NULL,
                      0, 0, prot, VM_PROT_ALL, VM_INHERIT_COPY ))
    {
        flags |= MAP_FIXED;
        if (((flags & ~(MAP_NORESERVE | MAP_NOCACHE)) == (MAP_ANON | MAP_FIXED | MAP_PRIVATE)) ||
            mmap( (void *)result, len, prot, flags, fildes, off ) != MAP_FAILED)
            return 1;
        mach_vm_deallocate(mach_task_self(),result,len);
    }
    return 0;
}

#endif  /* (__svr4__ || __NetBSD__) && !MAP_TRYFIXED */
/***********************************************************************
 *             wine_anon_mmap
 *
 * Portable wrapper for anonymous mmaps
 */
void *wine_anon_mmap( void *start, size_t size, int prot, int flags )
{
#ifdef MAP_SHARED
    flags &= ~MAP_SHARED;
#endif

    /* Linux EINVAL's on us if we don't pass MAP_PRIVATE to an anon mmap */
    flags |= MAP_PRIVATE | MAP_ANON;

    if (!(flags & MAP_FIXED))
    {
#ifdef MAP_TRYFIXED
        /* If available, this will attempt a fixed mapping in-kernel */
        flags |= MAP_TRYFIXED;
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
        if ( start && mmap( start, size, prot, flags | MAP_FIXED | MAP_EXCL, get_fdzero(), 0 ) != MAP_FAILED )
            return start;
#elif defined(__svr4__) || defined(__NetBSD__) || defined(__APPLE__)
        if ( try_mmap_fixed( start, size, prot, flags, get_fdzero(), 0 ) )
            return start;
#endif

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
        /* Even FreeBSD 5.3 does not properly support NULL here. */
        if( start == NULL ) start = (void *)0x110000;
#endif
    }
    return mmap( start, size, prot, flags, get_fdzero(), 0 );
}
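/* Illustrative usage sketch (not part of this file's code).  Without MAP_FIXED
 * the start address is only a hint, so callers check both for failure and for
 * the mapping having been placed elsewhere:
 *
 *   void *ptr = wine_anon_mmap( (void *)0x110000, 0x10000, PROT_READ | PROT_WRITE, 0 );
 *   if (ptr == (void *)-1) perror( "wine_anon_mmap" );
 *   else if (ptr != (void *)0x110000) munmap( ptr, 0x10000 );
 */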
#ifdef __APPLE__

/***********************************************************************
 *           reserve_area
 *
 * Reserve as much memory as possible in the given area.
 */
static inline void reserve_area( void *addr, void *end )
{
#ifdef __i386__
    static const mach_vm_address_t max_address = VM_MAX_ADDRESS;
#else
    static const mach_vm_address_t max_address = MACH_VM_MAX_ADDRESS;
#endif
    mach_vm_address_t address = (mach_vm_address_t)addr;
    mach_vm_address_t end_address = (mach_vm_address_t)end;

    if (!end_address || max_address < end_address)
        end_address = max_address;

    while (address < end_address)
    {
        mach_vm_address_t hole_address = address;
        kern_return_t ret;
        mach_vm_size_t size;
        vm_region_basic_info_data_64_t info;
        mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
        mach_port_t dummy_object_name = MACH_PORT_NULL;

        /* find the mapped region at or above the current address. */
        ret = mach_vm_region(mach_task_self(), &address, &size, VM_REGION_BASIC_INFO_64,
                             (vm_region_info_t)&info, &count, &dummy_object_name);
        if (ret != KERN_SUCCESS)
        {
            address = max_address;
            size = 0;
        }

        if (end_address < address)
            address = end_address;
        if (hole_address < address)
        {
            /* found a hole, attempt to reserve it. */
            size_t hole_size = address - hole_address;
            mach_vm_address_t alloc_address = hole_address;

            ret = mach_vm_map( mach_task_self(), &alloc_address, hole_size, 0, VM_FLAGS_FIXED,
                               MEMORY_OBJECT_NULL, 0, 0, PROT_NONE, VM_PROT_ALL, VM_INHERIT_COPY );
            if (!ret)
                wine_mmap_add_reserved_area( (void*)hole_address, hole_size );
            else if (ret == KERN_NO_SPACE)
            {
                /* something filled (part of) the hole before we could.
                   go back and look again. */
                address = hole_address;
                continue;
            }
        }
        address += size;
    }
}

#else
/***********************************************************************
 *           mmap_reserve
 *
 * mmap wrapper used for reservations, only maps the specified address
 */
static inline int mmap_reserve( void *addr, size_t size )
{
    void *ptr;
    int flags = MAP_PRIVATE | MAP_ANON | MAP_NORESERVE;

#ifdef MAP_TRYFIXED
    flags |= MAP_TRYFIXED;
#elif defined(__APPLE__)
    return try_mmap_fixed( addr, size, PROT_NONE, flags, get_fdzero(), 0 );
#endif
    ptr = mmap( addr, size, PROT_NONE, flags, get_fdzero(), 0 );
    if (ptr != addr && ptr != (void *)-1) munmap( ptr, size );
    return (ptr == addr);
}
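/* The reservation is made PROT_NONE with MAP_NORESERVE, so the range is taken
 * out of circulation without committing any memory; if the kernel places the
 * mapping anywhere other than the requested address, it is unmapped again and
 * the reservation is reported as failed.
 */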
/***********************************************************************
 *           reserve_area
 *
 * Reserve as much memory as possible in the given area.
 */
static inline void reserve_area( void *addr, void *end )
{
    size_t size = (char *)end - (char *)addr;

#if (defined(__svr4__) || defined(__NetBSD__)) && !defined(MAP_TRYFIXED)
    /* try_mmap_fixed is inefficient when using vfork, so we need a different algorithm here */
    /* we assume no other thread is running at this point */
    size_t i, pagesize = sysconf( _SC_PAGESIZE );
    char vec;

    while (size)
    {
        for (i = 0; i < size; i += pagesize)
            if (mincore( (caddr_t)addr + i, pagesize, &vec ) != -1) break;

        i &= ~granularity_mask;
        if (i && mmap( addr, i, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                       get_fdzero(), 0 ) != (void *)-1)
            wine_mmap_add_reserved_area( addr, i );

        i += granularity_mask + 1;
        if ((char *)addr + i < (char *)addr) break;  /* overflow */
        addr = (char *)addr + i;
        if (addr >= end) break;
        size = (char *)end - (char *)addr;
    }
#else
    if (!size) return;

    if (mmap_reserve( addr, size ))
    {
        wine_mmap_add_reserved_area( addr, size );
        return;
    }
    size = (size / 2) & ~granularity_mask;
    if (size)
    {
        reserve_area( addr, (char *)addr + size );
        reserve_area( (char *)addr + size, end );
    }
#endif
}

#endif  /* __APPLE__ */
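/* Both variants of reserve_area() grab as much of [addr, end) as they can:
 * the Mach version walks the existing regions and reserves the holes between
 * them, while the generic version tries the whole range at once and, on
 * failure, splits it in half (rounded to the 64k granularity) and recurses on
 * each half, so the parts not already occupied still end up reserved.
 */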
#ifdef __i386__
/***********************************************************************
 *           reserve_malloc_space
 *
 * Solaris malloc is not smart enough to obtain space through mmap(), so try to make
 * sure that there is some available sbrk() space before we reserve other things.
 */
static inline void reserve_malloc_space( size_t size )
{
#ifdef __sun
    size_t i, count = size / 1024;
    void **ptrs = malloc( count * sizeof(ptrs[0]) );

    if (!ptrs) return;

    for (i = 0; i < count; i++) if (!(ptrs[i] = malloc( 1024 ))) break;
    if (i--)  /* free everything except the last one */
        while (i) free( ptrs[--i] );
    free( ptrs );
#endif
}
/***********************************************************************
 *           reserve_dos_area
 *
 * Reserve the DOS area (0x00000000-0x00110000).
 */
static inline void reserve_dos_area(void)
{
    const size_t first_page = 0x1000;
    const size_t dos_area_size = 0x110000;
    void *ptr;

    /* first page has to be handled specially */
    ptr = wine_anon_mmap( (void *)first_page, dos_area_size - first_page, PROT_NONE, MAP_NORESERVE );
    if (ptr != (void *)first_page)
    {
        if (ptr != (void *)-1) munmap( ptr, dos_area_size - first_page );
        return;
    }
    /* now add first page with MAP_FIXED */
    wine_anon_mmap( NULL, first_page, PROT_NONE, MAP_NORESERVE|MAP_FIXED );
    wine_mmap_add_reserved_area( NULL, dos_area_size );
}
#endif
/***********************************************************************
 *           mmap_init
 */
void mmap_init(void)
{
#ifdef __i386__
    struct reserved_area *area;
    struct list *ptr;
#ifndef __APPLE__
    char stack;
    char * const stack_ptr = &stack;
#endif
    char *user_space_limit = (char *)0x7ffe0000;

    reserve_malloc_space( 8 * 1024 * 1024 );

    if (!list_head( &reserved_areas ))
    {
        /* if we don't have a preloader, try to reserve some space below 2Gb */
        reserve_area( (void *)0x00110000, (void *)0x40000000 );
    }

    /* check for a reserved area starting at the user space limit */
    /* to avoid wasting time trying to allocate it again */
    LIST_FOR_EACH( ptr, &reserved_areas )
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if ((char *)area->base > user_space_limit) break;
        if ((char *)area->base + area->size > user_space_limit)
        {
            user_space_limit = (char *)area->base + area->size;
            break;
        }
    }

#ifndef __APPLE__
    if (stack_ptr >= user_space_limit)
    {
        char *end = 0;
        char *base = stack_ptr - ((unsigned int)stack_ptr & granularity_mask) - (granularity_mask + 1);
        if (base > user_space_limit) reserve_area( user_space_limit, base );
        base = stack_ptr - ((unsigned int)stack_ptr & granularity_mask) + (granularity_mask + 1);
#if defined(linux) || defined(__FreeBSD__) || defined (__FreeBSD_kernel__) || defined(__DragonFly__)
        /* Heuristic: assume the stack is near the end of the address */
        /* space, this avoids a lot of futile allocation attempts */
        end = (char *)(((unsigned long)base + 0x0fffffff) & 0xf0000000);
#endif
        reserve_area( base, end );
    }
    else
#endif
        reserve_area( user_space_limit, 0 );

    /* reserve the DOS area if not already done */

    ptr = list_head( &reserved_areas );
    if (ptr)
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if (!area->base) return;  /* already reserved */
    }
    reserve_dos_area();

#elif defined(__x86_64__) || defined(__aarch64__)

    if (!list_head( &reserved_areas ))
    {
        /* if we don't have a preloader, try to reserve the space now */
        reserve_area( (void *)0x000000010000, (void *)0x000068000000 );
        reserve_area( (void *)0x00007ff00000, (void *)0x00007fff0000 );
        reserve_area( (void *)0x7ffffe000000, (void *)0x7fffffff0000 );
    }

#endif
}
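/* Note: the address ranges reserved by mmap_init() are normally set up by the
 * preloader before this code runs, in which case reserved_areas is already
 * populated and the fallback reserve_area() calls above are skipped; in both
 * cases the aim is to keep the space that Win32 code expects to allocate from
 * being claimed by system libraries first.
 */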
/***********************************************************************
 *           wine_mmap_add_reserved_area
 *
 * Add an address range to the list of reserved areas.
 * Caller must have made sure the range is not used by anything else.
 *
 * Note: the reserved areas functions are not reentrant, caller is
 * responsible for proper locking.
 */
void wine_mmap_add_reserved_area( void *addr, size_t size )
{
    struct reserved_area *area;
    struct list *ptr;

    if (!((char *)addr + size)) size--;  /* avoid wrap-around */

    LIST_FOR_EACH( ptr, &reserved_areas )
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if (area->base > addr)
        {
            /* try to merge with the next one */
            if ((char *)addr + size == (char *)area->base)
            {
                area->base = addr;
                area->size += size;
                return;
            }
            break;
        }
        else if ((char *)area->base + area->size == (char *)addr)
        {
            /* merge with the previous one */
            area->size += size;

            /* try to merge with the next one too */
            if ((ptr = list_next( &reserved_areas, ptr )))
            {
                struct reserved_area *next = LIST_ENTRY( ptr, struct reserved_area, entry );
                if ((char *)addr + size == (char *)next->base)
                {
                    area->size += next->size;
                    list_remove( &next->entry );
                    free( next );
                }
            }
            return;
        }
    }

    if ((area = malloc( sizeof(*area) )))
    {
        area->base = addr;
        area->size = size;
        list_add_before( ptr, &area->entry );
    }
}
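/* The list stays sorted by base address and adjacent entries are coalesced:
 * for example, adding a 64k range at 0x40000000 while an area already starts
 * at 0x40010000 extends that area downwards instead of creating a new entry.
 */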
/***********************************************************************
 *           wine_mmap_remove_reserved_area
 *
 * Remove an address range from the list of reserved areas.
 * If 'unmap' is non-zero the range is unmapped too.
 *
 * Note: the reserved areas functions are not reentrant, caller is
 * responsible for proper locking.
 */
void wine_mmap_remove_reserved_area( void *addr, size_t size, int unmap )
{
    struct reserved_area *area;
    struct list *ptr;

    if (!((char *)addr + size)) size--;  /* avoid wrap-around */

    ptr = list_head( &reserved_areas );
    /* find the first area covering address */
    while (ptr)
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if ((char *)area->base >= (char *)addr + size) break;  /* outside the range */
        if ((char *)area->base + area->size > (char *)addr)  /* overlaps range */
        {
            if (area->base >= addr)
            {
                if ((char *)area->base + area->size > (char *)addr + size)
                {
                    /* range overlaps beginning of area only -> shrink area */
                    if (unmap) munmap( area->base, (char *)addr + size - (char *)area->base );
                    area->size -= (char *)addr + size - (char *)area->base;
                    area->base = (char *)addr + size;
                    break;
                }
                else
                {
                    /* range contains the whole area -> remove area completely */
                    ptr = list_next( &reserved_areas, ptr );
                    if (unmap) munmap( area->base, area->size );
                    list_remove( &area->entry );
                    free( area );
                    continue;
                }
            }
            else
            {
                if ((char *)area->base + area->size > (char *)addr + size)
                {
                    /* range is in the middle of area -> split area in two */
                    struct reserved_area *new_area = malloc( sizeof(*new_area) );
                    if (new_area)
                    {
                        new_area->base = (char *)addr + size;
                        new_area->size = (char *)area->base + area->size - (char *)new_area->base;
                        list_add_after( ptr, &new_area->entry );
                    }
                    else size = (char *)area->base + area->size - (char *)addr;
                    area->size = (char *)addr - (char *)area->base;
                    if (unmap) munmap( addr, size );
                    break;
                }
                else
                {
                    /* range overlaps end of area only -> shrink area */
                    if (unmap) munmap( addr, (char *)area->base + area->size - (char *)addr );
                    area->size = (char *)addr - (char *)area->base;
                }
            }
        }
        ptr = list_next( &reserved_areas, ptr );
    }
}
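/* Depending on how the range overlaps an existing area, the area is shrunk at
 * the front, shrunk at the back, split in two around the range, or dropped
 * entirely; with 'unmap' set the overlapping pages are also returned to the
 * system with munmap().
 */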
/***********************************************************************
 *           wine_mmap_is_in_reserved_area
 *
 * Check if the specified range is included in a reserved area.
 * Returns 1 if range is fully included, 0 if range is not included
 * at all, and -1 if it is only partially included.
 *
 * Note: the reserved areas functions are not reentrant, caller is
 * responsible for proper locking.
 */
int wine_mmap_is_in_reserved_area( void *addr, size_t size )
{
    struct reserved_area *area;
    struct list *ptr;

    LIST_FOR_EACH( ptr, &reserved_areas )
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if (area->base > addr) break;
        if ((char *)area->base + area->size <= (char *)addr) continue;
        /* area must contain block completely */
        if ((char *)area->base + area->size < (char *)addr + size) return -1;
        return 1;
    }
    return 0;
}
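/* Example: with a single reserved area covering 0x110000-0x40000000, querying
 * (0x200000, 0x1000) returns 1, (0x50000000, 0x1000) returns 0, and
 * (0x3fffe000, 0x4000) returns -1 because the range extends past the end of
 * the area.
 */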
/***********************************************************************
 *           wine_mmap_enum_reserved_areas
 *
 * Enumerate the list of reserved areas, sorted by addresses.
 * If enum_func returns a non-zero value, enumeration is stopped and the value is returned.
 *
 * Note: the reserved areas functions are not reentrant, caller is
 * responsible for proper locking.
 */
int wine_mmap_enum_reserved_areas( int (*enum_func)(void *base, size_t size, void *arg), void *arg,
                                   int top_down )
{
    int ret = 0;
    struct list *ptr;

    if (top_down)
    {
        for (ptr = reserved_areas.prev; ptr != &reserved_areas; ptr = ptr->prev)
        {
            struct reserved_area *area = LIST_ENTRY( ptr, struct reserved_area, entry );
            if ((ret = enum_func( area->base, area->size, arg ))) break;
        }
    }
    else
    {
        for (ptr = reserved_areas.next; ptr != &reserved_areas; ptr = ptr->next)
        {
            struct reserved_area *area = LIST_ENTRY( ptr, struct reserved_area, entry );
            if ((ret = enum_func( area->base, area->size, arg ))) break;
        }
    }
    return ret;
}
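/* Minimal usage sketch (illustrative only; dump_area is a hypothetical
 * callback).  Returning non-zero from the callback stops the enumeration, and
 * top_down = 0 walks the areas in ascending address order:
 *
 *   static int dump_area( void *base, size_t size, void *arg )
 *   {
 *       fprintf( stderr, "reserved %p-%p\n", base, (char *)base + size );
 *       return 0;
 *   }
 *
 *   wine_mmap_enum_reserved_areas( dump_area, NULL, 0 );
 */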