/*
 * Wine memory mappings support
 *
 * Copyright 2000, 2004 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include "config.h"
#include "wine/port.h"

#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#ifdef HAVE_SYS_WAIT_H
#include <sys/wait.h>
#endif
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif
#ifdef HAVE_STDINT_H
# include <stdint.h>
#endif

#include "wine/list.h"
#include "wine/asm.h"

#ifndef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif
#ifndef MAP_PRIVATE
#define MAP_PRIVATE 0
#endif
#ifndef MAP_ANON
#define MAP_ANON 0
#endif
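
/* Return the fd to use for anonymous mappings: /dev/zero on platforms that
 * lack MAP_ANON, -1 (unused, since MAP_ANON is passed to mmap) otherwise. */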
static inline int get_fdzero(void)
{
    static int fd = -1;

    if (MAP_ANON == 0 && fd == -1)
    {
        if ((fd = open( "/dev/zero", O_RDONLY )) == -1)
        {
            perror( "/dev/zero: open" );
            exit(1);
        }
    }
    return fd;
}

#if (defined(__svr4__) || defined(__NetBSD__)) && !defined(MAP_TRYFIXED)
/***********************************************************************
 *           try_mmap_fixed
 *
 * The purpose of this routine is to emulate the behaviour of
 * the Linux mmap() routine if a non-NULL address is passed,
 * but the MAP_FIXED flag is not set.  Linux in this case tries
 * to place the mapping at the specified address, *unless* the
 * range is already in use.  Solaris, however, completely ignores
 * the address argument in this case.
 *
 * As Wine code occasionally relies on the Linux behaviour, e.g. to
 * be able to map non-relocatable PE executables to their proper
 * start addresses, or to map the DOS memory to 0, this routine
 * emulates the Linux behaviour by checking whether the desired
 * address range is still available, and placing the mapping there
 * using MAP_FIXED if so.
 */
static int try_mmap_fixed (void *addr, size_t len, int prot, int flags,
                           int fildes, off_t off)
{
    char * volatile result = NULL;
    const size_t pagesize = sysconf( _SC_PAGESIZE );
    pid_t pid, wret;

    /* We only try to map to a fixed address if
       addr is non-NULL and properly aligned,
       and MAP_FIXED isn't already specified. */

    if ( !addr )
        return 0;
    if ( (uintptr_t)addr & (pagesize-1) )
        return 0;
    if ( flags & MAP_FIXED )
        return 0;

    /* We use vfork() to freeze all threads of the
       current process.  This allows us to check without
       race condition whether the desired memory range is
       already in use.  Note that because vfork() shares
       the address spaces between parent and child, we
       can actually perform the mapping in the child. */

    if ( (pid = vfork()) == -1 )
    {
        perror("try_mmap_fixed: vfork");
        exit(1);
    }
    if ( pid == 0 )
    {
        int i;
        char vec;

        /* We call mincore() for every page in the desired range.
           If any of these calls succeeds, the page is already
           mapped and we must fail. */
        for ( i = 0; i < len; i += pagesize )
            if ( mincore( (caddr_t)addr + i, pagesize, &vec ) != -1 )
                _exit(1);

        /* Perform the mapping with MAP_FIXED set.  This is safe
           now, as none of the pages is currently in use. */
        result = mmap( addr, len, prot, flags | MAP_FIXED, fildes, off );
        if ( result == addr )
            _exit(0);

        if ( result != (void *) -1 ) /* This should never happen ... */
            munmap( result, len );

        _exit(1);
    }

    /* reap child */
    do {
        wret = waitpid(pid, NULL, 0);
    } while (wret < 0 && errno == EINTR);

    return result == addr;
}

#elif defined(__APPLE__)

#include <mach/mach_init.h>
#include <mach/mach_vm.h>

/*
 * On Darwin, we can use the Mach call mach_vm_map to allocate
 * anonymous memory at the specified address and then, if necessary, use
 * mmap with MAP_FIXED to replace the mapping.
 */
static int try_mmap_fixed (void *addr, size_t len, int prot, int flags,
                           int fildes, off_t off)
{
    mach_vm_address_t result = (mach_vm_address_t)addr;
    int vm_flags = VM_FLAGS_FIXED;

    if (flags & MAP_NOCACHE)
        vm_flags |= VM_FLAGS_NO_CACHE;
    if (!mach_vm_map( mach_task_self(), &result, len, 0, vm_flags, MEMORY_OBJECT_NULL,
                      0, 0, prot, VM_PROT_ALL, VM_INHERIT_COPY ))
    {
        flags |= MAP_FIXED;
        if (((flags & ~(MAP_NORESERVE | MAP_NOCACHE)) == (MAP_ANON | MAP_FIXED | MAP_PRIVATE)) ||
            mmap( (void *)result, len, prot, flags, fildes, off ) != MAP_FAILED)
            return 1;
        mach_vm_deallocate(mach_task_self(),result,len);
    }
    return 0;
}

#endif  /* (__svr4__ || __NetBSD__) && !MAP_TRYFIXED */

/***********************************************************************
 *           wine_anon_mmap
 *
 * Portable wrapper for anonymous mmaps
 */
void *wine_anon_mmap( void *start, size_t size, int prot, int flags )
{
#ifdef MAP_SHARED
    flags &= ~MAP_SHARED;
#endif

    /* Linux EINVAL's on us if we don't pass MAP_PRIVATE to an anon mmap */
    flags |= MAP_PRIVATE | MAP_ANON;

    if (!(flags & MAP_FIXED))
    {
#ifdef MAP_TRYFIXED
        /* If available, this will attempt a fixed mapping in-kernel */
        flags |= MAP_TRYFIXED;
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
        if ( start && mmap( start, size, prot, flags | MAP_FIXED | MAP_EXCL, get_fdzero(), 0 ) != MAP_FAILED )
            return start;
#elif defined(__svr4__) || defined(__NetBSD__) || defined(__APPLE__)
        if ( try_mmap_fixed( start, size, prot, flags, get_fdzero(), 0 ) )
            return start;
#endif
    }

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
    /* Even FreeBSD 5.3 does not properly support NULL here. */
    if( start == NULL ) start = (void *)0x110000;
#endif

    return mmap( start, size, prot, flags, get_fdzero(), 0 );
}
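
/* Illustrative use (hypothetical values, not part of this file): unless
 * MAP_FIXED is passed, the start address is only a hint, so callers must
 * check where the mapping actually landed:
 *
 *     void *ptr = wine_anon_mmap( (void *)0x00110000, 0x10000,
 *                                 PROT_READ | PROT_WRITE, 0 );
 *     if (ptr == MAP_FAILED) { ... mapping failed entirely ... }
 *     else if (ptr != (void *)0x00110000) { ... hint was not honoured ... }
 */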

#ifdef __ASM_OBSOLETE

struct reserved_area
{
    struct list entry;
    void *base;
    size_t size;
};

static struct list reserved_areas = LIST_INIT(reserved_areas);
#ifndef __APPLE__
static const unsigned int granularity_mask = 0xffff;  /* reserved areas have 64k granularity */
#endif

void wine_mmap_add_reserved_area_obsolete( void *addr, size_t size );

#ifdef __APPLE__

/***********************************************************************
 *           reserve_area
 *
 * Reserve as much memory as possible in the given area.
 */
static inline void reserve_area( void *addr, void *end )
{
#ifdef __i386__
    static const mach_vm_address_t max_address = VM_MAX_ADDRESS;
#else
    static const mach_vm_address_t max_address = MACH_VM_MAX_ADDRESS;
#endif
    mach_vm_address_t address = (mach_vm_address_t)addr;
    mach_vm_address_t end_address = (mach_vm_address_t)end;

    if (!end_address || max_address < end_address)
        end_address = max_address;

    while (address < end_address)
    {
        mach_vm_address_t hole_address = address;
        kern_return_t ret;
        mach_vm_size_t size;
        vm_region_basic_info_data_64_t info;
        mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
        mach_port_t dummy_object_name = MACH_PORT_NULL;

        /* find the mapped region at or above the current address. */
        ret = mach_vm_region(mach_task_self(), &address, &size, VM_REGION_BASIC_INFO_64,
                             (vm_region_info_t)&info, &count, &dummy_object_name);
        if (ret != KERN_SUCCESS)
        {
            address = max_address;
            size = 0;
        }

        if (end_address < address)
            address = end_address;
        if (hole_address < address)
        {
            /* found a hole, attempt to reserve it. */
            size_t hole_size = address - hole_address;
            mach_vm_address_t alloc_address = hole_address;

            ret = mach_vm_map( mach_task_self(), &alloc_address, hole_size, 0, VM_FLAGS_FIXED,
                               MEMORY_OBJECT_NULL, 0, 0, PROT_NONE, VM_PROT_ALL, VM_INHERIT_COPY );
            if (!ret)
                wine_mmap_add_reserved_area_obsolete( (void*)hole_address, hole_size );
            else if (ret == KERN_NO_SPACE)
            {
                /* something filled (part of) the hole before we could.
                   go back and look again. */
                address = hole_address;
                continue;
            }
        }
        address += size;
    }
}

#else

/***********************************************************************
 *           mmap_reserve
 *
 * mmap wrapper used for reservations, only maps the specified address
 */
static inline int mmap_reserve( void *addr, size_t size )
{
    void *ptr;
    int flags = MAP_PRIVATE | MAP_ANON | MAP_NORESERVE;

#ifdef MAP_TRYFIXED
    flags |= MAP_TRYFIXED;
#elif defined(__APPLE__)
    return try_mmap_fixed( addr, size, PROT_NONE, flags, get_fdzero(), 0 );
#endif
    ptr = mmap( addr, size, PROT_NONE, flags, get_fdzero(), 0 );
    if (ptr != addr && ptr != (void *)-1) munmap( ptr, size );
    return (ptr == addr);
}
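
/* Note: mmap_reserve() succeeds only if the kernel placed the PROT_NONE
 * mapping exactly at the requested address; any other placement is
 * immediately unmapped and reported as failure. */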

/***********************************************************************
 *           reserve_area
 *
 * Reserve as much memory as possible in the given area.
 */
static inline void reserve_area( void *addr, void *end )
{
    size_t size = (char *)end - (char *)addr;

#if (defined(__svr4__) || defined(__NetBSD__)) && !defined(MAP_TRYFIXED)
    /* try_mmap_fixed is inefficient when using vfork, so we need a different algorithm here */
    /* we assume no other thread is running at this point */
    size_t i, pagesize = sysconf( _SC_PAGESIZE );
    char vec;

    while (size)
    {
        for (i = 0; i < size; i += pagesize)
            if (mincore( (caddr_t)addr + i, pagesize, &vec ) != -1) break;

        i &= ~granularity_mask;
        if (i && mmap( addr, i, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                       get_fdzero(), 0 ) != (void *)-1)
            wine_mmap_add_reserved_area_obsolete( addr, i );

        i += granularity_mask + 1;
        if ((char *)addr + i < (char *)addr) break;  /* overflow */
        addr = (char *)addr + i;
        if (addr >= end) break;
        size = (char *)end - (char *)addr;
    }
#else
    if (!size) return;

    if (mmap_reserve( addr, size ))
    {
        wine_mmap_add_reserved_area_obsolete( addr, size );
        return;
    }
    size = (size / 2) & ~granularity_mask;
    if (size)
    {
        reserve_area( addr, (char *)addr + size );
        reserve_area( (char *)addr + size, end );
    }
#endif
}

#endif  /* __APPLE__ */

#ifdef __i386__
/***********************************************************************
 *           reserve_malloc_space
 *
 * Solaris malloc is not smart enough to obtain space through mmap(), so try to make
 * sure that there is some available sbrk() space before we reserve other things.
 */
static inline void reserve_malloc_space( size_t size )
{
#ifdef __sun
    size_t i, count = size / 1024;
    void **ptrs = malloc( count * sizeof(ptrs[0]) );

    if (!ptrs) return;

    for (i = 0; i < count; i++) if (!(ptrs[i] = malloc( 1024 ))) break;
    if (i--)  /* free everything except the last one */
        while (i) free( ptrs[--i] );
    free( ptrs );
#endif
}

/***********************************************************************
 *           reserve_dos_area
 *
 * Reserve the DOS area (0x00000000-0x00110000).
 */
static inline void reserve_dos_area(void)
{
    const size_t first_page = 0x1000;
    const size_t dos_area_size = 0x110000;
    void *ptr;

    /* first page has to be handled specially */
    ptr = wine_anon_mmap( (void *)first_page, dos_area_size - first_page, PROT_NONE, MAP_NORESERVE );
    if (ptr != (void *)first_page)
    {
        if (ptr != (void *)-1) munmap( ptr, dos_area_size - first_page );
        return;
    }
    /* now add first page with MAP_FIXED */
    wine_anon_mmap( NULL, first_page, PROT_NONE, MAP_NORESERVE|MAP_FIXED );
    wine_mmap_add_reserved_area_obsolete( NULL, dos_area_size );
}
#endif

/***********************************************************************
 *           mmap_init
 */
void mmap_init(void)
{
#ifdef __i386__
    struct reserved_area *area;
    struct list *ptr;
#ifndef __APPLE__
    char stack;
    char * const stack_ptr = &stack;
#endif
    char *user_space_limit = (char *)0x7ffe0000;

    reserve_malloc_space( 8 * 1024 * 1024 );

    if (!list_head( &reserved_areas ))
    {
        /* if we don't have a preloader, try to reserve some space below 2Gb */
        reserve_area( (void *)0x00110000, (void *)0x40000000 );
    }

    /* check for a reserved area starting at the user space limit */
    /* to avoid wasting time trying to allocate it again */
    LIST_FOR_EACH( ptr, &reserved_areas )
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if ((char *)area->base > user_space_limit) break;
        if ((char *)area->base + area->size > user_space_limit)
        {
            user_space_limit = (char *)area->base + area->size;
            break;
        }
    }

#ifndef __APPLE__
    if (stack_ptr >= user_space_limit)
    {
        char *end = 0;
        char *base = stack_ptr - ((unsigned int)stack_ptr & granularity_mask) - (granularity_mask + 1);
        if (base > user_space_limit) reserve_area( user_space_limit, base );
        base = stack_ptr - ((unsigned int)stack_ptr & granularity_mask) + (granularity_mask + 1);
#if defined(linux) || defined(__FreeBSD__) || defined (__FreeBSD_kernel__) || defined(__DragonFly__)
        /* Heuristic: assume the stack is near the end of the address */
        /* space, this avoids a lot of futile allocation attempts */
        end = (char *)(((unsigned long)base + 0x0fffffff) & 0xf0000000);
#endif
        reserve_area( base, end );
    }
    else
#endif
        reserve_area( user_space_limit, 0 );

    /* reserve the DOS area if not already done */

    ptr = list_head( &reserved_areas );
    if (ptr)
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if (!area->base) return;  /* already reserved */
    }
    reserve_dos_area();

#elif defined(__x86_64__) || defined(__aarch64__)

    if (!list_head( &reserved_areas ))
    {
        /* if we don't have a preloader, try to reserve the space now */
        reserve_area( (void *)0x000000010000, (void *)0x000068000000 );
        reserve_area( (void *)0x00007ff00000, (void *)0x00007fff0000 );
        reserve_area( (void *)0x7ffffe000000, (void *)0x7fffffff0000 );
    }

#endif
}

/***********************************************************************
 *           wine_mmap_add_reserved_area
 *
 * Add an address range to the list of reserved areas.
 * Caller must have made sure the range is not used by anything else.
 *
 * Note: the reserved areas functions are not reentrant, caller is
 * responsible for proper locking.
 */
void wine_mmap_add_reserved_area_obsolete( void *addr, size_t size )
{
    struct reserved_area *area;
    struct list *ptr;

    if (!((char *)addr + size)) size--;  /* avoid wrap-around */

    LIST_FOR_EACH( ptr, &reserved_areas )
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if (area->base > addr)
        {
            /* try to merge with the next one */
            if ((char *)addr + size == (char *)area->base)
            {
                area->base = addr;
                area->size += size;
                return;
            }
            break;
        }
        else if ((char *)area->base + area->size == (char *)addr)
        {
            /* merge with the previous one */
            area->size += size;

            /* try to merge with the next one too */
            if ((ptr = list_next( &reserved_areas, ptr )))
            {
                struct reserved_area *next = LIST_ENTRY( ptr, struct reserved_area, entry );
                if ((char *)addr + size == (char *)next->base)
                {
                    area->size += next->size;
                    list_remove( &next->entry );
                    free( next );
                }
            }
            return;
        }
    }

    if ((area = malloc( sizeof(*area) )))
    {
        area->base = addr;
        area->size = size;
        list_add_before( ptr, &area->entry );
    }
}
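
/* Illustrative merge behaviour (hypothetical addresses): adding the range
 * 0x20000000-0x20010000 and then 0x20010000-0x20020000 leaves a single
 * entry 0x20000000-0x20020000 in the list, since adjacent areas are
 * coalesced in both directions. */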

/***********************************************************************
 *           wine_mmap_remove_reserved_area
 *
 * Remove an address range from the list of reserved areas.
 * If 'unmap' is non-zero the range is unmapped too.
 *
 * Note: the reserved areas functions are not reentrant, caller is
 * responsible for proper locking.
 */
void wine_mmap_remove_reserved_area_obsolete( void *addr, size_t size, int unmap )
{
    struct reserved_area *area;
    struct list *ptr;

    if (!((char *)addr + size)) size--;  /* avoid wrap-around */

    ptr = list_head( &reserved_areas );
    /* find the first area covering address */
    while (ptr)
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if ((char *)area->base >= (char *)addr + size) break;  /* outside the range */
        if ((char *)area->base + area->size > (char *)addr)  /* overlaps range */
        {
            if (area->base >= addr)
            {
                if ((char *)area->base + area->size > (char *)addr + size)
                {
                    /* range overlaps beginning of area only -> shrink area */
                    if (unmap) munmap( area->base, (char *)addr + size - (char *)area->base );
                    area->size -= (char *)addr + size - (char *)area->base;
                    area->base = (char *)addr + size;
                    break;
                }
                else
                {
                    /* range contains the whole area -> remove area completely */
                    ptr = list_next( &reserved_areas, ptr );
                    if (unmap) munmap( area->base, area->size );
                    list_remove( &area->entry );
                    free( area );
                    continue;
                }
            }
            else
            {
                if ((char *)area->base + area->size > (char *)addr + size)
                {
                    /* range is in the middle of area -> split area in two */
                    struct reserved_area *new_area = malloc( sizeof(*new_area) );
                    if (new_area)
                    {
                        new_area->base = (char *)addr + size;
                        new_area->size = (char *)area->base + area->size - (char *)new_area->base;
                        list_add_after( ptr, &new_area->entry );
                    }
                    else size = (char *)area->base + area->size - (char *)addr;
                    area->size = (char *)addr - (char *)area->base;
                    if (unmap) munmap( addr, size );
                    break;
                }
                else
                {
                    /* range overlaps end of area only -> shrink area */
                    if (unmap) munmap( addr, (char *)area->base + area->size - (char *)addr );
                    area->size = (char *)addr - (char *)area->base;
                }
            }
        }
        ptr = list_next( &reserved_areas, ptr );
    }
}

/***********************************************************************
 *           wine_mmap_is_in_reserved_area
 *
 * Check if the specified range is included in a reserved area.
 * Returns 1 if range is fully included, 0 if range is not included
 * at all, and -1 if it is only partially included.
 *
 * Note: the reserved areas functions are not reentrant, caller is
 * responsible for proper locking.
 */
int wine_mmap_is_in_reserved_area_obsolete( void *addr, size_t size )
{
    struct reserved_area *area;
    struct list *ptr;

    LIST_FOR_EACH( ptr, &reserved_areas )
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if (area->base > addr) break;
        if ((char *)area->base + area->size <= (char *)addr) continue;
        /* area must contain block completely */
        if ((char *)area->base + area->size < (char *)addr + size) return -1;
        return 1;
    }
    return 0;
}

/***********************************************************************
 *           wine_mmap_enum_reserved_areas
 *
 * Enumerate the list of reserved areas, sorted by addresses.
 * If enum_func returns a non-zero value, enumeration is stopped and the value is returned.
 *
 * Note: the reserved areas functions are not reentrant, caller is
 * responsible for proper locking.
 */
int wine_mmap_enum_reserved_areas_obsolete( int (*enum_func)(void *base, size_t size, void *arg), void *arg,
                                            int top_down )
{
    int ret = 0;
    struct list *ptr;

    if (top_down)
    {
        for (ptr = reserved_areas.prev; ptr != &reserved_areas; ptr = ptr->prev)
        {
            struct reserved_area *area = LIST_ENTRY( ptr, struct reserved_area, entry );
            if ((ret = enum_func( area->base, area->size, arg ))) break;
        }
    }
    else
    {
        for (ptr = reserved_areas.next; ptr != &reserved_areas; ptr = ptr->next)
        {
            struct reserved_area *area = LIST_ENTRY( ptr, struct reserved_area, entry );
            if ((ret = enum_func( area->base, area->size, arg ))) break;
        }
    }
    return ret;
}
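
/* Illustrative callback (hypothetical, not part of this file):
 *
 *     static int dump_area( void *base, size_t size, void *arg )
 *     {
 *         fprintf( stderr, "reserved %p-%p\n", base, (char *)base + size );
 *         return 0;  // keep enumerating
 *     }
 *
 *     wine_mmap_enum_reserved_areas_obsolete( dump_area, NULL, 0 );
 */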

__ASM_OBSOLETE(wine_mmap_add_reserved_area);
__ASM_OBSOLETE(wine_mmap_remove_reserved_area);
__ASM_OBSOLETE(wine_mmap_is_in_reserved_area);
__ASM_OBSOLETE(wine_mmap_enum_reserved_areas);

#endif /* __ASM_OBSOLETE */