Stub implementations for AddMonitorW, DeletePrinterDriverW,
[wine/multimedia.git] / libs / wine / mmap.c
bloba252882188665d3989749222465b219a99ce57be
/*
 * Wine memory mappings support
 *
 * Copyright 2000, 2004 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "config.h"
#include "wine/port.h"

#include <assert.h>
#include <ctype.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif
#ifdef HAVE_STDINT_H
# include <stdint.h>
#endif

#include "wine/library.h"
#include "wine/list.h"
43 struct reserved_area
45 struct list entry;
46 void *base;
47 size_t size;
50 static struct list reserved_areas = LIST_INIT(reserved_areas);
51 static const int granularity_mask = 0xffff; /* reserved areas have 64k granularity */
53 #ifndef MAP_NORESERVE
54 #define MAP_NORESERVE 0
55 #endif
57 #ifndef HAVE_MMAP
58 static inline int munmap( void *ptr, size_t size ) { return 0; }
59 #endif
#if (defined(__svr4__) || defined(__NetBSD__)) && !defined(MAP_TRYFIXED)
/***********************************************************************
 *             try_mmap_fixed
 *
 * The purpose of this routine is to emulate the behaviour of
 * the Linux mmap() routine if a non-NULL address is passed,
 * but the MAP_FIXED flag is not set.  Linux in this case tries
 * to place the mapping at the specified address, *unless* the
 * range is already in use.  Solaris, however, completely ignores
 * the address argument in this case.
 *
 * As Wine code occasionally relies on the Linux behaviour, e.g. to
 * be able to map non-relocateable PE executables to their proper
 * start addresses, or to map the DOS memory to 0, this routine
 * emulates the Linux behaviour by checking whether the desired
 * address range is still available, and placing the mapping there
 * using MAP_FIXED if so.
 *
 * Returns 1 if the mapping was placed exactly at 'addr', 0 otherwise.
 */
static int try_mmap_fixed (void *addr, size_t len, int prot, int flags,
                           int fildes, off_t off)
{
    /* 'volatile' because the child of vfork() writes it in the parent's
       address space; prevents it from being cached in a register */
    char * volatile result = NULL;
    int pagesize = getpagesize();
    pid_t pid;

    /* We only try to map to a fixed address if
       addr is non-NULL and properly aligned,
       and MAP_FIXED isn't already specified. */

    if ( !addr )
        return 0;
    if ( (uintptr_t)addr & (pagesize-1) )
        return 0;
    if ( flags & MAP_FIXED )
        return 0;

    /* We use vfork() to freeze all threads of the
       current process.  This allows us to check without
       race condition whether the desired memory range is
       already in use.  Note that because vfork() shares
       the address spaces between parent and child, we
       can actually perform the mapping in the child. */

    if ( (pid = vfork()) == -1 )
    {
        perror("try_mmap_fixed: vfork");
        exit(1);
    }
    if ( pid == 0 )
    {
        size_t i;  /* size_t, not int: 'len' is a size_t and may exceed INT_MAX */
        char vec;

        /* We call mincore() for every page in the desired range.
           If any of these calls succeeds, the page is already
           mapped and we must fail. */
        for ( i = 0; i < len; i += pagesize )
            if ( mincore( (caddr_t)addr + i, pagesize, &vec ) != -1 )
                _exit(1);

        /* Perform the mapping with MAP_FIXED set.  This is safe
           now, as none of the pages is currently in use. */
        result = mmap( addr, len, prot, flags | MAP_FIXED, fildes, off );
        if ( result == addr )
            _exit(0);

        if ( result != (void *) -1 ) /* This should never happen ... */
            munmap( result, len );

        _exit(1);
    }

    /* vfork() lets the parent continue only after the child
       has exited.  Furthermore, Wine sets SIGCHLD to SIG_IGN,
       so we don't need to wait for the child. */

    return result == addr;
}
#endif  /* (__svr4__ || __NetBSD__) && !MAP_TRYFIXED */
/***********************************************************************
 *		wine_anon_mmap
 *
 * Portable wrapper for anonymous mmaps.
 * Returns the mapped address, or (void *)-1 (MAP_FAILED) on failure or
 * when the platform has no mmap() at all.
 */
void *wine_anon_mmap( void *start, size_t size, int prot, int flags )
{
#ifdef HAVE_MMAP
    static int fdzero = -1;

#ifdef MAP_ANON
    flags |= MAP_ANON;
#else
    /* no MAP_ANON: emulate anonymous mappings by mapping /dev/zero */
    if (fdzero == -1)
    {
        if ((fdzero = open( "/dev/zero", O_RDONLY )) == -1)
        {
            perror( "/dev/zero: open" );
            exit(1);
        }
    }
#endif  /* MAP_ANON */

    /* anonymous mappings must not be shared */
#ifdef MAP_SHARED
    flags &= ~MAP_SHARED;
#endif

    /* Linux EINVAL's on us if we don't pass MAP_PRIVATE to an anon mmap */
#ifdef MAP_PRIVATE
    flags |= MAP_PRIVATE;
#endif

    if (!(flags & MAP_FIXED))
    {
#ifdef __FreeBSD__
        /* Even FreeBSD 5.3 does not properly support NULL here. */
        if( start == NULL ) start = (void *)0x110000;
#endif

#ifdef MAP_TRYFIXED
        /* If available, this will attempt a fixed mapping in-kernel */
        flags |= MAP_TRYFIXED;
#elif defined(__svr4__) || defined(__NetBSD__)
        /* emulate Linux-style "try at this address" semantics */
        if ( try_mmap_fixed( start, size, prot, flags, fdzero, 0 ) )
            return start;
#endif
    }
    return mmap( start, size, prot, flags, fdzero, 0 );
#else
    return (void *)-1;
#endif
}
197 #ifdef HAVE_MMAP
199 /***********************************************************************
200 * reserve_area
202 * Reserve as much memory as possible in the given area.
203 * FIXME: probably needs a different algorithm for Solaris
205 static void reserve_area( void *addr, void *end )
207 void *ptr;
208 size_t size = (char *)end - (char *)addr;
210 if (!size) return;
212 if ((ptr = wine_anon_mmap( addr, size, PROT_NONE, MAP_NORESERVE )) != (void *)-1)
214 if (ptr == addr)
216 wine_mmap_add_reserved_area( addr, size );
217 return;
219 else munmap( ptr, size );
221 if (size > granularity_mask + 1)
223 size_t new_size = (size / 2) & ~granularity_mask;
224 reserve_area( addr, (char *)addr + new_size );
225 reserve_area( (char *)addr + new_size, end );
230 /***********************************************************************
231 * reserve_dos_area
233 * Reserve the DOS area (0x00000000-0x00110000).
235 static void reserve_dos_area(void)
237 const size_t page_size = getpagesize();
238 const size_t dos_area_size = 0x110000;
239 void *ptr;
241 /* first page has to be handled specially */
242 ptr = wine_anon_mmap( (void *)page_size, dos_area_size - page_size, PROT_NONE, MAP_NORESERVE );
243 if (ptr != (void *)page_size)
245 if (ptr != (void *)-1) munmap( ptr, dos_area_size - page_size );
246 return;
248 /* now add first page with MAP_FIXED */
249 wine_anon_mmap( NULL, page_size, PROT_NONE, MAP_NORESERVE|MAP_FIXED );
250 wine_mmap_add_reserved_area( NULL, dos_area_size );
254 /***********************************************************************
255 * mmap_init
257 void mmap_init(void)
259 struct reserved_area *area;
260 struct list *ptr;
261 #if defined(__i386__) && !defined(__FreeBSD__) /* commented out until FreeBSD gets fixed */
262 char stack;
263 char * const stack_ptr = &stack;
264 char *user_space_limit = (char *)0x80000000;
266 /* check for a reserved area starting at the user space limit */
267 /* to avoid wasting time trying to allocate it again */
268 LIST_FOR_EACH( ptr, &reserved_areas )
270 area = LIST_ENTRY( ptr, struct reserved_area, entry );
271 if ((char *)area->base > user_space_limit) break;
272 if ((char *)area->base + area->size > user_space_limit)
274 user_space_limit = (char *)area->base + area->size;
275 break;
279 if (stack_ptr >= user_space_limit)
281 char *base = stack_ptr - ((unsigned int)stack_ptr & granularity_mask) - (granularity_mask + 1);
282 if (base > user_space_limit) reserve_area( user_space_limit, base );
283 base = stack_ptr - ((unsigned int)stack_ptr & granularity_mask) + (granularity_mask + 1);
284 #ifdef linux
285 /* Linux heuristic: if the stack top is at c0000000, assume the address space */
286 /* ends there, this avoids a lot of futile allocation attempts */
287 if (base != (char *)0xc0000000)
288 #endif
289 reserve_area( base, 0 );
291 else reserve_area( user_space_limit, 0 );
292 #endif /* __i386__ */
294 /* reserve the DOS area if not already done */
296 ptr = list_head( &reserved_areas );
297 if (ptr)
299 area = LIST_ENTRY( ptr, struct reserved_area, entry );
300 if (!area->base) return; /* already reserved */
302 reserve_dos_area();
305 #else /* HAVE_MMAP */
307 void mmap_init(void)
311 #endif
313 /***********************************************************************
314 * wine_mmap_add_reserved_area
316 * Add an address range to the list of reserved areas.
317 * Caller must have made sure the range is not used by anything else.
319 * Note: the reserved areas functions are not reentrant, caller is
320 * responsible for proper locking.
322 void wine_mmap_add_reserved_area( void *addr, size_t size )
324 struct reserved_area *area;
325 struct list *ptr;
327 if (!((char *)addr + size)) size--; /* avoid wrap-around */
329 LIST_FOR_EACH( ptr, &reserved_areas )
331 area = LIST_ENTRY( ptr, struct reserved_area, entry );
332 if (area->base > addr)
334 /* try to merge with the next one */
335 if ((char *)addr + size == (char *)area->base)
337 area->base = addr;
338 area->size += size;
339 return;
341 break;
343 else if ((char *)area->base + area->size == (char *)addr)
345 /* merge with the previous one */
346 area->size += size;
348 /* try to merge with the next one too */
349 if ((ptr = list_next( &reserved_areas, ptr )))
351 struct reserved_area *next = LIST_ENTRY( ptr, struct reserved_area, entry );
352 if ((char *)addr + size == (char *)next->base)
354 area->size += next->size;
355 list_remove( &next->entry );
356 free( next );
359 return;
363 if ((area = malloc( sizeof(*area) )))
365 area->base = addr;
366 area->size = size;
367 list_add_before( ptr, &area->entry );
372 /***********************************************************************
373 * wine_mmap_remove_reserved_area
375 * Remove an address range from the list of reserved areas.
376 * If 'unmap' is non-zero the range is unmapped too.
378 * Note: the reserved areas functions are not reentrant, caller is
379 * responsible for proper locking.
381 void wine_mmap_remove_reserved_area( void *addr, size_t size, int unmap )
383 struct reserved_area *area;
384 struct list *ptr;
386 if (!((char *)addr + size)) size--; /* avoid wrap-around */
388 ptr = list_head( &reserved_areas );
389 /* find the first area covering address */
390 while (ptr)
392 area = LIST_ENTRY( ptr, struct reserved_area, entry );
393 if ((char *)area->base >= (char *)addr + size) break; /* outside the range */
394 if ((char *)area->base + area->size > (char *)addr) /* overlaps range */
396 if (area->base >= addr)
398 if ((char *)area->base + area->size > (char *)addr + size)
400 /* range overlaps beginning of area only -> shrink area */
401 if (unmap) munmap( area->base, (char *)addr + size - (char *)area->base );
402 area->size -= (char *)addr + size - (char *)area->base;
403 area->base = (char *)addr + size;
404 break;
406 else
408 /* range contains the whole area -> remove area completely */
409 ptr = list_next( &reserved_areas, ptr );
410 if (unmap) munmap( area->base, area->size );
411 list_remove( &area->entry );
412 free( area );
413 continue;
416 else
418 if ((char *)area->base + area->size > (char *)addr + size)
420 /* range is in the middle of area -> split area in two */
421 struct reserved_area *new_area = malloc( sizeof(*new_area) );
422 if (new_area)
424 new_area->base = (char *)addr + size;
425 new_area->size = (char *)area->base + area->size - (char *)new_area->base;
426 list_add_after( ptr, &new_area->entry );
428 else size = (char *)area->base + area->size - (char *)addr;
429 area->size = (char *)addr - (char *)area->base;
430 if (unmap) munmap( addr, size );
431 break;
433 else
435 /* range overlaps end of area only -> shrink area */
436 if (unmap) munmap( addr, (char *)area->base + area->size - (char *)addr );
437 area->size = (char *)addr - (char *)area->base;
441 ptr = list_next( &reserved_areas, ptr );
446 /***********************************************************************
447 * wine_mmap_is_in_reserved_area
449 * Check if the specified range is included in a reserved area.
450 * Returns 1 if range is fully included, 0 if range is not included
451 * at all, and -1 if it is only partially included.
453 * Note: the reserved areas functions are not reentrant, caller is
454 * responsible for proper locking.
456 int wine_mmap_is_in_reserved_area( void *addr, size_t size )
458 struct reserved_area *area;
459 struct list *ptr;
461 LIST_FOR_EACH( ptr, &reserved_areas )
463 area = LIST_ENTRY( ptr, struct reserved_area, entry );
464 if (area->base > addr) break;
465 if ((char *)area->base + area->size <= (char *)addr) continue;
466 /* area must contain block completely */
467 if ((char *)area->base + area->size < (char *)addr + size) return -1;
468 return 1;
470 return 0;