ntdll: Always use MAP_FAILED as mmap()'s error value.
[wine.git] / dlls / ntdll / unix / virtual.c
blob e0346a28ea39b05f8a3c397d5e78324c68c5ad30
/*
 * Win32 virtual memory functions
 *
 * Copyright 1997, 2002, 2020 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */
#if 0
#pragma makedep unix
#endif

#include "config.h"
#include "wine/port.h"

#include <assert.h>
#include <errno.h>
#include <stdarg.h>
#include <stdio.h>
#include <signal.h>
#include <sys/types.h>
#ifdef HAVE_SYS_SOCKET_H
# include <sys/socket.h>
#endif
#ifdef HAVE_SYS_STAT_H
# include <sys/stat.h>
#endif
#ifdef HAVE_SYS_MMAN_H
# include <sys/mman.h>
#endif
#ifdef HAVE_SYS_SYSINFO_H
# include <sys/sysinfo.h>
#endif
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif
#ifdef HAVE_VALGRIND_VALGRIND_H
# include <valgrind/valgrind.h>
#endif
#if defined(__APPLE__)
# include <mach/mach_init.h>
# include <mach/mach_vm.h>
#endif

#include "ntstatus.h"
#define WIN32_NO_STATUS
#include "windef.h"
#include "winnt.h"
#include "winternl.h"
#include "wine/exception.h"
#include "wine/list.h"
#include "wine/rbtree.h"
#include "unix_private.h"
#include "wine/debug.h"

WINE_DEFAULT_DEBUG_CHANNEL(virtual);
WINE_DECLARE_DEBUG_CHANNEL(module);
struct preload_info
{
    void  *addr;
    size_t size;
};

struct reserved_area
{
    struct list entry;
    void  *base;
    size_t size;
};

static struct list reserved_areas = LIST_INIT(reserved_areas);

struct file_view
{
    struct wine_rb_entry entry;  /* entry in global view tree */
    void         *base;          /* base address */
    size_t        size;          /* size in bytes */
    unsigned int  protect;       /* protection for all pages at allocation time and SEC_* flags */
};

#define __EXCEPT_SYSCALL __EXCEPT_HANDLER(0)

/* per-page protection flags */
#define VPROT_READ       0x01
#define VPROT_WRITE      0x02
#define VPROT_EXEC       0x04
#define VPROT_WRITECOPY  0x08
#define VPROT_GUARD      0x10
#define VPROT_COMMITTED  0x20
#define VPROT_WRITEWATCH 0x40
/* per-mapping protection flags */
#define VPROT_SYSTEM     0x0200  /* system view (underlying mmap not under our control) */

/* Conversion from VPROT_* to Win32 flags */
static const BYTE VIRTUAL_Win32Flags[16] =
{
    PAGE_NOACCESS,              /* 0 */
    PAGE_READONLY,              /* READ */
    PAGE_READWRITE,             /* WRITE */
    PAGE_READWRITE,             /* READ | WRITE */
    PAGE_EXECUTE,               /* EXEC */
    PAGE_EXECUTE_READ,          /* READ | EXEC */
    PAGE_EXECUTE_READWRITE,     /* WRITE | EXEC */
    PAGE_EXECUTE_READWRITE,     /* READ | WRITE | EXEC */
    PAGE_WRITECOPY,             /* WRITECOPY */
    PAGE_WRITECOPY,             /* READ | WRITECOPY */
    PAGE_WRITECOPY,             /* WRITE | WRITECOPY */
    PAGE_WRITECOPY,             /* READ | WRITE | WRITECOPY */
    PAGE_EXECUTE_WRITECOPY,     /* EXEC | WRITECOPY */
    PAGE_EXECUTE_WRITECOPY,     /* READ | EXEC | WRITECOPY */
    PAGE_EXECUTE_WRITECOPY,     /* WRITE | EXEC | WRITECOPY */
    PAGE_EXECUTE_WRITECOPY      /* READ | WRITE | EXEC | WRITECOPY */
};

static struct wine_rb_tree views_tree;
static pthread_mutex_t virtual_mutex;

static const BOOL is_win64 = (sizeof(void *) > sizeof(int));
static const UINT page_shift = 12;
static const UINT_PTR page_mask = 0xfff;
static const UINT_PTR granularity_mask = 0xffff;

/* Note: these are Windows limits, you cannot change them. */
#ifdef __i386__
static void *address_space_start = (void *)0x110000;  /* keep DOS area clear */
#else
static void *address_space_start = (void *)0x10000;
#endif

#ifdef __aarch64__
static void *address_space_limit = (void *)0xffffffff0000;  /* top of the total available address space */
#elif defined(_WIN64)
static void *address_space_limit = (void *)0x7fffffff0000;
#else
static void *address_space_limit = (void *)0xc0000000;
#endif

#ifdef _WIN64
static void *user_space_limit    = (void *)0x7fffffff0000;  /* top of the user address space */
static void *working_set_limit   = (void *)0x7fffffff0000;  /* top of the current working set */
#else
static void *user_space_limit    = (void *)0x7fff0000;
static void *working_set_limit   = (void *)0x7fff0000;
#endif

struct _KUSER_SHARED_DATA *user_shared_data = (void *)0x7ffe0000;

/* TEB allocation blocks */
static void *teb_block;
static void **next_free_teb;
static int teb_block_pos;
static struct list teb_list = LIST_INIT( teb_list );

#define ROUND_ADDR(addr,mask) ((void *)((UINT_PTR)(addr) & ~(UINT_PTR)(mask)))
#define ROUND_SIZE(addr,size) (((SIZE_T)(size) + ((UINT_PTR)(addr) & page_mask) + page_mask) & ~page_mask)
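
/* Illustrative example of the rounding macros above: with page_mask == 0xfff,
 * ROUND_ADDR( 0x12345678, page_mask ) yields 0x12345000, and
 * ROUND_SIZE( 0x12345678, 0x100 ) yields 0x1000, since the 0x100-byte range
 * starting at offset 0x678 within its page still fits in a single page. */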
#define VIRTUAL_DEBUG_DUMP_VIEW(view) do { if (TRACE_ON(virtual)) dump_view(view); } while (0)

#ifndef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif

#ifdef _WIN64  /* on 64-bit the page protection bytes use a 2-level table */
static const size_t pages_vprot_shift = 20;
static const size_t pages_vprot_mask = (1 << 20) - 1;
static size_t pages_vprot_size;
static BYTE **pages_vprot;
#else  /* on 32-bit we use a simple array with one byte per page */
static BYTE *pages_vprot;
#endif

static struct file_view *view_block_start, *view_block_end, *next_free_view;
static const size_t view_block_size = 0x100000;
static void *preload_reserve_start;
static void *preload_reserve_end;
static BOOL force_exec_prot;  /* whether to force PROT_EXEC on all PROT_READ mmaps */

struct range_entry
{
    void *base;
    void *end;
};

static struct range_entry *free_ranges;
static struct range_entry *free_ranges_end;


static inline BOOL is_inside_signal_stack( void *ptr )
{
    return ((char *)ptr >= (char *)get_signal_stack() &&
            (char *)ptr < (char *)get_signal_stack() + signal_stack_size);
}

static inline BOOL is_beyond_limit( const void *addr, size_t size, const void *limit )
{
    return (addr >= limit || (const char *)addr + size > (const char *)limit);
}

/* mmap() anonymous memory at a fixed address */
void *anon_mmap_fixed( void *start, size_t size, int prot, int flags )
{
    return mmap( start, size, prot, MAP_PRIVATE | MAP_ANON | MAP_FIXED | flags, -1, 0 );
}

/* allocate anonymous mmap() memory at any address */
void *anon_mmap_alloc( size_t size, int prot )
{
    return mmap( NULL, size, prot, MAP_PRIVATE | MAP_ANON, -1, 0 );
}
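
/* Both helpers return mmap()'s result unchanged, so failure is MAP_FAILED
 * ((void *)-1), never NULL: NULL is a valid mapping address (e.g. when the
 * first page of the DOS area gets mapped at address zero).  Callers must
 * therefore check like this:
 *
 *     void *ptr = anon_mmap_alloc( size, PROT_READ | PROT_WRITE );
 *     if (ptr == MAP_FAILED) return STATUS_NO_MEMORY;
 */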
static void mmap_add_reserved_area( void *addr, SIZE_T size )
{
    struct reserved_area *area;
    struct list *ptr;

    if (!((char *)addr + size)) size--;  /* avoid wrap-around */

    LIST_FOR_EACH( ptr, &reserved_areas )
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if (area->base > addr)
        {
            /* try to merge with the next one */
            if ((char *)addr + size == (char *)area->base)
            {
                area->base = addr;
                area->size += size;
                return;
            }
            break;
        }
        else if ((char *)area->base + area->size == (char *)addr)
        {
            /* merge with the previous one */
            area->size += size;

            /* try to merge with the next one too */
            if ((ptr = list_next( &reserved_areas, ptr )))
            {
                struct reserved_area *next = LIST_ENTRY( ptr, struct reserved_area, entry );
                if ((char *)addr + size == (char *)next->base)
                {
                    area->size += next->size;
                    list_remove( &next->entry );
                    free( next );
                }
            }
            return;
        }
    }

    if ((area = malloc( sizeof(*area) )))
    {
        area->base = addr;
        area->size = size;
        list_add_before( ptr, &area->entry );
    }
}
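
/* Worked example of the merging above: with reserved areas
 * [0x10000,0x20000) and [0x30000,0x40000) in the list, adding
 * [0x20000,0x30000) first merges into the previous area, then absorbs the
 * next one, leaving a single area [0x10000,0x40000). */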
static void mmap_remove_reserved_area( void *addr, SIZE_T size )
{
    struct reserved_area *area;
    struct list *ptr;

    if (!((char *)addr + size)) size--;  /* avoid wrap-around */

    ptr = list_head( &reserved_areas );
    /* find the first area covering address */
    while (ptr)
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if ((char *)area->base >= (char *)addr + size) break;  /* outside the range */
        if ((char *)area->base + area->size > (char *)addr)  /* overlaps range */
        {
            if (area->base >= addr)
            {
                if ((char *)area->base + area->size > (char *)addr + size)
                {
                    /* range overlaps beginning of area only -> shrink area */
                    area->size -= (char *)addr + size - (char *)area->base;
                    area->base = (char *)addr + size;
                    break;
                }
                else
                {
                    /* range contains the whole area -> remove area completely */
                    ptr = list_next( &reserved_areas, ptr );
                    list_remove( &area->entry );
                    free( area );
                    continue;
                }
            }
            else
            {
                if ((char *)area->base + area->size > (char *)addr + size)
                {
                    /* range is in the middle of area -> split area in two */
                    struct reserved_area *new_area = malloc( sizeof(*new_area) );
                    if (new_area)
                    {
                        new_area->base = (char *)addr + size;
                        new_area->size = (char *)area->base + area->size - (char *)new_area->base;
                        list_add_after( ptr, &new_area->entry );
                    }
                    else size = (char *)area->base + area->size - (char *)addr;
                    area->size = (char *)addr - (char *)area->base;
                    break;
                }
                else
                {
                    /* range overlaps end of area only -> shrink area */
                    area->size = (char *)addr - (char *)area->base;
                }
            }
        }
        ptr = list_next( &reserved_areas, ptr );
    }
}

static int mmap_is_in_reserved_area( void *addr, SIZE_T size )
{
    struct reserved_area *area;
    struct list *ptr;

    LIST_FOR_EACH( ptr, &reserved_areas )
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if (area->base > addr) break;
        if ((char *)area->base + area->size <= (char *)addr) continue;
        /* area must contain block completely */
        if ((char *)area->base + area->size < (char *)addr + size) return -1;
        return 1;
    }
    return 0;
}

static int mmap_enum_reserved_areas( int (CDECL *enum_func)(void *base, SIZE_T size, void *arg),
                                     void *arg, int top_down )
{
    int ret = 0;
    struct list *ptr;

    if (top_down)
    {
        for (ptr = reserved_areas.prev; ptr != &reserved_areas; ptr = ptr->prev)
        {
            struct reserved_area *area = LIST_ENTRY( ptr, struct reserved_area, entry );
            if ((ret = enum_func( area->base, area->size, arg ))) break;
        }
    }
    else
    {
        for (ptr = reserved_areas.next; ptr != &reserved_areas; ptr = ptr->next)
        {
            struct reserved_area *area = LIST_ENTRY( ptr, struct reserved_area, entry );
            if ((ret = enum_func( area->base, area->size, arg ))) break;
        }
    }
    return ret;
}

static void *anon_mmap_tryfixed( void *start, size_t size, int prot, int flags )
{
    void *ptr;

#ifdef MAP_FIXED_NOREPLACE
    ptr = mmap( start, size, prot, MAP_FIXED_NOREPLACE | MAP_PRIVATE | MAP_ANON | flags, -1, 0 );
#elif defined(MAP_TRYFIXED)
    ptr = mmap( start, size, prot, MAP_TRYFIXED | MAP_PRIVATE | MAP_ANON | flags, -1, 0 );
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
    ptr = mmap( start, size, prot, MAP_FIXED | MAP_EXCL | MAP_PRIVATE | MAP_ANON | flags, -1, 0 );
    if (ptr == MAP_FAILED && errno == EINVAL) errno = EEXIST;
#elif defined(__APPLE__)
    mach_vm_address_t result = (mach_vm_address_t)start;
    kern_return_t ret = mach_vm_map( mach_task_self(), &result, size, 0, VM_FLAGS_FIXED,
                                     MEMORY_OBJECT_NULL, 0, 0, prot, VM_PROT_ALL, VM_INHERIT_COPY );

    if (!ret)
    {
        if ((ptr = anon_mmap_fixed( start, size, prot, flags )) == MAP_FAILED)
            mach_vm_deallocate( mach_task_self(), result, size );
    }
    else
    {
        errno = (ret == KERN_NO_SPACE ? EEXIST : ENOMEM);
        ptr = MAP_FAILED;
    }
#else
    ptr = mmap( start, size, prot, MAP_PRIVATE | MAP_ANON | flags, -1, 0 );
#endif
    if (ptr != MAP_FAILED && ptr != start)
    {
        if (is_beyond_limit( ptr, size, user_space_limit ))
        {
            anon_mmap_fixed( ptr, size, PROT_NONE, MAP_NORESERVE );
            mmap_add_reserved_area( ptr, size );
        }
        else munmap( ptr, size );
        ptr = MAP_FAILED;
        errno = EEXIST;
    }
    return ptr;
}
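
/* Whichever platform-specific path is taken above, the contract is uniform:
 * on success the requested address is returned; otherwise MAP_FAILED is
 * returned with errno set, EEXIST meaning the range is already in use.
 * Typical usage:
 *
 *     if ((ptr = anon_mmap_tryfixed( start, size, prot, 0 )) == MAP_FAILED)
 *     {
 *         if (errno == EEXIST) ... try another address ...
 *     }
 */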
static void reserve_area( void *addr, void *end )
{
#ifdef __APPLE__

#ifdef __i386__
    static const mach_vm_address_t max_address = VM_MAX_ADDRESS;
#else
    static const mach_vm_address_t max_address = MACH_VM_MAX_ADDRESS;
#endif
    mach_vm_address_t address = (mach_vm_address_t)addr;
    mach_vm_address_t end_address = (mach_vm_address_t)end;

    if (!end_address || max_address < end_address)
        end_address = max_address;

    while (address < end_address)
    {
        mach_vm_address_t hole_address = address;
        kern_return_t ret;
        mach_vm_size_t size;
        vm_region_basic_info_data_64_t info;
        mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
        mach_port_t dummy_object_name = MACH_PORT_NULL;

        /* find the mapped region at or above the current address. */
        ret = mach_vm_region( mach_task_self(), &address, &size, VM_REGION_BASIC_INFO_64,
                              (vm_region_info_t)&info, &count, &dummy_object_name );
        if (ret != KERN_SUCCESS)
        {
            address = max_address;
            size = 0;
        }

        if (end_address < address)
            address = end_address;
        if (hole_address < address)
        {
            /* found a hole, attempt to reserve it. */
            size_t hole_size = address - hole_address;
            mach_vm_address_t alloc_address = hole_address;

            ret = mach_vm_map( mach_task_self(), &alloc_address, hole_size, 0, VM_FLAGS_FIXED,
                               MEMORY_OBJECT_NULL, 0, 0, PROT_NONE, VM_PROT_ALL, VM_INHERIT_COPY );
            if (!ret) mmap_add_reserved_area( (void *)hole_address, hole_size );
            else if (ret == KERN_NO_SPACE)
            {
                /* something filled (part of) the hole before we could.
                   go back and look again. */
                address = hole_address;
                continue;
            }
        }
        address += size;
    }
#else
    void *ptr;
    size_t size = (char *)end - (char *)addr;

    if (!size) return;

    if ((ptr = anon_mmap_tryfixed( addr, size, PROT_NONE, MAP_NORESERVE )) != MAP_FAILED)
    {
        mmap_add_reserved_area( addr, size );
        return;
    }
    size = (size / 2) & ~granularity_mask;
    if (size)
    {
        reserve_area( addr, (char *)addr + size );
        reserve_area( (char *)addr + size, end );
    }
#endif /* __APPLE__ */
}


static void mmap_init( const struct preload_info *preload_info )
{
#ifndef _WIN64
#ifndef __APPLE__
    char stack;
    char * const stack_ptr = &stack;
#endif
    char *user_space_limit = (char *)0x7ffe0000;
    int i;

    if (preload_info)
    {
        /* check for a reserved area starting at the user space limit */
        /* to avoid wasting time trying to allocate it again */
        for (i = 0; preload_info[i].size; i++)
        {
            if ((char *)preload_info[i].addr > user_space_limit) break;
            if ((char *)preload_info[i].addr + preload_info[i].size > user_space_limit)
            {
                user_space_limit = (char *)preload_info[i].addr + preload_info[i].size;
                break;
            }
        }
    }
    else reserve_area( (void *)0x00010000, (void *)0x40000000 );

#ifndef __APPLE__
    if (stack_ptr >= user_space_limit)
    {
        char *end = 0;
        char *base = stack_ptr - ((unsigned int)stack_ptr & granularity_mask) - (granularity_mask + 1);
        if (base > user_space_limit) reserve_area( user_space_limit, base );
        base = stack_ptr - ((unsigned int)stack_ptr & granularity_mask) + (granularity_mask + 1);
#if defined(linux) || defined(__FreeBSD__) || defined (__FreeBSD_kernel__) || defined(__DragonFly__)
        /* Heuristic: assume the stack is near the end of the address */
        /* space, this avoids a lot of futile allocation attempts */
        end = (char *)(((unsigned long)base + 0x0fffffff) & 0xf0000000);
#endif
        reserve_area( base, end );
    }
    else
#endif
        reserve_area( user_space_limit, 0 );

#else

    if (preload_info) return;
    /* if we don't have a preloader, try to reserve the space now */
    reserve_area( (void *)0x000000010000, (void *)0x000068000000 );
    reserve_area( (void *)0x00007ff00000, (void *)0x00007fff0000 );
    reserve_area( (void *)0x7ffffe000000, (void *)0x7fffffff0000 );

#endif
}


/***********************************************************************
 *           free_ranges_lower_bound
 *
 * Returns the first range whose end is not less than addr, or end if there's none.
 */
static struct range_entry *free_ranges_lower_bound( void *addr )
{
    struct range_entry *begin = free_ranges;
    struct range_entry *end = free_ranges_end;
    struct range_entry *mid;

    while (begin < end)
    {
        mid = begin + (end - begin) / 2;
        if (mid->end < addr)
            begin = mid + 1;
        else
            end = mid;
    }

    return begin;
}
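
/* This is the classic lower_bound binary search over the sorted free_ranges
 * array, keyed on range->end.  Example: with ranges [A,B) and [C,D) where
 * B < C, looking up any addr <= B returns the first range, and any addr in
 * (B,D] returns the second. */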
/***********************************************************************
 *           free_ranges_insert_view
 *
 * Updates the free_ranges after a new view has been created.
 */
static void free_ranges_insert_view( struct file_view *view )
{
    void *view_base = ROUND_ADDR( view->base, granularity_mask );
    void *view_end = ROUND_ADDR( (char *)view->base + view->size + granularity_mask, granularity_mask );
    struct range_entry *range = free_ranges_lower_bound( view_base );
    struct range_entry *next = range + 1;

    /* free_ranges initial value is such that the view is either inside range or before another one. */
    assert( range != free_ranges_end );
    assert( range->end > view_base || next != free_ranges_end );

    /* this happens because virtual_alloc_thread_stack shrinks a view, then creates another one on top,
     * or because AT_ROUND_TO_PAGE was used with NtMapViewOfSection to force 4kB aligned mapping. */
    if ((range->end > view_base && range->base >= view_end) ||
        (range->end == view_base && next->base >= view_end))
    {
        /* on Win64, assert that it's correctly aligned so we're not going to be in trouble later */
        assert( (!is_win64 && !is_wow64) || view->base == view_base );
        WARN( "range %p - %p is already mapped\n", view_base, view_end );
        return;
    }

    /* this should never happen */
    if (range->base > view_base || range->end < view_end)
        ERR( "range %p - %p is already partially mapped\n", view_base, view_end );
    assert( range->base <= view_base && range->end >= view_end );

    /* need to split the range in two */
    if (range->base < view_base && range->end > view_end)
    {
        memmove( next + 1, next, (free_ranges_end - next) * sizeof(struct range_entry) );
        free_ranges_end += 1;
        if ((char *)free_ranges_end - (char *)free_ranges > view_block_size)
            ERR( "Free range sequence is full, trouble ahead!\n" );
        assert( (char *)free_ranges_end - (char *)free_ranges <= view_block_size );

        next->base = view_end;
        next->end = range->end;
        range->end = view_base;
    }
    else
    {
        /* otherwise we just have to shrink it */
        if (range->base < view_base)
            range->end = view_base;
        else
            range->base = view_end;

        if (range->base < range->end) return;

        /* and possibly remove it if it's now empty */
        memmove( range, next, (free_ranges_end - next) * sizeof(struct range_entry) );
        free_ranges_end -= 1;
        assert( free_ranges_end - free_ranges > 0 );
    }
}


/***********************************************************************
 *           free_ranges_remove_view
 *
 * Updates the free_ranges after a view has been destroyed.
 */
static void free_ranges_remove_view( struct file_view *view )
{
    void *view_base = ROUND_ADDR( view->base, granularity_mask );
    void *view_end = ROUND_ADDR( (char *)view->base + view->size + granularity_mask, granularity_mask );
    struct range_entry *range = free_ranges_lower_bound( view_base );
    struct range_entry *next = range + 1;

    /* It's possible to use AT_ROUND_TO_PAGE on 32bit with NtMapViewOfSection to force 4kB alignment,
     * and this breaks our assumptions. Look at the views around to check if the range is still in use. */
#ifndef _WIN64
    struct file_view *prev_view = WINE_RB_ENTRY_VALUE( wine_rb_prev( &view->entry ), struct file_view, entry );
    struct file_view *next_view = WINE_RB_ENTRY_VALUE( wine_rb_next( &view->entry ), struct file_view, entry );
    void *prev_view_base = prev_view ? ROUND_ADDR( prev_view->base, granularity_mask ) : NULL;
    void *prev_view_end = prev_view ? ROUND_ADDR( (char *)prev_view->base + prev_view->size + granularity_mask, granularity_mask ) : NULL;
    void *next_view_base = next_view ? ROUND_ADDR( next_view->base, granularity_mask ) : NULL;
    void *next_view_end = next_view ? ROUND_ADDR( (char *)next_view->base + next_view->size + granularity_mask, granularity_mask ) : NULL;

    if ((prev_view_base < view_end && prev_view_end > view_base) ||
        (next_view_base < view_end && next_view_end > view_base))
    {
        WARN( "range %p - %p is still mapped\n", view_base, view_end );
        return;
    }
#endif

    /* free_ranges initial value is such that the view is either inside range or before another one. */
    assert( range != free_ranges_end );
    assert( range->end > view_base || next != free_ranges_end );

    /* this should never happen, but we can safely ignore it */
    if (range->base <= view_base && range->end >= view_end)
    {
        WARN( "range %p - %p is already unmapped\n", view_base, view_end );
        return;
    }

    /* this should never happen */
    if (range->base < view_end && range->end > view_base)
        ERR( "range %p - %p is already partially unmapped\n", view_base, view_end );
    assert( range->end <= view_base || range->base >= view_end );

    /* merge with next if possible */
    if (range->end == view_base && next->base == view_end)
    {
        range->end = next->end;
        memmove( next, next + 1, (free_ranges_end - next - 1) * sizeof(struct range_entry) );
        free_ranges_end -= 1;
        assert( free_ranges_end - free_ranges > 0 );
    }
    /* or try growing the range */
    else if (range->end == view_base)
        range->end = view_end;
    else if (range->base == view_end)
        range->base = view_base;
    /* otherwise create a new one */
    else
    {
        memmove( range + 1, range, (free_ranges_end - range) * sizeof(struct range_entry) );
        free_ranges_end += 1;
        if ((char *)free_ranges_end - (char *)free_ranges > view_block_size)
            ERR( "Free range sequence is full, trouble ahead!\n" );
        assert( (char *)free_ranges_end - (char *)free_ranges <= view_block_size );

        range->base = view_base;
        range->end = view_end;
    }
}


static inline int is_view_valloc( const struct file_view *view )
{
    return !(view->protect & (SEC_FILE | SEC_RESERVE | SEC_COMMIT));
}

/***********************************************************************
 *           get_page_vprot
 *
 * Return the page protection byte.
 */
static BYTE get_page_vprot( const void *addr )
{
    size_t idx = (size_t)addr >> page_shift;

#ifdef _WIN64
    if ((idx >> pages_vprot_shift) >= pages_vprot_size) return 0;
    if (!pages_vprot[idx >> pages_vprot_shift]) return 0;
    return pages_vprot[idx >> pages_vprot_shift][idx & pages_vprot_mask];
#else
    return pages_vprot[idx];
#endif
}


/***********************************************************************
 *           set_page_vprot
 *
 * Set a range of page protection bytes.
 */
static void set_page_vprot( const void *addr, size_t size, BYTE vprot )
{
    size_t idx = (size_t)addr >> page_shift;
    size_t end = ((size_t)addr + size + page_mask) >> page_shift;

#ifdef _WIN64
    while (idx >> pages_vprot_shift != end >> pages_vprot_shift)
    {
        size_t dir_size = pages_vprot_mask + 1 - (idx & pages_vprot_mask);
        memset( pages_vprot[idx >> pages_vprot_shift] + (idx & pages_vprot_mask), vprot, dir_size );
        idx += dir_size;
    }
    memset( pages_vprot[idx >> pages_vprot_shift] + (idx & pages_vprot_mask), vprot, end - idx );
#else
    memset( pages_vprot + idx, vprot, end - idx );
#endif
}


/***********************************************************************
 *           set_page_vprot_bits
 *
 * Set or clear bits in a range of page protection bytes.
 */
static void set_page_vprot_bits( const void *addr, size_t size, BYTE set, BYTE clear )
{
    size_t idx = (size_t)addr >> page_shift;
    size_t end = ((size_t)addr + size + page_mask) >> page_shift;

#ifdef _WIN64
    for ( ; idx < end; idx++)
    {
        BYTE *ptr = pages_vprot[idx >> pages_vprot_shift] + (idx & pages_vprot_mask);
        *ptr = (*ptr & ~clear) | set;
    }
#else
    for ( ; idx < end; idx++) pages_vprot[idx] = (pages_vprot[idx] & ~clear) | set;
#endif
}


/***********************************************************************
 *           alloc_pages_vprot
 *
 * Allocate the page protection bytes for a given range.
 */
static BOOL alloc_pages_vprot( const void *addr, size_t size )
{
#ifdef _WIN64
    size_t idx = (size_t)addr >> page_shift;
    size_t end = ((size_t)addr + size + page_mask) >> page_shift;
    size_t i;
    void *ptr;

    assert( end <= pages_vprot_size << pages_vprot_shift );
    for (i = idx >> pages_vprot_shift; i < (end + pages_vprot_mask) >> pages_vprot_shift; i++)
    {
        if (pages_vprot[i]) continue;
        if ((ptr = anon_mmap_alloc( pages_vprot_mask + 1, PROT_READ | PROT_WRITE )) == MAP_FAILED)
            return FALSE;
        pages_vprot[i] = ptr;
    }
#endif
    return TRUE;
}


/***********************************************************************
 *           compare_view
 *
 * View comparison function used for the rb tree.
 */
static int compare_view( const void *addr, const struct wine_rb_entry *entry )
{
    struct file_view *view = WINE_RB_ENTRY_VALUE( entry, struct file_view, entry );

    if (addr < view->base) return -1;
    if (addr > view->base) return 1;
    return 0;
}


/***********************************************************************
 *           get_prot_str
 */
static const char *get_prot_str( BYTE prot )
{
    static char buffer[6];
    buffer[0] = (prot & VPROT_COMMITTED) ? 'c' : '-';
    buffer[1] = (prot & VPROT_GUARD) ? 'g' : ((prot & VPROT_WRITEWATCH) ? 'H' : '-');
    buffer[2] = (prot & VPROT_READ) ? 'r' : '-';
    buffer[3] = (prot & VPROT_WRITECOPY) ? 'W' : ((prot & VPROT_WRITE) ? 'w' : '-');
    buffer[4] = (prot & VPROT_EXEC) ? 'x' : '-';
    buffer[5] = 0;
    return buffer;
}
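
/* Example: VPROT_COMMITTED | VPROT_READ | VPROT_WRITE formats as "c-rw-".
 * Note the returned pointer refers to a static buffer, so the string is only
 * valid until the next call, which is fine for its use in dump_view's TRACE
 * output. */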
/***********************************************************************
 *           get_unix_prot
 *
 * Convert page protections to protection for mmap/mprotect.
 */
static int get_unix_prot( BYTE vprot )
{
    int prot = 0;
    if ((vprot & VPROT_COMMITTED) && !(vprot & VPROT_GUARD))
    {
        if (vprot & VPROT_READ) prot |= PROT_READ;
        if (vprot & VPROT_WRITE) prot |= PROT_WRITE | PROT_READ;
        if (vprot & VPROT_WRITECOPY) prot |= PROT_WRITE | PROT_READ;
        if (vprot & VPROT_EXEC) prot |= PROT_EXEC | PROT_READ;
        if (vprot & VPROT_WRITEWATCH) prot &= ~PROT_WRITE;
    }
    if (!prot) prot = PROT_NONE;
    return prot;
}
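
/* Example: VPROT_COMMITTED | VPROT_READ | VPROT_WRITE maps to
 * PROT_READ | PROT_WRITE, while VPROT_READ alone (not committed) maps to
 * PROT_NONE.  With VPROT_WRITEWATCH set, PROT_WRITE is stripped so the first
 * write faults and can be recorded before the real protection is restored. */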
/***********************************************************************
 *           dump_view
 */
static void dump_view( struct file_view *view )
{
    UINT i, count;
    char *addr = view->base;
    BYTE prot = get_page_vprot( addr );

    TRACE( "View: %p - %p", addr, addr + view->size - 1 );
    if (view->protect & VPROT_SYSTEM)
        TRACE( " (builtin image)\n" );
    else if (view->protect & SEC_IMAGE)
        TRACE( " (image)\n" );
    else if (view->protect & SEC_FILE)
        TRACE( " (file)\n" );
    else if (view->protect & (SEC_RESERVE | SEC_COMMIT))
        TRACE( " (anonymous)\n" );
    else
        TRACE( " (valloc)\n");

    for (count = i = 1; i < view->size >> page_shift; i++, count++)
    {
        BYTE next = get_page_vprot( addr + (count << page_shift) );
        if (next == prot) continue;
        TRACE( "      %p - %p %s\n",
               addr, addr + (count << page_shift) - 1, get_prot_str(prot) );
        addr += (count << page_shift);
        prot = next;
        count = 0;
    }
    if (count)
        TRACE( "      %p - %p %s\n",
               addr, addr + (count << page_shift) - 1, get_prot_str(prot) );
}


/***********************************************************************
 *           VIRTUAL_Dump
 */
#ifdef WINE_VM_DEBUG
static void VIRTUAL_Dump(void)
{
    sigset_t sigset;
    struct file_view *view;

    TRACE( "Dump of all virtual memory views:\n" );
    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    WINE_RB_FOR_EACH_ENTRY( view, &views_tree, struct file_view, entry )
    {
        dump_view( view );
    }
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
}
#endif


/***********************************************************************
 *           find_view
 *
 * Find the view containing a given address. virtual_mutex must be held by caller.
 *
 * PARAMS
 *     addr  [I] Address
 *
 * RETURNS
 *     View: Success
 *     NULL: Failure
 */
static struct file_view *find_view( const void *addr, size_t size )
{
    struct wine_rb_entry *ptr = views_tree.root;

    if ((const char *)addr + size < (const char *)addr) return NULL;  /* overflow */

    while (ptr)
    {
        struct file_view *view = WINE_RB_ENTRY_VALUE( ptr, struct file_view, entry );

        if (view->base > addr) ptr = ptr->left;
        else if ((const char *)view->base + view->size <= (const char *)addr) ptr = ptr->right;
        else if ((const char *)view->base + view->size < (const char *)addr + size) break;  /* size too large */
        else return view;
    }
    return NULL;
}


/***********************************************************************
 *           zero_bits_win_to_64
 *
 * Convert from Windows hybrid 32bit-based / bitmask to 64bit-based format
 */
static inline unsigned short zero_bits_win_to_64( ULONG_PTR zero_bits )
{
    unsigned short zero_bits_64;

    if (zero_bits == 0) return 0;
    if (zero_bits < 32) return 32 + zero_bits;
    zero_bits_64 = 63;
#ifdef _WIN64
    if (zero_bits >> 32) { zero_bits_64 -= 32; zero_bits >>= 32; }
#endif
    if (zero_bits >> 16) { zero_bits_64 -= 16; zero_bits >>= 16; }
    if (zero_bits >> 8) { zero_bits_64 -= 8; zero_bits >>= 8; }
    if (zero_bits >> 4) { zero_bits_64 -= 4; zero_bits >>= 4; }
    if (zero_bits >> 2) { zero_bits_64 -= 2; zero_bits >>= 2; }
    if (zero_bits >> 1) { zero_bits_64 -= 1; }
    return zero_bits_64;
}


/***********************************************************************
 *           get_zero_bits_64_mask
 */
static inline UINT_PTR get_zero_bits_64_mask( USHORT zero_bits_64 )
{
    return (UINT_PTR)((~(UINT64)0) >> zero_bits_64);
}
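
/* Worked example of the two helpers above, on 64-bit: a Windows zero_bits of
 * 1 (one high bit of a 32-bit address must be zero) converts to 33, and
 * get_zero_bits_64_mask( 33 ) == 0x7fffffff.  A bitmask-style input such as
 * 0x3fffffff converts to 34, whose mask reproduces 0x3fffffff. */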
/***********************************************************************
 *           is_write_watch_range
 */
static inline BOOL is_write_watch_range( const void *addr, size_t size )
{
    struct file_view *view = find_view( addr, size );
    return view && (view->protect & VPROT_WRITEWATCH);
}


/***********************************************************************
 *           find_view_range
 *
 * Find the first view overlapping at least part of the specified range.
 * virtual_mutex must be held by caller.
 */
static struct file_view *find_view_range( const void *addr, size_t size )
{
    struct wine_rb_entry *ptr = views_tree.root;

    while (ptr)
    {
        struct file_view *view = WINE_RB_ENTRY_VALUE( ptr, struct file_view, entry );

        if ((const char *)view->base >= (const char *)addr + size) ptr = ptr->left;
        else if ((const char *)view->base + view->size <= (const char *)addr) ptr = ptr->right;
        else return view;
    }
    return NULL;
}


/***********************************************************************
 *           find_view_inside_range
 *
 * Find first (resp. last, if top_down) view inside a range.
 * virtual_mutex must be held by caller.
 */
static struct wine_rb_entry *find_view_inside_range( void **base_ptr, void **end_ptr, int top_down )
{
    struct wine_rb_entry *first = NULL, *ptr = views_tree.root;
    void *base = *base_ptr, *end = *end_ptr;

    /* find the first (resp. last) view inside the range */
    while (ptr)
    {
        struct file_view *view = WINE_RB_ENTRY_VALUE( ptr, struct file_view, entry );
        if ((char *)view->base + view->size >= (char *)end)
        {
            end = min( end, view->base );
            ptr = ptr->left;
        }
        else if (view->base <= base)
        {
            base = max( (char *)base, (char *)view->base + view->size );
            ptr = ptr->right;
        }
        else
        {
            first = ptr;
            ptr = top_down ? ptr->right : ptr->left;
        }
    }

    *base_ptr = base;
    *end_ptr = end;
    return first;
}
/***********************************************************************
 *           try_map_free_area
 *
 * Try mmap()ing an expected free memory region, stepping and retrying
 * inside it if needed, and return where it actually succeeded, or NULL.
 */
static void* try_map_free_area( void *base, void *end, ptrdiff_t step,
                                void *start, size_t size, int unix_prot )
{
    void *ptr;

    while (start && base <= start && (char*)start + size <= (char*)end)
    {
        if ((ptr = anon_mmap_tryfixed( start, size, unix_prot, 0 )) != MAP_FAILED) return start;
        TRACE( "Found free area is already mapped, start %p.\n", start );
        if (errno != EEXIST)
        {
            ERR( "mmap() error %s, range %p-%p, unix_prot %#x.\n",
                 strerror(errno), start, (char *)start + size, unix_prot );
            return NULL;
        }
        if ((step > 0 && (char *)end - (char *)start < step) ||
            (step < 0 && (char *)start - (char *)base < -step) ||
            step == 0)
            break;
        start = (char *)start + step;
    }

    return NULL;
}
/***********************************************************************
 *           map_free_area
 *
 * Find a free area between views inside the specified range and map it.
 * virtual_mutex must be held by caller.
 */
static void *map_free_area( void *base, void *end, size_t size, int top_down, int unix_prot )
{
    struct wine_rb_entry *first = find_view_inside_range( &base, &end, top_down );
    ptrdiff_t step = top_down ? -(granularity_mask + 1) : (granularity_mask + 1);
    void *start;

    if (top_down)
    {
        start = ROUND_ADDR( (char *)end - size, granularity_mask );
        if (start >= end || start < base) return NULL;

        while (first)
        {
            struct file_view *view = WINE_RB_ENTRY_VALUE( first, struct file_view, entry );
            if ((start = try_map_free_area( (char *)view->base + view->size, (char *)start + size, step,
                                            start, size, unix_prot ))) break;
            start = ROUND_ADDR( (char *)view->base - size, granularity_mask );
            /* stop if remaining space is not large enough */
            if (!start || start >= end || start < base) return NULL;
            first = wine_rb_prev( first );
        }
    }
    else
    {
        start = ROUND_ADDR( (char *)base + granularity_mask, granularity_mask );
        if (!start || start >= end || (char *)end - (char *)start < size) return NULL;

        while (first)
        {
            struct file_view *view = WINE_RB_ENTRY_VALUE( first, struct file_view, entry );
            if ((start = try_map_free_area( start, view->base, step,
                                            start, size, unix_prot ))) break;
            start = ROUND_ADDR( (char *)view->base + view->size + granularity_mask, granularity_mask );
            /* stop if remaining space is not large enough */
            if (!start || start >= end || (char *)end - (char *)start < size) return NULL;
            first = wine_rb_next( first );
        }
    }

    if (!first)
        return try_map_free_area( base, end, step, start, size, unix_prot );

    return start;
}


/***********************************************************************
 *           find_reserved_free_area
 *
 * Find a free area between views inside the specified range.
 * virtual_mutex must be held by caller.
 * The range must be inside the preloader reserved range.
 */
static void *find_reserved_free_area( void *base, void *end, size_t size, int top_down )
{
    struct range_entry *range;
    void *start;

    base = ROUND_ADDR( (char *)base + granularity_mask, granularity_mask );
    end = (char *)ROUND_ADDR( (char *)end - size, granularity_mask ) + size;

    if (top_down)
    {
        start = (char *)end - size;
        range = free_ranges_lower_bound( start );
        assert( range != free_ranges_end && range->end >= start );

        if ((char *)range->end - (char *)start < size) start = ROUND_ADDR( (char *)range->end - size, granularity_mask );
        do
        {
            if (start >= end || start < base || (char *)end - (char *)start < size) return NULL;
            if (start < range->end && start >= range->base && (char *)range->end - (char *)start >= size) break;
            if (--range < free_ranges) return NULL;
            start = ROUND_ADDR( (char *)range->end - size, granularity_mask );
        }
        while (1);
    }
    else
    {
        start = base;
        range = free_ranges_lower_bound( start );
        assert( range != free_ranges_end && range->end >= start );

        if (start < range->base) start = ROUND_ADDR( (char *)range->base + granularity_mask, granularity_mask );
        do
        {
            if (start >= end || start < base || (char *)end - (char *)start < size) return NULL;
            if (start < range->end && start >= range->base && (char *)range->end - (char *)start >= size) break;
            if (++range == free_ranges_end) return NULL;
            start = ROUND_ADDR( (char *)range->base + granularity_mask, granularity_mask );
        }
        while (1);
    }
    return start;
}


/***********************************************************************
 *           add_reserved_area
 *
 * Add a reserved area to the list maintained by libwine.
 * virtual_mutex must be held by caller.
 */
static void add_reserved_area( void *addr, size_t size )
{
    TRACE( "adding %p-%p\n", addr, (char *)addr + size );

    if (addr < user_space_limit)
    {
        /* unmap the part of the area that is below the limit */
        assert( (char *)addr + size > (char *)user_space_limit );
        munmap( addr, (char *)user_space_limit - (char *)addr );
        size -= (char *)user_space_limit - (char *)addr;
        addr = user_space_limit;
    }
    /* blow away existing mappings */
    anon_mmap_fixed( addr, size, PROT_NONE, MAP_NORESERVE );
    mmap_add_reserved_area( addr, size );
}


/***********************************************************************
 *           remove_reserved_area
 *
 * Remove a reserved area from the list maintained by libwine.
 * virtual_mutex must be held by caller.
 */
static void remove_reserved_area( void *addr, size_t size )
{
    struct file_view *view;

    TRACE( "removing %p-%p\n", addr, (char *)addr + size );
    mmap_remove_reserved_area( addr, size );

    /* unmap areas not covered by an existing view */
    WINE_RB_FOR_EACH_ENTRY( view, &views_tree, struct file_view, entry )
    {
        if ((char *)view->base >= (char *)addr + size) break;
        if ((char *)view->base + view->size <= (char *)addr) continue;
        if (view->base > addr) munmap( addr, (char *)view->base - (char *)addr );
        if ((char *)view->base + view->size > (char *)addr + size) return;
        size = (char *)addr + size - ((char *)view->base + view->size);
        addr = (char *)view->base + view->size;
    }
    munmap( addr, size );
}


struct area_boundary
{
    void  *base;
    size_t size;
    void  *boundary;
};

/***********************************************************************
 *           get_area_boundary_callback
 *
 * Get lowest boundary address between reserved area and non-reserved area
 * in the specified region. If no boundaries are found, result is NULL.
 * virtual_mutex must be held by caller.
 */
static int CDECL get_area_boundary_callback( void *start, SIZE_T size, void *arg )
{
    struct area_boundary *area = arg;
    void *end = (char *)start + size;

    area->boundary = NULL;
    if (area->base >= end) return 0;
    if ((char *)start >= (char *)area->base + area->size) return 1;
    if (area->base >= start)
    {
        if ((char *)area->base + area->size > (char *)end)
        {
            area->boundary = end;
            return 1;
        }
        return 0;
    }
    area->boundary = start;
    return 1;
}


/***********************************************************************
 *           unmap_area
 *
 * Unmap an area, or simply replace it by an empty mapping if it is
 * in a reserved area. virtual_mutex must be held by caller.
 */
static inline void unmap_area( void *addr, size_t size )
{
    switch (mmap_is_in_reserved_area( addr, size ))
    {
    case -1: /* partially in a reserved area */
    {
        struct area_boundary area;
        size_t lower_size;
        area.base = addr;
        area.size = size;
        mmap_enum_reserved_areas( get_area_boundary_callback, &area, 0 );
        assert( area.boundary );
        lower_size = (char *)area.boundary - (char *)addr;
        unmap_area( addr, lower_size );
        unmap_area( area.boundary, size - lower_size );
        break;
    }
    case 1:  /* in a reserved area */
        anon_mmap_fixed( addr, size, PROT_NONE, MAP_NORESERVE );
        break;
    default:
    case 0:  /* not in a reserved area */
        if (is_beyond_limit( addr, size, user_space_limit ))
            add_reserved_area( addr, size );
        else
            munmap( addr, size );
        break;
    }
}


/***********************************************************************
 *           alloc_view
 *
 * Allocate a new view. virtual_mutex must be held by caller.
 */
static struct file_view *alloc_view(void)
{
    if (next_free_view)
    {
        struct file_view *ret = next_free_view;
        next_free_view = *(struct file_view **)ret;
        return ret;
    }
    if (view_block_start == view_block_end)
    {
        void *ptr = anon_mmap_alloc( view_block_size, PROT_READ | PROT_WRITE );
        if (ptr == MAP_FAILED) return NULL;
        view_block_start = ptr;
        view_block_end = view_block_start + view_block_size / sizeof(*view_block_start);
    }
    return view_block_start++;
}


/***********************************************************************
 *           delete_view
 *
 * Deletes a view. virtual_mutex must be held by caller.
 */
static void delete_view( struct file_view *view ) /* [in] View */
{
    if (!(view->protect & VPROT_SYSTEM)) unmap_area( view->base, view->size );
    set_page_vprot( view->base, view->size, 0 );
    if (mmap_is_in_reserved_area( view->base, view->size ))
        free_ranges_remove_view( view );
    wine_rb_remove( &views_tree, &view->entry );
    *(struct file_view **)view = next_free_view;
    next_free_view = view;
}


/***********************************************************************
 *           create_view
 *
 * Create a view. virtual_mutex must be held by caller.
 */
static NTSTATUS create_view( struct file_view **view_ret, void *base, size_t size, unsigned int vprot )
{
    struct file_view *view;
    int unix_prot = get_unix_prot( vprot );

    assert( !((UINT_PTR)base & page_mask) );
    assert( !(size & page_mask) );

    /* Check for overlapping views. This can happen if the previous view
     * was a system view that got unmapped behind our back. In that case
     * we recover by simply deleting it. */

    while ((view = find_view_range( base, size )))
    {
        TRACE( "overlapping view %p-%p for %p-%p\n",
               view->base, (char *)view->base + view->size, base, (char *)base + size );
        assert( view->protect & VPROT_SYSTEM );
        delete_view( view );
    }

    if (!alloc_pages_vprot( base, size )) return STATUS_NO_MEMORY;

    /* Create the view structure */

    if (!(view = alloc_view()))
    {
        FIXME( "out of memory for %p-%p\n", base, (char *)base + size );
        return STATUS_NO_MEMORY;
    }

    view->base    = base;
    view->size    = size;
    view->protect = vprot;
    set_page_vprot( base, size, vprot );

    wine_rb_put( &views_tree, view->base, &view->entry );
    if (mmap_is_in_reserved_area( view->base, view->size ))
        free_ranges_insert_view( view );

    *view_ret = view;

    if (force_exec_prot && (unix_prot & PROT_READ) && !(unix_prot & PROT_EXEC))
    {
        TRACE( "forcing exec permission on %p-%p\n", base, (char *)base + size - 1 );
        mprotect( base, size, unix_prot | PROT_EXEC );
    }
    return STATUS_SUCCESS;
}


/***********************************************************************
 *           get_win32_prot
 *
 * Convert page protections to Win32 flags.
 */
static DWORD get_win32_prot( BYTE vprot, unsigned int map_prot )
{
    DWORD ret = VIRTUAL_Win32Flags[vprot & 0x0f];
    if (vprot & VPROT_GUARD) ret |= PAGE_GUARD;
    if (map_prot & SEC_NOCACHE) ret |= PAGE_NOCACHE;
    return ret;
}


/***********************************************************************
 *           get_vprot_flags
 *
 * Build page protections from Win32 flags.
 */
static NTSTATUS get_vprot_flags( DWORD protect, unsigned int *vprot, BOOL image )
{
    switch(protect & 0xff)
    {
    case PAGE_READONLY:
        *vprot = VPROT_READ;
        break;
    case PAGE_READWRITE:
        if (image)
            *vprot = VPROT_READ | VPROT_WRITECOPY;
        else
            *vprot = VPROT_READ | VPROT_WRITE;
        break;
    case PAGE_WRITECOPY:
        *vprot = VPROT_READ | VPROT_WRITECOPY;
        break;
    case PAGE_EXECUTE:
        *vprot = VPROT_EXEC;
        break;
    case PAGE_EXECUTE_READ:
        *vprot = VPROT_EXEC | VPROT_READ;
        break;
    case PAGE_EXECUTE_READWRITE:
        if (image)
            *vprot = VPROT_EXEC | VPROT_READ | VPROT_WRITECOPY;
        else
            *vprot = VPROT_EXEC | VPROT_READ | VPROT_WRITE;
        break;
    case PAGE_EXECUTE_WRITECOPY:
        *vprot = VPROT_EXEC | VPROT_READ | VPROT_WRITECOPY;
        break;
    case PAGE_NOACCESS:
        *vprot = 0;
        break;
    default:
        return STATUS_INVALID_PAGE_PROTECTION;
    }
    if (protect & PAGE_GUARD) *vprot |= VPROT_GUARD;
    return STATUS_SUCCESS;
}


/***********************************************************************
 *           mprotect_exec
 *
 * Wrapper for mprotect, adds PROT_EXEC if forced by force_exec_prot
 */
static inline int mprotect_exec( void *base, size_t size, int unix_prot )
{
    if (force_exec_prot && (unix_prot & PROT_READ) && !(unix_prot & PROT_EXEC))
    {
        TRACE( "forcing exec permission on %p-%p\n", base, (char *)base + size - 1 );
        if (!mprotect( base, size, unix_prot | PROT_EXEC )) return 0;
        /* exec + write may legitimately fail, in that case fall back to write only */
        if (!(unix_prot & PROT_WRITE)) return -1;
    }

    return mprotect( base, size, unix_prot );
}


/***********************************************************************
 *           mprotect_range
 *
 * Call mprotect on a page range, applying the protections from the per-page byte.
 */
static void mprotect_range( void *base, size_t size, BYTE set, BYTE clear )
{
    size_t i, count;
    char *addr = ROUND_ADDR( base, page_mask );
    int prot, next;

    size = ROUND_SIZE( base, size );
    prot = get_unix_prot( (get_page_vprot( addr ) & ~clear ) | set );
    for (count = i = 1; i < size >> page_shift; i++, count++)
    {
        next = get_unix_prot( (get_page_vprot( addr + (count << page_shift) ) & ~clear) | set );
        if (next == prot) continue;
        mprotect_exec( addr, count << page_shift, prot );
        addr += count << page_shift;
        prot = next;
        count = 0;
    }
    if (count) mprotect_exec( addr, count << page_shift, prot );
}
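
/* The loop above coalesces runs of pages whose effective Unix protection is
 * identical, so for example four committed read-write pages followed by two
 * guard pages cost two mprotect() calls rather than six. */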
/***********************************************************************
 *           set_vprot
 *
 * Change the protection of a range of pages.
 */
static BOOL set_vprot( struct file_view *view, void *base, size_t size, BYTE vprot )
{
    int unix_prot = get_unix_prot(vprot);

    if (view->protect & VPROT_WRITEWATCH)
    {
        /* each page may need different protections depending on write watch flag */
        set_page_vprot_bits( base, size, vprot & ~VPROT_WRITEWATCH, ~vprot & ~VPROT_WRITEWATCH );
        mprotect_range( base, size, 0, 0 );
        return TRUE;
    }

    /* if setting stack guard pages, store the permissions first, as the guard may be
     * triggered at any point after mprotect and change the permissions again */
    if ((vprot & VPROT_GUARD) &&
        (base >= NtCurrentTeb()->DeallocationStack) &&
        (base < NtCurrentTeb()->Tib.StackBase))
    {
        set_page_vprot( base, size, vprot );
        mprotect( base, size, unix_prot );
        return TRUE;
    }

    if (mprotect_exec( base, size, unix_prot )) /* FIXME: last error */
        return FALSE;

    set_page_vprot( base, size, vprot );
    return TRUE;
}


/***********************************************************************
 *           set_protection
 *
 * Set page protections on a range of pages
 */
static NTSTATUS set_protection( struct file_view *view, void *base, SIZE_T size, ULONG protect )
{
    unsigned int vprot;
    NTSTATUS status;

    if ((status = get_vprot_flags( protect, &vprot, view->protect & SEC_IMAGE ))) return status;
    if (is_view_valloc( view ))
    {
        if (vprot & VPROT_WRITECOPY) return STATUS_INVALID_PAGE_PROTECTION;
    }
    else
    {
        BYTE access = vprot & (VPROT_READ | VPROT_WRITE | VPROT_EXEC);
        if ((view->protect & access) != access) return STATUS_INVALID_PAGE_PROTECTION;
    }

    if (!set_vprot( view, base, size, vprot | VPROT_COMMITTED )) return STATUS_ACCESS_DENIED;
    return STATUS_SUCCESS;
}


/***********************************************************************
 *           update_write_watches
 */
static void update_write_watches( void *base, size_t size, size_t accessed_size )
{
    TRACE( "updating watch %p-%p-%p\n", base, (char *)base + accessed_size, (char *)base + size );
    /* clear write watch flag on accessed pages */
    set_page_vprot_bits( base, accessed_size, 0, VPROT_WRITEWATCH );
    /* restore page protections on the entire range */
    mprotect_range( base, size, 0, 0 );
}


/***********************************************************************
 *           reset_write_watches
 *
 * Reset write watches in a memory range.
 */
static void reset_write_watches( void *base, SIZE_T size )
{
    set_page_vprot_bits( base, size, VPROT_WRITEWATCH, 0 );
    mprotect_range( base, size, 0, 0 );
}


/***********************************************************************
 *           unmap_extra_space
 *
 * Release the extra memory while keeping the range starting on the granularity boundary.
 */
static inline void *unmap_extra_space( void *ptr, size_t total_size, size_t wanted_size )
{
    if ((ULONG_PTR)ptr & granularity_mask)
    {
        size_t extra = granularity_mask + 1 - ((ULONG_PTR)ptr & granularity_mask);
        munmap( ptr, extra );
        ptr = (char *)ptr + extra;
        total_size -= extra;
    }
    if (total_size > wanted_size)
        munmap( (char *)ptr + wanted_size, total_size - wanted_size );
    return ptr;
}
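
/* Worked example: map_view over-allocates wanted_size 0x20000 to
 * total_size 0x30000.  If mmap returned 0x12344200, the leading 0xbe00 bytes
 * are unmapped to reach the 64K boundary 0x12350000, and the tail past
 * 0x12370000 is unmapped, leaving exactly 0x20000 aligned bytes. */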
1618 struct alloc_area
1620 size_t size;
1621 int top_down;
1622 void *limit;
1623 void *result;
1626 /***********************************************************************
1627 * alloc_reserved_area_callback
1629 * Try to map some space inside a reserved area. Callback for mmap_enum_reserved_areas.
1631 static int CDECL alloc_reserved_area_callback( void *start, SIZE_T size, void *arg )
1633 struct alloc_area *alloc = arg;
1634 void *end = (char *)start + size;
1636 if (start < address_space_start) start = address_space_start;
1637 if (is_beyond_limit( start, size, alloc->limit )) end = alloc->limit;
1638 if (start >= end) return 0;
1640 /* make sure we don't touch the preloader reserved range */
1641 if (preload_reserve_end >= start)
1643 if (preload_reserve_end >= end)
1645 if (preload_reserve_start <= start) return 0; /* no space in that area */
1646 if (preload_reserve_start < end) end = preload_reserve_start;
1648 else if (preload_reserve_start <= start) start = preload_reserve_end;
1649 else
1651 /* range is split in two by the preloader reservation, try first part */
1652 if ((alloc->result = find_reserved_free_area( start, preload_reserve_start, alloc->size,
1653 alloc->top_down )))
1654 return 1;
1655 /* then fall through to try second part */
1656 start = preload_reserve_end;
1659 if ((alloc->result = find_reserved_free_area( start, end, alloc->size, alloc->top_down )))
1660 return 1;
1662 return 0;
1665 /***********************************************************************
1666 * map_fixed_area
1668 * mmap the fixed memory area.
1669 * virtual_mutex must be held by caller.
1671 static NTSTATUS map_fixed_area( void *base, size_t size, unsigned int vprot )
1673 void *ptr;
1675 switch (mmap_is_in_reserved_area( base, size ))
1677 case -1: /* partially in a reserved area */
1679 NTSTATUS status;
1680 struct area_boundary area;
1681 size_t lower_size;
1682 area.base = base;
1683 area.size = size;
1684 mmap_enum_reserved_areas( get_area_boundary_callback, &area, 0 );
1685 assert( area.boundary );
1686 lower_size = (char *)area.boundary - (char *)base;
1687 status = map_fixed_area( base, lower_size, vprot );
1688 if (status == STATUS_SUCCESS)
1690 status = map_fixed_area( area.boundary, size - lower_size, vprot);
1691 if (status != STATUS_SUCCESS) unmap_area( base, lower_size );
1693 return status;
1695 case 0: /* not in a reserved area, do a normal allocation */
1696 if ((ptr = anon_mmap_tryfixed( base, size, get_unix_prot(vprot), 0 )) == MAP_FAILED)
1698 if (errno == ENOMEM) return STATUS_NO_MEMORY;
1699 if (errno == EEXIST) return STATUS_CONFLICTING_ADDRESSES;
1700 return STATUS_INVALID_PARAMETER;
1702 break;
1704 default:
1705 case 1: /* in a reserved area, make sure the address is available */
1706 if (find_view_range( base, size )) return STATUS_CONFLICTING_ADDRESSES;
1707 /* replace the reserved area by our mapping */
1708 if ((ptr = anon_mmap_fixed( base, size, get_unix_prot(vprot), 0 )) != base)
1709 return STATUS_INVALID_PARAMETER;
1710 break;
1712 if (is_beyond_limit( ptr, size, working_set_limit )) working_set_limit = address_space_limit;
1713 return STATUS_SUCCESS;
1716 /***********************************************************************
1717 * map_view
1719 * Create a view and mmap the corresponding memory area.
1720 * virtual_mutex must be held by caller.
1722 static NTSTATUS map_view( struct file_view **view_ret, void *base, size_t size,
1723 int top_down, unsigned int vprot, unsigned short zero_bits_64 )
1725 void *ptr;
1726 NTSTATUS status;
1728 if (base)
1730 if (is_beyond_limit( base, size, address_space_limit ))
1731 return STATUS_WORKING_SET_LIMIT_RANGE;
1732 status = map_fixed_area( base, size, vprot );
1733 if (status != STATUS_SUCCESS) return status;
1734 ptr = base;
1736 else
1738 size_t view_size = size + granularity_mask + 1;
1739 struct alloc_area alloc;
1741 alloc.size = size;
1742 alloc.top_down = top_down;
1743 alloc.limit = (void*)(get_zero_bits_64_mask( zero_bits_64 ) & (UINT_PTR)user_space_limit);
1745 if (mmap_enum_reserved_areas( alloc_reserved_area_callback, &alloc, top_down ))
1747 ptr = alloc.result;
1748 TRACE( "got mem in reserved area %p-%p\n", ptr, (char *)ptr + size );
1749 if (anon_mmap_fixed( ptr, size, get_unix_prot(vprot), 0 ) != ptr)
1750 return STATUS_INVALID_PARAMETER;
1751 goto done;
1754 if (zero_bits_64)
1756 if (!(ptr = map_free_area( address_space_start, alloc.limit, size,
1757 top_down, get_unix_prot(vprot) )))
1758 return STATUS_NO_MEMORY;
1759 TRACE( "got mem with map_free_area %p-%p\n", ptr, (char *)ptr + size );
1760 goto done;
1763 for (;;)
1765 if ((ptr = anon_mmap_alloc( view_size, get_unix_prot(vprot) )) == MAP_FAILED)
1767 if (errno == ENOMEM) return STATUS_NO_MEMORY;
1768 return STATUS_INVALID_PARAMETER;
1770 TRACE( "got mem with anon mmap %p-%p\n", ptr, (char *)ptr + size );
1771 /* if we got something beyond the user limit, unmap it and retry */
1772 if (is_beyond_limit( ptr, view_size, user_space_limit )) add_reserved_area( ptr, view_size );
1773 else break;
1775 ptr = unmap_extra_space( ptr, view_size, size );
1777 done:
1778 status = create_view( view_ret, ptr, size, vprot );
1779 if (status != STATUS_SUCCESS) unmap_area( ptr, size );
1780 return status;
1784 /***********************************************************************
1785 * map_file_into_view
1787 * Wrapper for mmap() to map a file into a view, falling back to read if mmap fails.
1788 * virtual_mutex must be held by caller.
1790 static NTSTATUS map_file_into_view( struct file_view *view, int fd, size_t start, size_t size,
1791 off_t offset, unsigned int vprot, BOOL removable )
1793 void *ptr;
1794 int prot = get_unix_prot( vprot | VPROT_COMMITTED /* make sure it is accessible */ );
1795 unsigned int flags = MAP_FIXED | ((vprot & VPROT_WRITECOPY) ? MAP_PRIVATE : MAP_SHARED);
1797 assert( start < view->size );
1798 assert( start + size <= view->size );
1800 if (force_exec_prot && (vprot & VPROT_READ))
1802 TRACE( "forcing exec permission on mapping %p-%p\n",
1803 (char *)view->base + start, (char *)view->base + start + size - 1 );
1804 prot |= PROT_EXEC;
1807 /* only try mmap if media is not removable (or if we require write access) */
1808 if (!removable || (flags & MAP_SHARED))
1810 if (mmap( (char *)view->base + start, size, prot, flags, fd, offset ) != MAP_FAILED)
1811 goto done;
1813 switch (errno)
1815 case EINVAL: /* file offset is not page-aligned, fall back to read() */
1816 if (flags & MAP_SHARED) return STATUS_INVALID_PARAMETER;
1817 break;
1818 case ENOEXEC:
1819 case ENODEV: /* filesystem doesn't support mmap(), fall back to read() */
1820 if (vprot & VPROT_WRITE)
1822 ERR( "shared writable mmap not supported, broken filesystem?\n" );
1823 return STATUS_NOT_SUPPORTED;
1825 break;
1826 case EACCES:
1827 case EPERM: /* noexec filesystem, fall back to read() */
1828 if (flags & MAP_SHARED)
1830 if (prot & PROT_EXEC) ERR( "failed to set PROT_EXEC on file map, noexec filesystem?\n" );
1831 return STATUS_ACCESS_DENIED;
1833 if (prot & PROT_EXEC) WARN( "failed to set PROT_EXEC on file map, noexec filesystem?\n" );
1834 break;
1835 default:
1836 return STATUS_NO_MEMORY;
1840 /* Reserve the memory with an anonymous mmap */
1841 ptr = anon_mmap_fixed( (char *)view->base + start, size, PROT_READ | PROT_WRITE, 0 );
1842 if (ptr == MAP_FAILED) return STATUS_NO_MEMORY;
1843 /* Now read in the file */
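/* a short read is harmless here: the anonymous mapping above starts out
 * zero-filled, so any tail beyond the end of the file stays zeroed */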
1844 pread( fd, ptr, size, offset );
1845 if (prot != (PROT_READ|PROT_WRITE)) mprotect( ptr, size, prot ); /* Set the right protection */
1846 done:
1847 set_page_vprot( (char *)view->base + start, size, vprot );
1848 return STATUS_SUCCESS;
1852 /***********************************************************************
1853 * get_committed_size
1855 * Get the size of the committed range starting at base.
1856 * Also return the protections for the first page.
1858 static SIZE_T get_committed_size( struct file_view *view, void *base, BYTE *vprot )
1860 SIZE_T i, start;
1862 start = ((char *)base - (char *)view->base) >> page_shift;
1863 *vprot = get_page_vprot( base );
1865 if (view->protect & SEC_RESERVE)
1867 SIZE_T ret = 0;
1868 SERVER_START_REQ( get_mapping_committed_range )
1870 req->base = wine_server_client_ptr( view->base );
1871 req->offset = start << page_shift;
1872 if (!wine_server_call( req ))
1874 ret = reply->size;
1875 if (reply->committed)
1877 *vprot |= VPROT_COMMITTED;
1878 set_page_vprot_bits( base, ret, VPROT_COMMITTED, 0 );
1882 SERVER_END_REQ;
1883 return ret;
1885 for (i = start + 1; i < view->size >> page_shift; i++)
1886 if ((*vprot ^ get_page_vprot( (char *)view->base + (i << page_shift) )) & VPROT_COMMITTED) break;
1887 return (i - start) << page_shift;
1891 /***********************************************************************
1892 * decommit_pages
1894 * Decommit some pages of a given view.
1895 * virtual_mutex must be held by caller.
1897 static NTSTATUS decommit_pages( struct file_view *view, size_t start, size_t size )
1899 if (anon_mmap_fixed( (char *)view->base + start, size, PROT_NONE, 0 ) != MAP_FAILED)
1901 set_page_vprot_bits( (char *)view->base + start, size, 0, VPROT_COMMITTED );
1902 return STATUS_SUCCESS;
1904 return STATUS_NO_MEMORY;
1908 /***********************************************************************
1909 * allocate_dos_memory
1911 * Allocate the DOS memory range.
1913 static NTSTATUS allocate_dos_memory( struct file_view **view, unsigned int vprot )
1915 size_t size;
1916 void *addr = NULL;
1917 void * const low_64k = (void *)0x10000;
1918 const size_t dosmem_size = 0x110000;
1919 int unix_prot = get_unix_prot( vprot );
1921 /* check for existing view */
1923 if (find_view_range( 0, dosmem_size )) return STATUS_CONFLICTING_ADDRESSES;
1925 /* check without the first 64K */
1927 if (mmap_is_in_reserved_area( low_64k, dosmem_size - 0x10000 ) != 1)
1929 addr = anon_mmap_tryfixed( low_64k, dosmem_size - 0x10000, unix_prot, 0 );
1930 if (addr == MAP_FAILED) return map_view( view, NULL, dosmem_size, FALSE, vprot, 0 );
1933 /* now try to allocate the low 64K too */
1935 if (mmap_is_in_reserved_area( NULL, 0x10000 ) != 1)
1937 addr = anon_mmap_tryfixed( (void *)page_size, 0x10000 - page_size, unix_prot, 0 );
1938 if (addr != MAP_FAILED)
1940 if (!anon_mmap_fixed( NULL, page_size, unix_prot, 0 ))
1942 addr = NULL;
1943 TRACE( "successfully mapped low 64K range\n" );
1945 else TRACE( "failed to map page 0\n" );
1947 else
1949 addr = low_64k;
1950 TRACE( "failed to map low 64K range\n" );
1954 /* now reserve the whole range */
1956 size = (char *)dosmem_size - (char *)addr;
1957 anon_mmap_fixed( addr, size, unix_prot, 0 );
1958 return create_view( view, addr, size, vprot );
1962 /***********************************************************************
1963 * map_pe_header
1965 * Map the header of a PE file into memory.
1967 static NTSTATUS map_pe_header( void *ptr, size_t size, int fd, BOOL *removable )
1969 if (!size) return STATUS_INVALID_IMAGE_FORMAT;
1971 if (!*removable)
1973 if (mmap( ptr, size, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_FIXED|MAP_PRIVATE, fd, 0 ) != MAP_FAILED)
1974 return STATUS_SUCCESS;
1976 switch (errno)
1978 case EPERM:
1979 case EACCES:
1980 WARN( "noexec file system, falling back to read\n" );
1981 break;
1982 case ENOEXEC:
1983 case ENODEV:
1984 WARN( "file system doesn't support mmap, falling back to read\n" );
1985 break;
1986 default:
1987 return STATUS_NO_MEMORY;
1989 *removable = TRUE;
1991 pread( fd, ptr, size, 0 );
1992 return STATUS_SUCCESS; /* page protections will be updated later */
1996 /***********************************************************************
1997 * map_image_into_view
1999 * Map an executable (PE format) image into an existing view.
2000 * virtual_mutex must be held by caller.
2002 static NTSTATUS map_image_into_view( struct file_view *view, int fd, void *orig_base,
2003 SIZE_T header_size, ULONG image_flags, int shared_fd, BOOL removable )
2005 IMAGE_DOS_HEADER *dos;
2006 IMAGE_NT_HEADERS *nt;
2007 IMAGE_SECTION_HEADER sections[96];
2008 IMAGE_SECTION_HEADER *sec;
2009 IMAGE_DATA_DIRECTORY *imports;
2010 NTSTATUS status = STATUS_CONFLICTING_ADDRESSES;
2011 int i;
2012 off_t pos;
2013 struct stat st;
2014 char *header_end, *header_start;
2015 char *ptr = view->base;
2016 SIZE_T total_size = view->size;
2018 TRACE_(module)( "mapped PE file at %p-%p\n", ptr, ptr + total_size );
2020 /* map the header */
2022 fstat( fd, &st );
2023 header_size = min( header_size, st.st_size );
2024 if ((status = map_pe_header( view->base, header_size, fd, &removable ))) return status;
2026 status = STATUS_INVALID_IMAGE_FORMAT; /* generic error */
2027 dos = (IMAGE_DOS_HEADER *)ptr;
2028 nt = (IMAGE_NT_HEADERS *)(ptr + dos->e_lfanew);
2029 header_end = ptr + ROUND_SIZE( 0, header_size );
2030 memset( ptr + header_size, 0, header_end - (ptr + header_size) );
2031 if ((char *)(nt + 1) > header_end) return status;
2032 header_start = (char*)&nt->OptionalHeader+nt->FileHeader.SizeOfOptionalHeader;
2033 if (nt->FileHeader.NumberOfSections > ARRAY_SIZE( sections )) return status;
2034 if (header_start + sizeof(*sections) * nt->FileHeader.NumberOfSections > header_end) return status;
2035 /* Some applications (e.g. the Steam version of Borderlands) map over the top of the section headers,
2036 * so copying the headers into local memory is necessary to load such applications properly. */
2037 memcpy(sections, header_start, sizeof(*sections) * nt->FileHeader.NumberOfSections);
2038 sec = sections;
2040 imports = nt->OptionalHeader.DataDirectory + IMAGE_DIRECTORY_ENTRY_IMPORT;
2041 if (!imports->Size || !imports->VirtualAddress) imports = NULL;
2043 /* check for non page-aligned binary */
2045 if (image_flags & IMAGE_FLAGS_ImageMappedFlat)
2047 /* unaligned sections, this happens for native subsystem binaries */
2048 /* in that case Windows simply maps in the whole file */
2050 total_size = min( total_size, ROUND_SIZE( 0, st.st_size ));
2051 if (map_file_into_view( view, fd, 0, total_size, 0, VPROT_COMMITTED | VPROT_READ | VPROT_WRITECOPY,
2052 removable ) != STATUS_SUCCESS) return status;
2054 /* check that all sections are loaded at the right offset */
2055 if (nt->OptionalHeader.FileAlignment != nt->OptionalHeader.SectionAlignment) return status;
2056 for (i = 0; i < nt->FileHeader.NumberOfSections; i++)
2058 if (sec[i].VirtualAddress != sec[i].PointerToRawData)
2059 return status; /* Windows refuses to load in that case too */
2062 /* set the image protections */
2063 set_vprot( view, ptr, total_size, VPROT_COMMITTED | VPROT_READ | VPROT_WRITECOPY | VPROT_EXEC );
2065 /* no relocations are performed on non page-aligned binaries */
2066 return STATUS_SUCCESS;
2070 /* map all the sections */
2072 for (i = pos = 0; i < nt->FileHeader.NumberOfSections; i++, sec++)
2074 static const SIZE_T sector_align = 0x1ff;
2075 SIZE_T map_size, file_start, file_size, end;
2077 if (!sec->Misc.VirtualSize)
2078 map_size = ROUND_SIZE( 0, sec->SizeOfRawData );
2079 else
2080 map_size = ROUND_SIZE( 0, sec->Misc.VirtualSize );
2082 /* file positions are rounded to sector boundaries regardless of OptionalHeader.FileAlignment */
2083 file_start = sec->PointerToRawData & ~sector_align;
2084 file_size = (sec->SizeOfRawData + (sec->PointerToRawData & sector_align) + sector_align) & ~sector_align;
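/* e.g. PointerToRawData = 0x2345 with SizeOfRawData = 0x1000 gives
 * file_start = 0x2200 and file_size = 0x1200: the mapping begins on a
 * sector boundary and still covers the whole section */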
2085 if (file_size > map_size) file_size = map_size;
2087 /* a few sanity checks */
2088 end = sec->VirtualAddress + ROUND_SIZE( sec->VirtualAddress, map_size );
2089 if (sec->VirtualAddress > total_size || end > total_size || end < sec->VirtualAddress)
2091 WARN_(module)( "Section %.8s too large (%x+%lx/%lx)\n",
2092 sec->Name, sec->VirtualAddress, map_size, total_size );
2093 return status;
2096 if ((sec->Characteristics & IMAGE_SCN_MEM_SHARED) &&
2097 (sec->Characteristics & IMAGE_SCN_MEM_WRITE))
2099 TRACE_(module)( "mapping shared section %.8s at %p off %x (%x) size %lx (%lx) flags %x\n",
2100 sec->Name, ptr + sec->VirtualAddress,
2101 sec->PointerToRawData, (int)pos, file_size, map_size,
2102 sec->Characteristics );
2103 if (map_file_into_view( view, shared_fd, sec->VirtualAddress, map_size, pos,
2104 VPROT_COMMITTED | VPROT_READ | VPROT_WRITE, FALSE ) != STATUS_SUCCESS)
2106 ERR_(module)( "Could not map shared section %.8s\n", sec->Name );
2107 return status;
2110 /* check if the import directory falls inside this section */
2111 if (imports && imports->VirtualAddress >= sec->VirtualAddress &&
2112 imports->VirtualAddress < sec->VirtualAddress + map_size)
2114 UINT_PTR base = imports->VirtualAddress & ~page_mask;
2115 UINT_PTR end = base + ROUND_SIZE( imports->VirtualAddress, imports->Size );
2116 if (end > sec->VirtualAddress + map_size) end = sec->VirtualAddress + map_size;
2117 if (end > base)
2118 map_file_into_view( view, shared_fd, base, end - base,
2119 pos + (base - sec->VirtualAddress),
2120 VPROT_COMMITTED | VPROT_READ | VPROT_WRITECOPY, FALSE );
2122 pos += map_size;
2123 continue;
2126 TRACE_(module)( "mapping section %.8s at %p off %x size %x virt %x flags %x\n",
2127 sec->Name, ptr + sec->VirtualAddress,
2128 sec->PointerToRawData, sec->SizeOfRawData,
2129 sec->Misc.VirtualSize, sec->Characteristics );
2131 if (!sec->PointerToRawData || !file_size) continue;
2133 /* Note: if the section is not aligned properly map_file_into_view will magically
2134 * fall back to read(), so we don't need to check anything here.
2136 end = file_start + file_size;
2137 if (sec->PointerToRawData >= st.st_size ||
2138 end > ((st.st_size + sector_align) & ~sector_align) ||
2139 end < file_start ||
2140 map_file_into_view( view, fd, sec->VirtualAddress, file_size, file_start,
2141 VPROT_COMMITTED | VPROT_READ | VPROT_WRITECOPY,
2142 removable ) != STATUS_SUCCESS)
2144 ERR_(module)( "Could not map section %.8s, file probably truncated\n", sec->Name );
2145 return status;
2148 if (file_size & page_mask)
2150 end = ROUND_SIZE( 0, file_size );
2151 if (end > map_size) end = map_size;
2152 TRACE_(module)("clearing %p - %p\n",
2153 ptr + sec->VirtualAddress + file_size,
2154 ptr + sec->VirtualAddress + end );
2155 memset( ptr + sec->VirtualAddress + file_size, 0, end - file_size );
2159 /* set the image protections */
2161 set_vprot( view, ptr, ROUND_SIZE( 0, header_size ), VPROT_COMMITTED | VPROT_READ );
2163 sec = sections;
2164 for (i = 0; i < nt->FileHeader.NumberOfSections; i++, sec++)
2166 SIZE_T size;
2167 BYTE vprot = VPROT_COMMITTED;
2169 if (sec->Misc.VirtualSize)
2170 size = ROUND_SIZE( sec->VirtualAddress, sec->Misc.VirtualSize );
2171 else
2172 size = ROUND_SIZE( sec->VirtualAddress, sec->SizeOfRawData );
2174 if (sec->Characteristics & IMAGE_SCN_MEM_READ) vprot |= VPROT_READ;
2175 if (sec->Characteristics & IMAGE_SCN_MEM_WRITE) vprot |= VPROT_WRITECOPY;
2176 if (sec->Characteristics & IMAGE_SCN_MEM_EXECUTE) vprot |= VPROT_EXEC;
2178 /* Dumb game cracks let the entry point (AddressOfEntryPoint) point into a data section; force that section executable. */
2179 if ((nt->OptionalHeader.AddressOfEntryPoint >= sec->VirtualAddress) &&
2180 (nt->OptionalHeader.AddressOfEntryPoint < sec->VirtualAddress + size))
2181 vprot |= VPROT_EXEC;
2183 if (!set_vprot( view, ptr + sec->VirtualAddress, size, vprot ) && (vprot & VPROT_EXEC))
2184 ERR( "failed to set %08x protection on section %.8s, noexec filesystem?\n",
2185 sec->Characteristics, sec->Name );
2188 #ifdef VALGRIND_LOAD_PDB_DEBUGINFO
2189 VALGRIND_LOAD_PDB_DEBUGINFO(fd, ptr, total_size, ptr - (char *)orig_base);
2190 #endif
2191 return STATUS_SUCCESS;
2195 /***********************************************************************
2196 * virtual_map_section
2198 * Map a file section into memory.
2200 static NTSTATUS virtual_map_section( HANDLE handle, PVOID *addr_ptr, unsigned short zero_bits_64,
2201 SIZE_T commit_size, const LARGE_INTEGER *offset_ptr, SIZE_T *size_ptr,
2202 ULONG alloc_type, ULONG protect, pe_image_info_t *image_info )
2204 NTSTATUS res;
2205 mem_size_t full_size;
2206 ACCESS_MASK access;
2207 SIZE_T size;
2208 void *base;
2209 int unix_handle = -1, needs_close;
2210 int shared_fd = -1, shared_needs_close = 0;
2211 unsigned int vprot, sec_flags;
2212 struct file_view *view;
2213 HANDLE shared_file;
2214 LARGE_INTEGER offset;
2215 sigset_t sigset;
2217 offset.QuadPart = offset_ptr ? offset_ptr->QuadPart : 0;
2219 switch(protect)
2221 case PAGE_NOACCESS:
2222 case PAGE_READONLY:
2223 case PAGE_WRITECOPY:
2224 access = SECTION_MAP_READ;
2225 break;
2226 case PAGE_READWRITE:
2227 access = SECTION_MAP_WRITE;
2228 break;
2229 case PAGE_EXECUTE:
2230 case PAGE_EXECUTE_READ:
2231 case PAGE_EXECUTE_WRITECOPY:
2232 access = SECTION_MAP_READ | SECTION_MAP_EXECUTE;
2233 break;
2234 case PAGE_EXECUTE_READWRITE:
2235 access = SECTION_MAP_WRITE | SECTION_MAP_EXECUTE;
2236 break;
2237 default:
2238 return STATUS_INVALID_PAGE_PROTECTION;
2241 SERVER_START_REQ( get_mapping_info )
2243 req->handle = wine_server_obj_handle( handle );
2244 req->access = access;
2245 wine_server_set_reply( req, image_info, sizeof(*image_info) );
2246 res = wine_server_call( req );
2247 sec_flags = reply->flags;
2248 full_size = reply->size;
2249 shared_file = wine_server_ptr_handle( reply->shared_file );
2251 SERVER_END_REQ;
2252 if (res) return res;
2254 if ((res = server_get_unix_fd( handle, 0, &unix_handle, &needs_close, NULL, NULL )))
2256 if (shared_file) NtClose( shared_file );
2257 return res;
2260 if (shared_file && ((res = server_get_unix_fd( shared_file, FILE_READ_DATA|FILE_WRITE_DATA,
2261 &shared_fd, &shared_needs_close, NULL, NULL ))))
2263 NtClose( shared_file );
2264 if (needs_close) close( unix_handle );
2265 return res;
2268 res = STATUS_INVALID_PARAMETER;
2269 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
2271 if (sec_flags & SEC_IMAGE)
2273 base = wine_server_get_ptr( image_info->base );
2274 if ((ULONG_PTR)base != image_info->base) base = NULL;
2275 size = image_info->map_size;
2276 vprot = SEC_IMAGE | SEC_FILE | VPROT_COMMITTED | VPROT_READ | VPROT_EXEC | VPROT_WRITECOPY;
2278 if ((char *)base >= (char *)address_space_start) /* make sure the DOS area remains free */
2279 res = map_view( &view, base, size, alloc_type & MEM_TOP_DOWN, vprot, zero_bits_64 );
2281 if (res) res = map_view( &view, NULL, size, alloc_type & MEM_TOP_DOWN, vprot, zero_bits_64 );
2282 if (res) goto done;
2284 res = map_image_into_view( view, unix_handle, base, image_info->header_size,
2285 image_info->image_flags, shared_fd, needs_close );
2287 else
2289 base = *addr_ptr;
2290 if (offset.QuadPart >= full_size) goto done;
2291 if (*size_ptr)
2293 size = *size_ptr;
2294 if (size > full_size - offset.QuadPart)
2296 res = STATUS_INVALID_VIEW_SIZE;
2297 goto done;
2300 else
2302 size = full_size - offset.QuadPart;
2303 if (size != full_size - offset.QuadPart) /* truncated */
2305 WARN( "Files larger than 4GB (%s) not supported on this platform\n",
2306 wine_dbgstr_longlong(full_size) );
2307 goto done;
2310 if (!(size = ROUND_SIZE( 0, size ))) goto done; /* wrap-around */
2312 get_vprot_flags( protect, &vprot, FALSE );
2313 vprot |= sec_flags;
2314 if (!(sec_flags & SEC_RESERVE)) vprot |= VPROT_COMMITTED;
2315 res = map_view( &view, base, size, alloc_type & MEM_TOP_DOWN, vprot, zero_bits_64 );
2316 if (res) goto done;
2318 TRACE( "handle=%p size=%lx offset=%x%08x\n", handle, size, offset.u.HighPart, offset.u.LowPart );
2319 res = map_file_into_view( view, unix_handle, 0, size, offset.QuadPart, vprot, needs_close );
2320 if (res) ERR( "mapping %p %lx %x%08x failed\n",
2321 view->base, size, offset.u.HighPart, offset.u.LowPart );
2324 if (res == STATUS_SUCCESS)
2326 SERVER_START_REQ( map_view )
2328 req->mapping = wine_server_obj_handle( handle );
2329 req->access = access;
2330 req->base = wine_server_client_ptr( view->base );
2331 req->size = size;
2332 req->start = offset.QuadPart;
2333 res = wine_server_call( req );
2335 SERVER_END_REQ;
2338 if (res >= 0)
2340 *addr_ptr = view->base;
2341 *size_ptr = size;
2342 VIRTUAL_DEBUG_DUMP_VIEW( view );
2344 else delete_view( view );
2346 done:
2347 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
2348 if (needs_close) close( unix_handle );
2349 if (shared_needs_close) close( shared_fd );
2350 if (shared_file) NtClose( shared_file );
2351 return res;
2355 struct alloc_virtual_heap
2357 void *base;
2358 size_t size;
2361 /* callback for mmap_enum_reserved_areas to allocate space for the virtual heap */
2362 static int CDECL alloc_virtual_heap( void *base, SIZE_T size, void *arg )
2364 struct alloc_virtual_heap *alloc = arg;
2366 if (is_beyond_limit( base, size, address_space_limit )) address_space_limit = (char *)base + size;
2367 if (size < alloc->size) return 0;
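/* on 64-bit, skip reserved areas in the low 2GB, presumably to keep that
 * space free for allocations that need 32-bit-reachable addresses */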
2368 if (is_win64 && base < (void *)0x80000000) return 0;
2369 alloc->base = anon_mmap_fixed( (char *)base + size - alloc->size, alloc->size, PROT_READ|PROT_WRITE, 0 );
2370 return (alloc->base != MAP_FAILED);
2373 /***********************************************************************
2374 * virtual_init
2376 void virtual_init(void)
2378 const struct preload_info **preload_info = dlsym( RTLD_DEFAULT, "wine_main_preload_info" );
2379 const char *preload = getenv( "WINEPRELOADRESERVE" );
2380 struct alloc_virtual_heap alloc_views;
2381 size_t size;
2382 int i;
2383 pthread_mutexattr_t attr;
2385 pthread_mutexattr_init( &attr );
2386 pthread_mutexattr_settype( &attr, PTHREAD_MUTEX_RECURSIVE );
2387 pthread_mutex_init( &virtual_mutex, &attr );
2388 pthread_mutexattr_destroy( &attr );
2390 if (preload_info && *preload_info)
2391 for (i = 0; (*preload_info)[i].size; i++)
2392 mmap_add_reserved_area( (*preload_info)[i].addr, (*preload_info)[i].size );
2394 mmap_init( preload_info ? *preload_info : NULL );
2396 if (preload)
2398 unsigned long start, end;
2399 if (sscanf( preload, "%lx-%lx", &start, &end ) == 2)
2401 preload_reserve_start = (void *)start;
2402 preload_reserve_end = (void *)end;
2403 /* some apps start inside the DOS area */
2404 if (preload_reserve_start)
2405 address_space_start = min( address_space_start, preload_reserve_start );
2409 /* try to find space in a reserved area for the views and pages protection table */
2410 #ifdef _WIN64
2411 pages_vprot_size = ((size_t)address_space_limit >> page_shift >> pages_vprot_shift) + 1;
2412 alloc_views.size = 2 * view_block_size + pages_vprot_size * sizeof(*pages_vprot);
2413 #else
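/* on 32-bit, one protection byte per page covers the whole 4GB address
 * space (1MB in total with 4K pages) */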
2414 alloc_views.size = 2 * view_block_size + (1U << (32 - page_shift));
2415 #endif
2416 if (mmap_enum_reserved_areas( alloc_virtual_heap, &alloc_views, 1 ))
2417 mmap_remove_reserved_area( alloc_views.base, alloc_views.size );
2418 else
2419 alloc_views.base = anon_mmap_alloc( alloc_views.size, PROT_READ | PROT_WRITE );
2421 assert( alloc_views.base != MAP_FAILED );
2422 view_block_start = alloc_views.base;
2423 view_block_end = view_block_start + view_block_size / sizeof(*view_block_start);
2424 free_ranges = (void *)((char *)alloc_views.base + view_block_size);
2425 pages_vprot = (void *)((char *)alloc_views.base + 2 * view_block_size);
2426 wine_rb_init( &views_tree, compare_view );
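/* initially the whole address space is a single free range */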
2428 free_ranges[0].base = (void *)0;
2429 free_ranges[0].end = (void *)~0;
2430 free_ranges_end = free_ranges + 1;
2432 /* make the DOS area accessible (except the low 64K) to hide bugs in broken apps like Excel 2003 */
2433 size = (char *)address_space_start - (char *)0x10000;
2434 if (size && mmap_is_in_reserved_area( (void*)0x10000, size ) == 1)
2435 anon_mmap_fixed( (void *)0x10000, size, PROT_READ | PROT_WRITE, 0 );
2439 /***********************************************************************
2440 * get_system_affinity_mask
2442 ULONG_PTR get_system_affinity_mask(void)
2444 ULONG num_cpus = NtCurrentTeb()->Peb->NumberOfProcessors;
2445 if (num_cpus >= sizeof(ULONG_PTR) * 8) return ~(ULONG_PTR)0;
2446 return ((ULONG_PTR)1 << num_cpus) - 1;
2449 /***********************************************************************
2450 * virtual_get_system_info
2452 void virtual_get_system_info( SYSTEM_BASIC_INFORMATION *info )
2454 #if defined(HAVE_STRUCT_SYSINFO_TOTALRAM) && defined(HAVE_STRUCT_SYSINFO_MEM_UNIT)
2455 struct sysinfo sinfo;
2457 if (!sysinfo(&sinfo))
2459 ULONG64 total = (ULONG64)sinfo.totalram * sinfo.mem_unit;
2460 info->MmHighestPhysicalPage = max(1, total / page_size);
2462 #elif defined(_SC_PHYS_PAGES)
2463 LONG64 phys_pages = sysconf( _SC_PHYS_PAGES );
2465 info->MmHighestPhysicalPage = max(1, phys_pages);
2466 #else
2467 info->MmHighestPhysicalPage = 0x7fffffff / page_size;
2468 #endif
2470 info->unknown = 0;
2471 info->KeMaximumIncrement = 0; /* FIXME */
2472 info->PageSize = page_size;
2473 info->MmLowestPhysicalPage = 1;
2474 info->MmNumberOfPhysicalPages = info->MmHighestPhysicalPage - info->MmLowestPhysicalPage;
2475 info->AllocationGranularity = granularity_mask + 1;
2476 info->LowestUserAddress = (void *)0x10000;
2477 info->HighestUserAddress = (char *)user_space_limit - 1;
2478 info->ActiveProcessorsAffinityMask = get_system_affinity_mask();
2479 info->NumberOfProcessors = NtCurrentTeb()->Peb->NumberOfProcessors;
2483 /***********************************************************************
2484 * virtual_create_builtin_view
2486 NTSTATUS virtual_create_builtin_view( void *module, pe_image_info_t *info )
2488 NTSTATUS status;
2489 sigset_t sigset;
2490 IMAGE_DOS_HEADER *dos = module;
2491 IMAGE_NT_HEADERS *nt = (IMAGE_NT_HEADERS *)((char *)dos + dos->e_lfanew);
2492 SIZE_T size = info->map_size;
2493 IMAGE_SECTION_HEADER *sec;
2494 struct file_view *view;
2495 void *base = wine_server_get_ptr( info->base );
2496 int i;
2498 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
2499 status = create_view( &view, base, size, SEC_IMAGE | SEC_FILE | VPROT_SYSTEM |
2500 VPROT_COMMITTED | VPROT_READ | VPROT_WRITECOPY | VPROT_EXEC );
2501 if (!status)
2503 TRACE( "created %p-%p\n", base, (char *)base + size );
2505 /* The PE header is always read-only, no write, no execute. */
2506 set_page_vprot( base, page_size, VPROT_COMMITTED | VPROT_READ );
2508 sec = (IMAGE_SECTION_HEADER *)((char *)&nt->OptionalHeader + nt->FileHeader.SizeOfOptionalHeader);
2509 for (i = 0; i < nt->FileHeader.NumberOfSections; i++)
2511 BYTE flags = VPROT_COMMITTED;
2513 if (sec[i].Characteristics & IMAGE_SCN_MEM_EXECUTE) flags |= VPROT_EXEC;
2514 if (sec[i].Characteristics & IMAGE_SCN_MEM_READ) flags |= VPROT_READ;
2515 if (sec[i].Characteristics & IMAGE_SCN_MEM_WRITE) flags |= VPROT_WRITE;
2516 set_page_vprot( (char *)base + sec[i].VirtualAddress, sec[i].Misc.VirtualSize, flags );
2519 SERVER_START_REQ( map_view )
2521 req->base = wine_server_client_ptr( view->base );
2522 req->size = size;
2523 wine_server_add_data( req, info, sizeof(*info) );
2524 status = wine_server_call( req );
2526 SERVER_END_REQ;
2528 if (status >= 0)
2530 VIRTUAL_DEBUG_DUMP_VIEW( view );
2531 if (is_beyond_limit( base, size, working_set_limit )) working_set_limit = address_space_limit;
2533 else delete_view( view );
2535 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
2537 return status;
2541 /* set some initial values in a new TEB */
2542 static void init_teb( TEB *teb, PEB *peb )
2544 struct ntdll_thread_data *thread_data = (struct ntdll_thread_data *)&teb->GdiTebBatch;
2546 #ifndef _WIN64
2547 TEB64 *teb64 = (TEB64 *)((char *)teb - teb_offset);
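/* the 64-bit TEB precedes the 32-bit one by teb_offset, and the 64-bit
 * PEB occupies the page right after the 32-bit PEB */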
2549 teb64->Peb = PtrToUlong( (char *)peb + page_size );
2550 teb64->Tib.Self = PtrToUlong( teb64 );
2551 teb64->Tib.ExceptionList = PtrToUlong( teb );
2552 teb64->ActivationContextStackPointer = PtrToUlong( &teb64->ActivationContextStack );
2553 teb64->ActivationContextStack.FrameListCache.Flink =
2554 teb64->ActivationContextStack.FrameListCache.Blink =
2555 PtrToUlong( &teb64->ActivationContextStack.FrameListCache );
2556 teb64->StaticUnicodeString.Buffer = PtrToUlong( teb64->StaticUnicodeBuffer );
2557 teb64->StaticUnicodeString.MaximumLength = sizeof( teb64->StaticUnicodeBuffer );
2558 teb->WOW32Reserved = __wine_syscall_dispatcher;
2559 #endif
2560 teb->Peb = peb;
2561 teb->Tib.Self = &teb->Tib;
2562 teb->Tib.ExceptionList = (void *)~0ul;
2563 teb->Tib.StackBase = (void *)~0ul;
2564 teb->ActivationContextStackPointer = &teb->ActivationContextStack;
2565 InitializeListHead( &teb->ActivationContextStack.FrameListCache );
2566 teb->StaticUnicodeString.Buffer = teb->StaticUnicodeBuffer;
2567 teb->StaticUnicodeString.MaximumLength = sizeof(teb->StaticUnicodeBuffer);
2568 thread_data->request_fd = -1;
2569 thread_data->reply_fd = -1;
2570 thread_data->wait_fd[0] = -1;
2571 thread_data->wait_fd[1] = -1;
2572 list_add_head( &teb_list, &thread_data->entry );
2576 /***********************************************************************
2577 * virtual_alloc_first_teb
2579 TEB *virtual_alloc_first_teb(void)
2581 TEB *teb;
2582 PEB *peb;
2583 void *ptr;
2584 NTSTATUS status;
2585 SIZE_T data_size = page_size;
2586 SIZE_T peb_size = page_size * (is_win64 ? 1 : 2);
2587 SIZE_T block_size = signal_stack_mask + 1;
2588 SIZE_T total = 32 * block_size;
2590 /* reserve space for shared user data */
2591 status = NtAllocateVirtualMemory( NtCurrentProcess(), (void **)&user_shared_data, 0, &data_size,
2592 MEM_RESERVE | MEM_COMMIT, PAGE_READONLY );
2593 if (status)
2595 ERR( "wine: failed to map the shared user data: %08x\n", status );
2596 exit(1);
2599 #ifdef __x86_64__ /* sneak in a syscall dispatcher pointer at a fixed address (7ffe1000) */
2600 ptr = (char *)user_shared_data + page_size;
2601 anon_mmap_fixed( ptr, page_size, PROT_READ | PROT_WRITE, 0 );
2602 *(void **)ptr = __wine_syscall_dispatcher;
2603 #endif
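/* reserve a block of 32 signal-stack-sized slots top-down: the first TEB
 * goes in slot 30, the PEB at the very top of slot 31, and later threads
 * fill slots 29, 28, ... (see virtual_alloc_teb) */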
2605 NtAllocateVirtualMemory( NtCurrentProcess(), &teb_block, 0, &total,
2606 MEM_RESERVE | MEM_TOP_DOWN, PAGE_READWRITE );
2607 teb_block_pos = 30;
2608 ptr = ((char *)teb_block + 30 * block_size);
2609 teb = (TEB *)((char *)ptr + teb_offset);
2610 peb = (PEB *)((char *)teb_block + 32 * block_size - peb_size);
2611 NtAllocateVirtualMemory( NtCurrentProcess(), (void **)&ptr, 0, &block_size, MEM_COMMIT, PAGE_READWRITE );
2612 NtAllocateVirtualMemory( NtCurrentProcess(), (void **)&peb, 0, &peb_size, MEM_COMMIT, PAGE_READWRITE );
2613 init_teb( teb, peb );
2614 *(ULONG_PTR *)&peb->CloudFileFlags = get_image_address();
2615 return teb;
2619 /***********************************************************************
2620 * virtual_alloc_teb
2622 NTSTATUS virtual_alloc_teb( TEB **ret_teb )
2624 sigset_t sigset;
2625 TEB *teb;
2626 void *ptr = NULL;
2627 NTSTATUS status = STATUS_SUCCESS;
2628 SIZE_T block_size = signal_stack_mask + 1;
2630 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
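/* reuse a previously freed TEB if possible; freed TEBs are chained
 * through their first pointer (see virtual_free_teb) */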
2631 if (next_free_teb)
2633 ptr = next_free_teb;
2634 next_free_teb = *(void **)ptr;
2635 memset( ptr, 0, teb_size );
2637 else
2639 if (!teb_block_pos)
2641 SIZE_T total = 32 * block_size;
2643 if ((status = NtAllocateVirtualMemory( NtCurrentProcess(), &ptr, 0, &total,
2644 MEM_RESERVE, PAGE_READWRITE )))
2646 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
2647 return status;
2649 teb_block = ptr;
2650 teb_block_pos = 32;
2652 ptr = ((char *)teb_block + --teb_block_pos * block_size);
2653 NtAllocateVirtualMemory( NtCurrentProcess(), (void **)&ptr, 0, &block_size,
2654 MEM_COMMIT, PAGE_READWRITE );
2656 *ret_teb = teb = (TEB *)((char *)ptr + teb_offset);
2657 init_teb( teb, NtCurrentTeb()->Peb );
2658 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
2660 if ((status = signal_alloc_thread( teb )))
2662 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
2663 *(void **)ptr = next_free_teb;
2664 next_free_teb = ptr;
2665 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
2667 return status;
2671 /***********************************************************************
2672 * virtual_free_teb
2674 void virtual_free_teb( TEB *teb )
2676 struct ntdll_thread_data *thread_data = (struct ntdll_thread_data *)&teb->GdiTebBatch;
2677 void *ptr;
2678 SIZE_T size;
2679 sigset_t sigset;
2681 signal_free_thread( teb );
2682 if (teb->DeallocationStack)
2684 size = 0;
2685 NtFreeVirtualMemory( GetCurrentProcess(), &teb->DeallocationStack, &size, MEM_RELEASE );
2687 if (thread_data->start_stack)
2689 size = 0;
2690 NtFreeVirtualMemory( GetCurrentProcess(), &thread_data->start_stack, &size, MEM_RELEASE );
2693 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
2694 list_remove( &thread_data->entry );
2695 ptr = (char *)teb - teb_offset;
2696 *(void **)ptr = next_free_teb;
2697 next_free_teb = ptr;
2698 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
2702 /***********************************************************************
2703 * virtual_clear_tls_index
2705 NTSTATUS virtual_clear_tls_index( ULONG index )
2707 struct ntdll_thread_data *thread_data;
2708 sigset_t sigset;
2710 if (index < TLS_MINIMUM_AVAILABLE)
2712 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
2713 LIST_FOR_EACH_ENTRY( thread_data, &teb_list, struct ntdll_thread_data, entry )
2715 TEB *teb = CONTAINING_RECORD( thread_data, TEB, GdiTebBatch );
2716 teb->TlsSlots[index] = 0;
2718 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
2720 else
2722 index -= TLS_MINIMUM_AVAILABLE;
2723 if (index >= 8 * sizeof(NtCurrentTeb()->Peb->TlsExpansionBitmapBits))
2724 return STATUS_INVALID_PARAMETER;
2726 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
2727 LIST_FOR_EACH_ENTRY( thread_data, &teb_list, struct ntdll_thread_data, entry )
2729 TEB *teb = CONTAINING_RECORD( thread_data, TEB, GdiTebBatch );
2730 if (teb->TlsExpansionSlots) teb->TlsExpansionSlots[index] = 0;
2732 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
2734 return STATUS_SUCCESS;
2738 /***********************************************************************
2739 * virtual_alloc_thread_stack
2741 NTSTATUS virtual_alloc_thread_stack( INITIAL_TEB *stack, SIZE_T reserve_size, SIZE_T commit_size,
2742 SIZE_T *pthread_size )
2744 struct file_view *view;
2745 NTSTATUS status;
2746 sigset_t sigset;
2747 SIZE_T size, extra_size = 0;
2749 if (!reserve_size || !commit_size)
2751 IMAGE_NT_HEADERS *nt = get_exe_nt_header();
2752 if (!reserve_size) reserve_size = nt->OptionalHeader.SizeOfStackReserve;
2753 if (!commit_size) commit_size = nt->OptionalHeader.SizeOfStackCommit;
2756 size = max( reserve_size, commit_size );
2757 if (size < 1024 * 1024) size = 1024 * 1024; /* Xlib needs a large stack */
2758 size = (size + 0xffff) & ~0xffff; /* round to 64K boundary */
2759 if (pthread_size) *pthread_size = extra_size = max( page_size, ROUND_SIZE( 0, *pthread_size ));
2761 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
2763 if ((status = map_view( &view, NULL, size + extra_size, FALSE,
2764 VPROT_READ | VPROT_WRITE | VPROT_COMMITTED, 0 )) != STATUS_SUCCESS)
2765 goto done;
2767 #ifdef VALGRIND_STACK_REGISTER
2768 VALGRIND_STACK_REGISTER( view->base, (char *)view->base + view->size );
2769 #endif
2771 /* set up the no-access and guard pages at the bottom of the stack */
2772 set_page_vprot( view->base, page_size, VPROT_COMMITTED );
2773 set_page_vprot( (char *)view->base + page_size, page_size,
2774 VPROT_READ | VPROT_WRITE | VPROT_COMMITTED | VPROT_GUARD );
2775 mprotect_range( view->base, 2 * page_size, 0, 0 );
2776 VIRTUAL_DEBUG_DUMP_VIEW( view );
2778 if (extra_size)
2780 struct file_view *extra_view;
2782 /* shrink the first view and create a second one for the extra size */
2783 /* this allows the app to free the stack without freeing the thread start portion */
2784 view->size -= extra_size;
2785 status = create_view( &extra_view, (char *)view->base + view->size, extra_size,
2786 VPROT_READ | VPROT_WRITE | VPROT_COMMITTED );
2787 if (status != STATUS_SUCCESS)
2789 view->size += extra_size;
2790 delete_view( view );
2791 goto done;
2795 /* note: limit is lower than base since the stack grows down */
2796 stack->OldStackBase = 0;
2797 stack->OldStackLimit = 0;
2798 stack->DeallocationStack = view->base;
2799 stack->StackBase = (char *)view->base + view->size;
2800 stack->StackLimit = (char *)view->base + 2 * page_size;
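/* resulting layout, with the stack growing downward:
 *   view->base                  no-access page
 *   view->base + page_size      guard page
 *   view->base + 2 * page_size  StackLimit
 *   view->base + view->size     StackBase */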
2801 done:
2802 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
2803 return status;
2807 /***********************************************************************
2808 * virtual_map_user_shared_data
2810 void virtual_map_user_shared_data(void)
2812 static const WCHAR nameW[] = {'\\','K','e','r','n','e','l','O','b','j','e','c','t','s',
2813 '\\','_','_','w','i','n','e','_','u','s','e','r','_','s','h','a','r','e','d','_','d','a','t','a',0};
2814 UNICODE_STRING name_str = { sizeof(nameW) - sizeof(WCHAR), sizeof(nameW), (WCHAR *)nameW };
2815 OBJECT_ATTRIBUTES attr = { sizeof(attr), 0, &name_str };
2816 NTSTATUS status;
2817 HANDLE section;
2818 int res, fd, needs_close;
2820 if ((status = NtOpenSection( &section, SECTION_ALL_ACCESS, &attr )))
2822 ERR( "failed to open the USD section: %08x\n", status );
2823 exit(1);
2825 if ((res = server_get_unix_fd( section, 0, &fd, &needs_close, NULL, NULL )) ||
2826 (user_shared_data != mmap( user_shared_data, page_size, PROT_READ, MAP_SHARED|MAP_FIXED, fd, 0 )))
2828 ERR( "failed to remap the process USD: %d\n", res );
2829 exit(1);
2831 if (needs_close) close( fd );
2832 NtClose( section );
2836 /***********************************************************************
2837 * grow_thread_stack
2839 static NTSTATUS grow_thread_stack( char *page )
2841 NTSTATUS ret = 0;
2842 size_t guaranteed = max( NtCurrentTeb()->GuaranteedStackBytes, page_size * (is_win64 ? 2 : 1) );
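/* space kept available past the guard page for stack overflow handling
 * (see SetThreadStackGuarantee); at least one page, two on 64-bit */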
2844 set_page_vprot_bits( page, page_size, 0, VPROT_GUARD );
2845 mprotect_range( page, page_size, 0, 0 );
2846 if (page >= (char *)NtCurrentTeb()->DeallocationStack + page_size + guaranteed)
2848 set_page_vprot_bits( page - page_size, page_size, VPROT_COMMITTED | VPROT_GUARD, 0 );
2849 mprotect_range( page - page_size, page_size, 0, 0 );
2851 else /* inside guaranteed space -> overflow exception */
2853 page = (char *)NtCurrentTeb()->DeallocationStack + page_size;
2854 set_page_vprot_bits( page, guaranteed, VPROT_COMMITTED, VPROT_GUARD );
2855 mprotect_range( page, guaranteed, 0, 0 );
2856 ret = STATUS_STACK_OVERFLOW;
2858 NtCurrentTeb()->Tib.StackLimit = page;
2859 return ret;
2863 /***********************************************************************
2864 * virtual_handle_fault
2866 NTSTATUS virtual_handle_fault( void *addr, DWORD err, void *stack )
2868 NTSTATUS ret = STATUS_ACCESS_VIOLATION;
2869 char *page = ROUND_ADDR( addr, page_mask );
2870 BYTE vprot;
2872 mutex_lock( &virtual_mutex ); /* no need for signal masking inside signal handler */
2873 vprot = get_page_vprot( page );
2874 if (!is_inside_signal_stack( stack ) && (vprot & VPROT_GUARD))
2876 if (page < (char *)NtCurrentTeb()->DeallocationStack ||
2877 page >= (char *)NtCurrentTeb()->Tib.StackBase)
2879 set_page_vprot_bits( page, page_size, 0, VPROT_GUARD );
2880 mprotect_range( page, page_size, 0, 0 );
2881 ret = STATUS_GUARD_PAGE_VIOLATION;
2883 else ret = grow_thread_stack( page );
2885 else if (err & EXCEPTION_WRITE_FAULT)
2887 if (vprot & VPROT_WRITEWATCH)
2889 set_page_vprot_bits( page, page_size, 0, VPROT_WRITEWATCH );
2890 mprotect_range( page, page_size, 0, 0 );
2892 /* ignore fault if page is writable now */
2893 if (get_unix_prot( get_page_vprot( page )) & PROT_WRITE)
2895 if ((vprot & VPROT_WRITEWATCH) || is_write_watch_range( page, page_size ))
2896 ret = STATUS_SUCCESS;
2899 mutex_unlock( &virtual_mutex );
2900 return ret;
2904 /***********************************************************************
2905 * virtual_setup_exception
2907 void *virtual_setup_exception( void *stack_ptr, size_t size, EXCEPTION_RECORD *rec )
2909 char *stack = stack_ptr;
2911 if (is_inside_signal_stack( stack ))
2913 ERR( "nested exception on signal stack in thread %04x addr %p stack %p (%p-%p-%p)\n",
2914 GetCurrentThreadId(), rec->ExceptionAddress, stack, NtCurrentTeb()->DeallocationStack,
2915 NtCurrentTeb()->Tib.StackLimit, NtCurrentTeb()->Tib.StackBase );
2916 abort_thread(1);
2919 if (stack - size > stack || /* check for overflow in subtraction */
2920 stack <= (char *)NtCurrentTeb()->DeallocationStack ||
2921 stack > (char *)NtCurrentTeb()->Tib.StackBase)
2923 WARN( "exception outside of stack limits in thread %04x addr %p stack %p (%p-%p-%p)\n",
2924 GetCurrentThreadId(), rec->ExceptionAddress, stack, NtCurrentTeb()->DeallocationStack,
2925 NtCurrentTeb()->Tib.StackLimit, NtCurrentTeb()->Tib.StackBase );
2926 return stack - size;
2929 stack -= size;
2931 if (stack < (char *)NtCurrentTeb()->DeallocationStack + 4096)
2933 /* stack overflow on last page, unrecoverable */
2934 UINT diff = (char *)NtCurrentTeb()->DeallocationStack + 4096 - stack;
2935 ERR( "stack overflow %u bytes in thread %04x addr %p stack %p (%p-%p-%p)\n",
2936 diff, GetCurrentThreadId(), rec->ExceptionAddress, stack, NtCurrentTeb()->DeallocationStack,
2937 NtCurrentTeb()->Tib.StackLimit, NtCurrentTeb()->Tib.StackBase );
2938 abort_thread(1);
2940 else if (stack < (char *)NtCurrentTeb()->Tib.StackLimit)
2942 mutex_lock( &virtual_mutex ); /* no need for signal masking inside signal handler */
2943 if ((get_page_vprot( stack ) & VPROT_GUARD) && grow_thread_stack( ROUND_ADDR( stack, page_mask )))
2945 rec->ExceptionCode = STATUS_STACK_OVERFLOW;
2946 rec->NumberParameters = 0;
2948 mutex_unlock( &virtual_mutex );
2950 #if defined(VALGRIND_MAKE_MEM_UNDEFINED)
2951 VALGRIND_MAKE_MEM_UNDEFINED( stack, size );
2952 #elif defined(VALGRIND_MAKE_WRITABLE)
2953 VALGRIND_MAKE_WRITABLE( stack, size );
2954 #endif
2955 return stack;
2959 /***********************************************************************
2960 * check_write_access
2962 * Check if the memory range is writable, temporarily disabling write watches if necessary.
2964 static NTSTATUS check_write_access( void *base, size_t size, BOOL *has_write_watch )
2966 size_t i;
2967 char *addr = ROUND_ADDR( base, page_mask );
2969 size = ROUND_SIZE( base, size );
2970 for (i = 0; i < size; i += page_size)
2972 BYTE vprot = get_page_vprot( addr + i );
2973 if (vprot & VPROT_WRITEWATCH) *has_write_watch = TRUE;
2974 if (!(get_unix_prot( vprot & ~VPROT_WRITEWATCH ) & PROT_WRITE))
2975 return STATUS_INVALID_USER_BUFFER;
2977 if (*has_write_watch)
2978 mprotect_range( addr, size, 0, VPROT_WRITEWATCH ); /* temporarily enable write access */
2979 return STATUS_SUCCESS;
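/* Callers follow a common pattern (sketch): take virtual_mutex, call
 * check_write_access(), perform the write (memcpy/read/recvmsg), then
 * call update_write_watches() to re-arm the watches, as in the helpers
 * below. */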
2983 /***********************************************************************
2984 * virtual_locked_server_call
2986 unsigned int virtual_locked_server_call( void *req_ptr )
2988 struct __server_request_info * const req = req_ptr;
2989 sigset_t sigset;
2990 void *addr = req->reply_data;
2991 data_size_t size = req->u.req.request_header.reply_size;
2992 BOOL has_write_watch = FALSE;
2993 unsigned int ret = STATUS_ACCESS_VIOLATION;
2995 if (!size) return wine_server_call( req_ptr );
2997 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
2998 if (!(ret = check_write_access( addr, size, &has_write_watch )))
3000 ret = server_call_unlocked( req );
3001 if (has_write_watch) update_write_watches( addr, size, wine_server_reply_size( req ));
3003 else memset( &req->u.reply, 0, sizeof(req->u.reply) );
3004 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
3005 return ret;
3009 /***********************************************************************
3010 * virtual_locked_read
3012 ssize_t virtual_locked_read( int fd, void *addr, size_t size )
3014 sigset_t sigset;
3015 BOOL has_write_watch = FALSE;
3016 int err = EFAULT;
3018 ssize_t ret = read( fd, addr, size );
3019 if (ret != -1 || errno != EFAULT) return ret;
3021 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
3022 if (!check_write_access( addr, size, &has_write_watch ))
3024 ret = read( fd, addr, size );
3025 err = errno;
3026 if (has_write_watch) update_write_watches( addr, size, max( 0, ret ));
3028 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
3029 errno = err;
3030 return ret;
3034 /***********************************************************************
3035 * virtual_locked_pread
3037 ssize_t virtual_locked_pread( int fd, void *addr, size_t size, off_t offset )
3039 sigset_t sigset;
3040 BOOL has_write_watch = FALSE;
3041 int err = EFAULT;
3043 ssize_t ret = pread( fd, addr, size, offset );
3044 if (ret != -1 || errno != EFAULT) return ret;
3046 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
3047 if (!check_write_access( addr, size, &has_write_watch ))
3049 ret = pread( fd, addr, size, offset );
3050 err = errno;
3051 if (has_write_watch) update_write_watches( addr, size, max( 0, ret ));
3053 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
3054 errno = err;
3055 return ret;
3059 /***********************************************************************
3060 * __wine_locked_recvmsg (NTDLL.@)
3062 ssize_t CDECL __wine_locked_recvmsg( int fd, struct msghdr *hdr, int flags )
3064 sigset_t sigset;
3065 size_t i;
3066 BOOL has_write_watch = FALSE;
3067 int err = EFAULT;
3069 ssize_t ret = recvmsg( fd, hdr, flags );
3070 if (ret != -1 || errno != EFAULT) return ret;
3072 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
3073 for (i = 0; i < hdr->msg_iovlen; i++)
3074 if (check_write_access( hdr->msg_iov[i].iov_base, hdr->msg_iov[i].iov_len, &has_write_watch ))
3075 break;
3076 if (i == hdr->msg_iovlen)
3078 ret = recvmsg( fd, hdr, flags );
3079 err = errno;
3081 if (has_write_watch)
3082 while (i--) update_write_watches( hdr->msg_iov[i].iov_base, hdr->msg_iov[i].iov_len, 0 );
3084 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
3085 errno = err;
3086 return ret;
3090 /***********************************************************************
3091 * virtual_is_valid_code_address
3093 BOOL virtual_is_valid_code_address( const void *addr, SIZE_T size )
3095 struct file_view *view;
3096 BOOL ret = FALSE;
3097 sigset_t sigset;
3099 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
3100 if ((view = find_view( addr, size )))
3101 ret = !(view->protect & VPROT_SYSTEM); /* system views are not visible to the app */
3102 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
3103 return ret;
3107 /***********************************************************************
3108 * virtual_check_buffer_for_read
3110 * Check if a memory buffer can be read, triggering page faults if needed for DIB section access.
3112 BOOL virtual_check_buffer_for_read( const void *ptr, SIZE_T size )
3114 if (!size) return TRUE;
3115 if (!ptr) return FALSE;
3117 __TRY
3119 volatile const char *p = ptr;
3120 char dummy __attribute__((unused));
3121 SIZE_T count = size;
3123 while (count > page_size)
3125 dummy = *p;
3126 p += page_size;
3127 count -= page_size;
3129 dummy = p[0];
3130 dummy = p[count - 1];
3132 __EXCEPT_SYSCALL
3134 return FALSE;
3136 __ENDTRY
3137 return TRUE;
3141 /***********************************************************************
3142 * virtual_check_buffer_for_write
3144 * Check if a memory buffer can be written to, triggering page faults if needed for write watches.
3146 BOOL virtual_check_buffer_for_write( void *ptr, SIZE_T size )
3148 if (!size) return TRUE;
3149 if (!ptr) return FALSE;
3151 __TRY
3153 volatile char *p = ptr;
3154 SIZE_T count = size;
3156 while (count > page_size)
3158 *p |= 0;
3159 p += page_size;
3160 count -= page_size;
3162 p[0] |= 0;
3163 p[count - 1] |= 0;
3165 __EXCEPT_SYSCALL
3167 return FALSE;
3169 __ENDTRY
3170 return TRUE;
3174 /***********************************************************************
3175 * virtual_uninterrupted_read_memory
3177 * Similar to NtReadVirtualMemory, but without wineserver calls. Moreover,
3178 * permissions are checked before accessing each page, to ensure that no
3179 * exceptions can happen.
3181 SIZE_T virtual_uninterrupted_read_memory( const void *addr, void *buffer, SIZE_T size )
3183 struct file_view *view;
3184 sigset_t sigset;
3185 SIZE_T bytes_read = 0;
3187 if (!size) return 0;
3189 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
3190 if ((view = find_view( addr, size )))
3192 if (!(view->protect & VPROT_SYSTEM))
3194 while (bytes_read < size && (get_unix_prot( get_page_vprot( addr )) & PROT_READ))
3196 SIZE_T block_size = min( size - bytes_read, page_size - ((UINT_PTR)addr & page_mask) );
3197 memcpy( buffer, addr, block_size );
3199 addr = (const void *)((const char *)addr + block_size);
3200 buffer = (void *)((char *)buffer + block_size);
3201 bytes_read += block_size;
3205 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
3206 return bytes_read;
3210 /***********************************************************************
3211 * virtual_uninterrupted_write_memory
3213 * Similar to NtWriteVirtualMemory, but without wineserver calls. Moreover,
3214 * permissions are checked before accessing each page, to ensure that no
3215 * exceptions can happen.
3217 NTSTATUS virtual_uninterrupted_write_memory( void *addr, const void *buffer, SIZE_T size )
3219 BOOL has_write_watch = FALSE;
3220 sigset_t sigset;
3221 NTSTATUS ret;
3223 if (!size) return STATUS_SUCCESS;
3225 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
3226 if (!(ret = check_write_access( addr, size, &has_write_watch )))
3228 memcpy( addr, buffer, size );
3229 if (has_write_watch) update_write_watches( addr, size, size );
3231 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
3232 return ret;
3236 /***********************************************************************
3237 * virtual_set_force_exec
3239 * Enable or disable forcing exec permission on all views.
3241 void virtual_set_force_exec( BOOL enable )
3243 struct file_view *view;
3244 sigset_t sigset;
3246 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
3247 if (!force_exec_prot != !enable) /* change all existing views */
3249 force_exec_prot = enable;
3251 WINE_RB_FOR_EACH_ENTRY( view, &views_tree, struct file_view, entry )
3253 /* file mappings are always accessible */
3254 BYTE commit = is_view_valloc( view ) ? 0 : VPROT_COMMITTED;
3256 mprotect_range( view->base, view->size, commit, 0 );
3259 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
3262 struct free_range
3264 char *base;
3265 char *limit;
3268 /* free reserved areas above the limit; callback for mmap_enum_reserved_areas */
3269 static int CDECL free_reserved_memory( void *base, SIZE_T size, void *arg )
3271 struct free_range *range = arg;
3273 if ((char *)base >= range->limit) return 0;
3274 if ((char *)base + size <= range->base) return 0;
3275 if ((char *)base < range->base)
3277 size -= range->base - (char *)base;
3278 base = range->base;
3280 if ((char *)base + size > range->limit) size = range->limit - (char *)base;
3281 remove_reserved_area( base, size );
3282 return 1; /* stop enumeration since the list has changed */
3285 /***********************************************************************
3286 * virtual_release_address_space
3288 * Release some address space once we have loaded and initialized the app.
3290 void CDECL virtual_release_address_space(void)
3292 struct free_range range;
3293 sigset_t sigset;
3295 if (is_win64) return;
3297 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
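/* first pass: when the large address space is enabled, release all
 * reserved areas above 0x82000000 */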
3299 range.base = (char *)0x82000000;
3300 range.limit = user_space_limit;
3302 if (range.limit > range.base)
3304 while (mmap_enum_reserved_areas( free_reserved_memory, &range, 1 )) /* nothing */;
3305 #ifdef __APPLE__
3306 /* On macOS, we still want to free some of the low memory, for OpenGL resources */
3307 range.base = (char *)0x40000000;
3308 #else
3309 range.base = NULL;
3310 #endif
3312 else
3313 range.base = (char *)0x20000000;
3315 if (range.base)
3317 range.limit = (char *)0x7f000000;
3318 while (mmap_enum_reserved_areas( free_reserved_memory, &range, 0 )) /* nothing */;
3321 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
3325 /***********************************************************************
3326 * virtual_set_large_address_space
3328 * Enable use of a large address space when allowed by the application.
3330 void virtual_set_large_address_space(void)
3332 /* no large address space on win9x */
3333 if (NtCurrentTeb()->Peb->OSPlatformId != VER_PLATFORM_WIN32_NT) return;
3335 user_space_limit = working_set_limit = address_space_limit;
3339 /***********************************************************************
3340 * NtAllocateVirtualMemory (NTDLL.@)
3341 * ZwAllocateVirtualMemory (NTDLL.@)
3343 NTSTATUS WINAPI NtAllocateVirtualMemory( HANDLE process, PVOID *ret, ULONG_PTR zero_bits,
3344 SIZE_T *size_ptr, ULONG type, ULONG protect )
3346 void *base;
3347 unsigned int vprot;
3348 BOOL is_dos_memory = FALSE;
3349 struct file_view *view;
3350 sigset_t sigset;
3351 SIZE_T size = *size_ptr;
3352 NTSTATUS status = STATUS_SUCCESS;
3353 unsigned short zero_bits_64 = zero_bits_win_to_64( zero_bits );
3355 TRACE("%p %p %08lx %x %08x\n", process, *ret, size, type, protect );
3357 if (!size) return STATUS_INVALID_PARAMETER;
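/* zero_bits is either a count of high zero bits (1..21) or, on 64-bit,
 * an address mask (values >= 32); anything in between is invalid, and
 * zero_bits_win_to_64() normalizes both forms to a bit count */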
3358 if (zero_bits > 21 && zero_bits < 32) return STATUS_INVALID_PARAMETER_3;
3359 if (!is_win64 && !is_wow64 && zero_bits >= 32) return STATUS_INVALID_PARAMETER_3;
3361 if (process != NtCurrentProcess())
3363 apc_call_t call;
3364 apc_result_t result;
3366 memset( &call, 0, sizeof(call) );
3368 call.virtual_alloc.type = APC_VIRTUAL_ALLOC;
3369 call.virtual_alloc.addr = wine_server_client_ptr( *ret );
3370 call.virtual_alloc.size = *size_ptr;
3371 call.virtual_alloc.zero_bits = zero_bits;
3372 call.virtual_alloc.op_type = type;
3373 call.virtual_alloc.prot = protect;
3374 status = server_queue_process_apc( process, &call, &result );
3375 if (status != STATUS_SUCCESS) return status;
3377 if (result.virtual_alloc.status == STATUS_SUCCESS)
3379 *ret = wine_server_get_ptr( result.virtual_alloc.addr );
3380 *size_ptr = result.virtual_alloc.size;
3382 return result.virtual_alloc.status;
3385 /* Round parameters to a page boundary */
3387 if (is_beyond_limit( 0, size, working_set_limit )) return STATUS_WORKING_SET_LIMIT_RANGE;
3389 if (*ret)
3391 if (type & MEM_RESERVE) /* Round down to 64k boundary */
3392 base = ROUND_ADDR( *ret, granularity_mask );
3393 else
3394 base = ROUND_ADDR( *ret, page_mask );
3395 size = (((UINT_PTR)*ret + size + page_mask) & ~page_mask) - (UINT_PTR)base;
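/* e.g. with MEM_RESERVE, *ret = 0x10234 and size = 0x1000 (4K pages,
 * 64K granularity): base rounds down to 0x10000 and size rounds up to
 * 0x2000 so the whole requested range stays covered */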
3397 /* disallow low 64k, wrap-around and kernel space */
3398 if (((char *)base < (char *)0x10000) ||
3399 ((char *)base + size < (char *)base) ||
3400 is_beyond_limit( base, size, address_space_limit ))
3402 /* address 1 is magic to mean DOS area */
3403 if (!base && *ret == (void *)1 && size == 0x110000) is_dos_memory = TRUE;
3404 else return STATUS_INVALID_PARAMETER;
3407 else
3409 base = NULL;
3410 size = (size + page_mask) & ~page_mask;
3413 /* Compute the alloc type flags */
3415 if (!(type & (MEM_COMMIT | MEM_RESERVE | MEM_RESET)) ||
3416 (type & ~(MEM_COMMIT | MEM_RESERVE | MEM_TOP_DOWN | MEM_WRITE_WATCH | MEM_RESET)))
3418 WARN("called with wrong alloc type flags (%08x) !\n", type);
3419 return STATUS_INVALID_PARAMETER;
3422 /* Reserve the memory */
3424 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
3426 if ((type & MEM_RESERVE) || !base)
3428 if (!(status = get_vprot_flags( protect, &vprot, FALSE )))
3430 if (type & MEM_COMMIT) vprot |= VPROT_COMMITTED;
3431 if (type & MEM_WRITE_WATCH) vprot |= VPROT_WRITEWATCH;
3432 if (protect & PAGE_NOCACHE) vprot |= SEC_NOCACHE;
3434 if (vprot & VPROT_WRITECOPY) status = STATUS_INVALID_PAGE_PROTECTION;
3435 else if (is_dos_memory) status = allocate_dos_memory( &view, vprot );
3436 else status = map_view( &view, base, size, type & MEM_TOP_DOWN, vprot, zero_bits_64 );
3438 if (status == STATUS_SUCCESS) base = view->base;
3441 else if (type & MEM_RESET)
3443 if (!(view = find_view( base, size ))) status = STATUS_NOT_MAPPED_VIEW;
3444 else madvise( base, size, MADV_DONTNEED );
3446 else /* commit the pages */
3448 if (!(view = find_view( base, size ))) status = STATUS_NOT_MAPPED_VIEW;
3449 else if (view->protect & SEC_FILE) status = STATUS_ALREADY_COMMITTED;
3450 else if (!(status = set_protection( view, base, size, protect )) && (view->protect & SEC_RESERVE))
3452 SERVER_START_REQ( add_mapping_committed_range )
3454 req->base = wine_server_client_ptr( view->base );
3455 req->offset = (char *)base - (char *)view->base;
3456 req->size = size;
3457 wine_server_call( req );
3459 SERVER_END_REQ;
3463 if (!status) VIRTUAL_DEBUG_DUMP_VIEW( view );
3465 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
3467 if (status == STATUS_SUCCESS)
3469 *ret = base;
3470 *size_ptr = size;
3472 return status;
3475 /***********************************************************************
3476 * NtAllocateVirtualMemoryEx (NTDLL.@)
3477 * ZwAllocateVirtualMemoryEx (NTDLL.@)
3479 NTSTATUS WINAPI NtAllocateVirtualMemoryEx( HANDLE process, PVOID *ret, SIZE_T *size_ptr, ULONG type,
3480 ULONG protect, MEM_EXTENDED_PARAMETER *parameters,
3481 ULONG count )
3483 if (count && !parameters) return STATUS_INVALID_PARAMETER;
3485 if (count) FIXME( "Ignoring %d extended parameters %p\n", count, parameters );
3487 return NtAllocateVirtualMemory( process, ret, 0, size_ptr, type, protect );
3491 /***********************************************************************
3492 * NtFreeVirtualMemory (NTDLL.@)
3493 * ZwFreeVirtualMemory (NTDLL.@)
3495 NTSTATUS WINAPI NtFreeVirtualMemory( HANDLE process, PVOID *addr_ptr, SIZE_T *size_ptr, ULONG type )
3497 struct file_view *view;
3498 char *base;
3499 sigset_t sigset;
3500 NTSTATUS status = STATUS_SUCCESS;
3501 LPVOID addr = *addr_ptr;
3502 SIZE_T size = *size_ptr;
3504 TRACE("%p %p %08lx %x\n", process, addr, size, type );
3506 if (process != NtCurrentProcess())
3508 apc_call_t call;
3509 apc_result_t result;
3511 memset( &call, 0, sizeof(call) );
3513 call.virtual_free.type = APC_VIRTUAL_FREE;
3514 call.virtual_free.addr = wine_server_client_ptr( addr );
3515 call.virtual_free.size = size;
3516 call.virtual_free.op_type = type;
3517 status = server_queue_process_apc( process, &call, &result );
3518 if (status != STATUS_SUCCESS) return status;
3520 if (result.virtual_free.status == STATUS_SUCCESS)
3522 *addr_ptr = wine_server_get_ptr( result.virtual_free.addr );
3523 *size_ptr = result.virtual_free.size;
3525 return result.virtual_free.status;
3528 /* Fix the parameters */
3530 size = ROUND_SIZE( addr, size );
3531 base = ROUND_ADDR( addr, page_mask );
3533 /* avoid freeing the DOS area when a broken app passes a NULL pointer */
3534 if (!base) return STATUS_INVALID_PARAMETER;
3536 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
3538 if (!(view = find_view( base, size )) || !is_view_valloc( view ))
3540 status = STATUS_INVALID_PARAMETER;
3542 else if (type == MEM_RELEASE)
3544 /* Free the pages */
3546 if (size || (base != view->base)) status = STATUS_INVALID_PARAMETER;
3547 else
3549 delete_view( view );
3550 *addr_ptr = base;
3551 *size_ptr = size;
3554 else if (type == MEM_DECOMMIT)
3556 status = decommit_pages( view, base - (char *)view->base, size );
3557 if (status == STATUS_SUCCESS)
3559 *addr_ptr = base;
3560 *size_ptr = size;
3563 else
3565 WARN("called with wrong free type flags (%08x) !\n", type);
3566 status = STATUS_INVALID_PARAMETER;
3569 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
3570 return status;

/***********************************************************************
 *             NtProtectVirtualMemory   (NTDLL.@)
 *             ZwProtectVirtualMemory   (NTDLL.@)
 */
NTSTATUS WINAPI NtProtectVirtualMemory( HANDLE process, PVOID *addr_ptr, SIZE_T *size_ptr,
                                        ULONG new_prot, ULONG *old_prot )
{
    struct file_view *view;
    sigset_t sigset;
    NTSTATUS status = STATUS_SUCCESS;
    char *base;
    BYTE vprot;
    SIZE_T size = *size_ptr;
    LPVOID addr = *addr_ptr;
    DWORD old;

    TRACE("%p %p %08lx %08x\n", process, addr, size, new_prot );

    if (!old_prot)
        return STATUS_ACCESS_VIOLATION;

    if (process != NtCurrentProcess())
    {
        apc_call_t call;
        apc_result_t result;

        memset( &call, 0, sizeof(call) );

        call.virtual_protect.type = APC_VIRTUAL_PROTECT;
        call.virtual_protect.addr = wine_server_client_ptr( addr );
        call.virtual_protect.size = size;
        call.virtual_protect.prot = new_prot;
        status = server_queue_process_apc( process, &call, &result );
        if (status != STATUS_SUCCESS) return status;

        if (result.virtual_protect.status == STATUS_SUCCESS)
        {
            *addr_ptr = wine_server_get_ptr( result.virtual_protect.addr );
            *size_ptr = result.virtual_protect.size;
            *old_prot = result.virtual_protect.prot;
        }
        return result.virtual_protect.status;
    }

    /* Fix the parameters */

    size = ROUND_SIZE( addr, size );
    base = ROUND_ADDR( addr, page_mask );

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );

    if ((view = find_view( base, size )))
    {
        /* Make sure all the pages are committed */
        if (get_committed_size( view, base, &vprot ) >= size && (vprot & VPROT_COMMITTED))
        {
            old = get_win32_prot( vprot, view->protect );
            status = set_protection( view, base, size, new_prot );
        }
        else status = STATUS_NOT_COMMITTED;
    }
    else status = STATUS_INVALID_PARAMETER;

    if (!status) VIRTUAL_DEBUG_DUMP_VIEW( view );

    server_leave_uninterrupted_section( &virtual_mutex, &sigset );

    if (status == STATUS_SUCCESS)
    {
        *addr_ptr = base;
        *size_ptr = size;
        *old_prot = old;
    }
    return status;
}
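
/* Note: the query helpers below distinguish three kinds of ranges.  Pages
 * covered by a file_view are reported from the view's per-page flags;
 * pages inside a preloader-reserved area but without a view are genuinely
 * free; and pages outside any reserved area are not under Wine's control,
 * so they are reported as an opaque MEM_RESERVE allocation to keep Win32
 * callers away from them. */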

/* retrieve state for a free memory area; callback for mmap_enum_reserved_areas */
static int CDECL get_free_mem_state_callback( void *start, SIZE_T size, void *arg )
{
    MEMORY_BASIC_INFORMATION *info = arg;
    void *end = (char *)start + size;

    if ((char *)info->BaseAddress + info->RegionSize <= (char *)start) return 0;

    if (info->BaseAddress >= end)
    {
        if (info->AllocationBase < end) info->AllocationBase = end;
        return 0;
    }

    if (info->BaseAddress >= start || start <= address_space_start)
    {
        /* it's a real free area */
        info->State = MEM_FREE;
        info->Protect = PAGE_NOACCESS;
        info->AllocationBase = 0;
        info->AllocationProtect = 0;
        info->Type = 0;
        if ((char *)info->BaseAddress + info->RegionSize > (char *)end)
            info->RegionSize = (char *)end - (char *)info->BaseAddress;
    }
    else /* outside of the reserved area, pretend it's allocated */
    {
        info->RegionSize = (char *)start - (char *)info->BaseAddress;
        info->State = MEM_RESERVE;
        info->Protect = PAGE_NOACCESS;
        info->AllocationProtect = PAGE_NOACCESS;
        info->Type = MEM_PRIVATE;
    }
    return 1;
}

/* get basic information about a memory block */
static NTSTATUS get_basic_memory_info( HANDLE process, LPCVOID addr,
                                       MEMORY_BASIC_INFORMATION *info,
                                       SIZE_T len, SIZE_T *res_len )
{
    struct file_view *view;
    char *base, *alloc_base = 0, *alloc_end = working_set_limit;
    struct wine_rb_entry *ptr;
    sigset_t sigset;

    if (len < sizeof(MEMORY_BASIC_INFORMATION))
        return STATUS_INFO_LENGTH_MISMATCH;

    if (process != NtCurrentProcess())
    {
        NTSTATUS status;
        apc_call_t call;
        apc_result_t result;

        memset( &call, 0, sizeof(call) );

        call.virtual_query.type = APC_VIRTUAL_QUERY;
        call.virtual_query.addr = wine_server_client_ptr( addr );
        status = server_queue_process_apc( process, &call, &result );
        if (status != STATUS_SUCCESS) return status;

        if (result.virtual_query.status == STATUS_SUCCESS)
        {
            info->BaseAddress = wine_server_get_ptr( result.virtual_query.base );
            info->AllocationBase = wine_server_get_ptr( result.virtual_query.alloc_base );
            info->RegionSize = result.virtual_query.size;
            info->Protect = result.virtual_query.prot;
            info->AllocationProtect = result.virtual_query.alloc_prot;
            info->State = (DWORD)result.virtual_query.state << 12;
            info->Type = (DWORD)result.virtual_query.alloc_type << 16;
            if (info->RegionSize != result.virtual_query.size)  /* truncated */
                return STATUS_INVALID_PARAMETER;  /* FIXME */
            if (res_len) *res_len = sizeof(*info);
        }
        return result.virtual_query.status;
    }

    base = ROUND_ADDR( addr, page_mask );

    if (is_beyond_limit( base, 1, working_set_limit )) return STATUS_INVALID_PARAMETER;

    /* Find the view containing the address */

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    ptr = views_tree.root;
    while (ptr)
    {
        view = WINE_RB_ENTRY_VALUE( ptr, struct file_view, entry );
        if ((char *)view->base > base)
        {
            alloc_end = view->base;
            ptr = ptr->left;
        }
        else if ((char *)view->base + view->size <= base)
        {
            alloc_base = (char *)view->base + view->size;
            ptr = ptr->right;
        }
        else
        {
            alloc_base = view->base;
            alloc_end = (char *)view->base + view->size;
            break;
        }
    }

    /* Fill the info structure */

    info->AllocationBase = alloc_base;
    info->BaseAddress = base;
    info->RegionSize = alloc_end - base;

    if (!ptr)
    {
        if (!mmap_enum_reserved_areas( get_free_mem_state_callback, info, 0 ))
        {
            /* not in a reserved area at all, pretend it's allocated */
#ifdef __i386__
            if (base >= (char *)address_space_start)
            {
                info->State = MEM_RESERVE;
                info->Protect = PAGE_NOACCESS;
                info->AllocationProtect = PAGE_NOACCESS;
                info->Type = MEM_PRIVATE;
            }
            else
#endif
            {
                info->State = MEM_FREE;
                info->Protect = PAGE_NOACCESS;
                info->AllocationBase = 0;
                info->AllocationProtect = 0;
                info->Type = 0;
            }
        }
    }
    else
    {
        BYTE vprot;
        char *ptr;
        SIZE_T range_size = get_committed_size( view, base, &vprot );

        info->State = (vprot & VPROT_COMMITTED) ? MEM_COMMIT : MEM_RESERVE;
        info->Protect = (vprot & VPROT_COMMITTED) ? get_win32_prot( vprot, view->protect ) : 0;
        info->AllocationProtect = get_win32_prot( view->protect, view->protect );
        if (view->protect & SEC_IMAGE) info->Type = MEM_IMAGE;
        else if (view->protect & (SEC_FILE | SEC_RESERVE | SEC_COMMIT)) info->Type = MEM_MAPPED;
        else info->Type = MEM_PRIVATE;
        for (ptr = base; ptr < base + range_size; ptr += page_size)
            if ((get_page_vprot( ptr ) ^ vprot) & ~VPROT_WRITEWATCH) break;
        info->RegionSize = ptr - base;
    }
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );

    if (res_len) *res_len = sizeof(*info);
    return STATUS_SUCCESS;
}
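
/* get_working_set_ex() below reads Linux's /proc/self/pagemap: one
 * little-endian 64-bit word per virtual page, where bit 63 means the page
 * is present in RAM and bit 61 means it is file-backed or shared-anonymous
 * (see pagemap.rst in the kernel documentation).  If the file cannot be
 * opened or read (some hardened kernels restrict it), pagemap stays 0 and
 * the page is simply reported as not valid. */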

static NTSTATUS get_working_set_ex( HANDLE process, LPCVOID addr,
                                    MEMORY_WORKING_SET_EX_INFORMATION *info,
                                    SIZE_T len, SIZE_T *res_len )
{
    FILE *f;
    MEMORY_WORKING_SET_EX_INFORMATION *p;
    sigset_t sigset;

    if (process != NtCurrentProcess())
    {
        FIXME( "(process=%p,addr=%p) Unimplemented information class: MemoryWorkingSetExInformation\n", process, addr );
        return STATUS_INVALID_INFO_CLASS;
    }

    f = fopen( "/proc/self/pagemap", "rb" );
    if (!f)
    {
        static int once;
        if (!once++) WARN( "unable to open /proc/self/pagemap\n" );
    }

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    for (p = info; (UINT_PTR)(p + 1) <= (UINT_PTR)info + len; p++)
    {
        BYTE vprot;
        UINT64 pagemap;
        struct file_view *view;

        memset( &p->VirtualAttributes, 0, sizeof(p->VirtualAttributes) );

        /* If we don't have pagemap information, default to invalid. */
        if (!f || fseek( f, ((UINT_PTR)p->VirtualAddress >> 12) * sizeof(pagemap), SEEK_SET ) == -1 ||
            fread( &pagemap, sizeof(pagemap), 1, f ) != 1)
        {
            pagemap = 0;
        }

        if ((view = find_view( p->VirtualAddress, 0 )) &&
            get_committed_size( view, p->VirtualAddress, &vprot ) &&
            (vprot & VPROT_COMMITTED))
        {
            p->VirtualAttributes.Valid = !(vprot & VPROT_GUARD) && (vprot & 0x0f) && (pagemap >> 63);
            p->VirtualAttributes.Shared = !is_view_valloc( view ) && ((pagemap >> 61) & 1);
            if (p->VirtualAttributes.Shared && p->VirtualAttributes.Valid)
                p->VirtualAttributes.ShareCount = 1; /* FIXME */
            if (p->VirtualAttributes.Valid)
                p->VirtualAttributes.Win32Protection = get_win32_prot( vprot, view->protect );
        }
    }
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );

    if (f)
        fclose( f );
    if (res_len)
        *res_len = (UINT_PTR)p - (UINT_PTR)info;
    return STATUS_SUCCESS;
}

static NTSTATUS get_memory_section_name( HANDLE process, LPCVOID addr,
                                         MEMORY_SECTION_NAME *info, SIZE_T len, SIZE_T *ret_len )
{
    NTSTATUS status;

    if (!info) return STATUS_ACCESS_VIOLATION;

    SERVER_START_REQ( get_mapping_filename )
    {
        req->process = wine_server_obj_handle( process );
        req->addr = wine_server_client_ptr( addr );
        if (len > sizeof(*info) + sizeof(WCHAR))
            wine_server_set_reply( req, info + 1, len - sizeof(*info) - sizeof(WCHAR) );
        status = wine_server_call( req );
        if (!status || status == STATUS_BUFFER_OVERFLOW)
        {
            if (ret_len) *ret_len = sizeof(*info) + reply->len + sizeof(WCHAR);
            if (len < sizeof(*info)) status = STATUS_INFO_LENGTH_MISMATCH;
            if (!status)
            {
                info->SectionFileName.Buffer = (WCHAR *)(info + 1);
                info->SectionFileName.Length = reply->len;
                info->SectionFileName.MaximumLength = reply->len + sizeof(WCHAR);
                info->SectionFileName.Buffer[reply->len / sizeof(WCHAR)] = 0;
            }
        }
    }
    SERVER_END_REQ;
    return status;
}

#define UNIMPLEMENTED_INFO_CLASS(c) \
    case c: \
        FIXME("(process=%p,addr=%p) Unimplemented information class: " #c "\n", process, addr); \
        return STATUS_INVALID_INFO_CLASS

/***********************************************************************
 *             NtQueryVirtualMemory   (NTDLL.@)
 *             ZwQueryVirtualMemory   (NTDLL.@)
 */
NTSTATUS WINAPI NtQueryVirtualMemory( HANDLE process, LPCVOID addr,
                                      MEMORY_INFORMATION_CLASS info_class,
                                      PVOID buffer, SIZE_T len, SIZE_T *res_len )
{
    TRACE("(%p, %p, info_class=%d, %p, %ld, %p)\n",
          process, addr, info_class, buffer, len, res_len);

    switch(info_class)
    {
        case MemoryBasicInformation:
            return get_basic_memory_info( process, addr, buffer, len, res_len );
        case MemoryWorkingSetExInformation:
            return get_working_set_ex( process, addr, buffer, len, res_len );
        case MemorySectionName:
            return get_memory_section_name( process, addr, buffer, len, res_len );

        UNIMPLEMENTED_INFO_CLASS(MemoryWorkingSetList);
        UNIMPLEMENTED_INFO_CLASS(MemoryBasicVlmInformation);

        default:
            FIXME("(%p,%p,info_class=%d,%p,%ld,%p) Unknown information class\n",
                  process, addr, info_class, buffer, len, res_len);
            return STATUS_INVALID_INFO_CLASS;
    }
}
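
/* Illustrative only (hypothetical snippet): MemoryBasicInformation is
 * usually consumed by walking the address space region by region:
 *
 *     MEMORY_BASIC_INFORMATION mbi;
 *     char *p = NULL;
 *     while (!NtQueryVirtualMemory( NtCurrentProcess(), p, MemoryBasicInformation,
 *                                   &mbi, sizeof(mbi), NULL ))
 *         p = (char *)mbi.BaseAddress + mbi.RegionSize;
 *
 * The walk terminates once the address passes working_set_limit and
 * get_basic_memory_info() returns STATUS_INVALID_PARAMETER. */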

/***********************************************************************
 *             NtLockVirtualMemory   (NTDLL.@)
 *             ZwLockVirtualMemory   (NTDLL.@)
 */
NTSTATUS WINAPI NtLockVirtualMemory( HANDLE process, PVOID *addr, SIZE_T *size, ULONG unknown )
{
    NTSTATUS status = STATUS_SUCCESS;

    if (process != NtCurrentProcess())
    {
        apc_call_t call;
        apc_result_t result;

        memset( &call, 0, sizeof(call) );

        call.virtual_lock.type = APC_VIRTUAL_LOCK;
        call.virtual_lock.addr = wine_server_client_ptr( *addr );
        call.virtual_lock.size = *size;
        status = server_queue_process_apc( process, &call, &result );
        if (status != STATUS_SUCCESS) return status;

        if (result.virtual_lock.status == STATUS_SUCCESS)
        {
            *addr = wine_server_get_ptr( result.virtual_lock.addr );
            *size = result.virtual_lock.size;
        }
        return result.virtual_lock.status;
    }

    *size = ROUND_SIZE( *addr, *size );
    *addr = ROUND_ADDR( *addr, page_mask );

    if (mlock( *addr, *size )) status = STATUS_ACCESS_DENIED;
    return status;
}

/***********************************************************************
 *             NtUnlockVirtualMemory   (NTDLL.@)
 *             ZwUnlockVirtualMemory   (NTDLL.@)
 */
NTSTATUS WINAPI NtUnlockVirtualMemory( HANDLE process, PVOID *addr, SIZE_T *size, ULONG unknown )
{
    NTSTATUS status = STATUS_SUCCESS;

    if (process != NtCurrentProcess())
    {
        apc_call_t call;
        apc_result_t result;

        memset( &call, 0, sizeof(call) );

        call.virtual_unlock.type = APC_VIRTUAL_UNLOCK;
        call.virtual_unlock.addr = wine_server_client_ptr( *addr );
        call.virtual_unlock.size = *size;
        status = server_queue_process_apc( process, &call, &result );
        if (status != STATUS_SUCCESS) return status;

        if (result.virtual_unlock.status == STATUS_SUCCESS)
        {
            *addr = wine_server_get_ptr( result.virtual_unlock.addr );
            *size = result.virtual_unlock.size;
        }
        return result.virtual_unlock.status;
    }

    *size = ROUND_SIZE( *addr, *size );
    *addr = ROUND_ADDR( *addr, page_mask );

    if (munlock( *addr, *size )) status = STATUS_ACCESS_DENIED;
    return status;
}

/***********************************************************************
 *             NtMapViewOfSection   (NTDLL.@)
 *             ZwMapViewOfSection   (NTDLL.@)
 */
NTSTATUS WINAPI NtMapViewOfSection( HANDLE handle, HANDLE process, PVOID *addr_ptr, ULONG_PTR zero_bits,
                                    SIZE_T commit_size, const LARGE_INTEGER *offset_ptr, SIZE_T *size_ptr,
                                    SECTION_INHERIT inherit, ULONG alloc_type, ULONG protect )
{
    NTSTATUS res;
    SIZE_T mask = granularity_mask;
    pe_image_info_t image_info;
    LARGE_INTEGER offset;
    unsigned short zero_bits_64 = zero_bits_win_to_64( zero_bits );

    offset.QuadPart = offset_ptr ? offset_ptr->QuadPart : 0;

    TRACE("handle=%p process=%p addr=%p off=%x%08x size=%lx access=%x\n",
          handle, process, *addr_ptr, offset.u.HighPart, offset.u.LowPart, *size_ptr, protect );

    /* Check parameters */
    if (zero_bits > 21 && zero_bits < 32)
        return STATUS_INVALID_PARAMETER_4;
    if (!is_win64 && !is_wow64 && zero_bits >= 32)
        return STATUS_INVALID_PARAMETER_4;

    /* If both addr_ptr and zero_bits are passed, they have to match */
    if (*addr_ptr && zero_bits && zero_bits < 32 &&
        (((UINT_PTR)*addr_ptr) >> (32 - zero_bits)))
        return STATUS_INVALID_PARAMETER_4;
    if (*addr_ptr && zero_bits >= 32 &&
        (((UINT_PTR)*addr_ptr) & ~zero_bits))
        return STATUS_INVALID_PARAMETER_4;

#ifndef _WIN64
    if (!is_wow64 && (alloc_type & AT_ROUND_TO_PAGE))
    {
        *addr_ptr = ROUND_ADDR( *addr_ptr, page_mask );
        mask = page_mask;
    }
#endif

    if ((offset.u.LowPart & mask) || (*addr_ptr && ((UINT_PTR)*addr_ptr & mask)))
        return STATUS_MAPPED_ALIGNMENT;

    if (process != NtCurrentProcess())
    {
        apc_call_t call;
        apc_result_t result;

        memset( &call, 0, sizeof(call) );

        call.map_view.type = APC_MAP_VIEW;
        call.map_view.handle = wine_server_obj_handle( handle );
        call.map_view.addr = wine_server_client_ptr( *addr_ptr );
        call.map_view.size = *size_ptr;
        call.map_view.offset = offset.QuadPart;
        call.map_view.zero_bits = zero_bits;
        call.map_view.alloc_type = alloc_type;
        call.map_view.prot = protect;
        res = server_queue_process_apc( process, &call, &result );
        if (res != STATUS_SUCCESS) return res;

        if ((NTSTATUS)result.map_view.status >= 0)
        {
            *addr_ptr = wine_server_get_ptr( result.map_view.addr );
            *size_ptr = result.map_view.size;
        }
        return result.map_view.status;
    }

    return virtual_map_section( handle, addr_ptr, zero_bits_64, commit_size,
                                offset_ptr, size_ptr, alloc_type, protect,
                                &image_info );
}
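
/* Illustrative only (hypothetical caller, assuming a section handle from
 * NtCreateSection): mapping a whole section at a system-chosen address,
 * roughly what a MapViewOfFile call boils down to:
 *
 *     void *base = NULL;
 *     SIZE_T size = 0;                         // 0 = map the entire section
 *     LARGE_INTEGER offset = { .QuadPart = 0 };
 *     NtMapViewOfSection( section, NtCurrentProcess(), &base, 0, 0,
 *                         &offset, &size, ViewShare, 0, PAGE_READONLY );
 *
 * On success base and size receive the actual placement, aligned to the
 * granularity mask checked above. */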

/***********************************************************************
 *             NtUnmapViewOfSection   (NTDLL.@)
 *             ZwUnmapViewOfSection   (NTDLL.@)
 */
NTSTATUS WINAPI NtUnmapViewOfSection( HANDLE process, PVOID addr )
{
    struct file_view *view;
    NTSTATUS status = STATUS_NOT_MAPPED_VIEW;
    sigset_t sigset;

    if (process != NtCurrentProcess())
    {
        apc_call_t call;
        apc_result_t result;

        memset( &call, 0, sizeof(call) );

        call.unmap_view.type = APC_UNMAP_VIEW;
        call.unmap_view.addr = wine_server_client_ptr( addr );
        status = server_queue_process_apc( process, &call, &result );
        if (status == STATUS_SUCCESS) status = result.unmap_view.status;
        return status;
    }

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    if ((view = find_view( addr, 0 )) && !is_view_valloc( view ))
    {
        SERVER_START_REQ( unmap_view )
        {
            req->base = wine_server_client_ptr( view->base );
            status = wine_server_call( req );
        }
        SERVER_END_REQ;
        if (!status) delete_view( view );
        else FIXME( "failed to unmap %p %x\n", view->base, status );
    }
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    return status;
}

/******************************************************************************
 *             virtual_fill_image_information
 *
 * Helper for NtQuerySection.
 */
void virtual_fill_image_information( const pe_image_info_t *pe_info, SECTION_IMAGE_INFORMATION *info )
{
    info->TransferAddress             = wine_server_get_ptr( pe_info->entry_point );
    info->ZeroBits                    = pe_info->zerobits;
    info->MaximumStackSize            = pe_info->stack_size;
    info->CommittedStackSize          = pe_info->stack_commit;
    info->SubSystemType               = pe_info->subsystem;
    info->MinorSubsystemVersion       = pe_info->subsystem_minor;
    info->MajorSubsystemVersion       = pe_info->subsystem_major;
    info->MajorOperatingSystemVersion = pe_info->osversion_major;
    info->MinorOperatingSystemVersion = pe_info->osversion_minor;
    info->ImageCharacteristics        = pe_info->image_charact;
    info->DllCharacteristics          = pe_info->dll_charact;
    info->Machine                     = pe_info->machine;
    info->ImageContainsCode           = pe_info->contains_code;
    info->ImageFlags                  = pe_info->image_flags;
    info->LoaderFlags                 = pe_info->loader_flags;
    info->ImageFileSize               = pe_info->file_size;
    info->CheckSum                    = pe_info->checksum;
#ifndef _WIN64 /* don't return 64-bit values to 32-bit processes */
    if (pe_info->machine == IMAGE_FILE_MACHINE_AMD64 || pe_info->machine == IMAGE_FILE_MACHINE_ARM64)
    {
        info->TransferAddress = (void *)0x81231234;  /* sic */
        info->MaximumStackSize = 0x100000;
        info->CommittedStackSize = 0x10000;
    }
#endif
}

/******************************************************************************
 *             NtQuerySection   (NTDLL.@)
 *             ZwQuerySection   (NTDLL.@)
 */
NTSTATUS WINAPI NtQuerySection( HANDLE handle, SECTION_INFORMATION_CLASS class, void *ptr,
                                SIZE_T size, SIZE_T *ret_size )
{
    NTSTATUS status;
    pe_image_info_t image_info;

    switch (class)
    {
    case SectionBasicInformation:
        if (size < sizeof(SECTION_BASIC_INFORMATION)) return STATUS_INFO_LENGTH_MISMATCH;
        break;
    case SectionImageInformation:
        if (size < sizeof(SECTION_IMAGE_INFORMATION)) return STATUS_INFO_LENGTH_MISMATCH;
        break;
    default:
        FIXME( "class %u not implemented\n", class );
        return STATUS_NOT_IMPLEMENTED;
    }
    if (!ptr) return STATUS_ACCESS_VIOLATION;

    SERVER_START_REQ( get_mapping_info )
    {
        req->handle = wine_server_obj_handle( handle );
        req->access = SECTION_QUERY;
        wine_server_set_reply( req, &image_info, sizeof(image_info) );
        if (!(status = wine_server_call( req )))
        {
            if (class == SectionBasicInformation)
            {
                SECTION_BASIC_INFORMATION *info = ptr;
                info->Attributes = reply->flags;
                info->BaseAddress = NULL;
                info->Size.QuadPart = reply->size;
                if (ret_size) *ret_size = sizeof(*info);
            }
            else if (reply->flags & SEC_IMAGE)
            {
                SECTION_IMAGE_INFORMATION *info = ptr;
                virtual_fill_image_information( &image_info, info );
                if (ret_size) *ret_size = sizeof(*info);
            }
            else status = STATUS_SECTION_NOT_IMAGE;
        }
    }
    SERVER_END_REQ;

    return status;
}

/***********************************************************************
 *             NtFlushVirtualMemory   (NTDLL.@)
 *             ZwFlushVirtualMemory   (NTDLL.@)
 */
NTSTATUS WINAPI NtFlushVirtualMemory( HANDLE process, LPCVOID *addr_ptr,
                                      SIZE_T *size_ptr, ULONG unknown )
{
    struct file_view *view;
    NTSTATUS status = STATUS_SUCCESS;
    sigset_t sigset;
    void *addr = ROUND_ADDR( *addr_ptr, page_mask );

    if (process != NtCurrentProcess())
    {
        apc_call_t call;
        apc_result_t result;

        memset( &call, 0, sizeof(call) );

        call.virtual_flush.type = APC_VIRTUAL_FLUSH;
        call.virtual_flush.addr = wine_server_client_ptr( addr );
        call.virtual_flush.size = *size_ptr;
        status = server_queue_process_apc( process, &call, &result );
        if (status != STATUS_SUCCESS) return status;

        if (result.virtual_flush.status == STATUS_SUCCESS)
        {
            *addr_ptr = wine_server_get_ptr( result.virtual_flush.addr );
            *size_ptr = result.virtual_flush.size;
        }
        return result.virtual_flush.status;
    }

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    if (!(view = find_view( addr, *size_ptr ))) status = STATUS_INVALID_PARAMETER;
    else
    {
        if (!*size_ptr) *size_ptr = view->size;
        *addr_ptr = addr;
#ifdef MS_ASYNC
        if (msync( addr, *size_ptr, MS_ASYNC )) status = STATUS_NOT_MAPPED_DATA;
#endif
    }
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    return status;
}
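
/* Write watches piggyback on page protections: MEM_WRITE_WATCH pages carry
 * VPROT_WRITEWATCH and are mapped without write access, so the first write
 * faults; the fault handler (elsewhere in this file) clears the bit and
 * restores write access.  A cleared bit therefore means "written since the
 * last reset", which is what NtGetWriteWatch below collects. */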

/***********************************************************************
 *             NtGetWriteWatch   (NTDLL.@)
 *             ZwGetWriteWatch   (NTDLL.@)
 */
NTSTATUS WINAPI NtGetWriteWatch( HANDLE process, ULONG flags, PVOID base, SIZE_T size, PVOID *addresses,
                                 ULONG_PTR *count, ULONG *granularity )
{
    NTSTATUS status = STATUS_SUCCESS;
    sigset_t sigset;

    size = ROUND_SIZE( base, size );
    base = ROUND_ADDR( base, page_mask );

    if (!count || !granularity) return STATUS_ACCESS_VIOLATION;
    if (!*count || !size) return STATUS_INVALID_PARAMETER;
    if (flags & ~WRITE_WATCH_FLAG_RESET) return STATUS_INVALID_PARAMETER;

    if (!addresses) return STATUS_ACCESS_VIOLATION;

    TRACE( "%p %x %p-%p %p %lu\n", process, flags, base, (char *)base + size,
           addresses, *count );

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );

    if (is_write_watch_range( base, size ))
    {
        ULONG_PTR pos = 0;
        char *addr = base;
        char *end = addr + size;

        while (pos < *count && addr < end)
        {
            if (!(get_page_vprot( addr ) & VPROT_WRITEWATCH)) addresses[pos++] = addr;
            addr += page_size;
        }
        if (flags & WRITE_WATCH_FLAG_RESET) reset_write_watches( base, addr - (char *)base );
        *count = pos;
        *granularity = page_size;
    }
    else status = STATUS_INVALID_PARAMETER;

    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    return status;
}
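
/* Illustrative only (hypothetical caller), matching the checks above:
 *
 *     void *pages[16];
 *     ULONG_PTR count = 16;                 // must be nonzero on entry
 *     ULONG granularity;
 *     NtGetWriteWatch( NtCurrentProcess(), WRITE_WATCH_FLAG_RESET,
 *                      base, size, pages, &count, &granularity );
 *
 * base/size must lie inside a MEM_WRITE_WATCH allocation; on return count
 * holds the number of dirty page addresses written to the array. */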

/***********************************************************************
 *             NtResetWriteWatch   (NTDLL.@)
 *             ZwResetWriteWatch   (NTDLL.@)
 */
NTSTATUS WINAPI NtResetWriteWatch( HANDLE process, PVOID base, SIZE_T size )
{
    NTSTATUS status = STATUS_SUCCESS;
    sigset_t sigset;

    size = ROUND_SIZE( base, size );
    base = ROUND_ADDR( base, page_mask );

    TRACE( "%p %p-%p\n", process, base, (char *)base + size );

    if (!size) return STATUS_INVALID_PARAMETER;

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );

    if (is_write_watch_range( base, size ))
        reset_write_watches( base, size );
    else
        status = STATUS_INVALID_PARAMETER;

    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    return status;
}

/***********************************************************************
 *             NtReadVirtualMemory   (NTDLL.@)
 *             ZwReadVirtualMemory   (NTDLL.@)
 */
NTSTATUS WINAPI NtReadVirtualMemory( HANDLE process, const void *addr, void *buffer,
                                     SIZE_T size, SIZE_T *bytes_read )
{
    NTSTATUS status;

    if (virtual_check_buffer_for_write( buffer, size ))
    {
        SERVER_START_REQ( read_process_memory )
        {
            req->handle = wine_server_obj_handle( process );
            req->addr = wine_server_client_ptr( addr );
            wine_server_set_reply( req, buffer, size );
            if ((status = wine_server_call( req ))) size = 0;
        }
        SERVER_END_REQ;
    }
    else
    {
        status = STATUS_ACCESS_VIOLATION;
        size = 0;
    }
    if (bytes_read) *bytes_read = size;
    return status;
}

/***********************************************************************
 *             NtWriteVirtualMemory   (NTDLL.@)
 *             ZwWriteVirtualMemory   (NTDLL.@)
 */
NTSTATUS WINAPI NtWriteVirtualMemory( HANDLE process, void *addr, const void *buffer,
                                      SIZE_T size, SIZE_T *bytes_written )
{
    NTSTATUS status;

    if (virtual_check_buffer_for_read( buffer, size ))
    {
        SERVER_START_REQ( write_process_memory )
        {
            req->handle = wine_server_obj_handle( process );
            req->addr = wine_server_client_ptr( addr );
            wine_server_add_data( req, buffer, size );
            if ((status = wine_server_call( req ))) size = 0;
        }
        SERVER_END_REQ;
    }
    else
    {
        status = STATUS_PARTIAL_COPY;
        size = 0;
    }
    if (bytes_written) *bytes_written = size;
    return status;
}
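
/* Note: both copies above are performed by the wineserver (the
 * read_process_memory / write_process_memory requests), not by touching
 * the target address space directly; the local buffer is probed first so
 * that a bad pointer fails with a clean status instead of faulting inside
 * the server call. */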

/***********************************************************************
 *             NtAreMappedFilesTheSame   (NTDLL.@)
 *             ZwAreMappedFilesTheSame   (NTDLL.@)
 */
NTSTATUS WINAPI NtAreMappedFilesTheSame(PVOID addr1, PVOID addr2)
{
    struct file_view *view1, *view2;
    NTSTATUS status;
    sigset_t sigset;

    TRACE("%p %p\n", addr1, addr2);

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );

    view1 = find_view( addr1, 0 );
    view2 = find_view( addr2, 0 );

    if (!view1 || !view2)
        status = STATUS_INVALID_ADDRESS;
    else if (is_view_valloc( view1 ) || is_view_valloc( view2 ))
        status = STATUS_CONFLICTING_ADDRESSES;
    else if (view1 == view2)
        status = STATUS_SUCCESS;
    else if ((view1->protect & VPROT_SYSTEM) || (view2->protect & VPROT_SYSTEM))
        status = STATUS_NOT_SAME_DEVICE;
    else
    {
        SERVER_START_REQ( is_same_mapping )
        {
            req->base1 = wine_server_client_ptr( view1->base );
            req->base2 = wine_server_client_ptr( view2->base );
            status = wine_server_call( req );
        }
        SERVER_END_REQ;
    }

    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    return status;
}

/**********************************************************************
 *             NtFlushInstructionCache   (NTDLL.@)
 */
NTSTATUS WINAPI NtFlushInstructionCache( HANDLE handle, const void *addr, SIZE_T size )
{
#if defined(__x86_64__) || defined(__i386__)
    /* no-op */
#elif defined(HAVE___CLEAR_CACHE)
    if (handle == GetCurrentProcess())
    {
        __clear_cache( (char *)addr, (char *)addr + size );
    }
    else
    {
        static int once;
        if (!once++) FIXME( "%p %p %ld other process not supported\n", handle, addr, size );
    }
#else
    static int once;
    if (!once++) FIXME( "%p %p %ld\n", handle, addr, size );
#endif
    return STATUS_SUCCESS;
}
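
/* On x86 the instruction cache is coherent with data writes, so flushing is
 * a no-op; on other architectures __clear_cache() is the GCC/Clang builtin
 * that emits the required cache-maintenance instructions (e.g. on ARM). */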

/**********************************************************************
 *             NtFlushProcessWriteBuffers   (NTDLL.@)
 */
void WINAPI NtFlushProcessWriteBuffers(void)
{
    static int once = 0;
    if (!once++) FIXME( "stub\n" );
}

/**********************************************************************
 *             NtCreatePagingFile   (NTDLL.@)
 */
NTSTATUS WINAPI NtCreatePagingFile( UNICODE_STRING *name, LARGE_INTEGER *min_size,
                                    LARGE_INTEGER *max_size, LARGE_INTEGER *actual_size )
{
    FIXME( "(%s %p %p %p) stub\n", debugstr_us(name), min_size, max_size, actual_size );
    return STATUS_SUCCESS;