/*
 * Win32 virtual memory functions
 *
 * Copyright 1997, 2002, 2020 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#if 0
#pragma makedep unix
#endif

#include "config.h"
#include "wine/port.h"

#include <assert.h>
#include <errno.h>
#include <stdarg.h>
#include <stdio.h>
#include <signal.h>
#include <sys/types.h>
#ifdef HAVE_SYS_SOCKET_H
# include <sys/socket.h>
#endif
#ifdef HAVE_SYS_STAT_H
# include <sys/stat.h>
#endif
#ifdef HAVE_SYS_MMAN_H
# include <sys/mman.h>
#endif
#ifdef HAVE_SYS_SYSINFO_H
# include <sys/sysinfo.h>
#endif
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif
#ifdef HAVE_VALGRIND_VALGRIND_H
# include <valgrind/valgrind.h>
#endif
#if defined(__APPLE__)
# include <mach/mach_init.h>
# include <mach/mach_vm.h>
#endif

#include "ntstatus.h"
#define WIN32_NO_STATUS
#include "windef.h"
#include "winnt.h"
#include "winternl.h"
#include "wine/exception.h"
#include "wine/list.h"
#include "wine/rbtree.h"
#include "unix_private.h"
#include "wine/debug.h"

WINE_DEFAULT_DEBUG_CHANNEL(virtual);
WINE_DECLARE_DEBUG_CHANNEL(module);

struct preload_info
{
    void  *addr;
    size_t size;
};

struct reserved_area
{
    struct list entry;
    void       *base;
    size_t      size;
};

static struct list reserved_areas = LIST_INIT(reserved_areas);

struct file_view
{
    struct wine_rb_entry entry;  /* entry in global view tree */
    void          *base;         /* base address */
    size_t         size;         /* size in bytes */
    unsigned int   protect;      /* protection for all pages at allocation time and SEC_* flags */
};

#define __EXCEPT_SYSCALL __EXCEPT_HANDLER(0)

/* per-page protection flags */
#define VPROT_READ       0x01
#define VPROT_WRITE      0x02
#define VPROT_EXEC       0x04
#define VPROT_WRITECOPY  0x08
#define VPROT_GUARD      0x10
#define VPROT_COMMITTED  0x20
#define VPROT_WRITEWATCH 0x40
/* per-mapping protection flags */
#define VPROT_SYSTEM     0x0200  /* system view (underlying mmap not under our control) */

/* Conversion from VPROT_* to Win32 flags */
static const BYTE VIRTUAL_Win32Flags[16] =
{
    PAGE_NOACCESS,              /* 0 */
    PAGE_READONLY,              /* READ */
    PAGE_READWRITE,             /* WRITE */
    PAGE_READWRITE,             /* READ | WRITE */
    PAGE_EXECUTE,               /* EXEC */
    PAGE_EXECUTE_READ,          /* READ | EXEC */
    PAGE_EXECUTE_READWRITE,     /* WRITE | EXEC */
    PAGE_EXECUTE_READWRITE,     /* READ | WRITE | EXEC */
    PAGE_WRITECOPY,             /* WRITECOPY */
    PAGE_WRITECOPY,             /* READ | WRITECOPY */
    PAGE_WRITECOPY,             /* WRITE | WRITECOPY */
    PAGE_WRITECOPY,             /* READ | WRITE | WRITECOPY */
    PAGE_EXECUTE_WRITECOPY,     /* EXEC | WRITECOPY */
    PAGE_EXECUTE_WRITECOPY,     /* READ | EXEC | WRITECOPY */
    PAGE_EXECUTE_WRITECOPY,     /* WRITE | EXEC | WRITECOPY */
    PAGE_EXECUTE_WRITECOPY      /* READ | WRITE | EXEC | WRITECOPY */
};

static struct wine_rb_tree views_tree;
static pthread_mutex_t virtual_mutex;

static const BOOL is_win64 = (sizeof(void *) > sizeof(int));
static const UINT page_shift = 12;
static const UINT_PTR page_mask = 0xfff;
static const UINT_PTR granularity_mask = 0xffff;

/* Note: these are Windows limits, you cannot change them. */
#ifdef __i386__
static void *address_space_start = (void *)0x110000; /* keep DOS area clear */
#else
static void *address_space_start = (void *)0x10000;
#endif

#ifdef __aarch64__
static void *address_space_limit = (void *)0xffffffff0000;  /* top of the total available address space */
#elif defined(_WIN64)
static void *address_space_limit = (void *)0x7fffffff0000;
#else
static void *address_space_limit = (void *)0xc0000000;
#endif

#ifdef _WIN64
static void *user_space_limit    = (void *)0x7fffffff0000;  /* top of the user address space */
static void *working_set_limit   = (void *)0x7fffffff0000;  /* top of the current working set */
#else
static void *user_space_limit    = (void *)0x7fff0000;
static void *working_set_limit   = (void *)0x7fff0000;
#endif

struct _KUSER_SHARED_DATA *user_shared_data = (void *)0x7ffe0000;

/* TEB allocation blocks */
static void *teb_block;
static void **next_free_teb;
static int teb_block_pos;
static struct list teb_list = LIST_INIT( teb_list );

#define ROUND_ADDR(addr,mask) ((void *)((UINT_PTR)(addr) & ~(UINT_PTR)(mask)))
#define ROUND_SIZE(addr,size) (((SIZE_T)(size) + ((UINT_PTR)(addr) & page_mask) + page_mask) & ~page_mask)
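/* e.g. with 4kB pages ROUND_ADDR( 0x12345, page_mask ) yields 0x12000, and
 * ROUND_SIZE( 0x12345, 0x1000 ) yields 0x2000: a 0x1000-byte range starting
 * at offset 0x345 into a page spans two pages. */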

#define VIRTUAL_DEBUG_DUMP_VIEW(view) do { if (TRACE_ON(virtual)) dump_view(view); } while (0)

#ifndef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif

#ifdef _WIN64  /* on 64-bit the page protection bytes use a 2-level table */
static const size_t pages_vprot_shift = 20;
static const size_t pages_vprot_mask = (1 << 20) - 1;
static size_t pages_vprot_size;
static BYTE **pages_vprot;
#else  /* on 32-bit we use a simple array with one byte per page */
static BYTE *pages_vprot;
#endif
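/* With 4kB pages, each second-level block of the 64-bit table holds 2^20
 * protection bytes (1MB) and therefore covers 4GB of address space; blocks
 * are allocated lazily by alloc_pages_vprot() below. */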

static struct file_view *view_block_start, *view_block_end, *next_free_view;
static const size_t view_block_size = 0x100000;
static void *preload_reserve_start;
static void *preload_reserve_end;
static BOOL force_exec_prot;  /* whether to force PROT_EXEC on all PROT_READ mmaps */

struct range_entry
{
    void *base;
    void *end;
};

static struct range_entry *free_ranges;
static struct range_entry *free_ranges_end;


static inline BOOL is_inside_signal_stack( void *ptr )
{
    return ((char *)ptr >= (char *)get_signal_stack() &&
            (char *)ptr < (char *)get_signal_stack() + signal_stack_size);
}

static inline BOOL is_beyond_limit( const void *addr, size_t size, const void *limit )
{
    return (addr >= limit || (const char *)addr + size > (const char *)limit);
}

/* mmap() anonymous memory at a fixed address */
void *anon_mmap_fixed( void *start, size_t size, int prot, int flags )
{
    return mmap( start, size, prot, MAP_PRIVATE | MAP_ANON | MAP_FIXED | flags, -1, 0 );
}

/* allocate anonymous mmap() memory at any address */
void *anon_mmap_alloc( size_t size, int prot )
{
    return mmap( NULL, size, prot, MAP_PRIVATE | MAP_ANON, -1, 0 );
}

static void mmap_add_reserved_area( void *addr, SIZE_T size )
{
    struct reserved_area *area;
    struct list *ptr;

    if (!((char *)addr + size)) size--;  /* avoid wrap-around */

    LIST_FOR_EACH( ptr, &reserved_areas )
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if (area->base > addr)
        {
            /* try to merge with the next one */
            if ((char *)addr + size == (char *)area->base)
            {
                area->base = addr;
                area->size += size;
                return;
            }
            break;
        }
        else if ((char *)area->base + area->size == (char *)addr)
        {
            /* merge with the previous one */
            area->size += size;

            /* try to merge with the next one too */
            if ((ptr = list_next( &reserved_areas, ptr )))
            {
                struct reserved_area *next = LIST_ENTRY( ptr, struct reserved_area, entry );
                if ((char *)addr + size == (char *)next->base)
                {
                    area->size += next->size;
                    list_remove( &next->entry );
                    free( next );
                }
            }
            return;
        }
    }

    if ((area = malloc( sizeof(*area) )))
    {
        area->base = addr;
        area->size = size;
        list_add_before( ptr, &area->entry );
    }
}

static void mmap_remove_reserved_area( void *addr, SIZE_T size )
{
    struct reserved_area *area;
    struct list *ptr;

    if (!((char *)addr + size)) size--;  /* avoid wrap-around */

    ptr = list_head( &reserved_areas );
    /* find the first area covering address */
    while (ptr)
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if ((char *)area->base >= (char *)addr + size) break;  /* outside the range */
        if ((char *)area->base + area->size > (char *)addr)  /* overlaps range */
        {
            if (area->base >= addr)
            {
                if ((char *)area->base + area->size > (char *)addr + size)
                {
                    /* range overlaps beginning of area only -> shrink area */
                    area->size -= (char *)addr + size - (char *)area->base;
                    area->base = (char *)addr + size;
                    break;
                }
                else
                {
                    /* range contains the whole area -> remove area completely */
                    ptr = list_next( &reserved_areas, ptr );
                    list_remove( &area->entry );
                    free( area );
                    continue;
                }
            }
            else
            {
                if ((char *)area->base + area->size > (char *)addr + size)
                {
                    /* range is in the middle of area -> split area in two */
                    struct reserved_area *new_area = malloc( sizeof(*new_area) );
                    if (new_area)
                    {
                        new_area->base = (char *)addr + size;
                        new_area->size = (char *)area->base + area->size - (char *)new_area->base;
                        list_add_after( ptr, &new_area->entry );
                    }
                    else size = (char *)area->base + area->size - (char *)addr;
                    area->size = (char *)addr - (char *)area->base;
                    break;
                }
                else
                {
                    /* range overlaps end of area only -> shrink area */
                    area->size = (char *)addr - (char *)area->base;
                }
            }
        }
        ptr = list_next( &reserved_areas, ptr );
    }
}

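/* Check how a block relates to the reserved areas: returns 1 when some
 * reserved area contains the block completely, -1 when the block starts in a
 * reserved area but extends beyond it, and 0 when the start of the block is
 * not inside any reserved area. */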
static int mmap_is_in_reserved_area( void *addr, SIZE_T size )
{
    struct reserved_area *area;
    struct list *ptr;

    LIST_FOR_EACH( ptr, &reserved_areas )
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if (area->base > addr) break;
        if ((char *)area->base + area->size <= (char *)addr) continue;
        /* area must contain block completely */
        if ((char *)area->base + area->size < (char *)addr + size) return -1;
        return 1;
    }
    return 0;
}

static int mmap_enum_reserved_areas( int (CDECL *enum_func)(void *base, SIZE_T size, void *arg),
                                     void *arg, int top_down )
{
    int ret = 0;
    struct list *ptr;

    if (top_down)
    {
        for (ptr = reserved_areas.prev; ptr != &reserved_areas; ptr = ptr->prev)
        {
            struct reserved_area *area = LIST_ENTRY( ptr, struct reserved_area, entry );
            if ((ret = enum_func( area->base, area->size, arg ))) break;
        }
    }
    else
    {
        for (ptr = reserved_areas.next; ptr != &reserved_areas; ptr = ptr->next)
        {
            struct reserved_area *area = LIST_ENTRY( ptr, struct reserved_area, entry );
            if ((ret = enum_func( area->base, area->size, arg ))) break;
        }
    }
    return ret;
}

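/* Try to mmap anonymous memory at a fixed address without clobbering any
 * existing mapping, using whatever non-replacing primitive the platform
 * provides (MAP_FIXED_NOREPLACE, MAP_TRYFIXED, MAP_FIXED | MAP_EXCL, or
 * mach_vm_map() on macOS); fails with EEXIST when the range is in use. */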
static void *anon_mmap_tryfixed( void *start, size_t size, int prot, int flags )
{
    void *ptr;

#ifdef MAP_FIXED_NOREPLACE
    ptr = mmap( start, size, prot, MAP_FIXED_NOREPLACE | MAP_PRIVATE | MAP_ANON | flags, -1, 0 );
#elif defined(MAP_TRYFIXED)
    ptr = mmap( start, size, prot, MAP_TRYFIXED | MAP_PRIVATE | MAP_ANON | flags, -1, 0 );
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
    ptr = mmap( start, size, prot, MAP_FIXED | MAP_EXCL | MAP_PRIVATE | MAP_ANON | flags, -1, 0 );
    if (ptr == MAP_FAILED && errno == EINVAL) errno = EEXIST;
#elif defined(__APPLE__)
    mach_vm_address_t result = (mach_vm_address_t)start;
    kern_return_t ret = mach_vm_map( mach_task_self(), &result, size, 0, VM_FLAGS_FIXED,
                                     MEMORY_OBJECT_NULL, 0, 0, prot, VM_PROT_ALL, VM_INHERIT_COPY );

    if (!ret)
    {
        if ((ptr = anon_mmap_fixed( start, size, prot, flags )) == MAP_FAILED)
            mach_vm_deallocate( mach_task_self(), result, size );
    }
    else
    {
        errno = (ret == KERN_NO_SPACE ? EEXIST : ENOMEM);
        ptr = MAP_FAILED;
    }
#else
    ptr = mmap( start, size, prot, MAP_PRIVATE | MAP_ANON | flags, -1, 0 );
#endif
    if (ptr != MAP_FAILED && ptr != start)
    {
        if (is_beyond_limit( ptr, size, user_space_limit ))
        {
            anon_mmap_fixed( ptr, size, PROT_NONE, MAP_NORESERVE );
            mmap_add_reserved_area( ptr, size );
        }
        else munmap( ptr, size );
        ptr = MAP_FAILED;
        errno = EEXIST;
    }
    return ptr;
}

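/* Reserve an address range with PROT_NONE mappings so that it stays under
 * our control. On macOS the holes are enumerated with mach_vm_region();
 * elsewhere the range is bisected recursively on failure so that as much of
 * it as possible still gets reserved. */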
static void reserve_area( void *addr, void *end )
{
#ifdef __APPLE__

#ifdef __i386__
    static const mach_vm_address_t max_address = VM_MAX_ADDRESS;
#else
    static const mach_vm_address_t max_address = MACH_VM_MAX_ADDRESS;
#endif
    mach_vm_address_t address = (mach_vm_address_t)addr;
    mach_vm_address_t end_address = (mach_vm_address_t)end;

    if (!end_address || max_address < end_address)
        end_address = max_address;

    while (address < end_address)
    {
        mach_vm_address_t hole_address = address;
        kern_return_t ret;
        mach_vm_size_t size;
        vm_region_basic_info_data_64_t info;
        mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
        mach_port_t dummy_object_name = MACH_PORT_NULL;

        /* find the mapped region at or above the current address. */
        ret = mach_vm_region(mach_task_self(), &address, &size, VM_REGION_BASIC_INFO_64,
                             (vm_region_info_t)&info, &count, &dummy_object_name);
        if (ret != KERN_SUCCESS)
        {
            address = max_address;
            size = 0;
        }

        if (end_address < address)
            address = end_address;
        if (hole_address < address)
        {
            /* found a hole, attempt to reserve it. */
            size_t hole_size = address - hole_address;
            mach_vm_address_t alloc_address = hole_address;

            ret = mach_vm_map( mach_task_self(), &alloc_address, hole_size, 0, VM_FLAGS_FIXED,
                               MEMORY_OBJECT_NULL, 0, 0, PROT_NONE, VM_PROT_ALL, VM_INHERIT_COPY );
            if (!ret) mmap_add_reserved_area( (void*)hole_address, hole_size );
            else if (ret == KERN_NO_SPACE)
            {
                /* something filled (part of) the hole before we could.
                   go back and look again. */
                address = hole_address;
                continue;
            }
        }
        address += size;
    }
#else
    void *ptr;
    size_t size = (char *)end - (char *)addr;

    if (!size) return;

    if ((ptr = anon_mmap_tryfixed( addr, size, PROT_NONE, MAP_NORESERVE )) != MAP_FAILED)
    {
        mmap_add_reserved_area( addr, size );
        return;
    }
    size = (size / 2) & ~granularity_mask;
    if (size)
    {
        reserve_area( addr, (char *)addr + size );
        reserve_area( (char *)addr + size, end );
    }
#endif /* __APPLE__ */
}

static void mmap_init( const struct preload_info *preload_info )
{
#ifndef _WIN64
#ifndef __APPLE__
    char stack;
    char * const stack_ptr = &stack;
#endif
    char *user_space_limit = (char *)0x7ffe0000;
    int i;

    if (preload_info)
    {
        /* check for a reserved area starting at the user space limit */
        /* to avoid wasting time trying to allocate it again */
        for (i = 0; preload_info[i].size; i++)
        {
            if ((char *)preload_info[i].addr > user_space_limit) break;
            if ((char *)preload_info[i].addr + preload_info[i].size > user_space_limit)
            {
                user_space_limit = (char *)preload_info[i].addr + preload_info[i].size;
                break;
            }
        }
    }
    else reserve_area( (void *)0x00010000, (void *)0x40000000 );

#ifndef __APPLE__
    if (stack_ptr >= user_space_limit)
    {
        char *end = 0;
        char *base = stack_ptr - ((unsigned int)stack_ptr & granularity_mask) - (granularity_mask + 1);
        if (base > user_space_limit) reserve_area( user_space_limit, base );
        base = stack_ptr - ((unsigned int)stack_ptr & granularity_mask) + (granularity_mask + 1);
#if defined(linux) || defined(__FreeBSD__) || defined (__FreeBSD_kernel__) || defined(__DragonFly__)
        /* Heuristic: assume the stack is near the end of the address */
        /* space, this avoids a lot of futile allocation attempts */
        end = (char *)(((unsigned long)base + 0x0fffffff) & 0xf0000000);
#endif
        reserve_area( base, end );
    }
    else
#endif
        reserve_area( user_space_limit, 0 );

#else

    if (preload_info) return;
    /* if we don't have a preloader, try to reserve the space now */
    reserve_area( (void *)0x000000010000, (void *)0x000068000000 );
    reserve_area( (void *)0x00007ff00000, (void *)0x00007fff0000 );
    reserve_area( (void *)0x7ffffe000000, (void *)0x7fffffff0000 );

#endif
}

/***********************************************************************
 *           free_ranges_lower_bound
 *
 * Returns the first range whose end is not less than addr, or end if there's none.
 */
static struct range_entry *free_ranges_lower_bound( void *addr )
{
    struct range_entry *begin = free_ranges;
    struct range_entry *end = free_ranges_end;
    struct range_entry *mid;

    while (begin < end)
    {
        mid = begin + (end - begin) / 2;
        if (mid->end < addr)
            begin = mid + 1;
        else
            end = mid;
    }

    return begin;
}

/***********************************************************************
 *           free_ranges_insert_view
 *
 * Updates the free_ranges after a new view has been created.
 */
static void free_ranges_insert_view( struct file_view *view )
{
    void *view_base = ROUND_ADDR( view->base, granularity_mask );
    void *view_end = ROUND_ADDR( (char *)view->base + view->size + granularity_mask, granularity_mask );
    struct range_entry *range = free_ranges_lower_bound( view_base );
    struct range_entry *next = range + 1;

    /* free_ranges initial value is such that the view is either inside range or before another one. */
    assert( range != free_ranges_end );
    assert( range->end > view_base || next != free_ranges_end );

    /* this happens because virtual_alloc_thread_stack shrinks a view, then creates another one on top,
     * or because AT_ROUND_TO_PAGE was used with NtMapViewOfSection to force 4kB aligned mapping. */
    if ((range->end > view_base && range->base >= view_end) ||
        (range->end == view_base && next->base >= view_end))
    {
        /* on Win64, assert that it's correctly aligned so we're not going to be in trouble later */
        assert( (!is_win64 && !is_wow64) || view->base == view_base );
        WARN( "range %p - %p is already mapped\n", view_base, view_end );
        return;
    }

    /* this should never happen */
    if (range->base > view_base || range->end < view_end)
        ERR( "range %p - %p is already partially mapped\n", view_base, view_end );
    assert( range->base <= view_base && range->end >= view_end );

    /* need to split the range in two */
    if (range->base < view_base && range->end > view_end)
    {
        memmove( next + 1, next, (free_ranges_end - next) * sizeof(struct range_entry) );
        free_ranges_end += 1;
        if ((char *)free_ranges_end - (char *)free_ranges > view_block_size)
            ERR( "Free range sequence is full, trouble ahead!\n" );
        assert( (char *)free_ranges_end - (char *)free_ranges <= view_block_size );

        next->base = view_end;
        next->end = range->end;
        range->end = view_base;
    }
    else
    {
        /* otherwise we just have to shrink it */
        if (range->base < view_base)
            range->end = view_base;
        else
            range->base = view_end;

        if (range->base < range->end) return;

        /* and possibly remove it if it's now empty */
        memmove( range, next, (free_ranges_end - next) * sizeof(struct range_entry) );
        free_ranges_end -= 1;
        assert( free_ranges_end - free_ranges > 0 );
    }
}

/***********************************************************************
 *           free_ranges_remove_view
 *
 * Updates the free_ranges after a view has been destroyed.
 */
static void free_ranges_remove_view( struct file_view *view )
{
    void *view_base = ROUND_ADDR( view->base, granularity_mask );
    void *view_end = ROUND_ADDR( (char *)view->base + view->size + granularity_mask, granularity_mask );
    struct range_entry *range = free_ranges_lower_bound( view_base );
    struct range_entry *next = range + 1;

    /* It's possible to use AT_ROUND_TO_PAGE on 32bit with NtMapViewOfSection to force 4kB alignment,
     * and this breaks our assumptions. Look at the views around to check if the range is still in use. */
#ifndef _WIN64
    struct file_view *prev_view = WINE_RB_ENTRY_VALUE( wine_rb_prev( &view->entry ), struct file_view, entry );
    struct file_view *next_view = WINE_RB_ENTRY_VALUE( wine_rb_next( &view->entry ), struct file_view, entry );
    void *prev_view_base = prev_view ? ROUND_ADDR( prev_view->base, granularity_mask ) : NULL;
    void *prev_view_end = prev_view ? ROUND_ADDR( (char *)prev_view->base + prev_view->size + granularity_mask, granularity_mask ) : NULL;
    void *next_view_base = next_view ? ROUND_ADDR( next_view->base, granularity_mask ) : NULL;
    void *next_view_end = next_view ? ROUND_ADDR( (char *)next_view->base + next_view->size + granularity_mask, granularity_mask ) : NULL;

    if ((prev_view_base < view_end && prev_view_end > view_base) ||
        (next_view_base < view_end && next_view_end > view_base))
    {
        WARN( "range %p - %p is still mapped\n", view_base, view_end );
        return;
    }
#endif

    /* free_ranges initial value is such that the view is either inside range or before another one. */
    assert( range != free_ranges_end );
    assert( range->end > view_base || next != free_ranges_end );

    /* this should never happen, but we can safely ignore it */
    if (range->base <= view_base && range->end >= view_end)
    {
        WARN( "range %p - %p is already unmapped\n", view_base, view_end );
        return;
    }

    /* this should never happen */
    if (range->base < view_end && range->end > view_base)
        ERR( "range %p - %p is already partially unmapped\n", view_base, view_end );
    assert( range->end <= view_base || range->base >= view_end );

    /* merge with next if possible */
    if (range->end == view_base && next->base == view_end)
    {
        range->end = next->end;
        memmove( next, next + 1, (free_ranges_end - next - 1) * sizeof(struct range_entry) );
        free_ranges_end -= 1;
        assert( free_ranges_end - free_ranges > 0 );
    }
    /* or try growing the range */
    else if (range->end == view_base)
        range->end = view_end;
    else if (range->base == view_end)
        range->base = view_base;
    /* otherwise create a new one */
    else
    {
        memmove( range + 1, range, (free_ranges_end - range) * sizeof(struct range_entry) );
        free_ranges_end += 1;
        if ((char *)free_ranges_end - (char *)free_ranges > view_block_size)
            ERR( "Free range sequence is full, trouble ahead!\n" );
        assert( (char *)free_ranges_end - (char *)free_ranges <= view_block_size );

        range->base = view_base;
        range->end = view_end;
    }
}

static inline int is_view_valloc( const struct file_view *view )
{
    return !(view->protect & (SEC_FILE | SEC_RESERVE | SEC_COMMIT));
}

/***********************************************************************
 *           get_page_vprot
 *
 * Return the page protection byte.
 */
static BYTE get_page_vprot( const void *addr )
{
    size_t idx = (size_t)addr >> page_shift;

#ifdef _WIN64
    if ((idx >> pages_vprot_shift) >= pages_vprot_size) return 0;
    if (!pages_vprot[idx >> pages_vprot_shift]) return 0;
    return pages_vprot[idx >> pages_vprot_shift][idx & pages_vprot_mask];
#else
    return pages_vprot[idx];
#endif
}

/***********************************************************************
 *           set_page_vprot
 *
 * Set a range of page protection bytes.
 */
static void set_page_vprot( const void *addr, size_t size, BYTE vprot )
{
    size_t idx = (size_t)addr >> page_shift;
    size_t end = ((size_t)addr + size + page_mask) >> page_shift;

#ifdef _WIN64
    while (idx >> pages_vprot_shift != end >> pages_vprot_shift)
    {
        size_t dir_size = pages_vprot_mask + 1 - (idx & pages_vprot_mask);
        memset( pages_vprot[idx >> pages_vprot_shift] + (idx & pages_vprot_mask), vprot, dir_size );
        idx += dir_size;
    }
    memset( pages_vprot[idx >> pages_vprot_shift] + (idx & pages_vprot_mask), vprot, end - idx );
#else
    memset( pages_vprot + idx, vprot, end - idx );
#endif
}

/***********************************************************************
 *           set_page_vprot_bits
 *
 * Set or clear bits in a range of page protection bytes.
 */
static void set_page_vprot_bits( const void *addr, size_t size, BYTE set, BYTE clear )
{
    size_t idx = (size_t)addr >> page_shift;
    size_t end = ((size_t)addr + size + page_mask) >> page_shift;

#ifdef _WIN64
    for ( ; idx < end; idx++)
    {
        BYTE *ptr = pages_vprot[idx >> pages_vprot_shift] + (idx & pages_vprot_mask);
        *ptr = (*ptr & ~clear) | set;
    }
#else
    for ( ; idx < end; idx++) pages_vprot[idx] = (pages_vprot[idx] & ~clear) | set;
#endif
}

/***********************************************************************
 *           alloc_pages_vprot
 *
 * Allocate the page protection bytes for a given range.
 */
static BOOL alloc_pages_vprot( const void *addr, size_t size )
{
#ifdef _WIN64
    size_t idx = (size_t)addr >> page_shift;
    size_t end = ((size_t)addr + size + page_mask) >> page_shift;
    size_t i;
    void *ptr;

    assert( end <= pages_vprot_size << pages_vprot_shift );
    for (i = idx >> pages_vprot_shift; i < (end + pages_vprot_mask) >> pages_vprot_shift; i++)
    {
        if (pages_vprot[i]) continue;
        if ((ptr = anon_mmap_alloc( pages_vprot_mask + 1, PROT_READ | PROT_WRITE )) == MAP_FAILED)
            return FALSE;
        pages_vprot[i] = ptr;
    }
#endif
    return TRUE;
}

/***********************************************************************
 *           compare_view
 *
 * View comparison function used for the rb tree.
 */
static int compare_view( const void *addr, const struct wine_rb_entry *entry )
{
    struct file_view *view = WINE_RB_ENTRY_VALUE( entry, struct file_view, entry );

    if (addr < view->base) return -1;
    if (addr > view->base) return 1;
    return 0;
}

/***********************************************************************
 *           get_prot_str
 */
static const char *get_prot_str( BYTE prot )
{
    static char buffer[6];
    buffer[0] = (prot & VPROT_COMMITTED) ? 'c' : '-';
    buffer[1] = (prot & VPROT_GUARD) ? 'g' : ((prot & VPROT_WRITEWATCH) ? 'H' : '-');
    buffer[2] = (prot & VPROT_READ) ? 'r' : '-';
    buffer[3] = (prot & VPROT_WRITECOPY) ? 'W' : ((prot & VPROT_WRITE) ? 'w' : '-');
    buffer[4] = (prot & VPROT_EXEC) ? 'x' : '-';
    buffer[5] = 0;
    return buffer;
}

/***********************************************************************
 *           get_unix_prot
 *
 * Convert page protections to protection for mmap/mprotect.
 */
static int get_unix_prot( BYTE vprot )
{
    int prot = 0;
    if ((vprot & VPROT_COMMITTED) && !(vprot & VPROT_GUARD))
    {
        if (vprot & VPROT_READ) prot |= PROT_READ;
        if (vprot & VPROT_WRITE) prot |= PROT_WRITE | PROT_READ;
        if (vprot & VPROT_WRITECOPY) prot |= PROT_WRITE | PROT_READ;
        if (vprot & VPROT_EXEC) prot |= PROT_EXEC | PROT_READ;
        if (vprot & VPROT_WRITEWATCH) prot &= ~PROT_WRITE;
    }
    if (!prot) prot = PROT_NONE;
    return prot;
}

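/* Note that uncommitted and guard pages always map to PROT_NONE, and that
 * write-watched pages drop PROT_WRITE so that the first write faults; e.g.
 * VPROT_COMMITTED | VPROT_READ | VPROT_WRITE | VPROT_WRITEWATCH yields
 * PROT_READ only. */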
/***********************************************************************
 *           dump_view
 */
static void dump_view( struct file_view *view )
{
    UINT i, count;
    char *addr = view->base;
    BYTE prot = get_page_vprot( addr );

    TRACE( "View: %p - %p", addr, addr + view->size - 1 );
    if (view->protect & VPROT_SYSTEM)
        TRACE( " (builtin image)\n" );
    else if (view->protect & SEC_IMAGE)
        TRACE( " (image)\n" );
    else if (view->protect & SEC_FILE)
        TRACE( " (file)\n" );
    else if (view->protect & (SEC_RESERVE | SEC_COMMIT))
        TRACE( " (anonymous)\n" );
    else
        TRACE( " (valloc)\n");

    for (count = i = 1; i < view->size >> page_shift; i++, count++)
    {
        BYTE next = get_page_vprot( addr + (count << page_shift) );
        if (next == prot) continue;
        TRACE( "      %p - %p %s\n",
               addr, addr + (count << page_shift) - 1, get_prot_str(prot) );
        addr += (count << page_shift);
        prot = next;
        count = 0;
    }
    if (count)
        TRACE( "      %p - %p %s\n",
               addr, addr + (count << page_shift) - 1, get_prot_str(prot) );
}

/***********************************************************************
 *           VIRTUAL_Dump
 */
#ifdef WINE_VM_DEBUG
static void VIRTUAL_Dump(void)
{
    sigset_t sigset;
    struct file_view *view;

    TRACE( "Dump of all virtual memory views:\n" );
    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    WINE_RB_FOR_EACH_ENTRY( view, &views_tree, struct file_view, entry )
    {
        dump_view( view );
    }
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
}
#endif

/***********************************************************************
 *           find_view
 *
 * Find the view containing a given address. virtual_mutex must be held by caller.
 *
 * PARAMS
 *     addr  [I] Address
 *
 * RETURNS
 *     View: Success
 *     NULL: Failure
 */
static struct file_view *find_view( const void *addr, size_t size )
{
    struct wine_rb_entry *ptr = views_tree.root;

    if ((const char *)addr + size < (const char *)addr) return NULL; /* overflow */

    while (ptr)
    {
        struct file_view *view = WINE_RB_ENTRY_VALUE( ptr, struct file_view, entry );

        if (view->base > addr) ptr = ptr->left;
        else if ((const char *)view->base + view->size <= (const char *)addr) ptr = ptr->right;
        else if ((const char *)view->base + view->size < (const char *)addr + size) break;  /* size too large */
        else return view;
    }
    return NULL;
}

/***********************************************************************
 *           zero_bits_win_to_64
 *
 * Convert from Windows hybrid 32bit-based / bitmask to 64bit-based format
 */
static inline unsigned short zero_bits_win_to_64( ULONG_PTR zero_bits )
{
    unsigned short zero_bits_64;

    if (zero_bits == 0) return 0;
    if (zero_bits < 32) return 32 + zero_bits;
    zero_bits_64 = 63;
#ifdef _WIN64
    if (zero_bits >> 32) { zero_bits_64 -= 32; zero_bits >>= 32; }
#endif
    if (zero_bits >> 16) { zero_bits_64 -= 16; zero_bits >>= 16; }
    if (zero_bits >> 8) { zero_bits_64 -= 8; zero_bits >>= 8; }
    if (zero_bits >> 4) { zero_bits_64 -= 4; zero_bits >>= 4; }
    if (zero_bits >> 2) { zero_bits_64 -= 2; zero_bits >>= 2; }
    if (zero_bits >> 1) { zero_bits_64 -= 1; }
    return zero_bits_64;
}

/***********************************************************************
 *           get_zero_bits_64_mask
 */
static inline UINT_PTR get_zero_bits_64_mask( USHORT zero_bits_64 )
{
    return (UINT_PTR)((~(UINT64)0) >> zero_bits_64);
}

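/* Examples for the two helpers above: a zero_bits count of 12 (12 high zero
 * bits of a 32-bit address) becomes 44 in 64-bit terms, while a bitmask such
 * as 0x7fffffff converts to the number of its leading zero bits, here 33;
 * get_zero_bits_64_mask( 33 ) then yields the mask 0x7fffffff again. */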
/***********************************************************************
 *           is_write_watch_range
 */
static inline BOOL is_write_watch_range( const void *addr, size_t size )
{
    struct file_view *view = find_view( addr, size );
    return view && (view->protect & VPROT_WRITEWATCH);
}

/***********************************************************************
 *           find_view_range
 *
 * Find the first view overlapping at least part of the specified range.
 * virtual_mutex must be held by caller.
 */
static struct file_view *find_view_range( const void *addr, size_t size )
{
    struct wine_rb_entry *ptr = views_tree.root;

    while (ptr)
    {
        struct file_view *view = WINE_RB_ENTRY_VALUE( ptr, struct file_view, entry );

        if ((const char *)view->base >= (const char *)addr + size) ptr = ptr->left;
        else if ((const char *)view->base + view->size <= (const char *)addr) ptr = ptr->right;
        else return view;
    }
    return NULL;
}

/***********************************************************************
 *           find_view_inside_range
 *
 * Find first (resp. last, if top_down) view inside a range.
 * virtual_mutex must be held by caller.
 */
static struct wine_rb_entry *find_view_inside_range( void **base_ptr, void **end_ptr, int top_down )
{
    struct wine_rb_entry *first = NULL, *ptr = views_tree.root;
    void *base = *base_ptr, *end = *end_ptr;

    /* find the first (resp. last) view inside the range */
    while (ptr)
    {
        struct file_view *view = WINE_RB_ENTRY_VALUE( ptr, struct file_view, entry );
        if ((char *)view->base + view->size >= (char *)end)
        {
            end = min( end, view->base );
            ptr = ptr->left;
        }
        else if (view->base <= base)
        {
            base = max( (char *)base, (char *)view->base + view->size );
            ptr = ptr->right;
        }
        else
        {
            first = ptr;
            ptr = top_down ? ptr->right : ptr->left;
        }
    }

    *base_ptr = base;
    *end_ptr = end;
    return first;
}

/***********************************************************************
 *           try_map_free_area
 *
 * Try mmap()ing the expected free memory region, stepping through it and
 * retrying as needed, and return the address where it actually succeeded, or NULL.
 */
static void* try_map_free_area( void *base, void *end, ptrdiff_t step,
                                void *start, size_t size, int unix_prot )
{
    void *ptr;

    while (start && base <= start && (char*)start + size <= (char*)end)
    {
        if ((ptr = anon_mmap_tryfixed( start, size, unix_prot, 0 )) != MAP_FAILED) return start;
        TRACE( "Found free area is already mapped, start %p.\n", start );
        if (errno != EEXIST)
        {
            ERR( "mmap() error %s, range %p-%p, unix_prot %#x.\n",
                 strerror(errno), start, (char *)start + size, unix_prot );
            return NULL;
        }
        if ((step > 0 && (char *)end - (char *)start < step) ||
            (step < 0 && (char *)start - (char *)base < -step) ||
            step == 0)
            break;
        start = (char *)start + step;
    }

    return NULL;
}

/***********************************************************************
 *           map_free_area
 *
 * Find a free area between views inside the specified range and map it.
 * virtual_mutex must be held by caller.
 */
static void *map_free_area( void *base, void *end, size_t size, int top_down, int unix_prot )
{
    struct wine_rb_entry *first = find_view_inside_range( &base, &end, top_down );
    ptrdiff_t step = top_down ? -(granularity_mask + 1) : (granularity_mask + 1);
    void *start;

    if (top_down)
    {
        start = ROUND_ADDR( (char *)end - size, granularity_mask );
        if (start >= end || start < base) return NULL;

        while (first)
        {
            struct file_view *view = WINE_RB_ENTRY_VALUE( first, struct file_view, entry );
            if ((start = try_map_free_area( (char *)view->base + view->size, (char *)start + size, step,
                                            start, size, unix_prot ))) break;
            start = ROUND_ADDR( (char *)view->base - size, granularity_mask );
            /* stop if remaining space is not large enough */
            if (!start || start >= end || start < base) return NULL;
            first = wine_rb_prev( first );
        }
    }
    else
    {
        start = ROUND_ADDR( (char *)base + granularity_mask, granularity_mask );
        if (!start || start >= end || (char *)end - (char *)start < size) return NULL;

        while (first)
        {
            struct file_view *view = WINE_RB_ENTRY_VALUE( first, struct file_view, entry );
            if ((start = try_map_free_area( start, view->base, step,
                                            start, size, unix_prot ))) break;
            start = ROUND_ADDR( (char *)view->base + view->size + granularity_mask, granularity_mask );
            /* stop if remaining space is not large enough */
            if (!start || start >= end || (char *)end - (char *)start < size) return NULL;
            first = wine_rb_next( first );
        }
    }

    if (!first)
        return try_map_free_area( base, end, step, start, size, unix_prot );

    return start;
}

/***********************************************************************
 *           find_reserved_free_area
 *
 * Find a free area between views inside the specified range.
 * virtual_mutex must be held by caller.
 * The range must be inside the preloader reserved range.
 */
static void *find_reserved_free_area( void *base, void *end, size_t size, int top_down )
{
    struct range_entry *range;
    void *start;

    base = ROUND_ADDR( (char *)base + granularity_mask, granularity_mask );
    end = (char *)ROUND_ADDR( (char *)end - size, granularity_mask ) + size;

    if (top_down)
    {
        start = (char *)end - size;
        range = free_ranges_lower_bound( start );
        assert(range != free_ranges_end && range->end >= start);

        if ((char *)range->end - (char *)start < size) start = ROUND_ADDR( (char *)range->end - size, granularity_mask );
        do
        {
            if (start >= end || start < base || (char *)end - (char *)start < size) return NULL;
            if (start < range->end && start >= range->base && (char *)range->end - (char *)start >= size) break;
            if (--range < free_ranges) return NULL;
            start = ROUND_ADDR( (char *)range->end - size, granularity_mask );
        }
        while (1);
    }
    else
    {
        start = base;
        range = free_ranges_lower_bound( start );
        assert(range != free_ranges_end && range->end >= start);

        if (start < range->base) start = ROUND_ADDR( (char *)range->base + granularity_mask, granularity_mask );
        do
        {
            if (start >= end || start < base || (char *)end - (char *)start < size) return NULL;
            if (start < range->end && start >= range->base && (char *)range->end - (char *)start >= size) break;
            if (++range == free_ranges_end) return NULL;
            start = ROUND_ADDR( (char *)range->base + granularity_mask, granularity_mask );
        }
        while (1);
    }
    return start;
}

/***********************************************************************
 *           add_reserved_area
 *
 * Add a reserved area to the list maintained by libwine.
 * virtual_mutex must be held by caller.
 */
static void add_reserved_area( void *addr, size_t size )
{
    TRACE( "adding %p-%p\n", addr, (char *)addr + size );

    if (addr < user_space_limit)
    {
        /* unmap the part of the area that is below the limit */
        assert( (char *)addr + size > (char *)user_space_limit );
        munmap( addr, (char *)user_space_limit - (char *)addr );
        size -= (char *)user_space_limit - (char *)addr;
        addr = user_space_limit;
    }
    /* blow away existing mappings */
    anon_mmap_fixed( addr, size, PROT_NONE, MAP_NORESERVE );
    mmap_add_reserved_area( addr, size );
}

/***********************************************************************
 *           remove_reserved_area
 *
 * Remove a reserved area from the list maintained by libwine.
 * virtual_mutex must be held by caller.
 */
static void remove_reserved_area( void *addr, size_t size )
{
    struct file_view *view;

    TRACE( "removing %p-%p\n", addr, (char *)addr + size );
    mmap_remove_reserved_area( addr, size );

    /* unmap areas not covered by an existing view */
    WINE_RB_FOR_EACH_ENTRY( view, &views_tree, struct file_view, entry )
    {
        if ((char *)view->base >= (char *)addr + size) break;
        if ((char *)view->base + view->size <= (char *)addr) continue;
        if (view->base > addr) munmap( addr, (char *)view->base - (char *)addr );
        if ((char *)view->base + view->size > (char *)addr + size) return;
        size = (char *)addr + size - ((char *)view->base + view->size);
        addr = (char *)view->base + view->size;
    }
    munmap( addr, size );
}

struct area_boundary
{
    void  *base;
    size_t size;
    void  *boundary;
};

/***********************************************************************
 *           get_area_boundary_callback
 *
 * Get lowest boundary address between reserved area and non-reserved area
 * in the specified region. If no boundaries are found, result is NULL.
 * virtual_mutex must be held by caller.
 */
static int CDECL get_area_boundary_callback( void *start, SIZE_T size, void *arg )
{
    struct area_boundary *area = arg;
    void *end = (char *)start + size;

    area->boundary = NULL;
    if (area->base >= end) return 0;
    if ((char *)start >= (char *)area->base + area->size) return 1;
    if (area->base >= start)
    {
        if ((char *)area->base + area->size > (char *)end)
        {
            area->boundary = end;
            return 1;
        }
        return 0;
    }
    area->boundary = start;
    return 1;
}

/***********************************************************************
 *           unmap_area
 *
 * Unmap an area, or simply replace it by an empty mapping if it is
 * in a reserved area. virtual_mutex must be held by caller.
 */
static inline void unmap_area( void *addr, size_t size )
{
    switch (mmap_is_in_reserved_area( addr, size ))
    {
    case -1: /* partially in a reserved area */
    {
        struct area_boundary area;
        size_t lower_size;
        area.base = addr;
        area.size = size;
        mmap_enum_reserved_areas( get_area_boundary_callback, &area, 0 );
        assert( area.boundary );
        lower_size = (char *)area.boundary - (char *)addr;
        unmap_area( addr, lower_size );
        unmap_area( area.boundary, size - lower_size );
        break;
    }
    case 1:  /* in a reserved area */
        anon_mmap_fixed( addr, size, PROT_NONE, MAP_NORESERVE );
        break;
    default:
    case 0:  /* not in a reserved area */
        if (is_beyond_limit( addr, size, user_space_limit ))
            add_reserved_area( addr, size );
        else
            munmap( addr, size );
        break;
    }
}

/***********************************************************************
 *           alloc_view
 *
 * Allocate a new view. virtual_mutex must be held by caller.
 */
static struct file_view *alloc_view(void)
{
    if (next_free_view)
    {
        struct file_view *ret = next_free_view;
        next_free_view = *(struct file_view **)ret;
        return ret;
    }
    if (view_block_start == view_block_end)
    {
        void *ptr = anon_mmap_alloc( view_block_size, PROT_READ | PROT_WRITE );
        if (ptr == MAP_FAILED) return NULL;
        view_block_start = ptr;
        view_block_end = view_block_start + view_block_size / sizeof(*view_block_start);
    }
    return view_block_start++;
}

/***********************************************************************
 *           delete_view
 *
 * Deletes a view. virtual_mutex must be held by caller.
 */
static void delete_view( struct file_view *view ) /* [in] View */
{
    if (!(view->protect & VPROT_SYSTEM)) unmap_area( view->base, view->size );
    set_page_vprot( view->base, view->size, 0 );
    if (mmap_is_in_reserved_area( view->base, view->size ))
        free_ranges_remove_view( view );
    wine_rb_remove( &views_tree, &view->entry );
    *(struct file_view **)view = next_free_view;
    next_free_view = view;
}

/***********************************************************************
 *           create_view
 *
 * Create a view. virtual_mutex must be held by caller.
 */
static NTSTATUS create_view( struct file_view **view_ret, void *base, size_t size, unsigned int vprot )
{
    struct file_view *view;
    int unix_prot = get_unix_prot( vprot );

    assert( !((UINT_PTR)base & page_mask) );
    assert( !(size & page_mask) );

    /* Check for overlapping views. This can happen if the previous view
     * was a system view that got unmapped behind our back. In that case
     * we recover by simply deleting it. */

    while ((view = find_view_range( base, size )))
    {
        TRACE( "overlapping view %p-%p for %p-%p\n",
               view->base, (char *)view->base + view->size, base, (char *)base + size );
        assert( view->protect & VPROT_SYSTEM );
        delete_view( view );
    }

    if (!alloc_pages_vprot( base, size )) return STATUS_NO_MEMORY;

    /* Create the view structure */

    if (!(view = alloc_view()))
    {
        FIXME( "out of memory for %p-%p\n", base, (char *)base + size );
        return STATUS_NO_MEMORY;
    }

    view->base    = base;
    view->size    = size;
    view->protect = vprot;
    set_page_vprot( base, size, vprot );

    wine_rb_put( &views_tree, view->base, &view->entry );
    if (mmap_is_in_reserved_area( view->base, view->size ))
        free_ranges_insert_view( view );

    *view_ret = view;

    if (force_exec_prot && (unix_prot & PROT_READ) && !(unix_prot & PROT_EXEC))
    {
        TRACE( "forcing exec permission on %p-%p\n", base, (char *)base + size - 1 );
        mprotect( base, size, unix_prot | PROT_EXEC );
    }
    return STATUS_SUCCESS;
}

/***********************************************************************
 *           get_win32_prot
 *
 * Convert page protections to Win32 flags.
 */
static DWORD get_win32_prot( BYTE vprot, unsigned int map_prot )
{
    DWORD ret = VIRTUAL_Win32Flags[vprot & 0x0f];
    if (vprot & VPROT_GUARD) ret |= PAGE_GUARD;
    if (map_prot & SEC_NOCACHE) ret |= PAGE_NOCACHE;
    return ret;
}

/***********************************************************************
 *           get_vprot_flags
 *
 * Build page protections from Win32 flags.
 */
static NTSTATUS get_vprot_flags( DWORD protect, unsigned int *vprot, BOOL image )
{
    switch(protect & 0xff)
    {
    case PAGE_READONLY:
        *vprot = VPROT_READ;
        break;
    case PAGE_READWRITE:
        if (image)
            *vprot = VPROT_READ | VPROT_WRITECOPY;
        else
            *vprot = VPROT_READ | VPROT_WRITE;
        break;
    case PAGE_WRITECOPY:
        *vprot = VPROT_READ | VPROT_WRITECOPY;
        break;
    case PAGE_EXECUTE:
        *vprot = VPROT_EXEC;
        break;
    case PAGE_EXECUTE_READ:
        *vprot = VPROT_EXEC | VPROT_READ;
        break;
    case PAGE_EXECUTE_READWRITE:
        if (image)
            *vprot = VPROT_EXEC | VPROT_READ | VPROT_WRITECOPY;
        else
            *vprot = VPROT_EXEC | VPROT_READ | VPROT_WRITE;
        break;
    case PAGE_EXECUTE_WRITECOPY:
        *vprot = VPROT_EXEC | VPROT_READ | VPROT_WRITECOPY;
        break;
    case PAGE_NOACCESS:
        *vprot = 0;
        break;
    default:
        return STATUS_INVALID_PAGE_PROTECTION;
    }
    if (protect & PAGE_GUARD) *vprot |= VPROT_GUARD;
    return STATUS_SUCCESS;
}

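/* Note that for image mappings the writable protections are converted to
 * copy-on-write, matching how Windows maps PE sections: writes by one
 * process must not become visible to other processes mapping the same image. */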
/***********************************************************************
 *           mprotect_exec
 *
 * Wrapper for mprotect, adds PROT_EXEC if forced by force_exec_prot
 */
static inline int mprotect_exec( void *base, size_t size, int unix_prot )
{
    if (force_exec_prot && (unix_prot & PROT_READ) && !(unix_prot & PROT_EXEC))
    {
        TRACE( "forcing exec permission on %p-%p\n", base, (char *)base + size - 1 );
        if (!mprotect( base, size, unix_prot | PROT_EXEC )) return 0;
        /* exec + write may legitimately fail, in that case fall back to write only */
        if (!(unix_prot & PROT_WRITE)) return -1;
    }

    return mprotect( base, size, unix_prot );
}

/***********************************************************************
 *           mprotect_range
 *
 * Call mprotect on a page range, applying the protections from the per-page byte.
 */
static void mprotect_range( void *base, size_t size, BYTE set, BYTE clear )
{
    size_t i, count;
    char *addr = ROUND_ADDR( base, page_mask );
    int prot, next;

    size = ROUND_SIZE( base, size );
    prot = get_unix_prot( (get_page_vprot( addr ) & ~clear ) | set );
    for (count = i = 1; i < size >> page_shift; i++, count++)
    {
        next = get_unix_prot( (get_page_vprot( addr + (count << page_shift) ) & ~clear) | set );
        if (next == prot) continue;
        mprotect_exec( addr, count << page_shift, prot );
        addr += count << page_shift;
        prot = next;
        count = 0;
    }
    if (count) mprotect_exec( addr, count << page_shift, prot );
}

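/* mprotect_range() coalesces runs of pages that end up with identical Unix
 * protections into a single mprotect() call instead of one call per page. */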
/***********************************************************************
 *           set_vprot
 *
 * Change the protection of a range of pages.
 */
static BOOL set_vprot( struct file_view *view, void *base, size_t size, BYTE vprot )
{
    int unix_prot = get_unix_prot(vprot);

    if (view->protect & VPROT_WRITEWATCH)
    {
        /* each page may need different protections depending on write watch flag */
        set_page_vprot_bits( base, size, vprot & ~VPROT_WRITEWATCH, ~vprot & ~VPROT_WRITEWATCH );
        mprotect_range( base, size, 0, 0 );
        return TRUE;
    }

    /* if setting stack guard pages, store the permissions first, as the guard may be
     * triggered at any point after mprotect and change the permissions again */
    if ((vprot & VPROT_GUARD) &&
        (base >= NtCurrentTeb()->DeallocationStack) &&
        (base < NtCurrentTeb()->Tib.StackBase))
    {
        set_page_vprot( base, size, vprot );
        mprotect( base, size, unix_prot );
        return TRUE;
    }

    if (mprotect_exec( base, size, unix_prot )) /* FIXME: last error */
        return FALSE;

    set_page_vprot( base, size, vprot );
    return TRUE;
}

/***********************************************************************
 *           set_protection
 *
 * Set page protections on a range of pages
 */
static NTSTATUS set_protection( struct file_view *view, void *base, SIZE_T size, ULONG protect )
{
    unsigned int vprot;
    NTSTATUS status;

    if ((status = get_vprot_flags( protect, &vprot, view->protect & SEC_IMAGE ))) return status;
    if (is_view_valloc( view ))
    {
        if (vprot & VPROT_WRITECOPY) return STATUS_INVALID_PAGE_PROTECTION;
    }
    else
    {
        BYTE access = vprot & (VPROT_READ | VPROT_WRITE | VPROT_EXEC);
        if ((view->protect & access) != access) return STATUS_INVALID_PAGE_PROTECTION;
    }

    if (!set_vprot( view, base, size, vprot | VPROT_COMMITTED )) return STATUS_ACCESS_DENIED;
    return STATUS_SUCCESS;
}

/***********************************************************************
 *           update_write_watches
 */
static void update_write_watches( void *base, size_t size, size_t accessed_size )
{
    TRACE( "updating watch %p-%p-%p\n", base, (char *)base + accessed_size, (char *)base + size );
    /* clear write watch flag on accessed pages */
    set_page_vprot_bits( base, accessed_size, 0, VPROT_WRITEWATCH );
    /* restore page protections on the entire range */
    mprotect_range( base, size, 0, 0 );
}

/***********************************************************************
 *           reset_write_watches
 *
 * Reset write watches in a memory range.
 */
static void reset_write_watches( void *base, SIZE_T size )
{
    set_page_vprot_bits( base, size, VPROT_WRITEWATCH, 0 );
    mprotect_range( base, size, 0, 0 );
}

/***********************************************************************
 *           unmap_extra_space
 *
 * Release the extra memory while keeping the range starting on the granularity boundary.
 */
static inline void *unmap_extra_space( void *ptr, size_t total_size, size_t wanted_size )
{
    if ((ULONG_PTR)ptr & granularity_mask)
    {
        size_t extra = granularity_mask + 1 - ((ULONG_PTR)ptr & granularity_mask);
        munmap( ptr, extra );
        ptr = (char *)ptr + extra;
        total_size -= extra;
    }
    if (total_size > wanted_size)
        munmap( (char *)ptr + wanted_size, total_size - wanted_size );
    return ptr;
}

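/* Example: a 0x30000-byte mapping at 0x7f0012341000 trimmed to a wanted size
 * of 0x20000 first unmaps 0xf000 bytes to reach the next 0x10000 granularity
 * boundary at 0x7f0012350000, then unmaps the remaining 0x1000 bytes past
 * the new end of the range. */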

struct alloc_area
{
    size_t size;
    int    top_down;
    void  *limit;
    void  *result;
};

/***********************************************************************
 *           alloc_reserved_area_callback
 *
 * Try to map some space inside a reserved area. Callback for mmap_enum_reserved_areas.
 */
static int CDECL alloc_reserved_area_callback( void *start, SIZE_T size, void *arg )
{
    struct alloc_area *alloc = arg;
    void *end = (char *)start + size;

    if (start < address_space_start) start = address_space_start;
    if (is_beyond_limit( start, size, alloc->limit )) end = alloc->limit;
    if (start >= end) return 0;

    /* make sure we don't touch the preloader reserved range */
    if (preload_reserve_end >= start)
    {
        if (preload_reserve_end >= end)
        {
            if (preload_reserve_start <= start) return 0;  /* no space in that area */
            if (preload_reserve_start < end) end = preload_reserve_start;
        }
        else if (preload_reserve_start <= start) start = preload_reserve_end;
        else
        {
            /* range is split in two by the preloader reservation, try first part */
            if ((alloc->result = find_reserved_free_area( start, preload_reserve_start, alloc->size,
                                                          alloc->top_down )))
                return 1;
            /* then fall through to try second part */
            start = preload_reserve_end;
        }
    }
    if ((alloc->result = find_reserved_free_area( start, end, alloc->size, alloc->top_down )))
        return 1;

    return 0;
}

/***********************************************************************
 *           map_fixed_area
 *
 * mmap the fixed memory area.
 * virtual_mutex must be held by caller.
 */
static NTSTATUS map_fixed_area( void *base, size_t size, unsigned int vprot )
{
    void *ptr;

    switch (mmap_is_in_reserved_area( base, size ))
    {
    case -1: /* partially in a reserved area */
    {
        NTSTATUS status;
        struct area_boundary area;
        size_t lower_size;
        area.base = base;
        area.size = size;
        mmap_enum_reserved_areas( get_area_boundary_callback, &area, 0 );
        assert( area.boundary );
        lower_size = (char *)area.boundary - (char *)base;
        status = map_fixed_area( base, lower_size, vprot );
        if (status == STATUS_SUCCESS)
        {
            status = map_fixed_area( area.boundary, size - lower_size, vprot);
            if (status != STATUS_SUCCESS) unmap_area( base, lower_size );
        }
        return status;
    }
    case 0:  /* not in a reserved area, do a normal allocation */
        if ((ptr = anon_mmap_tryfixed( base, size, get_unix_prot(vprot), 0 )) == MAP_FAILED)
        {
            if (errno == ENOMEM) return STATUS_NO_MEMORY;
            if (errno == EEXIST) return STATUS_CONFLICTING_ADDRESSES;
            return STATUS_INVALID_PARAMETER;
        }
        break;

    default:
    case 1:  /* in a reserved area, make sure the address is available */
        if (find_view_range( base, size )) return STATUS_CONFLICTING_ADDRESSES;
        /* replace the reserved area by our mapping */
        if ((ptr = anon_mmap_fixed( base, size, get_unix_prot(vprot), 0 )) != base)
            return STATUS_INVALID_PARAMETER;
        break;
    }
    if (is_beyond_limit( ptr, size, working_set_limit )) working_set_limit = address_space_limit;
    return STATUS_SUCCESS;
}

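/* map_view() below tries three strategies in turn when no fixed base is
 * requested: carve the block out of a reserved area, walk the gaps between
 * existing views when an address limit (zero_bits) must be honored, and
 * finally let the kernel pick an address, retrying while the result lies
 * beyond user_space_limit. */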
/***********************************************************************
 *           map_view
 *
 * Create a view and mmap the corresponding memory area.
 * virtual_mutex must be held by caller.
 */
static NTSTATUS map_view( struct file_view **view_ret, void *base, size_t size,
                          int top_down, unsigned int vprot, unsigned short zero_bits_64 )
{
    void *ptr;
    NTSTATUS status;

    if (base)
    {
        if (is_beyond_limit( base, size, address_space_limit ))
            return STATUS_WORKING_SET_LIMIT_RANGE;
        status = map_fixed_area( base, size, vprot );
        if (status != STATUS_SUCCESS) return status;
        ptr = base;
    }
    else
    {
        size_t view_size = size + granularity_mask + 1;
        struct alloc_area alloc;

        alloc.size = size;
        alloc.top_down = top_down;
        alloc.limit = (void*)(get_zero_bits_64_mask( zero_bits_64 ) & (UINT_PTR)user_space_limit);

        if (mmap_enum_reserved_areas( alloc_reserved_area_callback, &alloc, top_down ))
        {
            ptr = alloc.result;
            TRACE( "got mem in reserved area %p-%p\n", ptr, (char *)ptr + size );
            if (anon_mmap_fixed( ptr, size, get_unix_prot(vprot), 0 ) != ptr)
                return STATUS_INVALID_PARAMETER;
            goto done;
        }

        if (zero_bits_64)
        {
            if (!(ptr = map_free_area( address_space_start, alloc.limit, size,
                                       top_down, get_unix_prot(vprot) )))
                return STATUS_NO_MEMORY;
            TRACE( "got mem with map_free_area %p-%p\n", ptr, (char *)ptr + size );
            goto done;
        }

        for (;;)
        {
            if ((ptr = anon_mmap_alloc( view_size, get_unix_prot(vprot) )) == MAP_FAILED)
            {
                if (errno == ENOMEM) return STATUS_NO_MEMORY;
                return STATUS_INVALID_PARAMETER;
            }
            TRACE( "got mem with anon mmap %p-%p\n", ptr, (char *)ptr + size );
            /* if we got something beyond the user limit, unmap it and retry */
            if (is_beyond_limit( ptr, view_size, user_space_limit )) add_reserved_area( ptr, view_size );
            else break;
        }
        ptr = unmap_extra_space( ptr, view_size, size );
    }
done:
    status = create_view( view_ret, ptr, size, vprot );
    if (status != STATUS_SUCCESS) unmap_area( ptr, size );
    return status;
}

/***********************************************************************
 *           map_file_into_view
 *
 * Wrapper for mmap() to map a file into a view, falling back to read if mmap fails.
 * virtual_mutex must be held by caller.
 */
static NTSTATUS map_file_into_view( struct file_view *view, int fd, size_t start, size_t size,
                                    off_t offset, unsigned int vprot, BOOL removable )
{
    void *ptr;
    int prot = get_unix_prot( vprot | VPROT_COMMITTED /* make sure it is accessible */ );
    unsigned int flags = MAP_FIXED | ((vprot & VPROT_WRITECOPY) ? MAP_PRIVATE : MAP_SHARED);

    assert( start < view->size );
    assert( start + size <= view->size );

    if (force_exec_prot && (vprot & VPROT_READ))
    {
        TRACE( "forcing exec permission on mapping %p-%p\n",
               (char *)view->base + start, (char *)view->base + start + size - 1 );
        prot |= PROT_EXEC;
    }

    /* only try mmap if media is not removable (or if we require write access) */
    if (!removable || (flags & MAP_SHARED))
    {
        if (mmap( (char *)view->base + start, size, prot, flags, fd, offset ) != (void *)-1)
            goto done;

        switch (errno)
        {
        case EINVAL:  /* file offset is not page-aligned, fall back to read() */
            if (flags & MAP_SHARED) return STATUS_INVALID_PARAMETER;
            break;
        case ENOEXEC:
        case ENODEV:  /* filesystem doesn't support mmap(), fall back to read() */
            if (vprot & VPROT_WRITE)
            {
                ERR( "shared writable mmap not supported, broken filesystem?\n" );
                return STATUS_NOT_SUPPORTED;
            }
            break;
        case EACCES:
        case EPERM:  /* noexec filesystem, fall back to read() */
            if (flags & MAP_SHARED)
            {
                if (prot & PROT_EXEC) ERR( "failed to set PROT_EXEC on file map, noexec filesystem?\n" );
                return STATUS_ACCESS_DENIED;
            }
            if (prot & PROT_EXEC) WARN( "failed to set PROT_EXEC on file map, noexec filesystem?\n" );
            break;
        default:
            return STATUS_NO_MEMORY;
        }
    }

    /* Reserve the memory with an anonymous mmap */
    ptr = anon_mmap_fixed( (char *)view->base + start, size, PROT_READ | PROT_WRITE, 0 );
    if (ptr == MAP_FAILED) return STATUS_NO_MEMORY;
    /* Now read in the file */
    pread( fd, ptr, size, offset );
    if (prot != (PROT_READ|PROT_WRITE)) mprotect( ptr, size, prot );  /* Set the right protection */
done:
    set_page_vprot( (char *)view->base + start, size, vprot );
    return STATUS_SUCCESS;
}

/***********************************************************************
 *           get_committed_size
 *
 * Get the size of the committed range starting at base.
 * Also return the protections for the first page.
 */
static SIZE_T get_committed_size( struct file_view *view, void *base, BYTE *vprot )
{
    SIZE_T i, start;

    start = ((char *)base - (char *)view->base) >> page_shift;
    *vprot = get_page_vprot( base );

    if (view->protect & SEC_RESERVE)
    {
        SIZE_T ret = 0;
        SERVER_START_REQ( get_mapping_committed_range )
        {
            req->base = wine_server_client_ptr( view->base );
            req->offset = start << page_shift;
            if (!wine_server_call( req ))
            {
                ret = reply->size;
                if (reply->committed)
                {
                    *vprot |= VPROT_COMMITTED;
                    set_page_vprot_bits( base, ret, VPROT_COMMITTED, 0 );
                }
            }
        }
        SERVER_END_REQ;
        return ret;
    }
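    /* not SEC_RESERVE: the commit state is tracked locally, so just scan the
     * per-page flags until the VPROT_COMMITTED bit flips */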
    for (i = start + 1; i < view->size >> page_shift; i++)
        if ((*vprot ^ get_page_vprot( (char *)view->base + (i << page_shift) )) & VPROT_COMMITTED) break;
    return (i - start) << page_shift;
}

/***********************************************************************
 *           decommit_pages
 *
 * Decommit some pages of a given view.
 * virtual_mutex must be held by caller.
 */
static NTSTATUS decommit_pages( struct file_view *view, size_t start, size_t size )
{
    if (anon_mmap_fixed( (char *)view->base + start, size, PROT_NONE, 0 ) != MAP_FAILED)
    {
        set_page_vprot_bits( (char *)view->base + start, size, 0, VPROT_COMMITTED );
        return STATUS_SUCCESS;
    }
    return STATUS_NO_MEMORY;
}

/***********************************************************************
 *           allocate_dos_memory
 *
 * Allocate the DOS memory range.
 */
static NTSTATUS allocate_dos_memory( struct file_view **view, unsigned int vprot )
{
    size_t size;
    void *addr = NULL;
    void * const low_64k = (void *)0x10000;
    const size_t dosmem_size = 0x110000;
    int unix_prot = get_unix_prot( vprot );
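
    /* The DOS range covers the first 0x110000 bytes (1MB of conventional
     * memory plus the 64K HMA). The low 64K is handled separately below
     * because page 0 often cannot be mapped. */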

    /* check for existing view */

    if (find_view_range( 0, dosmem_size )) return STATUS_CONFLICTING_ADDRESSES;

    /* check without the first 64K */

    if (mmap_is_in_reserved_area( low_64k, dosmem_size - 0x10000 ) != 1)
    {
        addr = anon_mmap_tryfixed( low_64k, dosmem_size - 0x10000, unix_prot, 0 );
        if (addr == MAP_FAILED) return map_view( view, NULL, dosmem_size, FALSE, vprot, 0 );
    }

    /* now try to allocate the low 64K too */

    if (mmap_is_in_reserved_area( NULL, 0x10000 ) != 1)
    {
        addr = anon_mmap_tryfixed( (void *)page_size, 0x10000 - page_size, unix_prot, 0 );
        if (addr != MAP_FAILED)
        {
            if (!anon_mmap_fixed( NULL, page_size, unix_prot, 0 ))
            {
                addr = NULL;
                TRACE( "successfully mapped low 64K range\n" );
            }
            else TRACE( "failed to map page 0\n" );
        }
        else
        {
            addr = low_64k;
            TRACE( "failed to map low 64K range\n" );
        }
    }

    /* now reserve the whole range */

    size = (char *)dosmem_size - (char *)addr;
    anon_mmap_fixed( addr, size, unix_prot, 0 );
    return create_view( view, addr, size, vprot );
}

/***********************************************************************
 *           map_pe_header
 *
 * Map the header of a PE file into memory.
 */
static NTSTATUS map_pe_header( void *ptr, size_t size, int fd, BOOL *removable )
{
    if (!size) return STATUS_INVALID_IMAGE_FORMAT;

    if (!*removable)
    {
        if (mmap( ptr, size, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_FIXED|MAP_PRIVATE, fd, 0 ) != (void *)-1)
            return STATUS_SUCCESS;

        switch (errno)
        {
        case EPERM:
        case EACCES:
            WARN( "noexec file system, falling back to read\n" );
            break;
        case ENOEXEC:
        case ENODEV:
            WARN( "file system doesn't support mmap, falling back to read\n" );
            break;
        default:
            return STATUS_NO_MEMORY;
        }
        *removable = TRUE;
    }
    pread( fd, ptr, size, 0 );
    return STATUS_SUCCESS;  /* page protections will be updated later */
}

/***********************************************************************
 *           map_image_into_view
 *
 * Map an executable (PE format) image into an existing view.
 * virtual_mutex must be held by caller.
 */
static NTSTATUS map_image_into_view( struct file_view *view, int fd, void *orig_base,
                                     SIZE_T header_size, ULONG image_flags, int shared_fd, BOOL removable )
{
    IMAGE_DOS_HEADER *dos;
    IMAGE_NT_HEADERS *nt;
    IMAGE_SECTION_HEADER sections[96];
    IMAGE_SECTION_HEADER *sec;
    IMAGE_DATA_DIRECTORY *imports;
    NTSTATUS status = STATUS_CONFLICTING_ADDRESSES;
    int i;
    off_t pos;
    struct stat st;
    char *header_end, *header_start;
    char *ptr = view->base;
    SIZE_T total_size = view->size;

    TRACE_(module)( "mapped PE file at %p-%p\n", ptr, ptr + total_size );

    /* map the header */

    fstat( fd, &st );
    header_size = min( header_size, st.st_size );
    if ((status = map_pe_header( view->base, header_size, fd, &removable ))) return status;

    status = STATUS_INVALID_IMAGE_FORMAT;  /* generic error */
    dos = (IMAGE_DOS_HEADER *)ptr;
    nt = (IMAGE_NT_HEADERS *)(ptr + dos->e_lfanew);
    header_end = ptr + ROUND_SIZE( 0, header_size );
    memset( ptr + header_size, 0, header_end - (ptr + header_size) );
    if ((char *)(nt + 1) > header_end) return status;
    header_start = (char *)&nt->OptionalHeader + nt->FileHeader.SizeOfOptionalHeader;
    if (nt->FileHeader.NumberOfSections > ARRAY_SIZE( sections )) return status;
    if (header_start + sizeof(*sections) * nt->FileHeader.NumberOfSections > header_end) return status;
    /* Some applications (e.g. the Steam version of Borderlands) map over the top of the section
     * headers, so copying the headers into local memory is necessary to properly load them. */
    memcpy( sections, header_start, sizeof(*sections) * nt->FileHeader.NumberOfSections );
    sec = sections;

    imports = nt->OptionalHeader.DataDirectory + IMAGE_DIRECTORY_ENTRY_IMPORT;
    if (!imports->Size || !imports->VirtualAddress) imports = NULL;

    /* check for non page-aligned binary */

    if (image_flags & IMAGE_FLAGS_ImageMappedFlat)
    {
        /* unaligned sections, this happens for native subsystem binaries */
        /* in that case Windows simply maps in the whole file */

        total_size = min( total_size, ROUND_SIZE( 0, st.st_size ));
        if (map_file_into_view( view, fd, 0, total_size, 0, VPROT_COMMITTED | VPROT_READ | VPROT_WRITECOPY,
                                removable ) != STATUS_SUCCESS) return status;

        /* check that all sections are loaded at the right offset */
        if (nt->OptionalHeader.FileAlignment != nt->OptionalHeader.SectionAlignment) return status;
        for (i = 0; i < nt->FileHeader.NumberOfSections; i++)
        {
            if (sec[i].VirtualAddress != sec[i].PointerToRawData)
                return status;  /* Windows refuses to load in that case too */
        }

        /* set the image protections */
        set_vprot( view, ptr, total_size, VPROT_COMMITTED | VPROT_READ | VPROT_WRITECOPY | VPROT_EXEC );

        /* no relocations are performed on non page-aligned binaries */
        return STATUS_SUCCESS;
    }

    /* map all the sections */

    for (i = pos = 0; i < nt->FileHeader.NumberOfSections; i++, sec++)
    {
        static const SIZE_T sector_align = 0x1ff;
        SIZE_T map_size, file_start, file_size, end;

        if (!sec->Misc.VirtualSize)
            map_size = ROUND_SIZE( 0, sec->SizeOfRawData );
        else
            map_size = ROUND_SIZE( 0, sec->Misc.VirtualSize );

        /* file positions are rounded to sector boundaries regardless of OptionalHeader.FileAlignment */
        file_start = sec->PointerToRawData & ~sector_align;
        file_size = (sec->SizeOfRawData + (sec->PointerToRawData & sector_align) + sector_align) & ~sector_align;
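        /* e.g. PointerToRawData 0x2345, SizeOfRawData 0x1000 gives
         * file_start = 0x2200 and file_size = 0x1200, i.e. the 512-byte
         * sectors that fully cover the raw data */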
        if (file_size > map_size) file_size = map_size;

        /* a few sanity checks */
        end = sec->VirtualAddress + ROUND_SIZE( sec->VirtualAddress, map_size );
        if (sec->VirtualAddress > total_size || end > total_size || end < sec->VirtualAddress)
        {
            WARN_(module)( "Section %.8s too large (%x+%lx/%lx)\n",
                           sec->Name, sec->VirtualAddress, map_size, total_size );
            return status;
        }

        if ((sec->Characteristics & IMAGE_SCN_MEM_SHARED) &&
            (sec->Characteristics & IMAGE_SCN_MEM_WRITE))
        {
            TRACE_(module)( "mapping shared section %.8s at %p off %x (%x) size %lx (%lx) flags %x\n",
                            sec->Name, ptr + sec->VirtualAddress,
                            sec->PointerToRawData, (int)pos, file_size, map_size,
                            sec->Characteristics );
            if (map_file_into_view( view, shared_fd, sec->VirtualAddress, map_size, pos,
                                    VPROT_COMMITTED | VPROT_READ | VPROT_WRITE, FALSE ) != STATUS_SUCCESS)
            {
                ERR_(module)( "Could not map shared section %.8s\n", sec->Name );
                return status;
            }

            /* check if the import directory falls inside this section */
            if (imports && imports->VirtualAddress >= sec->VirtualAddress &&
                imports->VirtualAddress < sec->VirtualAddress + map_size)
            {
                UINT_PTR base = imports->VirtualAddress & ~page_mask;
                UINT_PTR end = base + ROUND_SIZE( imports->VirtualAddress, imports->Size );
                if (end > sec->VirtualAddress + map_size) end = sec->VirtualAddress + map_size;
                if (end > base)
                    map_file_into_view( view, shared_fd, base, end - base,
                                        pos + (base - sec->VirtualAddress),
                                        VPROT_COMMITTED | VPROT_READ | VPROT_WRITECOPY, FALSE );
            }
            pos += map_size;
            continue;
        }

        TRACE_(module)( "mapping section %.8s at %p off %x size %x virt %x flags %x\n",
                        sec->Name, ptr + sec->VirtualAddress,
                        sec->PointerToRawData, sec->SizeOfRawData,
                        sec->Misc.VirtualSize, sec->Characteristics );

        if (!sec->PointerToRawData || !file_size) continue;

        /* Note: if the section is not aligned properly map_file_into_view will magically
         * fall back to read(), so we don't need to check anything here.
         */
        end = file_start + file_size;
        if (sec->PointerToRawData >= st.st_size ||
            end > ((st.st_size + sector_align) & ~sector_align) ||
            end < file_start ||
            map_file_into_view( view, fd, sec->VirtualAddress, file_size, file_start,
                                VPROT_COMMITTED | VPROT_READ | VPROT_WRITECOPY,
                                removable ) != STATUS_SUCCESS)
        {
            ERR_(module)( "Could not map section %.8s, file probably truncated\n", sec->Name );
            return status;
        }

        if (file_size & page_mask)
        {
            end = ROUND_SIZE( 0, file_size );
            if (end > map_size) end = map_size;
            TRACE_(module)( "clearing %p - %p\n",
                            ptr + sec->VirtualAddress + file_size,
                            ptr + sec->VirtualAddress + end );
            memset( ptr + sec->VirtualAddress + file_size, 0, end - file_size );
        }
    }

    /* set the image protections */

    set_vprot( view, ptr, ROUND_SIZE( 0, header_size ), VPROT_COMMITTED | VPROT_READ );

    sec = sections;
    for (i = 0; i < nt->FileHeader.NumberOfSections; i++, sec++)
    {
        SIZE_T size;
        BYTE vprot = VPROT_COMMITTED;

        if (sec->Misc.VirtualSize)
            size = ROUND_SIZE( sec->VirtualAddress, sec->Misc.VirtualSize );
        else
            size = ROUND_SIZE( sec->VirtualAddress, sec->SizeOfRawData );

        if (sec->Characteristics & IMAGE_SCN_MEM_READ) vprot |= VPROT_READ;
        if (sec->Characteristics & IMAGE_SCN_MEM_WRITE) vprot |= VPROT_WRITECOPY;
        if (sec->Characteristics & IMAGE_SCN_MEM_EXECUTE) vprot |= VPROT_EXEC;

        /* Dumb game crack lets the AOEP point into a data section. Adjust. */
        if ((nt->OptionalHeader.AddressOfEntryPoint >= sec->VirtualAddress) &&
            (nt->OptionalHeader.AddressOfEntryPoint < sec->VirtualAddress + size))
            vprot |= VPROT_EXEC;

        if (!set_vprot( view, ptr + sec->VirtualAddress, size, vprot ) && (vprot & VPROT_EXEC))
            ERR( "failed to set %08x protection on section %.8s, noexec filesystem?\n",
                 sec->Characteristics, sec->Name );
    }

#ifdef VALGRIND_LOAD_PDB_DEBUGINFO
    VALGRIND_LOAD_PDB_DEBUGINFO( fd, ptr, total_size, ptr - (char *)orig_base );
#endif
    return STATUS_SUCCESS;
}

/***********************************************************************
 *           virtual_map_section
 *
 * Map a file section into memory.
 */
static NTSTATUS virtual_map_section( HANDLE handle, PVOID *addr_ptr, unsigned short zero_bits_64,
                                     SIZE_T commit_size, const LARGE_INTEGER *offset_ptr, SIZE_T *size_ptr,
                                     ULONG alloc_type, ULONG protect, pe_image_info_t *image_info )
{
    NTSTATUS res;
    mem_size_t full_size;
    ACCESS_MASK access;
    SIZE_T size;
    void *base;
    int unix_handle = -1, needs_close;
    int shared_fd = -1, shared_needs_close = 0;
    unsigned int vprot, sec_flags;
    struct file_view *view;
    HANDLE shared_file;
    LARGE_INTEGER offset;
    sigset_t sigset;

    offset.QuadPart = offset_ptr ? offset_ptr->QuadPart : 0;

    switch(protect)
    {
    case PAGE_NOACCESS:
    case PAGE_READONLY:
    case PAGE_WRITECOPY:
        access = SECTION_MAP_READ;
        break;
    case PAGE_READWRITE:
        access = SECTION_MAP_WRITE;
        break;
    case PAGE_EXECUTE:
    case PAGE_EXECUTE_READ:
    case PAGE_EXECUTE_WRITECOPY:
        access = SECTION_MAP_READ | SECTION_MAP_EXECUTE;
        break;
    case PAGE_EXECUTE_READWRITE:
        access = SECTION_MAP_WRITE | SECTION_MAP_EXECUTE;
        break;
    default:
        return STATUS_INVALID_PAGE_PROTECTION;
    }

    SERVER_START_REQ( get_mapping_info )
    {
        req->handle = wine_server_obj_handle( handle );
        req->access = access;
        wine_server_set_reply( req, image_info, sizeof(*image_info) );
        res = wine_server_call( req );
        sec_flags = reply->flags;
        full_size = reply->size;
        shared_file = wine_server_ptr_handle( reply->shared_file );
    }
    SERVER_END_REQ;
    if (res) return res;

    if ((res = server_get_unix_fd( handle, 0, &unix_handle, &needs_close, NULL, NULL )))
    {
        if (shared_file) NtClose( shared_file );
        return res;
    }

    if (shared_file && ((res = server_get_unix_fd( shared_file, FILE_READ_DATA|FILE_WRITE_DATA,
                                                   &shared_fd, &shared_needs_close, NULL, NULL ))))
    {
        NtClose( shared_file );
        if (needs_close) close( unix_handle );
        return res;
    }

    res = STATUS_INVALID_PARAMETER;
    server_enter_uninterrupted_section( &virtual_mutex, &sigset );

    if (sec_flags & SEC_IMAGE)
    {
        base = wine_server_get_ptr( image_info->base );
        if ((ULONG_PTR)base != image_info->base) base = NULL;
        size = image_info->map_size;
        vprot = SEC_IMAGE | SEC_FILE | VPROT_COMMITTED | VPROT_READ | VPROT_EXEC | VPROT_WRITECOPY;

        if ((char *)base >= (char *)address_space_start)  /* make sure the DOS area remains free */
            res = map_view( &view, base, size, alloc_type & MEM_TOP_DOWN, vprot, zero_bits_64 );

        if (res) res = map_view( &view, NULL, size, alloc_type & MEM_TOP_DOWN, vprot, zero_bits_64 );
        if (res) goto done;

        res = map_image_into_view( view, unix_handle, base, image_info->header_size,
                                   image_info->image_flags, shared_fd, needs_close );
    }
    else
    {
        base = *addr_ptr;
        if (offset.QuadPart >= full_size) goto done;
        if (*size_ptr)
        {
            size = *size_ptr;
            if (size > full_size - offset.QuadPart)
            {
                res = STATUS_INVALID_VIEW_SIZE;
                goto done;
            }
        }
        else
        {
            size = full_size - offset.QuadPart;
            if (size != full_size - offset.QuadPart)  /* truncated */
            {
                WARN( "Files larger than 4Gb (%s) not supported on this platform\n",
                      wine_dbgstr_longlong(full_size) );
                goto done;
            }
        }
        if (!(size = ROUND_SIZE( 0, size ))) goto done;  /* wrap-around */

        get_vprot_flags( protect, &vprot, FALSE );
        vprot |= sec_flags;
        if (!(sec_flags & SEC_RESERVE)) vprot |= VPROT_COMMITTED;
        res = map_view( &view, base, size, alloc_type & MEM_TOP_DOWN, vprot, zero_bits_64 );
        if (res) goto done;

        TRACE( "handle=%p size=%lx offset=%x%08x\n", handle, size, offset.u.HighPart, offset.u.LowPart );
        res = map_file_into_view( view, unix_handle, 0, size, offset.QuadPart, vprot, needs_close );
        if (res) ERR( "mapping %p %lx %x%08x failed\n",
                      view->base, size, offset.u.HighPart, offset.u.LowPart );
    }

    if (res == STATUS_SUCCESS)
    {
        SERVER_START_REQ( map_view )
        {
            req->mapping = wine_server_obj_handle( handle );
            req->access = access;
            req->base = wine_server_client_ptr( view->base );
            req->size = size;
            req->start = offset.QuadPart;
            res = wine_server_call( req );
        }
        SERVER_END_REQ;
    }

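    /* note: NTSTATUS values >= 0 also cover informational codes (e.g. when an
     * image could not be mapped at its preferred base), so the view is kept
     * for anything that is not an actual error */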
    if (res >= 0)
    {
        *addr_ptr = view->base;
        *size_ptr = size;
        VIRTUAL_DEBUG_DUMP_VIEW( view );
    }
    else delete_view( view );

done:
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    if (needs_close) close( unix_handle );
    if (shared_needs_close) close( shared_fd );
    if (shared_file) NtClose( shared_file );
    return res;
}

struct alloc_virtual_heap
{
    void *base;
    size_t size;
};

/* callback for mmap_enum_reserved_areas to allocate space for the virtual heap */
static int CDECL alloc_virtual_heap( void *base, SIZE_T size, void *arg )
{
    struct alloc_virtual_heap *alloc = arg;

    if (is_beyond_limit( base, size, address_space_limit )) address_space_limit = (char *)base + size;
    if (size < alloc->size) return 0;
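    /* on 64-bit, skip areas in the low 2GB; presumably these are kept free for
     * allocations that need low addresses (e.g. zero_bits requests) */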
    if (is_win64 && base < (void *)0x80000000) return 0;
    alloc->base = anon_mmap_fixed( (char *)base + size - alloc->size, alloc->size, PROT_READ|PROT_WRITE, 0 );
    return (alloc->base != MAP_FAILED);
}

/***********************************************************************
 *           virtual_init
 */
void virtual_init(void)
{
    const struct preload_info **preload_info = dlsym( RTLD_DEFAULT, "wine_main_preload_info" );
    const char *preload = getenv( "WINEPRELOADRESERVE" );
    struct alloc_virtual_heap alloc_views;
    size_t size;
    int i;
    pthread_mutexattr_t attr;

    pthread_mutexattr_init( &attr );
    pthread_mutexattr_settype( &attr, PTHREAD_MUTEX_RECURSIVE );
    pthread_mutex_init( &virtual_mutex, &attr );
    pthread_mutexattr_destroy( &attr );

    if (preload_info && *preload_info)
        for (i = 0; (*preload_info)[i].size; i++)
            mmap_add_reserved_area( (*preload_info)[i].addr, (*preload_info)[i].size );

    mmap_init( preload_info ? *preload_info : NULL );

    if (preload)
    {
        unsigned long start, end;
        if (sscanf( preload, "%lx-%lx", &start, &end ) == 2)
        {
            preload_reserve_start = (void *)start;
            preload_reserve_end = (void *)end;
            /* some apps start inside the DOS area */
            if (preload_reserve_start)
                address_space_start = min( address_space_start, preload_reserve_start );
        }
    }

    /* try to find space in a reserved area for the views and pages protection table */
#ifdef _WIN64
    pages_vprot_size = ((size_t)address_space_limit >> page_shift >> pages_vprot_shift) + 1;
    alloc_views.size = 2 * view_block_size + pages_vprot_size * sizeof(*pages_vprot);
#else
    alloc_views.size = 2 * view_block_size + (1U << (32 - page_shift));
#endif
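    /* e.g. with 4K pages on 32-bit this is one protection byte per page, i.e.
     * a flat 1MB table; on 64-bit pages_vprot is instead a sparse two-level
     * table sized from the actual address space limit */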
    if (mmap_enum_reserved_areas( alloc_virtual_heap, &alloc_views, 1 ))
        mmap_remove_reserved_area( alloc_views.base, alloc_views.size );
    else
        alloc_views.base = anon_mmap_alloc( alloc_views.size, PROT_READ | PROT_WRITE );

    assert( alloc_views.base != MAP_FAILED );
    view_block_start = alloc_views.base;
    view_block_end = view_block_start + view_block_size / sizeof(*view_block_start);
    free_ranges = (void *)((char *)alloc_views.base + view_block_size);
    pages_vprot = (void *)((char *)alloc_views.base + 2 * view_block_size);
    wine_rb_init( &views_tree, compare_view );

    free_ranges[0].base = (void *)0;
    free_ranges[0].end = (void *)~0;
    free_ranges_end = free_ranges + 1;
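    /* the free-ranges list starts out as a single range covering the whole
     * address space; views carve pieces out of it as they are created */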

    /* make the DOS area accessible (except the low 64K) to hide bugs in broken apps like Excel 2003 */
    size = (char *)address_space_start - (char *)0x10000;
    if (size && mmap_is_in_reserved_area( (void*)0x10000, size ) == 1)
        anon_mmap_fixed( (void *)0x10000, size, PROT_READ | PROT_WRITE, 0 );
}

/***********************************************************************
 *           get_system_affinity_mask
 */
ULONG_PTR get_system_affinity_mask(void)
{
    ULONG num_cpus = NtCurrentTeb()->Peb->NumberOfProcessors;
    if (num_cpus >= sizeof(ULONG_PTR) * 8) return ~(ULONG_PTR)0;
    return ((ULONG_PTR)1 << num_cpus) - 1;
}

/***********************************************************************
 *           virtual_get_system_info
 */
void virtual_get_system_info( SYSTEM_BASIC_INFORMATION *info )
{
#if defined(HAVE_STRUCT_SYSINFO_TOTALRAM) && defined(HAVE_STRUCT_SYSINFO_MEM_UNIT)
    struct sysinfo sinfo;

    if (!sysinfo(&sinfo))
    {
        ULONG64 total = (ULONG64)sinfo.totalram * sinfo.mem_unit;
        info->MmHighestPhysicalPage = max( 1, total / page_size );
    }
#elif defined(_SC_PHYS_PAGES)
    LONG64 phys_pages = sysconf( _SC_PHYS_PAGES );

    info->MmHighestPhysicalPage = max( 1, phys_pages );
#else
    info->MmHighestPhysicalPage = 0x7fffffff / page_size;
#endif

    info->unknown = 0;
    info->KeMaximumIncrement = 0;  /* FIXME */
    info->PageSize = page_size;
    info->MmLowestPhysicalPage = 1;
    info->MmNumberOfPhysicalPages = info->MmHighestPhysicalPage - info->MmLowestPhysicalPage;
    info->AllocationGranularity = granularity_mask + 1;
    info->LowestUserAddress = (void *)0x10000;
    info->HighestUserAddress = (char *)user_space_limit - 1;
    info->ActiveProcessorsAffinityMask = get_system_affinity_mask();
    info->NumberOfProcessors = NtCurrentTeb()->Peb->NumberOfProcessors;
}

/***********************************************************************
 *           virtual_create_builtin_view
 */
NTSTATUS virtual_create_builtin_view( void *module )
{
    NTSTATUS status;
    sigset_t sigset;
    IMAGE_DOS_HEADER *dos = module;
    IMAGE_NT_HEADERS *nt = (IMAGE_NT_HEADERS *)((char *)dos + dos->e_lfanew);
    SIZE_T size = nt->OptionalHeader.SizeOfImage;
    IMAGE_SECTION_HEADER *sec;
    struct file_view *view;
    void *base;
    int i;

    size = ROUND_SIZE( module, size );
    base = ROUND_ADDR( module, page_mask );
    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    status = create_view( &view, base, size, SEC_IMAGE | SEC_FILE | VPROT_SYSTEM |
                          VPROT_COMMITTED | VPROT_READ | VPROT_WRITECOPY | VPROT_EXEC );
    if (!status)
    {
        TRACE( "created %p-%p\n", base, (char *)base + size );

        /* The PE header is always read-only, no write, no execute. */
        set_page_vprot( base, page_size, VPROT_COMMITTED | VPROT_READ );

        sec = (IMAGE_SECTION_HEADER *)((char *)&nt->OptionalHeader + nt->FileHeader.SizeOfOptionalHeader);
        for (i = 0; i < nt->FileHeader.NumberOfSections; i++)
        {
            BYTE flags = VPROT_COMMITTED;

            if (sec[i].Characteristics & IMAGE_SCN_MEM_EXECUTE) flags |= VPROT_EXEC;
            if (sec[i].Characteristics & IMAGE_SCN_MEM_READ) flags |= VPROT_READ;
            if (sec[i].Characteristics & IMAGE_SCN_MEM_WRITE) flags |= VPROT_WRITE;
            set_page_vprot( (char *)base + sec[i].VirtualAddress, sec[i].Misc.VirtualSize, flags );
        }
        VIRTUAL_DEBUG_DUMP_VIEW( view );
        if (is_beyond_limit( base, size, working_set_limit )) working_set_limit = address_space_limit;
    }
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    return status;
}

/* set some initial values in a new TEB */
static void init_teb( TEB *teb, PEB *peb )
{
    struct ntdll_thread_data *thread_data = (struct ntdll_thread_data *)&teb->GdiTebBatch;

#ifndef _WIN64
    TEB64 *teb64 = (TEB64 *)((char *)teb - teb_offset);

    teb64->Peb = PtrToUlong( (char *)peb + page_size );
    teb64->Tib.Self = PtrToUlong( teb64 );
    teb64->Tib.ExceptionList = PtrToUlong( teb );
    teb64->ActivationContextStackPointer = PtrToUlong( &teb64->ActivationContextStack );
    teb64->ActivationContextStack.FrameListCache.Flink =
        teb64->ActivationContextStack.FrameListCache.Blink =
            PtrToUlong( &teb64->ActivationContextStack.FrameListCache );
    teb64->StaticUnicodeString.Buffer = PtrToUlong( teb64->StaticUnicodeBuffer );
    teb64->StaticUnicodeString.MaximumLength = sizeof( teb64->StaticUnicodeBuffer );
#endif
    teb->Peb = peb;
    teb->Tib.Self = &teb->Tib;
    teb->Tib.ExceptionList = (void *)~0ul;
    teb->Tib.StackBase = (void *)~0ul;
    teb->ActivationContextStackPointer = &teb->ActivationContextStack;
    InitializeListHead( &teb->ActivationContextStack.FrameListCache );
    teb->StaticUnicodeString.Buffer = teb->StaticUnicodeBuffer;
    teb->StaticUnicodeString.MaximumLength = sizeof(teb->StaticUnicodeBuffer);
    thread_data->request_fd = -1;
    thread_data->reply_fd = -1;
    thread_data->wait_fd[0] = -1;
    thread_data->wait_fd[1] = -1;
    list_add_head( &teb_list, &thread_data->entry );
}

/***********************************************************************
 *           virtual_alloc_first_teb
 */
TEB *virtual_alloc_first_teb(void)
{
    TEB *teb;
    PEB *peb;
    void *ptr;
    NTSTATUS status;
    SIZE_T data_size = page_size;
    SIZE_T peb_size = page_size * (is_win64 ? 1 : 2);
    SIZE_T block_size = signal_stack_mask + 1;
    SIZE_T total = 32 * block_size;

    /* reserve space for shared user data */
    status = NtAllocateVirtualMemory( NtCurrentProcess(), (void **)&user_shared_data, 0, &data_size,
                                      MEM_RESERVE | MEM_COMMIT, PAGE_READONLY );
    if (status)
    {
        ERR( "wine: failed to map the shared user data: %08x\n", status );
        exit(1);
    }

    NtAllocateVirtualMemory( NtCurrentProcess(), &teb_block, 0, &total,
                             MEM_RESERVE | MEM_TOP_DOWN, PAGE_READWRITE );
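    /* layout of the reserved block: 32 chunks of block_size bytes; TEBs are
     * carved out top-down starting at chunk 30, and the PEB sits at the very
     * top (on 32-bit it spans two pages so that the 64-bit PEB referenced by
     * init_teb() fits in the page above it) */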
    teb_block_pos = 30;
    ptr = (char *)teb_block + 30 * block_size;
    teb = (TEB *)((char *)ptr + teb_offset);
    peb = (PEB *)((char *)teb_block + 32 * block_size - peb_size);
    NtAllocateVirtualMemory( NtCurrentProcess(), (void **)&ptr, 0, &block_size, MEM_COMMIT, PAGE_READWRITE );
    NtAllocateVirtualMemory( NtCurrentProcess(), (void **)&peb, 0, &peb_size, MEM_COMMIT, PAGE_READWRITE );
    init_teb( teb, peb );
    *(ULONG_PTR *)peb->Reserved = get_image_address();
    return teb;
}

/***********************************************************************
 *           virtual_alloc_teb
 */
NTSTATUS virtual_alloc_teb( TEB **ret_teb )
{
    sigset_t sigset;
    TEB *teb;
    void *ptr = NULL;
    NTSTATUS status = STATUS_SUCCESS;
    SIZE_T block_size = signal_stack_mask + 1;

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    if (next_free_teb)
    {
        ptr = next_free_teb;
        next_free_teb = *(void **)ptr;
        memset( ptr, 0, teb_size );
    }
    else
    {
        if (!teb_block_pos)
        {
            SIZE_T total = 32 * block_size;

            if ((status = NtAllocateVirtualMemory( NtCurrentProcess(), &ptr, 0, &total,
                                                   MEM_RESERVE, PAGE_READWRITE )))
            {
                server_leave_uninterrupted_section( &virtual_mutex, &sigset );
                return status;
            }
            teb_block = ptr;
            teb_block_pos = 32;
        }
        ptr = (char *)teb_block + --teb_block_pos * block_size;
        NtAllocateVirtualMemory( NtCurrentProcess(), (void **)&ptr, 0, &block_size,
                                 MEM_COMMIT, PAGE_READWRITE );
    }
    *ret_teb = teb = (TEB *)((char *)ptr + teb_offset);
    init_teb( teb, NtCurrentTeb()->Peb );
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );

    if ((status = signal_alloc_thread( teb )))
    {
        server_enter_uninterrupted_section( &virtual_mutex, &sigset );
        *(void **)ptr = next_free_teb;
        next_free_teb = ptr;
        server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    }
    return status;
}

/***********************************************************************
 *           virtual_free_teb
 */
void virtual_free_teb( TEB *teb )
{
    struct ntdll_thread_data *thread_data = (struct ntdll_thread_data *)&teb->GdiTebBatch;
    void *ptr;
    SIZE_T size;
    sigset_t sigset;

    signal_free_thread( teb );
    if (teb->DeallocationStack)
    {
        size = 0;
        NtFreeVirtualMemory( GetCurrentProcess(), &teb->DeallocationStack, &size, MEM_RELEASE );
    }
    if (thread_data->start_stack)
    {
        size = 0;
        NtFreeVirtualMemory( GetCurrentProcess(), &thread_data->start_stack, &size, MEM_RELEASE );
    }

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    list_remove( &thread_data->entry );
    ptr = (char *)teb - teb_offset;
    *(void **)ptr = next_free_teb;
    next_free_teb = ptr;
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
}

/***********************************************************************
 *           virtual_clear_tls_index
 */
NTSTATUS virtual_clear_tls_index( ULONG index )
{
    struct ntdll_thread_data *thread_data;
    sigset_t sigset;

    if (index < TLS_MINIMUM_AVAILABLE)
    {
        server_enter_uninterrupted_section( &virtual_mutex, &sigset );
        LIST_FOR_EACH_ENTRY( thread_data, &teb_list, struct ntdll_thread_data, entry )
        {
            TEB *teb = CONTAINING_RECORD( thread_data, TEB, GdiTebBatch );
            teb->TlsSlots[index] = 0;
        }
        server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    }
    else
    {
        index -= TLS_MINIMUM_AVAILABLE;
        if (index >= 8 * sizeof(NtCurrentTeb()->Peb->TlsExpansionBitmapBits))
            return STATUS_INVALID_PARAMETER;

        server_enter_uninterrupted_section( &virtual_mutex, &sigset );
        LIST_FOR_EACH_ENTRY( thread_data, &teb_list, struct ntdll_thread_data, entry )
        {
            TEB *teb = CONTAINING_RECORD( thread_data, TEB, GdiTebBatch );
            if (teb->TlsExpansionSlots) teb->TlsExpansionSlots[index] = 0;
        }
        server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    }
    return STATUS_SUCCESS;
}

/***********************************************************************
 *           virtual_alloc_thread_stack
 */
NTSTATUS virtual_alloc_thread_stack( INITIAL_TEB *stack, SIZE_T reserve_size, SIZE_T commit_size,
                                     SIZE_T *pthread_size )
{
    struct file_view *view;
    NTSTATUS status;
    sigset_t sigset;
    SIZE_T size, extra_size = 0;

    if (!reserve_size || !commit_size)
    {
        IMAGE_NT_HEADERS *nt = get_exe_nt_header();
        if (!reserve_size) reserve_size = nt->OptionalHeader.SizeOfStackReserve;
        if (!commit_size) commit_size = nt->OptionalHeader.SizeOfStackCommit;
    }

    size = max( reserve_size, commit_size );
    if (size < 1024 * 1024) size = 1024 * 1024;  /* Xlib needs a large stack */
    size = (size + 0xffff) & ~0xffff;  /* round to 64K boundary */
    if (pthread_size) *pthread_size = extra_size = max( page_size, ROUND_SIZE( 0, *pthread_size ));

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );

    if ((status = map_view( &view, NULL, size + extra_size, FALSE,
                            VPROT_READ | VPROT_WRITE | VPROT_COMMITTED, 0 )) != STATUS_SUCCESS)
        goto done;

#ifdef VALGRIND_STACK_REGISTER
    VALGRIND_STACK_REGISTER( view->base, (char *)view->base + view->size );
#endif

    /* setup no access guard page */
    set_page_vprot( view->base, page_size, VPROT_COMMITTED );
    set_page_vprot( (char *)view->base + page_size, page_size,
                    VPROT_READ | VPROT_WRITE | VPROT_COMMITTED | VPROT_GUARD );
    mprotect_range( view->base, 2 * page_size, 0, 0 );
    VIRTUAL_DEBUG_DUMP_VIEW( view );

    if (extra_size)
    {
        struct file_view *extra_view;

        /* shrink the first view and create a second one for the extra size */
        /* this allows the app to free the stack without freeing the thread start portion */
        view->size -= extra_size;
        status = create_view( &extra_view, (char *)view->base + view->size, extra_size,
                              VPROT_READ | VPROT_WRITE | VPROT_COMMITTED );
        if (status != STATUS_SUCCESS)
        {
            view->size += extra_size;
            delete_view( view );
            goto done;
        }
    }

    /* note: limit is lower than base since the stack grows down */
    stack->OldStackBase = 0;
    stack->OldStackLimit = 0;
    stack->DeallocationStack = view->base;
    stack->StackBase = (char *)view->base + view->size;
    stack->StackLimit = (char *)view->base + 2 * page_size;
done:
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    return status;
}

/***********************************************************************
 *           virtual_map_user_shared_data
 */
void virtual_map_user_shared_data(void)
{
    static const WCHAR nameW[] = {'\\','K','e','r','n','e','l','O','b','j','e','c','t','s',
                                  '\\','_','_','w','i','n','e','_','u','s','e','r','_','s','h','a','r','e','d','_','d','a','t','a',0};
    UNICODE_STRING name_str = { sizeof(nameW) - sizeof(WCHAR), sizeof(nameW), (WCHAR *)nameW };
    OBJECT_ATTRIBUTES attr = { sizeof(attr), 0, &name_str };
    NTSTATUS status;
    HANDLE section;
    int res, fd, needs_close;

    if ((status = NtOpenSection( &section, SECTION_ALL_ACCESS, &attr )))
    {
        ERR( "failed to open the USD section: %08x\n", status );
        exit(1);
    }
    if ((res = server_get_unix_fd( section, 0, &fd, &needs_close, NULL, NULL )) ||
        (user_shared_data != mmap( user_shared_data, page_size, PROT_READ, MAP_SHARED|MAP_FIXED, fd, 0 )))
    {
        ERR( "failed to remap the process USD: %d\n", res );
        exit(1);
    }
    if (needs_close) close( fd );
    NtClose( section );
}

/***********************************************************************
 *           grow_thread_stack
 */
static NTSTATUS grow_thread_stack( char *page )
{
    NTSTATUS ret = 0;
    size_t guaranteed = max( NtCurrentTeb()->GuaranteedStackBytes, page_size * (is_win64 ? 2 : 1) );

    set_page_vprot_bits( page, page_size, 0, VPROT_GUARD );
    mprotect_range( page, page_size, 0, 0 );
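    /* the faulting guard page is now an ordinary committed page; if there is
     * still room above the guaranteed area, move the guard one page down,
     * otherwise commit the guaranteed area and report the overflow */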
    if (page >= (char *)NtCurrentTeb()->DeallocationStack + page_size + guaranteed)
    {
        set_page_vprot_bits( page - page_size, page_size, VPROT_COMMITTED | VPROT_GUARD, 0 );
        mprotect_range( page - page_size, page_size, 0, 0 );
    }
    else  /* inside guaranteed space -> overflow exception */
    {
        page = (char *)NtCurrentTeb()->DeallocationStack + page_size;
        set_page_vprot_bits( page, guaranteed, VPROT_COMMITTED, VPROT_GUARD );
        mprotect_range( page, guaranteed, 0, 0 );
        ret = STATUS_STACK_OVERFLOW;
    }
    NtCurrentTeb()->Tib.StackLimit = page;
    return ret;
}

/***********************************************************************
 *           virtual_handle_fault
 */
NTSTATUS virtual_handle_fault( void *addr, DWORD err, void *stack )
{
    NTSTATUS ret = STATUS_ACCESS_VIOLATION;
    char *page = ROUND_ADDR( addr, page_mask );
    BYTE vprot;

    mutex_lock( &virtual_mutex );  /* no need for signal masking inside signal handler */
    vprot = get_page_vprot( page );
    if (!is_inside_signal_stack( stack ) && (vprot & VPROT_GUARD))
    {
        if (page < (char *)NtCurrentTeb()->DeallocationStack ||
            page >= (char *)NtCurrentTeb()->Tib.StackBase)
        {
            set_page_vprot_bits( page, page_size, 0, VPROT_GUARD );
            mprotect_range( page, page_size, 0, 0 );
            ret = STATUS_GUARD_PAGE_VIOLATION;
        }
        else ret = grow_thread_stack( page );
    }
    else if (err & EXCEPTION_WRITE_FAULT)
    {
        if (vprot & VPROT_WRITEWATCH)
        {
            set_page_vprot_bits( page, page_size, 0, VPROT_WRITEWATCH );
            mprotect_range( page, page_size, 0, 0 );
        }
        /* ignore fault if page is writable now */
        if (get_unix_prot( get_page_vprot( page )) & PROT_WRITE)
        {
            if ((vprot & VPROT_WRITEWATCH) || is_write_watch_range( page, page_size ))
                ret = STATUS_SUCCESS;
        }
    }
    mutex_unlock( &virtual_mutex );
    return ret;
}

/***********************************************************************
 *           virtual_setup_exception
 */
void *virtual_setup_exception( void *stack_ptr, size_t size, EXCEPTION_RECORD *rec )
{
    char *stack = stack_ptr;

    if (is_inside_signal_stack( stack ))
    {
        ERR( "nested exception on signal stack in thread %04x addr %p stack %p (%p-%p-%p)\n",
             GetCurrentThreadId(), rec->ExceptionAddress, stack, NtCurrentTeb()->DeallocationStack,
             NtCurrentTeb()->Tib.StackLimit, NtCurrentTeb()->Tib.StackBase );
        abort_thread(1);
    }

    if (stack - size > stack ||  /* check for overflow in subtraction */
        stack <= (char *)NtCurrentTeb()->DeallocationStack ||
        stack > (char *)NtCurrentTeb()->Tib.StackBase)
    {
        WARN( "exception outside of stack limits in thread %04x addr %p stack %p (%p-%p-%p)\n",
              GetCurrentThreadId(), rec->ExceptionAddress, stack, NtCurrentTeb()->DeallocationStack,
              NtCurrentTeb()->Tib.StackLimit, NtCurrentTeb()->Tib.StackBase );
        return stack - size;
    }

    stack -= size;

    if (stack < (char *)NtCurrentTeb()->DeallocationStack + 4096)
    {
        /* stack overflow on last page, unrecoverable */
        UINT diff = (char *)NtCurrentTeb()->DeallocationStack + 4096 - stack;
        ERR( "stack overflow %u bytes in thread %04x addr %p stack %p (%p-%p-%p)\n",
             diff, GetCurrentThreadId(), rec->ExceptionAddress, stack, NtCurrentTeb()->DeallocationStack,
             NtCurrentTeb()->Tib.StackLimit, NtCurrentTeb()->Tib.StackBase );
        abort_thread(1);
    }
    else if (stack < (char *)NtCurrentTeb()->Tib.StackLimit)
    {
        mutex_lock( &virtual_mutex );  /* no need for signal masking inside signal handler */
        if ((get_page_vprot( stack ) & VPROT_GUARD) && grow_thread_stack( ROUND_ADDR( stack, page_mask )))
        {
            rec->ExceptionCode = STATUS_STACK_OVERFLOW;
            rec->NumberParameters = 0;
        }
        mutex_unlock( &virtual_mutex );
    }
#if defined(VALGRIND_MAKE_MEM_UNDEFINED)
    VALGRIND_MAKE_MEM_UNDEFINED( stack, size );
#elif defined(VALGRIND_MAKE_WRITABLE)
    VALGRIND_MAKE_WRITABLE( stack, size );
#endif
    return stack;
}

/***********************************************************************
 *           check_write_access
 *
 * Check if the memory range is writable, temporarily disabling write watches if necessary.
 */
static NTSTATUS check_write_access( void *base, size_t size, BOOL *has_write_watch )
{
    size_t i;
    char *addr = ROUND_ADDR( base, page_mask );

    size = ROUND_SIZE( base, size );
    for (i = 0; i < size; i += page_size)
    {
        BYTE vprot = get_page_vprot( addr + i );
        if (vprot & VPROT_WRITEWATCH) *has_write_watch = TRUE;
        if (!(get_unix_prot( vprot & ~VPROT_WRITEWATCH ) & PROT_WRITE))
            return STATUS_INVALID_USER_BUFFER;
    }
    if (*has_write_watch)
        mprotect_range( addr, size, 0, VPROT_WRITEWATCH );  /* temporarily enable write access */
    return STATUS_SUCCESS;
}

/***********************************************************************
 *           virtual_locked_server_call
 */
unsigned int virtual_locked_server_call( void *req_ptr )
{
    struct __server_request_info * const req = req_ptr;
    sigset_t sigset;
    void *addr = req->reply_data;
    data_size_t size = req->u.req.request_header.reply_size;
    BOOL has_write_watch = FALSE;
    unsigned int ret = STATUS_ACCESS_VIOLATION;

    if (!size) return wine_server_call( req_ptr );

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    if (!(ret = check_write_access( addr, size, &has_write_watch )))
    {
        ret = server_call_unlocked( req );
        if (has_write_watch) update_write_watches( addr, size, wine_server_reply_size( req ));
    }
    else memset( &req->u.reply, 0, sizeof(req->u.reply) );
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    return ret;
}

/***********************************************************************
 *           virtual_locked_read
 */
ssize_t virtual_locked_read( int fd, void *addr, size_t size )
{
    sigset_t sigset;
    BOOL has_write_watch = FALSE;
    int err = EFAULT;

    ssize_t ret = read( fd, addr, size );
    if (ret != -1 || errno != EFAULT) return ret;

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    if (!check_write_access( addr, size, &has_write_watch ))
    {
        ret = read( fd, addr, size );
        err = errno;
        if (has_write_watch) update_write_watches( addr, size, max( 0, ret ));
    }
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    errno = err;
    return ret;
}

/***********************************************************************
 *           virtual_locked_pread
 */
ssize_t virtual_locked_pread( int fd, void *addr, size_t size, off_t offset )
{
    sigset_t sigset;
    BOOL has_write_watch = FALSE;
    int err = EFAULT;

    ssize_t ret = pread( fd, addr, size, offset );
    if (ret != -1 || errno != EFAULT) return ret;

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    if (!check_write_access( addr, size, &has_write_watch ))
    {
        ret = pread( fd, addr, size, offset );
        err = errno;
        if (has_write_watch) update_write_watches( addr, size, max( 0, ret ));
    }
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    errno = err;
    return ret;
}

/***********************************************************************
 *           __wine_locked_recvmsg   (NTDLL.@)
 */
ssize_t CDECL __wine_locked_recvmsg( int fd, struct msghdr *hdr, int flags )
{
    sigset_t sigset;
    size_t i;
    BOOL has_write_watch = FALSE;
    int err = EFAULT;

    ssize_t ret = recvmsg( fd, hdr, flags );
    if (ret != -1 || errno != EFAULT) return ret;

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    for (i = 0; i < hdr->msg_iovlen; i++)
        if (check_write_access( hdr->msg_iov[i].iov_base, hdr->msg_iov[i].iov_len, &has_write_watch ))
            break;
    if (i == hdr->msg_iovlen)
    {
        ret = recvmsg( fd, hdr, flags );
        err = errno;
    }
    if (has_write_watch)
        while (i--) update_write_watches( hdr->msg_iov[i].iov_base, hdr->msg_iov[i].iov_len, 0 );

    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    errno = err;
    return ret;
}

/***********************************************************************
 *           virtual_is_valid_code_address
 */
BOOL virtual_is_valid_code_address( const void *addr, SIZE_T size )
{
    struct file_view *view;
    BOOL ret = FALSE;
    sigset_t sigset;

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    if ((view = find_view( addr, size )))
        ret = !(view->protect & VPROT_SYSTEM);  /* system views are not visible to the app */
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    return ret;
}

/***********************************************************************
 *           virtual_check_buffer_for_read
 *
 * Check if a memory buffer can be read, triggering page faults if needed for DIB section access.
 */
BOOL virtual_check_buffer_for_read( const void *ptr, SIZE_T size )
{
    if (!size) return TRUE;
    if (!ptr) return FALSE;

    __TRY
    {
        volatile const char *p = ptr;
        char dummy __attribute__((unused));
        SIZE_T count = size;

        while (count > page_size)
        {
            dummy = *p;
            p += page_size;
            count -= page_size;
        }
        dummy = p[0];
        dummy = p[count - 1];
    }
    __EXCEPT_SYSCALL
    {
        return FALSE;
    }
    __ENDTRY
    return TRUE;
}

/***********************************************************************
 *           virtual_check_buffer_for_write
 *
 * Check if a memory buffer can be written to, triggering page faults if needed for write watches.
 */
BOOL virtual_check_buffer_for_write( void *ptr, SIZE_T size )
{
    if (!size) return TRUE;
    if (!ptr) return FALSE;

    __TRY
    {
        volatile char *p = ptr;
        SIZE_T count = size;

        while (count > page_size)
        {
            *p |= 0;
            p += page_size;
            count -= page_size;
        }
        p[0] |= 0;
        p[count - 1] |= 0;
    }
    __EXCEPT_SYSCALL
    {
        return FALSE;
    }
    __ENDTRY
    return TRUE;
}

/***********************************************************************
 *           virtual_uninterrupted_read_memory
 *
 * Similar to NtReadVirtualMemory, but without wineserver calls. Moreover,
 * permissions are checked before accessing each page, to ensure that no
 * exceptions can happen.
 */
SIZE_T virtual_uninterrupted_read_memory( const void *addr, void *buffer, SIZE_T size )
{
    struct file_view *view;
    sigset_t sigset;
    SIZE_T bytes_read = 0;

    if (!size) return 0;

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    if ((view = find_view( addr, size )))
    {
        if (!(view->protect & VPROT_SYSTEM))
        {
            while (bytes_read < size && (get_unix_prot( get_page_vprot( addr )) & PROT_READ))
            {
                SIZE_T block_size = min( size - bytes_read, page_size - ((UINT_PTR)addr & page_mask) );
                memcpy( buffer, addr, block_size );

                addr = (const void *)((const char *)addr + block_size);
                buffer = (void *)((char *)buffer + block_size);
                bytes_read += block_size;
            }
        }
    }
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    return bytes_read;
}

/***********************************************************************
 *           virtual_uninterrupted_write_memory
 *
 * Similar to NtWriteVirtualMemory, but without wineserver calls. Moreover,
 * permissions are checked before accessing each page, to ensure that no
 * exceptions can happen.
 */
NTSTATUS virtual_uninterrupted_write_memory( void *addr, const void *buffer, SIZE_T size )
{
    BOOL has_write_watch = FALSE;
    sigset_t sigset;
    NTSTATUS ret;

    if (!size) return STATUS_SUCCESS;

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    if (!(ret = check_write_access( addr, size, &has_write_watch )))
    {
        memcpy( addr, buffer, size );
        if (has_write_watch) update_write_watches( addr, size, size );
    }
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    return ret;
}

/***********************************************************************
 *           virtual_set_force_exec
 *
 * Whether to force exec prot on all views.
 */
void virtual_set_force_exec( BOOL enable )
{
    struct file_view *view;
    sigset_t sigset;

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    if (!force_exec_prot != !enable)  /* change all existing views */
    {
        force_exec_prot = enable;

        WINE_RB_FOR_EACH_ENTRY( view, &views_tree, struct file_view, entry )
        {
            /* file mappings are always accessible */
            BYTE commit = is_view_valloc( view ) ? 0 : VPROT_COMMITTED;

            mprotect_range( view->base, view->size, commit, 0 );
        }
    }
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
}

struct free_range
{
    char *base;
    char *limit;
};

/* free reserved areas above the limit; callback for mmap_enum_reserved_areas */
static int CDECL free_reserved_memory( void *base, SIZE_T size, void *arg )
{
    struct free_range *range = arg;

    if ((char *)base >= range->limit) return 0;
    if ((char *)base + size <= range->base) return 0;
    if ((char *)base < range->base)
    {
        size -= range->base - (char *)base;
        base = range->base;
    }
    if ((char *)base + size > range->limit) size = range->limit - (char *)base;
    remove_reserved_area( base, size );
    return 1;  /* stop enumeration since the list has changed */
}

/***********************************************************************
 *           virtual_release_address_space
 *
 * Release some address space once we have loaded and initialized the app.
 */
void CDECL virtual_release_address_space(void)
{
    struct free_range range;
    sigset_t sigset;

    if (is_win64) return;
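
    /* 32-bit only: large-address-aware apps (user_space_limit above 2GB) get
     * the reserved ranges above 0x82000000 released; otherwise part of the
     * 0x20000000-0x7f000000 range is given back instead */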
    server_enter_uninterrupted_section( &virtual_mutex, &sigset );

    range.base = (char *)0x82000000;
    range.limit = user_space_limit;

    if (range.limit > range.base)
    {
        while (mmap_enum_reserved_areas( free_reserved_memory, &range, 1 )) /* nothing */;
#ifdef __APPLE__
        /* On macOS, we still want to free some of low memory, for OpenGL resources */
        range.base = (char *)0x40000000;
#else
        range.base = NULL;
#endif
    }
    else
        range.base = (char *)0x20000000;

    if (range.base)
    {
        range.limit = (char *)0x7f000000;
        while (mmap_enum_reserved_areas( free_reserved_memory, &range, 0 )) /* nothing */;
    }

    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
}

/***********************************************************************
 *           virtual_set_large_address_space
 *
 * Enable use of a large address space when allowed by the application.
 */
void virtual_set_large_address_space(void)
{
    /* no large address space on win9x */
    if (NtCurrentTeb()->Peb->OSPlatformId != VER_PLATFORM_WIN32_NT) return;

    user_space_limit = working_set_limit = address_space_limit;
}

/***********************************************************************
 *             NtAllocateVirtualMemory   (NTDLL.@)
 *             ZwAllocateVirtualMemory   (NTDLL.@)
 */
NTSTATUS WINAPI NtAllocateVirtualMemory( HANDLE process, PVOID *ret, ULONG_PTR zero_bits,
                                         SIZE_T *size_ptr, ULONG type, ULONG protect )
{
    void *base;
    unsigned int vprot;
    BOOL is_dos_memory = FALSE;
    struct file_view *view;
    sigset_t sigset;
    SIZE_T size = *size_ptr;
    NTSTATUS status = STATUS_SUCCESS;
    unsigned short zero_bits_64 = zero_bits_win_to_64( zero_bits );

    TRACE("%p %p %08lx %x %08x\n", process, *ret, size, type, protect );

    if (!size) return STATUS_INVALID_PARAMETER;
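    /* zero_bits values up to 21 are a count of leading address bits that must
     * be zero; values of 32 and above are instead interpreted as an address
     * mask, which is only valid on 64-bit or for a wow64 process */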
    if (zero_bits > 21 && zero_bits < 32) return STATUS_INVALID_PARAMETER_3;
    if (!is_win64 && !is_wow64 && zero_bits >= 32) return STATUS_INVALID_PARAMETER_3;

    if (process != NtCurrentProcess())
    {
        apc_call_t call;
        apc_result_t result;

        memset( &call, 0, sizeof(call) );

        call.virtual_alloc.type = APC_VIRTUAL_ALLOC;
        call.virtual_alloc.addr = wine_server_client_ptr( *ret );
        call.virtual_alloc.size = *size_ptr;
        call.virtual_alloc.zero_bits = zero_bits;
        call.virtual_alloc.op_type = type;
        call.virtual_alloc.prot = protect;
        status = server_queue_process_apc( process, &call, &result );
        if (status != STATUS_SUCCESS) return status;

        if (result.virtual_alloc.status == STATUS_SUCCESS)
        {
            *ret = wine_server_get_ptr( result.virtual_alloc.addr );
            *size_ptr = result.virtual_alloc.size;
        }
        return result.virtual_alloc.status;
    }

    /* Round parameters to a page boundary */

    if (is_beyond_limit( 0, size, working_set_limit )) return STATUS_WORKING_SET_LIMIT_RANGE;

    if (*ret)
    {
        if (type & MEM_RESERVE)  /* Round down to 64k boundary */
            base = ROUND_ADDR( *ret, granularity_mask );
        else
            base = ROUND_ADDR( *ret, page_mask );
        size = (((UINT_PTR)*ret + size + page_mask) & ~page_mask) - (UINT_PTR)base;

        /* disallow low 64k, wrap-around and kernel space */
        if (((char *)base < (char *)0x10000) ||
            ((char *)base + size < (char *)base) ||
            is_beyond_limit( base, size, address_space_limit ))
        {
            /* address 1 is magic to mean DOS area */
            if (!base && *ret == (void *)1 && size == 0x110000) is_dos_memory = TRUE;
            else return STATUS_INVALID_PARAMETER;
        }
    }
    else
    {
        base = NULL;
        size = (size + page_mask) & ~page_mask;
    }

    /* Compute the alloc type flags */

    if (!(type & (MEM_COMMIT | MEM_RESERVE | MEM_RESET)) ||
        (type & ~(MEM_COMMIT | MEM_RESERVE | MEM_TOP_DOWN | MEM_WRITE_WATCH | MEM_RESET)))
    {
        WARN("called with wrong alloc type flags (%08x) !\n", type);
        return STATUS_INVALID_PARAMETER;
    }

    /* Reserve the memory */

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );

    if ((type & MEM_RESERVE) || !base)
    {
        if (!(status = get_vprot_flags( protect, &vprot, FALSE )))
        {
            if (type & MEM_COMMIT) vprot |= VPROT_COMMITTED;
            if (type & MEM_WRITE_WATCH) vprot |= VPROT_WRITEWATCH;
            if (protect & PAGE_NOCACHE) vprot |= SEC_NOCACHE;

            if (vprot & VPROT_WRITECOPY) status = STATUS_INVALID_PAGE_PROTECTION;
            else if (is_dos_memory) status = allocate_dos_memory( &view, vprot );
            else status = map_view( &view, base, size, type & MEM_TOP_DOWN, vprot, zero_bits_64 );

            if (status == STATUS_SUCCESS) base = view->base;
        }
    }
    else if (type & MEM_RESET)
    {
        if (!(view = find_view( base, size ))) status = STATUS_NOT_MAPPED_VIEW;
        else madvise( base, size, MADV_DONTNEED );
    }
    else  /* commit the pages */
    {
        if (!(view = find_view( base, size ))) status = STATUS_NOT_MAPPED_VIEW;
        else if (view->protect & SEC_FILE) status = STATUS_ALREADY_COMMITTED;
        else if (!(status = set_protection( view, base, size, protect )) && (view->protect & SEC_RESERVE))
        {
            SERVER_START_REQ( add_mapping_committed_range )
            {
                req->base = wine_server_client_ptr( view->base );
                req->offset = (char *)base - (char *)view->base;
                req->size = size;
                wine_server_call( req );
            }
            SERVER_END_REQ;
        }
    }

    if (!status) VIRTUAL_DEBUG_DUMP_VIEW( view );

    server_leave_uninterrupted_section( &virtual_mutex, &sigset );

    if (status == STATUS_SUCCESS)
    {
        *ret = base;
        *size_ptr = size;
    }
    return status;
}

/***********************************************************************
 *             NtFreeVirtualMemory   (NTDLL.@)
 *             ZwFreeVirtualMemory   (NTDLL.@)
 */
NTSTATUS WINAPI NtFreeVirtualMemory( HANDLE process, PVOID *addr_ptr, SIZE_T *size_ptr, ULONG type )
{
    struct file_view *view;
    char *base;
    sigset_t sigset;
    NTSTATUS status = STATUS_SUCCESS;
    LPVOID addr = *addr_ptr;
    SIZE_T size = *size_ptr;

    TRACE("%p %p %08lx %x\n", process, addr, size, type );

    if (process != NtCurrentProcess())
    {
        apc_call_t call;
        apc_result_t result;

        memset( &call, 0, sizeof(call) );

        call.virtual_free.type = APC_VIRTUAL_FREE;
        call.virtual_free.addr = wine_server_client_ptr( addr );
        call.virtual_free.size = size;
        call.virtual_free.op_type = type;
        status = server_queue_process_apc( process, &call, &result );
        if (status != STATUS_SUCCESS) return status;

        if (result.virtual_free.status == STATUS_SUCCESS)
        {
            *addr_ptr = wine_server_get_ptr( result.virtual_free.addr );
            *size_ptr = result.virtual_free.size;
        }
        return result.virtual_free.status;
    }

    /* Fix the parameters */

    size = ROUND_SIZE( addr, size );
    base = ROUND_ADDR( addr, page_mask );

    /* avoid freeing the DOS area when a broken app passes a NULL pointer */
    if (!base) return STATUS_INVALID_PARAMETER;

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );

    if (!(view = find_view( base, size )) || !is_view_valloc( view ))
    {
        status = STATUS_INVALID_PARAMETER;
    }
    else if (type == MEM_RELEASE)
    {
        /* Free the pages */
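        /* MEM_RELEASE frees the whole view: the size must be zero and the
         * address must be the base returned by the original allocation */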
        if (size || (base != view->base)) status = STATUS_INVALID_PARAMETER;
        else
        {
            delete_view( view );
            *addr_ptr = base;
            *size_ptr = size;
        }
    }
    else if (type == MEM_DECOMMIT)
    {
        status = decommit_pages( view, base - (char *)view->base, size );
        if (status == STATUS_SUCCESS)
        {
            *addr_ptr = base;
            *size_ptr = size;
        }
    }
    else
    {
        WARN("called with wrong free type flags (%08x) !\n", type);
        status = STATUS_INVALID_PARAMETER;
    }

    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    return status;
}

/***********************************************************************
 *             NtProtectVirtualMemory   (NTDLL.@)
 *             ZwProtectVirtualMemory   (NTDLL.@)
 */
NTSTATUS WINAPI NtProtectVirtualMemory( HANDLE process, PVOID *addr_ptr, SIZE_T *size_ptr,
                                        ULONG new_prot, ULONG *old_prot )
{
    struct file_view *view;
    sigset_t sigset;
    NTSTATUS status = STATUS_SUCCESS;
    char *base;
    BYTE vprot;
    SIZE_T size = *size_ptr;
    LPVOID addr = *addr_ptr;
    DWORD old;

    TRACE("%p %p %08lx %08x\n", process, addr, size, new_prot );

    if (!old_prot)
        return STATUS_ACCESS_VIOLATION;

    if (process != NtCurrentProcess())
    {
        apc_call_t call;
        apc_result_t result;

        memset( &call, 0, sizeof(call) );

        call.virtual_protect.type = APC_VIRTUAL_PROTECT;
        call.virtual_protect.addr = wine_server_client_ptr( addr );
        call.virtual_protect.size = size;
        call.virtual_protect.prot = new_prot;
        status = server_queue_process_apc( process, &call, &result );
        if (status != STATUS_SUCCESS) return status;

        if (result.virtual_protect.status == STATUS_SUCCESS)
        {
            *addr_ptr = wine_server_get_ptr( result.virtual_protect.addr );
            *size_ptr = result.virtual_protect.size;
            *old_prot = result.virtual_protect.prot;
        }
        return result.virtual_protect.status;
    }

    /* Fix the parameters */

    size = ROUND_SIZE( addr, size );
    base = ROUND_ADDR( addr, page_mask );

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );

    if ((view = find_view( base, size )))
    {
        /* Make sure all the pages are committed */
        if (get_committed_size( view, base, &vprot ) >= size && (vprot & VPROT_COMMITTED))
        {
            old = get_win32_prot( vprot, view->protect );
            status = set_protection( view, base, size, new_prot );
        }
        else status = STATUS_NOT_COMMITTED;
    }
    else status = STATUS_INVALID_PARAMETER;

    if (!status) VIRTUAL_DEBUG_DUMP_VIEW( view );

    server_leave_uninterrupted_section( &virtual_mutex, &sigset );

    if (status == STATUS_SUCCESS)
    {
        *addr_ptr = base;
        *size_ptr = size;
        *old_prot = old;
    }
    return status;
}



/* retrieve state for a free memory area; callback for mmap_enum_reserved_areas */
static int CDECL get_free_mem_state_callback( void *start, SIZE_T size, void *arg )
{
    MEMORY_BASIC_INFORMATION *info = arg;
    void *end = (char *)start + size;
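
    /* callback contract: return 0 to keep enumerating the reserved areas,
     * non-zero once the info structure has been filled in */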
    if ((char *)info->BaseAddress + info->RegionSize <= (char *)start) return 0;

    if (info->BaseAddress >= end)
    {
        if (info->AllocationBase < end) info->AllocationBase = end;
        return 0;
    }

    if (info->BaseAddress >= start || start <= address_space_start)
    {
        /* it's a real free area */
        info->State = MEM_FREE;
        info->Protect = PAGE_NOACCESS;
        info->AllocationBase = 0;
        info->AllocationProtect = 0;
        info->Type = 0;
        if ((char *)info->BaseAddress + info->RegionSize > (char *)end)
            info->RegionSize = (char *)end - (char *)info->BaseAddress;
    }
    else /* outside of the reserved area, pretend it's allocated */
    {
        info->RegionSize = (char *)start - (char *)info->BaseAddress;
        info->State = MEM_RESERVE;
        info->Protect = PAGE_NOACCESS;
        info->AllocationProtect = PAGE_NOACCESS;
        info->Type = MEM_PRIVATE;
    }
    return 1;
}


/* get basic information about a memory block */
static NTSTATUS get_basic_memory_info( HANDLE process, LPCVOID addr,
                                       MEMORY_BASIC_INFORMATION *info,
                                       SIZE_T len, SIZE_T *res_len )
{
    struct file_view *view;
    char *base, *alloc_base = 0, *alloc_end = working_set_limit;
    struct wine_rb_entry *ptr;
    sigset_t sigset;

    if (len < sizeof(MEMORY_BASIC_INFORMATION))
        return STATUS_INFO_LENGTH_MISMATCH;

    if (process != NtCurrentProcess())
    {
        NTSTATUS status;
        apc_call_t call;
        apc_result_t result;

        memset( &call, 0, sizeof(call) );

        call.virtual_query.type = APC_VIRTUAL_QUERY;
        call.virtual_query.addr = wine_server_client_ptr( addr );
        status = server_queue_process_apc( process, &call, &result );
        if (status != STATUS_SUCCESS) return status;

        if (result.virtual_query.status == STATUS_SUCCESS)
        {
            info->BaseAddress = wine_server_get_ptr( result.virtual_query.base );
            info->AllocationBase = wine_server_get_ptr( result.virtual_query.alloc_base );
            info->RegionSize = result.virtual_query.size;
            info->Protect = result.virtual_query.prot;
            info->AllocationProtect = result.virtual_query.alloc_prot;
            info->State = (DWORD)result.virtual_query.state << 12;
            info->Type = (DWORD)result.virtual_query.alloc_type << 16;
            if (info->RegionSize != result.virtual_query.size)  /* truncated */
                return STATUS_INVALID_PARAMETER;  /* FIXME */
            if (res_len) *res_len = sizeof(*info);
        }
        return result.virtual_query.status;
    }

    base = ROUND_ADDR( addr, page_mask );

    if (is_beyond_limit( base, 1, working_set_limit )) return STATUS_INVALID_PARAMETER;

    /* Find the view containing the address */

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
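    /* descend the view tree looking for a view that contains base, narrowing
     * alloc_base and alloc_end down to the nearest neighbouring views */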
    ptr = views_tree.root;
    while (ptr)
    {
        view = WINE_RB_ENTRY_VALUE( ptr, struct file_view, entry );
        if ((char *)view->base > base)
        {
            alloc_end = view->base;
            ptr = ptr->left;
        }
        else if ((char *)view->base + view->size <= base)
        {
            alloc_base = (char *)view->base + view->size;
            ptr = ptr->right;
        }
        else
        {
            alloc_base = view->base;
            alloc_end = (char *)view->base + view->size;
            break;
        }
    }

    /* Fill the info structure */

    info->AllocationBase = alloc_base;
    info->BaseAddress = base;
    info->RegionSize = alloc_end - base;
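
    /* if no view contains the address, the region is reported as free or
     * reserved depending on whether it falls inside one of the preloader's
     * reserved areas */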
    if (!ptr)
    {
        if (!mmap_enum_reserved_areas( get_free_mem_state_callback, info, 0 ))
        {
            /* not in a reserved area at all, pretend it's allocated */
#ifdef __i386__
            if (base >= (char *)address_space_start)
            {
                info->State = MEM_RESERVE;
                info->Protect = PAGE_NOACCESS;
                info->AllocationProtect = PAGE_NOACCESS;
                info->Type = MEM_PRIVATE;
            }
            else
#endif
            {
                info->State = MEM_FREE;
                info->Protect = PAGE_NOACCESS;
                info->AllocationBase = 0;
                info->AllocationProtect = 0;
                info->Type = 0;
            }
        }
    }
    else
    {
        BYTE vprot;
        char *ptr;
        SIZE_T range_size = get_committed_size( view, base, &vprot );

        info->State = (vprot & VPROT_COMMITTED) ? MEM_COMMIT : MEM_RESERVE;
        info->Protect = (vprot & VPROT_COMMITTED) ? get_win32_prot( vprot, view->protect ) : 0;
        info->AllocationProtect = get_win32_prot( view->protect, view->protect );
        if (view->protect & SEC_IMAGE) info->Type = MEM_IMAGE;
        else if (view->protect & (SEC_FILE | SEC_RESERVE | SEC_COMMIT)) info->Type = MEM_MAPPED;
        else info->Type = MEM_PRIVATE;
        for (ptr = base; ptr < base + range_size; ptr += page_size)
            if ((get_page_vprot( ptr ) ^ vprot) & ~VPROT_WRITEWATCH) break;
        info->RegionSize = ptr - base;
    }
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );

    if (res_len) *res_len = sizeof(*info);
    return STATUS_SUCCESS;
}


static NTSTATUS get_working_set_ex( HANDLE process, LPCVOID addr,
                                    MEMORY_WORKING_SET_EX_INFORMATION *info,
                                    SIZE_T len, SIZE_T *res_len )
{
    FILE *f;
    MEMORY_WORKING_SET_EX_INFORMATION *p;
    sigset_t sigset;

    if (process != NtCurrentProcess())
    {
        FIXME( "(process=%p,addr=%p) Unimplemented information class: MemoryWorkingSetExInformation\n", process, addr );
        return STATUS_INVALID_INFO_CLASS;
    }

    f = fopen( "/proc/self/pagemap", "rb" );
    if (!f)
    {
        static int once;
        if (!once++) WARN( "unable to open /proc/self/pagemap\n" );
    }
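
    /* /proc/self/pagemap contains one 64-bit word per virtual page: bit 63
     * is set when the page is present in RAM, and bit 61 when it is a
     * file-backed or shared-anonymous page (see the Linux pagemap
     * documentation) */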
    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    for (p = info; (UINT_PTR)(p + 1) <= (UINT_PTR)info + len; p++)
    {
        BYTE vprot;
        UINT64 pagemap;
        struct file_view *view;

        memset( &p->VirtualAttributes, 0, sizeof(p->VirtualAttributes) );

        /* If we don't have pagemap information, default to invalid. */
        if (!f || fseek( f, ((UINT_PTR)p->VirtualAddress >> 12) * sizeof(pagemap), SEEK_SET ) == -1 ||
            fread( &pagemap, sizeof(pagemap), 1, f ) != 1)
        {
            pagemap = 0;
        }

        if ((view = find_view( p->VirtualAddress, 0 )) &&
            get_committed_size( view, p->VirtualAddress, &vprot ) &&
            (vprot & VPROT_COMMITTED))
        {
            p->VirtualAttributes.Valid = !(vprot & VPROT_GUARD) && (vprot & 0x0f) && (pagemap >> 63);
            p->VirtualAttributes.Shared = !is_view_valloc( view ) && ((pagemap >> 61) & 1);
            if (p->VirtualAttributes.Shared && p->VirtualAttributes.Valid)
                p->VirtualAttributes.ShareCount = 1; /* FIXME */
            if (p->VirtualAttributes.Valid)
                p->VirtualAttributes.Win32Protection = get_win32_prot( vprot, view->protect );
        }
    }
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );

    if (f)
        fclose( f );
    if (res_len)
        *res_len = (UINT_PTR)p - (UINT_PTR)info;
    return STATUS_SUCCESS;
}


#define UNIMPLEMENTED_INFO_CLASS(c) \
    case c: \
        FIXME("(process=%p,addr=%p) Unimplemented information class: " #c "\n", process, addr); \
        return STATUS_INVALID_INFO_CLASS

/***********************************************************************
 *             NtQueryVirtualMemory   (NTDLL.@)
 *             ZwQueryVirtualMemory   (NTDLL.@)
 */
NTSTATUS WINAPI NtQueryVirtualMemory( HANDLE process, LPCVOID addr,
                                      MEMORY_INFORMATION_CLASS info_class,
                                      PVOID buffer, SIZE_T len, SIZE_T *res_len )
{
    TRACE("(%p, %p, info_class=%d, %p, %ld, %p)\n",
          process, addr, info_class, buffer, len, res_len);

    switch(info_class)
    {
        case MemoryBasicInformation:
            return get_basic_memory_info( process, addr, buffer, len, res_len );

        case MemoryWorkingSetExInformation:
            return get_working_set_ex( process, addr, buffer, len, res_len );

        UNIMPLEMENTED_INFO_CLASS(MemoryWorkingSetList);
        UNIMPLEMENTED_INFO_CLASS(MemorySectionName);
        UNIMPLEMENTED_INFO_CLASS(MemoryBasicVlmInformation);

        default:
            FIXME("(%p,%p,info_class=%d,%p,%ld,%p) Unknown information class\n",
                  process, addr, info_class, buffer, len, res_len);
            return STATUS_INVALID_INFO_CLASS;
    }
}


/***********************************************************************
 *             NtLockVirtualMemory   (NTDLL.@)
 *             ZwLockVirtualMemory   (NTDLL.@)
 */
NTSTATUS WINAPI NtLockVirtualMemory( HANDLE process, PVOID *addr, SIZE_T *size, ULONG unknown )
{
    NTSTATUS status = STATUS_SUCCESS;

    if (process != NtCurrentProcess())
    {
        apc_call_t call;
        apc_result_t result;

        memset( &call, 0, sizeof(call) );

        call.virtual_lock.type = APC_VIRTUAL_LOCK;
        call.virtual_lock.addr = wine_server_client_ptr( *addr );
        call.virtual_lock.size = *size;
        status = server_queue_process_apc( process, &call, &result );
        if (status != STATUS_SUCCESS) return status;

        if (result.virtual_lock.status == STATUS_SUCCESS)
        {
            *addr = wine_server_get_ptr( result.virtual_lock.addr );
            *size = result.virtual_lock.size;
        }
        return result.virtual_lock.status;
    }

    *size = ROUND_SIZE( *addr, *size );
    *addr = ROUND_ADDR( *addr, page_mask );
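
    /* any mlock() failure (e.g. exceeding RLIMIT_MEMLOCK) is reported as
     * STATUS_ACCESS_DENIED */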
    if (mlock( *addr, *size )) status = STATUS_ACCESS_DENIED;
    return status;
}


/***********************************************************************
 *             NtUnlockVirtualMemory   (NTDLL.@)
 *             ZwUnlockVirtualMemory   (NTDLL.@)
 */
NTSTATUS WINAPI NtUnlockVirtualMemory( HANDLE process, PVOID *addr, SIZE_T *size, ULONG unknown )
{
    NTSTATUS status = STATUS_SUCCESS;

    if (process != NtCurrentProcess())
    {
        apc_call_t call;
        apc_result_t result;

        memset( &call, 0, sizeof(call) );

        call.virtual_unlock.type = APC_VIRTUAL_UNLOCK;
        call.virtual_unlock.addr = wine_server_client_ptr( *addr );
        call.virtual_unlock.size = *size;
        status = server_queue_process_apc( process, &call, &result );
        if (status != STATUS_SUCCESS) return status;

        if (result.virtual_unlock.status == STATUS_SUCCESS)
        {
            *addr = wine_server_get_ptr( result.virtual_unlock.addr );
            *size = result.virtual_unlock.size;
        }
        return result.virtual_unlock.status;
    }

    *size = ROUND_SIZE( *addr, *size );
    *addr = ROUND_ADDR( *addr, page_mask );

    if (munlock( *addr, *size )) status = STATUS_ACCESS_DENIED;
    return status;
}


/***********************************************************************
 *             NtMapViewOfSection   (NTDLL.@)
 *             ZwMapViewOfSection   (NTDLL.@)
 */
NTSTATUS WINAPI NtMapViewOfSection( HANDLE handle, HANDLE process, PVOID *addr_ptr, ULONG_PTR zero_bits,
                                    SIZE_T commit_size, const LARGE_INTEGER *offset_ptr, SIZE_T *size_ptr,
                                    SECTION_INHERIT inherit, ULONG alloc_type, ULONG protect )
{
    NTSTATUS res;
    SIZE_T mask = granularity_mask;
    pe_image_info_t image_info;
    LARGE_INTEGER offset;
    unsigned short zero_bits_64 = zero_bits_win_to_64( zero_bits );

    offset.QuadPart = offset_ptr ? offset_ptr->QuadPart : 0;

    TRACE("handle=%p process=%p addr=%p off=%x%08x size=%lx access=%x\n",
          handle, process, *addr_ptr, offset.u.HighPart, offset.u.LowPart, *size_ptr, protect );
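
    /* zero_bits is either a count of high address bits that must be zero
     * (values up to 21), or, when 32 or greater, a bitmask: the mapping
     * address may only contain bits that are also set in the mask; both
     * encodings are validated below and folded by zero_bits_win_to_64() */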
    /* Check parameters */
    if (zero_bits > 21 && zero_bits < 32)
        return STATUS_INVALID_PARAMETER_4;
    if (!is_win64 && !is_wow64 && zero_bits >= 32)
        return STATUS_INVALID_PARAMETER_4;

    /* If both addr_ptr and zero_bits are passed, they have to match */
    if (*addr_ptr && zero_bits && zero_bits < 32 &&
        (((UINT_PTR)*addr_ptr) >> (32 - zero_bits)))
        return STATUS_INVALID_PARAMETER_4;
    if (*addr_ptr && zero_bits >= 32 &&
        (((UINT_PTR)*addr_ptr) & ~zero_bits))
        return STATUS_INVALID_PARAMETER_4;

#ifndef _WIN64
    if (!is_wow64 && (alloc_type & AT_ROUND_TO_PAGE))
    {
        *addr_ptr = ROUND_ADDR( *addr_ptr, page_mask );
        mask = page_mask;
    }
#endif

    if ((offset.u.LowPart & mask) || (*addr_ptr && ((UINT_PTR)*addr_ptr & mask)))
        return STATUS_MAPPED_ALIGNMENT;
    if (process != NtCurrentProcess())
    {
        apc_call_t call;
        apc_result_t result;

        memset( &call, 0, sizeof(call) );

        call.map_view.type = APC_MAP_VIEW;
        call.map_view.handle = wine_server_obj_handle( handle );
        call.map_view.addr = wine_server_client_ptr( *addr_ptr );
        call.map_view.size = *size_ptr;
        call.map_view.offset = offset.QuadPart;
        call.map_view.zero_bits = zero_bits;
        call.map_view.alloc_type = alloc_type;
        call.map_view.prot = protect;
        res = server_queue_process_apc( process, &call, &result );
        if (res != STATUS_SUCCESS) return res;

        if ((NTSTATUS)result.map_view.status >= 0)
        {
            *addr_ptr = wine_server_get_ptr( result.map_view.addr );
            *size_ptr = result.map_view.size;
        }
        return result.map_view.status;
    }

    return virtual_map_section( handle, addr_ptr, zero_bits_64, commit_size,
                                offset_ptr, size_ptr, alloc_type, protect,
                                &image_info );
}


/***********************************************************************
 *             NtUnmapViewOfSection   (NTDLL.@)
 *             ZwUnmapViewOfSection   (NTDLL.@)
 */
NTSTATUS WINAPI NtUnmapViewOfSection( HANDLE process, PVOID addr )
{
    struct file_view *view;
    NTSTATUS status = STATUS_NOT_MAPPED_VIEW;
    sigset_t sigset;

    if (process != NtCurrentProcess())
    {
        apc_call_t call;
        apc_result_t result;

        memset( &call, 0, sizeof(call) );

        call.unmap_view.type = APC_UNMAP_VIEW;
        call.unmap_view.addr = wine_server_client_ptr( addr );
        status = server_queue_process_apc( process, &call, &result );
        if (status == STATUS_SUCCESS) status = result.unmap_view.status;
        return status;
    }

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
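    /* anonymous allocations must be released with NtFreeVirtualMemory; views
     * flagged VPROT_SYSTEM (the underlying mmap is not under our control)
     * are simply forgotten locally without a server round trip */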
    if ((view = find_view( addr, 0 )) && !is_view_valloc( view ))
    {
        if (!(view->protect & VPROT_SYSTEM))
        {
            SERVER_START_REQ( unmap_view )
            {
                req->base = wine_server_client_ptr( view->base );
                status = wine_server_call( req );
            }
            SERVER_END_REQ;
            if (!status) delete_view( view );
            else FIXME( "failed to unmap %p %x\n", view->base, status );
        }
        else
        {
            delete_view( view );
            status = STATUS_SUCCESS;
        }
    }
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    return status;
}


/******************************************************************************
 *             virtual_fill_image_information
 *
 * Helper for NtQuerySection.
 */
void virtual_fill_image_information( const pe_image_info_t *pe_info, SECTION_IMAGE_INFORMATION *info )
{
    info->TransferAddress = wine_server_get_ptr( pe_info->entry_point );
    info->ZeroBits = pe_info->zerobits;
    info->MaximumStackSize = pe_info->stack_size;
    info->CommittedStackSize = pe_info->stack_commit;
    info->SubSystemType = pe_info->subsystem;
    info->SubsystemVersionLow = pe_info->subsystem_low;
    info->SubsystemVersionHigh = pe_info->subsystem_high;
    info->GpValue = pe_info->gp;
    info->ImageCharacteristics = pe_info->image_charact;
    info->DllCharacteristics = pe_info->dll_charact;
    info->Machine = pe_info->machine;
    info->ImageContainsCode = pe_info->contains_code;
    info->ImageFlags = pe_info->image_flags;
    info->LoaderFlags = pe_info->loader_flags;
    info->ImageFileSize = pe_info->file_size;
    info->CheckSum = pe_info->checksum;
#ifndef _WIN64 /* don't return 64-bit values to 32-bit processes */
    if (pe_info->machine == IMAGE_FILE_MACHINE_AMD64 || pe_info->machine == IMAGE_FILE_MACHINE_ARM64)
    {
        info->TransferAddress = (void *)0x81231234;  /* sic */
        info->MaximumStackSize = 0x100000;
        info->CommittedStackSize = 0x10000;
    }
#endif
}


/******************************************************************************
 *             NtQuerySection   (NTDLL.@)
 *             ZwQuerySection   (NTDLL.@)
 */
NTSTATUS WINAPI NtQuerySection( HANDLE handle, SECTION_INFORMATION_CLASS class, void *ptr,
                                SIZE_T size, SIZE_T *ret_size )
{
    NTSTATUS status;
    pe_image_info_t image_info;

    switch (class)
    {
    case SectionBasicInformation:
        if (size < sizeof(SECTION_BASIC_INFORMATION)) return STATUS_INFO_LENGTH_MISMATCH;
        break;
    case SectionImageInformation:
        if (size < sizeof(SECTION_IMAGE_INFORMATION)) return STATUS_INFO_LENGTH_MISMATCH;
        break;
    default:
        FIXME( "class %u not implemented\n", class );
        return STATUS_NOT_IMPLEMENTED;
    }
    if (!ptr) return STATUS_ACCESS_VIOLATION;
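
    /* both information classes are answered from a single get_mapping_info
     * request; the PE image information is only meaningful when the mapping
     * was created with SEC_IMAGE */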
    SERVER_START_REQ( get_mapping_info )
    {
        req->handle = wine_server_obj_handle( handle );
        req->access = SECTION_QUERY;
        wine_server_set_reply( req, &image_info, sizeof(image_info) );
        if (!(status = wine_server_call( req )))
        {
            if (class == SectionBasicInformation)
            {
                SECTION_BASIC_INFORMATION *info = ptr;
                info->Attributes = reply->flags;
                info->BaseAddress = NULL;
                info->Size.QuadPart = reply->size;
                if (ret_size) *ret_size = sizeof(*info);
            }
            else if (reply->flags & SEC_IMAGE)
            {
                SECTION_IMAGE_INFORMATION *info = ptr;
                virtual_fill_image_information( &image_info, info );
                if (ret_size) *ret_size = sizeof(*info);
            }
            else status = STATUS_SECTION_NOT_IMAGE;
        }
    }
    SERVER_END_REQ;

    return status;
}


/***********************************************************************
 *             NtFlushVirtualMemory   (NTDLL.@)
 *             ZwFlushVirtualMemory   (NTDLL.@)
 */
NTSTATUS WINAPI NtFlushVirtualMemory( HANDLE process, LPCVOID *addr_ptr,
                                      SIZE_T *size_ptr, ULONG unknown )
{
    struct file_view *view;
    NTSTATUS status = STATUS_SUCCESS;
    sigset_t sigset;
    void *addr = ROUND_ADDR( *addr_ptr, page_mask );

    if (process != NtCurrentProcess())
    {
        apc_call_t call;
        apc_result_t result;

        memset( &call, 0, sizeof(call) );

        call.virtual_flush.type = APC_VIRTUAL_FLUSH;
        call.virtual_flush.addr = wine_server_client_ptr( addr );
        call.virtual_flush.size = *size_ptr;
        status = server_queue_process_apc( process, &call, &result );
        if (status != STATUS_SUCCESS) return status;

        if (result.virtual_flush.status == STATUS_SUCCESS)
        {
            *addr_ptr = wine_server_get_ptr( result.virtual_flush.addr );
            *size_ptr = result.virtual_flush.size;
        }
        return result.virtual_flush.status;
    }
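
    /* a zero size means flushing the whole view; msync() with MS_ASYNC only
     * schedules the write-back, it does not wait for it to finish */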
    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    if (!(view = find_view( addr, *size_ptr ))) status = STATUS_INVALID_PARAMETER;
    else
    {
        if (!*size_ptr) *size_ptr = view->size;
        *addr_ptr = addr;
#ifdef MS_ASYNC
        if (msync( addr, *size_ptr, MS_ASYNC )) status = STATUS_NOT_MAPPED_DATA;
#endif
    }
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    return status;
}


/***********************************************************************
 *             NtGetWriteWatch   (NTDLL.@)
 *             ZwGetWriteWatch   (NTDLL.@)
 */
NTSTATUS WINAPI NtGetWriteWatch( HANDLE process, ULONG flags, PVOID base, SIZE_T size, PVOID *addresses,
                                 ULONG_PTR *count, ULONG *granularity )
{
    NTSTATUS status = STATUS_SUCCESS;
    sigset_t sigset;

    size = ROUND_SIZE( base, size );
    base = ROUND_ADDR( base, page_mask );

    if (!count || !granularity) return STATUS_ACCESS_VIOLATION;
    if (!*count || !size) return STATUS_INVALID_PARAMETER;
    if (flags & ~WRITE_WATCH_FLAG_RESET) return STATUS_INVALID_PARAMETER;
    if (!addresses) return STATUS_ACCESS_VIOLATION;

    TRACE( "%p %x %p-%p %p %lu\n", process, flags, base, (char *)base + size,
           addresses, *count );

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
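
    /* the fault handler elsewhere in this file clears VPROT_WRITEWATCH when
     * a watched page is written to, so the loop below collects exactly the
     * pages whose flag has been cleared since the last reset */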
    if (is_write_watch_range( base, size ))
    {
        ULONG_PTR pos = 0;
        char *addr = base;
        char *end = addr + size;

        while (pos < *count && addr < end)
        {
            if (!(get_page_vprot( addr ) & VPROT_WRITEWATCH)) addresses[pos++] = addr;
            addr += page_size;
        }
        if (flags & WRITE_WATCH_FLAG_RESET) reset_write_watches( base, addr - (char *)base );
        *count = pos;
        *granularity = page_size;
    }
    else status = STATUS_INVALID_PARAMETER;

    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    return status;
}


/***********************************************************************
 *             NtResetWriteWatch   (NTDLL.@)
 *             ZwResetWriteWatch   (NTDLL.@)
 */
NTSTATUS WINAPI NtResetWriteWatch( HANDLE process, PVOID base, SIZE_T size )
{
    NTSTATUS status = STATUS_SUCCESS;
    sigset_t sigset;

    size = ROUND_SIZE( base, size );
    base = ROUND_ADDR( base, page_mask );

    TRACE( "%p %p-%p\n", process, base, (char *)base + size );

    if (!size) return STATUS_INVALID_PARAMETER;

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );

    if (is_write_watch_range( base, size ))
        reset_write_watches( base, size );
    else
        status = STATUS_INVALID_PARAMETER;

    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    return status;
}


/***********************************************************************
 *             NtReadVirtualMemory   (NTDLL.@)
 *             ZwReadVirtualMemory   (NTDLL.@)
 */
NTSTATUS WINAPI NtReadVirtualMemory( HANDLE process, const void *addr, void *buffer,
                                     SIZE_T size, SIZE_T *bytes_read )
{
    NTSTATUS status;
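
    /* the destination buffer is probed for writability up front so that a
     * bad buffer fails cleanly here instead of inside the server request */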
    if (virtual_check_buffer_for_write( buffer, size ))
    {
        SERVER_START_REQ( read_process_memory )
        {
            req->handle = wine_server_obj_handle( process );
            req->addr = wine_server_client_ptr( addr );
            wine_server_set_reply( req, buffer, size );
            if ((status = wine_server_call( req ))) size = 0;
        }
        SERVER_END_REQ;
    }
    else
    {
        status = STATUS_ACCESS_VIOLATION;
        size = 0;
    }
    if (bytes_read) *bytes_read = size;
    return status;
}


/***********************************************************************
 *             NtWriteVirtualMemory   (NTDLL.@)
 *             ZwWriteVirtualMemory   (NTDLL.@)
 */
NTSTATUS WINAPI NtWriteVirtualMemory( HANDLE process, void *addr, const void *buffer,
                                      SIZE_T size, SIZE_T *bytes_written )
{
    NTSTATUS status;
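
    /* note the asymmetry with NtReadVirtualMemory: an unreadable source
     * buffer is reported as STATUS_PARTIAL_COPY rather than
     * STATUS_ACCESS_VIOLATION */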
    if (virtual_check_buffer_for_read( buffer, size ))
    {
        SERVER_START_REQ( write_process_memory )
        {
            req->handle = wine_server_obj_handle( process );
            req->addr = wine_server_client_ptr( addr );
            wine_server_add_data( req, buffer, size );
            if ((status = wine_server_call( req ))) size = 0;
        }
        SERVER_END_REQ;
    }
    else
    {
        status = STATUS_PARTIAL_COPY;
        size = 0;
    }
    if (bytes_written) *bytes_written = size;
    return status;
}


/***********************************************************************
 *             NtAreMappedFilesTheSame   (NTDLL.@)
 *             ZwAreMappedFilesTheSame   (NTDLL.@)
 */
NTSTATUS WINAPI NtAreMappedFilesTheSame(PVOID addr1, PVOID addr2)
{
    struct file_view *view1, *view2;
    NTSTATUS status;
    sigset_t sigset;

    TRACE("%p %p\n", addr1, addr2);

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );

    view1 = find_view( addr1, 0 );
    view2 = find_view( addr2, 0 );

    if (!view1 || !view2)
        status = STATUS_INVALID_ADDRESS;
    else if (is_view_valloc( view1 ) || is_view_valloc( view2 ))
        status = STATUS_CONFLICTING_ADDRESSES;
    else if (view1 == view2)
        status = STATUS_SUCCESS;
    else if ((view1->protect & VPROT_SYSTEM) || (view2->protect & VPROT_SYSTEM))
        status = STATUS_NOT_SAME_DEVICE;
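    /* two distinct file views may still refer to the same file, so let the
     * server compare the underlying mapping objects */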
    else
    {
        SERVER_START_REQ( is_same_mapping )
        {
            req->base1 = wine_server_client_ptr( view1->base );
            req->base2 = wine_server_client_ptr( view2->base );
            status = wine_server_call( req );
        }
        SERVER_END_REQ;
    }

    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    return status;
}


/**********************************************************************
 *             NtFlushInstructionCache   (NTDLL.@)
 */
NTSTATUS WINAPI NtFlushInstructionCache( HANDLE handle, const void *addr, SIZE_T size )
{
#if defined(__x86_64__) || defined(__i386__)
    /* no-op: x86 instruction caches are coherent with the data caches */
#elif defined(HAVE___CLEAR_CACHE)
    if (handle == GetCurrentProcess())
    {
        __clear_cache( (char *)addr, (char *)addr + size );
    }
    else
    {
        static int once;
        if (!once++) FIXME( "%p %p %ld other process not supported\n", handle, addr, size );
    }
#else
    static int once;
    if (!once++) FIXME( "%p %p %ld\n", handle, addr, size );
#endif
    return STATUS_SUCCESS;
}


/**********************************************************************
 *             NtFlushProcessWriteBuffers   (NTDLL.@)
 */
void WINAPI NtFlushProcessWriteBuffers(void)
{
    static int once = 0;
    if (!once++) FIXME( "stub\n" );
}


/**********************************************************************
 *             NtCreatePagingFile   (NTDLL.@)
 */
NTSTATUS WINAPI NtCreatePagingFile( UNICODE_STRING *name, LARGE_INTEGER *min_size,
                                    LARGE_INTEGER *max_size, LARGE_INTEGER *actual_size )
{
    FIXME( "(%s %p %p %p) stub\n", debugstr_us(name), min_size, max_size, actual_size );
    return STATUS_SUCCESS;
}