ntdll: Only use sysinfo function when present.
[wine.git] / dlls / ntdll / unix / virtual.c
blob2cca90ac952bbabd211b1bb55f4f6cded6cba354
1 /*
2 * Win32 virtual memory functions
4 * Copyright 1997, 2002, 2020 Alexandre Julliard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
21 #if 0
22 #pragma makedep unix
23 #endif
25 #include "config.h"
26 #include "wine/port.h"
28 #include <assert.h>
29 #include <errno.h>
30 #include <stdarg.h>
31 #include <stdio.h>
32 #include <signal.h>
33 #include <sys/types.h>
34 #ifdef HAVE_SYS_SOCKET_H
35 # include <sys/socket.h>
36 #endif
37 #ifdef HAVE_SYS_STAT_H
38 # include <sys/stat.h>
39 #endif
40 #ifdef HAVE_SYS_MMAN_H
41 # include <sys/mman.h>
42 #endif
43 #ifdef HAVE_SYS_SYSINFO_H
44 # include <sys/sysinfo.h>
45 #endif
46 #ifdef HAVE_UNISTD_H
47 # include <unistd.h>
48 #endif
49 #ifdef HAVE_VALGRIND_VALGRIND_H
50 # include <valgrind/valgrind.h>
51 #endif
52 #if defined(__APPLE__)
53 # include <mach/mach_init.h>
54 # include <mach/mach_vm.h>
55 #endif
57 #include "ntstatus.h"
58 #define WIN32_NO_STATUS
59 #include "windef.h"
60 #include "winnt.h"
61 #include "winternl.h"
62 #include "wine/exception.h"
63 #include "wine/list.h"
64 #include "wine/rbtree.h"
65 #include "unix_private.h"
66 #include "wine/debug.h"
68 WINE_DEFAULT_DEBUG_CHANNEL(virtual);
69 WINE_DECLARE_DEBUG_CHANNEL(module);
/* one entry of the address ranges reserved by the preloader; the array
 * is terminated by an entry with size == 0 (see the scan in mmap_init) */
struct preload_info
{
    void *addr;   /* base address of the reserved range */
    size_t size;  /* length in bytes; 0 marks the end of the array */
};
/* an address range we have reserved from the host so that nothing else maps there */
struct reserved_area
{
    struct list entry;  /* entry in the reserved_areas list */
    void *base;         /* base address of the area */
    size_t size;        /* size in bytes */
};

/* all reserved areas, kept sorted by ascending base address
 * (mmap_add_reserved_area inserts before the first area with a larger base) */
static struct list reserved_areas = LIST_INIT(reserved_areas);
/* bookkeeping for a loaded builtin (Wine-provided) module */
struct builtin_module
{
    struct list entry;     /* entry in the builtin_modules list */
    unsigned int refcount; /* number of outstanding references (see get_builtin_so_handle) */
    void *handle;          /* dlopen handle of the PE-side builtin, or NULL */
    void *module;          /* PE module base address, used as the lookup key */
    char *unix_name;       /* malloc'ed path of the unix library, or NULL */
    void *unix_handle;     /* dlopen handle of the unix library, or NULL */
    void *unix_entry;      /* entry point of the unix library, or NULL */
};

/* all registered builtin modules; guarded by virtual_mutex in the accessors below */
static struct list builtin_modules = LIST_INIT( builtin_modules );
/* a contiguous memory view managed by the virtual memory code */
struct file_view
{
    struct wine_rb_entry entry;  /* entry in global view tree; NOTE: kept as first member so
                                  * WINE_RB_ENTRY_VALUE(NULL,...) stays NULL (see free_ranges_remove_view) */
    void *base;                  /* base address */
    size_t size;                 /* size in bytes */
    unsigned int protect;        /* protection for all pages at allocation time and SEC_* flags */
};
/* Local re-definition of the __TRY/__EXCEPT/__ENDTRY exception macros using
 * __wine_setjmpex directly: the unix side cannot rely on the PE-side handler
 * machinery, so a fault handler simply longjmps back here.
 * NOTE(review): the continuation-brace lines were reconstructed from the macro
 * structure; the expansion forms a single do/while with a setjmp-guarded retry. */
#undef __TRY
#undef __EXCEPT
#undef __ENDTRY

#define __TRY \
    do { __wine_jmp_buf __jmp; \
         int __first = 1; \
         assert( !ntdll_get_thread_data()->jmp_buf ); \
         for (;;) if (!__first) \
         { \
             do {

#define __EXCEPT \
             } while(0); \
             ntdll_get_thread_data()->jmp_buf = NULL; \
             break; \
         } else { \
             if (__wine_setjmpex( &__jmp, NULL )) { \
                 do {

#define __ENDTRY \
                 } while (0); \
                 break; \
             } \
             ntdll_get_thread_data()->jmp_buf = &__jmp; \
             __first = 0; \
         } \
    } while (0);
/* per-page protection flags (one byte per page, stored in pages_vprot) */
#define VPROT_READ 0x01
#define VPROT_WRITE 0x02
#define VPROT_EXEC 0x04
#define VPROT_WRITECOPY 0x08
#define VPROT_GUARD 0x10
#define VPROT_COMMITTED 0x20
#define VPROT_WRITEWATCH 0x40
/* per-mapping protection flags (stored in file_view.protect, not per page) */
#define VPROT_SYSTEM 0x0200 /* system view (underlying mmap not under our control) */
/* Conversion from VPROT_* to Win32 flags; indexed by the low four
 * VPROT bits (READ | WRITE | EXEC | WRITECOPY), WRITECOPY dominating */
static const BYTE VIRTUAL_Win32Flags[16] =
{
    PAGE_NOACCESS, /* 0 */
    PAGE_READONLY, /* READ */
    PAGE_READWRITE, /* WRITE */
    PAGE_READWRITE, /* READ | WRITE */
    PAGE_EXECUTE, /* EXEC */
    PAGE_EXECUTE_READ, /* READ | EXEC */
    PAGE_EXECUTE_READWRITE, /* WRITE | EXEC */
    PAGE_EXECUTE_READWRITE, /* READ | WRITE | EXEC */
    PAGE_WRITECOPY, /* WRITECOPY */
    PAGE_WRITECOPY, /* READ | WRITECOPY */
    PAGE_WRITECOPY, /* WRITE | WRITECOPY */
    PAGE_WRITECOPY, /* READ | WRITE | WRITECOPY */
    PAGE_EXECUTE_WRITECOPY, /* EXEC | WRITECOPY */
    PAGE_EXECUTE_WRITECOPY, /* READ | EXEC | WRITECOPY */
    PAGE_EXECUTE_WRITECOPY, /* WRITE | EXEC | WRITECOPY */
    PAGE_EXECUTE_WRITECOPY /* READ | WRITE | EXEC | WRITECOPY */
};
static struct wine_rb_tree views_tree;  /* all views, keyed by base address (compare_view) */
static pthread_mutex_t virtual_mutex;   /* guards the views tree, vprot bytes and free ranges */

static const UINT page_shift = 12;               /* hard-coded 4kB pages */
static const UINT_PTR page_mask = 0xfff;
static const UINT_PTR granularity_mask = 0xffff; /* 64kB Win32 allocation granularity */

/* Note: these are Windows limits, you cannot change them. */
#ifdef __i386__
static void *address_space_start = (void *)0x110000; /* keep DOS area clear */
#else
static void *address_space_start = (void *)0x10000;
#endif

#ifdef __aarch64__
static void *address_space_limit = (void *)0xffffffff0000; /* top of the total available address space */
#elif defined(_WIN64)
static void *address_space_limit = (void *)0x7fffffff0000;
#else
static void *address_space_limit = (void *)0xc0000000;
#endif

#ifdef _WIN64
static void *user_space_limit = (void *)0x7fffffff0000; /* top of the user address space */
static void *working_set_limit = (void *)0x7fffffff0000; /* top of the current working set */
#else
static void *user_space_limit = (void *)0x7fff0000;
static void *working_set_limit = (void *)0x7fff0000;
#endif

/* shared user data page, always mapped at the Windows-mandated address */
struct _KUSER_SHARED_DATA *user_shared_data = (void *)0x7ffe0000;

/* TEB allocation blocks */
static void *teb_block;        /* current block TEBs are carved out of */
static void **next_free_teb;   /* head of the free-TEB singly-linked list */
static int teb_block_pos;      /* allocation position inside teb_block */
static struct list teb_list = LIST_INIT( teb_list );

/* round an address down / a size up to page boundaries */
#define ROUND_ADDR(addr,mask) ((void *)((UINT_PTR)(addr) & ~(UINT_PTR)(mask)))
#define ROUND_SIZE(addr,size) (((SIZE_T)(size) + ((UINT_PTR)(addr) & page_mask) + page_mask) & ~page_mask)

#define VIRTUAL_DEBUG_DUMP_VIEW(view) do { if (TRACE_ON(virtual)) dump_view(view); } while (0)

#ifndef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif

#ifdef _WIN64 /* on 64-bit the page protection bytes use a 2-level table */
static const size_t pages_vprot_shift = 20;
static const size_t pages_vprot_mask = (1 << 20) - 1;
static size_t pages_vprot_size;   /* number of allocated top-level directory slots */
static BYTE **pages_vprot;        /* top-level directory; second level allocated on demand */
#else /* on 32-bit we use a simple array with one byte per page */
static BYTE *pages_vprot;
#endif

/* pool the file_view structures are allocated from */
static struct file_view *view_block_start, *view_block_end, *next_free_view;
static const size_t view_block_size = 0x100000;
static void *preload_reserve_start;  /* range reserved by the preloader (WINEPRELOADRESERVE) */
static void *preload_reserve_end;
static BOOL force_exec_prot; /* whether to force PROT_EXEC on all PROT_READ mmaps */

/* a gap between views, aligned to the allocation granularity */
struct range_entry
{
    void *base;
    void *end;
};

/* sorted array of free ranges, maintained by free_ranges_insert/remove_view */
static struct range_entry *free_ranges;
static struct range_entry *free_ranges_end;
240 static inline BOOL is_beyond_limit( const void *addr, size_t size, const void *limit )
242 return (addr >= limit || (const char *)addr + size > (const char *)limit);
245 /* mmap() anonymous memory at a fixed address */
246 void *anon_mmap_fixed( void *start, size_t size, int prot, int flags )
248 return mmap( start, size, prot, MAP_PRIVATE | MAP_ANON | MAP_FIXED | flags, -1, 0 );
251 /* allocate anonymous mmap() memory at any address */
252 void *anon_mmap_alloc( size_t size, int prot )
254 return mmap( NULL, size, prot, MAP_PRIVATE | MAP_ANON, -1, 0 );
/* add [addr, addr+size) to the sorted reserved_areas list, merging with
 * adjacent areas where possible; allocation failure silently drops the entry */
static void mmap_add_reserved_area( void *addr, SIZE_T size )
{
    struct reserved_area *area;
    struct list *ptr;

    if (!((char *)addr + size)) size--; /* avoid wrap-around */

    LIST_FOR_EACH( ptr, &reserved_areas )
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if (area->base > addr)
        {
            /* try to merge with the next one */
            if ((char *)addr + size == (char *)area->base)
            {
                area->base = addr;
                area->size += size;
                return;
            }
            break;  /* insertion point found: before this area */
        }
        else if ((char *)area->base + area->size == (char *)addr)
        {
            /* merge with the previous one */
            area->size += size;

            /* try to merge with the next one too */
            if ((ptr = list_next( &reserved_areas, ptr )))
            {
                struct reserved_area *next = LIST_ENTRY( ptr, struct reserved_area, entry );
                if ((char *)addr + size == (char *)next->base)
                {
                    area->size += next->size;
                    list_remove( &next->entry );
                    free( next );
                }
            }
            return;
        }
    }

    /* no merge possible: insert a new area before 'ptr'
     * (if the loop ran to completion, ptr is the list head => append at tail) */
    if ((area = malloc( sizeof(*area) )))
    {
        area->base = addr;
        area->size = size;
        list_add_before( ptr, &area->entry );
    }
}
/* remove [addr, addr+size) from the reserved areas, shrinking, splitting or
 * deleting every area that overlaps the range */
static void mmap_remove_reserved_area( void *addr, SIZE_T size )
{
    struct reserved_area *area;
    struct list *ptr;

    if (!((char *)addr + size)) size--; /* avoid wrap-around */

    ptr = list_head( &reserved_areas );
    /* find the first area covering address */
    while (ptr)
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if ((char *)area->base >= (char *)addr + size) break; /* outside the range */
        if ((char *)area->base + area->size > (char *)addr) /* overlaps range */
        {
            if (area->base >= addr)
            {
                if ((char *)area->base + area->size > (char *)addr + size)
                {
                    /* range overlaps beginning of area only -> shrink area */
                    area->size -= (char *)addr + size - (char *)area->base;
                    area->base = (char *)addr + size;
                    break;
                }
                else
                {
                    /* range contains the whole area -> remove area completely */
                    ptr = list_next( &reserved_areas, ptr );
                    list_remove( &area->entry );
                    free( area );
                    continue;  /* ptr already advanced */
                }
            }
            else
            {
                if ((char *)area->base + area->size > (char *)addr + size)
                {
                    /* range is in the middle of area -> split area in two */
                    struct reserved_area *new_area = malloc( sizeof(*new_area) );
                    if (new_area)
                    {
                        new_area->base = (char *)addr + size;
                        new_area->size = (char *)area->base + area->size - (char *)new_area->base;
                        list_add_after( ptr, &new_area->entry );
                    }
                    /* on OOM, fall back to dropping the whole tail of the area */
                    else size = (char *)area->base + area->size - (char *)addr;
                    area->size = (char *)addr - (char *)area->base;
                    break;
                }
                else
                {
                    /* range overlaps end of area only -> shrink area */
                    area->size = (char *)addr - (char *)area->base;
                }
            }
        }
        ptr = list_next( &reserved_areas, ptr );
    }
}
367 static int mmap_is_in_reserved_area( void *addr, SIZE_T size )
369 struct reserved_area *area;
370 struct list *ptr;
372 LIST_FOR_EACH( ptr, &reserved_areas )
374 area = LIST_ENTRY( ptr, struct reserved_area, entry );
375 if (area->base > addr) break;
376 if ((char *)area->base + area->size <= (char *)addr) continue;
377 /* area must contain block completely */
378 if ((char *)area->base + area->size < (char *)addr + size) return -1;
379 return 1;
381 return 0;
384 static int mmap_enum_reserved_areas( int (*enum_func)(void *base, SIZE_T size, void *arg),
385 void *arg, int top_down )
387 int ret = 0;
388 struct list *ptr;
390 if (top_down)
392 for (ptr = reserved_areas.prev; ptr != &reserved_areas; ptr = ptr->prev)
394 struct reserved_area *area = LIST_ENTRY( ptr, struct reserved_area, entry );
395 if ((ret = enum_func( area->base, area->size, arg ))) break;
398 else
400 for (ptr = reserved_areas.next; ptr != &reserved_areas; ptr = ptr->next)
402 struct reserved_area *area = LIST_ENTRY( ptr, struct reserved_area, entry );
403 if ((ret = enum_func( area->base, area->size, arg ))) break;
406 return ret;
/* try to mmap anonymous memory at exactly 'start' without replacing an
 * existing mapping; on failure returns MAP_FAILED with errno == EEXIST
 * (best-effort emulation on platforms without MAP_FIXED_NOREPLACE) */
static void *anon_mmap_tryfixed( void *start, size_t size, int prot, int flags )
{
    void *ptr;

#ifdef MAP_FIXED_NOREPLACE
    ptr = mmap( start, size, prot, MAP_FIXED_NOREPLACE | MAP_PRIVATE | MAP_ANON | flags, -1, 0 );
#elif defined(MAP_TRYFIXED)
    ptr = mmap( start, size, prot, MAP_TRYFIXED | MAP_PRIVATE | MAP_ANON | flags, -1, 0 );
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
    ptr = mmap( start, size, prot, MAP_FIXED | MAP_EXCL | MAP_PRIVATE | MAP_ANON | flags, -1, 0 );
    /* FreeBSD reports a busy range with EINVAL; normalize to EEXIST */
    if (ptr == MAP_FAILED && errno == EINVAL) errno = EEXIST;
#elif defined(__APPLE__)
    /* reserve the range through Mach first, then commit it with mmap */
    mach_vm_address_t result = (mach_vm_address_t)start;
    kern_return_t ret = mach_vm_map( mach_task_self(), &result, size, 0, VM_FLAGS_FIXED,
                                     MEMORY_OBJECT_NULL, 0, 0, prot, VM_PROT_ALL, VM_INHERIT_COPY );

    if (!ret)
    {
        if ((ptr = anon_mmap_fixed( start, size, prot, flags )) == MAP_FAILED)
            mach_vm_deallocate( mach_task_self(), result, size );
    }
    else
    {
        errno = (ret == KERN_NO_SPACE ? EEXIST : ENOMEM);
        ptr = MAP_FAILED;
    }
#else
    /* no try-fixed support: plain hinted mmap, result checked below */
    ptr = mmap( start, size, prot, MAP_PRIVATE | MAP_ANON | flags, -1, 0 );
#endif
    if (ptr != MAP_FAILED && ptr != start)
    {
        /* kernel placed us elsewhere: undo, but keep memory beyond the user
         * space limit as a reserved area so it is not handed out again */
        if (is_beyond_limit( ptr, size, user_space_limit ))
        {
            anon_mmap_fixed( ptr, size, PROT_NONE, MAP_NORESERVE );
            mmap_add_reserved_area( ptr, size );
        }
        else munmap( ptr, size );
        ptr = MAP_FAILED;
        errno = EEXIST;
    }
    return ptr;
}
/* reserve the address range [addr, end) so the host allocator cannot use it;
 * on failure the range is bisected and each half retried (non-Apple path) */
static void reserve_area( void *addr, void *end )
{
#ifdef __APPLE__

#ifdef __i386__
    static const mach_vm_address_t max_address = VM_MAX_ADDRESS;
#else
    static const mach_vm_address_t max_address = MACH_VM_MAX_ADDRESS;
#endif
    mach_vm_address_t address = (mach_vm_address_t)addr;
    mach_vm_address_t end_address = (mach_vm_address_t)end;

    /* end == 0 means "up to the top of the address space" */
    if (!end_address || max_address < end_address)
        end_address = max_address;

    while (address < end_address)
    {
        mach_vm_address_t hole_address = address;
        kern_return_t ret;
        mach_vm_size_t size;
        vm_region_basic_info_data_64_t info;
        mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
        mach_port_t dummy_object_name = MACH_PORT_NULL;

        /* find the mapped region at or above the current address. */
        ret = mach_vm_region(mach_task_self(), &address, &size, VM_REGION_BASIC_INFO_64,
                             (vm_region_info_t)&info, &count, &dummy_object_name);
        if (ret != KERN_SUCCESS)
        {
            /* no region above: treat the rest of the space as one big hole */
            address = max_address;
            size = 0;
        }

        if (end_address < address)
            address = end_address;
        if (hole_address < address)
        {
            /* found a hole, attempt to reserve it. */
            size_t hole_size = address - hole_address;
            mach_vm_address_t alloc_address = hole_address;

            ret = mach_vm_map( mach_task_self(), &alloc_address, hole_size, 0, VM_FLAGS_FIXED,
                               MEMORY_OBJECT_NULL, 0, 0, PROT_NONE, VM_PROT_ALL, VM_INHERIT_COPY );
            if (!ret) mmap_add_reserved_area( (void*)hole_address, hole_size );
            else if (ret == KERN_NO_SPACE)
            {
                /* something filled (part of) the hole before we could.
                   go back and look again. */
                address = hole_address;
                continue;
            }
        }
        address += size;  /* skip past the mapped region */
    }
#else
    void *ptr;
    size_t size = (char *)end - (char *)addr;

    if (!size) return;

    if ((ptr = anon_mmap_tryfixed( addr, size, PROT_NONE, MAP_NORESERVE )) != MAP_FAILED)
    {
        mmap_add_reserved_area( addr, size );
        return;
    }
    /* something is already mapped inside: bisect and retry each half,
     * keeping the split aligned to the allocation granularity */
    size = (size / 2) & ~granularity_mask;
    if (size)
    {
        reserve_area( addr, (char *)addr + size );
        reserve_area( (char *)addr + size, end );
    }
#endif /* __APPLE__ */
}
/* reserve the parts of the address space Windows code expects to own,
 * taking into account any ranges the preloader already reserved for us */
static void mmap_init( const struct preload_info *preload_info )
{
#ifndef _WIN64
#ifndef __APPLE__
    char stack;
    char * const stack_ptr = &stack;  /* approximate current stack position */
#endif
    char *user_space_limit = (char *)0x7ffe0000;  /* shadows the global on purpose */
    int i;

    if (preload_info)
    {
        /* check for a reserved area starting at the user space limit */
        /* to avoid wasting time trying to allocate it again */
        for (i = 0; preload_info[i].size; i++)
        {
            if ((char *)preload_info[i].addr > user_space_limit) break;
            if ((char *)preload_info[i].addr + preload_info[i].size > user_space_limit)
            {
                user_space_limit = (char *)preload_info[i].addr + preload_info[i].size;
                break;
            }
        }
    }
    else reserve_area( (void *)0x00010000, (void *)0x40000000 );

#ifndef __APPLE__
    if (stack_ptr >= user_space_limit)
    {
        /* reserve everything except a 64kB-aligned window around the stack */
        char *end = 0;
        char *base = stack_ptr - ((unsigned int)stack_ptr & granularity_mask) - (granularity_mask + 1);
        if (base > user_space_limit) reserve_area( user_space_limit, base );
        base = stack_ptr - ((unsigned int)stack_ptr & granularity_mask) + (granularity_mask + 1);
#if defined(linux) || defined(__FreeBSD__) || defined (__FreeBSD_kernel__) || defined(__DragonFly__)
        /* Heuristic: assume the stack is near the end of the address */
        /* space, this avoids a lot of futile allocation attempts */
        end = (char *)(((unsigned long)base + 0x0fffffff) & 0xf0000000);
#endif
        reserve_area( base, end );
    }
    else
#endif
        reserve_area( user_space_limit, 0 );

#else

    if (preload_info) return;
    /* if we don't have a preloader, try to reserve the space now */
    reserve_area( (void *)0x000000010000, (void *)0x000068000000 );
    reserve_area( (void *)0x00007ff00000, (void *)0x00007fff0000 );
    reserve_area( (void *)0x7ffffe000000, (void *)0x7fffffff0000 );

#endif
}
584 /***********************************************************************
585 * get_wow_user_space_limit
587 static void *get_wow_user_space_limit(void)
589 #ifdef _WIN64
590 if (main_image_info.ImageCharacteristics & IMAGE_FILE_LARGE_ADDRESS_AWARE) return (void *)0xc0000000;
591 return (void *)0x7fff0000;
592 #endif
593 return user_space_limit;
597 /***********************************************************************
598 * add_builtin_module
600 static void add_builtin_module( void *module, void *handle )
602 struct builtin_module *builtin;
604 if (!(builtin = malloc( sizeof(*builtin) ))) return;
605 builtin->handle = handle;
606 builtin->module = module;
607 builtin->refcount = 1;
608 builtin->unix_name = NULL;
609 builtin->unix_handle = NULL;
610 builtin->unix_entry = NULL;
611 list_add_tail( &builtin_modules, &builtin->entry );
/***********************************************************************
 *           release_builtin_module
 *
 * Drop one reference on the builtin registered for 'module'; when the
 * refcount hits zero, unloads both dl handles and frees the entry.
 * NOTE(review): caller appears responsible for locking — no mutex here.
 */
static void release_builtin_module( void *module )
{
    struct builtin_module *builtin;

    LIST_FOR_EACH_ENTRY( builtin, &builtin_modules, struct builtin_module, entry )
    {
        if (builtin->module != module) continue;
        if (!--builtin->refcount)
        {
            list_remove( &builtin->entry );
            if (builtin->handle) dlclose( builtin->handle );
            if (builtin->unix_handle) dlclose( builtin->unix_handle );
            free( builtin->unix_name );
            free( builtin );
        }
        break;
    }
}
638 /***********************************************************************
639 * get_builtin_so_handle
641 void *get_builtin_so_handle( void *module )
643 sigset_t sigset;
644 void *ret = NULL;
645 struct builtin_module *builtin;
647 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
648 LIST_FOR_EACH_ENTRY( builtin, &builtin_modules, struct builtin_module, entry )
650 if (builtin->module != module) continue;
651 ret = builtin->handle;
652 if (ret) builtin->refcount++;
653 break;
655 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
656 return ret;
660 /***********************************************************************
661 * get_builtin_unix_funcs
663 NTSTATUS get_builtin_unix_funcs( void *module, BOOL wow, void **funcs )
665 const char *ptr_name = wow ? "__wine_unix_call_wow64_funcs" : "__wine_unix_call_funcs";
666 sigset_t sigset;
667 NTSTATUS status = STATUS_DLL_NOT_FOUND;
668 struct builtin_module *builtin;
670 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
671 LIST_FOR_EACH_ENTRY( builtin, &builtin_modules, struct builtin_module, entry )
673 if (builtin->module != module) continue;
674 *funcs = dlsym( builtin->unix_handle, ptr_name );
675 status = STATUS_SUCCESS;
676 break;
678 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
679 return status;
683 /***********************************************************************
684 * get_builtin_unix_info
686 NTSTATUS get_builtin_unix_info( void *module, const char **name, void **handle, void **entry )
688 sigset_t sigset;
689 NTSTATUS status = STATUS_DLL_NOT_FOUND;
690 struct builtin_module *builtin;
692 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
693 LIST_FOR_EACH_ENTRY( builtin, &builtin_modules, struct builtin_module, entry )
695 if (builtin->module != module) continue;
696 *name = builtin->unix_name;
697 *handle = builtin->unix_handle;
698 *entry = builtin->unix_entry;
699 status = STATUS_SUCCESS;
700 break;
702 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
703 return status;
707 /***********************************************************************
708 * set_builtin_unix_handle
710 NTSTATUS set_builtin_unix_handle( void *module, const char *name, void *handle )
712 sigset_t sigset;
713 NTSTATUS status = STATUS_DLL_NOT_FOUND;
714 struct builtin_module *builtin;
716 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
717 LIST_FOR_EACH_ENTRY( builtin, &builtin_modules, struct builtin_module, entry )
719 if (builtin->module != module) continue;
720 if (!builtin->unix_handle)
722 builtin->unix_name = strdup( name );
723 builtin->unix_handle = handle;
724 status = STATUS_SUCCESS;
726 else status = STATUS_IMAGE_ALREADY_LOADED;
727 break;
729 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
730 return status;
734 /***********************************************************************
735 * set_builtin_unix_entry
737 NTSTATUS set_builtin_unix_entry( void *module, void *entry )
739 sigset_t sigset;
740 NTSTATUS status = STATUS_DLL_NOT_FOUND;
741 struct builtin_module *builtin;
743 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
744 LIST_FOR_EACH_ENTRY( builtin, &builtin_modules, struct builtin_module, entry )
746 if (builtin->module != module) continue;
747 if (builtin->unix_handle)
749 builtin->unix_entry = entry;
750 status = STATUS_SUCCESS;
752 break;
754 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
755 return status;
759 /***********************************************************************
760 * free_ranges_lower_bound
762 * Returns the first range whose end is not less than addr, or end if there's none.
764 static struct range_entry *free_ranges_lower_bound( void *addr )
766 struct range_entry *begin = free_ranges;
767 struct range_entry *end = free_ranges_end;
768 struct range_entry *mid;
770 while (begin < end)
772 mid = begin + (end - begin) / 2;
773 if (mid->end < addr)
774 begin = mid + 1;
775 else
776 end = mid;
779 return begin;
/***********************************************************************
 *           free_ranges_insert_view
 *
 * Updates the free_ranges after a new view has been created: the
 * granularity-aligned span of the view is carved out of the free range
 * that contains it (shrinking, removing, or splitting that range).
 */
static void free_ranges_insert_view( struct file_view *view )
{
    void *view_base = ROUND_ADDR( view->base, granularity_mask );
    void *view_end = ROUND_ADDR( (char *)view->base + view->size + granularity_mask, granularity_mask );
    struct range_entry *range = free_ranges_lower_bound( view_base );
    struct range_entry *next = range + 1;

    /* free_ranges initial value is such that the view is either inside range or before another one. */
    assert( range != free_ranges_end );
    assert( range->end > view_base || next != free_ranges_end );

    /* this happens because virtual_alloc_thread_stack shrinks a view, then creates another one on top,
     * or because AT_ROUND_TO_PAGE was used with NtMapViewOfSection to force 4kB aligned mapping. */
    if ((range->end > view_base && range->base >= view_end) ||
        (range->end == view_base && next->base >= view_end))
    {
        /* on Win64, assert that it's correctly aligned so we're not going to be in trouble later */
#ifdef _WIN64
        assert( view->base == view_base );
#endif
        WARN( "range %p - %p is already mapped\n", view_base, view_end );
        return;
    }

    /* this should never happen */
    if (range->base > view_base || range->end < view_end)
        ERR( "range %p - %p is already partially mapped\n", view_base, view_end );
    assert( range->base <= view_base && range->end >= view_end );

    /* need to split the range in two */
    if (range->base < view_base && range->end > view_end)
    {
        /* shift the tail of the array up to open a slot after 'range' */
        memmove( next + 1, next, (free_ranges_end - next) * sizeof(struct range_entry) );
        free_ranges_end += 1;
        if ((char *)free_ranges_end - (char *)free_ranges > view_block_size)
            ERR( "Free range sequence is full, trouble ahead!\n" );
        assert( (char *)free_ranges_end - (char *)free_ranges <= view_block_size );

        next->base = view_end;
        next->end = range->end;
        range->end = view_base;
    }
    else
    {
        /* otherwise we just have to shrink it */
        if (range->base < view_base)
            range->end = view_base;
        else
            range->base = view_end;

        if (range->base < range->end) return;

        /* and possibly remove it if it's now empty */
        memmove( range, next, (free_ranges_end - next) * sizeof(struct range_entry) );
        free_ranges_end -= 1;
        assert( free_ranges_end - free_ranges > 0 );
    }
}
/***********************************************************************
 *           free_ranges_remove_view
 *
 * Updates the free_ranges after a view has been destroyed: the view's
 * granularity-aligned span is returned to the free ranges (merged with a
 * neighbour, grown onto one, or inserted as a new range).
 */
static void free_ranges_remove_view( struct file_view *view )
{
    void *view_base = ROUND_ADDR( view->base, granularity_mask );
    void *view_end = ROUND_ADDR( (char *)view->base + view->size + granularity_mask, granularity_mask );
    struct range_entry *range = free_ranges_lower_bound( view_base );
    struct range_entry *next = range + 1;

    /* It's possible to use AT_ROUND_TO_PAGE on 32bit with NtMapViewOfSection to force 4kB alignment,
     * and this breaks our assumptions. Look at the views around to check if the range is still in use. */
#ifndef _WIN64
    /* NOTE: WINE_RB_ENTRY_VALUE(NULL,...) stays NULL only because 'entry' is
     * the first member of struct file_view — keep it that way */
    struct file_view *prev_view = WINE_RB_ENTRY_VALUE( wine_rb_prev( &view->entry ), struct file_view, entry );
    struct file_view *next_view = WINE_RB_ENTRY_VALUE( wine_rb_next( &view->entry ), struct file_view, entry );
    void *prev_view_base = prev_view ? ROUND_ADDR( prev_view->base, granularity_mask ) : NULL;
    void *prev_view_end = prev_view ? ROUND_ADDR( (char *)prev_view->base + prev_view->size + granularity_mask, granularity_mask ) : NULL;
    void *next_view_base = next_view ? ROUND_ADDR( next_view->base, granularity_mask ) : NULL;
    void *next_view_end = next_view ? ROUND_ADDR( (char *)next_view->base + next_view->size + granularity_mask, granularity_mask ) : NULL;

    if ((prev_view_base < view_end && prev_view_end > view_base) ||
        (next_view_base < view_end && next_view_end > view_base))
    {
        WARN( "range %p - %p is still mapped\n", view_base, view_end );
        return;
    }
#endif

    /* free_ranges initial value is such that the view is either inside range or before another one. */
    assert( range != free_ranges_end );
    assert( range->end > view_base || next != free_ranges_end );

    /* this should never happen, but we can safely ignore it */
    if (range->base <= view_base && range->end >= view_end)
    {
        WARN( "range %p - %p is already unmapped\n", view_base, view_end );
        return;
    }

    /* this should never happen */
    if (range->base < view_end && range->end > view_base)
        ERR( "range %p - %p is already partially unmapped\n", view_base, view_end );
    assert( range->end <= view_base || range->base >= view_end );

    /* merge with next if possible */
    if (range->end == view_base && next->base == view_end)
    {
        range->end = next->end;
        memmove( next, next + 1, (free_ranges_end - next - 1) * sizeof(struct range_entry) );
        free_ranges_end -= 1;
        assert( free_ranges_end - free_ranges > 0 );
    }
    /* or try growing the range */
    else if (range->end == view_base)
        range->end = view_end;
    else if (range->base == view_end)
        range->base = view_base;
    /* otherwise create a new one */
    else
    {
        memmove( range + 1, range, (free_ranges_end - range) * sizeof(struct range_entry) );
        free_ranges_end += 1;
        if ((char *)free_ranges_end - (char *)free_ranges > view_block_size)
            ERR( "Free range sequence is full, trouble ahead!\n" );
        assert( (char *)free_ranges_end - (char *)free_ranges <= view_block_size );

        range->base = view_base;
        range->end = view_end;
    }
}
922 static inline int is_view_valloc( const struct file_view *view )
924 return !(view->protect & (SEC_FILE | SEC_RESERVE | SEC_COMMIT));
/***********************************************************************
 *           get_page_vprot
 *
 * Return the page protection byte; pages with no allocated protection
 * entry (64-bit second-level table missing) read as 0.
 */
static BYTE get_page_vprot( const void *addr )
{
    size_t idx = (size_t)addr >> page_shift;

#ifdef _WIN64
    if ((idx >> pages_vprot_shift) >= pages_vprot_size) return 0;
    if (!pages_vprot[idx >> pages_vprot_shift]) return 0;
    return pages_vprot[idx >> pages_vprot_shift][idx & pages_vprot_mask];
#else
    return pages_vprot[idx];
#endif
}
/***********************************************************************
 *           set_page_vprot
 *
 * Set a range of page protection bytes. The 64-bit path assumes the
 * second-level tables were already allocated (see alloc_pages_vprot).
 */
static void set_page_vprot( const void *addr, size_t size, BYTE vprot )
{
    size_t idx = (size_t)addr >> page_shift;
    size_t end = ((size_t)addr + size + page_mask) >> page_shift;

#ifdef _WIN64
    /* fill whole second-level tables first, then the final partial one */
    while (idx >> pages_vprot_shift != end >> pages_vprot_shift)
    {
        size_t dir_size = pages_vprot_mask + 1 - (idx & pages_vprot_mask);
        memset( pages_vprot[idx >> pages_vprot_shift] + (idx & pages_vprot_mask), vprot, dir_size );
        idx += dir_size;
    }
    memset( pages_vprot[idx >> pages_vprot_shift] + (idx & pages_vprot_mask), vprot, end - idx );
#else
    memset( pages_vprot + idx, vprot, end - idx );
#endif
}
/***********************************************************************
 *           set_page_vprot_bits
 *
 * Set and/or clear bits in a range of page protection bytes
 * (clear is applied before set on each byte).
 */
static void set_page_vprot_bits( const void *addr, size_t size, BYTE set, BYTE clear )
{
    size_t idx = (size_t)addr >> page_shift;
    size_t end = ((size_t)addr + size + page_mask) >> page_shift;

#ifdef _WIN64
    for ( ; idx < end; idx++)
    {
        BYTE *ptr = pages_vprot[idx >> pages_vprot_shift] + (idx & pages_vprot_mask);
        *ptr = (*ptr & ~clear) | set;
    }
#else
    for ( ; idx < end; idx++) pages_vprot[idx] = (pages_vprot[idx] & ~clear) | set;
#endif
}
/***********************************************************************
 *           alloc_pages_vprot
 *
 * Make sure every second-level protection table covering the given range
 * is allocated (64-bit only; on 32-bit the flat array always exists).
 * Returns FALSE if a table could not be mapped; already-mapped tables
 * are left in place in that case.
 */
static BOOL alloc_pages_vprot( const void *addr, size_t size )
{
#ifdef _WIN64
    size_t idx = (size_t)addr >> page_shift;
    size_t end = ((size_t)addr + size + page_mask) >> page_shift;
    size_t i;
    void *ptr;

    assert( end <= pages_vprot_size << pages_vprot_shift );
    for (i = idx >> pages_vprot_shift; i < (end + pages_vprot_mask) >> pages_vprot_shift; i++)
    {
        if (pages_vprot[i]) continue;  /* table already allocated */
        if ((ptr = anon_mmap_alloc( pages_vprot_mask + 1, PROT_READ | PROT_WRITE )) == MAP_FAILED)
            return FALSE;
        pages_vprot[i] = ptr;
    }
#endif
    return TRUE;
}
1018 /***********************************************************************
1019 * compare_view
1021 * View comparison function used for the rb tree.
1023 static int compare_view( const void *addr, const struct wine_rb_entry *entry )
1025 struct file_view *view = WINE_RB_ENTRY_VALUE( entry, struct file_view, entry );
1027 if (addr < view->base) return -1;
1028 if (addr > view->base) return 1;
1029 return 0;
1033 /***********************************************************************
1034 * get_prot_str
1036 static const char *get_prot_str( BYTE prot )
1038 static char buffer[6];
1039 buffer[0] = (prot & VPROT_COMMITTED) ? 'c' : '-';
1040 buffer[1] = (prot & VPROT_GUARD) ? 'g' : ((prot & VPROT_WRITEWATCH) ? 'H' : '-');
1041 buffer[2] = (prot & VPROT_READ) ? 'r' : '-';
1042 buffer[3] = (prot & VPROT_WRITECOPY) ? 'W' : ((prot & VPROT_WRITE) ? 'w' : '-');
1043 buffer[4] = (prot & VPROT_EXEC) ? 'x' : '-';
1044 buffer[5] = 0;
1045 return buffer;
1049 /***********************************************************************
1050 * get_unix_prot
1052 * Convert page protections to protection for mmap/mprotect.
1054 static int get_unix_prot( BYTE vprot )
1056 int prot = 0;
1057 if ((vprot & VPROT_COMMITTED) && !(vprot & VPROT_GUARD))
1059 if (vprot & VPROT_READ) prot |= PROT_READ;
1060 if (vprot & VPROT_WRITE) prot |= PROT_WRITE | PROT_READ;
1061 if (vprot & VPROT_WRITECOPY) prot |= PROT_WRITE | PROT_READ;
1062 if (vprot & VPROT_EXEC) prot |= PROT_EXEC | PROT_READ;
1063 if (vprot & VPROT_WRITEWATCH) prot &= ~PROT_WRITE;
1065 if (!prot) prot = PROT_NONE;
1066 return prot;
/***********************************************************************
 *           dump_view
 *
 * Trace a view: its address range, its type, and the per-page
 * protections, merging runs of pages with identical protection into a
 * single line. virtual_mutex must be held by caller.
 */
static void dump_view( struct file_view *view )
{
    UINT i, count;
    char *addr = view->base;
    BYTE prot = get_page_vprot( addr );

    TRACE( "View: %p - %p", addr, addr + view->size - 1 );
    if (view->protect & VPROT_SYSTEM)
        TRACE( " (builtin image)\n" );
    else if (view->protect & SEC_IMAGE)
        TRACE( " (image)\n" );
    else if (view->protect & SEC_FILE)
        TRACE( " (file)\n" );
    else if (view->protect & (SEC_RESERVE | SEC_COMMIT))
        TRACE( " (anonymous)\n" );
    else
        TRACE( " (valloc)\n");

    /* count is the length (in pages) of the current run; it is reset to 0
     * on a protection change and immediately re-incremented by the loop */
    for (count = i = 1; i < view->size >> page_shift; i++, count++)
    {
        BYTE next = get_page_vprot( addr + (count << page_shift) );
        if (next == prot) continue;
        TRACE( "      %p - %p %s\n",
               addr, addr + (count << page_shift) - 1, get_prot_str(prot) );
        addr += (count << page_shift);
        prot = next;
        count = 0;
    }
    /* flush the final run */
    if (count)
        TRACE( "      %p - %p %s\n",
               addr, addr + (count << page_shift) - 1, get_prot_str(prot) );
}
/***********************************************************************
 *           VIRTUAL_Dump
 *
 * Debug helper: trace every view in the tree, under the virtual mutex.
 */
#ifdef WINE_VM_DEBUG
static void VIRTUAL_Dump(void)
{
    sigset_t sigset;
    struct file_view *view;

    TRACE( "Dump of all virtual memory views:\n" );
    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    WINE_RB_FOR_EACH_ENTRY( view, &views_tree, struct file_view, entry )
    {
        dump_view( view );
    }
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
}
#endif
1127 /***********************************************************************
1128 * find_view
1130 * Find the view containing a given address. virtual_mutex must be held by caller.
1132 * PARAMS
1133 * addr [I] Address
1135 * RETURNS
1136 * View: Success
1137 * NULL: Failure
1139 static struct file_view *find_view( const void *addr, size_t size )
1141 struct wine_rb_entry *ptr = views_tree.root;
1143 if ((const char *)addr + size < (const char *)addr) return NULL; /* overflow */
1145 while (ptr)
1147 struct file_view *view = WINE_RB_ENTRY_VALUE( ptr, struct file_view, entry );
1149 if (view->base > addr) ptr = ptr->left;
1150 else if ((const char *)view->base + view->size <= (const char *)addr) ptr = ptr->right;
1151 else if ((const char *)view->base + view->size < (const char *)addr + size) break; /* size too large */
1152 else return view;
1154 return NULL;
/***********************************************************************
 *           get_zero_bits_mask
 *
 * Build an address mask from a zero_bits argument.  Values below 32 are
 * interpreted as a count of high-order zero bits; larger values are
 * interpreted as an address mask whose highest set bit determines the
 * limit (binary search for the top bit below).
 */
static inline UINT_PTR get_zero_bits_mask( ULONG_PTR zero_bits )
{
    unsigned int shift;

    if (zero_bits == 0) return ~(UINT_PTR)0;

    if (zero_bits < 32) shift = 32 + zero_bits;
    else
    {
        shift = 63;
#ifdef _WIN64
        if (zero_bits >> 32) { shift -= 32; zero_bits >>= 32; }
#endif
        if (zero_bits >> 16) { shift -= 16; zero_bits >>= 16; }
        if (zero_bits >> 8) { shift -= 8; zero_bits >>= 8; }
        if (zero_bits >> 4) { shift -= 4; zero_bits >>= 4; }
        if (zero_bits >> 2) { shift -= 2; zero_bits >>= 2; }
        if (zero_bits >> 1) { shift -= 1; }
    }
    return (UINT_PTR)((~(UINT64)0) >> shift);
}
1184 /***********************************************************************
1185 * is_write_watch_range
1187 static inline BOOL is_write_watch_range( const void *addr, size_t size )
1189 struct file_view *view = find_view( addr, size );
1190 return view && (view->protect & VPROT_WRITEWATCH);
1194 /***********************************************************************
1195 * find_view_range
1197 * Find the first view overlapping at least part of the specified range.
1198 * virtual_mutex must be held by caller.
1200 static struct file_view *find_view_range( const void *addr, size_t size )
1202 struct wine_rb_entry *ptr = views_tree.root;
1204 while (ptr)
1206 struct file_view *view = WINE_RB_ENTRY_VALUE( ptr, struct file_view, entry );
1208 if ((const char *)view->base >= (const char *)addr + size) ptr = ptr->left;
1209 else if ((const char *)view->base + view->size <= (const char *)addr) ptr = ptr->right;
1210 else return view;
1212 return NULL;
/***********************************************************************
 *           find_view_inside_range
 *
 * Find first (resp. last, if top_down) view inside a range.
 * On return *base_ptr/*end_ptr are shrunk to exclude views that stick
 * out of the original range. virtual_mutex must be held by caller.
 */
static struct wine_rb_entry *find_view_inside_range( void **base_ptr, void **end_ptr, int top_down )
{
    struct wine_rb_entry *first = NULL, *ptr = views_tree.root;
    void *base = *base_ptr, *end = *end_ptr;

    /* find the first (resp. last) view inside the range */
    while (ptr)
    {
        struct file_view *view = WINE_RB_ENTRY_VALUE( ptr, struct file_view, entry );
        if ((char *)view->base + view->size >= (char *)end)
        {
            /* view reaches past the range end: clip end, search lower */
            end = min( end, view->base );
            ptr = ptr->left;
        }
        else if (view->base <= base)
        {
            /* view starts at/below the range base: clip base, search higher */
            base = max( (char *)base, (char *)view->base + view->size );
            ptr = ptr->right;
        }
        else
        {
            /* fully inside: remember it and keep walking toward the edge */
            first = ptr;
            ptr = top_down ? ptr->right : ptr->left;
        }
    }

    *base_ptr = base;
    *end_ptr = end;
    return first;
}
/***********************************************************************
 *           try_map_free_area
 *
 * Try mmaping some expected free memory region, eventually stepping and
 * retrying inside it, and return where it actually succeeded, or NULL.
 * step > 0 walks upward, step < 0 downward, step == 0 tries only once.
 */
static void* try_map_free_area( void *base, void *end, ptrdiff_t step,
                                void *start, size_t size, int unix_prot )
{
    void *ptr;

    while (start && base <= start && (char*)start + size <= (char*)end)
    {
        /* MAP_FIXED_NOREPLACE-style attempt: fails with EEXIST if taken */
        if ((ptr = anon_mmap_tryfixed( start, size, unix_prot, 0 )) != MAP_FAILED) return start;
        TRACE( "Found free area is already mapped, start %p.\n", start );
        if (errno != EEXIST)
        {
            ERR( "mmap() error %s, range %p-%p, unix_prot %#x.\n",
                 strerror(errno), start, (char *)start + size, unix_prot );
            return NULL;
        }
        /* stop if the next step would leave the [base, end) window */
        if ((step > 0 && (char *)end - (char *)start < step) ||
            (step < 0 && (char *)start - (char *)base < -step) ||
            step == 0)
            break;
        start = (char *)start + step;
    }

    return NULL;
}
/***********************************************************************
 *           map_free_area
 *
 * Find a free area between views inside the specified range and map it.
 * Walks the gaps between existing views (downward if top_down), trying
 * each gap with try_map_free_area(). virtual_mutex must be held by caller.
 */
static void *map_free_area( void *base, void *end, size_t size, int top_down, int unix_prot )
{
    struct wine_rb_entry *first = find_view_inside_range( &base, &end, top_down );
    ptrdiff_t step = top_down ? -(granularity_mask + 1) : (granularity_mask + 1);
    void *start;

    if (top_down)
    {
        start = ROUND_ADDR( (char *)end - size, granularity_mask );
        if (start >= end || start < base) return NULL;

        while (first)
        {
            struct file_view *view = WINE_RB_ENTRY_VALUE( first, struct file_view, entry );
            /* try the gap above this view */
            if ((start = try_map_free_area( (char *)view->base + view->size, (char *)start + size, step,
                                            start, size, unix_prot ))) break;
            start = ROUND_ADDR( (char *)view->base - size, granularity_mask );
            /* stop if remaining space is not large enough */
            if (!start || start >= end || start < base) return NULL;
            first = wine_rb_prev( first );
        }
    }
    else
    {
        start = ROUND_ADDR( (char *)base + granularity_mask, granularity_mask );
        if (!start || start >= end || (char *)end - (char *)start < size) return NULL;

        while (first)
        {
            struct file_view *view = WINE_RB_ENTRY_VALUE( first, struct file_view, entry );
            /* try the gap below this view */
            if ((start = try_map_free_area( start, view->base, step,
                                            start, size, unix_prot ))) break;
            start = ROUND_ADDR( (char *)view->base + view->size + granularity_mask, granularity_mask );
            /* stop if remaining space is not large enough */
            if (!start || start >= end || (char *)end - (char *)start < size) return NULL;
            first = wine_rb_next( first );
        }
    }

    /* no view left in the range: try the remaining free span directly */
    if (!first)
        return try_map_free_area( base, end, step, start, size, unix_prot );

    return start;
}
/***********************************************************************
 *           find_reserved_free_area
 *
 * Find a free area between views inside the specified range, by walking
 * the free_ranges list (downward if top_down). Returns a granularity-
 * aligned start address or NULL. virtual_mutex must be held by caller.
 * The range must be inside the preloader reserved range.
 */
static void *find_reserved_free_area( void *base, void *end, size_t size, int top_down )
{
    struct range_entry *range;
    void *start;

    /* align the usable window to allocation granularity */
    base = ROUND_ADDR( (char *)base + granularity_mask, granularity_mask );
    end = (char *)ROUND_ADDR( (char *)end - size, granularity_mask ) + size;

    if (top_down)
    {
        start = (char *)end - size;
        range = free_ranges_lower_bound( start );
        assert(range != free_ranges_end && range->end >= start);

        /* if the free range is too small, move start down to its top */
        if ((char *)range->end - (char *)start < size) start = ROUND_ADDR( (char *)range->end - size, granularity_mask );
        do
        {
            if (start >= end || start < base || (char *)end - (char *)start < size) return NULL;
            if (start < range->end && start >= range->base && (char *)range->end - (char *)start >= size) break;
            if (--range < free_ranges) return NULL;
            start = ROUND_ADDR( (char *)range->end - size, granularity_mask );
        }
        while (1);
    }
    else
    {
        start = base;
        range = free_ranges_lower_bound( start );
        assert(range != free_ranges_end && range->end >= start);

        /* if start falls before the free range, bump it up to the range base */
        if (start < range->base) start = ROUND_ADDR( (char *)range->base + granularity_mask, granularity_mask );
        do
        {
            if (start >= end || start < base || (char *)end - (char *)start < size) return NULL;
            if (start < range->end && start >= range->base && (char *)range->end - (char *)start >= size) break;
            if (++range == free_ranges_end) return NULL;
            start = ROUND_ADDR( (char *)range->base + granularity_mask, granularity_mask );
        }
        while (1);
    }
    return start;
}
1389 /***********************************************************************
1390 * add_reserved_area
1392 * Add a reserved area to the list maintained by libwine.
1393 * virtual_mutex must be held by caller.
1395 static void add_reserved_area( void *addr, size_t size )
1397 TRACE( "adding %p-%p\n", addr, (char *)addr + size );
1399 if (addr < user_space_limit)
1401 /* unmap the part of the area that is below the limit */
1402 assert( (char *)addr + size > (char *)user_space_limit );
1403 munmap( addr, (char *)user_space_limit - (char *)addr );
1404 size -= (char *)user_space_limit - (char *)addr;
1405 addr = user_space_limit;
1407 /* blow away existing mappings */
1408 anon_mmap_fixed( addr, size, PROT_NONE, MAP_NORESERVE );
1409 mmap_add_reserved_area( addr, size );
/***********************************************************************
 *           remove_reserved_area
 *
 * Remove a reserved area from the list maintained by libwine, unmapping
 * the placeholder pages that are not covered by an existing view.
 * virtual_mutex must be held by caller.
 */
static void remove_reserved_area( void *addr, size_t size )
{
    struct file_view *view;

    TRACE( "removing %p-%p\n", addr, (char *)addr + size );
    mmap_remove_reserved_area( addr, size );

    /* unmap areas not covered by an existing view */
    WINE_RB_FOR_EACH_ENTRY( view, &views_tree, struct file_view, entry )
    {
        if ((char *)view->base >= (char *)addr + size) break;      /* view past the area */
        if ((char *)view->base + view->size <= (char *)addr) continue;  /* view before the area */
        /* unmap the gap before this view */
        if (view->base > addr) munmap( addr, (char *)view->base - (char *)addr );
        /* view reaches past the area: nothing left to unmap */
        if ((char *)view->base + view->size > (char *)addr + size) return;
        /* continue after this view */
        size = (char *)addr + size - ((char *)view->base + view->size);
        addr = (char *)view->base + view->size;
    }
    /* unmap the tail after the last overlapping view */
    munmap( addr, size );
}
/* argument/result packet for get_area_boundary_callback */
struct area_boundary
{
    void  *base;      /* base of the queried region */
    size_t size;      /* size of the queried region */
    void  *boundary;  /* out: lowest reserved/non-reserved boundary found, or NULL */
};
/***********************************************************************
 *           get_area_boundary_callback
 *
 * Get lowest boundary address between reserved area and non-reserved area
 * in the specified region. If no boundaries are found, result is NULL.
 * Callback for mmap_enum_reserved_areas; returns 1 to stop enumeration.
 * virtual_mutex must be held by caller.
 */
static int get_area_boundary_callback( void *start, SIZE_T size, void *arg )
{
    struct area_boundary *area = arg;
    void *end = (char *)start + size;

    area->boundary = NULL;
    if (area->base >= end) return 0;                                  /* reserved area below region */
    if ((char *)start >= (char *)area->base + area->size) return 1;   /* reserved area above region */
    if (area->base >= start)
    {
        /* region starts inside the reserved area */
        if ((char *)area->base + area->size > (char *)end)
        {
            area->boundary = end;  /* boundary where the reserved area ends */
            return 1;
        }
        return 0;  /* region entirely inside: no boundary */
    }
    area->boundary = start;  /* boundary where the reserved area begins */
    return 1;
}
/***********************************************************************
 *           unmap_area
 *
 * Unmap an area, or simply replace it by an empty mapping if it is
 * in a reserved area.  Areas straddling a reserved-area boundary are
 * split and handled recursively. virtual_mutex must be held by caller.
 */
static inline void unmap_area( void *addr, size_t size )
{
    switch (mmap_is_in_reserved_area( addr, size ))
    {
    case -1: /* partially in a reserved area */
    {
        struct area_boundary area;
        size_t lower_size;
        area.base = addr;
        area.size = size;
        mmap_enum_reserved_areas( get_area_boundary_callback, &area, 0 );
        assert( area.boundary );
        lower_size = (char *)area.boundary - (char *)addr;
        /* recurse on both halves: each is now fully in or out */
        unmap_area( addr, lower_size );
        unmap_area( area.boundary, size - lower_size );
        break;
    }
    case 1:  /* in a reserved area: keep a PROT_NONE placeholder */
        anon_mmap_fixed( addr, size, PROT_NONE, MAP_NORESERVE );
        break;
    default:
    case 0:  /* not in a reserved area */
        if (is_beyond_limit( addr, size, user_space_limit ))
            add_reserved_area( addr, size );
        else
            munmap( addr, size );
        break;
    }
}
1513 /***********************************************************************
1514 * alloc_view
1516 * Allocate a new view. virtual_mutex must be held by caller.
1518 static struct file_view *alloc_view(void)
1520 if (next_free_view)
1522 struct file_view *ret = next_free_view;
1523 next_free_view = *(struct file_view **)ret;
1524 return ret;
1526 if (view_block_start == view_block_end)
1528 void *ptr = anon_mmap_alloc( view_block_size, PROT_READ | PROT_WRITE );
1529 if (ptr == MAP_FAILED) return NULL;
1530 view_block_start = ptr;
1531 view_block_end = view_block_start + view_block_size / sizeof(*view_block_start);
1533 return view_block_start++;
/***********************************************************************
 *           delete_view
 *
 * Deletes a view: unmaps it (unless it is a system view that libwine
 * owns), clears its page protections, removes it from the free-ranges
 * bookkeeping and the rb tree, and pushes it on the free list.
 * virtual_mutex must be held by caller.
 */
static void delete_view( struct file_view *view ) /* [in] View */
{
    if (!(view->protect & VPROT_SYSTEM)) unmap_area( view->base, view->size );
    set_page_vprot( view->base, view->size, 0 );
    if (mmap_is_in_reserved_area( view->base, view->size ))
        free_ranges_remove_view( view );
    wine_rb_remove( &views_tree, &view->entry );
    /* intrusive free list: reuse the view memory to store the next pointer */
    *(struct file_view **)view = next_free_view;
    next_free_view = view;
}
/***********************************************************************
 *           create_view
 *
 * Create a view for an already-mapped range, registering it in the rb
 * tree and the page vprot table. virtual_mutex must be held by caller.
 */
static NTSTATUS create_view( struct file_view **view_ret, void *base, size_t size, unsigned int vprot )
{
    struct file_view *view;
    int unix_prot = get_unix_prot( vprot );

    assert( !((UINT_PTR)base & page_mask) );
    assert( !(size & page_mask) );

    /* Check for overlapping views. This can happen if the previous view
     * was a system view that got unmapped behind our back. In that case
     * we recover by simply deleting it. */

    while ((view = find_view_range( base, size )))
    {
        TRACE( "overlapping view %p-%p for %p-%p\n",
               view->base, (char *)view->base + view->size, base, (char *)base + size );
        assert( view->protect & VPROT_SYSTEM );
        delete_view( view );
    }

    if (!alloc_pages_vprot( base, size )) return STATUS_NO_MEMORY;

    /* Create the view structure */

    if (!(view = alloc_view()))
    {
        FIXME( "out of memory for %p-%p\n", base, (char *)base + size );
        return STATUS_NO_MEMORY;
    }

    view->base    = base;
    view->size    = size;
    view->protect = vprot;
    set_page_vprot( base, size, vprot );

    wine_rb_put( &views_tree, view->base, &view->entry );
    if (mmap_is_in_reserved_area( view->base, view->size ))
        free_ranges_insert_view( view );

    *view_ret = view;

    /* honor WINEFORCEEXEC-style setting: readable pages become executable */
    if (force_exec_prot && (unix_prot & PROT_READ) && !(unix_prot & PROT_EXEC))
    {
        TRACE( "forcing exec permission on %p-%p\n", base, (char *)base + size - 1 );
        mprotect( base, size, unix_prot | PROT_EXEC );
    }
    return STATUS_SUCCESS;
}
1609 /***********************************************************************
1610 * get_win32_prot
1612 * Convert page protections to Win32 flags.
1614 static DWORD get_win32_prot( BYTE vprot, unsigned int map_prot )
1616 DWORD ret = VIRTUAL_Win32Flags[vprot & 0x0f];
1617 if (vprot & VPROT_GUARD) ret |= PAGE_GUARD;
1618 if (map_prot & SEC_NOCACHE) ret |= PAGE_NOCACHE;
1619 return ret;
1623 /***********************************************************************
1624 * get_vprot_flags
1626 * Build page protections from Win32 flags.
1628 static NTSTATUS get_vprot_flags( DWORD protect, unsigned int *vprot, BOOL image )
1630 switch(protect & 0xff)
1632 case PAGE_READONLY:
1633 *vprot = VPROT_READ;
1634 break;
1635 case PAGE_READWRITE:
1636 if (image)
1637 *vprot = VPROT_READ | VPROT_WRITECOPY;
1638 else
1639 *vprot = VPROT_READ | VPROT_WRITE;
1640 break;
1641 case PAGE_WRITECOPY:
1642 *vprot = VPROT_READ | VPROT_WRITECOPY;
1643 break;
1644 case PAGE_EXECUTE:
1645 *vprot = VPROT_EXEC;
1646 break;
1647 case PAGE_EXECUTE_READ:
1648 *vprot = VPROT_EXEC | VPROT_READ;
1649 break;
1650 case PAGE_EXECUTE_READWRITE:
1651 if (image)
1652 *vprot = VPROT_EXEC | VPROT_READ | VPROT_WRITECOPY;
1653 else
1654 *vprot = VPROT_EXEC | VPROT_READ | VPROT_WRITE;
1655 break;
1656 case PAGE_EXECUTE_WRITECOPY:
1657 *vprot = VPROT_EXEC | VPROT_READ | VPROT_WRITECOPY;
1658 break;
1659 case PAGE_NOACCESS:
1660 *vprot = 0;
1661 break;
1662 default:
1663 return STATUS_INVALID_PAGE_PROTECTION;
1665 if (protect & PAGE_GUARD) *vprot |= VPROT_GUARD;
1666 return STATUS_SUCCESS;
/***********************************************************************
 *           mprotect_exec
 *
 * Wrapper for mprotect, adds PROT_EXEC if forced by force_exec_prot.
 * Returns 0 on success, -1 on failure (like mprotect).
 */
static inline int mprotect_exec( void *base, size_t size, int unix_prot )
{
    if (force_exec_prot && (unix_prot & PROT_READ) && !(unix_prot & PROT_EXEC))
    {
        TRACE( "forcing exec permission on %p-%p\n", base, (char *)base + size - 1 );
        if (!mprotect( base, size, unix_prot | PROT_EXEC )) return 0;
        /* exec + write may legitimately fail, in that case fall back to write only */
        if (!(unix_prot & PROT_WRITE)) return -1;
    }

    return mprotect( base, size, unix_prot );
}
/***********************************************************************
 *           mprotect_range
 *
 * Call mprotect on a page range, applying the protections from the
 * per-page byte (with optional set/clear modifier bits), coalescing
 * consecutive pages with identical unix protections into single calls.
 */
static void mprotect_range( void *base, size_t size, BYTE set, BYTE clear )
{
    size_t i, count;
    char *addr = ROUND_ADDR( base, page_mask );
    int prot, next;

    size = ROUND_SIZE( base, size );
    prot = get_unix_prot( (get_page_vprot( addr ) & ~clear ) | set );
    /* count tracks the current run; reset to 0 on change, re-incremented by the loop */
    for (count = i = 1; i < size >> page_shift; i++, count++)
    {
        next = get_unix_prot( (get_page_vprot( addr + (count << page_shift) ) & ~clear) | set );
        if (next == prot) continue;
        mprotect_exec( addr, count << page_shift, prot );
        addr += count << page_shift;
        prot = next;
        count = 0;
    }
    if (count) mprotect_exec( addr, count << page_shift, prot );
}
1715 /***********************************************************************
1716 * set_vprot
1718 * Change the protection of a range of pages.
1720 static BOOL set_vprot( struct file_view *view, void *base, size_t size, BYTE vprot )
1722 int unix_prot = get_unix_prot(vprot);
1724 if (view->protect & VPROT_WRITEWATCH)
1726 /* each page may need different protections depending on write watch flag */
1727 set_page_vprot_bits( base, size, vprot & ~VPROT_WRITEWATCH, ~vprot & ~VPROT_WRITEWATCH );
1728 mprotect_range( base, size, 0, 0 );
1729 return TRUE;
1731 if (mprotect_exec( base, size, unix_prot )) return FALSE;
1732 set_page_vprot( base, size, vprot );
1733 return TRUE;
/***********************************************************************
 *           set_protection
 *
 * Set page protections on a range of pages, validating the requested
 * protection against the view type first.
 */
static NTSTATUS set_protection( struct file_view *view, void *base, SIZE_T size, ULONG protect )
{
    unsigned int vprot;
    NTSTATUS status;

    if ((status = get_vprot_flags( protect, &vprot, view->protect & SEC_IMAGE ))) return status;
    if (is_view_valloc( view ))
    {
        /* anonymous allocations cannot be write-copy */
        if (vprot & VPROT_WRITECOPY) return STATUS_INVALID_PAGE_PROTECTION;
    }
    else
    {
        /* file mappings: requested access must be a subset of the view's access */
        BYTE access = vprot & (VPROT_READ | VPROT_WRITE | VPROT_EXEC);
        if ((view->protect & access) != access) return STATUS_INVALID_PAGE_PROTECTION;
    }

    if (!set_vprot( view, base, size, vprot | VPROT_COMMITTED )) return STATUS_ACCESS_DENIED;
    return STATUS_SUCCESS;
}
/***********************************************************************
 *           update_write_watches
 *
 * After a watched range has been written, clear the write watch flag on
 * the pages that were actually accessed and re-apply protections so the
 * remaining pages keep faulting on write.
 */
static void update_write_watches( void *base, size_t size, size_t accessed_size )
{
    TRACE( "updating watch %p-%p-%p\n", base, (char *)base + accessed_size, (char *)base + size );
    /* clear write watch flag on accessed pages */
    set_page_vprot_bits( base, accessed_size, 0, VPROT_WRITEWATCH );
    /* restore page protections on the entire range */
    mprotect_range( base, size, 0, 0 );
}
/***********************************************************************
 *           reset_write_watches
 *
 * Reset write watches in a memory range: re-arm the watch flag on every
 * page and re-apply protections so the next write faults again.
 */
static void reset_write_watches( void *base, SIZE_T size )
{
    set_page_vprot_bits( base, size, VPROT_WRITEWATCH, 0 );
    mprotect_range( base, size, 0, 0 );
}
1788 /***********************************************************************
1789 * unmap_extra_space
1791 * Release the extra memory while keeping the range starting on the granularity boundary.
1793 static inline void *unmap_extra_space( void *ptr, size_t total_size, size_t wanted_size )
1795 if ((ULONG_PTR)ptr & granularity_mask)
1797 size_t extra = granularity_mask + 1 - ((ULONG_PTR)ptr & granularity_mask);
1798 munmap( ptr, extra );
1799 ptr = (char *)ptr + extra;
1800 total_size -= extra;
1802 if (total_size > wanted_size)
1803 munmap( (char *)ptr + wanted_size, total_size - wanted_size );
1804 return ptr;
/* argument/result packet for alloc_reserved_area_callback */
struct alloc_area
{
    size_t size;      /* requested allocation size */
    int    top_down;  /* allocate from the top of the range? */
    void  *limit;     /* upper address limit */
    void  *result;    /* out: allocated base address */
};
/***********************************************************************
 *           alloc_reserved_area_callback
 *
 * Try to map some space inside a reserved area. Callback for
 * mmap_enum_reserved_areas; returns 1 (stop) once alloc->result is set.
 */
static int alloc_reserved_area_callback( void *start, SIZE_T size, void *arg )
{
    struct alloc_area *alloc = arg;
    void *end = (char *)start + size;

    if (start < address_space_start) start = address_space_start;
    if (is_beyond_limit( start, size, alloc->limit )) end = alloc->limit;
    if (start >= end) return 0;

    /* make sure we don't touch the preloader reserved range */
    if (preload_reserve_end >= start)
    {
        if (preload_reserve_end >= end)
        {
            if (preload_reserve_start <= start) return 0;  /* no space in that area */
            if (preload_reserve_start < end) end = preload_reserve_start;
        }
        else if (preload_reserve_start <= start) start = preload_reserve_end;
        else
        {
            /* range is split in two by the preloader reservation, try first part */
            if ((alloc->result = find_reserved_free_area( start, preload_reserve_start, alloc->size,
                                                          alloc->top_down )))
                return 1;
            /* then fall through to try second part */
            start = preload_reserve_end;
        }
    }
    if ((alloc->result = find_reserved_free_area( start, end, alloc->size, alloc->top_down )))
        return 1;

    return 0;
}
/***********************************************************************
 *           map_fixed_area
 *
 * mmap the fixed memory area, handling reserved areas (where the pages
 * already exist as placeholders) and ranges straddling a reserved-area
 * boundary (split and mapped recursively).
 * virtual_mutex must be held by caller.
 */
static NTSTATUS map_fixed_area( void *base, size_t size, unsigned int vprot )
{
    void *ptr;

    switch (mmap_is_in_reserved_area( base, size ))
    {
    case -1: /* partially in a reserved area */
    {
        NTSTATUS status;
        struct area_boundary area;
        size_t lower_size;
        area.base = base;
        area.size = size;
        mmap_enum_reserved_areas( get_area_boundary_callback, &area, 0 );
        assert( area.boundary );
        lower_size = (char *)area.boundary - (char *)base;
        status = map_fixed_area( base, lower_size, vprot );
        if (status == STATUS_SUCCESS)
        {
            status = map_fixed_area( area.boundary, size - lower_size, vprot);
            /* roll back the first half if the second failed */
            if (status != STATUS_SUCCESS) unmap_area( base, lower_size );
        }
        return status;
    }
    case 0:  /* not in a reserved area, do a normal allocation */
        if ((ptr = anon_mmap_tryfixed( base, size, get_unix_prot(vprot), 0 )) == MAP_FAILED)
        {
            if (errno == ENOMEM) return STATUS_NO_MEMORY;
            if (errno == EEXIST) return STATUS_CONFLICTING_ADDRESSES;
            return STATUS_INVALID_PARAMETER;
        }
        break;

    default:
    case 1:  /* in a reserved area, make sure the address is available */
        if (find_view_range( base, size )) return STATUS_CONFLICTING_ADDRESSES;
        /* replace the reserved area by our mapping */
        if ((ptr = anon_mmap_fixed( base, size, get_unix_prot(vprot), 0 )) != base)
            return STATUS_INVALID_PARAMETER;
        break;
    }
    if (is_beyond_limit( ptr, size, working_set_limit )) working_set_limit = address_space_limit;
    return STATUS_SUCCESS;
}
/***********************************************************************
 *           map_view
 *
 * Create a view and mmap the corresponding memory area.  A non-NULL
 * base requests a fixed mapping; otherwise an address is chosen from
 * the reserved areas, a free-area scan (when zero_bits constrains the
 * address), or a plain anonymous mmap. virtual_mutex must be held by caller.
 */
static NTSTATUS map_view( struct file_view **view_ret, void *base, size_t size,
                          int top_down, unsigned int vprot, ULONG_PTR zero_bits )
{
    void *ptr;
    NTSTATUS status;

    if (base)
    {
        if (is_beyond_limit( base, size, address_space_limit ))
            return STATUS_WORKING_SET_LIMIT_RANGE;
        status = map_fixed_area( base, size, vprot );
        if (status != STATUS_SUCCESS) return status;
        ptr = base;
    }
    else
    {
        /* over-allocate so the result can be trimmed to granularity alignment */
        size_t view_size = size + granularity_mask + 1;
        struct alloc_area alloc;

        alloc.size = size;
        alloc.top_down = top_down;
        alloc.limit = (void*)(get_zero_bits_mask( zero_bits ) & (UINT_PTR)user_space_limit);

        if (mmap_enum_reserved_areas( alloc_reserved_area_callback, &alloc, top_down ))
        {
            ptr = alloc.result;
            TRACE( "got mem in reserved area %p-%p\n", ptr, (char *)ptr + size );
            if (anon_mmap_fixed( ptr, size, get_unix_prot(vprot), 0 ) != ptr)
                return STATUS_INVALID_PARAMETER;
            goto done;
        }

        if (zero_bits)
        {
            /* constrained address: scan the gaps between views explicitly */
            if (!(ptr = map_free_area( address_space_start, alloc.limit, size,
                                       top_down, get_unix_prot(vprot) )))
                return STATUS_NO_MEMORY;
            TRACE( "got mem with map_free_area %p-%p\n", ptr, (char *)ptr + size );
            goto done;
        }

        for (;;)
        {
            if ((ptr = anon_mmap_alloc( view_size, get_unix_prot(vprot) )) == MAP_FAILED)
            {
                if (errno == ENOMEM) return STATUS_NO_MEMORY;
                return STATUS_INVALID_PARAMETER;
            }
            TRACE( "got mem with anon mmap %p-%p\n", ptr, (char *)ptr + size );
            /* if we got something beyond the user limit, unmap it and retry */
            if (is_beyond_limit( ptr, view_size, user_space_limit )) add_reserved_area( ptr, view_size );
            else break;
        }
        ptr = unmap_extra_space( ptr, view_size, size );
    }
done:
    status = create_view( view_ret, ptr, size, vprot );
    if (status != STATUS_SUCCESS) unmap_area( ptr, size );
    return status;
}
/***********************************************************************
 *           map_file_into_view
 *
 * Wrapper for mmap() to map a file into a view, falling back to read()
 * into anonymous memory if mmap fails or the media is removable.
 * virtual_mutex must be held by caller.
 */
static NTSTATUS map_file_into_view( struct file_view *view, int fd, size_t start, size_t size,
                                    off_t offset, unsigned int vprot, BOOL removable )
{
    void *ptr;
    int prot = get_unix_prot( vprot | VPROT_COMMITTED /* make sure it is accessible */ );
    unsigned int flags = MAP_FIXED | ((vprot & VPROT_WRITECOPY) ? MAP_PRIVATE : MAP_SHARED);

    assert( start < view->size );
    assert( start + size <= view->size );

    if (force_exec_prot && (vprot & VPROT_READ))
    {
        TRACE( "forcing exec permission on mapping %p-%p\n",
               (char *)view->base + start, (char *)view->base + start + size - 1 );
        prot |= PROT_EXEC;
    }

    /* only try mmap if media is not removable (or if we require write access) */
    if (!removable || (flags & MAP_SHARED))
    {
        if (mmap( (char *)view->base + start, size, prot, flags, fd, offset ) != MAP_FAILED)
            goto done;

        switch (errno)
        {
        case EINVAL:  /* file offset is not page-aligned, fall back to read() */
            if (flags & MAP_SHARED) return STATUS_INVALID_PARAMETER;
            break;
        case ENOEXEC:
        case ENODEV:  /* filesystem doesn't support mmap(), fall back to read() */
            if (vprot & VPROT_WRITE)
            {
                ERR( "shared writable mmap not supported, broken filesystem?\n" );
                return STATUS_NOT_SUPPORTED;
            }
            break;
        case EACCES:
        case EPERM:  /* noexec filesystem, fall back to read() */
            if (flags & MAP_SHARED)
            {
                if (prot & PROT_EXEC) ERR( "failed to set PROT_EXEC on file map, noexec filesystem?\n" );
                return STATUS_ACCESS_DENIED;
            }
            if (prot & PROT_EXEC) WARN( "failed to set PROT_EXEC on file map, noexec filesystem?\n" );
            break;
        default:
            return STATUS_NO_MEMORY;
        }
    }

    /* Reserve the memory with an anonymous mmap */
    ptr = anon_mmap_fixed( (char *)view->base + start, size, PROT_READ | PROT_WRITE, 0 );
    if (ptr == MAP_FAILED) return STATUS_NO_MEMORY;
    /* Now read in the file */
    pread( fd, ptr, size, offset );
    if (prot != (PROT_READ|PROT_WRITE)) mprotect( ptr, size, prot );  /* Set the right protection */
done:
    set_page_vprot( (char *)view->base + start, size, vprot );
    return STATUS_SUCCESS;
}
/***********************************************************************
 *           get_committed_size
 *
 * Get the size of the committed range starting at base.
 * Also return the protections for the first page.
 * For SEC_RESERVE views the commit state is tracked by the wineserver,
 * so the server is queried and the local vprot cache updated.
 */
static SIZE_T get_committed_size( struct file_view *view, void *base, BYTE *vprot )
{
    SIZE_T i, start;

    start = ((char *)base - (char *)view->base) >> page_shift;
    *vprot = get_page_vprot( base );

    if (view->protect & SEC_RESERVE)
    {
        SIZE_T ret = 0;
        SERVER_START_REQ( get_mapping_committed_range )
        {
            req->base = wine_server_client_ptr( view->base );
            req->offset = start << page_shift;
            if (!wine_server_call( req ))
            {
                ret = reply->size;
                if (reply->committed)
                {
                    *vprot |= VPROT_COMMITTED;
                    /* cache the server's answer in the per-page bytes */
                    set_page_vprot_bits( base, ret, VPROT_COMMITTED, 0 );
                }
            }
        }
        SERVER_END_REQ;
        return ret;
    }
    /* scan until the commit state flips */
    for (i = start + 1; i < view->size >> page_shift; i++)
        if ((*vprot ^ get_page_vprot( (char *)view->base + (i << page_shift) )) & VPROT_COMMITTED) break;
    return (i - start) << page_shift;
}
2081 /***********************************************************************
2082 * decommit_view
2084 * Decommit some pages of a given view.
2085 * virtual_mutex must be held by caller.
2087 static NTSTATUS decommit_pages( struct file_view *view, size_t start, size_t size )
2089 if (anon_mmap_fixed( (char *)view->base + start, size, PROT_NONE, 0 ) != MAP_FAILED)
2091 set_page_vprot_bits( (char *)view->base + start, size, 0, VPROT_COMMITTED );
2092 return STATUS_SUCCESS;
2094 return STATUS_NO_MEMORY;
/***********************************************************************
 *           allocate_dos_memory
 *
 * Allocate the DOS memory range (first 1MB + 64K HMA).  Tries to get
 * the real low addresses; if the 64K-1.1MB range cannot be mapped the
 * view is allocated anywhere instead.
 */
static NTSTATUS allocate_dos_memory( struct file_view **view, unsigned int vprot )
{
    size_t size;
    void *addr = NULL;
    void * const low_64k = (void *)0x10000;
    const size_t dosmem_size = 0x110000;
    int unix_prot = get_unix_prot( vprot );

    /* check for existing view */

    if (find_view_range( 0, dosmem_size )) return STATUS_CONFLICTING_ADDRESSES;

    /* check without the first 64K */

    if (mmap_is_in_reserved_area( low_64k, dosmem_size - 0x10000 ) != 1)
    {
        addr = anon_mmap_tryfixed( low_64k, dosmem_size - 0x10000, unix_prot, 0 );
        /* low addresses unavailable: fall back to an arbitrary placement */
        if (addr == MAP_FAILED) return map_view( view, NULL, dosmem_size, FALSE, vprot, 0 );
    }

    /* now try to allocate the low 64K too */

    if (mmap_is_in_reserved_area( NULL, 0x10000 ) != 1)
    {
        addr = anon_mmap_tryfixed( (void *)page_size, 0x10000 - page_size, unix_prot, 0 );
        if (addr != MAP_FAILED)
        {
            /* page 0 usually requires special privileges (mmap_min_addr) */
            if (!anon_mmap_fixed( NULL, page_size, unix_prot, 0 ))
            {
                addr = NULL;
                TRACE( "successfully mapped low 64K range\n" );
            }
            else TRACE( "failed to map page 0\n" );
        }
        else
        {
            addr = low_64k;
            TRACE( "failed to map low 64K range\n" );
        }
    }

    /* now reserve the whole range */

    size = (char *)dosmem_size - (char *)addr;
    anon_mmap_fixed( addr, size, unix_prot, 0 );
    return create_view( view, addr, size, vprot );
}
/***********************************************************************
 *           map_pe_header
 *
 * Map the header of a PE file into memory.
 */
static NTSTATUS map_pe_header( void *ptr, size_t size, int fd, BOOL *removable )
{
    if (!size) return STATUS_INVALID_IMAGE_FORMAT;

    if (!*removable)
    {
        if (mmap( ptr, size, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_FIXED|MAP_PRIVATE, fd, 0 ) != MAP_FAILED)
            return STATUS_SUCCESS;

        /* mmap failed: decide whether a plain read fallback makes sense */
        switch (errno)
        {
        case EPERM:
        case EACCES:
            WARN( "noexec file system, falling back to read\n" );
            break;
        case ENOEXEC:
        case ENODEV:
            WARN( "file system doesn't support mmap, falling back to read\n" );
            break;
        default:
            return STATUS_NO_MEMORY;
        }
        /* once we fall back to read, treat the mapping as removable so later
         * sections are read rather than mmapped too */
        *removable = TRUE;
    }
    /* NOTE(review): pread result is unchecked — a short read would leave the
     * header partially filled; presumably acceptable since protections are
     * fixed up later, but worth confirming */
    pread( fd, ptr, size, 0 );
    return STATUS_SUCCESS;  /* page protections will be updated later */
}
/***********************************************************************
 *           map_image_into_view
 *
 * Map an executable (PE format) image into an existing view.
 * virtual_mutex must be held by caller.
 */
static NTSTATUS map_image_into_view( struct file_view *view, const WCHAR *filename, int fd, void *orig_base,
                                     SIZE_T header_size, ULONG image_flags, int shared_fd, BOOL removable )
{
    IMAGE_DOS_HEADER *dos;
    IMAGE_NT_HEADERS *nt;
    IMAGE_SECTION_HEADER sections[96];
    IMAGE_SECTION_HEADER *sec;
    IMAGE_DATA_DIRECTORY *imports;
    NTSTATUS status = STATUS_CONFLICTING_ADDRESSES;
    int i;
    off_t pos;
    struct stat st;
    char *header_end, *header_start;
    char *ptr = view->base;
    SIZE_T total_size = view->size;

    TRACE_(module)( "mapping PE file %s at %p-%p\n", debugstr_w(filename), ptr, ptr + total_size );

    /* map the header */

    fstat( fd, &st );
    header_size = min( header_size, st.st_size );
    if ((status = map_pe_header( view->base, header_size, fd, &removable ))) return status;

    status = STATUS_INVALID_IMAGE_FORMAT;  /* generic error */
    dos = (IMAGE_DOS_HEADER *)ptr;
    nt = (IMAGE_NT_HEADERS *)(ptr + dos->e_lfanew);
    header_end = ptr + ROUND_SIZE( 0, header_size );
    /* zero the tail of the last header page beyond what was read from file */
    memset( ptr + header_size, 0, header_end - (ptr + header_size) );
    if ((char *)(nt + 1) > header_end) return status;
    header_start = (char*)&nt->OptionalHeader+nt->FileHeader.SizeOfOptionalHeader;
    if (nt->FileHeader.NumberOfSections > ARRAY_SIZE( sections )) return status;
    if (header_start + sizeof(*sections) * nt->FileHeader.NumberOfSections > header_end) return status;
    /* Some applications (e.g. the Steam version of Borderlands) map over the top of the section headers,
     * copying the headers into local memory is necessary to properly load such applications. */
    memcpy(sections, header_start, sizeof(*sections) * nt->FileHeader.NumberOfSections);
    sec = sections;

    imports = nt->OptionalHeader.DataDirectory + IMAGE_DIRECTORY_ENTRY_IMPORT;
    if (!imports->Size || !imports->VirtualAddress) imports = NULL;

    /* check for non page-aligned binary */

    if (image_flags & IMAGE_FLAGS_ImageMappedFlat)
    {
        /* unaligned sections, this happens for native subsystem binaries */
        /* in that case Windows simply maps in the whole file */

        total_size = min( total_size, ROUND_SIZE( 0, st.st_size ));
        if (map_file_into_view( view, fd, 0, total_size, 0, VPROT_COMMITTED | VPROT_READ | VPROT_WRITECOPY,
                                removable ) != STATUS_SUCCESS) return status;

        /* check that all sections are loaded at the right offset */
        if (nt->OptionalHeader.FileAlignment != nt->OptionalHeader.SectionAlignment) return status;
        for (i = 0; i < nt->FileHeader.NumberOfSections; i++)
        {
            if (sec[i].VirtualAddress != sec[i].PointerToRawData)
                return status;  /* Windows refuses to load in that case too */
        }

        /* set the image protections */
        set_vprot( view, ptr, total_size, VPROT_COMMITTED | VPROT_READ | VPROT_WRITECOPY | VPROT_EXEC );

        /* no relocations are performed on non page-aligned binaries */
        return STATUS_SUCCESS;
    }

    /* map all the sections */

    for (i = pos = 0; i < nt->FileHeader.NumberOfSections; i++, sec++)
    {
        static const SIZE_T sector_align = 0x1ff;
        SIZE_T map_size, file_start, file_size, end;

        if (!sec->Misc.VirtualSize)
            map_size = ROUND_SIZE( 0, sec->SizeOfRawData );
        else
            map_size = ROUND_SIZE( 0, sec->Misc.VirtualSize );

        /* file positions are rounded to sector boundaries regardless of OptionalHeader.FileAlignment */
        file_start = sec->PointerToRawData & ~sector_align;
        file_size = (sec->SizeOfRawData + (sec->PointerToRawData & sector_align) + sector_align) & ~sector_align;
        if (file_size > map_size) file_size = map_size;

        /* a few sanity checks */
        end = sec->VirtualAddress + ROUND_SIZE( sec->VirtualAddress, map_size );
        if (sec->VirtualAddress > total_size || end > total_size || end < sec->VirtualAddress)
        {
            WARN_(module)( "%s section %.8s too large (%x+%lx/%lx)\n",
                           debugstr_w(filename), sec->Name, sec->VirtualAddress, map_size, total_size );
            return status;
        }

        if ((sec->Characteristics & IMAGE_SCN_MEM_SHARED) &&
            (sec->Characteristics & IMAGE_SCN_MEM_WRITE))
        {
            /* writable shared sections come from the separate shared mapping
             * so that all processes see the same data */
            TRACE_(module)( "%s mapping shared section %.8s at %p off %x (%x) size %lx (%lx) flags %x\n",
                            debugstr_w(filename), sec->Name, ptr + sec->VirtualAddress,
                            sec->PointerToRawData, (int)pos, file_size, map_size,
                            sec->Characteristics );
            if (map_file_into_view( view, shared_fd, sec->VirtualAddress, map_size, pos,
                                    VPROT_COMMITTED | VPROT_READ | VPROT_WRITE, FALSE ) != STATUS_SUCCESS)
            {
                ERR_(module)( "Could not map %s shared section %.8s\n", debugstr_w(filename), sec->Name );
                return status;
            }

            /* check if the import directory falls inside this section */
            if (imports && imports->VirtualAddress >= sec->VirtualAddress &&
                imports->VirtualAddress < sec->VirtualAddress + map_size)
            {
                /* remap the import directory copy-on-write so import fixups stay per-process */
                UINT_PTR base = imports->VirtualAddress & ~page_mask;
                UINT_PTR end = base + ROUND_SIZE( imports->VirtualAddress, imports->Size );
                if (end > sec->VirtualAddress + map_size) end = sec->VirtualAddress + map_size;
                if (end > base)
                    map_file_into_view( view, shared_fd, base, end - base,
                                        pos + (base - sec->VirtualAddress),
                                        VPROT_COMMITTED | VPROT_READ | VPROT_WRITECOPY, FALSE );
            }
            pos += map_size;
            continue;
        }

        TRACE_(module)( "mapping %s section %.8s at %p off %x size %x virt %x flags %x\n",
                        debugstr_w(filename), sec->Name, ptr + sec->VirtualAddress,
                        sec->PointerToRawData, sec->SizeOfRawData,
                        sec->Misc.VirtualSize, sec->Characteristics );

        if (!sec->PointerToRawData || !file_size) continue;

        /* Note: if the section is not aligned properly map_file_into_view will magically
         * fall back to read(), so we don't need to check anything here.
         */
        end = file_start + file_size;
        if (sec->PointerToRawData >= st.st_size ||
            end > ((st.st_size + sector_align) & ~sector_align) ||
            end < file_start ||
            map_file_into_view( view, fd, sec->VirtualAddress, file_size, file_start,
                                VPROT_COMMITTED | VPROT_READ | VPROT_WRITECOPY,
                                removable ) != STATUS_SUCCESS)
        {
            ERR_(module)( "Could not map %s section %.8s, file probably truncated\n",
                          debugstr_w(filename), sec->Name );
            return status;
        }

        if (file_size & page_mask)
        {
            /* clear the bytes between the end of the file data and the end of the page */
            end = ROUND_SIZE( 0, file_size );
            if (end > map_size) end = map_size;
            TRACE_(module)("clearing %p - %p\n",
                           ptr + sec->VirtualAddress + file_size,
                           ptr + sec->VirtualAddress + end );
            memset( ptr + sec->VirtualAddress + file_size, 0, end - file_size );
        }
    }

    /* set the image protections */

    set_vprot( view, ptr, ROUND_SIZE( 0, header_size ), VPROT_COMMITTED | VPROT_READ );

    sec = sections;
    for (i = 0; i < nt->FileHeader.NumberOfSections; i++, sec++)
    {
        SIZE_T size;
        BYTE vprot = VPROT_COMMITTED;

        if (sec->Misc.VirtualSize)
            size = ROUND_SIZE( sec->VirtualAddress, sec->Misc.VirtualSize );
        else
            size = ROUND_SIZE( sec->VirtualAddress, sec->SizeOfRawData );

        if (sec->Characteristics & IMAGE_SCN_MEM_READ)    vprot |= VPROT_READ;
        if (sec->Characteristics & IMAGE_SCN_MEM_WRITE)   vprot |= VPROT_WRITECOPY;
        if (sec->Characteristics & IMAGE_SCN_MEM_EXECUTE) vprot |= VPROT_EXEC;

        /* Dumb game crack lets the AOEP point into a data section. Adjust. */
        if ((nt->OptionalHeader.AddressOfEntryPoint >= sec->VirtualAddress) &&
            (nt->OptionalHeader.AddressOfEntryPoint < sec->VirtualAddress + size))
            vprot |= VPROT_EXEC;

        if (!set_vprot( view, ptr + sec->VirtualAddress, size, vprot ) && (vprot & VPROT_EXEC))
            ERR( "failed to set %08x protection on %s section %.8s, noexec filesystem?\n",
                 sec->Characteristics, debugstr_w(filename), sec->Name );
    }

#ifdef VALGRIND_LOAD_PDB_DEBUGINFO
    VALGRIND_LOAD_PDB_DEBUGINFO(fd, ptr, total_size, ptr - (char *)orig_base);
#endif
    return STATUS_SUCCESS;
}
/***********************************************************************
 *           get_mapping_info
 *
 * Query the server for a mapping's flags, size, optional shared file
 * handle, and (for image mappings) the pe_image_info_t followed by the
 * image filename.  On success *info is either a malloc'ed buffer the
 * caller must free, or NULL for non-image mappings.
 */
static NTSTATUS get_mapping_info( HANDLE handle, ACCESS_MASK access, unsigned int *sec_flags,
                                  mem_size_t *full_size, HANDLE *shared_file, pe_image_info_t **info )
{
    pe_image_info_t *image_info;
    SIZE_T total, size = 1024;
    NTSTATUS status;

    /* retry with a larger buffer until the whole reply (info + filename) fits */
    for (;;)
    {
        if (!(image_info = malloc( size ))) return STATUS_NO_MEMORY;
        SERVER_START_REQ( get_mapping_info )
        {
            req->handle = wine_server_obj_handle( handle );
            req->access = access;
            wine_server_set_reply( req, image_info, size );
            status = wine_server_call( req );
            *sec_flags = reply->flags;
            *full_size = reply->size;
            total = reply->total;
            *shared_file = wine_server_ptr_handle( reply->shared_file );
        }
        SERVER_END_REQ;
        /* need one extra WCHAR of room for the terminator appended below */
        if (!status && total <= size - sizeof(WCHAR)) break;
        free( image_info );
        if (status) return status;
        /* buffer was too small: drop the handle obtained in this round before retrying */
        if (*shared_file) NtClose( *shared_file );
        size = total + sizeof(WCHAR);
    }

    if (total)
    {
        /* the filename immediately follows the image info in the reply */
        WCHAR *filename = (WCHAR *)(image_info + 1);

        assert( total >= sizeof(*image_info) );
        total -= sizeof(*image_info);
        filename[total / sizeof(WCHAR)] = 0;
        *info = image_info;
    }
    else free( image_info );

    return STATUS_SUCCESS;
}
/***********************************************************************
 *           virtual_map_image
 *
 * Map a PE image section into memory.
 */
static NTSTATUS virtual_map_image( HANDLE mapping, ACCESS_MASK access, void **addr_ptr, SIZE_T *size_ptr,
                                   ULONG_PTR zero_bits, HANDLE shared_file, ULONG alloc_type,
                                   pe_image_info_t *image_info, WCHAR *filename, BOOL is_builtin )
{
    unsigned int vprot = SEC_IMAGE | SEC_FILE | VPROT_COMMITTED | VPROT_READ | VPROT_EXEC | VPROT_WRITECOPY;
    int unix_fd = -1, needs_close;
    int shared_fd = -1, shared_needs_close = 0;
    SIZE_T size = image_info->map_size;
    struct file_view *view;
    NTSTATUS status;
    sigset_t sigset;
    void *base;

    if ((status = server_get_unix_fd( mapping, 0, &unix_fd, &needs_close, NULL, NULL )))
        return status;

    /* the shared file (writable shared sections) needs its own fd */
    if (shared_file && ((status = server_get_unix_fd( shared_file, FILE_READ_DATA|FILE_WRITE_DATA,
                                                      &shared_fd, &shared_needs_close, NULL, NULL ))))
    {
        if (needs_close) close( unix_fd );
        return status;
    }

    status = STATUS_INVALID_PARAMETER;
    server_enter_uninterrupted_section( &virtual_mutex, &sigset );

    base = wine_server_get_ptr( image_info->base );
    /* ignore a preferred base that doesn't fit this process's pointer size */
    if ((ULONG_PTR)base != image_info->base) base = NULL;

    if ((char *)base >= (char *)address_space_start)  /* make sure the DOS area remains free */
        status = map_view( &view, base, size, alloc_type & MEM_TOP_DOWN, vprot, zero_bits );

    /* fall back to an arbitrary address if the preferred base is unavailable */
    if (status) status = map_view( &view, NULL, size, alloc_type & MEM_TOP_DOWN, vprot, zero_bits );
    if (status) goto done;

    status = map_image_into_view( view, filename, unix_fd, base, image_info->header_size,
                                  image_info->image_flags, shared_fd, needs_close );
    if (status == STATUS_SUCCESS)
    {
        /* register the view with the server */
        SERVER_START_REQ( map_view )
        {
            req->mapping = wine_server_obj_handle( mapping );
            req->access  = access;
            req->base    = wine_server_client_ptr( view->base );
            req->size    = size;
            status = wine_server_call( req );
        }
        SERVER_END_REQ;
    }
    if (status >= 0)  /* informational statuses still count as success */
    {
        if (is_builtin) add_builtin_module( view->base, NULL );
        *addr_ptr = view->base;
        *size_ptr = size;
        VIRTUAL_DEBUG_DUMP_VIEW( view );
    }
    else delete_view( view );

done:
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    if (needs_close) close( unix_fd );
    if (shared_needs_close) close( shared_fd );
    return status;
}
/***********************************************************************
 *           virtual_map_section
 *
 * Map a file section into memory.
 */
static NTSTATUS virtual_map_section( HANDLE handle, PVOID *addr_ptr, ULONG_PTR zero_bits,
                                     SIZE_T commit_size, const LARGE_INTEGER *offset_ptr, SIZE_T *size_ptr,
                                     ULONG alloc_type, ULONG protect )
{
    NTSTATUS res;
    mem_size_t full_size;
    ACCESS_MASK access;
    SIZE_T size;
    pe_image_info_t *image_info = NULL;
    WCHAR *filename;
    void *base;
    int unix_handle = -1, needs_close;
    unsigned int vprot, sec_flags;
    struct file_view *view;
    HANDLE shared_file;
    LARGE_INTEGER offset;
    sigset_t sigset;

    /* translate the page protection into the access needed on the section */
    switch(protect)
    {
    case PAGE_NOACCESS:
    case PAGE_READONLY:
    case PAGE_WRITECOPY:
        access = SECTION_MAP_READ;
        break;
    case PAGE_READWRITE:
        access = SECTION_MAP_WRITE;
        break;
    case PAGE_EXECUTE:
    case PAGE_EXECUTE_READ:
    case PAGE_EXECUTE_WRITECOPY:
        access = SECTION_MAP_READ | SECTION_MAP_EXECUTE;
        break;
    case PAGE_EXECUTE_READWRITE:
        access = SECTION_MAP_WRITE | SECTION_MAP_EXECUTE;
        break;
    default:
        return STATUS_INVALID_PAGE_PROTECTION;
    }

    res = get_mapping_info( handle, access, &sec_flags, &full_size, &shared_file, &image_info );
    if (res) return res;

    if (image_info)
    {
        /* image mappings take a separate path */
        filename = (WCHAR *)(image_info + 1);
        /* check if we can replace that mapping with the builtin */
        res = load_builtin( image_info, filename, addr_ptr, size_ptr );
        if (res == STATUS_IMAGE_ALREADY_LOADED)
            res = virtual_map_image( handle, access, addr_ptr, size_ptr, zero_bits, shared_file,
                                     alloc_type, image_info, filename, FALSE );
        if (shared_file) NtClose( shared_file );
        free( image_info );
        return res;
    }

    base = *addr_ptr;
    offset.QuadPart = offset_ptr ? offset_ptr->QuadPart : 0;
    if (offset.QuadPart >= full_size) return STATUS_INVALID_PARAMETER;
    if (*size_ptr)
    {
        size = *size_ptr;
        if (size > full_size - offset.QuadPart) return STATUS_INVALID_VIEW_SIZE;
    }
    else
    {
        size = full_size - offset.QuadPart;
        /* SIZE_T is 32 bits on 32-bit platforms; detect truncation */
        if (size != full_size - offset.QuadPart)  /* truncated */
        {
            WARN( "Files larger than 4Gb (%s) not supported on this platform\n",
                  wine_dbgstr_longlong(full_size) );
            return STATUS_INVALID_PARAMETER;
        }
    }
    if (!(size = ROUND_SIZE( 0, size ))) return STATUS_INVALID_PARAMETER;  /* wrap-around */

    get_vprot_flags( protect, &vprot, FALSE );
    vprot |= sec_flags;
    if (!(sec_flags & SEC_RESERVE)) vprot |= VPROT_COMMITTED;

    if ((res = server_get_unix_fd( handle, 0, &unix_handle, &needs_close, NULL, NULL ))) return res;

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );

    res = map_view( &view, base, size, alloc_type & MEM_TOP_DOWN, vprot, zero_bits );
    if (res) goto done;

    TRACE( "handle=%p size=%lx offset=%x%08x\n", handle, size, offset.u.HighPart, offset.u.LowPart );
    res = map_file_into_view( view, unix_handle, 0, size, offset.QuadPart, vprot, needs_close );
    if (res == STATUS_SUCCESS)
    {
        /* register the view with the server */
        SERVER_START_REQ( map_view )
        {
            req->mapping = wine_server_obj_handle( handle );
            req->access  = access;
            req->base    = wine_server_client_ptr( view->base );
            req->size    = size;
            req->start   = offset.QuadPart;
            res = wine_server_call( req );
        }
        SERVER_END_REQ;
    }
    else ERR( "mapping %p %lx %x%08x failed\n", view->base, size, offset.u.HighPart, offset.u.LowPart );

    if (res >= 0)  /* informational statuses still count as success */
    {
        *addr_ptr = view->base;
        *size_ptr = size;
        VIRTUAL_DEBUG_DUMP_VIEW( view );
    }
    else delete_view( view );

done:
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    if (needs_close) close( unix_handle );
    return res;
}
/* argument/result package for the alloc_virtual_heap callback below */
struct alloc_virtual_heap
{
    void  *base;  /* out: address of the allocated block */
    size_t size;  /* in: number of bytes to allocate */
};
/* callback for mmap_enum_reserved_areas to allocate space for the virtual heap */
static int alloc_virtual_heap( void *base, SIZE_T size, void *arg )
{
    struct alloc_virtual_heap *alloc = arg;
    void *end = (char *)base + size;

    /* grow the known address space limit as we discover reserved areas */
    if (is_beyond_limit( base, size, address_space_limit )) address_space_limit = (char *)base + size;
    /* on 64-bit, keep the low 2G free for things that need 32-bit addresses */
    if (is_win64 && base < (void *)0x80000000) return 0;
    /* carve the preload-reserved range [preload_reserve_start, preload_reserve_end)
     * out of the candidate area */
    if (preload_reserve_end >= end)
    {
        if (preload_reserve_start <= base) return 0;  /* no space in that area */
        if (preload_reserve_start < end) end = preload_reserve_start;
    }
    else if (preload_reserve_end > base)
    {
        if (preload_reserve_start <= base) base = preload_reserve_end;
        else if ((char *)end - (char *)preload_reserve_end >= alloc->size) base = preload_reserve_end;
        else end = preload_reserve_start;
    }
    if ((char *)end - (char *)base < alloc->size) return 0;
    /* allocate at the top of the usable part of the area */
    alloc->base = anon_mmap_fixed( (char *)end - alloc->size, alloc->size, PROT_READ|PROT_WRITE, 0 );
    return (alloc->base != MAP_FAILED);  /* non-zero stops the enumeration */
}
/***********************************************************************
 *           virtual_init
 *
 * One-time initialization of the virtual memory bookkeeping: reserved
 * areas, the recursive virtual_mutex, the view blocks, the free-ranges
 * table and the per-page protection table.
 */
void virtual_init(void)
{
    const struct preload_info **preload_info = dlsym( RTLD_DEFAULT, "wine_main_preload_info" );
    const char *preload = getenv( "WINEPRELOADRESERVE" );
    struct alloc_virtual_heap alloc_views;
    size_t size;
    int i;
    pthread_mutexattr_t attr;

    /* recursive: some paths re-enter the lock while already holding it */
    pthread_mutexattr_init( &attr );
    pthread_mutexattr_settype( &attr, PTHREAD_MUTEX_RECURSIVE );
    pthread_mutex_init( &virtual_mutex, &attr );
    pthread_mutexattr_destroy( &attr );

    /* register the ranges the preloader kept out of the dynamic linker's reach */
    if (preload_info && *preload_info)
        for (i = 0; (*preload_info)[i].size; i++)
            mmap_add_reserved_area( (*preload_info)[i].addr, (*preload_info)[i].size );

    mmap_init( preload_info ? *preload_info : NULL );

    /* NOTE(review): preload is already fetched at declaration above; this
     * second getenv is redundant but harmless */
    if ((preload = getenv("WINEPRELOADRESERVE")))
    {
        unsigned long start, end;
        if (sscanf( preload, "%lx-%lx", &start, &end ) == 2)
        {
            preload_reserve_start = (void *)start;
            preload_reserve_end = (void *)end;
            /* some apps start inside the DOS area */
            if (preload_reserve_start)
                address_space_start = min( address_space_start, preload_reserve_start );
        }
    }

    /* try to find space in a reserved area for the views and pages protection table */
#ifdef _WIN64
    pages_vprot_size = ((size_t)address_space_limit >> page_shift >> pages_vprot_shift) + 1;
    alloc_views.size = 2 * view_block_size + pages_vprot_size * sizeof(*pages_vprot);
#else
    /* on 32-bit the page protection table covers the whole 4G directly */
    alloc_views.size = 2 * view_block_size + (1U << (32 - page_shift));
#endif
    if (mmap_enum_reserved_areas( alloc_virtual_heap, &alloc_views, 1 ))
        mmap_remove_reserved_area( alloc_views.base, alloc_views.size );
    else
        alloc_views.base = anon_mmap_alloc( alloc_views.size, PROT_READ | PROT_WRITE );

    assert( alloc_views.base != MAP_FAILED );
    view_block_start = alloc_views.base;
    view_block_end = view_block_start + view_block_size / sizeof(*view_block_start);
    free_ranges = (void *)((char *)alloc_views.base + view_block_size);
    pages_vprot = (void *)((char *)alloc_views.base + 2 * view_block_size);
    wine_rb_init( &views_tree, compare_view );

    /* initially a single free range covering the whole address space */
    free_ranges[0].base = (void *)0;
    free_ranges[0].end = (void *)~0;
    free_ranges_end = free_ranges + 1;

    /* make the DOS area accessible (except the low 64K) to hide bugs in broken apps like Excel 2003 */
    size = (char *)address_space_start - (char *)0x10000;
    if (size && mmap_is_in_reserved_area( (void*)0x10000, size ) == 1)
        anon_mmap_fixed( (void *)0x10000, size, PROT_READ | PROT_WRITE, 0 );
}
2725 /***********************************************************************
2726 * get_system_affinity_mask
2728 ULONG_PTR get_system_affinity_mask(void)
2730 ULONG num_cpus = peb->NumberOfProcessors;
2731 if (num_cpus >= sizeof(ULONG_PTR) * 8) return ~(ULONG_PTR)0;
2732 return ((ULONG_PTR)1 << num_cpus) - 1;
2735 /***********************************************************************
2736 * virtual_get_system_info
2738 void virtual_get_system_info( SYSTEM_BASIC_INFORMATION *info, BOOL wow64 )
2740 #if defined(HAVE_SYSINFO) \
2741 && defined(HAVE_STRUCT_SYSINFO_TOTALRAM) && defined(HAVE_STRUCT_SYSINFO_MEM_UNIT)
2742 struct sysinfo sinfo;
2744 if (!sysinfo(&sinfo))
2746 ULONG64 total = (ULONG64)sinfo.totalram * sinfo.mem_unit;
2747 info->MmHighestPhysicalPage = max(1, total / page_size);
2749 #elif defined(_SC_PHYS_PAGES)
2750 LONG64 phys_pages = sysconf( _SC_PHYS_PAGES );
2752 info->MmHighestPhysicalPage = max(1, phys_pages);
2753 #else
2754 info->MmHighestPhysicalPage = 0x7fffffff / page_size;
2755 #endif
2757 info->unknown = 0;
2758 info->KeMaximumIncrement = 0; /* FIXME */
2759 info->PageSize = page_size;
2760 info->MmLowestPhysicalPage = 1;
2761 info->MmNumberOfPhysicalPages = info->MmHighestPhysicalPage - info->MmLowestPhysicalPage;
2762 info->AllocationGranularity = granularity_mask + 1;
2763 info->LowestUserAddress = (void *)0x10000;
2764 info->ActiveProcessorsAffinityMask = get_system_affinity_mask();
2765 info->NumberOfProcessors = peb->NumberOfProcessors;
2766 if (wow64) info->HighestUserAddress = (char *)get_wow_user_space_limit() - 1;
2767 else info->HighestUserAddress = (char *)user_space_limit - 1;
/***********************************************************************
 *           virtual_map_builtin_module
 *
 * Map a Wine builtin module from a mapping handle, rejecting non-builtin
 * files, architecture mismatches and prefer-native builtins.
 */
NTSTATUS virtual_map_builtin_module( HANDLE mapping, void **module, SIZE_T *size,
                                     SECTION_IMAGE_INFORMATION *info, WORD machine, BOOL prefer_native )
{
    mem_size_t full_size;
    unsigned int sec_flags;
    HANDLE shared_file;
    pe_image_info_t *image_info = NULL;
    ACCESS_MASK access = SECTION_MAP_READ | SECTION_MAP_EXECUTE;
    NTSTATUS status;
    WCHAR *filename;

    if ((status = get_mapping_info( mapping, access, &sec_flags, &full_size, &shared_file, &image_info )))
        return status;

    /* only image mappings carry a pe_image_info_t */
    if (!image_info) return STATUS_INVALID_PARAMETER;

    *module = NULL;
    *size = 0;
    filename = (WCHAR *)(image_info + 1);

    if (!(image_info->image_flags & IMAGE_FLAGS_WineBuiltin)) /* ignore non-builtins */
    {
        WARN( "%s found in WINEDLLPATH but not a builtin, ignoring\n", debugstr_w(filename) );
        status = STATUS_DLL_NOT_FOUND;
    }
    else if (machine && image_info->machine != machine)
    {
        TRACE( "%s is for arch %04x, continuing search\n", debugstr_w(filename), image_info->machine );
        status = STATUS_IMAGE_MACHINE_TYPE_MISMATCH;
    }
    else if (prefer_native && (image_info->dll_charact & IMAGE_DLLCHARACTERISTICS_PREFER_NATIVE))
    {
        TRACE( "%s has prefer-native flag, ignoring builtin\n", debugstr_w(filename) );
        status = STATUS_IMAGE_ALREADY_LOADED;
    }
    else
    {
        status = virtual_map_image( mapping, SECTION_MAP_READ | SECTION_MAP_EXECUTE,
                                    module, size, 0, shared_file, 0, image_info, filename, TRUE );
        virtual_fill_image_information( image_info, info );
    }

    if (shared_file) NtClose( shared_file );
    free( image_info );
    return status;
}
/***********************************************************************
 *           virtual_create_builtin_view
 *
 * Create a view over a builtin module that is already mapped in memory
 * (loaded via the system dynamic loader), set per-page protections from
 * the PE section headers, and register the view with the server.
 */
NTSTATUS virtual_create_builtin_view( void *module, const UNICODE_STRING *nt_name,
                                      pe_image_info_t *info, void *so_handle )
{
    NTSTATUS status;
    sigset_t sigset;
    IMAGE_DOS_HEADER *dos = module;
    IMAGE_NT_HEADERS *nt = (IMAGE_NT_HEADERS *)((char *)dos + dos->e_lfanew);
    SIZE_T size = info->map_size;
    IMAGE_SECTION_HEADER *sec;
    struct file_view *view;
    void *base = wine_server_get_ptr( info->base );
    int i;

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    status = create_view( &view, base, size, SEC_IMAGE | SEC_FILE | VPROT_SYSTEM |
                          VPROT_COMMITTED | VPROT_READ | VPROT_WRITECOPY | VPROT_EXEC );
    if (!status)
    {
        TRACE( "created %p-%p for %s\n", base, (char *)base + size, debugstr_us(nt_name) );

        /* The PE header is always read-only, no write, no execute. */
        set_page_vprot( base, page_size, VPROT_COMMITTED | VPROT_READ );

        /* derive per-section page protections from the section characteristics */
        sec = (IMAGE_SECTION_HEADER *)((char *)&nt->OptionalHeader + nt->FileHeader.SizeOfOptionalHeader);
        for (i = 0; i < nt->FileHeader.NumberOfSections; i++)
        {
            BYTE flags = VPROT_COMMITTED;

            if (sec[i].Characteristics & IMAGE_SCN_MEM_EXECUTE) flags |= VPROT_EXEC;
            if (sec[i].Characteristics & IMAGE_SCN_MEM_READ) flags |= VPROT_READ;
            if (sec[i].Characteristics & IMAGE_SCN_MEM_WRITE) flags |= VPROT_WRITE;
            set_page_vprot( (char *)base + sec[i].VirtualAddress, sec[i].Misc.VirtualSize, flags );
        }

        /* register the view with the server */
        SERVER_START_REQ( map_view )
        {
            req->base = wine_server_client_ptr( view->base );
            req->size = size;
            wine_server_add_data( req, info, sizeof(*info) );
            wine_server_add_data( req, nt_name->Buffer, nt_name->Length );
            status = wine_server_call( req );
        }
        SERVER_END_REQ;

        if (status >= 0)
        {
            add_builtin_module( view->base, so_handle );
            VIRTUAL_DEBUG_DUMP_VIEW( view );
            if (is_beyond_limit( base, size, working_set_limit )) working_set_limit = address_space_limit;
        }
        else delete_view( view );
    }
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );

    return status;
}
/* set some initial values in a new TEB */
/* ptr points at a block containing both a 64-bit and a 32-bit TEB (the
 * 32-bit one at teb_offset); the native one is initialized fully, the
 * other one only as needed for WoW64 interop */
static TEB *init_teb( void *ptr, BOOL is_wow )
{
    struct ntdll_thread_data *thread_data;
    TEB *teb;
    TEB64 *teb64 = ptr;
    TEB32 *teb32 = (TEB32 *)((char *)ptr + teb_offset);

#ifdef _WIN64
    teb = (TEB *)teb64;
    /* the 32-bit PEB lives one page after the native one */
    teb32->Peb = PtrToUlong( (char *)peb + page_size );
    teb32->Tib.Self = PtrToUlong( teb32 );
    teb32->Tib.ExceptionList = ~0u;
    teb32->ActivationContextStackPointer = PtrToUlong( &teb32->ActivationContextStack );
    /* manual InitializeListHead on the 32-bit list (can't use native helpers) */
    teb32->ActivationContextStack.FrameListCache.Flink =
        teb32->ActivationContextStack.FrameListCache.Blink =
            PtrToUlong( &teb32->ActivationContextStack.FrameListCache );
    teb32->StaticUnicodeString.Buffer = PtrToUlong( teb32->StaticUnicodeBuffer );
    teb32->StaticUnicodeString.MaximumLength = sizeof( teb32->StaticUnicodeBuffer );
    /* GdiBatchCount is reused to point back at the 64-bit TEB */
    teb32->GdiBatchCount = PtrToUlong( teb64 );
    teb32->WowTebOffset = -teb_offset;
    if (is_wow) teb64->WowTebOffset = teb_offset;
#else
    teb = (TEB *)teb32;
    /* the 64-bit PEB lives one page before the native one */
    teb64->Peb = PtrToUlong( (char *)peb - page_size );
    teb64->Tib.Self = PtrToUlong( teb64 );
    teb64->Tib.ExceptionList = PtrToUlong( teb32 );
    teb64->ActivationContextStackPointer = PtrToUlong( &teb64->ActivationContextStack );
    teb64->ActivationContextStack.FrameListCache.Flink =
        teb64->ActivationContextStack.FrameListCache.Blink =
            PtrToUlong( &teb64->ActivationContextStack.FrameListCache );
    teb64->StaticUnicodeString.Buffer = PtrToUlong( teb64->StaticUnicodeBuffer );
    teb64->StaticUnicodeString.MaximumLength = sizeof( teb64->StaticUnicodeBuffer );
    teb64->WowTebOffset = teb_offset;
    if (is_wow)
    {
        teb32->GdiBatchCount = PtrToUlong( teb64 );
        teb32->WowTebOffset = -teb_offset;
    }
#endif
    /* common initialization of the native TEB */
    teb->Peb = peb;
    teb->Tib.Self = &teb->Tib;
    teb->Tib.ExceptionList = (void *)~0ul;
    teb->Tib.StackBase = (void *)~0ul;
    teb->ActivationContextStackPointer = &teb->ActivationContextStack;
    InitializeListHead( &teb->ActivationContextStack.FrameListCache );
    teb->StaticUnicodeString.Buffer = teb->StaticUnicodeBuffer;
    teb->StaticUnicodeString.MaximumLength = sizeof(teb->StaticUnicodeBuffer);
    /* the GdiTebBatch area is reused for ntdll's private per-thread data */
    thread_data = (struct ntdll_thread_data *)&teb->GdiTebBatch;
    thread_data->request_fd = -1;
    thread_data->reply_fd = -1;
    thread_data->wait_fd[0] = -1;
    thread_data->wait_fd[1] = -1;
    list_add_head( &teb_list, &thread_data->entry );
    return teb;
}
/***********************************************************************
 *           virtual_alloc_first_teb
 *
 * Allocate the TEB block and PEB for the initial thread.
 */
TEB *virtual_alloc_first_teb(void)
{
    void *ptr;
    NTSTATUS status;
    SIZE_T data_size = page_size;
    SIZE_T block_size = signal_stack_mask + 1;
    SIZE_T total = 32 * block_size;

    /* reserve space for shared user data */

    status = NtAllocateVirtualMemory( NtCurrentProcess(), (void **)&user_shared_data, 0, &data_size,
                                      MEM_RESERVE | MEM_COMMIT, PAGE_READONLY );
    if (status)
    {
        ERR( "wine: failed to map the shared user data: %08x\n", status );
        exit(1);
    }

    /* reserve a block of 32 TEB slots (below 2G on 64-bit) */
    NtAllocateVirtualMemory( NtCurrentProcess(), &teb_block, is_win64 ? 0x7fffffff : 0, &total,
                             MEM_RESERVE | MEM_TOP_DOWN, PAGE_READWRITE );
    /* slots 30 and 31 are used here: TEB at slot 30, PEB in slot 31 */
    teb_block_pos = 30;
    ptr = (char *)teb_block + 30 * block_size;
    data_size = 2 * block_size;
    NtAllocateVirtualMemory( NtCurrentProcess(), (void **)&ptr, 0, &data_size, MEM_COMMIT, PAGE_READWRITE );
    peb = (PEB *)((char *)teb_block + 31 * block_size + (is_win64 ? 0 : page_size));
    return init_teb( ptr, FALSE );
}
/***********************************************************************
 *           virtual_alloc_teb
 *
 * Allocate a TEB for a new thread: reuse a slot from the free list if
 * possible, otherwise carve the next slot out of the current TEB block
 * (reserving a new block of 32 slots when the current one is exhausted).
 */
NTSTATUS virtual_alloc_teb( TEB **ret_teb )
{
    sigset_t sigset;
    TEB *teb;
    void *ptr = NULL;
    NTSTATUS status = STATUS_SUCCESS;
    SIZE_T block_size = signal_stack_mask + 1;

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    if (next_free_teb)
    {
        /* pop a recycled slot; its first pointer-sized word links the free list */
        ptr = next_free_teb;
        next_free_teb = *(void **)ptr;
        memset( ptr, 0, teb_size );
    }
    else
    {
        if (!teb_block_pos)
        {
            /* current block exhausted: reserve a fresh block of 32 slots */
            SIZE_T total = 32 * block_size;

            if ((status = NtAllocateVirtualMemory( NtCurrentProcess(), &ptr, is_win64 ? 0x7fffffff : 0,
                                                   &total, MEM_RESERVE, PAGE_READWRITE )))
            {
                server_leave_uninterrupted_section( &virtual_mutex, &sigset );
                return status;
            }
            teb_block = ptr;
            teb_block_pos = 32;
        }
        /* take the next slot from the top of the block and commit it */
        ptr = ((char *)teb_block + --teb_block_pos * block_size);
        NtAllocateVirtualMemory( NtCurrentProcess(), (void **)&ptr, 0, &block_size,
                                 MEM_COMMIT, PAGE_READWRITE );
    }
    *ret_teb = teb = init_teb( ptr, !!NtCurrentTeb()->WowTebOffset );
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );

    if ((status = signal_alloc_thread( teb )))
    {
        /* signal setup failed: return the slot to the free list */
        server_enter_uninterrupted_section( &virtual_mutex, &sigset );
        *(void **)ptr = next_free_teb;
        next_free_teb = ptr;
        server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    }
    return status;
}
/***********************************************************************
 *           virtual_free_teb
 *
 * Release a thread's stacks and return its TEB slot to the free list.
 */
void virtual_free_teb( TEB *teb )
{
    struct ntdll_thread_data *thread_data = (struct ntdll_thread_data *)&teb->GdiTebBatch;
    void *ptr;
    SIZE_T size;
    sigset_t sigset;
    WOW_TEB *wow_teb = get_wow_teb( teb );

    signal_free_thread( teb );
    if (teb->DeallocationStack)
    {
        size = 0;  /* size 0 + MEM_RELEASE frees the whole allocation */
        NtFreeVirtualMemory( GetCurrentProcess(), &teb->DeallocationStack, &size, MEM_RELEASE );
    }
    if (thread_data->kernel_stack)
    {
        size = 0;
        NtFreeVirtualMemory( GetCurrentProcess(), &thread_data->kernel_stack, &size, MEM_RELEASE );
    }
    /* a WoW64 thread also has a separate 32-bit stack to release */
    if (wow_teb && (ptr = ULongToPtr( wow_teb->DeallocationStack )))
    {
        size = 0;
        NtFreeVirtualMemory( GetCurrentProcess(), &ptr, &size, MEM_RELEASE );
    }

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    list_remove( &thread_data->entry );
    /* push the slot (which starts at the 64-bit TEB) back on the free list */
    ptr = teb;
    if (!is_win64) ptr = (char *)ptr - teb_offset;
    *(void **)ptr = next_free_teb;
    next_free_teb = ptr;
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
}
3061 /***********************************************************************
3062 * virtual_clear_tls_index
/* Zero the given TLS slot in every thread of the process, handling both
 * the base slots and the expansion slots, and on 64-bit also the WoW64
 * (32-bit) TEB when present. */
3064 NTSTATUS virtual_clear_tls_index( ULONG index )
3066 struct ntdll_thread_data *thread_data;
3067 sigset_t sigset;
/* base TLS slots */
3069 if (index < TLS_MINIMUM_AVAILABLE)
3071 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
3072 LIST_FOR_EACH_ENTRY( thread_data, &teb_list, struct ntdll_thread_data, entry )
3074 TEB *teb = CONTAINING_RECORD( thread_data, TEB, GdiTebBatch );
3075 #ifdef _WIN64
3076 WOW_TEB *wow_teb = get_wow_teb( teb );
3077 if (wow_teb) wow_teb->TlsSlots[index] = 0;
3078 else
3079 #endif
3080 teb->TlsSlots[index] = 0;
3082 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
3084 else
/* expansion TLS slots; these arrays are allocated lazily, so a NULL
 * pointer simply means the thread never used an expansion slot */
3086 index -= TLS_MINIMUM_AVAILABLE;
3087 if (index >= 8 * sizeof(peb->TlsExpansionBitmapBits)) return STATUS_INVALID_PARAMETER;
3089 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
3090 LIST_FOR_EACH_ENTRY( thread_data, &teb_list, struct ntdll_thread_data, entry )
3092 TEB *teb = CONTAINING_RECORD( thread_data, TEB, GdiTebBatch );
3093 #ifdef _WIN64
3094 WOW_TEB *wow_teb = get_wow_teb( teb );
3095 if (wow_teb)
3097 if (wow_teb->TlsExpansionSlots)
3098 ((ULONG *)ULongToPtr( wow_teb->TlsExpansionSlots ))[index] = 0;
3100 else
3101 #endif
3102 if (teb->TlsExpansionSlots) teb->TlsExpansionSlots[index] = 0;
3104 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
3106 return STATUS_SUCCESS;
3110 /***********************************************************************
3111 * virtual_alloc_thread_stack
/* Allocate a thread stack with a no-access page and a guard page at the
 * bottom, optionally with an extra view appended so that the app can free
 * the stack without freeing the thread start portion. */
3113 NTSTATUS virtual_alloc_thread_stack( INITIAL_TEB *stack, ULONG_PTR zero_bits, SIZE_T reserve_size,
3114 SIZE_T commit_size, SIZE_T extra_size )
3116 struct file_view *view;
3117 NTSTATUS status;
3118 sigset_t sigset;
3119 SIZE_T size;
/* fall back to the sizes recorded in the main image when not specified */
3121 if (!reserve_size) reserve_size = main_image_info.MaximumStackSize;
3122 if (!commit_size) commit_size = main_image_info.CommittedStackSize;
3124 size = max( reserve_size, commit_size );
3125 if (size < 1024 * 1024) size = 1024 * 1024; /* Xlib needs a large stack */
3126 size = (size + 0xffff) & ~0xffff; /* round to 64K boundary */
3128 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
3130 if ((status = map_view( &view, NULL, size + extra_size, FALSE,
3131 VPROT_READ | VPROT_WRITE | VPROT_COMMITTED, zero_bits )) != STATUS_SUCCESS)
3132 goto done;
3134 #ifdef VALGRIND_STACK_REGISTER
3135 VALGRIND_STACK_REGISTER( view->base, (char *)view->base + view->size );
3136 #endif
3138 /* setup no access guard page */
3139 set_page_vprot( view->base, page_size, VPROT_COMMITTED );
3140 set_page_vprot( (char *)view->base + page_size, page_size,
3141 VPROT_READ | VPROT_WRITE | VPROT_COMMITTED | VPROT_GUARD );
3142 mprotect_range( view->base, 2 * page_size, 0, 0 );
3143 VIRTUAL_DEBUG_DUMP_VIEW( view );
3145 if (extra_size)
3147 struct file_view *extra_view;
3149 /* shrink the first view and create a second one for the extra size */
3150 /* this allows the app to free the stack without freeing the thread start portion */
3151 view->size -= extra_size;
3152 status = create_view( &extra_view, (char *)view->base + view->size, extra_size,
3153 VPROT_READ | VPROT_WRITE | VPROT_COMMITTED );
3154 if (status != STATUS_SUCCESS)
/* undo the shrink and release the whole allocation on failure */
3156 view->size += extra_size;
3157 delete_view( view );
3158 goto done;
3162 /* note: limit is lower than base since the stack grows down */
3163 stack->OldStackBase = 0;
3164 stack->OldStackLimit = 0;
3165 stack->DeallocationStack = view->base;
3166 stack->StackBase = (char *)view->base + view->size;
3167 stack->StackLimit = (char *)view->base + 2 * page_size;
3168 done:
3169 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
3170 return status;
3174 /***********************************************************************
3175 * virtual_map_user_shared_data
/* Map the wineserver's shared user data section read-only on top of the
 * user_shared_data page. Any failure here is fatal for the process. */
3177 void virtual_map_user_shared_data(void)
3179 static const WCHAR nameW[] = {'\\','K','e','r','n','e','l','O','b','j','e','c','t','s',
3180 '\\','_','_','w','i','n','e','_','u','s','e','r','_','s','h','a','r','e','d','_','d','a','t','a',0};
3181 UNICODE_STRING name_str = { sizeof(nameW) - sizeof(WCHAR), sizeof(nameW), (WCHAR *)nameW };
3182 OBJECT_ATTRIBUTES attr = { sizeof(attr), 0, &name_str };
3183 NTSTATUS status;
3184 HANDLE section;
3185 int res, fd, needs_close;
3187 if ((status = NtOpenSection( &section, SECTION_ALL_ACCESS, &attr )))
3189 ERR( "failed to open the USD section: %08x\n", status );
3190 exit(1);
/* MAP_FIXED replaces the existing mapping at the user_shared_data address */
3192 if ((res = server_get_unix_fd( section, 0, &fd, &needs_close, NULL, NULL )) ||
3193 (user_shared_data != mmap( user_shared_data, page_size, PROT_READ, MAP_SHARED|MAP_FIXED, fd, 0 )))
3195 ERR( "failed to remap the process USD: %d\n", res );
3196 exit(1);
3198 if (needs_close) close( fd );
3199 NtClose( section );
/* description of a thread stack, filled in by is_inside_thread_stack() */
3203 struct thread_stack_info
/* lowest address of the stack allocation */
3205 char *start;
/* current stack limit (bottom of the committed region) */
3206 char *limit;
/* top of the stack (stacks grow down from here) */
3207 char *end;
/* bytes guaranteed to stay available for stack overflow handling */
3208 SIZE_T guaranteed;
/* TRUE when describing the WoW64 (32-bit) stack of the thread */
3209 BOOL is_wow;
3212 /***********************************************************************
3213 * is_inside_thread_stack
3215 static BOOL is_inside_thread_stack( void *ptr, struct thread_stack_info *stack )
3217 TEB *teb = NtCurrentTeb();
3218 WOW_TEB *wow_teb = get_wow_teb( teb );
3220 stack->start = teb->DeallocationStack;
3221 stack->limit = teb->Tib.StackLimit;
3222 stack->end = teb->Tib.StackBase;
3223 stack->guaranteed = max( teb->GuaranteedStackBytes, page_size * (is_win64 ? 2 : 1) );
3224 stack->is_wow = FALSE;
3225 if ((char *)ptr > stack->start && (char *)ptr <= stack->end) return TRUE;
3227 if (!wow_teb) return FALSE;
3228 stack->start = ULongToPtr( wow_teb->DeallocationStack );
3229 stack->limit = ULongToPtr( wow_teb->Tib.StackLimit );
3230 stack->end = ULongToPtr( wow_teb->Tib.StackBase );
3231 stack->guaranteed = max( wow_teb->GuaranteedStackBytes, page_size * (is_win64 ? 1 : 2) );
3232 stack->is_wow = TRUE;
3233 return ((char *)ptr > stack->start && (char *)ptr <= stack->end);
3237 /***********************************************************************
3238 * grow_thread_stack
3240 static NTSTATUS grow_thread_stack( char *page, struct thread_stack_info *stack_info )
3242 NTSTATUS ret = 0;
3244 set_page_vprot_bits( page, page_size, 0, VPROT_GUARD );
3245 mprotect_range( page, page_size, 0, 0 );
3246 if (page >= stack_info->start + page_size + stack_info->guaranteed)
3248 set_page_vprot_bits( page - page_size, page_size, VPROT_COMMITTED | VPROT_GUARD, 0 );
3249 mprotect_range( page - page_size, page_size, 0, 0 );
3251 else /* inside guaranteed space -> overflow exception */
3253 page = stack_info->start + page_size;
3254 set_page_vprot_bits( page, stack_info->guaranteed, VPROT_COMMITTED, VPROT_GUARD );
3255 mprotect_range( page, stack_info->guaranteed, 0, 0 );
3256 ret = STATUS_STACK_OVERFLOW;
3258 if (stack_info->is_wow)
3260 WOW_TEB *wow_teb = get_wow_teb( NtCurrentTeb() );
3261 wow_teb->Tib.StackLimit = PtrToUlong( page );
3263 else NtCurrentTeb()->Tib.StackLimit = page;
3264 return ret;
3268 /***********************************************************************
3269 * virtual_handle_fault
/* Called from the platform signal handler to resolve a page fault:
 * either grow the thread stack / report a guard page violation, or clear
 * a write watch. Returns STATUS_ACCESS_VIOLATION when the fault is a
 * genuine access error that must be raised to the application. */
3271 NTSTATUS virtual_handle_fault( void *addr, DWORD err, void *stack )
3273 NTSTATUS ret = STATUS_ACCESS_VIOLATION;
3274 char *page = ROUND_ADDR( addr, page_mask );
3275 BYTE vprot;
3277 mutex_lock( &virtual_mutex ); /* no need for signal masking inside signal handler */
3278 vprot = get_page_vprot( page );
3279 if (!is_inside_signal_stack( stack ) && (vprot & VPROT_GUARD))
3281 struct thread_stack_info stack_info;
/* guard page outside the thread stack: clear the guard and report it */
3282 if (!is_inside_thread_stack( page, &stack_info ))
3284 set_page_vprot_bits( page, page_size, 0, VPROT_GUARD );
3285 mprotect_range( page, page_size, 0, 0 );
3286 ret = STATUS_GUARD_PAGE_VIOLATION;
3288 else ret = grow_thread_stack( page, &stack_info );
3290 else if (err & EXCEPTION_WRITE_FAULT)
3292 if (vprot & VPROT_WRITEWATCH)
3294 set_page_vprot_bits( page, page_size, 0, VPROT_WRITEWATCH );
3295 mprotect_range( page, page_size, 0, 0 );
3297 /* ignore fault if page is writable now */
3298 if (get_unix_prot( get_page_vprot( page )) & PROT_WRITE)
3300 if ((vprot & VPROT_WRITEWATCH) || is_write_watch_range( page, page_size ))
3301 ret = STATUS_SUCCESS;
3304 mutex_unlock( &virtual_mutex );
3305 return ret;
3309 /***********************************************************************
3310 * virtual_setup_exception
/* Reserve 'size' bytes below stack_ptr for building an exception frame,
 * growing the stack if needed and turning an unrecoverable overflow into
 * an aborted thread. Returns the new (lower) stack pointer. */
3312 void *virtual_setup_exception( void *stack_ptr, size_t size, EXCEPTION_RECORD *rec )
3314 char *stack = stack_ptr;
3315 struct thread_stack_info stack_info;
3317 if (!is_inside_thread_stack( stack, &stack_info ))
3319 if (is_inside_signal_stack( stack ))
3321 ERR( "nested exception on signal stack in thread %04x addr %p stack %p\n",
3322 GetCurrentThreadId(), rec->ExceptionAddress, stack );
3323 abort_thread(1);
/* outside any known stack: warn but carry on with the requested space */
3325 WARN( "exception outside of stack limits in thread %04x addr %p stack %p (%p-%p-%p)\n",
3326 GetCurrentThreadId(), rec->ExceptionAddress, stack, NtCurrentTeb()->DeallocationStack,
3327 NtCurrentTeb()->Tib.StackLimit, NtCurrentTeb()->Tib.StackBase );
3328 return stack - size;
3331 stack -= size;
3333 if (stack < stack_info.start + 4096)
3335 /* stack overflow on last page, unrecoverable */
3336 UINT diff = stack_info.start + 4096 - stack;
3337 ERR( "stack overflow %u bytes in thread %04x addr %p stack %p (%p-%p-%p)\n",
3338 diff, GetCurrentThreadId(), rec->ExceptionAddress, stack, stack_info.start,
3339 stack_info.limit, stack_info.end );
3340 abort_thread(1);
3342 else if (stack < stack_info.limit)
3344 mutex_lock( &virtual_mutex ); /* no need for signal masking inside signal handler */
/* rewrite the exception as a stack overflow if growing hit the limit */
3345 if ((get_page_vprot( stack ) & VPROT_GUARD) &&
3346 grow_thread_stack( ROUND_ADDR( stack, page_mask ), &stack_info ))
3348 rec->ExceptionCode = STATUS_STACK_OVERFLOW;
3349 rec->NumberParameters = 0;
3351 mutex_unlock( &virtual_mutex );
3353 #if defined(VALGRIND_MAKE_MEM_UNDEFINED)
3354 VALGRIND_MAKE_MEM_UNDEFINED( stack, size );
3355 #elif defined(VALGRIND_MAKE_WRITABLE)
3356 VALGRIND_MAKE_WRITABLE( stack, size );
3357 #endif
3358 return stack;
3362 /***********************************************************************
3363 * check_write_access
3365 * Check if the memory range is writable, temporarily disabling write watches if necessary.
3367 static NTSTATUS check_write_access( void *base, size_t size, BOOL *has_write_watch )
3369 size_t i;
3370 char *addr = ROUND_ADDR( base, page_mask );
3372 size = ROUND_SIZE( base, size );
3373 for (i = 0; i < size; i += page_size)
3375 BYTE vprot = get_page_vprot( addr + i );
3376 if (vprot & VPROT_WRITEWATCH) *has_write_watch = TRUE;
3377 if (!(get_unix_prot( vprot & ~VPROT_WRITEWATCH ) & PROT_WRITE))
3378 return STATUS_INVALID_USER_BUFFER;
3380 if (*has_write_watch)
3381 mprotect_range( addr, size, 0, VPROT_WRITEWATCH ); /* temporarily enable write access */
3382 return STATUS_SUCCESS;
3386 /***********************************************************************
3387 * virtual_locked_server_call
/* Perform a server call whose reply is written into client memory,
 * holding the virtual mutex so that write watches on the reply buffer
 * can be lifted for the duration of the call and refreshed afterwards. */
3389 unsigned int virtual_locked_server_call( void *req_ptr )
3391 struct __server_request_info * const req = req_ptr;
3392 sigset_t sigset;
3393 void *addr = req->reply_data;
3394 data_size_t size = req->u.req.request_header.reply_size;
3395 BOOL has_write_watch = FALSE;
3396 unsigned int ret = STATUS_ACCESS_VIOLATION;
/* no reply data -> plain server call, no locking needed */
3398 if (!size) return wine_server_call( req_ptr );
3400 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
3401 if (!(ret = check_write_access( addr, size, &has_write_watch )))
3403 ret = server_call_unlocked( req );
3404 if (has_write_watch) update_write_watches( addr, size, wine_server_reply_size( req ));
3406 else memset( &req->u.reply, 0, sizeof(req->u.reply) );
3407 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
3408 return ret;
3412 /***********************************************************************
3413 * virtual_locked_read
3415 ssize_t virtual_locked_read( int fd, void *addr, size_t size )
3417 sigset_t sigset;
3418 BOOL has_write_watch = FALSE;
3419 int err = EFAULT;
3421 ssize_t ret = read( fd, addr, size );
3422 if (ret != -1 || errno != EFAULT) return ret;
3424 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
3425 if (!check_write_access( addr, size, &has_write_watch ))
3427 ret = read( fd, addr, size );
3428 err = errno;
3429 if (has_write_watch) update_write_watches( addr, size, max( 0, ret ));
3431 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
3432 errno = err;
3433 return ret;
3437 /***********************************************************************
3438 * virtual_locked_pread
3440 ssize_t virtual_locked_pread( int fd, void *addr, size_t size, off_t offset )
3442 sigset_t sigset;
3443 BOOL has_write_watch = FALSE;
3444 int err = EFAULT;
3446 ssize_t ret = pread( fd, addr, size, offset );
3447 if (ret != -1 || errno != EFAULT) return ret;
3449 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
3450 if (!check_write_access( addr, size, &has_write_watch ))
3452 ret = pread( fd, addr, size, offset );
3453 err = errno;
3454 if (has_write_watch) update_write_watches( addr, size, max( 0, ret ));
3456 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
3457 errno = err;
3458 return ret;
3462 /***********************************************************************
3463 * virtual_locked_recvmsg
/* recvmsg() wrapper that retries an EFAULT failure with write watches on
 * all destination iovec buffers temporarily lifted. */
3465 ssize_t virtual_locked_recvmsg( int fd, struct msghdr *hdr, int flags )
3467 sigset_t sigset;
3468 size_t i;
3469 BOOL has_write_watch = FALSE;
3470 int err = EFAULT;
3472 ssize_t ret = recvmsg( fd, hdr, flags );
/* anything other than EFAULT is reported to the caller as-is */
3473 if (ret != -1 || errno != EFAULT) return ret;
3475 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
3476 for (i = 0; i < hdr->msg_iovlen; i++)
3477 if (check_write_access( hdr->msg_iov[i].iov_base, hdr->msg_iov[i].iov_len, &has_write_watch ))
3478 break;
/* only retry when every iovec buffer passed the access check */
3479 if (i == hdr->msg_iovlen)
3481 ret = recvmsg( fd, hdr, flags );
3482 err = errno;
/* restore the watches on however many iovecs were checked (i counts them) */
3484 if (has_write_watch)
3485 while (i--) update_write_watches( hdr->msg_iov[i].iov_base, hdr->msg_iov[i].iov_len, 0 );
3487 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
3488 errno = err;
3489 return ret;
3493 /***********************************************************************
3494 * virtual_is_valid_code_address
3496 BOOL virtual_is_valid_code_address( const void *addr, SIZE_T size )
3498 struct file_view *view;
3499 BOOL ret = FALSE;
3500 sigset_t sigset;
3502 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
3503 if ((view = find_view( addr, size )))
3504 ret = !(view->protect & VPROT_SYSTEM); /* system views are not visible to the app */
3505 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
3506 return ret;
3510 /***********************************************************************
3511 * virtual_check_buffer_for_read
3513 * Check if a memory buffer can be read, triggering page faults if needed for DIB section access.
3515 BOOL virtual_check_buffer_for_read( const void *ptr, SIZE_T size )
3517 if (!size) return TRUE;
3518 if (!ptr) return FALSE;
3520 __TRY
3522 volatile const char *p = ptr;
3523 char dummy __attribute__((unused));
3524 SIZE_T count = size;
3526 while (count > page_size)
3528 dummy = *p;
3529 p += page_size;
3530 count -= page_size;
3532 dummy = p[0];
3533 dummy = p[count - 1];
3535 __EXCEPT
3537 return FALSE;
3539 __ENDTRY
3540 return TRUE;
3544 /***********************************************************************
3545 * virtual_check_buffer_for_write
3547 * Check if a memory buffer can be written to, triggering page faults if needed for write watches.
3549 BOOL virtual_check_buffer_for_write( void *ptr, SIZE_T size )
3551 if (!size) return TRUE;
3552 if (!ptr) return FALSE;
3554 __TRY
3556 volatile char *p = ptr;
3557 SIZE_T count = size;
3559 while (count > page_size)
3561 *p |= 0;
3562 p += page_size;
3563 count -= page_size;
3565 p[0] |= 0;
3566 p[count - 1] |= 0;
3568 __EXCEPT
3570 return FALSE;
3572 __ENDTRY
3573 return TRUE;
3577 /***********************************************************************
3578 * virtual_uninterrupted_read_memory
3580 * Similar to NtReadVirtualMemory, but without wineserver calls. Moreover
3581 * permissions are checked before accessing each page, to ensure that no
3582 * exceptions can happen.
/* Returns the number of bytes actually copied, which may be less than
 * 'size' if an unreadable page is reached. */
3584 SIZE_T virtual_uninterrupted_read_memory( const void *addr, void *buffer, SIZE_T size )
3586 struct file_view *view;
3587 sigset_t sigset;
3588 SIZE_T bytes_read = 0;
3590 if (!size) return 0;
3592 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
3593 if ((view = find_view( addr, size )))
/* system views are not accessible to the app */
3595 if (!(view->protect & VPROT_SYSTEM))
/* copy page by page, re-checking readability before each page */
3597 while (bytes_read < size && (get_unix_prot( get_page_vprot( addr )) & PROT_READ))
3599 SIZE_T block_size = min( size - bytes_read, page_size - ((UINT_PTR)addr & page_mask) );
3600 memcpy( buffer, addr, block_size );
3602 addr = (const void *)((const char *)addr + block_size);
3603 buffer = (void *)((char *)buffer + block_size);
3604 bytes_read += block_size;
3608 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
3609 return bytes_read;
3613 /***********************************************************************
3614 * virtual_uninterrupted_write_memory
3616 * Similar to NtWriteVirtualMemory, but without wineserver calls. Moreover
3617 * permissions are checked before accessing each page, to ensure that no
3618 * exceptions can happen.
3620 NTSTATUS virtual_uninterrupted_write_memory( void *addr, const void *buffer, SIZE_T size )
3622 BOOL has_write_watch = FALSE;
3623 sigset_t sigset;
3624 NTSTATUS ret;
3626 if (!size) return STATUS_SUCCESS;
3628 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
3629 if (!(ret = check_write_access( addr, size, &has_write_watch )))
3631 memcpy( addr, buffer, size );
3632 if (has_write_watch) update_write_watches( addr, size, size );
3634 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
3635 return ret;
3639 /***********************************************************************
3640 * virtual_set_force_exec
3642 * Whether to force exec prot on all views.
3644 void virtual_set_force_exec( BOOL enable )
3646 struct file_view *view;
3647 sigset_t sigset;
3649 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
3650 if (!force_exec_prot != !enable) /* change all existing views */
3652 force_exec_prot = enable;
3654 WINE_RB_FOR_EACH_ENTRY( view, &views_tree, struct file_view, entry )
3656 /* file mappings are always accessible */
3657 BYTE commit = is_view_valloc( view ) ? 0 : VPROT_COMMITTED;
3659 mprotect_range( view->base, view->size, commit, 0 );
3662 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
/* address range handed to free_reserved_memory() */
3665 struct free_range
/* lower bound of the range to release */
3667 char *base;
/* upper bound of the range to release */
3668 char *limit;
3671 /* free reserved areas above the limit; callback for mmap_enum_reserved_areas */
3672 static int free_reserved_memory( void *base, SIZE_T size, void *arg )
3674 struct free_range *range = arg;
3676 if ((char *)base >= range->limit) return 0;
3677 if ((char *)base + size <= range->base) return 0;
3678 if ((char *)base < range->base)
3680 size -= range->base - (char *)base;
3681 base = range->base;
3683 if ((char *)base + size > range->limit) size = range->limit - (char *)base;
3684 remove_reserved_area( base, size );
3685 return 1; /* stop enumeration since the list has changed */
3688 /***********************************************************************
3689 * virtual_release_address_space
3691 * Release some address space once we have loaded and initialized the app.
3693 static void virtual_release_address_space(void)
3695 struct free_range range;
3697 range.base = (char *)0x82000000;
3698 range.limit = get_wow_user_space_limit();
3700 if (range.limit > (char *)0xfffff000) return; /* 64-bit limit, nothing to do */
/* large-address-aware app: free reserved areas above 0x82000000 */
3702 if (range.limit > range.base)
3704 while (mmap_enum_reserved_areas( free_reserved_memory, &range, 1 )) /* nothing */;
3705 #ifdef __APPLE__
3706 /* On macOS, we still want to free some of low memory, for OpenGL resources */
3707 range.base = (char *)0x40000000;
3708 #else
3709 return;
3710 #endif
3712 else range.base = (char *)0x20000000;
/* free reserved areas between range.base and 0x7f000000 */
3714 range.limit = (char *)0x7f000000;
3715 while (mmap_enum_reserved_areas( free_reserved_memory, &range, 0 )) /* nothing */;
3719 /***********************************************************************
3720 * virtual_set_large_address_space
3722 * Enable use of a large address space when allowed by the application.
3724 void virtual_set_large_address_space(void)
3726 /* no large address space on win9x */
3727 if (peb->OSPlatformId != VER_PLATFORM_WIN32_NT) return;
3729 user_space_limit = working_set_limit = address_space_limit;
3733 /***********************************************************************
3734 * NtAllocateVirtualMemory (NTDLL.@)
3735 * ZwAllocateVirtualMemory (NTDLL.@)
3737 NTSTATUS WINAPI NtAllocateVirtualMemory( HANDLE process, PVOID *ret, ULONG_PTR zero_bits,
3738 SIZE_T *size_ptr, ULONG type, ULONG protect )
3740 void *base;
3741 unsigned int vprot;
3742 BOOL is_dos_memory = FALSE;
3743 struct file_view *view;
3744 sigset_t sigset;
3745 SIZE_T size = *size_ptr;
3746 NTSTATUS status = STATUS_SUCCESS;
3748 TRACE("%p %p %08lx %x %08x\n", process, *ret, size, type, protect );
/* validate size and zero_bits before doing any work */
3750 if (!size) return STATUS_INVALID_PARAMETER;
3751 if (zero_bits > 21 && zero_bits < 32) return STATUS_INVALID_PARAMETER_3;
3752 if (zero_bits > 32 && zero_bits < granularity_mask) return STATUS_INVALID_PARAMETER_3;
3753 #ifndef _WIN64
3754 if (!is_wow64 && zero_bits >= 32) return STATUS_INVALID_PARAMETER_3;
3755 #endif
/* cross-process allocation is forwarded to the target via an APC */
3757 if (process != NtCurrentProcess())
3759 apc_call_t call;
3760 apc_result_t result;
3762 memset( &call, 0, sizeof(call) );
3764 call.virtual_alloc.type = APC_VIRTUAL_ALLOC;
3765 call.virtual_alloc.addr = wine_server_client_ptr( *ret );
3766 call.virtual_alloc.size = *size_ptr;
3767 call.virtual_alloc.zero_bits = zero_bits;
3768 call.virtual_alloc.op_type = type;
3769 call.virtual_alloc.prot = protect;
3770 status = server_queue_process_apc( process, &call, &result );
3771 if (status != STATUS_SUCCESS) return status;
3773 if (result.virtual_alloc.status == STATUS_SUCCESS)
3775 *ret = wine_server_get_ptr( result.virtual_alloc.addr );
3776 *size_ptr = result.virtual_alloc.size;
3778 return result.virtual_alloc.status;
3781 /* Round parameters to a page boundary */
3783 if (is_beyond_limit( 0, size, working_set_limit )) return STATUS_WORKING_SET_LIMIT_RANGE;
3785 if (*ret)
3787 if (type & MEM_RESERVE) /* Round down to 64k boundary */
3788 base = ROUND_ADDR( *ret, granularity_mask );
3789 else
3790 base = ROUND_ADDR( *ret, page_mask );
3791 size = (((UINT_PTR)*ret + size + page_mask) & ~page_mask) - (UINT_PTR)base;
3793 /* disallow low 64k, wrap-around and kernel space */
3794 if (((char *)base < (char *)0x10000) ||
3795 ((char *)base + size < (char *)base) ||
3796 is_beyond_limit( base, size, address_space_limit ))
3798 /* address 1 is magic to mean DOS area */
3799 if (!base && *ret == (void *)1 && size == 0x110000) is_dos_memory = TRUE;
3800 else return STATUS_INVALID_PARAMETER;
3803 else
3805 base = NULL;
3806 size = (size + page_mask) & ~page_mask;
3809 /* Compute the alloc type flags */
3811 if (!(type & (MEM_COMMIT | MEM_RESERVE | MEM_RESET)) ||
3812 (type & ~(MEM_COMMIT | MEM_RESERVE | MEM_TOP_DOWN | MEM_WRITE_WATCH | MEM_RESET)))
3814 WARN("called with wrong alloc type flags (%08x) !\n", type);
3815 return STATUS_INVALID_PARAMETER;
3818 /* Reserve the memory */
3820 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
/* new reservation (or any allocation with no base specified) */
3822 if ((type & MEM_RESERVE) || !base)
3824 if (!(status = get_vprot_flags( protect, &vprot, FALSE )))
3826 if (type & MEM_COMMIT) vprot |= VPROT_COMMITTED;
3827 if (type & MEM_WRITE_WATCH) vprot |= VPROT_WRITEWATCH;
3828 if (protect & PAGE_NOCACHE) vprot |= SEC_NOCACHE;
3830 if (vprot & VPROT_WRITECOPY) status = STATUS_INVALID_PAGE_PROTECTION;
3831 else if (is_dos_memory) status = allocate_dos_memory( &view, vprot );
3832 else status = map_view( &view, base, size, type & MEM_TOP_DOWN, vprot, zero_bits );
3834 if (status == STATUS_SUCCESS) base = view->base;
3837 else if (type & MEM_RESET)
3839 if (!(view = find_view( base, size ))) status = STATUS_NOT_MAPPED_VIEW;
3840 else madvise( base, size, MADV_DONTNEED );
3842 else /* commit the pages */
3844 if (!(view = find_view( base, size ))) status = STATUS_NOT_MAPPED_VIEW;
3845 else if (view->protect & SEC_FILE) status = STATUS_ALREADY_COMMITTED;
3846 else if (!(status = set_protection( view, base, size, protect )) && (view->protect & SEC_RESERVE))
/* let the server know about the newly committed range of the mapping */
3848 SERVER_START_REQ( add_mapping_committed_range )
3850 req->base = wine_server_client_ptr( view->base );
3851 req->offset = (char *)base - (char *)view->base;
3852 req->size = size;
3853 wine_server_call( req );
3855 SERVER_END_REQ;
3859 if (!status) VIRTUAL_DEBUG_DUMP_VIEW( view );
3861 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
3863 if (status == STATUS_SUCCESS)
3865 *ret = base;
3866 *size_ptr = size;
3868 return status;
3871 /***********************************************************************
3872 * NtAllocateVirtualMemoryEx (NTDLL.@)
3873 * ZwAllocateVirtualMemoryEx (NTDLL.@)
3875 NTSTATUS WINAPI NtAllocateVirtualMemoryEx( HANDLE process, PVOID *ret, SIZE_T *size_ptr, ULONG type,
3876 ULONG protect, MEM_EXTENDED_PARAMETER *parameters,
3877 ULONG count )
3879 if (count && !parameters) return STATUS_INVALID_PARAMETER;
3881 if (count) FIXME( "Ignoring %d extended parameters %p\n", count, parameters );
3883 return NtAllocateVirtualMemory( process, ret, 0, size_ptr, type, protect );
3887 /***********************************************************************
3888 * NtFreeVirtualMemory (NTDLL.@)
3889 * ZwFreeVirtualMemory (NTDLL.@)
3891 NTSTATUS WINAPI NtFreeVirtualMemory( HANDLE process, PVOID *addr_ptr, SIZE_T *size_ptr, ULONG type )
3893 struct file_view *view;
3894 char *base;
3895 sigset_t sigset;
3896 NTSTATUS status = STATUS_SUCCESS;
3897 LPVOID addr = *addr_ptr;
3898 SIZE_T size = *size_ptr;
3900 TRACE("%p %p %08lx %x\n", process, addr, size, type );
/* cross-process free is forwarded to the target via an APC */
3902 if (process != NtCurrentProcess())
3904 apc_call_t call;
3905 apc_result_t result;
3907 memset( &call, 0, sizeof(call) );
3909 call.virtual_free.type = APC_VIRTUAL_FREE;
3910 call.virtual_free.addr = wine_server_client_ptr( addr );
3911 call.virtual_free.size = size;
3912 call.virtual_free.op_type = type;
3913 status = server_queue_process_apc( process, &call, &result );
3914 if (status != STATUS_SUCCESS) return status;
3916 if (result.virtual_free.status == STATUS_SUCCESS)
3918 *addr_ptr = wine_server_get_ptr( result.virtual_free.addr );
3919 *size_ptr = result.virtual_free.size;
3921 return result.virtual_free.status;
3924 /* Fix the parameters */
3926 size = ROUND_SIZE( addr, size );
3927 base = ROUND_ADDR( addr, page_mask );
3929 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
3931 /* avoid freeing the DOS area when a broken app passes a NULL pointer */
3932 if (!base)
3934 /* address 1 is magic to mean release reserved space */
3935 if (addr == (void *)1 && !*size_ptr && type == MEM_RELEASE) virtual_release_address_space();
3936 else status = STATUS_INVALID_PARAMETER;
3938 else if (!(view = find_view( base, size )) || !is_view_valloc( view ))
3940 status = STATUS_INVALID_PARAMETER;
3942 else if (type == MEM_RELEASE)
3944 /* Free the pages */
/* MEM_RELEASE requires size 0 and the exact base of the allocation */
3946 if (size || (base != view->base)) status = STATUS_INVALID_PARAMETER;
3947 else
3949 delete_view( view );
3950 *addr_ptr = base;
3951 *size_ptr = size;
3954 else if (type == MEM_DECOMMIT)
3956 status = decommit_pages( view, base - (char *)view->base, size );
3957 if (status == STATUS_SUCCESS)
3959 *addr_ptr = base;
3960 *size_ptr = size;
3963 else
3965 WARN("called with wrong free type flags (%08x) !\n", type);
3966 status = STATUS_INVALID_PARAMETER;
3969 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
3970 return status;
3974 /***********************************************************************
3975 * NtProtectVirtualMemory (NTDLL.@)
3976 * ZwProtectVirtualMemory (NTDLL.@)
3978 NTSTATUS WINAPI NtProtectVirtualMemory( HANDLE process, PVOID *addr_ptr, SIZE_T *size_ptr,
3979 ULONG new_prot, ULONG *old_prot )
3981 struct file_view *view;
3982 sigset_t sigset;
3983 NTSTATUS status = STATUS_SUCCESS;
3984 char *base;
3985 BYTE vprot;
3986 SIZE_T size = *size_ptr;
3987 LPVOID addr = *addr_ptr;
3988 DWORD old;
3990 TRACE("%p %p %08lx %08x\n", process, addr, size, new_prot );
3992 if (!old_prot)
3993 return STATUS_ACCESS_VIOLATION;
/* cross-process protect is forwarded to the target via an APC */
3995 if (process != NtCurrentProcess())
3997 apc_call_t call;
3998 apc_result_t result;
4000 memset( &call, 0, sizeof(call) );
4002 call.virtual_protect.type = APC_VIRTUAL_PROTECT;
4003 call.virtual_protect.addr = wine_server_client_ptr( addr );
4004 call.virtual_protect.size = size;
4005 call.virtual_protect.prot = new_prot;
4006 status = server_queue_process_apc( process, &call, &result );
4007 if (status != STATUS_SUCCESS) return status;
4009 if (result.virtual_protect.status == STATUS_SUCCESS)
4011 *addr_ptr = wine_server_get_ptr( result.virtual_protect.addr );
4012 *size_ptr = result.virtual_protect.size;
4013 *old_prot = result.virtual_protect.prot;
4015 return result.virtual_protect.status;
4018 /* Fix the parameters */
4020 size = ROUND_SIZE( addr, size );
4021 base = ROUND_ADDR( addr, page_mask );
4023 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
4025 if ((view = find_view( base, size )))
4027 /* Make sure all the pages are committed */
4028 if (get_committed_size( view, base, &vprot ) >= size && (vprot & VPROT_COMMITTED))
/* record the previous protection before applying the new one */
4030 old = get_win32_prot( vprot, view->protect );
4031 status = set_protection( view, base, size, new_prot );
4033 else status = STATUS_NOT_COMMITTED;
4035 else status = STATUS_INVALID_PARAMETER;
4037 if (!status) VIRTUAL_DEBUG_DUMP_VIEW( view );
4039 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
4041 if (status == STATUS_SUCCESS)
4043 *addr_ptr = base;
4044 *size_ptr = size;
4045 *old_prot = old;
4047 return status;
4051 /* retrieve state for a free memory area; callback for mmap_enum_reserved_areas */
/* info->BaseAddress/RegionSize describe the queried range on entry; the
 * callback fills in the MEMORY_BASIC_INFORMATION fields depending on
 * whether the query falls inside or outside a reserved area. Returns
 * non-zero to stop the enumeration once the range has been handled. */
4052 static int get_free_mem_state_callback( void *start, SIZE_T size, void *arg )
4054 MEMORY_BASIC_INFORMATION *info = arg;
4055 void *end = (char *)start + size;
/* reserved area entirely before the queried range -> keep enumerating */
4057 if ((char *)info->BaseAddress + info->RegionSize <= (char *)start) return 0;
/* reserved area entirely after the queried range -> remember its end as
 * a lower bound for AllocationBase and keep enumerating */
4059 if (info->BaseAddress >= end)
4061 if (info->AllocationBase < end) info->AllocationBase = end;
4062 return 0;
4065 if (info->BaseAddress >= start || start <= address_space_start)
4067 /* it's a real free area */
4068 info->State = MEM_FREE;
4069 info->Protect = PAGE_NOACCESS;
4070 info->AllocationBase = 0;
4071 info->AllocationProtect = 0;
4072 info->Type = 0;
4073 if ((char *)info->BaseAddress + info->RegionSize > (char *)end)
4074 info->RegionSize = (char *)end - (char *)info->BaseAddress;
4076 else /* outside of the reserved area, pretend it's allocated */
4078 info->RegionSize = (char *)start - (char *)info->BaseAddress;
4079 #ifdef __i386__
4080 info->State = MEM_RESERVE;
4081 info->Protect = PAGE_NOACCESS;
4082 info->AllocationProtect = PAGE_NOACCESS;
4083 info->Type = MEM_PRIVATE;
4084 #else
4085 info->State = MEM_FREE;
4086 info->Protect = PAGE_NOACCESS;
4087 info->AllocationBase = 0;
4088 info->AllocationProtect = 0;
4089 info->Type = 0;
4090 #endif
4092 return 1;
4095 /* get basic information about a memory block */
4096 static NTSTATUS get_basic_memory_info( HANDLE process, LPCVOID addr,
4097 MEMORY_BASIC_INFORMATION *info,
4098 SIZE_T len, SIZE_T *res_len )
4100 struct file_view *view;
4101 char *base, *alloc_base = 0, *alloc_end = working_set_limit;
4102 struct wine_rb_entry *ptr;
4103 sigset_t sigset;
4105 if (len < sizeof(MEMORY_BASIC_INFORMATION))
4106 return STATUS_INFO_LENGTH_MISMATCH;
4108 if (process != NtCurrentProcess())
4110 NTSTATUS status;
4111 apc_call_t call;
4112 apc_result_t result;
4114 memset( &call, 0, sizeof(call) );
4116 call.virtual_query.type = APC_VIRTUAL_QUERY;
4117 call.virtual_query.addr = wine_server_client_ptr( addr );
4118 status = server_queue_process_apc( process, &call, &result );
4119 if (status != STATUS_SUCCESS) return status;
4121 if (result.virtual_query.status == STATUS_SUCCESS)
4123 info->BaseAddress = wine_server_get_ptr( result.virtual_query.base );
4124 info->AllocationBase = wine_server_get_ptr( result.virtual_query.alloc_base );
4125 info->RegionSize = result.virtual_query.size;
4126 info->Protect = result.virtual_query.prot;
4127 info->AllocationProtect = result.virtual_query.alloc_prot;
4128 info->State = (DWORD)result.virtual_query.state << 12;
4129 info->Type = (DWORD)result.virtual_query.alloc_type << 16;
4130 if (info->RegionSize != result.virtual_query.size) /* truncated */
4131 return STATUS_INVALID_PARAMETER; /* FIXME */
4132 if (res_len) *res_len = sizeof(*info);
4134 return result.virtual_query.status;
4137 base = ROUND_ADDR( addr, page_mask );
4139 if (is_beyond_limit( base, 1, working_set_limit )) return STATUS_INVALID_PARAMETER;
4141 /* Find the view containing the address */
4143 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
4144 ptr = views_tree.root;
4145 while (ptr)
4147 view = WINE_RB_ENTRY_VALUE( ptr, struct file_view, entry );
4148 if ((char *)view->base > base)
4150 alloc_end = view->base;
4151 ptr = ptr->left;
4153 else if ((char *)view->base + view->size <= base)
4155 alloc_base = (char *)view->base + view->size;
4156 ptr = ptr->right;
4158 else
4160 alloc_base = view->base;
4161 alloc_end = (char *)view->base + view->size;
4162 break;
4166 /* Fill the info structure */
4168 info->AllocationBase = alloc_base;
4169 info->BaseAddress = base;
4170 info->RegionSize = alloc_end - base;
4172 if (!ptr)
4174 if (!mmap_enum_reserved_areas( get_free_mem_state_callback, info, 0 ))
4176 /* not in a reserved area at all, pretend it's allocated */
4177 #ifdef __i386__
4178 if (base >= (char *)address_space_start)
4180 info->State = MEM_RESERVE;
4181 info->Protect = PAGE_NOACCESS;
4182 info->AllocationProtect = PAGE_NOACCESS;
4183 info->Type = MEM_PRIVATE;
4185 else
4186 #endif
4188 info->State = MEM_FREE;
4189 info->Protect = PAGE_NOACCESS;
4190 info->AllocationBase = 0;
4191 info->AllocationProtect = 0;
4192 info->Type = 0;
4196 else
4198 BYTE vprot;
4199 char *ptr;
4200 SIZE_T range_size = get_committed_size( view, base, &vprot );
4202 info->State = (vprot & VPROT_COMMITTED) ? MEM_COMMIT : MEM_RESERVE;
4203 info->Protect = (vprot & VPROT_COMMITTED) ? get_win32_prot( vprot, view->protect ) : 0;
4204 info->AllocationProtect = get_win32_prot( view->protect, view->protect );
4205 if (view->protect & SEC_IMAGE) info->Type = MEM_IMAGE;
4206 else if (view->protect & (SEC_FILE | SEC_RESERVE | SEC_COMMIT)) info->Type = MEM_MAPPED;
4207 else info->Type = MEM_PRIVATE;
4208 for (ptr = base; ptr < base + range_size; ptr += page_size)
4209 if ((get_page_vprot( ptr ) ^ vprot) & ~VPROT_WRITEWATCH) break;
4210 info->RegionSize = ptr - base;
4212 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
4214 if (res_len) *res_len = sizeof(*info);
4215 return STATUS_SUCCESS;
4218 static NTSTATUS get_working_set_ex( HANDLE process, LPCVOID addr,
4219 MEMORY_WORKING_SET_EX_INFORMATION *info,
4220 SIZE_T len, SIZE_T *res_len )
4222 FILE *f;
4223 MEMORY_WORKING_SET_EX_INFORMATION *p;
4224 sigset_t sigset;
4226 if (process != NtCurrentProcess())
4228 FIXME( "(process=%p,addr=%p) Unimplemented information class: MemoryWorkingSetExInformation\n", process, addr );
4229 return STATUS_INVALID_INFO_CLASS;
4232 f = fopen( "/proc/self/pagemap", "rb" );
4233 if (!f)
4235 static int once;
4236 if (!once++) WARN( "unable to open /proc/self/pagemap\n" );
4239 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
4240 for (p = info; (UINT_PTR)(p + 1) <= (UINT_PTR)info + len; p++)
4242 BYTE vprot;
4243 UINT64 pagemap;
4244 struct file_view *view;
4246 memset( &p->VirtualAttributes, 0, sizeof(p->VirtualAttributes) );
4248 /* If we don't have pagemap information, default to invalid. */
4249 if (!f || fseek( f, ((UINT_PTR)p->VirtualAddress >> 12) * sizeof(pagemap), SEEK_SET ) == -1 ||
4250 fread( &pagemap, sizeof(pagemap), 1, f ) != 1)
4252 pagemap = 0;
4255 if ((view = find_view( p->VirtualAddress, 0 )) &&
4256 get_committed_size( view, p->VirtualAddress, &vprot ) &&
4257 (vprot & VPROT_COMMITTED))
4259 p->VirtualAttributes.Valid = !(vprot & VPROT_GUARD) && (vprot & 0x0f) && (pagemap >> 63);
4260 p->VirtualAttributes.Shared = !is_view_valloc( view ) && ((pagemap >> 61) & 1);
4261 if (p->VirtualAttributes.Shared && p->VirtualAttributes.Valid)
4262 p->VirtualAttributes.ShareCount = 1; /* FIXME */
4263 if (p->VirtualAttributes.Valid)
4264 p->VirtualAttributes.Win32Protection = get_win32_prot( vprot, view->protect );
4267 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
4269 if (f)
4270 fclose( f );
4271 if (res_len)
4272 *res_len = (UINT_PTR)p - (UINT_PTR)info;
4273 return STATUS_SUCCESS;
4276 static NTSTATUS get_memory_section_name( HANDLE process, LPCVOID addr,
4277 MEMORY_SECTION_NAME *info, SIZE_T len, SIZE_T *ret_len )
4279 NTSTATUS status;
4281 if (!info) return STATUS_ACCESS_VIOLATION;
4283 SERVER_START_REQ( get_mapping_filename )
4285 req->process = wine_server_obj_handle( process );
4286 req->addr = wine_server_client_ptr( addr );
4287 if (len > sizeof(*info) + sizeof(WCHAR))
4288 wine_server_set_reply( req, info + 1, len - sizeof(*info) - sizeof(WCHAR) );
4289 status = wine_server_call( req );
4290 if (!status || status == STATUS_BUFFER_OVERFLOW)
4292 if (ret_len) *ret_len = sizeof(*info) + reply->len + sizeof(WCHAR);
4293 if (len < sizeof(*info)) status = STATUS_INFO_LENGTH_MISMATCH;
4294 if (!status)
4296 info->SectionFileName.Buffer = (WCHAR *)(info + 1);
4297 info->SectionFileName.Length = reply->len;
4298 info->SectionFileName.MaximumLength = reply->len + sizeof(WCHAR);
4299 info->SectionFileName.Buffer[reply->len / sizeof(WCHAR)] = 0;
4303 SERVER_END_REQ;
4304 return status;
4308 /***********************************************************************
4309 * NtQueryVirtualMemory (NTDLL.@)
4310 * ZwQueryVirtualMemory (NTDLL.@)
4312 NTSTATUS WINAPI NtQueryVirtualMemory( HANDLE process, LPCVOID addr,
4313 MEMORY_INFORMATION_CLASS info_class,
4314 PVOID buffer, SIZE_T len, SIZE_T *res_len )
4316 NTSTATUS status;
4318 TRACE("(%p, %p, info_class=%d, %p, %ld, %p)\n",
4319 process, addr, info_class, buffer, len, res_len);
4321 switch(info_class)
4323 case MemoryBasicInformation:
4324 return get_basic_memory_info( process, addr, buffer, len, res_len );
4326 case MemoryWorkingSetExInformation:
4327 return get_working_set_ex( process, addr, buffer, len, res_len );
4329 case MemoryMappedFilenameInformation:
4330 return get_memory_section_name( process, addr, buffer, len, res_len );
4332 case MemoryWineImageInitFuncs:
4333 if (process == GetCurrentProcess())
4335 void *module = (void *)addr;
4336 void *handle = get_builtin_so_handle( module );
4338 if (handle)
4340 status = get_builtin_init_funcs( handle, buffer, len, res_len );
4341 release_builtin_module( module );
4342 return status;
4345 return STATUS_INVALID_HANDLE;
4347 case MemoryWineUnixFuncs:
4348 case MemoryWineUnixWow64Funcs:
4349 if (len != sizeof(unixlib_handle_t)) return STATUS_INFO_LENGTH_MISMATCH;
4350 if (process == GetCurrentProcess())
4352 void *module = (void *)addr;
4353 void *funcs = NULL;
4355 status = get_builtin_unix_funcs( module, info_class == MemoryWineUnixWow64Funcs, &funcs );
4356 if (!status) *(unixlib_handle_t *)buffer = (UINT_PTR)funcs;
4357 return status;
4359 return STATUS_INVALID_HANDLE;
4361 default:
4362 FIXME("(%p,%p,info_class=%d,%p,%ld,%p) Unknown information class\n",
4363 process, addr, info_class, buffer, len, res_len);
4364 return STATUS_INVALID_INFO_CLASS;
4369 /***********************************************************************
4370 * NtLockVirtualMemory (NTDLL.@)
4371 * ZwLockVirtualMemory (NTDLL.@)
4373 NTSTATUS WINAPI NtLockVirtualMemory( HANDLE process, PVOID *addr, SIZE_T *size, ULONG unknown )
4375 NTSTATUS status = STATUS_SUCCESS;
4377 if (process != NtCurrentProcess())
4379 apc_call_t call;
4380 apc_result_t result;
4382 memset( &call, 0, sizeof(call) );
4384 call.virtual_lock.type = APC_VIRTUAL_LOCK;
4385 call.virtual_lock.addr = wine_server_client_ptr( *addr );
4386 call.virtual_lock.size = *size;
4387 status = server_queue_process_apc( process, &call, &result );
4388 if (status != STATUS_SUCCESS) return status;
4390 if (result.virtual_lock.status == STATUS_SUCCESS)
4392 *addr = wine_server_get_ptr( result.virtual_lock.addr );
4393 *size = result.virtual_lock.size;
4395 return result.virtual_lock.status;
4398 *size = ROUND_SIZE( *addr, *size );
4399 *addr = ROUND_ADDR( *addr, page_mask );
4401 if (mlock( *addr, *size )) status = STATUS_ACCESS_DENIED;
4402 return status;
4406 /***********************************************************************
4407 * NtUnlockVirtualMemory (NTDLL.@)
4408 * ZwUnlockVirtualMemory (NTDLL.@)
4410 NTSTATUS WINAPI NtUnlockVirtualMemory( HANDLE process, PVOID *addr, SIZE_T *size, ULONG unknown )
4412 NTSTATUS status = STATUS_SUCCESS;
4414 if (process != NtCurrentProcess())
4416 apc_call_t call;
4417 apc_result_t result;
4419 memset( &call, 0, sizeof(call) );
4421 call.virtual_unlock.type = APC_VIRTUAL_UNLOCK;
4422 call.virtual_unlock.addr = wine_server_client_ptr( *addr );
4423 call.virtual_unlock.size = *size;
4424 status = server_queue_process_apc( process, &call, &result );
4425 if (status != STATUS_SUCCESS) return status;
4427 if (result.virtual_unlock.status == STATUS_SUCCESS)
4429 *addr = wine_server_get_ptr( result.virtual_unlock.addr );
4430 *size = result.virtual_unlock.size;
4432 return result.virtual_unlock.status;
4435 *size = ROUND_SIZE( *addr, *size );
4436 *addr = ROUND_ADDR( *addr, page_mask );
4438 if (munlock( *addr, *size )) status = STATUS_ACCESS_DENIED;
4439 return status;
4443 /***********************************************************************
4444 * NtMapViewOfSection (NTDLL.@)
4445 * ZwMapViewOfSection (NTDLL.@)
4447 NTSTATUS WINAPI NtMapViewOfSection( HANDLE handle, HANDLE process, PVOID *addr_ptr, ULONG_PTR zero_bits,
4448 SIZE_T commit_size, const LARGE_INTEGER *offset_ptr, SIZE_T *size_ptr,
4449 SECTION_INHERIT inherit, ULONG alloc_type, ULONG protect )
4451 NTSTATUS res;
4452 SIZE_T mask = granularity_mask;
4453 LARGE_INTEGER offset;
4455 offset.QuadPart = offset_ptr ? offset_ptr->QuadPart : 0;
4457 TRACE("handle=%p process=%p addr=%p off=%x%08x size=%lx access=%x\n",
4458 handle, process, *addr_ptr, offset.u.HighPart, offset.u.LowPart, *size_ptr, protect );
4460 /* Check parameters */
4461 if (zero_bits > 21 && zero_bits < 32)
4462 return STATUS_INVALID_PARAMETER_4;
4464 /* If both addr_ptr and zero_bits are passed, they have match */
4465 if (*addr_ptr && zero_bits && zero_bits < 32 &&
4466 (((UINT_PTR)*addr_ptr) >> (32 - zero_bits)))
4467 return STATUS_INVALID_PARAMETER_4;
4468 if (*addr_ptr && zero_bits >= 32 &&
4469 (((UINT_PTR)*addr_ptr) & ~zero_bits))
4470 return STATUS_INVALID_PARAMETER_4;
4472 #ifndef _WIN64
4473 if (!is_wow64)
4475 if (zero_bits >= 32) return STATUS_INVALID_PARAMETER_4;
4476 if (alloc_type & AT_ROUND_TO_PAGE)
4478 *addr_ptr = ROUND_ADDR( *addr_ptr, page_mask );
4479 mask = page_mask;
4482 #endif
4484 if ((offset.u.LowPart & mask) || (*addr_ptr && ((UINT_PTR)*addr_ptr & mask)))
4485 return STATUS_MAPPED_ALIGNMENT;
4487 if (process != NtCurrentProcess())
4489 apc_call_t call;
4490 apc_result_t result;
4492 memset( &call, 0, sizeof(call) );
4494 call.map_view.type = APC_MAP_VIEW;
4495 call.map_view.handle = wine_server_obj_handle( handle );
4496 call.map_view.addr = wine_server_client_ptr( *addr_ptr );
4497 call.map_view.size = *size_ptr;
4498 call.map_view.offset = offset.QuadPart;
4499 call.map_view.zero_bits = zero_bits;
4500 call.map_view.alloc_type = alloc_type;
4501 call.map_view.prot = protect;
4502 res = server_queue_process_apc( process, &call, &result );
4503 if (res != STATUS_SUCCESS) return res;
4505 if ((NTSTATUS)result.map_view.status >= 0)
4507 *addr_ptr = wine_server_get_ptr( result.map_view.addr );
4508 *size_ptr = result.map_view.size;
4510 return result.map_view.status;
4513 return virtual_map_section( handle, addr_ptr, zero_bits, commit_size,
4514 offset_ptr, size_ptr, alloc_type, protect );
4518 /***********************************************************************
4519 * NtUnmapViewOfSection (NTDLL.@)
4520 * ZwUnmapViewOfSection (NTDLL.@)
4522 NTSTATUS WINAPI NtUnmapViewOfSection( HANDLE process, PVOID addr )
4524 struct file_view *view;
4525 NTSTATUS status = STATUS_NOT_MAPPED_VIEW;
4526 sigset_t sigset;
4528 if (process != NtCurrentProcess())
4530 apc_call_t call;
4531 apc_result_t result;
4533 memset( &call, 0, sizeof(call) );
4535 call.unmap_view.type = APC_UNMAP_VIEW;
4536 call.unmap_view.addr = wine_server_client_ptr( addr );
4537 status = server_queue_process_apc( process, &call, &result );
4538 if (status == STATUS_SUCCESS) status = result.unmap_view.status;
4539 return status;
4542 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
4543 if ((view = find_view( addr, 0 )) && !is_view_valloc( view ))
4545 if (view->protect & VPROT_SYSTEM)
4547 struct builtin_module *builtin;
4549 LIST_FOR_EACH_ENTRY( builtin, &builtin_modules, struct builtin_module, entry )
4551 if (builtin->module != view->base) continue;
4552 if (builtin->refcount > 1)
4554 TRACE( "not freeing in-use builtin %p\n", view->base );
4555 builtin->refcount--;
4556 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
4557 return STATUS_SUCCESS;
4562 SERVER_START_REQ( unmap_view )
4564 req->base = wine_server_client_ptr( view->base );
4565 status = wine_server_call( req );
4567 SERVER_END_REQ;
4568 if (!status)
4570 if (view->protect & SEC_IMAGE) release_builtin_module( view->base );
4571 delete_view( view );
4573 else FIXME( "failed to unmap %p %x\n", view->base, status );
4575 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
4576 return status;
4580 /******************************************************************************
4581 * virtual_fill_image_information
4583 * Helper for NtQuerySection.
4585 void virtual_fill_image_information( const pe_image_info_t *pe_info, SECTION_IMAGE_INFORMATION *info )
4587 info->TransferAddress = wine_server_get_ptr( pe_info->base + pe_info->entry_point );
4588 info->ZeroBits = pe_info->zerobits;
4589 info->MaximumStackSize = pe_info->stack_size;
4590 info->CommittedStackSize = pe_info->stack_commit;
4591 info->SubSystemType = pe_info->subsystem;
4592 info->MinorSubsystemVersion = pe_info->subsystem_minor;
4593 info->MajorSubsystemVersion = pe_info->subsystem_major;
4594 info->MajorOperatingSystemVersion = pe_info->osversion_major;
4595 info->MinorOperatingSystemVersion = pe_info->osversion_minor;
4596 info->ImageCharacteristics = pe_info->image_charact;
4597 info->DllCharacteristics = pe_info->dll_charact;
4598 info->Machine = pe_info->machine;
4599 info->ImageContainsCode = pe_info->contains_code;
4600 info->ImageFlags = pe_info->image_flags;
4601 info->LoaderFlags = pe_info->loader_flags;
4602 info->ImageFileSize = pe_info->file_size;
4603 info->CheckSum = pe_info->checksum;
4604 #ifndef _WIN64 /* don't return 64-bit values to 32-bit processes */
4605 if (is_machine_64bit( pe_info->machine ))
4607 info->TransferAddress = (void *)0x81231234; /* sic */
4608 info->MaximumStackSize = 0x100000;
4609 info->CommittedStackSize = 0x10000;
4611 #endif
4614 /******************************************************************************
4615 * NtQuerySection (NTDLL.@)
4616 * ZwQuerySection (NTDLL.@)
4618 NTSTATUS WINAPI NtQuerySection( HANDLE handle, SECTION_INFORMATION_CLASS class, void *ptr,
4619 SIZE_T size, SIZE_T *ret_size )
4621 NTSTATUS status;
4622 pe_image_info_t image_info;
4624 switch (class)
4626 case SectionBasicInformation:
4627 if (size < sizeof(SECTION_BASIC_INFORMATION)) return STATUS_INFO_LENGTH_MISMATCH;
4628 break;
4629 case SectionImageInformation:
4630 if (size < sizeof(SECTION_IMAGE_INFORMATION)) return STATUS_INFO_LENGTH_MISMATCH;
4631 break;
4632 default:
4633 FIXME( "class %u not implemented\n", class );
4634 return STATUS_NOT_IMPLEMENTED;
4636 if (!ptr) return STATUS_ACCESS_VIOLATION;
4638 SERVER_START_REQ( get_mapping_info )
4640 req->handle = wine_server_obj_handle( handle );
4641 req->access = SECTION_QUERY;
4642 wine_server_set_reply( req, &image_info, sizeof(image_info) );
4643 if (!(status = wine_server_call( req )))
4645 if (class == SectionBasicInformation)
4647 SECTION_BASIC_INFORMATION *info = ptr;
4648 info->Attributes = reply->flags;
4649 info->BaseAddress = NULL;
4650 info->Size.QuadPart = reply->size;
4651 if (ret_size) *ret_size = sizeof(*info);
4653 else if (reply->flags & SEC_IMAGE)
4655 SECTION_IMAGE_INFORMATION *info = ptr;
4656 virtual_fill_image_information( &image_info, info );
4657 if (ret_size) *ret_size = sizeof(*info);
4659 else status = STATUS_SECTION_NOT_IMAGE;
4662 SERVER_END_REQ;
4664 return status;
4668 /***********************************************************************
4669 * NtFlushVirtualMemory (NTDLL.@)
4670 * ZwFlushVirtualMemory (NTDLL.@)
4672 NTSTATUS WINAPI NtFlushVirtualMemory( HANDLE process, LPCVOID *addr_ptr,
4673 SIZE_T *size_ptr, ULONG unknown )
4675 struct file_view *view;
4676 NTSTATUS status = STATUS_SUCCESS;
4677 sigset_t sigset;
4678 void *addr = ROUND_ADDR( *addr_ptr, page_mask );
4680 if (process != NtCurrentProcess())
4682 apc_call_t call;
4683 apc_result_t result;
4685 memset( &call, 0, sizeof(call) );
4687 call.virtual_flush.type = APC_VIRTUAL_FLUSH;
4688 call.virtual_flush.addr = wine_server_client_ptr( addr );
4689 call.virtual_flush.size = *size_ptr;
4690 status = server_queue_process_apc( process, &call, &result );
4691 if (status != STATUS_SUCCESS) return status;
4693 if (result.virtual_flush.status == STATUS_SUCCESS)
4695 *addr_ptr = wine_server_get_ptr( result.virtual_flush.addr );
4696 *size_ptr = result.virtual_flush.size;
4698 return result.virtual_flush.status;
4701 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
4702 if (!(view = find_view( addr, *size_ptr ))) status = STATUS_INVALID_PARAMETER;
4703 else
4705 if (!*size_ptr) *size_ptr = view->size;
4706 *addr_ptr = addr;
4707 #ifdef MS_ASYNC
4708 if (msync( addr, *size_ptr, MS_ASYNC )) status = STATUS_NOT_MAPPED_DATA;
4709 #endif
4711 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
4712 return status;
4716 /***********************************************************************
4717 * NtGetWriteWatch (NTDLL.@)
4718 * ZwGetWriteWatch (NTDLL.@)
4720 NTSTATUS WINAPI NtGetWriteWatch( HANDLE process, ULONG flags, PVOID base, SIZE_T size, PVOID *addresses,
4721 ULONG_PTR *count, ULONG *granularity )
4723 NTSTATUS status = STATUS_SUCCESS;
4724 sigset_t sigset;
4726 size = ROUND_SIZE( base, size );
4727 base = ROUND_ADDR( base, page_mask );
4729 if (!count || !granularity) return STATUS_ACCESS_VIOLATION;
4730 if (!*count || !size) return STATUS_INVALID_PARAMETER;
4731 if (flags & ~WRITE_WATCH_FLAG_RESET) return STATUS_INVALID_PARAMETER;
4733 if (!addresses) return STATUS_ACCESS_VIOLATION;
4735 TRACE( "%p %x %p-%p %p %lu\n", process, flags, base, (char *)base + size,
4736 addresses, *count );
4738 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
4740 if (is_write_watch_range( base, size ))
4742 ULONG_PTR pos = 0;
4743 char *addr = base;
4744 char *end = addr + size;
4746 while (pos < *count && addr < end)
4748 if (!(get_page_vprot( addr ) & VPROT_WRITEWATCH)) addresses[pos++] = addr;
4749 addr += page_size;
4751 if (flags & WRITE_WATCH_FLAG_RESET) reset_write_watches( base, addr - (char *)base );
4752 *count = pos;
4753 *granularity = page_size;
4755 else status = STATUS_INVALID_PARAMETER;
4757 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
4758 return status;
4762 /***********************************************************************
4763 * NtResetWriteWatch (NTDLL.@)
4764 * ZwResetWriteWatch (NTDLL.@)
4766 NTSTATUS WINAPI NtResetWriteWatch( HANDLE process, PVOID base, SIZE_T size )
4768 NTSTATUS status = STATUS_SUCCESS;
4769 sigset_t sigset;
4771 size = ROUND_SIZE( base, size );
4772 base = ROUND_ADDR( base, page_mask );
4774 TRACE( "%p %p-%p\n", process, base, (char *)base + size );
4776 if (!size) return STATUS_INVALID_PARAMETER;
4778 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
4780 if (is_write_watch_range( base, size ))
4781 reset_write_watches( base, size );
4782 else
4783 status = STATUS_INVALID_PARAMETER;
4785 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
4786 return status;
4790 /***********************************************************************
4791 * NtReadVirtualMemory (NTDLL.@)
4792 * ZwReadVirtualMemory (NTDLL.@)
4794 NTSTATUS WINAPI NtReadVirtualMemory( HANDLE process, const void *addr, void *buffer,
4795 SIZE_T size, SIZE_T *bytes_read )
4797 NTSTATUS status;
4799 if (virtual_check_buffer_for_write( buffer, size ))
4801 SERVER_START_REQ( read_process_memory )
4803 req->handle = wine_server_obj_handle( process );
4804 req->addr = wine_server_client_ptr( addr );
4805 wine_server_set_reply( req, buffer, size );
4806 if ((status = wine_server_call( req ))) size = 0;
4808 SERVER_END_REQ;
4810 else
4812 status = STATUS_ACCESS_VIOLATION;
4813 size = 0;
4815 if (bytes_read) *bytes_read = size;
4816 return status;
4820 /***********************************************************************
4821 * NtWriteVirtualMemory (NTDLL.@)
4822 * ZwWriteVirtualMemory (NTDLL.@)
4824 NTSTATUS WINAPI NtWriteVirtualMemory( HANDLE process, void *addr, const void *buffer,
4825 SIZE_T size, SIZE_T *bytes_written )
4827 NTSTATUS status;
4829 if (virtual_check_buffer_for_read( buffer, size ))
4831 SERVER_START_REQ( write_process_memory )
4833 req->handle = wine_server_obj_handle( process );
4834 req->addr = wine_server_client_ptr( addr );
4835 wine_server_add_data( req, buffer, size );
4836 if ((status = wine_server_call( req ))) size = 0;
4838 SERVER_END_REQ;
4840 else
4842 status = STATUS_PARTIAL_COPY;
4843 size = 0;
4845 if (bytes_written) *bytes_written = size;
4846 return status;
4850 /***********************************************************************
4851 * NtAreMappedFilesTheSame (NTDLL.@)
4852 * ZwAreMappedFilesTheSame (NTDLL.@)
4854 NTSTATUS WINAPI NtAreMappedFilesTheSame(PVOID addr1, PVOID addr2)
4856 struct file_view *view1, *view2;
4857 NTSTATUS status;
4858 sigset_t sigset;
4860 TRACE("%p %p\n", addr1, addr2);
4862 server_enter_uninterrupted_section( &virtual_mutex, &sigset );
4864 view1 = find_view( addr1, 0 );
4865 view2 = find_view( addr2, 0 );
4867 if (!view1 || !view2)
4868 status = STATUS_INVALID_ADDRESS;
4869 else if (is_view_valloc( view1 ) || is_view_valloc( view2 ))
4870 status = STATUS_CONFLICTING_ADDRESSES;
4871 else if (view1 == view2)
4872 status = STATUS_SUCCESS;
4873 else if ((view1->protect & VPROT_SYSTEM) || (view2->protect & VPROT_SYSTEM))
4874 status = STATUS_NOT_SAME_DEVICE;
4875 else
4877 SERVER_START_REQ( is_same_mapping )
4879 req->base1 = wine_server_client_ptr( view1->base );
4880 req->base2 = wine_server_client_ptr( view2->base );
4881 status = wine_server_call( req );
4883 SERVER_END_REQ;
4886 server_leave_uninterrupted_section( &virtual_mutex, &sigset );
4887 return status;
4891 /**********************************************************************
4892 * NtFlushInstructionCache (NTDLL.@)
4894 NTSTATUS WINAPI NtFlushInstructionCache( HANDLE handle, const void *addr, SIZE_T size )
4896 #if defined(__x86_64__) || defined(__i386__)
4897 /* no-op */
4898 #elif defined(HAVE___CLEAR_CACHE)
4899 if (handle == GetCurrentProcess())
4901 __clear_cache( (char *)addr, (char *)addr + size );
4903 else
4905 static int once;
4906 if (!once++) FIXME( "%p %p %ld other process not supported\n", handle, addr, size );
4908 #else
4909 static int once;
4910 if (!once++) FIXME( "%p %p %ld\n", handle, addr, size );
4911 #endif
4912 return STATUS_SUCCESS;
4916 /**********************************************************************
4917 * NtFlushProcessWriteBuffers (NTDLL.@)
4919 void WINAPI NtFlushProcessWriteBuffers(void)
4921 static int once = 0;
4922 if (!once++) FIXME( "stub\n" );
4926 /**********************************************************************
4927 * NtCreatePagingFile (NTDLL.@)
4929 NTSTATUS WINAPI NtCreatePagingFile( UNICODE_STRING *name, LARGE_INTEGER *min_size,
4930 LARGE_INTEGER *max_size, LARGE_INTEGER *actual_size )
4932 FIXME( "(%s %p %p %p) stub\n", debugstr_us(name), min_size, max_size, actual_size );
4933 return STATUS_SUCCESS;
4936 #ifndef _WIN64
4938 /***********************************************************************
4939 * NtWow64AllocateVirtualMemory64 (NTDLL.@)
4940 * ZwWow64AllocateVirtualMemory64 (NTDLL.@)
4942 NTSTATUS WINAPI NtWow64AllocateVirtualMemory64( HANDLE process, ULONG64 *ret, ULONG64 zero_bits,
4943 ULONG64 *size_ptr, ULONG type, ULONG protect )
4945 void *base;
4946 SIZE_T size;
4947 NTSTATUS status;
4949 TRACE("%p %s %s %x %08x\n", process,
4950 wine_dbgstr_longlong(*ret), wine_dbgstr_longlong(*size_ptr), type, protect );
4952 if (!*size_ptr) return STATUS_INVALID_PARAMETER_4;
4953 if (zero_bits > 21 && zero_bits < 32) return STATUS_INVALID_PARAMETER_3;
4955 if (process != NtCurrentProcess())
4957 apc_call_t call;
4958 apc_result_t result;
4960 memset( &call, 0, sizeof(call) );
4962 call.virtual_alloc.type = APC_VIRTUAL_ALLOC;
4963 call.virtual_alloc.addr = *ret;
4964 call.virtual_alloc.size = *size_ptr;
4965 call.virtual_alloc.zero_bits = zero_bits;
4966 call.virtual_alloc.op_type = type;
4967 call.virtual_alloc.prot = protect;
4968 status = server_queue_process_apc( process, &call, &result );
4969 if (status != STATUS_SUCCESS) return status;
4971 if (result.virtual_alloc.status == STATUS_SUCCESS)
4973 *ret = result.virtual_alloc.addr;
4974 *size_ptr = result.virtual_alloc.size;
4976 return result.virtual_alloc.status;
4979 base = (void *)(ULONG_PTR)*ret;
4980 size = *size_ptr;
4981 if ((ULONG_PTR)base != *ret) return STATUS_CONFLICTING_ADDRESSES;
4982 if (size != *size_ptr) return STATUS_WORKING_SET_LIMIT_RANGE;
4984 status = NtAllocateVirtualMemory( process, &base, zero_bits, &size, type, protect );
4985 if (!status)
4987 *ret = (ULONG_PTR)base;
4988 *size_ptr = size;
4990 return status;
4994 /***********************************************************************
4995 * NtWow64ReadVirtualMemory64 (NTDLL.@)
4996 * ZwWow64ReadVirtualMemory64 (NTDLL.@)
4998 NTSTATUS WINAPI NtWow64ReadVirtualMemory64( HANDLE process, ULONG64 addr, void *buffer,
4999 ULONG64 size, ULONG64 *bytes_read )
5001 NTSTATUS status;
5003 if (size > MAXLONG) size = MAXLONG;
5005 if (virtual_check_buffer_for_write( buffer, size ))
5007 SERVER_START_REQ( read_process_memory )
5009 req->handle = wine_server_obj_handle( process );
5010 req->addr = addr;
5011 wine_server_set_reply( req, buffer, size );
5012 if ((status = wine_server_call( req ))) size = 0;
5014 SERVER_END_REQ;
5016 else
5018 status = STATUS_ACCESS_VIOLATION;
5019 size = 0;
5021 if (bytes_read) *bytes_read = size;
5022 return status;
5026 /***********************************************************************
5027 * NtWow64WriteVirtualMemory64 (NTDLL.@)
5028 * ZwWow64WriteVirtualMemory64 (NTDLL.@)
5030 NTSTATUS WINAPI NtWow64WriteVirtualMemory64( HANDLE process, ULONG64 addr, const void *buffer,
5031 ULONG64 size, ULONG64 *bytes_written )
5033 NTSTATUS status;
5035 if (size > MAXLONG) size = MAXLONG;
5037 if (virtual_check_buffer_for_read( buffer, size ))
5039 SERVER_START_REQ( write_process_memory )
5041 req->handle = wine_server_obj_handle( process );
5042 req->addr = addr;
5043 wine_server_add_data( req, buffer, size );
5044 if ((status = wine_server_call( req ))) size = 0;
5046 SERVER_END_REQ;
5048 else
5050 status = STATUS_PARTIAL_COPY;
5051 size = 0;
5053 if (bytes_written) *bytes_written = size;
5054 return status;
5058 /***********************************************************************
5059 * NtWow64GetNativeSystemInformation (NTDLL.@)
5060 * ZwWow64GetNativeSystemInformation (NTDLL.@)
5062 NTSTATUS WINAPI NtWow64GetNativeSystemInformation( SYSTEM_INFORMATION_CLASS class, void *info,
5063 ULONG len, ULONG *retlen )
5065 switch (class)
5067 case SystemBasicInformation:
5068 case SystemCpuInformation:
5069 case SystemEmulationBasicInformation:
5070 case SystemEmulationProcessorInformation:
5071 return NtQuerySystemInformation( class, info, len, retlen );
5072 case SystemNativeBasicInformation:
5073 return NtQuerySystemInformation( SystemBasicInformation, info, len, retlen );
5074 default:
5075 if (is_wow64) return STATUS_INVALID_INFO_CLASS;
5076 return NtQuerySystemInformation( class, info, len, retlen );
5080 #endif /* _WIN64 */