/*
 * Win32 virtual memory functions
 *
 * Copyright 1997, 2002, 2020 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */
#include <sys/types.h>
#include <sys/socket.h>

#ifdef HAVE_SYS_SYSINFO_H
# include <sys/sysinfo.h>
#endif
#ifdef HAVE_SYS_SYSCTL_H
# include <sys/sysctl.h>
#endif
#ifdef HAVE_SYS_PARAM_H
# include <sys/param.h>
#endif
#ifdef HAVE_SYS_QUEUE_H
# include <sys/queue.h>
#endif
#ifdef HAVE_SYS_USER_H
# include <sys/user.h>
#endif
#ifdef HAVE_LIBPROCSTAT_H
# include <libprocstat.h>
#endif
#ifdef HAVE_VALGRIND_VALGRIND_H
# include <valgrind/valgrind.h>
#endif
#if defined(__APPLE__)
# include <mach/mach_init.h>
# include <mach/mach_vm.h>
#endif

#define WIN32_NO_STATUS
#include "wine/list.h"
#include "wine/rbtree.h"
#include "unix_private.h"
#include "wine/debug.h"
WINE_DEFAULT_DEBUG_CHANNEL(virtual);
WINE_DECLARE_DEBUG_CHANNEL(module);
WINE_DECLARE_DEBUG_CHANNEL(virtual_ranges);
struct reserved_area
{
    struct list entry;
    void       *base;
    size_t      size;
};

static struct list reserved_areas = LIST_INIT(reserved_areas);

struct builtin_module
{
    struct list  entry;
    unsigned int refcount;
    void        *handle;
    void        *module;
    char        *unix_path;
    void        *unix_handle;
};

static struct list builtin_modules = LIST_INIT( builtin_modules );
struct file_view
{
    struct wine_rb_entry entry;  /* entry in global view tree */
    void         *base;          /* base address */
    size_t        size;          /* size in bytes */
    unsigned int  protect;       /* protection for all pages at allocation time and SEC_* flags */
};
/* per-page protection flags */
#define VPROT_READ       0x01
#define VPROT_WRITE      0x02
#define VPROT_EXEC       0x04
#define VPROT_WRITECOPY  0x08
#define VPROT_GUARD      0x10
#define VPROT_COMMITTED  0x20
#define VPROT_WRITEWATCH 0x40
/* per-mapping protection flags */
#define VPROT_SYSTEM     0x0200  /* system view (underlying mmap not under our control) */
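/* Illustrative example (not part of the original source): a committed,
 * readable, copy-on-write page, as used for image mappings, carries the
 * byte VPROT_COMMITTED | VPROT_READ | VPROT_WRITECOPY == 0x29.  The low
 * nibble (0x9) indexes the VIRTUAL_Win32Flags table below and yields
 * PAGE_WRITECOPY; the remaining bits only affect the Unix-level
 * protection chosen in get_unix_prot(). */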
/* Conversion from VPROT_* to Win32 flags */
static const BYTE VIRTUAL_Win32Flags[16] =
{
    PAGE_NOACCESS,              /* 0 */
    PAGE_READONLY,              /* READ */
    PAGE_READWRITE,             /* WRITE */
    PAGE_READWRITE,             /* READ | WRITE */
    PAGE_EXECUTE,               /* EXEC */
    PAGE_EXECUTE_READ,          /* READ | EXEC */
    PAGE_EXECUTE_READWRITE,     /* WRITE | EXEC */
    PAGE_EXECUTE_READWRITE,     /* READ | WRITE | EXEC */
    PAGE_WRITECOPY,             /* WRITECOPY */
    PAGE_WRITECOPY,             /* READ | WRITECOPY */
    PAGE_WRITECOPY,             /* WRITE | WRITECOPY */
    PAGE_WRITECOPY,             /* READ | WRITE | WRITECOPY */
    PAGE_EXECUTE_WRITECOPY,     /* EXEC | WRITECOPY */
    PAGE_EXECUTE_WRITECOPY,     /* READ | EXEC | WRITECOPY */
    PAGE_EXECUTE_WRITECOPY,     /* WRITE | EXEC | WRITECOPY */
    PAGE_EXECUTE_WRITECOPY      /* READ | WRITE | EXEC | WRITECOPY */
};
static struct wine_rb_tree views_tree;
static pthread_mutex_t virtual_mutex;

static const UINT page_shift = 12;
static const UINT_PTR page_mask = 0xfff;
static const UINT_PTR granularity_mask = 0xffff;
/* Note: these are Windows limits, you cannot change them. */
#ifdef __i386__
static void *address_space_start = (void *)0x110000; /* keep DOS area clear */
#else
static void *address_space_start = (void *)0x10000;
#endif

#ifdef __aarch64__
static void *address_space_limit = (void *)0xffffffff0000;  /* top of the total available address space */
#elif defined(_WIN64)
static void *address_space_limit = (void *)0x7fffffff0000;
#else
static void *address_space_limit = (void *)0xc0000000;
#endif

#ifdef _WIN64
static void *user_space_limit    = (void *)0x7fffffff0000;  /* top of the user address space */
static void *working_set_limit   = (void *)0x7fffffff0000;  /* top of the current working set */
#else
static void *user_space_limit    = (void *)0x7fff0000;
static void *working_set_limit   = (void *)0x7fff0000;
#endif
static UINT64 *arm64ec_map;

struct _KUSER_SHARED_DATA *user_shared_data = (void *)0x7ffe0000;

/* TEB allocation blocks */
static void *teb_block;
static void **next_free_teb;
static int teb_block_pos;
static struct list teb_list = LIST_INIT( teb_list );
#define ROUND_ADDR(addr,mask) ((void *)((UINT_PTR)(addr) & ~(UINT_PTR)(mask)))
#define ROUND_SIZE(addr,size) (((SIZE_T)(size) + ((UINT_PTR)(addr) & page_mask) + page_mask) & ~page_mask)

#define VIRTUAL_DEBUG_DUMP_VIEW(view) do { if (TRACE_ON(virtual)) dump_view(view); } while (0)
#define VIRTUAL_DEBUG_DUMP_RANGES() do { if (TRACE_ON(virtual_ranges)) dump_free_ranges(); } while (0)
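/* Worked example, assuming the 4K page constants above: for
 * addr == 0x12345 and size == 0x1000, ROUND_ADDR( addr, page_mask )
 * yields 0x12000 and ROUND_SIZE( addr, size ) yields 0x2000, since the
 * sub-page offset 0x345 pushes the range onto a second page. */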
#ifndef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif
#ifdef _WIN64  /* on 64-bit the page protection bytes use a 2-level table */
static const size_t pages_vprot_shift = 20;
static const size_t pages_vprot_mask = (1 << 20) - 1;
static size_t pages_vprot_size;
static BYTE **pages_vprot;
#else  /* on 32-bit we use a simple array with one byte per page */
static BYTE *pages_vprot;
#endif
static struct file_view *view_block_start, *view_block_end, *next_free_view;
static const size_t view_block_size = 0x100000;
static void *preload_reserve_start;
static void *preload_reserve_end;
static BOOL force_exec_prot;  /* whether to force PROT_EXEC on all PROT_READ mmaps */
struct range_entry
{
    void *base;
    void *end;
};

static struct range_entry *free_ranges;
static struct range_entry *free_ranges_end;
static inline BOOL is_beyond_limit( const void *addr, size_t size, const void *limit )
{
    return (addr >= limit || (const char *)addr + size > (const char *)limit);
}
/* mmap() anonymous memory at a fixed address */
void *anon_mmap_fixed( void *start, size_t size, int prot, int flags )
{
    return mmap( start, size, prot, MAP_PRIVATE | MAP_ANON | MAP_FIXED | flags, -1, 0 );
}
/* allocate anonymous mmap() memory at any address */
void *anon_mmap_alloc( size_t size, int prot )
{
    return mmap( NULL, size, prot, MAP_PRIVATE | MAP_ANON, -1, 0 );
}
static void mmap_add_reserved_area( void *addr, SIZE_T size )
{
    struct reserved_area *area;
    struct list *ptr;

    if (!((intptr_t)addr + size)) size--;  /* avoid wrap-around */

    LIST_FOR_EACH( ptr, &reserved_areas )
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if (area->base > addr)
        {
            /* try to merge with the next one */
            if ((char *)addr + size == (char *)area->base)
            {
                area->base = addr;
                area->size += size;
                return;
            }
            break;
        }
        else if ((char *)area->base + area->size == (char *)addr)
        {
            /* merge with the previous one */
            area->size += size;

            /* try to merge with the next one too */
            if ((ptr = list_next( &reserved_areas, ptr )))
            {
                struct reserved_area *next = LIST_ENTRY( ptr, struct reserved_area, entry );
                if ((char *)addr + size == (char *)next->base)
                {
                    area->size += next->size;
                    list_remove( &next->entry );
                    free( next );
                }
            }
            return;
        }
    }

    if ((area = malloc( sizeof(*area) )))
    {
        area->base = addr;
        area->size = size;
        list_add_before( ptr, &area->entry );
    }
}
static void mmap_remove_reserved_area( void *addr, SIZE_T size )
{
    struct reserved_area *area;
    struct list *ptr;

    if (!((intptr_t)addr + size)) size--;  /* avoid wrap-around */

    ptr = list_head( &reserved_areas );
    /* find the first area covering address */
    while (ptr)
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if ((char *)area->base >= (char *)addr + size) break;  /* outside the range */
        if ((char *)area->base + area->size > (char *)addr)  /* overlaps range */
        {
            if (area->base >= addr)
            {
                if ((char *)area->base + area->size > (char *)addr + size)
                {
                    /* range overlaps beginning of area only -> shrink area */
                    area->size -= (char *)addr + size - (char *)area->base;
                    area->base = (char *)addr + size;
                    break;
                }
                else
                {
                    /* range contains the whole area -> remove area completely */
                    ptr = list_next( &reserved_areas, ptr );
                    list_remove( &area->entry );
                    free( area );
                    continue;
                }
            }
            else
            {
                if ((char *)area->base + area->size > (char *)addr + size)
                {
                    /* range is in the middle of area -> split area in two */
                    struct reserved_area *new_area = malloc( sizeof(*new_area) );
                    if (new_area)
                    {
                        new_area->base = (char *)addr + size;
                        new_area->size = (char *)area->base + area->size - (char *)new_area->base;
                        list_add_after( ptr, &new_area->entry );
                    }
                    else size = (char *)area->base + area->size - (char *)addr;
                    area->size = (char *)addr - (char *)area->base;
                    break;
                }
                else
                {
                    /* range overlaps end of area only -> shrink area */
                    area->size = (char *)addr - (char *)area->base;
                }
            }
        }
        ptr = list_next( &reserved_areas, ptr );
    }
}
static int mmap_is_in_reserved_area( void *addr, SIZE_T size )
{
    struct reserved_area *area;
    struct list *ptr;

    LIST_FOR_EACH( ptr, &reserved_areas )
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if (area->base > addr) break;
        if ((char *)area->base + area->size <= (char *)addr) continue;
        /* area must contain block completely */
        if ((char *)area->base + area->size < (char *)addr + size) return -1;
        return 1;
    }
    return 0;
}
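/* The tri-state result is what callers rely on: 1 means the block lies
 * entirely inside one reserved area, 0 means it touches none, and -1
 * means it straddles a boundary; in the -1 case unmap_area() and
 * map_fixed_area() below split the block at the boundary and handle
 * each half separately. */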
static int mmap_enum_reserved_areas( int (*enum_func)(void *base, SIZE_T size, void *arg),
                                     void *arg, int top_down )
{
    int ret = 0;
    struct list *ptr;

    if (top_down)
    {
        for (ptr = reserved_areas.prev; ptr != &reserved_areas; ptr = ptr->prev)
        {
            struct reserved_area *area = LIST_ENTRY( ptr, struct reserved_area, entry );
            if ((ret = enum_func( area->base, area->size, arg ))) break;
        }
    }
    else
    {
        for (ptr = reserved_areas.next; ptr != &reserved_areas; ptr = ptr->next)
        {
            struct reserved_area *area = LIST_ENTRY( ptr, struct reserved_area, entry );
            if ((ret = enum_func( area->base, area->size, arg ))) break;
        }
    }
    return ret;
}
static void *anon_mmap_tryfixed( void *start, size_t size, int prot, int flags )
{
    void *ptr;

#ifdef MAP_FIXED_NOREPLACE
    ptr = mmap( start, size, prot, MAP_FIXED_NOREPLACE | MAP_PRIVATE | MAP_ANON | flags, -1, 0 );
#elif defined(MAP_TRYFIXED)
    ptr = mmap( start, size, prot, MAP_TRYFIXED | MAP_PRIVATE | MAP_ANON | flags, -1, 0 );
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
    ptr = mmap( start, size, prot, MAP_FIXED | MAP_EXCL | MAP_PRIVATE | MAP_ANON | flags, -1, 0 );
    if (ptr == MAP_FAILED && errno == EINVAL) errno = EEXIST;
#elif defined(__APPLE__)
    mach_vm_address_t result = (mach_vm_address_t)start;
    kern_return_t ret = mach_vm_map( mach_task_self(), &result, size, 0, VM_FLAGS_FIXED,
                                     MEMORY_OBJECT_NULL, 0, 0, prot, VM_PROT_ALL, VM_INHERIT_COPY );

    if (!ret)
    {
        if ((ptr = anon_mmap_fixed( start, size, prot, flags )) == MAP_FAILED)
            mach_vm_deallocate( mach_task_self(), result, size );
    }
    else
    {
        errno = (ret == KERN_NO_SPACE ? EEXIST : ENOMEM);
        ptr = MAP_FAILED;
    }
#else
    ptr = mmap( start, size, prot, MAP_PRIVATE | MAP_ANON | flags, -1, 0 );
#endif
    if (ptr != MAP_FAILED && ptr != start)
    {
        if (is_beyond_limit( ptr, size, user_space_limit ))
        {
            anon_mmap_fixed( ptr, size, PROT_NONE, MAP_NORESERVE );
            mmap_add_reserved_area( ptr, size );
        }
        else munmap( ptr, size );
        ptr = MAP_FAILED;
        errno = EEXIST;
    }
    return ptr;
}
static void reserve_area( void *addr, void *end )
{
#ifdef __APPLE__

#ifdef __i386__
    static const mach_vm_address_t max_address = VM_MAX_ADDRESS;
#else
    static const mach_vm_address_t max_address = MACH_VM_MAX_ADDRESS;
#endif
    mach_vm_address_t address = (mach_vm_address_t)addr;
    mach_vm_address_t end_address = (mach_vm_address_t)end;

    if (!end_address || max_address < end_address)
        end_address = max_address;

    while (address < end_address)
    {
        mach_vm_address_t hole_address = address;
        kern_return_t ret;
        mach_vm_size_t size;
        vm_region_basic_info_data_64_t info;
        mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
        mach_port_t dummy_object_name = MACH_PORT_NULL;

        /* find the mapped region at or above the current address. */
        ret = mach_vm_region(mach_task_self(), &address, &size, VM_REGION_BASIC_INFO_64,
                             (vm_region_info_t)&info, &count, &dummy_object_name);
        if (ret != KERN_SUCCESS)
        {
            address = max_address;
            size = 0;
        }

        if (end_address < address)
            address = end_address;
        if (hole_address < address)
        {
            /* found a hole, attempt to reserve it. */
            size_t hole_size = address - hole_address;
            mach_vm_address_t alloc_address = hole_address;

            ret = mach_vm_map( mach_task_self(), &alloc_address, hole_size, 0, VM_FLAGS_FIXED,
                               MEMORY_OBJECT_NULL, 0, 0, PROT_NONE, VM_PROT_ALL, VM_INHERIT_COPY );
            if (!ret) mmap_add_reserved_area( (void*)hole_address, hole_size );
            else if (ret == KERN_NO_SPACE)
            {
                /* something filled (part of) the hole before we could.
                   go back and look again. */
                address = hole_address;
            }
        }
        address += size;
    }
#else
    size_t size = (char *)end - (char *)addr;
    void *ptr;

    if (!size) return;

    if ((ptr = anon_mmap_tryfixed( addr, size, PROT_NONE, MAP_NORESERVE )) != MAP_FAILED)
    {
        mmap_add_reserved_area( addr, size );
        return;
    }
    size = (size / 2) & ~granularity_mask;
    if (size)
    {
        reserve_area( addr, (char *)addr + size );
        reserve_area( (char *)addr + size, end );
    }
#endif /* __APPLE__ */
}
static void mmap_init( const struct preload_info *preload_info )
{
#ifndef _WIN64
#ifndef __APPLE__
    char stack;
    char * const stack_ptr = &stack;
#endif
    char *user_space_limit = (char *)0x7ffe0000;
    int i;

    if (preload_info)
    {
        /* check for a reserved area starting at the user space limit */
        /* to avoid wasting time trying to allocate it again */
        for (i = 0; preload_info[i].size; i++)
        {
            if ((char *)preload_info[i].addr > user_space_limit) break;
            if ((char *)preload_info[i].addr + preload_info[i].size > user_space_limit)
            {
                user_space_limit = (char *)preload_info[i].addr + preload_info[i].size;
                break;
            }
        }
    }
    else reserve_area( (void *)0x00010000, (void *)0x40000000 );

#ifndef __APPLE__
    if (stack_ptr >= user_space_limit)
    {
        char *end = 0;
        char *base = stack_ptr - ((unsigned int)stack_ptr & granularity_mask) - (granularity_mask + 1);
        if (base > user_space_limit) reserve_area( user_space_limit, base );
        base = stack_ptr - ((unsigned int)stack_ptr & granularity_mask) + (granularity_mask + 1);
#if defined(linux) || defined(__FreeBSD__) || defined (__FreeBSD_kernel__) || defined(__DragonFly__)
        /* Heuristic: assume the stack is near the end of the address */
        /* space, this avoids a lot of futile allocation attempts */
        end = (char *)(((unsigned long)base + 0x0fffffff) & 0xf0000000);
#endif
        reserve_area( base, end );
    }
    else
#endif
        reserve_area( user_space_limit, 0 );

#else

    if (preload_info) return;
    /* if we don't have a preloader, try to reserve the space now */
    reserve_area( (void *)0x000000010000, (void *)0x000068000000 );
    reserve_area( (void *)0x00007ff00000, (void *)0x00007fff0000 );
    reserve_area( (void *)0x7ffffe000000, (void *)0x7fffffff0000 );

#endif
}
/***********************************************************************
 *           get_wow_user_space_limit
 */
static void *get_wow_user_space_limit(void)
{
#ifdef _WIN64
    if (main_image_info.ImageCharacteristics & IMAGE_FILE_LARGE_ADDRESS_AWARE) return (void *)0xc0000000;
    return (void *)0x7fff0000;
#endif
    return user_space_limit;
}
/***********************************************************************
 *           add_builtin_module
 */
static void add_builtin_module( void *module, void *handle )
{
    struct builtin_module *builtin;

    if (!(builtin = malloc( sizeof(*builtin) ))) return;
    builtin->handle      = handle;
    builtin->module      = module;
    builtin->refcount    = 1;
    builtin->unix_path   = NULL;
    builtin->unix_handle = NULL;
    list_add_tail( &builtin_modules, &builtin->entry );
}
/***********************************************************************
 *           release_builtin_module
 */
static void release_builtin_module( void *module )
{
    struct builtin_module *builtin;

    LIST_FOR_EACH_ENTRY( builtin, &builtin_modules, struct builtin_module, entry )
    {
        if (builtin->module != module) continue;
        if (!--builtin->refcount)
        {
            list_remove( &builtin->entry );
            if (builtin->handle) dlclose( builtin->handle );
            if (builtin->unix_handle) dlclose( builtin->unix_handle );
            free( builtin->unix_path );
            free( builtin );
        }
        break;
    }
}
/***********************************************************************
 *           get_builtin_so_handle
 */
void *get_builtin_so_handle( void *module )
{
    sigset_t sigset;
    void *ret = NULL;
    struct builtin_module *builtin;

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    LIST_FOR_EACH_ENTRY( builtin, &builtin_modules, struct builtin_module, entry )
    {
        if (builtin->module != module) continue;
        ret = builtin->handle;
        if (ret) builtin->refcount++;
        break;
    }
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    return ret;
}
/***********************************************************************
 *           get_builtin_unix_funcs
 */
static NTSTATUS get_builtin_unix_funcs( void *module, BOOL wow, const void **funcs )
{
    const char *ptr_name = wow ? "__wine_unix_call_wow64_funcs" : "__wine_unix_call_funcs";
    sigset_t sigset;
    NTSTATUS status = STATUS_DLL_NOT_FOUND;
    struct builtin_module *builtin;

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    LIST_FOR_EACH_ENTRY( builtin, &builtin_modules, struct builtin_module, entry )
    {
        if (builtin->module != module) continue;
        if (builtin->unix_path && !builtin->unix_handle)
            builtin->unix_handle = dlopen( builtin->unix_path, RTLD_NOW );
        if (builtin->unix_handle)
        {
            *funcs = dlsym( builtin->unix_handle, ptr_name );
            status = *funcs ? STATUS_SUCCESS : STATUS_ENTRYPOINT_NOT_FOUND;
        }
        break;
    }
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    return status;
}
/***********************************************************************
 *           load_builtin_unixlib
 */
NTSTATUS load_builtin_unixlib( void *module, const char *name )
{
    sigset_t sigset;
    NTSTATUS status = STATUS_SUCCESS;
    struct builtin_module *builtin;

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    LIST_FOR_EACH_ENTRY( builtin, &builtin_modules, struct builtin_module, entry )
    {
        if (builtin->module != module) continue;
        if (!builtin->unix_path) builtin->unix_path = strdup( name );
        else status = STATUS_IMAGE_ALREADY_LOADED;
        break;
    }
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    return status;
}
/***********************************************************************
 *           free_ranges_lower_bound
 *
 * Returns the first range whose end is not less than addr, or end if there's none.
 */
static struct range_entry *free_ranges_lower_bound( void *addr )
{
    struct range_entry *begin = free_ranges;
    struct range_entry *end = free_ranges_end;
    struct range_entry *mid;

    while (begin < end)
    {
        mid = begin + (end - begin) / 2;
        if (mid->end < addr)
            begin = mid + 1;
        else
            end = mid;
    }

    return begin;
}
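/* Illustrative sketch of the invariant (values are hypothetical): if
 * free_ranges holds [0x10000,0x20000) and [0x30000,0x40000), then
 * free_ranges_lower_bound( (void *)0x15000 ) returns the first entry and
 * free_ranges_lower_bound( (void *)0x25000 ) the second, i.e. the first
 * range whose end is not below the address; callers then test
 * range->base to see on which side of a gap the address falls. */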
static void dump_free_ranges(void)
{
    struct range_entry *r;
    for (r = free_ranges; r != free_ranges_end; ++r)
        TRACE_(virtual_ranges)("%p - %p.\n", r->base, r->end);
}
/***********************************************************************
 *           free_ranges_insert_view
 *
 * Updates the free_ranges after a new view has been created.
 */
static void free_ranges_insert_view( struct file_view *view )
{
    void *view_base = ROUND_ADDR( view->base, granularity_mask );
    void *view_end = ROUND_ADDR( (char *)view->base + view->size + granularity_mask, granularity_mask );
    struct range_entry *range = free_ranges_lower_bound( view_base );
    struct range_entry *next = range + 1;

    /* free_ranges initial value is such that the view is either inside range or before another one. */
    assert( range != free_ranges_end );
    assert( range->end > view_base || next != free_ranges_end );

    /* Free ranges addresses are aligned at granularity_mask while the views may be not. */

    if (range->base > view_base)
        view_base = range->base;
    if (range->end < view_end)
        view_end = range->end;
    if (range->end == view_base && next->base >= view_end)
        view_end = view_base;

    TRACE_(virtual_ranges)( "%p - %p, aligned %p - %p.\n",
                            view->base, (char *)view->base + view->size, view_base, view_end );

    if (view_end <= view_base)
    {
        VIRTUAL_DEBUG_DUMP_RANGES();
        return;
    }

    /* this should never happen */
    if (range->base > view_base || range->end < view_end)
        ERR( "range %p - %p is already partially mapped\n", view_base, view_end );
    assert( range->base <= view_base && range->end >= view_end );

    /* need to split the range in two */
    if (range->base < view_base && range->end > view_end)
    {
        memmove( next + 1, next, (free_ranges_end - next) * sizeof(struct range_entry) );
        free_ranges_end += 1;
        if ((char *)free_ranges_end - (char *)free_ranges > view_block_size)
            ERR( "Free range sequence is full, trouble ahead!\n" );
        assert( (char *)free_ranges_end - (char *)free_ranges <= view_block_size );

        next->base = view_end;
        next->end = range->end;
        range->end = view_base;
    }
    else
    {
        /* otherwise we just have to shrink it */
        if (range->base < view_base)
            range->end = view_base;
        else
            range->base = view_end;

        if (range->base < range->end)
        {
            VIRTUAL_DEBUG_DUMP_RANGES();
            return;
        }
        /* and possibly remove it if it's now empty */
        memmove( range, next, (free_ranges_end - next) * sizeof(struct range_entry) );
        free_ranges_end -= 1;
        assert( free_ranges_end - free_ranges > 0 );
    }
    VIRTUAL_DEBUG_DUMP_RANGES();
}
/***********************************************************************
 *           free_ranges_remove_view
 *
 * Updates the free_ranges after a view has been destroyed.
 */
static void free_ranges_remove_view( struct file_view *view )
{
    void *view_base = ROUND_ADDR( view->base, granularity_mask );
    void *view_end = ROUND_ADDR( (char *)view->base + view->size + granularity_mask, granularity_mask );
    struct range_entry *range = free_ranges_lower_bound( view_base );
    struct range_entry *next = range + 1;

    /* Free ranges addresses are aligned at granularity_mask while the views may be not. */
    struct file_view *prev_view = RB_ENTRY_VALUE( rb_prev( &view->entry ), struct file_view, entry );
    struct file_view *next_view = RB_ENTRY_VALUE( rb_next( &view->entry ), struct file_view, entry );
    void *prev_view_base = prev_view ? ROUND_ADDR( prev_view->base, granularity_mask ) : NULL;
    void *prev_view_end = prev_view ? ROUND_ADDR( (char *)prev_view->base + prev_view->size + granularity_mask, granularity_mask ) : NULL;
    void *next_view_base = next_view ? ROUND_ADDR( next_view->base, granularity_mask ) : NULL;
    void *next_view_end = next_view ? ROUND_ADDR( (char *)next_view->base + next_view->size + granularity_mask, granularity_mask ) : NULL;

    if (prev_view_end && prev_view_end > view_base && prev_view_base < view_end)
        view_base = prev_view_end;
    if (next_view_base && next_view_base < view_end && next_view_end > view_base)
        view_end = next_view_base;

    TRACE_(virtual_ranges)( "%p - %p, aligned %p - %p.\n",
                            view->base, (char *)view->base + view->size, view_base, view_end );

    if (view_end <= view_base)
    {
        VIRTUAL_DEBUG_DUMP_RANGES();
        return;
    }

    /* free_ranges initial value is such that the view is either inside range or before another one. */
    assert( range != free_ranges_end );
    assert( range->end > view_base || next != free_ranges_end );

    /* this should never happen, but we can safely ignore it */
    if (range->base <= view_base && range->end >= view_end)
    {
        WARN( "range %p - %p is already unmapped\n", view_base, view_end );
        return;
    }

    /* this should never happen */
    if (range->base < view_end && range->end > view_base)
        ERR( "range %p - %p is already partially unmapped\n", view_base, view_end );
    assert( range->end <= view_base || range->base >= view_end );

    /* merge with next if possible */
    if (range->end == view_base && next->base == view_end)
    {
        range->end = next->end;
        memmove( next, next + 1, (free_ranges_end - next - 1) * sizeof(struct range_entry) );
        free_ranges_end -= 1;
        assert( free_ranges_end - free_ranges > 0 );
    }
    /* or try growing the range */
    else if (range->end == view_base)
        range->end = view_end;
    else if (range->base == view_end)
        range->base = view_base;
    /* otherwise create a new one */
    else
    {
        memmove( range + 1, range, (free_ranges_end - range) * sizeof(struct range_entry) );
        free_ranges_end += 1;
        if ((char *)free_ranges_end - (char *)free_ranges > view_block_size)
            ERR( "Free range sequence is full, trouble ahead!\n" );
        assert( (char *)free_ranges_end - (char *)free_ranges <= view_block_size );

        range->base = view_base;
        range->end = view_end;
    }
    VIRTUAL_DEBUG_DUMP_RANGES();
}
static inline int is_view_valloc( const struct file_view *view )
{
    return !(view->protect & (SEC_FILE | SEC_RESERVE | SEC_COMMIT));
}
/***********************************************************************
 *           get_page_vprot
 *
 * Return the page protection byte.
 */
static BYTE get_page_vprot( const void *addr )
{
    size_t idx = (size_t)addr >> page_shift;

#ifdef _WIN64
    if ((idx >> pages_vprot_shift) >= pages_vprot_size) return 0;
    if (!pages_vprot[idx >> pages_vprot_shift]) return 0;
    return pages_vprot[idx >> pages_vprot_shift][idx & pages_vprot_mask];
#else
    return pages_vprot[idx];
#endif
}
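/* Worked example of the 64-bit two-level lookup, using the constants
 * defined above (page_shift == 12, pages_vprot_shift == 20): for a
 * hypothetical addr == (void *)0x7f1234567000, idx == 0x7f1234567, the
 * first-level slot is idx >> 20 == 0x7f12 and the byte within that 1MB
 * block is idx & 0xfffff == 0x34567. */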
/***********************************************************************
 *           get_vprot_range_size
 *
 * Return the size of the region with equal masked vprot byte.
 * Also return the protections for the first page.
 * The function assumes that base and size are page aligned,
 * base + size does not wrap around and the range is within view so
 * vprot bytes are allocated for the range. */
static SIZE_T get_vprot_range_size( char *base, SIZE_T size, BYTE mask, BYTE *vprot )
{
    static const UINT_PTR word_from_byte = (UINT_PTR)0x101010101010101;
    static const UINT_PTR index_align_mask = sizeof(UINT_PTR) - 1;
    SIZE_T curr_idx, start_idx, end_idx, aligned_start_idx;
    UINT_PTR vprot_word, mask_word;
    const BYTE *vprot_ptr;

    TRACE("base %p, size %p, mask %#x.\n", base, (void *)size, mask);

    curr_idx = start_idx = (size_t)base >> page_shift;
    end_idx = start_idx + (size >> page_shift);

    aligned_start_idx = (start_idx + index_align_mask) & ~index_align_mask;
    if (aligned_start_idx > end_idx) aligned_start_idx = end_idx;

#ifdef _WIN64
    vprot_ptr = pages_vprot[curr_idx >> pages_vprot_shift] + (curr_idx & pages_vprot_mask);
#else
    vprot_ptr = pages_vprot + curr_idx;
#endif
    *vprot = *vprot_ptr;

    /* Page count page table is at least the multiples of sizeof(UINT_PTR)
     * so we don't have to worry about crossing the boundary on unaligned idx values. */

    for (; curr_idx < aligned_start_idx; ++curr_idx, ++vprot_ptr)
        if ((*vprot ^ *vprot_ptr) & mask) return (curr_idx - start_idx) << page_shift;

    vprot_word = word_from_byte * *vprot;
    mask_word = word_from_byte * mask;
    for (; curr_idx < end_idx; curr_idx += sizeof(UINT_PTR), vprot_ptr += sizeof(UINT_PTR))
    {
#ifdef _WIN64
        if (!(curr_idx & pages_vprot_mask)) vprot_ptr = pages_vprot[curr_idx >> pages_vprot_shift];
#endif
        if ((vprot_word ^ *(UINT_PTR *)vprot_ptr) & mask_word)
        {
            for (; curr_idx < end_idx; ++curr_idx, ++vprot_ptr)
                if ((*vprot ^ *vprot_ptr) & mask) break;
            return (curr_idx - start_idx) << page_shift;
        }
    }
    return size;
}
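/* The word scan above uses the classic byte-broadcast trick: multiplying
 * a byte by word_from_byte (0x0101010101010101) replicates it into every
 * byte lane, e.g. 0x29 * word_from_byte == 0x2929292929292929 on 64-bit,
 * so a single XOR plus mask compares eight page protection bytes per
 * iteration instead of one. */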
/***********************************************************************
 *           set_page_vprot
 *
 * Set a range of page protection bytes.
 */
static void set_page_vprot( const void *addr, size_t size, BYTE vprot )
{
    size_t idx = (size_t)addr >> page_shift;
    size_t end = ((size_t)addr + size + page_mask) >> page_shift;

#ifdef _WIN64
    while (idx >> pages_vprot_shift != end >> pages_vprot_shift)
    {
        size_t dir_size = pages_vprot_mask + 1 - (idx & pages_vprot_mask);
        memset( pages_vprot[idx >> pages_vprot_shift] + (idx & pages_vprot_mask), vprot, dir_size );
        idx += dir_size;
    }
    memset( pages_vprot[idx >> pages_vprot_shift] + (idx & pages_vprot_mask), vprot, end - idx );
#else
    memset( pages_vprot + idx, vprot, end - idx );
#endif
}
/***********************************************************************
 *           set_page_vprot_bits
 *
 * Set or clear bits in a range of page protection bytes.
 */
static void set_page_vprot_bits( const void *addr, size_t size, BYTE set, BYTE clear )
{
    size_t idx = (size_t)addr >> page_shift;
    size_t end = ((size_t)addr + size + page_mask) >> page_shift;

#ifdef _WIN64
    for ( ; idx < end; idx++)
    {
        BYTE *ptr = pages_vprot[idx >> pages_vprot_shift] + (idx & pages_vprot_mask);
        *ptr = (*ptr & ~clear) | set;
    }
#else
    for ( ; idx < end; idx++) pages_vprot[idx] = (pages_vprot[idx] & ~clear) | set;
#endif
}
/***********************************************************************
 *           alloc_pages_vprot
 *
 * Allocate the page protection bytes for a given range.
 */
static BOOL alloc_pages_vprot( const void *addr, size_t size )
{
#ifdef _WIN64
    size_t idx = (size_t)addr >> page_shift;
    size_t end = ((size_t)addr + size + page_mask) >> page_shift;
    size_t i;
    void *ptr;

    assert( end <= pages_vprot_size << pages_vprot_shift );
    for (i = idx >> pages_vprot_shift; i < (end + pages_vprot_mask) >> pages_vprot_shift; i++)
    {
        if (pages_vprot[i]) continue;
        if ((ptr = anon_mmap_alloc( pages_vprot_mask + 1, PROT_READ | PROT_WRITE )) == MAP_FAILED)
            return FALSE;
        pages_vprot[i] = ptr;
    }
#endif
    return TRUE;
}
static inline UINT64 maskbits( size_t idx )
{
    return ~(UINT64)0 << (idx & 63);
}
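/* Worked example: maskbits(3) == 0xfffffffffffffff8, i.e. all bits from
 * position 3 upwards.  The ARM64EC helpers below combine maskbits(idx)
 * with ~maskbits(end) to build masks covering the partial first and
 * last 64-bit words of a page range. */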
/***********************************************************************
 *           set_arm64ec_range
 */
static void set_arm64ec_range( const void *addr, size_t size )
{
    size_t idx = (size_t)addr >> page_shift;
    size_t end = ((size_t)addr + size + page_mask) >> page_shift;
    size_t pos = idx / 64;
    size_t end_pos = end / 64;

    if (end_pos > pos)
    {
        arm64ec_map[pos++] |= maskbits( idx );
        while (pos < end_pos) arm64ec_map[pos++] = ~(UINT64)0;
        if (end & 63) arm64ec_map[pos] |= ~maskbits( end );
    }
    else arm64ec_map[pos] |= maskbits( idx ) & ~maskbits( end );
}
/***********************************************************************
 *           clear_arm64ec_range
 */
static void clear_arm64ec_range( const void *addr, size_t size )
{
    size_t idx = (size_t)addr >> page_shift;
    size_t end = ((size_t)addr + size + page_mask) >> page_shift;
    size_t pos = idx / 64;
    size_t end_pos = end / 64;

    if (end_pos > pos)
    {
        arm64ec_map[pos++] &= ~maskbits( idx );
        while (pos < end_pos) arm64ec_map[pos++] = 0;
        if (end & 63) arm64ec_map[pos] &= maskbits( end );
    }
    else arm64ec_map[pos] &= ~maskbits( idx ) | maskbits( end );
}
/***********************************************************************
 *           compare_view
 *
 * View comparison function used for the rb tree.
 */
static int compare_view( const void *addr, const struct wine_rb_entry *entry )
{
    struct file_view *view = WINE_RB_ENTRY_VALUE( entry, struct file_view, entry );

    if (addr < view->base) return -1;
    if (addr > view->base) return 1;
    return 0;
}
/***********************************************************************
 *           get_prot_str
 */
static const char *get_prot_str( BYTE prot )
{
    static char buffer[6];
    buffer[0] = (prot & VPROT_COMMITTED) ? 'c' : '-';
    buffer[1] = (prot & VPROT_GUARD) ? 'g' : ((prot & VPROT_WRITEWATCH) ? 'H' : '-');
    buffer[2] = (prot & VPROT_READ) ? 'r' : '-';
    buffer[3] = (prot & VPROT_WRITECOPY) ? 'W' : ((prot & VPROT_WRITE) ? 'w' : '-');
    buffer[4] = (prot & VPROT_EXEC) ? 'x' : '-';
    buffer[5] = 0;
    return buffer;
}
/***********************************************************************
 *           get_unix_prot
 *
 * Convert page protections to protection for mmap/mprotect.
 */
static int get_unix_prot( BYTE vprot )
{
    int prot = 0;
    if ((vprot & VPROT_COMMITTED) && !(vprot & VPROT_GUARD))
    {
        if (vprot & VPROT_READ) prot |= PROT_READ;
        if (vprot & VPROT_WRITE) prot |= PROT_WRITE | PROT_READ;
        if (vprot & VPROT_WRITECOPY) prot |= PROT_WRITE | PROT_READ;
        if (vprot & VPROT_EXEC) prot |= PROT_EXEC | PROT_READ;
        if (vprot & VPROT_WRITEWATCH) prot &= ~PROT_WRITE;
    }
    if (!prot) prot = PROT_NONE;
    return prot;
}
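/* Example of the mapping: a committed PAGE_WRITECOPY page
 * (VPROT_COMMITTED | VPROT_READ | VPROT_WRITECOPY) becomes
 * PROT_READ | PROT_WRITE here, since the copy-on-write behaviour comes
 * from the MAP_PRIVATE mapping itself; a guard page yields PROT_NONE so
 * that any access faults and can be reported as a guard-page violation. */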
/***********************************************************************
 *           dump_view
 */
static void dump_view( struct file_view *view )
{
    UINT i, count;
    char *addr = view->base;
    BYTE prot = get_page_vprot( addr );

    TRACE( "View: %p - %p", addr, addr + view->size - 1 );
    if (view->protect & VPROT_SYSTEM)
        TRACE( " (builtin image)\n" );
    else if (view->protect & SEC_IMAGE)
        TRACE( " (image)\n" );
    else if (view->protect & SEC_FILE)
        TRACE( " (file)\n" );
    else if (view->protect & (SEC_RESERVE | SEC_COMMIT))
        TRACE( " (anonymous)\n" );
    else
        TRACE( " (valloc)\n");

    for (count = i = 1; i < view->size >> page_shift; i++, count++)
    {
        BYTE next = get_page_vprot( addr + (count << page_shift) );
        if (next == prot) continue;
        TRACE( "      %p - %p %s\n",
               addr, addr + (count << page_shift) - 1, get_prot_str(prot) );
        addr += (count << page_shift);
        prot = next;
        count = 0;
    }
    if (count)
        TRACE( "      %p - %p %s\n",
               addr, addr + (count << page_shift) - 1, get_prot_str(prot) );
}
/***********************************************************************
 *           VIRTUAL_Dump
 */
#ifdef WINE_VM_DEBUG
static void VIRTUAL_Dump(void)
{
    sigset_t sigset;
    struct file_view *view;

    TRACE( "Dump of all virtual memory views:\n" );
    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    WINE_RB_FOR_EACH_ENTRY( view, &views_tree, struct file_view, entry )
    {
        dump_view( view );
    }
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
}
#endif
/***********************************************************************
 *           find_view
 *
 * Find the view containing a given address. virtual_mutex must be held by caller.
 */
static struct file_view *find_view( const void *addr, size_t size )
{
    struct wine_rb_entry *ptr = views_tree.root;

    if ((const char *)addr + size < (const char *)addr) return NULL; /* overflow */

    while (ptr)
    {
        struct file_view *view = WINE_RB_ENTRY_VALUE( ptr, struct file_view, entry );

        if (view->base > addr) ptr = ptr->left;
        else if ((const char *)view->base + view->size <= (const char *)addr) ptr = ptr->right;
        else if ((const char *)view->base + view->size < (const char *)addr + size) break; /* size too large */
        else return view;
    }
    return NULL;
}
/***********************************************************************
 *           is_write_watch_range
 */
static inline BOOL is_write_watch_range( const void *addr, size_t size )
{
    struct file_view *view = find_view( addr, size );
    return view && (view->protect & VPROT_WRITEWATCH);
}
/***********************************************************************
 *           find_view_range
 *
 * Find the first view overlapping at least part of the specified range.
 * virtual_mutex must be held by caller.
 */
static struct file_view *find_view_range( const void *addr, size_t size )
{
    struct wine_rb_entry *ptr = views_tree.root;

    while (ptr)
    {
        struct file_view *view = WINE_RB_ENTRY_VALUE( ptr, struct file_view, entry );

        if ((const char *)view->base >= (const char *)addr + size) ptr = ptr->left;
        else if ((const char *)view->base + view->size <= (const char *)addr) ptr = ptr->right;
        else return view;
    }
    return NULL;
}
/***********************************************************************
 *           find_view_inside_range
 *
 * Find first (resp. last, if top_down) view inside a range.
 * virtual_mutex must be held by caller.
 */
static struct wine_rb_entry *find_view_inside_range( void **base_ptr, void **end_ptr, int top_down )
{
    struct wine_rb_entry *first = NULL, *ptr = views_tree.root;
    void *base = *base_ptr, *end = *end_ptr;

    /* find the first (resp. last) view inside the range */
    while (ptr)
    {
        struct file_view *view = WINE_RB_ENTRY_VALUE( ptr, struct file_view, entry );
        if ((char *)view->base + view->size >= (char *)end)
        {
            end = min( end, view->base );
            ptr = ptr->left;
        }
        else if (view->base <= base)
        {
            base = max( (char *)base, (char *)view->base + view->size );
            ptr = ptr->right;
        }
        else
        {
            first = ptr;
            ptr = top_down ? ptr->right : ptr->left;
        }
    }

    *base_ptr = base;
    *end_ptr = end;
    return first;
}
/***********************************************************************
 *           try_map_free_area
 *
 * Try mmaping some expected free memory region, eventually stepping and
 * retrying inside it, and return where it actually succeeded, or NULL.
 */
static void* try_map_free_area( void *base, void *end, ptrdiff_t step,
                                void *start, size_t size, int unix_prot )
{
    void *ptr;

    while (start && base <= start && (char*)start + size <= (char*)end)
    {
        if ((ptr = anon_mmap_tryfixed( start, size, unix_prot, 0 )) != MAP_FAILED) return start;
        TRACE( "Found free area is already mapped, start %p.\n", start );
        if (errno != EEXIST)
        {
            ERR( "mmap() error %s, range %p-%p, unix_prot %#x.\n",
                 strerror(errno), start, (char *)start + size, unix_prot );
            return NULL;
        }
        if ((step > 0 && (char *)end - (char *)start < step) ||
            (step < 0 && (char *)start - (char *)base < -step) ||
            step == 0)
            break;
        start = (char *)start + step;
    }

    return NULL;
}
/***********************************************************************
 *           map_free_area
 *
 * Find a free area between views inside the specified range and map it.
 * virtual_mutex must be held by caller.
 */
static void *map_free_area( void *base, void *end, size_t size, int top_down, int unix_prot, size_t align_mask )
{
    struct wine_rb_entry *first = find_view_inside_range( &base, &end, top_down );
    ptrdiff_t step = top_down ? -(align_mask + 1) : (align_mask + 1);
    void *start;

    if (top_down)
    {
        start = ROUND_ADDR( (char *)end - size, align_mask );
        if (start >= end || start < base) return NULL;

        while (first)
        {
            struct file_view *view = WINE_RB_ENTRY_VALUE( first, struct file_view, entry );
            if ((start = try_map_free_area( (char *)view->base + view->size, (char *)start + size, step,
                                            start, size, unix_prot ))) break;
            start = ROUND_ADDR( (char *)view->base - size, align_mask );
            /* stop if remaining space is not large enough */
            if (!start || start >= end || start < base) return NULL;
            first = rb_prev( first );
        }
    }
    else
    {
        start = ROUND_ADDR( (char *)base + align_mask, align_mask );
        if (!start || start >= end || (char *)end - (char *)start < size) return NULL;

        while (first)
        {
            struct file_view *view = WINE_RB_ENTRY_VALUE( first, struct file_view, entry );
            if ((start = try_map_free_area( start, view->base, step,
                                            start, size, unix_prot ))) break;
            start = ROUND_ADDR( (char *)view->base + view->size + align_mask, align_mask );
            /* stop if remaining space is not large enough */
            if (!start || start >= end || (char *)end - (char *)start < size) return NULL;
            first = rb_next( first );
        }
    }

    if (!first)
        return try_map_free_area( base, end, step, start, size, unix_prot );

    return start;
}
/***********************************************************************
 *           find_reserved_free_area
 *
 * Find a free area between views inside the specified range.
 * virtual_mutex must be held by caller.
 * The range must be inside the preloader reserved range.
 */
static void *find_reserved_free_area( void *base, void *end, size_t size, int top_down, size_t align_mask )
{
    struct range_entry *range;
    void *start;

    base = ROUND_ADDR( (char *)base + align_mask, align_mask );
    end = (char *)ROUND_ADDR( (char *)end - size, align_mask ) + size;

    if (top_down)
    {
        start = (char *)end - size;
        range = free_ranges_lower_bound( start );
        assert(range != free_ranges_end && range->end >= start);

        if ((char *)range->end - (char *)start < size) start = ROUND_ADDR( (char *)range->end - size, align_mask );
        do
        {
            if (start >= end || start < base || (char *)end - (char *)start < size) return NULL;
            if (start < range->end && start >= range->base && (char *)range->end - (char *)start >= size) break;
            if (--range < free_ranges) return NULL;
            start = ROUND_ADDR( (char *)range->end - size, align_mask );
        }
        while (1);
    }
    else
    {
        start = base;
        range = free_ranges_lower_bound( start );
        assert(range != free_ranges_end && range->end >= start);

        if (start < range->base) start = ROUND_ADDR( (char *)range->base + align_mask, align_mask );
        do
        {
            if (start >= end || start < base || (char *)end - (char *)start < size) return NULL;
            if (start < range->end && start >= range->base && (char *)range->end - (char *)start >= size) break;
            if (++range == free_ranges_end) return NULL;
            start = ROUND_ADDR( (char *)range->base + align_mask, align_mask );
        }
        while (1);
    }
    return start;
}
/***********************************************************************
 *           add_reserved_area
 *
 * Add a reserved area to the list maintained by libwine.
 * virtual_mutex must be held by caller.
 */
static void add_reserved_area( void *addr, size_t size )
{
    TRACE( "adding %p-%p\n", addr, (char *)addr + size );

    if (addr < user_space_limit)
    {
        /* unmap the part of the area that is below the limit */
        assert( (char *)addr + size > (char *)user_space_limit );
        munmap( addr, (char *)user_space_limit - (char *)addr );
        size -= (char *)user_space_limit - (char *)addr;
        addr = user_space_limit;
    }
    /* blow away existing mappings */
    anon_mmap_fixed( addr, size, PROT_NONE, MAP_NORESERVE );
    mmap_add_reserved_area( addr, size );
}
/***********************************************************************
 *           remove_reserved_area
 *
 * Remove a reserved area from the list maintained by libwine.
 * virtual_mutex must be held by caller.
 */
static void remove_reserved_area( void *addr, size_t size )
{
    struct file_view *view;

    TRACE( "removing %p-%p\n", addr, (char *)addr + size );
    mmap_remove_reserved_area( addr, size );

    /* unmap areas not covered by an existing view */
    WINE_RB_FOR_EACH_ENTRY( view, &views_tree, struct file_view, entry )
    {
        if ((char *)view->base >= (char *)addr + size) break;
        if ((char *)view->base + view->size <= (char *)addr) continue;
        if (view->base > addr) munmap( addr, (char *)view->base - (char *)addr );
        if ((char *)view->base + view->size > (char *)addr + size) return;
        size = (char *)addr + size - ((char *)view->base + view->size);
        addr = (char *)view->base + view->size;
    }
    munmap( addr, size );
}
struct area_boundary
{
    void  *base;
    size_t size;
    void  *boundary;
};

/***********************************************************************
 *           get_area_boundary_callback
 *
 * Get lowest boundary address between reserved area and non-reserved area
 * in the specified region. If no boundaries are found, result is NULL.
 * virtual_mutex must be held by caller.
 */
static int get_area_boundary_callback( void *start, SIZE_T size, void *arg )
{
    struct area_boundary *area = arg;
    void *end = (char *)start + size;

    area->boundary = NULL;
    if (area->base >= end) return 0;
    if ((char *)start >= (char *)area->base + area->size) return 1;
    if (area->base >= start)
    {
        if ((char *)area->base + area->size > (char *)end)
        {
            area->boundary = end;
            return 1;
        }
        return 0;
    }
    area->boundary = start;
    return 1;
}
/***********************************************************************
 *           unmap_area
 *
 * Unmap an area, or simply replace it by an empty mapping if it is
 * in a reserved area. virtual_mutex must be held by caller.
 */
static inline void unmap_area( void *addr, size_t size )
{
    switch (mmap_is_in_reserved_area( addr, size ))
    {
    case -1: /* partially in a reserved area */
    {
        struct area_boundary area;
        size_t lower_size;

        area.base = addr;
        area.size = size;
        mmap_enum_reserved_areas( get_area_boundary_callback, &area, 0 );
        assert( area.boundary );
        lower_size = (char *)area.boundary - (char *)addr;
        unmap_area( addr, lower_size );
        unmap_area( area.boundary, size - lower_size );
        break;
    }
    case 1:  /* in a reserved area */
        anon_mmap_fixed( addr, size, PROT_NONE, MAP_NORESERVE );
        break;
    case 0:  /* not in a reserved area */
        if (is_beyond_limit( addr, size, user_space_limit ))
            add_reserved_area( addr, size );
        else
            munmap( addr, size );
    }
}
/***********************************************************************
 *           alloc_view
 *
 * Allocate a new view. virtual_mutex must be held by caller.
 */
static struct file_view *alloc_view(void)
{
    if (next_free_view)
    {
        struct file_view *ret = next_free_view;
        next_free_view = *(struct file_view **)ret;
        return ret;
    }
    if (view_block_start == view_block_end)
    {
        void *ptr = anon_mmap_alloc( view_block_size, PROT_READ | PROT_WRITE );
        if (ptr == MAP_FAILED) return NULL;
        view_block_start = ptr;
        view_block_end = view_block_start + view_block_size / sizeof(*view_block_start);
    }
    return view_block_start++;
}
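/* The free list stores its links inside the freed views themselves: the
 * first sizeof(void *) bytes of a freed view hold the pointer to the
 * next free one, so no extra bookkeeping memory is needed.  Sketch of
 * the round trip:
 *
 *     free_view( v );                       // *(struct file_view **)v = next_free_view
 *     struct file_view *v2 = alloc_view();  // pops v back off the list (v2 == v)
 */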
/***********************************************************************
 *           free_view
 *
 * Free memory for view structure. virtual_mutex must be held by caller.
 */
static void free_view( struct file_view *view )
{
    *(struct file_view **)view = next_free_view;
    next_free_view = view;
}
/***********************************************************************
 *           unregister_view
 *
 * Remove view from the tree and update free ranges. virtual_mutex must be held by caller.
 */
static void unregister_view( struct file_view *view )
{
    if (mmap_is_in_reserved_area( view->base, view->size ))
        free_ranges_remove_view( view );
    wine_rb_remove( &views_tree, &view->entry );
}
/***********************************************************************
 *           delete_view
 *
 * Deletes a view. virtual_mutex must be held by caller.
 */
static void delete_view( struct file_view *view ) /* [in] View */
{
    if (!(view->protect & VPROT_SYSTEM)) unmap_area( view->base, view->size );
    set_page_vprot( view->base, view->size, 0 );
    if (arm64ec_map) clear_arm64ec_range( view->base, view->size );
    unregister_view( view );
    free_view( view );
}
/***********************************************************************
 *           register_view
 *
 * Add view to the tree and update free ranges. virtual_mutex must be held by caller.
 */
static void register_view( struct file_view *view )
{
    wine_rb_put( &views_tree, view->base, &view->entry );
    if (mmap_is_in_reserved_area( view->base, view->size ))
        free_ranges_insert_view( view );
}
/***********************************************************************
 *           create_view
 *
 * Create a view. virtual_mutex must be held by caller.
 */
static NTSTATUS create_view( struct file_view **view_ret, void *base, size_t size, unsigned int vprot )
{
    struct file_view *view;
    int unix_prot = get_unix_prot( vprot );

    assert( !((UINT_PTR)base & page_mask) );
    assert( !(size & page_mask) );

    /* Check for overlapping views. This can happen if the previous view
     * was a system view that got unmapped behind our back. In that case
     * we recover by simply deleting it. */

    while ((view = find_view_range( base, size )))
    {
        TRACE( "overlapping view %p-%p for %p-%p\n",
               view->base, (char *)view->base + view->size, base, (char *)base + size );
        assert( view->protect & VPROT_SYSTEM );
        delete_view( view );
    }

    if (!alloc_pages_vprot( base, size )) return STATUS_NO_MEMORY;

    /* Create the view structure */

    if (!(view = alloc_view()))
    {
        FIXME( "out of memory for %p-%p\n", base, (char *)base + size );
        return STATUS_NO_MEMORY;
    }

    view->base    = base;
    view->size    = size;
    view->protect = vprot;
    set_page_vprot( base, size, vprot );

    register_view( view );

    *view_ret = view;

    if (force_exec_prot && (unix_prot & PROT_READ) && !(unix_prot & PROT_EXEC))
    {
        TRACE( "forcing exec permission on %p-%p\n", base, (char *)base + size - 1 );
        mprotect( base, size, unix_prot | PROT_EXEC );
    }
    return STATUS_SUCCESS;
}
/***********************************************************************
 *           get_win32_prot
 *
 * Convert page protections to Win32 flags.
 */
static DWORD get_win32_prot( BYTE vprot, unsigned int map_prot )
{
    DWORD ret = VIRTUAL_Win32Flags[vprot & 0x0f];
    if (vprot & VPROT_GUARD) ret |= PAGE_GUARD;
    if (map_prot & SEC_NOCACHE) ret |= PAGE_NOCACHE;
    return ret;
}
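/* Example: for vprot == VPROT_COMMITTED | VPROT_READ | VPROT_WRITE
 * (0x23), the low nibble 0x3 selects PAGE_READWRITE from
 * VIRTUAL_Win32Flags; only VPROT_GUARD and the mapping-level SEC_NOCACHE
 * flag add bits on top of the table lookup. */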
/***********************************************************************
 *           get_vprot_flags
 *
 * Build page protections from Win32 flags.
 */
static NTSTATUS get_vprot_flags( DWORD protect, unsigned int *vprot, BOOL image )
{
    switch(protect & 0xff)
    {
    case PAGE_READONLY:
        *vprot = VPROT_READ;
        break;
    case PAGE_READWRITE:
        if (image)
            *vprot = VPROT_READ | VPROT_WRITECOPY;
        else
            *vprot = VPROT_READ | VPROT_WRITE;
        break;
    case PAGE_WRITECOPY:
        *vprot = VPROT_READ | VPROT_WRITECOPY;
        break;
    case PAGE_EXECUTE:
        *vprot = VPROT_EXEC;
        break;
    case PAGE_EXECUTE_READ:
        *vprot = VPROT_EXEC | VPROT_READ;
        break;
    case PAGE_EXECUTE_READWRITE:
        if (image)
            *vprot = VPROT_EXEC | VPROT_READ | VPROT_WRITECOPY;
        else
            *vprot = VPROT_EXEC | VPROT_READ | VPROT_WRITE;
        break;
    case PAGE_EXECUTE_WRITECOPY:
        *vprot = VPROT_EXEC | VPROT_READ | VPROT_WRITECOPY;
        break;
    case PAGE_NOACCESS:
        *vprot = 0;
        break;
    default:
        return STATUS_INVALID_PAGE_PROTECTION;
    }
    if (protect & PAGE_GUARD) *vprot |= VPROT_GUARD;
    return STATUS_SUCCESS;
}
/***********************************************************************
 *           mprotect_exec
 *
 * Wrapper for mprotect, adds PROT_EXEC if forced by force_exec_prot
 */
static inline int mprotect_exec( void *base, size_t size, int unix_prot )
{
    if (force_exec_prot && (unix_prot & PROT_READ) && !(unix_prot & PROT_EXEC))
    {
        TRACE( "forcing exec permission on %p-%p\n", base, (char *)base + size - 1 );
        if (!mprotect( base, size, unix_prot | PROT_EXEC )) return 0;
        /* exec + write may legitimately fail, in that case fall back to write only */
        if (!(unix_prot & PROT_WRITE)) return -1;
    }

    return mprotect( base, size, unix_prot );
}
/***********************************************************************
 *           mprotect_range
 *
 * Call mprotect on a page range, applying the protections from the per-page byte.
 */
static void mprotect_range( void *base, size_t size, BYTE set, BYTE clear )
{
    size_t i, count;
    char *addr = ROUND_ADDR( base, page_mask );
    int prot, next;

    size = ROUND_SIZE( base, size );
    prot = get_unix_prot( (get_page_vprot( addr ) & ~clear) | set );
    for (count = i = 1; i < size >> page_shift; i++, count++)
    {
        next = get_unix_prot( (get_page_vprot( addr + (count << page_shift) ) & ~clear) | set );
        if (next == prot) continue;
        mprotect_exec( addr, count << page_shift, prot );
        addr += count << page_shift;
        prot = next;
        count = 0;
    }
    if (count) mprotect_exec( addr, count << page_shift, prot );
}
/***********************************************************************
 *           set_vprot
 *
 * Change the protection of a range of pages.
 */
static BOOL set_vprot( struct file_view *view, void *base, size_t size, BYTE vprot )
{
    int unix_prot = get_unix_prot(vprot);

    if (view->protect & VPROT_WRITEWATCH)
    {
        /* each page may need different protections depending on write watch flag */
        set_page_vprot_bits( base, size, vprot & ~VPROT_WRITEWATCH, ~vprot & ~VPROT_WRITEWATCH );
        mprotect_range( base, size, 0, 0 );
        return TRUE;
    }
    if (mprotect_exec( base, size, unix_prot )) return FALSE;
    set_page_vprot( base, size, vprot );
    return TRUE;
}
/***********************************************************************
 *           set_protection
 *
 * Set page protections on a range of pages
 */
static NTSTATUS set_protection( struct file_view *view, void *base, SIZE_T size, ULONG protect )
{
    unsigned int vprot;
    NTSTATUS status;

    if ((status = get_vprot_flags( protect, &vprot, view->protect & SEC_IMAGE ))) return status;
    if (is_view_valloc( view ))
    {
        if (vprot & VPROT_WRITECOPY) return STATUS_INVALID_PAGE_PROTECTION;
    }
    else
    {
        BYTE access = vprot & (VPROT_READ | VPROT_WRITE | VPROT_EXEC);
        if ((view->protect & access) != access) return STATUS_INVALID_PAGE_PROTECTION;
    }

    if (!set_vprot( view, base, size, vprot | VPROT_COMMITTED )) return STATUS_ACCESS_DENIED;
    return STATUS_SUCCESS;
}
/***********************************************************************
 *           update_write_watches
 */
static void update_write_watches( void *base, size_t size, size_t accessed_size )
{
    TRACE( "updating watch %p-%p-%p\n", base, (char *)base + accessed_size, (char *)base + size );
    /* clear write watch flag on accessed pages */
    set_page_vprot_bits( base, accessed_size, 0, VPROT_WRITEWATCH );
    /* restore page protections on the entire range */
    mprotect_range( base, size, 0, 0 );
}
/***********************************************************************
 *           reset_write_watches
 *
 * Reset write watches in a memory range.
 */
static void reset_write_watches( void *base, SIZE_T size )
{
    set_page_vprot_bits( base, size, VPROT_WRITEWATCH, 0 );
    mprotect_range( base, size, 0, 0 );
}
/***********************************************************************
 *           unmap_extra_space
 *
 * Release the extra memory while keeping the range starting on the alignment boundary.
 */
static inline void *unmap_extra_space( void *ptr, size_t total_size, size_t wanted_size, size_t align_mask )
{
    if ((ULONG_PTR)ptr & align_mask)
    {
        size_t extra = align_mask + 1 - ((ULONG_PTR)ptr & align_mask);
        munmap( ptr, extra );
        ptr = (char *)ptr + extra;
        total_size -= extra;
    }
    if (total_size > wanted_size)
        munmap( (char *)ptr + wanted_size, total_size - wanted_size );
    return ptr;
}
struct alloc_area
{
    size_t size;
    int    top_down;
    void  *limit;
    size_t align_mask;
    void  *result;
};

/***********************************************************************
 *           alloc_reserved_area_callback
 *
 * Try to map some space inside a reserved area. Callback for mmap_enum_reserved_areas.
 */
static int alloc_reserved_area_callback( void *start, SIZE_T size, void *arg )
{
    struct alloc_area *alloc = arg;
    void *end = (char *)start + size;

    if (start < address_space_start) start = address_space_start;
    if (is_beyond_limit( start, size, alloc->limit )) end = alloc->limit;
    if (start >= end) return 0;

    /* make sure we don't touch the preloader reserved range */
    if (preload_reserve_end >= start)
    {
        if (preload_reserve_end >= end)
        {
            if (preload_reserve_start <= start) return 0;  /* no space in that area */
            if (preload_reserve_start < end) end = preload_reserve_start;
        }
        else if (preload_reserve_start <= start) start = preload_reserve_end;
        else
        {
            /* range is split in two by the preloader reservation, try first part */
            if ((alloc->result = find_reserved_free_area( start, preload_reserve_start, alloc->size,
                                                          alloc->top_down, alloc->align_mask )))
                return 1;
            /* then fall through to try second part */
            start = preload_reserve_end;
        }
    }
    if ((alloc->result = find_reserved_free_area( start, end, alloc->size, alloc->top_down, alloc->align_mask )))
        return 1;

    return 0;
}
/***********************************************************************
 *           map_fixed_area
 *
 * mmap the fixed memory area.
 * virtual_mutex must be held by caller.
 */
static NTSTATUS map_fixed_area( void *base, size_t size, unsigned int vprot )
{
    void *ptr;

    switch (mmap_is_in_reserved_area( base, size ))
    {
    case -1: /* partially in a reserved area */
    {
        NTSTATUS status;
        struct area_boundary area;
        size_t lower_size;

        area.base = base;
        area.size = size;
        mmap_enum_reserved_areas( get_area_boundary_callback, &area, 0 );
        assert( area.boundary );
        lower_size = (char *)area.boundary - (char *)base;
        status = map_fixed_area( base, lower_size, vprot );
        if (status == STATUS_SUCCESS)
        {
            status = map_fixed_area( area.boundary, size - lower_size, vprot );
            if (status != STATUS_SUCCESS) unmap_area( base, lower_size );
        }
        return status;
    }
    case 0:  /* not in a reserved area, do a normal allocation */
        if ((ptr = anon_mmap_tryfixed( base, size, get_unix_prot(vprot), 0 )) == MAP_FAILED)
        {
            if (errno == ENOMEM) return STATUS_NO_MEMORY;
            if (errno == EEXIST) return STATUS_CONFLICTING_ADDRESSES;
            return STATUS_INVALID_PARAMETER;
        }
        break;

    default:
    case 1:  /* in a reserved area, make sure the address is available */
        if (find_view_range( base, size )) return STATUS_CONFLICTING_ADDRESSES;
        /* replace the reserved area by our mapping */
        if ((ptr = anon_mmap_fixed( base, size, get_unix_prot(vprot), 0 )) != base)
            return STATUS_INVALID_PARAMETER;
        break;
    }
    if (is_beyond_limit( ptr, size, working_set_limit )) working_set_limit = address_space_limit;
    return STATUS_SUCCESS;
}
/***********************************************************************
 *           map_view
 *
 * Create a view and mmap the corresponding memory area.
 * virtual_mutex must be held by caller.
 */
static NTSTATUS map_view( struct file_view **view_ret, void *base, size_t size,
                          int top_down, unsigned int vprot, ULONG_PTR limit, size_t align_mask )
{
    void *ptr;
    NTSTATUS status;

    if (base)
    {
        if (is_beyond_limit( base, size, address_space_limit ))
            return STATUS_WORKING_SET_LIMIT_RANGE;
        if (limit && is_beyond_limit( base, size, (void *)limit ))
            return STATUS_CONFLICTING_ADDRESSES;
        status = map_fixed_area( base, size, vprot );
        if (status != STATUS_SUCCESS) return status;
        ptr = base;
    }
    else
    {
        struct alloc_area alloc;
        size_t view_size;

        if (!align_mask) align_mask = granularity_mask;
        view_size = size + align_mask + 1;

        alloc.size = size;
        alloc.top_down = top_down;
        alloc.limit = limit ? min( (void *)(limit + 1), user_space_limit ) : user_space_limit;
        alloc.align_mask = align_mask;

        if (mmap_enum_reserved_areas( alloc_reserved_area_callback, &alloc, top_down ))
        {
            ptr = alloc.result;
            TRACE( "got mem in reserved area %p-%p\n", ptr, (char *)ptr + size );
            if (anon_mmap_fixed( ptr, size, get_unix_prot(vprot), 0 ) != ptr)
                return STATUS_INVALID_PARAMETER;
            goto done;
        }

        if (limit)
        {
            if (!(ptr = map_free_area( address_space_start, alloc.limit, size,
                                       top_down, get_unix_prot(vprot), align_mask )))
                return STATUS_NO_MEMORY;
            TRACE( "got mem with map_free_area %p-%p\n", ptr, (char *)ptr + size );
            goto done;
        }

        for (;;)
        {
            if ((ptr = anon_mmap_alloc( view_size, get_unix_prot(vprot) )) == MAP_FAILED)
            {
                if (errno == ENOMEM) return STATUS_NO_MEMORY;
                return STATUS_INVALID_PARAMETER;
            }
            TRACE( "got mem with anon mmap %p-%p\n", ptr, (char *)ptr + size );
            /* if we got something beyond the user limit, unmap it and retry */
            if (is_beyond_limit( ptr, view_size, user_space_limit )) add_reserved_area( ptr, view_size );
            else break;
        }
        ptr = unmap_extra_space( ptr, view_size, size, align_mask );
    }
done:
    status = create_view( view_ret, ptr, size, vprot );
    if (status != STATUS_SUCCESS) unmap_area( ptr, size );
    return status;
}
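/* map_view() tries three strategies in turn: carve the block out of a
 * preloader-reserved area, search below the allocation limit with
 * map_free_area() when a limit was requested, and otherwise take whatever
 * anonymous mmap returns, over-allocating by align_mask + 1 bytes so that
 * unmap_extra_space() can trim the result to a suitably aligned start (the
 * 64K allocation granularity by default). */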
/***********************************************************************
 *           map_file_into_view
 *
 * Wrapper for mmap() to map a file into a view, falling back to read if mmap fails.
 * virtual_mutex must be held by caller.
 */
static NTSTATUS map_file_into_view( struct file_view *view, int fd, size_t start, size_t size,
                                    off_t offset, unsigned int vprot, BOOL removable )
{
    void *ptr;
    int prot = get_unix_prot( vprot | VPROT_COMMITTED /* make sure it is accessible */ );
    unsigned int flags = MAP_FIXED | ((vprot & VPROT_WRITECOPY) ? MAP_PRIVATE : MAP_SHARED);

    assert( start < view->size );
    assert( start + size <= view->size );

    if (force_exec_prot && (vprot & VPROT_READ))
    {
        TRACE( "forcing exec permission on mapping %p-%p\n",
               (char *)view->base + start, (char *)view->base + start + size - 1 );
        prot |= PROT_EXEC;
    }

    /* only try mmap if media is not removable (or if we require write access) */
    if (!removable || (flags & MAP_SHARED))
    {
        if (mmap( (char *)view->base + start, size, prot, flags, fd, offset ) != MAP_FAILED)
            goto done;

        switch (errno)
        {
        case EINVAL:  /* file offset is not page-aligned, fall back to read() */
            if (flags & MAP_SHARED) return STATUS_INVALID_PARAMETER;
            break;
        case ENODEV:  /* filesystem doesn't support mmap(), fall back to read() */
            if (vprot & VPROT_WRITE)
            {
                ERR( "shared writable mmap not supported, broken filesystem?\n" );
                return STATUS_NOT_SUPPORTED;
            }
            break;
        case EPERM:  /* noexec filesystem, fall back to read() */
            if (flags & MAP_SHARED)
            {
                if (prot & PROT_EXEC) ERR( "failed to set PROT_EXEC on file map, noexec filesystem?\n" );
                return STATUS_ACCESS_DENIED;
            }
            if (prot & PROT_EXEC) WARN( "failed to set PROT_EXEC on file map, noexec filesystem?\n" );
            break;
        default:
            return STATUS_NO_MEMORY;
        }
    }

    /* Reserve the memory with an anonymous mmap */
    ptr = anon_mmap_fixed( (char *)view->base + start, size, PROT_READ | PROT_WRITE, 0 );
    if (ptr == MAP_FAILED) return STATUS_NO_MEMORY;
    /* Now read in the file */
    pread( fd, ptr, size, offset );
    if (prot != (PROT_READ|PROT_WRITE)) mprotect( ptr, size, prot );  /* Set the right protection */
done:
    set_page_vprot( (char *)view->base + start, size, vprot );
    return STATUS_SUCCESS;
}
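/* The read() fallback above only makes sense for private (copy-on-write)
 * views: a MAP_SHARED mapping must genuinely be backed by mmap so that stores
 * reach the underlying file, which is why the EINVAL and EPERM paths fail
 * outright when MAP_SHARED is requested instead of falling back to pread(). */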
/***********************************************************************
 *           get_committed_size
 *
 * Get the size of the committed range with equal masked vprot bytes starting at base.
 * Also return the protections for the first page.
 */
static SIZE_T get_committed_size( struct file_view *view, void *base, BYTE *vprot, BYTE vprot_mask )
{
    SIZE_T offset, size;

    base = ROUND_ADDR( base, page_mask );
    offset = (char *)base - (char *)view->base;

    if (view->protect & SEC_RESERVE)
    {
        size = 0;

        *vprot = get_page_vprot( base );

        SERVER_START_REQ( get_mapping_committed_range )
        {
            req->base   = wine_server_client_ptr( view->base );
            req->offset = offset;
            if (!wine_server_call( req ))
            {
                size = reply->size;
                if (reply->committed)
                {
                    *vprot |= VPROT_COMMITTED;
                    set_page_vprot_bits( base, size, VPROT_COMMITTED, 0 );
                }
            }
        }
        SERVER_END_REQ;

        if (!size || !(vprot_mask & ~VPROT_COMMITTED)) return size;
    }
    else size = view->size - offset;

    return get_vprot_range_size( base, size, vprot_mask, vprot );
}
/***********************************************************************
 *           decommit_pages
 *
 * Decommit some pages of a given view.
 * virtual_mutex must be held by caller.
 */
static NTSTATUS decommit_pages( struct file_view *view, size_t start, size_t size )
{
    if (!size) size = view->size;
    if (anon_mmap_fixed( (char *)view->base + start, size, PROT_NONE, 0 ) != MAP_FAILED)
    {
        set_page_vprot_bits( (char *)view->base + start, size, 0, VPROT_COMMITTED );
        return STATUS_SUCCESS;
    }
    return STATUS_NO_MEMORY;
}
/***********************************************************************
 *           free_pages
 *
 * Free some pages of a given view.
 * virtual_mutex must be held by caller.
 */
static NTSTATUS free_pages( struct file_view *view, char *base, size_t size )
{
    if (size == view->size)
    {
        assert( base == view->base );
        delete_view( view );
        return STATUS_SUCCESS;
    }
    if (view->base != base && base + size != (char *)view->base + view->size)
    {
        struct file_view *new_view = alloc_view();

        if (!new_view)
        {
            ERR( "out of memory for %p-%p\n", base, base + size );
            return STATUS_NO_MEMORY;
        }
        new_view->base    = base + size;
        new_view->size    = (char *)view->base + view->size - (char *)new_view->base;
        new_view->protect = view->protect;

        unregister_view( view );
        view->size = base - (char *)view->base;
        register_view( view );
        register_view( new_view );

        VIRTUAL_DEBUG_DUMP_VIEW( view );
        VIRTUAL_DEBUG_DUMP_VIEW( new_view );
    }
    else
    {
        unregister_view( view );
        if (view->base == base)
        {
            view->base = base + size;
            view->size -= size;
        }
        else view->size = base - (char *)view->base;

        register_view( view );
        VIRTUAL_DEBUG_DUMP_VIEW( view );
    }

    set_page_vprot( base, size, 0 );
    if (arm64ec_map) clear_arm64ec_range( base, size );
    unmap_area( base, size );
    return STATUS_SUCCESS;
}
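/* A partial free has three shapes: releasing a range in the middle of a view
 * splits it in two (the new_view branch above), while releasing at the start
 * or the end just moves view->base or shrinks view->size.  The view is
 * unregistered and re-registered around the change so the free-range
 * bookkeeping stays consistent. */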
/***********************************************************************
 *           allocate_dos_memory
 *
 * Allocate the DOS memory range.
 */
static NTSTATUS allocate_dos_memory( struct file_view **view, unsigned int vprot )
{
    void *addr;
    SIZE_T size;
    void * const low_64k = (void *)0x10000;
    const size_t dosmem_size = 0x110000;
    int unix_prot = get_unix_prot( vprot );

    /* check for existing view */

    if (find_view_range( 0, dosmem_size )) return STATUS_CONFLICTING_ADDRESSES;

    /* check without the first 64K */

    if (mmap_is_in_reserved_area( low_64k, dosmem_size - 0x10000 ) != 1)
    {
        addr = anon_mmap_tryfixed( low_64k, dosmem_size - 0x10000, unix_prot, 0 );
        if (addr == MAP_FAILED) return map_view( view, NULL, dosmem_size, FALSE, vprot, 0, 0 );
    }

    /* now try to allocate the low 64K too */

    if (mmap_is_in_reserved_area( NULL, 0x10000 ) != 1)
    {
        addr = anon_mmap_tryfixed( (void *)page_size, 0x10000 - page_size, unix_prot, 0 );
        if (addr != MAP_FAILED)
        {
            if (!anon_mmap_fixed( NULL, page_size, unix_prot, 0 ))
            {
                addr = NULL;
                TRACE( "successfully mapped low 64K range\n" );
            }
            else TRACE( "failed to map page 0\n" );
        }
        else
        {
            addr = low_64k;
            TRACE( "failed to map low 64K range\n" );
        }
    }
    else addr = NULL;

    /* now reserve the whole range */

    size = (char *)dosmem_size - (char *)addr;
    anon_mmap_fixed( addr, size, unix_prot, 0 );
    return create_view( view, addr, size, vprot );
}
/***********************************************************************
 *           map_pe_header
 *
 * Map the header of a PE file into memory.
 */
static NTSTATUS map_pe_header( void *ptr, size_t size, int fd, BOOL *removable )
{
    if (!size) return STATUS_INVALID_IMAGE_FORMAT;

    if (!*removable)
    {
        if (mmap( ptr, size, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_FIXED|MAP_PRIVATE, fd, 0 ) != MAP_FAILED)
            return STATUS_SUCCESS;

        switch (errno)
        {
        case EPERM:
        case EACCES:
            WARN( "noexec file system, falling back to read\n" );
            break;
        case ENOEXEC:
        case ENODEV:
            WARN( "file system doesn't support mmap, falling back to read\n" );
            break;
        default:
            return STATUS_NO_MEMORY;
        }
        *removable = TRUE;
    }

    pread( fd, ptr, size, 0 );
    return STATUS_SUCCESS;  /* page protections will be updated later */
}
#ifdef __aarch64__

/***********************************************************************
 *           apply_arm64x_relocations
 */
static void apply_arm64x_relocations( char *base, const IMAGE_BASE_RELOCATION *reloc, size_t size )
{
    const IMAGE_BASE_RELOCATION *reloc_end = (const IMAGE_BASE_RELOCATION *)((const char *)reloc + size);

    while (reloc < reloc_end - 1 && reloc->SizeOfBlock)
    {
        const USHORT *rel = (const USHORT *)(reloc + 1);
        const USHORT *rel_end = (const USHORT *)reloc + reloc->SizeOfBlock / sizeof(USHORT);
        char *page = base + reloc->VirtualAddress;

        while (rel < rel_end && *rel)
        {
            USHORT offset = *rel & 0xfff;
            USHORT type = (*rel >> 12) & 3;
            USHORT arg = *rel >> 14;
            int val;

            rel++;
            switch (type)
            {
            case IMAGE_DVRT_ARM64X_FIXUP_TYPE_ZEROFILL:
                memset( page + offset, 0, 1 << arg );
                break;
            case IMAGE_DVRT_ARM64X_FIXUP_TYPE_VALUE:
                memcpy( page + offset, rel, 1 << arg );
                rel += (1 << arg) / sizeof(USHORT);
                break;
            case IMAGE_DVRT_ARM64X_FIXUP_TYPE_DELTA:
                val = (unsigned int)*rel++ * ((arg & 2) ? 8 : 4);
                if (arg & 1) val = -val;
                *(int *)(page + offset) += val;
                break;
            }
        }
        reloc = (const IMAGE_BASE_RELOCATION *)rel_end;
    }
}
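/* Each ARM64X fixup is a packed USHORT: bits 0-11 are the page offset,
 * bits 12-13 the fixup type and bits 14-15 the size/sign argument, matching
 * the (*rel & 0xfff), ((*rel >> 12) & 3) and (*rel >> 14) decoding above.
 * For illustration, a hypothetical record 0x5123 decodes as offset 0x123,
 * type 1 (VALUE) and arg 1, i.e. copy 1 << 1 = 2 bytes of inline data. */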
/***********************************************************************
 *           update_arm64x_mapping
 */
static void update_arm64x_mapping( char *base, IMAGE_NT_HEADERS *nt, IMAGE_SECTION_HEADER *sections )
{
    ULONG i, size, sec, offset;
    const IMAGE_DATA_DIRECTORY *dir;
    const IMAGE_LOAD_CONFIG_DIRECTORY *cfg;
    const IMAGE_ARM64EC_METADATA *metadata;
    const IMAGE_DYNAMIC_RELOCATION_TABLE *table;
    const char *ptr, *end;

    /* retrieve config directory */

    if (nt->OptionalHeader.Magic != IMAGE_NT_OPTIONAL_HDR64_MAGIC) return;
    dir = nt->OptionalHeader.DataDirectory + IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG;
    if (!dir->VirtualAddress || !dir->Size) return;
    cfg = (void *)(base + dir->VirtualAddress);
    size = min( dir->Size, cfg->Size );

    /* update code ranges */

    if (size <= offsetof( IMAGE_LOAD_CONFIG_DIRECTORY, CHPEMetadataPointer )) return;
    metadata = (void *)(base + (cfg->CHPEMetadataPointer - nt->OptionalHeader.ImageBase));
    if (metadata->CodeMap && arm64ec_map)
    {
        const IMAGE_CHPE_RANGE_ENTRY *map = (void *)(base + metadata->CodeMap);

        for (i = 0; i < metadata->CodeMapCount; i++)
        {
            if ((map[i].StartOffset & 0x3) != 1 /* arm64ec */) continue;
            set_arm64ec_range( base + (map[i].StartOffset & ~3), map[i].Length );
        }
    }

    /* apply dynamic relocations */

    if (size <= offsetof( IMAGE_LOAD_CONFIG_DIRECTORY, DynamicValueRelocTableSection )) return;
    offset = cfg->DynamicValueRelocTableOffset;
    sec = cfg->DynamicValueRelocTableSection;
    if (!sec || sec > nt->FileHeader.NumberOfSections) return;
    if (offset >= sections[sec - 1].Misc.VirtualSize) return;
    table = (const IMAGE_DYNAMIC_RELOCATION_TABLE *)(base + sections[sec - 1].VirtualAddress + offset);
    ptr = (const char *)(table + 1);
    end = ptr + table->Size;
    switch (table->Version)
    {
    case 1:
        while (ptr < end)
        {
            const IMAGE_DYNAMIC_RELOCATION64 *dyn = (const IMAGE_DYNAMIC_RELOCATION64 *)ptr;
            if (dyn->Symbol == IMAGE_DYNAMIC_RELOCATION_ARM64X)
            {
                apply_arm64x_relocations( base, (const IMAGE_BASE_RELOCATION *)(dyn + 1),
                                          dyn->BaseRelocSize );
                break;
            }
            ptr += sizeof(*dyn) + dyn->BaseRelocSize;
        }
        break;
    case 2:
        while (ptr < end)
        {
            const IMAGE_DYNAMIC_RELOCATION64_V2 *dyn = (const IMAGE_DYNAMIC_RELOCATION64_V2 *)ptr;
            if (dyn->Symbol == IMAGE_DYNAMIC_RELOCATION_ARM64X)
            {
                apply_arm64x_relocations( base, (const IMAGE_BASE_RELOCATION *)(dyn + 1),
                                          dyn->FixupInfoSize );
                break;
            }
            ptr += dyn->HeaderSize + dyn->FixupInfoSize;
        }
        break;
    default:
        FIXME( "unsupported version %u\n", table->Version );
        break;
    }
}

#endif  /* __aarch64__ */
/***********************************************************************
 *           map_image_into_view
 *
 * Map an executable (PE format) image into an existing view.
 * virtual_mutex must be held by caller.
 */
static NTSTATUS map_image_into_view( struct file_view *view, const WCHAR *filename, int fd, void *orig_base,
                                     SIZE_T header_size, ULONG image_flags, int shared_fd, BOOL removable )
{
    IMAGE_DOS_HEADER *dos;
    IMAGE_NT_HEADERS *nt;
    IMAGE_SECTION_HEADER sections[96];
    IMAGE_SECTION_HEADER *sec;
    IMAGE_DATA_DIRECTORY *imports;
    NTSTATUS status = STATUS_CONFLICTING_ADDRESSES;
    int i;
    off_t pos;
    struct stat st;
    char *header_end, *header_start;
    char *ptr = view->base;
    SIZE_T total_size = view->size;

    TRACE_(module)( "mapping PE file %s at %p-%p\n", debugstr_w(filename), ptr, ptr + total_size );

    /* map the header */

    fstat( fd, &st );
    header_size = min( header_size, st.st_size );
    if ((status = map_pe_header( view->base, header_size, fd, &removable ))) return status;

    status = STATUS_INVALID_IMAGE_FORMAT;  /* generic error */
    dos = (IMAGE_DOS_HEADER *)ptr;
    nt = (IMAGE_NT_HEADERS *)(ptr + dos->e_lfanew);
    header_end = ptr + ROUND_SIZE( 0, header_size );
    memset( ptr + header_size, 0, header_end - (ptr + header_size) );
    if ((char *)(nt + 1) > header_end) return status;
    header_start = (char *)&nt->OptionalHeader + nt->FileHeader.SizeOfOptionalHeader;
    if (nt->FileHeader.NumberOfSections > ARRAY_SIZE( sections )) return status;
    if (header_start + sizeof(*sections) * nt->FileHeader.NumberOfSections > header_end) return status;
    /* Some applications (e.g. the Steam version of Borderlands) map over the top of the section headers,
     * copying the headers into local memory is necessary to properly load such applications. */
    memcpy( sections, header_start, sizeof(*sections) * nt->FileHeader.NumberOfSections );
    sec = sections;

    imports = nt->OptionalHeader.DataDirectory + IMAGE_DIRECTORY_ENTRY_IMPORT;
    if (!imports->Size || !imports->VirtualAddress) imports = NULL;

    /* check for non page-aligned binary */

    if (image_flags & IMAGE_FLAGS_ImageMappedFlat)
    {
        /* unaligned sections, this happens for native subsystem binaries */
        /* in that case Windows simply maps in the whole file */

        total_size = min( total_size, ROUND_SIZE( 0, st.st_size ));
        if (map_file_into_view( view, fd, 0, total_size, 0, VPROT_COMMITTED | VPROT_READ | VPROT_WRITECOPY,
                                removable ) != STATUS_SUCCESS) return status;

        /* check that all sections are loaded at the right offset */
        if (nt->OptionalHeader.FileAlignment != nt->OptionalHeader.SectionAlignment) return status;
        for (i = 0; i < nt->FileHeader.NumberOfSections; i++)
        {
            if (sec[i].VirtualAddress != sec[i].PointerToRawData)
                return status;  /* Windows refuses to load in that case too */
        }

        /* set the image protections */
        set_vprot( view, ptr, total_size, VPROT_COMMITTED | VPROT_READ | VPROT_WRITECOPY | VPROT_EXEC );

        /* no relocations are performed on non page-aligned binaries */
        return STATUS_SUCCESS;
    }

    /* map all the sections */

    for (i = pos = 0; i < nt->FileHeader.NumberOfSections; i++, sec++)
    {
        static const SIZE_T sector_align = 0x1ff;
        SIZE_T map_size, file_start, file_size, end;

        if (!sec->Misc.VirtualSize)
            map_size = ROUND_SIZE( 0, sec->SizeOfRawData );
        else
            map_size = ROUND_SIZE( 0, sec->Misc.VirtualSize );

        /* file positions are rounded to sector boundaries regardless of OptionalHeader.FileAlignment */
        file_start = sec->PointerToRawData & ~sector_align;
        file_size = (sec->SizeOfRawData + (sec->PointerToRawData & sector_align) + sector_align) & ~sector_align;
        if (file_size > map_size) file_size = map_size;

        /* a few sanity checks */
        end = sec->VirtualAddress + ROUND_SIZE( sec->VirtualAddress, map_size );
        if (sec->VirtualAddress > total_size || end > total_size || end < sec->VirtualAddress)
        {
            WARN_(module)( "%s section %.8s too large (%x+%lx/%lx)\n",
                           debugstr_w(filename), sec->Name, (int)sec->VirtualAddress, map_size, total_size );
            return status;
        }

        if ((sec->Characteristics & IMAGE_SCN_MEM_SHARED) &&
            (sec->Characteristics & IMAGE_SCN_MEM_WRITE))
        {
            TRACE_(module)( "%s mapping shared section %.8s at %p off %x (%x) size %lx (%lx) flags %x\n",
                            debugstr_w(filename), sec->Name, ptr + sec->VirtualAddress,
                            (int)sec->PointerToRawData, (int)pos, file_size, map_size,
                            (int)sec->Characteristics );
            if (map_file_into_view( view, shared_fd, sec->VirtualAddress, map_size, pos,
                                    VPROT_COMMITTED | VPROT_READ | VPROT_WRITE, FALSE ) != STATUS_SUCCESS)
            {
                ERR_(module)( "Could not map %s shared section %.8s\n", debugstr_w(filename), sec->Name );
                return status;
            }

            /* check if the import directory falls inside this section */
            if (imports && imports->VirtualAddress >= sec->VirtualAddress &&
                imports->VirtualAddress < sec->VirtualAddress + map_size)
            {
                UINT_PTR base = imports->VirtualAddress & ~page_mask;
                UINT_PTR end = base + ROUND_SIZE( imports->VirtualAddress, imports->Size );
                if (end > sec->VirtualAddress + map_size) end = sec->VirtualAddress + map_size;
                map_file_into_view( view, shared_fd, base, end - base,
                                    pos + (base - sec->VirtualAddress),
                                    VPROT_COMMITTED | VPROT_READ | VPROT_WRITECOPY, FALSE );
            }
            pos += map_size;
            continue;
        }

        TRACE_(module)( "mapping %s section %.8s at %p off %x size %x virt %x flags %x\n",
                        debugstr_w(filename), sec->Name, ptr + sec->VirtualAddress,
                        (int)sec->PointerToRawData, (int)sec->SizeOfRawData,
                        (int)sec->Misc.VirtualSize, (int)sec->Characteristics );

        if (!sec->PointerToRawData || !file_size) continue;

        /* Note: if the section is not aligned properly map_file_into_view will magically
         * fall back to read(), so we don't need to check anything here.
         */
        end = file_start + file_size;
        if (sec->PointerToRawData >= st.st_size ||
            end > ((st.st_size + sector_align) & ~sector_align) ||
            end < file_start ||
            map_file_into_view( view, fd, sec->VirtualAddress, file_size, file_start,
                                VPROT_COMMITTED | VPROT_READ | VPROT_WRITECOPY,
                                removable ) != STATUS_SUCCESS)
        {
            ERR_(module)( "Could not map %s section %.8s, file probably truncated\n",
                          debugstr_w(filename), sec->Name );
            return status;
        }

        if (file_size & page_mask)
        {
            end = ROUND_SIZE( 0, file_size );
            if (end > map_size) end = map_size;
            TRACE_(module)( "clearing %p - %p\n",
                            ptr + sec->VirtualAddress + file_size,
                            ptr + sec->VirtualAddress + end );
            memset( ptr + sec->VirtualAddress + file_size, 0, end - file_size );
        }
    }

#ifdef __aarch64__
    if (main_image_info.Machine == IMAGE_FILE_MACHINE_AMD64 &&
        nt->FileHeader.Machine == IMAGE_FILE_MACHINE_ARM64)
        update_arm64x_mapping( ptr, nt, sections );
#endif

    /* set the image protections */

    set_vprot( view, ptr, ROUND_SIZE( 0, header_size ), VPROT_COMMITTED | VPROT_READ );

    sec = sections;
    for (i = 0; i < nt->FileHeader.NumberOfSections; i++, sec++)
    {
        SIZE_T size;
        BYTE vprot = VPROT_COMMITTED;

        if (sec->Misc.VirtualSize)
            size = ROUND_SIZE( sec->VirtualAddress, sec->Misc.VirtualSize );
        else
            size = ROUND_SIZE( sec->VirtualAddress, sec->SizeOfRawData );

        if (sec->Characteristics & IMAGE_SCN_MEM_READ)    vprot |= VPROT_READ;
        if (sec->Characteristics & IMAGE_SCN_MEM_WRITE)   vprot |= VPROT_WRITECOPY;
        if (sec->Characteristics & IMAGE_SCN_MEM_EXECUTE) vprot |= VPROT_EXEC;

        if (!set_vprot( view, ptr + sec->VirtualAddress, size, vprot ) && (vprot & VPROT_EXEC))
            ERR( "failed to set %08x protection on %s section %.8s, noexec filesystem?\n",
                 (int)sec->Characteristics, debugstr_w(filename), sec->Name );
    }

#ifdef VALGRIND_LOAD_PDB_DEBUGINFO
    VALGRIND_LOAD_PDB_DEBUGINFO( fd, ptr, total_size, ptr - (char *)orig_base );
#endif
    return STATUS_SUCCESS;
}
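/* A worked example of the sector rounding above (values hypothetical):
 * PointerToRawData = 0x450 and SizeOfRawData = 0x1450 with sector_align =
 * 0x1ff give file_start = 0x450 & ~0x1ff = 0x400 and
 * file_size = (0x1450 + 0x50 + 0x1ff) & ~0x1ff = 0x1600, so the mapping
 * starts one sector early and always covers whole 512-byte sectors,
 * whatever OptionalHeader.FileAlignment claims. */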
/***********************************************************************
 *           get_mapping_info
 */
static unsigned int get_mapping_info( HANDLE handle, ACCESS_MASK access, unsigned int *sec_flags,
                                      mem_size_t *full_size, HANDLE *shared_file, pe_image_info_t **info )
{
    pe_image_info_t *image_info;
    SIZE_T total, size = 1024;
    unsigned int status;

    for (;;)
    {
        if (!(image_info = malloc( size ))) return STATUS_NO_MEMORY;

        SERVER_START_REQ( get_mapping_info )
        {
            req->handle = wine_server_obj_handle( handle );
            req->access = access;
            wine_server_set_reply( req, image_info, size );
            status = wine_server_call( req );
            *sec_flags   = reply->flags;
            *full_size   = reply->size;
            total        = reply->total;
            *shared_file = wine_server_ptr_handle( reply->shared_file );
        }
        SERVER_END_REQ;
        if (!status && total <= size - sizeof(WCHAR)) break;
        free( image_info );
        if (status) return status;
        if (*shared_file) NtClose( *shared_file );
        size = total + sizeof(WCHAR);
    }

    if (total)
    {
        WCHAR *filename = (WCHAR *)(image_info + 1);

        assert( total >= sizeof(*image_info) );
        total -= sizeof(*image_info);
        filename[total / sizeof(WCHAR)] = 0;
        *info = image_info;
    }
    else free( image_info );

    return STATUS_SUCCESS;
}
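/* The wineserver reply for get_mapping_info is variable-sized: for image
 * mappings the pe_image_info_t is followed by the image filename.  The loop
 * above therefore retries with a buffer of total + sizeof(WCHAR) bytes when
 * the initial 1024-byte guess is too small, the extra WCHAR leaving room for
 * the terminating null appended to the filename. */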
/***********************************************************************
 *           virtual_map_image
 *
 * Map a PE image section into memory.
 */
static NTSTATUS virtual_map_image( HANDLE mapping, ACCESS_MASK access, void **addr_ptr, SIZE_T *size_ptr,
                                   ULONG_PTR limit, HANDLE shared_file, ULONG alloc_type,
                                   pe_image_info_t *image_info, WCHAR *filename, BOOL is_builtin )
{
    unsigned int vprot = SEC_IMAGE | SEC_FILE | VPROT_COMMITTED | VPROT_READ | VPROT_EXEC | VPROT_WRITECOPY;
    int unix_fd = -1, needs_close;
    int shared_fd = -1, shared_needs_close = 0;
    SIZE_T size = image_info->map_size;
    struct file_view *view;
    unsigned int status;
    sigset_t sigset;
    void *base;

    if ((status = server_get_unix_fd( mapping, 0, &unix_fd, &needs_close, NULL, NULL )))
        return status;

    if (shared_file && ((status = server_get_unix_fd( shared_file, FILE_READ_DATA|FILE_WRITE_DATA,
                                                      &shared_fd, &shared_needs_close, NULL, NULL ))))
    {
        if (needs_close) close( unix_fd );
        return status;
    }

    status = STATUS_INVALID_PARAMETER;
    server_enter_uninterrupted_section( &virtual_mutex, &sigset );

    base = wine_server_get_ptr( image_info->base );
    if ((ULONG_PTR)base != image_info->base) base = NULL;

    if ((char *)base >= (char *)address_space_start)  /* make sure the DOS area remains free */
        status = map_view( &view, base, size, alloc_type & MEM_TOP_DOWN, vprot, limit, 0 );

    if (status) status = map_view( &view, NULL, size, alloc_type & MEM_TOP_DOWN, vprot, limit, 0 );
    if (status) goto done;

    status = map_image_into_view( view, filename, unix_fd, base, image_info->header_size,
                                  image_info->image_flags, shared_fd, needs_close );
    if (status == STATUS_SUCCESS)
    {
        SERVER_START_REQ( map_view )
        {
            req->mapping = wine_server_obj_handle( mapping );
            req->access  = access;
            req->base    = wine_server_client_ptr( view->base );
            req->size    = size;
            status = wine_server_call( req );
        }
        SERVER_END_REQ;
    }
    if (NT_SUCCESS(status))
    {
        if (is_builtin) add_builtin_module( view->base, NULL );
        *addr_ptr = view->base;
        *size_ptr = size;
        VIRTUAL_DEBUG_DUMP_VIEW( view );
    }
    else delete_view( view );

done:
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    if (needs_close) close( unix_fd );
    if (shared_needs_close) close( shared_fd );
    return status;
}
/***********************************************************************
 *           virtual_map_section
 *
 * Map a file section into memory.
 */
static unsigned int virtual_map_section( HANDLE handle, PVOID *addr_ptr, ULONG_PTR limit,
                                         SIZE_T commit_size, const LARGE_INTEGER *offset_ptr, SIZE_T *size_ptr,
                                         ULONG alloc_type, ULONG protect )
{
    unsigned int res;
    mem_size_t full_size;
    ACCESS_MASK access;
    SIZE_T size;
    pe_image_info_t *image_info = NULL;
    WCHAR *filename;
    void *base;
    int unix_handle = -1, needs_close;
    unsigned int vprot, sec_flags;
    struct file_view *view;
    HANDLE shared_file;
    LARGE_INTEGER offset;
    sigset_t sigset;

    switch (protect)
    {
    case PAGE_NOACCESS:
    case PAGE_READONLY:
    case PAGE_WRITECOPY:
        access = SECTION_MAP_READ;
        break;
    case PAGE_READWRITE:
        access = SECTION_MAP_WRITE;
        break;
    case PAGE_EXECUTE:
    case PAGE_EXECUTE_READ:
    case PAGE_EXECUTE_WRITECOPY:
        access = SECTION_MAP_READ | SECTION_MAP_EXECUTE;
        break;
    case PAGE_EXECUTE_READWRITE:
        access = SECTION_MAP_WRITE | SECTION_MAP_EXECUTE;
        break;
    default:
        return STATUS_INVALID_PAGE_PROTECTION;
    }

    res = get_mapping_info( handle, access, &sec_flags, &full_size, &shared_file, &image_info );
    if (res) return res;

    if (image_info)
    {
        filename = (WCHAR *)(image_info + 1);
        /* check if we can replace that mapping with the builtin */
        res = load_builtin( image_info, filename, addr_ptr, size_ptr, limit );
        if (res == STATUS_IMAGE_ALREADY_LOADED)
            res = virtual_map_image( handle, access, addr_ptr, size_ptr, limit, shared_file,
                                     alloc_type, image_info, filename, FALSE );
        if (shared_file) NtClose( shared_file );
        free( image_info );
        return res;
    }

    base = *addr_ptr;
    offset.QuadPart = offset_ptr ? offset_ptr->QuadPart : 0;
    if (offset.QuadPart >= full_size) return STATUS_INVALID_PARAMETER;
    if ((size = *size_ptr))
    {
        if (size > full_size - offset.QuadPart) return STATUS_INVALID_VIEW_SIZE;
    }
    else
    {
        size = full_size - offset.QuadPart;
        if (size != full_size - offset.QuadPart)  /* truncated */
        {
            WARN( "Files larger than 4Gb (%s) not supported on this platform\n",
                  wine_dbgstr_longlong(full_size) );
            return STATUS_INVALID_PARAMETER;
        }
    }
    if (!(size = ROUND_SIZE( 0, size ))) return STATUS_INVALID_PARAMETER;  /* wrap-around */

    get_vprot_flags( protect, &vprot, FALSE );
    vprot |= sec_flags;
    if (!(sec_flags & SEC_RESERVE)) vprot |= VPROT_COMMITTED;

    if ((res = server_get_unix_fd( handle, 0, &unix_handle, &needs_close, NULL, NULL ))) return res;

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );

    res = map_view( &view, base, size, alloc_type & MEM_TOP_DOWN, vprot, limit, 0 );
    if (res) goto done;

    TRACE( "handle=%p size=%lx offset=%s\n", handle, size, wine_dbgstr_longlong(offset.QuadPart) );
    res = map_file_into_view( view, unix_handle, 0, size, offset.QuadPart, vprot, needs_close );
    if (res == STATUS_SUCCESS)
    {
        SERVER_START_REQ( map_view )
        {
            req->mapping = wine_server_obj_handle( handle );
            req->access  = access;
            req->base    = wine_server_client_ptr( view->base );
            req->size    = size;
            req->start   = offset.QuadPart;
            res = wine_server_call( req );
        }
        SERVER_END_REQ;
    }
    else ERR( "mapping %p %lx %s failed\n", view->base, size, wine_dbgstr_longlong(offset.QuadPart) );

    if (NT_SUCCESS(res))
    {
        *addr_ptr = view->base;
        *size_ptr = size;
        VIRTUAL_DEBUG_DUMP_VIEW( view );
    }
    else delete_view( view );

done:
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    if (needs_close) close( unix_handle );
    return res;
}
struct alloc_virtual_heap
{
    void  *base;
    size_t size;
};

/* callback for mmap_enum_reserved_areas to allocate space for the virtual heap */
static int alloc_virtual_heap( void *base, SIZE_T size, void *arg )
{
    struct alloc_virtual_heap *alloc = arg;
    void *end = (char *)base + size;

    if (is_beyond_limit( base, size, address_space_limit )) address_space_limit = (char *)base + size;
    if (is_win64 && base < (void *)0x80000000) return 0;
    if (preload_reserve_end >= end)
    {
        if (preload_reserve_start <= base) return 0;  /* no space in that area */
        if (preload_reserve_start < end) end = preload_reserve_start;
    }
    else if (preload_reserve_end > base)
    {
        if (preload_reserve_start <= base) base = preload_reserve_end;
        else if ((char *)end - (char *)preload_reserve_end >= alloc->size) base = preload_reserve_end;
        else end = preload_reserve_start;
    }
    if ((char *)end - (char *)base < alloc->size) return 0;
    alloc->base = anon_mmap_fixed( (char *)end - alloc->size, alloc->size, PROT_READ|PROT_WRITE, 0 );
    return (alloc->base != MAP_FAILED);
}
/***********************************************************************
 *           virtual_init
 */
void virtual_init(void)
{
    const struct preload_info **preload_info = dlsym( RTLD_DEFAULT, "wine_main_preload_info" );
    const char *preload;
    struct alloc_virtual_heap alloc_views;
    size_t size;
    int i;
    pthread_mutexattr_t attr;

    pthread_mutexattr_init( &attr );
    pthread_mutexattr_settype( &attr, PTHREAD_MUTEX_RECURSIVE );
    pthread_mutex_init( &virtual_mutex, &attr );
    pthread_mutexattr_destroy( &attr );

    if (preload_info && *preload_info)
        for (i = 0; (*preload_info)[i].size; i++)
            mmap_add_reserved_area( (*preload_info)[i].addr, (*preload_info)[i].size );

    mmap_init( preload_info ? *preload_info : NULL );

    if ((preload = getenv( "WINEPRELOADRESERVE" )))
    {
        unsigned long start, end;
        if (sscanf( preload, "%lx-%lx", &start, &end ) == 2)
        {
            preload_reserve_start = (void *)start;
            preload_reserve_end = (void *)end;
            /* some apps start inside the DOS area */
            if (preload_reserve_start)
                address_space_start = min( address_space_start, preload_reserve_start );
        }
    }

    /* try to find space in a reserved area for the views and pages protection table */
#ifdef _WIN64
    pages_vprot_size = ((size_t)address_space_limit >> page_shift >> pages_vprot_shift) + 1;
    alloc_views.size = 2 * view_block_size + pages_vprot_size * sizeof(*pages_vprot);
#else
    alloc_views.size = 2 * view_block_size + (1U << (32 - page_shift));
#endif
    if (mmap_enum_reserved_areas( alloc_virtual_heap, &alloc_views, 1 ))
        mmap_remove_reserved_area( alloc_views.base, alloc_views.size );
    else
        alloc_views.base = anon_mmap_alloc( alloc_views.size, PROT_READ | PROT_WRITE );

    assert( alloc_views.base != MAP_FAILED );
    view_block_start = alloc_views.base;
    view_block_end = view_block_start + view_block_size / sizeof(*view_block_start);
    free_ranges = (void *)((char *)alloc_views.base + view_block_size);
    pages_vprot = (void *)((char *)alloc_views.base + 2 * view_block_size);
    wine_rb_init( &views_tree, compare_view );

    free_ranges[0].base = (void *)0;
    free_ranges[0].end = (void *)~0;
    free_ranges_end = free_ranges + 1;

    /* make the DOS area accessible (except the low 64K) to hide bugs in broken apps like Excel 2003 */
    size = (char *)address_space_start - (char *)0x10000;
    if (size && mmap_is_in_reserved_area( (void *)0x10000, size ) == 1)
        anon_mmap_fixed( (void *)0x10000, size, PROT_READ | PROT_WRITE, 0 );
}
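/* WINEPRELOADRESERVE is a "start-end" pair of hex addresses exported by the
 * preloader, e.g. (hypothetical value) WINEPRELOADRESERVE=110000-68000000.
 * The sscanf above parses it, and the resulting range is then excluded from
 * every free-area search (see alloc_reserved_area_callback and
 * alloc_virtual_heap above). */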
/***********************************************************************
 *           get_system_affinity_mask
 */
ULONG_PTR get_system_affinity_mask(void)
{
    ULONG num_cpus = peb->NumberOfProcessors;
    if (num_cpus >= sizeof(ULONG_PTR) * 8) return ~(ULONG_PTR)0;
    return ((ULONG_PTR)1 << num_cpus) - 1;
}
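/* Worked example: with peb->NumberOfProcessors == 8 this returns
 * ((ULONG_PTR)1 << 8) - 1 == 0xff, one bit per CPU.  With 64 or more CPUs on
 * a 64-bit build every bit is set instead, since shifting by the full word
 * width would be undefined behavior. */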
/***********************************************************************
 *           virtual_get_system_info
 */
void virtual_get_system_info( SYSTEM_BASIC_INFORMATION *info, BOOL wow64 )
{
#if defined(HAVE_SYSINFO) \
    && defined(HAVE_STRUCT_SYSINFO_TOTALRAM) && defined(HAVE_STRUCT_SYSINFO_MEM_UNIT)
    struct sysinfo sinfo;

    if (!sysinfo(&sinfo))
    {
        ULONG64 total = (ULONG64)sinfo.totalram * sinfo.mem_unit;
        info->MmHighestPhysicalPage = max(1, total / page_size);
    }
#elif defined(_SC_PHYS_PAGES)
    LONG64 phys_pages = sysconf( _SC_PHYS_PAGES );

    info->MmHighestPhysicalPage = max(1, phys_pages);
#else
    info->MmHighestPhysicalPage = 0x7fffffff / page_size;
#endif

    info->KeMaximumIncrement = 0;  /* FIXME */
    info->PageSize = page_size;
    info->MmLowestPhysicalPage = 1;
    info->MmNumberOfPhysicalPages = info->MmHighestPhysicalPage - info->MmLowestPhysicalPage;
    info->AllocationGranularity = granularity_mask + 1;
    info->LowestUserAddress = (void *)0x10000;
    info->ActiveProcessorsAffinityMask = get_system_affinity_mask();
    info->NumberOfProcessors = peb->NumberOfProcessors;
    if (wow64) info->HighestUserAddress = (char *)get_wow_user_space_limit() - 1;
    else info->HighestUserAddress = (char *)user_space_limit - 1;
}
/***********************************************************************
 *           virtual_map_builtin_module
 */
NTSTATUS virtual_map_builtin_module( HANDLE mapping, void **module, SIZE_T *size, SECTION_IMAGE_INFORMATION *info,
                                     ULONG_PTR limit, WORD machine, BOOL prefer_native )
{
    mem_size_t full_size;
    unsigned int sec_flags;
    HANDLE shared_file;
    pe_image_info_t *image_info = NULL;
    ACCESS_MASK access = SECTION_MAP_READ | SECTION_MAP_EXECUTE;
    NTSTATUS status;
    WCHAR *filename;

    if ((status = get_mapping_info( mapping, access, &sec_flags, &full_size, &shared_file, &image_info )))
        return status;

    if (!image_info) return STATUS_INVALID_PARAMETER;

    filename = (WCHAR *)(image_info + 1);

    if (!(image_info->image_flags & IMAGE_FLAGS_WineBuiltin)) /* ignore non-builtins */
    {
        WARN( "%s found in WINEDLLPATH but not a builtin, ignoring\n", debugstr_w(filename) );
        status = STATUS_DLL_NOT_FOUND;
    }
    else if (machine && image_info->machine != machine)
    {
        TRACE( "%s is for arch %04x, continuing search\n", debugstr_w(filename), image_info->machine );
        status = STATUS_IMAGE_MACHINE_TYPE_MISMATCH;
    }
    else if (prefer_native && (image_info->dll_charact & IMAGE_DLLCHARACTERISTICS_PREFER_NATIVE))
    {
        TRACE( "%s has prefer-native flag, ignoring builtin\n", debugstr_w(filename) );
        status = STATUS_IMAGE_ALREADY_LOADED;
    }
    else
    {
        status = virtual_map_image( mapping, SECTION_MAP_READ | SECTION_MAP_EXECUTE,
                                    module, size, limit, shared_file, 0, image_info, filename, TRUE );
        virtual_fill_image_information( image_info, info );
    }

    if (shared_file) NtClose( shared_file );
    free( image_info );
    return status;
}
/***********************************************************************
 *           virtual_create_builtin_view
 */
NTSTATUS virtual_create_builtin_view( void *module, const UNICODE_STRING *nt_name,
                                      pe_image_info_t *info, void *so_handle )
{
    NTSTATUS status;
    sigset_t sigset;
    IMAGE_DOS_HEADER *dos = module;
    IMAGE_NT_HEADERS *nt = (IMAGE_NT_HEADERS *)((char *)dos + dos->e_lfanew);
    SIZE_T size = info->map_size;
    IMAGE_SECTION_HEADER *sec;
    struct file_view *view;
    void *base = wine_server_get_ptr( info->base );
    int i;

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    status = create_view( &view, base, size, SEC_IMAGE | SEC_FILE | VPROT_SYSTEM |
                          VPROT_COMMITTED | VPROT_READ | VPROT_WRITECOPY | VPROT_EXEC );
    if (!status)
    {
        TRACE( "created %p-%p for %s\n", base, (char *)base + size, debugstr_us(nt_name) );

        /* The PE header is always read-only, no write, no execute. */
        set_page_vprot( base, page_size, VPROT_COMMITTED | VPROT_READ );

        sec = (IMAGE_SECTION_HEADER *)((char *)&nt->OptionalHeader + nt->FileHeader.SizeOfOptionalHeader);
        for (i = 0; i < nt->FileHeader.NumberOfSections; i++)
        {
            BYTE flags = VPROT_COMMITTED;

            if (sec[i].Characteristics & IMAGE_SCN_MEM_EXECUTE) flags |= VPROT_EXEC;
            if (sec[i].Characteristics & IMAGE_SCN_MEM_READ) flags |= VPROT_READ;
            if (sec[i].Characteristics & IMAGE_SCN_MEM_WRITE) flags |= VPROT_WRITE;
            set_page_vprot( (char *)base + sec[i].VirtualAddress, sec[i].Misc.VirtualSize, flags );
        }

        SERVER_START_REQ( map_builtin_view )
        {
            wine_server_add_data( req, info, sizeof(*info) );
            wine_server_add_data( req, nt_name->Buffer, nt_name->Length );
            status = wine_server_call( req );
        }
        SERVER_END_REQ;

        if (!status)
        {
            add_builtin_module( view->base, so_handle );
            VIRTUAL_DEBUG_DUMP_VIEW( view );
            if (is_beyond_limit( base, size, working_set_limit )) working_set_limit = address_space_limit;
        }
        else delete_view( view );
    }
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );

    return status;
}
/* set some initial values in a new TEB */
static TEB *init_teb( void *ptr, BOOL is_wow )
{
    struct ntdll_thread_data *thread_data;
    TEB *teb;
    TEB64 *teb64 = ptr;
    TEB32 *teb32 = (TEB32 *)((char *)ptr + teb_offset);

#ifdef _WIN64
    teb = (TEB *)teb64;
    teb32->Peb = PtrToUlong( (char *)peb + page_size );
    teb32->Tib.Self = PtrToUlong( teb32 );
    teb32->Tib.ExceptionList = ~0u;
    teb32->ActivationContextStackPointer = PtrToUlong( &teb32->ActivationContextStack );
    teb32->ActivationContextStack.FrameListCache.Flink =
        teb32->ActivationContextStack.FrameListCache.Blink =
            PtrToUlong( &teb32->ActivationContextStack.FrameListCache );
    teb32->StaticUnicodeString.Buffer = PtrToUlong( teb32->StaticUnicodeBuffer );
    teb32->StaticUnicodeString.MaximumLength = sizeof( teb32->StaticUnicodeBuffer );
    teb32->GdiBatchCount = PtrToUlong( teb64 );
    teb32->WowTebOffset  = -teb_offset;
    if (is_wow) teb64->WowTebOffset = teb_offset;
#else
    teb = (TEB *)teb32;
    teb64->Peb = PtrToUlong( (char *)peb - page_size );
    teb64->Tib.Self = PtrToUlong( teb64 );
    teb64->Tib.ExceptionList = PtrToUlong( teb32 );
    teb64->ActivationContextStackPointer = PtrToUlong( &teb64->ActivationContextStack );
    teb64->ActivationContextStack.FrameListCache.Flink =
        teb64->ActivationContextStack.FrameListCache.Blink =
            PtrToUlong( &teb64->ActivationContextStack.FrameListCache );
    teb64->StaticUnicodeString.Buffer = PtrToUlong( teb64->StaticUnicodeBuffer );
    teb64->StaticUnicodeString.MaximumLength = sizeof( teb64->StaticUnicodeBuffer );
    teb64->WowTebOffset = teb_offset;
    if (is_wow)
    {
        teb32->GdiBatchCount = PtrToUlong( teb64 );
        teb32->WowTebOffset  = -teb_offset;
    }
#endif
    teb->Peb = peb;
    teb->Tib.Self = &teb->Tib;
    teb->Tib.ExceptionList = (void *)~0ul;
    teb->Tib.StackBase = (void *)~0ul;
    teb->ActivationContextStackPointer = &teb->ActivationContextStack;
    InitializeListHead( &teb->ActivationContextStack.FrameListCache );
    teb->StaticUnicodeString.Buffer = teb->StaticUnicodeBuffer;
    teb->StaticUnicodeString.MaximumLength = sizeof(teb->StaticUnicodeBuffer);
    thread_data = (struct ntdll_thread_data *)&teb->GdiTebBatch;
    thread_data->request_fd = -1;
    thread_data->reply_fd   = -1;
    thread_data->wait_fd[0] = -1;
    thread_data->wait_fd[1] = -1;
    list_add_head( &teb_list, &thread_data->entry );
    return teb;
}
/***********************************************************************
 *           virtual_alloc_first_teb
 */
TEB *virtual_alloc_first_teb(void)
{
    void *ptr;
    TEB *teb;
    unsigned int status;
    SIZE_T data_size = page_size;
    SIZE_T block_size = signal_stack_mask + 1;
    SIZE_T total = 32 * block_size;

    /* reserve space for shared user data */
    status = NtAllocateVirtualMemory( NtCurrentProcess(), (void **)&user_shared_data, 0, &data_size,
                                      MEM_RESERVE | MEM_COMMIT, PAGE_READONLY );
    if (status)
    {
        ERR( "wine: failed to map the shared user data: %08x\n", status );
        exit(1);
    }

    NtAllocateVirtualMemory( NtCurrentProcess(), &teb_block, is_win64 ? 0x7fffffff : 0, &total,
                             MEM_RESERVE | MEM_TOP_DOWN, PAGE_READWRITE );
    teb_block_pos = 30;
    ptr = (char *)teb_block + 30 * block_size;
    data_size = 2 * block_size;
    NtAllocateVirtualMemory( NtCurrentProcess(), (void **)&ptr, 0, &data_size, MEM_COMMIT, PAGE_READWRITE );
    peb = (PEB *)((char *)teb_block + 31 * block_size + (is_win64 ? 0 : page_size));
    teb = init_teb( ptr, FALSE );
    pthread_key_create( &teb_key, NULL );
    pthread_setspecific( teb_key, teb );
    return teb;
}
/***********************************************************************
 *           virtual_alloc_teb
 */
NTSTATUS virtual_alloc_teb( TEB **ret_teb )
{
    sigset_t sigset;
    TEB *teb;
    void *ptr = NULL;
    NTSTATUS status = STATUS_SUCCESS;
    SIZE_T block_size = signal_stack_mask + 1;

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    if (next_free_teb)
    {
        ptr = next_free_teb;
        next_free_teb = *(void **)ptr;
        memset( ptr, 0, teb_size );
    }
    else
    {
        if (!teb_block_pos)
        {
            SIZE_T total = 32 * block_size;

            if ((status = NtAllocateVirtualMemory( NtCurrentProcess(), &ptr, is_win64 && is_wow64() ? 0x7fffffff : 0,
                                                   &total, MEM_RESERVE, PAGE_READWRITE )))
            {
                server_leave_uninterrupted_section( &virtual_mutex, &sigset );
                return status;
            }
            teb_block = ptr;
            teb_block_pos = 32;
        }
        ptr = ((char *)teb_block + --teb_block_pos * block_size);
        NtAllocateVirtualMemory( NtCurrentProcess(), (void **)&ptr, 0, &block_size,
                                 MEM_COMMIT, PAGE_READWRITE );
    }
    *ret_teb = teb = init_teb( ptr, is_wow64() );
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );

    if ((status = signal_alloc_thread( teb )))
    {
        server_enter_uninterrupted_section( &virtual_mutex, &sigset );
        *(void **)ptr = next_free_teb;
        next_free_teb = ptr;
        server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    }
    return status;
}
/***********************************************************************
 *           virtual_free_teb
 */
void virtual_free_teb( TEB *teb )
{
    struct ntdll_thread_data *thread_data = (struct ntdll_thread_data *)&teb->GdiTebBatch;
    void *ptr;
    SIZE_T size;
    sigset_t sigset;
    WOW_TEB *wow_teb = get_wow_teb( teb );

    signal_free_thread( teb );
    if (teb->DeallocationStack)
    {
        size = 0;
        NtFreeVirtualMemory( GetCurrentProcess(), &teb->DeallocationStack, &size, MEM_RELEASE );
    }
    if (thread_data->kernel_stack)
    {
        size = 0;
        NtFreeVirtualMemory( GetCurrentProcess(), &thread_data->kernel_stack, &size, MEM_RELEASE );
    }
    if (wow_teb && (ptr = ULongToPtr( wow_teb->DeallocationStack )))
    {
        size = 0;
        NtFreeVirtualMemory( GetCurrentProcess(), &ptr, &size, MEM_RELEASE );
    }

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    list_remove( &thread_data->entry );
    ptr = teb;
    if (!is_win64) ptr = (char *)ptr - teb_offset;
    *(void **)ptr = next_free_teb;
    next_free_teb = ptr;
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
}
/***********************************************************************
 *           virtual_clear_tls_index
 */
NTSTATUS virtual_clear_tls_index( ULONG index )
{
    struct ntdll_thread_data *thread_data;
    sigset_t sigset;

    if (index < TLS_MINIMUM_AVAILABLE)
    {
        server_enter_uninterrupted_section( &virtual_mutex, &sigset );
        LIST_FOR_EACH_ENTRY( thread_data, &teb_list, struct ntdll_thread_data, entry )
        {
            TEB *teb = CONTAINING_RECORD( thread_data, TEB, GdiTebBatch );
#ifdef _WIN64
            WOW_TEB *wow_teb = get_wow_teb( teb );
            if (wow_teb) wow_teb->TlsSlots[index] = 0;
            else
#endif
            teb->TlsSlots[index] = 0;
        }
        server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    }
    else
    {
        index -= TLS_MINIMUM_AVAILABLE;
        if (index >= 8 * sizeof(peb->TlsExpansionBitmapBits)) return STATUS_INVALID_PARAMETER;

        server_enter_uninterrupted_section( &virtual_mutex, &sigset );
        LIST_FOR_EACH_ENTRY( thread_data, &teb_list, struct ntdll_thread_data, entry )
        {
            TEB *teb = CONTAINING_RECORD( thread_data, TEB, GdiTebBatch );
#ifdef _WIN64
            WOW_TEB *wow_teb = get_wow_teb( teb );
            if (wow_teb)
            {
                if (wow_teb->TlsExpansionSlots)
                    ((ULONG *)ULongToPtr( wow_teb->TlsExpansionSlots ))[index] = 0;
            }
            else
#endif
            if (teb->TlsExpansionSlots) teb->TlsExpansionSlots[index] = 0;
        }
        server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    }
    return STATUS_SUCCESS;
}
/***********************************************************************
 *           virtual_alloc_thread_stack
 */
NTSTATUS virtual_alloc_thread_stack( INITIAL_TEB *stack, ULONG_PTR limit, SIZE_T reserve_size,
                                     SIZE_T commit_size, BOOL guard_page )
{
    struct file_view *view;
    NTSTATUS status;
    sigset_t sigset;
    SIZE_T size;

    if (!reserve_size) reserve_size = main_image_info.MaximumStackSize;
    if (!commit_size) commit_size = main_image_info.CommittedStackSize;

    size = max( reserve_size, commit_size );
    if (size < 1024 * 1024) size = 1024 * 1024;  /* Xlib needs a large stack */
    size = (size + 0xffff) & ~0xffff;  /* round to 64K boundary */

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );

    status = map_view( &view, NULL, size, FALSE, VPROT_READ | VPROT_WRITE | VPROT_COMMITTED, limit, 0 );
    if (status != STATUS_SUCCESS) goto done;

#ifdef VALGRIND_STACK_REGISTER
    VALGRIND_STACK_REGISTER( view->base, (char *)view->base + view->size );
#endif

    /* setup no access guard page */
    if (guard_page)
    {
        set_page_vprot( view->base, page_size, VPROT_COMMITTED );
        set_page_vprot( (char *)view->base + page_size, page_size,
                        VPROT_READ | VPROT_WRITE | VPROT_COMMITTED | VPROT_GUARD );
        mprotect_range( view->base, 2 * page_size, 0, 0 );
    }
    VIRTUAL_DEBUG_DUMP_VIEW( view );

    /* note: limit is lower than base since the stack grows down */
    stack->OldStackBase = 0;
    stack->OldStackLimit = 0;
    stack->DeallocationStack = view->base;
    stack->StackBase = (char *)view->base + view->size;
    stack->StackLimit = (char *)view->base + (guard_page ? 2 * page_size : 0);
done:
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    return status;
}
/***********************************************************************
 *           virtual_alloc_arm64ec_map
 */
void *virtual_alloc_arm64ec_map(void)
{
#ifdef __aarch64__
    SIZE_T size = ((ULONG_PTR)user_space_limit + page_size) >> (page_shift + 3);  /* one bit per page */
    unsigned int status = NtAllocateVirtualMemory( NtCurrentProcess(), (void **)&arm64ec_map, 0, &size,
                                                   MEM_COMMIT, PAGE_READWRITE );
    if (status)
    {
        ERR( "failed to allocate ARM64EC map: %08x\n", status );
        exit(1);
    }
#endif
    return arm64ec_map;
}
/***********************************************************************
 *           virtual_map_user_shared_data
 */
void virtual_map_user_shared_data(void)
{
    static const WCHAR nameW[] = {'\\','K','e','r','n','e','l','O','b','j','e','c','t','s',
                                  '\\','_','_','w','i','n','e','_','u','s','e','r','_','s','h','a','r','e','d','_','d','a','t','a',0};
    UNICODE_STRING name_str = RTL_CONSTANT_STRING( nameW );
    OBJECT_ATTRIBUTES attr = { sizeof(attr), 0, &name_str };
    unsigned int status;
    HANDLE section;
    int res, fd, needs_close;

    if ((status = NtOpenSection( &section, SECTION_ALL_ACCESS, &attr )))
    {
        ERR( "failed to open the USD section: %08x\n", status );
        exit(1);
    }
    if ((res = server_get_unix_fd( section, 0, &fd, &needs_close, NULL, NULL )) ||
        (user_shared_data != mmap( user_shared_data, page_size, PROT_READ, MAP_SHARED|MAP_FIXED, fd, 0 )))
    {
        ERR( "failed to remap the process USD: %d\n", res );
        exit(1);
    }
    if (needs_close) close( fd );
    NtClose( section );
}
struct thread_stack_info
{
    char  *start;
    char  *limit;
    char  *end;
    SIZE_T guaranteed;
    BOOL   is_wow;
};

/***********************************************************************
 *           is_inside_thread_stack
 */
static BOOL is_inside_thread_stack( void *ptr, struct thread_stack_info *stack )
{
    TEB *teb = NtCurrentTeb();
    WOW_TEB *wow_teb = get_wow_teb( teb );

    stack->start = teb->DeallocationStack;
    stack->limit = teb->Tib.StackLimit;
    stack->end   = teb->Tib.StackBase;
    stack->guaranteed = max( teb->GuaranteedStackBytes, page_size * (is_win64 ? 2 : 1) );
    stack->is_wow = FALSE;
    if ((char *)ptr > stack->start && (char *)ptr <= stack->end) return TRUE;

    if (!wow_teb) return FALSE;
    stack->start = ULongToPtr( wow_teb->DeallocationStack );
    stack->limit = ULongToPtr( wow_teb->Tib.StackLimit );
    stack->end   = ULongToPtr( wow_teb->Tib.StackBase );
    stack->guaranteed = max( wow_teb->GuaranteedStackBytes, page_size * (is_win64 ? 1 : 2) );
    stack->is_wow = TRUE;
    return ((char *)ptr > stack->start && (char *)ptr <= stack->end);
}
/***********************************************************************
 *           grow_thread_stack
 */
static NTSTATUS grow_thread_stack( char *page, struct thread_stack_info *stack_info )
{
    NTSTATUS ret = 0;

    set_page_vprot_bits( page, page_size, 0, VPROT_GUARD );
    mprotect_range( page, page_size, 0, 0 );
    if (page >= stack_info->start + page_size + stack_info->guaranteed)
    {
        set_page_vprot_bits( page - page_size, page_size, VPROT_COMMITTED | VPROT_GUARD, 0 );
        mprotect_range( page - page_size, page_size, 0, 0 );
    }
    else  /* inside guaranteed space -> overflow exception */
    {
        page = stack_info->start + page_size;
        set_page_vprot_bits( page, stack_info->guaranteed, VPROT_COMMITTED, VPROT_GUARD );
        mprotect_range( page, stack_info->guaranteed, 0, 0 );
        ret = STATUS_STACK_OVERFLOW;
    }
    if (stack_info->is_wow)
    {
        WOW_TEB *wow_teb = get_wow_teb( NtCurrentTeb() );
        wow_teb->Tib.StackLimit = PtrToUlong( page );
    }
    else NtCurrentTeb()->Tib.StackLimit = page;
    return ret;
}
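/* Growing the stack commits the faulting guard page and arms the page below
 * it as the new guard, so the guard "slides" downward one page per fault.
 * Once a fault lands inside the guaranteed area, the whole guaranteed region
 * is committed instead and STATUS_STACK_OVERFLOW is returned, giving the
 * application one chance to handle the overflow before the stack runs out
 * entirely. */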
/***********************************************************************
 *           virtual_handle_fault
 */
NTSTATUS virtual_handle_fault( void *addr, DWORD err, void *stack )
{
    NTSTATUS ret = STATUS_ACCESS_VIOLATION;
    char *page = ROUND_ADDR( addr, page_mask );
    BYTE vprot;

    mutex_lock( &virtual_mutex );  /* no need for signal masking inside signal handler */
    vprot = get_page_vprot( page );
    if (!is_inside_signal_stack( stack ) && (vprot & VPROT_GUARD))
    {
        struct thread_stack_info stack_info;
        if (!is_inside_thread_stack( page, &stack_info ))
        {
            set_page_vprot_bits( page, page_size, 0, VPROT_GUARD );
            mprotect_range( page, page_size, 0, 0 );
            ret = STATUS_GUARD_PAGE_VIOLATION;
        }
        else ret = grow_thread_stack( page, &stack_info );
    }
    else if (err & EXCEPTION_WRITE_FAULT)
    {
        if (vprot & VPROT_WRITEWATCH)
        {
            set_page_vprot_bits( page, page_size, 0, VPROT_WRITEWATCH );
            mprotect_range( page, page_size, 0, 0 );
        }
        /* ignore fault if page is writable now */
        if (get_unix_prot( get_page_vprot( page )) & PROT_WRITE)
        {
            if ((vprot & VPROT_WRITEWATCH) || is_write_watch_range( page, page_size ))
                ret = STATUS_SUCCESS;
        }
    }
    mutex_unlock( &virtual_mutex );
    return ret;
}
/***********************************************************************
 *           virtual_setup_exception
 */
void *virtual_setup_exception( void *stack_ptr, size_t size, EXCEPTION_RECORD *rec )
{
    char *stack = stack_ptr;
    struct thread_stack_info stack_info;

    if (!is_inside_thread_stack( stack, &stack_info ))
    {
        if (is_inside_signal_stack( stack ))
        {
            ERR( "nested exception on signal stack addr %p stack %p\n", rec->ExceptionAddress, stack );
            abort_thread(1);
        }
        WARN( "exception outside of stack limits addr %p stack %p (%p-%p-%p)\n",
              rec->ExceptionAddress, stack, NtCurrentTeb()->DeallocationStack,
              NtCurrentTeb()->Tib.StackLimit, NtCurrentTeb()->Tib.StackBase );
        return stack - size;
    }

    stack -= size;

    if (stack < stack_info.start + 4096)
    {
        /* stack overflow on last page, unrecoverable */
        UINT diff = stack_info.start + 4096 - stack;
        ERR( "stack overflow %u bytes addr %p stack %p (%p-%p-%p)\n",
             diff, rec->ExceptionAddress, stack, stack_info.start, stack_info.limit, stack_info.end );
        abort_thread(1);
    }
    else if (stack < stack_info.limit)
    {
        mutex_lock( &virtual_mutex );  /* no need for signal masking inside signal handler */
        if ((get_page_vprot( stack ) & VPROT_GUARD) &&
            grow_thread_stack( ROUND_ADDR( stack, page_mask ), &stack_info ))
        {
            rec->ExceptionCode = STATUS_STACK_OVERFLOW;
            rec->NumberParameters = 0;
        }
        mutex_unlock( &virtual_mutex );
    }
#if defined(VALGRIND_MAKE_MEM_UNDEFINED)
    VALGRIND_MAKE_MEM_UNDEFINED( stack, size );
#elif defined(VALGRIND_MAKE_WRITABLE)
    VALGRIND_MAKE_WRITABLE( stack, size );
#endif
    return stack;
}
/***********************************************************************
 *           check_write_access
 *
 * Check if the memory range is writable, temporarily disabling write watches if necessary.
 */
static NTSTATUS check_write_access( void *base, size_t size, BOOL *has_write_watch )
{
    size_t i;
    char *addr = ROUND_ADDR( base, page_mask );

    size = ROUND_SIZE( base, size );
    for (i = 0; i < size; i += page_size)
    {
        BYTE vprot = get_page_vprot( addr + i );
        if (vprot & VPROT_WRITEWATCH) *has_write_watch = TRUE;
        if (!(get_unix_prot( vprot & ~VPROT_WRITEWATCH ) & PROT_WRITE))
            return STATUS_INVALID_USER_BUFFER;
    }
    if (*has_write_watch)
        mprotect_range( addr, size, 0, VPROT_WRITEWATCH );  /* temporarily enable write access */
    return STATUS_SUCCESS;
}
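/* check_write_access() both validates the buffer and, as a side effect, lifts
 * VPROT_WRITEWATCH protection for the duration of the I/O.  The
 * virtual_locked_* helpers below rely on this: they perform the read/recv
 * under virtual_mutex and then re-arm the watches with
 * update_write_watches(). */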
/***********************************************************************
 *           virtual_locked_server_call
 */
unsigned int virtual_locked_server_call( void *req_ptr )
{
    struct __server_request_info * const req = req_ptr;
    sigset_t sigset;
    void *addr = req->reply_data;
    data_size_t size = req->u.req.request_header.reply_size;
    BOOL has_write_watch = FALSE;
    unsigned int ret = STATUS_ACCESS_VIOLATION;

    if (!size) return wine_server_call( req_ptr );

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    if (!(ret = check_write_access( addr, size, &has_write_watch )))
    {
        ret = server_call_unlocked( req );
        if (has_write_watch) update_write_watches( addr, size, wine_server_reply_size( req ));
    }
    else memset( &req->u.reply, 0, sizeof(req->u.reply) );
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    return ret;
}
/***********************************************************************
 *           virtual_locked_read
 */
ssize_t virtual_locked_read( int fd, void *addr, size_t size )
{
    sigset_t sigset;
    BOOL has_write_watch = FALSE;
    int err = EFAULT;

    ssize_t ret = read( fd, addr, size );
    if (ret != -1 || errno != EFAULT) return ret;

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    if (!check_write_access( addr, size, &has_write_watch ))
    {
        ret = read( fd, addr, size );
        err = errno;
        if (has_write_watch) update_write_watches( addr, size, max( 0, ret ));
    }
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    errno = err;
    return ret;
}
/***********************************************************************
 *           virtual_locked_pread
 */
ssize_t virtual_locked_pread( int fd, void *addr, size_t size, off_t offset )
{
    sigset_t sigset;
    BOOL has_write_watch = FALSE;
    int err = EFAULT;

    ssize_t ret = pread( fd, addr, size, offset );
    if (ret != -1 || errno != EFAULT) return ret;

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    if (!check_write_access( addr, size, &has_write_watch ))
    {
        ret = pread( fd, addr, size, offset );
        err = errno;
        if (has_write_watch) update_write_watches( addr, size, max( 0, ret ));
    }
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    errno = err;
    return ret;
}
/***********************************************************************
 *           virtual_locked_recvmsg
 */
ssize_t virtual_locked_recvmsg( int fd, struct msghdr *hdr, int flags )
{
    sigset_t sigset;
    unsigned int i;
    BOOL has_write_watch = FALSE;
    int err = EFAULT;

    ssize_t ret = recvmsg( fd, hdr, flags );
    if (ret != -1 || errno != EFAULT) return ret;

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    for (i = 0; i < hdr->msg_iovlen; i++)
        if (check_write_access( hdr->msg_iov[i].iov_base, hdr->msg_iov[i].iov_len, &has_write_watch ))
            break;
    if (i == hdr->msg_iovlen)
    {
        ret = recvmsg( fd, hdr, flags );
        err = errno;
    }
    if (has_write_watch)
        while (i--) update_write_watches( hdr->msg_iov[i].iov_base, hdr->msg_iov[i].iov_len, 0 );
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    errno = err;
    return ret;
}
/***********************************************************************
 *           virtual_is_valid_code_address
 */
BOOL virtual_is_valid_code_address( const void *addr, SIZE_T size )
{
    struct file_view *view;
    BOOL ret = FALSE;
    sigset_t sigset;

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    if ((view = find_view( addr, size )))
        ret = !(view->protect & VPROT_SYSTEM);  /* system views are not visible to the app */
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    return ret;
}
/***********************************************************************
 *           virtual_check_buffer_for_read
 *
 * Check if a memory buffer can be read, triggering page faults if needed for DIB section access.
 */
BOOL virtual_check_buffer_for_read( const void *ptr, SIZE_T size )
{
    if (!size) return TRUE;
    if (!ptr) return FALSE;

    __TRY  /* fault guard; the exact guard macro used here is an assumption */
    {
        volatile const char *p = ptr;
        char dummy __attribute__((unused));
        SIZE_T count = size;

        while (count > page_size)
        {
            dummy = *p;
            p += page_size;
            count -= page_size;
        }
        dummy = p[0];
        dummy = p[count - 1];
    }
    __EXCEPT_SYSCALL
    {
        return FALSE;
    }
    __ENDTRY
    return TRUE;
}
/***********************************************************************
 *           virtual_check_buffer_for_write
 *
 * Check if a memory buffer can be written to, triggering page faults if needed for write watches.
 */
BOOL virtual_check_buffer_for_write( void *ptr, SIZE_T size )
{
    if (!size) return TRUE;
    if (!ptr) return FALSE;

    __TRY  /* fault guard; the exact guard macro used here is an assumption */
    {
        volatile char *p = ptr;
        SIZE_T count = size;

        while (count > page_size)
        {
            *p |= 0;
            p += page_size;
            count -= page_size;
        }
        p[0] |= 0;
        p[count - 1] |= 0;
    }
    __EXCEPT_SYSCALL
    {
        return FALSE;
    }
    __ENDTRY
    return TRUE;
}
/***********************************************************************
 *           virtual_uninterrupted_read_memory
 *
 * Similar to NtReadVirtualMemory, but without wineserver calls. Moreover
 * permissions are checked before accessing each page, to ensure that no
 * exceptions can happen.
 */
SIZE_T virtual_uninterrupted_read_memory( const void *addr, void *buffer, SIZE_T size )
{
    struct file_view *view;
    sigset_t sigset;
    SIZE_T bytes_read = 0;

    if (!size) return 0;

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    if ((view = find_view( addr, size )))
    {
        if (!(view->protect & VPROT_SYSTEM))
        {
            while (bytes_read < size && (get_unix_prot( get_page_vprot( addr )) & PROT_READ))
            {
                SIZE_T block_size = min( size - bytes_read, page_size - ((UINT_PTR)addr & page_mask) );
                memcpy( buffer, addr, block_size );

                addr   = (const void *)((const char *)addr + block_size);
                buffer = (void *)((char *)buffer + block_size);
                bytes_read += block_size;
            }
        }
    }
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    return bytes_read;
}
/***********************************************************************
 *           virtual_uninterrupted_write_memory
 *
 * Similar to NtWriteVirtualMemory, but without wineserver calls. Moreover
 * permissions are checked before accessing each page, to ensure that no
 * exceptions can happen.
 */
NTSTATUS virtual_uninterrupted_write_memory( void *addr, const void *buffer, SIZE_T size )
{
    BOOL has_write_watch = FALSE;
    sigset_t sigset;
    NTSTATUS ret;

    if (!size) return STATUS_SUCCESS;

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    if (!(ret = check_write_access( addr, size, &has_write_watch )))
    {
        memcpy( addr, buffer, size );
        if (has_write_watch) update_write_watches( addr, size, size );
    }
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    return ret;
}
/***********************************************************************
 *           virtual_set_force_exec
 *
 * Whether to force exec prot on all views.
 */
void virtual_set_force_exec( BOOL enable )
{
    struct file_view *view;
    sigset_t sigset;

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    if (!force_exec_prot != !enable)  /* change all existing views */
    {
        force_exec_prot = enable;

        WINE_RB_FOR_EACH_ENTRY( view, &views_tree, struct file_view, entry )
        {
            /* file mappings are always accessible */
            BYTE commit = is_view_valloc( view ) ? 0 : VPROT_COMMITTED;

            mprotect_range( view->base, view->size, commit, 0 );
        }
    }
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
}
/* free reserved areas above the limit; callback for mmap_enum_reserved_areas */
static int free_reserved_memory( void *base, SIZE_T size, void *arg )
{
    struct free_range *range = arg;

    if ((char *)base >= range->limit) return 0;
    if ((char *)base + size <= range->base) return 0;
    if ((char *)base < range->base)
    {
        size -= range->base - (char *)base;
        base = range->base;
    }
    if ((char *)base + size > range->limit) size = range->limit - (char *)base;
    remove_reserved_area( base, size );
    return 1;  /* stop enumeration since the list has changed */
}
/***********************************************************************
 *           virtual_release_address_space
 *
 * Release some address space once we have loaded and initialized the app.
 */
static void virtual_release_address_space(void)
{
    struct free_range range;

    range.base  = (char *)0x82000000;
    range.limit = get_wow_user_space_limit();

    if (range.limit > (char *)0xfffff000) return;  /* 64-bit limit, nothing to do */

    if (range.limit > range.base)
    {
        while (mmap_enum_reserved_areas( free_reserved_memory, &range, 1 )) /* nothing */;
#ifdef __APPLE__
        /* On macOS, we still want to free some of low memory, for OpenGL resources */
        range.base = (char *)0x40000000;
#else
        return;
#endif
    }
    else range.base = (char *)0x20000000;

    range.limit = (char *)0x7f000000;
    while (mmap_enum_reserved_areas( free_reserved_memory, &range, 0 )) /* nothing */;
}
/***********************************************************************
 *           virtual_set_large_address_space
 *
 * Enable use of a large address space when allowed by the application.
 */
void virtual_set_large_address_space(void)
{
    /* no large address space on win9x */
    if (peb->OSPlatformId != VER_PLATFORM_WIN32_NT) return;

    user_space_limit = working_set_limit = address_space_limit;
}
/***********************************************************************
 *           allocate_virtual_memory
 *
 * NtAllocateVirtualMemory[Ex] implementation.
 */
static NTSTATUS allocate_virtual_memory( void **ret, SIZE_T *size_ptr, ULONG type, ULONG protect,
                                         ULONG_PTR limit, ULONG_PTR align, ULONG attributes )
{
    void *base;
    unsigned int vprot;
    BOOL is_dos_memory = FALSE;
    struct file_view *view;
    sigset_t sigset;
    SIZE_T size = *size_ptr;
    NTSTATUS status = STATUS_SUCCESS;

    /* Round parameters to a page boundary */

    if (is_beyond_limit( 0, size, working_set_limit )) return STATUS_WORKING_SET_LIMIT_RANGE;

    if (*ret)
    {
        if (type & MEM_RESERVE) /* Round down to 64k boundary */
            base = ROUND_ADDR( *ret, granularity_mask );
        else
            base = ROUND_ADDR( *ret, page_mask );
        size = (((UINT_PTR)*ret + size + page_mask) & ~page_mask) - (UINT_PTR)base;

        /* disallow low 64k, wrap-around and kernel space */
        if (((char *)base < (char *)0x10000) ||
            ((char *)base + size < (char *)base) ||
            is_beyond_limit( base, size, address_space_limit ))
        {
            /* address 1 is magic to mean DOS area */
            if (!base && *ret == (void *)1 && size == 0x110000) is_dos_memory = TRUE;
            else return STATUS_INVALID_PARAMETER;
        }
    }
    else
    {
        base = NULL;
        size = (size + page_mask) & ~page_mask;
    }

    /* Compute the alloc type flags */

    if (!(type & (MEM_COMMIT | MEM_RESERVE | MEM_RESET)) ||
        (type & ~(MEM_COMMIT | MEM_RESERVE | MEM_TOP_DOWN | MEM_WRITE_WATCH | MEM_RESET)))
    {
        WARN("called with wrong alloc type flags (%08x) !\n", (int)type );
        return STATUS_INVALID_PARAMETER;
    }

    if (!arm64ec_map && (attributes & MEM_EXTENDED_PARAMETER_EC_CODE)) return STATUS_INVALID_PARAMETER;

    /* Reserve the memory */

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );

    if ((type & MEM_RESERVE) || !base)
    {
        if (!(status = get_vprot_flags( protect, &vprot, FALSE )))
        {
            if (type & MEM_COMMIT) vprot |= VPROT_COMMITTED;
            if (type & MEM_WRITE_WATCH) vprot |= VPROT_WRITEWATCH;
            if (protect & PAGE_NOCACHE) vprot |= SEC_NOCACHE;

            if (vprot & VPROT_WRITECOPY) status = STATUS_INVALID_PAGE_PROTECTION;
            else if (is_dos_memory) status = allocate_dos_memory( &view, vprot );
            else status = map_view( &view, base, size, type & MEM_TOP_DOWN, vprot, limit,
                                    align ? align - 1 : granularity_mask );

            if (status == STATUS_SUCCESS) base = view->base;
        }
    }
    else if (type & MEM_RESET)
    {
        if (!(view = find_view( base, size ))) status = STATUS_NOT_MAPPED_VIEW;
        else madvise( base, size, MADV_DONTNEED );
    }
    else  /* commit the pages */
    {
        if (!(view = find_view( base, size ))) status = STATUS_NOT_MAPPED_VIEW;
        else if (view->protect & SEC_FILE) status = STATUS_ALREADY_COMMITTED;
        else if (!(status = set_protection( view, base, size, protect )) && (view->protect & SEC_RESERVE))
        {
            SERVER_START_REQ( add_mapping_committed_range )
            {
                req->base   = wine_server_client_ptr( view->base );
                req->offset = (char *)base - (char *)view->base;
                req->size   = size;
                wine_server_call( req );
            }
            SERVER_END_REQ;
        }
    }

    if (!status && (attributes & MEM_EXTENDED_PARAMETER_EC_CODE)) set_arm64ec_range( base, size );

    if (!status) VIRTUAL_DEBUG_DUMP_VIEW( view );

    server_leave_uninterrupted_section( &virtual_mutex, &sigset );

    if (status == STATUS_SUCCESS)
    {
        *ret = base;
        *size_ptr = size;
    }
    return status;
}
/***********************************************************************
 *             NtAllocateVirtualMemory   (NTDLL.@)
 *             ZwAllocateVirtualMemory   (NTDLL.@)
 */
NTSTATUS WINAPI NtAllocateVirtualMemory( HANDLE process, PVOID *ret, ULONG_PTR zero_bits,
                                         SIZE_T *size_ptr, ULONG type, ULONG protect )
{
    ULONG_PTR limit;

    TRACE("%p %p %08lx %x %08x\n", process, *ret, *size_ptr, (int)type, (int)protect );

    if (!*size_ptr) return STATUS_INVALID_PARAMETER;
    if (zero_bits > 21 && zero_bits < 32) return STATUS_INVALID_PARAMETER_3;
    if (zero_bits > 32 && zero_bits < granularity_mask) return STATUS_INVALID_PARAMETER_3;
#ifndef _WIN64
    if (!is_old_wow64() && zero_bits >= 32) return STATUS_INVALID_PARAMETER_3;
#endif

    if (process != NtCurrentProcess())
    {
        apc_call_t call;
        apc_result_t result;
        unsigned int status;

        memset( &call, 0, sizeof(call) );

        call.virtual_alloc.type         = APC_VIRTUAL_ALLOC;
        call.virtual_alloc.addr         = wine_server_client_ptr( *ret );
        call.virtual_alloc.size         = *size_ptr;
        call.virtual_alloc.zero_bits    = zero_bits;
        call.virtual_alloc.op_type      = type;
        call.virtual_alloc.prot         = protect;
        status = server_queue_process_apc( process, &call, &result );
        if (status != STATUS_SUCCESS) return status;

        if (result.virtual_alloc.status == STATUS_SUCCESS)
        {
            *ret      = wine_server_get_ptr( result.virtual_alloc.addr );
            *size_ptr = result.virtual_alloc.size;
        }
        return result.virtual_alloc.status;
    }

    limit = get_zero_bits_limit( zero_bits );
    return allocate_virtual_memory( ret, size_ptr, type, protect, limit, 0, 0 );
}
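/* Usage sketch (illustrative only, kept out of the build): reserve and commit
 * an anonymous read-write region in the current process, then release it.
 * MEM_RELEASE expects a zero size and the original base address. */
#if 0
static void example_allocate(void)
{
    void *base = NULL;
    SIZE_T size = 0x10000;

    if (!NtAllocateVirtualMemory( NtCurrentProcess(), &base, 0, &size,
                                  MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE ))
    {
        memset( base, 0, size );
        size = 0;
        NtFreeVirtualMemory( NtCurrentProcess(), &base, &size, MEM_RELEASE );
    }
}
#endif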
static NTSTATUS get_extended_params( const MEM_EXTENDED_PARAMETER *parameters, ULONG count,
                                     ULONG_PTR *limit, ULONG_PTR *align, ULONG *attributes )
{
    MEM_ADDRESS_REQUIREMENTS *r = NULL;
    ULONG i;

    if (count && !parameters) return STATUS_INVALID_PARAMETER;

    for (i = 0; i < count; ++i)
    {
        switch (parameters[i].Type)
        {
        case MemExtendedParameterAddressRequirements:
            if (r) return STATUS_INVALID_PARAMETER;
            r = parameters[i].Pointer;

            if (r->LowestStartingAddress)
                FIXME( "Not supported requirements LowestStartingAddress %p, Alignment %p.\n",
                       r->LowestStartingAddress, (void *)r->Alignment );

            if (r->Alignment)
            {
                if ((r->Alignment & (r->Alignment - 1)) || r->Alignment - 1 < granularity_mask)
                {
                    WARN( "Invalid alignment %lu.\n", r->Alignment );
                    return STATUS_INVALID_PARAMETER;
                }
                *align = r->Alignment;
            }

            if (r->HighestEndingAddress)
            {
                *limit = (ULONG_PTR)r->HighestEndingAddress;
                if (*limit > (ULONG_PTR)user_space_limit || ((*limit + 1) & (page_mask - 1)))
                {
                    WARN( "Invalid limit %p.\n", r->HighestEndingAddress );
                    return STATUS_INVALID_PARAMETER;
                }
            }
            break;

        case MemExtendedParameterAttributeFlags:
            *attributes = parameters[i].ULong;
            break;

        case MemExtendedParameterNumaNode:
        case MemExtendedParameterPartitionHandle:
        case MemExtendedParameterUserPhysicalHandle:
        case MemExtendedParameterImageMachine:
            FIXME( "Parameter type %d is not supported.\n", parameters[i].Type );
            break;

        default:
            WARN( "Invalid parameter type %u\n", parameters[i].Type );
            return STATUS_INVALID_PARAMETER;
        }
    }
    return STATUS_SUCCESS;
}
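/* Usage sketch (illustrative only, kept out of the build; the helper name and
 * values are hypothetical): how a caller would populate the address-requirements
 * parameter that this helper parses. The alignment must be a power of two above
 * the 64k granularity, and the highest ending address must sit on the last byte
 * of a page. */
#if 0
static NTSTATUS example_extended_alloc( void **base, SIZE_T *size )
{
    MEM_ADDRESS_REQUIREMENTS req = { 0 };
    MEM_EXTENDED_PARAMETER param = { 0 };

    req.HighestEndingAddress = (void *)0x7fffffff;  /* keep the block in the low 2G */
    req.Alignment            = 0x20000;             /* 128k */
    param.Type    = MemExtendedParameterAddressRequirements;
    param.Pointer = &req;

    return NtAllocateVirtualMemoryEx( NtCurrentProcess(), base, size,
                                      MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE,
                                      &param, 1 );
}
#endif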
/***********************************************************************
 *             NtAllocateVirtualMemoryEx   (NTDLL.@)
 *             ZwAllocateVirtualMemoryEx   (NTDLL.@)
 */
NTSTATUS WINAPI NtAllocateVirtualMemoryEx( HANDLE process, PVOID *ret, SIZE_T *size_ptr, ULONG type,
                                           ULONG protect, MEM_EXTENDED_PARAMETER *parameters,
                                           ULONG count )
{
    ULONG_PTR limit = 0;
    ULONG_PTR align = 0;
    ULONG attributes = 0;
    unsigned int status;

    TRACE( "%p %p %08lx %x %08x %p %u\n",
           process, *ret, *size_ptr, (int)type, (int)protect, parameters, (int)count );

    status = get_extended_params( parameters, count, &limit, &align, &attributes );
    if (status) return status;

    if (*ret && (align || limit)) return STATUS_INVALID_PARAMETER;
    if (!*size_ptr) return STATUS_INVALID_PARAMETER;

    if (process != NtCurrentProcess())
    {
        apc_call_t call;
        apc_result_t result;

        memset( &call, 0, sizeof(call) );

        call.virtual_alloc_ex.type       = APC_VIRTUAL_ALLOC_EX;
        call.virtual_alloc_ex.addr       = wine_server_client_ptr( *ret );
        call.virtual_alloc_ex.size       = *size_ptr;
        call.virtual_alloc_ex.limit      = limit;
        call.virtual_alloc_ex.align      = align;
        call.virtual_alloc_ex.op_type    = type;
        call.virtual_alloc_ex.prot       = protect;
        call.virtual_alloc_ex.attributes = attributes;
        status = server_queue_process_apc( process, &call, &result );
        if (status != STATUS_SUCCESS) return status;

        if (result.virtual_alloc_ex.status == STATUS_SUCCESS)
        {
            *ret      = wine_server_get_ptr( result.virtual_alloc_ex.addr );
            *size_ptr = result.virtual_alloc_ex.size;
        }
        return result.virtual_alloc_ex.status;
    }

    return allocate_virtual_memory( ret, size_ptr, type, protect, limit, align, attributes );
}
/***********************************************************************
 *             NtFreeVirtualMemory   (NTDLL.@)
 *             ZwFreeVirtualMemory   (NTDLL.@)
 */
NTSTATUS WINAPI NtFreeVirtualMemory( HANDLE process, PVOID *addr_ptr, SIZE_T *size_ptr, ULONG type )
{
    struct file_view *view;
    char *base;
    sigset_t sigset;
    unsigned int status = STATUS_SUCCESS;
    LPVOID addr = *addr_ptr;
    SIZE_T size = *size_ptr;

    TRACE("%p %p %08lx %x\n", process, addr, size, (int)type );

    if (process != NtCurrentProcess())
    {
        apc_call_t call;
        apc_result_t result;

        memset( &call, 0, sizeof(call) );

        call.virtual_free.type    = APC_VIRTUAL_FREE;
        call.virtual_free.addr    = wine_server_client_ptr( addr );
        call.virtual_free.size    = size;
        call.virtual_free.op_type = type;
        status = server_queue_process_apc( process, &call, &result );
        if (status != STATUS_SUCCESS) return status;

        if (result.virtual_free.status == STATUS_SUCCESS)
        {
            *addr_ptr = wine_server_get_ptr( result.virtual_free.addr );
            *size_ptr = result.virtual_free.size;
        }
        return result.virtual_free.status;
    }

    /* Fix the parameters */

    if (size) size = ROUND_SIZE( addr, size );
    base = ROUND_ADDR( addr, page_mask );

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );

    /* avoid freeing the DOS area when a broken app passes a NULL pointer */
    if (!base)
    {
        /* address 1 is magic to mean release reserved space */
        if (addr == (void *)1 && !size && type == MEM_RELEASE) virtual_release_address_space();
        else status = STATUS_INVALID_PARAMETER;
    }
    else if (!(view = find_view( base, 0 ))) status = STATUS_MEMORY_NOT_ALLOCATED;
    else if (!is_view_valloc( view )) status = STATUS_INVALID_PARAMETER;
    else if (!size && base != view->base) status = STATUS_FREE_VM_NOT_AT_BASE;
    else if ((char *)view->base + view->size - base < size) status = STATUS_UNABLE_TO_FREE_VM;
    else if (type == MEM_DECOMMIT) status = decommit_pages( view, base - (char *)view->base, size );
    else if (type == MEM_RELEASE)
    {
        if (!size) size = view->size;
        status = free_pages( view, base, size );
    }
    else status = STATUS_INVALID_PARAMETER;

    if (status == STATUS_SUCCESS)
    {
        *addr_ptr = base;
        *size_ptr = size;
    }
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    return status;
}
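/* Usage sketch (illustrative only, kept out of the build; the helper name is
 * hypothetical): MEM_DECOMMIT may target a sub-range of an allocation, while
 * MEM_RELEASE with a zero size frees the whole allocation and must be given
 * the base returned at reserve time. */
#if 0
static void example_free( void *base, SIZE_T page_bytes )
{
    void *addr = (char *)base + page_bytes;  /* second page only */
    SIZE_T size = page_bytes;

    NtFreeVirtualMemory( NtCurrentProcess(), &addr, &size, MEM_DECOMMIT );

    addr = base;
    size = 0;
    NtFreeVirtualMemory( NtCurrentProcess(), &addr, &size, MEM_RELEASE );
}
#endif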
/***********************************************************************
 *             NtProtectVirtualMemory   (NTDLL.@)
 *             ZwProtectVirtualMemory   (NTDLL.@)
 */
NTSTATUS WINAPI NtProtectVirtualMemory( HANDLE process, PVOID *addr_ptr, SIZE_T *size_ptr,
                                        ULONG new_prot, ULONG *old_prot )
{
    struct file_view *view;
    sigset_t sigset;
    unsigned int status = STATUS_SUCCESS;
    char *base;
    BYTE vprot;
    SIZE_T size = *size_ptr;
    LPVOID addr = *addr_ptr;
    DWORD old;

    TRACE("%p %p %08lx %08x\n", process, addr, size, (int)new_prot );

    if (!old_prot)
        return STATUS_ACCESS_VIOLATION;

    if (process != NtCurrentProcess())
    {
        apc_call_t call;
        apc_result_t result;

        memset( &call, 0, sizeof(call) );

        call.virtual_protect.type = APC_VIRTUAL_PROTECT;
        call.virtual_protect.addr = wine_server_client_ptr( addr );
        call.virtual_protect.size = size;
        call.virtual_protect.prot = new_prot;
        status = server_queue_process_apc( process, &call, &result );
        if (status != STATUS_SUCCESS) return status;

        if (result.virtual_protect.status == STATUS_SUCCESS)
        {
            *addr_ptr = wine_server_get_ptr( result.virtual_protect.addr );
            *size_ptr = result.virtual_protect.size;
            *old_prot = result.virtual_protect.prot;
        }
        return result.virtual_protect.status;
    }

    /* Fix the parameters */

    size = ROUND_SIZE( addr, size );
    base = ROUND_ADDR( addr, page_mask );

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );

    if ((view = find_view( base, size )))
    {
        /* Make sure all the pages are committed */
        if (get_committed_size( view, base, &vprot, VPROT_COMMITTED ) >= size && (vprot & VPROT_COMMITTED))
        {
            old = get_win32_prot( vprot, view->protect );
            status = set_protection( view, base, size, new_prot );
        }
        else status = STATUS_NOT_COMMITTED;
    }
    else status = STATUS_INVALID_PARAMETER;

    if (!status) VIRTUAL_DEBUG_DUMP_VIEW( view );

    server_leave_uninterrupted_section( &virtual_mutex, &sigset );

    if (status == STATUS_SUCCESS)
    {
        *addr_ptr = base;
        *size_ptr = size;
        *old_prot = old;
    }
    return status;
}
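/* Usage sketch (illustrative only, kept out of the build): flip a committed
 * page to read-only, then restore the previous protection that was returned
 * through the old_prot pointer. */
#if 0
static void example_protect( void *page, SIZE_T page_bytes )
{
    void *addr = page;
    SIZE_T size = page_bytes;
    ULONG old;

    if (!NtProtectVirtualMemory( NtCurrentProcess(), &addr, &size, PAGE_READONLY, &old ))
        NtProtectVirtualMemory( NtCurrentProcess(), &addr, &size, old, &old );
}
#endif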
/* retrieve state for a free memory area; callback for mmap_enum_reserved_areas */
static int get_free_mem_state_callback( void *start, SIZE_T size, void *arg )
{
    MEMORY_BASIC_INFORMATION *info = arg;
    void *end = (char *)start + size;

    if ((char *)info->BaseAddress + info->RegionSize <= (char *)start) return 0;

    if (info->BaseAddress >= end)
    {
        if (info->AllocationBase < end) info->AllocationBase = end;
        return 0;
    }

    if (info->BaseAddress >= start || start <= address_space_start)
    {
        /* it's a real free area */
        info->State             = MEM_FREE;
        info->Protect           = PAGE_NOACCESS;
        info->AllocationBase    = 0;
        info->AllocationProtect = 0;
        info->Type              = 0;

        if ((char *)info->BaseAddress + info->RegionSize > (char *)end)
            info->RegionSize = (char *)end - (char *)info->BaseAddress;
    }
    else /* outside of the reserved area, pretend it's allocated */
    {
        info->RegionSize = (char *)start - (char *)info->BaseAddress;
#ifdef __i386__
        info->State             = MEM_RESERVE;
        info->Protect           = PAGE_NOACCESS;
        info->AllocationProtect = PAGE_NOACCESS;
        info->Type              = MEM_PRIVATE;
#else
        info->State             = MEM_FREE;
        info->Protect           = PAGE_NOACCESS;
        info->AllocationBase    = 0;
        info->AllocationProtect = 0;
        info->Type              = 0;
#endif
    }
    return 1;
}
static unsigned int fill_basic_memory_info( const void *addr, MEMORY_BASIC_INFORMATION *info )
{
    char *base, *alloc_base = 0, *alloc_end = working_set_limit;
    struct wine_rb_entry *ptr;
    struct file_view *view;
    sigset_t sigset;

    base = ROUND_ADDR( addr, page_mask );

    if (is_beyond_limit( base, 1, working_set_limit )) return STATUS_INVALID_PARAMETER;

    /* Find the view containing the address */

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    ptr = views_tree.root;
    while (ptr)
    {
        view = WINE_RB_ENTRY_VALUE( ptr, struct file_view, entry );
        if ((char *)view->base > base)
        {
            alloc_end = view->base;
            ptr = ptr->left;
        }
        else if ((char *)view->base + view->size <= base)
        {
            alloc_base = (char *)view->base + view->size;
            ptr = ptr->right;
        }
        else
        {
            alloc_base = view->base;
            alloc_end = (char *)view->base + view->size;
            break;
        }
    }

    /* Fill the info structure */

    info->AllocationBase = alloc_base;
    info->BaseAddress    = base;
    info->RegionSize     = alloc_end - base;

    if (!ptr)
    {
        if (!mmap_enum_reserved_areas( get_free_mem_state_callback, info, 0 ))
        {
            /* not in a reserved area at all, pretend it's allocated */
#ifdef __i386__
            if (base >= (char *)address_space_start)
            {
                info->State             = MEM_RESERVE;
                info->Protect           = PAGE_NOACCESS;
                info->AllocationProtect = PAGE_NOACCESS;
                info->Type              = MEM_PRIVATE;
            }
            else
#endif
            {
                info->State             = MEM_FREE;
                info->Protect           = PAGE_NOACCESS;
                info->AllocationBase    = 0;
                info->AllocationProtect = 0;
                info->Type              = 0;
            }
        }
    }
    else
    {
        BYTE vprot;

        info->RegionSize        = get_committed_size( view, base, &vprot, ~VPROT_WRITEWATCH );
        info->State             = (vprot & VPROT_COMMITTED) ? MEM_COMMIT : MEM_RESERVE;
        info->Protect           = (vprot & VPROT_COMMITTED) ? get_win32_prot( vprot, view->protect ) : 0;
        info->AllocationProtect = get_win32_prot( view->protect, view->protect );
        if (view->protect & SEC_IMAGE) info->Type = MEM_IMAGE;
        else if (view->protect & (SEC_FILE | SEC_RESERVE | SEC_COMMIT)) info->Type = MEM_MAPPED;
        else info->Type = MEM_PRIVATE;
    }
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );

    return STATUS_SUCCESS;
}
/* get basic information about a memory block */
static unsigned int get_basic_memory_info( HANDLE process, LPCVOID addr,
                                           MEMORY_BASIC_INFORMATION *info,
                                           SIZE_T len, SIZE_T *res_len )
{
    unsigned int status;

    if (len < sizeof(*info))
        return STATUS_INFO_LENGTH_MISMATCH;

    if (process != NtCurrentProcess())
    {
        apc_call_t call;
        apc_result_t result;

        memset( &call, 0, sizeof(call) );

        call.virtual_query.type = APC_VIRTUAL_QUERY;
        call.virtual_query.addr = wine_server_client_ptr( addr );
        status = server_queue_process_apc( process, &call, &result );
        if (status != STATUS_SUCCESS) return status;

        if (result.virtual_query.status == STATUS_SUCCESS)
        {
            info->BaseAddress       = wine_server_get_ptr( result.virtual_query.base );
            info->AllocationBase    = wine_server_get_ptr( result.virtual_query.alloc_base );
            info->RegionSize        = result.virtual_query.size;
            info->Protect           = result.virtual_query.prot;
            info->AllocationProtect = result.virtual_query.alloc_prot;
            info->State             = (DWORD)result.virtual_query.state << 12;
            info->Type              = (DWORD)result.virtual_query.alloc_type << 16;
            if (info->RegionSize != result.virtual_query.size)  /* truncated */
                return STATUS_INVALID_PARAMETER;  /* FIXME */
            if (res_len) *res_len = sizeof(*info);
        }
        return result.virtual_query.status;
    }

    if ((status = fill_basic_memory_info( addr, info ))) return status;

    if (res_len) *res_len = sizeof(*info);
    return STATUS_SUCCESS;
}
static unsigned int get_memory_region_info( HANDLE process, LPCVOID addr, MEMORY_REGION_INFORMATION *info,
                                            SIZE_T len, SIZE_T *res_len )
{
    MEMORY_BASIC_INFORMATION basic_info;
    unsigned int status;

    if (len < FIELD_OFFSET(MEMORY_REGION_INFORMATION, CommitSize))
        return STATUS_INFO_LENGTH_MISMATCH;

    if (process != NtCurrentProcess())
    {
        FIXME("Unimplemented for other processes.\n");
        return STATUS_NOT_IMPLEMENTED;
    }

    if ((status = fill_basic_memory_info( addr, &basic_info ))) return status;

    info->AllocationBase    = basic_info.AllocationBase;
    info->AllocationProtect = basic_info.AllocationProtect;
    info->RegionType        = 0; /* FIXME */
    if (len >= FIELD_OFFSET(MEMORY_REGION_INFORMATION, CommitSize))
        info->RegionSize = basic_info.RegionSize;
    if (len >= FIELD_OFFSET(MEMORY_REGION_INFORMATION, PartitionId))
        info->CommitSize = basic_info.State == MEM_COMMIT ? basic_info.RegionSize : 0;

    if (res_len) *res_len = sizeof(*info);
    return STATUS_SUCCESS;
}
static NTSTATUS get_working_set_ex( HANDLE process, LPCVOID addr,
                                    MEMORY_WORKING_SET_EX_INFORMATION *info,
                                    SIZE_T len, SIZE_T *res_len )
{
#if !defined(HAVE_LIBPROCSTAT)
    static int pagemap_fd = -2;
#endif
    MEMORY_WORKING_SET_EX_INFORMATION *p;
    sigset_t sigset;

    if (process != NtCurrentProcess())
    {
        FIXME( "(process=%p,addr=%p) Unimplemented information class: MemoryWorkingSetExInformation\n", process, addr );
        return STATUS_INVALID_INFO_CLASS;
    }

#if defined(HAVE_LIBPROCSTAT)
    {
        struct procstat *pstat;
        unsigned int proc_count;
        struct kinfo_proc *kip = NULL;
        unsigned int vmentry_count = 0;
        struct kinfo_vmentry *vmentries = NULL;

        pstat = procstat_open_sysctl();
        if (pstat)
            kip = procstat_getprocs( pstat, KERN_PROC_PID, getpid(), &proc_count );
        if (kip)
            vmentries = procstat_getvmmap( pstat, kip, &vmentry_count );
        if (vmentries == NULL)
            WARN( "couldn't get process vmmap, errno %d\n", errno );

        server_enter_uninterrupted_section( &virtual_mutex, &sigset );
        for (p = info; (UINT_PTR)(p + 1) <= (UINT_PTR)info + len; p++)
        {
            unsigned int i;
            struct kinfo_vmentry *entry = NULL;
            BYTE vprot;
            struct file_view *view;

            memset( &p->VirtualAttributes, 0, sizeof(p->VirtualAttributes) );
            if ((view = find_view( p->VirtualAddress, 0 )) &&
                get_committed_size( view, p->VirtualAddress, &vprot, VPROT_COMMITTED ) &&
                (vprot & VPROT_COMMITTED))
            {
                for (i = 0; i < vmentry_count && entry == NULL; i++)
                {
                    if (vmentries[i].kve_start <= (ULONG_PTR)p->VirtualAddress && (ULONG_PTR)p->VirtualAddress <= vmentries[i].kve_end)
                        entry = &vmentries[i];
                }

                p->VirtualAttributes.Valid  = !(vprot & VPROT_GUARD) && (vprot & 0x0f) && entry && entry->kve_type != KVME_TYPE_SWAP;
                p->VirtualAttributes.Shared = !is_view_valloc( view );
                if (p->VirtualAttributes.Shared && p->VirtualAttributes.Valid)
                    p->VirtualAttributes.ShareCount = 1; /* FIXME */
                if (p->VirtualAttributes.Valid)
                    p->VirtualAttributes.Win32Protection = get_win32_prot( vprot, view->protect );
            }
        }
        server_leave_uninterrupted_section( &virtual_mutex, &sigset );

        if (vmentries)
            procstat_freevmmap( pstat, vmentries );
        if (kip)
            procstat_freeprocs( pstat, kip );
        if (pstat)
            procstat_close( pstat );
    }
#else
    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    if (pagemap_fd == -2)
    {
#ifdef O_CLOEXEC
        if ((pagemap_fd = open( "/proc/self/pagemap", O_RDONLY | O_CLOEXEC, 0 )) == -1 && errno == EINVAL)
#endif
            pagemap_fd = open( "/proc/self/pagemap", O_RDONLY, 0 );

        if (pagemap_fd == -1) WARN( "unable to open /proc/self/pagemap\n" );
        else fcntl(pagemap_fd, F_SETFD, FD_CLOEXEC);  /* in case O_CLOEXEC isn't supported */
    }

    for (p = info; (UINT_PTR)(p + 1) <= (UINT_PTR)info + len; p++)
    {
        BYTE vprot;
        UINT64 pagemap;
        struct file_view *view;

        memset( &p->VirtualAttributes, 0, sizeof(p->VirtualAttributes) );

        if ((view = find_view( p->VirtualAddress, 0 )) &&
            get_committed_size( view, p->VirtualAddress, &vprot, VPROT_COMMITTED ) &&
            (vprot & VPROT_COMMITTED))
        {
            if (pagemap_fd == -1 ||
                pread( pagemap_fd, &pagemap, sizeof(pagemap), ((UINT_PTR)p->VirtualAddress >> page_shift) * sizeof(pagemap) ) != sizeof(pagemap))
            {
                /* If we don't have pagemap information, default to invalid. */
                pagemap = 0;
            }

            p->VirtualAttributes.Valid  = !(vprot & VPROT_GUARD) && (vprot & 0x0f) && (pagemap >> 63);
            p->VirtualAttributes.Shared = !is_view_valloc( view ) && ((pagemap >> 61) & 1);
            if (p->VirtualAttributes.Shared && p->VirtualAttributes.Valid)
                p->VirtualAttributes.ShareCount = 1; /* FIXME */
            if (p->VirtualAttributes.Valid)
                p->VirtualAttributes.Win32Protection = get_win32_prot( vprot, view->protect );
        }
    }
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
#endif

    if (res_len)
        *res_len = (UINT_PTR)p - (UINT_PTR)info;
    return STATUS_SUCCESS;
}
static unsigned int get_memory_section_name( HANDLE process, LPCVOID addr,
                                             MEMORY_SECTION_NAME *info, SIZE_T len, SIZE_T *ret_len )
{
    unsigned int status;

    if (!info) return STATUS_ACCESS_VIOLATION;

    SERVER_START_REQ( get_mapping_filename )
    {
        req->process = wine_server_obj_handle( process );
        req->addr = wine_server_client_ptr( addr );
        if (len > sizeof(*info) + sizeof(WCHAR))
            wine_server_set_reply( req, info + 1, len - sizeof(*info) - sizeof(WCHAR) );
        status = wine_server_call( req );
        if (!status || status == STATUS_BUFFER_OVERFLOW)
        {
            if (ret_len) *ret_len = sizeof(*info) + reply->len + sizeof(WCHAR);
            if (len < sizeof(*info)) status = STATUS_INFO_LENGTH_MISMATCH;
            if (!status)
            {
                info->SectionFileName.Buffer = (WCHAR *)(info + 1);
                info->SectionFileName.Length = reply->len;
                info->SectionFileName.MaximumLength = reply->len + sizeof(WCHAR);
                info->SectionFileName.Buffer[reply->len / sizeof(WCHAR)] = 0;
            }
        }
    }
    SERVER_END_REQ;
    return status;
}
/***********************************************************************
 *             NtQueryVirtualMemory   (NTDLL.@)
 *             ZwQueryVirtualMemory   (NTDLL.@)
 */
NTSTATUS WINAPI NtQueryVirtualMemory( HANDLE process, LPCVOID addr,
                                      MEMORY_INFORMATION_CLASS info_class,
                                      PVOID buffer, SIZE_T len, SIZE_T *res_len )
{
    NTSTATUS status;

    TRACE("(%p, %p, info_class=%d, %p, %ld, %p)\n",
          process, addr, info_class, buffer, len, res_len );

    switch (info_class)
    {
        case MemoryBasicInformation:
            return get_basic_memory_info( process, addr, buffer, len, res_len );

        case MemoryWorkingSetExInformation:
            return get_working_set_ex( process, addr, buffer, len, res_len );

        case MemoryMappedFilenameInformation:
            return get_memory_section_name( process, addr, buffer, len, res_len );

        case MemoryRegionInformation:
            return get_memory_region_info( process, addr, buffer, len, res_len );

        case MemoryWineUnixFuncs:
        case MemoryWineUnixWow64Funcs:
            if (len != sizeof(unixlib_handle_t)) return STATUS_INFO_LENGTH_MISMATCH;
            if (process == GetCurrentProcess())
            {
                void *module = (void *)addr;
                const void *funcs = NULL;

                status = get_builtin_unix_funcs( module, info_class == MemoryWineUnixWow64Funcs, &funcs );
                if (!status) *(unixlib_handle_t *)buffer = (UINT_PTR)funcs;
                return status;
            }
            return STATUS_INVALID_HANDLE;

        default:
            FIXME("(%p,%p,info_class=%d,%p,%ld,%p) Unknown information class\n",
                  process, addr, info_class, buffer, len, res_len );
            return STATUS_INVALID_INFO_CLASS;
    }
}
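/* Usage sketch (illustrative only, kept out of the build): walk the address
 * space region by region with MemoryBasicInformation, the way VirtualQueryEx
 * based scanners typically do. */
#if 0
static void example_walk_address_space(void)
{
    MEMORY_BASIC_INFORMATION info;
    char *addr = NULL;

    while (!NtQueryVirtualMemory( NtCurrentProcess(), addr, MemoryBasicInformation,
                                  &info, sizeof(info), NULL ))
    {
        /* info.State is MEM_FREE, MEM_RESERVE or MEM_COMMIT here */
        if (addr + info.RegionSize < addr) break;  /* wrapped around */
        addr += info.RegionSize;
    }
}
#endif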
/***********************************************************************
 *             NtLockVirtualMemory   (NTDLL.@)
 *             ZwLockVirtualMemory   (NTDLL.@)
 */
NTSTATUS WINAPI NtLockVirtualMemory( HANDLE process, PVOID *addr, SIZE_T *size, ULONG unknown )
{
    unsigned int status = STATUS_SUCCESS;

    if (process != NtCurrentProcess())
    {
        apc_call_t call;
        apc_result_t result;

        memset( &call, 0, sizeof(call) );

        call.virtual_lock.type = APC_VIRTUAL_LOCK;
        call.virtual_lock.addr = wine_server_client_ptr( *addr );
        call.virtual_lock.size = *size;
        status = server_queue_process_apc( process, &call, &result );
        if (status != STATUS_SUCCESS) return status;

        if (result.virtual_lock.status == STATUS_SUCCESS)
        {
            *addr = wine_server_get_ptr( result.virtual_lock.addr );
            *size = result.virtual_lock.size;
        }
        return result.virtual_lock.status;
    }

    *size = ROUND_SIZE( *addr, *size );
    *addr = ROUND_ADDR( *addr, page_mask );

    if (mlock( *addr, *size )) status = STATUS_ACCESS_DENIED;
    return status;
}
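/* Usage sketch (illustrative only, kept out of the build): the lock is page
 * granular, so the range is rounded outwards before being handed to mlock();
 * the rounded address and size are written back to the caller. */
#if 0
static void example_lock( void *buf, SIZE_T len )
{
    void *addr = buf;
    SIZE_T size = len;

    if (!NtLockVirtualMemory( NtCurrentProcess(), &addr, &size, 0 ))
        NtUnlockVirtualMemory( NtCurrentProcess(), &addr, &size, 0 );
}
#endif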
/***********************************************************************
 *             NtUnlockVirtualMemory   (NTDLL.@)
 *             ZwUnlockVirtualMemory   (NTDLL.@)
 */
NTSTATUS WINAPI NtUnlockVirtualMemory( HANDLE process, PVOID *addr, SIZE_T *size, ULONG unknown )
{
    unsigned int status = STATUS_SUCCESS;

    if (process != NtCurrentProcess())
    {
        apc_call_t call;
        apc_result_t result;

        memset( &call, 0, sizeof(call) );

        call.virtual_unlock.type = APC_VIRTUAL_UNLOCK;
        call.virtual_unlock.addr = wine_server_client_ptr( *addr );
        call.virtual_unlock.size = *size;
        status = server_queue_process_apc( process, &call, &result );
        if (status != STATUS_SUCCESS) return status;

        if (result.virtual_unlock.status == STATUS_SUCCESS)
        {
            *addr = wine_server_get_ptr( result.virtual_unlock.addr );
            *size = result.virtual_unlock.size;
        }
        return result.virtual_unlock.status;
    }

    *size = ROUND_SIZE( *addr, *size );
    *addr = ROUND_ADDR( *addr, page_mask );

    if (munlock( *addr, *size )) status = STATUS_ACCESS_DENIED;
    return status;
}
/***********************************************************************
 *             NtMapViewOfSection   (NTDLL.@)
 *             ZwMapViewOfSection   (NTDLL.@)
 */
NTSTATUS WINAPI NtMapViewOfSection( HANDLE handle, HANDLE process, PVOID *addr_ptr, ULONG_PTR zero_bits,
                                    SIZE_T commit_size, const LARGE_INTEGER *offset_ptr, SIZE_T *size_ptr,
                                    SECTION_INHERIT inherit, ULONG alloc_type, ULONG protect )
{
    unsigned int res;
    SIZE_T mask = granularity_mask;
    LARGE_INTEGER offset;

    offset.QuadPart = offset_ptr ? offset_ptr->QuadPart : 0;

    TRACE("handle=%p process=%p addr=%p off=%s size=%lx access=%x\n",
          handle, process, *addr_ptr, wine_dbgstr_longlong(offset.QuadPart), *size_ptr, (int)protect );

    /* Check parameters */
    if (zero_bits > 21 && zero_bits < 32)
        return STATUS_INVALID_PARAMETER_4;

    /* If both addr_ptr and zero_bits are passed, they have to match */
    if (*addr_ptr && zero_bits && zero_bits < 32 &&
        (((UINT_PTR)*addr_ptr) >> (32 - zero_bits)))
        return STATUS_INVALID_PARAMETER_4;
    if (*addr_ptr && zero_bits >= 32 &&
        (((UINT_PTR)*addr_ptr) & ~zero_bits))
        return STATUS_INVALID_PARAMETER_4;

#ifndef _WIN64
    if (!is_old_wow64())
    {
        if (zero_bits >= 32) return STATUS_INVALID_PARAMETER_4;
        if (alloc_type & AT_ROUND_TO_PAGE)
        {
            *addr_ptr = ROUND_ADDR( *addr_ptr, page_mask );
            mask = page_mask;
        }
    }
#endif

    if ((offset.u.LowPart & mask) || (*addr_ptr && ((UINT_PTR)*addr_ptr & mask)))
        return STATUS_MAPPED_ALIGNMENT;

    if (process != NtCurrentProcess())
    {
        apc_call_t call;
        apc_result_t result;

        memset( &call, 0, sizeof(call) );

        call.map_view.type       = APC_MAP_VIEW;
        call.map_view.handle     = wine_server_obj_handle( handle );
        call.map_view.addr       = wine_server_client_ptr( *addr_ptr );
        call.map_view.size       = *size_ptr;
        call.map_view.offset     = offset.QuadPart;
        call.map_view.zero_bits  = zero_bits;
        call.map_view.alloc_type = alloc_type;
        call.map_view.prot       = protect;
        res = server_queue_process_apc( process, &call, &result );
        if (res != STATUS_SUCCESS) return res;

        if (NT_SUCCESS(result.map_view.status))
        {
            *addr_ptr = wine_server_get_ptr( result.map_view.addr );
            *size_ptr = result.map_view.size;
        }
        return result.map_view.status;
    }

    return virtual_map_section( handle, addr_ptr, get_zero_bits_limit( zero_bits ), commit_size,
                                offset_ptr, size_ptr, alloc_type, protect );
}
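/* Usage sketch (illustrative only, kept out of the build; the helper name is
 * hypothetical): map a whole section into the current process and unmap it
 * again. Both the offset and any requested address must respect the 64k
 * allocation granularity. */
#if 0
static NTSTATUS example_map_section( HANDLE section )
{
    void *base = NULL;
    SIZE_T size = 0;          /* 0 maps the entire section */
    LARGE_INTEGER offset;
    NTSTATUS status;

    offset.QuadPart = 0;
    status = NtMapViewOfSection( section, NtCurrentProcess(), &base, 0, 0,
                                 &offset, &size, ViewShare, 0, PAGE_READWRITE );
    if (!status) NtUnmapViewOfSection( NtCurrentProcess(), base );
    return status;
}
#endif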
/***********************************************************************
 *             NtMapViewOfSectionEx   (NTDLL.@)
 *             ZwMapViewOfSectionEx   (NTDLL.@)
 */
NTSTATUS WINAPI NtMapViewOfSectionEx( HANDLE handle, HANDLE process, PVOID *addr_ptr,
                                      const LARGE_INTEGER *offset_ptr, SIZE_T *size_ptr,
                                      ULONG alloc_type, ULONG protect,
                                      MEM_EXTENDED_PARAMETER *parameters, ULONG count )
{
    ULONG_PTR limit = 0, align = 0;
    ULONG attributes = 0;
    unsigned int status;
    SIZE_T mask = granularity_mask;
    LARGE_INTEGER offset;

    offset.QuadPart = offset_ptr ? offset_ptr->QuadPart : 0;

    TRACE( "handle=%p process=%p addr=%p off=%s size=%lx access=%x\n",
           handle, process, *addr_ptr, wine_dbgstr_longlong(offset.QuadPart), *size_ptr, (int)protect );

    status = get_extended_params( parameters, count, &limit, &align, &attributes );
    if (status) return status;

    if (align) return STATUS_INVALID_PARAMETER;
    if (*addr_ptr && limit) return STATUS_INVALID_PARAMETER;

#ifndef _WIN64
    if (!is_old_wow64() && (alloc_type & AT_ROUND_TO_PAGE))
    {
        *addr_ptr = ROUND_ADDR( *addr_ptr, page_mask );
        mask = page_mask;
    }
#endif

    if ((offset.u.LowPart & mask) || (*addr_ptr && ((UINT_PTR)*addr_ptr & mask)))
        return STATUS_MAPPED_ALIGNMENT;

    if (process != NtCurrentProcess())
    {
        apc_call_t call;
        apc_result_t result;

        memset( &call, 0, sizeof(call) );

        call.map_view_ex.type       = APC_MAP_VIEW_EX;
        call.map_view_ex.handle     = wine_server_obj_handle( handle );
        call.map_view_ex.addr       = wine_server_client_ptr( *addr_ptr );
        call.map_view_ex.size       = *size_ptr;
        call.map_view_ex.offset     = offset.QuadPart;
        call.map_view_ex.limit      = limit;
        call.map_view_ex.alloc_type = alloc_type;
        call.map_view_ex.prot       = protect;
        status = server_queue_process_apc( process, &call, &result );
        if (status != STATUS_SUCCESS) return status;

        if (NT_SUCCESS(result.map_view_ex.status))
        {
            *addr_ptr = wine_server_get_ptr( result.map_view_ex.addr );
            *size_ptr = result.map_view_ex.size;
        }
        return result.map_view_ex.status;
    }

    return virtual_map_section( handle, addr_ptr, limit, 0, offset_ptr, size_ptr, alloc_type, protect );
}
/***********************************************************************
 *             NtUnmapViewOfSection   (NTDLL.@)
 *             ZwUnmapViewOfSection   (NTDLL.@)
 */
NTSTATUS WINAPI NtUnmapViewOfSection( HANDLE process, PVOID addr )
{
    struct file_view *view;
    unsigned int status = STATUS_NOT_MAPPED_VIEW;
    sigset_t sigset;

    if (process != NtCurrentProcess())
    {
        apc_call_t call;
        apc_result_t result;

        memset( &call, 0, sizeof(call) );

        call.unmap_view.type = APC_UNMAP_VIEW;
        call.unmap_view.addr = wine_server_client_ptr( addr );
        status = server_queue_process_apc( process, &call, &result );
        if (status == STATUS_SUCCESS) status = result.unmap_view.status;
        return status;
    }

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    if ((view = find_view( addr, 0 )) && !is_view_valloc( view ))
    {
        if (view->protect & VPROT_SYSTEM)
        {
            struct builtin_module *builtin;

            LIST_FOR_EACH_ENTRY( builtin, &builtin_modules, struct builtin_module, entry )
            {
                if (builtin->module != view->base) continue;
                if (builtin->refcount > 1)
                {
                    TRACE( "not freeing in-use builtin %p\n", view->base );
                    builtin->refcount--;
                    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
                    return STATUS_SUCCESS;
                }
            }
        }

        SERVER_START_REQ( unmap_view )
        {
            req->base = wine_server_client_ptr( view->base );
            status = wine_server_call( req );
        }
        SERVER_END_REQ;
        if (!status)
        {
            if (view->protect & SEC_IMAGE) release_builtin_module( view->base );
            delete_view( view );
        }
        else FIXME( "failed to unmap %p %x\n", view->base, status );
    }
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    return status;
}
/***********************************************************************
 *             NtUnmapViewOfSectionEx   (NTDLL.@)
 *             ZwUnmapViewOfSectionEx   (NTDLL.@)
 */
NTSTATUS WINAPI NtUnmapViewOfSectionEx( HANDLE process, PVOID addr, ULONG flags )
{
    if (flags) FIXME("Ignoring flags %#x.\n", (int)flags);
    return NtUnmapViewOfSection( process, addr );
}
/******************************************************************************
 *             virtual_fill_image_information
 *
 * Helper for NtQuerySection.
 */
void virtual_fill_image_information( const pe_image_info_t *pe_info, SECTION_IMAGE_INFORMATION *info )
{
    info->TransferAddress             = wine_server_get_ptr( pe_info->base + pe_info->entry_point );
    info->ZeroBits                    = pe_info->zerobits;
    info->MaximumStackSize            = pe_info->stack_size;
    info->CommittedStackSize          = pe_info->stack_commit;
    info->SubSystemType               = pe_info->subsystem;
    info->MinorSubsystemVersion       = pe_info->subsystem_minor;
    info->MajorSubsystemVersion       = pe_info->subsystem_major;
    info->MajorOperatingSystemVersion = pe_info->osversion_major;
    info->MinorOperatingSystemVersion = pe_info->osversion_minor;
    info->ImageCharacteristics        = pe_info->image_charact;
    info->DllCharacteristics          = pe_info->dll_charact;
    info->Machine                     = pe_info->machine;
    info->ImageContainsCode           = pe_info->contains_code;
    info->ImageFlags                  = pe_info->image_flags;
    info->LoaderFlags                 = pe_info->loader_flags;
    info->ImageFileSize               = pe_info->file_size;
    info->CheckSum                    = pe_info->checksum;
#ifndef _WIN64 /* don't return 64-bit values to 32-bit processes */
    if (is_machine_64bit( pe_info->machine ))
    {
        info->TransferAddress    = (void *)0x81231234;  /* sic */
        info->MaximumStackSize   = 0x100000;
        info->CommittedStackSize = 0x10000;
    }
#endif
}
/******************************************************************************
 *             NtQuerySection   (NTDLL.@)
 *             ZwQuerySection   (NTDLL.@)
 */
NTSTATUS WINAPI NtQuerySection( HANDLE handle, SECTION_INFORMATION_CLASS class, void *ptr,
                                SIZE_T size, SIZE_T *ret_size )
{
    unsigned int status;
    pe_image_info_t image_info;

    switch (class)
    {
    case SectionBasicInformation:
        if (size < sizeof(SECTION_BASIC_INFORMATION)) return STATUS_INFO_LENGTH_MISMATCH;
        break;
    case SectionImageInformation:
        if (size < sizeof(SECTION_IMAGE_INFORMATION)) return STATUS_INFO_LENGTH_MISMATCH;
        break;
    default:
        FIXME( "class %u not implemented\n", class );
        return STATUS_NOT_IMPLEMENTED;
    }
    if (!ptr) return STATUS_ACCESS_VIOLATION;

    SERVER_START_REQ( get_mapping_info )
    {
        req->handle = wine_server_obj_handle( handle );
        req->access = SECTION_QUERY;
        wine_server_set_reply( req, &image_info, sizeof(image_info) );
        if (!(status = wine_server_call( req )))
        {
            if (class == SectionBasicInformation)
            {
                SECTION_BASIC_INFORMATION *info = ptr;
                info->Attributes    = reply->flags;
                info->BaseAddress   = NULL;
                info->Size.QuadPart = reply->size;
                if (ret_size) *ret_size = sizeof(*info);
            }
            else if (reply->flags & SEC_IMAGE)
            {
                SECTION_IMAGE_INFORMATION *info = ptr;
                virtual_fill_image_information( &image_info, info );
                if (ret_size) *ret_size = sizeof(*info);
            }
            else status = STATUS_SECTION_NOT_IMAGE;
        }
    }
    SERVER_END_REQ;

    return status;
}
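/* Usage sketch (illustrative only, kept out of the build; the helper name is
 * hypothetical): ask a section handle whether it was created from a PE image;
 * non-image sections fail with STATUS_SECTION_NOT_IMAGE. */
#if 0
static NTSTATUS example_query_section( HANDLE section )
{
    SECTION_IMAGE_INFORMATION info;
    SIZE_T ret_size;

    return NtQuerySection( section, SectionImageInformation, &info, sizeof(info), &ret_size );
}
#endif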
/***********************************************************************
 *             NtFlushVirtualMemory   (NTDLL.@)
 *             ZwFlushVirtualMemory   (NTDLL.@)
 */
NTSTATUS WINAPI NtFlushVirtualMemory( HANDLE process, LPCVOID *addr_ptr,
                                      SIZE_T *size_ptr, ULONG unknown )
{
    struct file_view *view;
    unsigned int status = STATUS_SUCCESS;
    sigset_t sigset;
    void *addr = ROUND_ADDR( *addr_ptr, page_mask );

    if (process != NtCurrentProcess())
    {
        apc_call_t call;
        apc_result_t result;

        memset( &call, 0, sizeof(call) );

        call.virtual_flush.type = APC_VIRTUAL_FLUSH;
        call.virtual_flush.addr = wine_server_client_ptr( addr );
        call.virtual_flush.size = *size_ptr;
        status = server_queue_process_apc( process, &call, &result );
        if (status != STATUS_SUCCESS) return status;

        if (result.virtual_flush.status == STATUS_SUCCESS)
        {
            *addr_ptr = wine_server_get_ptr( result.virtual_flush.addr );
            *size_ptr = result.virtual_flush.size;
        }
        return result.virtual_flush.status;
    }

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );
    if (!(view = find_view( addr, *size_ptr ))) status = STATUS_INVALID_PARAMETER;
    else
    {
        if (!*size_ptr) *size_ptr = view->size;
        *addr_ptr = addr;
#ifdef MS_ASYNC
        if (msync( addr, *size_ptr, MS_ASYNC )) status = STATUS_NOT_MAPPED_DATA;
#endif
    }
    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    return status;
}
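/* Usage sketch (illustrative only, kept out of the build): schedule a flush of
 * a mapped file view back to disk; a zero size flushes from addr to the end of
 * the containing view. */
#if 0
static void example_flush( const void *view_base )
{
    LPCVOID addr = view_base;
    SIZE_T size = 0;

    NtFlushVirtualMemory( NtCurrentProcess(), &addr, &size, 0 );
}
#endif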
/***********************************************************************
 *             NtGetWriteWatch   (NTDLL.@)
 *             ZwGetWriteWatch   (NTDLL.@)
 */
NTSTATUS WINAPI NtGetWriteWatch( HANDLE process, ULONG flags, PVOID base, SIZE_T size, PVOID *addresses,
                                 ULONG_PTR *count, ULONG *granularity )
{
    NTSTATUS status = STATUS_SUCCESS;
    sigset_t sigset;

    size = ROUND_SIZE( base, size );
    base = ROUND_ADDR( base, page_mask );

    if (!count || !granularity) return STATUS_ACCESS_VIOLATION;
    if (!*count || !size) return STATUS_INVALID_PARAMETER;
    if (flags & ~WRITE_WATCH_FLAG_RESET) return STATUS_INVALID_PARAMETER;
    if (!addresses) return STATUS_ACCESS_VIOLATION;

    TRACE( "%p %x %p-%p %p %lu\n", process, (int)flags, base, (char *)base + size,
           addresses, *count );

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );

    if (is_write_watch_range( base, size ))
    {
        ULONG_PTR pos = 0;
        char *addr = base;
        char *end = addr + size;

        while (pos < *count && addr < end)
        {
            if (!(get_page_vprot( addr ) & VPROT_WRITEWATCH)) addresses[pos++] = addr;
            addr += page_size;
        }
        if (flags & WRITE_WATCH_FLAG_RESET) reset_write_watches( base, addr - (char *)base );
        *count = pos;
        *granularity = page_size;
    }
    else status = STATUS_INVALID_PARAMETER;

    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    return status;
}
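/* Usage sketch (illustrative only, kept out of the build): allocate a
 * write-watched region, dirty one page, then retrieve and reset the list of
 * touched page addresses. */
#if 0
static void example_write_watch(void)
{
    void *base = NULL;
    SIZE_T size = 0x10000;
    void *hits[16];
    ULONG_PTR count = 16;
    ULONG granularity;

    if (NtAllocateVirtualMemory( NtCurrentProcess(), &base, 0, &size,
                                 MEM_RESERVE | MEM_COMMIT | MEM_WRITE_WATCH, PAGE_READWRITE ))
        return;
    ((char *)base)[0] = 1;  /* dirty the first page */
    NtGetWriteWatch( NtCurrentProcess(), WRITE_WATCH_FLAG_RESET, base, size,
                     hits, &count, &granularity );
    /* on success, count holds the number of dirty page addresses returned */
    size = 0;
    NtFreeVirtualMemory( NtCurrentProcess(), &base, &size, MEM_RELEASE );
}
#endif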
/***********************************************************************
 *             NtResetWriteWatch   (NTDLL.@)
 *             ZwResetWriteWatch   (NTDLL.@)
 */
NTSTATUS WINAPI NtResetWriteWatch( HANDLE process, PVOID base, SIZE_T size )
{
    NTSTATUS status = STATUS_SUCCESS;
    sigset_t sigset;

    size = ROUND_SIZE( base, size );
    base = ROUND_ADDR( base, page_mask );

    TRACE( "%p %p-%p\n", process, base, (char *)base + size );

    if (!size) return STATUS_INVALID_PARAMETER;

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );

    if (is_write_watch_range( base, size ))
        reset_write_watches( base, size );
    else
        status = STATUS_INVALID_PARAMETER;

    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    return status;
}
/***********************************************************************
 *             NtReadVirtualMemory   (NTDLL.@)
 *             ZwReadVirtualMemory   (NTDLL.@)
 */
NTSTATUS WINAPI NtReadVirtualMemory( HANDLE process, const void *addr, void *buffer,
                                     SIZE_T size, SIZE_T *bytes_read )
{
    unsigned int status;

    if (virtual_check_buffer_for_write( buffer, size ))
    {
        SERVER_START_REQ( read_process_memory )
        {
            req->handle = wine_server_obj_handle( process );
            req->addr   = wine_server_client_ptr( addr );
            wine_server_set_reply( req, buffer, size );
            if ((status = wine_server_call( req ))) size = 0;
        }
        SERVER_END_REQ;
    }
    else
    {
        status = STATUS_ACCESS_VIOLATION;
        size = 0;
    }
    if (bytes_read) *bytes_read = size;
    return status;
}
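/* Usage sketch (illustrative only, kept out of the build; the helper name is
 * hypothetical): read another process's memory through the wineserver,
 * tolerating failure by checking the returned byte count. */
#if 0
static SIZE_T example_remote_read( HANDLE process, const void *remote_addr )
{
    char buffer[256];
    SIZE_T bytes = 0;

    NtReadVirtualMemory( process, remote_addr, buffer, sizeof(buffer), &bytes );
    return bytes;  /* 0 on failure */
}
#endif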
/***********************************************************************
 *             NtWriteVirtualMemory   (NTDLL.@)
 *             ZwWriteVirtualMemory   (NTDLL.@)
 */
NTSTATUS WINAPI NtWriteVirtualMemory( HANDLE process, void *addr, const void *buffer,
                                      SIZE_T size, SIZE_T *bytes_written )
{
    unsigned int status;

    if (virtual_check_buffer_for_read( buffer, size ))
    {
        SERVER_START_REQ( write_process_memory )
        {
            req->handle = wine_server_obj_handle( process );
            req->addr   = wine_server_client_ptr( addr );
            wine_server_add_data( req, buffer, size );
            if ((status = wine_server_call( req ))) size = 0;
        }
        SERVER_END_REQ;
    }
    else
    {
        status = STATUS_PARTIAL_COPY;
        size = 0;
    }
    if (bytes_written) *bytes_written = size;
    return status;
}
/***********************************************************************
 *             NtAreMappedFilesTheSame   (NTDLL.@)
 *             ZwAreMappedFilesTheSame   (NTDLL.@)
 */
NTSTATUS WINAPI NtAreMappedFilesTheSame(PVOID addr1, PVOID addr2)
{
    struct file_view *view1, *view2;
    unsigned int status;
    sigset_t sigset;

    TRACE("%p %p\n", addr1, addr2);

    server_enter_uninterrupted_section( &virtual_mutex, &sigset );

    view1 = find_view( addr1, 0 );
    view2 = find_view( addr2, 0 );

    if (!view1 || !view2)
        status = STATUS_INVALID_ADDRESS;
    else if (is_view_valloc( view1 ) || is_view_valloc( view2 ))
        status = STATUS_CONFLICTING_ADDRESSES;
    else if (view1 == view2)
        status = STATUS_SUCCESS;
    else if ((view1->protect & VPROT_SYSTEM) || (view2->protect & VPROT_SYSTEM))
        status = STATUS_NOT_SAME_DEVICE;
    else
    {
        SERVER_START_REQ( is_same_mapping )
        {
            req->base1 = wine_server_client_ptr( view1->base );
            req->base2 = wine_server_client_ptr( view2->base );
            status = wine_server_call( req );
        }
        SERVER_END_REQ;
    }

    server_leave_uninterrupted_section( &virtual_mutex, &sigset );
    return status;
}
static NTSTATUS prefetch_memory( HANDLE process, ULONG_PTR count,
                                 PMEMORY_RANGE_ENTRY addresses, ULONG flags )
{
    ULONG_PTR i;
    PVOID base;
    SIZE_T size;
    static unsigned int once;

    if (!once++)
    {
        FIXME( "(process=%p,flags=%u) NtSetInformationVirtualMemory(VmPrefetchInformation) partial stub\n",
               process, (int)flags );
    }

    for (i = 0; i < count; i++)
    {
        if (!addresses[i].NumberOfBytes) return STATUS_INVALID_PARAMETER_4;
    }

    if (process != NtCurrentProcess()) return STATUS_SUCCESS;

    for (i = 0; i < count; i++)
    {
        base = ROUND_ADDR( addresses[i].VirtualAddress, page_mask );
        size = ROUND_SIZE( addresses[i].VirtualAddress, addresses[i].NumberOfBytes );
        madvise( base, size, MADV_WILLNEED );
    }

    return STATUS_SUCCESS;
}
/***********************************************************************
 *           NtSetInformationVirtualMemory   (NTDLL.@)
 *           ZwSetInformationVirtualMemory   (NTDLL.@)
 */
NTSTATUS WINAPI NtSetInformationVirtualMemory( HANDLE process,
                                               VIRTUAL_MEMORY_INFORMATION_CLASS info_class,
                                               ULONG_PTR count, PMEMORY_RANGE_ENTRY addresses,
                                               PVOID ptr, ULONG size )
{
    TRACE("(%p, info_class=%d, %lu, %p, %p, %u)\n",
          process, info_class, count, addresses, ptr, (int)size);

    switch (info_class)
    {
    case VmPrefetchInformation:
        if (!ptr) return STATUS_INVALID_PARAMETER_5;
        if (size != sizeof(ULONG)) return STATUS_INVALID_PARAMETER_6;
        if (!count) return STATUS_INVALID_PARAMETER_3;
        return prefetch_memory( process, count, addresses, *(ULONG *)ptr );

    default:
        FIXME("(%p,info_class=%d,%lu,%p,%p,%u) Unknown information class\n",
              process, info_class, count, addresses, ptr, (int)size);
        return STATUS_INVALID_PARAMETER_2;
    }
}
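/* Usage sketch (illustrative only, kept out of the build; the helper name is
 * hypothetical): prefetch two ranges. The ULONG pointed to by the last
 * parameter carries the prefetch flags, which Windows currently requires to
 * be 0. */
#if 0
static NTSTATUS example_prefetch( void *a, SIZE_T a_len, void *b, SIZE_T b_len )
{
    MEMORY_RANGE_ENTRY ranges[2] = { { a, a_len }, { b, b_len } };
    ULONG flags = 0;

    return NtSetInformationVirtualMemory( NtCurrentProcess(), VmPrefetchInformation,
                                          2, ranges, &flags, sizeof(flags) );
}
#endif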
/**********************************************************************
 *           NtFlushInstructionCache  (NTDLL.@)
 */
NTSTATUS WINAPI NtFlushInstructionCache( HANDLE handle, const void *addr, SIZE_T size )
{
#if defined(__x86_64__) || defined(__i386__)
    /* no-op: instruction and data caches are coherent on x86 */
#elif defined(HAVE___CLEAR_CACHE)
    if (handle == GetCurrentProcess())
    {
        __clear_cache( (char *)addr, (char *)addr + size );
    }
    else
    {
        static int once;
        if (!once++) FIXME( "%p %p %ld other process not supported\n", handle, addr, size );
    }
#else
    static int once;
    if (!once++) FIXME( "%p %p %ld\n", handle, addr, size );
#endif
    return STATUS_SUCCESS;
}
/**********************************************************************
 *           NtFlushProcessWriteBuffers  (NTDLL.@)
 */
void WINAPI NtFlushProcessWriteBuffers(void)
{
    static int once = 0;
    if (!once++) FIXME( "stub\n" );
}
/**********************************************************************
 *           NtCreatePagingFile  (NTDLL.@)
 */
NTSTATUS WINAPI NtCreatePagingFile( UNICODE_STRING *name, LARGE_INTEGER *min_size,
                                    LARGE_INTEGER *max_size, LARGE_INTEGER *actual_size )
{
    FIXME( "(%s %p %p %p) stub\n", debugstr_us(name), min_size, max_size, actual_size );
    return STATUS_SUCCESS;
}
#ifndef _WIN64

/***********************************************************************
 *             NtWow64AllocateVirtualMemory64   (NTDLL.@)
 *             ZwWow64AllocateVirtualMemory64   (NTDLL.@)
 */
NTSTATUS WINAPI NtWow64AllocateVirtualMemory64( HANDLE process, ULONG64 *ret, ULONG64 zero_bits,
                                                ULONG64 *size_ptr, ULONG type, ULONG protect )
{
    void *base;
    SIZE_T size;
    unsigned int status;

    TRACE("%p %s %s %x %08x\n", process,
          wine_dbgstr_longlong(*ret), wine_dbgstr_longlong(*size_ptr), (int)type, (int)protect );

    if (!*size_ptr) return STATUS_INVALID_PARAMETER_4;
    if (zero_bits > 21 && zero_bits < 32) return STATUS_INVALID_PARAMETER_3;

    if (process != NtCurrentProcess())
    {
        apc_call_t call;
        apc_result_t result;

        memset( &call, 0, sizeof(call) );

        call.virtual_alloc.type      = APC_VIRTUAL_ALLOC;
        call.virtual_alloc.addr      = *ret;
        call.virtual_alloc.size      = *size_ptr;
        call.virtual_alloc.zero_bits = zero_bits;
        call.virtual_alloc.op_type   = type;
        call.virtual_alloc.prot      = protect;
        status = server_queue_process_apc( process, &call, &result );
        if (status != STATUS_SUCCESS) return status;

        if (result.virtual_alloc.status == STATUS_SUCCESS)
        {
            *ret      = result.virtual_alloc.addr;
            *size_ptr = result.virtual_alloc.size;
        }
        return result.virtual_alloc.status;
    }

    base = (void *)(ULONG_PTR)*ret;
    size = *size_ptr;
    if ((ULONG_PTR)base != *ret) return STATUS_CONFLICTING_ADDRESSES;
    if (size != *size_ptr) return STATUS_WORKING_SET_LIMIT_RANGE;

    status = NtAllocateVirtualMemory( process, &base, zero_bits, &size, type, protect );
    if (!status)
    {
        *ret = (ULONG_PTR)base;
        *size_ptr = size;
    }
    return status;
}
/***********************************************************************
 *             NtWow64ReadVirtualMemory64   (NTDLL.@)
 *             ZwWow64ReadVirtualMemory64   (NTDLL.@)
 */
NTSTATUS WINAPI NtWow64ReadVirtualMemory64( HANDLE process, ULONG64 addr, void *buffer,
                                            ULONG64 size, ULONG64 *bytes_read )
{
    unsigned int status;

    if (size > MAXLONG) size = MAXLONG;

    if (virtual_check_buffer_for_write( buffer, size ))
    {
        SERVER_START_REQ( read_process_memory )
        {
            req->handle = wine_server_obj_handle( process );
            req->addr   = addr;
            wine_server_set_reply( req, buffer, size );
            if ((status = wine_server_call( req ))) size = 0;
        }
        SERVER_END_REQ;
    }
    else
    {
        status = STATUS_ACCESS_VIOLATION;
        size = 0;
    }
    if (bytes_read) *bytes_read = size;
    return status;
}
/***********************************************************************
 *             NtWow64WriteVirtualMemory64   (NTDLL.@)
 *             ZwWow64WriteVirtualMemory64   (NTDLL.@)
 */
NTSTATUS WINAPI NtWow64WriteVirtualMemory64( HANDLE process, ULONG64 addr, const void *buffer,
                                             ULONG64 size, ULONG64 *bytes_written )
{
    unsigned int status;

    if (size > MAXLONG) size = MAXLONG;

    if (virtual_check_buffer_for_read( buffer, size ))
    {
        SERVER_START_REQ( write_process_memory )
        {
            req->handle = wine_server_obj_handle( process );
            req->addr   = addr;
            wine_server_add_data( req, buffer, size );
            if ((status = wine_server_call( req ))) size = 0;
        }
        SERVER_END_REQ;
    }
    else
    {
        status = STATUS_PARTIAL_COPY;
        size = 0;
    }
    if (bytes_written) *bytes_written = size;
    return status;
}
/***********************************************************************
 *             NtWow64GetNativeSystemInformation   (NTDLL.@)
 *             ZwWow64GetNativeSystemInformation   (NTDLL.@)
 */
NTSTATUS WINAPI NtWow64GetNativeSystemInformation( SYSTEM_INFORMATION_CLASS class, void *info,
                                                   ULONG len, ULONG *retlen )
{
    NTSTATUS status;

    switch (class)
    {
    case SystemCpuInformation:
        status = NtQuerySystemInformation( class, info, len, retlen );
        if (!status && is_old_wow64())
        {
            SYSTEM_CPU_INFORMATION *cpu = info;

            if (cpu->ProcessorArchitecture == PROCESSOR_ARCHITECTURE_INTEL)
                cpu->ProcessorArchitecture = PROCESSOR_ARCHITECTURE_AMD64;
        }
        return status;
    case SystemBasicInformation:
    case SystemEmulationBasicInformation:
    case SystemEmulationProcessorInformation:
        return NtQuerySystemInformation( class, info, len, retlen );
    case SystemNativeBasicInformation:
        return NtQuerySystemInformation( SystemBasicInformation, info, len, retlen );
    default:
        if (is_old_wow64()) return STATUS_INVALID_INFO_CLASS;
        return NtQuerySystemInformation( class, info, len, retlen );
    }
}
/***********************************************************************
 *             NtWow64IsProcessorFeaturePresent   (NTDLL.@)
 *             ZwWow64IsProcessorFeaturePresent   (NTDLL.@)
 */
NTSTATUS WINAPI NtWow64IsProcessorFeaturePresent( UINT feature )
{
    return feature < PROCESSOR_FEATURE_MAX && user_shared_data->ProcessorFeatures[feature];
}

#endif  /* _WIN64 */