/*
 * Server-side file mapping management
 *
 * Copyright (C) 1999 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include "config.h"

#include <assert.h>
#include <fcntl.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <unistd.h>

#include "ntstatus.h"
#define WIN32_NO_STATUS
#include "windef.h"
#include "winternl.h"
#include "ddk/wdm.h"

#include "file.h"
#include "handle.h"
#include "thread.h"
#include "process.h"
#include "request.h"
#include "security.h"

/* list of memory ranges, used to store committed info */
struct ranges
{
    struct object   obj;         /* object header */
    unsigned int    count;       /* number of used ranges */
    unsigned int    max;         /* number of allocated ranges */
    struct range
    {
        file_pos_t  start;
        file_pos_t  end;
    } *ranges;
};

static void ranges_dump( struct object *obj, int verbose );
static void ranges_destroy( struct object *obj );

static const struct object_ops ranges_ops =
{
    sizeof(struct ranges),     /* size */
    &no_type,                  /* type */
    ranges_dump,               /* dump */
    no_add_queue,              /* add_queue */
    NULL,                      /* remove_queue */
    NULL,                      /* signaled */
    NULL,                      /* satisfied */
    no_signal,                 /* signal */
    no_get_fd,                 /* get_fd */
    default_map_access,        /* map_access */
    default_get_sd,            /* get_sd */
    default_set_sd,            /* set_sd */
    no_get_full_name,          /* get_full_name */
    no_lookup_name,            /* lookup_name */
    no_link_name,              /* link_name */
    NULL,                      /* unlink_name */
    no_open_file,              /* open_file */
    no_kernel_obj_list,        /* get_kernel_obj_list */
    no_close_handle,           /* close_handle */
    ranges_destroy             /* destroy */
};

/* file backing the shared sections of a PE image mapping */
struct shared_map
{
    struct object   obj;         /* object header */
    struct fd      *fd;          /* file descriptor of the mapped PE file */
    struct file    *file;        /* temp file holding the shared data */
    struct list     entry;       /* entry in global shared maps list */
};

static void shared_map_dump( struct object *obj, int verbose );
static void shared_map_destroy( struct object *obj );

static const struct object_ops shared_map_ops =
{
    sizeof(struct shared_map), /* size */
    &no_type,                  /* type */
    shared_map_dump,           /* dump */
    no_add_queue,              /* add_queue */
    NULL,                      /* remove_queue */
    NULL,                      /* signaled */
    NULL,                      /* satisfied */
    no_signal,                 /* signal */
    no_get_fd,                 /* get_fd */
    default_map_access,        /* map_access */
    default_get_sd,            /* get_sd */
    default_set_sd,            /* set_sd */
    no_get_full_name,          /* get_full_name */
    no_lookup_name,            /* lookup_name */
    no_link_name,              /* link_name */
    NULL,                      /* unlink_name */
    no_open_file,              /* open_file */
    no_kernel_obj_list,        /* get_kernel_obj_list */
    no_close_handle,           /* close_handle */
    shared_map_destroy         /* destroy */
};

static struct list shared_map_list = LIST_INIT( shared_map_list );

/* memory view mapped in client address space */
struct memory_view
{
    struct list     entry;       /* entry in per-process view list */
    struct fd      *fd;          /* fd for mapped file */
    struct ranges  *committed;   /* list of committed ranges in this mapping */
    struct shared_map *shared;   /* temp file for shared PE mapping */
    pe_image_info_t image;       /* image info (for PE image mapping) */
    unsigned int    flags;       /* SEC_* flags */
    client_ptr_t    base;        /* view base address (in process addr space) */
    mem_size_t      size;        /* view size */
    file_pos_t      start;       /* start offset in mapping */
    data_size_t     namelen;
    WCHAR           name[1];     /* filename for .so dll image views */
};

static const WCHAR mapping_name[] = {'S','e','c','t','i','o','n'};

struct type_descr mapping_type =
{
    { mapping_name, sizeof(mapping_name) },   /* name */
    SECTION_ALL_ACCESS | SYNCHRONIZE,         /* valid_access */
    {                                         /* mapping */
        STANDARD_RIGHTS_READ | SECTION_QUERY | SECTION_MAP_READ,
        STANDARD_RIGHTS_WRITE | SECTION_MAP_WRITE,
        STANDARD_RIGHTS_EXECUTE | SECTION_MAP_EXECUTE,
        SECTION_ALL_ACCESS
    },
};

struct mapping
{
    struct object   obj;         /* object header */
    mem_size_t      size;        /* mapping size */
    unsigned int    flags;       /* SEC_* flags */
    struct fd      *fd;          /* fd for mapped file */
    pe_image_info_t image;       /* image info (for PE image mapping) */
    struct ranges  *committed;   /* list of committed ranges in this mapping */
    struct shared_map *shared;   /* temp file for shared PE mapping */
};

static void mapping_dump( struct object *obj, int verbose );
static struct fd *mapping_get_fd( struct object *obj );
static void mapping_destroy( struct object *obj );
static enum server_fd_type mapping_get_fd_type( struct fd *fd );

static const struct object_ops mapping_ops =
{
    sizeof(struct mapping),      /* size */
    &mapping_type,               /* type */
    mapping_dump,                /* dump */
    no_add_queue,                /* add_queue */
    NULL,                        /* remove_queue */
    NULL,                        /* signaled */
    NULL,                        /* satisfied */
    no_signal,                   /* signal */
    mapping_get_fd,              /* get_fd */
    default_map_access,          /* map_access */
    default_get_sd,              /* get_sd */
    default_set_sd,              /* set_sd */
    default_get_full_name,       /* get_full_name */
    no_lookup_name,              /* lookup_name */
    directory_link_name,         /* link_name */
    default_unlink_name,         /* unlink_name */
    no_open_file,                /* open_file */
    no_kernel_obj_list,          /* get_kernel_obj_list */
    no_close_handle,             /* close_handle */
    mapping_destroy              /* destroy */
};

static const struct fd_ops mapping_fd_ops =
{
    default_fd_get_poll_events,  /* get_poll_events */
    default_poll_event,          /* poll_event */
    mapping_get_fd_type,         /* get_fd_type */
    no_fd_read,                  /* read */
    no_fd_write,                 /* write */
    no_fd_flush,                 /* flush */
    no_fd_get_file_info,         /* get_file_info */
    no_fd_get_volume_info,       /* get_volume_info */
    no_fd_ioctl,                 /* ioctl */
    default_fd_cancel_async,     /* cancel_async */
    no_fd_queue_async,           /* queue_async */
    default_fd_reselect_async    /* reselect_async */
};

/* free address ranges for PE image mappings */
struct addr_range
{
    unsigned int count;
    unsigned int size;
    struct
    {
        client_ptr_t base;
        mem_size_t   size;
    } *free;
};

static size_t page_mask;
static const mem_size_t granularity_mask = 0xffff;
static struct addr_range ranges32;
static struct addr_range ranges64;

struct session_block
{
    struct list  entry;          /* entry in the session block list */
    const char  *data;           /* base pointer for the mmaped data */
    mem_size_t   offset;         /* offset of data in the session shared mapping */
    mem_size_t   used_size;      /* used size for previously allocated objects */
    mem_size_t   block_size;     /* total size of the block */
};

struct session_object
{
    struct list     entry;       /* entry in the session free object list */
    mem_size_t      offset;      /* offset of obj in the session shared mapping */
    shared_object_t obj;         /* object actually shared with the client */
};

struct session
{
    struct list blocks;
    struct list free_objects;
    object_id_t last_object_id;
};

static struct mapping *session_mapping;
static struct session session =
{
    .blocks = LIST_INIT(session.blocks),
    .free_objects = LIST_INIT(session.free_objects),
};

#define ROUND_SIZE(size)  (((size) + page_mask) & ~page_mask)
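
/* initialize the page mask and the free address ranges used for PE image mappings */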
void init_memory(void)
{
    page_mask = sysconf( _SC_PAGESIZE ) - 1;
    free_map_addr( 0x60000000, 0x1c000000 );
    free_map_addr( 0x600000000000, 0x100000000000 );
}

static void ranges_dump( struct object *obj, int verbose )
{
    struct ranges *ranges = (struct ranges *)obj;
    fprintf( stderr, "Memory ranges count=%u\n", ranges->count );
}

static void ranges_destroy( struct object *obj )
{
    struct ranges *ranges = (struct ranges *)obj;
    free( ranges->ranges );
}

static void shared_map_dump( struct object *obj, int verbose )
{
    struct shared_map *shared = (struct shared_map *)obj;
    fprintf( stderr, "Shared mapping fd=%p file=%p\n", shared->fd, shared->file );
}

static void shared_map_destroy( struct object *obj )
{
    struct shared_map *shared = (struct shared_map *)obj;

    release_object( shared->fd );
    release_object( shared->file );
    list_remove( &shared->entry );
}

/* extend a file beyond the current end of file */
int grow_file( int unix_fd, file_pos_t new_size )
{
    static const char zero;
    off_t size = new_size;

    if (sizeof(new_size) > sizeof(size) && size != new_size)
    {
        set_error( STATUS_INVALID_PARAMETER );
        return 0;
    }
    /* extend the file one byte beyond the requested size and then truncate it */
    /* this should work around ftruncate implementations that can't extend files */
    if (pwrite( unix_fd, &zero, 1, size ) != -1)
    {
        ftruncate( unix_fd, size );
        return 1;
    }
    file_set_error();
    return 0;
}

/* simplified version of mkstemps() */
static int make_temp_file( char name[16] )
{
    static unsigned int value;
    int i, fd = -1;

    value += (current_time >> 16) + current_time;
    for (i = 0; i < 0x8000 && fd < 0; i++, value += 7777)
    {
        snprintf( name, 16, "tmpmap-%08x", value );
        fd = open( name, O_RDWR | O_CREAT | O_EXCL, 0600 );
    }
    return fd;
}

/* check if the current directory allows exec mappings */
static int check_current_dir_for_exec(void)
{
    int fd;
    char tmpfn[16];
    void *ret = MAP_FAILED;

    fd = make_temp_file( tmpfn );
    if (fd == -1) return 0;
    if (grow_file( fd, 1 ))
    {
        ret = mmap( NULL, get_page_size(), PROT_READ | PROT_EXEC, MAP_PRIVATE, fd, 0 );
        if (ret != MAP_FAILED) munmap( ret, get_page_size() );
    }
    close( fd );
    unlink( tmpfn );
    return (ret != MAP_FAILED);
}

/* create a temp file for anonymous mappings */
static int create_temp_file( file_pos_t size )
{
    static int temp_dir_fd = -1;
    char tmpfn[16];
    int fd;

    if (temp_dir_fd == -1)
    {
        temp_dir_fd = server_dir_fd;
        if (!check_current_dir_for_exec())
        {
            /* the server dir is noexec, try the config dir instead */
            fchdir( config_dir_fd );
            if (check_current_dir_for_exec())
                temp_dir_fd = config_dir_fd;
            else  /* neither works, fall back to server dir */
                fchdir( server_dir_fd );
        }
    }
    else if (temp_dir_fd != server_dir_fd) fchdir( temp_dir_fd );

    fd = make_temp_file( tmpfn );
    if (fd != -1)
    {
        if (!grow_file( fd, size ))
        {
            close( fd );
            fd = -1;
        }
        unlink( tmpfn );
    }
    else file_set_error();

    if (temp_dir_fd != server_dir_fd) fchdir( server_dir_fd );
    return fd;
}

/* find a memory view from its base address */
struct memory_view *find_mapped_view( struct process *process, client_ptr_t base )
{
    struct memory_view *view;

    LIST_FOR_EACH_ENTRY( view, &process->views, struct memory_view, entry )
        if (view->base == base) return view;

    set_error( STATUS_NOT_MAPPED_VIEW );
    return NULL;
}

/* find a memory view from any address inside it */
static struct memory_view *find_mapped_addr( struct process *process, client_ptr_t addr )
{
    struct memory_view *view;

    LIST_FOR_EACH_ENTRY( view, &process->views, struct memory_view, entry )
        if (addr >= view->base && addr < view->base + view->size) return view;

    set_error( STATUS_NOT_MAPPED_VIEW );
    return NULL;
}

/* check if an address range is valid for creating a view */
static int is_valid_view_addr( struct process *process, client_ptr_t addr, mem_size_t size )
{
    struct memory_view *view;

    if (!size) return 0;
    if (addr & page_mask) return 0;
    if (addr + size < addr) return 0;  /* overflow */

    /* check for overlapping view */
    LIST_FOR_EACH_ENTRY( view, &process->views, struct memory_view, entry )
    {
        if (view->base + view->size <= addr) continue;
        if (view->base >= addr + size) continue;
        return 0;
    }
    return 1;
}

/* get the main exe memory view */
struct memory_view *get_exe_view( struct process *process )
{
    return LIST_ENTRY( list_head( &process->views ), struct memory_view, entry );
}
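
/* send a debug event for a PE image view; return 0 if the view is not an image */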
static int generate_dll_event( struct thread *thread, int code, struct memory_view *view )
{
    if (!(view->flags & SEC_IMAGE)) return 0;
    generate_debug_event( thread, code, view );
    return 1;
}

/* add a view to the process list */
/* return 1 if this is the main exe view */
static int add_process_view( struct thread *thread, struct memory_view *view )
{
    struct process *process = thread->process;
    struct unicode_str name;

    if (view->flags & SEC_IMAGE)
    {
        if (is_process_init_done( process ))
        {
            generate_dll_event( thread, DbgLoadDllStateChange, view );
        }
        else if (!(view->image.image_charact & IMAGE_FILE_DLL))
        {
            /* main exe */
            free( process->image );
            process->image = NULL;
            if (get_view_nt_name( view, &name ) && (process->image = memdup( name.str, name.len )))
                process->imagelen = name.len;
            process->image_info = view->image;
            list_add_head( &process->views, &view->entry );
            return 1;
        }
    }
    list_add_tail( &process->views, &view->entry );
    return 0;
}
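
/* release a memory view and remove it from the process list */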
static void free_memory_view( struct memory_view *view )
{
    if (view->fd) release_object( view->fd );
    if (view->committed) release_object( view->committed );
    if (view->shared) release_object( view->shared );
    list_remove( &view->entry );
    free( view );
}

/* free all mapped views at process exit */
void free_mapped_views( struct process *process )
{
    struct list *ptr;

    while ((ptr = list_head( &process->views )))
        free_memory_view( LIST_ENTRY( ptr, struct memory_view, entry ));
}

/* find the shared PE mapping for a given mapping */
static struct shared_map *get_shared_file( struct fd *fd )
{
    struct shared_map *ptr;

    LIST_FOR_EACH_ENTRY( ptr, &shared_map_list, struct shared_map, entry )
        if (is_same_file_fd( ptr->fd, fd ))
            return (struct shared_map *)grab_object( ptr );
    return NULL;
}

/* return the size of the memory mapping and file range of a given section */
static inline void get_section_sizes( const IMAGE_SECTION_HEADER *sec, size_t *map_size,
                                      off_t *file_start, size_t *file_size )
{
    static const unsigned int sector_align = 0x1ff;

    if (!sec->Misc.VirtualSize) *map_size = ROUND_SIZE( sec->SizeOfRawData );
    else *map_size = ROUND_SIZE( sec->Misc.VirtualSize );

    *file_start = sec->PointerToRawData & ~sector_align;
    *file_size = (sec->SizeOfRawData + (sec->PointerToRawData & sector_align) + sector_align) & ~sector_align;
    if (*file_size > *map_size) *file_size = *map_size;
}

/* add a range to the committed list */
static void add_committed_range( struct memory_view *view, file_pos_t start, file_pos_t end )
{
    unsigned int i, j;
    struct ranges *committed = view->committed;
    struct range *ranges;

    if ((start & page_mask) || (end & page_mask) ||
        start >= view->size || end >= view->size ||
        start >= end)
    {
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }

    if (!committed) return;  /* everything committed already */

    start += view->start;
    end += view->start;

    for (i = 0, ranges = committed->ranges; i < committed->count; i++)
    {
        if (ranges[i].start > end) break;
        if (ranges[i].end < start) continue;
        if (ranges[i].start > start) ranges[i].start = start; /* extend downwards */
        if (ranges[i].end < end)  /* extend upwards and maybe merge with next */
        {
            for (j = i + 1; j < committed->count; j++)
            {
                if (ranges[j].start > end) break;
                if (ranges[j].end > end) end = ranges[j].end;
            }
            if (j > i + 1)
            {
                memmove( &ranges[i + 1], &ranges[j], (committed->count - j) * sizeof(*ranges) );
                committed->count -= j - (i + 1);
            }
            ranges[i].end = end;
        }
        return;
    }

    /* now add a new range */

    if (committed->count == committed->max)
    {
        unsigned int new_size = committed->max * 2;
        struct range *new_ptr = realloc( committed->ranges, new_size * sizeof(*new_ptr) );
        if (!new_ptr) return;
        committed->max = new_size;
        ranges = committed->ranges = new_ptr;
    }
    memmove( &ranges[i + 1], &ranges[i], (committed->count - i) * sizeof(*ranges) );
    ranges[i].start = start;
    ranges[i].end = end;
    committed->count++;
}

/* find the range containing start and return whether it's committed */
static int find_committed_range( struct memory_view *view, file_pos_t start, mem_size_t *size )
{
    unsigned int i;
    struct ranges *committed = view->committed;
    struct range *ranges;

    if ((start & page_mask) || start >= view->size)
    {
        set_error( STATUS_INVALID_PARAMETER );
        return 0;
    }
    if (!committed)  /* everything is committed */
    {
        *size = view->size - start;
        return 1;
    }
    for (i = 0, ranges = committed->ranges; i < committed->count; i++)
    {
        if (ranges[i].start > view->start + start)
        {
            *size = min( ranges[i].start, view->start + view->size ) - (view->start + start);
            return 0;
        }
        if (ranges[i].end > view->start + start)
        {
            *size = min( ranges[i].end, view->start + view->size ) - (view->start + start);
            return 1;
        }
    }
    *size = view->size - start;
    return 0;
}

/* allocate and fill the temp file for a shared PE image mapping */
static int build_shared_mapping( struct mapping *mapping, int fd,
                                 IMAGE_SECTION_HEADER *sec, unsigned int nb_sec )
{
    struct shared_map *shared;
    struct file *file;
    unsigned int i;
    mem_size_t total_size;
    size_t file_size, map_size, max_size;
    off_t shared_pos, read_pos, write_pos;
    char *buffer = NULL;
    int shared_fd;
    long toread;

    /* compute the total size of the shared mapping */

    total_size = max_size = 0;
    for (i = 0; i < nb_sec; i++)
    {
        if ((sec[i].Characteristics & IMAGE_SCN_MEM_SHARED) &&
            (sec[i].Characteristics & IMAGE_SCN_MEM_WRITE))
        {
            get_section_sizes( &sec[i], &map_size, &read_pos, &file_size );
            if (file_size > max_size) max_size = file_size;
            total_size += map_size;
        }
    }
    if (!total_size) return 1;  /* nothing to do */

    if ((mapping->shared = get_shared_file( mapping->fd ))) return 1;

    /* create a temp file for the mapping */

    if ((shared_fd = create_temp_file( total_size )) == -1) return 0;
    if (!(file = create_file_for_fd( shared_fd, FILE_GENERIC_READ|FILE_GENERIC_WRITE, 0 ))) return 0;

    if (!(buffer = malloc( max_size ))) goto error;

    /* copy the shared sections data into the temp file */

    shared_pos = 0;
    for (i = 0; i < nb_sec; i++)
    {
        if (!(sec[i].Characteristics & IMAGE_SCN_MEM_SHARED)) continue;
        if (!(sec[i].Characteristics & IMAGE_SCN_MEM_WRITE)) continue;
        get_section_sizes( &sec[i], &map_size, &read_pos, &file_size );
        write_pos = shared_pos;
        shared_pos += map_size;
        if (!sec[i].PointerToRawData || !file_size) continue;
        toread = file_size;
        while (toread)
        {
            long res = pread( fd, buffer + file_size - toread, toread, read_pos );
            if (!res && toread < 0x200)  /* partial sector at EOF is not an error */
            {
                file_size -= toread;
                break;
            }
            if (res <= 0) goto error;
            toread -= res;
            read_pos += res;
        }
        if (pwrite( shared_fd, buffer, file_size, write_pos ) != file_size) goto error;
    }
    if (!(shared = alloc_object( &shared_map_ops ))) goto error;
    shared->fd = (struct fd *)grab_object( mapping->fd );
    shared->file = file;
    list_add_head( &shared_map_list, &shared->entry );
    mapping->shared = shared;
    free( buffer );
    return 1;

 error:
    release_object( file );
    free( buffer );
    return 0;
}

/* load a data directory header from its section */
static int load_data_dir( void *dir, size_t dir_size, size_t va, size_t size, int unix_fd,
                          IMAGE_SECTION_HEADER *sec, unsigned int nb_sec )
{
    size_t map_size, file_size;
    off_t file_start;
    unsigned int i;

    if (!va || !size) return 0;

    for (i = 0; i < nb_sec; i++)
    {
        if (va < sec[i].VirtualAddress) continue;
        if (sec[i].Misc.VirtualSize && va - sec[i].VirtualAddress >= sec[i].Misc.VirtualSize) continue;
        get_section_sizes( &sec[i], &map_size, &file_start, &file_size );
        if (size >= map_size) continue;
        if (va - sec[i].VirtualAddress >= map_size - size) continue;
        if (size > dir_size) size = dir_size;
        if (size > file_size) size = file_size;
        return pread( unix_fd, dir, size, file_start + va - sec[i].VirtualAddress );
    }
    return 0;
}

/* load the CLR header from its section */
static int load_clr_header( IMAGE_COR20_HEADER *hdr, size_t va, size_t size, int unix_fd,
                            IMAGE_SECTION_HEADER *sec, unsigned int nb_sec )
{
    int ret = load_data_dir( hdr, sizeof(*hdr), va, size, unix_fd, sec, nb_sec );

    if (ret <= 0) return 0;
    if (ret < sizeof(*hdr)) memset( (char *)hdr + ret, 0, sizeof(*hdr) - ret );
    return (hdr->MajorRuntimeVersion > COR_VERSION_MAJOR_V2 ||
            (hdr->MajorRuntimeVersion == COR_VERSION_MAJOR_V2 &&
             hdr->MinorRuntimeVersion >= COR_VERSION_MINOR));
}

/* load the LOAD_CONFIG header from its section */
static int load_cfg_header( IMAGE_LOAD_CONFIG_DIRECTORY64 *cfg, size_t va, size_t size,
                            int unix_fd, IMAGE_SECTION_HEADER *sec, unsigned int nb_sec )
{
    unsigned int cfg_size;
    int ret = load_data_dir( cfg, sizeof(*cfg), va, size, unix_fd, sec, nb_sec );

    if (ret <= 0) return 0;
    cfg_size = ret;
    if (cfg_size < offsetof( IMAGE_LOAD_CONFIG_DIRECTORY64, Size ) + sizeof(cfg_size)) return 0;
    if (cfg_size > cfg->Size) cfg_size = cfg->Size;
    if (cfg_size < sizeof(*cfg)) memset( (char *)cfg + cfg_size, 0, sizeof(*cfg) - cfg_size );
    return 1;
}

/* retrieve the mapping parameters for an executable (PE) image */
static unsigned int get_image_params( struct mapping *mapping, file_pos_t file_size, int unix_fd )
{
    static const char builtin_signature[] = "Wine builtin DLL";
    static const char fakedll_signature[] = "Wine placeholder DLL";

    IMAGE_COR20_HEADER clr;
    IMAGE_SECTION_HEADER sec[96];
    struct
    {
        IMAGE_DOS_HEADER dos;
        char buffer[32];
    } mz;
    struct
    {
        DWORD Signature;
        IMAGE_FILE_HEADER FileHeader;
        union
        {
            IMAGE_OPTIONAL_HEADER32 hdr32;
            IMAGE_OPTIONAL_HEADER64 hdr64;
        } opt;
    } nt;
    union
    {
        IMAGE_LOAD_CONFIG_DIRECTORY32 cfg32;
        IMAGE_LOAD_CONFIG_DIRECTORY64 cfg64;
    } cfg;
    off_t pos;
    int size, has_relocs;
    size_t mz_size, clr_va = 0, clr_size = 0, cfg_va, cfg_size;
    unsigned int i;

    /* load the headers */

    if (!file_size) return STATUS_INVALID_FILE_FOR_SECTION;
    size = pread( unix_fd, &mz, sizeof(mz), 0 );
    if (size < sizeof(mz.dos)) return STATUS_INVALID_IMAGE_NOT_MZ;
    if (mz.dos.e_magic != IMAGE_DOS_SIGNATURE) return STATUS_INVALID_IMAGE_NOT_MZ;
    mz_size = size;
    pos = mz.dos.e_lfanew;

    size = pread( unix_fd, &nt, sizeof(nt), pos );
    if (size < sizeof(nt.Signature) + sizeof(nt.FileHeader)) return STATUS_INVALID_IMAGE_PROTECT;
    if (size < sizeof(nt)) memset( (char *)&nt + size, 0, sizeof(nt) - size );
    if (nt.Signature != IMAGE_NT_SIGNATURE)
    {
        IMAGE_OS2_HEADER *os2 = (IMAGE_OS2_HEADER *)&nt;
        if (os2->ne_magic != IMAGE_OS2_SIGNATURE) return STATUS_INVALID_IMAGE_PROTECT;
        if (os2->ne_exetyp == 2) return STATUS_INVALID_IMAGE_WIN_16;
        if (os2->ne_exetyp == 5) return STATUS_INVALID_IMAGE_PROTECT;
        return STATUS_INVALID_IMAGE_NE_FORMAT;
    }

    switch (nt.opt.hdr32.Magic)
    {
    case IMAGE_NT_OPTIONAL_HDR32_MAGIC:
        if (!is_machine_32bit( nt.FileHeader.Machine )) return STATUS_INVALID_IMAGE_FORMAT;
        if (!is_machine_supported( nt.FileHeader.Machine )) return STATUS_INVALID_IMAGE_FORMAT;

        if (nt.FileHeader.Machine != IMAGE_FILE_MACHINE_I386)  /* non-x86 platforms are more strict */
        {
            if (nt.opt.hdr32.SectionAlignment & page_mask)
                return STATUS_INVALID_IMAGE_FORMAT;
            if (!(nt.opt.hdr32.DllCharacteristics & IMAGE_DLLCHARACTERISTICS_NX_COMPAT))
                return STATUS_INVALID_IMAGE_FORMAT;
            if (!(nt.opt.hdr32.DllCharacteristics & IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE))
                return STATUS_INVALID_IMAGE_FORMAT;
        }
        if (nt.opt.hdr32.NumberOfRvaAndSizes > IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR)
        {
            clr_va = nt.opt.hdr32.DataDirectory[IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR].VirtualAddress;
            clr_size = nt.opt.hdr32.DataDirectory[IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR].Size;
        }
        cfg_va = nt.opt.hdr32.DataDirectory[IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG].VirtualAddress;
        cfg_size = nt.opt.hdr32.DataDirectory[IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG].Size;

        mapping->image.base = nt.opt.hdr32.ImageBase;
        mapping->image.entry_point = nt.opt.hdr32.AddressOfEntryPoint;
        mapping->image.map_size = ROUND_SIZE( nt.opt.hdr32.SizeOfImage );
        mapping->image.stack_size = nt.opt.hdr32.SizeOfStackReserve;
        mapping->image.stack_commit = nt.opt.hdr32.SizeOfStackCommit;
        mapping->image.subsystem = nt.opt.hdr32.Subsystem;
        mapping->image.subsystem_minor = nt.opt.hdr32.MinorSubsystemVersion;
        mapping->image.subsystem_major = nt.opt.hdr32.MajorSubsystemVersion;
        mapping->image.osversion_minor = nt.opt.hdr32.MinorOperatingSystemVersion;
        mapping->image.osversion_major = nt.opt.hdr32.MajorOperatingSystemVersion;
        mapping->image.dll_charact = nt.opt.hdr32.DllCharacteristics;
        mapping->image.contains_code = (nt.opt.hdr32.SizeOfCode ||
                                        nt.opt.hdr32.AddressOfEntryPoint ||
                                        nt.opt.hdr32.SectionAlignment & page_mask);
        mapping->image.header_size = nt.opt.hdr32.SizeOfHeaders;
        mapping->image.checksum = nt.opt.hdr32.CheckSum;
        mapping->image.image_flags = 0;

        has_relocs = (nt.opt.hdr32.NumberOfRvaAndSizes > IMAGE_DIRECTORY_ENTRY_BASERELOC &&
                      nt.opt.hdr32.DataDirectory[IMAGE_DIRECTORY_ENTRY_BASERELOC].VirtualAddress &&
                      nt.opt.hdr32.DataDirectory[IMAGE_DIRECTORY_ENTRY_BASERELOC].Size &&
                      !(nt.FileHeader.Characteristics & IMAGE_FILE_RELOCS_STRIPPED));
        if (nt.opt.hdr32.SectionAlignment & page_mask)
            mapping->image.image_flags |= IMAGE_FLAGS_ImageMappedFlat;
        else if ((nt.opt.hdr32.DllCharacteristics & IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE) &&
                 (has_relocs || mapping->image.contains_code) && !(clr_va && clr_size))
            mapping->image.image_flags |= IMAGE_FLAGS_ImageDynamicallyRelocated;
        break;

    case IMAGE_NT_OPTIONAL_HDR64_MAGIC:
        if (!is_machine_64bit( native_machine )) return STATUS_INVALID_IMAGE_WIN_64;
        if (!is_machine_64bit( nt.FileHeader.Machine )) return STATUS_INVALID_IMAGE_FORMAT;
        if (!is_machine_supported( nt.FileHeader.Machine )) return STATUS_INVALID_IMAGE_FORMAT;

        if (nt.FileHeader.Machine != IMAGE_FILE_MACHINE_AMD64)  /* non-x86 platforms are more strict */
        {
            if (nt.opt.hdr64.SectionAlignment & page_mask)
                return STATUS_INVALID_IMAGE_FORMAT;
            if (!(nt.opt.hdr64.DllCharacteristics & IMAGE_DLLCHARACTERISTICS_NX_COMPAT))
                return STATUS_INVALID_IMAGE_FORMAT;
            if (!(nt.opt.hdr64.DllCharacteristics & IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE))
                return STATUS_INVALID_IMAGE_FORMAT;
        }
        if (nt.opt.hdr64.NumberOfRvaAndSizes > IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR)
        {
            clr_va = nt.opt.hdr64.DataDirectory[IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR].VirtualAddress;
            clr_size = nt.opt.hdr64.DataDirectory[IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR].Size;
        }
        cfg_va = nt.opt.hdr64.DataDirectory[IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG].VirtualAddress;
        cfg_size = nt.opt.hdr64.DataDirectory[IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG].Size;

        mapping->image.base = nt.opt.hdr64.ImageBase;
        mapping->image.entry_point = nt.opt.hdr64.AddressOfEntryPoint;
        mapping->image.map_size = ROUND_SIZE( nt.opt.hdr64.SizeOfImage );
        mapping->image.stack_size = nt.opt.hdr64.SizeOfStackReserve;
        mapping->image.stack_commit = nt.opt.hdr64.SizeOfStackCommit;
        mapping->image.subsystem = nt.opt.hdr64.Subsystem;
        mapping->image.subsystem_minor = nt.opt.hdr64.MinorSubsystemVersion;
        mapping->image.subsystem_major = nt.opt.hdr64.MajorSubsystemVersion;
        mapping->image.osversion_minor = nt.opt.hdr64.MinorOperatingSystemVersion;
        mapping->image.osversion_major = nt.opt.hdr64.MajorOperatingSystemVersion;
        mapping->image.dll_charact = nt.opt.hdr64.DllCharacteristics;
        mapping->image.contains_code = (nt.opt.hdr64.SizeOfCode ||
                                        nt.opt.hdr64.AddressOfEntryPoint ||
                                        nt.opt.hdr64.SectionAlignment & page_mask);
        mapping->image.header_size = nt.opt.hdr64.SizeOfHeaders;
        mapping->image.checksum = nt.opt.hdr64.CheckSum;
        mapping->image.image_flags = 0;

        has_relocs = (nt.opt.hdr64.NumberOfRvaAndSizes > IMAGE_DIRECTORY_ENTRY_BASERELOC &&
                      nt.opt.hdr64.DataDirectory[IMAGE_DIRECTORY_ENTRY_BASERELOC].VirtualAddress &&
                      nt.opt.hdr64.DataDirectory[IMAGE_DIRECTORY_ENTRY_BASERELOC].Size &&
                      !(nt.FileHeader.Characteristics & IMAGE_FILE_RELOCS_STRIPPED));
        if (nt.opt.hdr64.SectionAlignment & page_mask)
            mapping->image.image_flags |= IMAGE_FLAGS_ImageMappedFlat;
        else if ((nt.opt.hdr64.DllCharacteristics & IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE) &&
                 (has_relocs || mapping->image.contains_code) && !(clr_va && clr_size))
            mapping->image.image_flags |= IMAGE_FLAGS_ImageDynamicallyRelocated;
        break;

    default:
        return STATUS_INVALID_IMAGE_FORMAT;
    }

    mapping->image.is_hybrid = 0;
    mapping->image.padding = 0;
    mapping->image.map_addr = get_fd_map_address( mapping->fd );
    mapping->image.image_charact = nt.FileHeader.Characteristics;
    mapping->image.machine = nt.FileHeader.Machine;
    mapping->image.dbg_offset = nt.FileHeader.PointerToSymbolTable;
    mapping->image.dbg_size = nt.FileHeader.NumberOfSymbols;
    mapping->image.zerobits = 0; /* FIXME */
    mapping->image.file_size = file_size;
    mapping->image.loader_flags = clr_va && clr_size;
    mapping->image.wine_builtin = (mz_size == sizeof(mz) &&
                                   !memcmp( mz.buffer, builtin_signature, sizeof(builtin_signature) ));
    mapping->image.wine_fakedll = (mz_size == sizeof(mz) &&
                                   !memcmp( mz.buffer, fakedll_signature, sizeof(fakedll_signature) ));

    /* load the section headers */

    pos += sizeof(nt.Signature) + sizeof(nt.FileHeader) + nt.FileHeader.SizeOfOptionalHeader;
    if (nt.FileHeader.NumberOfSections > ARRAY_SIZE( sec )) return STATUS_INVALID_IMAGE_FORMAT;
    size = sizeof(*sec) * nt.FileHeader.NumberOfSections;
    if (!mapping->size) mapping->size = mapping->image.map_size;
    else if (mapping->size > mapping->image.map_size) return STATUS_SECTION_TOO_BIG;
    if (pos + size > mapping->image.map_size) return STATUS_INVALID_FILE_FOR_SECTION;
    if (pos + size > mapping->image.header_size) mapping->image.header_size = pos + size;
    if (pread( unix_fd, sec, size, pos ) != size) return STATUS_INVALID_FILE_FOR_SECTION;

    for (i = 0; i < nt.FileHeader.NumberOfSections && !mapping->image.contains_code; i++)
        if (sec[i].Characteristics & IMAGE_SCN_MEM_EXECUTE) mapping->image.contains_code = 1;

    if (load_clr_header( &clr, clr_va, clr_size, unix_fd, sec, nt.FileHeader.NumberOfSections ) &&
        (clr.Flags & COMIMAGE_FLAGS_ILONLY))
    {
        mapping->image.image_flags |= IMAGE_FLAGS_ComPlusILOnly;
        if (nt.opt.hdr32.Magic == IMAGE_NT_OPTIONAL_HDR32_MAGIC)
        {
            if (!(clr.Flags & COMIMAGE_FLAGS_32BITREQUIRED))
                mapping->image.image_flags |= IMAGE_FLAGS_ComPlusNativeReady;
            if (clr.Flags & COMIMAGE_FLAGS_32BITPREFERRED)
                mapping->image.image_flags |= IMAGE_FLAGS_ComPlusPrefer32bit;
        }
    }

    if (load_cfg_header( &cfg.cfg64, cfg_va, cfg_size, unix_fd, sec, nt.FileHeader.NumberOfSections ))
    {
        if (nt.opt.hdr32.Magic == IMAGE_NT_OPTIONAL_HDR32_MAGIC)
            mapping->image.is_hybrid = !!cfg.cfg32.CHPEMetadataPointer;
        else
            mapping->image.is_hybrid = !!cfg.cfg64.CHPEMetadataPointer;
    }

    if (!build_shared_mapping( mapping, unix_fd, sec, nt.FileHeader.NumberOfSections ))
        return STATUS_INVALID_FILE_FOR_SECTION;

    return STATUS_SUCCESS;
}
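
/* allocate a ranges object for tracking the committed pages of a SEC_RESERVE mapping */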
static struct ranges *create_ranges(void)
{
    struct ranges *ranges = alloc_object( &ranges_ops );

    if (!ranges) return NULL;
    ranges->count = 0;
    ranges->max   = 8;
    if (!(ranges->ranges = mem_alloc( ranges->max * sizeof(*ranges->ranges) )))
    {
        release_object( ranges );
        return NULL;
    }
    return ranges;
}
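
/* validate the SEC_* flags for a new mapping and return the effective flags, or 0 on error */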
static unsigned int get_mapping_flags( obj_handle_t handle, unsigned int flags )
{
    switch (flags & (SEC_IMAGE | SEC_RESERVE | SEC_COMMIT | SEC_FILE))
    {
    case SEC_IMAGE:
        if (flags & (SEC_WRITECOMBINE | SEC_LARGE_PAGES)) break;
        if (handle) return SEC_FILE | SEC_IMAGE;
        set_error( STATUS_INVALID_FILE_FOR_SECTION );
        return 0;
    case SEC_COMMIT:
        if (!handle) return flags;
        /* fall through */
    case SEC_RESERVE:
        if (flags & SEC_LARGE_PAGES) break;
        if (handle) return SEC_FILE | (flags & (SEC_NOCACHE | SEC_WRITECOMBINE));
        return flags;
    }
    set_error( STATUS_INVALID_PARAMETER );
    return 0;
}
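
/* create a new file-backed or anonymous mapping object */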
static struct mapping *create_mapping( struct object *root, const struct unicode_str *name,
                                       unsigned int attr, mem_size_t size, unsigned int flags,
                                       obj_handle_t handle, unsigned int file_access,
                                       const struct security_descriptor *sd )
{
    struct mapping *mapping;
    struct file *file;
    struct fd *fd;
    int unix_fd;
    struct stat st;

    if (!(mapping = create_named_object( root, &mapping_ops, name, attr, sd )))
        return NULL;
    if (get_error() == STATUS_OBJECT_NAME_EXISTS)
        return mapping;  /* Nothing else to do */

    mapping->size      = size;
    mapping->fd        = NULL;
    mapping->shared    = NULL;
    mapping->committed = NULL;

    if (!(mapping->flags = get_mapping_flags( handle, flags ))) goto error;

    if (handle)
    {
        const unsigned int sharing = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE;
        unsigned int mapping_access = FILE_MAPPING_ACCESS;

        if (!(file = get_file_obj( current->process, handle, file_access ))) goto error;
        fd = get_obj_fd( (struct object *)file );

        /* file sharing rules for mappings are different, so we use magic access rights */
        if (flags & SEC_IMAGE) mapping_access |= FILE_MAPPING_IMAGE;
        else if (file_access & FILE_WRITE_DATA) mapping_access |= FILE_MAPPING_WRITE;

        if (!(mapping->fd = get_fd_object_for_mapping( fd, mapping_access, sharing )))
        {
            mapping->fd = dup_fd_object( fd, mapping_access, sharing, FILE_SYNCHRONOUS_IO_NONALERT );
            if (mapping->fd) set_fd_user( mapping->fd, &mapping_fd_ops, NULL );
        }
        release_object( file );
        release_object( fd );
        if (!mapping->fd) goto error;

        if ((unix_fd = get_unix_fd( mapping->fd )) == -1) goto error;
        if (fstat( unix_fd, &st ) == -1)
        {
            file_set_error();
            goto error;
        }
        if (flags & SEC_IMAGE)
        {
            unsigned int err = get_image_params( mapping, st.st_size, unix_fd );
            if (!err) return mapping;
            set_error( err );
            goto error;
        }
        if (!mapping->size)
        {
            if (!(mapping->size = st.st_size))
            {
                set_error( STATUS_MAPPED_FILE_SIZE_ZERO );
                goto error;
            }
        }
        else if (st.st_size < mapping->size)
        {
            if (!(file_access & FILE_WRITE_DATA) || mapping->size >> 54 /* ntfs limit */)
            {
                set_error( STATUS_SECTION_TOO_BIG );
                goto error;
            }
            if (!grow_file( unix_fd, mapping->size )) goto error;
        }
    }
    else  /* Anonymous mapping (no associated file) */
    {
        if (!mapping->size)
        {
            set_error( STATUS_INVALID_PARAMETER );
            goto error;
        }
        if ((flags & SEC_RESERVE) && !(mapping->committed = create_ranges())) goto error;
        mapping->size = (mapping->size + page_mask) & ~((mem_size_t)page_mask);
        if ((unix_fd = create_temp_file( mapping->size )) == -1) goto error;
        if (!(mapping->fd = create_anonymous_fd( &mapping_fd_ops, unix_fd, &mapping->obj,
                                                 FILE_SYNCHRONOUS_IO_NONALERT ))) goto error;
        allow_fd_caching( mapping->fd );
    }
    return mapping;

 error:
    release_object( mapping );
    return NULL;
}

/* create a read-only file mapping for the specified fd */
struct mapping *create_fd_mapping( struct object *root, const struct unicode_str *name,
                                   struct fd *fd, unsigned int attr, const struct security_descriptor *sd )
{
    struct mapping *mapping;
    int unix_fd;
    struct stat st;

    if (!(mapping = create_named_object( root, &mapping_ops, name, attr, sd ))) return NULL;
    if (get_error() == STATUS_OBJECT_NAME_EXISTS) return mapping;  /* Nothing else to do */

    mapping->shared    = NULL;
    mapping->committed = NULL;
    mapping->flags     = SEC_FILE;
    mapping->fd        = (struct fd *)grab_object( fd );
    set_fd_user( mapping->fd, &mapping_fd_ops, NULL );

    if ((unix_fd = get_unix_fd( mapping->fd )) == -1) goto error;
    if (fstat( unix_fd, &st ) == -1)
    {
        file_set_error();
        goto error;
    }
    if (!(mapping->size = st.st_size))
    {
        set_error( STATUS_MAPPED_FILE_SIZE_ZERO );
        goto error;
    }
    return mapping;

 error:
    release_object( mapping );
    return NULL;
}
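
/* retrieve the mapping object corresponding to a given handle */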
static struct mapping *get_mapping_obj( struct process *process, obj_handle_t handle, unsigned int access )
{
    return (struct mapping *)get_handle_obj( process, handle, access, &mapping_ops );
}

/* open a new file for the file descriptor backing the view */
struct file *get_view_file( const struct memory_view *view, unsigned int access, unsigned int sharing )
{
    if (!view->fd) return NULL;
    return create_file_for_fd_obj( view->fd, access, sharing );
}

/* get the image info for a SEC_IMAGE mapped view */
const pe_image_info_t *get_view_image_info( const struct memory_view *view, client_ptr_t *base )
{
    if (!(view->flags & SEC_IMAGE)) return NULL;
    *base = view->base;
    return &view->image;
}

/* get the file name for a mapped view */
int get_view_nt_name( const struct memory_view *view, struct unicode_str *name )
{
    if (view->namelen)  /* .so builtin */
    {
        name->str = view->name;
        name->len = view->namelen;
        return 1;
    }
    if (!view->fd) return 0;
    get_nt_name( view->fd, name );
    return 1;
}

/* generate all startup events of a given process */
void generate_startup_debug_events( struct process *process )
{
    struct memory_view *view;
    struct list *ptr = list_head( &process->views );
    struct thread *thread, *first_thread = get_process_first_thread( process );

    if (!ptr) return;
    view = LIST_ENTRY( ptr, struct memory_view, entry );
    generate_debug_event( first_thread, DbgCreateProcessStateChange, view );

    /* generate ntdll.dll load event */
    while (ptr && (ptr = list_next( &process->views, ptr )))
    {
        view = LIST_ENTRY( ptr, struct memory_view, entry );
        if (generate_dll_event( first_thread, DbgLoadDllStateChange, view )) break;
    }

    /* generate creation events */
    LIST_FOR_EACH_ENTRY( thread, &process->thread_list, struct thread, proc_entry )
    {
        if (thread != first_thread)
            generate_debug_event( thread, DbgCreateThreadStateChange, NULL );
    }

    /* generate dll events (in loading order) */
    while (ptr && (ptr = list_next( &process->views, ptr )))
    {
        view = LIST_ENTRY( ptr, struct memory_view, entry );
        generate_dll_event( first_thread, DbgLoadDllStateChange, view );
    }
}

static void mapping_dump( struct object *obj, int verbose )
{
    struct mapping *mapping = (struct mapping *)obj;
    assert( obj->ops == &mapping_ops );
    fprintf( stderr, "Mapping size=%08x%08x flags=%08x fd=%p shared=%p\n",
             (unsigned int)(mapping->size >> 32), (unsigned int)mapping->size,
             mapping->flags, mapping->fd, mapping->shared );
}

static struct fd *mapping_get_fd( struct object *obj )
{
    struct mapping *mapping = (struct mapping *)obj;
    return (struct fd *)grab_object( mapping->fd );
}

static void mapping_destroy( struct object *obj )
{
    struct mapping *mapping = (struct mapping *)obj;
    assert( obj->ops == &mapping_ops );
    if (mapping->fd) release_object( mapping->fd );
    if (mapping->committed) release_object( mapping->committed );
    if (mapping->shared) release_object( mapping->shared );
}

static enum server_fd_type mapping_get_fd_type( struct fd *fd )
{
    return FD_TYPE_FILE;
}

/* assign a mapping address to a PE image mapping */
static client_ptr_t assign_map_address( struct mapping *mapping )
{
    unsigned int i;
    client_ptr_t ret;
    struct addr_range *range = (mapping->image.base >> 32) ? &ranges64 : &ranges32;
    mem_size_t size = (mapping->size + granularity_mask) & ~granularity_mask;

    if (!(mapping->image.image_charact & IMAGE_FILE_DLL)) return 0;

    if ((ret = get_fd_map_address( mapping->fd ))) return ret;

    size += granularity_mask + 1;  /* leave some free space between mappings */

    for (i = 0; i < range->count; i++)
    {
        if (range->free[i].size < size) continue;
        range->free[i].size -= size;
        ret = range->free[i].base + range->free[i].size;
        set_fd_map_address( mapping->fd, ret, size );
        return ret;
    }
    return 0;
}

/* free a PE mapping address range when the last mapping is closed */
void free_map_addr( client_ptr_t base, mem_size_t size )
{
    unsigned int i;
    client_ptr_t end = base + size;
    struct addr_range *range = (base >> 32) ? &ranges64 : &ranges32;

    for (i = 0; i < range->count; i++)
    {
        if (range->free[i].base > end) continue;
        if (range->free[i].base + range->free[i].size < base) break;
        if (range->free[i].base == end)
        {
            if (i + 1 < range->count && range->free[i + 1].base + range->free[i + 1].size == base)
            {
                size += range->free[i].size;
                range->count--;
                memmove( &range->free[i], &range->free[i + 1], (range->count - i) * sizeof(*range->free) );
            }
            else range->free[i].base = base;
        }
        range->free[i].size += size;
        return;
    }

    if (range->count == range->size)
    {
        unsigned int new_size = max( 256, range->size * 2 );
        void *new_free = realloc( range->free, new_size * sizeof(*range->free) );
        if (!new_free) return;
        range->size = new_size;
        range->free = new_free;
    }
    memmove( &range->free[i + 1], &range->free[i], (range->count - i) * sizeof(*range->free) );
    range->free[i].base = base;
    range->free[i].size = size;
    range->count++;
}

int get_page_size(void)
{
    return page_mask + 1;
}
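
/* create the anonymous mapping backing the session shared memory */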
struct mapping *create_session_mapping( struct object *root, const struct unicode_str *name,
                                        unsigned int attr, const struct security_descriptor *sd )
{
    static const unsigned int access = FILE_READ_DATA | FILE_WRITE_DATA;
    mem_size_t size = max( sizeof(shared_object_t) * 512, 0x10000 );

    return create_mapping( root, name, attr, size, SEC_COMMIT, 0, access, sd );
}
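
/* map the session mapping in the server and register its initial block */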
void set_session_mapping( struct mapping *mapping )
{
    int unix_fd = get_unix_fd( mapping->fd );
    mem_size_t size = mapping->size;
    struct session_block *block;
    void *tmp;

    if (!(block = mem_alloc( sizeof(*block) ))) return;
    if ((tmp = mmap( NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, unix_fd, 0 )) == MAP_FAILED)
    {
        free( block );
        return;
    }

    block->data       = tmp;
    block->offset     = 0;
    block->used_size  = 0;
    block->block_size = size;

    session_mapping = mapping;
    list_add_tail( &session.blocks, &block->entry );
}
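
/* grow the session mapping file and map the new space as an additional block */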
static struct session_block *grow_session_mapping( mem_size_t needed )
{
    mem_size_t old_size = session_mapping->size, new_size;
    struct session_block *block;
    int unix_fd;
    void *tmp;

    new_size = max( old_size * 3 / 2, old_size + max( needed, 0x10000 ) );
    new_size = (new_size + page_mask) & ~((mem_size_t)page_mask);
    assert( new_size > old_size );

    unix_fd = get_unix_fd( session_mapping->fd );
    if (!grow_file( unix_fd, new_size )) return NULL;

    if (!(block = mem_alloc( sizeof(*block) ))) return NULL;
    if ((tmp = mmap( NULL, new_size - old_size, PROT_READ | PROT_WRITE, MAP_SHARED, unix_fd, old_size )) == MAP_FAILED)
    {
        file_set_error();
        free( block );
        return NULL;
    }

    block->data       = tmp;
    block->offset     = old_size;
    block->used_size  = 0;
    block->block_size = new_size - old_size;

    session_mapping->size = new_size;
    list_add_tail( &session.blocks, &block->entry );

    return block;
}
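
/* find a session block with enough free space, growing the mapping if needed */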
static struct session_block *find_free_session_block( mem_size_t size )
{
    struct session_block *block;

    LIST_FOR_EACH_ENTRY( block, &session.blocks, struct session_block, entry )
        if (size < block->block_size && block->used_size < block->block_size - size) return block;

    return grow_session_mapping( size );
}
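
/* allocate a session shared object, reusing a previously freed slot if possible */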
const volatile void *alloc_shared_object(void)
{
    struct session_object *object;
    struct list *ptr;

    if ((ptr = list_head( &session.free_objects )))
    {
        object = CONTAINING_RECORD( ptr, struct session_object, entry );
        list_remove( &object->entry );
    }
    else
    {
        mem_size_t size = sizeof(*object);
        struct session_block *block;

        if (!(block = find_free_session_block( size ))) return NULL;
        object = (struct session_object *)(block->data + block->used_size);
        object->offset = (char *)&object->obj - block->data;
        block->used_size += size;
    }

    SHARED_WRITE_BEGIN( &object->obj.shm, object_shm_t )
    {
        /* mark the object data as uninitialized */
        mark_block_uninitialized( (void *)shared, sizeof(*shared) );
        CONTAINING_RECORD( shared, shared_object_t, shm )->id = ++session.last_object_id;
    }
    SHARED_WRITE_END;

    return &object->obj.shm;
}
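
/* release a session shared object and put it back on the free list */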
void free_shared_object( const volatile void *object_shm )
{
    struct session_object *object = CONTAINING_RECORD( object_shm, struct session_object, obj.shm );

    SHARED_WRITE_BEGIN( &object->obj.shm, object_shm_t )
    {
        mark_block_noaccess( (void *)shared, sizeof(*shared) );
        CONTAINING_RECORD( shared, shared_object_t, shm )->id = 0;
    }
    SHARED_WRITE_END;

    list_add_tail( &session.free_objects, &object->entry );
}
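
/* build the locator (offset and id) for a session shared object */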
obj_locator_t get_shared_object_locator( const volatile void *object_shm )
{
    struct session_object *object = CONTAINING_RECORD( object_shm, struct session_object, obj.shm );
    obj_locator_t locator = {.offset = object->offset, .id = object->obj.id};
    return locator;
}
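
/* create the mapping for the user shared data and map it in the server */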
struct object *create_user_data_mapping( struct object *root, const struct unicode_str *name,
                                         unsigned int attr, const struct security_descriptor *sd )
{
    void *ptr;
    struct mapping *mapping;

    if (!(mapping = create_mapping( root, name, attr, sizeof(KSHARED_USER_DATA),
                                    SEC_COMMIT, 0, FILE_READ_DATA | FILE_WRITE_DATA, sd ))) return NULL;
    ptr = mmap( NULL, mapping->size, PROT_WRITE, MAP_SHARED, get_unix_fd( mapping->fd ), 0 );
    if (ptr != MAP_FAILED)
    {
        user_shared_data = ptr;
        user_shared_data->SystemCall = 1;
    }
    return &mapping->obj;
}

/* create a file mapping */
DECL_HANDLER(create_mapping)
{
    struct object *root;
    struct mapping *mapping;
    struct unicode_str name;
    const struct security_descriptor *sd;
    const struct object_attributes *objattr = get_req_object_attributes( &sd, &name, &root );

    if (!objattr) return;

    if ((mapping = create_mapping( root, &name, objattr->attributes, req->size, req->flags,
                                   req->file_handle, req->file_access, sd )))
    {
        if (get_error() == STATUS_OBJECT_NAME_EXISTS)
            reply->handle = alloc_handle( current->process, &mapping->obj, req->access, objattr->attributes );
        else
            reply->handle = alloc_handle_no_access_check( current->process, &mapping->obj,
                                                          req->access, objattr->attributes );
        release_object( mapping );
    }

    if (root) release_object( root );
}

/* open a handle to a mapping */
DECL_HANDLER(open_mapping)
{
    struct unicode_str name = get_req_unicode_str();

    reply->handle = open_object( current->process, req->rootdir, req->access,
                                 &mapping_ops, &name, req->attributes );
}

/* get information about a mapping */
DECL_HANDLER(get_mapping_info)
{
    struct mapping *mapping;

    if (!(mapping = get_mapping_obj( current->process, req->handle, req->access ))) return;

    reply->size  = mapping->size;
    reply->flags = mapping->flags;

    if (mapping->flags & SEC_IMAGE)
    {
        struct unicode_str name = { NULL, 0 };
        data_size_t size;
        void *data;

        if (mapping->fd) get_nt_name( mapping->fd, &name );
        size = min( sizeof(pe_image_info_t) + name.len, get_reply_max_size() );
        if ((data = set_reply_data_size( size )))
        {
            memcpy( data, &mapping->image, min( sizeof(pe_image_info_t), size ));
            if (size > sizeof(pe_image_info_t))
                memcpy( (pe_image_info_t *)data + 1, name.str, size - sizeof(pe_image_info_t) );
        }
        reply->total = sizeof(pe_image_info_t) + name.len;
    }

    if (!(req->access & (SECTION_MAP_READ | SECTION_MAP_WRITE)))  /* query only */
    {
        release_object( mapping );
        return;
    }

    if (mapping->shared)
        reply->shared_file = alloc_handle( current->process, mapping->shared->file,
                                           GENERIC_READ|GENERIC_WRITE, 0 );
    release_object( mapping );
}

/* get the address to use to map an image mapping */
DECL_HANDLER(get_image_map_address)
{
    struct mapping *mapping;

    if (!(mapping = get_mapping_obj( current->process, req->handle, SECTION_MAP_READ ))) return;

    if ((mapping->flags & SEC_IMAGE) &&
        (mapping->image.image_flags & IMAGE_FLAGS_ImageDynamicallyRelocated))
    {
        if (!mapping->image.map_addr) mapping->image.map_addr = assign_map_address( mapping );
        reply->addr = mapping->image.map_addr;
    }
    else set_error( STATUS_INVALID_PARAMETER );

    release_object( mapping );
}

/* add a memory view in the current process */
DECL_HANDLER(map_view)
{
    struct mapping *mapping;
    struct memory_view *view;

    if (!is_valid_view_addr( current->process, req->base, req->size ))
    {
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }

    if (!(mapping = get_mapping_obj( current->process, req->mapping, req->access ))) return;

    if ((mapping->flags & SEC_IMAGE) ||
        req->start >= mapping->size ||
        req->start + req->size < req->start ||
        req->start + req->size > ((mapping->size + page_mask) & ~(mem_size_t)page_mask))
    {
        set_error( STATUS_INVALID_PARAMETER );
        goto done;
    }

    if ((view = mem_alloc( sizeof(*view) )))
    {
        view->base      = req->base;
        view->size      = req->size;
        view->start     = req->start;
        view->flags     = mapping->flags;
        view->namelen   = 0;
        view->fd        = !is_fd_removable( mapping->fd ) ? (struct fd *)grab_object( mapping->fd ) : NULL;
        view->committed = mapping->committed ? (struct ranges *)grab_object( mapping->committed ) : NULL;
        view->shared    = NULL;
        add_process_view( current, view );
    }

done:
    release_object( mapping );
}

/* add a memory view for an image mapping in the current process */
DECL_HANDLER(map_image_view)
{
    struct mapping *mapping;
    struct memory_view *view;

    if (!is_valid_view_addr( current->process, req->base, req->size ))
    {
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }

    if (!(mapping = get_mapping_obj( current->process, req->mapping, SECTION_MAP_READ ))) return;

    if (!(mapping->flags & SEC_IMAGE) || req->size > mapping->image.map_size)
    {
        set_error( STATUS_INVALID_PARAMETER );
        goto done;
    }

    if ((view = mem_alloc( sizeof(*view) )))
    {
        view->base      = req->base;
        view->size      = req->size;
        view->flags     = mapping->flags;
        view->start     = 0;
        view->namelen   = 0;
        view->fd        = !is_fd_removable( mapping->fd ) ? (struct fd *)grab_object( mapping->fd ) : NULL;
        view->committed = NULL;
        view->shared    = mapping->shared ? (struct shared_map *)grab_object( mapping->shared ) : NULL;
        view->image     = mapping->image;
        if (add_process_view( current, view ))
        {
            current->entry_point = view->base + req->entry;
            current->process->machine = (view->image.image_flags & IMAGE_FLAGS_ComPlusNativeReady) ?
                                        native_machine : req->machine;
        }

        if (view->base != (mapping->image.map_addr ? mapping->image.map_addr : mapping->image.base))
            set_error( STATUS_IMAGE_NOT_AT_BASE );
        if (req->machine != current->process->machine)
        {
            /* on 32-bit, the native 64-bit machine is allowed */
            if (is_machine_64bit( current->process->machine ) || req->machine != native_machine)
                set_error( STATUS_IMAGE_MACHINE_TYPE_MISMATCH );
        }
    }

done:
    release_object( mapping );
}

/* add a memory view for a builtin dll in the current process */
DECL_HANDLER(map_builtin_view)
{
    struct memory_view *view;
    const pe_image_info_t *image = get_req_data();
    data_size_t namelen = get_req_data_size() - sizeof(*image);

    if (get_req_data_size() < sizeof(*image) ||
        (namelen & (sizeof(WCHAR) - 1)) ||
        !is_valid_view_addr( current->process, image->base, image->map_size ))
    {
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }

    if ((view = mem_alloc( sizeof(struct memory_view) + namelen )))
    {
        memset( view, 0, sizeof(*view) );
        view->base    = image->base;
        view->size    = image->map_size;
        view->flags   = SEC_IMAGE;
        view->image   = *image;
        view->namelen = namelen;
        memcpy( view->name, image + 1, namelen );
        if (add_process_view( current, view ))
        {
            current->entry_point = view->base + image->entry_point;
            current->process->machine = image->machine;
        }
    }
}

/* unmap a memory view from the current process */
DECL_HANDLER(unmap_view)
{
    struct memory_view *view = find_mapped_view( current->process, req->base );

    if (!view) return;
    generate_dll_event( current, DbgUnloadDllStateChange, view );
    free_memory_view( view );
}

/* get information about a mapped image view */
DECL_HANDLER(get_image_view_info)
{
    struct process *process;
    struct memory_view *view;

    if (!(process = get_process_from_handle( req->process, PROCESS_QUERY_INFORMATION ))) return;

    if ((view = find_mapped_addr( process, req->addr )) && (view->flags & SEC_IMAGE))
    {
        reply->base = view->base;
        reply->size = view->size;
    }

    release_object( process );
}

/* get a range of committed pages in a file mapping */
DECL_HANDLER(get_mapping_committed_range)
{
    struct memory_view *view = find_mapped_view( current->process, req->base );

    if (view) reply->committed = find_committed_range( view, req->offset, &reply->size );
}

/* add a range to the committed pages in a file mapping */
DECL_HANDLER(add_mapping_committed_range)
{
    struct memory_view *view = find_mapped_view( current->process, req->base );

    if (view) add_committed_range( view, req->offset, req->offset + req->size );
}

/* check if two memory maps are for the same file */
DECL_HANDLER(is_same_mapping)
{
    struct memory_view *view1 = find_mapped_view( current->process, req->base1 );
    struct memory_view *view2 = find_mapped_view( current->process, req->base2 );

    if (!view1 || !view2) return;
    if (!view1->fd || !view2->fd || !(view1->flags & SEC_IMAGE) || !is_same_file_fd( view1->fd, view2->fd ))
        set_error( STATUS_NOT_SAME_DEVICE );
}

/* get the filename of a mapping */
DECL_HANDLER(get_mapping_filename)
{
    struct process *process;
    struct memory_view *view;
    struct unicode_str name;

    if (!(process = get_process_from_handle( req->process, PROCESS_QUERY_INFORMATION ))) return;

    if ((view = find_mapped_addr( process, req->addr )) && get_view_nt_name( view, &name ))
    {
        reply->len = name.len;
        if (name.len > get_reply_max_size()) set_error( STATUS_BUFFER_OVERFLOW );
        else if (!name.len) set_error( STATUS_FILE_INVALID );
        else set_reply_data( name.str, name.len );
    }
    else set_error( STATUS_INVALID_ADDRESS );

    release_object( process );
}