ws2_32: Reimplement inet_pton on top of ntdll functions.
[wine.git] / server / mapping.c
blobffd20450df5130e948fe606ca5af7f7a05b37a87
1 /*
2 * Server-side file mapping management
4 * Copyright (C) 1999 Alexandre Julliard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
21 #include "config.h"
22 #include "wine/port.h"
24 #include <assert.h>
25 #include <stdarg.h>
26 #include <stdio.h>
27 #include <stdlib.h>
28 #include <sys/stat.h>
29 #ifdef HAVE_SYS_MMAN_H
30 # include <sys/mman.h>
31 #endif
32 #include <unistd.h>
34 #include "ntstatus.h"
35 #define WIN32_NO_STATUS
36 #include "windef.h"
37 #include "winternl.h"
38 #include "ddk/wdm.h"
40 #include "file.h"
41 #include "handle.h"
42 #include "thread.h"
43 #include "process.h"
44 #include "request.h"
45 #include "security.h"
/* list of memory ranges, used to store committed info */
struct ranges
{
    struct object obj;          /* object header */
    unsigned int count;         /* number of used ranges */
    unsigned int max;           /* number of allocated ranges */
    struct range
    {
        file_pos_t start;       /* start offset of the committed range */
        file_pos_t end;         /* end offset (exclusive) of the committed range */
    } *ranges;                  /* sorted, non-overlapping array of ranges */
};
static void ranges_dump( struct object *obj, int verbose );
static void ranges_destroy( struct object *obj );

/* operations for the internal committed-ranges object: it is never named,
 * waited on, signaled or opened, so almost every entry is a no-op stub */
static const struct object_ops ranges_ops =
{
    sizeof(struct ranges),     /* size */
    ranges_dump,               /* dump */
    no_get_type,               /* get_type */
    no_add_queue,              /* add_queue */
    NULL,                      /* remove_queue */
    NULL,                      /* signaled */
    NULL,                      /* satisfied */
    no_signal,                 /* signal */
    no_get_fd,                 /* get_fd */
    no_map_access,             /* map_access */
    default_get_sd,            /* get_sd */
    default_set_sd,            /* set_sd */
    no_lookup_name,            /* lookup_name */
    no_link_name,              /* link_name */
    NULL,                      /* unlink_name */
    no_open_file,              /* open_file */
    no_kernel_obj_list,        /* get_kernel_obj_list */
    no_close_handle,           /* close_handle */
    ranges_destroy             /* destroy */
};
/* file backing the shared sections of a PE image mapping */
struct shared_map
{
    struct object obj;          /* object header */
    struct fd    *fd;           /* file descriptor of the mapped PE file */
    struct file  *file;         /* temp file holding the shared data */
    struct list   entry;        /* entry in global shared maps list */
};
static void shared_map_dump( struct object *obj, int verbose );
static void shared_map_destroy( struct object *obj );

/* operations for the shared-sections helper object; internal only,
 * so it cannot be named, waited on, signaled or opened */
static const struct object_ops shared_map_ops =
{
    sizeof(struct shared_map), /* size */
    shared_map_dump,           /* dump */
    no_get_type,               /* get_type */
    no_add_queue,              /* add_queue */
    NULL,                      /* remove_queue */
    NULL,                      /* signaled */
    NULL,                      /* satisfied */
    no_signal,                 /* signal */
    no_get_fd,                 /* get_fd */
    no_map_access,             /* map_access */
    default_get_sd,            /* get_sd */
    default_set_sd,            /* set_sd */
    no_lookup_name,            /* lookup_name */
    no_link_name,              /* link_name */
    NULL,                      /* unlink_name */
    no_open_file,              /* open_file */
    no_kernel_obj_list,        /* get_kernel_obj_list */
    no_close_handle,           /* close_handle */
    shared_map_destroy         /* destroy */
};

/* global list of shared PE section temp files, searched by get_shared_file() */
static struct list shared_map_list = LIST_INIT( shared_map_list );
/* memory view mapped in client address space */
struct memory_view
{
    struct list entry;          /* entry in per-process view list */
    struct fd *fd;              /* fd for mapped file */
    struct ranges *committed;   /* list of committed ranges in this mapping (NULL = fully committed) */
    struct shared_map *shared;  /* temp file for shared PE mapping */
    unsigned int flags;         /* SEC_* flags */
    client_ptr_t base;          /* view base address (in process addr space) */
    mem_size_t size;            /* view size */
    file_pos_t start;           /* start offset in mapping */
};
/* server-side section (file mapping) object */
struct mapping
{
    struct object obj;          /* object header */
    mem_size_t size;            /* mapping size */
    unsigned int flags;         /* SEC_* flags */
    struct fd *fd;              /* fd for mapped file */
    pe_image_info_t image;      /* image info (for PE image mapping) */
    struct ranges *committed;   /* list of committed ranges in this mapping (NULL = fully committed) */
    struct shared_map *shared;  /* temp file for shared PE mapping */
};
static void mapping_dump( struct object *obj, int verbose );
static struct object_type *mapping_get_type( struct object *obj );
static struct fd *mapping_get_fd( struct object *obj );
static unsigned int mapping_map_access( struct object *obj, unsigned int access );
static void mapping_destroy( struct object *obj );
static enum server_fd_type mapping_get_fd_type( struct fd *fd );

/* operations for the user-visible Section object: it can be named in the
 * object directory, duplicated, and queried for its backing fd */
static const struct object_ops mapping_ops =
{
    sizeof(struct mapping),    /* size */
    mapping_dump,              /* dump */
    mapping_get_type,          /* get_type */
    no_add_queue,              /* add_queue */
    NULL,                      /* remove_queue */
    NULL,                      /* signaled */
    NULL,                      /* satisfied */
    no_signal,                 /* signal */
    mapping_get_fd,            /* get_fd */
    mapping_map_access,        /* map_access */
    default_get_sd,            /* get_sd */
    default_set_sd,            /* set_sd */
    no_lookup_name,            /* lookup_name */
    directory_link_name,       /* link_name */
    default_unlink_name,       /* unlink_name */
    no_open_file,              /* open_file */
    no_kernel_obj_list,        /* get_kernel_obj_list */
    fd_close_handle,           /* close_handle */
    mapping_destroy            /* destroy */
};
/* fd operations for a mapping fd: plain polling defaults, no I/O allowed */
static const struct fd_ops mapping_fd_ops =
{
    default_fd_get_poll_events,   /* get_poll_events */
    default_poll_event,           /* poll_event */
    mapping_get_fd_type,          /* get_fd_type */
    no_fd_read,                   /* read */
    no_fd_write,                  /* write */
    no_fd_flush,                  /* flush */
    no_fd_get_file_info,          /* get_file_info */
    no_fd_get_volume_info,        /* get_volume_info */
    no_fd_ioctl,                  /* ioctl */
    no_fd_queue_async,            /* queue_async */
    default_fd_reselect_async     /* reselect_async */
};
static size_t page_mask;  /* system page size - 1; set lazily from sysconf(_SC_PAGESIZE) */

/* round a size up to the next page boundary */
#define ROUND_SIZE(size)  (((size) + page_mask) & ~page_mask)
197 static void ranges_dump( struct object *obj, int verbose )
199 struct ranges *ranges = (struct ranges *)obj;
200 fprintf( stderr, "Memory ranges count=%u\n", ranges->count );
203 static void ranges_destroy( struct object *obj )
205 struct ranges *ranges = (struct ranges *)obj;
206 free( ranges->ranges );
209 static void shared_map_dump( struct object *obj, int verbose )
211 struct shared_map *shared = (struct shared_map *)obj;
212 fprintf( stderr, "Shared mapping fd=%p file=%p\n", shared->fd, shared->file );
215 static void shared_map_destroy( struct object *obj )
217 struct shared_map *shared = (struct shared_map *)obj;
219 release_object( shared->fd );
220 release_object( shared->file );
221 list_remove( &shared->entry );
224 /* extend a file beyond the current end of file */
225 static int grow_file( int unix_fd, file_pos_t new_size )
227 static const char zero;
228 off_t size = new_size;
230 if (sizeof(new_size) > sizeof(size) && size != new_size)
232 set_error( STATUS_INVALID_PARAMETER );
233 return 0;
235 /* extend the file one byte beyond the requested size and then truncate it */
236 /* this should work around ftruncate implementations that can't extend files */
237 if (pwrite( unix_fd, &zero, 1, size ) != -1)
239 ftruncate( unix_fd, size );
240 return 1;
242 file_set_error();
243 return 0;
/* check if the current directory allows exec mappings */
static int check_current_dir_for_exec(void)
{
    char name[] = "anonmap.XXXXXX";
    void *addr = MAP_FAILED;
    int ok, fd;

    if ((fd = mkstemps( name, 0 )) == -1) return 0;
    if (grow_file( fd, 1 ))
    {
        /* try an executable mapping on a probe file in the current directory */
        addr = mmap( NULL, get_page_size(), PROT_READ | PROT_EXEC, MAP_PRIVATE, fd, 0 );
        if (addr != MAP_FAILED) munmap( addr, get_page_size() );
    }
    ok = (addr != MAP_FAILED);
    close( fd );
    unlink( name );
    return ok;
}
/* create a temp file for anonymous mappings */
/* returns a unix fd on success, -1 on failure (with the server error set) */
static int create_temp_file( file_pos_t size )
{
    static int temp_dir_fd = -1;  /* directory used for temp files; picked once on first call */
    char tmpfn[] = "anonmap.XXXXXX";
    int fd;

    if (temp_dir_fd == -1)
    {
        /* first call: prefer the server dir, but fall back to the config dir
         * if the server dir is mounted noexec */
        temp_dir_fd = server_dir_fd;
        if (!check_current_dir_for_exec())
        {
            /* the server dir is noexec, try the config dir instead */
            fchdir( config_dir_fd );
            if (check_current_dir_for_exec())
                temp_dir_fd = config_dir_fd;
            else  /* neither works, fall back to server dir */
                fchdir( server_dir_fd );
        }
    }
    else if (temp_dir_fd != server_dir_fd) fchdir( temp_dir_fd );

    fd = mkstemps( tmpfn, 0 );
    if (fd != -1)
    {
        if (!grow_file( fd, size ))
        {
            close( fd );
            fd = -1;
        }
        /* unlink immediately: the file lives only as long as the fd */
        unlink( tmpfn );
    }
    else file_set_error();

    /* restore the server dir as current directory */
    if (temp_dir_fd != server_dir_fd) fchdir( server_dir_fd );
    return fd;
}
303 /* find a memory view from its base address */
304 static struct memory_view *find_mapped_view( struct process *process, client_ptr_t base )
306 struct memory_view *view;
308 LIST_FOR_EACH_ENTRY( view, &process->views, struct memory_view, entry )
309 if (view->base == base) return view;
311 set_error( STATUS_NOT_MAPPED_VIEW );
312 return NULL;
315 static void free_memory_view( struct memory_view *view )
317 if (view->fd) release_object( view->fd );
318 if (view->committed) release_object( view->committed );
319 if (view->shared) release_object( view->shared );
320 list_remove( &view->entry );
321 free( view );
324 /* free all mapped views at process exit */
325 void free_mapped_views( struct process *process )
327 struct list *ptr;
329 while ((ptr = list_head( &process->views )))
330 free_memory_view( LIST_ENTRY( ptr, struct memory_view, entry ));
333 /* find the shared PE mapping for a given mapping */
334 static struct shared_map *get_shared_file( struct fd *fd )
336 struct shared_map *ptr;
338 LIST_FOR_EACH_ENTRY( ptr, &shared_map_list, struct shared_map, entry )
339 if (is_same_file_fd( ptr->fd, fd ))
340 return (struct shared_map *)grab_object( ptr );
341 return NULL;
344 /* return the size of the memory mapping and file range of a given section */
345 static inline void get_section_sizes( const IMAGE_SECTION_HEADER *sec, size_t *map_size,
346 off_t *file_start, size_t *file_size )
348 static const unsigned int sector_align = 0x1ff;
350 if (!sec->Misc.VirtualSize) *map_size = ROUND_SIZE( sec->SizeOfRawData );
351 else *map_size = ROUND_SIZE( sec->Misc.VirtualSize );
353 *file_start = sec->PointerToRawData & ~sector_align;
354 *file_size = (sec->SizeOfRawData + (sec->PointerToRawData & sector_align) + sector_align) & ~sector_align;
355 if (*file_size > *map_size) *file_size = *map_size;
/* add a range to the committed list */
/* the ranges array is kept sorted and non-overlapping; a new range is merged
 * with any existing range(s) it touches */
static void add_committed_range( struct memory_view *view, file_pos_t start, file_pos_t end )
{
    unsigned int i, j;
    struct ranges *committed = view->committed;
    struct range *ranges;

    /* offsets must be page-aligned and form a non-empty range inside the view */
    if ((start & page_mask) || (end & page_mask) ||
        start >= view->size || end >= view->size ||
        start >= end)
    {
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }

    if (!committed) return;  /* everything committed already */

    /* convert view-relative offsets to mapping offsets */
    start += view->start;
    end += view->start;

    for (i = 0, ranges = committed->ranges; i < committed->count; i++)
    {
        if (ranges[i].start > end) break;     /* sorted array: insert before i */
        if (ranges[i].end < start) continue;  /* no overlap with this one */
        if (ranges[i].start > start) ranges[i].start = start; /* extend downwards */
        if (ranges[i].end < end)  /* extend upwards and maybe merge with next */
        {
            /* absorb every following range that the new one overlaps */
            for (j = i + 1; j < committed->count; j++)
            {
                if (ranges[j].start > end) break;
                if (ranges[j].end > end) end = ranges[j].end;
            }
            if (j > i + 1)
            {
                /* close the gap left by the merged ranges */
                memmove( &ranges[i + 1], &ranges[j], (committed->count - j) * sizeof(*ranges) );
                committed->count -= j - (i + 1);
            }
            ranges[i].end = end;
        }
        return;
    }

    /* now add a new range */

    if (committed->count == committed->max)
    {
        /* grow the array; on allocation failure the range is silently dropped
         * (committed info is best-effort) */
        unsigned int new_size = committed->max * 2;
        struct range *new_ptr = realloc( committed->ranges, new_size * sizeof(*new_ptr) );
        if (!new_ptr) return;
        committed->max = new_size;
        ranges = committed->ranges = new_ptr;
    }
    memmove( &ranges[i + 1], &ranges[i], (committed->count - i) * sizeof(*ranges) );
    ranges[i].start = start;
    ranges[i].end = end;
    committed->count++;
}
/* find the range containing start and return whether it's committed */
/* on return *size is the length of the contiguous committed (ret 1) or
 * uncommitted (ret 0) region beginning at start */
static int find_committed_range( struct memory_view *view, file_pos_t start, mem_size_t *size )
{
    unsigned int i;
    struct ranges *committed = view->committed;
    struct range *ranges;

    if ((start & page_mask) || start >= view->size)
    {
        set_error( STATUS_INVALID_PARAMETER );
        return 0;
    }
    if (!committed)  /* everything is committed */
    {
        *size = view->size - start;
        return 1;
    }
    for (i = 0, ranges = committed->ranges; i < committed->count; i++)
    {
        /* ranges are sorted: the first range starting past the position
         * bounds the uncommitted region in front of it */
        if (ranges[i].start > view->start + start)
        {
            *size = min( ranges[i].start, view->start + view->size ) - (view->start + start);
            return 0;
        }
        /* position falls inside this committed range */
        if (ranges[i].end > view->start + start)
        {
            *size = min( ranges[i].end, view->start + view->size ) - (view->start + start);
            return 1;
        }
    }
    /* past the last range: the rest of the view is uncommitted */
    *size = view->size - start;
    return 0;
}
/* allocate and fill the temp file for a shared PE image mapping */
/* returns 1 on success (or if there is nothing to share), 0 on failure */
static int build_shared_mapping( struct mapping *mapping, int fd,
                                 IMAGE_SECTION_HEADER *sec, unsigned int nb_sec )
{
    struct shared_map *shared;
    struct file *file;
    unsigned int i;
    mem_size_t total_size;
    size_t file_size, map_size, max_size;
    off_t shared_pos, read_pos, write_pos;
    char *buffer = NULL;
    int shared_fd;
    long toread;

    /* compute the total size of the shared mapping */

    total_size = max_size = 0;
    for (i = 0; i < nb_sec; i++)
    {
        /* only writable shared sections need backing in the temp file */
        if ((sec[i].Characteristics & IMAGE_SCN_MEM_SHARED) &&
            (sec[i].Characteristics & IMAGE_SCN_MEM_WRITE))
        {
            get_section_sizes( &sec[i], &map_size, &read_pos, &file_size );
            if (file_size > max_size) max_size = file_size;
            total_size += map_size;
        }
    }
    if (!total_size) return 1;  /* nothing to do */

    /* reuse an existing shared map for the same underlying file, if any */
    if ((mapping->shared = get_shared_file( mapping->fd ))) return 1;

    /* create a temp file for the mapping */

    if ((shared_fd = create_temp_file( total_size )) == -1) return 0;
    if (!(file = create_file_for_fd( shared_fd, FILE_GENERIC_READ|FILE_GENERIC_WRITE, 0 ))) return 0;

    if (!(buffer = malloc( max_size ))) goto error;

    /* copy the shared sections data into the temp file */

    shared_pos = 0;
    for (i = 0; i < nb_sec; i++)
    {
        if (!(sec[i].Characteristics & IMAGE_SCN_MEM_SHARED)) continue;
        if (!(sec[i].Characteristics & IMAGE_SCN_MEM_WRITE)) continue;
        get_section_sizes( &sec[i], &map_size, &read_pos, &file_size );
        write_pos = shared_pos;
        shared_pos += map_size;  /* sections are laid out back to back */
        if (!sec[i].PointerToRawData || !file_size) continue;
        toread = file_size;
        while (toread)
        {
            long res = pread( fd, buffer + file_size - toread, toread, read_pos );
            if (!res && toread < 0x200)  /* partial sector at EOF is not an error */
            {
                file_size -= toread;
                break;
            }
            if (res <= 0) goto error;
            toread -= res;
            read_pos += res;
        }
        if (pwrite( shared_fd, buffer, file_size, write_pos ) != file_size) goto error;
    }
    if (!(shared = alloc_object( &shared_map_ops ))) goto error;
    /* the shared_map object takes its own reference on the PE file fd
     * and owns the temp file object */
    shared->fd = (struct fd *)grab_object( mapping->fd );
    shared->file = file;
    list_add_head( &shared_map_list, &shared->entry );
    mapping->shared = shared;
    free( buffer );
    return 1;

 error:
    release_object( file );
    free( buffer );
    return 0;
}
/* load the CLR header from its section */
/* va/size come from the COM descriptor data directory; returns 1 only if a
 * valid header with a supported runtime version was read into *hdr */
static int load_clr_header( IMAGE_COR20_HEADER *hdr, size_t va, size_t size, int unix_fd,
                            IMAGE_SECTION_HEADER *sec, unsigned int nb_sec )
{
    ssize_t ret;
    size_t map_size, file_size;
    off_t file_start;
    unsigned int i;

    if (!va || !size) return 0;

    /* find the section that contains the whole header */
    for (i = 0; i < nb_sec; i++)
    {
        if (va < sec[i].VirtualAddress) continue;
        if (sec[i].Misc.VirtualSize && va - sec[i].VirtualAddress >= sec[i].Misc.VirtualSize) continue;
        get_section_sizes( &sec[i], &map_size, &file_start, &file_size );
        if (size >= map_size) continue;
        if (va - sec[i].VirtualAddress >= map_size - size) continue;
        file_size = min( file_size, map_size );
        size = min( size, sizeof(*hdr) );
        ret = pread( unix_fd, hdr, min( size, file_size ), file_start + va - sec[i].VirtualAddress );
        if (ret <= 0) break;
        /* zero-fill whatever part of the header was not read from the file */
        if (ret < sizeof(*hdr)) memset( (char *)hdr + ret, 0, sizeof(*hdr) - ret );
        return (hdr->MajorRuntimeVersion > COR_VERSION_MAJOR_V2 ||
                (hdr->MajorRuntimeVersion == COR_VERSION_MAJOR_V2 &&
                 hdr->MinorRuntimeVersion >= COR_VERSION_MINOR));
    }
    return 0;
}
/* retrieve the mapping parameters for an executable (PE) image */
/* parses the DOS/NT headers and sections of the file behind unix_fd and
 * fills mapping->image; returns an NT status code */
static unsigned int get_image_params( struct mapping *mapping, file_pos_t file_size, int unix_fd )
{
    static const char builtin_signature[] = "Wine builtin DLL";
    static const char fakedll_signature[] = "Wine placeholder DLL";

    IMAGE_COR20_HEADER clr;
    IMAGE_SECTION_HEADER sec[96];
    struct
    {
        IMAGE_DOS_HEADER dos;
        char buffer[32];        /* room for the Wine builtin/fakedll signature */
    } mz;
    struct
    {
        DWORD Signature;
        IMAGE_FILE_HEADER FileHeader;
        union
        {
            IMAGE_OPTIONAL_HEADER32 hdr32;
            IMAGE_OPTIONAL_HEADER64 hdr64;
        } opt;
    } nt;
    off_t pos;
    int size, opt_size;
    size_t mz_size, clr_va, clr_size;
    unsigned int i, cpu_mask = get_supported_cpu_mask();

    /* load the headers */

    if (!file_size) return STATUS_INVALID_FILE_FOR_SECTION;
    size = pread( unix_fd, &mz, sizeof(mz), 0 );
    if (size < sizeof(mz.dos)) return STATUS_INVALID_IMAGE_NOT_MZ;
    if (mz.dos.e_magic != IMAGE_DOS_SIGNATURE) return STATUS_INVALID_IMAGE_NOT_MZ;
    mz_size = size;
    pos = mz.dos.e_lfanew;

    size = pread( unix_fd, &nt, sizeof(nt), pos );
    if (size < sizeof(nt.Signature) + sizeof(nt.FileHeader)) return STATUS_INVALID_IMAGE_PROTECT;
    /* zero out Optional header in the case it's not present or partial */
    opt_size = max( nt.FileHeader.SizeOfOptionalHeader, offsetof( IMAGE_OPTIONAL_HEADER32, CheckSum ));
    size = min( size, sizeof(nt.Signature) + sizeof(nt.FileHeader) + opt_size );
    if (size < sizeof(nt)) memset( (char *)&nt + size, 0, sizeof(nt) - size );
    if (nt.Signature != IMAGE_NT_SIGNATURE)
    {
        /* not PE: distinguish the various 16-bit / legacy formats */
        IMAGE_OS2_HEADER *os2 = (IMAGE_OS2_HEADER *)&nt;
        if (os2->ne_magic != IMAGE_OS2_SIGNATURE) return STATUS_INVALID_IMAGE_PROTECT;
        if (os2->ne_exetyp == 2) return STATUS_INVALID_IMAGE_WIN_16;
        if (os2->ne_exetyp == 5) return STATUS_INVALID_IMAGE_PROTECT;
        return STATUS_INVALID_IMAGE_NE_FORMAT;
    }

    switch (nt.opt.hdr32.Magic)
    {
    case IMAGE_NT_OPTIONAL_HDR32_MAGIC:
        /* 32-bit image: check the machine against the supported CPUs */
        switch (nt.FileHeader.Machine)
        {
        case IMAGE_FILE_MACHINE_I386:
            mapping->image.cpu = CPU_x86;
            if (cpu_mask & (CPU_FLAG(CPU_x86) | CPU_FLAG(CPU_x86_64))) break;
            return STATUS_INVALID_IMAGE_FORMAT;
        case IMAGE_FILE_MACHINE_ARM:
        case IMAGE_FILE_MACHINE_THUMB:
        case IMAGE_FILE_MACHINE_ARMNT:
            mapping->image.cpu = CPU_ARM;
            if (cpu_mask & (CPU_FLAG(CPU_ARM) | CPU_FLAG(CPU_ARM64))) break;
            return STATUS_INVALID_IMAGE_FORMAT;
        case IMAGE_FILE_MACHINE_POWERPC:
            mapping->image.cpu = CPU_POWERPC;
            if (cpu_mask & CPU_FLAG(CPU_POWERPC)) break;
            return STATUS_INVALID_IMAGE_FORMAT;
        default:
            return STATUS_INVALID_IMAGE_FORMAT;
        }
        clr_va = nt.opt.hdr32.DataDirectory[IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR].VirtualAddress;
        clr_size = nt.opt.hdr32.DataDirectory[IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR].Size;

        mapping->image.base           = nt.opt.hdr32.ImageBase;
        mapping->image.entry_point    = nt.opt.hdr32.ImageBase + nt.opt.hdr32.AddressOfEntryPoint;
        mapping->image.map_size       = ROUND_SIZE( nt.opt.hdr32.SizeOfImage );
        mapping->image.stack_size     = nt.opt.hdr32.SizeOfStackReserve;
        mapping->image.stack_commit   = nt.opt.hdr32.SizeOfStackCommit;
        mapping->image.subsystem      = nt.opt.hdr32.Subsystem;
        mapping->image.subsystem_low  = nt.opt.hdr32.MinorSubsystemVersion;
        mapping->image.subsystem_high = nt.opt.hdr32.MajorSubsystemVersion;
        mapping->image.dll_charact    = nt.opt.hdr32.DllCharacteristics;
        /* a sub-page section alignment forces a flat mapping, which counts as code */
        mapping->image.contains_code  = (nt.opt.hdr32.SizeOfCode ||
                                         nt.opt.hdr32.AddressOfEntryPoint ||
                                         nt.opt.hdr32.SectionAlignment & page_mask);
        mapping->image.header_size    = nt.opt.hdr32.SizeOfHeaders;
        mapping->image.checksum       = nt.opt.hdr32.CheckSum;
        mapping->image.image_flags    = 0;
        if (nt.opt.hdr32.SectionAlignment & page_mask)
            mapping->image.image_flags |= IMAGE_FLAGS_ImageMappedFlat;
        if ((nt.opt.hdr32.DllCharacteristics & IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE) &&
            mapping->image.contains_code && !(clr_va && clr_size))
            mapping->image.image_flags |= IMAGE_FLAGS_ImageDynamicallyRelocated;
        break;

    case IMAGE_NT_OPTIONAL_HDR64_MAGIC:
        /* 64-bit image: only valid when some 64-bit CPU is supported */
        if (!(cpu_mask & CPU_64BIT_MASK)) return STATUS_INVALID_IMAGE_WIN_64;
        switch (nt.FileHeader.Machine)
        {
        case IMAGE_FILE_MACHINE_AMD64:
            mapping->image.cpu = CPU_x86_64;
            if (cpu_mask & (CPU_FLAG(CPU_x86) | CPU_FLAG(CPU_x86_64))) break;
            return STATUS_INVALID_IMAGE_FORMAT;
        case IMAGE_FILE_MACHINE_ARM64:
            mapping->image.cpu = CPU_ARM64;
            if (cpu_mask & (CPU_FLAG(CPU_ARM) | CPU_FLAG(CPU_ARM64))) break;
            return STATUS_INVALID_IMAGE_FORMAT;
        default:
            return STATUS_INVALID_IMAGE_FORMAT;
        }
        clr_va = nt.opt.hdr64.DataDirectory[IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR].VirtualAddress;
        clr_size = nt.opt.hdr64.DataDirectory[IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR].Size;

        mapping->image.base           = nt.opt.hdr64.ImageBase;
        mapping->image.entry_point    = nt.opt.hdr64.ImageBase + nt.opt.hdr64.AddressOfEntryPoint;
        mapping->image.map_size       = ROUND_SIZE( nt.opt.hdr64.SizeOfImage );
        mapping->image.stack_size     = nt.opt.hdr64.SizeOfStackReserve;
        mapping->image.stack_commit   = nt.opt.hdr64.SizeOfStackCommit;
        mapping->image.subsystem      = nt.opt.hdr64.Subsystem;
        mapping->image.subsystem_low  = nt.opt.hdr64.MinorSubsystemVersion;
        mapping->image.subsystem_high = nt.opt.hdr64.MajorSubsystemVersion;
        mapping->image.dll_charact    = nt.opt.hdr64.DllCharacteristics;
        mapping->image.contains_code  = (nt.opt.hdr64.SizeOfCode ||
                                         nt.opt.hdr64.AddressOfEntryPoint ||
                                         nt.opt.hdr64.SectionAlignment & page_mask);
        mapping->image.header_size    = nt.opt.hdr64.SizeOfHeaders;
        mapping->image.checksum       = nt.opt.hdr64.CheckSum;
        mapping->image.image_flags    = 0;
        if (nt.opt.hdr64.SectionAlignment & page_mask)
            mapping->image.image_flags |= IMAGE_FLAGS_ImageMappedFlat;
        if ((nt.opt.hdr64.DllCharacteristics & IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE) &&
            mapping->image.contains_code && !(clr_va && clr_size))
            mapping->image.image_flags |= IMAGE_FLAGS_ImageDynamicallyRelocated;
        break;

    default:
        return STATUS_INVALID_IMAGE_FORMAT;
    }

    mapping->image.image_charact = nt.FileHeader.Characteristics;
    mapping->image.machine       = nt.FileHeader.Machine;
    mapping->image.zerobits      = 0; /* FIXME */
    mapping->image.gp            = 0; /* FIXME */
    mapping->image.file_size     = file_size;
    mapping->image.loader_flags  = clr_va && clr_size;
    mapping->image.__pad         = 0;
    /* detect Wine builtin / placeholder DLLs by the signature after the DOS header */
    if (mz_size == sizeof(mz) && !memcmp( mz.buffer, builtin_signature, sizeof(builtin_signature) ))
        mapping->image.image_flags |= IMAGE_FLAGS_WineBuiltin;
    else if (mz_size == sizeof(mz) && !memcmp( mz.buffer, fakedll_signature, sizeof(fakedll_signature) ))
        mapping->image.image_flags |= IMAGE_FLAGS_WineFakeDll;

    /* load the section headers */

    pos += sizeof(nt.Signature) + sizeof(nt.FileHeader) + nt.FileHeader.SizeOfOptionalHeader;
    if (nt.FileHeader.NumberOfSections > ARRAY_SIZE( sec )) return STATUS_INVALID_IMAGE_FORMAT;
    size = sizeof(*sec) * nt.FileHeader.NumberOfSections;
    if (!mapping->size) mapping->size = mapping->image.map_size;
    else if (mapping->size > mapping->image.map_size) return STATUS_SECTION_TOO_BIG;
    if (pos + size > mapping->image.map_size) return STATUS_INVALID_FILE_FOR_SECTION;
    if (pos + size > mapping->image.header_size) mapping->image.header_size = pos + size;
    if (pread( unix_fd, sec, size, pos ) != size) return STATUS_INVALID_FILE_FOR_SECTION;

    /* an executable section anywhere also counts as code */
    for (i = 0; i < nt.FileHeader.NumberOfSections && !mapping->image.contains_code; i++)
        if (sec[i].Characteristics & IMAGE_SCN_MEM_EXECUTE) mapping->image.contains_code = 1;

    if (load_clr_header( &clr, clr_va, clr_size, unix_fd, sec, nt.FileHeader.NumberOfSections ) &&
        (clr.Flags & COMIMAGE_FLAGS_ILONLY))
    {
        /* IL-only image: may be runnable on a 64-bit CPU even if built 32-bit */
        mapping->image.image_flags |= IMAGE_FLAGS_ComPlusILOnly;
        if (nt.opt.hdr32.Magic == IMAGE_NT_OPTIONAL_HDR32_MAGIC &&
            !(clr.Flags & COMIMAGE_FLAGS_32BITREQUIRED))
        {
            mapping->image.image_flags |= IMAGE_FLAGS_ComPlusNativeReady;
            if (cpu_mask & CPU_FLAG(CPU_x86_64)) mapping->image.cpu = CPU_x86_64;
            else if (cpu_mask & CPU_FLAG(CPU_ARM64)) mapping->image.cpu = CPU_ARM64;
        }
    }

    if (!build_shared_mapping( mapping, unix_fd, sec, nt.FileHeader.NumberOfSections ))
        return STATUS_INVALID_FILE_FOR_SECTION;

    return STATUS_SUCCESS;
}
747 static struct ranges *create_ranges(void)
749 struct ranges *ranges = alloc_object( &ranges_ops );
751 if (!ranges) return NULL;
752 ranges->count = 0;
753 ranges->max = 8;
754 if (!(ranges->ranges = mem_alloc( ranges->max * sizeof(*ranges->ranges) )))
756 release_object( ranges );
757 return NULL;
759 return ranges;
/* validate the SEC_* creation flags against the presence of a file handle
 * and return the normalized flags, or 0 on error (with the error set) */
static unsigned int get_mapping_flags( obj_handle_t handle, unsigned int flags )
{
    switch (flags & (SEC_IMAGE | SEC_RESERVE | SEC_COMMIT | SEC_FILE))
    {
    case SEC_IMAGE:
        /* image mappings need a file and exclude caching-related flags */
        if (flags & (SEC_WRITECOMBINE | SEC_LARGE_PAGES)) break;
        if (handle) return SEC_FILE | SEC_IMAGE;
        set_error( STATUS_INVALID_FILE_FOR_SECTION );
        return 0;
    case SEC_COMMIT:
        if (!handle) return flags;
        /* fall through */
    case SEC_RESERVE:
        if (flags & SEC_LARGE_PAGES) break;
        /* file-backed mappings keep only the caching flags */
        if (handle) return SEC_FILE | (flags & (SEC_NOCACHE | SEC_WRITECOMBINE));
        return flags;
    }
    set_error( STATUS_INVALID_PARAMETER );
    return 0;
}
/* create (or open an existing) named mapping object */
/* handle is an optional file handle backing the mapping; when 0 an anonymous
 * temp file is created instead; returns a new reference or NULL on error */
static struct object *create_mapping( struct object *root, const struct unicode_str *name,
                                      unsigned int attr, mem_size_t size, unsigned int flags,
                                      obj_handle_t handle, unsigned int file_access,
                                      const struct security_descriptor *sd )
{
    struct mapping *mapping;
    struct file *file;
    struct fd *fd;
    int unix_fd;
    struct stat st;

    if (!page_mask) page_mask = sysconf( _SC_PAGESIZE ) - 1;

    if (!(mapping = create_named_object( root, &mapping_ops, name, attr, sd )))
        return NULL;
    if (get_error() == STATUS_OBJECT_NAME_EXISTS)
        return &mapping->obj;  /* Nothing else to do */

    mapping->size      = size;
    mapping->fd        = NULL;
    mapping->shared    = NULL;
    mapping->committed = NULL;

    if (!(mapping->flags = get_mapping_flags( handle, flags ))) goto error;

    if (handle)
    {
        const unsigned int sharing = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE;
        unsigned int mapping_access = FILE_MAPPING_ACCESS;

        if (!(file = get_file_obj( current->process, handle, file_access ))) goto error;
        fd = get_obj_fd( (struct object *)file );

        /* file sharing rules for mappings are different, so we use magic access rights */
        if (flags & SEC_IMAGE) mapping_access |= FILE_MAPPING_IMAGE;
        else if (file_access & FILE_WRITE_DATA) mapping_access |= FILE_MAPPING_WRITE;

        if (!(mapping->fd = get_fd_object_for_mapping( fd, mapping_access, sharing )))
        {
            /* no existing mapping fd for this file: duplicate one */
            mapping->fd = dup_fd_object( fd, mapping_access, sharing, FILE_SYNCHRONOUS_IO_NONALERT );
            if (mapping->fd) set_fd_user( mapping->fd, &mapping_fd_ops, NULL );
        }
        release_object( file );
        release_object( fd );
        if (!mapping->fd) goto error;

        if ((unix_fd = get_unix_fd( mapping->fd )) == -1) goto error;
        if (fstat( unix_fd, &st ) == -1)
        {
            file_set_error();
            goto error;
        }
        if (flags & SEC_IMAGE)
        {
            /* PE image: parse the headers; errors come back as NT status codes */
            unsigned int err = get_image_params( mapping, st.st_size, unix_fd );
            if (!err) return &mapping->obj;
            set_error( err );
            goto error;
        }
        if (!mapping->size)
        {
            /* no explicit size: use the file size, which must not be zero */
            if (!(mapping->size = st.st_size))
            {
                set_error( STATUS_MAPPED_FILE_SIZE_ZERO );
                goto error;
            }
        }
        else if (st.st_size < mapping->size)
        {
            /* mapping larger than the file: only allowed with write access,
             * in which case the file is grown to match */
            if (!(file_access & FILE_WRITE_DATA))
            {
                set_error( STATUS_SECTION_TOO_BIG );
                goto error;
            }
            if (!grow_file( unix_fd, mapping->size )) goto error;
        }
    }
    else  /* Anonymous mapping (no associated file) */
    {
        if (!mapping->size)
        {
            set_error( STATUS_INVALID_PARAMETER );
            goto error;
        }
        if ((flags & SEC_RESERVE) && !(mapping->committed = create_ranges())) goto error;
        mapping->size = (mapping->size + page_mask) & ~((mem_size_t)page_mask);
        if ((unix_fd = create_temp_file( mapping->size )) == -1) goto error;
        if (!(mapping->fd = create_anonymous_fd( &mapping_fd_ops, unix_fd, &mapping->obj,
                                                 FILE_SYNCHRONOUS_IO_NONALERT ))) goto error;
        allow_fd_caching( mapping->fd );
    }
    return &mapping->obj;

 error:
    release_object( mapping );
    return NULL;
}
882 struct mapping *get_mapping_obj( struct process *process, obj_handle_t handle, unsigned int access )
884 return (struct mapping *)get_handle_obj( process, handle, access, &mapping_ops );
887 /* open a new file for the file descriptor backing the mapping */
888 struct file *get_mapping_file( struct process *process, client_ptr_t base,
889 unsigned int access, unsigned int sharing )
891 struct memory_view *view = find_mapped_view( process, base );
893 if (!view || !view->fd) return NULL;
894 return create_file_for_fd_obj( view->fd, access, sharing );
897 static void mapping_dump( struct object *obj, int verbose )
899 struct mapping *mapping = (struct mapping *)obj;
900 assert( obj->ops == &mapping_ops );
901 fprintf( stderr, "Mapping size=%08x%08x flags=%08x fd=%p shared=%p\n",
902 (unsigned int)(mapping->size >> 32), (unsigned int)mapping->size,
903 mapping->flags, mapping->fd, mapping->shared );
/* return the object type used for access checks: "Section" */
static struct object_type *mapping_get_type( struct object *obj )
{
    static const WCHAR name[] = {'S','e','c','t','i','o','n'};
    static const struct unicode_str str = { name, sizeof(name) };
    return get_object_type( &str );
}
913 static struct fd *mapping_get_fd( struct object *obj )
915 struct mapping *mapping = (struct mapping *)obj;
916 return (struct fd *)grab_object( mapping->fd );
919 static unsigned int mapping_map_access( struct object *obj, unsigned int access )
921 if (access & GENERIC_READ) access |= STANDARD_RIGHTS_READ | SECTION_QUERY | SECTION_MAP_READ;
922 if (access & GENERIC_WRITE) access |= STANDARD_RIGHTS_WRITE | SECTION_MAP_WRITE;
923 if (access & GENERIC_EXECUTE) access |= STANDARD_RIGHTS_EXECUTE | SECTION_MAP_EXECUTE;
924 if (access & GENERIC_ALL) access |= SECTION_ALL_ACCESS;
925 return access & ~(GENERIC_READ | GENERIC_WRITE | GENERIC_EXECUTE | GENERIC_ALL);
928 static void mapping_destroy( struct object *obj )
930 struct mapping *mapping = (struct mapping *)obj;
931 assert( obj->ops == &mapping_ops );
932 if (mapping->fd) release_object( mapping->fd );
933 if (mapping->committed) release_object( mapping->committed );
934 if (mapping->shared) release_object( mapping->shared );
/* mappings are always backed by a regular file (real or anonymous temp) */
static enum server_fd_type mapping_get_fd_type( struct fd *fd )
{
    return FD_TYPE_FILE;
}
/* return the system page size, initializing page_mask on first use */
int get_page_size(void)
{
    if (!page_mask) page_mask = sysconf( _SC_PAGESIZE ) - 1;
    return page_mask + 1;
}
static KSHARED_USER_DATA *kusd = MAP_FAILED;  /* shared user data page; MAP_FAILED until mapped */
static const timeout_t kusd_timeout = 16 * -TICKS_PER_SEC / 1000;  /* refresh period: 16 ms, relative */
/* timer callback: refresh the time fields of the shared user data page
 * and re-arm itself; clients read these fields lock-free, so the store
 * order (High2Time, LowPart, High1Time) is significant */
static void kusd_set_current_time( void *private )
{
    ULONG system_time_high = current_time >> 32;
    ULONG system_time_low = current_time & 0xffffffff;
    ULONG interrupt_time_high = monotonic_time >> 32;
    ULONG interrupt_time_low = monotonic_time & 0xffffffff;
    ULONG tick_count_high = (monotonic_time * 1000 / TICKS_PER_SEC) >> 32;
    ULONG tick_count_low = (monotonic_time * 1000 / TICKS_PER_SEC) & 0xffffffff;
    KSHARED_USER_DATA *ptr = kusd;

    /* re-arm the periodic refresh */
    add_timeout_user( kusd_timeout, kusd_set_current_time, NULL );

    /* on X86 there should be total store order guarantees, so volatile is enough
     * to ensure the stores aren't reordered by the compiler, and then they will
     * always be seen in-order from other CPUs. On other archs, we need atomic
     * intrinsics to guarantee that. */
#if defined(__i386__) || defined(__x86_64__)
    ptr->SystemTime.High2Time = system_time_high;
    ptr->SystemTime.LowPart   = system_time_low;
    ptr->SystemTime.High1Time = system_time_high;

    ptr->InterruptTime.High2Time = interrupt_time_high;
    ptr->InterruptTime.LowPart   = interrupt_time_low;
    ptr->InterruptTime.High1Time = interrupt_time_high;

    ptr->TickCount.High2Time = tick_count_high;
    ptr->TickCount.LowPart   = tick_count_low;
    ptr->TickCount.High1Time = tick_count_high;
    *(volatile ULONG *)&ptr->TickCountLowDeprecated = tick_count_low;
#else
    __atomic_store_n(&ptr->SystemTime.High2Time, system_time_high, __ATOMIC_SEQ_CST);
    __atomic_store_n(&ptr->SystemTime.LowPart, system_time_low, __ATOMIC_SEQ_CST);
    __atomic_store_n(&ptr->SystemTime.High1Time, system_time_high, __ATOMIC_SEQ_CST);

    __atomic_store_n(&ptr->InterruptTime.High2Time, interrupt_time_high, __ATOMIC_SEQ_CST);
    __atomic_store_n(&ptr->InterruptTime.LowPart, interrupt_time_low, __ATOMIC_SEQ_CST);
    __atomic_store_n(&ptr->InterruptTime.High1Time, interrupt_time_high, __ATOMIC_SEQ_CST);

    __atomic_store_n(&ptr->TickCount.High2Time, tick_count_high, __ATOMIC_SEQ_CST);
    __atomic_store_n(&ptr->TickCount.LowPart, tick_count_low, __ATOMIC_SEQ_CST);
    __atomic_store_n(&ptr->TickCount.High1Time, tick_count_high, __ATOMIC_SEQ_CST);
    __atomic_store_n(&ptr->TickCountLowDeprecated, tick_count_low, __ATOMIC_SEQ_CST);
#endif
}
996 void init_kusd_mapping( struct mapping *mapping )
998 if (kusd != MAP_FAILED) return;
1000 grab_object( mapping );
1001 make_object_static( &mapping->obj );
1003 if ((kusd = mmap( NULL, mapping->size, PROT_WRITE, MAP_SHARED,
1004 get_unix_fd( mapping->fd ), 0 )) == MAP_FAILED)
1005 set_error( STATUS_NO_MEMORY );
1006 else
1007 kusd_set_current_time( NULL );
1010 /* create a file mapping */
1011 DECL_HANDLER(create_mapping)
1013 struct object *root, *obj;
1014 struct unicode_str name;
1015 const struct security_descriptor *sd;
1016 const struct object_attributes *objattr = get_req_object_attributes( &sd, &name, &root );
1018 if (!objattr) return;
1020 if ((obj = create_mapping( root, &name, objattr->attributes, req->size, req->flags,
1021 req->file_handle, req->file_access, sd )))
1023 if (get_error() == STATUS_OBJECT_NAME_EXISTS)
1024 reply->handle = alloc_handle( current->process, obj, req->access, objattr->attributes );
1025 else
1026 reply->handle = alloc_handle_no_access_check( current->process, obj,
1027 req->access, objattr->attributes );
1028 release_object( obj );
1031 if (root) release_object( root );
1034 /* open a handle to a mapping */
1035 DECL_HANDLER(open_mapping)
1037 struct unicode_str name = get_req_unicode_str();
1039 reply->handle = open_object( current->process, req->rootdir, req->access,
1040 &mapping_ops, &name, req->attributes );
1043 /* get a mapping information */
1044 DECL_HANDLER(get_mapping_info)
1046 struct mapping *mapping;
1048 if (!(mapping = get_mapping_obj( current->process, req->handle, req->access ))) return;
1050 reply->size = mapping->size;
1051 reply->flags = mapping->flags;
1053 if (mapping->flags & SEC_IMAGE)
1054 set_reply_data( &mapping->image, min( sizeof(mapping->image), get_reply_max_size() ));
1056 if (!(req->access & (SECTION_MAP_READ | SECTION_MAP_WRITE))) /* query only */
1058 release_object( mapping );
1059 return;
1062 if (mapping->shared)
1063 reply->shared_file = alloc_handle( current->process, mapping->shared->file,
1064 GENERIC_READ|GENERIC_WRITE, 0 );
1065 release_object( mapping );
/* add a memory view in the current process */
DECL_HANDLER(map_view)
{
    struct mapping *mapping = NULL;
    struct memory_view *view;

    /* reject empty, misaligned or address-wrapping target ranges */
    if (!req->size || (req->base & page_mask) || req->base + req->size < req->base) /* overflow */
    {
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }

    /* make sure we don't already have an overlapping view */
    LIST_FOR_EACH_ENTRY( view, &current->process->views, struct memory_view, entry )
    {
        if (view->base + view->size <= req->base) continue;  /* ends before us */
        if (view->base >= req->base + req->size) continue;   /* starts after us */
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }

    if (!(mapping = get_mapping_obj( current->process, req->mapping, req->access ))) return;

    if (mapping->flags & SEC_IMAGE)
    {
        /* image sections are mapped whole from offset 0, up to the image map size */
        if (req->start || req->size > mapping->image.map_size)
        {
            set_error( STATUS_INVALID_PARAMETER );
            goto done;
        }
    }
    /* data sections: offset must lie inside the mapping, the offset+size sum
     * must not wrap, and the end must stay within the page-rounded size */
    else if (req->start >= mapping->size ||
             req->start + req->size < req->start ||
             req->start + req->size > ((mapping->size + page_mask) & ~(mem_size_t)page_mask))
    {
        set_error( STATUS_INVALID_PARAMETER );
        goto done;
    }

    if ((view = mem_alloc( sizeof(*view) )))
    {
        view->base      = req->base;
        view->size      = req->size;
        view->start     = req->start;
        view->flags     = mapping->flags;
        /* only keep the fd when it cannot be removed from under us */
        view->fd        = !is_fd_removable( mapping->fd ) ? (struct fd *)grab_object( mapping->fd ) : NULL;
        view->committed = mapping->committed ? (struct ranges *)grab_object( mapping->committed ) : NULL;
        view->shared    = mapping->shared ? (struct shared_map *)grab_object( mapping->shared ) : NULL;
        list_add_tail( &current->process->views, &view->entry );
    }

done:
    release_object( mapping );
}
1123 /* unmap a memory view from the current process */
1124 DECL_HANDLER(unmap_view)
1126 struct memory_view *view = find_mapped_view( current->process, req->base );
1128 if (view) free_memory_view( view );
1131 /* get a range of committed pages in a file mapping */
1132 DECL_HANDLER(get_mapping_committed_range)
1134 struct memory_view *view = find_mapped_view( current->process, req->base );
1136 if (view) reply->committed = find_committed_range( view, req->offset, &reply->size );
1139 /* add a range to the committed pages in a file mapping */
1140 DECL_HANDLER(add_mapping_committed_range)
1142 struct memory_view *view = find_mapped_view( current->process, req->base );
1144 if (view) add_committed_range( view, req->offset, req->offset + req->size );
1147 /* check if two memory maps are for the same file */
1148 DECL_HANDLER(is_same_mapping)
1150 struct memory_view *view1 = find_mapped_view( current->process, req->base1 );
1151 struct memory_view *view2 = find_mapped_view( current->process, req->base2 );
1153 if (!view1 || !view2) return;
1154 if (!view1->fd || !view2->fd ||
1155 !(view1->flags & SEC_IMAGE) || !(view2->flags & SEC_IMAGE) ||
1156 !is_same_file_fd( view1->fd, view2->fd ))
1157 set_error( STATUS_NOT_SAME_DEVICE );