/*
 * Server-side file mapping management
 *
 * Copyright (C) 1999 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include "config.h"
#include "wine/port.h"

#include <assert.h>
#include <fcntl.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#ifdef HAVE_SYS_MMAN_H
# include <sys/mman.h>
#endif
#include <unistd.h>

#include "ntstatus.h"
#define WIN32_NO_STATUS
#include "windef.h"
#include "winternl.h"
#include "ddk/wdm.h"

#include "file.h"
#include "handle.h"
#include "thread.h"
#include "process.h"
#include "request.h"
#include "security.h"

/* list of memory ranges, used to store committed info */
struct ranges
{
    struct object   obj;         /* object header */
    unsigned int    count;       /* number of used ranges */
    unsigned int    max;         /* number of allocated ranges */
    struct range
    {
        file_pos_t  start;
        file_pos_t  end;
    } *ranges;
};

static void ranges_dump( struct object *obj, int verbose );
static void ranges_destroy( struct object *obj );

static const struct object_ops ranges_ops =
{
    sizeof(struct ranges),     /* size */
    ranges_dump,               /* dump */
    no_get_type,               /* get_type */
    no_add_queue,              /* add_queue */
    NULL,                      /* remove_queue */
    NULL,                      /* signaled */
    NULL,                      /* satisfied */
    no_signal,                 /* signal */
    no_get_fd,                 /* get_fd */
    no_map_access,             /* map_access */
    default_get_sd,            /* get_sd */
    default_set_sd,            /* set_sd */
    no_get_full_name,          /* get_full_name */
    no_lookup_name,            /* lookup_name */
    no_link_name,              /* link_name */
    NULL,                      /* unlink_name */
    no_open_file,              /* open_file */
    no_kernel_obj_list,        /* get_kernel_obj_list */
    no_close_handle,           /* close_handle */
    ranges_destroy             /* destroy */
};

/* file backing the shared sections of a PE image mapping */
struct shared_map
{
    struct object   obj;             /* object header */
    struct fd      *fd;              /* file descriptor of the mapped PE file */
    struct file    *file;            /* temp file holding the shared data */
    struct list     entry;           /* entry in global shared maps list */
};

static void shared_map_dump( struct object *obj, int verbose );
static void shared_map_destroy( struct object *obj );

static const struct object_ops shared_map_ops =
{
    sizeof(struct shared_map), /* size */
    shared_map_dump,           /* dump */
    no_get_type,               /* get_type */
    no_add_queue,              /* add_queue */
    NULL,                      /* remove_queue */
    NULL,                      /* signaled */
    NULL,                      /* satisfied */
    no_signal,                 /* signal */
    no_get_fd,                 /* get_fd */
    no_map_access,             /* map_access */
    default_get_sd,            /* get_sd */
    default_set_sd,            /* set_sd */
    no_get_full_name,          /* get_full_name */
    no_lookup_name,            /* lookup_name */
    no_link_name,              /* link_name */
    NULL,                      /* unlink_name */
    no_open_file,              /* open_file */
    no_kernel_obj_list,        /* get_kernel_obj_list */
    no_close_handle,           /* close_handle */
    shared_map_destroy         /* destroy */
};

static struct list shared_map_list = LIST_INIT( shared_map_list );

/* memory view mapped in client address space */
struct memory_view
{
    struct list     entry;           /* entry in per-process view list */
    struct fd      *fd;              /* fd for mapped file */
    struct ranges  *committed;       /* list of committed ranges in this mapping */
    struct shared_map *shared;       /* temp file for shared PE mapping */
    pe_image_info_t image;           /* image info (for PE image mapping) */
    unsigned int    flags;           /* SEC_* flags */
    client_ptr_t    base;            /* view base address (in process addr space) */
    mem_size_t      size;            /* view size */
    file_pos_t      start;           /* start offset in mapping */
};

struct mapping
{
    struct object   obj;             /* object header */
    mem_size_t      size;            /* mapping size */
    unsigned int    flags;           /* SEC_* flags */
    struct fd      *fd;              /* fd for mapped file */
    pe_image_info_t image;           /* image info (for PE image mapping) */
    struct ranges  *committed;       /* list of committed ranges in this mapping */
    struct shared_map *shared;       /* temp file for shared PE mapping */
};

static void mapping_dump( struct object *obj, int verbose );
static struct object_type *mapping_get_type( struct object *obj );
static struct fd *mapping_get_fd( struct object *obj );
static unsigned int mapping_map_access( struct object *obj, unsigned int access );
static void mapping_destroy( struct object *obj );
static enum server_fd_type mapping_get_fd_type( struct fd *fd );

static const struct object_ops mapping_ops =
{
    sizeof(struct mapping),      /* size */
    mapping_dump,                /* dump */
    mapping_get_type,            /* get_type */
    no_add_queue,                /* add_queue */
    NULL,                        /* remove_queue */
    NULL,                        /* signaled */
    NULL,                        /* satisfied */
    no_signal,                   /* signal */
    mapping_get_fd,              /* get_fd */
    mapping_map_access,          /* map_access */
    default_get_sd,              /* get_sd */
    default_set_sd,              /* set_sd */
    default_get_full_name,       /* get_full_name */
    no_lookup_name,              /* lookup_name */
    directory_link_name,         /* link_name */
    default_unlink_name,         /* unlink_name */
    no_open_file,                /* open_file */
    no_kernel_obj_list,          /* get_kernel_obj_list */
    fd_close_handle,             /* close_handle */
    mapping_destroy              /* destroy */
};

static const struct fd_ops mapping_fd_ops =
{
    default_fd_get_poll_events,   /* get_poll_events */
    default_poll_event,           /* poll_event */
    mapping_get_fd_type,          /* get_fd_type */
    no_fd_read,                   /* read */
    no_fd_write,                  /* write */
    no_fd_flush,                  /* flush */
    no_fd_get_file_info,          /* get_file_info */
    no_fd_get_volume_info,        /* get_volume_info */
    no_fd_ioctl,                  /* ioctl */
    no_fd_queue_async,            /* queue_async */
    default_fd_reselect_async     /* reselect_async */
};

static size_t page_mask;

#define ROUND_SIZE(size)  (((size) + page_mask) & ~page_mask)
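
/* page_mask is filled in lazily from sysconf(_SC_PAGESIZE) (see get_page_size() and
 * create_mapping()); ROUND_SIZE() rounds a byte count up to the next page boundary. */
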
static void ranges_dump( struct object *obj, int verbose )
{
    struct ranges *ranges = (struct ranges *)obj;
    fprintf( stderr, "Memory ranges count=%u\n", ranges->count );
}

static void ranges_destroy( struct object *obj )
{
    struct ranges *ranges = (struct ranges *)obj;
    free( ranges->ranges );
}

static void shared_map_dump( struct object *obj, int verbose )
{
    struct shared_map *shared = (struct shared_map *)obj;
    fprintf( stderr, "Shared mapping fd=%p file=%p\n", shared->fd, shared->file );
}

static void shared_map_destroy( struct object *obj )
{
    struct shared_map *shared = (struct shared_map *)obj;

    release_object( shared->fd );
    release_object( shared->file );
    list_remove( &shared->entry );
}

/* extend a file beyond the current end of file */
static int grow_file( int unix_fd, file_pos_t new_size )
{
    static const char zero;
    off_t size = new_size;

    if (sizeof(new_size) > sizeof(size) && size != new_size)
    {
        set_error( STATUS_INVALID_PARAMETER );
        return 0;
    }
    /* extend the file one byte beyond the requested size and then truncate it */
    /* this should work around ftruncate implementations that can't extend files */
    if (pwrite( unix_fd, &zero, 1, size ) != -1)
    {
        ftruncate( unix_fd, size );
        return 1;
    }
    file_set_error();
    return 0;
}

/* check if the current directory allows exec mappings */
static int check_current_dir_for_exec(void)
{
    int fd;
    char tmpfn[] = "anonmap.XXXXXX";
    void *ret = MAP_FAILED;

    fd = mkstemps( tmpfn, 0 );
    if (fd == -1) return 0;
    if (grow_file( fd, 1 ))
    {
        ret = mmap( NULL, get_page_size(), PROT_READ | PROT_EXEC, MAP_PRIVATE, fd, 0 );
        if (ret != MAP_FAILED) munmap( ret, get_page_size() );
    }
    close( fd );
    unlink( tmpfn );
    return (ret != MAP_FAILED);
}
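
/* The probe above creates a throwaway file and tries to mmap it PROT_EXEC; if that
 * fails, the directory is presumably mounted noexec and cannot be used to back
 * anonymous mappings. */
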
/* create a temp file for anonymous mappings */
static int create_temp_file( file_pos_t size )
{
    static int temp_dir_fd = -1;
    char tmpfn[] = "anonmap.XXXXXX";
    int fd;

    if (temp_dir_fd == -1)
    {
        temp_dir_fd = server_dir_fd;
        if (!check_current_dir_for_exec())
        {
            /* the server dir is noexec, try the config dir instead */
            fchdir( config_dir_fd );
            if (check_current_dir_for_exec())
                temp_dir_fd = config_dir_fd;
            else  /* neither works, fall back to server dir */
                fchdir( server_dir_fd );
        }
    }
    else if (temp_dir_fd != server_dir_fd) fchdir( temp_dir_fd );

    fd = mkstemps( tmpfn, 0 );
    if (fd != -1)
    {
        if (!grow_file( fd, size ))
        {
            close( fd );
            fd = -1;
        }
        unlink( tmpfn );
    }
    else file_set_error();

    if (temp_dir_fd != server_dir_fd) fchdir( server_dir_fd );
    return fd;
}
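
/* Note: the directory used for anonymous mapping files is probed only once and the
 * result is cached in temp_dir_fd for all later calls. */
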
/* find a memory view from its base address */
static struct memory_view *find_mapped_view( struct process *process, client_ptr_t base )
{
    struct memory_view *view;

    LIST_FOR_EACH_ENTRY( view, &process->views, struct memory_view, entry )
        if (view->base == base) return view;

    set_error( STATUS_NOT_MAPPED_VIEW );
    return NULL;
}

static void free_memory_view( struct memory_view *view )
{
    if (view->fd) release_object( view->fd );
    if (view->committed) release_object( view->committed );
    if (view->shared) release_object( view->shared );
    list_remove( &view->entry );
    free( view );
}

/* free all mapped views at process exit */
void free_mapped_views( struct process *process )
{
    struct list *ptr;

    while ((ptr = list_head( &process->views )))
        free_memory_view( LIST_ENTRY( ptr, struct memory_view, entry ));
}

/* find the shared PE mapping for a given mapping */
static struct shared_map *get_shared_file( struct fd *fd )
{
    struct shared_map *ptr;

    LIST_FOR_EACH_ENTRY( ptr, &shared_map_list, struct shared_map, entry )
        if (is_same_file_fd( ptr->fd, fd ))
            return (struct shared_map *)grab_object( ptr );
    return NULL;
}

/* return the size of the memory mapping and file range of a given section */
static inline void get_section_sizes( const IMAGE_SECTION_HEADER *sec, size_t *map_size,
                                      off_t *file_start, size_t *file_size )
{
    static const unsigned int sector_align = 0x1ff;

    if (!sec->Misc.VirtualSize) *map_size = ROUND_SIZE( sec->SizeOfRawData );
    else *map_size = ROUND_SIZE( sec->Misc.VirtualSize );

    *file_start = sec->PointerToRawData & ~sector_align;
    *file_size = (sec->SizeOfRawData + (sec->PointerToRawData & sector_align) + sector_align) & ~sector_align;
    if (*file_size > *map_size) *file_size = *map_size;
}
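
/* sector_align = 0x1ff corresponds to 512-byte sector granularity: raw data offsets
 * are rounded down and raw data sizes rounded up to whole sectors, then clamped to
 * the in-memory size of the section. */
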
/* add a range to the committed list */
static void add_committed_range( struct memory_view *view, file_pos_t start, file_pos_t end )
{
    unsigned int i, j;
    struct ranges *committed = view->committed;
    struct range *ranges;

    if ((start & page_mask) || (end & page_mask) ||
        start >= view->size || end >= view->size ||
        start >= end)
    {
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }

    if (!committed) return;  /* everything committed already */

    start += view->start;
    end += view->start;

    for (i = 0, ranges = committed->ranges; i < committed->count; i++)
    {
        if (ranges[i].start > end) break;
        if (ranges[i].end < start) continue;
        if (ranges[i].start > start) ranges[i].start = start;   /* extend downwards */
        if (ranges[i].end < end)  /* extend upwards and maybe merge with next */
        {
            for (j = i + 1; j < committed->count; j++)
            {
                if (ranges[j].start > end) break;
                if (ranges[j].end > end) end = ranges[j].end;
            }
            if (j > i + 1)
            {
                memmove( &ranges[i + 1], &ranges[j], (committed->count - j) * sizeof(*ranges) );
                committed->count -= j - (i + 1);
            }
            ranges[i].end = end;
        }
        return;
    }

    /* now add a new range */

    if (committed->count == committed->max)
    {
        unsigned int new_size = committed->max * 2;
        struct range *new_ptr = realloc( committed->ranges, new_size * sizeof(*new_ptr) );
        if (!new_ptr) return;
        committed->max = new_size;
        ranges = committed->ranges = new_ptr;
    }
    memmove( &ranges[i + 1], &ranges[i], (committed->count - i) * sizeof(*ranges) );
    ranges[i].start = start;
    ranges[i].end = end;
    committed->count++;
}
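
/* The committed->ranges array is kept sorted by start offset and non-overlapping;
 * add_committed_range() above and find_committed_range() below both rely on that
 * invariant. */
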
/* find the range containing start and return whether it's committed */
static int find_committed_range( struct memory_view *view, file_pos_t start, mem_size_t *size )
{
    unsigned int i;
    struct ranges *committed = view->committed;
    struct range *ranges;

    if ((start & page_mask) || start >= view->size)
    {
        set_error( STATUS_INVALID_PARAMETER );
        return 0;
    }
    if (!committed)  /* everything is committed */
    {
        *size = view->size - start;
        return 1;
    }
    for (i = 0, ranges = committed->ranges; i < committed->count; i++)
    {
        if (ranges[i].start > view->start + start)
        {
            *size = min( ranges[i].start, view->start + view->size ) - (view->start + start);
            return 0;
        }
        if (ranges[i].end > view->start + start)
        {
            *size = min( ranges[i].end, view->start + view->size ) - (view->start + start);
            return 1;
        }
    }
    *size = view->size - start;
    return 0;
}
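
/* find_committed_range() reports whether the page at 'start' is committed and stores
 * in *size how many bytes starting there share that commit state. */
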
/* allocate and fill the temp file for a shared PE image mapping */
static int build_shared_mapping( struct mapping *mapping, int fd,
                                 IMAGE_SECTION_HEADER *sec, unsigned int nb_sec )
{
    struct shared_map *shared;
    struct file *file;
    unsigned int i;
    mem_size_t total_size;
    size_t file_size, map_size, max_size;
    off_t shared_pos, read_pos, write_pos;
    char *buffer = NULL;
    int shared_fd;
    long toread;

    /* compute the total size of the shared mapping */

    total_size = max_size = 0;
    for (i = 0; i < nb_sec; i++)
    {
        if ((sec[i].Characteristics & IMAGE_SCN_MEM_SHARED) &&
            (sec[i].Characteristics & IMAGE_SCN_MEM_WRITE))
        {
            get_section_sizes( &sec[i], &map_size, &read_pos, &file_size );
            if (file_size > max_size) max_size = file_size;
            total_size += map_size;
        }
    }
    if (!total_size) return 1;  /* nothing to do */

    if ((mapping->shared = get_shared_file( mapping->fd ))) return 1;

    /* create a temp file for the mapping */

    if ((shared_fd = create_temp_file( total_size )) == -1) return 0;
    if (!(file = create_file_for_fd( shared_fd, FILE_GENERIC_READ|FILE_GENERIC_WRITE, 0 ))) return 0;

    if (!(buffer = malloc( max_size ))) goto error;

    /* copy the shared sections data into the temp file */

    shared_pos = 0;
    for (i = 0; i < nb_sec; i++)
    {
        if (!(sec[i].Characteristics & IMAGE_SCN_MEM_SHARED)) continue;
        if (!(sec[i].Characteristics & IMAGE_SCN_MEM_WRITE)) continue;
        get_section_sizes( &sec[i], &map_size, &read_pos, &file_size );
        write_pos = shared_pos;
        shared_pos += map_size;
        if (!sec[i].PointerToRawData || !file_size) continue;

        toread = file_size;
        while (toread)
        {
            long res = pread( fd, buffer + file_size - toread, toread, read_pos );
            if (!res && toread < 0x200)  /* partial sector at EOF is not an error */
            {
                file_size -= toread;
                break;
            }
            if (res <= 0) goto error;
            toread -= res;
            read_pos += res;
        }
        if (pwrite( shared_fd, buffer, file_size, write_pos ) != file_size) goto error;
    }

    if (!(shared = alloc_object( &shared_map_ops ))) goto error;
    shared->fd = (struct fd *)grab_object( mapping->fd );
    shared->file = file;
    list_add_head( &shared_map_list, &shared->entry );
    mapping->shared = shared;
    free( buffer );
    return 1;

 error:
    release_object( file );
    free( buffer );
    return 0;
}
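
/* The temp file built above backs the writable IMAGE_SCN_MEM_SHARED sections; because
 * it is registered in shared_map_list and found again through get_shared_file(),
 * later mappings of the same PE file reuse it instead of creating a new copy. */
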
/* load the CLR header from its section */
static int load_clr_header( IMAGE_COR20_HEADER *hdr, size_t va, size_t size, int unix_fd,
                            IMAGE_SECTION_HEADER *sec, unsigned int nb_sec )
{
    ssize_t ret;
    size_t map_size, file_size;
    off_t file_start;
    unsigned int i;

    if (!va || !size) return 0;

    for (i = 0; i < nb_sec; i++)
    {
        if (va < sec[i].VirtualAddress) continue;
        if (sec[i].Misc.VirtualSize && va - sec[i].VirtualAddress >= sec[i].Misc.VirtualSize) continue;
        get_section_sizes( &sec[i], &map_size, &file_start, &file_size );
        if (size >= map_size) continue;
        if (va - sec[i].VirtualAddress >= map_size - size) continue;
        file_size = min( file_size, map_size );
        size = min( size, sizeof(*hdr) );
        ret = pread( unix_fd, hdr, min( size, file_size ), file_start + va - sec[i].VirtualAddress );
        if (ret <= 0) break;
        if (ret < sizeof(*hdr)) memset( (char *)hdr + ret, 0, sizeof(*hdr) - ret );
        return (hdr->MajorRuntimeVersion > COR_VERSION_MAJOR_V2 ||
                (hdr->MajorRuntimeVersion == COR_VERSION_MAJOR_V2 &&
                 hdr->MinorRuntimeVersion >= COR_VERSION_MINOR));
    }
    return 0;
}
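
/* load_clr_header() succeeds only if the header could be read from its containing
 * section and reports a runtime version of at least 2.x
 * (COR_VERSION_MAJOR_V2 / COR_VERSION_MINOR). */
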
/* retrieve the mapping parameters for an executable (PE) image */
static unsigned int get_image_params( struct mapping *mapping, file_pos_t file_size, int unix_fd )
{
    static const char builtin_signature[] = "Wine builtin DLL";
    static const char fakedll_signature[] = "Wine placeholder DLL";

    IMAGE_COR20_HEADER clr;
    IMAGE_SECTION_HEADER sec[96];
    struct
    {
        IMAGE_DOS_HEADER dos;
        char buffer[32];
    } mz;
    struct
    {
        DWORD Signature;
        IMAGE_FILE_HEADER FileHeader;
        union
        {
            IMAGE_OPTIONAL_HEADER32 hdr32;
            IMAGE_OPTIONAL_HEADER64 hdr64;
        } opt;
    } nt;
    off_t pos;
    int size, opt_size;
    size_t mz_size, clr_va, clr_size;
    unsigned int i, cpu_mask = get_supported_cpu_mask();

    /* load the headers */

    if (!file_size) return STATUS_INVALID_FILE_FOR_SECTION;
    size = pread( unix_fd, &mz, sizeof(mz), 0 );
    if (size < sizeof(mz.dos)) return STATUS_INVALID_IMAGE_NOT_MZ;
    if (mz.dos.e_magic != IMAGE_DOS_SIGNATURE) return STATUS_INVALID_IMAGE_NOT_MZ;
    mz_size = size;
    pos = mz.dos.e_lfanew;

    size = pread( unix_fd, &nt, sizeof(nt), pos );
    if (size < sizeof(nt.Signature) + sizeof(nt.FileHeader)) return STATUS_INVALID_IMAGE_PROTECT;
    /* zero out Optional header in the case it's not present or partial */
    opt_size = max( nt.FileHeader.SizeOfOptionalHeader, offsetof( IMAGE_OPTIONAL_HEADER32, CheckSum ));
    size = min( size, sizeof(nt.Signature) + sizeof(nt.FileHeader) + opt_size );
    if (size < sizeof(nt)) memset( (char *)&nt + size, 0, sizeof(nt) - size );
    if (nt.Signature != IMAGE_NT_SIGNATURE)
    {
        IMAGE_OS2_HEADER *os2 = (IMAGE_OS2_HEADER *)&nt;
        if (os2->ne_magic != IMAGE_OS2_SIGNATURE) return STATUS_INVALID_IMAGE_PROTECT;
        if (os2->ne_exetyp == 2) return STATUS_INVALID_IMAGE_WIN_16;
        if (os2->ne_exetyp == 5) return STATUS_INVALID_IMAGE_PROTECT;
        return STATUS_INVALID_IMAGE_NE_FORMAT;
    }

    switch (nt.opt.hdr32.Magic)
    {
    case IMAGE_NT_OPTIONAL_HDR32_MAGIC:
        switch (nt.FileHeader.Machine)
        {
        case IMAGE_FILE_MACHINE_I386:
            mapping->image.cpu = CPU_x86;
            if (cpu_mask & (CPU_FLAG(CPU_x86) | CPU_FLAG(CPU_x86_64))) break;
            return STATUS_INVALID_IMAGE_FORMAT;
        case IMAGE_FILE_MACHINE_ARM:
        case IMAGE_FILE_MACHINE_THUMB:
        case IMAGE_FILE_MACHINE_ARMNT:
            mapping->image.cpu = CPU_ARM;
            if (cpu_mask & (CPU_FLAG(CPU_ARM) | CPU_FLAG(CPU_ARM64))) break;
            return STATUS_INVALID_IMAGE_FORMAT;
        case IMAGE_FILE_MACHINE_POWERPC:
            mapping->image.cpu = CPU_POWERPC;
            if (cpu_mask & CPU_FLAG(CPU_POWERPC)) break;
            return STATUS_INVALID_IMAGE_FORMAT;
        default:
            return STATUS_INVALID_IMAGE_FORMAT;
        }
        clr_va = nt.opt.hdr32.DataDirectory[IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR].VirtualAddress;
        clr_size = nt.opt.hdr32.DataDirectory[IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR].Size;

        mapping->image.base           = nt.opt.hdr32.ImageBase;
        mapping->image.entry_point    = nt.opt.hdr32.ImageBase + nt.opt.hdr32.AddressOfEntryPoint;
        mapping->image.map_size       = ROUND_SIZE( nt.opt.hdr32.SizeOfImage );
        mapping->image.stack_size     = nt.opt.hdr32.SizeOfStackReserve;
        mapping->image.stack_commit   = nt.opt.hdr32.SizeOfStackCommit;
        mapping->image.subsystem      = nt.opt.hdr32.Subsystem;
        mapping->image.subsystem_low  = nt.opt.hdr32.MinorSubsystemVersion;
        mapping->image.subsystem_high = nt.opt.hdr32.MajorSubsystemVersion;
        mapping->image.dll_charact    = nt.opt.hdr32.DllCharacteristics;
        mapping->image.contains_code  = (nt.opt.hdr32.SizeOfCode ||
                                         nt.opt.hdr32.AddressOfEntryPoint ||
                                         nt.opt.hdr32.SectionAlignment & page_mask);
        mapping->image.header_size    = nt.opt.hdr32.SizeOfHeaders;
        mapping->image.checksum       = nt.opt.hdr32.CheckSum;
        mapping->image.image_flags    = 0;
        if (nt.opt.hdr32.SectionAlignment & page_mask)
            mapping->image.image_flags |= IMAGE_FLAGS_ImageMappedFlat;
        if ((nt.opt.hdr32.DllCharacteristics & IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE) &&
            mapping->image.contains_code && !(clr_va && clr_size))
            mapping->image.image_flags |= IMAGE_FLAGS_ImageDynamicallyRelocated;
        break;

    case IMAGE_NT_OPTIONAL_HDR64_MAGIC:
        if (!(cpu_mask & CPU_64BIT_MASK)) return STATUS_INVALID_IMAGE_WIN_64;
        switch (nt.FileHeader.Machine)
        {
        case IMAGE_FILE_MACHINE_AMD64:
            mapping->image.cpu = CPU_x86_64;
            if (cpu_mask & (CPU_FLAG(CPU_x86) | CPU_FLAG(CPU_x86_64))) break;
            return STATUS_INVALID_IMAGE_FORMAT;
        case IMAGE_FILE_MACHINE_ARM64:
            mapping->image.cpu = CPU_ARM64;
            if (cpu_mask & (CPU_FLAG(CPU_ARM) | CPU_FLAG(CPU_ARM64))) break;
            return STATUS_INVALID_IMAGE_FORMAT;
        default:
            return STATUS_INVALID_IMAGE_FORMAT;
        }
        clr_va = nt.opt.hdr64.DataDirectory[IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR].VirtualAddress;
        clr_size = nt.opt.hdr64.DataDirectory[IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR].Size;

        mapping->image.base           = nt.opt.hdr64.ImageBase;
        mapping->image.entry_point    = nt.opt.hdr64.ImageBase + nt.opt.hdr64.AddressOfEntryPoint;
        mapping->image.map_size       = ROUND_SIZE( nt.opt.hdr64.SizeOfImage );
        mapping->image.stack_size     = nt.opt.hdr64.SizeOfStackReserve;
        mapping->image.stack_commit   = nt.opt.hdr64.SizeOfStackCommit;
        mapping->image.subsystem      = nt.opt.hdr64.Subsystem;
        mapping->image.subsystem_low  = nt.opt.hdr64.MinorSubsystemVersion;
        mapping->image.subsystem_high = nt.opt.hdr64.MajorSubsystemVersion;
        mapping->image.dll_charact    = nt.opt.hdr64.DllCharacteristics;
        mapping->image.contains_code  = (nt.opt.hdr64.SizeOfCode ||
                                         nt.opt.hdr64.AddressOfEntryPoint ||
                                         nt.opt.hdr64.SectionAlignment & page_mask);
        mapping->image.header_size    = nt.opt.hdr64.SizeOfHeaders;
        mapping->image.checksum       = nt.opt.hdr64.CheckSum;
        mapping->image.image_flags    = 0;
        if (nt.opt.hdr64.SectionAlignment & page_mask)
            mapping->image.image_flags |= IMAGE_FLAGS_ImageMappedFlat;
        if ((nt.opt.hdr64.DllCharacteristics & IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE) &&
            mapping->image.contains_code && !(clr_va && clr_size))
            mapping->image.image_flags |= IMAGE_FLAGS_ImageDynamicallyRelocated;
        break;

    default:
        return STATUS_INVALID_IMAGE_FORMAT;
    }

    mapping->image.image_charact = nt.FileHeader.Characteristics;
    mapping->image.machine       = nt.FileHeader.Machine;
    mapping->image.zerobits      = 0; /* FIXME */
    mapping->image.gp            = 0; /* FIXME */
    mapping->image.file_size     = file_size;
    mapping->image.loader_flags  = clr_va && clr_size;
    mapping->image.__pad         = 0;
    if (mz_size == sizeof(mz) && !memcmp( mz.buffer, builtin_signature, sizeof(builtin_signature) ))
        mapping->image.image_flags |= IMAGE_FLAGS_WineBuiltin;
    else if (mz_size == sizeof(mz) && !memcmp( mz.buffer, fakedll_signature, sizeof(fakedll_signature) ))
        mapping->image.image_flags |= IMAGE_FLAGS_WineFakeDll;

    /* load the section headers */

    pos += sizeof(nt.Signature) + sizeof(nt.FileHeader) + nt.FileHeader.SizeOfOptionalHeader;
    if (nt.FileHeader.NumberOfSections > ARRAY_SIZE( sec )) return STATUS_INVALID_IMAGE_FORMAT;
    size = sizeof(*sec) * nt.FileHeader.NumberOfSections;
    if (!mapping->size) mapping->size = mapping->image.map_size;
    else if (mapping->size > mapping->image.map_size) return STATUS_SECTION_TOO_BIG;
    if (pos + size > mapping->image.map_size) return STATUS_INVALID_FILE_FOR_SECTION;
    if (pos + size > mapping->image.header_size) mapping->image.header_size = pos + size;
    if (pread( unix_fd, sec, size, pos ) != size) return STATUS_INVALID_FILE_FOR_SECTION;

    for (i = 0; i < nt.FileHeader.NumberOfSections && !mapping->image.contains_code; i++)
        if (sec[i].Characteristics & IMAGE_SCN_MEM_EXECUTE) mapping->image.contains_code = 1;

    if (load_clr_header( &clr, clr_va, clr_size, unix_fd, sec, nt.FileHeader.NumberOfSections ) &&
        (clr.Flags & COMIMAGE_FLAGS_ILONLY))
    {
        mapping->image.image_flags |= IMAGE_FLAGS_ComPlusILOnly;
        if (nt.opt.hdr32.Magic == IMAGE_NT_OPTIONAL_HDR32_MAGIC &&
            !(clr.Flags & COMIMAGE_FLAGS_32BITREQUIRED))
        {
            mapping->image.image_flags |= IMAGE_FLAGS_ComPlusNativeReady;
            if (cpu_mask & CPU_FLAG(CPU_x86_64)) mapping->image.cpu = CPU_x86_64;
            else if (cpu_mask & CPU_FLAG(CPU_ARM64)) mapping->image.cpu = CPU_ARM64;
        }
    }

    if (!build_shared_mapping( mapping, unix_fd, sec, nt.FileHeader.NumberOfSections ))
        return STATUS_INVALID_FILE_FOR_SECTION;

    return STATUS_SUCCESS;
}
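
/* On success, get_image_params() has filled in mapping->image (base, entry point,
 * sizes, subsystem, CPU and image flags) and created the shared-section temp file,
 * so SEC_IMAGE mappings need no further setup in create_mapping(). */
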
static struct ranges *create_ranges(void)
{
    struct ranges *ranges = alloc_object( &ranges_ops );

    if (!ranges) return NULL;
    ranges->count = 0;
    ranges->max   = 8;
    if (!(ranges->ranges = mem_alloc( ranges->max * sizeof(*ranges->ranges) )))
    {
        release_object( ranges );
        return NULL;
    }
    return ranges;
}

static unsigned int get_mapping_flags( obj_handle_t handle, unsigned int flags )
{
    switch (flags & (SEC_IMAGE | SEC_RESERVE | SEC_COMMIT | SEC_FILE))
    {
    case SEC_IMAGE:
        if (flags & (SEC_WRITECOMBINE | SEC_LARGE_PAGES)) break;
        if (handle) return SEC_FILE | SEC_IMAGE;
        set_error( STATUS_INVALID_FILE_FOR_SECTION );
        return 0;
    case SEC_COMMIT:
        if (!handle) return flags;
        /* fall through */
    case SEC_RESERVE:
        if (flags & SEC_LARGE_PAGES) break;
        if (handle) return SEC_FILE | (flags & (SEC_NOCACHE | SEC_WRITECOMBINE));
        return flags;
    }
    set_error( STATUS_INVALID_PARAMETER );
    return 0;
}
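
/* Only SEC_IMAGE (with a file handle), SEC_COMMIT and SEC_RESERVE combinations are
 * accepted here; SEC_LARGE_PAGES and, for images, SEC_WRITECOMBINE are rejected. */
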
static struct mapping *create_mapping( struct object *root, const struct unicode_str *name,
                                       unsigned int attr, mem_size_t size, unsigned int flags,
                                       obj_handle_t handle, unsigned int file_access,
                                       const struct security_descriptor *sd )
{
    struct mapping *mapping;
    struct file *file;
    struct fd *fd;
    int unix_fd;
    struct stat st;

    if (!page_mask) page_mask = sysconf( _SC_PAGESIZE ) - 1;

    if (!(mapping = create_named_object( root, &mapping_ops, name, attr, sd )))
        return NULL;
    if (get_error() == STATUS_OBJECT_NAME_EXISTS)
        return mapping;  /* Nothing else to do */

    mapping->size      = size;
    mapping->fd        = NULL;
    mapping->shared    = NULL;
    mapping->committed = NULL;

    if (!(mapping->flags = get_mapping_flags( handle, flags ))) goto error;

    if (handle)
    {
        const unsigned int sharing = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE;
        unsigned int mapping_access = FILE_MAPPING_ACCESS;

        if (!(file = get_file_obj( current->process, handle, file_access ))) goto error;
        fd = get_obj_fd( (struct object *)file );

        /* file sharing rules for mappings are different, so we use magic access rights */
        if (flags & SEC_IMAGE) mapping_access |= FILE_MAPPING_IMAGE;
        else if (file_access & FILE_WRITE_DATA) mapping_access |= FILE_MAPPING_WRITE;

        if (!(mapping->fd = get_fd_object_for_mapping( fd, mapping_access, sharing )))
        {
            mapping->fd = dup_fd_object( fd, mapping_access, sharing, FILE_SYNCHRONOUS_IO_NONALERT );
            if (mapping->fd) set_fd_user( mapping->fd, &mapping_fd_ops, NULL );
        }
        release_object( file );
        release_object( fd );
        if (!mapping->fd) goto error;

        if ((unix_fd = get_unix_fd( mapping->fd )) == -1) goto error;
        if (fstat( unix_fd, &st ) == -1)
        {
            file_set_error();
            goto error;
        }
        if (flags & SEC_IMAGE)
        {
            unsigned int err = get_image_params( mapping, st.st_size, unix_fd );
            if (!err) return mapping;
            set_error( err );
            goto error;
        }
        if (!(mapping->size = st.st_size))
        {
            set_error( STATUS_MAPPED_FILE_SIZE_ZERO );
            goto error;
        }
        else if (st.st_size < mapping->size)
        {
            if (!(file_access & FILE_WRITE_DATA))
            {
                set_error( STATUS_SECTION_TOO_BIG );
                goto error;
            }
            if (!grow_file( unix_fd, mapping->size )) goto error;
        }
    }
    else  /* Anonymous mapping (no associated file) */
    {
        if (!mapping->size)
        {
            set_error( STATUS_INVALID_PARAMETER );
            goto error;
        }
        if ((flags & SEC_RESERVE) && !(mapping->committed = create_ranges())) goto error;
        mapping->size = (mapping->size + page_mask) & ~((mem_size_t)page_mask);
        if ((unix_fd = create_temp_file( mapping->size )) == -1) goto error;
        if (!(mapping->fd = create_anonymous_fd( &mapping_fd_ops, unix_fd, &mapping->obj,
                                                 FILE_SYNCHRONOUS_IO_NONALERT ))) goto error;
        allow_fd_caching( mapping->fd );
    }
    return mapping;

 error:
    release_object( mapping );
    return NULL;
}

/* create a read-only file mapping for the specified fd */
struct mapping *create_fd_mapping( struct object *root, const struct unicode_str *name,
                                   struct fd *fd, unsigned int attr, const struct security_descriptor *sd )
{
    struct mapping *mapping;
    int unix_fd;
    struct stat st;

    if (!(mapping = create_named_object( root, &mapping_ops, name, attr, sd ))) return NULL;
    if (get_error() == STATUS_OBJECT_NAME_EXISTS) return mapping;  /* Nothing else to do */

    mapping->shared    = NULL;
    mapping->committed = NULL;
    mapping->flags     = SEC_FILE;
    mapping->fd        = (struct fd *)grab_object( fd );
    set_fd_user( mapping->fd, &mapping_fd_ops, NULL );

    if ((unix_fd = get_unix_fd( mapping->fd )) == -1) goto error;
    if (fstat( unix_fd, &st ) == -1)
    {
        file_set_error();
        goto error;
    }
    if (!(mapping->size = st.st_size))
    {
        set_error( STATUS_MAPPED_FILE_SIZE_ZERO );
        goto error;
    }
    return mapping;

 error:
    release_object( mapping );
    return NULL;
}

static struct mapping *get_mapping_obj( struct process *process, obj_handle_t handle, unsigned int access )
{
    return (struct mapping *)get_handle_obj( process, handle, access, &mapping_ops );
}

/* open a new file for the file descriptor backing the mapping */
struct file *get_mapping_file( struct process *process, client_ptr_t base,
                               unsigned int access, unsigned int sharing )
{
    struct memory_view *view = find_mapped_view( process, base );

    if (!view || !view->fd) return NULL;
    return create_file_for_fd_obj( view->fd, access, sharing );
}

/* get the image info for a SEC_IMAGE mapping */
const pe_image_info_t *get_mapping_image_info( struct process *process, client_ptr_t base )
{
    struct memory_view *view = find_mapped_view( process, base );

    if (!view || !(view->flags & SEC_IMAGE)) return NULL;
    return &view->image;
}

static void mapping_dump( struct object *obj, int verbose )
{
    struct mapping *mapping = (struct mapping *)obj;
    assert( obj->ops == &mapping_ops );
    fprintf( stderr, "Mapping size=%08x%08x flags=%08x fd=%p shared=%p\n",
             (unsigned int)(mapping->size >> 32), (unsigned int)mapping->size,
             mapping->flags, mapping->fd, mapping->shared );
}

static struct object_type *mapping_get_type( struct object *obj )
{
    static const WCHAR name[] = {'S','e','c','t','i','o','n'};
    static const struct unicode_str str = { name, sizeof(name) };
    return get_object_type( &str );
}

static struct fd *mapping_get_fd( struct object *obj )
{
    struct mapping *mapping = (struct mapping *)obj;
    return (struct fd *)grab_object( mapping->fd );
}

static unsigned int mapping_map_access( struct object *obj, unsigned int access )
{
    if (access & GENERIC_READ)    access |= STANDARD_RIGHTS_READ | SECTION_QUERY | SECTION_MAP_READ;
    if (access & GENERIC_WRITE)   access |= STANDARD_RIGHTS_WRITE | SECTION_MAP_WRITE;
    if (access & GENERIC_EXECUTE) access |= STANDARD_RIGHTS_EXECUTE | SECTION_MAP_EXECUTE;
    if (access & GENERIC_ALL)     access |= SECTION_ALL_ACCESS;
    return access & ~(GENERIC_READ | GENERIC_WRITE | GENERIC_EXECUTE | GENERIC_ALL);
}
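
/* Generic rights requested on a section handle are translated into the SECTION_*
 * specific rights above and the generic bits are then stripped. */
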
static void mapping_destroy( struct object *obj )
{
    struct mapping *mapping = (struct mapping *)obj;
    assert( obj->ops == &mapping_ops );
    if (mapping->fd) release_object( mapping->fd );
    if (mapping->committed) release_object( mapping->committed );
    if (mapping->shared) release_object( mapping->shared );
}

static enum server_fd_type mapping_get_fd_type( struct fd *fd )
{
    return FD_TYPE_FILE;
}

int get_page_size(void)
{
    if (!page_mask) page_mask = sysconf( _SC_PAGESIZE ) - 1;
    return page_mask + 1;
}

struct object *create_user_data_mapping( struct object *root, const struct unicode_str *name,
                                         unsigned int attr, const struct security_descriptor *sd )
{
    void *ptr;
    struct mapping *mapping;

    if (!(mapping = create_mapping( root, name, attr, sizeof(KSHARED_USER_DATA),
                                    SEC_COMMIT, 0, FILE_READ_DATA | FILE_WRITE_DATA, sd ))) return NULL;
    ptr = mmap( NULL, mapping->size, PROT_WRITE, MAP_SHARED, get_unix_fd( mapping->fd ), 0 );
    if (ptr != MAP_FAILED)
    {
        user_shared_data = ptr;
        user_shared_data->SystemCall = 1;
    }
    return &mapping->obj;
}

/* create a file mapping */
DECL_HANDLER(create_mapping)
{
    struct object *root;
    struct mapping *mapping;
    struct unicode_str name;
    const struct security_descriptor *sd;
    const struct object_attributes *objattr = get_req_object_attributes( &sd, &name, &root );

    if (!objattr) return;

    if ((mapping = create_mapping( root, &name, objattr->attributes, req->size, req->flags,
                                   req->file_handle, req->file_access, sd )))
    {
        if (get_error() == STATUS_OBJECT_NAME_EXISTS)
            reply->handle = alloc_handle( current->process, &mapping->obj, req->access, objattr->attributes );
        else
            reply->handle = alloc_handle_no_access_check( current->process, &mapping->obj,
                                                          req->access, objattr->attributes );
        release_object( mapping );
    }

    if (root) release_object( root );
}

/* open a handle to a mapping */
DECL_HANDLER(open_mapping)
{
    struct unicode_str name = get_req_unicode_str();

    reply->handle = open_object( current->process, req->rootdir, req->access,
                                 &mapping_ops, &name, req->attributes );
}

/* get information about a mapping */
DECL_HANDLER(get_mapping_info)
{
    struct mapping *mapping;

    if (!(mapping = get_mapping_obj( current->process, req->handle, req->access ))) return;

    reply->size  = mapping->size;
    reply->flags = mapping->flags;

    if (mapping->flags & SEC_IMAGE)
        set_reply_data( &mapping->image, min( sizeof(mapping->image), get_reply_max_size() ));

    if (!(req->access & (SECTION_MAP_READ | SECTION_MAP_WRITE)))  /* query only */
    {
        release_object( mapping );
        return;
    }

    if (mapping->shared)
        reply->shared_file = alloc_handle( current->process, mapping->shared->file,
                                           GENERIC_READ|GENERIC_WRITE, 0 );
    release_object( mapping );
}

/* add a memory view in the current process */
DECL_HANDLER(map_view)
{
    struct mapping *mapping = NULL;
    struct memory_view *view;

    if (!req->size || (req->base & page_mask) || req->base + req->size < req->base)  /* overflow */
    {
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }

    /* make sure we don't already have an overlapping view */
    LIST_FOR_EACH_ENTRY( view, &current->process->views, struct memory_view, entry )
    {
        if (view->base + view->size <= req->base) continue;
        if (view->base >= req->base + req->size) continue;
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }

    if (!(mapping = get_mapping_obj( current->process, req->mapping, req->access ))) return;

    if (mapping->flags & SEC_IMAGE)
    {
        if (req->start || req->size > mapping->image.map_size)
        {
            set_error( STATUS_INVALID_PARAMETER );
            goto done;
        }
    }
    else if (req->start >= mapping->size ||
             req->start + req->size < req->start ||
             req->start + req->size > ((mapping->size + page_mask) & ~(mem_size_t)page_mask))
    {
        set_error( STATUS_INVALID_PARAMETER );
        goto done;
    }

    if ((view = mem_alloc( sizeof(*view) )))
    {
        view->base      = req->base;
        view->size      = req->size;
        view->start     = req->start;
        view->flags     = mapping->flags;
        view->fd        = !is_fd_removable( mapping->fd ) ? (struct fd *)grab_object( mapping->fd ) : NULL;
        view->committed = mapping->committed ? (struct ranges *)grab_object( mapping->committed ) : NULL;
        view->shared    = mapping->shared ? (struct shared_map *)grab_object( mapping->shared ) : NULL;
        if (mapping->flags & SEC_IMAGE)
        {
            view->image = mapping->image;
            if (view->base != mapping->image.base) set_error( STATUS_IMAGE_NOT_AT_BASE );
        }
        list_add_tail( &current->process->views, &view->entry );
    }

done:
    release_object( mapping );
}
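
/* Note: this handler only records the view's base, size and flags in the per-process
 * view list; the actual page mapping is performed on the client side. */
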
/* unmap a memory view from the current process */
DECL_HANDLER(unmap_view)
{
    struct memory_view *view = find_mapped_view( current->process, req->base );

    if (view) free_memory_view( view );
}

/* get a range of committed pages in a file mapping */
DECL_HANDLER(get_mapping_committed_range)
{
    struct memory_view *view = find_mapped_view( current->process, req->base );

    if (view) reply->committed = find_committed_range( view, req->offset, &reply->size );
}

/* add a range to the committed pages in a file mapping */
DECL_HANDLER(add_mapping_committed_range)
{
    struct memory_view *view = find_mapped_view( current->process, req->base );

    if (view) add_committed_range( view, req->offset, req->offset + req->size );
}

/* check if two memory maps are for the same file */
DECL_HANDLER(is_same_mapping)
{
    struct memory_view *view1 = find_mapped_view( current->process, req->base1 );
    struct memory_view *view2 = find_mapped_view( current->process, req->base2 );

    if (!view1 || !view2) return;
    if (!view1->fd || !view2->fd ||
        !(view1->flags & SEC_IMAGE) || !(view2->flags & SEC_IMAGE) ||
        !is_same_file_fd( view1->fd, view2->fd ))
        set_error( STATUS_NOT_SAME_DEVICE );
}