/*
 * Server-side file mapping management
 *
 * Copyright (C) 1999 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */
22 #include "wine/port.h"
29 #ifdef HAVE_SYS_MMAN_H
30 # include <sys/mman.h>
35 #define WIN32_NO_STATUS
/* list of memory ranges, used to store committed info */
struct ranges
{
    struct object   obj;         /* object header */
    unsigned int    count;       /* number of used ranges */
    unsigned int    max;         /* number of allocated ranges */
    struct range
    {
        file_pos_t  start;
        file_pos_t  end;
    } *ranges;                   /* ranges array */
};

static void ranges_dump( struct object *obj, int verbose );
static void ranges_destroy( struct object *obj );

static const struct object_ops ranges_ops =
{
    sizeof(struct ranges),     /* size */
    ranges_dump,               /* dump */
    no_get_type,               /* get_type */
    no_add_queue,              /* add_queue */
    NULL,                      /* remove_queue */
    NULL,                      /* signaled */
    NULL,                      /* satisfied */
    no_signal,                 /* signal */
    no_get_fd,                 /* get_fd */
    no_map_access,             /* map_access */
    default_get_sd,            /* get_sd */
    default_set_sd,            /* set_sd */
    no_lookup_name,            /* lookup_name */
    no_link_name,              /* link_name */
    NULL,                      /* unlink_name */
    no_open_file,              /* open_file */
    no_close_handle,           /* close_handle */
    ranges_destroy             /* destroy */
};
/* file backing the shared sections of a PE image mapping */
struct shared_map
{
    struct object   obj;         /* object header */
    struct fd      *fd;          /* file descriptor of the mapped PE file */
    struct file    *file;        /* temp file holding the shared data */
    struct list     entry;       /* entry in global shared maps list */
};

static void shared_map_dump( struct object *obj, int verbose );
static void shared_map_destroy( struct object *obj );

static const struct object_ops shared_map_ops =
{
    sizeof(struct shared_map), /* size */
    shared_map_dump,           /* dump */
    no_get_type,               /* get_type */
    no_add_queue,              /* add_queue */
    NULL,                      /* remove_queue */
    NULL,                      /* signaled */
    NULL,                      /* satisfied */
    no_signal,                 /* signal */
    no_get_fd,                 /* get_fd */
    no_map_access,             /* map_access */
    default_get_sd,            /* get_sd */
    default_set_sd,            /* set_sd */
    no_lookup_name,            /* lookup_name */
    no_link_name,              /* link_name */
    NULL,                      /* unlink_name */
    no_open_file,              /* open_file */
    no_close_handle,           /* close_handle */
    shared_map_destroy         /* destroy */
};

static struct list shared_map_list = LIST_INIT( shared_map_list );
/* memory view mapped in client address space */
struct memory_view
{
    struct list        entry;      /* entry in per-process view list */
    struct fd         *fd;         /* fd for mapped file */
    struct ranges     *committed;  /* list of committed ranges in this mapping */
    struct shared_map *shared;     /* temp file for shared PE mapping */
    unsigned int       flags;      /* SEC_* flags */
    client_ptr_t       base;       /* view base address (in process addr space) */
    mem_size_t         size;       /* view size */
    file_pos_t         start;      /* start offset in mapping */
};
struct mapping
{
    struct object      obj;        /* object header */
    mem_size_t         size;       /* mapping size */
    unsigned int       flags;      /* SEC_* flags */
    struct fd         *fd;         /* fd for mapped file */
    pe_image_info_t    image;      /* image info (for PE image mapping) */
    struct ranges     *committed;  /* list of committed ranges in this mapping */
    struct shared_map *shared;     /* temp file for shared PE mapping */
};
static void mapping_dump( struct object *obj, int verbose );
static struct object_type *mapping_get_type( struct object *obj );
static struct fd *mapping_get_fd( struct object *obj );
static unsigned int mapping_map_access( struct object *obj, unsigned int access );
static void mapping_destroy( struct object *obj );
static enum server_fd_type mapping_get_fd_type( struct fd *fd );
static const struct object_ops mapping_ops =
{
    sizeof(struct mapping),    /* size */
    mapping_dump,              /* dump */
    mapping_get_type,          /* get_type */
    no_add_queue,              /* add_queue */
    NULL,                      /* remove_queue */
    NULL,                      /* signaled */
    NULL,                      /* satisfied */
    no_signal,                 /* signal */
    mapping_get_fd,            /* get_fd */
    mapping_map_access,        /* map_access */
    default_get_sd,            /* get_sd */
    default_set_sd,            /* set_sd */
    no_lookup_name,            /* lookup_name */
    directory_link_name,       /* link_name */
    default_unlink_name,       /* unlink_name */
    no_open_file,              /* open_file */
    fd_close_handle,           /* close_handle */
    mapping_destroy            /* destroy */
};
static const struct fd_ops mapping_fd_ops =
{
    default_fd_get_poll_events,   /* get_poll_events */
    default_poll_event,           /* poll_event */
    mapping_get_fd_type,          /* get_fd_type */
    no_fd_read,                   /* read */
    no_fd_write,                  /* write */
    no_fd_flush,                  /* flush */
    no_fd_get_file_info,          /* get_file_info */
    no_fd_get_volume_info,        /* get_volume_info */
    no_fd_ioctl,                  /* ioctl */
    no_fd_queue_async,            /* queue_async */
    default_fd_reselect_async     /* reselect_async */
};
static size_t page_mask;

#define ROUND_SIZE(size)  (((size) + page_mask) & ~page_mask)
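/* ROUND_SIZE rounds a size up to the next page boundary.  For example, with 4K
 * pages page_mask is 0xfff, so ROUND_SIZE(0x1001) yields 0x2000 while
 * ROUND_SIZE(0x1000) stays 0x1000.  Note that page_mask is a size_t: when the
 * value being rounded is a 64-bit mem_size_t on a 32-bit host the mask has to
 * be cast explicitly (as done below when rounding mapping->size), otherwise
 * ~page_mask would clear the upper bits of the wider value. */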
static void ranges_dump( struct object *obj, int verbose )
{
    struct ranges *ranges = (struct ranges *)obj;
    fprintf( stderr, "Memory ranges count=%u\n", ranges->count );
}
static void ranges_destroy( struct object *obj )
{
    struct ranges *ranges = (struct ranges *)obj;
    free( ranges->ranges );
}
static void shared_map_dump( struct object *obj, int verbose )
{
    struct shared_map *shared = (struct shared_map *)obj;
    fprintf( stderr, "Shared mapping fd=%p file=%p\n", shared->fd, shared->file );
}
static void shared_map_destroy( struct object *obj )
{
    struct shared_map *shared = (struct shared_map *)obj;

    release_object( shared->fd );
    release_object( shared->file );
    list_remove( &shared->entry );
}
/* extend a file beyond the current end of file */
static int grow_file( int unix_fd, file_pos_t new_size )
{
    static const char zero;
    off_t size = new_size;

    if (sizeof(new_size) > sizeof(size) && size != new_size)
    {
        set_error( STATUS_INVALID_PARAMETER );
        return 0;
    }
    /* extend the file one byte beyond the requested size and then truncate it */
    /* this should work around ftruncate implementations that can't extend files */
    if (pwrite( unix_fd, &zero, 1, size ) != -1)
    {
        if (!ftruncate( unix_fd, size )) return 1;
    }
    file_set_error();
    return 0;
}
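/* Note: the sizeof comparison above guards against file_pos_t values that do
 * not fit in off_t, e.g. a 5 GiB request on a platform with a 32-bit off_t;
 * the truncated copy no longer compares equal to new_size, so the call fails
 * cleanly with STATUS_INVALID_PARAMETER instead of silently shrinking the
 * requested size. */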
/* check if the current directory allows exec mappings */
static int check_current_dir_for_exec(void)
{
    int fd;
    char tmpfn[] = "anonmap.XXXXXX";
    void *ret = MAP_FAILED;

    fd = mkstemps( tmpfn, 0 );
    if (fd == -1) return 0;
    if (grow_file( fd, 1 ))
    {
        ret = mmap( NULL, get_page_size(), PROT_READ | PROT_EXEC, MAP_PRIVATE, fd, 0 );
        if (ret != MAP_FAILED) munmap( ret, get_page_size() );
    }
    close( fd );
    unlink( tmpfn );
    return (ret != MAP_FAILED);
}
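/* Rationale (sketch): executable mappings backed by a temp file fail when that
 * file lives on a filesystem mounted "noexec" (the mmap above returns
 * MAP_FAILED), so the probe creates a scratch file in the current directory and
 * checks whether a PROT_EXEC mapping of it actually succeeds before the
 * directory is used for anonymous-mapping temp files. */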
/* create a temp file for anonymous mappings */
static int create_temp_file( file_pos_t size )
{
    static int temp_dir_fd = -1;
    char tmpfn[] = "anonmap.XXXXXX";
    int fd;

    if (temp_dir_fd == -1)
    {
        temp_dir_fd = server_dir_fd;
        if (!check_current_dir_for_exec())
        {
            /* the server dir is noexec, try the config dir instead */
            fchdir( config_dir_fd );
            if (check_current_dir_for_exec())
                temp_dir_fd = config_dir_fd;
            else  /* neither works, fall back to server dir */
                fchdir( server_dir_fd );
        }
    }
    else if (temp_dir_fd != server_dir_fd) fchdir( temp_dir_fd );

    fd = mkstemps( tmpfn, 0 );
    if (fd != -1)
    {
        if (!grow_file( fd, size ))
        {
            close( fd );
            fd = -1;
        }
        unlink( tmpfn );
    }
    else file_set_error();

    if (temp_dir_fd != server_dir_fd) fchdir( server_dir_fd );
    return fd;
}
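/* The exec probe is only run once: the chosen directory is cached in
 * temp_dir_fd, and the server only fchdir()s away from server_dir_fd for the
 * duration of mkstemps(), restoring the original working directory before
 * returning so that later relative opens keep working. */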
/* find a memory view from its base address */
static struct memory_view *find_mapped_view( struct process *process, client_ptr_t base )
{
    struct memory_view *view;

    LIST_FOR_EACH_ENTRY( view, &process->views, struct memory_view, entry )
        if (view->base == base) return view;

    set_error( STATUS_NOT_MAPPED_VIEW );
    return NULL;
}
static void free_memory_view( struct memory_view *view )
{
    if (view->fd) release_object( view->fd );
    if (view->committed) release_object( view->committed );
    if (view->shared) release_object( view->shared );
    list_remove( &view->entry );
    free( view );
}
/* free all mapped views at process exit */
void free_mapped_views( struct process *process )
{
    struct list *ptr;

    while ((ptr = list_head( &process->views )))
        free_memory_view( LIST_ENTRY( ptr, struct memory_view, entry ));
}
/* find the shared PE mapping for a given mapping */
static struct shared_map *get_shared_file( struct fd *fd )
{
    struct shared_map *ptr;

    LIST_FOR_EACH_ENTRY( ptr, &shared_map_list, struct shared_map, entry )
        if (is_same_file_fd( ptr->fd, fd ))
            return (struct shared_map *)grab_object( ptr );
    return NULL;
}
/* return the size of the memory mapping and file range of a given section */
static inline void get_section_sizes( const IMAGE_SECTION_HEADER *sec, size_t *map_size,
                                      off_t *file_start, size_t *file_size )
{
    static const unsigned int sector_align = 0x1ff;

    if (!sec->Misc.VirtualSize) *map_size = ROUND_SIZE( sec->SizeOfRawData );
    else *map_size = ROUND_SIZE( sec->Misc.VirtualSize );

    *file_start = sec->PointerToRawData & ~sector_align;
    *file_size = (sec->SizeOfRawData + (sec->PointerToRawData & sector_align) + sector_align) & ~sector_align;
    if (*file_size > *map_size) *file_size = *map_size;
}
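/* Worked example of the 0x200-byte sector alignment above: for a section with
 * PointerToRawData = 0x650 and SizeOfRawData = 0x300 the raw data occupies file
 * offsets [0x650, 0x950).  file_start is rounded down to 0x600 and
 * file_size = (0x300 + 0x50 + 0x1ff) & ~0x1ff = 0x400, i.e. the sector-aligned
 * span [0x600, 0xa00) covering the section, clipped to map_size if needed. */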
/* add a range to the committed list */
static void add_committed_range( struct memory_view *view, file_pos_t start, file_pos_t end )
{
    unsigned int i, j;
    struct ranges *committed = view->committed;
    struct range *ranges;

    if ((start & page_mask) || (end & page_mask) ||
        start >= view->size || end >= view->size ||
        start >= end)
    {
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }

    if (!committed) return;  /* everything committed already */

    start += view->start;
    end += view->start;

    for (i = 0, ranges = committed->ranges; i < committed->count; i++)
    {
        if (ranges[i].start > end) break;
        if (ranges[i].end < start) continue;
        if (ranges[i].start > start) ranges[i].start = start; /* extend downwards */
        if (ranges[i].end < end)  /* extend upwards and maybe merge with next */
        {
            for (j = i + 1; j < committed->count; j++)
            {
                if (ranges[j].start > end) break;
                if (ranges[j].end > end) end = ranges[j].end;
            }
            if (j > i + 1)
            {
                memmove( &ranges[i + 1], &ranges[j], (committed->count - j) * sizeof(*ranges) );
                committed->count -= j - (i + 1);
            }
            ranges[i].end = end;
        }
        return;
    }

    /* now add a new range */

    if (committed->count == committed->max)
    {
        unsigned int new_size = committed->max * 2;
        struct range *new_ptr = realloc( committed->ranges, new_size * sizeof(*new_ptr) );
        if (!new_ptr) return;
        committed->max = new_size;
        ranges = committed->ranges = new_ptr;
    }
    memmove( &ranges[i + 1], &ranges[i], (committed->count - i) * sizeof(*ranges) );
    ranges[i].start = start;
    ranges[i].end = end;
    committed->count++;
}
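/* Example of the merge logic above: with ranges already committed at
 * [0x1000,0x3000) and [0x5000,0x6000), committing [0x2000,0x5000) hits the
 * first range, extends its end upwards, absorbs the second range while j scans
 * forward, and the list collapses to the single range [0x1000,0x6000).  The
 * array is kept sorted and non-overlapping, so lookups can stop at the first
 * range starting beyond the requested offset. */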
/* find the range containing start and return whether it's committed */
static int find_committed_range( struct memory_view *view, file_pos_t start, mem_size_t *size )
{
    unsigned int i;
    struct ranges *committed = view->committed;
    struct range *ranges;

    if ((start & page_mask) || start >= view->size)
    {
        set_error( STATUS_INVALID_PARAMETER );
        return 0;
    }
    if (!committed)  /* everything is committed */
    {
        *size = view->size - start;
        return 1;
    }
    for (i = 0, ranges = committed->ranges; i < committed->count; i++)
    {
        if (ranges[i].start > view->start + start)
        {
            *size = min( ranges[i].start, view->start + view->size ) - (view->start + start);
            return 0;
        }
        if (ranges[i].end > view->start + start)
        {
            *size = min( ranges[i].end, view->start + view->size ) - (view->start + start);
            return 1;
        }
    }
    *size = view->size - start;
    return 0;
}
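/* The return value is the commit state of the page at 'start' (1 = committed,
 * 0 = reserved only) and *size receives the length of the run sharing that
 * state, clipped to the end of the view.  Because the ranges array is sorted,
 * the first range starting past the offset bounds an uncommitted run, and a
 * range covering the offset bounds a committed one. */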
/* allocate and fill the temp file for a shared PE image mapping */
static int build_shared_mapping( struct mapping *mapping, int fd,
                                 IMAGE_SECTION_HEADER *sec, unsigned int nb_sec )
{
    struct shared_map *shared;
    struct file *file;
    unsigned int i;
    mem_size_t total_size;
    size_t file_size, map_size, max_size;
    off_t shared_pos, read_pos, write_pos;
    char *buffer = NULL;
    int shared_fd;
    long toread;

    /* compute the total size of the shared mapping */

    total_size = max_size = 0;
    for (i = 0; i < nb_sec; i++)
    {
        if ((sec[i].Characteristics & IMAGE_SCN_MEM_SHARED) &&
            (sec[i].Characteristics & IMAGE_SCN_MEM_WRITE))
        {
            get_section_sizes( &sec[i], &map_size, &read_pos, &file_size );
            if (file_size > max_size) max_size = file_size;
            total_size += map_size;
        }
    }
    if (!total_size) return 1;  /* nothing to do */

    if ((mapping->shared = get_shared_file( mapping->fd ))) return 1;

    /* create a temp file for the mapping */

    if ((shared_fd = create_temp_file( total_size )) == -1) return 0;
    if (!(file = create_file_for_fd( shared_fd, FILE_GENERIC_READ|FILE_GENERIC_WRITE, 0 ))) return 0;

    if (!(buffer = malloc( max_size ))) goto error;

    /* copy the shared sections data into the temp file */

    shared_pos = 0;
    for (i = 0; i < nb_sec; i++)
    {
        if (!(sec[i].Characteristics & IMAGE_SCN_MEM_SHARED)) continue;
        if (!(sec[i].Characteristics & IMAGE_SCN_MEM_WRITE)) continue;
        get_section_sizes( &sec[i], &map_size, &read_pos, &file_size );
        write_pos = shared_pos;
        shared_pos += map_size;
        if (!sec[i].PointerToRawData || !file_size) continue;

        toread = file_size;
        while (toread)
        {
            long res = pread( fd, buffer + file_size - toread, toread, read_pos );
            if (!res && toread < 0x200)  /* partial sector at EOF is not an error */
            {
                file_size -= toread;
                break;
            }
            if (res <= 0) goto error;
            toread -= res;
            read_pos += res;
        }
        if (pwrite( shared_fd, buffer, file_size, write_pos ) != file_size) goto error;
    }

    if (!(shared = alloc_object( &shared_map_ops ))) goto error;
    shared->fd = (struct fd *)grab_object( mapping->fd );
    shared->file = file;
    list_add_head( &shared_map_list, &shared->entry );
    mapping->shared = shared;
    free( buffer );
    return 1;

 error:
    release_object( file );
    free( buffer );
    return 0;
}
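/* Design note: all processes that map the same PE file must see a single copy
 * of its writable IMAGE_SCN_MEM_SHARED sections, so the backing temp file is
 * created once per underlying file and then reused; get_shared_file() looks it
 * up in shared_map_list via is_same_file_fd() before a new one is built. */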
/* load the CLR header from its section */
static int load_clr_header( IMAGE_COR20_HEADER *hdr, size_t va, size_t size, int unix_fd,
                            IMAGE_SECTION_HEADER *sec, unsigned int nb_sec )
{
    ssize_t ret;
    size_t map_size, file_size;
    off_t file_start;
    unsigned int i;

    if (!va || !size) return 0;

    for (i = 0; i < nb_sec; i++)
    {
        if (va < sec[i].VirtualAddress) continue;
        if (sec[i].Misc.VirtualSize && va - sec[i].VirtualAddress >= sec[i].Misc.VirtualSize) continue;
        get_section_sizes( &sec[i], &map_size, &file_start, &file_size );
        if (size >= map_size) continue;
        if (va - sec[i].VirtualAddress >= map_size - size) continue;
        file_size = min( file_size, map_size );
        size = min( size, sizeof(*hdr) );
        ret = pread( unix_fd, hdr, min( size, file_size ), file_start + va - sec[i].VirtualAddress );
        if (ret <= 0) break;
        if (ret < sizeof(*hdr)) memset( (char *)hdr + ret, 0, sizeof(*hdr) - ret );
        return (hdr->MajorRuntimeVersion > COR_VERSION_MAJOR_V2 ||
                (hdr->MajorRuntimeVersion == COR_VERSION_MAJOR_V2 &&
                 hdr->MinorRuntimeVersion >= COR_VERSION_MINOR));
    }
    return 0;
}
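/* A rough sketch of what the version check accepts: with the usual corhdr.h
 * values (COR_VERSION_MAJOR_V2 == 2, COR_VERSION_MINOR == 5) a CLR header is
 * considered valid for runtime versions 2.5 and above, which is what .NET
 * images declare; anything older is treated as not a managed image. */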
/* retrieve the mapping parameters for an executable (PE) image */
static unsigned int get_image_params( struct mapping *mapping, file_pos_t file_size, int unix_fd )
{
    IMAGE_DOS_HEADER dos;
    IMAGE_COR20_HEADER clr;
    IMAGE_SECTION_HEADER sec[96];
    struct
    {
        DWORD Signature;
        IMAGE_FILE_HEADER FileHeader;
        union
        {
            IMAGE_OPTIONAL_HEADER32 hdr32;
            IMAGE_OPTIONAL_HEADER64 hdr64;
        } opt;
    } nt;
    off_t pos;
    int size;
    size_t clr_va, clr_size;
    unsigned int i, cpu_mask = get_supported_cpu_mask();

    /* load the headers */

    if (!file_size) return STATUS_INVALID_FILE_FOR_SECTION;
    if (pread( unix_fd, &dos, sizeof(dos), 0 ) != sizeof(dos)) return STATUS_INVALID_IMAGE_NOT_MZ;
    if (dos.e_magic != IMAGE_DOS_SIGNATURE) return STATUS_INVALID_IMAGE_NOT_MZ;
    pos = dos.e_lfanew;

    size = pread( unix_fd, &nt, sizeof(nt), pos );
    if (size < sizeof(nt.Signature) + sizeof(nt.FileHeader)) return STATUS_INVALID_IMAGE_FORMAT;
    /* zero out Optional header in the case it's not present or partial */
    size = min( size, sizeof(nt.Signature) + sizeof(nt.FileHeader) + nt.FileHeader.SizeOfOptionalHeader );
    if (size < sizeof(nt)) memset( (char *)&nt + size, 0, sizeof(nt) - size );
    if (nt.Signature != IMAGE_NT_SIGNATURE)
    {
        if (*(WORD *)&nt.Signature == IMAGE_OS2_SIGNATURE) return STATUS_INVALID_IMAGE_NE_FORMAT;
        return STATUS_INVALID_IMAGE_PROTECT;
    }
    switch (nt.opt.hdr32.Magic)
    {
    case IMAGE_NT_OPTIONAL_HDR32_MAGIC:
        switch (nt.FileHeader.Machine)
        {
        case IMAGE_FILE_MACHINE_I386:
            if (cpu_mask & (CPU_FLAG(CPU_x86) | CPU_FLAG(CPU_x86_64))) break;
            return STATUS_INVALID_IMAGE_FORMAT;
        case IMAGE_FILE_MACHINE_ARM:
        case IMAGE_FILE_MACHINE_THUMB:
        case IMAGE_FILE_MACHINE_ARMNT:
            if (cpu_mask & (CPU_FLAG(CPU_ARM) | CPU_FLAG(CPU_ARM64))) break;
            return STATUS_INVALID_IMAGE_FORMAT;
        case IMAGE_FILE_MACHINE_POWERPC:
            if (cpu_mask & CPU_FLAG(CPU_POWERPC)) break;
            return STATUS_INVALID_IMAGE_FORMAT;
        default:
            return STATUS_INVALID_IMAGE_FORMAT;
        }
        clr_va = nt.opt.hdr32.DataDirectory[IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR].VirtualAddress;
        clr_size = nt.opt.hdr32.DataDirectory[IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR].Size;

        mapping->image.base            = nt.opt.hdr32.ImageBase;
        mapping->image.entry_point     = nt.opt.hdr32.ImageBase + nt.opt.hdr32.AddressOfEntryPoint;
        mapping->image.map_size        = ROUND_SIZE( nt.opt.hdr32.SizeOfImage );
        mapping->image.stack_size      = nt.opt.hdr32.SizeOfStackReserve;
        mapping->image.stack_commit    = nt.opt.hdr32.SizeOfStackCommit;
        mapping->image.subsystem       = nt.opt.hdr32.Subsystem;
        mapping->image.subsystem_low   = nt.opt.hdr32.MinorSubsystemVersion;
        mapping->image.subsystem_high  = nt.opt.hdr32.MajorSubsystemVersion;
        mapping->image.dll_charact     = nt.opt.hdr32.DllCharacteristics;
        mapping->image.contains_code   = (nt.opt.hdr32.SizeOfCode ||
                                          nt.opt.hdr32.AddressOfEntryPoint ||
                                          nt.opt.hdr32.SectionAlignment & page_mask);
        mapping->image.header_size     = nt.opt.hdr32.SizeOfHeaders;
        mapping->image.checksum        = nt.opt.hdr32.CheckSum;
        mapping->image.image_flags     = 0;
        if (nt.opt.hdr32.SectionAlignment & page_mask)
            mapping->image.image_flags |= IMAGE_FLAGS_ImageMappedFlat;
        if ((nt.opt.hdr32.DllCharacteristics & IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE) &&
            mapping->image.contains_code && !(clr_va && clr_size))
            mapping->image.image_flags |= IMAGE_FLAGS_ImageDynamicallyRelocated;
        break;
    case IMAGE_NT_OPTIONAL_HDR64_MAGIC:
        if (!(cpu_mask & CPU_64BIT_MASK)) return STATUS_INVALID_IMAGE_WIN_64;
        switch (nt.FileHeader.Machine)
        {
        case IMAGE_FILE_MACHINE_AMD64:
            if (cpu_mask & (CPU_FLAG(CPU_x86) | CPU_FLAG(CPU_x86_64))) break;
            return STATUS_INVALID_IMAGE_FORMAT;
        case IMAGE_FILE_MACHINE_ARM64:
            if (cpu_mask & (CPU_FLAG(CPU_ARM) | CPU_FLAG(CPU_ARM64))) break;
            return STATUS_INVALID_IMAGE_FORMAT;
        default:
            return STATUS_INVALID_IMAGE_FORMAT;
        }
        clr_va = nt.opt.hdr64.DataDirectory[IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR].VirtualAddress;
        clr_size = nt.opt.hdr64.DataDirectory[IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR].Size;

        mapping->image.base            = nt.opt.hdr64.ImageBase;
        mapping->image.entry_point     = nt.opt.hdr64.ImageBase + nt.opt.hdr64.AddressOfEntryPoint;
        mapping->image.map_size        = ROUND_SIZE( nt.opt.hdr64.SizeOfImage );
        mapping->image.stack_size      = nt.opt.hdr64.SizeOfStackReserve;
        mapping->image.stack_commit    = nt.opt.hdr64.SizeOfStackCommit;
        mapping->image.subsystem       = nt.opt.hdr64.Subsystem;
        mapping->image.subsystem_low   = nt.opt.hdr64.MinorSubsystemVersion;
        mapping->image.subsystem_high  = nt.opt.hdr64.MajorSubsystemVersion;
        mapping->image.dll_charact     = nt.opt.hdr64.DllCharacteristics;
        mapping->image.contains_code   = (nt.opt.hdr64.SizeOfCode ||
                                          nt.opt.hdr64.AddressOfEntryPoint ||
                                          nt.opt.hdr64.SectionAlignment & page_mask);
        mapping->image.header_size     = nt.opt.hdr64.SizeOfHeaders;
        mapping->image.checksum        = nt.opt.hdr64.CheckSum;
        mapping->image.image_flags     = 0;
        if (nt.opt.hdr64.SectionAlignment & page_mask)
            mapping->image.image_flags |= IMAGE_FLAGS_ImageMappedFlat;
        if ((nt.opt.hdr64.DllCharacteristics & IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE) &&
            mapping->image.contains_code && !(clr_va && clr_size))
            mapping->image.image_flags |= IMAGE_FLAGS_ImageDynamicallyRelocated;
        break;
    default:
        return STATUS_INVALID_IMAGE_FORMAT;
    }

    mapping->image.image_charact = nt.FileHeader.Characteristics;
    mapping->image.machine       = nt.FileHeader.Machine;
    mapping->image.zerobits      = 0; /* FIXME */
    mapping->image.gp            = 0; /* FIXME */
    mapping->image.file_size     = file_size;
    mapping->image.loader_flags  = clr_va && clr_size;

    /* load the section headers */

    pos += sizeof(nt.Signature) + sizeof(nt.FileHeader) + nt.FileHeader.SizeOfOptionalHeader;
    if (nt.FileHeader.NumberOfSections > sizeof(sec)/sizeof(sec[0])) return STATUS_INVALID_IMAGE_FORMAT;
    size = sizeof(*sec) * nt.FileHeader.NumberOfSections;
    if (!mapping->size) mapping->size = mapping->image.map_size;
    else if (mapping->size > mapping->image.map_size) return STATUS_SECTION_TOO_BIG;
    if (pos + size > mapping->image.map_size) return STATUS_INVALID_FILE_FOR_SECTION;
    if (pos + size > mapping->image.header_size) mapping->image.header_size = pos + size;
    if (pread( unix_fd, sec, size, pos ) != size) return STATUS_INVALID_FILE_FOR_SECTION;

    for (i = 0; i < nt.FileHeader.NumberOfSections && !mapping->image.contains_code; i++)
        if (sec[i].Characteristics & IMAGE_SCN_MEM_EXECUTE) mapping->image.contains_code = 1;

    if (load_clr_header( &clr, clr_va, clr_size, unix_fd, sec, nt.FileHeader.NumberOfSections ) &&
        (clr.Flags & COMIMAGE_FLAGS_ILONLY))
    {
        mapping->image.image_flags |= IMAGE_FLAGS_ComPlusILOnly;
        if (nt.opt.hdr32.Magic == IMAGE_NT_OPTIONAL_HDR32_MAGIC &&
            !(clr.Flags & COMIMAGE_FLAGS_32BITREQUIRED))
            mapping->image.image_flags |= IMAGE_FLAGS_ComPlusNativeReady;
    }

    if (!build_shared_mapping( mapping, unix_fd, sec, nt.FileHeader.NumberOfSections ))
        return STATUS_INVALID_FILE_FOR_SECTION;

    return STATUS_SUCCESS;
}
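/* Note on the fixed sec[96] buffer above: the Windows loader itself rejects
 * images with more than 96 sections, so treating a larger NumberOfSections as
 * STATUS_INVALID_IMAGE_FORMAT matches native behaviour rather than being an
 * arbitrary server limit. */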
716 static struct ranges
*create_ranges(void)
718 struct ranges
*ranges
= alloc_object( &ranges_ops
);
720 if (!ranges
) return NULL
;
723 if (!(ranges
->ranges
= mem_alloc( ranges
->max
* sizeof(*ranges
->ranges
) )))
725 release_object( ranges
);
731 static unsigned int get_mapping_flags( obj_handle_t handle
, unsigned int flags
)
733 switch (flags
& (SEC_IMAGE
| SEC_RESERVE
| SEC_COMMIT
| SEC_FILE
))
736 if (flags
& (SEC_WRITECOMBINE
| SEC_LARGE_PAGES
)) break;
737 if (handle
) return SEC_FILE
| SEC_IMAGE
;
738 set_error( STATUS_INVALID_FILE_FOR_SECTION
);
741 if (!handle
) return flags
;
744 if (flags
& SEC_LARGE_PAGES
) break;
745 if (handle
) return SEC_FILE
| (flags
& (SEC_NOCACHE
| SEC_WRITECOMBINE
));
748 set_error( STATUS_INVALID_PARAMETER
);
753 static struct object
*create_mapping( struct object
*root
, const struct unicode_str
*name
,
754 unsigned int attr
, mem_size_t size
, unsigned int flags
,
755 obj_handle_t handle
, unsigned int file_access
,
756 const struct security_descriptor
*sd
)
758 struct mapping
*mapping
;
764 if (!page_mask
) page_mask
= sysconf( _SC_PAGESIZE
) - 1;
766 if (!(mapping
= create_named_object( root
, &mapping_ops
, name
, attr
, sd
)))
768 if (get_error() == STATUS_OBJECT_NAME_EXISTS
)
769 return &mapping
->obj
; /* Nothing else to do */
771 mapping
->size
= size
;
773 mapping
->shared
= NULL
;
774 mapping
->committed
= NULL
;
776 if (!(mapping
->flags
= get_mapping_flags( handle
, flags
))) goto error
;
780 const unsigned int sharing
= FILE_SHARE_READ
| FILE_SHARE_WRITE
| FILE_SHARE_DELETE
;
781 unsigned int mapping_access
= FILE_MAPPING_ACCESS
;
783 if (!(file
= get_file_obj( current
->process
, handle
, file_access
))) goto error
;
784 fd
= get_obj_fd( (struct object
*)file
);
786 /* file sharing rules for mappings are different so we use magic the access rights */
787 if (flags
& SEC_IMAGE
) mapping_access
|= FILE_MAPPING_IMAGE
;
788 else if (file_access
& FILE_WRITE_DATA
) mapping_access
|= FILE_MAPPING_WRITE
;
790 if (!(mapping
->fd
= get_fd_object_for_mapping( fd
, mapping_access
, sharing
)))
792 mapping
->fd
= dup_fd_object( fd
, mapping_access
, sharing
, FILE_SYNCHRONOUS_IO_NONALERT
);
793 if (mapping
->fd
) set_fd_user( mapping
->fd
, &mapping_fd_ops
, NULL
);
795 release_object( file
);
796 release_object( fd
);
797 if (!mapping
->fd
) goto error
;
799 if ((unix_fd
= get_unix_fd( mapping
->fd
)) == -1) goto error
;
800 if (fstat( unix_fd
, &st
) == -1)
805 if (flags
& SEC_IMAGE
)
807 unsigned int err
= get_image_params( mapping
, st
.st_size
, unix_fd
);
808 if (!err
) return &mapping
->obj
;
814 if (!(mapping
->size
= st
.st_size
))
816 set_error( STATUS_MAPPED_FILE_SIZE_ZERO
);
820 else if (st
.st_size
< mapping
->size
)
822 if (!(file_access
& FILE_WRITE_DATA
))
824 set_error( STATUS_SECTION_TOO_BIG
);
827 if (!grow_file( unix_fd
, mapping
->size
)) goto error
;
830 else /* Anonymous mapping (no associated file) */
834 set_error( STATUS_INVALID_PARAMETER
);
837 if ((flags
& SEC_RESERVE
) && !(mapping
->committed
= create_ranges())) goto error
;
838 mapping
->size
= (mapping
->size
+ page_mask
) & ~((mem_size_t
)page_mask
);
839 if ((unix_fd
= create_temp_file( mapping
->size
)) == -1) goto error
;
840 if (!(mapping
->fd
= create_anonymous_fd( &mapping_fd_ops
, unix_fd
, &mapping
->obj
,
841 FILE_SYNCHRONOUS_IO_NONALERT
))) goto error
;
842 allow_fd_caching( mapping
->fd
);
844 return &mapping
->obj
;
847 release_object( mapping
);
851 struct mapping
*get_mapping_obj( struct process
*process
, obj_handle_t handle
, unsigned int access
)
853 return (struct mapping
*)get_handle_obj( process
, handle
, access
, &mapping_ops
);
856 /* open a new file for the file descriptor backing the mapping */
857 struct file
*get_mapping_file( struct process
*process
, client_ptr_t base
,
858 unsigned int access
, unsigned int sharing
)
860 struct memory_view
*view
= find_mapped_view( process
, base
);
862 if (!view
|| !view
->fd
) return NULL
;
863 return create_file_for_fd_obj( view
->fd
, access
, sharing
);
866 static void mapping_dump( struct object
*obj
, int verbose
)
868 struct mapping
*mapping
= (struct mapping
*)obj
;
869 assert( obj
->ops
== &mapping_ops
);
870 fprintf( stderr
, "Mapping size=%08x%08x flags=%08x fd=%p shared=%p\n",
871 (unsigned int)(mapping
->size
>> 32), (unsigned int)mapping
->size
,
872 mapping
->flags
, mapping
->fd
, mapping
->shared
);
875 static struct object_type
*mapping_get_type( struct object
*obj
)
877 static const WCHAR name
[] = {'S','e','c','t','i','o','n'};
878 static const struct unicode_str str
= { name
, sizeof(name
) };
879 return get_object_type( &str
);
882 static struct fd
*mapping_get_fd( struct object
*obj
)
884 struct mapping
*mapping
= (struct mapping
*)obj
;
885 return (struct fd
*)grab_object( mapping
->fd
);
888 static unsigned int mapping_map_access( struct object
*obj
, unsigned int access
)
890 if (access
& GENERIC_READ
) access
|= STANDARD_RIGHTS_READ
| SECTION_QUERY
| SECTION_MAP_READ
;
891 if (access
& GENERIC_WRITE
) access
|= STANDARD_RIGHTS_WRITE
| SECTION_MAP_WRITE
;
892 if (access
& GENERIC_EXECUTE
) access
|= STANDARD_RIGHTS_EXECUTE
| SECTION_MAP_EXECUTE
;
893 if (access
& GENERIC_ALL
) access
|= SECTION_ALL_ACCESS
;
894 return access
& ~(GENERIC_READ
| GENERIC_WRITE
| GENERIC_EXECUTE
| GENERIC_ALL
);
897 static void mapping_destroy( struct object
*obj
)
899 struct mapping
*mapping
= (struct mapping
*)obj
;
900 assert( obj
->ops
== &mapping_ops
);
901 if (mapping
->fd
) release_object( mapping
->fd
);
902 if (mapping
->committed
) release_object( mapping
->committed
);
903 if (mapping
->shared
) release_object( mapping
->shared
);
906 static enum server_fd_type
mapping_get_fd_type( struct fd
*fd
)
911 int get_page_size(void)
913 if (!page_mask
) page_mask
= sysconf( _SC_PAGESIZE
) - 1;
914 return page_mask
+ 1;
917 /* create a file mapping */
918 DECL_HANDLER(create_mapping
)
920 struct object
*root
, *obj
;
921 struct unicode_str name
;
922 const struct security_descriptor
*sd
;
923 const struct object_attributes
*objattr
= get_req_object_attributes( &sd
, &name
, &root
);
925 if (!objattr
) return;
927 if ((obj
= create_mapping( root
, &name
, objattr
->attributes
, req
->size
, req
->flags
,
928 req
->file_handle
, req
->file_access
, sd
)))
930 if (get_error() == STATUS_OBJECT_NAME_EXISTS
)
931 reply
->handle
= alloc_handle( current
->process
, obj
, req
->access
, objattr
->attributes
);
933 reply
->handle
= alloc_handle_no_access_check( current
->process
, obj
,
934 req
->access
, objattr
->attributes
);
935 release_object( obj
);
938 if (root
) release_object( root
);
941 /* open a handle to a mapping */
942 DECL_HANDLER(open_mapping
)
944 struct unicode_str name
= get_req_unicode_str();
946 reply
->handle
= open_object( current
->process
, req
->rootdir
, req
->access
,
947 &mapping_ops
, &name
, req
->attributes
);
950 /* get a mapping information */
951 DECL_HANDLER(get_mapping_info
)
953 struct mapping
*mapping
;
955 if (!(mapping
= get_mapping_obj( current
->process
, req
->handle
, req
->access
))) return;
957 reply
->size
= mapping
->size
;
958 reply
->flags
= mapping
->flags
;
960 if (mapping
->flags
& SEC_IMAGE
)
961 set_reply_data( &mapping
->image
, min( sizeof(mapping
->image
), get_reply_max_size() ));
963 if (!(req
->access
& (SECTION_MAP_READ
| SECTION_MAP_WRITE
))) /* query only */
965 release_object( mapping
);
970 reply
->shared_file
= alloc_handle( current
->process
, mapping
->shared
->file
,
971 GENERIC_READ
|GENERIC_WRITE
, 0 );
972 release_object( mapping
);
975 /* add a memory view in the current process */
976 DECL_HANDLER(map_view
)
978 struct mapping
*mapping
= NULL
;
979 struct memory_view
*view
;
981 if (!req
->size
|| (req
->base
& page_mask
) || req
->base
+ req
->size
< req
->base
) /* overflow */
983 set_error( STATUS_INVALID_PARAMETER
);
987 /* make sure we don't already have an overlapping view */
988 LIST_FOR_EACH_ENTRY( view
, ¤t
->process
->views
, struct memory_view
, entry
)
990 if (view
->base
+ view
->size
<= req
->base
) continue;
991 if (view
->base
>= req
->base
+ req
->size
) continue;
992 set_error( STATUS_INVALID_PARAMETER
);
996 if (!(mapping
= get_mapping_obj( current
->process
, req
->mapping
, req
->access
))) return;
998 if (mapping
->flags
& SEC_IMAGE
)
1000 if (req
->start
|| req
->size
> mapping
->image
.map_size
)
1002 set_error( STATUS_INVALID_PARAMETER
);
1006 else if (req
->start
>= mapping
->size
||
1007 req
->start
+ req
->size
< req
->start
||
1008 req
->start
+ req
->size
> ((mapping
->size
+ page_mask
) & ~(mem_size_t
)page_mask
))
1010 set_error( STATUS_INVALID_PARAMETER
);
1014 if ((view
= mem_alloc( sizeof(*view
) )))
1016 view
->base
= req
->base
;
1017 view
->size
= req
->size
;
1018 view
->start
= req
->start
;
1019 view
->flags
= mapping
->flags
;
1020 view
->fd
= !is_fd_removable( mapping
->fd
) ? (struct fd
*)grab_object( mapping
->fd
) : NULL
;
1021 view
->committed
= mapping
->committed
? (struct ranges
*)grab_object( mapping
->committed
) : NULL
;
1022 view
->shared
= mapping
->shared
? (struct shared_map
*)grab_object( mapping
->shared
) : NULL
;
1023 list_add_tail( ¤t
->process
->views
, &view
->entry
);
1027 release_object( mapping
);
1030 /* unmap a memory view from the current process */
1031 DECL_HANDLER(unmap_view
)
1033 struct memory_view
*view
= find_mapped_view( current
->process
, req
->base
);
1035 if (view
) free_memory_view( view
);
1038 /* get a range of committed pages in a file mapping */
1039 DECL_HANDLER(get_mapping_committed_range
)
1041 struct memory_view
*view
= find_mapped_view( current
->process
, req
->base
);
1043 if (view
) reply
->committed
= find_committed_range( view
, req
->offset
, &reply
->size
);
1046 /* add a range to the committed pages in a file mapping */
1047 DECL_HANDLER(add_mapping_committed_range
)
1049 struct memory_view
*view
= find_mapped_view( current
->process
, req
->base
);
1051 if (view
) add_committed_range( view
, req
->offset
, req
->offset
+ req
->size
);
1054 /* check if two memory maps are for the same file */
1055 DECL_HANDLER(is_same_mapping
)
1057 struct memory_view
*view1
= find_mapped_view( current
->process
, req
->base1
);
1058 struct memory_view
*view2
= find_mapped_view( current
->process
, req
->base2
);
1060 if (!view1
|| !view2
) return;
1061 if (!view1
->fd
|| !view2
->fd
||
1062 !(view1
->flags
& SEC_IMAGE
) || !(view2
->flags
& SEC_IMAGE
) ||
1063 !is_same_file_fd( view1
->fd
, view2
->fd
))
1064 set_error( STATUS_NOT_SAME_DEVICE
);