/*
 * Server-side file mapping management
 *
 * Copyright (C) 1999 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include "config.h"
#include "wine/port.h"

#include <assert.h>
#include <fcntl.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#ifdef HAVE_SYS_MMAN_H
# include <sys/mman.h>
#endif

#include "ntstatus.h"
#define WIN32_NO_STATUS
#include "windef.h"
#include "winternl.h"

#include "file.h"
#include "handle.h"
#include "thread.h"
#include "process.h"
#include "request.h"
#include "security.h"

/* list of memory ranges, used to store committed info */
struct ranges
{
    struct object   obj;         /* object header */
    unsigned int    count;       /* number of used ranges */
    unsigned int    max;         /* number of allocated ranges */
    struct range
    {
        file_pos_t  start;
        file_pos_t  end;
    } *ranges;
};

static void ranges_dump( struct object *obj, int verbose );
static void ranges_destroy( struct object *obj );

static const struct object_ops ranges_ops =
{
    sizeof(struct ranges),     /* size */
    ranges_dump,               /* dump */
    no_get_type,               /* get_type */
    no_add_queue,              /* add_queue */
    NULL,                      /* remove_queue */
    NULL,                      /* signaled */
    NULL,                      /* satisfied */
    no_signal,                 /* signal */
    no_get_fd,                 /* get_fd */
    no_map_access,             /* map_access */
    default_get_sd,            /* get_sd */
    default_set_sd,            /* set_sd */
    no_lookup_name,            /* lookup_name */
    no_link_name,              /* link_name */
    NULL,                      /* unlink_name */
    no_open_file,              /* open_file */
    no_close_handle,           /* close_handle */
    ranges_destroy             /* destroy */
};

/* file backing the shared sections of a PE image mapping */
struct shared_map
{
    struct object   obj;             /* object header */
    struct fd      *fd;              /* file descriptor of the mapped PE file */
    struct file    *file;            /* temp file holding the shared data */
    struct list     entry;           /* entry in global shared maps list */
};

static void shared_map_dump( struct object *obj, int verbose );
static void shared_map_destroy( struct object *obj );

static const struct object_ops shared_map_ops =
{
    sizeof(struct shared_map), /* size */
    shared_map_dump,           /* dump */
    no_get_type,               /* get_type */
    no_add_queue,              /* add_queue */
    NULL,                      /* remove_queue */
    NULL,                      /* signaled */
    NULL,                      /* satisfied */
    no_signal,                 /* signal */
    no_get_fd,                 /* get_fd */
    no_map_access,             /* map_access */
    default_get_sd,            /* get_sd */
    default_set_sd,            /* set_sd */
    no_lookup_name,            /* lookup_name */
    no_link_name,              /* link_name */
    NULL,                      /* unlink_name */
    no_open_file,              /* open_file */
    no_close_handle,           /* close_handle */
    shared_map_destroy         /* destroy */
};

static struct list shared_map_list = LIST_INIT( shared_map_list );

/* memory view mapped in client address space */
struct memory_view
{
    struct list     entry;           /* entry in per-process view list */
    struct fd      *fd;              /* fd for mapped file */
    struct ranges  *committed;       /* list of committed ranges in this mapping */
    struct shared_map *shared;       /* temp file for shared PE mapping */
    unsigned int    flags;           /* SEC_* flags */
    client_ptr_t    base;            /* view base address (in process addr space) */
    mem_size_t      size;            /* view size */
    file_pos_t      start;           /* start offset in mapping */
};

struct mapping
{
    struct object   obj;             /* object header */
    mem_size_t      size;            /* mapping size */
    unsigned int    flags;           /* SEC_* flags */
    struct fd      *fd;              /* fd for mapped file */
    enum cpu_type   cpu;             /* client CPU (for PE image mapping) */
    pe_image_info_t image;           /* image info (for PE image mapping) */
    struct ranges  *committed;       /* list of committed ranges in this mapping */
    struct shared_map *shared;       /* temp file for shared PE mapping */
};

static void mapping_dump( struct object *obj, int verbose );
static struct object_type *mapping_get_type( struct object *obj );
static struct fd *mapping_get_fd( struct object *obj );
static unsigned int mapping_map_access( struct object *obj, unsigned int access );
static void mapping_destroy( struct object *obj );
static enum server_fd_type mapping_get_fd_type( struct fd *fd );

static const struct object_ops mapping_ops =
{
    sizeof(struct mapping),      /* size */
    mapping_dump,                /* dump */
    mapping_get_type,            /* get_type */
    no_add_queue,                /* add_queue */
    NULL,                        /* remove_queue */
    NULL,                        /* signaled */
    NULL,                        /* satisfied */
    no_signal,                   /* signal */
    mapping_get_fd,              /* get_fd */
    mapping_map_access,          /* map_access */
    default_get_sd,              /* get_sd */
    default_set_sd,              /* set_sd */
    no_lookup_name,              /* lookup_name */
    directory_link_name,         /* link_name */
    default_unlink_name,         /* unlink_name */
    no_open_file,                /* open_file */
    fd_close_handle,             /* close_handle */
    mapping_destroy              /* destroy */
};

static const struct fd_ops mapping_fd_ops =
{
    default_fd_get_poll_events,   /* get_poll_events */
    default_poll_event,           /* poll_event */
    mapping_get_fd_type,          /* get_fd_type */
    no_fd_read,                   /* read */
    no_fd_write,                  /* write */
    no_fd_flush,                  /* flush */
    no_fd_get_volume_info,        /* get_volume_info */
    no_fd_ioctl,                  /* ioctl */
    no_fd_queue_async,            /* queue_async */
    default_fd_reselect_async     /* reselect_async */
};

static size_t page_mask;

#define ROUND_SIZE(size)  (((size) + page_mask) & ~page_mask)
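
/* ROUND_SIZE rounds a byte count up to a whole number of pages using
 * page_mask (sysconf(_SC_PAGESIZE) - 1).  For example, with 4K pages
 * (page_mask == 0xfff), ROUND_SIZE(0x1234) yields 0x2000 and ROUND_SIZE(0)
 * yields 0.  The macro assumes page_mask has already been initialized. */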

static void ranges_dump( struct object *obj, int verbose )
{
    struct ranges *ranges = (struct ranges *)obj;
    fprintf( stderr, "Memory ranges count=%u\n", ranges->count );
}

static void ranges_destroy( struct object *obj )
{
    struct ranges *ranges = (struct ranges *)obj;
    free( ranges->ranges );
}

static void shared_map_dump( struct object *obj, int verbose )
{
    struct shared_map *shared = (struct shared_map *)obj;
    fprintf( stderr, "Shared mapping fd=%p file=%p\n", shared->fd, shared->file );
}

static void shared_map_destroy( struct object *obj )
{
    struct shared_map *shared = (struct shared_map *)obj;

    release_object( shared->fd );
    release_object( shared->file );
    list_remove( &shared->entry );
}

/* extend a file beyond the current end of file */
static int grow_file( int unix_fd, file_pos_t new_size )
{
    static const char zero;
    off_t size = new_size;

    if (sizeof(new_size) > sizeof(size) && size != new_size)
    {
        set_error( STATUS_INVALID_PARAMETER );
        return 0;
    }
    /* extend the file one byte beyond the requested size and then truncate it */
    /* this should work around ftruncate implementations that can't extend files */
    if (pwrite( unix_fd, &zero, 1, size ) != -1)
    {
        ftruncate( unix_fd, size );
        return 1;
    }
    file_set_error();
    return 0;
}
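
/* grow_file() writes a single zero byte just past the requested size and then
 * truncates the file back, working around ftruncate() implementations that
 * cannot extend a file.  The preliminary check guards against silent
 * truncation when file_pos_t is wider than off_t (e.g. a 64-bit mapping size
 * with a 32-bit off_t): if the value no longer compares equal after the
 * assignment, the request is rejected with STATUS_INVALID_PARAMETER. */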

/* check if the current directory allows exec mappings */
static int check_current_dir_for_exec(void)
{
    int fd;
    char tmpfn[] = "anonmap.XXXXXX";
    void *ret = MAP_FAILED;

    fd = mkstemps( tmpfn, 0 );
    if (fd == -1) return 0;
    if (grow_file( fd, 1 ))
    {
        ret = mmap( NULL, get_page_size(), PROT_READ | PROT_EXEC, MAP_PRIVATE, fd, 0 );
        if (ret != MAP_FAILED) munmap( ret, get_page_size() );
    }
    close( fd );
    unlink( tmpfn );
    return (ret != MAP_FAILED);
}

/* create a temp file for anonymous mappings */
static int create_temp_file( file_pos_t size )
{
    static int temp_dir_fd = -1;
    char tmpfn[] = "anonmap.XXXXXX";
    int fd;

    if (temp_dir_fd == -1)
    {
        temp_dir_fd = server_dir_fd;
        if (!check_current_dir_for_exec())
        {
            /* the server dir is noexec, try the config dir instead */
            fchdir( config_dir_fd );
            if (check_current_dir_for_exec())
                temp_dir_fd = config_dir_fd;
            else  /* neither works, fall back to server dir */
                fchdir( server_dir_fd );
        }
    }
    else if (temp_dir_fd != server_dir_fd) fchdir( temp_dir_fd );

    fd = mkstemps( tmpfn, 0 );
    if (fd != -1)
    {
        if (!grow_file( fd, size ))
        {
            close( fd );
            fd = -1;
        }
        unlink( tmpfn );
    }
    else file_set_error();

    if (temp_dir_fd != server_dir_fd) fchdir( server_dir_fd );
    return fd;
}
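
/* Anonymous mappings are backed by unlinked temp files.  They are created in
 * the server directory when it allows exec mappings, otherwise in the config
 * directory; the exec check matters because check_current_dir_for_exec() maps
 * its probe file with PROT_EXEC, which fails on filesystems mounted noexec,
 * and the files created here are expected to be mappable the same way. */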

/* find a memory view from its base address */
static struct memory_view *find_mapped_view( struct process *process, client_ptr_t base )
{
    struct memory_view *view;

    LIST_FOR_EACH_ENTRY( view, &process->views, struct memory_view, entry )
        if (view->base == base) return view;

    set_error( STATUS_NOT_MAPPED_VIEW );
    return NULL;
}

static void free_memory_view( struct memory_view *view )
{
    if (view->fd) release_object( view->fd );
    if (view->committed) release_object( view->committed );
    if (view->shared) release_object( view->shared );
    list_remove( &view->entry );
    free( view );
}

/* free all mapped views at process exit */
void free_mapped_views( struct process *process )
{
    struct list *ptr;

    while ((ptr = list_head( &process->views )))
        free_memory_view( LIST_ENTRY( ptr, struct memory_view, entry ));
}

/* find the shared PE mapping for a given mapping */
static struct shared_map *get_shared_file( struct fd *fd )
{
    struct shared_map *ptr;

    LIST_FOR_EACH_ENTRY( ptr, &shared_map_list, struct shared_map, entry )
        if (is_same_file_fd( ptr->fd, fd ))
            return (struct shared_map *)grab_object( ptr );
    return NULL;
}

/* return the size of the memory mapping and file range of a given section */
static inline void get_section_sizes( const IMAGE_SECTION_HEADER *sec, size_t *map_size,
                                      off_t *file_start, size_t *file_size )
{
    static const unsigned int sector_align = 0x1ff;

    if (!sec->Misc.VirtualSize) *map_size = ROUND_SIZE( sec->SizeOfRawData );
    else *map_size = ROUND_SIZE( sec->Misc.VirtualSize );

    *file_start = sec->PointerToRawData & ~sector_align;
    *file_size = (sec->SizeOfRawData + (sec->PointerToRawData & sector_align) + sector_align) & ~sector_align;
    if (*file_size > *map_size) *file_size = *map_size;
}
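
/* Example (illustrative values): a section with PointerToRawData = 0x650,
 * SizeOfRawData = 0x300 and VirtualSize = 0x250 gives, with 4K pages,
 * map_size = ROUND_SIZE(0x250) = 0x1000, file_start = 0x650 & ~0x1ff = 0x600
 * and file_size = (0x300 + 0x50 + 0x1ff) & ~0x1ff = 0x400: the file range is
 * widened to whole 512-byte sectors around the raw data, then clamped to the
 * size of the in-memory mapping. */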

/* add a range to the committed list */
static void add_committed_range( struct memory_view *view, file_pos_t start, file_pos_t end )
{
    unsigned int i, j;
    struct ranges *committed = view->committed;
    struct range *ranges;

    if ((start & page_mask) || (end & page_mask) ||
        start >= view->size || end >= view->size ||
        start >= end)
    {
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }

    if (!committed) return;  /* everything committed already */

    start += view->start;
    end += view->start;

    for (i = 0, ranges = committed->ranges; i < committed->count; i++)
    {
        if (ranges[i].start > end) break;
        if (ranges[i].end < start) continue;
        if (ranges[i].start > start) ranges[i].start = start;   /* extend downwards */
        if (ranges[i].end < end)   /* extend upwards and maybe merge with next */
        {
            for (j = i + 1; j < committed->count; j++)
            {
                if (ranges[j].start > end) break;
                if (ranges[j].end > end) end = ranges[j].end;
            }
            if (j > i + 1)
            {
                memmove( &ranges[i + 1], &ranges[j], (committed->count - j) * sizeof(*ranges) );
                committed->count -= j - (i + 1);
            }
            ranges[i].end = end;
        }
        return;
    }

    /* now add a new range */

    if (committed->count == committed->max)
    {
        unsigned int new_size = committed->max * 2;
        struct range *new_ptr = realloc( committed->ranges, new_size * sizeof(*new_ptr) );
        if (!new_ptr) return;
        committed->max = new_size;
        ranges = committed->ranges = new_ptr;
    }
    memmove( &ranges[i + 1], &ranges[i], (committed->count - i) * sizeof(*ranges) );
    ranges[i].start = start;
    ranges[i].end = end;
    committed->count++;
}
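
/* Example (illustrative offsets, assuming the view starts at offset 0): with
 * committed ranges [0x1000,0x3000) and [0x5000,0x6000), adding
 * [0x2000,0x5000) extends the first range upwards, absorbs the second and
 * leaves the single range [0x1000,0x6000).  The list is kept sorted and
 * non-overlapping, so lookups can stop at the first range starting beyond the
 * requested offset. */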

/* find the range containing start and return whether it's committed */
static int find_committed_range( struct memory_view *view, file_pos_t start, mem_size_t *size )
{
    unsigned int i;
    struct ranges *committed = view->committed;
    struct range *ranges;

    if ((start & page_mask) || start >= view->size)
    {
        set_error( STATUS_INVALID_PARAMETER );
        return 0;
    }
    if (!committed)  /* everything is committed */
    {
        *size = view->size - start;
        return 1;
    }
    for (i = 0, ranges = committed->ranges; i < committed->count; i++)
    {
        if (ranges[i].start > view->start + start)
        {
            *size = min( ranges[i].start, view->start + view->size ) - (view->start + start);
            return 0;
        }
        if (ranges[i].end > view->start + start)
        {
            *size = min( ranges[i].end, view->start + view->size ) - (view->start + start);
            return 1;
        }
    }
    *size = view->size - start;
    return 0;
}
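
/* find_committed_range() returns nonzero when the page at 'start' is
 * committed and zero otherwise; in both cases *size receives the length of
 * the contiguous committed or uncommitted run beginning at 'start', clamped
 * to the end of the view.  A view without a ranges object is treated as fully
 * committed. */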

/* allocate and fill the temp file for a shared PE image mapping */
static int build_shared_mapping( struct mapping *mapping, int fd,
                                 IMAGE_SECTION_HEADER *sec, unsigned int nb_sec )
{
    struct shared_map *shared;
    struct file *file;
    unsigned int i;
    mem_size_t total_size;
    size_t file_size, map_size, max_size;
    off_t shared_pos, read_pos, write_pos;
    char *buffer = NULL;
    int shared_fd;
    long toread;

    /* compute the total size of the shared mapping */

    total_size = max_size = 0;
    for (i = 0; i < nb_sec; i++)
    {
        if ((sec[i].Characteristics & IMAGE_SCN_MEM_SHARED) &&
            (sec[i].Characteristics & IMAGE_SCN_MEM_WRITE))
        {
            get_section_sizes( &sec[i], &map_size, &read_pos, &file_size );
            if (file_size > max_size) max_size = file_size;
            total_size += map_size;
        }
    }
    if (!total_size) return 1;  /* nothing to do */

    if ((mapping->shared = get_shared_file( mapping->fd ))) return 1;

    /* create a temp file for the mapping */

    if ((shared_fd = create_temp_file( total_size )) == -1) return 0;
    if (!(file = create_file_for_fd( shared_fd, FILE_GENERIC_READ|FILE_GENERIC_WRITE, 0 ))) return 0;

    if (!(buffer = malloc( max_size ))) goto error;

    /* copy the shared sections data into the temp file */

    shared_pos = 0;
    for (i = 0; i < nb_sec; i++)
    {
        if (!(sec[i].Characteristics & IMAGE_SCN_MEM_SHARED)) continue;
        if (!(sec[i].Characteristics & IMAGE_SCN_MEM_WRITE)) continue;
        get_section_sizes( &sec[i], &map_size, &read_pos, &file_size );
        write_pos = shared_pos;
        shared_pos += map_size;
        if (!sec[i].PointerToRawData || !file_size) continue;

        toread = file_size;
        while (toread)
        {
            long res = pread( fd, buffer + file_size - toread, toread, read_pos );
            if (!res && toread < 0x200)  /* partial sector at EOF is not an error */
            {
                file_size -= toread;
                break;
            }
            if (res <= 0) goto error;
            toread -= res;
            read_pos += res;
        }
        if (pwrite( shared_fd, buffer, file_size, write_pos ) != file_size) goto error;
    }

    if (!(shared = alloc_object( &shared_map_ops ))) goto error;
    shared->fd = (struct fd *)grab_object( mapping->fd );
    shared->file = file;
    list_add_head( &shared_map_list, &shared->entry );
    mapping->shared = shared;
    free( buffer );
    return 1;

 error:
    release_object( file );
    free( buffer );
    return 0;
}
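
/* Writable sections marked IMAGE_SCN_MEM_SHARED must expose the same data to
 * every process mapping the PE file, so their raw contents are copied into a
 * single temp file that all clients map; get_shared_file() reuses an existing
 * shared_map when the same PE file has already been mapped. */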

/* retrieve the mapping parameters for an executable (PE) image */
static unsigned int get_image_params( struct mapping *mapping, file_pos_t file_size, int unix_fd )
{
    IMAGE_DOS_HEADER dos;
    IMAGE_SECTION_HEADER *sec = NULL;
    struct
    {
        DWORD Signature;
        IMAGE_FILE_HEADER FileHeader;
        union
        {
            IMAGE_OPTIONAL_HEADER32 hdr32;
            IMAGE_OPTIONAL_HEADER64 hdr64;
        } opt;
    } nt;
    off_t pos;
    int size;

    /* load the headers */

    if (!file_size) return STATUS_INVALID_FILE_FOR_SECTION;
    if (pread( unix_fd, &dos, sizeof(dos), 0 ) != sizeof(dos)) return STATUS_INVALID_IMAGE_NOT_MZ;
    if (dos.e_magic != IMAGE_DOS_SIGNATURE) return STATUS_INVALID_IMAGE_NOT_MZ;
    pos = dos.e_lfanew;

    size = pread( unix_fd, &nt, sizeof(nt), pos );
    if (size < sizeof(nt.Signature) + sizeof(nt.FileHeader)) return STATUS_INVALID_IMAGE_FORMAT;
    /* zero out Optional header in the case it's not present or partial */
    size = min( size, sizeof(nt.Signature) + sizeof(nt.FileHeader) + nt.FileHeader.SizeOfOptionalHeader );
    if (size < sizeof(nt)) memset( (char *)&nt + size, 0, sizeof(nt) - size );
    if (nt.Signature != IMAGE_NT_SIGNATURE)
    {
        if (*(WORD *)&nt.Signature == IMAGE_OS2_SIGNATURE) return STATUS_INVALID_IMAGE_NE_FORMAT;
        return STATUS_INVALID_IMAGE_PROTECT;
    }

    mapping->cpu = current->process->cpu;
    switch (mapping->cpu)
    {
    case CPU_x86:
        if (nt.FileHeader.Machine != IMAGE_FILE_MACHINE_I386) return STATUS_INVALID_IMAGE_FORMAT;
        if (nt.opt.hdr32.Magic != IMAGE_NT_OPTIONAL_HDR32_MAGIC) return STATUS_INVALID_IMAGE_FORMAT;
        break;
    case CPU_x86_64:
        if (nt.FileHeader.Machine != IMAGE_FILE_MACHINE_AMD64) return STATUS_INVALID_IMAGE_FORMAT;
        if (nt.opt.hdr64.Magic != IMAGE_NT_OPTIONAL_HDR64_MAGIC) return STATUS_INVALID_IMAGE_FORMAT;
        break;
    case CPU_POWERPC:
        if (nt.FileHeader.Machine != IMAGE_FILE_MACHINE_POWERPC) return STATUS_INVALID_IMAGE_FORMAT;
        if (nt.opt.hdr32.Magic != IMAGE_NT_OPTIONAL_HDR32_MAGIC) return STATUS_INVALID_IMAGE_FORMAT;
        break;
    case CPU_ARM:
        if (nt.FileHeader.Machine != IMAGE_FILE_MACHINE_ARM &&
            nt.FileHeader.Machine != IMAGE_FILE_MACHINE_THUMB &&
            nt.FileHeader.Machine != IMAGE_FILE_MACHINE_ARMNT) return STATUS_INVALID_IMAGE_FORMAT;
        if (nt.opt.hdr32.Magic != IMAGE_NT_OPTIONAL_HDR32_MAGIC) return STATUS_INVALID_IMAGE_FORMAT;
        break;
    case CPU_ARM64:
        if (nt.FileHeader.Machine != IMAGE_FILE_MACHINE_ARM64) return STATUS_INVALID_IMAGE_FORMAT;
        if (nt.opt.hdr64.Magic != IMAGE_NT_OPTIONAL_HDR64_MAGIC) return STATUS_INVALID_IMAGE_FORMAT;
        break;
    default:
        return STATUS_INVALID_IMAGE_FORMAT;
    }

    switch (nt.opt.hdr32.Magic)
    {
    case IMAGE_NT_OPTIONAL_HDR32_MAGIC:
        mapping->image.base           = nt.opt.hdr32.ImageBase;
        mapping->image.entry_point    = nt.opt.hdr32.ImageBase + nt.opt.hdr32.AddressOfEntryPoint;
        mapping->image.map_size       = ROUND_SIZE( nt.opt.hdr32.SizeOfImage );
        mapping->image.stack_size     = nt.opt.hdr32.SizeOfStackReserve;
        mapping->image.stack_commit   = nt.opt.hdr32.SizeOfStackCommit;
        mapping->image.subsystem      = nt.opt.hdr32.Subsystem;
        mapping->image.subsystem_low  = nt.opt.hdr32.MinorSubsystemVersion;
        mapping->image.subsystem_high = nt.opt.hdr32.MajorSubsystemVersion;
        mapping->image.dll_charact    = nt.opt.hdr32.DllCharacteristics;
        mapping->image.loader_flags   = nt.opt.hdr32.LoaderFlags;
        mapping->image.header_size    = nt.opt.hdr32.SizeOfHeaders;
        mapping->image.checksum       = nt.opt.hdr32.CheckSum;
        break;

    case IMAGE_NT_OPTIONAL_HDR64_MAGIC:
        mapping->image.base           = nt.opt.hdr64.ImageBase;
        mapping->image.entry_point    = nt.opt.hdr64.ImageBase + nt.opt.hdr64.AddressOfEntryPoint;
        mapping->image.map_size       = ROUND_SIZE( nt.opt.hdr64.SizeOfImage );
        mapping->image.stack_size     = nt.opt.hdr64.SizeOfStackReserve;
        mapping->image.stack_commit   = nt.opt.hdr64.SizeOfStackCommit;
        mapping->image.subsystem      = nt.opt.hdr64.Subsystem;
        mapping->image.subsystem_low  = nt.opt.hdr64.MinorSubsystemVersion;
        mapping->image.subsystem_high = nt.opt.hdr64.MajorSubsystemVersion;
        mapping->image.dll_charact    = nt.opt.hdr64.DllCharacteristics;
        mapping->image.loader_flags   = nt.opt.hdr64.LoaderFlags;
        mapping->image.header_size    = nt.opt.hdr64.SizeOfHeaders;
        mapping->image.checksum       = nt.opt.hdr64.CheckSum;
        break;
    }

    mapping->image.image_charact = nt.FileHeader.Characteristics;
    mapping->image.machine       = nt.FileHeader.Machine;
    mapping->image.zerobits      = 0; /* FIXME */
    mapping->image.gp            = 0; /* FIXME */
    mapping->image.contains_code = 0; /* FIXME */
    mapping->image.image_flags   = 0; /* FIXME */
    mapping->image.file_size     = file_size;

    /* load the section headers */

    pos += sizeof(nt.Signature) + sizeof(nt.FileHeader) + nt.FileHeader.SizeOfOptionalHeader;
    size = sizeof(*sec) * nt.FileHeader.NumberOfSections;
    if (!mapping->size) mapping->size = mapping->image.map_size;
    else if (mapping->size > mapping->image.map_size) return STATUS_SECTION_TOO_BIG;
    if (pos + size > mapping->image.map_size) return STATUS_INVALID_FILE_FOR_SECTION;
    if (pos + size > mapping->image.header_size) mapping->image.header_size = pos + size;
    if (!(sec = malloc( size ))) goto error;
    if (pread( unix_fd, sec, size, pos ) != size) goto error;

    if (!build_shared_mapping( mapping, unix_fd, sec, nt.FileHeader.NumberOfSections )) goto error;

    free( sec );
    return 0;

 error:
    free( sec );
    return STATUS_INVALID_FILE_FOR_SECTION;
}
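
/* The NT headers are read into a local struct holding the signature, the file
 * header and a union of the 32-bit and 64-bit optional header layouts; bytes
 * beyond the optional header actually present in the file are zeroed so later
 * field accesses are well defined.  Images whose machine type or optional
 * header magic do not match the client CPU are rejected with
 * STATUS_INVALID_IMAGE_FORMAT. */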

static struct ranges *create_ranges(void)
{
    struct ranges *ranges = alloc_object( &ranges_ops );

    if (!ranges) return NULL;
    ranges->count = 0;
    ranges->max   = 8;
    if (!(ranges->ranges = mem_alloc( ranges->max * sizeof(*ranges->ranges) )))
    {
        release_object( ranges );
        return NULL;
    }
    return ranges;
}

static unsigned int get_mapping_flags( obj_handle_t handle, unsigned int flags )
{
    switch (flags & (SEC_IMAGE | SEC_RESERVE | SEC_COMMIT | SEC_FILE))
    {
    case SEC_IMAGE:
        if (flags & (SEC_WRITECOMBINE | SEC_LARGE_PAGES)) break;
        if (handle) return SEC_FILE | SEC_IMAGE;
        set_error( STATUS_INVALID_FILE_FOR_SECTION );
        return 0;
    case SEC_COMMIT:
        if (!handle) return flags;
        /* fall through */
    case SEC_RESERVE:
        if (flags & SEC_LARGE_PAGES) break;
        if (handle) return SEC_FILE | (flags & (SEC_NOCACHE | SEC_WRITECOMBINE));
        break;
    }
    set_error( STATUS_INVALID_PARAMETER );
    return 0;
}
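
/* In short: SEC_IMAGE needs a file handle (otherwise the call fails with
 * STATUS_INVALID_FILE_FOR_SECTION) and becomes SEC_FILE | SEC_IMAGE;
 * SEC_COMMIT without a handle is a plain anonymous mapping and keeps its
 * flags; SEC_COMMIT or SEC_RESERVE with a handle becomes SEC_FILE plus any
 * caching flags; the remaining combinations, and SEC_LARGE_PAGES in general,
 * fail with STATUS_INVALID_PARAMETER. */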

static struct object *create_mapping( struct object *root, const struct unicode_str *name,
                                      unsigned int attr, mem_size_t size, unsigned int flags,
                                      obj_handle_t handle, unsigned int file_access,
                                      const struct security_descriptor *sd )
{
    struct mapping *mapping;
    struct file *file;
    struct fd *fd;
    int unix_fd;
    struct stat st;

    if (!page_mask) page_mask = sysconf( _SC_PAGESIZE ) - 1;

    if (!(mapping = create_named_object( root, &mapping_ops, name, attr, sd )))
        return NULL;
    if (get_error() == STATUS_OBJECT_NAME_EXISTS)
        return &mapping->obj;  /* Nothing else to do */

    mapping->size      = size;
    mapping->fd        = NULL;
    mapping->shared    = NULL;
    mapping->committed = NULL;

    if (!(mapping->flags = get_mapping_flags( handle, flags ))) goto error;

    if (handle)
    {
        const unsigned int sharing = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE;
        unsigned int mapping_access = FILE_MAPPING_ACCESS;

        if (!(file = get_file_obj( current->process, handle, file_access ))) goto error;
        fd = get_obj_fd( (struct object *)file );

        /* file sharing rules for mappings are different so we use magic access rights */
        if (flags & SEC_IMAGE) mapping_access |= FILE_MAPPING_IMAGE;
        else if (file_access & FILE_WRITE_DATA) mapping_access |= FILE_MAPPING_WRITE;

        if (!(mapping->fd = get_fd_object_for_mapping( fd, mapping_access, sharing )))
        {
            mapping->fd = dup_fd_object( fd, mapping_access, sharing, FILE_SYNCHRONOUS_IO_NONALERT );
            if (mapping->fd) set_fd_user( mapping->fd, &mapping_fd_ops, NULL );
        }
        release_object( file );
        release_object( fd );
        if (!mapping->fd) goto error;

        if ((unix_fd = get_unix_fd( mapping->fd )) == -1) goto error;
        if (fstat( unix_fd, &st ) == -1)
        {
            file_set_error();
            goto error;
        }
        if (flags & SEC_IMAGE)
        {
            unsigned int err = get_image_params( mapping, st.st_size, unix_fd );
            if (!err) return &mapping->obj;
            set_error( err );
            goto error;
        }
        if (!mapping->size)
        {
            if (!(mapping->size = st.st_size))
            {
                set_error( STATUS_MAPPED_FILE_SIZE_ZERO );
                goto error;
            }
        }
        else if (st.st_size < mapping->size)
        {
            if (!(file_access & FILE_WRITE_DATA))
            {
                set_error( STATUS_SECTION_TOO_BIG );
                goto error;
            }
            if (!grow_file( unix_fd, mapping->size )) goto error;
        }
    }
    else  /* Anonymous mapping (no associated file) */
    {
        if (!mapping->size)
        {
            set_error( STATUS_INVALID_PARAMETER );
            goto error;
        }
        if ((flags & SEC_RESERVE) && !(mapping->committed = create_ranges())) goto error;
        mapping->size = (mapping->size + page_mask) & ~((mem_size_t)page_mask);
        if ((unix_fd = create_temp_file( mapping->size )) == -1) goto error;
        if (!(mapping->fd = create_anonymous_fd( &mapping_fd_ops, unix_fd, &mapping->obj,
                                                 FILE_SYNCHRONOUS_IO_NONALERT ))) goto error;
        allow_fd_caching( mapping->fd );
    }
    return &mapping->obj;

 error:
    release_object( mapping );
    return NULL;
}

struct mapping *get_mapping_obj( struct process *process, obj_handle_t handle, unsigned int access )
{
    return (struct mapping *)get_handle_obj( process, handle, access, &mapping_ops );
}

/* open a new file for the file descriptor backing the mapping */
struct file *get_mapping_file( struct process *process, client_ptr_t base,
                               unsigned int access, unsigned int sharing )
{
    struct memory_view *view = find_mapped_view( process, base );

    if (!view || !view->fd) return NULL;
    return create_file_for_fd_obj( view->fd, access, sharing );
}

static void mapping_dump( struct object *obj, int verbose )
{
    struct mapping *mapping = (struct mapping *)obj;
    assert( obj->ops == &mapping_ops );
    fprintf( stderr, "Mapping size=%08x%08x flags=%08x fd=%p shared=%p\n",
             (unsigned int)(mapping->size >> 32), (unsigned int)mapping->size,
             mapping->flags, mapping->fd, mapping->shared );
}

static struct object_type *mapping_get_type( struct object *obj )
{
    static const WCHAR name[] = {'S','e','c','t','i','o','n'};
    static const struct unicode_str str = { name, sizeof(name) };
    return get_object_type( &str );
}

static struct fd *mapping_get_fd( struct object *obj )
{
    struct mapping *mapping = (struct mapping *)obj;
    return (struct fd *)grab_object( mapping->fd );
}

static unsigned int mapping_map_access( struct object *obj, unsigned int access )
{
    if (access & GENERIC_READ)    access |= STANDARD_RIGHTS_READ | SECTION_QUERY | SECTION_MAP_READ;
    if (access & GENERIC_WRITE)   access |= STANDARD_RIGHTS_WRITE | SECTION_MAP_WRITE;
    if (access & GENERIC_EXECUTE) access |= STANDARD_RIGHTS_EXECUTE | SECTION_MAP_EXECUTE;
    if (access & GENERIC_ALL)     access |= SECTION_ALL_ACCESS;
    return access & ~(GENERIC_READ | GENERIC_WRITE | GENERIC_EXECUTE | GENERIC_ALL);
}
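
/* mapping_map_access() translates generic access rights into section-specific
 * ones and strips the generic bits, e.g. GENERIC_READ becomes
 * STANDARD_RIGHTS_READ | SECTION_QUERY | SECTION_MAP_READ. */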

static void mapping_destroy( struct object *obj )
{
    struct mapping *mapping = (struct mapping *)obj;
    assert( obj->ops == &mapping_ops );
    if (mapping->fd) release_object( mapping->fd );
    if (mapping->committed) release_object( mapping->committed );
    if (mapping->shared) release_object( mapping->shared );
}

static enum server_fd_type mapping_get_fd_type( struct fd *fd )
{
    return FD_TYPE_FILE;
}

int get_page_size(void)
{
    if (!page_mask) page_mask = sysconf( _SC_PAGESIZE ) - 1;
    return page_mask + 1;
}

/* create a file mapping */
DECL_HANDLER(create_mapping)
{
    struct object *root, *obj;
    struct unicode_str name;
    const struct security_descriptor *sd;
    const struct object_attributes *objattr = get_req_object_attributes( &sd, &name, &root );

    if (!objattr) return;

    if ((obj = create_mapping( root, &name, objattr->attributes, req->size, req->flags,
                               req->file_handle, req->file_access, sd )))
    {
        if (get_error() == STATUS_OBJECT_NAME_EXISTS)
            reply->handle = alloc_handle( current->process, obj, req->access, objattr->attributes );
        else
            reply->handle = alloc_handle_no_access_check( current->process, obj,
                                                          req->access, objattr->attributes );
        release_object( obj );
    }

    if (root) release_object( root );
}

/* open a handle to a mapping */
DECL_HANDLER(open_mapping)
{
    struct unicode_str name = get_req_unicode_str();

    reply->handle = open_object( current->process, req->rootdir, req->access,
                                 &mapping_ops, &name, req->attributes );
}

/* get information about a mapping */
DECL_HANDLER(get_mapping_info)
{
    struct mapping *mapping;

    if (!(mapping = get_mapping_obj( current->process, req->handle, req->access ))) return;

    reply->size  = mapping->size;
    reply->flags = mapping->flags;

    if (mapping->flags & SEC_IMAGE)
        set_reply_data( &mapping->image, min( sizeof(mapping->image), get_reply_max_size() ));

    if (!(req->access & (SECTION_MAP_READ | SECTION_MAP_WRITE)))  /* query only */
    {
        release_object( mapping );
        return;
    }

    if ((mapping->flags & SEC_IMAGE) && mapping->cpu != current->process->cpu)
    {
        set_error( STATUS_INVALID_IMAGE_FORMAT );
        release_object( mapping );
        return;
    }

    if (mapping->shared)
        reply->shared_file = alloc_handle( current->process, mapping->shared->file,
                                           GENERIC_READ|GENERIC_WRITE, 0 );
    release_object( mapping );
}
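
/* For image mappings the pe_image_info_t block is returned as reply data, and
 * a handle to the shared-section temp file is returned when one exists.  A
 * query-only caller (no SECTION_MAP_READ/WRITE access) gets just the size and
 * flags, and mapping an image built for a different CPU than the requesting
 * process fails with STATUS_INVALID_IMAGE_FORMAT. */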

/* add a memory view in the current process */
DECL_HANDLER(map_view)
{
    struct mapping *mapping = NULL;
    struct memory_view *view;

    if (!req->size || (req->base & page_mask) || req->base + req->size < req->base)  /* overflow */
    {
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }

    /* make sure we don't already have an overlapping view */
    LIST_FOR_EACH_ENTRY( view, &current->process->views, struct memory_view, entry )
    {
        if (view->base + view->size <= req->base) continue;
        if (view->base >= req->base + req->size) continue;
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }

    if (!(mapping = get_mapping_obj( current->process, req->mapping, req->access ))) return;

    if (mapping->flags & SEC_IMAGE)
    {
        if (req->start || req->size > mapping->image.map_size)
        {
            set_error( STATUS_INVALID_PARAMETER );
            goto done;
        }
    }
    else if (req->start >= mapping->size ||
             req->start + req->size < req->start ||
             req->start + req->size > ((mapping->size + page_mask) & ~(mem_size_t)page_mask))
    {
        set_error( STATUS_INVALID_PARAMETER );
        goto done;
    }

    if ((view = mem_alloc( sizeof(*view) )))
    {
        view->base      = req->base;
        view->size      = req->size;
        view->start     = req->start;
        view->flags     = mapping->flags;
        view->fd        = !is_fd_removable( mapping->fd ) ? (struct fd *)grab_object( mapping->fd ) : NULL;
        view->committed = mapping->committed ? (struct ranges *)grab_object( mapping->committed ) : NULL;
        view->shared    = mapping->shared ? (struct shared_map *)grab_object( mapping->shared ) : NULL;
        list_add_tail( &current->process->views, &view->entry );
    }

done:
    release_object( mapping );
}
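
/* The server only records the view here; the actual mmap() is done by the
 * client.  The overlap check keeps the per-process view list consistent, and
 * the start/size checks ensure the requested window lies within the mapping
 * (rounded up to a whole page for non-image mappings). */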

/* unmap a memory view from the current process */
DECL_HANDLER(unmap_view)
{
    struct memory_view *view = find_mapped_view( current->process, req->base );

    if (view) free_memory_view( view );
}

/* get a range of committed pages in a file mapping */
DECL_HANDLER(get_mapping_committed_range)
{
    struct memory_view *view = find_mapped_view( current->process, req->base );

    if (view) reply->committed = find_committed_range( view, req->offset, &reply->size );
}

/* add a range to the committed pages in a file mapping */
DECL_HANDLER(add_mapping_committed_range)
{
    struct memory_view *view = find_mapped_view( current->process, req->base );

    if (view) add_committed_range( view, req->offset, req->offset + req->size );
}

/* check if two memory maps are for the same file */
DECL_HANDLER(is_same_mapping)
{
    struct memory_view *view1 = find_mapped_view( current->process, req->base1 );
    struct memory_view *view2 = find_mapped_view( current->process, req->base2 );

    if (!view1 || !view2) return;
    if (!view1->fd || !view2->fd ||
        !(view1->flags & SEC_IMAGE) || !(view2->flags & SEC_IMAGE) ||
        !is_same_file_fd( view1->fd, view2->fd ))
        set_error( STATUS_NOT_SAME_DEVICE );
}