 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright 2014 Joyent, Inc.  All rights reserved.
 */
#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/vmsystm.h>
#include <sys/machsystm.h>
#include <sys/debug.h>
#include <sys/vmparam.h>
#include <sys/machelf.h>
#include <sys/corectl.h>
#include <sys/exechdr.h>
#include <sys/autoconf.h>
#include <vm/seg_dev.h>
#include <sys/vmparam.h>
#include <sys/mmapobj.h>
#include <sys/atomic.h>
/*
 * The main driving force behind mmapobj is to interpret and map ELF files
 * inside of the kernel instead of having the linker be responsible for this.
 *
 * mmapobj also supports the AOUT 4.x binary format as well as flat files in
 * a read only manner.
 *
 * When interpreting and mapping an ELF file, mmapobj will map each PT_LOAD
 * or PT_SUNWBSS segment according to the ELF standard.  Refer to the "Linker
 * and Libraries Guide" for more information about the standard and mapping
 * rules.
 *
 * Having mmapobj interpret and map objects will allow the kernel to make the
 * best decision for where to place the mappings for said objects.  Thus, we
 * can make optimizations inside of the kernel for specific platforms or cache
 * mapping information to make mapping objects faster.  The cache is ignored
 * if ASLR is enabled.
 *
 * The lib_va_hash will be one such optimization.  For each ELF object that
 * mmapobj is asked to interpret, we will attempt to cache the information
 * about the PT_LOAD and PT_SUNWBSS sections to speed up future mappings of
 * the same objects.  We will cache up to LIBVA_CACHED_SEGS (see below) program
 * headers which should cover a majority of the libraries out there without
 * wasting space.  In order to make sure that the cached information is valid,
 * we check the passed in vnode's mtime and ctime to make sure the vnode
 * has not been modified since the last time we used it.
 *
 * In addition, the lib_va_hash may contain a preferred starting VA for the
 * object which can be useful for platforms which support a shared context.
 * This will increase the likelihood that library text can be shared among
 * many different processes.  We limit the reserved VA space for 32 bit objects
 * in order to minimize fragmenting the process's address space.
 *
 * In addition to the above, the mmapobj interface allows for padding to be
 * requested before the first mapping and after the last mapping created.
 * When padding is requested, no additional optimizations will be made for
 * that request.
 */
/*
 * Threshold to prevent allocating too much kernel memory to read in the
 * program headers for an object.  If it requires more than below,
 * we will use a KM_NOSLEEP allocation to allocate memory to hold all of the
 * program headers which could possibly fail.  If less memory than below is
 * needed, then we use a KM_SLEEP allocation and are willing to wait for the
 * memory if we need to.
 */
size_t mmapobj_alloc_threshold = 65536;
/* Debug stats for test coverage */
#ifdef DEBUG
struct mobj_stats {
    uint_t  mobjs_unmap_called;
    uint_t  mobjs_remap_devnull;
    uint_t  mobjs_lookup_start;
    uint_t  mobjs_alloc_start;
    uint_t  mobjs_alloc_vmem;
    uint_t  mobjs_add_collision;
    uint_t  mobjs_get_addr;
    uint_t  mobjs_map_flat_no_padding;
    uint_t  mobjs_map_flat_padding;
    uint_t  mobjs_map_ptload_text;
    uint_t  mobjs_map_ptload_initdata;
    uint_t  mobjs_map_ptload_preread;
    uint_t  mobjs_map_ptload_unaligned_text;
    uint_t  mobjs_map_ptload_unaligned_map_fail;
    uint_t  mobjs_map_ptload_unaligned_read_fail;
    uint_t  mobjs_zfoddiff;
    uint_t  mobjs_zfoddiff_nowrite;
    uint_t  mobjs_zfodextra;
    uint_t  mobjs_ptload_failed;
    uint_t  mobjs_map_elf_no_holes;
    uint_t  mobjs_unmap_hole;
    uint_t  mobjs_nomem_header;
    uint_t  mobjs_inval_header;
    uint_t  mobjs_overlap_header;
    uint_t  mobjs_np2_align;
    uint_t  mobjs_np2_align_overflow;
    uint_t  mobjs_exec_padding;
    uint_t  mobjs_exec_addr_mapped;
    uint_t  mobjs_exec_addr_devnull;
    uint_t  mobjs_exec_addr_in_use;
    uint_t  mobjs_lvp_found;
    uint_t  mobjs_no_loadable_yet;
    uint_t  mobjs_nothing_to_map;
    uint_t  mobjs_e2big;
    uint_t  mobjs_dyn_pad_align;
    uint_t  mobjs_dyn_pad_noalign;
    uint_t  mobjs_alloc_start_fail;
    uint_t  mobjs_lvp_nocache;
    uint_t  mobjs_extra_padding;
    uint_t  mobjs_lvp_not_needed;
    uint_t  mobjs_no_mem_map_sz;
    uint_t  mobjs_check_exec_failed;
    uint_t  mobjs_lvp_used;
    uint_t  mobjs_wrong_model;
    uint_t  mobjs_noexec_fs;
    uint_t  mobjs_e2big_et_rel;
    uint_t  mobjs_et_rel_mapped;
    uint_t  mobjs_unknown_elf_type;
    uint_t  mobjs_phent32_too_small;
    uint_t  mobjs_phent64_too_small;
    uint_t  mobjs_inval_elf_class;
    uint_t  mobjs_too_many_phdrs;
    uint_t  mobjs_no_phsize;
    uint_t  mobjs_phsize_large;
    uint_t  mobjs_phsize_xtralarge;
    uint_t  mobjs_fast_wrong_model;
    uint_t  mobjs_fast_e2big;
    uint_t  mobjs_fast;
    uint_t  mobjs_fast_success;
    uint_t  mobjs_fast_not_now;
    uint_t  mobjs_small_file;
    uint_t  mobjs_read_error;
    uint_t  mobjs_unsupported;
    uint_t  mobjs_flat_e2big;
    uint_t  mobjs_phent_align32;
    uint_t  mobjs_phent_align64;
    uint_t  mobjs_lib_va_find_hit;
    uint_t  mobjs_lib_va_find_delay_delete;
    uint_t  mobjs_lib_va_find_delete;
    uint_t  mobjs_lib_va_add_delay_delete;
    uint_t  mobjs_lib_va_add_delete;
    uint_t  mobjs_lib_va_create_failure;
    uint_t  mobjs_min_align;
} mobj_stats;

#define MOBJ_STAT_ADD(stat)     ((mobj_stats.mobjs_##stat)++)
#else
#define MOBJ_STAT_ADD(stat)
#endif
/*
 * Check if addr is at or above the address space reserved for the stack.
 * The stack is at the top of the address space for all sparc processes
 * and 64 bit x86 processes.  For 32 bit x86, the stack is not at the top
 * of the address space and thus this check will always return false for
 * 32 bit x86 processes.
 */
#if defined(__sparc)
#define OVERLAPS_STACK(addr, p)                     \
    (addr >= (p->p_usrstack - ((p->p_stk_ctl + PAGEOFFSET) & PAGEMASK)))
#elif defined(__amd64)
#define OVERLAPS_STACK(addr, p)                     \
    ((p->p_model == DATAMODEL_LP64) &&                  \
    (addr >= (p->p_usrstack - ((p->p_stk_ctl + PAGEOFFSET) & PAGEMASK))))
#elif defined(__i386)
#define OVERLAPS_STACK(addr, p) 0
#endif
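/*
 * Illustrative example (hypothetical values, not from the original source):
 * with p_usrstack at the top of the user address space and a p_stk_ctl
 * (stack size limit) of 8MB, the limit is rounded up to a whole page and any
 * address at or above p_usrstack - 8MB is treated as overlapping the stack.
 */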
/* lv_flags values - bitmap */
#define LV_ELF32    0x1     /* 32 bit ELF file */
#define LV_ELF64    0x2     /* 64 bit ELF file */
#define LV_DEL      0x4     /* delete when lv_refcnt hits zero */
/*
 * Note: lv_num_segs will denote how many segments this file has and will
 * only be set after the lv_mps array has been filled out.
 * lv_mps can only be valid if lv_num_segs is non-zero.
 */
struct lib_va {
    struct lib_va       *lv_next;
    caddr_t             lv_base_va;     /* start va for library */
    ssize_t             lv_len;         /* total va span of library */
    size_t              lv_align;       /* minimum alignment */
    uint64_t            lv_nodeid;      /* filesystem node id */
    uint64_t            lv_fsid;        /* filesystem id */
    timestruc_t         lv_ctime;       /* last time file was changed */
    timestruc_t         lv_mtime;       /* or modified */
    mmapobj_result_t    lv_mps[LIBVA_CACHED_SEGS]; /* cached pheaders */
    int                 lv_num_segs;    /* # segs for this file */
    int                 lv_flags;
    uint_t              lv_refcnt;      /* number of holds on struct */
};
#define LIB_VA_SIZE         1024
#define LIB_VA_MASK         (LIB_VA_SIZE - 1)
#define LIB_VA_MUTEX_SHIFT  3

#if (LIB_VA_SIZE & (LIB_VA_SIZE - 1))
#error  "LIB_VA_SIZE is not a power of 2"
#endif
static struct lib_va *lib_va_hash[LIB_VA_SIZE];
static kmutex_t lib_va_hash_mutex[LIB_VA_SIZE >> LIB_VA_MUTEX_SHIFT];

#define LIB_VA_HASH_MUTEX(index)                    \
    (&lib_va_hash_mutex[index >> LIB_VA_MUTEX_SHIFT])

#define LIB_VA_HASH(nodeid)                     \
    (((nodeid) ^ ((nodeid) << 7) ^ ((nodeid) << 13)) & LIB_VA_MASK)

#define LIB_VA_MATCH_ID(arg1, arg2)                 \
    ((arg1)->lv_nodeid == (arg2)->va_nodeid &&          \
    (arg1)->lv_fsid == (arg2)->va_fsid)

#define LIB_VA_MATCH_TIME(arg1, arg2)                   \
    ((arg1)->lv_ctime.tv_sec == (arg2)->va_ctime.tv_sec &&      \
    (arg1)->lv_mtime.tv_sec == (arg2)->va_mtime.tv_sec &&       \
    (arg1)->lv_ctime.tv_nsec == (arg2)->va_ctime.tv_nsec &&     \
    (arg1)->lv_mtime.tv_nsec == (arg2)->va_mtime.tv_nsec)

#define LIB_VA_MATCH(arg1, arg2)                    \
    (LIB_VA_MATCH_ID(arg1, arg2) && LIB_VA_MATCH_TIME(arg1, arg2))
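/*
 * Illustrative example (not from the original source): with LIB_VA_SIZE of
 * 1024 and LIB_VA_MUTEX_SHIFT of 3, a nodeid that hashes to bucket 41 lives
 * on the chain at lib_va_hash[41] and is protected by lib_va_hash_mutex[5],
 * i.e. each mutex covers eight consecutive hash buckets.
 */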
/*
 * lib_va will be used for optimized allocation of address ranges for
 * libraries, such that subsequent mappings of the same library will attempt
 * to use the same VA as previous mappings of that library.
 * In order to map libraries at the same VA in many processes, we need to carve
 * out our own address space for them which is unique across many processes.
 * We use different arenas for 32 bit and 64 bit libraries.
 *
 * Since the 32 bit address space is relatively small, we limit the number of
 * libraries which try to use consistent virtual addresses to lib_threshold.
 * For 64 bit libraries there is no such limit since the address space is
 * large.
 */
static vmem_t *lib_va_32_arena;
static vmem_t *lib_va_64_arena;
uint_t lib_threshold = 20;  /* modifiable via /etc/system */

static kmutex_t lib_va_init_mutex;  /* no need to initialize */

/*
 * Number of 32 bit and 64 bit libraries in lib_va hash.
 */
static uint_t libs_mapped_32 = 0;
static uint_t libs_mapped_64 = 0;
/*
 * Free up the resources associated with lvp as well as lvp itself.
 * We also decrement the number of libraries mapped via a lib_va
 * cached virtual address.
 */
static void
lib_va_free(struct lib_va *lvp)
{
    int is_64bit = lvp->lv_flags & LV_ELF64;
    ASSERT(lvp->lv_refcnt == 0);

    if (lvp->lv_base_va != NULL) {
        vmem_xfree(is_64bit ? lib_va_64_arena : lib_va_32_arena,
            lvp->lv_base_va, lvp->lv_len);
        if (is_64bit) {
            atomic_dec_32(&libs_mapped_64);
        } else {
            atomic_dec_32(&libs_mapped_32);
        }
    }
    kmem_free(lvp, sizeof (struct lib_va));
}
/*
 * See if the file associated with the vap passed in is in the lib_va hash.
 * If it is and the file has not been modified since last use, then
 * return a pointer to that data.  Otherwise, return NULL if the file has
 * changed or the file was not found in the hash.
 */
static struct lib_va *
lib_va_find(vattr_t *vap)
{
    struct lib_va *lvp;
    struct lib_va *del = NULL;
    struct lib_va **tmp;
    uint_t index;

    index = LIB_VA_HASH(vap->va_nodeid);

    mutex_enter(LIB_VA_HASH_MUTEX(index));
    tmp = &lib_va_hash[index];
    while (*tmp != NULL) {
        lvp = *tmp;
        if (LIB_VA_MATCH_ID(lvp, vap)) {
            if (LIB_VA_MATCH_TIME(lvp, vap)) {
                ASSERT((lvp->lv_flags & LV_DEL) == 0);
                lvp->lv_refcnt++;
                MOBJ_STAT_ADD(lib_va_find_hit);
            } else {
                /*
                 * file was updated since last use.
                 * need to remove it from list.
                 */
                del = lvp;
                *tmp = del->lv_next;
                del->lv_next = NULL;
                /*
                 * If we can't delete it now, mark it for later
                 */
                if (del->lv_refcnt) {
                    MOBJ_STAT_ADD(lib_va_find_delay_delete);
                    del->lv_flags |= LV_DEL;
                    del = NULL;
                }
                lvp = NULL;
            }
            mutex_exit(LIB_VA_HASH_MUTEX(index));
            if (del) {
                ASSERT(del->lv_refcnt == 0);
                MOBJ_STAT_ADD(lib_va_find_delete);
                lib_va_free(del);
            }
            return (lvp);
        }
        tmp = &lvp->lv_next;
    }
    mutex_exit(LIB_VA_HASH_MUTEX(index));
    return (NULL);
}
/*
 * Add a new entry to the lib_va hash.
 * Search the hash while holding the appropriate mutex to make sure that the
 * data is not already in the cache.  If we find data that is in the cache
 * already and has not been modified since last use, we return NULL.  If it
 * has been modified since last use, we will remove that entry from
 * the hash and it will be deleted once its reference count reaches zero.
 * If there is no current entry in the hash we will add the new entry and
 * return it to the caller who is responsible for calling lib_va_release to
 * drop their reference count on it.
 *
 * lv_num_segs will be set to zero since the caller needs to add that
 * information to the data structure.
 */
static struct lib_va *
lib_va_add_hash(caddr_t base_va, ssize_t len, size_t align, vattr_t *vap)
{
    struct lib_va *lvp;
    uint_t index;
    model_t model;
    struct lib_va **tmp;
    struct lib_va *del = NULL;

    model = get_udatamodel();
    index = LIB_VA_HASH(vap->va_nodeid);

    lvp = kmem_alloc(sizeof (struct lib_va), KM_SLEEP);

    mutex_enter(LIB_VA_HASH_MUTEX(index));

    /*
     * Make sure not adding same data a second time.
     * The hash chains should be relatively short and adding
     * is a relatively rare event, so it's worth the check.
     */
    tmp = &lib_va_hash[index];
    while (*tmp != NULL) {
        if (LIB_VA_MATCH_ID(*tmp, vap)) {
            if (LIB_VA_MATCH_TIME(*tmp, vap)) {
                mutex_exit(LIB_VA_HASH_MUTEX(index));
                kmem_free(lvp, sizeof (struct lib_va));
                return (NULL);
            }

            /*
             * We have the same nodeid and fsid but the file has
             * been modified since we last saw it.
             * Need to remove the old node and add this new
             * one.
             * Could probably use a callback mechanism to make
             * this cleaner.
             */
            ASSERT(del == NULL);
            del = *tmp;
            *tmp = del->lv_next;
            del->lv_next = NULL;

            /*
             * Check to see if we can free it.  If lv_refcnt
             * is greater than zero, then some other thread
             * has a reference to the one we want to delete
             * and we cannot delete it.  All of this is done
             * under the lib_va_hash_mutex lock so it is atomic.
             */
            if (del->lv_refcnt) {
                MOBJ_STAT_ADD(lib_va_add_delay_delete);
                del->lv_flags |= LV_DEL;
                del = NULL;
            }
            /* tmp is already advanced */
            continue;
        }
        tmp = &((*tmp)->lv_next);
    }

    lvp->lv_base_va = base_va;
    lvp->lv_len = len;
    lvp->lv_align = align;
    lvp->lv_nodeid = vap->va_nodeid;
    lvp->lv_fsid = vap->va_fsid;
    lvp->lv_ctime.tv_sec = vap->va_ctime.tv_sec;
    lvp->lv_ctime.tv_nsec = vap->va_ctime.tv_nsec;
    lvp->lv_mtime.tv_sec = vap->va_mtime.tv_sec;
    lvp->lv_mtime.tv_nsec = vap->va_mtime.tv_nsec;
    lvp->lv_next = NULL;
    lvp->lv_refcnt = 1;

    /* Caller responsible for filling this and lv_mps out */
    lvp->lv_num_segs = 0;

    if (model == DATAMODEL_LP64) {
        lvp->lv_flags = LV_ELF64;
    } else {
        ASSERT(model == DATAMODEL_ILP32);
        lvp->lv_flags = LV_ELF32;
    }

    if (base_va != NULL) {
        if (model == DATAMODEL_LP64) {
            atomic_inc_32(&libs_mapped_64);
        } else {
            ASSERT(model == DATAMODEL_ILP32);
            atomic_inc_32(&libs_mapped_32);
        }
    }
    ASSERT(*tmp == NULL);
    *tmp = lvp;
    mutex_exit(LIB_VA_HASH_MUTEX(index));
    if (del) {
        ASSERT(del->lv_refcnt == 0);
        MOBJ_STAT_ADD(lib_va_add_delete);
        lib_va_free(del);
    }
    return (lvp);
}
/*
 * Release the hold on lvp which was acquired by lib_va_find or
 * lib_va_add_hash.  In addition, if this is the last hold and lvp is marked
 * for deletion, free up its reserved address space and free the structure.
 */
static void
lib_va_release(struct lib_va *lvp)
{
    uint_t index;
    int to_del = 0;

    ASSERT(lvp->lv_refcnt > 0);

    index = LIB_VA_HASH(lvp->lv_nodeid);
    mutex_enter(LIB_VA_HASH_MUTEX(index));
    if (--lvp->lv_refcnt == 0 && (lvp->lv_flags & LV_DEL)) {
        to_del = 1;
    }
    mutex_exit(LIB_VA_HASH_MUTEX(index));
    if (to_del) {
        ASSERT(lvp->lv_next == 0);
        lib_va_free(lvp);
    }
}
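/*
 * Typical usage pattern (sketch based on the callers later in this file):
 * a caller does lvp = lib_va_find(&vattr), uses lv_base_va / lv_mps while
 * the hold is in place, and then calls lib_va_release(lvp) when done.
 * lib_va_add_hash returns its new entry already held, so it is released
 * the same way.
 */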
/*
 * Dummy function for mapping through /dev/null
 * Normally I would have used mmmmap in common/io/mem.c
 * but that is a static function, and for /dev/null, it
 * is made into a macro.  Thus we need a dummy function
 * to handle this case.
 */
/* ARGSUSED */
static int
mmapobj_dummy(dev_t dev, off_t off, int prot)
{
    return (-1);
}
/*
 * Called when an error has occurred which requires mmapobj to return failure.
 * All mapped objects will be unmapped and /dev/null mappings will be
 * reclaimed if necessary.
 * num_mapped is the number of elements of mrp which have been mapped, and
 * num_segs is the total number of elements in mrp.
 * For e_type ET_EXEC, we need to unmap all of the elements in mrp since
 * we had already made reservations for them.
 * If num_mapped equals num_segs, then we know that we had fully mapped
 * the file and only need to clean up the segments described.
 * If they are not equal, then for ET_DYN we will unmap the range from the
 * end of the last mapped segment to the end of the last segment in mrp
 * since we would have made a reservation for that memory earlier.
 * If e_type is passed in as zero, num_mapped must equal num_segs.
 */
static void
mmapobj_unmap(mmapobj_result_t *mrp, int num_mapped, int num_segs,
    ushort_t e_type)
{
    int i;
    struct as *as = curproc->p_as;
    caddr_t addr;
    size_t size;

    if (e_type == ET_EXEC) {
        num_mapped = num_segs;
    }
#ifdef DEBUG
    if (e_type == 0) {
        ASSERT(num_mapped == num_segs);
    }
#endif

    MOBJ_STAT_ADD(unmap_called);
    for (i = 0; i < num_mapped; i++) {

        /*
         * If we are going to have to create a mapping we need to
         * make sure that no one else will use the address we
         * need to remap between the time it is unmapped and
         * mapped below.
         */
        if (mrp[i].mr_flags & MR_RESV) {
            as_rangelock(as);
        }
        /* Always need to unmap what we mapped */
        (void) as_unmap(as, mrp[i].mr_addr, mrp[i].mr_msize);

        /* Need to reclaim /dev/null reservation from earlier */
        if (mrp[i].mr_flags & MR_RESV) {
            struct segdev_crargs dev_a;

            ASSERT(e_type != ET_DYN);
            /*
             * Use seg_dev segment driver for /dev/null mapping.
             */
            dev_a.mapfunc = mmapobj_dummy;
            dev_a.dev = makedevice(mm_major, M_NULL);
            dev_a.offset = 0;
            dev_a.type = 0;     /* neither PRIVATE nor SHARED */
            dev_a.prot = dev_a.maxprot = (uchar_t)PROT_NONE;
            dev_a.hat_attr = 0;
            dev_a.hat_flags = 0;

            (void) as_map(as, mrp[i].mr_addr, mrp[i].mr_msize,
                segdev_create, &dev_a);
            MOBJ_STAT_ADD(remap_devnull);
            as_rangeunlock(as);
        }
    }

    if (num_mapped != num_segs) {
        ASSERT(e_type == ET_DYN);
        /* Need to unmap any reservation made after last mapped seg */
        if (num_mapped == 0) {
            addr = mrp[0].mr_addr;
        } else {
            addr = mrp[num_mapped - 1].mr_addr +
                mrp[num_mapped - 1].mr_msize;
        }
        size = (size_t)mrp[num_segs - 1].mr_addr +
            mrp[num_segs - 1].mr_msize - (size_t)addr;
        (void) as_unmap(as, addr, size);

        /*
         * Now we need to unmap the holes between mapped segs.
         * Note that we have not mapped all of the segments and thus
         * the holes between segments would not have been unmapped
         * yet. If num_mapped == num_segs, then all of the holes
         * between segments would have already been unmapped.
         */
        for (i = 1; i < num_mapped; i++) {
            addr = mrp[i - 1].mr_addr + mrp[i - 1].mr_msize;
            size = mrp[i].mr_addr - addr;
            (void) as_unmap(as, addr, size);
        }
    }
}
/*
 * We need to add the start address into mrp so that the unmap function
 * has absolute addresses to use.
 */
static void
mmapobj_unmap_exec(mmapobj_result_t *mrp, int num_mapped, caddr_t start_addr)
{
    int i;

    for (i = 0; i < num_mapped; i++) {
        mrp[i].mr_addr += (size_t)start_addr;
    }
    mmapobj_unmap(mrp, num_mapped, num_mapped, ET_EXEC);
}
static caddr_t
mmapobj_lookup_start_addr(struct lib_va *lvp)
{
    proc_t *p = curproc;
    struct as *as = p->p_as;
    struct segvn_crargs crargs = SEGVN_ZFOD_ARGS(PROT_USER, PROT_ALL);
    int error;
    uint_t ma_flags = _MAP_LOW32;
    caddr_t base = NULL;
    size_t len;
    size_t align;

    ASSERT(lvp != NULL);
    MOBJ_STAT_ADD(lookup_start);

    as_rangelock(as);

    base = lvp->lv_base_va;
    len = lvp->lv_len;

    /*
     * If we don't have an expected base address, or the one that we want
     * to use is not available or acceptable, go get an acceptable
     * address from map_addr.
     */
    if (base == NULL || as_gap(as, len, &base, &len, 0, NULL) ||
        valid_usr_range(base, len, PROT_ALL, as, as->a_userlimit) !=
        RANGE_OKAY || OVERLAPS_STACK(base + len, p)) {
        if (lvp->lv_flags & LV_ELF64) {
            ma_flags = 0;
        }

        align = lvp->lv_align;
        if (align > 1) {
            ma_flags |= MAP_ALIGN;
        }

        base = (caddr_t)align;
        map_addr(&base, len, 0, 1, ma_flags);
    }

    /*
     * Need to reserve the address space we're going to use.
     * Don't reserve swap space since we'll be mapping over this.
     */
    if (base != NULL) {
        crargs.flags |= MAP_NORESERVE;
        error = as_map(as, base, len, segvn_create, &crargs);
        if (error) {
            base = NULL;
        }
    }

    as_rangeunlock(as);
    return (base);
}
/*
 * Get the starting address for a given file to be mapped and return it
 * to the caller.  If we're using lib_va and we need to allocate an address,
 * we will attempt to allocate it from the global reserved pool such that the
 * same address can be used in the future for this file.  If we can't use the
 * reserved address then we just get one that will fit in our address space.
 *
 * Returns the starting virtual address for the range to be mapped or NULL
 * if an error is encountered.  If we successfully insert the requested info
 * into the lib_va hash, then *lvpp will be set to point to this lib_va
 * structure.  The structure will have a hold on it and thus lib_va_release
 * needs to be called on it by the caller.  This function will not fill out
 * lv_mps or lv_num_segs since it does not have enough information to do so.
 * The caller is responsible for doing this making sure that any modifications
 * to lv_mps are visible before setting lv_num_segs.
 */
static caddr_t
mmapobj_alloc_start_addr(struct lib_va **lvpp, size_t len, int use_lib_va,
    int randomize, int primary, size_t align, vattr_t *vap)
{
    proc_t *p = curproc;
    struct as *as = p->p_as;
    struct segvn_crargs crargs = SEGVN_ZFOD_ARGS(PROT_USER, PROT_ALL);
    int error;
    model_t model;
    uint_t ma_flags = _MAP_LOW32;
    caddr_t base = NULL;
    vmem_t *model_vmem;
    size_t lib_va_start;
    size_t lib_va_end;
    size_t lib_va_len;

    ASSERT(lvpp != NULL);
    ASSERT(!(randomize && use_lib_va));
    /* XXX: I'd prefer not to need to do this */
    ASSERT(!(primary && use_lib_va));

    MOBJ_STAT_ADD(alloc_start);
    model = get_udatamodel();

    if (model == DATAMODEL_LP64) {
        ma_flags = 0;
        model_vmem = lib_va_64_arena;
    } else {
        ASSERT(model == DATAMODEL_ILP32);
        model_vmem = lib_va_32_arena;
    }

    if (align > 1) {
        ma_flags |= MAP_ALIGN;
    }

    if (randomize != 0)
        ma_flags |= _MAP_RANDOMIZE;
    if (primary)
        ma_flags |= _MAP_STARTLOW;

    if (use_lib_va) {
        /*
         * The first time through, we need to setup the lib_va arenas.
         * We call map_addr to find a suitable range of memory to map
         * the given library, and we will set the highest address
         * in our vmem arena to the end of this address range.
         * We allow up to half of the address space to be used
         * for lib_va addresses but we do not prevent any allocations
         * in this range from other allocation paths.
         */
        if (lib_va_64_arena == NULL && model == DATAMODEL_LP64) {
            mutex_enter(&lib_va_init_mutex);
            if (lib_va_64_arena == NULL) {
                base = (caddr_t)align;
                as_rangelock(as);
                map_addr(&base, len, 0, 1, ma_flags);
                as_rangeunlock(as);
                if (base == NULL) {
                    mutex_exit(&lib_va_init_mutex);
                    MOBJ_STAT_ADD(lib_va_create_failure);
                    goto nolibva;
                }
                lib_va_end = (size_t)base + len;
                lib_va_len = lib_va_end >> 1;
                lib_va_len = P2ROUNDUP(lib_va_len, PAGESIZE);
                lib_va_start = lib_va_end - lib_va_len;

                /*
                 * Need to make sure we avoid the address hole.
                 * We know lib_va_end is valid but we need to
                 * make sure lib_va_start is as well.
                 */
                if ((lib_va_end > (size_t)hole_end) &&
                    (lib_va_start < (size_t)hole_end)) {
                    lib_va_start = P2ROUNDUP(
                        (size_t)hole_end, PAGESIZE);
                    lib_va_len = lib_va_end - lib_va_start;
                }
                lib_va_64_arena = vmem_create("lib_va_64",
                    (void *)lib_va_start, lib_va_len, PAGESIZE,
                    NULL, NULL, NULL, 0,
                    VM_NOSLEEP | VMC_IDENTIFIER);
                if (lib_va_64_arena == NULL) {
                    mutex_exit(&lib_va_init_mutex);
                    goto nolibva;
                }
            }
            model_vmem = lib_va_64_arena;
            mutex_exit(&lib_va_init_mutex);
        } else if (lib_va_32_arena == NULL &&
            model == DATAMODEL_ILP32) {
            mutex_enter(&lib_va_init_mutex);
            if (lib_va_32_arena == NULL) {
                base = (caddr_t)align;
                as_rangelock(as);
                map_addr(&base, len, 0, 1, ma_flags);
                as_rangeunlock(as);
                if (base == NULL) {
                    mutex_exit(&lib_va_init_mutex);
                    MOBJ_STAT_ADD(lib_va_create_failure);
                    goto nolibva;
                }
                lib_va_end = (size_t)base + len;
                lib_va_len = lib_va_end >> 1;
                lib_va_len = P2ROUNDUP(lib_va_len, PAGESIZE);
                lib_va_start = lib_va_end - lib_va_len;
                lib_va_32_arena = vmem_create("lib_va_32",
                    (void *)lib_va_start, lib_va_len, PAGESIZE,
                    NULL, NULL, NULL, 0,
                    VM_NOSLEEP | VMC_IDENTIFIER);
                if (lib_va_32_arena == NULL) {
                    mutex_exit(&lib_va_init_mutex);
                    goto nolibva;
                }
            }
            model_vmem = lib_va_32_arena;
            mutex_exit(&lib_va_init_mutex);
        }

        if (model == DATAMODEL_LP64 || libs_mapped_32 < lib_threshold) {
            base = vmem_xalloc(model_vmem, len, align, 0, 0, NULL,
                NULL, VM_NOSLEEP | VM_ENDALLOC);
            MOBJ_STAT_ADD(alloc_vmem);
        }

        /*
         * Even if the address fails to fit in our address space,
         * or we can't use a reserved address,
         * we should still save it off in lib_va_hash.
         */
        *lvpp = lib_va_add_hash(base, len, align, vap);

        /*
         * Check for collision on insertion and free up our VA space.
         * This is expected to be rare, so we'll just reset base to
         * NULL instead of looking it up in the lib_va hash.
         */
        if (*lvpp == NULL && base != NULL) {
            vmem_xfree(model_vmem, base, len);
            base = NULL;
            MOBJ_STAT_ADD(add_collision);
        }
    }

nolibva:
    as_rangelock(as);

    /*
     * If we don't have an expected base address, or the one that we want
     * to use is not available or acceptable, go get an acceptable
     * address from map_addr.
     *
     * If ASLR is enabled, we should never have used the cache, and should
     * also start our real work here, in the consequent of the next
     * condition.
     */
    if (randomize != 0)
        ASSERT(base == NULL);

    if (base == NULL || as_gap(as, len, &base, &len, 0, NULL) ||
        valid_usr_range(base, len, PROT_ALL, as, as->a_userlimit) !=
        RANGE_OKAY || OVERLAPS_STACK(base + len, p)) {
        MOBJ_STAT_ADD(get_addr);
        base = (caddr_t)align;
        map_addr(&base, len, 0, 1, ma_flags);
    }

    /*
     * Need to reserve the address space we're going to use.
     * Don't reserve swap space since we'll be mapping over this.
     */
    if (base != NULL) {
        /* Don't reserve swap space since we'll be mapping over this */
        crargs.flags |= MAP_NORESERVE;
        error = as_map(as, base, len, segvn_create, &crargs);
        if (error) {
            base = NULL;
        }
    }

    as_rangeunlock(as);
    return (base);
}
/*
 * Map the file associated with vp into the address space as a single
 * read only private mapping.
 * Returns 0 for success, and non-zero for failure to map the file.
 */
static int
mmapobj_map_flat(vnode_t *vp, mmapobj_result_t *mrp, size_t padding,
    cred_t *fcred)
{
    int error = 0;
    struct as *as = curproc->p_as;
    caddr_t addr = NULL;
    caddr_t start_addr;
    size_t len;
    size_t pad_len;
    int prot = PROT_USER | PROT_READ;
    uint_t ma_flags = _MAP_LOW32;
    vattr_t vattr;
    struct segvn_crargs crargs = SEGVN_ZFOD_ARGS(PROT_USER, PROT_ALL);

    if (get_udatamodel() == DATAMODEL_LP64) {
        ma_flags = 0;
    }

    vattr.va_mask = AT_SIZE;
    error = fop_getattr(vp, &vattr, 0, fcred, NULL);
    if (error) {
        return (error);
    }

    len = vattr.va_size;

    ma_flags |= MAP_PRIVATE;
    if (padding == 0) {
        MOBJ_STAT_ADD(map_flat_no_padding);
        error = fop_map(vp, 0, as, &addr, len, prot, PROT_ALL,
            ma_flags, fcred, NULL);
        if (error == 0) {
            mrp[0].mr_addr = addr;
            mrp[0].mr_msize = len;
            mrp[0].mr_fsize = len;
            mrp[0].mr_offset = 0;
            mrp[0].mr_prot = prot;
            mrp[0].mr_flags = 0;
        }
        return (error);
    }

    /* padding was requested so there's more work to be done */
    MOBJ_STAT_ADD(map_flat_padding);

    /* No need to reserve swap space now since it will be reserved later */
    crargs.flags |= MAP_NORESERVE;

    /* Need to setup padding which can only be in PAGESIZE increments. */
    ASSERT((padding & PAGEOFFSET) == 0);
    pad_len = len + (2 * padding);

    as_rangelock(as);
    map_addr(&addr, pad_len, 0, 1, ma_flags);
    error = as_map(as, addr, pad_len, segvn_create, &crargs);
    as_rangeunlock(as);
    if (error) {
        return (error);
    }
    start_addr = addr;
    addr += padding;
    ma_flags |= MAP_FIXED;
    error = fop_map(vp, 0, as, &addr, len, prot, PROT_ALL, ma_flags,
        fcred, NULL);
    if (error == 0) {
        mrp[0].mr_addr = start_addr;
        mrp[0].mr_msize = padding;
        mrp[0].mr_fsize = 0;
        mrp[0].mr_offset = 0;
        mrp[0].mr_prot = 0;
        mrp[0].mr_flags = MR_PADDING;

        mrp[1].mr_addr = addr;
        mrp[1].mr_msize = len;
        mrp[1].mr_fsize = len;
        mrp[1].mr_offset = 0;
        mrp[1].mr_prot = prot;
        mrp[1].mr_flags = 0;

        mrp[2].mr_addr = addr + P2ROUNDUP(len, PAGESIZE);
        mrp[2].mr_msize = padding;
        mrp[2].mr_fsize = 0;
        mrp[2].mr_offset = 0;
        mrp[2].mr_prot = 0;
        mrp[2].mr_flags = MR_PADDING;
    } else {
        /* Need to cleanup the as_map from earlier */
        (void) as_unmap(as, start_addr, pad_len);
    }
    return (error);
}
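/*
 * Resulting mrp layout when padding is requested (sketch): mrp[0] describes
 * the leading padding, mrp[1] the read only private file mapping itself, and
 * mrp[2] the trailing padding starting at the file length rounded up to a
 * page.  With no padding, mrp[0] alone describes the file mapping.
 */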
/*
 * Map a PT_LOAD or PT_SUNWBSS section of an executable file into the user's
 * address space.
 * vp - vnode to be mapped in
 * addr - start address
 * len - length of vp to be mapped
 * zfodlen - length of zero filled memory after len above
 * offset - offset into file where mapping should start
 * prot - protections for this mapping
 * fcred - credentials for the file associated with vp at open time.
 */
static int
mmapobj_map_ptload(struct vnode *vp, caddr_t addr, size_t len, size_t zfodlen,
    off_t offset, int prot, cred_t *fcred)
{
    int error = 0;
    caddr_t zfodbase, oldaddr;
    size_t oldlen;
    size_t end;
    size_t zfoddiff;
    label_t ljb;
    struct as *as = curproc->p_as;
    model_t model;
    int full_page;

    /*
     * See if addr and offset are aligned such that we can map in
     * full pages instead of partial pages.
     */
    full_page = (((uintptr_t)addr & PAGEOFFSET) ==
        ((uintptr_t)offset & PAGEOFFSET));

    model = get_udatamodel();

    oldaddr = addr;
    addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
    if (len) {
        spgcnt_t availm, npages;
        int preread;
        uint_t mflag = MAP_PRIVATE | MAP_FIXED;

        if (model == DATAMODEL_ILP32) {
            mflag |= _MAP_LOW32;
        }
        /* We may need to map in extra bytes */
        oldlen = len;
        len += ((size_t)oldaddr & PAGEOFFSET);

        if (full_page) {
            offset = (off_t)((uintptr_t)offset & PAGEMASK);
            if ((prot & (PROT_WRITE | PROT_EXEC)) == PROT_EXEC) {
                mflag |= MAP_TEXT;
                MOBJ_STAT_ADD(map_ptload_text);
            } else {
                mflag |= MAP_INITDATA;
                MOBJ_STAT_ADD(map_ptload_initdata);
            }

            /*
             * maxprot is passed as PROT_ALL so that mdb can
             * write to this segment.
             */
            if (error = fop_map(vp, (offset_t)offset, as, &addr,
                len, prot, PROT_ALL, mflag, fcred, NULL)) {
                return (error);
            }

            /*
             * If the segment can fit and is relatively small, then
             * we prefault the entire segment in.  This is based
             * on the model that says the best working set of a
             * small program is all of its pages.
             * We only do this if freemem will not drop below
             * lotsfree since we don't want to induce paging.
             */
            npages = (spgcnt_t)btopr(len);
            availm = freemem - lotsfree;
            preread = (npages < availm && len < PGTHRESH) ? 1 : 0;

            /*
             * If we aren't prefaulting the segment,
             * increment "deficit", if necessary to ensure
             * that pages will become available when this
             * process starts executing.
             */
            if (preread == 0 && npages > availm &&
                deficit < lotsfree) {
                deficit += MIN((pgcnt_t)(npages - availm),
                    lotsfree - deficit);
            }

            if (preread) {
                (void) as_faulta(as, addr, len);
                MOBJ_STAT_ADD(map_ptload_preread);
            }
        } else {
            /*
             * addr and offset were not aligned such that we could
             * use fop_map, thus we need to as_map the memory we
             * need and then read the data in from disk.
             * This code path is a corner case which should never
             * be taken, but hand crafted binaries could trigger
             * this logic and it needs to work correctly.
             */
            MOBJ_STAT_ADD(map_ptload_unaligned_text);
            as_rangelock(as);
            (void) as_unmap(as, addr, len);

            /*
             * We use zfod_argsp because we need to be able to
             * write to the mapping and then we'll change the
             * protections later if they are incorrect.
             */
            error = as_map(as, addr, len, segvn_create, zfod_argsp);
            as_rangeunlock(as);
            if (error) {
                MOBJ_STAT_ADD(map_ptload_unaligned_map_fail);
                return (error);
            }

            /* Now read in the data from disk */
            error = vn_rdwr(UIO_READ, vp, oldaddr, oldlen, offset,
                UIO_USERSPACE, 0, (rlim64_t)0, fcred, NULL);
            if (error) {
                MOBJ_STAT_ADD(map_ptload_unaligned_read_fail);
                return (error);
            }

            /*
             * Now set protections.
             */
            if (prot != PROT_ZFOD) {
                (void) as_setprot(as, addr, len, prot);
            }
        }
    }

    if (zfodlen) {
        end = (size_t)addr + len;
        zfodbase = (caddr_t)P2ROUNDUP(end, PAGESIZE);
        zfoddiff = (uintptr_t)zfodbase - end;
        if (zfoddiff) {
            /*
             * Before we go to zero the remaining space on the last
             * page, make sure we have write permission.
             *
             * We need to be careful how we zero-fill the last page
             * if the protection does not include PROT_WRITE.  Using
             * as_setprot() can cause the VM segment code to call
             * segvn_vpage(), which must allocate a page struct for
             * each page in the segment.  If we have a very large
             * segment, this may fail, so we check for that, even
             * though we ignore other return values from as_setprot.
             */
            MOBJ_STAT_ADD(zfoddiff);
            if ((prot & PROT_WRITE) == 0) {
                if (as_setprot(as, (caddr_t)end, zfoddiff,
                    prot | PROT_WRITE) == ENOMEM)
                    return (ENOMEM);
                MOBJ_STAT_ADD(zfoddiff_nowrite);
            }
            if (on_fault(&ljb)) {
                no_fault();
                if ((prot & PROT_WRITE) == 0) {
                    (void) as_setprot(as, (caddr_t)end,
                        zfoddiff, prot);
                }
                return (EFAULT);
            }
            uzero((void *)end, zfoddiff);
            no_fault();

            /*
             * Remove write protection to return to original state
             */
            if ((prot & PROT_WRITE) == 0) {
                (void) as_setprot(as, (caddr_t)end,
                    zfoddiff, prot);
            }
        }
        if (zfodlen > zfoddiff) {
            struct segvn_crargs crargs =
                SEGVN_ZFOD_ARGS(prot, PROT_ALL);

            MOBJ_STAT_ADD(zfodextra);
            zfodlen -= zfoddiff;
            crargs.szc = AS_MAP_NO_LPOOB;

            as_rangelock(as);
            (void) as_unmap(as, (caddr_t)zfodbase, zfodlen);
            error = as_map(as, (caddr_t)zfodbase,
                zfodlen, segvn_create, &crargs);
            as_rangeunlock(as);
            if (error) {
                return (error);
            }
        }
    }
    return (0);
}

/*
 * Map the ELF file represented by vp into the user's address space.  The
 * first mapping will start at start_addr and there will be num_elements
 * mappings.  The mappings are described by the data in mrp which may be
 * modified upon returning from this function.
 * Returns 0 for success or errno for failure.
 */
static int
mmapobj_map_elf(struct vnode *vp, caddr_t start_addr, mmapobj_result_t *mrp,
    int num_elements, cred_t *fcred, ushort_t e_type)
{
    int i;
    int ret;
    caddr_t lo;
    caddr_t hi;
    struct as *as = curproc->p_as;

    for (i = 0; i < num_elements; i++) {
        caddr_t addr;
        size_t p_memsz;
        size_t p_filesz;
        size_t zfodlen;
        offset_t p_offset;
        size_t dif;
        int prot;

        /* Always need to adjust mr_addr */
        addr = start_addr + (size_t)(mrp[i].mr_addr);
        mrp[i].mr_addr =
            (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);

        /* Padding has already been mapped */
        if (MR_GET_TYPE(mrp[i].mr_flags) == MR_PADDING) {
            continue;
        }

        /* Can't execute code from "noexec" mounted filesystem. */
        if (((vp->v_vfsp->vfs_flag & VFS_NOEXEC) != 0) &&
            ((mrp[i].mr_prot & PROT_EXEC) != 0)) {
            MOBJ_STAT_ADD(noexec_fs);
            return (EACCES);
        }

        p_memsz = mrp[i].mr_msize;
        p_filesz = mrp[i].mr_fsize;
        zfodlen = p_memsz - p_filesz;
        p_offset = mrp[i].mr_offset;
        dif = (uintptr_t)(addr) & PAGEOFFSET;
        prot = mrp[i].mr_prot | PROT_USER;
        ret = mmapobj_map_ptload(vp, addr, p_filesz, zfodlen,
            p_offset, prot, fcred);
        if (ret != 0) {
            MOBJ_STAT_ADD(ptload_failed);
            mmapobj_unmap(mrp, i, num_elements, e_type);
            return (ret);
        }

        /* Need to cleanup mrp to reflect the actual values used */
        mrp[i].mr_msize += dif;
        mrp[i].mr_offset = (size_t)addr & PAGEOFFSET;
    }

    /* Also need to unmap any holes created above */
    if (num_elements == 1) {
        MOBJ_STAT_ADD(map_elf_no_holes);
        return (0);
    }
    if (e_type == ET_EXEC) {
        return (0);
    }

    as_rangelock(as);
    hi = mrp[0].mr_addr;

    /* Remove holes made by the rest of the segments */
    for (i = 0; i < num_elements - 1; i++) {
        lo = (caddr_t)P2ROUNDUP((size_t)(mrp[i].mr_addr) +
            mrp[i].mr_msize, PAGESIZE);
        hi = mrp[i + 1].mr_addr;
        if (lo < hi) {
            /*
             * If as_unmap fails we just use up a bit of extra
             * space
             */
            (void) as_unmap(as, (caddr_t)lo,
                (size_t)hi - (size_t)lo);
            MOBJ_STAT_ADD(unmap_hole);
        }
    }
    as_rangeunlock(as);

    return (0);
}
/* Ugly hack to get STRUCT_* macros to work below */
struct myphdr {
    Phdr        x;  /* native version */
};

struct myphdr32 {
    Elf32_Phdr  x;
};
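/*
 * The STRUCT_* handle built on myphdr lets the same loop read either a
 * 32 bit Elf32_Phdr or the native Phdr depending on the process data model.
 * Sketch of how it is used in the loops below:
 *
 *  STRUCT_SET_HANDLE(mph, model, (struct myphdr *)phdrbase);
 *  p_type = STRUCT_FGET(mph, x.p_type);
 */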
/*
 * Calculate and return the number of loadable segments in the ELF Phdr
 * represented by phdrbase as well as the len of the total mapping and
 * the max alignment that is needed for a given segment.  On success,
 * 0 is returned, and *len, *loadable and *align have been filled out.
 * On failure, errno will be returned, which in this case is ENOTSUP
 * if we were passed an ELF file with overlapping segments.
 */
static int
calc_loadable(Ehdr *ehdrp, caddr_t phdrbase, int nphdrs, size_t *len,
    int *loadable, size_t *align)
{
    int i;
    int hsize;
    model_t model;
    ushort_t e_type = ehdrp->e_type;    /* same offset 32 and 64 bit */
    uint_t p_type;
    offset_t p_offset;
    size_t p_memsz;
    size_t p_align;
    caddr_t vaddr;
    int num_segs = 0;
    caddr_t start_addr = NULL;
    caddr_t p_end = NULL;
    size_t max_align = 0;
    size_t min_align = PAGESIZE;    /* needed for vmem_xalloc */
    STRUCT_HANDLE(myphdr, mph);
#if defined(__sparc)
    extern int vac_size;

    /*
     * Want to prevent aliasing by making the start address at least be
     * aligned to vac_size.
     */
    min_align = MAX(PAGESIZE, vac_size);
#endif

    model = get_udatamodel();
    STRUCT_SET_HANDLE(mph, model, (struct myphdr *)phdrbase);

    /* hsize alignment should have been checked before calling this func */
    if (model == DATAMODEL_LP64) {
        hsize = ehdrp->e_phentsize;
        ASSERT((hsize & 7) == 0);
    } else {
        ASSERT(model == DATAMODEL_ILP32);
        hsize = ((Elf32_Ehdr *)ehdrp)->e_phentsize;
        ASSERT((hsize & 3) == 0);
    }

    /*
     * Determine the span of all loadable segments and calculate the
     * number of loadable segments.
     */
    for (i = 0; i < nphdrs; i++) {
        p_type = STRUCT_FGET(mph, x.p_type);
        if (p_type == PT_LOAD || p_type == PT_SUNWBSS) {
            vaddr = (caddr_t)(uintptr_t)STRUCT_FGET(mph, x.p_vaddr);
            p_memsz = STRUCT_FGET(mph, x.p_memsz);

            /*
             * Skip this header if it requests no memory to be
             * mapped.
             */
            if (p_memsz == 0) {
                STRUCT_SET_HANDLE(mph, model,
                    (struct myphdr *)((size_t)STRUCT_BUF(mph) +
                    hsize));
                MOBJ_STAT_ADD(nomem_header);
                continue;
            }
            if (num_segs++ == 0) {
                /*
                 * The p_vaddr of the first PT_LOAD segment
                 * must either be NULL or within the first
                 * page in order to be interpreted.
                 * Otherwise, it's an invalid file.
                 */
                if (e_type == ET_DYN &&
                    ((caddr_t)((uintptr_t)vaddr &
                    (uintptr_t)PAGEMASK) != NULL)) {
                    MOBJ_STAT_ADD(inval_header);
                    return (ENOTSUP);
                }
                start_addr = vaddr;
                /*
                 * For the first segment, we need to map from
                 * the beginning of the file, so we will
                 * adjust the size of the mapping to include
                 * this memory.
                 */
                p_offset = STRUCT_FGET(mph, x.p_offset);
            } else {
                p_offset = 0;
            }
            /*
             * Check to make sure that this mapping wouldn't
             * overlap a previous mapping.
             */
            if (vaddr < p_end) {
                MOBJ_STAT_ADD(overlap_header);
                return (ENOTSUP);
            }

            p_end = vaddr + p_memsz + p_offset;
            p_end = (caddr_t)P2ROUNDUP((size_t)p_end, PAGESIZE);

            p_align = STRUCT_FGET(mph, x.p_align);
            if (p_align > 1 && p_align > max_align) {
                max_align = p_align;
                if (max_align < min_align) {
                    max_align = min_align;
                    MOBJ_STAT_ADD(min_align);
                }
            }
        }
        STRUCT_SET_HANDLE(mph, model,
            (struct myphdr *)((size_t)STRUCT_BUF(mph) + hsize));
    }

    /*
     * The alignment should be a power of 2, if it isn't we forgive it
     * and round up.  On overflow, we'll set the alignment to max_align
     * rounded down to the nearest power of 2.
     */
    if (max_align > 0 && !ISP2(max_align)) {
        MOBJ_STAT_ADD(np2_align);
        *align = 2 * (1L << (highbit(max_align) - 1));
        if (*align < max_align ||
            (*align > UINT_MAX && model == DATAMODEL_ILP32)) {
            MOBJ_STAT_ADD(np2_align_overflow);
            *align = 1L << (highbit(max_align) - 1);
        }
    } else {
        *align = max_align;
    }

    ASSERT(*align >= PAGESIZE || *align == 0);

    *loadable = num_segs;
    *len = p_end - start_addr;
    return (0);
}
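/*
 * Alignment example (illustrative numbers): a p_align of 0x18000 is not a
 * power of two, so it is rounded up to 0x20000 via
 * 2 * (1 << (highbit(0x18000) - 1)); if that doubling overflows (or exceeds
 * UINT_MAX for a 32 bit process), we fall back to rounding down to 0x10000.
 */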
/*
 * Check the address space to see if the virtual addresses to be used are
 * available.  If they are not, return errno for failure.  On success, 0
 * will be returned, and the virtual addresses for each mmapobj_result_t
 * will be reserved.  Note that a reservation could have earlier been made
 * for a given segment via a /dev/null mapping.  If that is the case, then
 * we can use that VA space for our mappings.
 * Note: this function will only be used for ET_EXEC binaries.
 */
static int
check_exec_addrs(int loadable, mmapobj_result_t *mrp, caddr_t start_addr)
{
    int i;
    struct as *as = curproc->p_as;
    struct segvn_crargs crargs = SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);
    int ret;
    caddr_t myaddr;
    size_t mylen;
    struct seg *seg;

    /* No need to reserve swap space now since it will be reserved later */
    crargs.flags |= MAP_NORESERVE;
    as_rangelock(as);
    for (i = 0; i < loadable; i++) {

        myaddr = start_addr + (size_t)mrp[i].mr_addr;
        mylen = mrp[i].mr_msize;

        /* See if there is a hole in the as for this range */
        if (as_gap(as, mylen, &myaddr, &mylen, 0, NULL) == 0) {
            ASSERT(myaddr == start_addr + (size_t)mrp[i].mr_addr);
            ASSERT(mylen == mrp[i].mr_msize);

#ifdef DEBUG
            if (MR_GET_TYPE(mrp[i].mr_flags) == MR_PADDING) {
                MOBJ_STAT_ADD(exec_padding);
            }
#endif
            ret = as_map(as, myaddr, mylen, segvn_create, &crargs);
            if (ret) {
                as_rangeunlock(as);
                mmapobj_unmap_exec(mrp, i, start_addr);
                return (ret);
            }
        } else {
            /*
             * There is a mapping that exists in the range
             * so check to see if it was a "reservation"
             * from /dev/null.  The mapping is from
             * /dev/null if the mapping comes from
             * segdev and the type is neither MAP_SHARED
             * nor MAP_PRIVATE.
             */
            AS_LOCK_ENTER(as, RW_READER);
            seg = as_findseg(as, myaddr, 0);
            MOBJ_STAT_ADD(exec_addr_mapped);
            if (seg && seg->s_ops == &segdev_ops &&
                ((segop_gettype(seg, myaddr) &
                (MAP_SHARED | MAP_PRIVATE)) == 0) &&
                myaddr >= seg->s_base &&
                myaddr + mylen <=
                seg->s_base + seg->s_size) {
                MOBJ_STAT_ADD(exec_addr_devnull);
                AS_LOCK_EXIT(as);
                (void) as_unmap(as, myaddr, mylen);
                ret = as_map(as, myaddr, mylen, segvn_create,
                    &crargs);
                mrp[i].mr_flags |= MR_RESV;
                if (ret) {
                    as_rangeunlock(as);
                    /* Need to remap what we unmapped */
                    mmapobj_unmap_exec(mrp, i + 1,
                        start_addr);
                    return (ret);
                }
            } else {
                AS_LOCK_EXIT(as);
                as_rangeunlock(as);
                mmapobj_unmap_exec(mrp, i, start_addr);
                MOBJ_STAT_ADD(exec_addr_in_use);
                return (EADDRINUSE);
            }
        }
    }
    as_rangeunlock(as);
    return (0);
}
/*
 * Walk through the ELF program headers and extract all useful information
 * for PT_LOAD and PT_SUNWBSS segments into mrp.
 * Return 0 on success or error on failure.
 */
static int
process_phdrs(Ehdr *ehdrp, caddr_t phdrbase, int nphdrs, mmapobj_result_t *mrp,
    vnode_t *vp, uint_t *num_mapped, size_t padding, cred_t *fcred,
    uint_t flags)
{
    int i;
    caddr_t start_addr = NULL;
    caddr_t vaddr;
    size_t len = 0;
    size_t lib_len = 0;
    int ret;
    int prot;
    struct lib_va *lvp = NULL;
    vattr_t vattr;
    struct as *as = curproc->p_as;
    int error;
    int loadable = 0;
    int current = 0;
    int use_lib_va = 1;
    size_t align = 0;
    size_t add_pad = 0;
    int hdr_seen = 0;
    ushort_t e_type = ehdrp->e_type;    /* same offset 32 and 64 bit */
    uint_t p_type;
    offset_t p_offset;
    size_t p_memsz;
    size_t p_filesz;
    uint_t p_flags;
    int hsize;
    model_t model;
    STRUCT_HANDLE(myphdr, mph);

    model = get_udatamodel();
    STRUCT_SET_HANDLE(mph, model, (struct myphdr *)phdrbase);

    /*
     * Need to make sure that hsize is aligned properly.
     * For 32bit processes, 4 byte alignment is required.
     * For 64bit processes, 8 byte alignment is required.
     * If the alignment isn't correct, we need to return failure
     * since it could cause an alignment error panic while walking
     * the phdr array.
     */
    if (model == DATAMODEL_LP64) {
        hsize = ehdrp->e_phentsize;
        if (hsize & 7) {
            MOBJ_STAT_ADD(phent_align64);
            return (ENOTSUP);
        }
    } else {
        ASSERT(model == DATAMODEL_ILP32);
        hsize = ((Elf32_Ehdr *)ehdrp)->e_phentsize;
        if (hsize & 3) {
            MOBJ_STAT_ADD(phent_align32);
            return (ENOTSUP);
        }
    }

    if ((padding != 0) || secflag_enabled(curproc, PROC_SEC_ASLR) ||
        (flags & MMOBJ_PRIMARY) != 0) {
        use_lib_va = 0;
    }
    if (e_type == ET_DYN) {
        vattr.va_mask = AT_FSID | AT_NODEID | AT_CTIME | AT_MTIME;
        error = fop_getattr(vp, &vattr, 0, fcred, NULL);
        if (error) {
            return (error);
        }
        /* Check to see if we already have a description for this lib */
        if (!secflag_enabled(curproc, PROC_SEC_ASLR))
            lvp = lib_va_find(&vattr);

        if (lvp != NULL) {
            MOBJ_STAT_ADD(lvp_found);
            if (use_lib_va) {
                start_addr = mmapobj_lookup_start_addr(lvp);
                if (start_addr == NULL) {
                    lib_va_release(lvp);
                    return (ENOMEM);
                }
            }

            /*
             * loadable may be zero if the original allocator
             * of lvp hasn't finished setting it up but the rest
             * of the fields will be accurate.
             */
            loadable = lvp->lv_num_segs;
            len = lvp->lv_len;
            align = lvp->lv_align;
        }
    }

    /*
     * Determine the span of all loadable segments and calculate the
     * number of loadable segments, the total len spanned by the mappings
     * and the max alignment, if we didn't get them above.
     */
    if (loadable == 0) {
        MOBJ_STAT_ADD(no_loadable_yet);
        ret = calc_loadable(ehdrp, phdrbase, nphdrs, &len,
            &loadable, &align);
        if (ret != 0) {
            /*
             * Since it'd be an invalid file, we shouldn't have
             * cached it previously.
             */
            ASSERT(lvp == NULL);
            return (ret);
        }
#ifdef DEBUG
        if (lvp) {
            ASSERT(len == lvp->lv_len);
            ASSERT(align == lvp->lv_align);
        }
#endif
    }

    /* Make sure there's something to map. */
    if (len == 0 || loadable == 0) {
        /*
         * Since it'd be an invalid file, we shouldn't have
         * cached it previously.
         */
        ASSERT(lvp == NULL);
        MOBJ_STAT_ADD(nothing_to_map);
        return (ENOTSUP);
    }

    lib_len = len;
    if (padding != 0) {
        loadable += 2;
    }
    if (loadable > *num_mapped) {
        *num_mapped = loadable;
        /* cleanup previous reservation */
        if (start_addr) {
            (void) as_unmap(as, start_addr, lib_len);
        }
        MOBJ_STAT_ADD(e2big);
        if (lvp) {
            lib_va_release(lvp);
        }
        return (E2BIG);
    }

    /*
     * We now know the size of the object to map and now we need to
     * get the start address to map it at.  It's possible we already
     * have it if we found all the info we need in the lib_va cache.
     */
    if (e_type == ET_DYN && start_addr == NULL) {
        /*
         * Need to make sure padding does not throw off
         * required alignment.  We can only specify an
         * alignment for the starting address to be mapped,
         * so we round padding up to the alignment and map
         * from there and then throw out the extra later.
         */
        if (padding != 0) {
            if (align > 1) {
                add_pad = P2ROUNDUP(padding, align);
                len += add_pad;
                MOBJ_STAT_ADD(dyn_pad_align);
            } else {
                MOBJ_STAT_ADD(dyn_pad_noalign);
                len += padding; /* at beginning */
            }
            len += padding; /* at end of mapping */
        }
        /*
         * At this point, if lvp is non-NULL, then above we
         * already found it in the cache but did not get
         * the start address since we were not going to use lib_va.
         * Since we know that lib_va will not be used, it's safe
         * to call mmapobj_alloc_start_addr and know that lvp
         * will not be modified.
         */
        ASSERT(lvp ? use_lib_va == 0 : 1);
        start_addr = mmapobj_alloc_start_addr(&lvp, len,
            use_lib_va,
            secflag_enabled(curproc, PROC_SEC_ASLR),
            flags & MMOBJ_PRIMARY,
            align, &vattr);
        if (start_addr == NULL) {
            if (lvp) {
                lib_va_release(lvp);
            }
            MOBJ_STAT_ADD(alloc_start_fail);
            return (ENOMEM);
        }
        /*
         * If we can't cache it, no need to hang on to it.
         * Setting lv_num_segs to non-zero will make that
         * field active and since there are too many segments
         * to cache, all future users will not try to use lv_mps.
         */
        if (lvp != NULL && loadable > LIBVA_CACHED_SEGS && use_lib_va) {
            lvp->lv_num_segs = loadable;
            lib_va_release(lvp);
            lvp = NULL;
            MOBJ_STAT_ADD(lvp_nocache);
        }
        /*
         * Free the beginning of the mapping if the padding
         * was not aligned correctly.
         */
        if (padding != 0 && add_pad != padding) {
            (void) as_unmap(as, start_addr,
                add_pad - padding);
            start_addr += (add_pad - padding);
            MOBJ_STAT_ADD(extra_padding);
        }
    }

    /*
     * At this point, we have reserved the virtual address space
     * for our mappings.  Now we need to start filling out the mrp
     * array to describe all of the individual mappings we are going
     * to return.
     * For ET_EXEC there has been no memory reservation since we are
     * using fixed addresses.  While filling in the mrp array below,
     * we will have the first segment biased to start at addr 0
     * and the rest will be biased by this same amount.  Thus if there
     * is padding, the first padding will start at addr 0, and the next
     * segment will start at the value of padding.
     */

    /* We'll fill out padding later, so start filling in mrp at index 1 */
    if (padding != 0) {
        current = 1;
    }

    /* If we have no more need for lvp let it go now */
    if (lvp != NULL && use_lib_va == 0) {
        lib_va_release(lvp);
        MOBJ_STAT_ADD(lvp_not_needed);
        lvp = NULL;
    }

    /* Now fill out the mrp structs from the program headers */
    STRUCT_SET_HANDLE(mph, model, (struct myphdr *)phdrbase);
    for (i = 0; i < nphdrs; i++) {
        p_type = STRUCT_FGET(mph, x.p_type);
        if (p_type == PT_LOAD || p_type == PT_SUNWBSS) {
            vaddr = (caddr_t)(uintptr_t)STRUCT_FGET(mph, x.p_vaddr);
            p_memsz = STRUCT_FGET(mph, x.p_memsz);
            p_filesz = STRUCT_FGET(mph, x.p_filesz);
            p_offset = STRUCT_FGET(mph, x.p_offset);
            p_flags = STRUCT_FGET(mph, x.p_flags);

            /*
             * Skip this header if it requests no memory to be
             * mapped.
             */
            if (p_memsz == 0) {
                STRUCT_SET_HANDLE(mph, model,
                    (struct myphdr *)((size_t)STRUCT_BUF(mph) +
                    hsize));
                MOBJ_STAT_ADD(no_mem_map_sz);
                continue;
            }

            prot = 0;
            if (p_flags & PF_R)
                prot |= PROT_READ;
            if (p_flags & PF_W)
                prot |= PROT_WRITE;
            if (p_flags & PF_X)
                prot |= PROT_EXEC;

            ASSERT(current < loadable);
            mrp[current].mr_msize = p_memsz;
            mrp[current].mr_fsize = p_filesz;
            mrp[current].mr_offset = p_offset;
            mrp[current].mr_prot = prot;

            if (hdr_seen == 0 && p_filesz != 0) {
                mrp[current].mr_flags = MR_HDR_ELF;
                /*
                 * We modify mr_offset because we
                 * need to map the ELF header as well, and if
                 * we didn't then the header could be left out
                 * of the mapping that we will create later.
                 * Since we're removing the offset, we need to
                 * account for that in the other fields as well
                 * since we will be mapping the memory from 0
                 * to p_offset.
                 */
                if (e_type == ET_DYN) {
                    mrp[current].mr_offset = 0;
                    mrp[current].mr_msize += p_offset;
                    mrp[current].mr_fsize += p_offset;
                } else {
                    ASSERT(e_type == ET_EXEC);
                    /*
                     * Save off the start addr which will be
                     * our bias for the rest of the
                     * ET_EXEC mappings.
                     */
                    start_addr = vaddr - padding;
                }
                mrp[current].mr_addr = (caddr_t)padding;
                hdr_seen = 1;
            } else {
                if (e_type == ET_EXEC) {
                    /* bias mr_addr */
                    mrp[current].mr_addr =
                        vaddr - (size_t)start_addr;
                } else {
                    mrp[current].mr_addr = vaddr + padding;
                }
                mrp[current].mr_flags = 0;
            }
            current++;
        }

        /* Move to next phdr */
        STRUCT_SET_HANDLE(mph, model,
            (struct myphdr *)((size_t)STRUCT_BUF(mph) +
            hsize));
    }

    /* Now fill out the padding segments */
    if (padding != 0) {
        mrp[0].mr_addr = NULL;
        mrp[0].mr_msize = padding;
        mrp[0].mr_fsize = 0;
        mrp[0].mr_offset = 0;
        mrp[0].mr_prot = 0;
        mrp[0].mr_flags = MR_PADDING;

        /* Setup padding for the last segment */
        ASSERT(current == loadable - 1);
        mrp[current].mr_addr = (caddr_t)lib_len + padding;
        mrp[current].mr_msize = padding;
        mrp[current].mr_fsize = 0;
        mrp[current].mr_offset = 0;
        mrp[current].mr_prot = 0;
        mrp[current].mr_flags = MR_PADDING;
    }

    /*
     * Need to make sure address ranges desired are not in use or
     * are previously allocated reservations from /dev/null.  For
     * ET_DYN, we already made sure our address range was free.
     */
    if (e_type == ET_EXEC) {
        ret = check_exec_addrs(loadable, mrp, start_addr);
        if (ret != 0) {
            ASSERT(lvp == NULL);
            MOBJ_STAT_ADD(check_exec_failed);
            return (ret);
        }
    }

    /* Finish up our business with lvp. */
    if (lvp) {
        ASSERT(e_type == ET_DYN);
        if (lvp->lv_num_segs == 0 && loadable <= LIBVA_CACHED_SEGS) {
            bcopy(mrp, lvp->lv_mps,
                loadable * sizeof (mmapobj_result_t));
            membar_producer();
        }
        /*
         * Setting lv_num_segs to a non-zero value indicates that
         * lv_mps is now valid and can be used by other threads.
         * So, the above stores need to finish before lv_num_segs
         * is updated. lv_mps is only valid if lv_num_segs is
         * greater than LIBVA_CACHED_SEGS.
         */
        lvp->lv_num_segs = loadable;
        lib_va_release(lvp);
        MOBJ_STAT_ADD(lvp_used);
    }

    /* Now that we have mrp completely filled out go map it */
    ret = mmapobj_map_elf(vp, start_addr, mrp, loadable, fcred, e_type);
    if (ret == 0) {
        *num_mapped = loadable;
    }

    return (ret);
}
/*
 * Take the ELF file passed in, and do the work of mapping it.
 * num_mapped in - # elements in user buffer
 * num_mapped out - # sections mapped and length of mrp array if
 *                  no errors or E2BIG returned.
 */
static int
doelfwork(Ehdr *ehdrp, vnode_t *vp, mmapobj_result_t *mrp,
    uint_t *num_mapped, size_t padding, cred_t *fcred, uint_t flags)
{
    int error;
    offset_t phoff;
    int nphdrs;
    unsigned char ei_class;
    unsigned short phentsize;
    ssize_t phsizep;
    caddr_t phbasep;
    int to_map;
    model_t model;

    ei_class = ehdrp->e_ident[EI_CLASS];
    model = get_udatamodel();
    if ((model == DATAMODEL_ILP32 && ei_class == ELFCLASS64) ||
        (model == DATAMODEL_LP64 && ei_class == ELFCLASS32)) {
        MOBJ_STAT_ADD(wrong_model);
        return (ENOTSUP);
    }

    /* Can't execute code from "noexec" mounted filesystem. */
    if (ehdrp->e_type == ET_EXEC &&
        (vp->v_vfsp->vfs_flag & VFS_NOEXEC) != 0) {
        MOBJ_STAT_ADD(noexec_fs);
        return (EACCES);
    }

    /*
     * Relocatable and core files are mapped as a single flat file
     * since no interpretation is done on them by mmapobj.
     */
    if (ehdrp->e_type == ET_REL || ehdrp->e_type == ET_CORE) {
        to_map = padding ? 3 : 1;
        if (*num_mapped < to_map) {
            *num_mapped = to_map;
            MOBJ_STAT_ADD(e2big_et_rel);
            return (E2BIG);
        }
        error = mmapobj_map_flat(vp, mrp, padding, fcred);
        if (error == 0) {
            *num_mapped = to_map;
            mrp[padding ? 1 : 0].mr_flags = MR_HDR_ELF;
            MOBJ_STAT_ADD(et_rel_mapped);
        }
        return (error);
    }

    /* Check for an unknown ELF type */
    if (ehdrp->e_type != ET_EXEC && ehdrp->e_type != ET_DYN) {
        MOBJ_STAT_ADD(unknown_elf_type);
        return (ENOTSUP);
    }

    if (ei_class == ELFCLASS32) {
        Elf32_Ehdr *e32hdr = (Elf32_Ehdr *)ehdrp;
        ASSERT(model == DATAMODEL_ILP32);
        nphdrs = e32hdr->e_phnum;
        phentsize = e32hdr->e_phentsize;
        if (phentsize < sizeof (Elf32_Phdr)) {
            MOBJ_STAT_ADD(phent32_too_small);
            return (ENOTSUP);
        }
        phoff = e32hdr->e_phoff;
    } else if (ei_class == ELFCLASS64) {
        Elf64_Ehdr *e64hdr = (Elf64_Ehdr *)ehdrp;
        ASSERT(model == DATAMODEL_LP64);
        nphdrs = e64hdr->e_phnum;
        phentsize = e64hdr->e_phentsize;
        if (phentsize < sizeof (Elf64_Phdr)) {
            MOBJ_STAT_ADD(phent64_too_small);
            return (ENOTSUP);
        }
        phoff = e64hdr->e_phoff;
    } else {
        /* fallthrough case for an invalid ELF class */
        MOBJ_STAT_ADD(inval_elf_class);
        return (ENOTSUP);
    }

    /*
     * nphdrs should only have this value for core files which are handled
     * above as a single mapping.  If other file types ever use this
     * sentinel, then we'll add the support needed to handle this here.
     */
    if (nphdrs == PN_XNUM) {
        MOBJ_STAT_ADD(too_many_phdrs);
        return (ENOTSUP);
    }

    phsizep = nphdrs * phentsize;

    if (phsizep == 0) {
        MOBJ_STAT_ADD(no_phsize);
        return (ENOTSUP);
    }

    /* Make sure we only wait for memory if it's a reasonable request */
    if (phsizep > mmapobj_alloc_threshold) {
        MOBJ_STAT_ADD(phsize_large);
        if ((phbasep = kmem_alloc(phsizep, KM_NOSLEEP)) == NULL) {
            MOBJ_STAT_ADD(phsize_xtralarge);
            return (ENOMEM);
        }
    } else {
        phbasep = kmem_alloc(phsizep, KM_SLEEP);
    }

    if ((error = vn_rdwr(UIO_READ, vp, phbasep, phsizep,
        (offset_t)phoff, UIO_SYSSPACE, 0, (rlim64_t)0,
        fcred, NULL)) != 0) {
        kmem_free(phbasep, phsizep);
        return (error);
    }

    /* Now process the phdr's */
    error = process_phdrs(ehdrp, phbasep, nphdrs, mrp, vp, num_mapped,
        padding, fcred, flags);
    kmem_free(phbasep, phsizep);
    return (error);
}
/*
 * These are the two types of files that we can interpret and we want to read
 * in enough info to cover both types when looking at the initial header.
 */
#define MAX_HEADER_SIZE (MAX(sizeof (Ehdr), sizeof (struct exec)))
/*
 * Map vp passed in in an interpreted manner.  ELF and AOUT files will be
 * interpreted and mapped appropriately for execution.
 * num_mapped in - # elements in mrp
 * num_mapped out - # sections mapped and length of mrp array if
 *                  no errors or E2BIG returned.
 *
 * Returns 0 on success, errno value on failure.
 */
static int
mmapobj_map_interpret(vnode_t *vp, mmapobj_result_t *mrp,
    uint_t *num_mapped, size_t padding, cred_t *fcred, uint_t flags)
{
    int error = 0;
    vattr_t vattr;
    struct lib_va *lvp;
    caddr_t start_addr;
    model_t model;

    /*
     * header has to be aligned to the native size of ulong_t in order
     * to avoid an unaligned access when dereferencing the header as
     * a ulong_t.  Thus we allocate our array on the stack of type
     * ulong_t and then have header, which we dereference later as a char
     * array point at lheader.
     */
    ulong_t lheader[(MAX_HEADER_SIZE / (sizeof (ulong_t))) + 1];
    caddr_t header = (caddr_t)&lheader;

    vattr.va_mask = AT_FSID | AT_NODEID | AT_CTIME | AT_MTIME | AT_SIZE;
    error = fop_getattr(vp, &vattr, 0, fcred, NULL);
    if (error) {
        return (error);
    }

    /*
     * Check lib_va to see if we already have a full description
     * for this library.  This is the fast path and only used for
     * ET_DYN ELF files (dynamic libraries).
     */
    if (padding == 0 && !secflag_enabled(curproc, PROC_SEC_ASLR) &&
        ((lvp = lib_va_find(&vattr)) != NULL)) {
        int num_segs;

        model = get_udatamodel();
        if ((model == DATAMODEL_ILP32 &&
            lvp->lv_flags & LV_ELF64) ||
            (model == DATAMODEL_LP64 &&
            lvp->lv_flags & LV_ELF32)) {
            lib_va_release(lvp);
            MOBJ_STAT_ADD(fast_wrong_model);
            return (ENOTSUP);
        }
        num_segs = lvp->lv_num_segs;
        if (*num_mapped < num_segs) {
            *num_mapped = num_segs;
            lib_va_release(lvp);
            MOBJ_STAT_ADD(fast_e2big);
            return (E2BIG);
        }

        /*
         * Check to see if we have all the mappable program headers
         * cached.
         */
        if (num_segs <= LIBVA_CACHED_SEGS && num_segs != 0) {
            MOBJ_STAT_ADD(fast);
            start_addr = mmapobj_lookup_start_addr(lvp);
            if (start_addr == NULL) {
                lib_va_release(lvp);
                return (ENOMEM);
            }

            bcopy(lvp->lv_mps, mrp,
                num_segs * sizeof (mmapobj_result_t));

            error = mmapobj_map_elf(vp, start_addr, mrp,
                num_segs, fcred, ET_DYN);

            lib_va_release(lvp);
            if (error == 0) {
                *num_mapped = num_segs;
                MOBJ_STAT_ADD(fast_success);
            }
            return (error);
        }
        MOBJ_STAT_ADD(fast_not_now);

        /* Release it for now since we'll look it up below */
        lib_va_release(lvp);
    }

    /*
     * Time to see if this is a file we can interpret.  If it's smaller
     * than this, then we can't interpret it.
     */
    if (vattr.va_size < MAX_HEADER_SIZE) {
        MOBJ_STAT_ADD(small_file);
        return (ENOTSUP);
    }

    if ((error = vn_rdwr(UIO_READ, vp, header, MAX_HEADER_SIZE, 0,
        UIO_SYSSPACE, 0, (rlim64_t)0, fcred, NULL)) != 0) {
        MOBJ_STAT_ADD(read_error);
        return (error);
    }

    /* Verify file type */
    if (header[EI_MAG0] == ELFMAG0 && header[EI_MAG1] == ELFMAG1 &&
        header[EI_MAG2] == ELFMAG2 && header[EI_MAG3] == ELFMAG3) {
        return (doelfwork((Ehdr *)lheader, vp, mrp, num_mapped,
            padding, fcred, flags));
    }

    /* Unsupported type */
    MOBJ_STAT_ADD(unsupported);
    return (ENOTSUP);
}
/*
 * Given a vnode, map it as either a flat file or interpret it and map
 * it according to the rules of the file type.
 * *num_mapped will contain the size of the mmapobj_result_t array passed in.
 * If padding is non-zero, the mappings will be padded by that amount
 * rounded up to the nearest pagesize.
 * If the mapping is successful, *num_mapped will contain the number of
 * distinct mappings created, and mrp will point to the array of
 * mmapobj_result_t's which describe these mappings.
 *
 * On error, -1 is returned and errno is set appropriately.
 * A special error case will set errno to E2BIG when there are more than
 * *num_mapped mappings to be created and *num_mapped will be set to the
 * number of mappings needed.
 */
int
mmapobj(vnode_t *vp, uint_t flags, mmapobj_result_t *mrp,
    uint_t *num_mapped, size_t padding, cred_t *fcred)
{
    int to_map;
    int error = 0;

    ASSERT((padding & PAGEOFFSET) == 0);
    ASSERT((flags & ~MMOBJ_ALL_FLAGS) == 0);
    ASSERT(num_mapped != NULL);
    ASSERT((flags & MMOBJ_PADDING) ? padding != 0 : padding == 0);

    if ((flags & MMOBJ_INTERPRET) == 0) {
        to_map = padding ? 3 : 1;
        if (*num_mapped < to_map) {
            *num_mapped = to_map;
            MOBJ_STAT_ADD(flat_e2big);
            return (E2BIG);
        }
        error = mmapobj_map_flat(vp, mrp, padding, fcred);

        if (error) {
            return (error);
        }
        *num_mapped = to_map;
        return (0);
    }

    error = mmapobj_map_interpret(vp, mrp, num_mapped, padding, fcred,
        flags);
    return (error);
}
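/*
 * Caller sketch (illustrative, not from the original source): the caller
 * allocates an array of mmapobj_result_t, sets *num_mapped to its element
 * count, and calls mmapobj().  If E2BIG comes back, *num_mapped now holds
 * the required count, so the caller can reallocate the array and retry.
 */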