/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/vmsystm.h>
#include <sys/machsystm.h>
#include <sys/debug.h>
#include <sys/vmparam.h>
#include <sys/machelf.h>
#include <sys/corectl.h>
#include <sys/exechdr.h>
#include <sys/autoconf.h>
#include <vm/seg_dev.h>
#include <sys/mmapobj.h>
#include <sys/atomic.h>
/*
 * The main driving force behind mmapobj is to interpret and map ELF files
 * inside of the kernel instead of having the linker be responsible for this.
 *
 * mmapobj also supports the AOUT 4.x binary format as well as flat files in
 * a read only manner.
 *
 * When interpreting and mapping an ELF file, mmapobj will map each PT_LOAD
 * or PT_SUNWBSS segment according to the ELF standard.  Refer to the "Linker
 * and Libraries Guide" for more information about the standard and mapping
 * rules.
 *
 * Having mmapobj interpret and map objects will allow the kernel to make the
 * best decision for where to place the mappings for said objects.  Thus, we
 * can make optimizations inside of the kernel for specific platforms or
 * cache mapping information to make mapping objects faster.
 *
 * The lib_va_hash will be one such optimization.  For each ELF object that
 * mmapobj is asked to interpret, we will attempt to cache the information
 * about the PT_LOAD and PT_SUNWBSS sections to speed up future mappings of
 * the same objects.  We will cache up to LIBVA_CACHED_SEGS (see below) program
 * headers which should cover a majority of the libraries out there without
 * wasting space.  In order to make sure that the cached information is valid,
 * we check the passed in vnode's mtime and ctime to make sure the vnode
 * has not been modified since the last time we used it.
 *
 * In addition, the lib_va_hash may contain a preferred starting VA for the
 * object which can be useful for platforms which support a shared context.
 * This will increase the likelihood that library text can be shared among
 * many different processes.  We limit the reserved VA space for 32 bit objects
 * in order to minimize fragmenting the process's address space.
 *
 * In addition to the above, the mmapobj interface allows for padding to be
 * requested before the first mapping and after the last mapping created.
 * When padding is requested, no additional optimizations will be made for
 * the request.
 */
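/*
 * For reference, a minimal sketch of how a kernel caller is expected to
 * drive this interface (hypothetical caller; error handling abbreviated).
 * The caller supplies the mmapobj_result_t array and discovers the
 * required array size via the E2BIG convention documented at mmapobj()
 * below:
 *
 *	mmapobj_result_t mrp[16];
 *	uint_t num_mapped = 16;
 *	int err;
 *
 *	err = mmapobj(vp, MMOBJ_INTERPRET, mrp, &num_mapped, 0, CRED());
 *	if (err == E2BIG) {
 *		// num_mapped now holds the number of mappings needed
 *	} else if (err == 0) {
 *		// mrp[0 .. num_mapped - 1] describe the created mappings
 *	}
 */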
/*
 * Threshold to prevent allocating too much kernel memory to read in the
 * program headers for an object.  If it requires more than below,
 * we will use a KM_NOSLEEP allocation to allocate memory to hold all of the
 * program headers which could possibly fail.  If less memory than below is
 * needed, then we use a KM_SLEEP allocation and are willing to wait for the
 * memory if we need to.
 */
size_t mmapobj_alloc_threshold = 65536;
/* Debug stats for test coverage */
#ifdef DEBUG
struct mobj_stats {
	uint_t	mobjs_unmap_called;
	uint_t	mobjs_remap_devnull;
	uint_t	mobjs_lookup_start;
	uint_t	mobjs_alloc_start;
	uint_t	mobjs_alloc_vmem;
	uint_t	mobjs_add_collision;
	uint_t	mobjs_get_addr;
	uint_t	mobjs_map_flat_no_padding;
	uint_t	mobjs_map_flat_padding;
	uint_t	mobjs_map_ptload_text;
	uint_t	mobjs_map_ptload_initdata;
	uint_t	mobjs_map_ptload_preread;
	uint_t	mobjs_map_ptload_unaligned_text;
	uint_t	mobjs_map_ptload_unaligned_map_fail;
	uint_t	mobjs_map_ptload_unaligned_read_fail;
	uint_t	mobjs_zfoddiff;
	uint_t	mobjs_zfoddiff_nowrite;
	uint_t	mobjs_zfodextra;
	uint_t	mobjs_ptload_failed;
	uint_t	mobjs_map_elf_no_holes;
	uint_t	mobjs_unmap_hole;
	uint_t	mobjs_nomem_header;
	uint_t	mobjs_inval_header;
	uint_t	mobjs_overlap_header;
	uint_t	mobjs_np2_align;
	uint_t	mobjs_np2_align_overflow;
	uint_t	mobjs_exec_padding;
	uint_t	mobjs_exec_addr_mapped;
	uint_t	mobjs_exec_addr_devnull;
	uint_t	mobjs_exec_addr_in_use;
	uint_t	mobjs_lvp_found;
	uint_t	mobjs_no_loadable_yet;
	uint_t	mobjs_nothing_to_map;
	uint_t	mobjs_e2big;
	uint_t	mobjs_dyn_pad_align;
	uint_t	mobjs_dyn_pad_noalign;
	uint_t	mobjs_alloc_start_fail;
	uint_t	mobjs_lvp_nocache;
	uint_t	mobjs_extra_padding;
	uint_t	mobjs_lvp_not_needed;
	uint_t	mobjs_no_mem_map_sz;
	uint_t	mobjs_check_exec_failed;
	uint_t	mobjs_lvp_used;
	uint_t	mobjs_wrong_model;
	uint_t	mobjs_noexec_fs;
	uint_t	mobjs_e2big_et_rel;
	uint_t	mobjs_et_rel_mapped;
	uint_t	mobjs_unknown_elf_type;
	uint_t	mobjs_phent32_too_small;
	uint_t	mobjs_phent64_too_small;
	uint_t	mobjs_inval_elf_class;
	uint_t	mobjs_too_many_phdrs;
	uint_t	mobjs_no_phsize;
	uint_t	mobjs_phsize_large;
	uint_t	mobjs_phsize_xtralarge;
	uint_t	mobjs_fast_wrong_model;
	uint_t	mobjs_fast_e2big;
	uint_t	mobjs_fast;
	uint_t	mobjs_fast_success;
	uint_t	mobjs_fast_not_now;
	uint_t	mobjs_small_file;
	uint_t	mobjs_read_error;
	uint_t	mobjs_unsupported;
	uint_t	mobjs_flat_e2big;
	uint_t	mobjs_phent_align32;
	uint_t	mobjs_phent_align64;
	uint_t	mobjs_lib_va_find_hit;
	uint_t	mobjs_lib_va_find_delay_delete;
	uint_t	mobjs_lib_va_find_delete;
	uint_t	mobjs_lib_va_add_delay_delete;
	uint_t	mobjs_lib_va_add_delete;
	uint_t	mobjs_lib_va_create_failure;
	uint_t	mobjs_min_align;
#if defined(__sparc)
	uint_t	mobjs_aout_uzero_fault;
	uint_t	mobjs_aout_64bit_try;
	uint_t	mobjs_aout_noexec;
	uint_t	mobjs_aout_e2big;
	uint_t	mobjs_aout_lib;
	uint_t	mobjs_aout_fixed;
	uint_t	mobjs_aout_zfoddiff;
	uint_t	mobjs_aout_map_bss;
	uint_t	mobjs_aout_bss_fail;
	uint_t	mobjs_aout_nlist;
	uint_t	mobjs_aout_addr_in_use;
#endif
} mobj_stats;

#define	MOBJ_STAT_ADD(stat)		((mobj_stats.mobjs_##stat)++)
#else
#define	MOBJ_STAT_ADD(stat)
#endif
/*
 * Check if addr is at or above the address space reserved for the stack.
 * The stack is at the top of the address space for all sparc processes
 * and 64 bit x86 processes.  For 32 bit x86, the stack is not at the top
 * of the address space and thus this check will always return false for
 * 32 bit x86 processes.
 */
#if defined(__sparc)
#define	OVERLAPS_STACK(addr, p)						\
	(addr >= (p->p_usrstack - ((p->p_stk_ctl + PAGEOFFSET) & PAGEMASK)))
#elif defined(__amd64)
#define	OVERLAPS_STACK(addr, p)						\
	((p->p_model == DATAMODEL_LP64) &&				\
	(addr >= (p->p_usrstack - ((p->p_stk_ctl + PAGEOFFSET) & PAGEMASK))))
#elif defined(__i386)
#define	OVERLAPS_STACK(addr, p)	0
#endif
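/*
 * Worked example (illustrative numbers only): for a 64 bit process with
 * p_usrstack at 0xfffffd7fffe00000 and a stack size limit p_stk_ctl of
 * 0x800000 (8MB, already page aligned), OVERLAPS_STACK() is true for any
 * addr >= 0xfffffd7fff600000, i.e. anything falling inside the region
 * reserved for stack growth.
 */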
/* lv_flags values - bitmap */
#define	LV_ELF32	0x1		/* 32 bit ELF file */
#define	LV_ELF64	0x2		/* 64 bit ELF file */
#define	LV_DEL		0x4		/* delete when lv_refcnt hits zero */

/*
 * Note: lv_num_segs will denote how many segments this file has and will
 * only be set after the lv_mps array has been filled out.
 * lv_mps can only be valid if lv_num_segs is non-zero.
 */
struct lib_va {
	struct lib_va	*lv_next;
	caddr_t		lv_base_va;	/* start va for library */
	ssize_t		lv_len;		/* total va span of library */
	size_t		lv_align;	/* minimum alignment */
	uint64_t	lv_nodeid;	/* filesystem node id */
	uint64_t	lv_fsid;	/* filesystem id */
	timestruc_t	lv_ctime;	/* last time file was changed */
	timestruc_t	lv_mtime;	/* or modified */
	mmapobj_result_t lv_mps[LIBVA_CACHED_SEGS]; /* cached pheaders */
	int		lv_num_segs;	/* # segs for this file */
	int		lv_flags;
	uint_t		lv_refcnt;	/* number of holds on struct */
};
#define	LIB_VA_SIZE	1024
#define	LIB_VA_MASK	(LIB_VA_SIZE - 1)
#define	LIB_VA_MUTEX_SHIFT	3

#if (LIB_VA_SIZE & (LIB_VA_SIZE - 1))
#error	"LIB_VA_SIZE is not a power of 2"
#endif

static struct lib_va *lib_va_hash[LIB_VA_SIZE];
static kmutex_t lib_va_hash_mutex[LIB_VA_SIZE >> LIB_VA_MUTEX_SHIFT];

#define	LIB_VA_HASH_MUTEX(index)					\
	(&lib_va_hash_mutex[index >> LIB_VA_MUTEX_SHIFT])

#define	LIB_VA_HASH(nodeid)						\
	(((nodeid) ^ ((nodeid) << 7) ^ ((nodeid) << 13)) & LIB_VA_MASK)

#define	LIB_VA_MATCH_ID(arg1, arg2)					\
	((arg1)->lv_nodeid == (arg2)->va_nodeid &&			\
	(arg1)->lv_fsid == (arg2)->va_fsid)

#define	LIB_VA_MATCH_TIME(arg1, arg2)					\
	((arg1)->lv_ctime.tv_sec == (arg2)->va_ctime.tv_sec &&		\
	(arg1)->lv_mtime.tv_sec == (arg2)->va_mtime.tv_sec &&		\
	(arg1)->lv_ctime.tv_nsec == (arg2)->va_ctime.tv_nsec &&	\
	(arg1)->lv_mtime.tv_nsec == (arg2)->va_mtime.tv_nsec)

#define	LIB_VA_MATCH(arg1, arg2)					\
	(LIB_VA_MATCH_ID(arg1, arg2) && LIB_VA_MATCH_TIME(arg1, arg2))
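/*
 * For illustration, the canonical lookup built from the macros above
 * (this is essentially what lib_va_find() below does, minus the
 * handling of stale entries):
 *
 *	uint_t index = LIB_VA_HASH(vap->va_nodeid);
 *	struct lib_va *lvp;
 *
 *	mutex_enter(LIB_VA_HASH_MUTEX(index));
 *	for (lvp = lib_va_hash[index]; lvp != NULL; lvp = lvp->lv_next) {
 *		if (LIB_VA_MATCH(lvp, vap))
 *			break;	// same file, unmodified since cached
 *	}
 *	mutex_exit(LIB_VA_HASH_MUTEX(index));
 *
 * Note that LIB_VA_MUTEX_SHIFT means each mutex protects
 * 2^LIB_VA_MUTEX_SHIFT (8) consecutive hash chains, so 128 locks cover
 * the 1024 chains.
 */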
/*
 * lib_va will be used for optimized allocation of address ranges for
 * libraries, such that subsequent mappings of the same library will attempt
 * to use the same VA as previous mappings of that library.
 * In order to map libraries at the same VA in many processes, we need to carve
 * out our own address space for them which is unique across many processes.
 * We use different arenas for 32 bit and 64 bit libraries.
 *
 * Since the 32 bit address space is relatively small, we limit the number of
 * libraries which try to use consistent virtual addresses to lib_threshold.
 * For 64 bit libraries there is no such limit since the address space is
 * large.
 */
static vmem_t *lib_va_32_arena;
static vmem_t *lib_va_64_arena;
uint_t lib_threshold = 20;	/* modifiable via /etc/system */

static kmutex_t lib_va_init_mutex;	/* no need to initialize */

/*
 * Number of 32 bit and 64 bit libraries in lib_va hash.
 */
static uint_t libs_mapped_32 = 0;
static uint_t libs_mapped_64 = 0;
/*
 * Free up the resources associated with lvp as well as lvp itself.
 * We also decrement the number of libraries mapped via a lib_va
 * cached virtual address.
 */
void
lib_va_free(struct lib_va *lvp)
{
	int is_64bit = lvp->lv_flags & LV_ELF64;
	ASSERT(lvp->lv_refcnt == 0);

	if (lvp->lv_base_va != NULL) {
		vmem_xfree(is_64bit ? lib_va_64_arena : lib_va_32_arena,
		    lvp->lv_base_va, lvp->lv_len);
		if (is_64bit) {
			atomic_dec_32(&libs_mapped_64);
		} else {
			atomic_dec_32(&libs_mapped_32);
		}
	}
	kmem_free(lvp, sizeof (struct lib_va));
}
/*
 * See if the file associated with the vap passed in is in the lib_va hash.
 * If it is and the file has not been modified since last use, then
 * return a pointer to that data.  Otherwise, return NULL if the file has
 * changed or the file was not found in the hash.
 */
static struct lib_va *
lib_va_find(vattr_t *vap)
{
	struct lib_va *lvp;
	struct lib_va *del = NULL;
	struct lib_va **tmp;
	uint_t index;

	index = LIB_VA_HASH(vap->va_nodeid);

	mutex_enter(LIB_VA_HASH_MUTEX(index));
	tmp = &lib_va_hash[index];
	while (*tmp != NULL) {
		lvp = *tmp;
		if (LIB_VA_MATCH_ID(lvp, vap)) {
			if (LIB_VA_MATCH_TIME(lvp, vap)) {
				ASSERT((lvp->lv_flags & LV_DEL) == 0);
				lvp->lv_refcnt++;
				MOBJ_STAT_ADD(lib_va_find_hit);
			} else {
				/*
				 * file was updated since last use.
				 * need to remove it from list.
				 */
				del = lvp;
				*tmp = del->lv_next;
				del->lv_next = NULL;
				/*
				 * If we can't delete it now, mark it for later
				 */
				if (del->lv_refcnt) {
					MOBJ_STAT_ADD(lib_va_find_delay_delete);
					del->lv_flags |= LV_DEL;
					del = NULL;
				}
				lvp = NULL;
			}
			mutex_exit(LIB_VA_HASH_MUTEX(index));
			if (del) {
				ASSERT(del->lv_refcnt == 0);
				MOBJ_STAT_ADD(lib_va_find_delete);
				lib_va_free(del);
			}
			return (lvp);
		}
		tmp = &lvp->lv_next;
	}
	mutex_exit(LIB_VA_HASH_MUTEX(index));
	return (NULL);
}
/*
 * Add a new entry to the lib_va hash.
 * Search the hash while holding the appropriate mutex to make sure that the
 * data is not already in the cache.  If we find data that is in the cache
 * already and has not been modified since last use, we return NULL.  If it
 * has been modified since last use, we will remove that entry from
 * the hash and it will be deleted once its reference count reaches zero.
 * If there is no current entry in the hash we will add the new entry and
 * return it to the caller who is responsible for calling lib_va_release to
 * drop their reference count on it.
 *
 * lv_num_segs will be set to zero since the caller needs to add that
 * information to the data structure.
 */
static struct lib_va *
lib_va_add_hash(caddr_t base_va, ssize_t len, size_t align, vattr_t *vap)
{
	struct lib_va *lvp;
	uint_t index;
	model_t model;
	struct lib_va **tmp;
	struct lib_va *del = NULL;

	model = get_udatamodel();
	index = LIB_VA_HASH(vap->va_nodeid);

	lvp = kmem_alloc(sizeof (struct lib_va), KM_SLEEP);

	mutex_enter(LIB_VA_HASH_MUTEX(index));

	/*
	 * Make sure not adding same data a second time.
	 * The hash chains should be relatively short and adding
	 * is a relatively rare event, so it's worth the check.
	 */
	tmp = &lib_va_hash[index];
	while (*tmp != NULL) {
		if (LIB_VA_MATCH_ID(*tmp, vap)) {
			if (LIB_VA_MATCH_TIME(*tmp, vap)) {
				mutex_exit(LIB_VA_HASH_MUTEX(index));
				kmem_free(lvp, sizeof (struct lib_va));
				return (NULL);
			}

			/*
			 * We have the same nodeid and fsid but the file has
			 * been modified since we last saw it.
			 * Need to remove the old node and add this new
			 * one.
			 * Could probably use a callback mechanism to make
			 * this cleaner.
			 */
			ASSERT(del == NULL);
			del = *tmp;
			*tmp = del->lv_next;
			del->lv_next = NULL;

			/*
			 * Check to see if we can free it.  If lv_refcnt
			 * is greater than zero, then some other thread
			 * has a reference to the one we want to delete
			 * and we can not delete it.  All of this is done
			 * under the lib_va_hash_mutex lock so it is atomic.
			 */
			if (del->lv_refcnt) {
				MOBJ_STAT_ADD(lib_va_add_delay_delete);
				del->lv_flags |= LV_DEL;
				del = NULL;
			}
			/* tmp is already advanced */
			continue;
		}
		tmp = &((*tmp)->lv_next);
	}

	lvp->lv_base_va = base_va;
	lvp->lv_len = len;
	lvp->lv_align = align;
	lvp->lv_nodeid = vap->va_nodeid;
	lvp->lv_fsid = vap->va_fsid;
	lvp->lv_ctime.tv_sec = vap->va_ctime.tv_sec;
	lvp->lv_ctime.tv_nsec = vap->va_ctime.tv_nsec;
	lvp->lv_mtime.tv_sec = vap->va_mtime.tv_sec;
	lvp->lv_mtime.tv_nsec = vap->va_mtime.tv_nsec;
	lvp->lv_next = NULL;
	lvp->lv_refcnt = 1;

	/* Caller responsible for filling this and lv_mps out */
	lvp->lv_num_segs = 0;

	if (model == DATAMODEL_LP64) {
		lvp->lv_flags = LV_ELF64;
	} else {
		ASSERT(model == DATAMODEL_ILP32);
		lvp->lv_flags = LV_ELF32;
	}

	if (base_va != NULL) {
		if (model == DATAMODEL_LP64) {
			atomic_inc_32(&libs_mapped_64);
		} else {
			ASSERT(model == DATAMODEL_ILP32);
			atomic_inc_32(&libs_mapped_32);
		}
	}
	ASSERT(*tmp == NULL);
	*tmp = lvp;
	mutex_exit(LIB_VA_HASH_MUTEX(index));
	if (del) {
		ASSERT(del->lv_refcnt == 0);
		MOBJ_STAT_ADD(lib_va_add_delete);
		lib_va_free(del);
	}
	return (lvp);
}
/*
 * Release the hold on lvp which was acquired by lib_va_find or lib_va_add_hash.
 * In addition, if this is the last hold and lvp is marked for deletion,
 * free up its reserved address space and free the structure.
 */
static void
lib_va_release(struct lib_va *lvp)
{
	uint_t index;
	int to_del = 0;

	ASSERT(lvp->lv_refcnt > 0);

	index = LIB_VA_HASH(lvp->lv_nodeid);
	mutex_enter(LIB_VA_HASH_MUTEX(index));
	if (--lvp->lv_refcnt == 0 && (lvp->lv_flags & LV_DEL)) {
		to_del = 1;
	}
	mutex_exit(LIB_VA_HASH_MUTEX(index));
	if (to_del) {
		ASSERT(lvp->lv_next == 0);
		lib_va_free(lvp);
	}
}
/*
 * Dummy function for mapping through /dev/null.
 * Normally mmmmap in common/io/mem.c would be used,
 * but that is a static function and thus not accessible
 * from here for the /dev/null case.
 */
/*ARGSUSED*/
static int
mmapobj_dummy(dev_t dev, off_t off, int prot)
{
	return (-1);
}
/*
 * Called when an error occurred which requires mmapobj to return failure.
 * All mapped objects will be unmapped and /dev/null mappings will be
 * reclaimed if necessary.
 * num_mapped is the number of elements of mrp which have been mapped, and
 * num_segs is the total number of elements in mrp.
 * For e_type ET_EXEC, we need to unmap all of the elements in mrp since
 * we had already made reservations for them.
 * If num_mapped equals num_segs, then we know that we had fully mapped
 * the file and only need to clean up the segments described.
 * If they are not equal, then for ET_DYN we will unmap the range from the
 * end of the last mapped segment to the end of the last segment in mrp
 * since we would have made a reservation for that memory earlier.
 * If e_type is passed in as zero, num_mapped must equal num_segs.
 */
void
mmapobj_unmap(mmapobj_result_t *mrp, int num_mapped, int num_segs,
    ushort_t e_type)
{
	int i;
	struct as *as = curproc->p_as;
	caddr_t addr;
	size_t size;

	if (e_type == ET_EXEC) {
		num_mapped = num_segs;
	}
#ifdef DEBUG
	if (e_type == 0) {
		ASSERT(num_mapped == num_segs);
	}
#endif

	MOBJ_STAT_ADD(unmap_called);
	for (i = 0; i < num_mapped; i++) {

		/*
		 * If we are going to have to create a mapping we need to
		 * make sure that no one else will use the address we
		 * need to remap between the time it is unmapped and
		 * mapped below.
		 */
		if (mrp[i].mr_flags & MR_RESV) {
			as_rangelock(as);
		}
		/* Always need to unmap what we mapped */
		(void) as_unmap(as, mrp[i].mr_addr, mrp[i].mr_msize);

		/* Need to reclaim /dev/null reservation from earlier */
		if (mrp[i].mr_flags & MR_RESV) {
			struct segdev_crargs dev_a;

			ASSERT(e_type != ET_DYN);
			/*
			 * Use seg_dev segment driver for /dev/null mapping.
			 */
			dev_a.mapfunc = mmapobj_dummy;
			dev_a.dev = makedevice(mm_major, M_NULL);
			dev_a.offset = 0;
			dev_a.type = 0;		/* neither PRIVATE nor SHARED */
			dev_a.prot = dev_a.maxprot = (uchar_t)PROT_NONE;
			dev_a.hat_attr = 0;
			dev_a.hat_flags = 0;

			(void) as_map(as, mrp[i].mr_addr, mrp[i].mr_msize,
			    segdev_create, &dev_a);
			MOBJ_STAT_ADD(remap_devnull);
			as_rangeunlock(as);
		}
	}

	if (num_mapped != num_segs) {
		ASSERT(e_type == ET_DYN);
		/* Need to unmap any reservation made after last mapped seg */
		if (num_mapped == 0) {
			addr = mrp[0].mr_addr;
		} else {
			addr = mrp[num_mapped - 1].mr_addr +
			    mrp[num_mapped - 1].mr_msize;
		}
		size = (size_t)mrp[num_segs - 1].mr_addr +
		    mrp[num_segs - 1].mr_msize - (size_t)addr;
		(void) as_unmap(as, addr, size);

		/*
		 * Now we need to unmap the holes between mapped segs.
		 * Note that we have not mapped all of the segments and thus
		 * the holes between segments would not have been unmapped
		 * yet.  If num_mapped == num_segs, then all of the holes
		 * between segments would have already been unmapped.
		 */
		for (i = 1; i < num_mapped; i++) {
			addr = mrp[i - 1].mr_addr + mrp[i - 1].mr_msize;
			size = mrp[i].mr_addr - addr;
			(void) as_unmap(as, addr, size);
		}
	}
}
/*
 * We need to add the start address into mrp so that the unmap function
 * has absolute addresses to use.
 */
static void
mmapobj_unmap_exec(mmapobj_result_t *mrp, int num_mapped, caddr_t start_addr)
{
	int i;

	for (i = 0; i < num_mapped; i++) {
		mrp[i].mr_addr += (size_t)start_addr;
	}
	mmapobj_unmap(mrp, num_mapped, num_mapped, ET_EXEC);
}
static caddr_t
mmapobj_lookup_start_addr(struct lib_va *lvp)
{
	proc_t *p = curproc;
	struct as *as = p->p_as;
	struct segvn_crargs crargs = SEGVN_ZFOD_ARGS(PROT_USER, PROT_ALL);
	int error;
	uint_t ma_flags = _MAP_LOW32;
	caddr_t base = NULL;
	size_t len;
	size_t align;

	ASSERT(lvp != NULL);
	MOBJ_STAT_ADD(lookup_start);

	as_rangelock(as);

	base = lvp->lv_base_va;
	len = lvp->lv_len;

	/*
	 * If we don't have an expected base address, or the one that we want
	 * to use is not available or acceptable, go get an acceptable
	 * address range.
	 */
	if (base == NULL || as_gap(as, len, &base, &len, 0, NULL) ||
	    valid_usr_range(base, len, PROT_ALL, as, as->a_userlimit) !=
	    RANGE_OKAY || OVERLAPS_STACK(base + len, p)) {
		if (lvp->lv_flags & LV_ELF64) {
			ma_flags = 0;
		}

		align = lvp->lv_align;
		if (align > 1) {
			ma_flags |= MAP_ALIGN;
		}

		base = (caddr_t)align;
		map_addr(&base, len, 0, 1, ma_flags);
	}

	/*
	 * Need to reserve the address space we're going to use.
	 * Don't reserve swap space since we'll be mapping over this.
	 */
	if (base != NULL) {
		crargs.flags |= MAP_NORESERVE;
		error = as_map(as, base, len, segvn_create, &crargs);
		if (error) {
			base = NULL;
		}
	}

	as_rangeunlock(as);
	return (base);
}
/*
 * Get the starting address for a given file to be mapped and return it
 * to the caller.  If we're using lib_va and we need to allocate an address,
 * we will attempt to allocate it from the global reserved pool such that the
 * same address can be used in the future for this file.  If we can't use the
 * reserved address then we just get one that will fit in our address space.
 *
 * Returns the starting virtual address for the range to be mapped or NULL
 * if an error is encountered.  If we successfully insert the requested info
 * into the lib_va hash, then *lvpp will be set to point to this lib_va
 * structure.  The structure will have a hold on it and thus lib_va_release
 * needs to be called on it by the caller.  This function will not fill out
 * lv_mps or lv_num_segs since it does not have enough information to do so.
 * The caller is responsible for doing this making sure that any modifications
 * to lv_mps are visible before setting lv_num_segs.
 */
static caddr_t
mmapobj_alloc_start_addr(struct lib_va **lvpp, size_t len, int use_lib_va,
    size_t align, vattr_t *vap)
{
	proc_t *p = curproc;
	struct as *as = p->p_as;
	struct segvn_crargs crargs = SEGVN_ZFOD_ARGS(PROT_USER, PROT_ALL);
	int error;
	model_t model;
	uint_t ma_flags = _MAP_LOW32;
	caddr_t base = NULL;
	vmem_t *model_vmem;
	size_t lib_va_start;
	size_t lib_va_end;
	size_t lib_va_len;

	ASSERT(lvpp != NULL);

	MOBJ_STAT_ADD(alloc_start);
	model = get_udatamodel();

	if (model == DATAMODEL_LP64) {
		ma_flags = 0;
		model_vmem = lib_va_64_arena;
	} else {
		ASSERT(model == DATAMODEL_ILP32);
		model_vmem = lib_va_32_arena;
	}

	if (align > 1) {
		ma_flags |= MAP_ALIGN;
	}
	if (use_lib_va) {
		/*
		 * The first time through, we need to setup the lib_va arenas.
		 * We call map_addr to find a suitable range of memory to map
		 * the given library, and we will set the highest address
		 * in our vmem arena to the end of this address range.
		 * We allow up to half of the address space to be used
		 * for lib_va addresses but we do not prevent any allocations
		 * in this range from other allocation paths.
		 */
		if (lib_va_64_arena == NULL && model == DATAMODEL_LP64) {
			mutex_enter(&lib_va_init_mutex);
			if (lib_va_64_arena == NULL) {
				base = (caddr_t)align;
				as_rangelock(as);
				map_addr(&base, len, 0, 1, ma_flags);
				as_rangeunlock(as);
				if (base == NULL) {
					mutex_exit(&lib_va_init_mutex);
					MOBJ_STAT_ADD(lib_va_create_failure);
					goto nolibva;
				}
				lib_va_end = (size_t)base + len;
				lib_va_len = lib_va_end >> 1;
				lib_va_len = P2ROUNDUP(lib_va_len, PAGESIZE);
				lib_va_start = lib_va_end - lib_va_len;

				/*
				 * Need to make sure we avoid the address hole.
				 * We know lib_va_end is valid but we need to
				 * make sure lib_va_start is as well.
				 */
				if ((lib_va_end > (size_t)hole_end) &&
				    (lib_va_start < (size_t)hole_end)) {
					lib_va_start = P2ROUNDUP(
					    (size_t)hole_end, PAGESIZE);
					lib_va_len = lib_va_end - lib_va_start;
				}
				lib_va_64_arena = vmem_create("lib_va_64",
				    (void *)lib_va_start, lib_va_len, PAGESIZE,
				    NULL, NULL, NULL, 0,
				    VM_NOSLEEP | VMC_IDENTIFIER);
				if (lib_va_64_arena == NULL) {
					mutex_exit(&lib_va_init_mutex);
					goto nolibva;
				}
			}
			model_vmem = lib_va_64_arena;
			mutex_exit(&lib_va_init_mutex);
		} else if (lib_va_32_arena == NULL &&
		    model == DATAMODEL_ILP32) {
			mutex_enter(&lib_va_init_mutex);
			if (lib_va_32_arena == NULL) {
				base = (caddr_t)align;
				as_rangelock(as);
				map_addr(&base, len, 0, 1, ma_flags);
				as_rangeunlock(as);
				if (base == NULL) {
					mutex_exit(&lib_va_init_mutex);
					MOBJ_STAT_ADD(lib_va_create_failure);
					goto nolibva;
				}
				lib_va_end = (size_t)base + len;
				lib_va_len = lib_va_end >> 1;
				lib_va_len = P2ROUNDUP(lib_va_len, PAGESIZE);
				lib_va_start = lib_va_end - lib_va_len;
				lib_va_32_arena = vmem_create("lib_va_32",
				    (void *)lib_va_start, lib_va_len, PAGESIZE,
				    NULL, NULL, NULL, 0,
				    VM_NOSLEEP | VMC_IDENTIFIER);
				if (lib_va_32_arena == NULL) {
					mutex_exit(&lib_va_init_mutex);
					goto nolibva;
				}
			}
			model_vmem = lib_va_32_arena;
			mutex_exit(&lib_va_init_mutex);
		}

		if (model == DATAMODEL_LP64 ||
		    libs_mapped_32 < lib_threshold) {
			base = vmem_xalloc(model_vmem, len, align, 0, 0, NULL,
			    NULL, VM_NOSLEEP | VM_ENDALLOC);
			MOBJ_STAT_ADD(alloc_vmem);
		}

		/*
		 * Even if the address fails to fit in our address space,
		 * or we can't use a reserved address,
		 * we should still save it off in lib_va_hash.
		 */
		*lvpp = lib_va_add_hash(base, len, align, vap);

		/*
		 * Check for collision on insertion and free up our VA space.
		 * This is expected to be rare, so we'll just reset base to
		 * NULL instead of looking it up in the lib_va hash.
		 */
		if (*lvpp == NULL) {
			if (base != NULL) {
				vmem_xfree(model_vmem, base, len);
				base = NULL;
				MOBJ_STAT_ADD(add_collision);
			}
		}
	}

nolibva:
	as_rangelock(as);

	/*
	 * If we don't have an expected base address, or the one that we want
	 * to use is not available or acceptable, go get an acceptable
	 * address range.
	 */
	if (base == NULL || as_gap(as, len, &base, &len, 0, NULL) ||
	    valid_usr_range(base, len, PROT_ALL, as, as->a_userlimit) !=
	    RANGE_OKAY || OVERLAPS_STACK(base + len, p)) {
		MOBJ_STAT_ADD(get_addr);
		base = (caddr_t)align;
		map_addr(&base, len, 0, 1, ma_flags);
	}

	/*
	 * Need to reserve the address space we're going to use.
	 * Don't reserve swap space since we'll be mapping over this.
	 */
	if (base != NULL) {
		crargs.flags |= MAP_NORESERVE;
		error = as_map(as, base, len, segvn_create, &crargs);
		if (error) {
			base = NULL;
		}
	}

	as_rangeunlock(as);
	return (base);
}
/*
 * Map the file associated with vp into the address space as a single
 * read only private mapping.
 * Returns 0 for success, and non-zero for failure to map the file.
 */
static int
mmapobj_map_flat(vnode_t *vp, mmapobj_result_t *mrp, size_t padding,
    cred_t *fcred)
{
	int error = 0;
	struct as *as = curproc->p_as;
	caddr_t addr = NULL;
	caddr_t start_addr;
	size_t len;
	size_t pad_len;
	int prot = PROT_USER | PROT_READ;
	uint_t ma_flags = _MAP_LOW32;
	vattr_t vattr;
	struct segvn_crargs crargs = SEGVN_ZFOD_ARGS(PROT_USER, PROT_ALL);

	if (get_udatamodel() == DATAMODEL_LP64) {
		ma_flags = 0;
	}

	vattr.va_mask = AT_SIZE;
	error = VOP_GETATTR(vp, &vattr, 0, fcred, NULL);
	if (error) {
		return (error);
	}

	len = vattr.va_size;

	ma_flags |= MAP_PRIVATE;
	if (padding == 0) {
		MOBJ_STAT_ADD(map_flat_no_padding);
		error = VOP_MAP(vp, 0, as, &addr, len, prot, PROT_ALL,
		    ma_flags, fcred, NULL);
		if (error == 0) {
			mrp[0].mr_addr = addr;
			mrp[0].mr_msize = len;
			mrp[0].mr_fsize = len;
			mrp[0].mr_offset = 0;
			mrp[0].mr_prot = prot;
			mrp[0].mr_flags = 0;
		}
		return (error);
	}

	/* padding was requested so there's more work to be done */
	MOBJ_STAT_ADD(map_flat_padding);

	/* No need to reserve swap space now since it will be reserved later */
	crargs.flags |= MAP_NORESERVE;

	/* Need to setup padding which can only be in PAGESIZE increments. */
	ASSERT((padding & PAGEOFFSET) == 0);
	pad_len = len + (2 * padding);

	as_rangelock(as);
	map_addr(&addr, pad_len, 0, 1, ma_flags);
	error = as_map(as, addr, pad_len, segvn_create, &crargs);
	as_rangeunlock(as);
	if (error) {
		return (error);
	}
	start_addr = addr;
	addr += padding;
	ma_flags |= MAP_FIXED;
	error = VOP_MAP(vp, 0, as, &addr, len, prot, PROT_ALL, ma_flags,
	    fcred, NULL);
	if (error == 0) {
		mrp[0].mr_addr = start_addr;
		mrp[0].mr_msize = padding;
		mrp[0].mr_fsize = 0;
		mrp[0].mr_offset = 0;
		mrp[0].mr_prot = 0;
		mrp[0].mr_flags = MR_PADDING;

		mrp[1].mr_addr = addr;
		mrp[1].mr_msize = len;
		mrp[1].mr_fsize = len;
		mrp[1].mr_offset = 0;
		mrp[1].mr_prot = prot;
		mrp[1].mr_flags = 0;

		mrp[2].mr_addr = addr + P2ROUNDUP(len, PAGESIZE);
		mrp[2].mr_msize = padding;
		mrp[2].mr_fsize = 0;
		mrp[2].mr_offset = 0;
		mrp[2].mr_prot = 0;
		mrp[2].mr_flags = MR_PADDING;
	} else {
		/* Need to cleanup the as_map from earlier */
		(void) as_unmap(as, start_addr, pad_len);
	}
	return (error);
}
/*
 * Map a PT_LOAD or PT_SUNWBSS section of an executable file into the user's
 * address space.
 * vp - vnode to be mapped in
 * addr - start address
 * len - length of vp to be mapped
 * zfodlen - length of zero filled memory after len above
 * offset - offset into file where mapping should start
 * prot - protections for this mapping
 * fcred - credentials for the file associated with vp at open time.
 */
static int
mmapobj_map_ptload(struct vnode *vp, caddr_t addr, size_t len, size_t zfodlen,
    off_t offset, int prot, cred_t *fcred)
{
	int error = 0;
	caddr_t zfodbase, oldaddr;
	size_t oldlen;
	size_t end;
	size_t zfoddiff;
	label_t ljb;
	struct as *as = curproc->p_as;
	model_t model;
	int full_page;

	/*
	 * See if addr and offset are aligned such that we can map in
	 * full pages instead of partial pages.
	 */
	full_page = (((uintptr_t)addr & PAGEOFFSET) ==
	    ((uintptr_t)offset & PAGEOFFSET));

	model = get_udatamodel();

	oldaddr = addr;
	addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
	if (len) {
		spgcnt_t availm, npages;
		int preread;
		uint_t mflag = MAP_PRIVATE | MAP_FIXED;

		if (model == DATAMODEL_ILP32) {
			mflag |= _MAP_LOW32;
		}
		/* We may need to map in extra bytes */
		oldlen = len;
		len += ((size_t)oldaddr & PAGEOFFSET);

		if (full_page) {
			offset = (off_t)((uintptr_t)offset & PAGEMASK);
			if ((prot & (PROT_WRITE | PROT_EXEC)) == PROT_EXEC) {
				mflag |= MAP_TEXT;
				MOBJ_STAT_ADD(map_ptload_text);
			} else {
				mflag |= MAP_INITDATA;
				MOBJ_STAT_ADD(map_ptload_initdata);
			}

			/*
			 * maxprot is passed as PROT_ALL so that mdb can
			 * write to this segment.
			 */
			if (error = VOP_MAP(vp, (offset_t)offset, as, &addr,
			    len, prot, PROT_ALL, mflag, fcred, NULL)) {
				return (error);
			}

			/*
			 * If the segment can fit and is relatively small, then
			 * we prefault the entire segment in.  This is based
			 * on the model that says the best working set of a
			 * small program is all of its pages.
			 * We only do this if freemem will not drop below
			 * lotsfree since we don't want to induce paging.
			 */
			npages = (spgcnt_t)btopr(len);
			availm = freemem - lotsfree;
			preread = (npages < availm && len < PGTHRESH) ? 1 : 0;

			/*
			 * If we aren't prefaulting the segment,
			 * increment "deficit", if necessary to ensure
			 * that pages will become available when this
			 * process starts executing.
			 */
			if (preread == 0 && npages > availm &&
			    deficit < lotsfree) {
				deficit += MIN((pgcnt_t)(npages - availm),
				    lotsfree - deficit);
			}

			if (preread) {
				(void) as_faulta(as, addr, len);
				MOBJ_STAT_ADD(map_ptload_preread);
			}
		} else {
			/*
			 * addr and offset were not aligned such that we could
			 * use VOP_MAP, thus we need to as_map the memory we
			 * need and then read the data in from disk.
			 * This code path is a corner case which should never
			 * be taken, but hand crafted binaries could trigger
			 * this logic and it needs to work correctly.
			 */
			MOBJ_STAT_ADD(map_ptload_unaligned_text);
			as_rangelock(as);
			(void) as_unmap(as, addr, len);

			/*
			 * We use zfod_argsp because we need to be able to
			 * write to the mapping and then we'll change the
			 * protections later if they are incorrect.
			 */
			error = as_map(as, addr, len, segvn_create, zfod_argsp);
			as_rangeunlock(as);
			if (error) {
				MOBJ_STAT_ADD(map_ptload_unaligned_map_fail);
				return (error);
			}

			/* Now read in the data from disk */
			error = vn_rdwr(UIO_READ, vp, oldaddr, oldlen, offset,
			    UIO_USERSPACE, 0, (rlim64_t)0, fcred, NULL);
			if (error) {
				MOBJ_STAT_ADD(map_ptload_unaligned_read_fail);
				return (error);
			}

			/*
			 * Now set protections.
			 */
			if (prot != PROT_ZFOD) {
				(void) as_setprot(as, addr, len, prot);
			}
		}
	}

	if (zfodlen) {
		end = (size_t)addr + len;
		zfodbase = (caddr_t)P2ROUNDUP(end, PAGESIZE);
		zfoddiff = (uintptr_t)zfodbase - end;
		if (zfoddiff) {
			MOBJ_STAT_ADD(zfoddiff);
			if ((prot & PROT_WRITE) == 0) {
				(void) as_setprot(as, (caddr_t)end,
				    zfoddiff, prot | PROT_WRITE);
				MOBJ_STAT_ADD(zfoddiff_nowrite);
			}
			if (on_fault(&ljb)) {
				no_fault();
				if ((prot & PROT_WRITE) == 0) {
					(void) as_setprot(as, (caddr_t)end,
					    zfoddiff, prot);
				}
				return (EFAULT);
			}
			uzero((void *)end, zfoddiff);
			no_fault();

			/*
			 * Remove write protection to return to original state
			 */
			if ((prot & PROT_WRITE) == 0) {
				(void) as_setprot(as, (caddr_t)end,
				    zfoddiff, prot);
			}
		}
		if (zfodlen > zfoddiff) {
			struct segvn_crargs crargs =
			    SEGVN_ZFOD_ARGS(prot, PROT_ALL);

			MOBJ_STAT_ADD(zfodextra);
			zfodlen -= zfoddiff;
			crargs.szc = AS_MAP_NO_LPOOB;

			as_rangelock(as);
			(void) as_unmap(as, (caddr_t)zfodbase, zfodlen);
			error = as_map(as, (caddr_t)zfodbase,
			    zfodlen, segvn_create, &crargs);
			as_rangeunlock(as);
			if (error) {
				return (error);
			}
		}
	}
	return (0);
}
/*
 * Map the ELF file represented by vp into the user's address space.  The
 * first mapping will start at start_addr and there will be num_elements
 * mappings.  The mappings are described by the data in mrp which may be
 * modified upon returning from this function.
 * Returns 0 for success or errno for failure.
 */
static int
mmapobj_map_elf(struct vnode *vp, caddr_t start_addr, mmapobj_result_t *mrp,
    int num_elements, cred_t *fcred, ushort_t e_type)
{
	int i;
	int ret;
	caddr_t lo;
	caddr_t hi;
	struct as *as = curproc->p_as;

	for (i = 0; i < num_elements; i++) {
		caddr_t addr;
		size_t p_memsz;
		size_t p_filesz;
		size_t zfodlen;
		offset_t p_offset;
		size_t dif;
		int prot;

		/* Always need to adjust mr_addr */
		addr = start_addr + (size_t)(mrp[i].mr_addr);
		mrp[i].mr_addr =
		    (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);

		/* Padding has already been mapped */
		if (MR_GET_TYPE(mrp[i].mr_flags) == MR_PADDING) {
			continue;
		}
		p_memsz = mrp[i].mr_msize;
		p_filesz = mrp[i].mr_fsize;
		zfodlen = p_memsz - p_filesz;
		p_offset = mrp[i].mr_offset;
		dif = (uintptr_t)(addr) & PAGEOFFSET;
		prot = mrp[i].mr_prot | PROT_USER;
		ret = mmapobj_map_ptload(vp, addr, p_filesz, zfodlen,
		    p_offset, prot, fcred);
		if (ret != 0) {
			MOBJ_STAT_ADD(ptload_failed);
			mmapobj_unmap(mrp, i, num_elements, e_type);
			return (ret);
		}

		/* Need to cleanup mrp to reflect the actual values used */
		mrp[i].mr_msize += dif;
		mrp[i].mr_offset = (size_t)addr & PAGEOFFSET;
	}

	/* Also need to unmap any holes created above */
	if (num_elements == 1) {
		MOBJ_STAT_ADD(map_elf_no_holes);
		return (0);
	}
	if (e_type == ET_EXEC) {
		return (0);
	}

	as_rangelock(as);
	hi = mrp[0].mr_addr;

	/* Remove holes made by the rest of the segments */
	for (i = 0; i < num_elements - 1; i++) {
		lo = (caddr_t)P2ROUNDUP((size_t)(mrp[i].mr_addr) +
		    mrp[i].mr_msize, PAGESIZE);
		hi = mrp[i + 1].mr_addr;
		if (lo < hi) {
			/*
			 * If as_unmap fails we just use up a bit of extra
			 * space.
			 */
			(void) as_unmap(as, (caddr_t)lo,
			    (size_t)hi - (size_t)lo);
			MOBJ_STAT_ADD(unmap_hole);
		}
	}
	as_rangeunlock(as);

	return (0);
}
/* Ugly hack to get STRUCT_* macros to work below */
struct myphdr {
	Phdr		x;	/* native version */
};

struct myphdr32 {
	Elf32_Phdr	x;
};
/*
 * Calculate and return the number of loadable segments in the ELF Phdr
 * represented by phdrbase as well as the len of the total mapping and
 * the max alignment that is needed for a given segment.  On success,
 * 0 is returned, and *len, *loadable and *align have been filled out.
 * On failure, errno will be returned, which in this case is ENOTSUP
 * if we were passed an ELF file with overlapping segments.
 */
static int
calc_loadable(Ehdr *ehdrp, caddr_t phdrbase, int nphdrs, size_t *len,
    int *loadable, size_t *align)
{
	int i;
	int hsize;
	model_t model;
	ushort_t e_type = ehdrp->e_type;	/* same offset 32 and 64 bit */
	uint_t p_type;
	offset_t p_offset;
	size_t p_memsz;
	size_t p_align;
	caddr_t vaddr;
	int num_segs = 0;
	caddr_t start_addr = NULL;
	caddr_t p_end = NULL;
	size_t max_align = 0;
	size_t min_align = PAGESIZE;	/* needed for vmem_xalloc */
	STRUCT_HANDLE(myphdr, mph);
#if defined(__sparc)
	extern int vac_size;

	/*
	 * Want to prevent aliasing by making the start address at least be
	 * aligned to vac_size.
	 */
	min_align = MAX(PAGESIZE, vac_size);
#endif

	model = get_udatamodel();
	STRUCT_SET_HANDLE(mph, model, (struct myphdr *)phdrbase);

	/* hsize alignment should have been checked before calling this func */
	if (model == DATAMODEL_LP64) {
		hsize = ehdrp->e_phentsize;
		ASSERT((hsize & 7) == 0);
	} else {
		ASSERT(model == DATAMODEL_ILP32);
		hsize = ((Elf32_Ehdr *)ehdrp)->e_phentsize;
		ASSERT((hsize & 3) == 0);
	}

	/*
	 * Determine the span of all loadable segments and calculate the
	 * number of loadable segments.
	 */
	for (i = 0; i < nphdrs; i++) {
		p_type = STRUCT_FGET(mph, x.p_type);
		if (p_type == PT_LOAD || p_type == PT_SUNWBSS) {
			vaddr = (caddr_t)(uintptr_t)STRUCT_FGET(mph, x.p_vaddr);
			p_memsz = STRUCT_FGET(mph, x.p_memsz);

			/*
			 * Skip this header if it requests no memory to be
			 * mapped.
			 */
			if (p_memsz == 0) {
				STRUCT_SET_HANDLE(mph, model,
				    (struct myphdr *)((size_t)STRUCT_BUF(mph) +
				    hsize));
				MOBJ_STAT_ADD(nomem_header);
				continue;
			}
			if (num_segs++ == 0) {
				/*
				 * The p_vaddr of the first PT_LOAD segment
				 * must either be NULL or within the first
				 * page in order to be interpreted.
				 * Otherwise, it's an invalid file.
				 */
				if (e_type == ET_DYN &&
				    ((caddr_t)((uintptr_t)vaddr &
				    (uintptr_t)PAGEMASK) != NULL)) {
					MOBJ_STAT_ADD(inval_header);
					return (ENOTSUP);
				}
				start_addr = vaddr;
				/*
				 * For the first segment, we need to map from
				 * the beginning of the file, so we will
				 * adjust the size of the mapping to include
				 * this memory.
				 */
				p_offset = STRUCT_FGET(mph, x.p_offset);
			} else {
				p_offset = 0;
			}
			/*
			 * Check to make sure that this mapping wouldn't
			 * overlap a previous mapping.
			 */
			if (vaddr < p_end) {
				MOBJ_STAT_ADD(overlap_header);
				return (ENOTSUP);
			}

			p_end = vaddr + p_memsz + p_offset;
			p_end = (caddr_t)P2ROUNDUP((size_t)p_end, PAGESIZE);

			p_align = STRUCT_FGET(mph, x.p_align);
			if (p_align > 1 && p_align > max_align) {
				max_align = p_align;
				if (max_align < min_align) {
					max_align = min_align;
					MOBJ_STAT_ADD(min_align);
				}
			}
		}
		STRUCT_SET_HANDLE(mph, model,
		    (struct myphdr *)((size_t)STRUCT_BUF(mph) + hsize));
	}

	/*
	 * The alignment should be a power of 2, if it isn't we forgive it
	 * and round up.  On overflow, we'll set the alignment to max_align
	 * rounded down to the nearest power of 2.
	 */
	if (max_align > 0 && !ISP2(max_align)) {
		MOBJ_STAT_ADD(np2_align);
		*align = 2 * (1L << (highbit(max_align) - 1));
		if (*align < max_align ||
		    (*align > UINT_MAX && model == DATAMODEL_ILP32)) {
			MOBJ_STAT_ADD(np2_align_overflow);
			*align = 1L << (highbit(max_align) - 1);
		}
	} else {
		*align = max_align;
	}

	ASSERT(*align >= PAGESIZE || *align == 0);

	*loadable = num_segs;
	*len = p_end - start_addr;
	return (0);
}
/*
 * Check the address space to see if the virtual addresses to be used are
 * available.  If they are not, return errno for failure.  On success, 0
 * will be returned, and the virtual addresses for each mmapobj_result_t
 * will be reserved.  Note that a reservation could have earlier been made
 * for a given segment via a /dev/null mapping.  If that is the case, then
 * we can use that VA space for our mappings.
 * Note: this function will only be used for ET_EXEC binaries.
 */
int
check_exec_addrs(int loadable, mmapobj_result_t *mrp, caddr_t start_addr)
{
	int i;
	struct as *as = curproc->p_as;
	struct segvn_crargs crargs = SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);
	int ret;
	caddr_t myaddr;
	size_t mylen;
	struct seg *seg;

	/* No need to reserve swap space now since it will be reserved later */
	crargs.flags |= MAP_NORESERVE;
	as_rangelock(as);
	for (i = 0; i < loadable; i++) {

		myaddr = start_addr + (size_t)mrp[i].mr_addr;
		mylen = mrp[i].mr_msize;

		/* See if there is a hole in the as for this range */
		if (as_gap(as, mylen, &myaddr, &mylen, 0, NULL) == 0) {
			ASSERT(myaddr == start_addr + (size_t)mrp[i].mr_addr);
			ASSERT(mylen == mrp[i].mr_msize);

#ifdef DEBUG
			if (MR_GET_TYPE(mrp[i].mr_flags) == MR_PADDING) {
				MOBJ_STAT_ADD(exec_padding);
			}
#endif
			ret = as_map(as, myaddr, mylen, segvn_create, &crargs);
			if (ret) {
				as_rangeunlock(as);
				mmapobj_unmap_exec(mrp, i, start_addr);
				return (ret);
			}
		} else {
			/*
			 * There is a mapping that exists in the range
			 * so check to see if it was a "reservation"
			 * from /dev/null.  The mapping is from
			 * /dev/null if the mapping comes from
			 * segdev and the type is neither MAP_SHARED
			 * nor MAP_PRIVATE.
			 */
			AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
			seg = as_findseg(as, myaddr, 0);
			MOBJ_STAT_ADD(exec_addr_mapped);
			if (seg && seg->s_ops == &segdev_ops &&
			    ((SEGOP_GETTYPE(seg, myaddr) &
			    (MAP_SHARED | MAP_PRIVATE)) == 0) &&
			    myaddr >= seg->s_base &&
			    myaddr + mylen <=
			    seg->s_base + seg->s_size) {
				MOBJ_STAT_ADD(exec_addr_devnull);
				AS_LOCK_EXIT(as, &as->a_lock);
				(void) as_unmap(as, myaddr, mylen);
				ret = as_map(as, myaddr, mylen, segvn_create,
				    &crargs);
				mrp[i].mr_flags |= MR_RESV;
				if (ret) {
					as_rangeunlock(as);
					/* Need to remap what we unmapped */
					mmapobj_unmap_exec(mrp, i + 1,
					    start_addr);
					return (ret);
				}
			} else {
				AS_LOCK_EXIT(as, &as->a_lock);
				as_rangeunlock(as);
				mmapobj_unmap_exec(mrp, i, start_addr);
				MOBJ_STAT_ADD(exec_addr_in_use);
				return (EADDRINUSE);
			}
		}
	}
	as_rangeunlock(as);
	return (0);
}
/*
 * Walk through the ELF program headers and extract all useful information
 * for PT_LOAD and PT_SUNWBSS segments into mrp.
 * Return 0 on success or error on failure.
 */
static int
process_phdr(Ehdr *ehdrp, caddr_t phdrbase, int nphdrs, mmapobj_result_t *mrp,
    vnode_t *vp, uint_t *num_mapped, size_t padding, cred_t *fcred)
{
	int i;
	caddr_t start_addr = NULL;
	caddr_t vaddr;
	size_t len = 0;
	size_t lib_len = 0;
	int ret;
	int prot;
	struct lib_va *lvp = NULL;
	vattr_t vattr;
	struct as *as = curproc->p_as;
	int error;
	int loadable = 0;
	int current = 0;
	int use_lib_va = 1;
	size_t align = 0;
	size_t add_pad = 0;
	int hdr_seen = 0;
	ushort_t e_type = ehdrp->e_type;	/* same offset 32 and 64 bit */
	uint_t p_type;
	offset_t p_offset;
	size_t p_filesz;
	size_t p_memsz;
	uint_t p_flags;
	int hsize;
	model_t model;
	STRUCT_HANDLE(myphdr, mph);

	model = get_udatamodel();
	STRUCT_SET_HANDLE(mph, model, (struct myphdr *)phdrbase);

	/*
	 * Need to make sure that hsize is aligned properly.
	 * For 32bit processes, 4 byte alignment is required.
	 * For 64bit processes, 8 byte alignment is required.
	 * If the alignment isn't correct, we need to return failure
	 * since it could cause an alignment error panic while walking
	 * the phdr array.
	 */
	if (model == DATAMODEL_LP64) {
		hsize = ehdrp->e_phentsize;
		if (hsize & 7) {
			MOBJ_STAT_ADD(phent_align64);
			return (ENOTSUP);
		}
	} else {
		ASSERT(model == DATAMODEL_ILP32);
		hsize = ((Elf32_Ehdr *)ehdrp)->e_phentsize;
		if (hsize & 3) {
			MOBJ_STAT_ADD(phent_align32);
			return (ENOTSUP);
		}
	}

	if (padding != 0) {
		use_lib_va = 0;
	}
	if (e_type == ET_DYN) {
		vattr.va_mask = AT_FSID | AT_NODEID | AT_CTIME | AT_MTIME;
		error = VOP_GETATTR(vp, &vattr, 0, fcred, NULL);
		if (error) {
			return (error);
		}
		/* Check to see if we already have a description for this lib */
		lvp = lib_va_find(&vattr);

		if (lvp != NULL) {
			MOBJ_STAT_ADD(lvp_found);
			if (use_lib_va) {
				start_addr = mmapobj_lookup_start_addr(lvp);
				if (start_addr == NULL) {
					lib_va_release(lvp);
					return (ENOMEM);
				}
			}

			/*
			 * loadable may be zero if the original allocator
			 * of lvp hasn't finished setting it up but the rest
			 * of the fields will be accurate.
			 */
			loadable = lvp->lv_num_segs;
			len = lvp->lv_len;
			align = lvp->lv_align;
		}
	}

	/*
	 * Determine the span of all loadable segments and calculate the
	 * number of loadable segments, the total len spanned by the mappings
	 * and the max alignment, if we didn't get them above.
	 */
	if (loadable == 0) {
		MOBJ_STAT_ADD(no_loadable_yet);
		ret = calc_loadable(ehdrp, phdrbase, nphdrs, &len,
		    &loadable, &align);
		if (ret != 0) {
			/*
			 * Since it'd be an invalid file, we shouldn't have
			 * cached it previously.
			 */
			ASSERT(lvp == NULL);
			return (ret);
		}
#ifdef DEBUG
		if (lvp) {
			ASSERT(len == lvp->lv_len);
			ASSERT(align == lvp->lv_align);
		}
#endif
	}

	/* Make sure there's something to map. */
	if (len == 0 || loadable == 0) {
		/*
		 * Since it'd be an invalid file, we shouldn't have
		 * cached it previously.
		 */
		ASSERT(lvp == NULL);
		MOBJ_STAT_ADD(nothing_to_map);
		return (ENOTSUP);
	}

	lib_len = len;
	if (padding != 0) {
		loadable += 2;
	}
	if (loadable > *num_mapped) {
		*num_mapped = loadable;
		/* cleanup previous reservation */
		if (start_addr) {
			(void) as_unmap(as, start_addr, lib_len);
		}
		MOBJ_STAT_ADD(e2big);
		if (lvp) {
			lib_va_release(lvp);
		}
		return (E2BIG);
	}

	/*
	 * We now know the size of the object to map and now we need to
	 * get the start address to map it at.  It's possible we already
	 * have it if we found all the info we need in the lib_va cache.
	 */
	if (e_type == ET_DYN && start_addr == NULL) {
		/*
		 * Need to make sure padding does not throw off
		 * required alignment.  We can only specify an
		 * alignment for the starting address to be mapped,
		 * so we round padding up to the alignment and map
		 * from there and then throw out the extra later.
		 */
		if (padding != 0) {
			if (align > 1) {
				add_pad = P2ROUNDUP(padding, align);
				len += add_pad;
				MOBJ_STAT_ADD(dyn_pad_align);
			} else {
				MOBJ_STAT_ADD(dyn_pad_noalign);
				len += padding;	/* at beginning */
			}
			len += padding;	/* at end of mapping */
		}
		/*
		 * At this point, if lvp is non-NULL, then above we
		 * already found it in the cache but did not get
		 * the start address since we were not going to use lib_va.
		 * Since we know that lib_va will not be used, it's safe
		 * to call mmapobj_alloc_start_addr and know that lvp
		 * will not be modified.
		 */
		ASSERT(lvp ? use_lib_va == 0 : 1);
		start_addr = mmapobj_alloc_start_addr(&lvp, len,
		    use_lib_va, align, &vattr);
		if (start_addr == NULL) {
			if (lvp) {
				lib_va_release(lvp);
			}
			MOBJ_STAT_ADD(alloc_start_fail);
			return (ENOMEM);
		}
		/*
		 * If we can't cache it, no need to hang on to it.
		 * Setting lv_num_segs to non-zero will make that
		 * field active and since there are too many segments
		 * to cache, all future users will not try to use lv_mps.
		 */
		if (lvp != NULL && loadable > LIBVA_CACHED_SEGS && use_lib_va) {
			lvp->lv_num_segs = loadable;
			lib_va_release(lvp);
			lvp = NULL;
			MOBJ_STAT_ADD(lvp_nocache);
		}
		/*
		 * Free the beginning of the mapping if the padding
		 * was not aligned correctly.
		 */
		if (padding != 0 && add_pad != padding) {
			(void) as_unmap(as, start_addr,
			    add_pad - padding);
			start_addr += (add_pad - padding);
			MOBJ_STAT_ADD(extra_padding);
		}
	}

	/*
	 * At this point, we have reserved the virtual address space
	 * for our mappings.  Now we need to start filling out the mrp
	 * array to describe all of the individual mappings we are going
	 * to return.
	 * For ET_EXEC there has been no memory reservation since we are
	 * using fixed addresses.  While filling in the mrp array below,
	 * we will have the first segment biased to start at addr 0
	 * and the rest will be biased by this same amount.  Thus if there
	 * is padding, the first padding will start at addr 0, and the next
	 * segment will start at the value of padding.
	 */

	/* We'll fill out padding later, so start filling in mrp at index 1 */
	if (padding != 0) {
		current = 1;
	}

	/* If we have no more need for lvp let it go now */
	if (lvp != NULL && use_lib_va == 0) {
		lib_va_release(lvp);
		MOBJ_STAT_ADD(lvp_not_needed);
		lvp = NULL;
	}

	/* Now fill out the mrp structs from the program headers */
	STRUCT_SET_HANDLE(mph, model, (struct myphdr *)phdrbase);
	for (i = 0; i < nphdrs; i++) {
		p_type = STRUCT_FGET(mph, x.p_type);
		if (p_type == PT_LOAD || p_type == PT_SUNWBSS) {
			vaddr = (caddr_t)(uintptr_t)STRUCT_FGET(mph, x.p_vaddr);
			p_memsz = STRUCT_FGET(mph, x.p_memsz);
			p_filesz = STRUCT_FGET(mph, x.p_filesz);
			p_offset = STRUCT_FGET(mph, x.p_offset);
			p_flags = STRUCT_FGET(mph, x.p_flags);

			/*
			 * Skip this header if it requests no memory to be
			 * mapped.
			 */
			if (p_memsz == 0) {
				STRUCT_SET_HANDLE(mph, model,
				    (struct myphdr *)((size_t)STRUCT_BUF(mph) +
				    hsize));
				MOBJ_STAT_ADD(no_mem_map_sz);
				continue;
			}

			prot = 0;
			if (p_flags & PF_R)
				prot |= PROT_READ;
			if (p_flags & PF_W)
				prot |= PROT_WRITE;
			if (p_flags & PF_X)
				prot |= PROT_EXEC;

			ASSERT(current < loadable);
			mrp[current].mr_msize = p_memsz;
			mrp[current].mr_fsize = p_filesz;
			mrp[current].mr_offset = p_offset;
			mrp[current].mr_prot = prot;

			if (hdr_seen == 0 && p_filesz != 0) {
				mrp[current].mr_flags = MR_HDR_ELF;
				/*
				 * We modify mr_offset because we
				 * need to map the ELF header as well, and if
				 * we didn't then the header could be left out
				 * of the mapping that we will create later.
				 * Since we're removing the offset, we need to
				 * account for that in the other fields as well
				 * since we will be mapping the memory from 0
				 * to p_offset.
				 */
				if (e_type == ET_DYN) {
					mrp[current].mr_offset = 0;
					mrp[current].mr_msize += p_offset;
					mrp[current].mr_fsize += p_offset;
				} else {
					ASSERT(e_type == ET_EXEC);
					/*
					 * Save off the start addr which will
					 * be our bias for the rest of the
					 * ET_EXEC mappings.
					 */
					start_addr = vaddr - padding;
				}
				mrp[current].mr_addr = (caddr_t)padding;
				hdr_seen = 1;
			} else {
				if (e_type == ET_EXEC) {
					/* p_vaddr is absolute for ET_EXEC */
					mrp[current].mr_addr =
					    vaddr - (size_t)start_addr;
				} else {
					mrp[current].mr_addr = vaddr + padding;
				}
				mrp[current].mr_flags = 0;
			}
			current++;
		}

		/* Move to next phdr */
		STRUCT_SET_HANDLE(mph, model,
		    (struct myphdr *)((size_t)STRUCT_BUF(mph) +
		    hsize));
	}

	/* Now fill out the padding segments */
	if (padding != 0) {
		mrp[0].mr_addr = NULL;
		mrp[0].mr_msize = padding;
		mrp[0].mr_fsize = 0;
		mrp[0].mr_offset = 0;
		mrp[0].mr_prot = 0;
		mrp[0].mr_flags = MR_PADDING;

		/* Setup padding for the last segment */
		ASSERT(current == loadable - 1);
		mrp[current].mr_addr = (caddr_t)lib_len + padding;
		mrp[current].mr_msize = padding;
		mrp[current].mr_fsize = 0;
		mrp[current].mr_offset = 0;
		mrp[current].mr_prot = 0;
		mrp[current].mr_flags = MR_PADDING;
	}

	/*
	 * Need to make sure address ranges desired are not in use or
	 * are previously allocated reservations from /dev/null.  For
	 * ET_DYN, we already made sure our address range was free.
	 */
	if (e_type == ET_EXEC) {
		ret = check_exec_addrs(loadable, mrp, start_addr);
		if (ret != 0) {
			ASSERT(lvp == NULL);
			MOBJ_STAT_ADD(check_exec_failed);
			return (ret);
		}
	}

	/* Finish up our business with lvp. */
	if (lvp) {
		ASSERT(e_type == ET_DYN);
		if (lvp->lv_num_segs == 0 && loadable <= LIBVA_CACHED_SEGS) {
			bcopy(mrp, lvp->lv_mps,
			    loadable * sizeof (mmapobj_result_t));
		}
		/*
		 * Setting lv_num_segs to a non-zero value indicates that
		 * lv_mps is now valid and can be used by other threads.
		 * So, the above stores need to finish before lv_num_segs
		 * is updated.  lv_mps is not valid if lv_num_segs is
		 * greater than LIBVA_CACHED_SEGS.
		 */
		lvp->lv_num_segs = loadable;
		lib_va_release(lvp);
		MOBJ_STAT_ADD(lvp_used);
	}

	/* Now that we have mrp completely filled out go map it */
	ret = mmapobj_map_elf(vp, start_addr, mrp, loadable, fcred, e_type);
	if (ret == 0) {
		*num_mapped = loadable;
	}

	return (ret);
}
/*
 * Take the ELF file passed in, and do the work of mapping it.
 * num_mapped in - # elements in user buffer
 * num_mapped out - # sections mapped and length of mrp array if
 *		    no errors or E2BIG returned.
 */
static int
doelfwork(Ehdr *ehdrp, vnode_t *vp, mmapobj_result_t *mrp,
    uint_t *num_mapped, size_t padding, cred_t *fcred)
{
	int error;
	offset_t phoff;
	int nphdrs;
	unsigned char ei_class;
	unsigned short phentsize;
	size_t phsizep;
	caddr_t phbasep;
	int to_map;
	model_t model;

	ei_class = ehdrp->e_ident[EI_CLASS];
	model = get_udatamodel();
	if ((model == DATAMODEL_ILP32 && ei_class == ELFCLASS64) ||
	    (model == DATAMODEL_LP64 && ei_class == ELFCLASS32)) {
		MOBJ_STAT_ADD(wrong_model);
		return (ENOTSUP);
	}

	/* Can't execute code from "noexec" mounted filesystem. */
	if (ehdrp->e_type == ET_EXEC &&
	    (vp->v_vfsp->vfs_flag & VFS_NOEXEC) != 0) {
		MOBJ_STAT_ADD(noexec_fs);
		return (EACCES);
	}

	/*
	 * Relocatable and core files are mapped as a single flat file
	 * since no interpretation is done on them by mmapobj.
	 */
	if (ehdrp->e_type == ET_REL || ehdrp->e_type == ET_CORE) {
		to_map = padding ? 3 : 1;
		if (*num_mapped < to_map) {
			*num_mapped = to_map;
			MOBJ_STAT_ADD(e2big_et_rel);
			return (E2BIG);
		}
		error = mmapobj_map_flat(vp, mrp, padding, fcred);
		if (error == 0) {
			*num_mapped = to_map;
			mrp[padding ? 1 : 0].mr_flags = MR_HDR_ELF;
			MOBJ_STAT_ADD(et_rel_mapped);
		}
		return (error);
	}

	/* Check for an unknown ELF type */
	if (ehdrp->e_type != ET_EXEC && ehdrp->e_type != ET_DYN) {
		MOBJ_STAT_ADD(unknown_elf_type);
		return (ENOTSUP);
	}

	if (ei_class == ELFCLASS32) {
		Elf32_Ehdr *e32hdr = (Elf32_Ehdr *)ehdrp;
		ASSERT(model == DATAMODEL_ILP32);
		nphdrs = e32hdr->e_phnum;
		phentsize = e32hdr->e_phentsize;
		if (phentsize < sizeof (Elf32_Phdr)) {
			MOBJ_STAT_ADD(phent32_too_small);
			return (ENOTSUP);
		}
		phoff = e32hdr->e_phoff;
	} else if (ei_class == ELFCLASS64) {
		Elf64_Ehdr *e64hdr = (Elf64_Ehdr *)ehdrp;
		ASSERT(model == DATAMODEL_LP64);
		nphdrs = e64hdr->e_phnum;
		phentsize = e64hdr->e_phentsize;
		if (phentsize < sizeof (Elf64_Phdr)) {
			MOBJ_STAT_ADD(phent64_too_small);
			return (ENOTSUP);
		}
		phoff = e64hdr->e_phoff;
	} else {
		/* fallthrough case for an invalid ELF class */
		MOBJ_STAT_ADD(inval_elf_class);
		return (ENOTSUP);
	}

	/*
	 * nphdrs should only have this value for core files which are handled
	 * above as a single mapping.  If other file types ever use this
	 * sentinel, then we'll add the support needed to handle this here.
	 */
	if (nphdrs == PN_XNUM) {
		MOBJ_STAT_ADD(too_many_phdrs);
		return (ENOTSUP);
	}

	phsizep = nphdrs * phentsize;

	if (phsizep == 0) {
		MOBJ_STAT_ADD(no_phsize);
		return (ENOTSUP);
	}

	/* Make sure we only wait for memory if it's a reasonable request */
	if (phsizep > mmapobj_alloc_threshold) {
		MOBJ_STAT_ADD(phsize_large);
		if ((phbasep = kmem_alloc(phsizep, KM_NOSLEEP)) == NULL) {
			MOBJ_STAT_ADD(phsize_xtralarge);
			return (ENOMEM);
		}
	} else {
		phbasep = kmem_alloc(phsizep, KM_SLEEP);
	}

	if ((error = vn_rdwr(UIO_READ, vp, phbasep, phsizep,
	    (offset_t)phoff, UIO_SYSSPACE, 0, (rlim64_t)0,
	    fcred, NULL)) != 0) {
		kmem_free(phbasep, phsizep);
		return (error);
	}

	/* Now process the phdr's */
	error = process_phdr(ehdrp, phbasep, nphdrs, mrp, vp, num_mapped,
	    padding, fcred);
	kmem_free(phbasep, phsizep);
	return (error);
}
#if defined(__sparc)
/*
 * Hack to support 64 bit kernels running AOUT 4.x programs.
 * This is the sizeof (struct nlist) for a 32 bit kernel.
 * Since AOUT programs are 32 bit only, they will never use the 64 bit
 * sizeof (struct nlist) and thus creating a #define is the simplest
 * way around this since this is a format which is not being updated.
 * This will be used in the place of sizeof (struct nlist) below.
 */
#define	NLIST_SIZE	(0xC)

static int
doaoutwork(vnode_t *vp, mmapobj_result_t *mrp,
    uint_t *num_mapped, struct exec *hdr, cred_t *fcred)
{
	int error;
	size_t size;
	size_t osize;
	size_t nsize;	/* nlist size */
	size_t msize;
	size_t zfoddiff;
	caddr_t addr;
	caddr_t start_addr;
	struct as *as = curproc->p_as;
	int prot = PROT_USER | PROT_READ | PROT_EXEC;
	uint_t mflag = MAP_PRIVATE | _MAP_LOW32;
	offset_t off = 0;
	int is_library = 0;
	int segnum = 0;
	uint_t to_map;
	label_t ljb;
	struct segvn_crargs crargs = SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);

	/* Only 32bit apps supported by this file format */
	if (get_udatamodel() != DATAMODEL_ILP32) {
		MOBJ_STAT_ADD(aout_64bit_try);
		return (ENOTSUP);
	}

	/* Check to see if this is a library */
	if (hdr->a_magic == ZMAGIC && hdr->a_entry < PAGESIZE) {
		is_library = 1;
	}

	/* Can't execute code from "noexec" mounted filesystem. */
	if (((vp->v_vfsp->vfs_flag & VFS_NOEXEC) != 0) && (is_library == 0)) {
		MOBJ_STAT_ADD(aout_noexec);
		return (EACCES);
	}

	/*
	 * There are 2 ways to calculate the mapped size of an executable:
	 * 1) rounded text size + data size + bss size.
	 * 2) starting offset for text + text size + data size + text
	 *    relocation size + data relocation size + room for nlist data
	 *    structure.
	 *
	 * The larger of the two sizes will be used to map this binary.
	 */
	osize = P2ROUNDUP(hdr->a_text, PAGESIZE) + hdr->a_data + hdr->a_bss;

	off = hdr->a_magic == ZMAGIC ? 0 : sizeof (struct exec);

	nsize = off + hdr->a_text + hdr->a_data + hdr->a_trsize +
	    hdr->a_drsize + NLIST_SIZE;

	size = MAX(osize, nsize);
	if (size != nsize) {
		nsize = 0;
	}

	/*
	 * 1 seg for text and 1 seg for initialized data.
	 * 1 seg for bss (if can't fit in leftover space of init data)
	 * 1 seg for nlist if needed.
	 */
	to_map = 2 + (nsize ? 1 : 0) +
	    (hdr->a_bss > PAGESIZE - P2PHASE(hdr->a_data, PAGESIZE) ? 1 : 0);
	if (*num_mapped < to_map) {
		*num_mapped = to_map;
		MOBJ_STAT_ADD(aout_e2big);
		return (E2BIG);
	}

	/* Reserve address space for the whole mapping */
	if (is_library) {
		/* We'll let VOP_MAP below pick our address for us */
		addr = NULL;
		MOBJ_STAT_ADD(aout_lib);
	} else {
		/*
		 * default start address for fixed binaries from AOUT 4.x
		 * standard
		 */
		MOBJ_STAT_ADD(aout_fixed);
		mflag |= MAP_FIXED;
		addr = (caddr_t)0x2000;
		as_rangelock(as);
		if (as_gap(as, size, &addr, &size, 0, NULL) != 0) {
			as_rangeunlock(as);
			MOBJ_STAT_ADD(aout_addr_in_use);
			return (EADDRINUSE);
		}
		crargs.flags |= MAP_NORESERVE;
		error = as_map(as, addr, size, segvn_create, &crargs);
		ASSERT(addr == (caddr_t)0x2000);
		as_rangeunlock(as);
		if (error) {
			return (error);
		}
	}

	start_addr = addr;
	osize = size;

	/*
	 * Map as large as we need, backed by file, this will be text, and
	 * possibly the nlist segment.  We map over this mapping for bss and
	 * initialized data segments.
	 */
	error = VOP_MAP(vp, off, as, &addr, size, prot, PROT_ALL,
	    mflag, fcred, NULL);
	if (error) {
		if (!is_library) {
			(void) as_unmap(as, start_addr, osize);
		}
		return (error);
	}

	/* pickup the value of start_addr and osize for libraries */
	start_addr = addr;
	osize = size;

	/*
	 * We have our initial reservation/allocation so we need to use fixed
	 * addresses from now on.
	 */
	mflag |= MAP_FIXED;

	mrp[0].mr_addr = addr;
	mrp[0].mr_msize = hdr->a_text;
	mrp[0].mr_fsize = hdr->a_text;
	mrp[0].mr_offset = 0;
	mrp[0].mr_prot = PROT_READ | PROT_EXEC;
	mrp[0].mr_flags = MR_HDR_AOUT;

	/*
	 * Map initialized data.  We are mapping over a portion of the
	 * previous mapping which will be unmapped in VOP_MAP below.
	 */
	off = P2ROUNDUP((offset_t)(hdr->a_text), PAGESIZE);
	msize = off;
	addr += off;
	size = hdr->a_data;
	error = VOP_MAP(vp, off, as, &addr, size, PROT_ALL, PROT_ALL,
	    mflag, fcred, NULL);
	if (error) {
		(void) as_unmap(as, start_addr, osize);
		return (error);
	}

	mrp[1].mr_addr = addr;
	mrp[1].mr_msize = size;
	mrp[1].mr_fsize = size;
	mrp[1].mr_offset = 0;
	mrp[1].mr_prot = PROT_READ | PROT_WRITE | PROT_EXEC;
	mrp[1].mr_flags = 0;

	msize += size;

	/* Need to zero out remainder of page */
	addr += hdr->a_data;
	zfoddiff = P2PHASE((size_t)addr, PAGESIZE);
	if (zfoddiff) {
		MOBJ_STAT_ADD(aout_zfoddiff);
		zfoddiff = PAGESIZE - zfoddiff;
		if (on_fault(&ljb)) {
			no_fault();
			MOBJ_STAT_ADD(aout_uzero_fault);
			(void) as_unmap(as, start_addr, osize);
			return (EFAULT);
		}
		uzero(addr, zfoddiff);
		no_fault();
	}

	segnum = 2;

	/* Map bss, if it can't fit in the zero-fill slack of the data page */
	if (hdr->a_bss > zfoddiff) {
		struct segvn_crargs crargs =
		    SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);
		MOBJ_STAT_ADD(aout_map_bss);
		addr += zfoddiff;
		size = hdr->a_bss - zfoddiff;
		as_rangelock(as);
		(void) as_unmap(as, addr, size);
		error = as_map(as, addr, size, segvn_create, &crargs);
		as_rangeunlock(as);
		msize += size;

		if (error) {
			MOBJ_STAT_ADD(aout_bss_fail);
			(void) as_unmap(as, start_addr, osize);
			return (error);
		}
		mrp[2].mr_addr = addr;
		mrp[2].mr_msize = size;
		mrp[2].mr_fsize = 0;
		mrp[2].mr_offset = 0;
		mrp[2].mr_prot = PROT_READ | PROT_WRITE | PROT_EXEC;
		mrp[2].mr_flags = 0;

		segnum = 3;
		addr += size;
	}

	/*
	 * If we have extra bits left over, we need to include that in how
	 * much we mapped to make sure the nlist logic is correct
	 */
	msize = P2ROUNDUP(msize, PAGESIZE);

	if (nsize && msize < nsize) {
		MOBJ_STAT_ADD(aout_nlist);
		mrp[segnum].mr_addr = addr;
		mrp[segnum].mr_msize = nsize - msize;
		mrp[segnum].mr_fsize = 0;
		mrp[segnum].mr_offset = 0;
		mrp[segnum].mr_prot = PROT_READ | PROT_EXEC;
		mrp[segnum].mr_flags = 0;
	}

	*num_mapped = to_map;
	return (0);
}
#endif
/*
 * These are the two types of files that we can interpret and we want to read
 * in enough info to cover both types when looking at the initial header.
 */
#define	MAX_HEADER_SIZE	(MAX(sizeof (Ehdr), sizeof (struct exec)))
/*
 * Map vp passed in in an interpreted manner.  ELF and AOUT files will be
 * interpreted and mapped appropriately for execution.
 * num_mapped in - # elements in mrp
 * num_mapped out - # sections mapped and length of mrp array if
 *		    no errors or E2BIG returned.
 *
 * Returns 0 on success, errno value on failure.
 */
static int
mmapobj_map_interpret(vnode_t *vp, mmapobj_result_t *mrp,
    uint_t *num_mapped, size_t padding, cred_t *fcred)
{
	int error = 0;
	vattr_t vattr;
	struct lib_va *lvp;
	caddr_t start_addr;
	model_t model;

	/*
	 * header has to be aligned to the native size of ulong_t in order
	 * to avoid an unaligned access when dereferencing the header as
	 * a ulong_t.  Thus we allocate our array on the stack of type
	 * ulong_t and then have header, which we dereference later as a char
	 * array, point at lheader.
	 */
	ulong_t lheader[(MAX_HEADER_SIZE / (sizeof (ulong_t))) + 1];
	caddr_t header = (caddr_t)&lheader;

	vattr.va_mask = AT_FSID | AT_NODEID | AT_CTIME | AT_MTIME | AT_SIZE;
	error = VOP_GETATTR(vp, &vattr, 0, fcred, NULL);
	if (error) {
		return (error);
	}

	/*
	 * Check lib_va to see if we already have a full description
	 * for this library.  This is the fast path and only used for
	 * ET_DYN ELF files (dynamic libraries).
	 */
	if (padding == 0 && (lvp = lib_va_find(&vattr)) != NULL) {
		int num_segs;

		model = get_udatamodel();
		if ((model == DATAMODEL_ILP32 &&
		    lvp->lv_flags & LV_ELF64) ||
		    (model == DATAMODEL_LP64 &&
		    lvp->lv_flags & LV_ELF32)) {
			lib_va_release(lvp);
			MOBJ_STAT_ADD(fast_wrong_model);
			return (ENOTSUP);
		}
		num_segs = lvp->lv_num_segs;
		if (*num_mapped < num_segs) {
			*num_mapped = num_segs;
			lib_va_release(lvp);
			MOBJ_STAT_ADD(fast_e2big);
			return (E2BIG);
		}

		/*
		 * Check to see if we have all the mappable program headers
		 * cached.
		 */
		if (num_segs <= LIBVA_CACHED_SEGS && num_segs != 0) {
			MOBJ_STAT_ADD(fast);
			start_addr = mmapobj_lookup_start_addr(lvp);
			if (start_addr == NULL) {
				lib_va_release(lvp);
				return (ENOMEM);
			}

			bcopy(lvp->lv_mps, mrp,
			    num_segs * sizeof (mmapobj_result_t));

			error = mmapobj_map_elf(vp, start_addr, mrp,
			    num_segs, fcred, ET_DYN);

			lib_va_release(lvp);
			if (error == 0) {
				*num_mapped = num_segs;
				MOBJ_STAT_ADD(fast_success);
			}
			return (error);
		}
		MOBJ_STAT_ADD(fast_not_now);

		/* Release it for now since we'll look it up below */
		lib_va_release(lvp);
	}

	/*
	 * Time to see if this is a file we can interpret.  If it's smaller
	 * than this, then we can't interpret it.
	 */
	if (vattr.va_size < MAX_HEADER_SIZE) {
		MOBJ_STAT_ADD(small_file);
		return (ENOTSUP);
	}

	if ((error = vn_rdwr(UIO_READ, vp, header, MAX_HEADER_SIZE, 0,
	    UIO_SYSSPACE, 0, (rlim64_t)0, fcred, NULL)) != 0) {
		MOBJ_STAT_ADD(read_error);
		return (error);
	}

	/* Verify file type */
	if (header[EI_MAG0] == ELFMAG0 && header[EI_MAG1] == ELFMAG1 &&
	    header[EI_MAG2] == ELFMAG2 && header[EI_MAG3] == ELFMAG3) {
		return (doelfwork((Ehdr *)lheader, vp, mrp, num_mapped,
		    padding, fcred));
	}

#if defined(__sparc)
	/* On sparc, check for 4.X AOUT format */
	switch (((struct exec *)header)->a_magic) {
	case OMAGIC:
	case ZMAGIC:
	case NMAGIC:
		return (doaoutwork(vp, mrp, num_mapped,
		    (struct exec *)lheader, fcred));
	}
#endif

	/* Unsupported type */
	MOBJ_STAT_ADD(unsupported);
	return (ENOTSUP);
}
/*
 * Given a vnode, map it as either a flat file or interpret it and map
 * it according to the rules of the file type.
 * *num_mapped will contain the size of the mmapobj_result_t array passed in.
 * If padding is non-zero, the mappings will be padded by that amount
 * rounded up to the nearest pagesize.
 * If the mapping is successful, *num_mapped will contain the number of
 * distinct mappings created, and mrp will point to the array of
 * mmapobj_result_t's which describe these mappings.
 *
 * On error, the appropriate errno value is returned.
 * A special error case is E2BIG, returned when there are more than
 * *num_mapped mappings to be created; *num_mapped will then be set to the
 * number of mappings needed.
 */
int
mmapobj(vnode_t *vp, uint_t flags, mmapobj_result_t *mrp,
    uint_t *num_mapped, size_t padding, cred_t *fcred)
{
	int to_map;
	int error = 0;

	ASSERT((padding & PAGEOFFSET) == 0);
	ASSERT((flags & ~MMOBJ_ALL_FLAGS) == 0);
	ASSERT(num_mapped != NULL);
	ASSERT((flags & MMOBJ_PADDING) ? padding != 0 : padding == 0);

	if ((flags & MMOBJ_INTERPRET) == 0) {
		to_map = padding ? 3 : 1;
		if (*num_mapped < to_map) {
			*num_mapped = to_map;
			MOBJ_STAT_ADD(flat_e2big);
			return (E2BIG);
		}
		error = mmapobj_map_flat(vp, mrp, padding, fcred);

		if (error) {
			return (error);
		}
		*num_mapped = to_map;
		return (0);
	}

	error = mmapobj_map_interpret(vp, mrp, num_mapped, padding, fcred);
	return (error);
}