/*-
 * Copyright 1996-1998 John D. Polstra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/mman.h>
#include <sys/stat.h>

#include <errno.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "debug.h"
#include "rtld.h"

static Elf_Ehdr *get_elf_header(int, const char *);
static int convert_prot(int);	/* Elf flags -> mmap protection */
static int convert_flags(int);	/* Elf flags -> mmap flags */

/*
 * Map a shared object into memory.  The "fd" argument is a file descriptor,
 * which must be open on the object and positioned at its beginning.
 * The "path" argument is a pathname that is used only for error messages.
 *
 * The return value is a pointer to a newly-allocated Obj_Entry structure
 * for the shared object.  Returns NULL on failure.
 */
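/*
 * Hedged usage sketch (hypothetical caller, not part of this file): the
 * loader stats the file first and detects failure via the NULL return;
 * the error text is recoverable because map_object() records it with
 * _rtld_error().
 *
 *      struct stat sb;
 *      Obj_Entry *obj;
 *
 *      if (fstat(fd, &sb) != -1) {
 *              obj = map_object(fd, path, &sb);
 *              if (obj == NULL)
 *                      ...handle the recorded error...
 *      }
 */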
Obj_Entry *
map_object(int fd, const char *path, const struct stat *sb)
{
    Obj_Entry *obj;
    Elf_Ehdr *hdr;
    int i;
    Elf_Phdr *phdr;
    Elf_Phdr *phlimit;
    Elf_Phdr **segs;
    int nsegs;
    Elf_Phdr *phdyn;
    Elf_Phdr *phinterp;
    Elf_Phdr *phtls;
    caddr_t mapbase;
    caddr_t shlib_base;
    size_t mapsize;
    Elf_Addr base_vaddr;
    Elf_Addr base_vlimit;
    caddr_t base_addr;
    Elf_Off data_offset;
    Elf_Addr data_vaddr;
    Elf_Addr data_vlimit;
    caddr_t data_addr;
    int data_prot;
    int data_flags;
    Elf_Addr clear_vaddr;
    caddr_t clear_addr;
    caddr_t clear_page;
    Elf_Addr phdr_vaddr;
    size_t nclear, phsize;
    Elf_Addr bss_vaddr;
    Elf_Addr bss_vlimit;
    caddr_t bss_addr;
    Elf_Addr relro_page;
    size_t relro_size;
    Elf_Addr note_start;
    Elf_Addr note_end;
    int stack_flags;

    shlib_base = NULL;
    hdr = get_elf_header(fd, path);
    if (hdr == NULL)
        return NULL;

    if (__ld_sharedlib_base) {
        shlib_base = (void *)(intptr_t)strtoul(__ld_sharedlib_base, NULL, 0);
    }

    /*
     * Scan the program header entries, and save key information.
     *
     * We expect that the loadable segments are ordered by load address.
     */
    phdr = (Elf_Phdr *) ((char *)hdr + hdr->e_phoff);
    phsize = hdr->e_phnum * sizeof(phdr[0]);
    phlimit = phdr + hdr->e_phnum;
    phdyn = phinterp = phtls = NULL;
    phdr_vaddr = 0;
    relro_page = 0;
    relro_size = 0;
    note_start = 0;
    note_end = 0;
    nsegs = -1;
    segs = alloca(sizeof(segs[0]) * hdr->e_phnum);
    stack_flags = RTLD_DEFAULT_STACK_PF_EXEC | PF_R | PF_W;
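
    /*
     * Hypothetical example of the layout this scan expects, with the
     * PT_LOAD entries sorted by p_vaddr (numbers invented):
     *
     *   PT_LOAD  p_vaddr 0x0000  p_filesz 0x4000  p_memsz 0x4000  R E (text)
     *   PT_LOAD  p_vaddr 0x5000  p_filesz 0x0800  p_memsz 0x1200  RW  (data+bss)
     *
     * segs[0] then ends up as the lowest segment and segs[nsegs] the highest.
     */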
    while (phdr < phlimit) {
        switch (phdr->p_type) {

        case PT_INTERP:
            phinterp = phdr;
            break;

        case PT_LOAD:
            segs[++nsegs] = phdr;
            if ((segs[nsegs]->p_align & (PAGE_SIZE - 1)) != 0) {
                _rtld_error("%s: PT_LOAD segment %d not page-aligned",
                    path, nsegs);
                goto error;
            }
            break;

        case PT_PHDR:
            phdr_vaddr = phdr->p_vaddr;
            phsize = phdr->p_memsz;
            break;

        case PT_DYNAMIC:
            phdyn = phdr;
            break;

        case PT_TLS:
            phtls = phdr;
            break;

        case PT_GNU_STACK:
            stack_flags = phdr->p_flags;
            break;

        case PT_GNU_RELRO:
            relro_page = phdr->p_vaddr;
            relro_size = phdr->p_memsz;
            break;

        case PT_NOTE:
            if (phdr->p_offset > PAGE_SIZE ||
                phdr->p_offset + phdr->p_filesz > PAGE_SIZE)
                break;
            note_start = (Elf_Addr)(char *)hdr + phdr->p_offset;
            note_end = note_start + phdr->p_filesz;
            break;
        }

        ++phdr;
    }

    if (phdyn == NULL) {
        _rtld_error("%s: object is not dynamically-linked", path);
        goto error;
    }

    if (nsegs < 0) {
        _rtld_error("%s: too few PT_LOAD segments", path);
        goto error;
    }

    /*
     * Map the entire address space of the object, to stake out our
     * contiguous region, and to establish the base address for relocation.
     */
    base_vaddr = trunc_page(segs[0]->p_vaddr);
    base_vlimit = round_page(segs[nsegs]->p_vaddr + segs[nsegs]->p_memsz);
    mapsize = base_vlimit - base_vaddr;
    base_addr = (caddr_t) base_vaddr;
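
    /*
     * Worked example (hypothetical numbers, 4 KB pages): if segs[0]->p_vaddr
     * is 0x1234 and the last segment ends at 0x5678, then
     * base_vaddr = trunc_page(0x1234) = 0x1000,
     * base_vlimit = round_page(0x5678) = 0x6000, and
     * mapsize = 0x6000 - 0x1000 = 0x5000.
     */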

    if (base_addr == NULL && shlib_base) {
        size_t limit = 1024 * 256 * 1024;
        size_t offset;

        for (offset = 0; offset < limit; offset += 256 * 1024) {
            mapbase = mmap(shlib_base + offset, mapsize,
                PROT_NONE,
                MAP_ANON | MAP_PRIVATE | MAP_NOCORE |
                MAP_TRYFIXED,
                -1, 0);
            if (mapbase != MAP_FAILED)
                break;
        }
    } else {
        mapbase = mmap(base_addr, mapsize,
            PROT_NONE,
            MAP_ANON | MAP_PRIVATE | MAP_NOCORE,
            -1, 0);
    }
    if (mapbase == (caddr_t) -1) {
        _rtld_error("%s: mmap of entire address space failed: %s",
            path, rtld_strerror(errno));
        goto error;
    }

    if (base_addr != NULL && mapbase != base_addr) {
        _rtld_error("%s: mmap returned wrong address: wanted %p, got %p",
            path, base_addr, mapbase);
        goto error1;
    }

    for (i = 0; i <= nsegs; i++) {
        /* Overlay the segment onto the proper region. */
        data_offset = trunc_page(segs[i]->p_offset);
        data_vaddr = trunc_page(segs[i]->p_vaddr);
        data_vlimit = round_page(segs[i]->p_vaddr + segs[i]->p_filesz);
        data_addr = mapbase + (data_vaddr - base_vaddr);
        data_prot = convert_prot(segs[i]->p_flags);
        data_flags = convert_flags(segs[i]->p_flags) | MAP_FIXED;
        if (mmap(data_addr, data_vlimit - data_vaddr, data_prot,
            data_flags, fd, data_offset) == (caddr_t) -1) {
            _rtld_error("%s: mmap of data failed: %s", path,
                rtld_strerror(errno));
            goto error1;
        }

        if (segs[i]->p_filesz != segs[i]->p_memsz) {
            /* Clear any BSS in the last page of the segment. */
            clear_vaddr = segs[i]->p_vaddr + segs[i]->p_filesz;
            clear_addr = mapbase + (clear_vaddr - base_vaddr);
            clear_page = mapbase + (trunc_page(clear_vaddr) - base_vaddr);

            if ((nclear = data_vlimit - clear_vaddr) > 0) {
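                /*
                 * Worked example (hypothetical, 4 KB pages): if the file
                 * image ends at clear_vaddr = 0x5800, the file-backed
                 * mapping extends to data_vlimit = 0x6000, so the trailing
                 * nclear = 0x800 bytes hold stale file contents and must
                 * be zeroed to serve as BSS.
                 */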
                /* Make sure the end of the segment is writable */
                if ((data_prot & PROT_WRITE) == 0 && -1 ==
                    mprotect(clear_page, PAGE_SIZE, data_prot|PROT_WRITE)) {
                    _rtld_error("%s: mprotect failed: %s", path,
                        rtld_strerror(errno));
                    goto error1;
                }

                memset(clear_addr, 0, nclear);

                /*
                 * Reset the data protection back and re-enable the segment
                 * for core dumps, since we modified it.
                 */
                if ((data_prot & PROT_WRITE) == 0) {
                    madvise(clear_page, PAGE_SIZE, MADV_CORE);
                    mprotect(clear_page, PAGE_SIZE, data_prot);
                }
            }

            /* Overlay the BSS segment onto the proper region. */
            bss_vaddr = data_vlimit;
            bss_vlimit = round_page(segs[i]->p_vaddr + segs[i]->p_memsz);
            bss_addr = mapbase + (bss_vaddr - base_vaddr);
            if (bss_vlimit > bss_vaddr) {    /* There is something to do */
                if (mmap(bss_addr, bss_vlimit - bss_vaddr, data_prot,
                    data_flags | MAP_ANON, -1, 0) == (caddr_t)-1) {
                    _rtld_error("%s: mmap of bss failed: %s", path,
                        rtld_strerror(errno));
                    goto error1;
                }
            }
        }

        if (phdr_vaddr == 0 && data_offset <= hdr->e_phoff &&
            (data_vlimit - data_vaddr + data_offset) >=
            (hdr->e_phoff + hdr->e_phnum * sizeof(Elf_Phdr))) {
            phdr_vaddr = data_vaddr + hdr->e_phoff - data_offset;
        }
    }

    obj = obj_new();
    if (sb != NULL) {
        obj->dev = sb->st_dev;
        obj->ino = sb->st_ino;
    }

    obj->mapbase = mapbase;
    obj->mapsize = mapsize;
    obj->textsize = round_page(segs[0]->p_vaddr + segs[0]->p_memsz) -
        base_vaddr;
    obj->vaddrbase = base_vaddr;
    obj->relocbase = mapbase - base_vaddr;
    obj->dynamic = (const Elf_Dyn *) (obj->relocbase + phdyn->p_vaddr);
    if (hdr->e_entry != 0)
        obj->entry = (caddr_t) (obj->relocbase + hdr->e_entry);
    if (phdr_vaddr != 0) {
        obj->phdr = (const Elf_Phdr *) (obj->relocbase + phdr_vaddr);
    } else {
        obj->phdr = malloc(phsize);
        if (obj->phdr == NULL) {
            obj_free(obj);
            _rtld_error("%s: cannot allocate program header", path);
            goto error1;
        }
        memcpy((char *)obj->phdr, (char *)hdr + hdr->e_phoff, phsize);
        obj->phdr_alloc = true;
    }
    obj->phsize = phsize;
    if (phinterp != NULL)
        obj->interp = (const char *) (obj->relocbase + phinterp->p_vaddr);

    if (phtls != NULL) {
        tls_dtv_generation++;
        obj->tlsindex = ++tls_max_index;
        obj->tlssize = phtls->p_memsz;
        obj->tlsalign = phtls->p_align;
        obj->tlsinitsize = phtls->p_filesz;
        obj->tlsinit = mapbase + phtls->p_vaddr;
    }

    obj->stack_flags = stack_flags;

    obj->relro_page = obj->relocbase + trunc_page(relro_page);
    obj->relro_size = round_page(relro_size);

    if (note_start < note_end)
        digest_notes(obj, note_start, note_end);
    munmap(hdr, PAGE_SIZE);
    return obj;

error1:
    munmap(mapbase, mapsize);
error:
    munmap(hdr, PAGE_SIZE);
    return NULL;
}

static Elf_Ehdr *
get_elf_header(int fd, const char *path)
{
    Elf_Ehdr *hdr;

    /* DragonFly mmap does not have MAP_PREFAULT_READ */
    hdr = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, fd, 0);
    if (hdr == (Elf_Ehdr *)MAP_FAILED) {
        _rtld_error("%s: read error: %s", path, rtld_strerror(errno));
        return NULL;
    }

    /* Make sure the file is valid */
    if (!IS_ELF(*hdr)) {
        _rtld_error("%s: invalid file format", path);
        goto error;
    }

    if (hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
        hdr->e_ident[EI_DATA] != ELF_TARG_DATA) {
        _rtld_error("%s: unsupported file layout", path);
        goto error;
    }

    if (hdr->e_ident[EI_VERSION] != EV_CURRENT ||
        hdr->e_version != EV_CURRENT) {
        _rtld_error("%s: unsupported file version", path);
        goto error;
    }

    if (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN) {
        _rtld_error("%s: unsupported file type", path);
        goto error;
    }

    if (hdr->e_machine != ELF_TARG_MACH) {
        _rtld_error("%s: unsupported machine", path);
        goto error;
    }

    /*
     * We rely on the program header being in the first page.  This is
     * not strictly required by the ABI specification, but it seems to
     * always be true in practice.  And, it simplifies things considerably.
     */
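    /*
     * Example (hypothetical 64-bit object): with e_phoff = 64 and
     * e_phnum = 9, the table occupies 64 + 9 * sizeof(Elf_Phdr) =
     * 64 + 9 * 56 = 568 bytes, well within even a 4 KB first page.
     */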
    if (hdr->e_phentsize != sizeof(Elf_Phdr)) {
        _rtld_error(
            "%s: invalid shared object: e_phentsize != sizeof(Elf_Phdr)", path);
        goto error;
    }

    if (hdr->e_phoff + hdr->e_phnum * sizeof(Elf_Phdr) >
        (size_t)PAGE_SIZE) {
        _rtld_error("%s: program header too large", path);
        goto error;
    }

    return hdr;

error:
    munmap(hdr, PAGE_SIZE);
    return NULL;
}

void
obj_free(Obj_Entry *obj)
{
    Objlist_Entry *elm;

    if (obj->tls_done)
        free_tls_offset(obj);
    while (obj->needed != NULL) {
        Needed_Entry *needed = obj->needed;
        obj->needed = needed->next;
        free(needed);
    }
    while (!STAILQ_EMPTY(&obj->names)) {
        Name_Entry *entry = STAILQ_FIRST(&obj->names);
        STAILQ_REMOVE_HEAD(&obj->names, link);
        free(entry);
    }
    while (!STAILQ_EMPTY(&obj->dldags)) {
        elm = STAILQ_FIRST(&obj->dldags);
        STAILQ_REMOVE_HEAD(&obj->dldags, link);
        free(elm);
    }
    while (!STAILQ_EMPTY(&obj->dagmembers)) {
        elm = STAILQ_FIRST(&obj->dagmembers);
        STAILQ_REMOVE_HEAD(&obj->dagmembers, link);
        free(elm);
    }
    if (obj->origin_path)
        free(obj->origin_path);
    if (obj->phdr_alloc)
        free((void *)obj->phdr);
    free(obj);
}

Obj_Entry *
obj_new(void)
{
    Obj_Entry *obj;

    obj = CNEW(Obj_Entry);
    STAILQ_INIT(&obj->dldags);
    STAILQ_INIT(&obj->dagmembers);
    STAILQ_INIT(&obj->names);
    return obj;
}

/*
 * Given a set of ELF protection flags, return the corresponding protection
 * flags for MMAP.
 */
static int
convert_prot(int elfflags)
{
    int prot = 0;

    if (elfflags & PF_R)
        prot |= PROT_READ;
    if (elfflags & PF_W)
        prot |= PROT_WRITE;
    if (elfflags & PF_X)
        prot |= PROT_EXEC;
    return prot;
}

static int
convert_flags(int elfflags)
{
    int flags = MAP_PRIVATE;    /* All mappings are private */

    /*
     * Readonly mappings are marked "MAP_NOCORE", because they can be
     * reconstructed by a debugger.
     */
    if (!(elfflags & PF_W))
        flags |= MAP_NOCORE;

    return flags;
}
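
/*
 * Illustrative result (assuming the usual <sys/mman.h> values): a typical
 * read-only executable text segment with p_flags == (PF_R | PF_X) yields
 * convert_prot() == (PROT_READ | PROT_EXEC) and
 * convert_flags() == (MAP_PRIVATE | MAP_NOCORE), since it is not writable.
 */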