/*-
 * Copyright 1996-1998 John D. Polstra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/mman.h>
#include <sys/stat.h>

#include <errno.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "debug.h"
#include "rtld.h"
static Elf_Ehdr *get_elf_header(int, const char *);
static int convert_prot(int);	/* Elf flags -> mmap protection */
static int convert_flags(int);	/* Elf flags -> mmap flags */
/*
 * Map a shared object into memory.  The "fd" argument is a file descriptor,
 * which must be open on the object and positioned at its beginning.
 * The "path" argument is a pathname that is used only for error messages.
 *
 * The return value is a pointer to a newly-allocated Obj_Entry structure
 * for the shared object.  Returns NULL on failure.
 */
Obj_Entry *
map_object(int fd, const char *path, const struct stat *sb)
{
	Obj_Entry *obj;
	Elf_Ehdr *hdr;
	int i, nsegs;
	Elf_Phdr *phdr, *phlimit, **segs;
	Elf_Phdr *phdyn, *phinterp, *phtls;
	caddr_t mapbase, shlib_base, base_addr;
	caddr_t data_addr, clear_addr, clear_page, bss_addr;
	Elf_Addr base_vaddr, base_vlimit;
	Elf_Addr data_vaddr, data_vlimit, clear_vaddr;
	Elf_Addr bss_vaddr, bss_vlimit;
	Elf_Addr phdr_vaddr, relro_page;
	Elf_Addr note_start, note_end;
	Elf_Off data_offset;
	size_t mapsize, relro_size;
	size_t nclear, phsize;
	int data_prot, data_flags, stack_flags;
	hdr = get_elf_header(fd, path);
	if (hdr == NULL)
		return NULL;

	shlib_base = NULL;
	if (__ld_sharedlib_base) {
		shlib_base = (void *)(intptr_t)strtoul(__ld_sharedlib_base,
		    NULL, 0);
	}
	/*
	 * Scan the program header entries, and save key information.
	 *
	 * We expect that the loadable segments are ordered by load address.
	 */
	phdr = (Elf_Phdr *) ((char *)hdr + hdr->e_phoff);
	phsize = hdr->e_phnum * sizeof(phdr[0]);
	phlimit = phdr + hdr->e_phnum;
	nsegs = -1;
	phdyn = phinterp = phtls = NULL;
	phdr_vaddr = 0;
	relro_page = 0;
	relro_size = 0;
	note_start = 0;
	note_end = 0;
	segs = alloca(sizeof(segs[0]) * hdr->e_phnum);
	stack_flags = RTLD_DEFAULT_STACK_PF_EXEC | PF_R | PF_W;
	while (phdr < phlimit) {
		switch (phdr->p_type) {

		case PT_INTERP:
			phinterp = phdr;
			break;

		case PT_LOAD:
			segs[++nsegs] = phdr;
			if ((segs[nsegs]->p_align & (PAGE_SIZE - 1)) != 0) {
				_rtld_error(
				    "%s: PT_LOAD segment %d not page-aligned",
				    path, nsegs);
				goto error;
			}
			break;

		case PT_PHDR:
			phdr_vaddr = phdr->p_vaddr;
			phsize = phdr->p_memsz;
			break;

		case PT_DYNAMIC:
			phdyn = phdr;
			break;

		case PT_TLS:
			phtls = phdr;
			break;

		case PT_GNU_STACK:
			stack_flags = phdr->p_flags;
			break;

		case PT_GNU_RELRO:
			relro_page = phdr->p_vaddr;
			relro_size = phdr->p_memsz;
			break;

		case PT_NOTE:
			/* Notes outside the first page cannot be digested. */
			if (phdr->p_offset > PAGE_SIZE ||
			    phdr->p_offset + phdr->p_filesz > PAGE_SIZE)
				break;
			note_start = (Elf_Addr)(char *)hdr + phdr->p_offset;
			note_end = note_start + phdr->p_filesz;
			break;
		}
		++phdr;
	}
	if (phdyn == NULL) {
		_rtld_error("%s: object is not dynamically-linked", path);
		goto error;
	}

	if (nsegs < 0) {
		_rtld_error("%s: too few PT_LOAD segments", path);
		goto error;
	}
	/*
	 * Map the entire address space of the object, to stake out our
	 * contiguous region, and to establish the base address for relocation.
	 */
	base_vaddr = trunc_page(segs[0]->p_vaddr);
	base_vlimit = round_page(segs[nsegs]->p_vaddr + segs[nsegs]->p_memsz);
	mapsize = base_vlimit - base_vaddr;
	base_addr = (caddr_t) base_vaddr;
	if (base_addr == NULL && shlib_base) {
		size_t limit = 1024 * 256 * 1024;
		size_t offset;

		/* Probe upward from the shared library base in 256KB steps. */
		for (offset = 0; offset < limit; offset += 256 * 1024) {
			mapbase = mmap(shlib_base + offset, mapsize,
				       PROT_NONE,
				       MAP_ANON | MAP_PRIVATE | MAP_NOCORE |
				       MAP_TRYFIXED,
				       -1, 0);
			if (mapbase != MAP_FAILED)
				break;
		}
	} else {
		mapbase = mmap(base_addr, mapsize,
			       PROT_NONE,
			       MAP_ANON | MAP_PRIVATE | MAP_NOCORE,
			       -1, 0);
	}
	if (mapbase == (caddr_t) -1) {
		_rtld_error("%s: mmap of entire address space failed: %s",
		    path, rtld_strerror(errno));
		goto error;
	}
	if (base_addr != NULL && mapbase != base_addr) {
		_rtld_error("%s: mmap returned wrong address: wanted %p, got %p",
		    path, base_addr, mapbase);
		goto error1;
	}
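
	/*
	 * Illustration (not in the original): the PROT_NONE mmap above only
	 * reserves the address range; each PT_LOAD segment is then overlaid
	 * into the reservation with MAP_FIXED.  A minimal sketch of the
	 * pattern, with hypothetical sizes and offsets:
	 *
	 *	char *base = mmap(NULL, total, PROT_NONE,
	 *	    MAP_ANON | MAP_PRIVATE, -1, 0);
	 *	mmap(base + off, len, PROT_READ | PROT_EXEC,
	 *	    MAP_PRIVATE | MAP_FIXED, fd, foff);	// e.g. text segment
	 */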
	for (i = 0; i <= nsegs; i++) {
		/* Overlay the segment onto the proper region. */
		data_offset = trunc_page(segs[i]->p_offset);
		data_vaddr = trunc_page(segs[i]->p_vaddr);
		data_vlimit = round_page(segs[i]->p_vaddr + segs[i]->p_filesz);
		data_addr = mapbase + (data_vaddr - base_vaddr);
		data_prot = convert_prot(segs[i]->p_flags);
		data_flags = convert_flags(segs[i]->p_flags) | MAP_FIXED;
		if (mmap(data_addr, data_vlimit - data_vaddr, data_prot,
		    data_flags, fd, data_offset) == (caddr_t) -1) {
			_rtld_error("%s: mmap of data failed: %s", path,
			    rtld_strerror(errno));
			goto error1;
		}
		/* Do BSS setup. */
		if (segs[i]->p_filesz != segs[i]->p_memsz) {
			/* Clear any BSS in the last page of the segment. */
			clear_vaddr = segs[i]->p_vaddr + segs[i]->p_filesz;
			clear_addr = mapbase + (clear_vaddr - base_vaddr);
			clear_page = mapbase +
			    (trunc_page(clear_vaddr) - base_vaddr);

			if ((nclear = data_vlimit - clear_vaddr) > 0) {
				/* Make sure the end of the segment is writable. */
				if ((data_prot & PROT_WRITE) == 0 && -1 ==
				    mprotect(clear_page, PAGE_SIZE,
				    data_prot | PROT_WRITE)) {
					_rtld_error("%s: mprotect failed: %s",
					    path, rtld_strerror(errno));
					goto error1;
				}

				memset(clear_addr, 0, nclear);

				/*
				 * Reset the data protection, and re-enable
				 * core dumps for the segment, since we
				 * modified it.
				 */
				if ((data_prot & PROT_WRITE) == 0) {
					madvise(clear_page, PAGE_SIZE,
					    MADV_CORE);
					mprotect(clear_page, PAGE_SIZE,
					    data_prot);
				}
			}
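
			/*
			 * Worked example (hypothetical numbers, for
			 * illustration only): with p_vaddr = 0x3000,
			 * p_filesz = 0x1234 and PAGE_SIZE = 0x1000,
			 * clear_vaddr = 0x4234 and data_vlimit =
			 * round_page(0x4234) = 0x5000, so nclear =
			 * 0x5000 - 0x4234 = 0xdcc bytes of the file's
			 * final page are zeroed above; everything past
			 * data_vlimit is covered by the anonymous BSS
			 * mapping below.
			 */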
			/* Overlay the BSS segment onto the proper region. */
			bss_vaddr = data_vlimit;
			bss_vlimit = round_page(segs[i]->p_vaddr +
			    segs[i]->p_memsz);
			bss_addr = mapbase + (bss_vaddr - base_vaddr);
			if (bss_vlimit > bss_vaddr) { /* There is something to do */
				if (mmap(bss_addr, bss_vlimit - bss_vaddr,
				    data_prot, data_flags | MAP_ANON,
				    -1, 0) == (caddr_t)-1) {
					_rtld_error(
					    "%s: mmap of bss failed: %s",
					    path, rtld_strerror(errno));
					goto error1;
				}
			}
		}
		if (phdr_vaddr == 0 && data_offset <= hdr->e_phoff &&
		    (data_vlimit - data_vaddr + data_offset) >=
		    (hdr->e_phoff + hdr->e_phnum * sizeof(Elf_Phdr))) {
			phdr_vaddr = data_vaddr + hdr->e_phoff - data_offset;
		}
	}
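
	/*
	 * Note (not in the original): the test at the end of the loop asks
	 * whether a segment's mapped file window [data_offset, data_offset +
	 * (data_vlimit - data_vaddr)) fully covers the program header table
	 * at [e_phoff, e_phoff + e_phnum * sizeof(Elf_Phdr)).  If it does,
	 * the table is already mapped, and its virtual address can be
	 * derived instead of malloc'ing a copy below.
	 */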
	obj = obj_new();
	if (sb != NULL) {
		obj->dev = sb->st_dev;
		obj->ino = sb->st_ino;
	}
	obj->mapbase = mapbase;
	obj->mapsize = mapsize;
	obj->textsize = round_page(segs[0]->p_vaddr + segs[0]->p_memsz) -
	    base_vaddr;
	obj->vaddrbase = base_vaddr;
	obj->relocbase = mapbase - base_vaddr;
	obj->dynamic = (const Elf_Dyn *) (obj->relocbase + phdyn->p_vaddr);
	if (hdr->e_entry != 0)
		obj->entry = (caddr_t) (obj->relocbase + hdr->e_entry);
	if (phdr_vaddr != 0) {
		obj->phdr = (const Elf_Phdr *) (obj->relocbase + phdr_vaddr);
	} else {
		obj->phdr = malloc(phsize);
		if (obj->phdr == NULL) {
			obj_free(obj);
			_rtld_error("%s: cannot allocate program header",
			    path);
			goto error1;
		}
		memcpy((char *)obj->phdr, (char *)hdr + hdr->e_phoff, phsize);
		obj->phdr_alloc = true;
	}
	obj->phsize = phsize;
	if (phinterp != NULL)
		obj->interp = (const char *) (obj->relocbase +
		    phinterp->p_vaddr);
	if (phtls != NULL) {
		tls_dtv_generation++;
		obj->tlsindex = ++tls_max_index;
		obj->tlssize = phtls->p_memsz;
		obj->tlsalign = phtls->p_align;
		obj->tlsinitsize = phtls->p_filesz;
		obj->tlsinit = mapbase + phtls->p_vaddr;
	}
	obj->stack_flags = stack_flags;

	obj->relro_page = obj->relocbase + trunc_page(relro_page);
	obj->relro_size = round_page(relro_size);

	if (note_start < note_end)
		digest_notes(obj, note_start, note_end);
	munmap(hdr, PAGE_SIZE);
	return obj;

error1:
	munmap(mapbase, mapsize);
error:
	munmap(hdr, PAGE_SIZE);
	return NULL;
}
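
/*
 * Illustrative only, not part of the original file: a minimal sketch of
 * how a caller might drive map_object(), assuming an rtld-style context
 * where the message recorded by _rtld_error() is retrieved elsewhere.
 *
 *	Obj_Entry *obj;
 *	struct stat sb;
 *	int fd = open(path, O_RDONLY);
 *
 *	if (fd == -1 || fstat(fd, &sb) == -1)
 *		return NULL;
 *	obj = map_object(fd, path, &sb);	// NULL on failure
 *	close(fd);				// the mapping survives the close
 */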
static Elf_Ehdr *
get_elf_header(int fd, const char *path)
{
	Elf_Ehdr *hdr;
	/* DragonFly mmap does not have MAP_PREFAULT_READ */
	hdr = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, fd, 0);
	if (hdr == (Elf_Ehdr *)MAP_FAILED) {
		_rtld_error("%s: read error: %s", path, rtld_strerror(errno));
		return NULL;
	}
	/* Make sure the file is valid. */
	if (!IS_ELF(*hdr)) {
		_rtld_error("%s: invalid file format", path);
		goto error;
	}
	if (hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
	    hdr->e_ident[EI_DATA] != ELF_TARG_DATA) {
		_rtld_error("%s: unsupported file layout", path);
		goto error;
	}
	if (hdr->e_ident[EI_VERSION] != EV_CURRENT ||
	    hdr->e_version != EV_CURRENT) {
		_rtld_error("%s: unsupported file version", path);
		goto error;
	}
	if (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN) {
		_rtld_error("%s: unsupported file type", path);
		goto error;
	}
	if (hdr->e_machine != ELF_TARG_MACH) {
		_rtld_error("%s: unsupported machine", path);
		goto error;
	}
	/*
	 * We rely on the program header being in the first page.  This is
	 * not strictly required by the ABI specification, but it seems to
	 * always be true in practice.  And, it simplifies things considerably.
	 */
	if (hdr->e_phentsize != sizeof(Elf_Phdr)) {
		_rtld_error(
		    "%s: invalid shared object: e_phentsize != sizeof(Elf_Phdr)",
		    path);
		goto error;
	}
	if (hdr->e_phoff + hdr->e_phnum * sizeof(Elf_Phdr) >
	    (size_t)PAGE_SIZE) {
		_rtld_error("%s: program header too large", path);
		goto error;
	}

	return hdr;

error:
	munmap(hdr, PAGE_SIZE);
	return NULL;
}
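
/*
 * For illustration (not part of the original): IS_ELF(), from
 * <sys/elf_common.h> on the BSDs, reduces to a check of the four magic
 * bytes at the start of e_ident, roughly:
 *
 *	hdr->e_ident[EI_MAG0] == ELFMAG0 &&	// 0x7f
 *	hdr->e_ident[EI_MAG1] == ELFMAG1 &&	// 'E'
 *	hdr->e_ident[EI_MAG2] == ELFMAG2 &&	// 'L'
 *	hdr->e_ident[EI_MAG3] == ELFMAG3	// 'F'
 */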
void
obj_free(Obj_Entry *obj)
{
	Objlist_Entry *elm;

	if (obj->tls_done)
		free_tls_offset(obj);
	while (obj->needed != NULL) {
		Needed_Entry *needed = obj->needed;
		obj->needed = needed->next;
		free(needed);
	}
	while (!STAILQ_EMPTY(&obj->names)) {
		Name_Entry *entry = STAILQ_FIRST(&obj->names);
		STAILQ_REMOVE_HEAD(&obj->names, link);
		free(entry);
	}
	while (!STAILQ_EMPTY(&obj->dldags)) {
		elm = STAILQ_FIRST(&obj->dldags);
		STAILQ_REMOVE_HEAD(&obj->dldags, link);
		free(elm);
	}
	while (!STAILQ_EMPTY(&obj->dagmembers)) {
		elm = STAILQ_FIRST(&obj->dagmembers);
		STAILQ_REMOVE_HEAD(&obj->dagmembers, link);
		free(elm);
	}
	if (obj->origin_path)
		free(obj->origin_path);
	if (obj->phdr_alloc)
		free((void *)obj->phdr);
	free(obj);
}
Obj_Entry *
obj_new(void)
{
	Obj_Entry *obj;

	obj = CNEW(Obj_Entry);
	STAILQ_INIT(&obj->dldags);
	STAILQ_INIT(&obj->dagmembers);
	STAILQ_INIT(&obj->names);
	return obj;
}
/*
 * Given a set of ELF protection flags, return the corresponding protection
 * flags for MMAP.
 */
static int
convert_prot(int elfflags)
{
	int prot = 0;

	if (elfflags & PF_R)
		prot |= PROT_READ;
	if (elfflags & PF_W)
		prot |= PROT_WRITE;
	if (elfflags & PF_X)
		prot |= PROT_EXEC;
	return prot;
}

static int
convert_flags(int elfflags)
{
	int flags = MAP_PRIVATE; /* All mappings are private */

	/*
	 * Readonly mappings are marked "MAP_NOCORE", because they can be
	 * reconstructed by a debugger.
	 */
	if (!(elfflags & PF_W))
		flags |= MAP_NOCORE;
	return flags;
}
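
/*
 * Worked example (illustrative only): a typical text segment carries
 * PF_R | PF_X, so convert_prot() yields PROT_READ | PROT_EXEC and
 * convert_flags() yields MAP_PRIVATE | MAP_NOCORE (read-only, hence
 * excluded from core dumps); a data segment with PF_R | PF_W maps to
 * PROT_READ | PROT_WRITE with plain MAP_PRIVATE.
 */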
))