/* _dl_map_object -- Map in a shared object's segments from the file.
   Copyright (C) 1995, 1996 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, write to the Free Software Foundation, Inc., 675 Mass Ave,
   Cambridge, MA 02139, USA.  */
#include <link.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdlib.h>
#include <errno.h>
#include <assert.h>		/* For the assert in _dl_map_object_from_fd.  */
#include "dynamic-link.h"

/* On some systems, no flag bits are given to specify file mapping.  */
#ifndef MAP_FILE
#define MAP_FILE	0
#endif

/* The right way to map in the shared library files is MAP_COPY, which
   makes a virtual copy of the data at the time of the mmap call; this
   guarantees the mapped pages will be consistent even if the file is
   overwritten.  Some losing VM systems like Linux's lack MAP_COPY.  All we
   get is MAP_PRIVATE, which copies each page when it is modified; this
   means if the file is overwritten, we may at some point get some pages
   from the new version after starting with pages from the old version.  */
#ifndef MAP_COPY
#define MAP_COPY	MAP_PRIVATE
#endif

#include <endian.h>
#if BYTE_ORDER == BIG_ENDIAN
#define byteorder ELFDATA2MSB
#define byteorder_name "big-endian"
#elif BYTE_ORDER == LITTLE_ENDIAN
#define byteorder ELFDATA2LSB
#define byteorder_name "little-endian"
#else
#error "Unknown BYTE_ORDER " BYTE_ORDER
#define byteorder ELFDATANONE
#endif

#define STRING(x) #x

#ifdef MAP_ANON
/* The fd is not examined when using MAP_ANON.  */
#define ANONFD -1
#else
int _dl_zerofd = -1;
#define ANONFD _dl_zerofd
#endif

size_t _dl_pagesize;


/* Map in the shared object NAME, actually located in REALNAME, and already
   opened on FD.  */

struct link_map *
_dl_map_object_from_fd (const char *name, int fd, char *realname,
                        struct link_map *loader, int l_type)
{
  struct link_map *l = NULL;
  void *file_mapping = NULL;
  size_t mapping_size = 0;

#define LOSE(s) lose (0, (s))
  void lose (int code, const char *msg)
    {
      (void) __close (fd);
      if (file_mapping)
        __munmap (file_mapping, mapping_size);
      if (l)
        {
          /* Remove the stillborn object from the list and free it.  */
          if (l->l_prev)
            l->l_prev->l_next = l->l_next;
          if (l->l_next)
            l->l_next->l_prev = l->l_prev;
          free (l);
        }
      free (realname);
      _dl_signal_error (code, name, msg);
    }
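
  /* Note: `lose' is a GNU C nested function; it can refer to the enclosing
     function's locals (fd, file_mapping, l, realname), so a single call
     unwinds whatever partial state exists at the point of failure.
     _dl_signal_error reports the error to the dynamic linker's caller and
     is not expected to return here.  */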

  inline caddr_t map_segment (ElfW(Addr) mapstart, size_t len,
                              int prot, int fixed, off_t offset)
    {
      caddr_t mapat = __mmap ((caddr_t) mapstart, len, prot,
                              fixed|MAP_COPY|MAP_FILE,
                              fd, offset);
      if (mapat == (caddr_t) -1)
        lose (errno, "failed to map segment from shared object");
      return mapat;
    }

  /* Make sure LOCATION is mapped in.  */
  void *map (off_t location, size_t size)
    {
      if ((off_t) mapping_size <= location + (off_t) size)
        {
          void *result;
          if (file_mapping)
            __munmap (file_mapping, mapping_size);
          mapping_size = (location + size + 1 + _dl_pagesize - 1);
          mapping_size &= ~(_dl_pagesize - 1);
          result = __mmap (file_mapping, mapping_size, PROT_READ,
                           MAP_COPY|MAP_FILE, fd, 0);
          if (result == (void *) -1)
            lose (errno, "cannot map file data");
          file_mapping = result;
        }
      return file_mapping + location;
    }
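
  /* For illustration: with a 4096-byte page, a request for location 0 and
     size sizeof (ElfW(Ehdr)) rounds mapping_size up to one full page, so
     later small requests near the start of the file are satisfied from the
     same mapping without another mmap call.  */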

  const ElfW(Ehdr) *header;
  const ElfW(Phdr) *phdr;
  const ElfW(Phdr) *ph;
  int type;

  /* Look again to see if the real name matched another already loaded.  */
  for (l = _dl_loaded; l; l = l->l_next)
    if (! strcmp (realname, l->l_name))
      {
        /* The object is already loaded.
           Just bump its reference count and return it.  */
        __close (fd);
        free (realname);
        ++l->l_opencount;
        return l;
      }

  if (_dl_pagesize == 0)
    _dl_pagesize = __getpagesize ();

  /* Map in the first page to read the header.  */
  header = map (0, sizeof *header);

  /* Check the header for basic validity.  */
  if (*(Elf32_Word *) &header->e_ident !=
#if BYTE_ORDER == LITTLE_ENDIAN
      ((ELFMAG0 << (EI_MAG0 * 8)) |
       (ELFMAG1 << (EI_MAG1 * 8)) |
       (ELFMAG2 << (EI_MAG2 * 8)) |
       (ELFMAG3 << (EI_MAG3 * 8)))
#else
      ((ELFMAG0 << (EI_MAG3 * 8)) |
       (ELFMAG1 << (EI_MAG2 * 8)) |
       (ELFMAG2 << (EI_MAG1 * 8)) |
       (ELFMAG3 << (EI_MAG0 * 8)))
#endif
      )
    LOSE ("invalid ELF header");
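
  /* For illustration: the comparison above folds to a single 32-bit
     constant.  The first four e_ident bytes of a valid ELF file are
     0x7f 'E' 'L' 'F'; read as an Elf32_Word they compare equal to
     0x464c457f on a little-endian host and 0x7f454c46 on a big-endian
     host.  */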
#define ELF32_CLASS ELFCLASS32
#define ELF64_CLASS ELFCLASS64
  if (header->e_ident[EI_CLASS] != ELFW(CLASS))
    LOSE ("ELF file class not " STRING(__ELF_WORDSIZE) "-bit");
  if (header->e_ident[EI_DATA] != byteorder)
    LOSE ("ELF file data encoding not " byteorder_name);
  if (header->e_ident[EI_VERSION] != EV_CURRENT)
    LOSE ("ELF file version ident not " STRING(EV_CURRENT));
  if (header->e_version != EV_CURRENT)
    LOSE ("ELF file version not " STRING(EV_CURRENT));
  if (! elf_machine_matches_host (header->e_machine))
    LOSE ("ELF file machine architecture not " ELF_MACHINE_NAME);
  if (header->e_phentsize != sizeof (ElfW(Phdr)))
    LOSE ("ELF file's phentsize not the expected size");

#ifndef MAP_ANON
#define MAP_ANON 0
  if (_dl_zerofd == -1)
    {
      _dl_zerofd = _dl_sysdep_open_zero_fill ();
      if (_dl_zerofd == -1)
        {
          __close (fd);
          _dl_signal_error (errno, NULL, "cannot open zero fill device");
        }
    }
#endif

  /* Enter the new object in the list of loaded objects.  */
  l = _dl_new_object (realname, name, l_type);
  if (! l)
    lose (ENOMEM, "cannot create shared object descriptor");
  l->l_opencount = 1;
  l->l_loader = loader;

  /* Extract the remaining details we need from the ELF header
     and then map in the program header table.  */
  l->l_entry = header->e_entry;
  type = header->e_type;
  l->l_phnum = header->e_phnum;
  phdr = map (header->e_phoff, l->l_phnum * sizeof (ElfW(Phdr)));

  {
    /* Scan the program header table, collecting its load commands.  */
    struct loadcmd
      {
        ElfW(Addr) mapstart, mapend, dataend, allocend;
        off_t mapoff;
        int prot;
      } loadcmds[l->l_phnum], *c;
    size_t nloadcmds = 0;

    l->l_ld = 0;
    l->l_phdr = 0;
    l->l_addr = 0;
    for (ph = phdr; ph < &phdr[l->l_phnum]; ++ph)
      switch (ph->p_type)
        {
          /* These entries tell us where to find things once the file's
             segments are mapped in.  We record the addresses it says
             verbatim, and later correct for the run-time load address.  */
        case PT_DYNAMIC:
          l->l_ld = (void *) ph->p_vaddr;
          break;
        case PT_PHDR:
          l->l_phdr = (void *) ph->p_vaddr;
          break;

        case PT_LOAD:
          /* A load command tells us to map in part of the file.
             We record the load commands and process them all later.  */
          if (ph->p_align % _dl_pagesize != 0)
            LOSE ("ELF load command alignment not page-aligned");
          if ((ph->p_vaddr - ph->p_offset) % ph->p_align)
            LOSE ("ELF load command address/offset not properly aligned");
          {
            struct loadcmd *c = &loadcmds[nloadcmds++];
            c->mapstart = ph->p_vaddr & ~(ph->p_align - 1);
            c->mapend = ((ph->p_vaddr + ph->p_filesz + _dl_pagesize - 1)
                         & ~(_dl_pagesize - 1));
            c->dataend = ph->p_vaddr + ph->p_filesz;
            c->allocend = ph->p_vaddr + ph->p_memsz;
            c->mapoff = ph->p_offset & ~(ph->p_align - 1);
            c->prot = 0;
            if (ph->p_flags & PF_R)
              c->prot |= PROT_READ;
            if (ph->p_flags & PF_W)
              c->prot |= PROT_WRITE;
            if (ph->p_flags & PF_X)
              c->prot |= PROT_EXEC;
            break;
          }
        }
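
    /* Worked example (illustrative values): for a PT_LOAD with p_vaddr
       0x1234, p_offset 0x1234, p_filesz 0x5000, p_memsz 0x6000, p_align
       0x1000 and a 0x1000-byte page, this yields mapstart 0x1000, mapoff
       0x1000, mapend 0x7000 (file data rounded up to a page boundary),
       dataend 0x6234 and allocend 0x7234; the span from dataend to allocend
       is the zero-filled (bss) region handled below.  */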

    /* We are done reading the file's headers now.  Unmap them.  */
    __munmap (file_mapping, mapping_size);

    /* Now process the load commands and map segments into memory.  */
    c = loadcmds;

    if (type == ET_DYN || type == ET_REL)
      {
        /* This is a position-independent shared object.  We can let the
           kernel map it anywhere it likes, but we must have space for all
           the segments in their specified positions relative to the first.
           So we map the first segment without MAP_FIXED, but with its
           extent increased to cover all the segments.  Then we remove
           access from the excess portion, and there is known to be
           sufficient space there to remap the later segments into.  */
        caddr_t mapat;
        mapat = map_segment (c->mapstart,
                             loadcmds[nloadcmds - 1].allocend - c->mapstart,
                             c->prot, 0, c->mapoff);
        l->l_addr = (ElfW(Addr)) mapat - c->mapstart;

        /* Change protection on the excess portion to disallow all access;
           the portions we do not remap later will be inaccessible as if
           unallocated.  Then jump into the normal segment-mapping loop to
           handle the portion of the segment past the end of the file
           mapping.  */
        __mprotect ((caddr_t) (l->l_addr + c->mapend),
                    loadcmds[nloadcmds - 1].allocend - c->mapend,
                    PROT_NONE);

        goto postmap;
      }
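
    /* Illustration of the load bias: if the first load command asks for
       mapstart 0 and the kernel happens to place the mapping at 0x40000000,
       then l_addr is 0x40000000 and every file-relative address (l_ld,
       l_phdr, l_entry, segment addresses) is adjusted by that amount
       below.  */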

    while (c < &loadcmds[nloadcmds])
      {
        if (c->mapend > c->mapstart)
          /* Map the segment contents from the file.  */
          map_segment (l->l_addr + c->mapstart, c->mapend - c->mapstart,
                       c->prot, MAP_FIXED, c->mapoff);

      postmap:
        if (c->allocend > c->dataend)
          {
            /* Extra zero pages should appear at the end of this segment,
               after the data mapped from the file.  */
            ElfW(Addr) zero, zeroend, zeropage;

            zero = l->l_addr + c->dataend;
            zeroend = l->l_addr + c->allocend;
            zeropage = (zero + _dl_pagesize - 1) & ~(_dl_pagesize - 1);

            if (zeroend < zeropage)
              /* All the extra data is in the last page of the segment.
                 We can just zero it.  */
              zeropage = zeroend;

            if (zeropage > zero)
              {
                /* Zero the final part of the last page of the segment.  */
                if ((c->prot & PROT_WRITE) == 0)
                  {
                    /* Dag nab it.  */
                    if (__mprotect ((caddr_t) (zero & ~(_dl_pagesize - 1)),
                                    _dl_pagesize, c->prot|PROT_WRITE) < 0)
                      lose (errno, "cannot change memory protections");
                  }
                memset ((void *) zero, 0, zeropage - zero);
                if ((c->prot & PROT_WRITE) == 0)
                  __mprotect ((caddr_t) (zero & ~(_dl_pagesize - 1)),
                              _dl_pagesize, c->prot);
              }

            if (zeroend > zeropage)
              {
                /* Map the remaining zero pages in from the zero fill FD.  */
                caddr_t mapat;
                mapat = __mmap ((caddr_t) zeropage, zeroend - zeropage,
                                c->prot, MAP_ANON|MAP_PRIVATE|MAP_FIXED,
                                ANONFD, 0);
                if (mapat == (caddr_t) -1)
                  lose (errno, "cannot map zero-fill pages");
              }
          }

        ++c;
      }
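
    /* Continuing the worked example above: with dataend 0x6234 and allocend
       0x7234 (plus the load bias), the tail of the last file-backed page is
       cleared with memset (from 0x6234 up to the 0x7000 page boundary), and
       the region from 0x7000 to 0x7234 is then mapped as anonymous
       zero-fill pages.  */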

    if (l->l_phdr == 0)
      {
        /* There was no PT_PHDR specified.  We need to find the phdr in the
           load image ourselves.  We assume it is in fact in the load image
           somewhere, and that the first load command starts at the
           beginning of the file and thus contains the ELF file header.  */
        ElfW(Addr) bof = l->l_addr + loadcmds[0].mapstart;
        assert (loadcmds[0].mapoff == 0);
        l->l_phdr = (void *) (bof + ((const ElfW(Ehdr) *) bof)->e_phoff);
      }
    else
      /* Adjust the PT_PHDR value by the runtime load address.  */
      (ElfW(Addr)) l->l_phdr += l->l_addr;
  }

  /* We are done mapping in the file.  We no longer need the descriptor.  */
  __close (fd);

  if (l->l_type == lt_library && type == ET_EXEC)
    l->l_type = lt_executable;

  if (l->l_ld == 0)
    {
      if (type == ET_DYN)
        LOSE ("object file has no dynamic section");
    }
  else
    (ElfW(Addr)) l->l_ld += l->l_addr;

  l->l_entry += l->l_addr;

  elf_get_dynamic_info (l->l_ld, l->l_info);
  if (l->l_info[DT_HASH])
    _dl_setup_hash (l);

  return l;
}

/* Try to open NAME in one of the directories in DIRPATH.
   Return the fd, or -1.  If successful, fill in *REALNAME
   with the malloc'd full directory name.  */

static int
open_path (const char *name, size_t namelen,
           const char *dirpath,
           char **realname)
{
  char *buf;
  const char *p;
  int fd;

  p = dirpath;
  if (p == NULL || *p == '\0')
    {
      errno = ENOENT;
      return -1;
    }

  buf = __alloca (strlen (dirpath) + 1 + namelen);
  do
    {
      size_t buflen;

      dirpath = p;
      p = strpbrk (dirpath, ":;");
      if (p == NULL)
        p = strchr (dirpath, '\0');

      if (p == dirpath)
        {
          /* Two adjacent colons, or a colon at the beginning or the end of
             the path means to search the current directory.  */
          (void) memcpy (buf, name, namelen);
          buflen = namelen;
        }
      else
        {
          /* Construct the pathname to try.  */
          (void) memcpy (buf, dirpath, p - dirpath);
          buf[p - dirpath] = '/';
          (void) memcpy (&buf[(p - dirpath) + 1], name, namelen);
          buflen = p - dirpath + 1 + namelen;
        }

      fd = __open (buf, O_RDONLY);
      if (fd != -1)
        {
          *realname = malloc (buflen);
          if (*realname)
            {
              memcpy (*realname, buf, buflen);
              return fd;
            }
          else
            {
              /* No memory for the name; we certainly won't be able
                 to load and link it.  */
              __close (fd);
              return -1;
            }
        }
      if (errno != ENOENT && errno != EACCES)
        /* The open failed for a reason other than the file being absent or
           inaccessible; there is no point in trying other directories.  */
        return -1;
    }
  while (*p++ != '\0');

  return -1;
}
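
/* For illustration: called with name "libc.so" and dirpath
   "/usr/local/lib:/usr/lib", open_path first tries
   "/usr/local/lib/libc.so", then "/usr/lib/libc.so"; an empty component
   (a leading, trailing, or doubled separator) means the current working
   directory, i.e. "libc.so" with no directory prefix.  */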

/* Map in the shared object file NAME.  */

struct link_map *
_dl_map_object (struct link_map *loader, const char *name, int type)
{
  int fd;
  char *realname;
  struct link_map *l;

  /* Look for this name among those already loaded.  */
  for (l = _dl_loaded; l; l = l->l_next)
    if (! strcmp (name, l->l_libname) ||   /* NAME was requested before.  */
        ! strcmp (name, l->l_name) ||      /* NAME was found before.  */
        /* If the requested name matches the soname of a loaded object,
           use that object.  */
        (l->l_info[DT_SONAME] &&
         ! strcmp (name, (const char *) (l->l_addr +
                                         l->l_info[DT_STRTAB]->d_un.d_ptr +
                                         l->l_info[DT_SONAME]->d_un.d_val))))
      {
        /* The object is already loaded.
           Just bump its reference count and return it.  */
        ++l->l_opencount;
        return l;
      }
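
  /* If NAME contains no slash, the code below searches for it, in order,
     in: each loader's DT_RPATH, the executable's own DT_RPATH,
     LD_LIBRARY_PATH (skipped for set-uid programs), the /etc/ld.so.cache
     list maintained by ldconfig, and finally the default search path set
     in rtld.c.  A NAME containing a slash is opened as given.  */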

  if (strchr (name, '/') == NULL)
    {
      /* Search for NAME in several places.  */

      size_t namelen = strlen (name) + 1;

      inline void trypath (const char *dirpath)
        {
          fd = open_path (name, namelen, dirpath, &realname);
        }

      fd = -1;

      /* First try the DT_RPATH of the dependent object that caused NAME
         to be loaded.  Then the object that loaded that one, and so on up
         the chain of loaders.  */
      for (l = loader; fd == -1 && l; l = l->l_loader)
        if (l && l->l_info[DT_RPATH])
          trypath ((const char *) (l->l_addr +
                                   l->l_info[DT_STRTAB]->d_un.d_ptr +
                                   l->l_info[DT_RPATH]->d_un.d_val));
      /* If dynamically linked, try the DT_RPATH of the executable itself.  */
      l = _dl_loaded;
      if (fd == -1 && l && l->l_type != lt_loaded && l->l_info[DT_RPATH])
        trypath ((const char *) (l->l_addr +
                                 l->l_info[DT_STRTAB]->d_un.d_ptr +
                                 l->l_info[DT_RPATH]->d_un.d_val));
      /* Try an environment variable (unless setuid).  */
      if (fd == -1 && ! _dl_secure)
        trypath (getenv ("LD_LIBRARY_PATH"));
      if (fd == -1)
        {
          /* Check the list of libraries in the file /etc/ld.so.cache,
             for compatibility with Linux's ldconfig program.  */
          extern const char *_dl_load_cache_lookup (const char *name);
          const char *cached = _dl_load_cache_lookup (name);
          if (cached)
            {
              fd = __open (cached, O_RDONLY);
              if (fd != -1)
                {
                  size_t cl = strlen (cached) + 1;
                  realname = malloc (cl);
                  if (realname)
                    memcpy (realname, cached, cl);
                  else
                    {
                      __close (fd);
                      fd = -1;
                    }
                }
            }
        }

      /* Finally, try the default path.  */
      if (fd == -1)
        {
          extern const char *_dl_rpath;   /* Set in rtld.c.  */
          trypath (_dl_rpath);
        }
    }
  else
    {
      fd = __open (name, O_RDONLY);
      if (fd != -1)
        {
          size_t len = strlen (name) + 1;
          realname = malloc (len);
          if (realname)
            memcpy (realname, name, len);
          else
            {
              __close (fd);
              fd = -1;
            }
        }
    }

  if (fd == -1)
    _dl_signal_error (errno, name, "cannot open shared object file");

  return _dl_map_object_from_fd (name, fd, realname, loader, type);
}