/* Map in a shared object's segments from the file.
   Copyright (C) 1995, 1996, 1997, 1998 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

#include <errno.h>
#include <fcntl.h>
#include <link.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include "dynamic-link.h"

/* On some systems, no flag bits are given to specify file mapping.  */
#ifndef MAP_FILE
#define MAP_FILE 0
#endif

/* The right way to map in the shared library files is MAP_COPY, which
   makes a virtual copy of the data at the time of the mmap call; this
   guarantees the mapped pages will be consistent even if the file is
   overwritten.  Some losing VM systems like Linux's lack MAP_COPY.  All we
   get is MAP_PRIVATE, which copies each page when it is modified; this
   means if the file is overwritten, we may at some point get some pages
   from the new version after starting with pages from the old version.  */
#ifndef MAP_COPY
#define MAP_COPY MAP_PRIVATE
#endif

/* Some systems link their relocatable objects for another base address
   than 0.  We want to know the base address for these such that we can
   subtract this address from the segment addresses during mapping.
   This results in a more efficient address space usage.  Defaults to
   zero for almost all systems.  */
#ifndef MAP_BASE_ADDR
#define MAP_BASE_ADDR(l) 0
#endif

#include <endian.h>
#if BYTE_ORDER == BIG_ENDIAN
#define byteorder ELFDATA2MSB
#define byteorder_name "big-endian"
#elif BYTE_ORDER == LITTLE_ENDIAN
#define byteorder ELFDATA2LSB
#define byteorder_name "little-endian"
#else
#error "Unknown BYTE_ORDER " BYTE_ORDER
#define byteorder ELFDATANONE
#endif

#define STRING(x) #x

#ifdef MAP_ANON
/* The fd is not examined when using MAP_ANON.  */
#define ANONFD -1
#else
int _dl_zerofd = -1;
#define ANONFD _dl_zerofd
#endif

/* Handle situations where we have a preferred location in memory for
   the shared objects.  */
#ifdef ELF_PREFERRED_ADDRESS_DATA
ELF_PREFERRED_ADDRESS_DATA;
#endif
#ifndef ELF_PREFERRED_ADDRESS
#define ELF_PREFERRED_ADDRESS(loader, maplength, mapstartpref) (mapstartpref)
#endif
#ifndef ELF_FIXED_ADDRESS
#define ELF_FIXED_ADDRESS(loader, mapstart) ((void) 0)
#endif

size_t _dl_pagesize;

extern const char *_dl_platform;
extern size_t _dl_platformlen;

/* This is a fake list to store the RPATH information for static
   binaries.  */
static struct r_search_path_elem **fake_path_list;

/* Local version of `strdup' function.  */
static inline char *
local_strdup (const char *s)
{
  size_t len = strlen (s) + 1;
  void *new = malloc (len);

  if (new == NULL)
    return NULL;

  return (char *) memcpy (new, s, len);
}

/* Add `name' to the list of names for a particular shared object.
   `name' is expected to have been allocated with malloc and will
   be freed if the shared object already has this name.
   Returns false if the object already had this name.  */
static int
add_name_to_object (struct link_map *l, char *name)
{
  struct libname_list *lnp, *lastp;
  struct libname_list *newname;

  if (name == NULL)
    {
      /* No more memory.  */
      _dl_signal_error (ENOMEM, NULL, "could not allocate name string");
      return 0;
    }

  lastp = NULL;
  for (lnp = l->l_libname; lnp != NULL; lastp = lnp, lnp = lnp->next)
    if (strcmp (name, lnp->name) == 0)
      {
        free (name);
        return 0;
      }

  newname = malloc (sizeof *newname);
  if (newname == NULL)
    {
      /* No more memory.  */
      _dl_signal_error (ENOMEM, name, "cannot allocate name record");
      free (name);
      return 0;
    }

  /* The object should have a libname set from _dl_new_object.  */
  assert (lastp != NULL);

  newname->name = name;
  newname->next = NULL;
  lastp->next = newname;
  return 1;
}

/* Implement cache for search path lookup.  */
#include "rtldtbl.h"

static size_t max_dirnamelen;

static inline struct r_search_path_elem **
fillin_rpath (char *rpath, struct r_search_path_elem **result, const char *sep,
              const char **trusted)
{
  char *cp;
  size_t nelems = 0;

  while ((cp = __strsep (&rpath, sep)) != NULL)
    {
      struct r_search_path_elem *dirp;
      size_t len = strlen (cp);
      /* Remove trailing slashes.  */
      while (len > 1 && cp[len - 1] == '/')
        --len;

      /* Make sure we don't use untrusted directories if we run SUID.  */
      if (trusted != NULL)
        {
          const char **trun = trusted;

          /* All trusted directories must be complete names.  */
          if (cp[0] != '/')
            continue;

          while (*trun != NULL
                 && (memcmp (*trun, cp, len) != 0 || (*trun)[len] != '\0'))
            ++trun;

          if (*trun == NULL)
            /* It's not a trusted directory, skip it.  */
            continue;
        }

      /* Now add one.  */
      if (len > 0)
        cp[len++] = '/';

      /* See if this directory is already known.  */
      for (dirp = all_dirs; dirp != NULL; dirp = dirp->next)
        if (dirp->dirnamelen == len && strcmp (cp, dirp->dirname) == 0)
          break;

      if (dirp != NULL)
        {
          /* It is available, see whether it's in our own list.  */
          size_t cnt;
          for (cnt = 0; cnt < nelems; ++cnt)
            if (result[cnt] == dirp)
              break;

          if (cnt == nelems)
            result[nelems++] = dirp;
        }
      else
        {
          /* It's a new directory.  Create an entry and add it.  */
          dirp = (struct r_search_path_elem *) malloc (sizeof (*dirp));
          if (dirp == NULL)
            _dl_signal_error (ENOMEM, NULL,
                              "cannot create cache for search path");

          dirp->dirnamelen = len;
          /* We have to make sure all the relative directories are never
             ignored.  The current directory might change and all our
             saved information would be void.  */
          dirp->dirstatus = cp[0] != '/' ? existing : unknown;

          /* Add the name of the machine dependent directory if a machine
             is defined.  */
          if (_dl_platform != NULL)
            {
              char *tmp;

              dirp->machdirnamelen = len + _dl_platformlen + 1;
              tmp = (char *) malloc (len + _dl_platformlen + 2);
              if (tmp == NULL)
                _dl_signal_error (ENOMEM, NULL,
                                  "cannot create cache for search path");
              dirp->dirname = tmp;
              tmp = __mempcpy (tmp, cp, len);
              tmp = __mempcpy (tmp, _dl_platform, _dl_platformlen);
              *tmp++ = '/';
              *tmp = '\0';

              dirp->machdirstatus = dirp->dirstatus;

              if (max_dirnamelen < dirp->machdirnamelen)
                max_dirnamelen = dirp->machdirnamelen;
            }
          else
            {
              char *tmp;

              dirp->machdirnamelen = len;
              dirp->machdirstatus = nonexisting;

              tmp = (char *) malloc (len + 1);
              if (tmp == NULL)
                _dl_signal_error (ENOMEM, NULL,
                                  "cannot create cache for search path");
              dirp->dirname = tmp;
              *((char *) __mempcpy (tmp, cp, len)) = '\0';

              if (max_dirnamelen < dirp->dirnamelen)
                max_dirnamelen = dirp->dirnamelen;
            }

          dirp->next = all_dirs;
          all_dirs = dirp;

          /* Put it in the result array.  */
          result[nelems++] = dirp;
        }
    }

  /* Terminate the array.  */
  result[nelems] = NULL;

  return result;
}

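/* Example (hypothetical input): given the string "/usr/local/lib/:lib"
   and no trusted-directory restriction, the first element loses its
   trailing slash and is stored as "/usr/local/lib/" (a single '/' is
   re-appended so file names can simply be concatenated onto it); the
   relative entry "lib" becomes "lib/" and is marked `existing' so it is
   never cached as missing, since the current directory can change.
   When _dl_platform is set (say "i686"), a machine dependent variant
   "/usr/local/lib/i686/" is recorded in the same entry as well.  */
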
static struct r_search_path_elem **
decompose_rpath (const char *rpath, size_t additional_room)
{
  /* Make a copy we can work with.  */
  char *copy = strdupa (rpath);
  char *cp;
  struct r_search_path_elem **result;
  /* First count the number of necessary elements in the result array.  */
  size_t nelems = 0;

  for (cp = copy; *cp != '\0'; ++cp)
    if (*cp == ':')
      ++nelems;

  /* Allocate room for the result.  NELEMS + 1 + ADDITIONAL_ROOM is an upper
     limit for the number of necessary entries.  */
  result = (struct r_search_path_elem **) malloc ((nelems + 1
                                                   + additional_room + 1)
                                                  * sizeof (*result));
  if (result == NULL)
    _dl_signal_error (ENOMEM, NULL, "cannot create cache for search path");

  return fillin_rpath (copy, result, ":", NULL);
}

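/* Sizing example (hypothetical RPATH): "/a/lib:/b/lib:/c/lib" contains two
   ':' characters, so nelems = 2 and the array gets room for
   2 + 1 + additional_room + 1 pointers -- enough for the (at most) three
   directories, any extra LD_LIBRARY_PATH elements the caller asks room
   for, and the terminating NULL that fillin_rpath stores.  */
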
void
_dl_init_paths (const char *llp)
{
  static const char *trusted_dirs[] =
  {
#include "trusted-dirs.h"
    NULL
  };

  struct r_search_path_elem **pelem;

  /* We have in `search_path' the information about the RPATH of the
     dynamic loader.  Now fill in the information about the application's
     RPATH and the directories addressed by the LD_LIBRARY_PATH environment
     variable.  */
  struct link_map *l;

  /* Number of elements in the library path.  */
  size_t nllp;

  /* If the user has not specified a library path consider the environment
     variable.  */
  if (llp == NULL)
    llp = getenv ("LD_LIBRARY_PATH");

  /* First determine how many elements the LD_LIBRARY_PATH contents has.  */
  if (llp != NULL && *llp != '\0')
    {
      /* Simply count the number of colons.  */
      const char *cp = llp;
      nllp = 1;
      while (*cp)
        if (*cp++ == ':')
          ++nllp;
    }
  else
    nllp = 0;

  l = _dl_loaded;
  if (l != NULL)
    {
      if (l->l_type != lt_loaded && l->l_info[DT_RPATH])
        {
          /* Allocate room for the search path and fill in information
             from RPATH.  */
          l->l_rpath_dirs =
            decompose_rpath ((const char *)
                             (l->l_addr + l->l_info[DT_STRTAB]->d_un.d_ptr
                              + l->l_info[DT_RPATH]->d_un.d_val),
                             nllp);
        }
      else
        {
          /* If we have no LD_LIBRARY_PATH and no RPATH we must record
             this somehow to prevent looking this up again and again.  */
          if (nllp == 0)
            l->l_rpath_dirs = (struct r_search_path_elem **) -1l;
          else
            {
              l->l_rpath_dirs = (struct r_search_path_elem **)
                malloc ((nllp + 1) * sizeof (*l->l_rpath_dirs));
              if (l->l_rpath_dirs == NULL)
                _dl_signal_error (ENOMEM, NULL,
                                  "cannot create cache for search path");
              l->l_rpath_dirs[0] = NULL;
            }
        }

      /* We don't need to search the list of fake entries which is searched
         when no dynamic objects were loaded at this time.  */
      fake_path_list = NULL;

      if (nllp > 0)
        {
          char *copy = strdupa (llp);

          /* Decompose the LD_LIBRARY_PATH and fill in the result.
             First search for the next place to enter elements.  */
          struct r_search_path_elem **result = l->l_rpath_dirs;
          while (*result != NULL)
            ++result;

          /* We need to take care that the LD_LIBRARY_PATH environment
             variable can contain a semicolon.  */
          (void) fillin_rpath (copy, result, ":;",
                               __libc_enable_secure ? trusted_dirs : NULL);
        }
    }
  else
    {
      /* This is a statically linked program but we still have to
         take care of the LD_LIBRARY_PATH environment variable.  We
         use a fake link_map entry.  This will only contain the
         l_rpath_dirs information.  */

      if (nllp == 0)
        fake_path_list = NULL;
      else
        {
          fake_path_list = (struct r_search_path_elem **)
            malloc ((nllp + 1) * sizeof (struct r_search_path_elem *));
          if (fake_path_list == NULL)
            _dl_signal_error (ENOMEM, NULL,
                              "cannot create cache for search path");

          (void) fillin_rpath (local_strdup (llp), fake_path_list, ":;",
                               __libc_enable_secure ? trusted_dirs : NULL);
        }
    }

  /* Now set up the rest of the rtld_search_dirs.  */
  for (pelem = rtld_search_dirs; *pelem != NULL; ++pelem)
    {
      struct r_search_path_elem *relem = *pelem;

      if (_dl_platform != NULL)
        {
          char *tmp, *orig;

          relem->machdirnamelen = relem->dirnamelen + _dl_platformlen + 1;
          tmp = (char *) malloc (relem->machdirnamelen + 1);
          if (tmp == NULL)
            _dl_signal_error (ENOMEM, NULL,
                              "cannot create cache for search path");

          orig = tmp;
          tmp = __mempcpy (tmp, relem->dirname, relem->dirnamelen);
          tmp = __mempcpy (tmp, _dl_platform, _dl_platformlen);
          *tmp++ = '/';
          *tmp = '\0';
          relem->dirname = orig;

          relem->machdirstatus = unknown;

          if (max_dirnamelen < relem->machdirnamelen)
            max_dirnamelen = relem->machdirnamelen;
        }
      else
        {
          relem->machdirnamelen = relem->dirnamelen;
          relem->machdirstatus = nonexisting;

          if (max_dirnamelen < relem->dirnamelen)
            max_dirnamelen = relem->dirnamelen;
        }
    }
}

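/* Example (hypothetical environment): with LD_LIBRARY_PATH set to
   "/opt/lib:/home/user/lib" the counting loop above sees one ':' and
   yields nllp = 2, so two extra slots plus the NULL terminator are
   reserved -- either via the additional_room argument of decompose_rpath
   when the executable has an RPATH, or in fake_path_list for a static
   program.  fillin_rpath is then called with ":;" as the separator set,
   so a semicolon also ends an element.  */
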
/* Map in the shared object NAME, actually located in REALNAME, and already
   opened on FD.  */

struct link_map *
_dl_map_object_from_fd (char *name, int fd, char *realname,
                        struct link_map *loader, int l_type)
{
  struct link_map *l = NULL;
  void *file_mapping = NULL;
  size_t mapping_size = 0;

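  /* NOTE: `lose', `map_segment', and `map' below are nested functions, a
     GCC extension.  They close over FD, FILE_MAPPING, MAPPING_SIZE, L,
     NAME, and REALNAME from this frame, so every error path can release
     whatever has been acquired so far before signalling the error.  */
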
#define LOSE(s) lose (0, (s))
  void lose (int code, const char *msg)
    {
      (void) __close (fd);
      if (file_mapping)
        __munmap (file_mapping, mapping_size);
      if (l)
        {
          /* Remove the stillborn object from the list and free it.  */
          if (l->l_prev)
            l->l_prev->l_next = l->l_next;
          if (l->l_next)
            l->l_next->l_prev = l->l_prev;
          free (l);
        }
      free (realname);
      _dl_signal_error (code, name, msg);
      free (name);              /* Hmmm.  Can this leak memory?  Better
                                   than a segfault, anyway.  */
    }

  inline caddr_t map_segment (ElfW(Addr) mapstart, size_t len,
                              int prot, int fixed, off_t offset)
    {
      caddr_t mapat = __mmap ((caddr_t) mapstart, len, prot,
                              fixed|MAP_COPY|MAP_FILE,
                              fd, offset);
      if (mapat == MAP_FAILED)
        lose (errno, "failed to map segment from shared object");
      return mapat;
    }

  /* Make sure LOCATION is mapped in.  */
  void *map (off_t location, size_t size)
    {
      if ((off_t) mapping_size <= location + (off_t) size)
        {
          void *result;
          if (file_mapping)
            __munmap (file_mapping, mapping_size);
          mapping_size = (location + size + 1 + _dl_pagesize - 1);
          mapping_size &= ~(_dl_pagesize - 1);
          result = __mmap (file_mapping, mapping_size, PROT_READ,
                           MAP_COPY|MAP_FILE, fd, 0);
          if (result == MAP_FAILED)
            lose (errno, "cannot map file data");
          file_mapping = result;
        }
      return file_mapping + location;
    }

  const ElfW(Ehdr) *header;
  const ElfW(Phdr) *phdr;
  const ElfW(Phdr) *ph;
  int type;

  /* Look again to see if the real name matched another already loaded.  */
  for (l = _dl_loaded; l; l = l->l_next)
    if (! strcmp (realname, l->l_name))
      {
        /* The object is already loaded.
           Just bump its reference count and return it.  */
        __close (fd);

        /* If the name is not in the list of names for this object add
           it.  */
        free (realname);
        add_name_to_object (l, name);
        ++l->l_opencount;
        return l;
      }

  /* Map in the first page to read the header.  */
  header = map (0, sizeof *header);

  /* Check the header for basic validity.  */
  if (*(Elf32_Word *) &header->e_ident !=
#if BYTE_ORDER == LITTLE_ENDIAN
      ((ELFMAG0 << (EI_MAG0 * 8)) |
       (ELFMAG1 << (EI_MAG1 * 8)) |
       (ELFMAG2 << (EI_MAG2 * 8)) |
       (ELFMAG3 << (EI_MAG3 * 8)))
#else
      ((ELFMAG0 << (EI_MAG3 * 8)) |
       (ELFMAG1 << (EI_MAG2 * 8)) |
       (ELFMAG2 << (EI_MAG1 * 8)) |
       (ELFMAG3 << (EI_MAG0 * 8)))
#endif
      )
    LOSE ("invalid ELF header");
#define ELF32_CLASS ELFCLASS32
#define ELF64_CLASS ELFCLASS64
  if (header->e_ident[EI_CLASS] != ELFW(CLASS))
    LOSE ("ELF file class not " STRING(__ELF_WORDSIZE) "-bit");
  if (header->e_ident[EI_DATA] != byteorder)
    LOSE ("ELF file data encoding not " byteorder_name);
  if (header->e_ident[EI_VERSION] != EV_CURRENT)
    LOSE ("ELF file version ident not " STRING(EV_CURRENT));
  if (header->e_version != EV_CURRENT)
    LOSE ("ELF file version not " STRING(EV_CURRENT));
  if (! elf_machine_matches_host (header->e_machine))
    LOSE ("ELF file machine architecture not " ELF_MACHINE_NAME);
  if (header->e_phentsize != sizeof (ElfW(Phdr)))
    LOSE ("ELF file's phentsize not the expected size");

#ifndef MAP_ANON
#define MAP_ANON 0
  if (_dl_zerofd == -1)
    {
      _dl_zerofd = _dl_sysdep_open_zero_fill ();
      if (_dl_zerofd == -1)
        {
          __close (fd);
          _dl_signal_error (errno, NULL, "cannot open zero fill device");
        }
    }
#endif

  /* Enter the new object in the list of loaded objects.  */
  l = _dl_new_object (realname, name, l_type);
  if (! l)
    lose (ENOMEM, "cannot create shared object descriptor");
  l->l_opencount = 1;
  l->l_loader = loader;

  /* Extract the remaining details we need from the ELF header
     and then map in the program header table.  */
  l->l_entry = header->e_entry;
  type = header->e_type;
  l->l_phnum = header->e_phnum;
  phdr = map (header->e_phoff, l->l_phnum * sizeof (ElfW(Phdr)));

  {
    /* Scan the program header table, collecting its load commands.  */
    struct loadcmd
      {
        ElfW(Addr) mapstart, mapend, dataend, allocend;
        off_t mapoff;
        int prot;
      } loadcmds[l->l_phnum], *c;
    size_t nloadcmds = 0;

    l->l_ld = 0;
    l->l_phdr = 0;
    l->l_addr = 0;
    for (ph = phdr; ph < &phdr[l->l_phnum]; ++ph)
      switch (ph->p_type)
        {
          /* These entries tell us where to find things once the file's
             segments are mapped in.  We record the addresses it says
             verbatim, and later correct for the run-time load address.  */
        case PT_DYNAMIC:
          l->l_ld = (void *) ph->p_vaddr;
          break;
        case PT_PHDR:
          l->l_phdr = (void *) ph->p_vaddr;
          break;

        case PT_LOAD:
          /* A load command tells us to map in part of the file.
             We record the load commands and process them all later.  */
          if (ph->p_align % _dl_pagesize != 0)
            LOSE ("ELF load command alignment not page-aligned");
          if ((ph->p_vaddr - ph->p_offset) % ph->p_align)
            LOSE ("ELF load command address/offset not properly aligned");
          {
            struct loadcmd *c = &loadcmds[nloadcmds++];
            c->mapstart = ph->p_vaddr & ~(ph->p_align - 1);
            c->mapend = ((ph->p_vaddr + ph->p_filesz + _dl_pagesize - 1)
                         & ~(_dl_pagesize - 1));
            c->dataend = ph->p_vaddr + ph->p_filesz;
            c->allocend = ph->p_vaddr + ph->p_memsz;
            c->mapoff = ph->p_offset & ~(ph->p_align - 1);
            c->prot = 0;
            if (ph->p_flags & PF_R)
              c->prot |= PROT_READ;
            if (ph->p_flags & PF_W)
              c->prot |= PROT_WRITE;
            if (ph->p_flags & PF_X)
              c->prot |= PROT_EXEC;
          }
          break;
        }

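    /* Worked example (hypothetical numbers, 0x1000-byte pages and
       p_align == 0x1000): a PT_LOAD entry with p_vaddr = 0x1234,
       p_offset = 0x234, p_filesz = 0x5000, p_memsz = 0x6000 gives
         mapstart = 0x1234 & ~0xfff                    = 0x1000
         mapend   = (0x1234 + 0x5000 + 0xfff) & ~0xfff = 0x7000
         dataend  = 0x1234 + 0x5000                    = 0x6234
         allocend = 0x1234 + 0x6000                    = 0x7234
         mapoff   = 0x234 & ~0xfff                     = 0
       i.e. the file contents are mapped page-aligned and the BSS part
       (dataend up to allocend) is filled with zeros further below.  */
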
    /* We are done reading the file's headers now.  Unmap them.  */
    __munmap (file_mapping, mapping_size);

    /* Now process the load commands and map segments into memory.  */
    c = loadcmds;

    if (type == ET_DYN || type == ET_REL)
      {
        /* This is a position-independent shared object.  We can let the
           kernel map it anywhere it likes, but we must have space for all
           the segments in their specified positions relative to the first.
           So we map the first segment without MAP_FIXED, but with its
           extent increased to cover all the segments.  Then we remove
           access from the excess portion, and there is known sufficient
           space there to remap from the later segments.

           As a refinement, sometimes we have an address that we would
           prefer to map such objects at; but this is only a preference,
           the OS can do whatever it likes.  */
        caddr_t mapat;
        ElfW(Addr) mappref;
        size_t maplength = loadcmds[nloadcmds - 1].allocend - c->mapstart;
        mappref = (ELF_PREFERRED_ADDRESS (loader, maplength, c->mapstart)
                   - MAP_BASE_ADDR (l));
        mapat = map_segment (mappref, maplength, c->prot, 0, c->mapoff);
        l->l_addr = (ElfW(Addr)) mapat - c->mapstart;

        /* Change protection on the excess portion to disallow all access;
           the portions we do not remap later will be inaccessible as if
           unallocated.  Then jump into the normal segment-mapping loop to
           handle the portion of the segment past the end of the file
           mapping.  */
        __mprotect ((caddr_t) (l->l_addr + c->mapend),
                    loadcmds[nloadcmds - 1].allocend - c->mapend,
                    PROT_NONE);

        goto postmap;
      }
    else
      /* Notify ELF_PREFERRED_ADDRESS that we have to load this one
         fixed.  */
      ELF_FIXED_ADDRESS (loader, c->mapstart);

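    /* L->l_addr is the load bias: the difference between where the first
       segment actually landed and where it asked to be.  Continuing the
       hypothetical numbers above, if the kernel returned mapat = 0x40000000
       for mapstart = 0x1000, then l_addr = 0x3ffff000 and every p_vaddr
       from the file is relocated by adding l_addr.  For an ET_EXEC object
       mapped at its linked address the bias stays 0.  */
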
    while (c < &loadcmds[nloadcmds])
      {
        if (c->mapend > c->mapstart)
          /* Map the segment contents from the file.  */
          map_segment (l->l_addr + c->mapstart, c->mapend - c->mapstart,
                       c->prot, MAP_FIXED, c->mapoff);

      postmap:
        if (c->allocend > c->dataend)
          {
            /* Extra zero pages should appear at the end of this segment,
               after the data mapped from the file.  */
            ElfW(Addr) zero, zeroend, zeropage;

            zero = l->l_addr + c->dataend;
            zeroend = l->l_addr + c->allocend;
            zeropage = (zero + _dl_pagesize - 1) & ~(_dl_pagesize - 1);

            if (zeroend < zeropage)
              /* All the extra data is in the last page of the segment.
                 We can just zero it.  */
              zeropage = zeroend;

            if (zeropage > zero)
              {
                /* Zero the final part of the last page of the segment.  */
                if ((c->prot & PROT_WRITE) == 0)
                  {
                    /* Dag nab it.  */
                    if (__mprotect ((caddr_t) (zero & ~(_dl_pagesize - 1)),
                                    _dl_pagesize, c->prot|PROT_WRITE) < 0)
                      lose (errno, "cannot change memory protections");
                  }
                memset ((void *) zero, 0, zeropage - zero);
                if ((c->prot & PROT_WRITE) == 0)
                  __mprotect ((caddr_t) (zero & ~(_dl_pagesize - 1)),
                              _dl_pagesize, c->prot);
              }

            if (zeroend > zeropage)
              {
                /* Map the remaining zero pages in from the zero fill FD.  */
                caddr_t mapat;
                mapat = __mmap ((caddr_t) zeropage, zeroend - zeropage,
                                c->prot, MAP_ANON|MAP_PRIVATE|MAP_FIXED,
                                ANONFD, 0);
                if (mapat == MAP_FAILED)
                  lose (errno, "cannot map zero-fill pages");
              }
          }

        ++c;
      }

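    /* Zero-fill, continuing the example (taking l_addr = 0 for
       simplicity): dataend = 0x6234 and allocend = 0x7234 give
       zero = 0x6234, zeroend = 0x7234 and zeropage = 0x7000.  The tail of
       the last file-backed page (0x6234..0x7000) is cleared with memset,
       temporarily adding PROT_WRITE if the segment is read-only, and the
       remaining space (0x7000..0x7234) is covered by an anonymous
       zero-fill mapping.  */
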
    if (l->l_phdr == 0)
      {
        /* There was no PT_PHDR specified.  We need to find the phdr in the
           load image ourselves.  We assume it is in fact in the load image
           somewhere, and that the first load command starts at the
           beginning of the file and thus contains the ELF file header.  */
        ElfW(Addr) bof = l->l_addr + loadcmds[0].mapstart;
        assert (loadcmds[0].mapoff == 0);
        l->l_phdr = (void *) (bof + ((const ElfW(Ehdr) *) bof)->e_phoff);
      }
    else
      /* Adjust the PT_PHDR value by the runtime load address.  */
      (ElfW(Addr)) l->l_phdr += l->l_addr;
  }

  /* We are done mapping in the file.  We no longer need the descriptor.  */
  __close (fd);

  if (l->l_type == lt_library && type == ET_EXEC)
    l->l_type = lt_executable;

  if (l->l_ld == 0)
    {
      if (type == ET_DYN)
        LOSE ("object file has no dynamic section");
    }
  else
    (ElfW(Addr)) l->l_ld += l->l_addr;

  l->l_entry += l->l_addr;

  elf_get_dynamic_info (l->l_ld, l->l_info);
  if (l->l_info[DT_HASH])
    _dl_setup_hash (l);

  return l;
}

/* Try to open NAME in one of the directories in DIRS.
   Return the fd, or -1.  If successful, fill in *REALNAME
   with the malloc'd full directory name.  */

static int
open_path (const char *name, size_t namelen, int preloaded,
           struct r_search_path_elem **dirs,
           char **realname)
{
  char *buf;
  int fd = -1;

  if (dirs == NULL || *dirs == NULL)
    {
      __set_errno (ENOENT);
      return -1;
    }

  buf = __alloca (max_dirnamelen + namelen);
  do
    {
      struct r_search_path_elem *this_dir = *dirs;
      size_t buflen = 0;

      if (this_dir->machdirstatus != nonexisting)
        {
          /* Construct the pathname to try.  */
          buflen = ((char *) __mempcpy (__mempcpy (buf, this_dir->dirname,
                                                   this_dir->machdirnamelen),
                                        name, namelen)
                    - buf);

          fd = __open (buf, O_RDONLY);
          if (this_dir->machdirstatus == unknown)
            if (fd != -1)
              this_dir->machdirstatus = existing;
            else
              {
                /* We failed to open the machine dependent library.  Let's
                   test whether there is any directory at all.  */
                struct stat st;

                buf[this_dir->machdirnamelen - 1] = '\0';

                if (__xstat (_STAT_VER, buf, &st) != 0
                    || ! S_ISDIR (st.st_mode))
                  /* The directory does not exist or it is not a directory.  */
                  this_dir->machdirstatus = nonexisting;
                else
                  this_dir->machdirstatus = existing;
              }

          if (fd != -1 && preloaded && __libc_enable_secure)
            {
              /* This is an extra security effort to make sure nobody can
                 preload broken shared objects which are in the trusted
                 directories and so exploit the bugs.  */
              struct stat st;

              if (__fxstat (_STAT_VER, fd, &st) != 0
                  || (st.st_mode & S_ISUID) == 0)
                {
                  /* The shared object cannot be tested for being SUID
                     or this bit is not set.  In this case we must not
                     use this object.  */
                  __close (fd);
                  fd = -1;
                  /* We simply ignore the file, signal this by setting
                     the error value which would have been set by `open'.  */
                  errno = ENOENT;
                }
            }
        }
      else
        errno = ENOENT;

      if (fd == -1 && errno == ENOENT && this_dir->dirstatus != nonexisting)
        {
          /* Construct the pathname to try.  */
          buflen = ((char *) __mempcpy (__mempcpy (buf, this_dir->dirname,
                                                   this_dir->dirnamelen),
                                        name, namelen)
                    - buf);

          fd = __open (buf, O_RDONLY);
          if (this_dir->dirstatus == unknown)
            if (fd != -1)
              this_dir->dirstatus = existing;
            else
              /* We failed to open the library.  Let's test whether there
                 is any directory at all.  */
              if (this_dir->dirnamelen <= 1)
                this_dir->dirstatus = existing;
              else
                {
                  struct stat st;

                  buf[this_dir->dirnamelen - 1] = '\0';

                  if (__xstat (_STAT_VER, buf, &st) != 0
                      || ! S_ISDIR (st.st_mode))
                    /* The directory does not exist or it is not a
                       directory.  */
                    this_dir->dirstatus = nonexisting;
                  else
                    this_dir->dirstatus = existing;
                }

          if (fd != -1 && preloaded && __libc_enable_secure)
            {
              /* This is an extra security effort to make sure nobody can
                 preload broken shared objects which are in the trusted
                 directories and so exploit the bugs.  */
              struct stat st;

              if (__fxstat (_STAT_VER, fd, &st) != 0
                  || (st.st_mode & S_ISUID) == 0)
                {
                  /* The shared object cannot be tested for being SUID
                     or this bit is not set.  In this case we must not
                     use this object.  */
                  __close (fd);
                  fd = -1;
                  /* We simply ignore the file, signal this by setting
                     the error value which would have been set by `open'.  */
                  errno = ENOENT;
                }
            }
        }

      if (fd != -1)
        {
          *realname = malloc (buflen);
          if (*realname != NULL)
            {
              memcpy (*realname, buf, buflen);
              return fd;
            }
          else
            {
              /* No memory for the name, we certainly won't be able
                 to load and link it.  */
              __close (fd);
              return -1;
            }
        }
      if (errno != ENOENT && errno != EACCES)
        /* The file exists and is readable, but something went wrong.  */
        return -1;
    }
  while (*++dirs != NULL);

  return -1;
}

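/* Name construction example (hypothetical values): for a search element
   whose dirname is "/usr/lib/" (dirnamelen 9) and NAME "libm.so.6" with
   NAMELEN as passed by _dl_map_object (strlen (name) + 1 == 10), the two
   __mempcpy calls above build "/usr/lib/libm.so.6" plus the trailing NUL
   in BUF, and buflen == 19 is exactly the size the malloc/memcpy pair
   needs to hand back *REALNAME.  */
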
/* Map in the shared object file NAME.  */

struct link_map *
_dl_map_object (struct link_map *loader, const char *name, int preloaded,
                int type, int trace_mode)
{
  int fd;
  char *realname;
  char *name_copy;
  struct link_map *l;

  /* Look for this name among those already loaded.  */
  for (l = _dl_loaded; l; l = l->l_next)
    {
      /* If the requested name matches the soname of a loaded object,
         use that object.  Elide this check for names that have not
         yet been opened.  */
      if (l->l_opencount <= 0)
        continue;
      if (!_dl_name_match_p (name, l))
        {
          const char *soname;

          if (l->l_info[DT_SONAME] == NULL)
            continue;

          soname = (const char *) (l->l_addr
                                   + l->l_info[DT_STRTAB]->d_un.d_ptr
                                   + l->l_info[DT_SONAME]->d_un.d_val);
          if (strcmp (name, soname) != 0)
            continue;

          /* We have a match on a new name -- cache it.  */
          add_name_to_object (l, local_strdup (soname));
        }

      /* We have a match -- bump the reference count and return it.  */
      ++l->l_opencount;
      return l;
    }

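  /* When NAME contains no '/', the search below proceeds in this order:
     the DT_RPATH of the loading object and of its loaders, the
     executable's own RPATH / LD_LIBRARY_PATH cache, the fake path list
     used by static binaries, /etc/ld.so.cache, and finally the default
     rtld_search_dirs.  A name containing a slash is opened literally.  */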
  if (strchr (name, '/') == NULL)
    {
      /* Search for NAME in several places.  */

      size_t namelen = strlen (name) + 1;

      fd = -1;

      /* First try the DT_RPATH of the dependent object that caused NAME
         to be loaded.  Then that object's dependent, and on up.  */
      for (l = loader; fd == -1 && l; l = l->l_loader)
        if (l && l->l_info[DT_RPATH])
          {
            /* Make sure the cache information is available.  */
            if (l->l_rpath_dirs == NULL)
              {
                size_t ptrval = (l->l_addr
                                 + l->l_info[DT_STRTAB]->d_un.d_ptr
                                 + l->l_info[DT_RPATH]->d_un.d_val);
                l->l_rpath_dirs =
                  decompose_rpath ((const char *) ptrval, 0);
              }

            if (l->l_rpath_dirs != (struct r_search_path_elem **) -1l)
              fd = open_path (name, namelen, preloaded, l->l_rpath_dirs,
                              &realname);
          }

      /* If dynamically linked, try the DT_RPATH of the executable itself
         and the LD_LIBRARY_PATH environment variable.  */
      l = _dl_loaded;
      if (fd == -1 && l && l->l_type != lt_loaded
          && l->l_rpath_dirs != (struct r_search_path_elem **) -1l)
        fd = open_path (name, namelen, preloaded, l->l_rpath_dirs, &realname);

      /* This is used if a static binary uses dynamic loading and there
         is a LD_LIBRARY_PATH given.  */
      if (fd == -1 && fake_path_list != NULL)
        fd = open_path (name, namelen, preloaded, fake_path_list, &realname);

      if (fd == -1)
        {
          /* Check the list of libraries in the file /etc/ld.so.cache,
             for compatibility with Linux's ldconfig program.  */
          extern const char *_dl_load_cache_lookup (const char *name);
          const char *cached = _dl_load_cache_lookup (name);
          if (cached)
            {
              fd = __open (cached, O_RDONLY);
              if (fd != -1)
                {
                  realname = local_strdup (cached);
                  if (realname == NULL)
                    {
                      __close (fd);
                      fd = -1;
                    }
                }
            }
        }

      /* Finally, try the default path.  */
      if (fd == -1)
        fd = open_path (name, namelen, preloaded, rtld_search_dirs, &realname);
    }
  else
    {
      fd = __open (name, O_RDONLY);
      if (fd != -1)
        {
          realname = local_strdup (name);
          if (realname == NULL)
            {
              __close (fd);
              fd = -1;
            }
        }
    }

  if (fd != -1)
    {
      name_copy = local_strdup (name);
      if (name_copy == NULL)
        {
          __close (fd);
          fd = -1;
        }
    }

  if (fd == -1)
    {
      if (trace_mode)
        {
          /* We haven't found an appropriate library.  But since we
             are only interested in the list of libraries this isn't
             so severe.  Fake an entry with all the information we
             have.  */
          static const ElfW(Symndx) dummy_bucket = STN_UNDEF;

          /* Enter the new object in the list of loaded objects.  */
          if ((name_copy = local_strdup (name)) == NULL
              || (l = _dl_new_object (name_copy, name, type)) == NULL)
            _dl_signal_error (ENOMEM, name,
                              "cannot create shared object descriptor");
          /* We use an opencount of 0 as a sign for the faked entry.  */
          l->l_opencount = 0;
          l->l_reserved = 0;
          l->l_buckets = &dummy_bucket;
          l->l_nbuckets = 1;
          l->l_relocated = 1;

          return l;
        }
      else
        _dl_signal_error (errno, name, "cannot open shared object file");
    }

  return _dl_map_object_from_fd (name_copy, fd, realname, loader, type);
}