elf/dl-load.c (glibc.git)
1 /* Map in a shared object's segments from the file.
2 Copyright (C) 1995-2005, 2006, 2007 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, write to the Free
17 Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
18 02111-1307 USA. */
20 #include <elf.h>
21 #include <errno.h>
22 #include <fcntl.h>
23 #include <libintl.h>
24 #include <stdbool.h>
25 #include <stdlib.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <ldsodefs.h>
29 #include <bits/wordsize.h>
30 #include <sys/mman.h>
31 #include <sys/param.h>
32 #include <sys/stat.h>
33 #include <sys/types.h>
34 #include "dynamic-link.h"
35 #include <abi-tag.h>
36 #include <stackinfo.h>
37 #include <caller.h>
38 #include <sysdep.h>
40 #include <dl-dst.h>
42 /* On some systems, no flag bits are given to specify file mapping. */
43 #ifndef MAP_FILE
44 # define MAP_FILE 0
45 #endif
47 /* The right way to map in the shared library files is MAP_COPY, which
48 makes a virtual copy of the data at the time of the mmap call; this
49 guarantees the mapped pages will be consistent even if the file is
50 overwritten. Some losing VM systems like Linux's lack MAP_COPY. All we
51 get is MAP_PRIVATE, which copies each page when it is modified; this
52 means if the file is overwritten, we may at some point get some pages
53 from the new version after starting with pages from the old version.
55 To make up for the lack and avoid the overwriting problem,
56 what Linux does have is MAP_DENYWRITE. This prevents anyone
57 from modifying the file while we have it mapped. */
58 #ifndef MAP_COPY
59 # ifdef MAP_DENYWRITE
60 # define MAP_COPY (MAP_PRIVATE | MAP_DENYWRITE)
61 # else
62 # define MAP_COPY MAP_PRIVATE
63 # endif
64 #endif
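/* Editor's note (illustration, not part of the original source): with the
   fallback above, the maps created below on a Linux-style system amount
   to roughly

       mmap (addr, len, prot, MAP_PRIVATE | MAP_DENYWRITE | MAP_FILE,
             fd, offset);

   relying on MAP_DENYWRITE to keep the file from changing underneath us;
   on systems with neither MAP_COPY nor MAP_DENYWRITE, plain MAP_PRIVATE
   is used and the overwriting caveat described above applies.  */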
66 /* Some systems link their relocatable objects at a base address other
67 than 0. We want to know the base address for these so that we can
68 subtract this address from the segment addresses during mapping.
69 This results in a more efficient address space usage. Defaults to
70 zero for almost all systems. */
71 #ifndef MAP_BASE_ADDR
72 # define MAP_BASE_ADDR(l) 0
73 #endif
76 #include <endian.h>
77 #if BYTE_ORDER == BIG_ENDIAN
78 # define byteorder ELFDATA2MSB
79 #elif BYTE_ORDER == LITTLE_ENDIAN
80 # define byteorder ELFDATA2LSB
81 #else
82 # error "Unknown BYTE_ORDER " BYTE_ORDER
83 # define byteorder ELFDATANONE
84 #endif
86 #define STRING(x) __STRING (x)
88 #ifdef MAP_ANON
89 /* The fd is not examined when using MAP_ANON. */
90 # define ANONFD -1
91 #else
92 int _dl_zerofd = -1;
93 # define ANONFD _dl_zerofd
94 #endif
96 /* Handle situations where we have a preferred location in memory for
97 the shared objects. */
98 #ifdef ELF_PREFERRED_ADDRESS_DATA
99 ELF_PREFERRED_ADDRESS_DATA;
100 #endif
101 #ifndef ELF_PREFERRED_ADDRESS
102 # define ELF_PREFERRED_ADDRESS(loader, maplength, mapstartpref) (mapstartpref)
103 #endif
104 #ifndef ELF_FIXED_ADDRESS
105 # define ELF_FIXED_ADDRESS(loader, mapstart) ((void) 0)
106 #endif
109 int __stack_prot attribute_hidden attribute_relro
110 #if _STACK_GROWS_DOWN && defined PROT_GROWSDOWN
111 = PROT_GROWSDOWN;
112 #elif _STACK_GROWS_UP && defined PROT_GROWSUP
113 = PROT_GROWSUP;
114 #else
115 = 0;
116 #endif
119 /* Type for the buffer into which we put the ELF header and hopefully the program
120 header. This buffer does not really have to be too large. In most
121 cases the program header follows the ELF header directly. If this
122 is not the case all bets are off and we could make the buffer
123 arbitrarily large and still might not get it read. This means the only
124 question is how large the ELF and program headers are combined. The
125 ELF header in 32-bit files is 52 bytes long and in 64-bit files is 64
126 bytes long. Each program header entry is again 32 and 56 bytes
127 long respectively. I.e., even with a file which has 10 program
128 header entries we only have to read 372B/624B respectively. Add to
129 this a bit of margin for program notes and reading 512B and 832B
130 for 32-bit and 64-bit files respectively is enough. If this
131 heuristic should really fail for some file the code in
132 `_dl_map_object_from_fd' knows how to recover. */
133 struct filebuf
135 ssize_t len;
136 #if __WORDSIZE == 32
137 # define FILEBUF_SIZE 512
138 #else
139 # define FILEBUF_SIZE 832
140 #endif
141 char buf[FILEBUF_SIZE] __attribute__ ((aligned (__alignof (ElfW(Ehdr)))));
144 /* This is the decomposed LD_LIBRARY_PATH search path. */
145 static struct r_search_path_struct env_path_list attribute_relro;
147 /* List of the hardware capabilities we might end up using. */
148 static const struct r_strlenpair *capstr attribute_relro;
149 static size_t ncapstr attribute_relro;
150 static size_t max_capstrlen attribute_relro;
153 /* Get the generated information about the trusted directories. */
154 #include "trusted-dirs.h"
156 static const char system_dirs[] = SYSTEM_DIRS;
157 static const size_t system_dirs_len[] =
159 SYSTEM_DIRS_LEN
161 #define nsystem_dirs_len \
162 (sizeof (system_dirs_len) / sizeof (system_dirs_len[0]))
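/* Editor's illustration (assumed encoding, not part of the original
   source): trusted-dirs.h provides the trusted directories as a single
   '\0'-separated string plus a matching length table, e.g. something like

       SYSTEM_DIRS      "/lib/\0/usr/lib/\0"
       SYSTEM_DIRS_LEN  5, 9

   With that layout nsystem_dirs_len evaluates to 2, and the loops below
   step through the string by adding system_dirs_len[idx] + 1 per entry.  */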
165 /* Local version of `strdup' function. */
166 static char *
167 local_strdup (const char *s)
169 size_t len = strlen (s) + 1;
170 void *new = malloc (len);
172 if (new == NULL)
173 return NULL;
175 return (char *) memcpy (new, s, len);
179 static size_t
180 is_dst (const char *start, const char *name, const char *str,
181 int is_path, int secure)
183 size_t len;
184 bool is_curly = false;
186 if (name[0] == '{')
188 is_curly = true;
189 ++name;
192 len = 0;
193 while (name[len] == str[len] && name[len] != '\0')
194 ++len;
196 if (is_curly)
198 if (name[len] != '}')
199 return 0;
201 /* Point again at the beginning of the name. */
202 --name;
203 /* Skip over closing curly brace and adjust for the --name. */
204 len += 2;
206 else if (name[len] != '\0' && name[len] != '/'
207 && (!is_path || name[len] != ':'))
208 return 0;
210 if (__builtin_expect (secure, 0)
211 && ((name[len] != '\0' && (!is_path || name[len] != ':'))
212 || (name != start + 1 && (!is_path || name[-2] != ':'))))
213 return 0;
215 return len;
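/* Editor's illustration (hypothetical inputs, not part of the original
   source): with NAME pointing just past a '$',
   is_dst (start, "ORIGIN/lib", "ORIGIN", 1, 0) returns 6,
   is_dst (start, "{ORIGIN}/lib", "ORIGIN", 1, 0) returns 8 (the token plus
   both braces), and a trailing alphabetic character as in "$ORIGINX"
   makes it return 0, i.e. "no recognized token here".  */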
219 size_t
220 _dl_dst_count (const char *name, int is_path)
222 const char *const start = name;
223 size_t cnt = 0;
227 size_t len;
229 /* $ORIGIN is not expanded for SUID/SGID programs (except if it
230 is $ORIGIN alone) and it must always appear first in path. */
231 ++name;
232 if ((len = is_dst (start, name, "ORIGIN", is_path,
233 INTUSE(__libc_enable_secure))) != 0
234 || (len = is_dst (start, name, "PLATFORM", is_path, 0)) != 0
235 || (len = is_dst (start, name, "LIB", is_path, 0)) != 0)
236 ++cnt;
238 name = strchr (name + len, '$');
240 while (name != NULL);
242 return cnt;
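/* Editor's illustration (not part of the original source): for a search
   path string such as "$ORIGIN/../lib:$LIB" this function reports 2
   tokens; for a string without any recognized token it reports 0, which
   lets expand_dynamic_string_token below take the local_strdup shortcut.  */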
246 char *
247 _dl_dst_substitute (struct link_map *l, const char *name, char *result,
248 int is_path)
250 const char *const start = name;
251 char *last_elem, *wp;
253 /* Now fill the result path. While copying over the string we keep
254 track of the start of the last path element. When we come across
255 a DST we copy over the value or (if the value is not available)
256 leave the entire path element out. */
257 last_elem = wp = result;
261 if (__builtin_expect (*name == '$', 0))
263 const char *repl = NULL;
264 size_t len;
266 ++name;
267 if ((len = is_dst (start, name, "ORIGIN", is_path,
268 INTUSE(__libc_enable_secure))) != 0)
270 #ifndef SHARED
271 if (l == NULL)
272 repl = _dl_get_origin ();
273 else
274 #endif
275 repl = l->l_origin;
277 else if ((len = is_dst (start, name, "PLATFORM", is_path, 0)) != 0)
278 repl = GLRO(dl_platform);
279 else if ((len = is_dst (start, name, "LIB", is_path, 0)) != 0)
280 repl = DL_DST_LIB;
282 if (repl != NULL && repl != (const char *) -1)
284 wp = __stpcpy (wp, repl);
285 name += len;
287 else if (len > 1)
289 /* We cannot use this path element, the value of the
290 replacement is unknown. */
291 wp = last_elem;
292 name += len;
293 while (*name != '\0' && (!is_path || *name != ':'))
294 ++name;
296 else
297 /* No DST we recognize. */
298 *wp++ = '$';
300 else
302 *wp++ = *name++;
303 if (is_path && *name == ':')
304 last_elem = wp;
307 while (*name != '\0');
309 *wp = '\0';
311 return result;
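/* Editor's illustration (hypothetical values, not part of the original
   source): for an object whose l_origin is "/opt/app/bin", substituting
   "$ORIGIN/../lib:/usr/lib" with IS_PATH set yields
   "/opt/app/bin/../lib:/usr/lib".  If the origin is unknown (repl is
   (const char *) -1) the whole "$ORIGIN/../lib" element is dropped and
   only ":/usr/lib" remains, as the comment above describes.  */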
315 /* Return copy of argument with all recognized dynamic string tokens
316 ($ORIGIN and $PLATFORM for now) replaced. On some platforms it
317 might not be possible to determine the path from which the object
318 belonging to the map is loaded. In this case the path element
319 containing $ORIGIN is left out. */
320 static char *
321 expand_dynamic_string_token (struct link_map *l, const char *s)
323 /* We make two runs over the string. First we determine how large the
324 resulting string is and then we copy it over. Since this is not a
325 frequently executed operation we are looking here not for performance
326 but rather for code size. */
327 size_t cnt;
328 size_t total;
329 char *result;
331 /* Determine the number of DST elements. */
332 cnt = DL_DST_COUNT (s, 1);
334 /* If we do not have to replace anything simply copy the string. */
335 if (__builtin_expect (cnt, 0) == 0)
336 return local_strdup (s);
338 /* Determine the length of the substituted string. */
339 total = DL_DST_REQUIRED (l, s, strlen (s), cnt);
341 /* Allocate the necessary memory. */
342 result = (char *) malloc (total + 1);
343 if (result == NULL)
344 return NULL;
346 return _dl_dst_substitute (l, s, result, 1);
350 /* Add `name' to the list of names for a particular shared object.
351 `name' is expected to have been allocated with malloc and will
352 be freed if the shared object already has this name.
353 Nothing is done if the object already had this name. */
354 static void
355 internal_function
356 add_name_to_object (struct link_map *l, const char *name)
358 struct libname_list *lnp, *lastp;
359 struct libname_list *newname;
360 size_t name_len;
362 lastp = NULL;
363 for (lnp = l->l_libname; lnp != NULL; lastp = lnp, lnp = lnp->next)
364 if (strcmp (name, lnp->name) == 0)
365 return;
367 name_len = strlen (name) + 1;
368 newname = (struct libname_list *) malloc (sizeof *newname + name_len);
369 if (newname == NULL)
371 /* No more memory. */
372 _dl_signal_error (ENOMEM, name, NULL, N_("cannot allocate name record"));
373 return;
375 /* The object should have a libname set from _dl_new_object. */
376 assert (lastp != NULL);
378 newname->name = memcpy (newname + 1, name, name_len);
379 newname->next = NULL;
380 newname->dont_free = 0;
381 lastp->next = newname;
384 /* Standard search directories. */
385 static struct r_search_path_struct rtld_search_dirs attribute_relro;
387 static size_t max_dirnamelen;
389 static struct r_search_path_elem **
390 fillin_rpath (char *rpath, struct r_search_path_elem **result, const char *sep,
391 int check_trusted, const char *what, const char *where)
393 char *cp;
394 size_t nelems = 0;
396 while ((cp = __strsep (&rpath, sep)) != NULL)
398 struct r_search_path_elem *dirp;
399 size_t len = strlen (cp);
401 /* `strsep' can pass an empty string. This has to be
402 interpreted as `use the current directory'. */
403 if (len == 0)
405 static const char curwd[] = "./";
406 cp = (char *) curwd;
409 /* Remove trailing slashes (except for "/"). */
410 while (len > 1 && cp[len - 1] == '/')
411 --len;
413 /* Now add one if there is none so far. */
414 if (len > 0 && cp[len - 1] != '/')
415 cp[len++] = '/';
417 /* Make sure we don't use untrusted directories if we run SUID. */
418 if (__builtin_expect (check_trusted, 0))
420 const char *trun = system_dirs;
421 size_t idx;
422 int unsecure = 1;
424 /* All trusted directories must be complete names. */
425 if (cp[0] == '/')
427 for (idx = 0; idx < nsystem_dirs_len; ++idx)
429 if (len == system_dirs_len[idx]
430 && memcmp (trun, cp, len) == 0)
432 /* Found it. */
433 unsecure = 0;
434 break;
437 trun += system_dirs_len[idx] + 1;
441 if (unsecure)
442 /* Simply drop this directory. */
443 continue;
446 /* See if this directory is already known. */
447 for (dirp = GL(dl_all_dirs); dirp != NULL; dirp = dirp->next)
448 if (dirp->dirnamelen == len && memcmp (cp, dirp->dirname, len) == 0)
449 break;
451 if (dirp != NULL)
453 /* It is available, see whether it's on our own list. */
454 size_t cnt;
455 for (cnt = 0; cnt < nelems; ++cnt)
456 if (result[cnt] == dirp)
457 break;
459 if (cnt == nelems)
460 result[nelems++] = dirp;
462 else
464 size_t cnt;
465 enum r_dir_status init_val;
466 size_t where_len = where ? strlen (where) + 1 : 0;
468 /* It's a new directory. Create an entry and add it. */
469 dirp = (struct r_search_path_elem *)
470 malloc (sizeof (*dirp) + ncapstr * sizeof (enum r_dir_status)
471 + where_len + len + 1);
472 if (dirp == NULL)
473 _dl_signal_error (ENOMEM, NULL, NULL,
474 N_("cannot create cache for search path"));
476 dirp->dirname = ((char *) dirp + sizeof (*dirp)
477 + ncapstr * sizeof (enum r_dir_status));
478 *((char *) __mempcpy ((char *) dirp->dirname, cp, len)) = '\0';
479 dirp->dirnamelen = len;
481 if (len > max_dirnamelen)
482 max_dirnamelen = len;
484 /* We have to make sure all the relative directories are
485 never ignored. The current directory might change and
486 all our saved information would be void. */
487 init_val = cp[0] != '/' ? existing : unknown;
488 for (cnt = 0; cnt < ncapstr; ++cnt)
489 dirp->status[cnt] = init_val;
491 dirp->what = what;
492 if (__builtin_expect (where != NULL, 1))
493 dirp->where = memcpy ((char *) dirp + sizeof (*dirp) + len + 1
494 + (ncapstr * sizeof (enum r_dir_status)),
495 where, where_len);
496 else
497 dirp->where = NULL;
499 dirp->next = GL(dl_all_dirs);
500 GL(dl_all_dirs) = dirp;
502 /* Put it in the result array. */
503 result[nelems++] = dirp;
507 /* Terminate the array. */
508 result[nelems] = NULL;
510 return result;
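/* Editor's illustration (hypothetical input, not part of the original
   source): given the writable string "/opt/app/lib::/usr/local/lib/" and
   SEP ":", the loop above yields up to three r_search_path_elem entries
   with dirnames "/opt/app/lib/", "./" (an empty element means the current
   directory) and "/usr/local/lib/", reusing entries already on
   GL(dl_all_dirs) and, when CHECK_TRUSTED is set, silently dropping any
   directory outside the trusted system directories.  */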
514 static bool
515 internal_function
516 decompose_rpath (struct r_search_path_struct *sps,
517 const char *rpath, struct link_map *l, const char *what)
519 /* Make a copy we can work with. */
520 const char *where = l->l_name;
521 char *copy;
522 char *cp;
523 struct r_search_path_elem **result;
524 size_t nelems;
525 /* Initialize to please the compiler. */
526 const char *errstring = NULL;
528 /* First see whether we must forget the RUNPATH and RPATH from this
529 object. */
530 if (__builtin_expect (GLRO(dl_inhibit_rpath) != NULL, 0)
531 && !INTUSE(__libc_enable_secure))
533 const char *inhp = GLRO(dl_inhibit_rpath);
537 const char *wp = where;
539 while (*inhp == *wp && *wp != '\0')
541 ++inhp;
542 ++wp;
545 if (*wp == '\0' && (*inhp == '\0' || *inhp == ':'))
547 /* This object is on the list of objects for which the
548 RUNPATH and RPATH must not be used. */
549 sps->dirs = (void *) -1;
550 return false;
553 while (*inhp != '\0')
554 if (*inhp++ == ':')
555 break;
557 while (*inhp != '\0');
560 /* Make a writable copy. At the same time expand possible dynamic
561 string tokens. */
562 copy = expand_dynamic_string_token (l, rpath);
563 if (copy == NULL)
565 errstring = N_("cannot create RUNPATH/RPATH copy");
566 goto signal_error;
569 /* Count the number of necessary elements in the result array. */
570 nelems = 0;
571 for (cp = copy; *cp != '\0'; ++cp)
572 if (*cp == ':')
573 ++nelems;
575 /* Allocate room for the result. NELEMS + 1 is an upper limit for the
576 number of necessary entries. */
577 result = (struct r_search_path_elem **) malloc ((nelems + 1 + 1)
578 * sizeof (*result));
579 if (result == NULL)
581 free (copy);
582 errstring = N_("cannot create cache for search path");
583 signal_error:
584 _dl_signal_error (ENOMEM, NULL, NULL, errstring);
587 fillin_rpath (copy, result, ":", 0, what, where);
589 /* Free the copied RPATH string. `fillin_rpath' makes its own copies if
590 necessary. */
591 free (copy);
593 sps->dirs = result;
594 /* The caller will change this value if we haven't used a real malloc. */
595 sps->malloced = 1;
596 return true;
599 /* Make sure cached path information is stored in *SP
600 and return true if there are any paths to search there. */
601 static bool
602 cache_rpath (struct link_map *l,
603 struct r_search_path_struct *sp,
604 int tag,
605 const char *what)
607 if (sp->dirs == (void *) -1)
608 return false;
610 if (sp->dirs != NULL)
611 return true;
613 if (l->l_info[tag] == NULL)
615 /* There is no path. */
616 sp->dirs = (void *) -1;
617 return false;
620 /* Make sure the cache information is available. */
621 return decompose_rpath (sp, (const char *) (D_PTR (l, l_info[DT_STRTAB])
622 + l->l_info[tag]->d_un.d_val),
623 l, what);
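/* Editor's note (not part of the original source): throughout this file
   sp->dirs == (void *) -1 is a sentinel meaning "there is no search path
   here, do not try to compute one again", whereas NULL means "not
   decomposed yet".  */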
627 void
628 internal_function
629 _dl_init_paths (const char *llp)
631 size_t idx;
632 const char *strp;
633 struct r_search_path_elem *pelem, **aelem;
634 size_t round_size;
635 #ifdef SHARED
636 struct link_map *l;
637 #endif
638 /* Initialize to please the compiler. */
639 const char *errstring = NULL;
641 /* Fill in the information about the application's RPATH and the
642 directories addressed by the LD_LIBRARY_PATH environment variable. */
644 /* Get the capabilities. */
645 capstr = _dl_important_hwcaps (GLRO(dl_platform), GLRO(dl_platformlen),
646 &ncapstr, &max_capstrlen);
648 /* First set up the rest of the default search directory entries. */
649 aelem = rtld_search_dirs.dirs = (struct r_search_path_elem **)
650 malloc ((nsystem_dirs_len + 1) * sizeof (struct r_search_path_elem *));
651 if (rtld_search_dirs.dirs == NULL)
653 errstring = N_("cannot create search path array");
654 signal_error:
655 _dl_signal_error (ENOMEM, NULL, NULL, errstring);
658 round_size = ((2 * sizeof (struct r_search_path_elem) - 1
659 + ncapstr * sizeof (enum r_dir_status))
660 / sizeof (struct r_search_path_elem));
662 rtld_search_dirs.dirs[0] = (struct r_search_path_elem *)
663 malloc ((sizeof (system_dirs) / sizeof (system_dirs[0]))
664 * round_size * sizeof (struct r_search_path_elem));
665 if (rtld_search_dirs.dirs[0] == NULL)
667 errstring = N_("cannot create cache for search path");
668 goto signal_error;
671 rtld_search_dirs.malloced = 0;
672 pelem = GL(dl_all_dirs) = rtld_search_dirs.dirs[0];
673 strp = system_dirs;
674 idx = 0;
678 size_t cnt;
680 *aelem++ = pelem;
682 pelem->what = "system search path";
683 pelem->where = NULL;
685 pelem->dirname = strp;
686 pelem->dirnamelen = system_dirs_len[idx];
687 strp += system_dirs_len[idx] + 1;
689 /* System paths must be absolute. */
690 assert (pelem->dirname[0] == '/');
691 for (cnt = 0; cnt < ncapstr; ++cnt)
692 pelem->status[cnt] = unknown;
694 pelem->next = (++idx == nsystem_dirs_len ? NULL : (pelem + round_size));
696 pelem += round_size;
698 while (idx < nsystem_dirs_len);
700 max_dirnamelen = SYSTEM_DIRS_MAX_LEN;
701 *aelem = NULL;
703 #ifdef SHARED
704 /* This points to the map of the main object. */
705 l = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
706 if (l != NULL)
708 assert (l->l_type != lt_loaded);
710 if (l->l_info[DT_RUNPATH])
712 /* Allocate room for the search path and fill in information
713 from RUNPATH. */
714 decompose_rpath (&l->l_runpath_dirs,
715 (const void *) (D_PTR (l, l_info[DT_STRTAB])
716 + l->l_info[DT_RUNPATH]->d_un.d_val),
717 l, "RUNPATH");
719 /* The RPATH is ignored. */
720 l->l_rpath_dirs.dirs = (void *) -1;
722 else
724 l->l_runpath_dirs.dirs = (void *) -1;
726 if (l->l_info[DT_RPATH])
728 /* Allocate room for the search path and fill in information
729 from RPATH. */
730 decompose_rpath (&l->l_rpath_dirs,
731 (const void *) (D_PTR (l, l_info[DT_STRTAB])
732 + l->l_info[DT_RPATH]->d_un.d_val),
733 l, "RPATH");
734 l->l_rpath_dirs.malloced = 0;
736 else
737 l->l_rpath_dirs.dirs = (void *) -1;
740 #endif /* SHARED */
742 if (llp != NULL && *llp != '\0')
744 size_t nllp;
745 const char *cp = llp;
746 char *llp_tmp;
748 #ifdef SHARED
749 /* Expand DSTs. */
750 size_t cnt = DL_DST_COUNT (llp, 1);
751 if (__builtin_expect (cnt == 0, 1))
752 llp_tmp = strdupa (llp);
753 else
755 /* Determine the length of the substituted string. */
756 size_t total = DL_DST_REQUIRED (l, llp, strlen (llp), cnt);
758 /* Allocate the necessary memory. */
759 llp_tmp = (char *) alloca (total + 1);
760 llp_tmp = _dl_dst_substitute (l, llp, llp_tmp, 1);
762 #else
763 llp_tmp = strdupa (llp);
764 #endif
766 /* Decompose the LD_LIBRARY_PATH contents. First determine how many
767 elements it has. */
768 nllp = 1;
769 while (*cp)
771 if (*cp == ':' || *cp == ';')
772 ++nllp;
773 ++cp;
776 env_path_list.dirs = (struct r_search_path_elem **)
777 malloc ((nllp + 1) * sizeof (struct r_search_path_elem *));
778 if (env_path_list.dirs == NULL)
780 errstring = N_("cannot create cache for search path");
781 goto signal_error;
784 (void) fillin_rpath (llp_tmp, env_path_list.dirs, ":;",
785 INTUSE(__libc_enable_secure), "LD_LIBRARY_PATH",
786 NULL);
788 if (env_path_list.dirs[0] == NULL)
790 free (env_path_list.dirs);
791 env_path_list.dirs = (void *) -1;
794 env_path_list.malloced = 0;
796 else
797 env_path_list.dirs = (void *) -1;
799 /* Remember the last search directory added at startup. */
800 GLRO(dl_init_all_dirs) = GL(dl_all_dirs);
804 static void
805 __attribute__ ((noreturn, noinline))
806 lose (int code, int fd, const char *name, char *realname, struct link_map *l,
807 const char *msg, struct r_debug *r)
809 /* The file might already be closed. */
810 if (fd != -1)
811 (void) __close (fd);
812 if (l != NULL)
814 /* Remove the stillborn object from the list and free it. */
815 assert (l->l_next == NULL);
816 if (l->l_prev == NULL)
817 /* No other module loaded. This happens only in the static library,
818 or in rtld under --verify. */
819 GL(dl_ns)[l->l_ns]._ns_loaded = NULL;
820 else
821 l->l_prev->l_next = NULL;
822 --GL(dl_ns)[l->l_ns]._ns_nloaded;
823 free (l);
825 free (realname);
827 if (r != NULL)
829 r->r_state = RT_CONSISTENT;
830 _dl_debug_state ();
833 _dl_signal_error (code, name, NULL, msg);
837 /* Map in the shared object NAME, actually located in REALNAME, and already
838 opened on FD. */
840 #ifndef EXTERNAL_MAP_FROM_FD
841 static
842 #endif
843 struct link_map *
844 _dl_map_object_from_fd (const char *name, int fd, struct filebuf *fbp,
845 char *realname, struct link_map *loader, int l_type,
846 int mode, void **stack_endp, Lmid_t nsid)
848 struct link_map *l = NULL;
849 const ElfW(Ehdr) *header;
850 const ElfW(Phdr) *phdr;
851 const ElfW(Phdr) *ph;
852 size_t maplength;
853 int type;
854 struct stat64 st;
855 /* Initialize to keep the compiler happy. */
856 const char *errstring = NULL;
857 int errval = 0;
858 struct r_debug *r = _dl_debug_initialize (0, nsid);
859 bool make_consistent = false;
861 /* Get file information. */
862 if (__builtin_expect (__fxstat64 (_STAT_VER, fd, &st) < 0, 0))
864 errstring = N_("cannot stat shared object");
865 call_lose_errno:
866 errval = errno;
867 call_lose:
868 lose (errval, fd, name, realname, l, errstring,
869 make_consistent ? r : NULL);
872 /* Look again to see if the real name matched another already loaded. */
873 for (l = GL(dl_ns)[nsid]._ns_loaded; l; l = l->l_next)
874 if (l->l_removed == 0 && l->l_ino == st.st_ino && l->l_dev == st.st_dev)
876 /* The object is already loaded.
877 Just bump its reference count and return it. */
878 __close (fd);
880 /* If the name is not in the list of names for this object add
881 it. */
882 free (realname);
883 add_name_to_object (l, name);
885 return l;
888 #ifdef SHARED
889 /* When loading into a namespace other than the base one we must
890 avoid loading ld.so since there can only be one copy. Ever. */
891 if (__builtin_expect (nsid != LM_ID_BASE, 0)
892 && ((st.st_ino == GL(dl_rtld_map).l_ino
893 && st.st_dev == GL(dl_rtld_map).l_dev)
894 || _dl_name_match_p (name, &GL(dl_rtld_map))))
896 /* This is indeed ld.so. Create a new link_map which refers to
897 the real one for almost everything. */
898 l = _dl_new_object (realname, name, l_type, loader, mode, nsid);
899 if (l == NULL)
900 goto fail_new;
902 /* Refer to the real descriptor. */
903 l->l_real = &GL(dl_rtld_map);
905 /* No need to bump the refcount of the real object, ld.so will
906 never be unloaded. */
907 __close (fd);
909 return l;
911 #endif
913 if (mode & RTLD_NOLOAD)
914 /* We are not supposed to load the object unless it is already
915 loaded. So return now. */
916 return NULL;
918 /* Print debugging message. */
919 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
920 _dl_debug_printf ("file=%s [%lu]; generating link map\n", name, nsid);
922 /* This is the ELF header. We read it in `open_verify'. */
923 header = (void *) fbp->buf;
925 #ifndef MAP_ANON
926 # define MAP_ANON 0
927 if (_dl_zerofd == -1)
929 _dl_zerofd = _dl_sysdep_open_zero_fill ();
930 if (_dl_zerofd == -1)
932 __close (fd);
933 _dl_signal_error (errno, NULL, NULL,
934 N_("cannot open zero fill device"));
937 #endif
939 /* Signal that we are going to add new objects. */
940 if (r->r_state == RT_CONSISTENT)
942 #ifdef SHARED
943 /* Auditing checkpoint: we are going to add new objects. */
944 if (__builtin_expect (GLRO(dl_naudit) > 0, 0))
946 struct link_map *head = GL(dl_ns)[nsid]._ns_loaded;
947 /* Do not call the functions for any auditing object. */
948 if (head->l_auditing == 0)
950 struct audit_ifaces *afct = GLRO(dl_audit);
951 for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
953 if (afct->activity != NULL)
954 afct->activity (&head->l_audit[cnt].cookie, LA_ACT_ADD);
956 afct = afct->next;
960 #endif
962 /* Notify the debugger we have added some objects. We need to
963 call _dl_debug_initialize in a static program in case dynamic
964 linking has not been used before. */
965 r->r_state = RT_ADD;
966 _dl_debug_state ();
967 make_consistent = true;
969 else
970 assert (r->r_state == RT_ADD);
972 /* Enter the new object in the list of loaded objects. */
973 l = _dl_new_object (realname, name, l_type, loader, mode, nsid);
974 if (__builtin_expect (l == NULL, 0))
976 #ifdef SHARED
977 fail_new:
978 #endif
979 errstring = N_("cannot create shared object descriptor");
980 goto call_lose_errno;
983 /* Extract the remaining details we need from the ELF header
984 and then read in the program header table. */
985 l->l_entry = header->e_entry;
986 type = header->e_type;
987 l->l_phnum = header->e_phnum;
989 maplength = header->e_phnum * sizeof (ElfW(Phdr));
990 if (header->e_phoff + maplength <= (size_t) fbp->len)
991 phdr = (void *) (fbp->buf + header->e_phoff);
992 else
994 phdr = alloca (maplength);
995 __lseek (fd, header->e_phoff, SEEK_SET);
996 if ((size_t) __libc_read (fd, (void *) phdr, maplength) != maplength)
998 errstring = N_("cannot read file data");
999 goto call_lose_errno;
1003 /* Presumed absent PT_GNU_STACK. */
1004 uint_fast16_t stack_flags = PF_R|PF_W|PF_X;
1007 /* Scan the program header table, collecting its load commands. */
1008 struct loadcmd
1010 ElfW(Addr) mapstart, mapend, dataend, allocend;
1011 off_t mapoff;
1012 int prot;
1013 } loadcmds[l->l_phnum], *c;
1014 size_t nloadcmds = 0;
1015 bool has_holes = false;
1017 /* The struct is initialized to zero so this is not necessary:
1018 l->l_ld = 0;
1019 l->l_phdr = 0;
1020 l->l_addr = 0; */
1021 for (ph = phdr; ph < &phdr[l->l_phnum]; ++ph)
1022 switch (ph->p_type)
1024 /* These entries tell us where to find things once the file's
1025 segments are mapped in. We record the addresses it says
1026 verbatim, and later correct for the run-time load address. */
1027 case PT_DYNAMIC:
1028 l->l_ld = (void *) ph->p_vaddr;
1029 l->l_ldnum = ph->p_memsz / sizeof (ElfW(Dyn));
1030 break;
1032 case PT_PHDR:
1033 l->l_phdr = (void *) ph->p_vaddr;
1034 break;
1036 case PT_LOAD:
1037 /* A load command tells us to map in part of the file.
1038 We record the load commands and process them all later. */
1039 if (__builtin_expect ((ph->p_align & (GLRO(dl_pagesize) - 1)) != 0,
1042 errstring = N_("ELF load command alignment not page-aligned");
1043 goto call_lose;
1045 if (__builtin_expect (((ph->p_vaddr - ph->p_offset)
1046 & (ph->p_align - 1)) != 0, 0))
1048 errstring
1049 = N_("ELF load command address/offset not properly aligned");
1050 goto call_lose;
1053 c = &loadcmds[nloadcmds++];
1054 c->mapstart = ph->p_vaddr & ~(GLRO(dl_pagesize) - 1);
1055 c->mapend = ((ph->p_vaddr + ph->p_filesz + GLRO(dl_pagesize) - 1)
1056 & ~(GLRO(dl_pagesize) - 1));
1057 c->dataend = ph->p_vaddr + ph->p_filesz;
1058 c->allocend = ph->p_vaddr + ph->p_memsz;
1059 c->mapoff = ph->p_offset & ~(GLRO(dl_pagesize) - 1);
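/* Editor's worked example (hypothetical values, not part of the original
   source), assuming a 4096-byte page size: a PT_LOAD with p_vaddr 0x1234,
   p_offset 0x1234, p_filesz 0x5000 and p_memsz 0x7000 gives mapstart
   0x1000, mapend 0x7000 (0x6234 rounded up), dataend 0x6234, allocend
   0x8234 and mapoff 0x1000; the gap between dataend and allocend is the
   zero-filled (.bss) part handled further down.  */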
1061 /* Determine whether there is a gap between the last segment
1062 and this one. */
1063 if (nloadcmds > 1 && c[-1].mapend != c->mapstart)
1064 has_holes = true;
1066 /* Optimize a common case. */
1067 #if (PF_R | PF_W | PF_X) == 7 && (PROT_READ | PROT_WRITE | PROT_EXEC) == 7
1068 c->prot = (PF_TO_PROT
1069 >> ((ph->p_flags & (PF_R | PF_W | PF_X)) * 4)) & 0xf;
1070 #else
1071 c->prot = 0;
1072 if (ph->p_flags & PF_R)
1073 c->prot |= PROT_READ;
1074 if (ph->p_flags & PF_W)
1075 c->prot |= PROT_WRITE;
1076 if (ph->p_flags & PF_X)
1077 c->prot |= PROT_EXEC;
1078 #endif
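/* Editor's note (not part of the original source): PF_TO_PROT packs one
   4-bit PROT_* combination per possible p_flags value, so e.g. flags
   PF_R|PF_X == 5 select bits 20-23 of the constant, which hold
   PROT_READ|PROT_EXEC; the #else branch computes the same mapping one
   flag at a time.  */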
1079 break;
1081 case PT_TLS:
1082 if (ph->p_memsz == 0)
1083 /* Nothing to do for an empty segment. */
1084 break;
1086 l->l_tls_blocksize = ph->p_memsz;
1087 l->l_tls_align = ph->p_align;
1088 if (ph->p_align == 0)
1089 l->l_tls_firstbyte_offset = 0;
1090 else
1091 l->l_tls_firstbyte_offset = ph->p_vaddr & (ph->p_align - 1);
1092 l->l_tls_initimage_size = ph->p_filesz;
1093 /* Since we don't know the load address yet only store the
1094 offset. We will adjust it later. */
1095 l->l_tls_initimage = (void *) ph->p_vaddr;
1097 /* If not loading the initial set of shared libraries,
1098 check whether we should permit loading a TLS segment. */
1099 if (__builtin_expect (l->l_type == lt_library, 1)
1100 /* If GL(dl_tls_dtv_slotinfo_list) == NULL, then rtld.c did
1101 not set up TLS data structures, so don't use them now. */
1102 || __builtin_expect (GL(dl_tls_dtv_slotinfo_list) != NULL, 1))
1104 /* Assign the next available module ID. */
1105 l->l_tls_modid = _dl_next_tls_modid ();
1106 break;
1109 #ifdef SHARED
1110 if (l->l_prev == NULL || (mode & __RTLD_AUDIT) != 0)
1111 /* We are loading the executable itself when the dynamic linker
1112 was executed directly. The setup will happen later. */
1113 break;
1115 /* In a static binary there is no way to tell if we dynamically
1116 loaded libpthread. */
1117 if (GL(dl_error_catch_tsd) == &_dl_initial_error_catch_tsd)
1118 #endif
1120 /* We have not yet loaded libpthread.
1121 We can do the TLS setup right now! */
1123 void *tcb;
1125 /* The first call allocates TLS bookkeeping data structures.
1126 Then we allocate the TCB for the initial thread. */
1127 if (__builtin_expect (_dl_tls_setup (), 0)
1128 || __builtin_expect ((tcb = _dl_allocate_tls (NULL)) == NULL,
1131 errval = ENOMEM;
1132 errstring = N_("\
1133 cannot allocate TLS data structures for initial thread");
1134 goto call_lose;
1137 /* Now we install the TCB in the thread register. */
1138 errstring = TLS_INIT_TP (tcb, 0);
1139 if (__builtin_expect (errstring == NULL, 1))
1141 /* Now we are all good. */
1142 l->l_tls_modid = ++GL(dl_tls_max_dtv_idx);
1143 break;
1146 /* The kernel is too old or somesuch. */
1147 errval = 0;
1148 _dl_deallocate_tls (tcb, 1);
1149 goto call_lose;
1152 /* Uh-oh, the binary expects TLS support but we cannot
1153 provide it. */
1154 errval = 0;
1155 errstring = N_("cannot handle TLS data");
1156 goto call_lose;
1157 break;
1159 case PT_GNU_STACK:
1160 stack_flags = ph->p_flags;
1161 break;
1163 case PT_GNU_RELRO:
1164 l->l_relro_addr = ph->p_vaddr;
1165 l->l_relro_size = ph->p_memsz;
1166 break;
1169 if (__builtin_expect (nloadcmds == 0, 0))
1171 /* This only happens for a bogus object that will be caught with
1172 another error below. But we don't want to go through the
1173 calculations below using NLOADCMDS - 1. */
1174 errstring = N_("object file has no loadable segments");
1175 goto call_lose;
1178 /* Now process the load commands and map segments into memory. */
1179 c = loadcmds;
1181 /* Length of the sections to be loaded. */
1182 maplength = loadcmds[nloadcmds - 1].allocend - c->mapstart;
1184 if (__builtin_expect (type, ET_DYN) == ET_DYN)
1186 /* This is a position-independent shared object. We can let the
1187 kernel map it anywhere it likes, but we must have space for all
1188 the segments in their specified positions relative to the first.
1189 So we map the first segment without MAP_FIXED, but with its
1190 extent increased to cover all the segments. Then we remove
1191 access from the excess portion, and there is known to be sufficient
1192 space there into which to remap the later segments.
1194 As a refinement, sometimes we have an address that we would
1195 prefer to map such objects at; but this is only a preference,
1196 the OS can do whatever it likes. */
1197 ElfW(Addr) mappref;
1198 mappref = (ELF_PREFERRED_ADDRESS (loader, maplength,
1199 c->mapstart & GLRO(dl_use_load_bias))
1200 - MAP_BASE_ADDR (l));
1202 /* Remember which part of the address space this object uses. */
1203 l->l_map_start = (ElfW(Addr)) __mmap ((void *) mappref, maplength,
1204 c->prot,
1205 MAP_COPY|MAP_FILE,
1206 fd, c->mapoff);
1207 if (__builtin_expect ((void *) l->l_map_start == MAP_FAILED, 0))
1209 map_error:
1210 errstring = N_("failed to map segment from shared object");
1211 goto call_lose_errno;
1214 l->l_map_end = l->l_map_start + maplength;
1215 l->l_addr = l->l_map_start - c->mapstart;
1217 if (has_holes)
1218 /* Change protection on the excess portion to disallow all access;
1219 the portions we do not remap later will be inaccessible as if
1220 unallocated. Then jump into the normal segment-mapping loop to
1221 handle the portion of the segment past the end of the file
1222 mapping. */
1223 __mprotect ((caddr_t) (l->l_addr + c->mapend),
1224 loadcmds[nloadcmds - 1].mapstart - c->mapend,
1225 PROT_NONE);
1227 l->l_contiguous = 1;
1229 goto postmap;
1232 /* This object is loaded at a fixed address. This must never
1233 happen for objects loaded with dlopen(). */
1234 if (__builtin_expect ((mode & __RTLD_OPENEXEC) == 0, 0))
1236 errstring = N_("cannot dynamically load executable");
1237 goto call_lose;
1240 /* Notify ELF_PREFERRED_ADDRESS that we have to load this one
1241 fixed. */
1242 ELF_FIXED_ADDRESS (loader, c->mapstart);
1245 /* Remember which part of the address space this object uses. */
1246 l->l_map_start = c->mapstart + l->l_addr;
1247 l->l_map_end = l->l_map_start + maplength;
1248 l->l_contiguous = !has_holes;
1250 while (c < &loadcmds[nloadcmds])
1252 if (c->mapend > c->mapstart
1253 /* Map the segment contents from the file. */
1254 && (__mmap ((void *) (l->l_addr + c->mapstart),
1255 c->mapend - c->mapstart, c->prot,
1256 MAP_FIXED|MAP_COPY|MAP_FILE,
1257 fd, c->mapoff)
1258 == MAP_FAILED))
1259 goto map_error;
1261 postmap:
1262 if (c->prot & PROT_EXEC)
1263 l->l_text_end = l->l_addr + c->mapend;
1265 if (l->l_phdr == 0
1266 && (ElfW(Off)) c->mapoff <= header->e_phoff
1267 && ((size_t) (c->mapend - c->mapstart + c->mapoff)
1268 >= header->e_phoff + header->e_phnum * sizeof (ElfW(Phdr))))
1269 /* Found the program header in this segment. */
1270 l->l_phdr = (void *) (c->mapstart + header->e_phoff - c->mapoff);
1272 if (c->allocend > c->dataend)
1274 /* Extra zero pages should appear at the end of this segment,
1275 after the data mapped from the file. */
1276 ElfW(Addr) zero, zeroend, zeropage;
1278 zero = l->l_addr + c->dataend;
1279 zeroend = l->l_addr + c->allocend;
1280 zeropage = ((zero + GLRO(dl_pagesize) - 1)
1281 & ~(GLRO(dl_pagesize) - 1));
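/* Editor's worked example (hypothetical values, not part of the original
   source), continuing the numbers above with a 4096-byte page size and
   l_addr 0: dataend 0x6234 and allocend 0x8234 give zero 0x6234, zeroend
   0x8234 and zeropage 0x7000, so bytes 0x6234-0x6fff are cleared by the
   memset below and everything from 0x7000 up to zeroend comes from the
   anonymous zero-fill mapping.  */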
1283 if (zeroend < zeropage)
1284 /* All the extra data is in the last page of the segment.
1285 We can just zero it. */
1286 zeropage = zeroend;
1288 if (zeropage > zero)
1290 /* Zero the final part of the last page of the segment. */
1291 if (__builtin_expect ((c->prot & PROT_WRITE) == 0, 0))
1293 /* Dag nab it. */
1294 if (__mprotect ((caddr_t) (zero
1295 & ~(GLRO(dl_pagesize) - 1)),
1296 GLRO(dl_pagesize), c->prot|PROT_WRITE) < 0)
1298 errstring = N_("cannot change memory protections");
1299 goto call_lose_errno;
1302 memset ((void *) zero, '\0', zeropage - zero);
1303 if (__builtin_expect ((c->prot & PROT_WRITE) == 0, 0))
1304 __mprotect ((caddr_t) (zero & ~(GLRO(dl_pagesize) - 1)),
1305 GLRO(dl_pagesize), c->prot);
1308 if (zeroend > zeropage)
1310 /* Map the remaining zero pages in from the zero fill FD. */
1311 caddr_t mapat;
1312 mapat = __mmap ((caddr_t) zeropage, zeroend - zeropage,
1313 c->prot, MAP_ANON|MAP_PRIVATE|MAP_FIXED,
1314 ANONFD, 0);
1315 if (__builtin_expect (mapat == MAP_FAILED, 0))
1317 errstring = N_("cannot map zero-fill pages");
1318 goto call_lose_errno;
1323 ++c;
1327 if (l->l_ld == 0)
1329 if (__builtin_expect (type == ET_DYN, 0))
1331 errstring = N_("object file has no dynamic section");
1332 goto call_lose;
1335 else
1336 l->l_ld = (ElfW(Dyn) *) ((ElfW(Addr)) l->l_ld + l->l_addr);
1338 elf_get_dynamic_info (l, NULL);
1340 /* Make sure we are not dlopen'ing an object that has the
1341 DF_1_NOOPEN flag set. */
1342 if (__builtin_expect (l->l_flags_1 & DF_1_NOOPEN, 0)
1343 && (mode & __RTLD_DLOPEN))
1345 /* We are not supposed to load this object. Free all resources. */
1346 __munmap ((void *) l->l_map_start, l->l_map_end - l->l_map_start);
1348 if (!l->l_libname->dont_free)
1349 free (l->l_libname);
1351 if (l->l_phdr_allocated)
1352 free ((void *) l->l_phdr);
1354 errstring = N_("shared object cannot be dlopen()ed");
1355 goto call_lose;
1358 if (l->l_phdr == NULL)
1360 /* The program header is not contained in any of the segments.
1361 We have to allocate memory ourselves and copy it over from our
1362 temporary place. */
1363 ElfW(Phdr) *newp = (ElfW(Phdr) *) malloc (header->e_phnum
1364 * sizeof (ElfW(Phdr)));
1365 if (newp == NULL)
1367 errstring = N_("cannot allocate memory for program header");
1368 goto call_lose_errno;
1371 l->l_phdr = memcpy (newp, phdr,
1372 (header->e_phnum * sizeof (ElfW(Phdr))));
1373 l->l_phdr_allocated = 1;
1375 else
1376 /* Adjust the PT_PHDR value by the runtime load address. */
1377 l->l_phdr = (ElfW(Phdr) *) ((ElfW(Addr)) l->l_phdr + l->l_addr);
1379 if (__builtin_expect ((stack_flags &~ GL(dl_stack_flags)) & PF_X, 0))
1381 if (__builtin_expect (__check_caller (RETURN_ADDRESS (0), allow_ldso),
1382 0) != 0)
1384 errstring = N_("invalid caller");
1385 goto call_lose;
1388 /* The stack is presently not executable, but this module
1389 requires that it be executable. We must change the
1390 protection of the variable which contains the flags used in
1391 the mprotect calls. */
1392 #ifdef SHARED
1393 if ((mode & (__RTLD_DLOPEN | __RTLD_AUDIT)) == __RTLD_DLOPEN)
1395 const uintptr_t p = (uintptr_t) &__stack_prot & -GLRO(dl_pagesize);
1396 const size_t s = (uintptr_t) (&__stack_prot + 1) - p;
1398 struct link_map *const m = &GL(dl_rtld_map);
1399 const uintptr_t relro_end = ((m->l_addr + m->l_relro_addr
1400 + m->l_relro_size)
1401 & -GLRO(dl_pagesize));
1402 if (__builtin_expect (p + s <= relro_end, 1))
1404 /* The variable lies in the region protected by RELRO. */
1405 __mprotect ((void *) p, s, PROT_READ|PROT_WRITE);
1406 __stack_prot |= PROT_READ|PROT_WRITE|PROT_EXEC;
1407 __mprotect ((void *) p, s, PROT_READ);
1409 else
1410 __stack_prot |= PROT_READ|PROT_WRITE|PROT_EXEC;
1412 else
1413 #endif
1414 __stack_prot |= PROT_READ|PROT_WRITE|PROT_EXEC;
1416 #ifdef check_consistency
1417 check_consistency ();
1418 #endif
1420 errval = (*GL(dl_make_stack_executable_hook)) (stack_endp);
1421 if (errval)
1423 errstring = N_("\
1424 cannot enable executable stack as shared object requires");
1425 goto call_lose;
1429 /* Adjust the address of the TLS initialization image. */
1430 if (l->l_tls_initimage != NULL)
1431 l->l_tls_initimage = (char *) l->l_tls_initimage + l->l_addr;
1433 /* We are done mapping in the file. We no longer need the descriptor. */
1434 if (__builtin_expect (__close (fd) != 0, 0))
1436 errstring = N_("cannot close file descriptor");
1437 goto call_lose_errno;
1439 /* Signal that we closed the file. */
1440 fd = -1;
1442 if (l->l_type == lt_library && type == ET_EXEC)
1443 l->l_type = lt_executable;
1445 l->l_entry += l->l_addr;
1447 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
1448 _dl_debug_printf ("\
1449 dynamic: 0x%0*lx base: 0x%0*lx size: 0x%0*Zx\n\
1450 entry: 0x%0*lx phdr: 0x%0*lx phnum: %*u\n\n",
1451 (int) sizeof (void *) * 2,
1452 (unsigned long int) l->l_ld,
1453 (int) sizeof (void *) * 2,
1454 (unsigned long int) l->l_addr,
1455 (int) sizeof (void *) * 2, maplength,
1456 (int) sizeof (void *) * 2,
1457 (unsigned long int) l->l_entry,
1458 (int) sizeof (void *) * 2,
1459 (unsigned long int) l->l_phdr,
1460 (int) sizeof (void *) * 2, l->l_phnum);
1462 /* Set up the symbol hash table. */
1463 _dl_setup_hash (l);
1465 /* If this object has DT_SYMBOLIC set, modify its scope now. We don't
1466 have to do this for the main map. */
1467 if ((mode & RTLD_DEEPBIND) == 0
1468 && __builtin_expect (l->l_info[DT_SYMBOLIC] != NULL, 0)
1469 && &l->l_searchlist != l->l_scope[0])
1471 /* Create an appropriate searchlist. It contains only this map.
1472 This is the definition of DT_SYMBOLIC in SysVr4. */
1473 l->l_symbolic_searchlist.r_list[0] = l;
1474 l->l_symbolic_searchlist.r_nlist = 1;
1476 /* Now move the existing entries one back. */
1477 memmove (&l->l_scope[1], &l->l_scope[0],
1478 (l->l_scope_max - 1) * sizeof (l->l_scope[0]));
1480 /* Now add the new entry. */
1481 l->l_scope[0] = &l->l_symbolic_searchlist;
1484 /* Remember whether this object must be initialized first. */
1485 if (l->l_flags_1 & DF_1_INITFIRST)
1486 GL(dl_initfirst) = l;
1488 /* Finally the file information. */
1489 l->l_dev = st.st_dev;
1490 l->l_ino = st.st_ino;
1492 /* When we profile the SONAME might be needed for something other than
1493 loading. Add it right away. */
1494 if (__builtin_expect (GLRO(dl_profile) != NULL, 0)
1495 && l->l_info[DT_SONAME] != NULL)
1496 add_name_to_object (l, ((const char *) D_PTR (l, l_info[DT_STRTAB])
1497 + l->l_info[DT_SONAME]->d_un.d_val));
1499 #ifdef SHARED
1500 /* Auditing checkpoint: we have a new object. */
1501 if (__builtin_expect (GLRO(dl_naudit) > 0, 0)
1502 && !GL(dl_ns)[l->l_ns]._ns_loaded->l_auditing)
1504 struct audit_ifaces *afct = GLRO(dl_audit);
1505 for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
1507 if (afct->objopen != NULL)
1509 l->l_audit[cnt].bindflags
1510 = afct->objopen (l, nsid, &l->l_audit[cnt].cookie);
1512 l->l_audit_any_plt |= l->l_audit[cnt].bindflags != 0;
1515 afct = afct->next;
1518 #endif
1520 return l;
1523 /* Print search path. */
1524 static void
1525 print_search_path (struct r_search_path_elem **list,
1526 const char *what, const char *name)
1528 char buf[max_dirnamelen + max_capstrlen];
1529 int first = 1;
1531 _dl_debug_printf (" search path=");
1533 while (*list != NULL && (*list)->what == what) /* Yes, ==. */
1535 char *endp = __mempcpy (buf, (*list)->dirname, (*list)->dirnamelen);
1536 size_t cnt;
1538 for (cnt = 0; cnt < ncapstr; ++cnt)
1539 if ((*list)->status[cnt] != nonexisting)
1541 char *cp = __mempcpy (endp, capstr[cnt].str, capstr[cnt].len);
1542 if (cp == buf || (cp == buf + 1 && buf[0] == '/'))
1543 cp[0] = '\0';
1544 else
1545 cp[-1] = '\0';
1547 _dl_debug_printf_c (first ? "%s" : ":%s", buf);
1548 first = 0;
1551 ++list;
1554 if (name != NULL)
1555 _dl_debug_printf_c ("\t\t(%s from file %s)\n", what,
1556 name[0] ? name : rtld_progname);
1557 else
1558 _dl_debug_printf_c ("\t\t(%s)\n", what);
1561 /* Open a file and verify it is an ELF file for this architecture. We
1562 ignore only ELF files for other architectures. Non-ELF files and
1563 ELF files with different header information cause fatal errors since
1564 this could mean there is something wrong in the installation and the
1565 user might want to know about this. */
1566 static int
1567 open_verify (const char *name, struct filebuf *fbp, struct link_map *loader,
1568 int whatcode, bool *found_other_class, bool free_name)
1570 /* This is the expected ELF header. */
1571 #define ELF32_CLASS ELFCLASS32
1572 #define ELF64_CLASS ELFCLASS64
1573 #ifndef VALID_ELF_HEADER
1574 # define VALID_ELF_HEADER(hdr,exp,size) (memcmp (hdr, exp, size) == 0)
1575 # define VALID_ELF_OSABI(osabi) (osabi == ELFOSABI_SYSV)
1576 # define VALID_ELF_ABIVERSION(ver) (ver == 0)
1577 #endif
1578 static const unsigned char expected[EI_PAD] =
1580 [EI_MAG0] = ELFMAG0,
1581 [EI_MAG1] = ELFMAG1,
1582 [EI_MAG2] = ELFMAG2,
1583 [EI_MAG3] = ELFMAG3,
1584 [EI_CLASS] = ELFW(CLASS),
1585 [EI_DATA] = byteorder,
1586 [EI_VERSION] = EV_CURRENT,
1587 [EI_OSABI] = ELFOSABI_SYSV,
1588 [EI_ABIVERSION] = 0
1590 static const struct
1592 ElfW(Word) vendorlen;
1593 ElfW(Word) datalen;
1594 ElfW(Word) type;
1595 char vendor[4];
1596 } expected_note = { 4, 16, 1, "GNU" };
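/* Editor's note (assumed note layout, not part of the original source):
   a .note.ABI-tag entry consists of namesz (4), descsz (16), type (1),
   the name "GNU\0" and four 32-bit words { OS, major, minor, patch }.
   That is why the loop below compares abi_note[0..3] against
   expected_note, reads the OS tag from abi_note[4] and folds
   abi_note[5..7] into osversion as major * 65536 + minor * 256 + patch.  */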
1597 /* Initialize it to make the compiler happy. */
1598 const char *errstring = NULL;
1599 int errval = 0;
1601 #ifdef SHARED
1602 /* Give the auditing libraries a chance. */
1603 if (__builtin_expect (GLRO(dl_naudit) > 0, 0) && whatcode != 0
1604 && loader->l_auditing == 0)
1606 struct audit_ifaces *afct = GLRO(dl_audit);
1607 for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
1609 if (afct->objsearch != NULL)
1611 name = afct->objsearch (name, &loader->l_audit[cnt].cookie,
1612 whatcode);
1613 if (name == NULL)
1614 /* Ignore the path. */
1615 return -1;
1618 afct = afct->next;
1621 #endif
1623 /* Open the file. We always open files read-only. */
1624 int fd = __open (name, O_RDONLY);
1625 if (fd != -1)
1627 ElfW(Ehdr) *ehdr;
1628 ElfW(Phdr) *phdr, *ph;
1629 ElfW(Word) *abi_note;
1630 unsigned int osversion;
1631 size_t maplength;
1633 /* We successfully opened the file. Now verify it is a file
1634 we can use. */
1635 __set_errno (0);
1636 fbp->len = __libc_read (fd, fbp->buf, sizeof (fbp->buf));
1638 /* This is where the ELF header is loaded. */
1639 assert (sizeof (fbp->buf) > sizeof (ElfW(Ehdr)));
1640 ehdr = (ElfW(Ehdr) *) fbp->buf;
1642 /* Now run the tests. */
1643 if (__builtin_expect (fbp->len < (ssize_t) sizeof (ElfW(Ehdr)), 0))
1645 errval = errno;
1646 errstring = (errval == 0
1647 ? N_("file too short") : N_("cannot read file data"));
1648 call_lose:
1649 if (free_name)
1651 char *realname = (char *) name;
1652 name = strdupa (realname);
1653 free (realname);
1655 lose (errval, fd, name, NULL, NULL, errstring, NULL);
1658 /* See whether the ELF header is what we expect. */
1659 if (__builtin_expect (! VALID_ELF_HEADER (ehdr->e_ident, expected,
1660 EI_PAD), 0))
1662 /* Something is wrong. */
1663 if (*(Elf32_Word *) &ehdr->e_ident !=
1664 #if BYTE_ORDER == LITTLE_ENDIAN
1665 ((ELFMAG0 << (EI_MAG0 * 8)) |
1666 (ELFMAG1 << (EI_MAG1 * 8)) |
1667 (ELFMAG2 << (EI_MAG2 * 8)) |
1668 (ELFMAG3 << (EI_MAG3 * 8)))
1669 #else
1670 ((ELFMAG0 << (EI_MAG3 * 8)) |
1671 (ELFMAG1 << (EI_MAG2 * 8)) |
1672 (ELFMAG2 << (EI_MAG1 * 8)) |
1673 (ELFMAG3 << (EI_MAG0 * 8)))
1674 #endif
1676 errstring = N_("invalid ELF header");
1677 else if (ehdr->e_ident[EI_CLASS] != ELFW(CLASS))
1679 /* This is not a fatal error. On architectures where
1680 32-bit and 64-bit binaries can be run this might
1681 happen. */
1682 *found_other_class = true;
1683 goto close_and_out;
1685 else if (ehdr->e_ident[EI_DATA] != byteorder)
1687 if (BYTE_ORDER == BIG_ENDIAN)
1688 errstring = N_("ELF file data encoding not big-endian");
1689 else
1690 errstring = N_("ELF file data encoding not little-endian");
1692 else if (ehdr->e_ident[EI_VERSION] != EV_CURRENT)
1693 errstring
1694 = N_("ELF file version ident does not match current one");
1695 /* XXX We should be able to set system-specific versions which are
1696 allowed here. */
1697 else if (!VALID_ELF_OSABI (ehdr->e_ident[EI_OSABI]))
1698 errstring = N_("ELF file OS ABI invalid");
1699 else if (!VALID_ELF_ABIVERSION (ehdr->e_ident[EI_ABIVERSION]))
1700 errstring = N_("ELF file ABI version invalid");
1701 else
1702 /* Otherwise we don't know what went wrong. */
1703 errstring = N_("internal error");
1705 goto call_lose;
1708 if (__builtin_expect (ehdr->e_version, EV_CURRENT) != EV_CURRENT)
1710 errstring = N_("ELF file version does not match current one");
1711 goto call_lose;
1713 if (! __builtin_expect (elf_machine_matches_host (ehdr), 1))
1714 goto close_and_out;
1715 else if (__builtin_expect (ehdr->e_type, ET_DYN) != ET_DYN
1716 && __builtin_expect (ehdr->e_type, ET_EXEC) != ET_EXEC)
1718 errstring = N_("only ET_DYN and ET_EXEC can be loaded");
1719 goto call_lose;
1721 else if (__builtin_expect (ehdr->e_phentsize, sizeof (ElfW(Phdr)))
1722 != sizeof (ElfW(Phdr)))
1724 errstring = N_("ELF file's phentsize not the expected size");
1725 goto call_lose;
1728 maplength = ehdr->e_phnum * sizeof (ElfW(Phdr));
1729 if (ehdr->e_phoff + maplength <= (size_t) fbp->len)
1730 phdr = (void *) (fbp->buf + ehdr->e_phoff);
1731 else
1733 phdr = alloca (maplength);
1734 __lseek (fd, ehdr->e_phoff, SEEK_SET);
1735 if ((size_t) __libc_read (fd, (void *) phdr, maplength) != maplength)
1737 read_error:
1738 errval = errno;
1739 errstring = N_("cannot read file data");
1740 goto call_lose;
1744 /* Check .note.ABI-tag if present. */
1745 for (ph = phdr; ph < &phdr[ehdr->e_phnum]; ++ph)
1746 if (ph->p_type == PT_NOTE && ph->p_filesz >= 32 && ph->p_align >= 4)
1748 ElfW(Addr) size = ph->p_filesz;
1750 if (ph->p_offset + size <= (size_t) fbp->len)
1751 abi_note = (void *) (fbp->buf + ph->p_offset);
1752 else
1754 abi_note = alloca (size);
1755 __lseek (fd, ph->p_offset, SEEK_SET);
1756 if (__libc_read (fd, (void *) abi_note, size) != size)
1757 goto read_error;
1760 while (memcmp (abi_note, &expected_note, sizeof (expected_note)))
1762 #define ROUND(len) (((len) + sizeof (ElfW(Word)) - 1) & -sizeof (ElfW(Word)))
1763 ElfW(Addr) note_size = 3 * sizeof (ElfW(Word))
1764 + ROUND (abi_note[0])
1765 + ROUND (abi_note[1]);
1767 if (size - 32 < note_size)
1769 size = 0;
1770 break;
1772 size -= note_size;
1773 abi_note = (void *) abi_note + note_size;
1776 if (size == 0)
1777 continue;
1779 osversion = (abi_note[5] & 0xff) * 65536
1780 + (abi_note[6] & 0xff) * 256
1781 + (abi_note[7] & 0xff);
1782 if (abi_note[4] != __ABI_TAG_OS
1783 || (GLRO(dl_osversion) && GLRO(dl_osversion) < osversion))
1785 close_and_out:
1786 __close (fd);
1787 __set_errno (ENOENT);
1788 fd = -1;
1791 break;
1795 return fd;
1798 /* Try to open NAME in one of the directories in *DIRSP.
1799 Return the fd, or -1. If successful, fill in *REALNAME
1800 with the malloc'd full directory name. If it turns out
1801 that none of the directories in *DIRSP exists, *DIRSP is
1802 replaced with (void *) -1, and the old value is free()d
1803 if MAY_FREE_DIRS is true. */
1805 static int
1806 open_path (const char *name, size_t namelen, int preloaded,
1807 struct r_search_path_struct *sps, char **realname,
1808 struct filebuf *fbp, struct link_map *loader, int whatcode,
1809 bool *found_other_class)
1811 struct r_search_path_elem **dirs = sps->dirs;
1812 char *buf;
1813 int fd = -1;
1814 const char *current_what = NULL;
1815 int any = 0;
1817 if (__builtin_expect (dirs == NULL, 0))
1818 /* We're called before _dl_init_paths when loading the main executable
1819 given on the command line when rtld is run directly. */
1820 return -1;
1822 buf = alloca (max_dirnamelen + max_capstrlen + namelen);
1825 struct r_search_path_elem *this_dir = *dirs;
1826 size_t buflen = 0;
1827 size_t cnt;
1828 char *edp;
1829 int here_any = 0;
1830 int err;
1832 /* If we are debugging the search for libraries print the path
1833 now if it hasn't happened yet. */
1834 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS, 0)
1835 && current_what != this_dir->what)
1837 current_what = this_dir->what;
1838 print_search_path (dirs, current_what, this_dir->where);
1841 edp = (char *) __mempcpy (buf, this_dir->dirname, this_dir->dirnamelen);
1842 for (cnt = 0; fd == -1 && cnt < ncapstr; ++cnt)
1844 /* Skip this directory if we know it does not exist. */
1845 if (this_dir->status[cnt] == nonexisting)
1846 continue;
1848 buflen =
1849 ((char *) __mempcpy (__mempcpy (edp, capstr[cnt].str,
1850 capstr[cnt].len),
1851 name, namelen)
1852 - buf);
1854 /* Print name we try if this is wanted. */
1855 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS, 0))
1856 _dl_debug_printf (" trying file=%s\n", buf);
1858 fd = open_verify (buf, fbp, loader, whatcode, found_other_class,
1859 false);
1860 if (this_dir->status[cnt] == unknown)
1862 if (fd != -1)
1863 this_dir->status[cnt] = existing;
1864 /* Do not update the directory information when loading
1865 auditing code. We must try to disturb the program as
1866 little as possible. */
1867 else if (loader == NULL
1868 || GL(dl_ns)[loader->l_ns]._ns_loaded->l_auditing == 0)
1870 /* We failed to open the machine dependent library. Let's
1871 test whether there is any directory at all. */
1872 struct stat64 st;
1874 buf[buflen - namelen - 1] = '\0';
1876 if (__xstat64 (_STAT_VER, buf, &st) != 0
1877 || ! S_ISDIR (st.st_mode))
1878 /* The directory does not exist or it is not a directory. */
1879 this_dir->status[cnt] = nonexisting;
1880 else
1881 this_dir->status[cnt] = existing;
1885 /* Remember whether we found any existing directory. */
1886 here_any |= this_dir->status[cnt] != nonexisting;
1888 if (fd != -1 && __builtin_expect (preloaded, 0)
1889 && INTUSE(__libc_enable_secure))
1891 /* This is an extra security effort to make sure nobody can
1892 preload broken shared objects which are in the trusted
1893 directories and so exploit the bugs. */
1894 struct stat64 st;
1896 if (__fxstat64 (_STAT_VER, fd, &st) != 0
1897 || (st.st_mode & S_ISUID) == 0)
1899 /* The shared object cannot be tested for being SUID
1900 or this bit is not set. In this case we must not
1901 use this object. */
1902 __close (fd);
1903 fd = -1;
1904 /* We simply ignore the file, signal this by setting
1905 the error value which would have been set by `open'. */
1906 errno = ENOENT;
1911 if (fd != -1)
1913 *realname = (char *) malloc (buflen);
1914 if (*realname != NULL)
1916 memcpy (*realname, buf, buflen);
1917 return fd;
1919 else
1921 /* No memory for the name, we certainly won't be able
1922 to load and link it. */
1923 __close (fd);
1924 return -1;
1927 if (here_any && (err = errno) != ENOENT && err != EACCES)
1928 /* The file exists and is readable, but something went wrong. */
1929 return -1;
1931 /* Remember whether we found anything. */
1932 any |= here_any;
1934 while (*++dirs != NULL);
1936 /* Remove the whole path if none of the directories exists. */
1937 if (__builtin_expect (! any, 0))
1939 /* Paths which were allocated using the minimal malloc() in ld.so
1940 must not be freed using the general free() in libc. */
1941 if (sps->malloced)
1942 free (sps->dirs);
1944 /* rtld_search_dirs is attribute_relro, therefore avoid writing
1945 into it. */
1946 if (sps != &rtld_search_dirs)
1947 sps->dirs = (void *) -1;
1950 return -1;
1953 /* Map in the shared object file NAME. */

struct link_map *
internal_function
_dl_map_object (struct link_map *loader, const char *name, int preloaded,
                int type, int trace_mode, int mode, Lmid_t nsid)
{
  int fd;
  char *realname;
  char *name_copy;
  struct link_map *l;
  struct filebuf fb;

  assert (nsid >= 0);
  assert (nsid < DL_NNS);

  /* Look for this name among those already loaded.  */
  for (l = GL(dl_ns)[nsid]._ns_loaded; l; l = l->l_next)
    {
      /* If the requested name matches the soname of a loaded object,
         use that object.  Elide this check for names that have not
         yet been opened.  */
      if (__builtin_expect (l->l_faked, 0) != 0
          || __builtin_expect (l->l_removed, 0) != 0)
        continue;
      if (!_dl_name_match_p (name, l))
        {
          const char *soname;

          if (__builtin_expect (l->l_soname_added, 1)
              || l->l_info[DT_SONAME] == NULL)
            continue;

          soname = ((const char *) D_PTR (l, l_info[DT_STRTAB])
                    + l->l_info[DT_SONAME]->d_un.d_val);
          if (strcmp (name, soname) != 0)
            continue;

          /* We have a match on a new name -- cache it.  */
          add_name_to_object (l, soname);
          l->l_soname_added = 1;
        }

      /* We have a match.  */
      return l;
    }

  /* Display information if we are debugging.  */
  if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0)
      && loader != NULL)
    _dl_debug_printf ("\nfile=%s [%lu]; needed by %s [%lu]\n", name, nsid,
                      loader->l_name[0]
                      ? loader->l_name : rtld_progname, loader->l_ns);

#ifdef SHARED
  /* Give the auditing libraries a chance to change the name before we
     try anything.  */
  if (__builtin_expect (GLRO(dl_naudit) > 0, 0)
      && (loader == NULL || loader->l_auditing == 0))
    {
      struct audit_ifaces *afct = GLRO(dl_audit);
      for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
        {
          if (afct->objsearch != NULL)
            {
              name = afct->objsearch (name, &loader->l_audit[cnt].cookie,
                                      LA_SER_ORIG);
              if (name == NULL)
                {
                  /* Do not try anything further.  */
                  fd = -1;
                  goto no_file;
                }
            }

          afct = afct->next;
        }
    }
#endif
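
/* A minimal sketch of the la_objsearch hook an LD_AUDIT module could
   supply (an illustration, not part of this file); whatever string it
   returns replaces NAME above, and NULL aborts the search:

     #include <link.h>

     unsigned int
     la_version (unsigned int version)
     {
       return version;
     }

     char *
     la_objsearch (const char *name, uintptr_t *cookie, unsigned int flag)
     {
       // Redirect nothing; simply accept the proposed name.
       return (char *) name;
     }
*/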

  /* Will be true if we found a DSO which is of the other ELF class.  */
  bool found_other_class = false;

  if (strchr (name, '/') == NULL)
    {
      /* Search for NAME in several places.  */

      size_t namelen = strlen (name) + 1;

      if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS, 0))
        _dl_debug_printf ("find library=%s [%lu]; searching\n", name, nsid);

      fd = -1;

      /* When the object has the RUNPATH information we don't use any
         RPATHs.  */
      if (loader == NULL || loader->l_info[DT_RUNPATH] == NULL)
        {
          /* This is the executable's map (if there is one).  Make sure that
             we do not look at it twice.  */
          struct link_map *main_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
          bool did_main_map = false;

          /* First try the DT_RPATH of the dependent object that caused NAME
             to be loaded.  Then that object's dependent, and on up.  */
          for (l = loader; l; l = l->l_loader)
            if (cache_rpath (l, &l->l_rpath_dirs, DT_RPATH, "RPATH"))
              {
                fd = open_path (name, namelen, preloaded, &l->l_rpath_dirs,
                                &realname, &fb, loader, LA_SER_RUNPATH,
                                &found_other_class);
                if (fd != -1)
                  break;

                did_main_map |= l == main_map;
              }

          /* If dynamically linked, try the DT_RPATH of the executable
             itself.  NB: we do this for lookups in any namespace.  */
          if (fd == -1 && !did_main_map
              && main_map != NULL && main_map->l_type != lt_loaded
              && cache_rpath (main_map, &main_map->l_rpath_dirs, DT_RPATH,
                              "RPATH"))
            fd = open_path (name, namelen, preloaded, &main_map->l_rpath_dirs,
                            &realname, &fb, loader ?: main_map, LA_SER_RUNPATH,
                            &found_other_class);
        }

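      /* env_path_list is set up by _dl_init_paths from LD_LIBRARY_PATH;
         its dirs field is (void *) -1 when there is nothing usable to
         search (no LD_LIBRARY_PATH, or every directory in it turned out
         not to exist), which is what the test below checks.  */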
      /* Try the LD_LIBRARY_PATH environment variable.  */
      if (fd == -1 && env_path_list.dirs != (void *) -1)
        fd = open_path (name, namelen, preloaded, &env_path_list,
                        &realname, &fb,
                        loader ?: GL(dl_ns)[LM_ID_BASE]._ns_loaded,
                        LA_SER_LIBPATH, &found_other_class);

      /* Look at the RUNPATH information for this binary.  */
      if (fd == -1 && loader != NULL
          && cache_rpath (loader, &loader->l_runpath_dirs,
                          DT_RUNPATH, "RUNPATH"))
        fd = open_path (name, namelen, preloaded,
                        &loader->l_runpath_dirs, &realname, &fb, loader,
                        LA_SER_RUNPATH, &found_other_class);

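      /* In secure-exec mode a preloaded object is never taken from
         ld.so.cache; only the directory searches above, which go through
         open_path and its set-user-ID check, may have produced FD.  */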
      if (fd == -1
          && (__builtin_expect (! preloaded, 1)
              || ! INTUSE(__libc_enable_secure)))
        {
          /* Check the list of libraries in the file /etc/ld.so.cache,
             for compatibility with Linux's ldconfig program.  */
          const char *cached = _dl_load_cache_lookup (name);

          if (cached != NULL)
            {
#ifdef SHARED
              // XXX Correct to unconditionally default to namespace 0?
              l = loader ?: GL(dl_ns)[LM_ID_BASE]._ns_loaded;
#else
              l = loader;
#endif

              /* If the loader has the DF_1_NODEFLIB flag set we must not
                 use a cache entry from any of these directories.  */
              if (
#ifndef SHARED
                  /* 'l' is always != NULL for dynamically linked objects.  */
                  l != NULL &&
#endif
                  __builtin_expect (l->l_flags_1 & DF_1_NODEFLIB, 0))
                {
                  const char *dirp = system_dirs;
                  unsigned int cnt = 0;

                  do
                    {
                      if (memcmp (cached, dirp, system_dirs_len[cnt]) == 0)
                        {
                          /* The prefix matches.  Don't use the entry.  */
                          cached = NULL;
                          break;
                        }

                      dirp += system_dirs_len[cnt] + 1;
                      ++cnt;
                    }
                  while (cnt < nsystem_dirs_len);
                }

              if (cached != NULL)
                {
                  fd = open_verify (cached,
                                    &fb, loader ?: GL(dl_ns)[nsid]._ns_loaded,
                                    LA_SER_CONFIG, &found_other_class, false);
                  if (__builtin_expect (fd != -1, 1))
                    {
                      realname = local_strdup (cached);
                      if (realname == NULL)
                        {
                          __close (fd);
                          fd = -1;
                        }
                    }
                }
            }
        }

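      /* rtld_search_dirs holds the built-in trusted directories (typically
         /lib and /usr/lib, plus whatever slibdir the build configured),
         set up once by _dl_init_paths.  */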
      /* Finally, try the default path.  */
      if (fd == -1
          && ((l = loader ?: GL(dl_ns)[nsid]._ns_loaded) == NULL
              || __builtin_expect (!(l->l_flags_1 & DF_1_NODEFLIB), 1))
          && rtld_search_dirs.dirs != (void *) -1)
        fd = open_path (name, namelen, preloaded, &rtld_search_dirs,
                        &realname, &fb, l, LA_SER_DEFAULT, &found_other_class);

      /* Add another newline when we are tracing the library loading.  */
      if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS, 0))
        _dl_debug_printf ("\n");
    }
  else
    {
      /* The path may contain dynamic string tokens.  */
      realname = (loader
                  ? expand_dynamic_string_token (loader, name)
                  : local_strdup (name));
      if (realname == NULL)
        fd = -1;
      else
        {
          fd = open_verify (realname, &fb,
                            loader ?: GL(dl_ns)[nsid]._ns_loaded, 0,
                            &found_other_class, true);
          if (__builtin_expect (fd, 0) == -1)
            free (realname);
        }
    }

#ifdef SHARED
 no_file:
#endif
  /* In case the LOADER information has only been provided to get to
     the appropriate RUNPATH/RPATH information we do not need it
     anymore.  */
  if (mode & __RTLD_CALLMAP)
    loader = NULL;

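  /* If nothing was found and we are merely tracing (ldd or
     LD_TRACE_LOADED_OBJECTS), the code below fabricates a descriptor
     instead of failing so the dependency listing can still be printed;
     a real load reports an error through _dl_signal_error.  */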
  if (__builtin_expect (fd, 0) == -1)
    {
      if (trace_mode
          && __builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_PRELINK, 0) == 0)
        {
          /* We haven't found an appropriate library.  But since we
             are only interested in the list of libraries this isn't
             so severe.  Fake an entry with all the information we
             have.  */
          static const Elf_Symndx dummy_bucket = STN_UNDEF;

          /* Enter the new object in the list of loaded objects.  */
          if ((name_copy = local_strdup (name)) == NULL
              || (l = _dl_new_object (name_copy, name, type, loader,
                                      mode, nsid)) == NULL)
            {
              free (name_copy);
              _dl_signal_error (ENOMEM, name, NULL,
                                N_("cannot create shared object descriptor"));
            }
          /* Signal that this is a faked entry.  */
          l->l_faked = 1;
          /* Since the descriptor is initialized with zero we do not
             have to do this here.
          l->l_reserved = 0;  */
          l->l_buckets = &dummy_bucket;
          l->l_nbuckets = 1;
          l->l_relocated = 1;

          return l;
        }
      else if (found_other_class)
        _dl_signal_error (0, name, NULL,
                          ELFW(CLASS) == ELFCLASS32
                          ? N_("wrong ELF class: ELFCLASS64")
                          : N_("wrong ELF class: ELFCLASS32"));
      else
        _dl_signal_error (errno, name, NULL,
                          N_("cannot open shared object file"));
    }

  void *stack_end = __libc_stack_end;
  return _dl_map_object_from_fd (name, fd, &fb, realname, loader, type, mode,
                                 &stack_end, nsid);
}

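/* _dl_rtld_di_serinfo implements dlinfo's RTLD_DI_SERINFOSIZE and
   RTLD_DI_SERINFO requests.  The usual two-pass calling sequence
   (a sketch; error handling omitted) looks like this:

     Dl_serinfo size;
     Dl_serinfo *sip;

     dlinfo (handle, RTLD_DI_SERINFOSIZE, &size);   // counting pass
     sip = malloc (size.dls_size);
     dlinfo (handle, RTLD_DI_SERINFOSIZE, sip);     // init dls_size/dls_cnt
     dlinfo (handle, RTLD_DI_SERINFO, sip);         // filling pass
*/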

void
internal_function
_dl_rtld_di_serinfo (struct link_map *loader, Dl_serinfo *si, bool counting)
{
  if (counting)
    {
      si->dls_cnt = 0;
      si->dls_size = 0;
    }

  unsigned int idx = 0;
  char *allocptr = (char *) &si->dls_serpath[si->dls_cnt];
  void add_path (const struct r_search_path_struct *sps, unsigned int flags)
# define add_path(sps, flags) add_path(sps, 0) /* XXX */
    {
      if (sps->dirs != (void *) -1)
        {
          struct r_search_path_elem **dirs = sps->dirs;
          do
            {
              const struct r_search_path_elem *const r = *dirs++;
              if (counting)
                {
                  si->dls_cnt++;
                  si->dls_size += MAX (2, r->dirnamelen);
                }
              else
                {
                  Dl_serpath *const sp = &si->dls_serpath[idx++];
                  sp->dls_name = allocptr;
                  if (r->dirnamelen < 2)
                    *allocptr++ = r->dirnamelen ? '/' : '.';
                  else
                    allocptr = __mempcpy (allocptr,
                                          r->dirname, r->dirnamelen - 1);
                  *allocptr++ = '\0';
                  sp->dls_flags = flags;
                }
            }
          while (*dirs != NULL);
        }
    }

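  /* Note that the add_path macro above discards its FLAGS argument before
     compilation: every call below expands to add_path (sps, 0), so the
     XXX_RPATH/XXX_ENV/XXX_RUNPATH/XXX_default tokens never need to be
     defined and every dls_flags field currently ends up 0.  */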
  /* When the object has the RUNPATH information we don't use any RPATHs.  */
  if (loader->l_info[DT_RUNPATH] == NULL)
    {
      /* First try the DT_RPATH of the dependent object that caused NAME
         to be loaded.  Then that object's dependent, and on up.  */
      struct link_map *l = loader;
      do
        {
          if (cache_rpath (l, &l->l_rpath_dirs, DT_RPATH, "RPATH"))
            add_path (&l->l_rpath_dirs, XXX_RPATH);
          l = l->l_loader;
        }
      while (l != NULL);

      /* If dynamically linked, try the DT_RPATH of the executable itself.  */
      if (loader->l_ns == LM_ID_BASE)
        {
          l = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
          if (l != NULL && l->l_type != lt_loaded && l != loader)
            if (cache_rpath (l, &l->l_rpath_dirs, DT_RPATH, "RPATH"))
              add_path (&l->l_rpath_dirs, XXX_RPATH);
        }
    }

  /* Try the LD_LIBRARY_PATH environment variable.  */
  add_path (&env_path_list, XXX_ENV);

  /* Look at the RUNPATH information for this binary.  */
  if (cache_rpath (loader, &loader->l_runpath_dirs, DT_RUNPATH, "RUNPATH"))
    add_path (&loader->l_runpath_dirs, XXX_RUNPATH);

  /* XXX
     Here is where ld.so.cache gets checked, but we don't have
     a way to indicate that in the results for Dl_serinfo.  */

  /* Finally, try the default path.  */
  if (!(loader->l_flags_1 & DF_1_NODEFLIB))
    add_path (&rtld_search_dirs, XXX_default);

  if (counting)
    /* Count the struct size before the string area, which we didn't
       know before we completed dls_cnt.  */
    si->dls_size += (char *) &si->dls_serpath[si->dls_cnt] - (char *) si;