Incorrect x86 CPU family and model check.
[glibc.git] / elf / dl-load.c
blob0adddf5aaa2008c5c2ee91fd4b96489605581baa
1 /* Map in a shared object's segments from the file.
2 Copyright (C) 1995-2005, 2006, 2007, 2009, 2010 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, write to the Free
17 Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
18 02111-1307 USA. */
20 #include <elf.h>
21 #include <errno.h>
22 #include <fcntl.h>
23 #include <libintl.h>
24 #include <stdbool.h>
25 #include <stdlib.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <ldsodefs.h>
29 #include <bits/wordsize.h>
30 #include <sys/mman.h>
31 #include <sys/param.h>
32 #include <sys/stat.h>
33 #include <sys/types.h>
34 #include "dynamic-link.h"
35 #include <abi-tag.h>
36 #include <stackinfo.h>
37 #include <caller.h>
38 #include <sysdep.h>
40 #include <dl-dst.h>
42 /* On some systems, no flag bits are given to specify file mapping. */
43 #ifndef MAP_FILE
44 # define MAP_FILE 0
45 #endif
47 /* The right way to map in the shared library files is MAP_COPY, which
48 makes a virtual copy of the data at the time of the mmap call; this
49 guarantees the mapped pages will be consistent even if the file is
50 overwritten. Some losing VM systems like Linux's lack MAP_COPY. All we
51 get is MAP_PRIVATE, which copies each page when it is modified; this
52 means if the file is overwritten, we may at some point get some pages
53 from the new version after starting with pages from the old version.
55 To make up for the lack and avoid the overwriting problem,
56 what Linux does have is MAP_DENYWRITE. This prevents anyone
57 from modifying the file while we have it mapped. */
58 #ifndef MAP_COPY
59 # ifdef MAP_DENYWRITE
60 # define MAP_COPY (MAP_PRIVATE | MAP_DENYWRITE)
61 # else
62 # define MAP_COPY MAP_PRIVATE
63 # endif
64 #endif
66 /* Some systems link their relocatable objects for another base address
67 than 0. We want to know the base address for these such that we can
68 subtract this address from the segment addresses during mapping.
69 This results in a more efficient address space usage. Defaults to
70 zero for almost all systems. */
71 #ifndef MAP_BASE_ADDR
72 # define MAP_BASE_ADDR(l) 0
73 #endif
76 #include <endian.h>
77 #if BYTE_ORDER == BIG_ENDIAN
78 # define byteorder ELFDATA2MSB
79 #elif BYTE_ORDER == LITTLE_ENDIAN
80 # define byteorder ELFDATA2LSB
81 #else
82 # error "Unknown BYTE_ORDER " BYTE_ORDER
83 # define byteorder ELFDATANONE
84 #endif
86 #define STRING(x) __STRING (x)
88 /* Handle situations where we have a preferred location in memory for
89 the shared objects. */
90 #ifdef ELF_PREFERRED_ADDRESS_DATA
91 ELF_PREFERRED_ADDRESS_DATA;
92 #endif
93 #ifndef ELF_PREFERRED_ADDRESS
94 # define ELF_PREFERRED_ADDRESS(loader, maplength, mapstartpref) (mapstartpref)
95 #endif
96 #ifndef ELF_FIXED_ADDRESS
97 # define ELF_FIXED_ADDRESS(loader, mapstart) ((void) 0)
98 #endif
101 int __stack_prot attribute_hidden attribute_relro
102 #if _STACK_GROWS_DOWN && defined PROT_GROWSDOWN
103 = PROT_GROWSDOWN;
104 #elif _STACK_GROWS_UP && defined PROT_GROWSUP
105 = PROT_GROWSUP;
106 #else
107 = 0;
108 #endif
111 /* Type for the buffer we put the ELF header and hopefully the program
112 header. This buffer does not really have to be too large. In most
113 cases the program header follows the ELF header directly. If this
114 is not the case all bets are off and we can make the header
115 arbitrarily large and still won't get it read. This means the only
116 question is how large are the ELF and program header combined. The
117 ELF header 32-bit files is 52 bytes long and in 64-bit files is 64
118 bytes long. Each program header entry is again 32 and 56 bytes
119 long respectively. I.e., even with a file which has 10 program
120 header entries we only have to read 372B/624B respectively. Add to
121 this a bit of margin for program notes and reading 512B and 832B
122 for 32-bit and 64-bit files respectively is enough. If this
123 heuristic should really fail for some file the code in
124 `_dl_map_object_from_fd' knows how to recover. */
125 struct filebuf
127 ssize_t len;
128 #if __WORDSIZE == 32
129 # define FILEBUF_SIZE 512
130 #else
131 # define FILEBUF_SIZE 832
132 #endif
133 char buf[FILEBUF_SIZE] __attribute__ ((aligned (__alignof (ElfW(Ehdr)))));
136 /* This is the decomposed LD_LIBRARY_PATH search path. */
137 static struct r_search_path_struct env_path_list attribute_relro;
139 /* List of the hardware capabilities we might end up using. */
140 static const struct r_strlenpair *capstr attribute_relro;
141 static size_t ncapstr attribute_relro;
142 static size_t max_capstrlen attribute_relro;
145 /* Get the generated information about the trusted directories. */
146 #include "trusted-dirs.h"
148 static const char system_dirs[] = SYSTEM_DIRS;
149 static const size_t system_dirs_len[] =
151 SYSTEM_DIRS_LEN
153 #define nsystem_dirs_len \
154 (sizeof (system_dirs_len) / sizeof (system_dirs_len[0]))
157 /* Local version of `strdup' function. */
/* Local version of `strdup'.  Kept in-file so the dynamic linker does
   not depend on the libc symbol.  Returns NULL on allocation failure;
   the caller owns (and must free) the returned copy.  */
static char *
local_strdup (const char *s)
{
  size_t size = strlen (s) + 1;
  char *copy = malloc (size);

  return copy == NULL ? NULL : (char *) memcpy (copy, s, size);
}
/* Test whether NAME (pointing just past a '$' inside the string that
   begins at START) names the dynamic string token STR, either bare
   ("$STR") or in curly form ("${STR}").  Returns the number of
   characters the token occupies after the '$' (including braces for the
   curly form), or 0 when it does not match.  IS_PATH says ':' is a
   valid terminator; SECURE additionally requires that the token is the
   whole string (or whole path element).  */
static size_t
is_dst (const char *start, const char *name, const char *str,
        int is_path, int secure)
{
  bool curly = name[0] == '{';
  const char *tok = curly ? name + 1 : name;
  size_t len = 0;

  /* Length of the common prefix of the candidate and STR.  */
  while (tok[len] == str[len] && tok[len] != '\0')
    ++len;

  if (curly)
    {
      /* The curly form must be closed immediately after the token.  */
      if (tok[len] != '}')
        return 0;

      /* Account for both braces; NAME (still at '{') is the reference
         point for the terminator checks below.  */
      len += 2;
    }
  else if (name[len] != '\0' && name[len] != '/'
           && (!is_path || name[len] != ':'))
    return 0;

  /* In secure mode the DST must stand alone in its (path) element.  */
  if (__builtin_expect (secure, 0)
      && ((name[len] != '\0' && (!is_path || name[len] != ':'))
          || (name != start + 1 && (!is_path || name[-2] != ':'))))
    return 0;

  return len;
}
211 size_t
212 _dl_dst_count (const char *name, int is_path)
214 const char *const start = name;
215 size_t cnt = 0;
219 size_t len;
221 /* $ORIGIN is not expanded for SUID/GUID programs (except if it
222 is $ORIGIN alone) and it must always appear first in path. */
223 ++name;
224 if ((len = is_dst (start, name, "ORIGIN", is_path,
225 INTUSE(__libc_enable_secure))) != 0
226 || (len = is_dst (start, name, "PLATFORM", is_path, 0)) != 0
227 || (len = is_dst (start, name, "LIB", is_path, 0)) != 0)
228 ++cnt;
230 name = strchr (name + len, '$');
232 while (name != NULL);
234 return cnt;
238 char *
239 _dl_dst_substitute (struct link_map *l, const char *name, char *result,
240 int is_path)
242 const char *const start = name;
243 char *last_elem, *wp;
245 /* Now fill the result path. While copying over the string we keep
246 track of the start of the last path element. When we come accross
247 a DST we copy over the value or (if the value is not available)
248 leave the entire path element out. */
249 last_elem = wp = result;
253 if (__builtin_expect (*name == '$', 0))
255 const char *repl = NULL;
256 size_t len;
258 ++name;
259 if ((len = is_dst (start, name, "ORIGIN", is_path,
260 INTUSE(__libc_enable_secure))) != 0)
262 #ifndef SHARED
263 if (l == NULL)
264 repl = _dl_get_origin ();
265 else
266 #endif
267 repl = l->l_origin;
269 else if ((len = is_dst (start, name, "PLATFORM", is_path, 0)) != 0)
270 repl = GLRO(dl_platform);
271 else if ((len = is_dst (start, name, "LIB", is_path, 0)) != 0)
272 repl = DL_DST_LIB;
274 if (repl != NULL && repl != (const char *) -1)
276 wp = __stpcpy (wp, repl);
277 name += len;
279 else if (len > 1)
281 /* We cannot use this path element, the value of the
282 replacement is unknown. */
283 wp = last_elem;
284 name += len;
285 while (*name != '\0' && (!is_path || *name != ':'))
286 ++name;
288 else
289 /* No DST we recognize. */
290 *wp++ = '$';
292 else
294 *wp++ = *name++;
295 if (is_path && *name == ':')
296 last_elem = wp;
299 while (*name != '\0');
301 *wp = '\0';
303 return result;
307 /* Return copy of argument with all recognized dynamic string tokens
308 ($ORIGIN and $PLATFORM for now) replaced. On some platforms it
309 might not be possible to determine the path from which the object
310 belonging to the map is loaded. In this case the path element
311 containing $ORIGIN is left out. */
/* Return a malloc'd copy of S with all recognized dynamic string tokens
   replaced (see comment above).  Returns NULL on allocation failure.  */
static char *
expand_dynamic_string_token (struct link_map *l, const char *s)
{
  /* We make two runs over the string.  First we determine how large the
     resulting string is and then we copy it over.  Since this is no
     frequently executed operation we are looking here not for performance
     but rather for code size.  */
  size_t cnt;
  size_t total;
  char *result;

  /* Determine the number of DST elements.  */
  cnt = DL_DST_COUNT (s, 1);

  /* If we do not have to replace anything simply copy the string.  */
  if (__builtin_expect (cnt, 0) == 0)
    return local_strdup (s);

  /* Determine the length of the substituted string.  */
  total = DL_DST_REQUIRED (l, s, strlen (s), cnt);

  /* Allocate the necessary memory.  */
  result = (char *) malloc (total + 1);
  if (result == NULL)
    return NULL;

  return _dl_dst_substitute (l, s, result, 1);
}
342 /* Add `name' to the list of names for a particular shared object.
343 `name' is expected to have been allocated with malloc and will
344 be freed if the shared object already has this name.
345 Returns false if the object already had this name. */
346 static void
347 internal_function
348 add_name_to_object (struct link_map *l, const char *name)
350 struct libname_list *lnp, *lastp;
351 struct libname_list *newname;
352 size_t name_len;
354 lastp = NULL;
355 for (lnp = l->l_libname; lnp != NULL; lastp = lnp, lnp = lnp->next)
356 if (strcmp (name, lnp->name) == 0)
357 return;
359 name_len = strlen (name) + 1;
360 newname = (struct libname_list *) malloc (sizeof *newname + name_len);
361 if (newname == NULL)
363 /* No more memory. */
364 _dl_signal_error (ENOMEM, name, NULL, N_("cannot allocate name record"));
365 return;
367 /* The object should have a libname set from _dl_new_object. */
368 assert (lastp != NULL);
370 newname->name = memcpy (newname + 1, name, name_len);
371 newname->next = NULL;
372 newname->dont_free = 0;
373 lastp->next = newname;
376 /* Standard search directories. */
377 static struct r_search_path_struct rtld_search_dirs attribute_relro;
379 static size_t max_dirnamelen;
381 static struct r_search_path_elem **
382 fillin_rpath (char *rpath, struct r_search_path_elem **result, const char *sep,
383 int check_trusted, const char *what, const char *where)
385 char *cp;
386 size_t nelems = 0;
388 while ((cp = __strsep (&rpath, sep)) != NULL)
390 struct r_search_path_elem *dirp;
391 size_t len = strlen (cp);
393 /* `strsep' can pass an empty string. This has to be
394 interpreted as `use the current directory'. */
395 if (len == 0)
397 static const char curwd[] = "./";
398 cp = (char *) curwd;
401 /* Remove trailing slashes (except for "/"). */
402 while (len > 1 && cp[len - 1] == '/')
403 --len;
405 /* Now add one if there is none so far. */
406 if (len > 0 && cp[len - 1] != '/')
407 cp[len++] = '/';
409 /* Make sure we don't use untrusted directories if we run SUID. */
410 if (__builtin_expect (check_trusted, 0))
412 const char *trun = system_dirs;
413 size_t idx;
414 int unsecure = 1;
416 /* All trusted directories must be complete names. */
417 if (cp[0] == '/')
419 for (idx = 0; idx < nsystem_dirs_len; ++idx)
421 if (len == system_dirs_len[idx]
422 && memcmp (trun, cp, len) == 0)
424 /* Found it. */
425 unsecure = 0;
426 break;
429 trun += system_dirs_len[idx] + 1;
433 if (unsecure)
434 /* Simply drop this directory. */
435 continue;
438 /* See if this directory is already known. */
439 for (dirp = GL(dl_all_dirs); dirp != NULL; dirp = dirp->next)
440 if (dirp->dirnamelen == len && memcmp (cp, dirp->dirname, len) == 0)
441 break;
443 if (dirp != NULL)
445 /* It is available, see whether it's on our own list. */
446 size_t cnt;
447 for (cnt = 0; cnt < nelems; ++cnt)
448 if (result[cnt] == dirp)
449 break;
451 if (cnt == nelems)
452 result[nelems++] = dirp;
454 else
456 size_t cnt;
457 enum r_dir_status init_val;
458 size_t where_len = where ? strlen (where) + 1 : 0;
460 /* It's a new directory. Create an entry and add it. */
461 dirp = (struct r_search_path_elem *)
462 malloc (sizeof (*dirp) + ncapstr * sizeof (enum r_dir_status)
463 + where_len + len + 1);
464 if (dirp == NULL)
465 _dl_signal_error (ENOMEM, NULL, NULL,
466 N_("cannot create cache for search path"));
468 dirp->dirname = ((char *) dirp + sizeof (*dirp)
469 + ncapstr * sizeof (enum r_dir_status));
470 *((char *) __mempcpy ((char *) dirp->dirname, cp, len)) = '\0';
471 dirp->dirnamelen = len;
473 if (len > max_dirnamelen)
474 max_dirnamelen = len;
476 /* We have to make sure all the relative directories are
477 never ignored. The current directory might change and
478 all our saved information would be void. */
479 init_val = cp[0] != '/' ? existing : unknown;
480 for (cnt = 0; cnt < ncapstr; ++cnt)
481 dirp->status[cnt] = init_val;
483 dirp->what = what;
484 if (__builtin_expect (where != NULL, 1))
485 dirp->where = memcpy ((char *) dirp + sizeof (*dirp) + len + 1
486 + (ncapstr * sizeof (enum r_dir_status)),
487 where, where_len);
488 else
489 dirp->where = NULL;
491 dirp->next = GL(dl_all_dirs);
492 GL(dl_all_dirs) = dirp;
494 /* Put it in the result array. */
495 result[nelems++] = dirp;
499 /* Terminate the array. */
500 result[nelems] = NULL;
502 return result;
506 static bool
507 internal_function
508 decompose_rpath (struct r_search_path_struct *sps,
509 const char *rpath, struct link_map *l, const char *what)
511 /* Make a copy we can work with. */
512 const char *where = l->l_name;
513 char *copy;
514 char *cp;
515 struct r_search_path_elem **result;
516 size_t nelems;
517 /* Initialize to please the compiler. */
518 const char *errstring = NULL;
520 /* First see whether we must forget the RUNPATH and RPATH from this
521 object. */
522 if (__builtin_expect (GLRO(dl_inhibit_rpath) != NULL, 0)
523 && !INTUSE(__libc_enable_secure))
525 const char *inhp = GLRO(dl_inhibit_rpath);
529 const char *wp = where;
531 while (*inhp == *wp && *wp != '\0')
533 ++inhp;
534 ++wp;
537 if (*wp == '\0' && (*inhp == '\0' || *inhp == ':'))
539 /* This object is on the list of objects for which the
540 RUNPATH and RPATH must not be used. */
541 sps->dirs = (void *) -1;
542 return false;
545 while (*inhp != '\0')
546 if (*inhp++ == ':')
547 break;
549 while (*inhp != '\0');
552 /* Make a writable copy. At the same time expand possible dynamic
553 string tokens. */
554 copy = expand_dynamic_string_token (l, rpath);
555 if (copy == NULL)
557 errstring = N_("cannot create RUNPATH/RPATH copy");
558 goto signal_error;
561 /* Count the number of necessary elements in the result array. */
562 nelems = 0;
563 for (cp = copy; *cp != '\0'; ++cp)
564 if (*cp == ':')
565 ++nelems;
567 /* Allocate room for the result. NELEMS + 1 is an upper limit for the
568 number of necessary entries. */
569 result = (struct r_search_path_elem **) malloc ((nelems + 1 + 1)
570 * sizeof (*result));
571 if (result == NULL)
573 free (copy);
574 errstring = N_("cannot create cache for search path");
575 signal_error:
576 _dl_signal_error (ENOMEM, NULL, NULL, errstring);
579 fillin_rpath (copy, result, ":", 0, what, where);
581 /* Free the copied RPATH string. `fillin_rpath' make own copies if
582 necessary. */
583 free (copy);
585 sps->dirs = result;
586 /* The caller will change this value if we haven't used a real malloc. */
587 sps->malloced = 1;
588 return true;
591 /* Make sure cached path information is stored in *SP
592 and return true if there are any paths to search there. */
593 static bool
594 cache_rpath (struct link_map *l,
595 struct r_search_path_struct *sp,
596 int tag,
597 const char *what)
599 if (sp->dirs == (void *) -1)
600 return false;
602 if (sp->dirs != NULL)
603 return true;
605 if (l->l_info[tag] == NULL)
607 /* There is no path. */
608 sp->dirs = (void *) -1;
609 return false;
612 /* Make sure the cache information is available. */
613 return decompose_rpath (sp, (const char *) (D_PTR (l, l_info[DT_STRTAB])
614 + l->l_info[tag]->d_un.d_val),
615 l, what);
619 void
620 internal_function
621 _dl_init_paths (const char *llp)
623 size_t idx;
624 const char *strp;
625 struct r_search_path_elem *pelem, **aelem;
626 size_t round_size;
627 #ifdef SHARED
628 struct link_map *l;
629 #endif
630 /* Initialize to please the compiler. */
631 const char *errstring = NULL;
633 /* Fill in the information about the application's RPATH and the
634 directories addressed by the LD_LIBRARY_PATH environment variable. */
636 /* Get the capabilities. */
637 capstr = _dl_important_hwcaps (GLRO(dl_platform), GLRO(dl_platformlen),
638 &ncapstr, &max_capstrlen);
640 /* First set up the rest of the default search directory entries. */
641 aelem = rtld_search_dirs.dirs = (struct r_search_path_elem **)
642 malloc ((nsystem_dirs_len + 1) * sizeof (struct r_search_path_elem *));
643 if (rtld_search_dirs.dirs == NULL)
645 errstring = N_("cannot create search path array");
646 signal_error:
647 _dl_signal_error (ENOMEM, NULL, NULL, errstring);
650 round_size = ((2 * sizeof (struct r_search_path_elem) - 1
651 + ncapstr * sizeof (enum r_dir_status))
652 / sizeof (struct r_search_path_elem));
654 rtld_search_dirs.dirs[0] = (struct r_search_path_elem *)
655 malloc ((sizeof (system_dirs) / sizeof (system_dirs[0]))
656 * round_size * sizeof (struct r_search_path_elem));
657 if (rtld_search_dirs.dirs[0] == NULL)
659 errstring = N_("cannot create cache for search path");
660 goto signal_error;
663 rtld_search_dirs.malloced = 0;
664 pelem = GL(dl_all_dirs) = rtld_search_dirs.dirs[0];
665 strp = system_dirs;
666 idx = 0;
670 size_t cnt;
672 *aelem++ = pelem;
674 pelem->what = "system search path";
675 pelem->where = NULL;
677 pelem->dirname = strp;
678 pelem->dirnamelen = system_dirs_len[idx];
679 strp += system_dirs_len[idx] + 1;
681 /* System paths must be absolute. */
682 assert (pelem->dirname[0] == '/');
683 for (cnt = 0; cnt < ncapstr; ++cnt)
684 pelem->status[cnt] = unknown;
686 pelem->next = (++idx == nsystem_dirs_len ? NULL : (pelem + round_size));
688 pelem += round_size;
690 while (idx < nsystem_dirs_len);
692 max_dirnamelen = SYSTEM_DIRS_MAX_LEN;
693 *aelem = NULL;
695 #ifdef SHARED
696 /* This points to the map of the main object. */
697 l = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
698 if (l != NULL)
700 assert (l->l_type != lt_loaded);
702 if (l->l_info[DT_RUNPATH])
704 /* Allocate room for the search path and fill in information
705 from RUNPATH. */
706 decompose_rpath (&l->l_runpath_dirs,
707 (const void *) (D_PTR (l, l_info[DT_STRTAB])
708 + l->l_info[DT_RUNPATH]->d_un.d_val),
709 l, "RUNPATH");
711 /* The RPATH is ignored. */
712 l->l_rpath_dirs.dirs = (void *) -1;
714 else
716 l->l_runpath_dirs.dirs = (void *) -1;
718 if (l->l_info[DT_RPATH])
720 /* Allocate room for the search path and fill in information
721 from RPATH. */
722 decompose_rpath (&l->l_rpath_dirs,
723 (const void *) (D_PTR (l, l_info[DT_STRTAB])
724 + l->l_info[DT_RPATH]->d_un.d_val),
725 l, "RPATH");
726 l->l_rpath_dirs.malloced = 0;
728 else
729 l->l_rpath_dirs.dirs = (void *) -1;
732 #endif /* SHARED */
734 if (llp != NULL && *llp != '\0')
736 size_t nllp;
737 const char *cp = llp;
738 char *llp_tmp;
740 #ifdef SHARED
741 /* Expand DSTs. */
742 size_t cnt = DL_DST_COUNT (llp, 1);
743 if (__builtin_expect (cnt == 0, 1))
744 llp_tmp = strdupa (llp);
745 else
747 /* Determine the length of the substituted string. */
748 size_t total = DL_DST_REQUIRED (l, llp, strlen (llp), cnt);
750 /* Allocate the necessary memory. */
751 llp_tmp = (char *) alloca (total + 1);
752 llp_tmp = _dl_dst_substitute (l, llp, llp_tmp, 1);
754 #else
755 llp_tmp = strdupa (llp);
756 #endif
758 /* Decompose the LD_LIBRARY_PATH contents. First determine how many
759 elements it has. */
760 nllp = 1;
761 while (*cp)
763 if (*cp == ':' || *cp == ';')
764 ++nllp;
765 ++cp;
768 env_path_list.dirs = (struct r_search_path_elem **)
769 malloc ((nllp + 1) * sizeof (struct r_search_path_elem *));
770 if (env_path_list.dirs == NULL)
772 errstring = N_("cannot create cache for search path");
773 goto signal_error;
776 (void) fillin_rpath (llp_tmp, env_path_list.dirs, ":;",
777 INTUSE(__libc_enable_secure), "LD_LIBRARY_PATH",
778 NULL);
780 if (env_path_list.dirs[0] == NULL)
782 free (env_path_list.dirs);
783 env_path_list.dirs = (void *) -1;
786 env_path_list.malloced = 0;
788 else
789 env_path_list.dirs = (void *) -1;
791 /* Remember the last search directory added at startup. */
792 GLRO(dl_init_all_dirs) = GL(dl_all_dirs);
796 static void
797 __attribute__ ((noreturn, noinline))
798 lose (int code, int fd, const char *name, char *realname, struct link_map *l,
799 const char *msg, struct r_debug *r)
801 /* The file might already be closed. */
802 if (fd != -1)
803 (void) __close (fd);
804 if (l != NULL)
806 /* We modify the list of loaded objects. */
807 __rtld_lock_lock_recursive (GL(dl_load_write_lock));
808 /* Remove the stillborn object from the list and free it. */
809 assert (l->l_next == NULL);
810 if (l->l_prev == NULL)
811 /* No other module loaded. This happens only in the static library,
812 or in rtld under --verify. */
813 GL(dl_ns)[l->l_ns]._ns_loaded = NULL;
814 else
815 l->l_prev->l_next = NULL;
816 --GL(dl_ns)[l->l_ns]._ns_nloaded;
817 free (l);
818 __rtld_lock_unlock_recursive (GL(dl_load_write_lock));
820 free (realname);
822 if (r != NULL)
824 r->r_state = RT_CONSISTENT;
825 _dl_debug_state ();
828 _dl_signal_error (code, name, NULL, msg);
832 /* Map in the shared object NAME, actually located in REALNAME, and already
833 opened on FD. */
835 #ifndef EXTERNAL_MAP_FROM_FD
836 static
837 #endif
838 struct link_map *
839 _dl_map_object_from_fd (const char *name, int fd, struct filebuf *fbp,
840 char *realname, struct link_map *loader, int l_type,
841 int mode, void **stack_endp, Lmid_t nsid)
843 struct link_map *l = NULL;
844 const ElfW(Ehdr) *header;
845 const ElfW(Phdr) *phdr;
846 const ElfW(Phdr) *ph;
847 size_t maplength;
848 int type;
849 struct stat64 st;
850 /* Initialize to keep the compiler happy. */
851 const char *errstring = NULL;
852 int errval = 0;
853 struct r_debug *r = _dl_debug_initialize (0, nsid);
854 bool make_consistent = false;
856 /* Get file information. */
857 if (__builtin_expect (__fxstat64 (_STAT_VER, fd, &st) < 0, 0))
859 errstring = N_("cannot stat shared object");
860 call_lose_errno:
861 errval = errno;
862 call_lose:
863 lose (errval, fd, name, realname, l, errstring,
864 make_consistent ? r : NULL);
867 /* Look again to see if the real name matched another already loaded. */
868 for (l = GL(dl_ns)[nsid]._ns_loaded; l; l = l->l_next)
869 if (l->l_removed == 0 && l->l_ino == st.st_ino && l->l_dev == st.st_dev)
871 /* The object is already loaded.
872 Just bump its reference count and return it. */
873 __close (fd);
875 /* If the name is not in the list of names for this object add
876 it. */
877 free (realname);
878 add_name_to_object (l, name);
880 return l;
883 #ifdef SHARED
884 /* When loading into a namespace other than the base one we must
885 avoid loading ld.so since there can only be one copy. Ever. */
886 if (__builtin_expect (nsid != LM_ID_BASE, 0)
887 && ((st.st_ino == GL(dl_rtld_map).l_ino
888 && st.st_dev == GL(dl_rtld_map).l_dev)
889 || _dl_name_match_p (name, &GL(dl_rtld_map))))
891 /* This is indeed ld.so. Create a new link_map which refers to
892 the real one for almost everything. */
893 l = _dl_new_object (realname, name, l_type, loader, mode, nsid);
894 if (l == NULL)
895 goto fail_new;
897 /* Refer to the real descriptor. */
898 l->l_real = &GL(dl_rtld_map);
900 /* No need to bump the refcount of the real object, ld.so will
901 never be unloaded. */
902 __close (fd);
904 return l;
906 #endif
908 if (mode & RTLD_NOLOAD)
910 /* We are not supposed to load the object unless it is already
911 loaded. So return now. */
912 __close (fd);
913 return NULL;
916 /* Print debugging message. */
917 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
918 _dl_debug_printf ("file=%s [%lu]; generating link map\n", name, nsid);
920 /* This is the ELF header. We read it in `open_verify'. */
921 header = (void *) fbp->buf;
923 #ifndef MAP_ANON
924 # define MAP_ANON 0
925 if (_dl_zerofd == -1)
927 _dl_zerofd = _dl_sysdep_open_zero_fill ();
928 if (_dl_zerofd == -1)
930 __close (fd);
931 _dl_signal_error (errno, NULL, NULL,
932 N_("cannot open zero fill device"));
935 #endif
937 /* Signal that we are going to add new objects. */
938 if (r->r_state == RT_CONSISTENT)
940 #ifdef SHARED
941 /* Auditing checkpoint: we are going to add new objects. */
942 if ((mode & __RTLD_AUDIT) == 0
943 && __builtin_expect (GLRO(dl_naudit) > 0, 0))
945 struct link_map *head = GL(dl_ns)[nsid]._ns_loaded;
946 /* Do not call the functions for any auditing object. */
947 if (head->l_auditing == 0)
949 struct audit_ifaces *afct = GLRO(dl_audit);
950 for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
952 if (afct->activity != NULL)
953 afct->activity (&head->l_audit[cnt].cookie, LA_ACT_ADD);
955 afct = afct->next;
959 #endif
961 /* Notify the debugger we have added some objects. We need to
962 call _dl_debug_initialize in a static program in case dynamic
963 linking has not been used before. */
964 r->r_state = RT_ADD;
965 _dl_debug_state ();
966 make_consistent = true;
968 else
969 assert (r->r_state == RT_ADD);
971 /* Enter the new object in the list of loaded objects. */
972 l = _dl_new_object (realname, name, l_type, loader, mode, nsid);
973 if (__builtin_expect (l == NULL, 0))
975 #ifdef SHARED
976 fail_new:
977 #endif
978 errstring = N_("cannot create shared object descriptor");
979 goto call_lose_errno;
982 /* Extract the remaining details we need from the ELF header
983 and then read in the program header table. */
984 l->l_entry = header->e_entry;
985 type = header->e_type;
986 l->l_phnum = header->e_phnum;
988 maplength = header->e_phnum * sizeof (ElfW(Phdr));
989 if (header->e_phoff + maplength <= (size_t) fbp->len)
990 phdr = (void *) (fbp->buf + header->e_phoff);
991 else
993 phdr = alloca (maplength);
994 __lseek (fd, header->e_phoff, SEEK_SET);
995 if ((size_t) __libc_read (fd, (void *) phdr, maplength) != maplength)
997 errstring = N_("cannot read file data");
998 goto call_lose_errno;
1002 /* Presumed absent PT_GNU_STACK. */
1003 uint_fast16_t stack_flags = PF_R|PF_W|PF_X;
1006 /* Scan the program header table, collecting its load commands. */
1007 struct loadcmd
1009 ElfW(Addr) mapstart, mapend, dataend, allocend;
1010 off_t mapoff;
1011 int prot;
1012 } loadcmds[l->l_phnum], *c;
1013 size_t nloadcmds = 0;
1014 bool has_holes = false;
1016 /* The struct is initialized to zero so this is not necessary:
1017 l->l_ld = 0;
1018 l->l_phdr = 0;
1019 l->l_addr = 0; */
1020 for (ph = phdr; ph < &phdr[l->l_phnum]; ++ph)
1021 switch (ph->p_type)
1023 /* These entries tell us where to find things once the file's
1024 segments are mapped in. We record the addresses it says
1025 verbatim, and later correct for the run-time load address. */
1026 case PT_DYNAMIC:
1027 l->l_ld = (void *) ph->p_vaddr;
1028 l->l_ldnum = ph->p_memsz / sizeof (ElfW(Dyn));
1029 break;
1031 case PT_PHDR:
1032 l->l_phdr = (void *) ph->p_vaddr;
1033 break;
1035 case PT_LOAD:
1036 /* A load command tells us to map in part of the file.
1037 We record the load commands and process them all later. */
1038 if (__builtin_expect ((ph->p_align & (GLRO(dl_pagesize) - 1)) != 0,
1041 errstring = N_("ELF load command alignment not page-aligned");
1042 goto call_lose;
1044 if (__builtin_expect (((ph->p_vaddr - ph->p_offset)
1045 & (ph->p_align - 1)) != 0, 0))
1047 errstring
1048 = N_("ELF load command address/offset not properly aligned");
1049 goto call_lose;
1052 c = &loadcmds[nloadcmds++];
1053 c->mapstart = ph->p_vaddr & ~(GLRO(dl_pagesize) - 1);
1054 c->mapend = ((ph->p_vaddr + ph->p_filesz + GLRO(dl_pagesize) - 1)
1055 & ~(GLRO(dl_pagesize) - 1));
1056 c->dataend = ph->p_vaddr + ph->p_filesz;
1057 c->allocend = ph->p_vaddr + ph->p_memsz;
1058 c->mapoff = ph->p_offset & ~(GLRO(dl_pagesize) - 1);
1060 /* Determine whether there is a gap between the last segment
1061 and this one. */
1062 if (nloadcmds > 1 && c[-1].mapend != c->mapstart)
1063 has_holes = true;
1065 /* Optimize a common case. */
1066 #if (PF_R | PF_W | PF_X) == 7 && (PROT_READ | PROT_WRITE | PROT_EXEC) == 7
1067 c->prot = (PF_TO_PROT
1068 >> ((ph->p_flags & (PF_R | PF_W | PF_X)) * 4)) & 0xf;
1069 #else
1070 c->prot = 0;
1071 if (ph->p_flags & PF_R)
1072 c->prot |= PROT_READ;
1073 if (ph->p_flags & PF_W)
1074 c->prot |= PROT_WRITE;
1075 if (ph->p_flags & PF_X)
1076 c->prot |= PROT_EXEC;
1077 #endif
1078 break;
1080 case PT_TLS:
1081 if (ph->p_memsz == 0)
1082 /* Nothing to do for an empty segment. */
1083 break;
1085 l->l_tls_blocksize = ph->p_memsz;
1086 l->l_tls_align = ph->p_align;
1087 if (ph->p_align == 0)
1088 l->l_tls_firstbyte_offset = 0;
1089 else
1090 l->l_tls_firstbyte_offset = ph->p_vaddr & (ph->p_align - 1);
1091 l->l_tls_initimage_size = ph->p_filesz;
1092 /* Since we don't know the load address yet only store the
1093 offset. We will adjust it later. */
1094 l->l_tls_initimage = (void *) ph->p_vaddr;
1096 /* If not loading the initial set of shared libraries,
1097 check whether we should permit loading a TLS segment. */
1098 if (__builtin_expect (l->l_type == lt_library, 1)
1099 /* If GL(dl_tls_dtv_slotinfo_list) == NULL, then rtld.c did
1100 not set up TLS data structures, so don't use them now. */
1101 || __builtin_expect (GL(dl_tls_dtv_slotinfo_list) != NULL, 1))
1103 /* Assign the next available module ID. */
1104 l->l_tls_modid = _dl_next_tls_modid ();
1105 break;
1108 #ifdef SHARED
1109 if (l->l_prev == NULL || (mode & __RTLD_AUDIT) != 0)
1110 /* We are loading the executable itself when the dynamic linker
1111 was executed directly. The setup will happen later. */
1112 break;
1114 /* In a static binary there is no way to tell if we dynamically
1115 loaded libpthread. */
1116 if (GL(dl_error_catch_tsd) == &_dl_initial_error_catch_tsd)
1117 #endif
1119 /* We have not yet loaded libpthread.
1120 We can do the TLS setup right now! */
1122 void *tcb;
1124 /* The first call allocates TLS bookkeeping data structures.
1125 Then we allocate the TCB for the initial thread. */
1126 if (__builtin_expect (_dl_tls_setup (), 0)
1127 || __builtin_expect ((tcb = _dl_allocate_tls (NULL)) == NULL,
1130 errval = ENOMEM;
1131 errstring = N_("\
1132 cannot allocate TLS data structures for initial thread");
1133 goto call_lose;
1136 /* Now we install the TCB in the thread register. */
1137 errstring = TLS_INIT_TP (tcb, 0);
1138 if (__builtin_expect (errstring == NULL, 1))
1140 /* Now we are all good. */
1141 l->l_tls_modid = ++GL(dl_tls_max_dtv_idx);
1142 break;
1145 /* The kernel is too old or somesuch. */
1146 errval = 0;
1147 _dl_deallocate_tls (tcb, 1);
1148 goto call_lose;
1151 /* Uh-oh, the binary expects TLS support but we cannot
1152 provide it. */
1153 errval = 0;
1154 errstring = N_("cannot handle TLS data");
1155 goto call_lose;
1156 break;
1158 case PT_GNU_STACK:
1159 stack_flags = ph->p_flags;
1160 break;
1162 case PT_GNU_RELRO:
1163 l->l_relro_addr = ph->p_vaddr;
1164 l->l_relro_size = ph->p_memsz;
1165 break;
1168 if (__builtin_expect (nloadcmds == 0, 0))
1170 /* This only happens for a bogus object that will be caught with
1171 another error below. But we don't want to go through the
1172 calculations below using NLOADCMDS - 1. */
1173 errstring = N_("object file has no loadable segments");
1174 goto call_lose;
1177 /* Now process the load commands and map segments into memory. */
1178 c = loadcmds;
1180 /* Length of the sections to be loaded. */
1181 maplength = loadcmds[nloadcmds - 1].allocend - c->mapstart;
1183 if (__builtin_expect (type, ET_DYN) == ET_DYN)
1185 /* This is a position-independent shared object. We can let the
1186 kernel map it anywhere it likes, but we must have space for all
1187 the segments in their specified positions relative to the first.
1188 So we map the first segment without MAP_FIXED, but with its
1189 extent increased to cover all the segments. Then we remove
1190 access from excess portion, and there is known sufficient space
1191 there to remap from the later segments.
1193 As a refinement, sometimes we have an address that we would
1194 prefer to map such objects at; but this is only a preference,
1195 the OS can do whatever it likes. */
1196 ElfW(Addr) mappref;
1197 mappref = (ELF_PREFERRED_ADDRESS (loader, maplength,
1198 c->mapstart & GLRO(dl_use_load_bias))
1199 - MAP_BASE_ADDR (l));
1201 /* Remember which part of the address space this object uses. */
1202 l->l_map_start = (ElfW(Addr)) __mmap ((void *) mappref, maplength,
1203 c->prot,
1204 MAP_COPY|MAP_FILE,
1205 fd, c->mapoff);
1206 if (__builtin_expect ((void *) l->l_map_start == MAP_FAILED, 0))
1208 map_error:
1209 errstring = N_("failed to map segment from shared object");
1210 goto call_lose_errno;
1213 l->l_map_end = l->l_map_start + maplength;
1214 l->l_addr = l->l_map_start - c->mapstart;
1216 if (has_holes)
1217 /* Change protection on the excess portion to disallow all access;
1218 the portions we do not remap later will be inaccessible as if
1219 unallocated. Then jump into the normal segment-mapping loop to
1220 handle the portion of the segment past the end of the file
1221 mapping. */
1222 __mprotect ((caddr_t) (l->l_addr + c->mapend),
1223 loadcmds[nloadcmds - 1].mapstart - c->mapend,
1224 PROT_NONE);
1226 l->l_contiguous = 1;
1228 goto postmap;
1231 /* This object is loaded at a fixed address. This must never
1232 happen for objects loaded with dlopen(). */
1233 if (__builtin_expect ((mode & __RTLD_OPENEXEC) == 0, 0))
1235 errstring = N_("cannot dynamically load executable");
1236 goto call_lose;
1239 /* Notify ELF_PREFERRED_ADDRESS that we have to load this one
1240 fixed. */
1241 ELF_FIXED_ADDRESS (loader, c->mapstart);
1244 /* Remember which part of the address space this object uses. */
1245 l->l_map_start = c->mapstart + l->l_addr;
1246 l->l_map_end = l->l_map_start + maplength;
1247 l->l_contiguous = !has_holes;
1249 while (c < &loadcmds[nloadcmds])
1251 if (c->mapend > c->mapstart
1252 /* Map the segment contents from the file. */
1253 && (__mmap ((void *) (l->l_addr + c->mapstart),
1254 c->mapend - c->mapstart, c->prot,
1255 MAP_FIXED|MAP_COPY|MAP_FILE,
1256 fd, c->mapoff)
1257 == MAP_FAILED))
1258 goto map_error;
1260 postmap:
1261 if (c->prot & PROT_EXEC)
1262 l->l_text_end = l->l_addr + c->mapend;
1264 if (l->l_phdr == 0
1265 && (ElfW(Off)) c->mapoff <= header->e_phoff
1266 && ((size_t) (c->mapend - c->mapstart + c->mapoff)
1267 >= header->e_phoff + header->e_phnum * sizeof (ElfW(Phdr))))
1268 /* Found the program header in this segment. */
1269 l->l_phdr = (void *) (c->mapstart + header->e_phoff - c->mapoff);
1271 if (c->allocend > c->dataend)
1273 /* Extra zero pages should appear at the end of this segment,
1274 after the data mapped from the file. */
1275 ElfW(Addr) zero, zeroend, zeropage;
1277 zero = l->l_addr + c->dataend;
1278 zeroend = l->l_addr + c->allocend;
1279 zeropage = ((zero + GLRO(dl_pagesize) - 1)
1280 & ~(GLRO(dl_pagesize) - 1));
1282 if (zeroend < zeropage)
1283 /* All the extra data is in the last page of the segment.
1284 We can just zero it. */
1285 zeropage = zeroend;
1287 if (zeropage > zero)
1289 /* Zero the final part of the last page of the segment. */
1290 if (__builtin_expect ((c->prot & PROT_WRITE) == 0, 0))
1292 /* Dag nab it. */
1293 if (__mprotect ((caddr_t) (zero
1294 & ~(GLRO(dl_pagesize) - 1)),
1295 GLRO(dl_pagesize), c->prot|PROT_WRITE) < 0)
1297 errstring = N_("cannot change memory protections");
1298 goto call_lose_errno;
1301 memset ((void *) zero, '\0', zeropage - zero);
1302 if (__builtin_expect ((c->prot & PROT_WRITE) == 0, 0))
1303 __mprotect ((caddr_t) (zero & ~(GLRO(dl_pagesize) - 1)),
1304 GLRO(dl_pagesize), c->prot);
1307 if (zeroend > zeropage)
1309 /* Map the remaining zero pages in from the zero fill FD. */
1310 caddr_t mapat;
1311 mapat = __mmap ((caddr_t) zeropage, zeroend - zeropage,
1312 c->prot, MAP_ANON|MAP_PRIVATE|MAP_FIXED,
1313 -1, 0);
1314 if (__builtin_expect (mapat == MAP_FAILED, 0))
1316 errstring = N_("cannot map zero-fill pages");
1317 goto call_lose_errno;
1322 ++c;
1326 if (l->l_ld == 0)
1328 if (__builtin_expect (type == ET_DYN, 0))
1330 errstring = N_("object file has no dynamic section");
1331 goto call_lose;
1334 else
1335 l->l_ld = (ElfW(Dyn) *) ((ElfW(Addr)) l->l_ld + l->l_addr);
1337 elf_get_dynamic_info (l, NULL);
1339 /* Make sure we are not dlopen'ing an object that has the
1340 DF_1_NOOPEN flag set. */
1341 if (__builtin_expect (l->l_flags_1 & DF_1_NOOPEN, 0)
1342 && (mode & __RTLD_DLOPEN))
1344 /* We are not supposed to load this object. Free all resources. */
1345 __munmap ((void *) l->l_map_start, l->l_map_end - l->l_map_start);
1347 if (!l->l_libname->dont_free)
1348 free (l->l_libname);
1350 if (l->l_phdr_allocated)
1351 free ((void *) l->l_phdr);
1353 errstring = N_("shared object cannot be dlopen()ed");
1354 goto call_lose;
1357 if (l->l_phdr == NULL)
1359 /* The program header is not contained in any of the segments.
1360 We have to allocate memory ourself and copy it over from out
1361 temporary place. */
1362 ElfW(Phdr) *newp = (ElfW(Phdr) *) malloc (header->e_phnum
1363 * sizeof (ElfW(Phdr)));
1364 if (newp == NULL)
1366 errstring = N_("cannot allocate memory for program header");
1367 goto call_lose_errno;
1370 l->l_phdr = memcpy (newp, phdr,
1371 (header->e_phnum * sizeof (ElfW(Phdr))));
1372 l->l_phdr_allocated = 1;
1374 else
1375 /* Adjust the PT_PHDR value by the runtime load address. */
1376 l->l_phdr = (ElfW(Phdr) *) ((ElfW(Addr)) l->l_phdr + l->l_addr);
1378 if (__builtin_expect ((stack_flags &~ GL(dl_stack_flags)) & PF_X, 0))
1380 if (__builtin_expect (__check_caller (RETURN_ADDRESS (0), allow_ldso),
1381 0) != 0)
1383 errstring = N_("invalid caller");
1384 goto call_lose;
1387 /* The stack is presently not executable, but this module
1388 requires that it be executable. We must change the
1389 protection of the variable which contains the flags used in
1390 the mprotect calls. */
1391 #ifdef SHARED
1392 if ((mode & (__RTLD_DLOPEN | __RTLD_AUDIT)) == __RTLD_DLOPEN)
1394 const uintptr_t p = (uintptr_t) &__stack_prot & -GLRO(dl_pagesize);
1395 const size_t s = (uintptr_t) (&__stack_prot + 1) - p;
1397 struct link_map *const m = &GL(dl_rtld_map);
1398 const uintptr_t relro_end = ((m->l_addr + m->l_relro_addr
1399 + m->l_relro_size)
1400 & -GLRO(dl_pagesize));
1401 if (__builtin_expect (p + s <= relro_end, 1))
1403 /* The variable lies in the region protected by RELRO. */
1404 __mprotect ((void *) p, s, PROT_READ|PROT_WRITE);
1405 __stack_prot |= PROT_READ|PROT_WRITE|PROT_EXEC;
1406 __mprotect ((void *) p, s, PROT_READ);
1408 else
1409 __stack_prot |= PROT_READ|PROT_WRITE|PROT_EXEC;
1411 else
1412 #endif
1413 __stack_prot |= PROT_READ|PROT_WRITE|PROT_EXEC;
1415 #ifdef check_consistency
1416 check_consistency ();
1417 #endif
1419 errval = (*GL(dl_make_stack_executable_hook)) (stack_endp);
1420 if (errval)
1422 errstring = N_("\
1423 cannot enable executable stack as shared object requires");
1424 goto call_lose;
1428 /* Adjust the address of the TLS initialization image. */
1429 if (l->l_tls_initimage != NULL)
1430 l->l_tls_initimage = (char *) l->l_tls_initimage + l->l_addr;
1432 /* We are done mapping in the file. We no longer need the descriptor. */
1433 if (__builtin_expect (__close (fd) != 0, 0))
1435 errstring = N_("cannot close file descriptor");
1436 goto call_lose_errno;
1438 /* Signal that we closed the file. */
1439 fd = -1;
1441 if (l->l_type == lt_library && type == ET_EXEC)
1442 l->l_type = lt_executable;
1444 l->l_entry += l->l_addr;
1446 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
1447 _dl_debug_printf ("\
1448 dynamic: 0x%0*lx base: 0x%0*lx size: 0x%0*Zx\n\
1449 entry: 0x%0*lx phdr: 0x%0*lx phnum: %*u\n\n",
1450 (int) sizeof (void *) * 2,
1451 (unsigned long int) l->l_ld,
1452 (int) sizeof (void *) * 2,
1453 (unsigned long int) l->l_addr,
1454 (int) sizeof (void *) * 2, maplength,
1455 (int) sizeof (void *) * 2,
1456 (unsigned long int) l->l_entry,
1457 (int) sizeof (void *) * 2,
1458 (unsigned long int) l->l_phdr,
1459 (int) sizeof (void *) * 2, l->l_phnum);
1461 /* Set up the symbol hash table. */
1462 _dl_setup_hash (l);
1464 /* If this object has DT_SYMBOLIC set modify now its scope. We don't
1465 have to do this for the main map. */
1466 if ((mode & RTLD_DEEPBIND) == 0
1467 && __builtin_expect (l->l_info[DT_SYMBOLIC] != NULL, 0)
1468 && &l->l_searchlist != l->l_scope[0])
1470 /* Create an appropriate searchlist. It contains only this map.
1471 This is the definition of DT_SYMBOLIC in SysVr4. */
1472 l->l_symbolic_searchlist.r_list[0] = l;
1473 l->l_symbolic_searchlist.r_nlist = 1;
1475 /* Now move the existing entries one back. */
1476 memmove (&l->l_scope[1], &l->l_scope[0],
1477 (l->l_scope_max - 1) * sizeof (l->l_scope[0]));
1479 /* Now add the new entry. */
1480 l->l_scope[0] = &l->l_symbolic_searchlist;
1483 /* Remember whether this object must be initialized first. */
1484 if (l->l_flags_1 & DF_1_INITFIRST)
1485 GL(dl_initfirst) = l;
1487 /* Finally the file information. */
1488 l->l_dev = st.st_dev;
1489 l->l_ino = st.st_ino;
1491 /* When we profile the SONAME might be needed for something else but
1492 loading. Add it right away. */
1493 if (__builtin_expect (GLRO(dl_profile) != NULL, 0)
1494 && l->l_info[DT_SONAME] != NULL)
1495 add_name_to_object (l, ((const char *) D_PTR (l, l_info[DT_STRTAB])
1496 + l->l_info[DT_SONAME]->d_un.d_val));
1498 #ifdef SHARED
1499 /* Auditing checkpoint: we have a new object. */
1500 if (__builtin_expect (GLRO(dl_naudit) > 0, 0)
1501 && !GL(dl_ns)[l->l_ns]._ns_loaded->l_auditing)
1503 struct audit_ifaces *afct = GLRO(dl_audit);
1504 for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
1506 if (afct->objopen != NULL)
1508 l->l_audit[cnt].bindflags
1509 = afct->objopen (l, nsid, &l->l_audit[cnt].cookie);
1511 l->l_audit_any_plt |= l->l_audit[cnt].bindflags != 0;
1514 afct = afct->next;
1517 #endif
1519 return l;
1522 /* Print search path. */
1523 static void
1524 print_search_path (struct r_search_path_elem **list,
1525 const char *what, const char *name)
1527 char buf[max_dirnamelen + max_capstrlen];
1528 int first = 1;
1530 _dl_debug_printf (" search path=");
1532 while (*list != NULL && (*list)->what == what) /* Yes, ==. */
1534 char *endp = __mempcpy (buf, (*list)->dirname, (*list)->dirnamelen);
1535 size_t cnt;
1537 for (cnt = 0; cnt < ncapstr; ++cnt)
1538 if ((*list)->status[cnt] != nonexisting)
1540 char *cp = __mempcpy (endp, capstr[cnt].str, capstr[cnt].len);
1541 if (cp == buf || (cp == buf + 1 && buf[0] == '/'))
1542 cp[0] = '\0';
1543 else
1544 cp[-1] = '\0';
1546 _dl_debug_printf_c (first ? "%s" : ":%s", buf);
1547 first = 0;
1550 ++list;
1553 if (name != NULL)
1554 _dl_debug_printf_c ("\t\t(%s from file %s)\n", what,
1555 name[0] ? name : rtld_progname);
1556 else
1557 _dl_debug_printf_c ("\t\t(%s)\n", what);
1560 /* Open a file and verify it is an ELF file for this architecture. We
1561 ignore only ELF files for other architectures. Non-ELF files and
1562 ELF files with different header information cause fatal errors since
1563 this could mean there is something wrong in the installation and the
1564 user might want to know about this. */
/* Returns an open read-only descriptor on success, or -1 on rejection
   (with errno set, e.g. ENOENT for a quietly ignored file).  FBP is
   filled with the first bytes of the file so the caller can reuse the
   already-read ELF header.  *FOUND_OTHER_CLASS is set to true when the
   file is a valid ELF object of the other word size (32- vs 64-bit),
   which is not fatal.  When FREE_NAME is true, NAME was malloc'd and
   is released on the fatal-error path.
   NOTE(review): brace-only lines were lost in this rendering; the
   block structure below must be read from the original file.  */
1565 static int
1566 open_verify (const char *name, struct filebuf *fbp, struct link_map *loader,
1567 int whatcode, bool *found_other_class, bool free_name)
1569 /* This is the expected ELF header. */
1570 #define ELF32_CLASS ELFCLASS32
1571 #define ELF64_CLASS ELFCLASS64
1572 #ifndef VALID_ELF_HEADER
1573 # define VALID_ELF_HEADER(hdr,exp,size) (memcmp (hdr, exp, size) == 0)
1574 # define VALID_ELF_OSABI(osabi) (osabi == ELFOSABI_SYSV)
1575 # define VALID_ELF_ABIVERSION(osabi,ver) (ver == 0)
1576 #elif defined MORE_ELF_HEADER_DATA
1577 MORE_ELF_HEADER_DATA;
1578 #endif
/* Template e_ident bytes a loadable object must match for this
   architecture and byte order.  */
1579 static const unsigned char expected[EI_NIDENT] =
1581 [EI_MAG0] = ELFMAG0,
1582 [EI_MAG1] = ELFMAG1,
1583 [EI_MAG2] = ELFMAG2,
1584 [EI_MAG3] = ELFMAG3,
1585 [EI_CLASS] = ELFW(CLASS),
1586 [EI_DATA] = byteorder,
1587 [EI_VERSION] = EV_CURRENT,
1588 [EI_OSABI] = ELFOSABI_SYSV,
1589 [EI_ABIVERSION] = 0
/* Header of the "GNU" vendor note we look for inside PT_NOTE
   segments (the .note.ABI-tag note).  */
1591 static const struct
1593 ElfW(Word) vendorlen;
1594 ElfW(Word) datalen;
1595 ElfW(Word) type;
1596 char vendor[4];
1597 } expected_note = { 4, 16, 1, "GNU" };
1598 /* Initialize it to make the compiler happy. */
1599 const char *errstring = NULL;
1600 int errval = 0;
1602 #ifdef SHARED
1603 /* Give the auditing libraries a chance. */
/* An objsearch hook may substitute a different path or, by returning
   NULL, veto this candidate entirely.  */
1604 if (__builtin_expect (GLRO(dl_naudit) > 0, 0) && whatcode != 0
1605 && loader->l_auditing == 0)
1607 struct audit_ifaces *afct = GLRO(dl_audit);
1608 for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
1610 if (afct->objsearch != NULL)
1612 name = afct->objsearch (name, &loader->l_audit[cnt].cookie,
1613 whatcode);
1614 if (name == NULL)
1615 /* Ignore the path. */
1616 return -1;
1619 afct = afct->next;
1622 #endif
1624 /* Open the file. We always open files read-only. */
1625 int fd = __open (name, O_RDONLY);
1626 if (fd != -1)
1628 ElfW(Ehdr) *ehdr;
1629 ElfW(Phdr) *phdr, *ph;
1630 ElfW(Word) *abi_note;
1631 unsigned int osversion;
1632 size_t maplength;
1634 /* We successfully openened the file. Now verify it is a file
1635 we can use. */
1636 __set_errno (0);
1637 fbp->len = __libc_read (fd, fbp->buf, sizeof (fbp->buf));
1639 /* This is where the ELF header is loaded. */
1640 assert (sizeof (fbp->buf) > sizeof (ElfW(Ehdr)));
1641 ehdr = (ElfW(Ehdr) *) fbp->buf;
1643 /* Now run the tests. */
1644 if (__builtin_expect (fbp->len < (ssize_t) sizeof (ElfW(Ehdr)), 0))
1646 errval = errno;
1647 errstring = (errval == 0
1648 ? N_("file too short") : N_("cannot read file data"));
/* Fatal-error exit.  When the caller asked us to free NAME we first
   copy it to the stack (strdupa) so lose() can still report it.  */
1649 call_lose:
1650 if (free_name)
1652 char *realname = (char *) name;
1653 name = strdupa (realname);
1654 free (realname);
1656 lose (errval, fd, name, NULL, NULL, errstring, NULL);
1659 /* See whether the ELF header is what we expect. */
1660 if (__builtin_expect (! VALID_ELF_HEADER (ehdr->e_ident, expected,
1661 EI_ABIVERSION)
1662 || !VALID_ELF_ABIVERSION (ehdr->e_ident[EI_OSABI],
1663 ehdr->e_ident[EI_ABIVERSION])
1664 || memcmp (&ehdr->e_ident[EI_PAD],
1665 &expected[EI_PAD],
1666 EI_NIDENT - EI_PAD) != 0,
/* The cheap combined check above failed; re-test field by field so
   the user gets a precise diagnostic.  */
1669 /* Something is wrong. */
1670 const Elf32_Word *magp = (const void *) ehdr->e_ident;
1671 if (*magp !=
1672 #if BYTE_ORDER == LITTLE_ENDIAN
1673 ((ELFMAG0 << (EI_MAG0 * 8)) |
1674 (ELFMAG1 << (EI_MAG1 * 8)) |
1675 (ELFMAG2 << (EI_MAG2 * 8)) |
1676 (ELFMAG3 << (EI_MAG3 * 8)))
1677 #else
1678 ((ELFMAG0 << (EI_MAG3 * 8)) |
1679 (ELFMAG1 << (EI_MAG2 * 8)) |
1680 (ELFMAG2 << (EI_MAG1 * 8)) |
1681 (ELFMAG3 << (EI_MAG0 * 8)))
1682 #endif
1684 errstring = N_("invalid ELF header");
1685 else if (ehdr->e_ident[EI_CLASS] != ELFW(CLASS))
1687 /* This is not a fatal error. On architectures where
1688 32-bit and 64-bit binaries can be run this might
1689 happen. */
1690 *found_other_class = true;
1691 goto close_and_out;
1693 else if (ehdr->e_ident[EI_DATA] != byteorder)
1695 if (BYTE_ORDER == BIG_ENDIAN)
1696 errstring = N_("ELF file data encoding not big-endian");
1697 else
1698 errstring = N_("ELF file data encoding not little-endian");
1700 else if (ehdr->e_ident[EI_VERSION] != EV_CURRENT)
1701 errstring
1702 = N_("ELF file version ident does not match current one");
1703 /* XXX We should be able so set system specific versions which are
1704 allowed here. */
1705 else if (!VALID_ELF_OSABI (ehdr->e_ident[EI_OSABI]))
1706 errstring = N_("ELF file OS ABI invalid");
1707 else if (!VALID_ELF_ABIVERSION (ehdr->e_ident[EI_OSABI],
1708 ehdr->e_ident[EI_ABIVERSION]))
1709 errstring = N_("ELF file ABI version invalid");
1710 else if (memcmp (&ehdr->e_ident[EI_PAD], &expected[EI_PAD],
1711 EI_NIDENT - EI_PAD) != 0)
1712 errstring = N_("nonzero padding in e_ident");
1713 else
1714 /* Otherwise we don't know what went wrong. */
1715 errstring = N_("internal error");
1717 goto call_lose;
1720 if (__builtin_expect (ehdr->e_version, EV_CURRENT) != EV_CURRENT)
1722 errstring = N_("ELF file version does not match current one");
1723 goto call_lose;
/* A wrong machine is silently ignored (another directory in the
   search path may hold the right variant).  */
1725 if (! __builtin_expect (elf_machine_matches_host (ehdr), 1))
1726 goto close_and_out;
1727 else if (__builtin_expect (ehdr->e_type, ET_DYN) != ET_DYN
1728 && __builtin_expect (ehdr->e_type, ET_EXEC) != ET_EXEC)
1730 errstring = N_("only ET_DYN and ET_EXEC can be loaded");
1731 goto call_lose;
1733 else if (__builtin_expect (ehdr->e_phentsize, sizeof (ElfW(Phdr)))
1734 != sizeof (ElfW(Phdr)))
1736 errstring = N_("ELF file's phentsize not the expected size");
1737 goto call_lose;
/* Locate the program headers: either in the buffer we already read,
   or via an extra read into stack space.  */
1740 maplength = ehdr->e_phnum * sizeof (ElfW(Phdr));
1741 if (ehdr->e_phoff + maplength <= (size_t) fbp->len)
1742 phdr = (void *) (fbp->buf + ehdr->e_phoff);
1743 else
1745 phdr = alloca (maplength);
1746 __lseek (fd, ehdr->e_phoff, SEEK_SET);
1747 if ((size_t) __libc_read (fd, (void *) phdr, maplength) != maplength)
1749 read_error:
1750 errval = errno;
1751 errstring = N_("cannot read file data");
1752 goto call_lose;
1756 /* Check .note.ABI-tag if present. */
1757 for (ph = phdr; ph < &phdr[ehdr->e_phnum]; ++ph)
1758 if (ph->p_type == PT_NOTE && ph->p_filesz >= 32 && ph->p_align >= 4)
1760 ElfW(Addr) size = ph->p_filesz;
1762 if (ph->p_offset + size <= (size_t) fbp->len)
1763 abi_note = (void *) (fbp->buf + ph->p_offset);
1764 else
1766 abi_note = alloca (size);
1767 __lseek (fd, ph->p_offset, SEEK_SET);
1768 if (__libc_read (fd, (void *) abi_note, size) != size)
1769 goto read_error;
/* Skip over notes until we find the expected "GNU" ABI tag (or run
   out of segment).  ROUND pads name/desc sizes to word alignment.  */
1772 while (memcmp (abi_note, &expected_note, sizeof (expected_note)))
1774 #define ROUND(len) (((len) + sizeof (ElfW(Word)) - 1) & -sizeof (ElfW(Word)))
1775 ElfW(Addr) note_size = 3 * sizeof (ElfW(Word))
1776 + ROUND (abi_note[0])
1777 + ROUND (abi_note[1]);
1779 if (size - 32 < note_size)
1781 size = 0;
1782 break;
1784 size -= note_size;
1785 abi_note = (void *) abi_note + note_size;
1788 if (size == 0)
1789 continue;
/* Words 5..7 of the note are packed into one comparable integer;
   presumably the minimum required kernel version (major.minor.patch)
   per the ABI-tag convention — TODO confirm against abi-tag docs.  */
1791 osversion = (abi_note[5] & 0xff) * 65536
1792 + (abi_note[6] & 0xff) * 256
1793 + (abi_note[7] & 0xff);
1794 if (abi_note[4] != __ABI_TAG_OS
1795 || (GLRO(dl_osversion) && GLRO(dl_osversion) < osversion))
/* Non-fatal rejection: close the descriptor and report ENOENT so the
   caller simply continues searching elsewhere.  */
1797 close_and_out:
1798 __close (fd);
1799 __set_errno (ENOENT);
1800 fd = -1;
1803 break;
1807 return fd;
1810 /* Try to open NAME in one of the directories in *DIRSP.
1811 Return the fd, or -1. If successful, fill in *REALNAME
1812 with the malloc'd full directory name. If it turns out
1813 that none of the directories in *DIRSP exists, *DIRSP is
1814 replaced with (void *) -1, and the old value is free()d
1815 if MAY_FREE_DIRS is true. */
/* Each directory is tried with every hardware-capability suffix in
   CAPSTR; existence results are cached in this_dir->status[] so later
   lookups skip known-missing directories.
   NOTE(review): brace-only lines (including the `do {' that pairs
   with the `while' near the end) were lost in this rendering; read
   the block structure from the original file.  */
1817 static int
1818 open_path (const char *name, size_t namelen, int preloaded,
1819 struct r_search_path_struct *sps, char **realname,
1820 struct filebuf *fbp, struct link_map *loader, int whatcode,
1821 bool *found_other_class)
1823 struct r_search_path_elem **dirs = sps->dirs;
1824 char *buf;
1825 int fd = -1;
1826 const char *current_what = NULL;
1827 int any = 0;
1829 if (__builtin_expect (dirs == NULL, 0))
1830 /* We're called before _dl_init_paths when loading the main executable
1831 given on the command line when rtld is run directly. */
1832 return -1;
/* Scratch buffer large enough for directory + capability suffix +
   file name; lives on the stack for the whole search.  */
1834 buf = alloca (max_dirnamelen + max_capstrlen + namelen);
/* Loop body over each *dirs entry (do/while; see `while' below).  */
1837 struct r_search_path_elem *this_dir = *dirs;
1838 size_t buflen = 0;
1839 size_t cnt;
1840 char *edp;
1841 int here_any = 0;
1842 int err;
1844 /* If we are debugging the search for libraries print the path
1845 now if it hasn't happened now. */
1846 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS, 0)
1847 && current_what != this_dir->what)
1849 current_what = this_dir->what;
1850 print_search_path (dirs, current_what, this_dir->where);
1853 edp = (char *) __mempcpy (buf, this_dir->dirname, this_dir->dirnamelen);
1854 for (cnt = 0; fd == -1 && cnt < ncapstr; ++cnt)
1856 /* Skip this directory if we know it does not exist. */
1857 if (this_dir->status[cnt] == nonexisting)
1858 continue;
1860 buflen =
1861 ((char *) __mempcpy (__mempcpy (edp, capstr[cnt].str,
1862 capstr[cnt].len),
1863 name, namelen)
1864 - buf);
1866 /* Print name we try if this is wanted. */
1867 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS, 0))
1868 _dl_debug_printf (" trying file=%s\n", buf);
1870 fd = open_verify (buf, fbp, loader, whatcode, found_other_class,
1871 false);
/* First time we touch this directory/capability pair: record whether
   the directory itself exists, so future searches can skip it.  */
1872 if (this_dir->status[cnt] == unknown)
1874 if (fd != -1)
1875 this_dir->status[cnt] = existing;
1876 /* Do not update the directory information when loading
1877 auditing code. We must try to disturb the program as
1878 little as possible. */
1879 else if (loader == NULL
1880 || GL(dl_ns)[loader->l_ns]._ns_loaded->l_auditing == 0)
1882 /* We failed to open machine dependent library. Let's
1883 test whether there is any directory at all. */
1884 struct stat64 st;
1886 buf[buflen - namelen - 1] = '\0';
1888 if (__xstat64 (_STAT_VER, buf, &st) != 0
1889 || ! S_ISDIR (st.st_mode))
1890 /* The directory does not exist or it is no directory. */
1891 this_dir->status[cnt] = nonexisting;
1892 else
1893 this_dir->status[cnt] = existing;
1897 /* Remember whether we found any existing directory. */
1898 here_any |= this_dir->status[cnt] != nonexisting;
/* In secure (AT_SECURE) mode, a preloaded object from these
   directories is only accepted if the file itself has the SUID bit
   set; otherwise the fd is dropped and the file silently ignored.  */
1900 if (fd != -1 && __builtin_expect (preloaded, 0)
1901 && INTUSE(__libc_enable_secure))
1903 /* This is an extra security effort to make sure nobody can
1904 preload broken shared objects which are in the trusted
1905 directories and so exploit the bugs. */
1906 struct stat64 st;
1908 if (__fxstat64 (_STAT_VER, fd, &st) != 0
1909 || (st.st_mode & S_ISUID) == 0)
1911 /* The shared object cannot be tested for being SUID
1912 or this bit is not set. In this case we must not
1913 use this object. */
1914 __close (fd);
1915 fd = -1;
1916 /* We simply ignore the file, signal this by setting
1917 the error value which would have been set by `open'. */
1918 errno = ENOENT;
1923 if (fd != -1)
/* Success: hand the caller a malloc'd copy of the full path.  */
1925 *realname = (char *) malloc (buflen);
1926 if (*realname != NULL)
1928 memcpy (*realname, buf, buflen);
1929 return fd;
1931 else
1933 /* No memory for the name, we certainly won't be able
1934 to load and link it. */
1935 __close (fd);
1936 return -1;
/* ENOENT/EACCES just mean "keep searching"; any other error from an
   existing directory is a hard failure.  */
1939 if (here_any && (err = errno) != ENOENT && err != EACCES)
1940 /* The file exists and is readable, but something went wrong. */
1941 return -1;
1943 /* Remember whether we found anything. */
1944 any |= here_any;
1946 while (*++dirs != NULL);
1948 /* Remove the whole path if none of the directories exists. */
1949 if (__builtin_expect (! any, 0))
1951 /* Paths which were allocated using the minimal malloc() in ld.so
1952 must not be freed using the general free() in libc. */
1953 if (sps->malloced)
1954 free (sps->dirs);
1956 /* rtld_search_dirs is attribute_relro, therefore avoid writing
1957 into it. */
1958 if (sps != &rtld_search_dirs)
1959 sps->dirs = (void *) -1;
1962 return -1;
1965 /* Map in the shared object file NAME. */
/* Entry point for loading NAME into namespace NSID.  Order of work:
   (1) return an already-loaded object whose name or SONAME matches;
   (2) let audit libraries rewrite or veto the name; (3) if NAME has
   no '/', search DT_RPATH chain, the main map's DT_RPATH,
   LD_LIBRARY_PATH, the loader's DT_RUNPATH, /etc/ld.so.cache, then
   the default directories; otherwise open NAME (after DST expansion)
   directly; (4) on failure either fake an entry (trace mode) or
   signal an error; on success map the file in via
   _dl_map_object_from_fd.
   NOTE(review): brace-only lines were lost in this rendering; read
   the block structure from the original file.  */
1967 struct link_map *
1968 internal_function
1969 _dl_map_object (struct link_map *loader, const char *name, int preloaded,
1970 int type, int trace_mode, int mode, Lmid_t nsid)
1972 int fd;
1973 char *realname;
1974 char *name_copy;
1975 struct link_map *l;
1976 struct filebuf fb;
1978 assert (nsid >= 0);
1979 assert (nsid < GL(dl_nns));
1981 /* Look for this name among those already loaded. */
1982 for (l = GL(dl_ns)[nsid]._ns_loaded; l; l = l->l_next)
1984 /* If the requested name matches the soname of a loaded object,
1985 use that object. Elide this check for names that have not
1986 yet been opened. */
1987 if (__builtin_expect (l->l_faked, 0) != 0
1988 || __builtin_expect (l->l_removed, 0) != 0)
1989 continue;
1990 if (!_dl_name_match_p (name, l))
1992 const char *soname;
1994 if (__builtin_expect (l->l_soname_added, 1)
1995 || l->l_info[DT_SONAME] == NULL)
1996 continue;
1998 soname = ((const char *) D_PTR (l, l_info[DT_STRTAB])
1999 + l->l_info[DT_SONAME]->d_un.d_val);
2000 if (strcmp (name, soname) != 0)
2001 continue;
2003 /* We have a match on a new name -- cache it. */
2004 add_name_to_object (l, soname);
2005 l->l_soname_added = 1;
2008 /* We have a match. */
2009 return l;
2012 /* Display information if we are debugging. */
2013 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0)
2014 && loader != NULL)
2015 _dl_debug_printf ("\nfile=%s [%lu]; needed by %s [%lu]\n", name, nsid,
2016 loader->l_name[0]
2017 ? loader->l_name : rtld_progname, loader->l_ns);
2019 #ifdef SHARED
2020 /* Give the auditing libraries a chance to change the name before we
2021 try anything. */
2022 if (__builtin_expect (GLRO(dl_naudit) > 0, 0)
2023 && (loader == NULL || loader->l_auditing == 0))
2025 struct audit_ifaces *afct = GLRO(dl_audit);
2026 for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
2028 if (afct->objsearch != NULL)
2030 name = afct->objsearch (name, &loader->l_audit[cnt].cookie,
2031 LA_SER_ORIG);
2032 if (name == NULL)
2034 /* Do not try anything further. */
2035 fd = -1;
2036 goto no_file;
2040 afct = afct->next;
2043 #endif
2045 /* Will be true if we found a DSO which is of the other ELF class. */
2046 bool found_other_class = false;
/* A name without '/' is searched along the configured paths; a name
   with '/' is opened directly further below.  */
2048 if (strchr (name, '/') == NULL)
2050 /* Search for NAME in several places. */
2052 size_t namelen = strlen (name) + 1;
2054 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS, 0))
2055 _dl_debug_printf ("find library=%s [%lu]; searching\n", name, nsid);
2057 fd = -1;
2059 /* When the object has the RUNPATH information we don't use any
2060 RPATHs. */
2061 if (loader == NULL || loader->l_info[DT_RUNPATH] == NULL)
2063 /* This is the executable's map (if there is one). Make sure that
2064 we do not look at it twice. */
2065 struct link_map *main_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
2066 bool did_main_map = false;
2068 /* First try the DT_RPATH of the dependent object that caused NAME
2069 to be loaded. Then that object's dependent, and on up. */
2070 for (l = loader; l; l = l->l_loader)
2071 if (cache_rpath (l, &l->l_rpath_dirs, DT_RPATH, "RPATH"))
2073 fd = open_path (name, namelen, preloaded, &l->l_rpath_dirs,
2074 &realname, &fb, loader, LA_SER_RUNPATH,
2075 &found_other_class);
2076 if (fd != -1)
2077 break;
2079 did_main_map |= l == main_map;
2082 /* If dynamically linked, try the DT_RPATH of the executable
2083 itself. NB: we do this for lookups in any namespace. */
2084 if (fd == -1 && !did_main_map
2085 && main_map != NULL && main_map->l_type != lt_loaded
2086 && cache_rpath (main_map, &main_map->l_rpath_dirs, DT_RPATH,
2087 "RPATH"))
2088 fd = open_path (name, namelen, preloaded, &main_map->l_rpath_dirs,
2089 &realname, &fb, loader ?: main_map, LA_SER_RUNPATH,
2090 &found_other_class);
2093 /* Try the LD_LIBRARY_PATH environment variable. */
2094 if (fd == -1 && env_path_list.dirs != (void *) -1)
2095 fd = open_path (name, namelen, preloaded, &env_path_list,
2096 &realname, &fb,
2097 loader ?: GL(dl_ns)[LM_ID_BASE]._ns_loaded,
2098 LA_SER_LIBPATH, &found_other_class);
2100 /* Look at the RUNPATH information for this binary. */
2101 if (fd == -1 && loader != NULL
2102 && cache_rpath (loader, &loader->l_runpath_dirs,
2103 DT_RUNPATH, "RUNPATH"))
2104 fd = open_path (name, namelen, preloaded,
2105 &loader->l_runpath_dirs, &realname, &fb, loader,
2106 LA_SER_RUNPATH, &found_other_class);
/* For preloaded objects the cache is skipped entirely when running
   in secure (AT_SECURE) mode.  */
2108 if (fd == -1
2109 && (__builtin_expect (! preloaded, 1)
2110 || ! INTUSE(__libc_enable_secure)))
2112 /* Check the list of libraries in the file /etc/ld.so.cache,
2113 for compatibility with Linux's ldconfig program. */
2114 const char *cached = _dl_load_cache_lookup (name);
2116 if (cached != NULL)
2118 #ifdef SHARED
2119 // XXX Correct to unconditionally default to namespace 0?
2120 l = loader ?: GL(dl_ns)[LM_ID_BASE]._ns_loaded;
2121 #else
2122 l = loader;
2123 #endif
2125 /* If the loader has the DF_1_NODEFLIB flag set we must not
2126 use a cache entry from any of these directories. */
2127 if (
2128 #ifndef SHARED
2129 /* 'l' is always != NULL for dynamically linked objects. */
2130 l != NULL &&
2131 #endif
2132 __builtin_expect (l->l_flags_1 & DF_1_NODEFLIB, 0))
2134 const char *dirp = system_dirs;
2135 unsigned int cnt = 0;
/* Walk the NUL-separated system_dirs table; drop the cache entry if
   it lies under any of those default directories.  */
2139 if (memcmp (cached, dirp, system_dirs_len[cnt]) == 0)
2141 /* The prefix matches. Don't use the entry. */
2142 cached = NULL;
2143 break;
2146 dirp += system_dirs_len[cnt] + 1;
2147 ++cnt;
2149 while (cnt < nsystem_dirs_len);
2152 if (cached != NULL)
2154 fd = open_verify (cached,
2155 &fb, loader ?: GL(dl_ns)[nsid]._ns_loaded,
2156 LA_SER_CONFIG, &found_other_class, false);
2157 if (__builtin_expect (fd != -1, 1))
2159 realname = local_strdup (cached);
2160 if (realname == NULL)
2162 __close (fd);
2163 fd = -1;
2170 /* Finally, try the default path. */
2171 if (fd == -1
2172 && ((l = loader ?: GL(dl_ns)[nsid]._ns_loaded) == NULL
2173 || __builtin_expect (!(l->l_flags_1 & DF_1_NODEFLIB), 1))
2174 && rtld_search_dirs.dirs != (void *) -1)
2175 fd = open_path (name, namelen, preloaded, &rtld_search_dirs,
2176 &realname, &fb, l, LA_SER_DEFAULT, &found_other_class);
2178 /* Add another newline when we are tracing the library loading. */
2179 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS, 0))
2180 _dl_debug_printf ("\n");
2182 else
2184 /* The path may contain dynamic string tokens. */
2185 realname = (loader
2186 ? expand_dynamic_string_token (loader, name)
2187 : local_strdup (name));
2188 if (realname == NULL)
2189 fd = -1;
2190 else
2192 fd = open_verify (realname, &fb,
2193 loader ?: GL(dl_ns)[nsid]._ns_loaded, 0,
2194 &found_other_class, true);
2195 if (__builtin_expect (fd, 0) == -1)
2196 free (realname);
2200 #ifdef SHARED
2201 no_file:
2202 #endif
2203 /* In case the LOADER information has only been provided to get to
2204 the appropriate RUNPATH/RPATH information we do not need it
2205 anymore. */
2206 if (mode & __RTLD_CALLMAP)
2207 loader = NULL;
2209 if (__builtin_expect (fd, 0) == -1)
2211 if (trace_mode
2212 && __builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_PRELINK, 0) == 0)
2214 /* We haven't found an appropriate library. But since we
2215 are only interested in the list of libraries this isn't
2216 so severe. Fake an entry with all the information we
2217 have. */
2218 static const Elf_Symndx dummy_bucket = STN_UNDEF;
2220 /* Enter the new object in the list of loaded objects. */
2221 if ((name_copy = local_strdup (name)) == NULL
2222 || (l = _dl_new_object (name_copy, name, type, loader,
2223 mode, nsid)) == NULL)
2225 free (name_copy);
2226 _dl_signal_error (ENOMEM, name, NULL,
2227 N_("cannot create shared object descriptor"));
2229 /* Signal that this is a faked entry. */
2230 l->l_faked = 1;
2231 /* Since the descriptor is initialized with zero we do not
2232 have do this here.
2233 l->l_reserved = 0; */
2234 l->l_buckets = &dummy_bucket;
2235 l->l_nbuckets = 1;
2236 l->l_relocated = 1;
2238 return l;
2240 else if (found_other_class)
2241 _dl_signal_error (0, name, NULL,
2242 ELFW(CLASS) == ELFCLASS32
2243 ? N_("wrong ELF class: ELFCLASS64")
2244 : N_("wrong ELF class: ELFCLASS32"));
2245 else
2246 _dl_signal_error (errno, name, NULL,
2247 N_("cannot open shared object file"));
/* Success: hand the open descriptor over for the actual mapping.  */
2250 void *stack_end = __libc_stack_end;
2251 return _dl_map_object_from_fd (name, fd, &fb, realname, loader, type, mode,
2252 &stack_end, nsid);
/* Serialize LOADER's library search path into SI, in the same order the
   dynamic linker itself searches: DT_RPATH chain, LD_LIBRARY_PATH,
   DT_RUNPATH, then the default directories.  Runs in two passes driven by
   COUNTING: first to size dls_cnt/dls_size, then to fill dls_serpath and
   the string area.  NOTE(review): presumably the backend for dlinfo's
   RTLD_DI_SERINFOSIZE/RTLD_DI_SERINFO requests — confirm at the caller.
   NOTE(review): this listing is a gitweb extraction; the leading number on
   each line is the original file's line number, brace-only lines (including
   the `do` of the do/while below) were dropped, and the function's closing
   brace lies past this excerpt.  */
2256 void
2257 internal_function
2258 _dl_rtld_di_serinfo (struct link_map *loader, Dl_serinfo *si, bool counting)
2260 if (counting)
/* First pass: start the tallies from zero.  */
2262 si->dls_cnt = 0;
2263 si->dls_size = 0;
/* Second-pass cursors: next dls_serpath slot and next byte of the string
   area, which begins right after the dls_cnt path entries.  */
2266 unsigned int idx = 0;
2267 char *allocptr = (char *) &si->dls_serpath[si->dls_cnt];
/* Append one search-path list to SI (count or fill, per COUNTING).
   The macro below rewrites every call to pass 0 for FLAGS, so dls_flags
   is always stored as 0 — the original XXX marks this as unfinished.  */
2268 void add_path (const struct r_search_path_struct *sps, unsigned int flags)
2269 # define add_path(sps, flags) add_path(sps, 0) /* XXX */
/* (void *) -1 marks a search-path list that was never decomposed.  */
2271 if (sps->dirs != (void *) -1)
2273 struct r_search_path_elem **dirs = sps->dirs;
2276 const struct r_search_path_elem *const r = *dirs++;
2277 if (counting)
2279 si->dls_cnt++;
/* Reserve at least 2 bytes per entry: a one-char name ("/" or
   ".") plus its NUL terminator.  */
2280 si->dls_size += MAX (2, r->dirnamelen);
2282 else
2284 Dl_serpath *const sp = &si->dls_serpath[idx++];
2285 sp->dls_name = allocptr;
2286 if (r->dirnamelen < 2)
/* Empty dirname means the current directory.  */
2287 *allocptr++ = r->dirnamelen ? '/' : '.';
2288 else
/* NOTE(review): copies dirnamelen - 1 bytes, dropping the last
   byte — dirname apparently carries a trailing '/'; confirm
   against r_search_path_elem's invariants.  */
2289 allocptr = __mempcpy (allocptr,
2290 r->dirname, r->dirnamelen - 1);
2291 *allocptr++ = '\0';
2292 sp->dls_flags = flags;
2295 while (*dirs != NULL);
2299 /* When the object has the RUNPATH information we don't use any RPATHs. */
2300 if (loader->l_info[DT_RUNPATH] == NULL)
2302 /* First try the DT_RPATH of the dependent object that caused NAME
2303 to be loaded. Then that object's dependent, and on up. */
2305 struct link_map *l = loader;
2308 if (cache_rpath (l, &l->l_rpath_dirs, DT_RPATH, "RPATH"))
2309 add_path (&l->l_rpath_dirs, XXX_RPATH);
2310 l = l->l_loader;
2312 while (l != NULL);
2314 /* If dynamically linked, try the DT_RPATH of the executable itself. */
2315 if (loader->l_ns == LM_ID_BASE)
2317 l = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
2318 if (l != NULL && l->l_type != lt_loaded && l != loader)
2319 if (cache_rpath (l, &l->l_rpath_dirs, DT_RPATH, "RPATH"))
2320 add_path (&l->l_rpath_dirs, XXX_RPATH);
2324 /* Try the LD_LIBRARY_PATH environment variable. */
2325 add_path (&env_path_list, XXX_ENV);
2327 /* Look at the RUNPATH information for this binary. */
2328 if (cache_rpath (loader, &loader->l_runpath_dirs, DT_RUNPATH, "RUNPATH"))
2329 add_path (&loader->l_runpath_dirs, XXX_RUNPATH);
2331 /* XXX
2332 Here is where ld.so.cache gets checked, but we don't have
2333 a way to indicate that in the results for Dl_serinfo. */
2335 /* Finally, try the default path. */
2336 if (!(loader->l_flags_1 & DF_1_NODEFLIB))
2337 add_path (&rtld_search_dirs, XXX_default);
2339 if (counting)
2340 /* Count the struct size before the string area, which we didn't
2341 know before we completed dls_cnt. */
2342 si->dls_size += (char *) &si->dls_serpath[si->dls_cnt] - (char *) si;