glibc.git: elf/dl-deps.c
/* Load the dependencies of a mapped object.
   Copyright (C) 1996-2013 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <atomic.h>
#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/param.h>
#include <ldsodefs.h>

#include <dl-dst.h>
/* Whether a shared object references one or more auxiliary objects
   is signaled by the AUXTAG entry in l_info.  */
#define AUXTAG (DT_NUM + DT_THISPROCNUM + DT_VERSIONTAGNUM \
                + DT_EXTRATAGIDX (DT_AUXILIARY))
/* Whether a shared object references one or more filter objects
   is signaled by the FILTERTAG entry in l_info.  */
#define FILTERTAG (DT_NUM + DT_THISPROCNUM + DT_VERSIONTAGNUM \
                   + DT_EXTRATAGIDX (DT_FILTER))
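
/* DT_AUXILIARY and DT_FILTER entries are normally produced by the static
   linker (GNU ld's --auxiliary/-f and --filter/-F options); the two macros
   above simply name the l_info[] slots in which these "extra" dynamic tags
   are stored past the regular DT_* indices.  */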

/* When loading auxiliary objects we must ignore errors.  It's ok if
   an object is missing.  */
struct openaux_args
  {
    /* The arguments to openaux.  */
    struct link_map *map;
    int trace_mode;
    int open_mode;
    const char *strtab;
    const char *name;

    /* The return value of openaux.  */
    struct link_map *aux;
  };

static void
openaux (void *a)
{
  struct openaux_args *args = (struct openaux_args *) a;

  args->aux = _dl_map_object (args->map, args->name,
                              (args->map->l_type == lt_executable
                               ? lt_library : args->map->l_type),
                              args->trace_mode, args->open_mode,
                              args->map->l_ns);
}
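
/* Collect into LIST the map MAP followed, depth first, by every object in
   the l_initfini lists reachable from it that has not been visited yet,
   marking each visited map via l_reserved.  Returns the number of entries
   written; the caller clears the l_reserved bits again afterwards.  */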
static ptrdiff_t
internal_function
_dl_build_local_scope (struct link_map **list, struct link_map *map)
{
  struct link_map **p = list;
  struct link_map **q;

  *p++ = map;
  map->l_reserved = 1;
  if (map->l_initfini)
    for (q = map->l_initfini + 1; *q; ++q)
      if (! (*q)->l_reserved)
        p += _dl_build_local_scope (p, *q);
  return p - list;
}

/* We use a very special kind of list to track the path
   through the list of loaded shared objects.  We have to
   produce a flat list with unique members of all involved objects.  */
struct list
  {
    int done;                   /* Nonzero if this map was processed.  */
    struct link_map *map;       /* The data.  */
    struct list *next;          /* Elements for normal list.  */
  };

/* Macro to expand DST.  It is a macro since we use `alloca'.  */
#define expand_dst(l, str, fatal) \
  ({ \
    const char *__str = (str); \
    const char *__result = __str; \
    size_t __dst_cnt = DL_DST_COUNT (__str, 0); \
 \
    if (__dst_cnt != 0) \
      { \
        char *__newp; \
 \
        /* DST must not appear in SUID/SGID programs.  */ \
        if (INTUSE(__libc_enable_secure)) \
          _dl_signal_error (0, __str, NULL, N_("\
DST not allowed in SUID/SGID programs")); \
 \
        __newp = (char *) alloca (DL_DST_REQUIRED (l, __str, strlen (__str), \
                                                   __dst_cnt)); \
 \
        __result = _dl_dst_substitute (l, __str, __newp, 0); \
 \
        if (*__result == '\0') \
          { \
            /* The replacement for the DST is not known.  We can't \
               proceed.  */ \
            if (fatal) \
              _dl_signal_error (0, __str, NULL, N_("\
empty dynamic string token substitution")); \
            else \
              { \
                /* This is for DT_AUXILIARY.  */ \
                if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS, 0))\
                  _dl_debug_printf (N_("\
cannot load auxiliary `%s' because of empty dynamic string token " \
                                       "substitution\n"), __str); \
                continue; \
              } \
          } \
      } \
 \
    __result; })
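
/* A dynamic string token is one of $ORIGIN, $LIB or $PLATFORM.  For
   example, a DT_NEEDED string such as "$ORIGIN/libdep.so" (libdep.so is
   only an illustrative name) has its token replaced relative to the
   referencing object's directory before it is handed to _dl_map_object
   below.  */

/* Load the immediate dependencies (DT_NEEDED, DT_AUXILIARY and DT_FILTER
   entries) of MAP and of everything reachable from it.  The PRELOADS
   (NPRELOADS of them) are inserted right after MAP.  The result is both
   the breadth-first symbol search list (map->l_searchlist) and the
   dependency-sorted init/fini list (map->l_initfini).  */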
void
internal_function
_dl_map_object_deps (struct link_map *map,
                     struct link_map **preloads, unsigned int npreloads,
                     int trace_mode, int open_mode)
{
  struct list *known = __alloca (sizeof *known * (1 + npreloads + 1));
  struct list *runp, *tail;
  unsigned int nlist, i;
  /* Object name.  */
  const char *name;
  int errno_saved;
  int errno_reason;
  const char *errstring;
  const char *objname;

  void preload (struct link_map *map)
    {
      known[nlist].done = 0;
      known[nlist].map = map;
      known[nlist].next = &known[nlist + 1];

      ++nlist;
      /* We use `l_reserved' as a mark bit to detect objects we have
         already put in the search list and avoid adding duplicate
         elements later in the list.  */
      map->l_reserved = 1;
    }

  /* No loaded object so far.  */
  nlist = 0;

  /* First load MAP itself.  */
  preload (map);

  /* Add the preloaded items after MAP but before any of its dependencies.  */
  for (i = 0; i < npreloads; ++i)
    preload (preloads[i]);

  /* Terminate the lists.  */
  known[nlist - 1].next = NULL;

  /* Pointer to last unique object.  */
  tail = &known[nlist - 1];

  /* No alloca'd space yet.  */
  struct link_map **needed_space = NULL;
  size_t needed_space_bytes = 0;

  /* Process each element of the search list, loading each of its
     auxiliary objects and immediate dependencies.  Auxiliary objects
     will be added in the list before the object itself and
     dependencies will be appended to the list as we step through it.
     This produces a flat, ordered list that represents a
     breadth-first search of the dependency tree.

     The whole process is complicated by the fact that we would rather
     use alloca for the temporary list elements.  But using alloca means
     we cannot use recursive function calls.  */
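
  /* For illustration (with hypothetical names and no preloads): if MAP
     needs A.so and B.so, and A.so in turn needs C.so, the list built below
     comes out as MAP, A.so, B.so, C.so -- each object's direct dependencies
     are appended after everything already queued, which is what makes the
     walk breadth first.  */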
  errno_saved = errno;
  errno_reason = 0;
  errstring = NULL;
  errno = 0;
  name = NULL;
  for (runp = known; runp; )
    {
      struct link_map *l = runp->map;
      struct link_map **needed = NULL;
      unsigned int nneeded = 0;

      /* Unless otherwise stated, this object is handled.  */
      runp->done = 1;

      /* Allocate a temporary record to contain the references to the
         dependencies of this object.  */
      if (l->l_searchlist.r_list == NULL && l->l_initfini == NULL
          && l != map && l->l_ldnum > 0)
        {
          size_t new_size = l->l_ldnum * sizeof (struct link_map *);

          if (new_size > needed_space_bytes)
            needed_space
              = extend_alloca (needed_space, needed_space_bytes, new_size);

          needed = needed_space;
        }

      if (l->l_info[DT_NEEDED] || l->l_info[AUXTAG] || l->l_info[FILTERTAG])
        {
          const char *strtab = (const void *) D_PTR (l, l_info[DT_STRTAB]);
          struct openaux_args args;
          struct list *orig;
          const ElfW(Dyn) *d;

          args.strtab = strtab;
          args.map = l;
          args.trace_mode = trace_mode;
          args.open_mode = open_mode;
          orig = runp;

          for (d = l->l_ld; d->d_tag != DT_NULL; ++d)
            if (__builtin_expect (d->d_tag, DT_NEEDED) == DT_NEEDED)
              {
                /* Map in the needed object.  */
                struct link_map *dep;

                /* Recognize DSTs.  */
                name = expand_dst (l, strtab + d->d_un.d_val, 0);
                /* Store the tag in the argument structure.  */
                args.name = name;

                bool malloced;
                int err = _dl_catch_error (&objname, &errstring, &malloced,
                                           openaux, &args);
                if (__builtin_expect (errstring != NULL, 0))
                  {
                    char *new_errstring = strdupa (errstring);
                    objname = strdupa (objname);
                    if (malloced)
                      free ((char *) errstring);
                    errstring = new_errstring;

                    if (err)
                      errno_reason = err;
                    else
                      errno_reason = -1;
                    goto out;
                  }
                else
                  dep = args.aux;

                if (! dep->l_reserved)
                  {
                    /* Allocate new entry.  */
                    struct list *newp;

                    newp = alloca (sizeof (struct list));

                    /* Append DEP to the list.  */
                    newp->map = dep;
                    newp->done = 0;
                    newp->next = NULL;
                    tail->next = newp;
                    tail = newp;
                    ++nlist;
                    /* Set the mark bit that says it's already in the list.  */
                    dep->l_reserved = 1;
                  }

                /* Remember this dependency.  */
                if (needed != NULL)
                  needed[nneeded++] = dep;
              }
            else if (d->d_tag == DT_AUXILIARY || d->d_tag == DT_FILTER)
              {
                struct list *newp;

                /* Recognize DSTs.  */
                name = expand_dst (l, strtab + d->d_un.d_val,
                                   d->d_tag == DT_AUXILIARY);
                /* Store the tag in the argument structure.  */
                args.name = name;

                if (d->d_tag == DT_AUXILIARY)
                  {
                    /* Say that we are about to load an auxiliary library.  */
                    if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS,
                                          0))
                      _dl_debug_printf ("load auxiliary object=%s"
                                        " requested by file=%s\n",
                                        name,
                                        l->l_name[0]
                                        ? l->l_name : rtld_progname);

                    /* We must be prepared that the addressed shared
                       object is not available.  */
                    bool malloced;
                    (void) _dl_catch_error (&objname, &errstring, &malloced,
                                            openaux, &args);
                    if (__builtin_expect (errstring != NULL, 0))
                      {
                        /* We are not interested in the error message.  */
                        assert (errstring != NULL);
                        if (malloced)
                          free ((char *) errstring);

                        /* Simply ignore this error and continue the work.  */
                        continue;
                      }
                  }
                else
                  {
                    /* Say that we are about to load a filter object.  */
                    if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS,
                                          0))
                      _dl_debug_printf ("load filtered object=%s"
                                        " requested by file=%s\n",
                                        name,
                                        l->l_name[0]
                                        ? l->l_name : rtld_progname);

                    /* For filter objects the dependency must be available.  */
                    bool malloced;
                    int err = _dl_catch_error (&objname, &errstring, &malloced,
                                               openaux, &args);
                    if (__builtin_expect (errstring != NULL, 0))
                      {
                        char *new_errstring = strdupa (errstring);
                        objname = strdupa (objname);
                        if (malloced)
                          free ((char *) errstring);
                        errstring = new_errstring;

                        if (err)
                          errno_reason = err;
                        else
                          errno_reason = -1;
                        goto out;
                      }
                  }

                /* The auxiliary object is actually available.
                   Incorporate the map in all the lists.  */

                /* Allocate new entry.  This always has to be done.  */
                newp = alloca (sizeof (struct list));

                /* We want to insert the new map before the current one,
                   but we have no back links.  So we copy the contents of
                   the current entry over.  Note that ORIG and NEWP now
                   have switched their meanings.  */
                memcpy (newp, orig, sizeof (*newp));

                /* Initialize new entry.  */
                orig->done = 0;
                orig->map = args.aux;

                /* Remember this dependency.  */
                if (needed != NULL)
                  needed[nneeded++] = args.aux;

                /* We must handle two situations here: the map is new,
                   so we must add it in all three lists.  If the map
                   is already known, we have two further possibilities:
                   - if the object is before the current map in the
                     search list, we do nothing.  It is already found
                     early enough.
                   - if the object is after the current one, we must
                     move it just before the current map to make sure
                     the symbols are found early enough.  */
                if (args.aux->l_reserved)
                  {
                    /* The object is already somewhere in the list.
                       Locate it first.  */
                    struct list *late;

                    /* This object is already in the search list we
                       are building.  Don't add a duplicate pointer.
                       Just added by _dl_map_object.  */
                    for (late = newp; late->next != NULL; late = late->next)
                      if (late->next->map == args.aux)
                        break;

                    if (late->next != NULL)
                      {
                        /* The object is somewhere behind the current
                           position in the search path.  We have to
                           move it to this earlier position.  */
                        orig->next = newp;

                        /* Now remove the later entry from the list
                           and adjust the tail pointer.  */
                        if (tail == late->next)
                          tail = late;
                        late->next = late->next->next;

                        /* We must move the object earlier in the chain.  */
                        if (args.aux->l_prev != NULL)
                          args.aux->l_prev->l_next = args.aux->l_next;
                        if (args.aux->l_next != NULL)
                          args.aux->l_next->l_prev = args.aux->l_prev;

                        args.aux->l_prev = newp->map->l_prev;
                        newp->map->l_prev = args.aux;
                        if (args.aux->l_prev != NULL)
                          args.aux->l_prev->l_next = args.aux;
                        args.aux->l_next = newp->map;
                      }
                    else
                      {
                        /* The object must be somewhere earlier in the
                           list.  Undo to the current list element what
                           we did above.  */
                        memcpy (orig, newp, sizeof (*newp));
                        continue;
                      }
                  }
                else
                  {
                    /* This is easy.  We just add the symbol right here.  */
                    orig->next = newp;
                    ++nlist;
                    /* Set the mark bit that says it's already in the list.  */
                    args.aux->l_reserved = 1;

                    /* The only problem is that in the double linked
                       list of all objects we don't have this new
                       object at the correct place.  Correct this here.  */
                    if (args.aux->l_prev)
                      args.aux->l_prev->l_next = args.aux->l_next;
                    if (args.aux->l_next)
                      args.aux->l_next->l_prev = args.aux->l_prev;

                    args.aux->l_prev = newp->map->l_prev;
                    newp->map->l_prev = args.aux;
                    if (args.aux->l_prev != NULL)
                      args.aux->l_prev->l_next = args.aux;
                    args.aux->l_next = newp->map;
                  }

                /* Move the tail pointer if necessary.  */
                if (orig == tail)
                  tail = newp;

                /* Move on the insert point.  */
                orig = newp;
              }

          /* Terminate the list of dependencies and store the array address.  */
          if (needed != NULL)
            {
              needed[nneeded++] = NULL;

              struct link_map **l_initfini = (struct link_map **)
                malloc ((2 * nneeded + 1) * sizeof needed[0]);
              if (l_initfini == NULL)
                _dl_signal_error (ENOMEM, map->l_name, NULL,
                                  N_("cannot allocate dependency list"));
              l_initfini[0] = l;
              memcpy (&l_initfini[1], needed, nneeded * sizeof needed[0]);
              memcpy (&l_initfini[nneeded + 1], l_initfini,
                      nneeded * sizeof needed[0]);
              atomic_write_barrier ();
              l->l_initfini = l_initfini;
              l->l_free_initfini = 1;
            }
        }

      /* If we have no auxiliary objects just go on to the next map.  */
      if (runp->done)
        do
          runp = runp->next;
        while (runp != NULL && runp->done);
    }

 out:
  if (errno == 0 && errno_saved != 0)
    __set_errno (errno_saved);

  struct link_map **old_l_initfini = NULL;
  if (map->l_initfini != NULL && map->l_type == lt_loaded)
    {
      /* This object was previously loaded as a dependency and we have
         a separate l_initfini list.  We don't need it anymore.  */
      assert (map->l_searchlist.r_list == NULL);
      old_l_initfini = map->l_initfini;
    }

  /* Store the search list we built in the object.  It will be used for
     searches in the scope of this object.  */
  struct link_map **l_initfini =
    (struct link_map **) malloc ((2 * nlist + 1)
                                 * sizeof (struct link_map *));
  if (l_initfini == NULL)
    _dl_signal_error (ENOMEM, map->l_name, NULL,
                      N_("cannot allocate symbol search list"));

  map->l_searchlist.r_list = &l_initfini[nlist + 1];
  map->l_searchlist.r_nlist = nlist;

  for (nlist = 0, runp = known; runp; runp = runp->next)
    {
      if (__builtin_expect (trace_mode, 0) && runp->map->l_faked)
        /* This can happen when we trace the loading.  */
        --map->l_searchlist.r_nlist;
      else
        map->l_searchlist.r_list[nlist++] = runp->map;

      /* Now clear all the mark bits we set in the objects on the search list
         to avoid duplicates, so the next call starts fresh.  */
      runp->map->l_reserved = 0;
    }

  if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_PRELINK, 0) != 0
      && map == GL(dl_ns)[LM_ID_BASE]._ns_loaded)
    {
      /* If we are to compute conflicts, we have to build local scope
         for each library, not just the ultimate loader.  */
      for (i = 0; i < nlist; ++i)
        {
          struct link_map *l = map->l_searchlist.r_list[i];
          unsigned int j, cnt;

          /* The local scope has already been computed.  */
          if (l == map
              || (l->l_local_scope[0]
                  && l->l_local_scope[0]->r_nlist) != 0)
            continue;

          if (l->l_info[AUXTAG] || l->l_info[FILTERTAG])
            {
              /* As the current DT_AUXILIARY/DT_FILTER implementation needs
                 to be rewritten, there is no need to bother with prelinking
                 the old implementation.  */
              _dl_signal_error (EINVAL, l->l_name, NULL, N_("\
Filters not supported with LD_TRACE_PRELINKING"));
            }

          cnt = _dl_build_local_scope (l_initfini, l);
          assert (cnt <= nlist);
          for (j = 0; j < cnt; j++)
            {
              l_initfini[j]->l_reserved = 0;
              if (j && __builtin_expect (l_initfini[j]->l_info[DT_SYMBOLIC]
                                         != NULL, 0))
                l->l_symbolic_in_local_scope = true;
            }

          l->l_local_scope[0] =
            (struct r_scope_elem *) malloc (sizeof (struct r_scope_elem)
                                            + (cnt
                                               * sizeof (struct link_map *)));
          if (l->l_local_scope[0] == NULL)
            _dl_signal_error (ENOMEM, map->l_name, NULL,
                              N_("cannot allocate symbol search list"));
          l->l_local_scope[0]->r_nlist = cnt;
          l->l_local_scope[0]->r_list =
            (struct link_map **) (l->l_local_scope[0] + 1);
          memcpy (l->l_local_scope[0]->r_list, l_initfini,
                  cnt * sizeof (struct link_map *));
        }
    }
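
  /* map->l_reldeps records objects that earlier symbol lookups made MAP
     depend on.  Any such object that also appears in the search list just
     built is redundant there, so the pass below marks the search-list
     members via l_reserved and compacts map->l_reldeps to drop the
     duplicates.  */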
  /* Maybe we can remove some relocation dependencies now.  */
  assert (map->l_searchlist.r_list[0] == map);
  struct link_map_reldeps *l_reldeps = NULL;
  if (map->l_reldeps != NULL)
    {
      for (i = 1; i < nlist; ++i)
        map->l_searchlist.r_list[i]->l_reserved = 1;

      struct link_map **list = &map->l_reldeps->list[0];
      for (i = 0; i < map->l_reldeps->act; ++i)
        if (list[i]->l_reserved)
          {
            /* Need to allocate new array of relocation dependencies.  */
            l_reldeps = malloc (sizeof (*l_reldeps)
                                + map->l_reldepsmax
                                  * sizeof (struct link_map *));
            if (l_reldeps == NULL)
              /* Bad luck, keep the reldeps duplicated between
                 map->l_reldeps->list and map->l_initfini lists.  */
              ;
            else
              {
                unsigned int j = i;
                memcpy (&l_reldeps->list[0], &list[0],
                        i * sizeof (struct link_map *));
                for (i = i + 1; i < map->l_reldeps->act; ++i)
                  if (!list[i]->l_reserved)
                    l_reldeps->list[j++] = list[i];
                l_reldeps->act = j;
              }
          }

      for (i = 1; i < nlist; ++i)
        map->l_searchlist.r_list[i]->l_reserved = 0;
    }

  /* Sort the initializer list to take dependencies into account.  The binary
     itself will always be initialized last.  */
  memcpy (l_initfini, map->l_searchlist.r_list,
          nlist * sizeof (struct link_map *));
  if (__builtin_expect (nlist > 1, 1))
    {
      /* We can skip looking for the binary itself which is at the front
         of the search list.  */
      i = 1;
      uint16_t seen[nlist];
      memset (seen, 0, nlist * sizeof (seen[0]));
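      /* The seen[] counters record how often each position has been
         reconsidered; the check on seen[i + 1] below gives up on further
         reordering once an entry has been revisited more times than there
         are remaining list slots, which keeps circular dependencies from
         making this loop spin forever.  */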
      while (1)
        {
          /* Keep track of which object we looked at this round.  */
          ++seen[i];
          struct link_map *thisp = l_initfini[i];

          /* Find the last object in the list for which the current one is
             a dependency and move the current object behind the object
             with the dependency.  */
          unsigned int k = nlist - 1;
          while (k > i)
            {
              struct link_map **runp = l_initfini[k]->l_initfini;
              if (runp != NULL)
                /* Look through the dependencies of the object.  */
                while (*runp != NULL)
                  if (__builtin_expect (*runp++ == thisp, 0))
                    {
                      /* Move the current object to the back past the last
                         object with it as the dependency.  */
                      memmove (&l_initfini[i], &l_initfini[i + 1],
                               (k - i) * sizeof (l_initfini[0]));
                      l_initfini[k] = thisp;

                      if (seen[i + 1] > nlist - i)
                        {
                          ++i;
                          goto next_clear;
                        }

                      uint16_t this_seen = seen[i];
                      memmove (&seen[i], &seen[i + 1],
                               (k - i) * sizeof (seen[0]));
                      seen[k] = this_seen;

                      goto next;
                    }

              --k;
            }

          if (++i == nlist)
            break;

        next_clear:
          memset (&seen[i], 0, (nlist - i) * sizeof (seen[0]));

        next:;
        }
    }

  /* Terminate the list of dependencies.  */
  l_initfini[nlist] = NULL;
  atomic_write_barrier ();
  map->l_initfini = l_initfini;
  map->l_free_initfini = 1;
  if (l_reldeps != NULL)
    {
      atomic_write_barrier ();
      void *old_l_reldeps = map->l_reldeps;
      map->l_reldeps = l_reldeps;
      _dl_scope_free (old_l_reldeps);
    }
  if (old_l_initfini != NULL)
    _dl_scope_free (old_l_initfini);

  if (errno_reason)
    _dl_signal_error (errno_reason == -1 ? 0 : errno_reason, objname,
                      NULL, errstring);
}