[glibc.git] / elf / dl-open.c
/* Load a shared object at runtime, relocate it, and run its initializer.
   Copyright (C) 1996-2012 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>		/* Check whether MAP_COPY is defined.  */
#include <sys/param.h>
#include <bits/libc-lock.h>
#include <ldsodefs.h>
#include <bp-sym.h>
#include <caller.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>
#include <atomic.h>

#include <dl-dst.h>
extern ElfW(Addr) _dl_sysdep_start (void **start_argptr,
				    void (*dl_main) (const ElfW(Phdr) *phdr,
						     ElfW(Word) phnum,
						     ElfW(Addr) *user_entry,
						     ElfW(auxv_t) *auxv));
weak_extern (BP_SYM (_dl_sysdep_start))

extern int __libc_multiple_libcs;	/* Defined in init-first.c.  */
/* We must be careful not to leave us in an inconsistent state.  Thus we
   catch any error and re-raise it after cleaning up.  */

struct dl_open_args
{
  const char *file;
  int mode;
  /* This is the caller of the dlopen() function.  */
  const void *caller_dlopen;
  /* This is the caller of _dl_open().  */
  const void *caller_dl_open;
  struct link_map *map;
  /* Namespace ID.  */
  Lmid_t nsid;
  /* Original parameters to the program and the current environment.  */
  int argc;
  char **argv;
  char **env;
};
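/* Usage sketch (added for clarity; it mirrors the code in _dl_open below):
   _dl_open fills in one of these structures on its own stack and runs the
   real work through the error-catching machinery,

       args.file = file;
       args.mode = mode;
       ...
       errcode = _dl_catch_error (&objname, &errstring, &malloced,
				  dl_open_worker, &args);

   so that any _dl_signal_error raised inside dl_open_worker unwinds back
   to _dl_open, which can clean up and re-raise the error.  */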
static int
add_to_global (struct link_map *new)
{
  struct link_map **new_global;
  unsigned int to_add = 0;
  unsigned int cnt;

  /* Count the objects we have to put in the global scope.  */
  for (cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    if (new->l_searchlist.r_list[cnt]->l_global == 0)
      ++to_add;

  /* The symbols of the new object and its dependencies are to be
     introduced into the global scope that will be used to resolve
     references from other dynamically-loaded objects.

     The global scope is the searchlist in the main link map.  We
     extend this list if necessary.  There is one problem though:
     since this structure was allocated very early (before the libc
     is loaded) the memory it uses is allocated by the malloc()-stub
     in the ld.so.  When we come here these functions are not used
     anymore.  Instead the malloc() implementation of the libc is
     used.  But this means the block from the main map cannot be used
     in a realloc() call.  Therefore we allocate a completely new
     array the first time we have to add something to the global scope.  */

  struct link_namespaces *ns = &GL(dl_ns)[new->l_ns];
  if (ns->_ns_global_scope_alloc == 0)
    {
      /* This is the first dynamic object given global scope.  */
      ns->_ns_global_scope_alloc
	= ns->_ns_main_searchlist->r_nlist + to_add + 8;
      new_global = (struct link_map **)
	malloc (ns->_ns_global_scope_alloc * sizeof (struct link_map *));
      if (new_global == NULL)
	{
	  ns->_ns_global_scope_alloc = 0;
	nomem:
	  _dl_signal_error (ENOMEM, new->l_libname->name, NULL,
			    N_("cannot extend global scope"));
	  return 1;
	}

      /* Copy over the old entries.  */
      ns->_ns_main_searchlist->r_list
	= memcpy (new_global, ns->_ns_main_searchlist->r_list,
		  (ns->_ns_main_searchlist->r_nlist
		   * sizeof (struct link_map *)));
    }
  else if (ns->_ns_main_searchlist->r_nlist + to_add
	   > ns->_ns_global_scope_alloc)
    {
      /* We have to extend the existing array of link maps in the
	 main map.  */
      struct link_map **old_global
	= GL(dl_ns)[new->l_ns]._ns_main_searchlist->r_list;
      size_t new_nalloc = ((ns->_ns_global_scope_alloc + to_add) * 2);

      new_global = (struct link_map **)
	malloc (new_nalloc * sizeof (struct link_map *));
      if (new_global == NULL)
	goto nomem;

      memcpy (new_global, old_global,
	      ns->_ns_global_scope_alloc * sizeof (struct link_map *));

      ns->_ns_global_scope_alloc = new_nalloc;
      ns->_ns_main_searchlist->r_list = new_global;

      if (!RTLD_SINGLE_THREAD_P)
	THREAD_GSCOPE_WAIT ();

      free (old_global);
    }

  /* Now add the new entries.  */
  unsigned int new_nlist = ns->_ns_main_searchlist->r_nlist;
  for (cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    {
      struct link_map *map = new->l_searchlist.r_list[cnt];

      if (map->l_global == 0)
	{
	  map->l_global = 1;
	  ns->_ns_main_searchlist->r_list[new_nlist++] = map;

	  /* We modify the global scope.  Report this.  */
	  if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES, 0))
	    _dl_debug_printf ("\nadd %s [%lu] to global scope\n",
			      map->l_name, map->l_ns);
	}
    }
  atomic_write_barrier ();
  ns->_ns_main_searchlist->r_nlist = new_nlist;

  return 0;
}
static void
dl_open_worker (void *a)
{
  struct dl_open_args *args = a;
  const char *file = args->file;
  int mode = args->mode;
  struct link_map *call_map = NULL;

  /* Check whether _dl_open() has been called from a valid DSO.  */
  if (__check_caller (args->caller_dl_open,
		      allow_libc|allow_libdl|allow_ldso) != 0)
    _dl_signal_error (0, "dlopen", NULL, N_("invalid caller"));

  /* Determine the caller's map if necessary.  This is needed in case
     we have a DST, when we don't know the namespace ID we have to put
     the new object in, or when the file name has no path in which
     case we need to look along the RUNPATH/RPATH of the caller.  */
  const char *dst = strchr (file, '$');
  if (dst != NULL || args->nsid == __LM_ID_CALLER
      || strchr (file, '/') == NULL)
    {
      const void *caller_dlopen = args->caller_dlopen;

#ifdef SHARED
      /* We have to find out from which object the caller is calling.
	 By default we assume this is the main application.  */
      call_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
#endif

      struct link_map *l;
      for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
	for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
	  if (caller_dlopen >= (const void *) l->l_map_start
	      && caller_dlopen < (const void *) l->l_map_end
	      && (l->l_contiguous
		  || _dl_addr_inside_object (l, (ElfW(Addr)) caller_dlopen)))
	    {
	      assert (ns == l->l_ns);
	      call_map = l;
	      goto found_caller;
	    }

    found_caller:
      if (args->nsid == __LM_ID_CALLER)
	{
#ifndef SHARED
	  /* In statically linked apps there might be no loaded object.  */
	  if (call_map == NULL)
	    args->nsid = LM_ID_BASE;
	  else
#endif
	    args->nsid = call_map->l_ns;
	}
    }

  assert (_dl_debug_initialize (0, args->nsid)->r_state == RT_CONSISTENT);
  /* Load the named object.  */
  struct link_map *new;
  args->map = new = _dl_map_object (call_map, file, lt_loaded, 0,
				    mode | __RTLD_CALLMAP, args->nsid);

  /* If the pointer returned is NULL this means the RTLD_NOLOAD flag is
     set and the object is not already loaded.  */
  if (new == NULL)
    {
      assert (mode & RTLD_NOLOAD);
      return;
    }

  if (__builtin_expect (mode & __RTLD_SPROF, 0))
    /* This happens only if we load a DSO for 'sprof'.  */
    return;

  /* This object is directly loaded.  */
  ++new->l_direct_opencount;

  /* It was already open.  */
  if (__builtin_expect (new->l_searchlist.r_list != NULL, 0))
    {
      /* Let the user know about the opencount.  */
      if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
	_dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
			  new->l_name, new->l_ns, new->l_direct_opencount);

      /* If the user requested the object to be in the global namespace
	 but it is not so far, add it now.  */
      if ((mode & RTLD_GLOBAL) && new->l_global == 0)
	(void) add_to_global (new);

      assert (_dl_debug_initialize (0, args->nsid)->r_state == RT_CONSISTENT);

      return;
    }
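  /* Past this point the object really was loaded by this call: map its
     dependencies, check symbol versions, relocate everything in
     dependency order, set up TLS, and finally run the initializers
     before the new map is handed back to the caller.  */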
  /* Load that object's dependencies.  */
  _dl_map_object_deps (new, NULL, 0, 0,
		       mode & (__RTLD_DLOPEN | RTLD_DEEPBIND | __RTLD_AUDIT));

  /* So far, so good.  Now check the versions.  */
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    if (new->l_searchlist.r_list[i]->l_real->l_versions == NULL)
      (void) _dl_check_map_versions (new->l_searchlist.r_list[i]->l_real,
				     0, 0);

#ifdef SHARED
  /* Auditing checkpoint: we have added all objects.  */
  if (__builtin_expect (GLRO(dl_naudit) > 0, 0))
    {
      struct link_map *head = GL(dl_ns)[new->l_ns]._ns_loaded;
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
	{
	  struct audit_ifaces *afct = GLRO(dl_audit);
	  for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
	    {
	      if (afct->activity != NULL)
		afct->activity (&head->l_audit[cnt].cookie, LA_ACT_CONSISTENT);

	      afct = afct->next;
	    }
	}
    }
#endif

  /* Notify the debugger all new objects are now ready to go.  */
  struct r_debug *r = _dl_debug_initialize (0, args->nsid);
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
  LIBC_PROBE (map_complete, 3, args->nsid, r, new);

  /* Print scope information.  */
  if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES, 0))
    _dl_show_scope (new, 0);
  /* Only do lazy relocation if `LD_BIND_NOW' is not set.  */
  int reloc_mode = mode & __RTLD_AUDIT;
  if (GLRO(dl_lazy))
    reloc_mode |= mode & RTLD_LAZY;

  /* Sort the objects by dependency for the relocation process.  This
     allows IFUNC relocations to work and it also means copy
     relocations of dependencies are, if necessary, overwritten.  */
  size_t nmaps = 0;
  struct link_map *l = new;
  do
    {
      if (! l->l_real->l_relocated)
	++nmaps;
      l = l->l_next;
    }
  while (l != NULL);
  struct link_map *maps[nmaps];
  nmaps = 0;
  l = new;
  do
    {
      if (! l->l_real->l_relocated)
	maps[nmaps++] = l;
      l = l->l_next;
    }
  while (l != NULL);
  if (nmaps > 1)
    {
      uint16_t seen[nmaps];
      memset (seen, '\0', sizeof (seen));
      size_t i = 0;
      while (1)
	{
	  ++seen[i];
	  struct link_map *thisp = maps[i];

	  /* Find the last object in the list for which the current one is
	     a dependency and move the current object behind the object
	     with the dependency.  */
	  size_t k = nmaps - 1;
	  while (k > i)
	    {
	      struct link_map **runp = maps[k]->l_initfini;
	      if (runp != NULL)
		/* Look through the dependencies of the object.  */
		while (*runp != NULL)
		  if (__builtin_expect (*runp++ == thisp, 0))
		    {
		      /* Move the current object to the back past the last
			 object with it as the dependency.  */
		      memmove (&maps[i], &maps[i + 1],
			       (k - i) * sizeof (maps[0]));
		      maps[k] = thisp;

		      if (seen[i + 1] > nmaps - i)
			{
			  ++i;
			  goto next_clear;
			}

		      uint16_t this_seen = seen[i];
		      memmove (&seen[i], &seen[i + 1],
			       (k - i) * sizeof (seen[0]));
		      seen[k] = this_seen;

		      goto next;
		    }

	      --k;
	    }

	  if (++i == nmaps)
	    break;
	next_clear:
	  memset (&seen[i], 0, (nmaps - i) * sizeof (seen[0]));
	next:;
	}
    }
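  /* Note: the sort above moves every object behind the objects that
     depend on it, and the relocation loop below walks MAPS[] from the
     end, so dependencies are relocated before their dependents.  This
     is what allows IFUNC resolvers in a dependent object to call into
     already-relocated dependencies.  */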
  int relocation_in_progress = 0;

  for (size_t i = nmaps; i-- > 0; )
    {
      l = maps[i];

      if (! relocation_in_progress)
	{
	  /* Notify the debugger that relocations are about to happen.  */
	  LIBC_PROBE (reloc_start, 2, args->nsid, r);
	  relocation_in_progress = 1;
	}

#ifdef SHARED
      if (__builtin_expect (GLRO(dl_profile) != NULL, 0))
	{
	  /* If this here is the shared object which we want to profile
	     make sure the profile is started.  We can find out whether
	     this is necessary or not by observing the `_dl_profile_map'
	     variable.  If it was NULL but is not NULL afterwards we must
	     start the profiling.  */
	  struct link_map *old_profile_map = GL(dl_profile_map);

	  _dl_relocate_object (l, l->l_scope, reloc_mode | RTLD_LAZY, 1);

	  if (old_profile_map == NULL && GL(dl_profile_map) != NULL)
	    {
	      /* We must prepare the profiling.  */
	      _dl_start_profile ();

	      /* Prevent unloading the object.  */
	      GL(dl_profile_map)->l_flags_1 |= DF_1_NODELETE;
	    }
	}
      else
#endif
	_dl_relocate_object (l, l->l_scope, reloc_mode, 0);
    }
  /* If the file is not loaded now as a dependency, add the search
     list of the newly loaded object to the scope.  */
  bool any_tls = false;
  unsigned int first_static_tls = new->l_searchlist.r_nlist;
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];
      int from_scope = 0;

      /* If the initializer has been called already, the object has
	 not been loaded here and now.  */
      if (imap->l_init_called && imap->l_type == lt_loaded)
	{
	  struct r_scope_elem **runp = imap->l_scope;
	  size_t cnt = 0;

	  while (*runp != NULL)
	    {
	      if (*runp == &new->l_searchlist)
		break;
	      ++cnt;
	      ++runp;
	    }

	  if (*runp != NULL)
	    /* Avoid duplicates.  */
	    continue;

	  if (__builtin_expect (cnt + 1 >= imap->l_scope_max, 0))
	    {
	      /* The 'r_scope' array is too small.  Allocate a new one
		 dynamically.  */
	      size_t new_size;
	      struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

	      if (imap->l_scope != imap->l_scope_mem
		  && imap->l_scope_max < SCOPE_ELEMS (imap))
		{
		  new_size = SCOPE_ELEMS (imap);
		  newp = imap->l_scope_mem;
		}
	      else
		{
		  new_size = imap->l_scope_max * 2;
		  newp = (struct r_scope_elem **)
		    malloc (new_size * sizeof (struct r_scope_elem *));
		  if (newp == NULL)
		    _dl_signal_error (ENOMEM, "dlopen", NULL,
				      N_("cannot create scope list"));
		}

	      memcpy (newp, imap->l_scope, cnt * sizeof (imap->l_scope[0]));
	      struct r_scope_elem **old = imap->l_scope;

	      imap->l_scope = newp;

	      if (old != imap->l_scope_mem)
		_dl_scope_free (old);

	      imap->l_scope_max = new_size;
	    }

	  /* First terminate the extended list.  Otherwise a thread
	     might use the new last element and then use the garbage
	     at offset IDX+1.  */
	  imap->l_scope[cnt + 1] = NULL;
	  atomic_write_barrier ();
	  imap->l_scope[cnt] = &new->l_searchlist;

	  /* Print only new scope information.  */
	  from_scope = cnt;
	}
      /* Only add TLS memory if this object is loaded now and
	 therefore is not yet initialized.  */
      else if (! imap->l_init_called
	       /* Only if the module defines thread local data.  */
	       && __builtin_expect (imap->l_tls_blocksize > 0, 0))
	{
	  /* Now that we know the object is loaded successfully add
	     modules containing TLS data to the slot info table.  We
	     might have to increase its size.  */
	  _dl_add_to_slotinfo (imap);

	  if (imap->l_need_tls_init
	      && first_static_tls == new->l_searchlist.r_nlist)
	    first_static_tls = i;

	  /* We have to bump the generation counter.  */
	  any_tls = true;
	}

      /* Print scope information.  */
      if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES, 0))
	_dl_show_scope (imap, from_scope);
    }
  /* Bump the generation number if necessary.  */
  if (any_tls && __builtin_expect (++GL(dl_tls_generation) == 0, 0))
    _dl_fatal_printf (N_("\
TLS generation counter wrapped!  Please report this."));

  /* We need a second pass for static tls data, because _dl_update_slotinfo
     must not be run while calls to _dl_add_to_slotinfo are still pending.  */
  for (unsigned int i = first_static_tls; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      if (imap->l_need_tls_init
	  && ! imap->l_init_called
	  && imap->l_tls_blocksize > 0)
	{
	  /* For static TLS we have to allocate the memory here and
	     now.  This includes allocating memory in the DTV.  But we
	     cannot change any DTV other than our own.  So, if we
	     cannot guarantee that there is room in the DTV we don't
	     even try it and fail the load.

	     XXX We could track the minimum DTV slots allocated in
	     all threads.  */
	  if (! RTLD_SINGLE_THREAD_P && imap->l_tls_modid > DTV_SURPLUS)
	    _dl_signal_error (0, "dlopen", NULL, N_("\
cannot load any more object with static TLS"));

	  imap->l_need_tls_init = 0;
#ifdef SHARED
	  /* Update the slot information data for at least the
	     generation of the DSO we are allocating data for.  */
	  _dl_update_slotinfo (imap->l_tls_modid);
#endif

	  GL(dl_init_static_tls) (imap);
	  assert (imap->l_need_tls_init == 0);
	}
    }
  /* Notify the debugger all new objects have been relocated.  */
  if (relocation_in_progress)
    LIBC_PROBE (reloc_complete, 3, args->nsid, r, new);

  /* Run the initializer functions of new objects.  */
  _dl_init (new, args->argc, args->argv, args->env);

  /* Now we can make the new map available in the global scope.  */
  if (mode & RTLD_GLOBAL)
    /* Move the object into the global namespace.  */
    if (add_to_global (new) != 0)
      /* It failed.  */
      return;

  /* Mark the object as not deletable if the RTLD_NODELETE flag was
     passed.  */
  if (__builtin_expect (mode & RTLD_NODELETE, 0))
    new->l_flags_1 |= DF_1_NODELETE;

#ifndef SHARED
  /* We must be the static _dl_open in libc.a.  A static program that
     has loaded a dynamic object now has competition.  */
  __libc_multiple_libcs = 1;
#endif

  /* Let the user know about the opencount.  */
  if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
    _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
		      new->l_name, new->l_ns, new->l_direct_opencount);
}
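/* dl_open_worker runs with dl_load_lock held (acquired in _dl_open below).
   Once it returns, _dl_open releases the lock and returns args.map, which
   the dlopen wrapper hands back to the application as the opaque handle.  */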
void *
_dl_open (const char *file, int mode, const void *caller_dlopen, Lmid_t nsid,
	  int argc, char *argv[], char *env[])
{
  if ((mode & RTLD_BINDING_MASK) == 0)
    /* One of the flags must be set.  */
    _dl_signal_error (EINVAL, file, NULL, N_("invalid mode for dlopen()"));

  /* Make sure we are alone.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  if (__builtin_expect (nsid == LM_ID_NEWLM, 0))
    {
      /* Find a new namespace.  */
      for (nsid = 1; DL_NNS > 1 && nsid < GL(dl_nns); ++nsid)
	if (GL(dl_ns)[nsid]._ns_loaded == NULL)
	  break;

      if (__builtin_expect (nsid == DL_NNS, 0))
	{
	  /* No more namespaces available.  */
	  __rtld_lock_unlock_recursive (GL(dl_load_lock));

	  _dl_signal_error (EINVAL, file, NULL, N_("\
no more namespaces available for dlmopen()"));
	}
      else if (nsid == GL(dl_nns))
	{
	  __rtld_lock_initialize (GL(dl_ns)[nsid]._ns_unique_sym_table.lock);
	  ++GL(dl_nns);
	}

      _dl_debug_initialize (0, nsid)->r_state = RT_CONSISTENT;
    }
  /* Never allow loading a DSO in a namespace which is empty.  Such
     direct placement only causes problems.  Also don't allow
     loading into a namespace used for auditing.  */
  else if (__builtin_expect (nsid != LM_ID_BASE && nsid != __LM_ID_CALLER, 0)
	   && (GL(dl_ns)[nsid]._ns_nloaded == 0
	       || GL(dl_ns)[nsid]._ns_loaded->l_auditing))
    _dl_signal_error (EINVAL, file, NULL,
		      N_("invalid target namespace in dlmopen()"));
#ifndef SHARED
  else if ((nsid == LM_ID_BASE || nsid == __LM_ID_CALLER)
	   && GL(dl_ns)[LM_ID_BASE]._ns_loaded == NULL
	   && GL(dl_nns) == 0)
    GL(dl_nns) = 1;
#endif

  struct dl_open_args args;
  args.file = file;
  args.mode = mode;
  args.caller_dlopen = caller_dlopen;
  args.caller_dl_open = RETURN_ADDRESS (0);
  args.map = NULL;
  args.nsid = nsid;
  args.argc = argc;
  args.argv = argv;
  args.env = env;

  const char *objname;
  const char *errstring;
  bool malloced;
  int errcode = _dl_catch_error (&objname, &errstring, &malloced,
				 dl_open_worker, &args);

#if defined USE_LDCONFIG && !defined MAP_COPY
  /* We must unmap the cache file.  */
  _dl_unload_cache ();
#endif

  /* See if an error occurred during loading.  */
  if (__builtin_expect (errstring != NULL, 0))
    {
      /* Remove the object from memory.  It may be in an inconsistent
	 state if relocation failed, for example.  */
      if (args.map)
	{
	  /* Maybe some of the modules which were loaded use TLS.
	     Since it will be removed in the following _dl_close call
	     we have to mark the dtv array as having gaps to fill the
	     holes.  This is a pessimistic assumption which won't hurt
	     if not true.  There is no need to do this when we are
	     loading the auditing DSOs since TLS has not yet been set
	     up.  */
	  if ((mode & __RTLD_AUDIT) == 0)
	    GL(dl_tls_dtv_gaps) = true;

	  _dl_close_worker (args.map);
	}

      assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);

      /* Release the lock.  */
      __rtld_lock_unlock_recursive (GL(dl_load_lock));

      /* Make a local copy of the error string so that we can release the
	 memory allocated for it.  */
      size_t len_errstring = strlen (errstring) + 1;
      char *local_errstring;
      if (objname == errstring + len_errstring)
	{
	  size_t total_len = len_errstring + strlen (objname) + 1;
	  local_errstring = alloca (total_len);
	  memcpy (local_errstring, errstring, total_len);
	  objname = local_errstring + len_errstring;
	}
      else
	{
	  local_errstring = alloca (len_errstring);
	  memcpy (local_errstring, errstring, len_errstring);
	}

      if (malloced)
	free ((char *) errstring);

      /* Reraise the error.  */
      _dl_signal_error (errcode, objname, NULL, local_errstring);
    }

  assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);

  /* Release the lock.  */
  __rtld_lock_unlock_recursive (GL(dl_load_lock));

#ifndef SHARED
  DL_STATIC_INIT (args.map);
#endif

  return args.map;
}
void
_dl_show_scope (struct link_map *l, int from)
{
  _dl_debug_printf ("object=%s [%lu]\n",
		    *l->l_name ? l->l_name : rtld_progname, l->l_ns);
  if (l->l_scope != NULL)
    for (int scope_cnt = from; l->l_scope[scope_cnt] != NULL; ++scope_cnt)
      {
	_dl_debug_printf (" scope %u:", scope_cnt);

	for (unsigned int cnt = 0; cnt < l->l_scope[scope_cnt]->r_nlist; ++cnt)
	  if (*l->l_scope[scope_cnt]->r_list[cnt]->l_name)
	    _dl_debug_printf_c (" %s",
				l->l_scope[scope_cnt]->r_list[cnt]->l_name);
	  else
	    _dl_debug_printf_c (" %s", rtld_progname);

	_dl_debug_printf_c ("\n");
      }
  else
    _dl_debug_printf (" no scope\n");
  _dl_debug_printf ("\n");
}
#ifdef IS_IN_rtld
/* Return non-zero if ADDR lies within one of L's segments.  */
int
internal_function
_dl_addr_inside_object (struct link_map *l, const ElfW(Addr) addr)
{
  int n = l->l_phnum;
  const ElfW(Addr) reladdr = addr - l->l_addr;

  while (--n >= 0)
    if (l->l_phdr[n].p_type == PT_LOAD
	&& reladdr - l->l_phdr[n].p_vaddr >= 0
	&& reladdr - l->l_phdr[n].p_vaddr < l->l_phdr[n].p_memsz)
      return 1;
  return 0;
}
#endif
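/* Illustrative example (not part of the original file): application code
   reaches _dl_open through the dlopen/dlmopen wrappers, e.g.

       #include <dlfcn.h>
       #include <stdio.h>

       void *handle = dlopen ("libm.so.6", RTLD_NOW | RTLD_GLOBAL);
       if (handle == NULL)
	 fprintf (stderr, "dlopen: %s\n", dlerror ());

   The wrapper passes the address of the code that called dlopen as
   CALLER_DLOPEN, which dl_open_worker uses above to resolve DSTs,
   RUNPATH/RPATH searches, and LM_ID_CALLER namespace lookups relative
   to the calling object.  */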