/* Load a shared object at runtime, relocate it, and run its initializer.
   Copyright (C) 1996-2013 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>		/* Check whether MAP_COPY is defined.  */
#include <sys/param.h>
#include <bits/libc-lock.h>
#include <ldsodefs.h>
#include <caller.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>
#include <atomic.h>

#include <dl-dst.h>


extern ElfW(Addr) _dl_sysdep_start (void **start_argptr,
				    void (*dl_main) (const ElfW(Phdr) *phdr,
						     ElfW(Word) phnum,
						     ElfW(Addr) *user_entry,
						     ElfW(auxv_t) *auxv));
weak_extern (_dl_sysdep_start)

extern int __libc_multiple_libcs;	/* Defined in init-first.c.  */
/* We must be careful not to leave us in an inconsistent state.  Thus we
   catch any error and re-raise it after cleaning up.  */

struct dl_open_args
{
  const char *file;
  int mode;
  /* This is the caller of the dlopen() function.  */
  const void *caller_dlopen;
  /* This is the caller of _dl_open().  */
  const void *caller_dl_open;
  struct link_map *map;
  /* Namespace ID.  */
  Lmid_t nsid;
  /* Original parameters to the program and the current environment.  */
  int argc;
  char **argv;
  char **env;
};
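
/* The structure above bundles all arguments into one object because
   dl_open_worker runs as a callback under _dl_catch_error (see _dl_open
   below), which passes its callback a single pointer; any error signalled
   inside the worker unwinds back to _dl_open, where it is cleaned up and
   re-raised.  */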

static int
add_to_global (struct link_map *new)
{
  struct link_map **new_global;
  unsigned int to_add = 0;
  unsigned int cnt;

  /* Count the objects we have to put in the global scope.  */
  for (cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    if (new->l_searchlist.r_list[cnt]->l_global == 0)
      ++to_add;

  /* The symbols of the new object and its dependencies are to be
     introduced into the global scope that will be used to resolve
     references from other dynamically-loaded objects.

     The global scope is the searchlist in the main link map.  We
     extend this list if necessary.  There is one problem though:
     since this structure was allocated very early (before libc
     is loaded) the memory it uses is allocated by the malloc()-stub
     in the ld.so.  When we come here these functions are not used
     anymore.  Instead the malloc() implementation of libc is
     used.  But this means the block from the main map cannot be used
     in a realloc() call.  Therefore we allocate a completely new
     array the first time we have to add something to the local scope.  */

  struct link_namespaces *ns = &GL(dl_ns)[new->l_ns];
  if (ns->_ns_global_scope_alloc == 0)
    {
      /* This is the first dynamic object given global scope.  */
      ns->_ns_global_scope_alloc
	= ns->_ns_main_searchlist->r_nlist + to_add + 8;
      new_global = (struct link_map **)
	malloc (ns->_ns_global_scope_alloc * sizeof (struct link_map *));
      if (new_global == NULL)
	{
	  ns->_ns_global_scope_alloc = 0;
	nomem:
	  _dl_signal_error (ENOMEM, new->l_libname->name, NULL,
			    N_("cannot extend global scope"));
	  return 1;
	}

      /* Copy over the old entries.  */
      ns->_ns_main_searchlist->r_list
	= memcpy (new_global, ns->_ns_main_searchlist->r_list,
		  (ns->_ns_main_searchlist->r_nlist
		   * sizeof (struct link_map *)));
    }
  else if (ns->_ns_main_searchlist->r_nlist + to_add
	   > ns->_ns_global_scope_alloc)
    {
      /* We have to extend the existing array of link maps in the
	 main map.  */
      struct link_map **old_global
	= GL(dl_ns)[new->l_ns]._ns_main_searchlist->r_list;
      size_t new_nalloc = ((ns->_ns_global_scope_alloc + to_add) * 2);

      new_global = (struct link_map **)
	malloc (new_nalloc * sizeof (struct link_map *));
      if (new_global == NULL)
	goto nomem;

      memcpy (new_global, old_global,
	      ns->_ns_global_scope_alloc * sizeof (struct link_map *));

      ns->_ns_global_scope_alloc = new_nalloc;
      ns->_ns_main_searchlist->r_list = new_global;

      /* Wait for all threads to leave their lock-free scope lookups
	 before freeing the old array they might still be reading.  */
      if (!RTLD_SINGLE_THREAD_P)
	THREAD_GSCOPE_WAIT ();

      free (old_global);
    }

  /* Now add the new entries.  */
  unsigned int new_nlist = ns->_ns_main_searchlist->r_nlist;
  for (cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    {
      struct link_map *map = new->l_searchlist.r_list[cnt];

      if (map->l_global == 0)
	{
	  map->l_global = 1;
	  ns->_ns_main_searchlist->r_list[new_nlist++] = map;

	  /* We modify the global scope.  Report this.  */
	  if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES, 0))
	    _dl_debug_printf ("\nadd %s [%lu] to global scope\n",
			      map->l_name, map->l_ns);
	}
    }

  /* Make the new entries visible before publishing the new count.  */
  atomic_write_barrier ();
  ns->_ns_main_searchlist->r_nlist = new_nlist;

  return 0;
}
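
/* dl_open_worker does the real work of _dl_open.  It runs as the callback
   of _dl_catch_error, so any _dl_signal_error below simply unwinds to
   _dl_open, which cleans up and re-raises the error.  */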

static void
dl_open_worker (void *a)
{
  struct dl_open_args *args = a;
  const char *file = args->file;
  int mode = args->mode;
  struct link_map *call_map = NULL;

  /* Check whether _dl_open() has been called from a valid DSO.  */
  if (__check_caller (args->caller_dl_open,
		      allow_libc|allow_libdl|allow_ldso) != 0)
    _dl_signal_error (0, "dlopen", NULL, N_("invalid caller"));

  /* Determine the caller's map if necessary.  This is needed in case
     we have a DST, when we don't know the namespace ID we have to put
     the new object in, or when the file name has no path in which
     case we need to look along the RUNPATH/RPATH of the caller.  */
  const char *dst = strchr (file, '$');
  if (dst != NULL || args->nsid == __LM_ID_CALLER
      || strchr (file, '/') == NULL)
    {
      const void *caller_dlopen = args->caller_dlopen;

#ifdef SHARED
      /* We have to find out from which object the caller is calling.
	 By default we assume this is the main application.  */
      call_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
#endif

      struct link_map *l;
      for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
	for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
	  if (caller_dlopen >= (const void *) l->l_map_start
	      && caller_dlopen < (const void *) l->l_map_end
	      && (l->l_contiguous
		  || _dl_addr_inside_object (l, (ElfW(Addr)) caller_dlopen)))
	    {
	      assert (ns == l->l_ns);
	      call_map = l;
	      goto found_caller;
	    }

    found_caller:
      if (args->nsid == __LM_ID_CALLER)
	{
#ifndef SHARED
	  /* In statically linked apps there might be no loaded object.  */
	  if (call_map == NULL)
	    args->nsid = LM_ID_BASE;
	  else
#endif
	    args->nsid = call_map->l_ns;
	}
    }

  assert (_dl_debug_initialize (0, args->nsid)->r_state == RT_CONSISTENT);

  /* Load the named object.  */
  struct link_map *new;
  args->map = new = _dl_map_object (call_map, file, lt_loaded, 0,
				    mode | __RTLD_CALLMAP, args->nsid);

  /* If the pointer returned is NULL this means the RTLD_NOLOAD flag is
     set and the object is not already loaded.  */
  if (new == NULL)
    {
      assert (mode & RTLD_NOLOAD);
      return;
    }

  if (__builtin_expect (mode & __RTLD_SPROF, 0))
    /* This happens only if we load a DSO for 'sprof'.  */
    return;

  /* This object is directly loaded.  */
  ++new->l_direct_opencount;

  /* It was already open.  */
  if (__builtin_expect (new->l_searchlist.r_list != NULL, 0))
    {
      /* Let the user know about the opencount.  */
      if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
	_dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
			  new->l_name, new->l_ns, new->l_direct_opencount);

      /* If the user requested the object to be in the global namespace
	 but it is not so far, add it now.  */
      if ((mode & RTLD_GLOBAL) && new->l_global == 0)
	(void) add_to_global (new);

      assert (_dl_debug_initialize (0, args->nsid)->r_state == RT_CONSISTENT);

      return;
    }
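
  /* Beyond this point the object has really been loaded for the first
     time, so its dependencies must be mapped, versions checked, the
     objects relocated and their initializers run.  */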

  /* Load that object's dependencies.  */
  _dl_map_object_deps (new, NULL, 0, 0,
		       mode & (__RTLD_DLOPEN | RTLD_DEEPBIND | __RTLD_AUDIT));

  /* So far, so good.  Now check the versions.  */
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    if (new->l_searchlist.r_list[i]->l_real->l_versions == NULL)
      (void) _dl_check_map_versions (new->l_searchlist.r_list[i]->l_real,
				     0, 0);

#ifdef SHARED
  /* Auditing checkpoint: we have added all objects.  */
  if (__builtin_expect (GLRO(dl_naudit) > 0, 0))
    {
      struct link_map *head = GL(dl_ns)[new->l_ns]._ns_loaded;
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
	{
	  struct audit_ifaces *afct = GLRO(dl_audit);
	  for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
	    {
	      if (afct->activity != NULL)
		afct->activity (&head->l_audit[cnt].cookie, LA_ACT_CONSISTENT);

	      afct = afct->next;
	    }
	}
    }
#endif

  /* Notify the debugger all new objects are now ready to go.  */
  struct r_debug *r = _dl_debug_initialize (0, args->nsid);
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
  LIBC_PROBE (map_complete, 3, args->nsid, r, new);

  /* Print scope information.  */
  if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES, 0))
    _dl_show_scope (new, 0);

  /* Only do lazy relocation if `LD_BIND_NOW' is not set.  */
  int reloc_mode = mode & __RTLD_AUDIT;
  if (GLRO(dl_lazy))
    reloc_mode |= mode & RTLD_LAZY;

  /* Sort the objects by dependency for the relocation process.  This
     allows IFUNC relocations to work and it also means copy
     relocations of dependencies are overwritten if necessary.  */
  size_t nmaps = 0;
  struct link_map *l = new;
  do
    {
      if (! l->l_real->l_relocated)
	++nmaps;
      l = l->l_next;
    }
  while (l != NULL);
  struct link_map *maps[nmaps];
  nmaps = 0;
  l = new;
  do
    {
      if (! l->l_real->l_relocated)
	maps[nmaps++] = l;
      l = l->l_next;
    }
  while (l != NULL);
  if (nmaps > 1)
    {
      uint16_t seen[nmaps];
      memset (seen, '\0', nmaps * sizeof (seen[0]));
      size_t i = 0;
      while (1)
	{
	  ++seen[i];
	  struct link_map *thisp = maps[i];

	  /* Find the last object in the list for which the current one is
	     a dependency and move the current object behind the object
	     with the dependency.  */
	  size_t k = nmaps - 1;
	  while (k > i)
	    {
	      struct link_map **runp = maps[k]->l_initfini;
	      if (runp != NULL)
		/* Look through the dependencies of the object.  */
		while (*runp != NULL)
		  if (__builtin_expect (*runp++ == thisp, 0))
		    {
		      /* Move the current object to the back past the last
			 object with it as the dependency.  */
		      memmove (&maps[i], &maps[i + 1],
			       (k - i) * sizeof (maps[0]));
		      maps[k] = thisp;

		      if (seen[i + 1] > nmaps - i)
			{
			  ++i;
			  goto next_clear;
			}

		      uint16_t this_seen = seen[i];
		      memmove (&seen[i], &seen[i + 1],
			       (k - i) * sizeof (seen[0]));
		      seen[k] = this_seen;

		      goto next;
		    }

	      --k;
	    }

	  if (++i == nmaps)
	    break;
	next_clear:
	  memset (&seen[i], 0, (nmaps - i) * sizeof (seen[0]));

	next:;
	}
    }
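
  /* After this sort a dependency is generally placed behind the objects
     that use it (cycles are cut off via the SEEN counters).  The loop
     below walks MAPS backwards, so dependencies are relocated before
     their users, which is what IFUNC resolvers and copy relocations
     rely on.  */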

  int relocation_in_progress = 0;

  for (size_t i = nmaps; i-- > 0; )
    {
      l = maps[i];

      if (! relocation_in_progress)
	{
	  /* Notify the debugger that relocations are about to happen.  */
	  LIBC_PROBE (reloc_start, 2, args->nsid, r);
	  relocation_in_progress = 1;
	}

#ifdef SHARED
      if (__builtin_expect (GLRO(dl_profile) != NULL, 0))
	{
	  /* If this is the shared object which we want to profile, make
	     sure the profiling is started.  We can find out whether this
	     is necessary or not by observing the `_dl_profile_map'
	     variable.  If it was NULL but is not NULL afterwards we must
	     start the profiling.  */
	  struct link_map *old_profile_map = GL(dl_profile_map);

	  _dl_relocate_object (l, l->l_scope, reloc_mode | RTLD_LAZY, 1);

	  if (old_profile_map == NULL && GL(dl_profile_map) != NULL)
	    {
	      /* We must prepare the profiling.  */
	      _dl_start_profile ();

	      /* Prevent unloading the object.  */
	      GL(dl_profile_map)->l_flags_1 |= DF_1_NODELETE;
	    }
	}
      else
#endif
	_dl_relocate_object (l, l->l_scope, reloc_mode, 0);
    }

  /* If the file is not loaded now as a dependency, add the search
     list of the newly loaded object to the scope.  */
  bool any_tls = false;
  unsigned int first_static_tls = new->l_searchlist.r_nlist;
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];
      int from_scope = 0;

      /* If the initializer has been called already, the object has
	 not been loaded here and now.  */
      if (imap->l_init_called && imap->l_type == lt_loaded)
	{
	  struct r_scope_elem **runp = imap->l_scope;
	  size_t cnt = 0;

	  while (*runp != NULL)
	    {
	      if (*runp == &new->l_searchlist)
		break;
	      ++cnt;
	      ++runp;
	    }

	  if (*runp != NULL)
	    /* Avoid duplicates.  */
	    continue;

	  if (__builtin_expect (cnt + 1 >= imap->l_scope_max, 0))
	    {
	      /* The 'l_scope' array is too small.  Allocate a new one
		 dynamically.  */
	      size_t new_size;
	      struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

	      if (imap->l_scope != imap->l_scope_mem
		  && imap->l_scope_max < SCOPE_ELEMS (imap))
		{
		  new_size = SCOPE_ELEMS (imap);
		  newp = imap->l_scope_mem;
		}
	      else
		{
		  new_size = imap->l_scope_max * 2;
		  newp = (struct r_scope_elem **)
		    malloc (new_size * sizeof (struct r_scope_elem *));
		  if (newp == NULL)
		    _dl_signal_error (ENOMEM, "dlopen", NULL,
				      N_("cannot create scope list"));
		}

	      memcpy (newp, imap->l_scope, cnt * sizeof (imap->l_scope[0]));
	      struct r_scope_elem **old = imap->l_scope;

	      imap->l_scope = newp;

	      /* Concurrent lookups may still be using the old list;
		 _dl_scope_free releases it only when that is safe.  */
	      if (old != imap->l_scope_mem)
		_dl_scope_free (old);

	      imap->l_scope_max = new_size;
	    }

	  /* First terminate the extended list.  Otherwise a thread
	     might use the new last element and then use the garbage
	     at offset IDX+1.  */
	  imap->l_scope[cnt + 1] = NULL;
	  atomic_write_barrier ();
	  imap->l_scope[cnt] = &new->l_searchlist;

	  /* Print only new scope information.  */
	  from_scope = cnt;
	}
      /* Only add TLS memory if this object is loaded now and
	 therefore is not yet initialized.  */
      else if (! imap->l_init_called
	       /* Only if the module defines thread local data.  */
	       && __builtin_expect (imap->l_tls_blocksize > 0, 0))
	{
	  /* Now that we know the object is loaded successfully add
	     modules containing TLS data to the slot info table.  We
	     might have to increase its size.  */
	  _dl_add_to_slotinfo (imap);

	  if (imap->l_need_tls_init
	      && first_static_tls == new->l_searchlist.r_nlist)
	    first_static_tls = i;

	  /* We have to bump the generation counter.  */
	  any_tls = true;
	}

      /* Print scope information.  */
      if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES, 0))
	_dl_show_scope (imap, from_scope);
    }

  /* Bump the generation number if necessary.  */
  if (any_tls && __builtin_expect (++GL(dl_tls_generation) == 0, 0))
    _dl_fatal_printf (N_("\
TLS generation counter wrapped!  Please report this."));

  /* We need a second pass for static tls data, because _dl_update_slotinfo
     must not be run while calls to _dl_add_to_slotinfo are still pending.  */
  for (unsigned int i = first_static_tls; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      if (imap->l_need_tls_init
	  && ! imap->l_init_called
	  && imap->l_tls_blocksize > 0)
	{
	  /* For static TLS we have to allocate the memory here and
	     now.  This includes allocating memory in the DTV.  But we
	     cannot change any DTV other than our own.  So, if we
	     cannot guarantee that there is room in the DTV we don't
	     even try it and fail the load.

	     XXX We could track the minimum DTV slots allocated in
	     all threads.  */
	  if (! RTLD_SINGLE_THREAD_P && imap->l_tls_modid > DTV_SURPLUS)
	    _dl_signal_error (0, "dlopen", NULL, N_("\
cannot load any more object with static TLS"));

	  imap->l_need_tls_init = 0;
#ifdef SHARED
	  /* Update the slot information data for at least the
	     generation of the DSO we are allocating data for.  */
	  _dl_update_slotinfo (imap->l_tls_modid);
#endif

	  GL(dl_init_static_tls) (imap);
	  assert (imap->l_need_tls_init == 0);
	}
    }

  /* Notify the debugger all new objects have been relocated.  */
  if (relocation_in_progress)
    LIBC_PROBE (reloc_complete, 3, args->nsid, r, new);

  /* Run the initializer functions of new objects.  */
  _dl_init (new, args->argc, args->argv, args->env);

  /* Now we can make the new map available in the global scope.  */
  if (mode & RTLD_GLOBAL)
    /* Move the object in the global namespace.  */
    if (add_to_global (new) != 0)
      /* It failed.  */
      return;

  /* Mark the object as not deletable if the RTLD_NODELETE flag was
     passed.  */
  if (__builtin_expect (mode & RTLD_NODELETE, 0))
    new->l_flags_1 |= DF_1_NODELETE;

#ifndef SHARED
  /* We must be the static _dl_open in libc.a.  A static program that
     has loaded a dynamic object now has competition.  */
  __libc_multiple_libcs = 1;
#endif

  /* Let the user know about the opencount.  */
  if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
    _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
		      new->l_name, new->l_ns, new->l_direct_opencount);
}
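
/* _dl_open is the entry point used by dlopen and dlmopen (and glibc's
   internal dlopen users).  It serializes loads and unloads via
   GL(dl_load_lock) and runs dl_open_worker under _dl_catch_error so that
   a failed load is rolled back with _dl_close_worker before the error is
   re-raised to the caller.  For example, a call such as
   dlopen ("libfoo.so", RTLD_NOW | RTLD_GLOBAL) ends up here with MODE
   carrying the binding flags and CALLER_DLOPEN set to the caller's return
   address ("libfoo.so" is purely an illustrative name).  */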

void *
_dl_open (const char *file, int mode, const void *caller_dlopen, Lmid_t nsid,
	  int argc, char *argv[], char *env[])
{
  if ((mode & RTLD_BINDING_MASK) == 0)
    /* One of the flags must be set.  */
    _dl_signal_error (EINVAL, file, NULL, N_("invalid mode for dlopen()"));

  /* Make sure we are alone.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  if (__builtin_expect (nsid == LM_ID_NEWLM, 0))
    {
      /* Find a new namespace.  */
      for (nsid = 1; DL_NNS > 1 && nsid < GL(dl_nns); ++nsid)
	if (GL(dl_ns)[nsid]._ns_loaded == NULL)
	  break;

      if (__builtin_expect (nsid == DL_NNS, 0))
	{
	  /* No more namespaces available.  */
	  __rtld_lock_unlock_recursive (GL(dl_load_lock));

	  _dl_signal_error (EINVAL, file, NULL, N_("\
no more namespaces available for dlmopen()"));
	}
      else if (nsid == GL(dl_nns))
	{
	  __rtld_lock_initialize (GL(dl_ns)[nsid]._ns_unique_sym_table.lock);
	  ++GL(dl_nns);
	}

      _dl_debug_initialize (0, nsid)->r_state = RT_CONSISTENT;
    }
  /* Never allow loading a DSO in a namespace which is empty.  Such
     direct placement only causes problems.  Also don't allow
     loading into a namespace used for auditing.  */
  else if (__builtin_expect (nsid != LM_ID_BASE && nsid != __LM_ID_CALLER, 0)
	   && (GL(dl_ns)[nsid]._ns_nloaded == 0
	       || GL(dl_ns)[nsid]._ns_loaded->l_auditing))
    _dl_signal_error (EINVAL, file, NULL,
		      N_("invalid target namespace in dlmopen()"));
#ifndef SHARED
  else if ((nsid == LM_ID_BASE || nsid == __LM_ID_CALLER)
	   && GL(dl_ns)[LM_ID_BASE]._ns_loaded == NULL
	   && GL(dl_nns) == 0)
    GL(dl_nns) = 1;
#endif

  struct dl_open_args args;
  args.file = file;
  args.mode = mode;
  args.caller_dlopen = caller_dlopen;
  args.caller_dl_open = RETURN_ADDRESS (0);
  args.map = NULL;
  args.nsid = nsid;
  args.argc = argc;
  args.argv = argv;
  args.env = env;

  const char *objname;
  const char *errstring;
  bool malloced;
  int errcode = _dl_catch_error (&objname, &errstring, &malloced,
				 dl_open_worker, &args);

#if defined USE_LDCONFIG && !defined MAP_COPY
  /* We must unmap the cache file.  */
  _dl_unload_cache ();
#endif

  /* See if an error occurred during loading.  */
  if (__builtin_expect (errstring != NULL, 0))
    {
      /* Remove the object from memory.  It may be in an inconsistent
	 state if relocation failed, for example.  */
      if (args.map)
	{
	  /* Maybe some of the modules which were loaded use TLS.
	     Since it will be removed in the following _dl_close call
	     we have to mark the dtv array as having gaps to fill the
	     holes.  This is a pessimistic assumption which won't hurt
	     if not true.  There is no need to do this when we are
	     loading the auditing DSOs since TLS has not yet been set
	     up.  */
	  if ((mode & __RTLD_AUDIT) == 0)
	    GL(dl_tls_dtv_gaps) = true;

	  _dl_close_worker (args.map);
	}

      assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);

      /* Release the lock.  */
      __rtld_lock_unlock_recursive (GL(dl_load_lock));

      /* Make a local copy of the error string so that we can release the
	 memory allocated for it.  */
      size_t len_errstring = strlen (errstring) + 1;
      char *local_errstring;
      if (objname == errstring + len_errstring)
	{
	  size_t total_len = len_errstring + strlen (objname) + 1;
	  local_errstring = alloca (total_len);
	  memcpy (local_errstring, errstring, total_len);
	  objname = local_errstring + len_errstring;
	}
      else
	{
	  local_errstring = alloca (len_errstring);
	  memcpy (local_errstring, errstring, len_errstring);
	}

      if (malloced)
	free ((char *) errstring);

      /* Reraise the error.  */
      _dl_signal_error (errcode, objname, NULL, local_errstring);
    }

  assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);

  /* Release the lock.  */
  __rtld_lock_unlock_recursive (GL(dl_load_lock));

#ifndef SHARED
  DL_STATIC_INIT (args.map);
#endif

  return args.map;
}
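
/* Print the scope lists of map L, starting with scope number FROM.  This
   produces the output seen with LD_DEBUG=scopes.  */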

void
_dl_show_scope (struct link_map *l, int from)
{
  _dl_debug_printf ("object=%s [%lu]\n",
		    *l->l_name ? l->l_name : rtld_progname, l->l_ns);
  if (l->l_scope != NULL)
    for (int scope_cnt = from; l->l_scope[scope_cnt] != NULL; ++scope_cnt)
      {
	_dl_debug_printf (" scope %u:", scope_cnt);

	for (unsigned int cnt = 0; cnt < l->l_scope[scope_cnt]->r_nlist; ++cnt)
	  if (*l->l_scope[scope_cnt]->r_list[cnt]->l_name)
	    _dl_debug_printf_c (" %s",
				l->l_scope[scope_cnt]->r_list[cnt]->l_name);
	  else
	    _dl_debug_printf_c (" %s", rtld_progname);

	_dl_debug_printf_c ("\n");
      }
  else
    _dl_debug_printf (" no scope\n");
  _dl_debug_printf ("\n");
}

#ifdef IS_IN_rtld
/* Return non-zero if ADDR lies within one of L's segments.  */
int
internal_function
_dl_addr_inside_object (struct link_map *l, const ElfW(Addr) addr)
{
  int n = l->l_phnum;
  const ElfW(Addr) reladdr = addr - l->l_addr;

  while (--n >= 0)
    if (l->l_phdr[n].p_type == PT_LOAD
	&& reladdr - l->l_phdr[n].p_vaddr >= 0
	&& reladdr - l->l_phdr[n].p_vaddr < l->l_phdr[n].p_memsz)
      return 1;
  return 0;
}
#endif