Sort BZ # in NEWS
[glibc.git] / elf / dl-open.c
blob0bc447aae04f3a1704effca4f2a64f318f12a432
1 /* Load a shared object at runtime, relocate it, and run its initializer.
2 Copyright (C) 1996-2012 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <http://www.gnu.org/licenses/>. */
19 #include <assert.h>
20 #include <dlfcn.h>
21 #include <errno.h>
22 #include <libintl.h>
23 #include <stdio.h>
24 #include <stdlib.h>
25 #include <string.h>
26 #include <unistd.h>
27 #include <sys/mman.h> /* Check whether MAP_COPY is defined. */
28 #include <sys/param.h>
29 #include <bits/libc-lock.h>
30 #include <ldsodefs.h>
31 #include <bp-sym.h>
32 #include <caller.h>
33 #include <sysdep-cancel.h>
34 #include <tls.h>
35 #include <stap-probe.h>
36 #include <atomic.h>
38 #include <dl-dst.h>
41 extern ElfW(Addr) _dl_sysdep_start (void **start_argptr,
42 void (*dl_main) (const ElfW(Phdr) *phdr,
43 ElfW(Word) phnum,
44 ElfW(Addr) *user_entry,
45 ElfW(auxv_t) *auxv));
46 weak_extern (BP_SYM (_dl_sysdep_start))
48 extern int __libc_multiple_libcs; /* Defined in init-first.c. */
50 /* We must be careful not to leave us in an inconsistent state. Thus we
51 catch any error and re-raise it after cleaning up. */
/* Argument bundle handed from _dl_open to dl_open_worker through
   _dl_catch_error, so the worker runs with error trapping in place.  */
53 struct dl_open_args
/* Name of the object to open, as passed to dlopen/dlmopen.  */
55 const char *file;
/* RTLD_* flags plus internal __RTLD_* mode bits.  */
56 int mode;
57 /* This is the caller of the dlopen() function. */
58 const void *caller_dlopen;
59 /* This is the caller of _dl_open(). */
60 const void *caller_dl_open;
/* Set by the worker to the newly opened link map; _dl_open returns it
   (and closes it again if the worker signalled an error).  */
61 struct link_map *map;
62 /* Namespace ID. */
63 Lmid_t nsid;
64 /* Original parameters to the program and the current environment. */
65 int argc;
66 char **argv;
67 char **env;
/* Add NEW and every object in its search list that is not yet global
   to the global scope of NEW's namespace.  Runs while _dl_open holds
   dl_load_lock.  On allocation failure ENOMEM is raised through
   _dl_signal_error; the 'return 1' is only reached if that call ever
   returns.  Returns 0 on success.  */
71 static int
72 add_to_global (struct link_map *new)
74 struct link_map **new_global;
75 unsigned int to_add = 0;
76 unsigned int cnt;
78 /* Count the objects we have to put in the global scope. */
79 for (cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
80 if (new->l_searchlist.r_list[cnt]->l_global == 0)
81 ++to_add;
83 /* The symbols of the new objects and its dependencies are to be
84 introduced into the global scope that will be used to resolve
85 references from other dynamically-loaded objects.
87 The global scope is the searchlist in the main link map. We
88 extend this list if necessary. There is one problem though:
89 since this structure was allocated very early (before the libc
90 is loaded) the memory it uses is allocated by the malloc()-stub
91 in the ld.so. When we come here these functions are not used
92 anymore. Instead the malloc() implementation of the libc is
93 used. But this means the block from the main map cannot be used
94 in an realloc() call. Therefore we allocate a completely new
95 array the first time we have to add something to the locale scope. */
97 struct link_namespaces *ns = &GL(dl_ns)[new->l_ns];
98 if (ns->_ns_global_scope_alloc == 0)
100 /* This is the first dynamic object given global scope. */
101 ns->_ns_global_scope_alloc
102 = ns->_ns_main_searchlist->r_nlist + to_add + 8;
103 new_global = (struct link_map **)
104 malloc (ns->_ns_global_scope_alloc * sizeof (struct link_map *));
105 if (new_global == NULL)
107 ns->_ns_global_scope_alloc = 0;
108 nomem:
109 _dl_signal_error (ENOMEM, new->l_libname->name, NULL,
110 N_("cannot extend global scope"));
111 return 1;
114 /* Copy over the old entries. */
115 ns->_ns_main_searchlist->r_list
116 = memcpy (new_global, ns->_ns_main_searchlist->r_list,
117 (ns->_ns_main_searchlist->r_nlist
118 * sizeof (struct link_map *)));
120 else if (ns->_ns_main_searchlist->r_nlist + to_add
121 > ns->_ns_global_scope_alloc)
123 /* We have to extend the existing array of link maps in the
124 main map. */
125 struct link_map **old_global
126 = GL(dl_ns)[new->l_ns]._ns_main_searchlist->r_list;
127 size_t new_nalloc = ((ns->_ns_global_scope_alloc + to_add) * 2);
129 new_global = (struct link_map **)
130 malloc (new_nalloc * sizeof (struct link_map *));
131 if (new_global == NULL)
132 goto nomem;
134 memcpy (new_global, old_global,
135 ns->_ns_global_scope_alloc * sizeof (struct link_map *));
137 ns->_ns_global_scope_alloc = new_nalloc;
138 ns->_ns_main_searchlist->r_list = new_global;
/* Other threads may still be walking the old array inside their
   GSCOPE critical sections; wait until they have all left before
   freeing it.  */
140 if (!RTLD_SINGLE_THREAD_P)
141 THREAD_GSCOPE_WAIT ();
143 free (old_global);
146 /* Now add the new entries. */
147 unsigned int new_nlist = ns->_ns_main_searchlist->r_nlist;
148 for (cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
150 struct link_map *map = new->l_searchlist.r_list[cnt];
152 if (map->l_global == 0)
154 map->l_global = 1;
155 ns->_ns_main_searchlist->r_list[new_nlist++] = map;
157 /* We modify the global scope. Report this. */
158 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES, 0))
159 _dl_debug_printf ("\nadd %s [%lu] to global scope\n",
160 map->l_name, map->l_ns);
/* Publish the new slots before raising the visible count so a
   concurrent reader never indexes an uninitialized entry.  */
163 atomic_write_barrier ();
164 ns->_ns_main_searchlist->r_nlist = new_nlist;
166 return 0;
/* Worker for _dl_open, executed under _dl_catch_error so that any
   _dl_signal_error below unwinds back to _dl_open for cleanup.  It
   maps the requested object and its dependencies, checks symbol
   versions, relocates the not-yet-relocated maps in dependency order,
   sets up TLS for new modules, runs ELF initializers, and finally
   publishes the object in the global scope when RTLD_GLOBAL is set.  */
169 static void
170 dl_open_worker (void *a)
172 struct dl_open_args *args = a;
173 const char *file = args->file;
174 int mode = args->mode;
175 struct link_map *call_map = NULL;
177 /* Check whether _dl_open() has been called from a valid DSO. */
178 if (__check_caller (args->caller_dl_open,
179 allow_libc|allow_libdl|allow_ldso) != 0)
180 _dl_signal_error (0, "dlopen", NULL, N_("invalid caller"));
182 /* Determine the caller's map if necessary. This is needed in case
183 we have a DST, when we don't know the namespace ID we have to put
184 the new object in, or when the file name has no path in which
185 case we need to look along the RUNPATH/RPATH of the caller. */
186 const char *dst = strchr (file, '$');
187 if (dst != NULL || args->nsid == __LM_ID_CALLER
188 || strchr (file, '/') == NULL)
190 const void *caller_dlopen = args->caller_dlopen;
192 /* We have to find out from which object the caller is calling.
193 By default we assume this is the main application. */
194 call_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
196 struct link_map *l;
/* Scan every namespace for the map whose address range contains the
   caller's return address; the goto exits both nested loops.  */
197 for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
198 for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
199 if (caller_dlopen >= (const void *) l->l_map_start
200 && caller_dlopen < (const void *) l->l_map_end
201 && (l->l_contiguous
202 || _dl_addr_inside_object (l, (ElfW(Addr)) caller_dlopen)))
204 assert (ns == l->l_ns);
205 call_map = l;
206 goto found_caller;
209 found_caller:
210 if (args->nsid == __LM_ID_CALLER)
212 #ifndef SHARED
213 /* In statically linked apps there might be no loaded object. */
214 if (call_map == NULL)
215 args->nsid = LM_ID_BASE;
216 else
217 #endif
218 args->nsid = call_map->l_ns;
222 assert (_dl_debug_initialize (0, args->nsid)->r_state == RT_CONSISTENT);
224 /* Load the named object. */
225 struct link_map *new;
226 args->map = new = _dl_map_object (call_map, file, lt_loaded, 0,
227 mode | __RTLD_CALLMAP, args->nsid);
229 /* If the pointer returned is NULL this means the RTLD_NOLOAD flag is
230 set and the object is not already loaded. */
231 if (new == NULL)
233 assert (mode & RTLD_NOLOAD);
234 return;
237 if (__builtin_expect (mode & __RTLD_SPROF, 0))
238 /* This happens only if we load a DSO for 'sprof'. */
239 return;
241 /* This object is directly loaded. */
242 ++new->l_direct_opencount;
244 /* It was already open. */
245 if (__builtin_expect (new->l_searchlist.r_list != NULL, 0))
247 /* Let the user know about the opencount. */
248 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
249 _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
250 new->l_name, new->l_ns, new->l_direct_opencount);
252 /* If the user requested the object to be in the global namespace
253 but it is not so far, add it now. */
/* add_to_global signals any failure itself, so the result can be
   discarded here.  */
254 if ((mode & RTLD_GLOBAL) && new->l_global == 0)
255 (void) add_to_global (new);
257 assert (_dl_debug_initialize (0, args->nsid)->r_state == RT_CONSISTENT);
259 return;
262 /* Load that object's dependencies. */
263 _dl_map_object_deps (new, NULL, 0, 0,
264 mode & (__RTLD_DLOPEN | RTLD_DEEPBIND | __RTLD_AUDIT));
266 /* So far, so good. Now check the versions. */
267 for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
268 if (new->l_searchlist.r_list[i]->l_real->l_versions == NULL)
269 (void) _dl_check_map_versions (new->l_searchlist.r_list[i]->l_real,
270 0, 0);
272 #ifdef SHARED
273 /* Auditing checkpoint: we have added all objects. */
274 if (__builtin_expect (GLRO(dl_naudit) > 0, 0))
276 struct link_map *head = GL(dl_ns)[new->l_ns]._ns_loaded;
277 /* Do not call the functions for any auditing object. */
278 if (head->l_auditing == 0)
280 struct audit_ifaces *afct = GLRO(dl_audit);
281 for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
283 if (afct->activity != NULL)
284 afct->activity (&head->l_audit[cnt].cookie, LA_ACT_CONSISTENT);
286 afct = afct->next;
290 #endif
292 /* Notify the debugger all new objects are now ready to go. */
293 struct r_debug *r = _dl_debug_initialize (0, args->nsid);
294 r->r_state = RT_CONSISTENT;
295 _dl_debug_state ();
296 LIBC_PROBE (map_complete, 3, args->nsid, r, new);
298 /* Print scope information. */
299 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES, 0))
300 _dl_show_scope (new, 0);
302 /* Only do lazy relocation if `LD_BIND_NOW' is not set. */
303 int reloc_mode = mode & __RTLD_AUDIT;
304 if (GLRO(dl_lazy))
305 reloc_mode |= mode & RTLD_LAZY;
307 /* Sort the objects by dependency for the relocation process. This
308 allows IFUNC relocations to work and it also means copy
309 relocation of dependencies are if necessary overwritten. */
/* First pass: count the maps that still need relocation.  */
310 size_t nmaps = 0;
311 struct link_map *l = new;
314 if (! l->l_real->l_relocated)
315 ++nmaps;
316 l = l->l_next;
318 while (l != NULL);
/* Second pass: collect those maps into a stack VLA for sorting.  */
319 struct link_map *maps[nmaps];
320 nmaps = 0;
321 l = new;
324 if (! l->l_real->l_relocated)
325 maps[nmaps++] = l;
326 l = l->l_next;
328 while (l != NULL);
329 if (nmaps > 1)
331 uint16_t seen[nmaps];
/* NOTE(review): SEEN has uint16_t elements but only NMAPS bytes are
   cleared here, leaving the upper half uninitialized; the memset at
   'next_clear' below multiplies by sizeof (seen[0]).  Looks like this
   one should too -- confirm against upstream.  */
332 memset (seen, '\0', nmaps);
333 size_t i = 0;
334 while (1)
336 ++seen[i];
337 struct link_map *thisp = maps[i];
339 /* Find the last object in the list for which the current one is
340 a dependency and move the current object behind the object
341 with the dependency. */
342 size_t k = nmaps - 1;
343 while (k > i)
345 struct link_map **runp = maps[k]->l_initfini;
346 if (runp != NULL)
347 /* Look through the dependencies of the object. */
348 while (*runp != NULL)
349 if (__builtin_expect (*runp++ == thisp, 0))
351 /* Move the current object to the back past the last
352 object with it as the dependency. */
353 memmove (&maps[i], &maps[i + 1],
354 (k - i) * sizeof (maps[0]));
355 maps[k] = thisp;
/* SEEN counts how often each slot has been revisited; when the next
   candidate has already been pushed back too often, assume a
   dependency cycle and simply advance instead of looping forever.  */
357 if (seen[i + 1] > nmaps - i)
359 ++i;
360 goto next_clear;
363 uint16_t this_seen = seen[i];
364 memmove (&seen[i], &seen[i + 1],
365 (k - i) * sizeof (seen[0]));
366 seen[k] = this_seen;
368 goto next;
371 --k;
374 if (++i == nmaps)
375 break;
376 next_clear:
377 memset (&seen[i], 0, (nmaps - i) * sizeof (seen[0]));
378 next:;
382 int relocation_in_progress = 0;
/* Relocate in reverse order so dependencies are processed before the
   objects that need them.  */
384 for (size_t i = nmaps; i-- > 0; )
386 l = maps[i];
388 if (! relocation_in_progress)
390 /* Notify the debugger that relocations are about to happen. */
391 LIBC_PROBE (reloc_start, 2, args->nsid, r);
392 relocation_in_progress = 1;
395 #ifdef SHARED
396 if (__builtin_expect (GLRO(dl_profile) != NULL, 0))
398 /* If this here is the shared object which we want to profile
399 make sure the profile is started. We can find out whether
400 this is necessary or not by observing the `_dl_profile_map'
401 variable. If it was NULL but is not NULL afterwars we must
402 start the profiling. */
403 struct link_map *old_profile_map = GL(dl_profile_map);
405 _dl_relocate_object (l, l->l_scope, reloc_mode | RTLD_LAZY, 1);
407 if (old_profile_map == NULL && GL(dl_profile_map) != NULL)
409 /* We must prepare the profiling. */
410 _dl_start_profile ();
412 /* Prevent unloading the object. */
413 GL(dl_profile_map)->l_flags_1 |= DF_1_NODELETE;
416 else
417 #endif
418 _dl_relocate_object (l, l->l_scope, reloc_mode, 0);
421 /* If the file is not loaded now as a dependency, add the search
422 list of the newly loaded object to the scope. */
423 bool any_tls = false;
424 unsigned int first_static_tls = new->l_searchlist.r_nlist;
425 for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
427 struct link_map *imap = new->l_searchlist.r_list[i];
428 int from_scope = 0;
430 /* If the initializer has been called already, the object has
431 not been loaded here and now. */
432 if (imap->l_init_called && imap->l_type == lt_loaded)
434 struct r_scope_elem **runp = imap->l_scope;
435 size_t cnt = 0;
437 while (*runp != NULL)
439 if (*runp == &new->l_searchlist)
440 break;
441 ++cnt;
442 ++runp;
445 if (*runp != NULL)
446 /* Avoid duplicates. */
447 continue;
449 if (__builtin_expect (cnt + 1 >= imap->l_scope_max, 0))
451 /* The 'r_scope' array is too small. Allocate a new one
452 dynamically. */
453 size_t new_size;
454 struct r_scope_elem **newp;
456 #define SCOPE_ELEMS(imap) \
457 (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))
459 if (imap->l_scope != imap->l_scope_mem
460 && imap->l_scope_max < SCOPE_ELEMS (imap))
462 new_size = SCOPE_ELEMS (imap);
463 newp = imap->l_scope_mem;
465 else
467 new_size = imap->l_scope_max * 2;
468 newp = (struct r_scope_elem **)
469 malloc (new_size * sizeof (struct r_scope_elem *));
470 if (newp == NULL)
471 _dl_signal_error (ENOMEM, "dlopen", NULL,
472 N_("cannot create scope list"));
475 memcpy (newp, imap->l_scope, cnt * sizeof (imap->l_scope[0]));
476 struct r_scope_elem **old = imap->l_scope;
478 imap->l_scope = newp;
/* Concurrent symbol lookups may still be using the old array, so it
   is released through _dl_scope_free rather than plain free.  */
480 if (old != imap->l_scope_mem)
481 _dl_scope_free (old);
483 imap->l_scope_max = new_size;
486 /* First terminate the extended list. Otherwise a thread
487 might use the new last element and then use the garbage
488 at offset IDX+1. */
489 imap->l_scope[cnt + 1] = NULL;
490 atomic_write_barrier ();
491 imap->l_scope[cnt] = &new->l_searchlist;
493 /* Print only new scope information. */
494 from_scope = cnt;
496 /* Only add TLS memory if this object is loaded now and
497 therefore is not yet initialized. */
498 else if (! imap->l_init_called
499 /* Only if the module defines thread local data. */
500 && __builtin_expect (imap->l_tls_blocksize > 0, 0))
502 /* Now that we know the object is loaded successfully add
503 modules containing TLS data to the slot info table. We
504 might have to increase its size. */
505 _dl_add_to_slotinfo (imap);
/* Remember the first module that needs static TLS set up for the
   second pass below.  */
507 if (imap->l_need_tls_init
508 && first_static_tls == new->l_searchlist.r_nlist)
509 first_static_tls = i;
511 /* We have to bump the generation counter. */
512 any_tls = true;
515 /* Print scope information. */
516 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES, 0))
517 _dl_show_scope (imap, from_scope);
520 /* Bump the generation number if necessary. */
521 if (any_tls && __builtin_expect (++GL(dl_tls_generation) == 0, 0))
522 _dl_fatal_printf (N_("\
523 TLS generation counter wrapped! Please report this."));
525 /* We need a second pass for static tls data, because _dl_update_slotinfo
526 must not be run while calls to _dl_add_to_slotinfo are still pending. */
527 for (unsigned int i = first_static_tls; i < new->l_searchlist.r_nlist; ++i)
529 struct link_map *imap = new->l_searchlist.r_list[i];
531 if (imap->l_need_tls_init
532 && ! imap->l_init_called
533 && imap->l_tls_blocksize > 0)
535 /* For static TLS we have to allocate the memory here and
536 now. This includes allocating memory in the DTV. But we
537 cannot change any DTV other than our own. So, if we
538 cannot guarantee that there is room in the DTV we don't
539 even try it and fail the load.
541 XXX We could track the minimum DTV slots allocated in
542 all threads. */
543 if (! RTLD_SINGLE_THREAD_P && imap->l_tls_modid > DTV_SURPLUS)
544 _dl_signal_error (0, "dlopen", NULL, N_("\
545 cannot load any more object with static TLS"));
547 imap->l_need_tls_init = 0;
548 #ifdef SHARED
549 /* Update the slot information data for at least the
550 generation of the DSO we are allocating data for. */
551 _dl_update_slotinfo (imap->l_tls_modid);
552 #endif
554 GL(dl_init_static_tls) (imap);
555 assert (imap->l_need_tls_init == 0);
559 /* Notify the debugger all new objects have been relocated. */
560 if (relocation_in_progress)
561 LIBC_PROBE (reloc_complete, 3, args->nsid, r, new);
563 /* Run the initializer functions of new objects. */
564 _dl_init (new, args->argc, args->argv, args->env);
566 /* Now we can make the new map available in the global scope. */
567 if (mode & RTLD_GLOBAL)
568 /* Move the object in the global namespace. */
569 if (add_to_global (new) != 0)
570 /* It failed. */
571 return;
573 /* Mark the object as not deletable if the RTLD_NODELETE flags was
574 passed. */
575 if (__builtin_expect (mode & RTLD_NODELETE, 0))
576 new->l_flags_1 |= DF_1_NODELETE;
578 #ifndef SHARED
579 /* We must be the static _dl_open in libc.a. A static program that
580 has loaded a dynamic object now has competition. */
581 __libc_multiple_libcs = 1;
582 #endif
584 /* Let the user know about the opencount. */
585 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
586 _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
587 new->l_name, new->l_ns, new->l_direct_opencount);
/* Implementation behind dlopen/dlmopen: validate MODE, serialize all
   loading with dl_load_lock, resolve or allocate the target namespace
   NSID, then run dl_open_worker under _dl_catch_error.  On failure the
   partially loaded object is closed again and the error is re-raised;
   on success the new link map is returned.  */
591 void *
592 _dl_open (const char *file, int mode, const void *caller_dlopen, Lmid_t nsid,
593 int argc, char *argv[], char *env[])
595 if ((mode & RTLD_BINDING_MASK) == 0)
596 /* One of the flags must be set. */
597 _dl_signal_error (EINVAL, file, NULL, N_("invalid mode for dlopen()"));
599 /* Make sure we are alone. */
600 __rtld_lock_lock_recursive (GL(dl_load_lock));
602 if (__builtin_expect (nsid == LM_ID_NEWLM, 0))
604 /* Find a new namespace. */
605 for (nsid = 1; DL_NNS > 1 && nsid < GL(dl_nns); ++nsid)
606 if (GL(dl_ns)[nsid]._ns_loaded == NULL)
607 break;
609 if (__builtin_expect (nsid == DL_NNS, 0))
611 /* No more namespace available. */
612 __rtld_lock_unlock_recursive (GL(dl_load_lock));
614 _dl_signal_error (EINVAL, file, NULL, N_("\
615 no more namespaces available for dlmopen()"));
617 else if (nsid == GL(dl_nns))
619 __rtld_lock_initialize (GL(dl_ns)[nsid]._ns_unique_sym_table.lock);
620 ++GL(dl_nns);
623 _dl_debug_initialize (0, nsid)->r_state = RT_CONSISTENT;
625 /* Never allow loading a DSO in a namespace which is empty. Such
626 direct placements is only causing problems. Also don't allow
627 loading into a namespace used for auditing. */
628 else if (__builtin_expect (nsid != LM_ID_BASE && nsid != __LM_ID_CALLER, 0)
629 && (GL(dl_ns)[nsid]._ns_nloaded == 0
630 || GL(dl_ns)[nsid]._ns_loaded->l_auditing))
631 _dl_signal_error (EINVAL, file, NULL,
632 N_("invalid target namespace in dlmopen()"));
633 #ifndef SHARED
634 else if ((nsid == LM_ID_BASE || nsid == __LM_ID_CALLER)
635 && GL(dl_ns)[LM_ID_BASE]._ns_loaded == NULL
636 && GL(dl_nns) == 0)
637 GL(dl_nns) = 1;
638 #endif
640 struct dl_open_args args;
641 args.file = file;
642 args.mode = mode;
643 args.caller_dlopen = caller_dlopen;
644 args.caller_dl_open = RETURN_ADDRESS (0);
645 args.map = NULL;
646 args.nsid = nsid;
647 args.argc = argc;
648 args.argv = argv;
649 args.env = env;
/* Run the worker with error trapping; any _dl_signal_error inside it
   lands back here with ERRSTRING/OBJNAME describing the failure.  */
651 const char *objname;
652 const char *errstring;
653 bool malloced;
654 int errcode = _dl_catch_error (&objname, &errstring, &malloced,
655 dl_open_worker, &args);
657 #ifndef MAP_COPY
658 /* We must munmap() the cache file. */
659 _dl_unload_cache ();
660 #endif
662 /* See if an error occurred during loading. */
663 if (__builtin_expect (errstring != NULL, 0))
665 /* Remove the object from memory. It may be in an inconsistent
666 state if relocation failed, for example. */
667 if (args.map)
669 /* Maybe some of the modules which were loaded use TLS.
670 Since it will be removed in the following _dl_close call
671 we have to mark the dtv array as having gaps to fill the
672 holes. This is a pessimistic assumption which won't hurt
673 if not true. There is no need to do this when we are
674 loading the auditing DSOs since TLS has not yet been set
675 up. */
676 if ((mode & __RTLD_AUDIT) == 0)
677 GL(dl_tls_dtv_gaps) = true;
679 _dl_close_worker (args.map);
682 assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);
684 /* Release the lock. */
685 __rtld_lock_unlock_recursive (GL(dl_load_lock));
687 /* Make a local copy of the error string so that we can release the
688 memory allocated for it. */
689 size_t len_errstring = strlen (errstring) + 1;
690 char *local_errstring;
/* When OBJNAME sits directly behind ERRSTRING it is part of the same
   allocation -- presumably the layout produced by _dl_catch_error --
   so both strings are copied in one memcpy.  */
691 if (objname == errstring + len_errstring)
693 size_t total_len = len_errstring + strlen (objname) + 1;
694 local_errstring = alloca (total_len);
695 memcpy (local_errstring, errstring, total_len);
696 objname = local_errstring + len_errstring;
698 else
700 local_errstring = alloca (len_errstring);
701 memcpy (local_errstring, errstring, len_errstring);
704 if (malloced)
705 free ((char *) errstring);
707 /* Reraise the error. */
708 _dl_signal_error (errcode, objname, NULL, local_errstring);
711 assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);
713 /* Release the lock. */
714 __rtld_lock_unlock_recursive (GL(dl_load_lock));
716 #ifndef SHARED
717 DL_STATIC_INIT (args.map);
718 #endif
720 return args.map;
724 void
725 _dl_show_scope (struct link_map *l, int from)
727 _dl_debug_printf ("object=%s [%lu]\n",
728 *l->l_name ? l->l_name : rtld_progname, l->l_ns);
729 if (l->l_scope != NULL)
730 for (int scope_cnt = from; l->l_scope[scope_cnt] != NULL; ++scope_cnt)
732 _dl_debug_printf (" scope %u:", scope_cnt);
734 for (unsigned int cnt = 0; cnt < l->l_scope[scope_cnt]->r_nlist; ++cnt)
735 if (*l->l_scope[scope_cnt]->r_list[cnt]->l_name)
736 _dl_debug_printf_c (" %s",
737 l->l_scope[scope_cnt]->r_list[cnt]->l_name);
738 else
739 _dl_debug_printf_c (" %s", rtld_progname);
741 _dl_debug_printf_c ("\n");
743 else
744 _dl_debug_printf (" no scope\n");
745 _dl_debug_printf ("\n");
748 #ifdef IS_IN_rtld
749 /* Return non-zero if ADDR lies within one of L's segments. */
751 internal_function
752 _dl_addr_inside_object (struct link_map *l, const ElfW(Addr) addr)
754 int n = l->l_phnum;
755 const ElfW(Addr) reladdr = addr - l->l_addr;
757 while (--n >= 0)
758 if (l->l_phdr[n].p_type == PT_LOAD
759 && reladdr - l->l_phdr[n].p_vaddr >= 0
760 && reladdr - l->l_phdr[n].p_vaddr < l->l_phdr[n].p_memsz)
761 return 1;
762 return 0;
764 #endif