/* Fix BZ #16634.
   [glibc.git] / elf / dl-open.c
   blob 7cc4cc16e196dd0cc4073eba07f01d247ae29635  */
/* Load a shared object at runtime, relocate it, and run its initializer.
   Copyright (C) 1996-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
19 #include <assert.h>
20 #include <dlfcn.h>
21 #include <errno.h>
22 #include <libintl.h>
23 #include <stdio.h>
24 #include <stdlib.h>
25 #include <string.h>
26 #include <unistd.h>
27 #include <sys/mman.h> /* Check whether MAP_COPY is defined. */
28 #include <sys/param.h>
29 #include <bits/libc-lock.h>
30 #include <ldsodefs.h>
31 #include <caller.h>
32 #include <sysdep-cancel.h>
33 #include <tls.h>
34 #include <stap-probe.h>
35 #include <atomic.h>
37 #include <dl-dst.h>
40 extern int __libc_multiple_libcs; /* Defined in init-first.c. */
42 /* We must be careful not to leave us in an inconsistent state. Thus we
43 catch any error and re-raise it after cleaning up. */
45 struct dl_open_args
47 const char *file;
48 int mode;
49 /* This is the caller of the dlopen() function. */
50 const void *caller_dlopen;
51 /* This is the caller of _dl_open(). */
52 const void *caller_dl_open;
53 struct link_map *map;
54 /* Namespace ID. */
55 Lmid_t nsid;
56 /* Original parameters to the program and the current environment. */
57 int argc;
58 char **argv;
59 char **env;
63 static int
64 add_to_global (struct link_map *new)
66 struct link_map **new_global;
67 unsigned int to_add = 0;
68 unsigned int cnt;
70 /* Count the objects we have to put in the global scope. */
71 for (cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
72 if (new->l_searchlist.r_list[cnt]->l_global == 0)
73 ++to_add;
75 /* The symbols of the new objects and its dependencies are to be
76 introduced into the global scope that will be used to resolve
77 references from other dynamically-loaded objects.
79 The global scope is the searchlist in the main link map. We
80 extend this list if necessary. There is one problem though:
81 since this structure was allocated very early (before the libc
82 is loaded) the memory it uses is allocated by the malloc()-stub
83 in the ld.so. When we come here these functions are not used
84 anymore. Instead the malloc() implementation of the libc is
85 used. But this means the block from the main map cannot be used
86 in an realloc() call. Therefore we allocate a completely new
87 array the first time we have to add something to the locale scope. */
89 struct link_namespaces *ns = &GL(dl_ns)[new->l_ns];
90 if (ns->_ns_global_scope_alloc == 0)
92 /* This is the first dynamic object given global scope. */
93 ns->_ns_global_scope_alloc
94 = ns->_ns_main_searchlist->r_nlist + to_add + 8;
95 new_global = (struct link_map **)
96 malloc (ns->_ns_global_scope_alloc * sizeof (struct link_map *));
97 if (new_global == NULL)
99 ns->_ns_global_scope_alloc = 0;
100 nomem:
101 _dl_signal_error (ENOMEM, new->l_libname->name, NULL,
102 N_("cannot extend global scope"));
103 return 1;
106 /* Copy over the old entries. */
107 ns->_ns_main_searchlist->r_list
108 = memcpy (new_global, ns->_ns_main_searchlist->r_list,
109 (ns->_ns_main_searchlist->r_nlist
110 * sizeof (struct link_map *)));
112 else if (ns->_ns_main_searchlist->r_nlist + to_add
113 > ns->_ns_global_scope_alloc)
115 /* We have to extend the existing array of link maps in the
116 main map. */
117 struct link_map **old_global
118 = GL(dl_ns)[new->l_ns]._ns_main_searchlist->r_list;
119 size_t new_nalloc = ((ns->_ns_global_scope_alloc + to_add) * 2);
121 new_global = (struct link_map **)
122 malloc (new_nalloc * sizeof (struct link_map *));
123 if (new_global == NULL)
124 goto nomem;
126 memcpy (new_global, old_global,
127 ns->_ns_global_scope_alloc * sizeof (struct link_map *));
129 ns->_ns_global_scope_alloc = new_nalloc;
130 ns->_ns_main_searchlist->r_list = new_global;
132 if (!RTLD_SINGLE_THREAD_P)
133 THREAD_GSCOPE_WAIT ();
135 free (old_global);
138 /* Now add the new entries. */
139 unsigned int new_nlist = ns->_ns_main_searchlist->r_nlist;
140 for (cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
142 struct link_map *map = new->l_searchlist.r_list[cnt];
144 if (map->l_global == 0)
146 map->l_global = 1;
147 ns->_ns_main_searchlist->r_list[new_nlist++] = map;
149 /* We modify the global scope. Report this. */
150 if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
151 _dl_debug_printf ("\nadd %s [%lu] to global scope\n",
152 map->l_name, map->l_ns);
155 atomic_write_barrier ();
156 ns->_ns_main_searchlist->r_nlist = new_nlist;
158 return 0;
161 /* Search link maps in all namespaces for the DSO that contains the object at
162 address ADDR. Returns the pointer to the link map of the matching DSO, or
163 NULL if a match is not found. */
164 struct link_map *
165 internal_function
166 _dl_find_dso_for_object (const ElfW(Addr) addr)
168 struct link_map *l;
170 /* Find the highest-addressed object that ADDR is not below. */
171 for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
172 for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
173 if (addr >= l->l_map_start && addr < l->l_map_end
174 && (l->l_contiguous
175 || _dl_addr_inside_object (l, (ElfW(Addr)) addr)))
177 assert (ns == l->l_ns);
178 return l;
180 return NULL;
182 rtld_hidden_def (_dl_find_dso_for_object);
184 static void
185 dl_open_worker (void *a)
187 struct dl_open_args *args = a;
188 const char *file = args->file;
189 int mode = args->mode;
190 struct link_map *call_map = NULL;
192 /* Check whether _dl_open() has been called from a valid DSO. */
193 if (__check_caller (args->caller_dl_open,
194 allow_libc|allow_libdl|allow_ldso) != 0)
195 _dl_signal_error (0, "dlopen", NULL, N_("invalid caller"));
197 /* Determine the caller's map if necessary. This is needed in case
198 we have a DST, when we don't know the namespace ID we have to put
199 the new object in, or when the file name has no path in which
200 case we need to look along the RUNPATH/RPATH of the caller. */
201 const char *dst = strchr (file, '$');
202 if (dst != NULL || args->nsid == __LM_ID_CALLER
203 || strchr (file, '/') == NULL)
205 const void *caller_dlopen = args->caller_dlopen;
207 /* We have to find out from which object the caller is calling.
208 By default we assume this is the main application. */
209 call_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
211 struct link_map *l = _dl_find_dso_for_object ((ElfW(Addr)) caller_dlopen);
213 if (l)
214 call_map = l;
216 if (args->nsid == __LM_ID_CALLER)
217 args->nsid = call_map->l_ns;
220 assert (_dl_debug_initialize (0, args->nsid)->r_state == RT_CONSISTENT);
222 /* Load the named object. */
223 struct link_map *new;
224 args->map = new = _dl_map_object (call_map, file, lt_loaded, 0,
225 mode | __RTLD_CALLMAP, args->nsid);
227 /* If the pointer returned is NULL this means the RTLD_NOLOAD flag is
228 set and the object is not already loaded. */
229 if (new == NULL)
231 assert (mode & RTLD_NOLOAD);
232 return;
235 if (__glibc_unlikely (mode & __RTLD_SPROF))
236 /* This happens only if we load a DSO for 'sprof'. */
237 return;
239 /* This object is directly loaded. */
240 ++new->l_direct_opencount;
242 /* It was already open. */
243 if (__glibc_unlikely (new->l_searchlist.r_list != NULL))
245 /* Let the user know about the opencount. */
246 if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
247 _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
248 new->l_name, new->l_ns, new->l_direct_opencount);
250 /* If the user requested the object to be in the global namespace
251 but it is not so far, add it now. */
252 if ((mode & RTLD_GLOBAL) && new->l_global == 0)
253 (void) add_to_global (new);
255 assert (_dl_debug_initialize (0, args->nsid)->r_state == RT_CONSISTENT);
257 return;
260 /* Load that object's dependencies. */
261 _dl_map_object_deps (new, NULL, 0, 0,
262 mode & (__RTLD_DLOPEN | RTLD_DEEPBIND | __RTLD_AUDIT));
264 /* So far, so good. Now check the versions. */
265 for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
266 if (new->l_searchlist.r_list[i]->l_real->l_versions == NULL)
267 (void) _dl_check_map_versions (new->l_searchlist.r_list[i]->l_real,
268 0, 0);
270 #ifdef SHARED
271 /* Auditing checkpoint: we have added all objects. */
272 if (__glibc_unlikely (GLRO(dl_naudit) > 0))
274 struct link_map *head = GL(dl_ns)[new->l_ns]._ns_loaded;
275 /* Do not call the functions for any auditing object. */
276 if (head->l_auditing == 0)
278 struct audit_ifaces *afct = GLRO(dl_audit);
279 for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
281 if (afct->activity != NULL)
282 afct->activity (&head->l_audit[cnt].cookie, LA_ACT_CONSISTENT);
284 afct = afct->next;
288 #endif
290 /* Notify the debugger all new objects are now ready to go. */
291 struct r_debug *r = _dl_debug_initialize (0, args->nsid);
292 r->r_state = RT_CONSISTENT;
293 _dl_debug_state ();
294 LIBC_PROBE (map_complete, 3, args->nsid, r, new);
296 /* Print scope information. */
297 if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
298 _dl_show_scope (new, 0);
300 /* Only do lazy relocation if `LD_BIND_NOW' is not set. */
301 int reloc_mode = mode & __RTLD_AUDIT;
302 if (GLRO(dl_lazy))
303 reloc_mode |= mode & RTLD_LAZY;
305 /* Sort the objects by dependency for the relocation process. This
306 allows IFUNC relocations to work and it also means copy
307 relocation of dependencies are if necessary overwritten. */
308 size_t nmaps = 0;
309 struct link_map *l = new;
312 if (! l->l_real->l_relocated)
313 ++nmaps;
314 l = l->l_next;
316 while (l != NULL);
317 struct link_map *maps[nmaps];
318 nmaps = 0;
319 l = new;
322 if (! l->l_real->l_relocated)
323 maps[nmaps++] = l;
324 l = l->l_next;
326 while (l != NULL);
327 if (nmaps > 1)
329 uint16_t seen[nmaps];
330 memset (seen, '\0', sizeof (seen));
331 size_t i = 0;
332 while (1)
334 ++seen[i];
335 struct link_map *thisp = maps[i];
337 /* Find the last object in the list for which the current one is
338 a dependency and move the current object behind the object
339 with the dependency. */
340 size_t k = nmaps - 1;
341 while (k > i)
343 struct link_map **runp = maps[k]->l_initfini;
344 if (runp != NULL)
345 /* Look through the dependencies of the object. */
346 while (*runp != NULL)
347 if (__glibc_unlikely (*runp++ == thisp))
349 /* Move the current object to the back past the last
350 object with it as the dependency. */
351 memmove (&maps[i], &maps[i + 1],
352 (k - i) * sizeof (maps[0]));
353 maps[k] = thisp;
355 if (seen[i + 1] > nmaps - i)
357 ++i;
358 goto next_clear;
361 uint16_t this_seen = seen[i];
362 memmove (&seen[i], &seen[i + 1],
363 (k - i) * sizeof (seen[0]));
364 seen[k] = this_seen;
366 goto next;
369 --k;
372 if (++i == nmaps)
373 break;
374 next_clear:
375 memset (&seen[i], 0, (nmaps - i) * sizeof (seen[0]));
376 next:;
380 int relocation_in_progress = 0;
382 for (size_t i = nmaps; i-- > 0; )
384 l = maps[i];
386 if (! relocation_in_progress)
388 /* Notify the debugger that relocations are about to happen. */
389 LIBC_PROBE (reloc_start, 2, args->nsid, r);
390 relocation_in_progress = 1;
393 #ifdef SHARED
394 if (__glibc_unlikely (GLRO(dl_profile) != NULL))
396 /* If this here is the shared object which we want to profile
397 make sure the profile is started. We can find out whether
398 this is necessary or not by observing the `_dl_profile_map'
399 variable. If it was NULL but is not NULL afterwards we must
400 start the profiling. */
401 struct link_map *old_profile_map = GL(dl_profile_map);
403 _dl_relocate_object (l, l->l_scope, reloc_mode | RTLD_LAZY, 1);
405 if (old_profile_map == NULL && GL(dl_profile_map) != NULL)
407 /* We must prepare the profiling. */
408 _dl_start_profile ();
410 /* Prevent unloading the object. */
411 GL(dl_profile_map)->l_flags_1 |= DF_1_NODELETE;
414 else
415 #endif
416 _dl_relocate_object (l, l->l_scope, reloc_mode, 0);
419 /* If the file is not loaded now as a dependency, add the search
420 list of the newly loaded object to the scope. */
421 bool any_tls = false;
422 unsigned int first_static_tls = new->l_searchlist.r_nlist;
423 for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
425 struct link_map *imap = new->l_searchlist.r_list[i];
426 int from_scope = 0;
428 /* If the initializer has been called already, the object has
429 not been loaded here and now. */
430 if (imap->l_init_called && imap->l_type == lt_loaded)
432 struct r_scope_elem **runp = imap->l_scope;
433 size_t cnt = 0;
435 while (*runp != NULL)
437 if (*runp == &new->l_searchlist)
438 break;
439 ++cnt;
440 ++runp;
443 if (*runp != NULL)
444 /* Avoid duplicates. */
445 continue;
447 if (__glibc_unlikely (cnt + 1 >= imap->l_scope_max))
449 /* The 'r_scope' array is too small. Allocate a new one
450 dynamically. */
451 size_t new_size;
452 struct r_scope_elem **newp;
454 #define SCOPE_ELEMS(imap) \
455 (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))
457 if (imap->l_scope != imap->l_scope_mem
458 && imap->l_scope_max < SCOPE_ELEMS (imap))
460 new_size = SCOPE_ELEMS (imap);
461 newp = imap->l_scope_mem;
463 else
465 new_size = imap->l_scope_max * 2;
466 newp = (struct r_scope_elem **)
467 malloc (new_size * sizeof (struct r_scope_elem *));
468 if (newp == NULL)
469 _dl_signal_error (ENOMEM, "dlopen", NULL,
470 N_("cannot create scope list"));
473 memcpy (newp, imap->l_scope, cnt * sizeof (imap->l_scope[0]));
474 struct r_scope_elem **old = imap->l_scope;
476 imap->l_scope = newp;
478 if (old != imap->l_scope_mem)
479 _dl_scope_free (old);
481 imap->l_scope_max = new_size;
484 /* First terminate the extended list. Otherwise a thread
485 might use the new last element and then use the garbage
486 at offset IDX+1. */
487 imap->l_scope[cnt + 1] = NULL;
488 atomic_write_barrier ();
489 imap->l_scope[cnt] = &new->l_searchlist;
491 /* Print only new scope information. */
492 from_scope = cnt;
494 /* Only add TLS memory if this object is loaded now and
495 therefore is not yet initialized. */
496 else if (! imap->l_init_called
497 /* Only if the module defines thread local data. */
498 && __builtin_expect (imap->l_tls_blocksize > 0, 0))
500 /* Now that we know the object is loaded successfully add
501 modules containing TLS data to the slot info table. We
502 might have to increase its size. */
503 _dl_add_to_slotinfo (imap);
505 if (imap->l_need_tls_init
506 && first_static_tls == new->l_searchlist.r_nlist)
507 first_static_tls = i;
509 /* We have to bump the generation counter. */
510 any_tls = true;
513 /* Print scope information. */
514 if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
515 _dl_show_scope (imap, from_scope);
518 /* Bump the generation number if necessary. */
519 if (any_tls && __builtin_expect (++GL(dl_tls_generation) == 0, 0))
520 _dl_fatal_printf (N_("\
521 TLS generation counter wrapped! Please report this."));
523 /* We need a second pass for static tls data, because _dl_update_slotinfo
524 must not be run while calls to _dl_add_to_slotinfo are still pending. */
525 for (unsigned int i = first_static_tls; i < new->l_searchlist.r_nlist; ++i)
527 struct link_map *imap = new->l_searchlist.r_list[i];
529 if (imap->l_need_tls_init
530 && ! imap->l_init_called
531 && imap->l_tls_blocksize > 0)
533 /* For static TLS we have to allocate the memory here and
534 now. This includes allocating memory in the DTV. But we
535 cannot change any DTV other than our own. So, if we
536 cannot guarantee that there is room in the DTV we don't
537 even try it and fail the load.
539 XXX We could track the minimum DTV slots allocated in
540 all threads. */
541 if (! RTLD_SINGLE_THREAD_P && imap->l_tls_modid > DTV_SURPLUS)
542 _dl_signal_error (0, "dlopen", NULL, N_("\
543 cannot load any more object with static TLS"));
545 imap->l_need_tls_init = 0;
546 #ifdef SHARED
547 /* Update the slot information data for at least the
548 generation of the DSO we are allocating data for. */
549 _dl_update_slotinfo (imap->l_tls_modid);
550 #endif
552 GL(dl_init_static_tls) (imap);
553 assert (imap->l_need_tls_init == 0);
557 /* Notify the debugger all new objects have been relocated. */
558 if (relocation_in_progress)
559 LIBC_PROBE (reloc_complete, 3, args->nsid, r, new);
561 #ifndef SHARED
562 DL_STATIC_INIT (new);
563 #endif
565 /* Run the initializer functions of new objects. */
566 _dl_init (new, args->argc, args->argv, args->env);
568 /* Now we can make the new map available in the global scope. */
569 if (mode & RTLD_GLOBAL)
570 /* Move the object in the global namespace. */
571 if (add_to_global (new) != 0)
572 /* It failed. */
573 return;
575 /* Mark the object as not deletable if the RTLD_NODELETE flags was
576 passed. */
577 if (__glibc_unlikely (mode & RTLD_NODELETE))
578 new->l_flags_1 |= DF_1_NODELETE;
580 #ifndef SHARED
581 /* We must be the static _dl_open in libc.a. A static program that
582 has loaded a dynamic object now has competition. */
583 __libc_multiple_libcs = 1;
584 #endif
586 /* Let the user know about the opencount. */
587 if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
588 _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
589 new->l_name, new->l_ns, new->l_direct_opencount);
593 void *
594 _dl_open (const char *file, int mode, const void *caller_dlopen, Lmid_t nsid,
595 int argc, char *argv[], char *env[])
597 if ((mode & RTLD_BINDING_MASK) == 0)
598 /* One of the flags must be set. */
599 _dl_signal_error (EINVAL, file, NULL, N_("invalid mode for dlopen()"));
601 /* Make sure we are alone. */
602 __rtld_lock_lock_recursive (GL(dl_load_lock));
604 if (__glibc_unlikely (nsid == LM_ID_NEWLM))
606 /* Find a new namespace. */
607 for (nsid = 1; DL_NNS > 1 && nsid < GL(dl_nns); ++nsid)
608 if (GL(dl_ns)[nsid]._ns_loaded == NULL)
609 break;
611 if (__glibc_unlikely (nsid == DL_NNS))
613 /* No more namespace available. */
614 __rtld_lock_unlock_recursive (GL(dl_load_lock));
616 _dl_signal_error (EINVAL, file, NULL, N_("\
617 no more namespaces available for dlmopen()"));
619 else if (nsid == GL(dl_nns))
621 __rtld_lock_initialize (GL(dl_ns)[nsid]._ns_unique_sym_table.lock);
622 ++GL(dl_nns);
625 _dl_debug_initialize (0, nsid)->r_state = RT_CONSISTENT;
627 /* Never allow loading a DSO in a namespace which is empty. Such
628 direct placements is only causing problems. Also don't allow
629 loading into a namespace used for auditing. */
630 else if (__builtin_expect (nsid != LM_ID_BASE && nsid != __LM_ID_CALLER, 0)
631 && (GL(dl_ns)[nsid]._ns_nloaded == 0
632 || GL(dl_ns)[nsid]._ns_loaded->l_auditing))
633 _dl_signal_error (EINVAL, file, NULL,
634 N_("invalid target namespace in dlmopen()"));
636 struct dl_open_args args;
637 args.file = file;
638 args.mode = mode;
639 args.caller_dlopen = caller_dlopen;
640 args.caller_dl_open = RETURN_ADDRESS (0);
641 args.map = NULL;
642 args.nsid = nsid;
643 args.argc = argc;
644 args.argv = argv;
645 args.env = env;
647 const char *objname;
648 const char *errstring;
649 bool malloced;
650 int errcode = _dl_catch_error (&objname, &errstring, &malloced,
651 dl_open_worker, &args);
653 #if defined USE_LDCONFIG && !defined MAP_COPY
654 /* We must unmap the cache file. */
655 _dl_unload_cache ();
656 #endif
658 /* See if an error occurred during loading. */
659 if (__glibc_unlikely (errstring != NULL))
661 /* Remove the object from memory. It may be in an inconsistent
662 state if relocation failed, for example. */
663 if (args.map)
665 /* Maybe some of the modules which were loaded use TLS.
666 Since it will be removed in the following _dl_close call
667 we have to mark the dtv array as having gaps to fill the
668 holes. This is a pessimistic assumption which won't hurt
669 if not true. There is no need to do this when we are
670 loading the auditing DSOs since TLS has not yet been set
671 up. */
672 if ((mode & __RTLD_AUDIT) == 0)
673 GL(dl_tls_dtv_gaps) = true;
675 _dl_close_worker (args.map);
678 assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);
680 /* Release the lock. */
681 __rtld_lock_unlock_recursive (GL(dl_load_lock));
683 /* Make a local copy of the error string so that we can release the
684 memory allocated for it. */
685 size_t len_errstring = strlen (errstring) + 1;
686 char *local_errstring;
687 if (objname == errstring + len_errstring)
689 size_t total_len = len_errstring + strlen (objname) + 1;
690 local_errstring = alloca (total_len);
691 memcpy (local_errstring, errstring, total_len);
692 objname = local_errstring + len_errstring;
694 else
696 local_errstring = alloca (len_errstring);
697 memcpy (local_errstring, errstring, len_errstring);
700 if (malloced)
701 free ((char *) errstring);
703 /* Reraise the error. */
704 _dl_signal_error (errcode, objname, NULL, local_errstring);
707 assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);
709 /* Release the lock. */
710 __rtld_lock_unlock_recursive (GL(dl_load_lock));
712 return args.map;
716 void
717 _dl_show_scope (struct link_map *l, int from)
719 _dl_debug_printf ("object=%s [%lu]\n",
720 DSO_FILENAME (l->l_name), l->l_ns);
721 if (l->l_scope != NULL)
722 for (int scope_cnt = from; l->l_scope[scope_cnt] != NULL; ++scope_cnt)
724 _dl_debug_printf (" scope %u:", scope_cnt);
726 for (unsigned int cnt = 0; cnt < l->l_scope[scope_cnt]->r_nlist; ++cnt)
727 if (*l->l_scope[scope_cnt]->r_list[cnt]->l_name)
728 _dl_debug_printf_c (" %s",
729 l->l_scope[scope_cnt]->r_list[cnt]->l_name);
730 else
731 _dl_debug_printf_c (" %s", RTLD_PROGNAME);
733 _dl_debug_printf_c ("\n");
735 else
736 _dl_debug_printf (" no scope\n");
737 _dl_debug_printf ("\n");
#ifdef IS_IN_rtld
/* Return non-zero if ADDR lies within one of L's segments.  */
int
internal_function
_dl_addr_inside_object (struct link_map *l, const ElfW(Addr) addr)
{
  int n = l->l_phnum;
  const ElfW(Addr) reladdr = addr - l->l_addr;

  /* ElfW(Addr) is unsigned, so if RELADDR is below p_vaddr the
     subtraction wraps to a huge value and fails the p_memsz bound;
     a separate ">= 0" comparison would be tautological on an
     unsigned type (and draws -Wtype-limits warnings), so the single
     range check below is sufficient.  */
  while (--n >= 0)
    if (l->l_phdr[n].p_type == PT_LOAD
        && reladdr - l->l_phdr[n].p_vaddr < l->l_phdr[n].p_memsz)
      return 1;
  return 0;
}
#endif