2012-06-21 Jeff Law <law@redhat.com>
[glibc.git] / elf / dl-open.c
blob9fe0a7ff6ac29f5da94ce7ee7f64fac04336fc2c
1 /* Load a shared object at runtime, relocate it, and run its initializer.
2 Copyright (C) 1996-2007, 2009-2012 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <http://www.gnu.org/licenses/>. */
19 #include <assert.h>
20 #include <dlfcn.h>
21 #include <errno.h>
22 #include <libintl.h>
23 #include <stdio.h>
24 #include <stdlib.h>
25 #include <string.h>
26 #include <unistd.h>
27 #include <sys/mman.h> /* Check whether MAP_COPY is defined. */
28 #include <sys/param.h>
29 #include <bits/libc-lock.h>
30 #include <ldsodefs.h>
31 #include <bp-sym.h>
32 #include <caller.h>
33 #include <sysdep-cancel.h>
34 #include <tls.h>
36 #include <dl-dst.h>
39 extern ElfW(Addr) _dl_sysdep_start (void **start_argptr,
40 void (*dl_main) (const ElfW(Phdr) *phdr,
41 ElfW(Word) phnum,
42 ElfW(Addr) *user_entry,
43 ElfW(auxv_t) *auxv));
44 weak_extern (BP_SYM (_dl_sysdep_start))
46 extern int __libc_multiple_libcs; /* Defined in init-first.c. */
/* We must be careful not to leave ourselves in an inconsistent state.  Thus
   we catch any error and re-raise it after cleaning up.  */
/* Argument block handed from _dl_open to dl_open_worker through
   _dl_catch_error; also carries the result back to _dl_open.  */
struct dl_open_args
{
  /* Name of the object to load.  */
  const char *file;
  /* RTLD_* mode flags passed to dlopen.  */
  int mode;
  /* This is the caller of the dlopen() function.  */
  const void *caller_dlopen;
  /* This is the caller of _dl_open().  */
  const void *caller_dl_open;
  /* Result: link map of the newly opened object (NULL on RTLD_NOLOAD
     miss or failure).  */
  struct link_map *map;
  /* Namespace ID.  */
  Lmid_t nsid;
  /* Original parameters to the program and the current environment.  */
  int argc;
  char **argv;
  char **env;
};
/* Add NEW and every not-yet-global member of its search list to the
   global scope of NEW's namespace, growing the r_list array when
   needed.  Returns 0 on success; on allocation failure signals an
   ENOMEM error and returns 1.  Must be called with the loader lock
   held.  */
static int
add_to_global (struct link_map *new)
{
  struct link_map **new_global;
  unsigned int to_add = 0;
  unsigned int cnt;

  /* Count the objects we have to put in the global scope.  */
  for (cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    if (new->l_searchlist.r_list[cnt]->l_global == 0)
      ++to_add;

  /* The symbols of the new objects and its dependencies are to be
     introduced into the global scope that will be used to resolve
     references from other dynamically-loaded objects.

     The global scope is the searchlist in the main link map.  We
     extend this list if necessary.  There is one problem though:
     since this structure was allocated very early (before the libc
     is loaded) the memory it uses is allocated by the malloc()-stub
     in the ld.so.  When we come here these functions are not used
     anymore.  Instead the malloc() implementation of the libc is
     used.  But this means the block from the main map cannot be used
     in an realloc() call.  Therefore we allocate a completely new
     array the first time we have to add something to the local scope.  */

  struct link_namespaces *ns = &GL(dl_ns)[new->l_ns];
  if (ns->_ns_global_scope_alloc == 0)
    {
      /* This is the first dynamic object given global scope.  */
      ns->_ns_global_scope_alloc
	= ns->_ns_main_searchlist->r_nlist + to_add + 8;
      new_global = (struct link_map **)
	malloc (ns->_ns_global_scope_alloc * sizeof (struct link_map *));
      if (new_global == NULL)
	{
	  ns->_ns_global_scope_alloc = 0;
	nomem:
	  _dl_signal_error (ENOMEM, new->l_libname->name, NULL,
			    N_("cannot extend global scope"));
	  return 1;
	}

      /* Copy over the old entries.  */
      ns->_ns_main_searchlist->r_list
	= memcpy (new_global, ns->_ns_main_searchlist->r_list,
		  (ns->_ns_main_searchlist->r_nlist
		   * sizeof (struct link_map *)));
    }
  else if (ns->_ns_main_searchlist->r_nlist + to_add
	   > ns->_ns_global_scope_alloc)
    {
      /* We have to extend the existing array of link maps in the
	 main map.  */
      struct link_map **old_global
	= GL(dl_ns)[new->l_ns]._ns_main_searchlist->r_list;
      size_t new_nalloc = ((ns->_ns_global_scope_alloc + to_add) * 2);

      new_global = (struct link_map **)
	malloc (new_nalloc * sizeof (struct link_map *));
      if (new_global == NULL)
	goto nomem;

      memcpy (new_global, old_global,
	      ns->_ns_global_scope_alloc * sizeof (struct link_map *));

      ns->_ns_global_scope_alloc = new_nalloc;
      ns->_ns_main_searchlist->r_list = new_global;

      /* Concurrent lookups may still be walking the old array; wait
	 until every thread has left its GSCOPE critical section
	 before freeing it.  */
      if (!RTLD_SINGLE_THREAD_P)
	THREAD_GSCOPE_WAIT ();

      free (old_global);
    }

  /* Now add the new entries.  */
  unsigned int new_nlist = ns->_ns_main_searchlist->r_nlist;
  for (cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    {
      struct link_map *map = new->l_searchlist.r_list[cnt];

      if (map->l_global == 0)
	{
	  map->l_global = 1;
	  ns->_ns_main_searchlist->r_list[new_nlist++] = map;

	  /* We modify the global scope.  Report this.  */
	  if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES, 0))
	    _dl_debug_printf ("\nadd %s [%lu] to global scope\n",
			      map->l_name, map->l_ns);
	}
    }

  /* Publish the entries before the new count becomes visible, so
     lock-free readers never see a count covering unwritten slots.  */
  atomic_write_barrier ();
  ns->_ns_main_searchlist->r_nlist = new_nlist;

  return 0;
}
167 static void
168 dl_open_worker (void *a)
170 struct dl_open_args *args = a;
171 const char *file = args->file;
172 int mode = args->mode;
173 struct link_map *call_map = NULL;
175 /* Check whether _dl_open() has been called from a valid DSO. */
176 if (__check_caller (args->caller_dl_open,
177 allow_libc|allow_libdl|allow_ldso) != 0)
178 _dl_signal_error (0, "dlopen", NULL, N_("invalid caller"));
180 /* Determine the caller's map if necessary. This is needed in case
181 we have a DST, when we don't know the namespace ID we have to put
182 the new object in, or when the file name has no path in which
183 case we need to look along the RUNPATH/RPATH of the caller. */
184 const char *dst = strchr (file, '$');
185 if (dst != NULL || args->nsid == __LM_ID_CALLER
186 || strchr (file, '/') == NULL)
188 const void *caller_dlopen = args->caller_dlopen;
190 /* We have to find out from which object the caller is calling.
191 By default we assume this is the main application. */
192 call_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
194 struct link_map *l;
195 for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
196 for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
197 if (caller_dlopen >= (const void *) l->l_map_start
198 && caller_dlopen < (const void *) l->l_map_end
199 && (l->l_contiguous
200 || _dl_addr_inside_object (l, (ElfW(Addr)) caller_dlopen)))
202 assert (ns == l->l_ns);
203 call_map = l;
204 goto found_caller;
207 found_caller:
208 if (args->nsid == __LM_ID_CALLER)
210 #ifndef SHARED
211 /* In statically linked apps there might be no loaded object. */
212 if (call_map == NULL)
213 args->nsid = LM_ID_BASE;
214 else
215 #endif
216 args->nsid = call_map->l_ns;
220 assert (_dl_debug_initialize (0, args->nsid)->r_state == RT_CONSISTENT);
222 /* Load the named object. */
223 struct link_map *new;
224 args->map = new = _dl_map_object (call_map, file, lt_loaded, 0,
225 mode | __RTLD_CALLMAP, args->nsid);
227 /* If the pointer returned is NULL this means the RTLD_NOLOAD flag is
228 set and the object is not already loaded. */
229 if (new == NULL)
231 assert (mode & RTLD_NOLOAD);
232 return;
235 if (__builtin_expect (mode & __RTLD_SPROF, 0))
236 /* This happens only if we load a DSO for 'sprof'. */
237 return;
239 /* This object is directly loaded. */
240 ++new->l_direct_opencount;
242 /* It was already open. */
243 if (__builtin_expect (new->l_searchlist.r_list != NULL, 0))
245 /* Let the user know about the opencount. */
246 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
247 _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
248 new->l_name, new->l_ns, new->l_direct_opencount);
250 /* If the user requested the object to be in the global namespace
251 but it is not so far, add it now. */
252 if ((mode & RTLD_GLOBAL) && new->l_global == 0)
253 (void) add_to_global (new);
255 assert (_dl_debug_initialize (0, args->nsid)->r_state == RT_CONSISTENT);
257 return;
260 /* Load that object's dependencies. */
261 _dl_map_object_deps (new, NULL, 0, 0,
262 mode & (__RTLD_DLOPEN | RTLD_DEEPBIND | __RTLD_AUDIT));
264 /* So far, so good. Now check the versions. */
265 for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
266 if (new->l_searchlist.r_list[i]->l_real->l_versions == NULL)
267 (void) _dl_check_map_versions (new->l_searchlist.r_list[i]->l_real,
268 0, 0);
270 #ifdef SHARED
271 /* Auditing checkpoint: we have added all objects. */
272 if (__builtin_expect (GLRO(dl_naudit) > 0, 0))
274 struct link_map *head = GL(dl_ns)[new->l_ns]._ns_loaded;
275 /* Do not call the functions for any auditing object. */
276 if (head->l_auditing == 0)
278 struct audit_ifaces *afct = GLRO(dl_audit);
279 for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
281 if (afct->activity != NULL)
282 afct->activity (&head->l_audit[cnt].cookie, LA_ACT_CONSISTENT);
284 afct = afct->next;
288 #endif
290 /* Notify the debugger all new objects are now ready to go. */
291 struct r_debug *r = _dl_debug_initialize (0, args->nsid);
292 r->r_state = RT_CONSISTENT;
293 _dl_debug_state ();
295 /* Print scope information. */
296 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES, 0))
297 _dl_show_scope (new, 0);
299 /* Only do lazy relocation if `LD_BIND_NOW' is not set. */
300 int reloc_mode = mode & __RTLD_AUDIT;
301 if (GLRO(dl_lazy))
302 reloc_mode |= mode & RTLD_LAZY;
304 /* Sort the objects by dependency for the relocation process. This
305 allows IFUNC relocations to work and it also means copy
306 relocation of dependencies are if necessary overwritten. */
307 size_t nmaps = 0;
308 struct link_map *l = new;
311 if (! l->l_real->l_relocated)
312 ++nmaps;
313 l = l->l_next;
315 while (l != NULL);
316 struct link_map *maps[nmaps];
317 nmaps = 0;
318 l = new;
321 if (! l->l_real->l_relocated)
322 maps[nmaps++] = l;
323 l = l->l_next;
325 while (l != NULL);
326 if (nmaps > 1)
328 uint16_t seen[nmaps];
329 memset (seen, '\0', nmaps);
330 size_t i = 0;
331 while (1)
333 ++seen[i];
334 struct link_map *thisp = maps[i];
336 /* Find the last object in the list for which the current one is
337 a dependency and move the current object behind the object
338 with the dependency. */
339 size_t k = nmaps - 1;
340 while (k > i)
342 struct link_map **runp = maps[k]->l_initfini;
343 if (runp != NULL)
344 /* Look through the dependencies of the object. */
345 while (*runp != NULL)
346 if (__builtin_expect (*runp++ == thisp, 0))
348 /* Move the current object to the back past the last
349 object with it as the dependency. */
350 memmove (&maps[i], &maps[i + 1],
351 (k - i) * sizeof (maps[0]));
352 maps[k] = thisp;
354 if (seen[i + 1] > nmaps - i)
356 ++i;
357 goto next_clear;
360 uint16_t this_seen = seen[i];
361 memmove (&seen[i], &seen[i + 1],
362 (k - i) * sizeof (seen[0]));
363 seen[k] = this_seen;
365 goto next;
368 --k;
371 if (++i == nmaps)
372 break;
373 next_clear:
374 memset (&seen[i], 0, (nmaps - i) * sizeof (seen[0]));
375 next:;
379 for (size_t i = nmaps; i-- > 0; )
381 l = maps[i];
383 #ifdef SHARED
384 if (__builtin_expect (GLRO(dl_profile) != NULL, 0))
386 /* If this here is the shared object which we want to profile
387 make sure the profile is started. We can find out whether
388 this is necessary or not by observing the `_dl_profile_map'
389 variable. If it was NULL but is not NULL afterwars we must
390 start the profiling. */
391 struct link_map *old_profile_map = GL(dl_profile_map);
393 _dl_relocate_object (l, l->l_scope, reloc_mode | RTLD_LAZY, 1);
395 if (old_profile_map == NULL && GL(dl_profile_map) != NULL)
397 /* We must prepare the profiling. */
398 _dl_start_profile ();
400 /* Prevent unloading the object. */
401 GL(dl_profile_map)->l_flags_1 |= DF_1_NODELETE;
404 else
405 #endif
406 _dl_relocate_object (l, l->l_scope, reloc_mode, 0);
409 /* If the file is not loaded now as a dependency, add the search
410 list of the newly loaded object to the scope. */
411 bool any_tls = false;
412 unsigned int first_static_tls = new->l_searchlist.r_nlist;
413 for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
415 struct link_map *imap = new->l_searchlist.r_list[i];
416 int from_scope = 0;
418 /* If the initializer has been called already, the object has
419 not been loaded here and now. */
420 if (imap->l_init_called && imap->l_type == lt_loaded)
422 struct r_scope_elem **runp = imap->l_scope;
423 size_t cnt = 0;
425 while (*runp != NULL)
427 if (*runp == &new->l_searchlist)
428 break;
429 ++cnt;
430 ++runp;
433 if (*runp != NULL)
434 /* Avoid duplicates. */
435 continue;
437 if (__builtin_expect (cnt + 1 >= imap->l_scope_max, 0))
439 /* The 'r_scope' array is too small. Allocate a new one
440 dynamically. */
441 size_t new_size;
442 struct r_scope_elem **newp;
444 #define SCOPE_ELEMS(imap) \
445 (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))
447 if (imap->l_scope != imap->l_scope_mem
448 && imap->l_scope_max < SCOPE_ELEMS (imap))
450 new_size = SCOPE_ELEMS (imap);
451 newp = imap->l_scope_mem;
453 else
455 new_size = imap->l_scope_max * 2;
456 newp = (struct r_scope_elem **)
457 malloc (new_size * sizeof (struct r_scope_elem *));
458 if (newp == NULL)
459 _dl_signal_error (ENOMEM, "dlopen", NULL,
460 N_("cannot create scope list"));
463 memcpy (newp, imap->l_scope, cnt * sizeof (imap->l_scope[0]));
464 struct r_scope_elem **old = imap->l_scope;
466 imap->l_scope = newp;
468 if (old != imap->l_scope_mem)
469 _dl_scope_free (old);
471 imap->l_scope_max = new_size;
474 /* First terminate the extended list. Otherwise a thread
475 might use the new last element and then use the garbage
476 at offset IDX+1. */
477 imap->l_scope[cnt + 1] = NULL;
478 atomic_write_barrier ();
479 imap->l_scope[cnt] = &new->l_searchlist;
481 /* Print only new scope information. */
482 from_scope = cnt;
484 /* Only add TLS memory if this object is loaded now and
485 therefore is not yet initialized. */
486 else if (! imap->l_init_called
487 /* Only if the module defines thread local data. */
488 && __builtin_expect (imap->l_tls_blocksize > 0, 0))
490 /* Now that we know the object is loaded successfully add
491 modules containing TLS data to the slot info table. We
492 might have to increase its size. */
493 _dl_add_to_slotinfo (imap);
495 if (imap->l_need_tls_init
496 && first_static_tls == new->l_searchlist.r_nlist)
497 first_static_tls = i;
499 /* We have to bump the generation counter. */
500 any_tls = true;
503 /* Print scope information. */
504 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES, 0))
505 _dl_show_scope (imap, from_scope);
508 /* Bump the generation number if necessary. */
509 if (any_tls && __builtin_expect (++GL(dl_tls_generation) == 0, 0))
510 _dl_fatal_printf (N_("\
511 TLS generation counter wrapped! Please report this."));
513 /* We need a second pass for static tls data, because _dl_update_slotinfo
514 must not be run while calls to _dl_add_to_slotinfo are still pending. */
515 for (unsigned int i = first_static_tls; i < new->l_searchlist.r_nlist; ++i)
517 struct link_map *imap = new->l_searchlist.r_list[i];
519 if (imap->l_need_tls_init
520 && ! imap->l_init_called
521 && imap->l_tls_blocksize > 0)
523 /* For static TLS we have to allocate the memory here and
524 now. This includes allocating memory in the DTV. But we
525 cannot change any DTV other than our own. So, if we
526 cannot guarantee that there is room in the DTV we don't
527 even try it and fail the load.
529 XXX We could track the minimum DTV slots allocated in
530 all threads. */
531 if (! RTLD_SINGLE_THREAD_P && imap->l_tls_modid > DTV_SURPLUS)
532 _dl_signal_error (0, "dlopen", NULL, N_("\
533 cannot load any more object with static TLS"));
535 imap->l_need_tls_init = 0;
536 #ifdef SHARED
537 /* Update the slot information data for at least the
538 generation of the DSO we are allocating data for. */
539 _dl_update_slotinfo (imap->l_tls_modid);
540 #endif
542 GL(dl_init_static_tls) (imap);
543 assert (imap->l_need_tls_init == 0);
547 /* Run the initializer functions of new objects. */
548 _dl_init (new, args->argc, args->argv, args->env);
550 /* Now we can make the new map available in the global scope. */
551 if (mode & RTLD_GLOBAL)
552 /* Move the object in the global namespace. */
553 if (add_to_global (new) != 0)
554 /* It failed. */
555 return;
557 /* Mark the object as not deletable if the RTLD_NODELETE flags was
558 passed. */
559 if (__builtin_expect (mode & RTLD_NODELETE, 0))
560 new->l_flags_1 |= DF_1_NODELETE;
562 #ifndef SHARED
563 /* We must be the static _dl_open in libc.a. A static program that
564 has loaded a dynamic object now has competition. */
565 __libc_multiple_libcs = 1;
566 #endif
568 /* Let the user know about the opencount. */
569 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
570 _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
571 new->l_name, new->l_ns, new->l_direct_opencount);
/* Public entry point of the dynamic loader's dlopen machinery.  Opens
   FILE with the given RTLD_* MODE in namespace NSID, running the work
   in dl_open_worker under _dl_catch_error so failures are cleaned up
   (partially loaded objects closed) and re-raised.  Returns the link
   map of the opened object.  ARGC/ARGV/ENV are forwarded to the new
   object's initializers.  */
void *
_dl_open (const char *file, int mode, const void *caller_dlopen, Lmid_t nsid,
	  int argc, char *argv[], char *env[])
{
  if ((mode & RTLD_BINDING_MASK) == 0)
    /* One of the flags must be set.  */
    _dl_signal_error (EINVAL, file, NULL, N_("invalid mode for dlopen()"));

  /* Make sure we are alone.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  if (__builtin_expect (nsid == LM_ID_NEWLM, 0))
    {
      /* Find a new namespace.  */
      for (nsid = 1; DL_NNS > 1 && nsid < GL(dl_nns); ++nsid)
	if (GL(dl_ns)[nsid]._ns_loaded == NULL)
	  break;

      if (__builtin_expect (nsid == DL_NNS, 0))
	{
	  /* No more namespace available.  */
	  __rtld_lock_unlock_recursive (GL(dl_load_lock));

	  _dl_signal_error (EINVAL, file, NULL, N_("\
no more namespaces available for dlmopen()"));
	}
      else if (nsid == GL(dl_nns))
	{
	  __rtld_lock_initialize (GL(dl_ns)[nsid]._ns_unique_sym_table.lock);
	  ++GL(dl_nns);
	}

      _dl_debug_initialize (0, nsid)->r_state = RT_CONSISTENT;
    }
  /* Never allow loading a DSO in a namespace which is empty.  Such
     direct placements is only causing problems.  Also don't allow
     loading into a namespace used for auditing.  */
  else if (__builtin_expect (nsid != LM_ID_BASE && nsid != __LM_ID_CALLER, 0)
	   && (GL(dl_ns)[nsid]._ns_nloaded == 0
	       || GL(dl_ns)[nsid]._ns_loaded->l_auditing))
    _dl_signal_error (EINVAL, file, NULL,
		      N_("invalid target namespace in dlmopen()"));
#ifndef SHARED
  else if ((nsid == LM_ID_BASE || nsid == __LM_ID_CALLER)
	   && GL(dl_ns)[LM_ID_BASE]._ns_loaded == NULL
	   && GL(dl_nns) == 0)
    GL(dl_nns) = 1;
#endif

  struct dl_open_args args;
  args.file = file;
  args.mode = mode;
  args.caller_dlopen = caller_dlopen;
  args.caller_dl_open = RETURN_ADDRESS (0);
  args.map = NULL;
  args.nsid = nsid;
  args.argc = argc;
  args.argv = argv;
  args.env = env;

  const char *objname;
  const char *errstring;
  bool malloced;
  /* Run the worker; any _dl_signal_error inside it lands here with
     ERRSTRING set instead of unwinding further.  */
  int errcode = _dl_catch_error (&objname, &errstring, &malloced,
				 dl_open_worker, &args);

#ifndef MAP_COPY
  /* We must munmap() the cache file.  */
  _dl_unload_cache ();
#endif

  /* See if an error occurred during loading.  */
  if (__builtin_expect (errstring != NULL, 0))
    {
      /* Remove the object from memory.  It may be in an inconsistent
	 state if relocation failed, for example.  */
      if (args.map)
	{
	  /* Maybe some of the modules which were loaded use TLS.
	     Since it will be removed in the following _dl_close call
	     we have to mark the dtv array as having gaps to fill the
	     holes.  This is a pessimistic assumption which won't hurt
	     if not true.  There is no need to do this when we are
	     loading the auditing DSOs since TLS has not yet been set
	     up.  */
	  if ((mode & __RTLD_AUDIT) == 0)
	    GL(dl_tls_dtv_gaps) = true;

	  _dl_close_worker (args.map);
	}

      assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);

      /* Release the lock.  */
      __rtld_lock_unlock_recursive (GL(dl_load_lock));

      /* Make a local copy of the error string so that we can release the
	 memory allocated for it.  */
      size_t len_errstring = strlen (errstring) + 1;
      char *local_errstring;
      if (objname == errstring + len_errstring)
	{
	  /* OBJNAME was allocated immediately behind ERRSTRING; copy
	     both in one block and re-point OBJNAME into the copy.  */
	  size_t total_len = len_errstring + strlen (objname) + 1;
	  local_errstring = alloca (total_len);
	  memcpy (local_errstring, errstring, total_len);
	  objname = local_errstring + len_errstring;
	}
      else
	{
	  local_errstring = alloca (len_errstring);
	  memcpy (local_errstring, errstring, len_errstring);
	}

      if (malloced)
	free ((char *) errstring);

      /* Reraise the error.  */
      _dl_signal_error (errcode, objname, NULL, local_errstring);
    }

  assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);

  /* Release the lock.  */
  __rtld_lock_unlock_recursive (GL(dl_load_lock));

#ifndef SHARED
  DL_STATIC_INIT (args.map);
#endif

  return args.map;
}
708 void
709 _dl_show_scope (struct link_map *l, int from)
711 _dl_debug_printf ("object=%s [%lu]\n",
712 *l->l_name ? l->l_name : rtld_progname, l->l_ns);
713 if (l->l_scope != NULL)
714 for (int scope_cnt = from; l->l_scope[scope_cnt] != NULL; ++scope_cnt)
716 _dl_debug_printf (" scope %u:", scope_cnt);
718 for (unsigned int cnt = 0; cnt < l->l_scope[scope_cnt]->r_nlist; ++cnt)
719 if (*l->l_scope[scope_cnt]->r_list[cnt]->l_name)
720 _dl_debug_printf_c (" %s",
721 l->l_scope[scope_cnt]->r_list[cnt]->l_name);
722 else
723 _dl_debug_printf_c (" %s", rtld_progname);
725 _dl_debug_printf_c ("\n");
727 else
728 _dl_debug_printf (" no scope\n");
729 _dl_debug_printf ("\n");
732 #ifdef IS_IN_rtld
733 /* Return non-zero if ADDR lies within one of L's segments. */
735 internal_function
736 _dl_addr_inside_object (struct link_map *l, const ElfW(Addr) addr)
738 int n = l->l_phnum;
739 const ElfW(Addr) reladdr = addr - l->l_addr;
741 while (--n >= 0)
742 if (l->l_phdr[n].p_type == PT_LOAD
743 && reladdr - l->l_phdr[n].p_vaddr >= 0
744 && reladdr - l->l_phdr[n].p_vaddr < l->l_phdr[n].p_memsz)
745 return 1;
746 return 0;
748 #endif