elf/dl-open.c
/* Load a shared object at runtime, relocate it, and run its initializer.
   Copyright (C) 1996-2017 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>		/* Check whether MAP_COPY is defined.  */
#include <sys/param.h>
#include <libc-lock.h>
#include <ldsodefs.h>
#include <caller.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>
#include <atomic.h>

#include <dl-dst.h>

extern int __libc_multiple_libcs;	/* Defined in init-first.c.  */

/* We must be careful not to leave ourselves in an inconsistent state.
   Thus we catch any error and re-raise it after cleaning up.  */

struct dl_open_args
{
  const char *file;
  int mode;
  /* This is the caller of the dlopen() function.  */
  const void *caller_dlopen;
  /* This is the caller of _dl_open().  */
  const void *caller_dl_open;
  struct link_map *map;
  /* Namespace ID.  */
  Lmid_t nsid;
  /* Original parameters to the program and the current environment.  */
  int argc;
  char **argv;
  char **env;
};
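
/* Add NEW and its dependencies to the global scope of NEW's namespace,
   growing the main search list if it is too small.  Signals an error if
   the enlarged list cannot be allocated.  */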
static int
add_to_global (struct link_map *new)
{
  struct link_map **new_global;
  unsigned int to_add = 0;
  unsigned int cnt;

  /* Count the objects we have to put in the global scope.  */
  for (cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    if (new->l_searchlist.r_list[cnt]->l_global == 0)
      ++to_add;

  /* The symbols of the new object and its dependencies are to be
     introduced into the global scope that will be used to resolve
     references from other dynamically-loaded objects.

     The global scope is the searchlist in the main link map.  We
     extend this list if necessary.  There is one problem though:
     since this structure was allocated very early (before libc was
     loaded) the memory it uses is allocated by the malloc() stub in
     ld.so.  When we get here those functions are not used anymore.
     Instead the malloc() implementation of libc is used.  But this
     means the block from the main map cannot be used in a realloc()
     call.  Therefore we allocate a completely new array the first
     time we have to add something to the global scope.  */

  struct link_namespaces *ns = &GL(dl_ns)[new->l_ns];
  if (ns->_ns_global_scope_alloc == 0)
    {
      /* This is the first dynamic object given global scope.  */
      ns->_ns_global_scope_alloc
        = ns->_ns_main_searchlist->r_nlist + to_add + 8;
      new_global = (struct link_map **)
        malloc (ns->_ns_global_scope_alloc * sizeof (struct link_map *));
      if (new_global == NULL)
        {
          ns->_ns_global_scope_alloc = 0;
        nomem:
          _dl_signal_error (ENOMEM, new->l_libname->name, NULL,
                            N_("cannot extend global scope"));
          return 1;
        }

      /* Copy over the old entries.  */
      ns->_ns_main_searchlist->r_list
        = memcpy (new_global, ns->_ns_main_searchlist->r_list,
                  (ns->_ns_main_searchlist->r_nlist
                   * sizeof (struct link_map *)));
    }
  else if (ns->_ns_main_searchlist->r_nlist + to_add
           > ns->_ns_global_scope_alloc)
    {
      /* We have to extend the existing array of link maps in the
         main map.  */
      struct link_map **old_global
        = GL(dl_ns)[new->l_ns]._ns_main_searchlist->r_list;
      size_t new_nalloc = ((ns->_ns_global_scope_alloc + to_add) * 2);

      new_global = (struct link_map **)
        malloc (new_nalloc * sizeof (struct link_map *));
      if (new_global == NULL)
        goto nomem;

      memcpy (new_global, old_global,
              ns->_ns_global_scope_alloc * sizeof (struct link_map *));

      ns->_ns_global_scope_alloc = new_nalloc;
      ns->_ns_main_searchlist->r_list = new_global;
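
      /* The new list is visible now.  Before the old array can be
         freed, wait until no other thread can still be reading it
         inside a global-scope (GSCOPE) critical section.  */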
      if (!RTLD_SINGLE_THREAD_P)
        THREAD_GSCOPE_WAIT ();

      free (old_global);
    }

  /* Now add the new entries.  */
  unsigned int new_nlist = ns->_ns_main_searchlist->r_nlist;
  for (cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    {
      struct link_map *map = new->l_searchlist.r_list[cnt];

      if (map->l_global == 0)
        {
          map->l_global = 1;
          ns->_ns_main_searchlist->r_list[new_nlist++] = map;

          /* We modify the global scope.  Report this.  */
          if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
            _dl_debug_printf ("\nadd %s [%lu] to global scope\n",
                              map->l_name, map->l_ns);
        }
    }
  atomic_write_barrier ();
  ns->_ns_main_searchlist->r_nlist = new_nlist;

  return 0;
}

/* Search link maps in all namespaces for the DSO that contains the object at
   address ADDR.  Returns the pointer to the link map of the matching DSO, or
   NULL if a match is not found.  */
struct link_map *
_dl_find_dso_for_object (const ElfW(Addr) addr)
{
  struct link_map *l;

  /* Find the highest-addressed object that ADDR is not below.  */
  for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
    for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
      if (addr >= l->l_map_start && addr < l->l_map_end
          && (l->l_contiguous
              || _dl_addr_inside_object (l, (ElfW(Addr)) addr)))
        {
          assert (ns == l->l_ns);
          return l;
        }
  return NULL;
}
rtld_hidden_def (_dl_find_dso_for_object);
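
/* The body of _dl_open: map FILE and its dependencies, relocate them,
   set up their scopes and TLS, and run their initializers.  This runs
   with GL(dl_load_lock) held and under the exception handler installed
   by _dl_open below, so errors signalled here unwind back there.  */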
static void
dl_open_worker (void *a)
{
  struct dl_open_args *args = a;
  const char *file = args->file;
  int mode = args->mode;
  struct link_map *call_map = NULL;

  /* Check whether _dl_open() has been called from a valid DSO.  */
  if (__check_caller (args->caller_dl_open,
                      allow_libc|allow_libdl|allow_ldso) != 0)
    _dl_signal_error (0, "dlopen", NULL, N_("invalid caller"));

  /* Determine the caller's map if necessary.  This is needed in case
     we have a DST, when we don't know the namespace ID we have to put
     the new object in, or when the file name has no path in which
     case we need to look along the RUNPATH/RPATH of the caller.  */
  const char *dst = strchr (file, '$');
  if (dst != NULL || args->nsid == __LM_ID_CALLER
      || strchr (file, '/') == NULL)
    {
      const void *caller_dlopen = args->caller_dlopen;

      /* We have to find out from which object the caller is calling.
         By default we assume this is the main application.  */
      call_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;

      struct link_map *l = _dl_find_dso_for_object ((ElfW(Addr)) caller_dlopen);

      if (l)
        call_map = l;

      if (args->nsid == __LM_ID_CALLER)
        args->nsid = call_map->l_ns;
    }

  /* One might be tempted to assert that we are RT_CONSISTENT at this
     point, but that may not be true if this is a recursive call to
     dlopen.  */
  _dl_debug_initialize (0, args->nsid);

  /* Load the named object.  */
  struct link_map *new;
  args->map = new = _dl_map_object (call_map, file, lt_loaded, 0,
                                    mode | __RTLD_CALLMAP, args->nsid);

  /* If the pointer returned is NULL this means the RTLD_NOLOAD flag is
     set and the object is not already loaded.  */
  if (new == NULL)
    {
      assert (mode & RTLD_NOLOAD);
      return;
    }

  /* Mark the object as not deletable if the RTLD_NODELETE flag was
     passed.  Do this early so that we don't skip marking the object if
     it was already loaded.  */
  if (__glibc_unlikely (mode & RTLD_NODELETE))
    new->l_flags_1 |= DF_1_NODELETE;

  if (__glibc_unlikely (mode & __RTLD_SPROF))
    /* This happens only if we load a DSO for 'sprof'.  */
    return;

  /* This object is directly loaded.  */
  ++new->l_direct_opencount;

  /* It was already open.  */
  if (__glibc_unlikely (new->l_searchlist.r_list != NULL))
    {
      /* Let the user know about the opencount.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
        _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
                          new->l_name, new->l_ns, new->l_direct_opencount);

      /* If the user requested the object to be in the global namespace
         but it is not so far, add it now.  */
      if ((mode & RTLD_GLOBAL) && new->l_global == 0)
        (void) add_to_global (new);

      assert (_dl_debug_initialize (0, args->nsid)->r_state == RT_CONSISTENT);

      return;
    }

  /* Load that object's dependencies.  */
  _dl_map_object_deps (new, NULL, 0, 0,
                       mode & (__RTLD_DLOPEN | RTLD_DEEPBIND | __RTLD_AUDIT));

  /* So far, so good.  Now check the versions.  */
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    if (new->l_searchlist.r_list[i]->l_real->l_versions == NULL)
      (void) _dl_check_map_versions (new->l_searchlist.r_list[i]->l_real,
                                     0, 0);

#ifdef SHARED
  /* Auditing checkpoint: we have added all objects.  */
  if (__glibc_unlikely (GLRO(dl_naudit) > 0))
    {
      struct link_map *head = GL(dl_ns)[new->l_ns]._ns_loaded;
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          struct audit_ifaces *afct = GLRO(dl_audit);
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie, LA_ACT_CONSISTENT);

              afct = afct->next;
            }
        }
    }
#endif

  /* Notify the debugger all new objects are now ready to go.  */
  struct r_debug *r = _dl_debug_initialize (0, args->nsid);
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
  LIBC_PROBE (map_complete, 3, args->nsid, r, new);

  /* Print scope information.  */
  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
    _dl_show_scope (new, 0);

  /* Only do lazy relocation if `LD_BIND_NOW' is not set.  */
  int reloc_mode = mode & __RTLD_AUDIT;
  if (GLRO(dl_lazy))
    reloc_mode |= mode & RTLD_LAZY;

  /* Sort the objects by dependency for the relocation process.  This
     allows IFUNC relocations to work and it also means copy
     relocations of dependencies are overwritten when necessary.  */
  size_t nmaps = 0;
  struct link_map *l = new;
  do
    {
      if (! l->l_real->l_relocated)
        ++nmaps;
      l = l->l_next;
    }
  while (l != NULL);
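  /* Second pass: collect the not-yet-relocated objects into a
     stack-allocated array that the loop below reorders so that every
     object ends up after all of the objects that depend on it.  */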
  struct link_map *maps[nmaps];
  nmaps = 0;
  l = new;
  do
    {
      if (! l->l_real->l_relocated)
        maps[nmaps++] = l;
      l = l->l_next;
    }
  while (l != NULL);
  if (nmaps > 1)
    {
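      /* SEEN[i] counts how many times entry I has been reached at the
         top of the loop; the check against NMAPS - I below gives up on
         an entry that keeps coming back, so dependency cycles cannot
         make this sort loop forever.  */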
      uint16_t seen[nmaps];
      memset (seen, '\0', sizeof (seen));
      size_t i = 0;
      while (1)
        {
          ++seen[i];
          struct link_map *thisp = maps[i];

          /* Find the last object in the list for which the current one is
             a dependency and move the current object behind the object
             with the dependency.  */
          size_t k = nmaps - 1;
          while (k > i)
            {
              struct link_map **runp = maps[k]->l_initfini;
              if (runp != NULL)
                /* Look through the dependencies of the object.  */
                while (*runp != NULL)
                  if (__glibc_unlikely (*runp++ == thisp))
                    {
                      /* Move the current object to the back past the last
                         object with it as the dependency.  */
                      memmove (&maps[i], &maps[i + 1],
                               (k - i) * sizeof (maps[0]));
                      maps[k] = thisp;

                      if (seen[i + 1] > nmaps - i)
                        {
                          ++i;
                          goto next_clear;
                        }

                      uint16_t this_seen = seen[i];
                      memmove (&seen[i], &seen[i + 1],
                               (k - i) * sizeof (seen[0]));
                      seen[k] = this_seen;

                      goto next;
                    }

              --k;
            }

          if (++i == nmaps)
            break;
        next_clear:
          memset (&seen[i], 0, (nmaps - i) * sizeof (seen[0]));
        next:;
        }
    }
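
  /* Relocate the collected objects, walking the array backwards so that
     dependencies are relocated before the objects that use them.
     RELOCATION_IN_PROGRESS delays the RELOC_START probe until we know
     at least one object really needs relocating.  */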
  int relocation_in_progress = 0;

  for (size_t i = nmaps; i-- > 0; )
    {
      l = maps[i];

      if (! relocation_in_progress)
        {
          /* Notify the debugger that relocations are about to happen.  */
          LIBC_PROBE (reloc_start, 2, args->nsid, r);
          relocation_in_progress = 1;
        }

#ifdef SHARED
      if (__glibc_unlikely (GLRO(dl_profile) != NULL))
        {
          /* If this is the shared object which we want to profile
             make sure the profile is started.  We can find out whether
             this is necessary or not by observing the `_dl_profile_map'
             variable.  If it was NULL but is not NULL afterwards we must
             start the profiling.  */
          struct link_map *old_profile_map = GL(dl_profile_map);

          _dl_relocate_object (l, l->l_scope, reloc_mode | RTLD_LAZY, 1);

          if (old_profile_map == NULL && GL(dl_profile_map) != NULL)
            {
              /* We must prepare the profiling.  */
              _dl_start_profile ();

              /* Prevent unloading the object.  */
              GL(dl_profile_map)->l_flags_1 |= DF_1_NODELETE;
            }
        }
      else
#endif
        _dl_relocate_object (l, l->l_scope, reloc_mode, 0);
    }

  /* If the file is not loaded now as a dependency, add the search
     list of the newly loaded object to the scope.  */
  bool any_tls = false;
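  /* FIRST_STATIC_TLS is the index of the first newly loaded object that
     still needs its static TLS block set up; it stays at r_nlist if no
     such object is found, so the second pass below does nothing.  */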
  unsigned int first_static_tls = new->l_searchlist.r_nlist;
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];
      int from_scope = 0;

      /* If the initializer has been called already, the object has
         not been loaded here and now.  */
      if (imap->l_init_called && imap->l_type == lt_loaded)
        {
          struct r_scope_elem **runp = imap->l_scope;
          size_t cnt = 0;

          while (*runp != NULL)
            {
              if (*runp == &new->l_searchlist)
                break;
              ++cnt;
              ++runp;
            }

          if (*runp != NULL)
            /* Avoid duplicates.  */
            continue;

          if (__glibc_unlikely (cnt + 1 >= imap->l_scope_max))
            {
              /* The 'r_scope' array is too small.  Allocate a new one
                 dynamically.  */
              size_t new_size;
              struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

              if (imap->l_scope != imap->l_scope_mem
                  && imap->l_scope_max < SCOPE_ELEMS (imap))
                {
                  new_size = SCOPE_ELEMS (imap);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max * 2;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlopen", NULL,
                                      N_("cannot create scope list"));
                }

              memcpy (newp, imap->l_scope, cnt * sizeof (imap->l_scope[0]));
              struct r_scope_elem **old = imap->l_scope;

              imap->l_scope = newp;

              if (old != imap->l_scope_mem)
                _dl_scope_free (old);

              imap->l_scope_max = new_size;
            }

          /* First terminate the extended list.  Otherwise a thread
             might use the new last element and then use the garbage
             at offset IDX+1.  */
          imap->l_scope[cnt + 1] = NULL;
          atomic_write_barrier ();
          imap->l_scope[cnt] = &new->l_searchlist;

          /* Print only new scope information.  */
          from_scope = cnt;
        }
      /* Only add TLS memory if this object is loaded now and
         therefore is not yet initialized.  */
      else if (! imap->l_init_called
               /* Only if the module defines thread local data.  */
               && __builtin_expect (imap->l_tls_blocksize > 0, 0))
        {
          /* Now that we know the object is loaded successfully add
             modules containing TLS data to the slot info table.  We
             might have to increase its size.  */
          _dl_add_to_slotinfo (imap);

          if (imap->l_need_tls_init
              && first_static_tls == new->l_searchlist.r_nlist)
            first_static_tls = i;

          /* We have to bump the generation counter.  */
          any_tls = true;
        }

      /* Print scope information.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
        _dl_show_scope (imap, from_scope);
    }

  /* Bump the generation number if necessary.  */
  if (any_tls && __builtin_expect (++GL(dl_tls_generation) == 0, 0))
    _dl_fatal_printf (N_("\
TLS generation counter wrapped!  Please report this."));
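
  /* The new generation number is picked up lazily: a thread's DTV is
     brought up to date the next time it goes through the slow path of
     __tls_get_addr and notices that its cached generation is stale.  */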
  /* We need a second pass for static TLS data, because _dl_update_slotinfo
     must not be run while calls to _dl_add_to_slotinfo are still pending.  */
  for (unsigned int i = first_static_tls; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      if (imap->l_need_tls_init
          && ! imap->l_init_called
          && imap->l_tls_blocksize > 0)
        {
          /* For static TLS we have to allocate the memory here and
             now, but we can delay updating the DTV.  */
          imap->l_need_tls_init = 0;
#ifdef SHARED
          /* Update the slot information data for at least the
             generation of the DSO we are allocating data for.  */
          _dl_update_slotinfo (imap->l_tls_modid);
#endif

          GL(dl_init_static_tls) (imap);
          assert (imap->l_need_tls_init == 0);
        }
    }

  /* Notify the debugger all new objects have been relocated.  */
  if (relocation_in_progress)
    LIBC_PROBE (reloc_complete, 3, args->nsid, r, new);

#ifndef SHARED
  DL_STATIC_INIT (new);
#endif

  /* Run the initializer functions of new objects.  */
  _dl_init (new, args->argc, args->argv, args->env);

  /* Now we can make the new map available in the global scope.  */
  if (mode & RTLD_GLOBAL)
    /* Move the object into the global namespace.  */
    if (add_to_global (new) != 0)
      /* It failed.  */
      return;

#ifndef SHARED
  /* We must be the static _dl_open in libc.a.  A static program that
     has loaded a dynamic object now has competition.  */
  __libc_multiple_libcs = 1;
#endif

  /* Let the user know about the opencount.  */
  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
    _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
                      new->l_name, new->l_ns, new->l_direct_opencount);
}
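
/* Entry point used by dlopen and dlmopen: validate MODE and NSID, take
   the loader lock, run dl_open_worker under an exception handler, and
   clean up again if loading fails.  */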
void *
_dl_open (const char *file, int mode, const void *caller_dlopen, Lmid_t nsid,
          int argc, char *argv[], char *env[])
{
  if ((mode & RTLD_BINDING_MASK) == 0)
    /* One of the flags must be set.  */
    _dl_signal_error (EINVAL, file, NULL, N_("invalid mode for dlopen()"));

  /* Make sure we are alone.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  if (__glibc_unlikely (nsid == LM_ID_NEWLM))
    {
      /* Find a new namespace.  */
      for (nsid = 1; DL_NNS > 1 && nsid < GL(dl_nns); ++nsid)
        if (GL(dl_ns)[nsid]._ns_loaded == NULL)
          break;

      if (__glibc_unlikely (nsid == DL_NNS))
        {
          /* No more namespaces available.  */
          __rtld_lock_unlock_recursive (GL(dl_load_lock));

          _dl_signal_error (EINVAL, file, NULL, N_("\
no more namespaces available for dlmopen()"));
        }
      else if (nsid == GL(dl_nns))
        {
          __rtld_lock_initialize (GL(dl_ns)[nsid]._ns_unique_sym_table.lock);
          ++GL(dl_nns);
        }

      _dl_debug_initialize (0, nsid)->r_state = RT_CONSISTENT;
    }
  /* Never allow loading a DSO in a namespace which is empty.  Such
     direct placement only causes problems.  Also don't allow loading
     into a namespace used for auditing.  */
  else if (__glibc_unlikely (nsid != LM_ID_BASE && nsid != __LM_ID_CALLER)
           && (__glibc_unlikely (nsid < 0 || nsid >= GL(dl_nns))
               /* This prevents the [NSID] index expressions from being
                  evaluated, so the compiler won't think that we are
                  accessing an invalid index here in the !SHARED case where
                  DL_NNS is 1 and so any NSID != 0 is invalid.  */
               || DL_NNS == 1
               || GL(dl_ns)[nsid]._ns_nloaded == 0
               || GL(dl_ns)[nsid]._ns_loaded->l_auditing))
    _dl_signal_error (EINVAL, file, NULL,
                      N_("invalid target namespace in dlmopen()"));

  struct dl_open_args args;
  args.file = file;
  args.mode = mode;
  args.caller_dlopen = caller_dlopen;
  args.caller_dl_open = RETURN_ADDRESS (0);
  args.map = NULL;
  args.nsid = nsid;
  args.argc = argc;
  args.argv = argv;
  args.env = env;
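
  /* Do the real work in dl_open_worker with an exception handler
     installed: any error signalled while loading unwinds back to this
     point with ERRCODE and EXCEPTION set.  */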
  struct dl_exception exception;
  int errcode = _dl_catch_exception (&exception, dl_open_worker, &args);

#if defined USE_LDCONFIG && !defined MAP_COPY
  /* We must unmap the cache file.  */
  _dl_unload_cache ();
#endif

  /* See if an error occurred during loading.  */
  if (__glibc_unlikely (exception.errstring != NULL))
    {
      /* Remove the object from memory.  It may be in an inconsistent
         state if relocation failed, for example.  */
      if (args.map)
        {
          /* Maybe some of the modules which were loaded use TLS.
             Since it will be removed in the following _dl_close call
             we have to mark the dtv array as having gaps to fill the
             holes.  This is a pessimistic assumption which won't hurt
             if not true.  There is no need to do this when we are
             loading the auditing DSOs since TLS has not yet been set
             up.  */
          if ((mode & __RTLD_AUDIT) == 0)
            GL(dl_tls_dtv_gaps) = true;

          _dl_close_worker (args.map, true);
        }

      assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);

      /* Release the lock.  */
      __rtld_lock_unlock_recursive (GL(dl_load_lock));

      /* Reraise the error.  */
      _dl_signal_exception (errcode, &exception, NULL);
    }

  assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);

  /* Release the lock.  */
  __rtld_lock_unlock_recursive (GL(dl_load_lock));

  return args.map;
}

void
_dl_show_scope (struct link_map *l, int from)
{
  _dl_debug_printf ("object=%s [%lu]\n",
                    DSO_FILENAME (l->l_name), l->l_ns);
  if (l->l_scope != NULL)
    for (int scope_cnt = from; l->l_scope[scope_cnt] != NULL; ++scope_cnt)
      {
        _dl_debug_printf (" scope %u:", scope_cnt);

        for (unsigned int cnt = 0; cnt < l->l_scope[scope_cnt]->r_nlist; ++cnt)
          if (*l->l_scope[scope_cnt]->r_list[cnt]->l_name)
            _dl_debug_printf_c (" %s",
                                l->l_scope[scope_cnt]->r_list[cnt]->l_name);
          else
            _dl_debug_printf_c (" %s", RTLD_PROGNAME);

        _dl_debug_printf_c ("\n");
      }
  else
    _dl_debug_printf (" no scope\n");
  _dl_debug_printf ("\n");
}