glibc.git: elf/dl-open.c (blob c539f10cf38bde7c4a8705c2c2bfa65240acd258)
/* Load a shared object at runtime, relocate it, and run its initializer.
   Copyright (C) 1996-2017 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>		/* Check whether MAP_COPY is defined.  */
#include <sys/param.h>
#include <libc-lock.h>
#include <ldsodefs.h>
#include <caller.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>
#include <atomic.h>
#include <libc-internal.h>

#include <dl-dst.h>

/* We must be careful not to leave us in an inconsistent state.  Thus we
   catch any error and re-raise it after cleaning up.  */

struct dl_open_args
{
  const char *file;
  int mode;
  /* This is the caller of the dlopen() function.  */
  const void *caller_dlopen;
  /* This is the caller of _dl_open().  */
  const void *caller_dl_open;
  struct link_map *map;
  /* Namespace ID.  */
  Lmid_t nsid;
  /* Original parameters to the program and the current environment.  */
  int argc;
  char **argv;
  char **env;
};

static int
add_to_global (struct link_map *new)
{
  struct link_map **new_global;
  unsigned int to_add = 0;
  unsigned int cnt;

  /* Count the objects we have to put in the global scope.  */
  for (cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    if (new->l_searchlist.r_list[cnt]->l_global == 0)
      ++to_add;

  /* The symbols of the new object and its dependencies are to be
     introduced into the global scope that will be used to resolve
     references from other dynamically-loaded objects.

     The global scope is the searchlist in the main link map.  We
     extend this list if necessary.  There is one problem though:
     since this structure was allocated very early (before libc was
     loaded) the memory it uses came from the malloc() stub in ld.so.
     By the time we get here those functions are no longer used;
     libc's malloc() implementation is used instead.  This means the
     block from the main map cannot be passed to realloc().  Therefore
     we allocate a completely new array the first time we have to add
     something to the global scope.  */

  struct link_namespaces *ns = &GL(dl_ns)[new->l_ns];
  if (ns->_ns_global_scope_alloc == 0)
    {
      /* This is the first dynamic object given global scope.  */
      ns->_ns_global_scope_alloc
        = ns->_ns_main_searchlist->r_nlist + to_add + 8;
      new_global = (struct link_map **)
        malloc (ns->_ns_global_scope_alloc * sizeof (struct link_map *));
      if (new_global == NULL)
        {
          ns->_ns_global_scope_alloc = 0;
        nomem:
          _dl_signal_error (ENOMEM, new->l_libname->name, NULL,
                            N_("cannot extend global scope"));
          return 1;
        }

      /* Copy over the old entries.  */
      ns->_ns_main_searchlist->r_list
        = memcpy (new_global, ns->_ns_main_searchlist->r_list,
                  (ns->_ns_main_searchlist->r_nlist
                   * sizeof (struct link_map *)));
    }
  else if (ns->_ns_main_searchlist->r_nlist + to_add
           > ns->_ns_global_scope_alloc)
    {
      /* We have to extend the existing array of link maps in the
         main map.  */
      struct link_map **old_global
        = GL(dl_ns)[new->l_ns]._ns_main_searchlist->r_list;
      size_t new_nalloc = ((ns->_ns_global_scope_alloc + to_add) * 2);

      new_global = (struct link_map **)
        malloc (new_nalloc * sizeof (struct link_map *));
      if (new_global == NULL)
        goto nomem;

      memcpy (new_global, old_global,
              ns->_ns_global_scope_alloc * sizeof (struct link_map *));

      ns->_ns_global_scope_alloc = new_nalloc;
      ns->_ns_main_searchlist->r_list = new_global;
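
      /* Other threads may still be iterating over the old array without
         holding the load lock, so it can only be freed once every such
         thread has left its current GSCOPE critical section.  */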
      if (!RTLD_SINGLE_THREAD_P)
        THREAD_GSCOPE_WAIT ();

      free (old_global);
    }

  /* Now add the new entries.  */
  unsigned int new_nlist = ns->_ns_main_searchlist->r_nlist;
  for (cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    {
      struct link_map *map = new->l_searchlist.r_list[cnt];

      if (map->l_global == 0)
        {
          map->l_global = 1;
          ns->_ns_main_searchlist->r_list[new_nlist++] = map;

          /* We modify the global scope.  Report this.  */
          if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
            _dl_debug_printf ("\nadd %s [%lu] to global scope\n",
                              map->l_name, map->l_ns);
        }
    }
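
  /* Make the new entries visible to other threads before publishing the
     updated length below, so a concurrent walk of the global scope never
     reads an uninitialized slot.  */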
  atomic_write_barrier ();
  ns->_ns_main_searchlist->r_nlist = new_nlist;

  return 0;
}

/* Search link maps in all namespaces for the DSO that contains the object at
   address ADDR.  Returns the pointer to the link map of the matching DSO, or
   NULL if a match is not found.  */
struct link_map *
_dl_find_dso_for_object (const ElfW(Addr) addr)
{
  struct link_map *l;

  /* Find the highest-addressed object that ADDR is not below.  */
  for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
    for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
      if (addr >= l->l_map_start && addr < l->l_map_end
          && (l->l_contiguous
              || _dl_addr_inside_object (l, (ElfW(Addr)) addr)))
        {
          assert (ns == l->l_ns);
          return l;
        }
  return NULL;
}
rtld_hidden_def (_dl_find_dso_for_object);

static void
dl_open_worker (void *a)
{
  struct dl_open_args *args = a;
  const char *file = args->file;
  int mode = args->mode;
  struct link_map *call_map = NULL;

  /* Check whether _dl_open() has been called from a valid DSO.  */
  if (__check_caller (args->caller_dl_open,
                      allow_libc|allow_libdl|allow_ldso) != 0)
    _dl_signal_error (0, "dlopen", NULL, N_("invalid caller"));

  /* Determine the caller's map if necessary.  This is needed in case
     we have a DST, when we don't know the namespace ID we have to put
     the new object in, or when the file name has no path in which
     case we need to look along the RUNPATH/RPATH of the caller.  */
  const char *dst = strchr (file, '$');
  if (dst != NULL || args->nsid == __LM_ID_CALLER
      || strchr (file, '/') == NULL)
    {
      const void *caller_dlopen = args->caller_dlopen;

      /* We have to find out from which object the caller is calling.
         By default we assume this is the main application.  */
      call_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;

      struct link_map *l = _dl_find_dso_for_object ((ElfW(Addr)) caller_dlopen);

      if (l)
        call_map = l;

      if (args->nsid == __LM_ID_CALLER)
        args->nsid = call_map->l_ns;
    }

  /* One might be tempted to assert that we are RT_CONSISTENT at this
     point, but that may not be true if this is a recursive call to
     dlopen.  */
  _dl_debug_initialize (0, args->nsid);

  /* Load the named object.  */
  struct link_map *new;
  args->map = new = _dl_map_object (call_map, file, lt_loaded, 0,
                                    mode | __RTLD_CALLMAP, args->nsid);

  /* If the pointer returned is NULL this means the RTLD_NOLOAD flag is
     set and the object is not already loaded.  */
  if (new == NULL)
    {
      assert (mode & RTLD_NOLOAD);
      return;
    }

  /* Mark the object as not deletable if the RTLD_NODELETE flag was passed.
     Do this early so that we don't skip marking the object if it was
     already loaded.  */
  if (__glibc_unlikely (mode & RTLD_NODELETE))
    new->l_flags_1 |= DF_1_NODELETE;

  if (__glibc_unlikely (mode & __RTLD_SPROF))
    /* This happens only if we load a DSO for 'sprof'.  */
    return;

  /* This object is directly loaded.  */
  ++new->l_direct_opencount;

  /* It was already open.  */
  if (__glibc_unlikely (new->l_searchlist.r_list != NULL))
    {
      /* Let the user know about the opencount.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
        _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
                          new->l_name, new->l_ns, new->l_direct_opencount);

      /* If the user requested the object to be in the global namespace
         but it is not so far, add it now.  */
      if ((mode & RTLD_GLOBAL) && new->l_global == 0)
        (void) add_to_global (new);

      assert (_dl_debug_initialize (0, args->nsid)->r_state == RT_CONSISTENT);

      return;
    }

  /* Load that object's dependencies.  */
  _dl_map_object_deps (new, NULL, 0, 0,
                       mode & (__RTLD_DLOPEN | RTLD_DEEPBIND | __RTLD_AUDIT));

  /* So far, so good.  Now check the versions.  */
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    if (new->l_searchlist.r_list[i]->l_real->l_versions == NULL)
      (void) _dl_check_map_versions (new->l_searchlist.r_list[i]->l_real,
                                     0, 0);

#ifdef SHARED
  /* Auditing checkpoint: we have added all objects.  */
  if (__glibc_unlikely (GLRO(dl_naudit) > 0))
    {
      struct link_map *head = GL(dl_ns)[new->l_ns]._ns_loaded;
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          struct audit_ifaces *afct = GLRO(dl_audit);
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie, LA_ACT_CONSISTENT);

              afct = afct->next;
            }
        }
    }
#endif

  /* Notify the debugger all new objects are now ready to go.  */
  struct r_debug *r = _dl_debug_initialize (0, args->nsid);
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
  LIBC_PROBE (map_complete, 3, args->nsid, r, new);

  /* Print scope information.  */
  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
    _dl_show_scope (new, 0);

  /* Only do lazy relocation if `LD_BIND_NOW' is not set.  */
  int reloc_mode = mode & __RTLD_AUDIT;
  if (GLRO(dl_lazy))
    reloc_mode |= mode & RTLD_LAZY;

  /* Sort the objects by dependency for the relocation process.  This
     allows IFUNC relocations to work and it also means copy
     relocations of dependencies are overwritten when necessary.  */
  size_t nmaps = 0;
  struct link_map *l = new;
  do
    {
      if (! l->l_real->l_relocated)
        ++nmaps;
      l = l->l_next;
    }
  while (l != NULL);
  struct link_map *maps[nmaps];
  nmaps = 0;
  l = new;
  do
    {
      if (! l->l_real->l_relocated)
        maps[nmaps++] = l;
      l = l->l_next;
    }
  while (l != NULL);
  if (nmaps > 1)
    {
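      /* SEEN counts how often the entry at each position has been processed.
         Once the next candidate has already been handled more times than
         there are remaining entries, the loop skips ahead instead of moving
         it again, so cyclic dependencies cannot make the sort run forever.  */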
      uint16_t seen[nmaps];
      memset (seen, '\0', sizeof (seen));
      size_t i = 0;
      while (1)
        {
          ++seen[i];
          struct link_map *thisp = maps[i];

          /* Find the last object in the list for which the current one is
             a dependency and move the current object behind the object
             with the dependency.  */
          size_t k = nmaps - 1;
          while (k > i)
            {
              struct link_map **runp = maps[k]->l_initfini;
              if (runp != NULL)
                /* Look through the dependencies of the object.  */
                while (*runp != NULL)
                  if (__glibc_unlikely (*runp++ == thisp))
                    {
                      /* Move the current object to the back past the last
                         object with it as the dependency.  */
                      memmove (&maps[i], &maps[i + 1],
                               (k - i) * sizeof (maps[0]));
                      maps[k] = thisp;

                      if (seen[i + 1] > nmaps - i)
                        {
                          ++i;
                          goto next_clear;
                        }

                      uint16_t this_seen = seen[i];
                      memmove (&seen[i], &seen[i + 1],
                               (k - i) * sizeof (seen[0]));
                      seen[k] = this_seen;

                      goto next;
                    }

              --k;
            }

          if (++i == nmaps)
            break;

        next_clear:
          memset (&seen[i], 0, (nmaps - i) * sizeof (seen[0]));

        next:;
        }
    }

  int relocation_in_progress = 0;
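
  /* Walk the sorted list from the end towards the front so that
     dependencies are relocated before the objects that use them; this is
     what lets IFUNC resolvers living in a dependency run safely while its
     dependents are being relocated.  */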
  for (size_t i = nmaps; i-- > 0; )
    {
      l = maps[i];

      if (! relocation_in_progress)
        {
          /* Notify the debugger that relocations are about to happen.  */
          LIBC_PROBE (reloc_start, 2, args->nsid, r);
          relocation_in_progress = 1;
        }

#ifdef SHARED
      if (__glibc_unlikely (GLRO(dl_profile) != NULL))
        {
          /* If this here is the shared object which we want to profile
             make sure the profile is started.  We can find out whether
             this is necessary or not by observing the `_dl_profile_map'
             variable.  If it was NULL but is not NULL afterwards we must
             start the profiling.  */
          struct link_map *old_profile_map = GL(dl_profile_map);

          _dl_relocate_object (l, l->l_scope, reloc_mode | RTLD_LAZY, 1);

          if (old_profile_map == NULL && GL(dl_profile_map) != NULL)
            {
              /* We must prepare the profiling.  */
              _dl_start_profile ();

              /* Prevent unloading the object.  */
              GL(dl_profile_map)->l_flags_1 |= DF_1_NODELETE;
            }
        }
      else
#endif
        _dl_relocate_object (l, l->l_scope, reloc_mode, 0);
    }

  /* If the file is not loaded now as a dependency, add the search
     list of the newly loaded object to the scope.  */
  bool any_tls = false;
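  /* Index of the first object in the search list that still needs its static
     TLS block initialized; those objects get a second pass below, after all
     _dl_add_to_slotinfo calls have been made.  */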
  unsigned int first_static_tls = new->l_searchlist.r_nlist;
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];
      int from_scope = 0;

      /* If the initializer has been called already, the object has
         not been loaded here and now.  */
      if (imap->l_init_called && imap->l_type == lt_loaded)
        {
          struct r_scope_elem **runp = imap->l_scope;
          size_t cnt = 0;

          while (*runp != NULL)
            {
              if (*runp == &new->l_searchlist)
                break;
              ++cnt;
              ++runp;
            }

          if (*runp != NULL)
            /* Avoid duplicates.  */
            continue;

          if (__glibc_unlikely (cnt + 1 >= imap->l_scope_max))
            {
              /* The 'r_scope' array is too small.  Allocate a new one
                 dynamically.  */
              size_t new_size;
              struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

              if (imap->l_scope != imap->l_scope_mem
                  && imap->l_scope_max < SCOPE_ELEMS (imap))
                {
                  new_size = SCOPE_ELEMS (imap);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max * 2;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlopen", NULL,
                                      N_("cannot create scope list"));
                }

              memcpy (newp, imap->l_scope, cnt * sizeof (imap->l_scope[0]));
              struct r_scope_elem **old = imap->l_scope;

              imap->l_scope = newp;
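
              /* The old array may still be in use by concurrent symbol
                 lookups; _dl_scope_free defers the actual free until no
                 thread can be referencing it anymore.  */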
              if (old != imap->l_scope_mem)
                _dl_scope_free (old);

              imap->l_scope_max = new_size;
            }

          /* First terminate the extended list.  Otherwise a thread
             might use the new last element and then use the garbage
             at offset IDX+1.  */
          imap->l_scope[cnt + 1] = NULL;
          atomic_write_barrier ();
          imap->l_scope[cnt] = &new->l_searchlist;

          /* Print only new scope information.  */
          from_scope = cnt;
        }
      /* Only add TLS memory if this object is loaded now and
         therefore is not yet initialized.  */
      else if (! imap->l_init_called
               /* Only if the module defines thread local data.  */
               && __builtin_expect (imap->l_tls_blocksize > 0, 0))
        {
          /* Now that we know the object is loaded successfully add
             modules containing TLS data to the slot info table.  We
             might have to increase its size.  */
          _dl_add_to_slotinfo (imap);

          if (imap->l_need_tls_init
              && first_static_tls == new->l_searchlist.r_nlist)
            first_static_tls = i;

          /* We have to bump the generation counter.  */
          any_tls = true;
        }

      /* Print scope information.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
        _dl_show_scope (imap, from_scope);
    }

  /* Bump the generation number if necessary.  */
  if (any_tls && __builtin_expect (++GL(dl_tls_generation) == 0, 0))
    _dl_fatal_printf (N_("\
TLS generation counter wrapped!  Please report this."));

  /* We need a second pass for static tls data, because _dl_update_slotinfo
     must not be run while calls to _dl_add_to_slotinfo are still pending.  */
  for (unsigned int i = first_static_tls; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      if (imap->l_need_tls_init
          && ! imap->l_init_called
          && imap->l_tls_blocksize > 0)
        {
          /* For static TLS we have to allocate the memory here and
             now, but we can delay updating the DTV.  */
          imap->l_need_tls_init = 0;
#ifdef SHARED
          /* Update the slot information data for at least the
             generation of the DSO we are allocating data for.  */
          _dl_update_slotinfo (imap->l_tls_modid);
#endif

          GL(dl_init_static_tls) (imap);
          assert (imap->l_need_tls_init == 0);
        }
    }

  /* Notify the debugger all new objects have been relocated.  */
  if (relocation_in_progress)
    LIBC_PROBE (reloc_complete, 3, args->nsid, r, new);

#ifndef SHARED
  DL_STATIC_INIT (new);
#endif

  /* Run the initializer functions of new objects.  */
  _dl_init (new, args->argc, args->argv, args->env);

  /* Now we can make the new map available in the global scope.  */
  if (mode & RTLD_GLOBAL)
    /* Move the object in the global namespace.  */
    if (add_to_global (new) != 0)
      /* It failed.  */
      return;

#ifndef SHARED
  /* We must be the static _dl_open in libc.a.  A static program that
     has loaded a dynamic object now has competition.  */
  __libc_multiple_libcs = 1;
#endif

  /* Let the user know about the opencount.  */
  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
    _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
                      new->l_name, new->l_ns, new->l_direct_opencount);
}

void *
_dl_open (const char *file, int mode, const void *caller_dlopen, Lmid_t nsid,
          int argc, char *argv[], char *env[])
{
  if ((mode & RTLD_BINDING_MASK) == 0)
    /* One of the flags must be set.  */
    _dl_signal_error (EINVAL, file, NULL, N_("invalid mode for dlopen()"));

  /* Make sure we are alone.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  if (__glibc_unlikely (nsid == LM_ID_NEWLM))
    {
      /* Find a new namespace.  */
      for (nsid = 1; DL_NNS > 1 && nsid < GL(dl_nns); ++nsid)
        if (GL(dl_ns)[nsid]._ns_loaded == NULL)
          break;

      if (__glibc_unlikely (nsid == DL_NNS))
        {
          /* No more namespace available.  */
          __rtld_lock_unlock_recursive (GL(dl_load_lock));

          _dl_signal_error (EINVAL, file, NULL, N_("\
no more namespaces available for dlmopen()"));
        }
      else if (nsid == GL(dl_nns))
        {
          __rtld_lock_initialize (GL(dl_ns)[nsid]._ns_unique_sym_table.lock);
          ++GL(dl_nns);
        }
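
      /* Set up the debug interface for the fresh namespace before anything
         is loaded into it, so a debugger inspecting the per-namespace
         r_debug data sees a consistent (empty) state.  */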
      _dl_debug_initialize (0, nsid)->r_state = RT_CONSISTENT;
    }
  /* Never allow loading a DSO in a namespace which is empty.  Such
     direct placement only causes problems.  Also don't allow
     loading into a namespace used for auditing.  */
  else if (__glibc_unlikely (nsid != LM_ID_BASE && nsid != __LM_ID_CALLER)
           && (__glibc_unlikely (nsid < 0 || nsid >= GL(dl_nns))
               /* This prevents the [NSID] index expressions from being
                  evaluated, so the compiler won't think that we are
                  accessing an invalid index here in the !SHARED case where
                  DL_NNS is 1 and so any NSID != 0 is invalid.  */
               || DL_NNS == 1
               || GL(dl_ns)[nsid]._ns_nloaded == 0
               || GL(dl_ns)[nsid]._ns_loaded->l_auditing))
    _dl_signal_error (EINVAL, file, NULL,
                      N_("invalid target namespace in dlmopen()"));

  struct dl_open_args args;
  args.file = file;
  args.mode = mode;
  args.caller_dlopen = caller_dlopen;
  args.caller_dl_open = RETURN_ADDRESS (0);
  args.map = NULL;
  args.nsid = nsid;
  args.argc = argc;
  args.argv = argv;
  args.env = env;

  struct dl_exception exception;
  int errcode = _dl_catch_exception (&exception, dl_open_worker, &args);

#if defined USE_LDCONFIG && !defined MAP_COPY
  /* We must unmap the cache file.  */
  _dl_unload_cache ();
#endif

  /* See if an error occurred during loading.  */
  if (__glibc_unlikely (exception.errstring != NULL))
    {
      /* Remove the object from memory.  It may be in an inconsistent
         state if relocation failed, for example.  */
      if (args.map)
        {
          /* Maybe some of the modules which were loaded use TLS.
             Since it will be removed in the following _dl_close call
             we have to mark the dtv array as having gaps to fill the
             holes.  This is a pessimistic assumption which won't hurt
             if not true.  There is no need to do this when we are
             loading the auditing DSOs since TLS has not yet been set
             up.  */
          if ((mode & __RTLD_AUDIT) == 0)
            GL(dl_tls_dtv_gaps) = true;

          _dl_close_worker (args.map, true);
        }

      assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);

      /* Release the lock.  */
      __rtld_lock_unlock_recursive (GL(dl_load_lock));

      /* Reraise the error.  */
      _dl_signal_exception (errcode, &exception, NULL);
    }

  assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);

  /* Release the lock.  */
  __rtld_lock_unlock_recursive (GL(dl_load_lock));

  return args.map;
}

void
_dl_show_scope (struct link_map *l, int from)
{
  _dl_debug_printf ("object=%s [%lu]\n",
                    DSO_FILENAME (l->l_name), l->l_ns);
  if (l->l_scope != NULL)
    for (int scope_cnt = from; l->l_scope[scope_cnt] != NULL; ++scope_cnt)
      {
        _dl_debug_printf (" scope %u:", scope_cnt);

        for (unsigned int cnt = 0; cnt < l->l_scope[scope_cnt]->r_nlist; ++cnt)
          if (*l->l_scope[scope_cnt]->r_list[cnt]->l_name)
            _dl_debug_printf_c (" %s",
                                l->l_scope[scope_cnt]->r_list[cnt]->l_name);
          else
            _dl_debug_printf_c (" %s", RTLD_PROGNAME);

        _dl_debug_printf_c ("\n");
      }
  else
    _dl_debug_printf (" no scope\n");
  _dl_debug_printf ("\n");
}