/* Close a shared object opened by `_dl_open'.
   Copyright (C) 1996-2012 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <bits/libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>


/* Type of the destructor functions.  */
typedef void (*fini_t) (void);


/* Special l_idx value used to indicate which objects remain loaded.  */
#define IDX_STILL_USED -1
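
/* Note: while _dl_close_worker runs, l_idx is temporarily reused as an
   index into its local MAPS array; maps that must stay loaded have it
   overwritten with IDX_STILL_USED so later passes can tell survivors from
   objects about to be unloaded.  */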


/* Returns true if a non-empty entry was found.  */
static bool
remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
                 bool should_be_there)
{
  if (idx - disp >= listp->len)
    {
      if (listp->next == NULL)
        {
          /* The index is not actually valid in the slotinfo list,
             because this object was closed before it was fully set
             up due to some error.  */
          assert (! should_be_there);
        }
      else
        {
          if (remove_slotinfo (idx, listp->next, disp + listp->len,
                               should_be_there))
            return true;

          /* No non-empty entry.  Search from the end of this element's
             slotinfo array.  */
          idx = disp + listp->len;
        }
    }
  else
    {
      struct link_map *old_map = listp->slotinfo[idx - disp].map;

      /* The entry might still be in its unused state if we are closing an
         object that wasn't fully set up.  */
      if (__builtin_expect (old_map != NULL, 1))
        {
          assert (old_map->l_tls_modid == idx);

          /* Mark the entry as unused.  */
          listp->slotinfo[idx - disp].gen = GL(dl_tls_generation) + 1;
          listp->slotinfo[idx - disp].map = NULL;
        }

      /* If this is not the last currently used entry no need to look
         further.  */
      if (idx != GL(dl_tls_max_dtv_idx))
        return true;
    }

  while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
    {
      --idx;

      if (listp->slotinfo[idx - disp].map != NULL)
        {
          /* Found a new last used index.  */
          GL(dl_tls_max_dtv_idx) = idx;
          return true;
        }
    }

  /* No non-empty entry in this list element.  */
  return false;
}
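
/* For illustration: if one slotinfo list element covers module ids 1..64
   and modules 1..5 are live, closing the module with modid 5 clears its
   slot and then walks backwards, leaving GL(dl_tls_max_dtv_idx) at the
   highest modid whose map pointer is still non-NULL.  If no dynamically
   loaded TLS module remains at all, the caller in _dl_close_worker resets
   the maximum to GL(dl_tls_static_nelem) instead.  */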


void
_dl_close_worker (struct link_map *map)
{
  /* One less direct use.  */
  --map->l_direct_opencount;

  /* If _dl_close is called recursively (some destructor calls dlclose),
     just record that the parent _dl_close will need to do garbage collection
     again and return.  */
  static enum { not_pending, pending, rerun } dl_close_state;

  if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
      || dl_close_state != not_pending)
    {
      if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
        dl_close_state = rerun;

      /* There are still references to this object.  Do nothing more.  */
      if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
        _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
                          map->l_name, map->l_direct_opencount);

      return;
    }
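
  /* The case this guards against: a destructor run later in this very
     function calls dlclose itself.  That nested call sees dl_close_state
     == pending, records 'rerun', and returns immediately; the outermost
     invocation then repeats the garbage collection via the retry label
     below.  */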

  Lmid_t nsid = map->l_ns;
  struct link_namespaces *ns = &GL(dl_ns)[nsid];

 retry:
  dl_close_state = pending;

  bool any_tls = false;
  const unsigned int nloaded = ns->_ns_nloaded;
  char used[nloaded];
  char done[nloaded];
  struct link_map *maps[nloaded];

  /* Run over the list and assign indexes to the link maps and enter
     them into the MAPS array.  */
  int idx = 0;
  for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
    {
      l->l_idx = idx;
      maps[idx] = l;
      ++idx;
    }
  assert (idx == nloaded);

  /* Prepare the bitmaps.  */
  memset (used, '\0', sizeof (used));
  memset (done, '\0', sizeof (done));

  /* Keep track of the lowest index link map we have covered already.  */
  int done_index = -1;
  while (++done_index < nloaded)
    {
      struct link_map *l = maps[done_index];

      if (done[done_index])
        /* Already handled.  */
        continue;

      /* Check whether this object is still used.  */
      if (l->l_type == lt_loaded
          && l->l_direct_opencount == 0
          && (l->l_flags_1 & DF_1_NODELETE) == 0
          && !used[done_index])
        continue;

      /* We need this object and we handle it now.  */
      done[done_index] = 1;
      used[done_index] = 1;
      /* Signal the object is still needed.  */
      l->l_idx = IDX_STILL_USED;

      /* Mark all dependencies as used.  */
      if (l->l_initfini != NULL)
        {
          struct link_map **lp = &l->l_initfini[1];
          while (*lp != NULL)
            {
              if ((*lp)->l_idx != IDX_STILL_USED)
                {
                  assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);

                  if (!used[(*lp)->l_idx])
                    {
                      used[(*lp)->l_idx] = 1;
                      if ((*lp)->l_idx - 1 < done_index)
                        done_index = (*lp)->l_idx - 1;
                    }
                }

              ++lp;
            }
        }
      /* And the same for relocation dependencies.  */
      if (l->l_reldeps != NULL)
        for (unsigned int j = 0; j < l->l_reldeps->act; ++j)
          {
            struct link_map *jmap = l->l_reldeps->list[j];

            if (jmap->l_idx != IDX_STILL_USED)
              {
                assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);

                if (!used[jmap->l_idx])
                  {
                    used[jmap->l_idx] = 1;
                    if (jmap->l_idx - 1 < done_index)
                      done_index = jmap->l_idx - 1;
                  }
              }
          }
    }
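
  /* Worked example (illustrative): suppose maps[2] was opened directly
     (so it is still used), it lists maps[4] in l_initfini, and maps[4] in
     turn depends on maps[1].  Handling maps[2] marks maps[4]; later,
     handling maps[4] marks maps[1], and because 1 is below the current
     done_index the loop rewinds and processes maps[1] and its own
     dependencies too.  Whatever is still unmarked after the loop is
     unreferenced and will be unloaded.  */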

  /* Sort the entries.  */
  _dl_sort_fini (maps, nloaded, used, nsid);

  /* Call all termination functions at once.  */
#ifdef SHARED
  bool do_audit = GLRO(dl_naudit) > 0 && !ns->_ns_loaded->l_auditing;
#endif
  bool unload_any = false;
  bool scope_mem_left = false;
  unsigned int unload_global = 0;
  unsigned int first_loaded = ~0;
  for (unsigned int i = 0; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];

      /* All elements must be in the same namespace.  */
      assert (imap->l_ns == nsid);

      if (!used[i])
        {
          assert (imap->l_type == lt_loaded
                  && (imap->l_flags_1 & DF_1_NODELETE) == 0);

          /* Call its termination function.  Do not do it for
             half-cooked objects.  */
          if (imap->l_init_called)
            {
              /* When debugging print a message first.  */
              if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS,
                                    0))
                _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
                                  imap->l_name, nsid);

              if (imap->l_info[DT_FINI_ARRAY] != NULL)
                {
                  ElfW(Addr) *array =
                    (ElfW(Addr) *) (imap->l_addr
                                    + imap->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
                  unsigned int sz = (imap->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
                                     / sizeof (ElfW(Addr)));

                  while (sz-- > 0)
                    ((fini_t) array[sz]) ();
                }

              /* Next try the old-style destructor.  */
              if (imap->l_info[DT_FINI] != NULL)
                (*(void (*) (void)) DL_DT_FINI_ADDRESS
                 (imap, ((void *) imap->l_addr
                         + imap->l_info[DT_FINI]->d_un.d_ptr))) ();
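
              /* Ordering note: the DT_FINI_ARRAY entries above run in
                 reverse array order (an array {f0, f1, f2} runs f2, then
                 f1, then f0), and only afterwards is the single legacy
                 DT_FINI routine invoked; this is the mirror image of the
                 constructor order used at load time.  */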
            }

#ifdef SHARED
          /* Auditing checkpoint: we remove an object.  */
          if (__builtin_expect (do_audit, 0))
            {
              struct audit_ifaces *afct = GLRO(dl_audit);
              for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
                {
                  if (afct->objclose != NULL)
                    /* Return value is ignored.  */
                    (void) afct->objclose (&imap->l_audit[cnt].cookie);

                  afct = afct->next;
                }
            }
#endif

          /* This object must not be used anymore.  */
          imap->l_removed = 1;

          /* We indeed have an object to remove.  */
          unload_any = true;

          if (imap->l_global)
            ++unload_global;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
      /* Else used[i].  */
      else if (imap->l_type == lt_loaded)
        {
          struct r_scope_elem *new_list = NULL;

          if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
            {
              /* The object is still used.  But one of the objects we are
                 unloading right now is responsible for loading it.  If
                 the current object does not have its own scope yet we
                 have to create one.  This has to be done before running
                 the finalizers.

                 To do this count the number of dependencies.  */
              unsigned int cnt;
              for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
                ;

              /* We simply reuse the l_initfini list.  */
              imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
              imap->l_searchlist.r_nlist = cnt;

              new_list = &imap->l_searchlist;
            }

          /* Count the number of scopes which remain after the unload.
             When we add the local search list count it.  Always add
             one for the terminating NULL pointer.  */
          size_t remain = (new_list != NULL) + 1;
          bool removed_any = false;
          for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
            /* This relies on l_scope[] entries being always set either
               to its own l_symbolic_searchlist address, or some map's
               l_searchlist address.  */
            if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
              {
                struct link_map *tmap = (struct link_map *)
                  ((char *) imap->l_scope[cnt]
                   - offsetof (struct link_map, l_searchlist));
                assert (tmap->l_ns == nsid);
                if (tmap->l_idx == IDX_STILL_USED)
                  ++remain;
                else
                  removed_any = true;
              }
            else
              ++remain;
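
          /* The pointer arithmetic in the loop above is the usual
             "containing structure" trick: every l_scope[] entry other than
             this map's own l_symbolic_searchlist points at the l_searchlist
             member embedded in some link_map, so subtracting
             offsetof (struct link_map, l_searchlist) recovers that map and
             lets us check whether it survives the unload.  */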

          if (removed_any)
            {
              /* Always allocate a new array for the scope.  This is
                 necessary since we must be able to determine the last
                 user of the current array.  If possible use the link map's
                 memory.  */
              size_t new_size;
              struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

              if (imap->l_scope != imap->l_scope_mem
                  && remain < SCOPE_ELEMS (imap))
                {
                  new_size = SCOPE_ELEMS (imap);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlclose", NULL,
                                      N_("cannot create scope list"));
                }

              /* Copy over the remaining scope elements.  */
              remain = 0;
              for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
                {
                  if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
                    {
                      struct link_map *tmap = (struct link_map *)
                        ((char *) imap->l_scope[cnt]
                         - offsetof (struct link_map, l_searchlist));
                      if (tmap->l_idx != IDX_STILL_USED)
                        {
                          /* Remove the scope.  Or replace with own map's
                             scope.  */
                          if (new_list != NULL)
                            {
                              newp[remain++] = new_list;
                              new_list = NULL;
                            }
                          continue;
                        }
                    }

                  newp[remain++] = imap->l_scope[cnt];
                }
              newp[remain] = NULL;

              struct r_scope_elem **old = imap->l_scope;

              imap->l_scope = newp;

              /* No user anymore, we can free it now.  */
              if (old != imap->l_scope_mem)
                {
                  if (_dl_scope_free (old))
                    /* If _dl_scope_free used THREAD_GSCOPE_WAIT (),
                       no need to repeat it.  */
                    scope_mem_left = false;
                }
              else
                scope_mem_left = true;

              imap->l_scope_max = new_size;
            }
          else if (new_list != NULL)
            {
              /* We didn't change the scope array, so reset the search
                 list.  */
              imap->l_searchlist.r_list = NULL;
              imap->l_searchlist.r_nlist = 0;
            }

          /* The loader is gone, so mark the object as not having one.
             Note: l_idx != IDX_STILL_USED -> object will be removed.  */
          if (imap->l_loader != NULL
              && imap->l_loader->l_idx != IDX_STILL_USED)
            imap->l_loader = NULL;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
    }

  /* If there are no objects to unload, do nothing further.  */
  if (!unload_any)
    goto out;

#ifdef SHARED
  /* Auditing checkpoint: we will start deleting objects.  */
  if (__builtin_expect (do_audit, 0))
    {
      struct link_map *head = ns->_ns_loaded;
      struct audit_ifaces *afct = GLRO(dl_audit);
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie, LA_ACT_DELETE);

              afct = afct->next;
            }
        }
    }
#endif

  /* Notify the debugger we are about to remove some loaded objects.  */
  struct r_debug *r = _dl_debug_initialize (0, nsid);
  r->r_state = RT_DELETE;
  _dl_debug_state ();
  LIBC_PROBE (unmap_start, 2, nsid, r);

  if (unload_global)
    {
      /* Some objects are in the global scope list.  Remove them.  */
      struct r_scope_elem *ns_msl = ns->_ns_main_searchlist;
      unsigned int i;
      unsigned int j = 0;
      unsigned int cnt = ns_msl->r_nlist;

      while (cnt > 0 && ns_msl->r_list[cnt - 1]->l_removed)
        --cnt;

      if (cnt + unload_global == ns_msl->r_nlist)
        /* Speed up removing most recently added objects.  */
        j = cnt;
      else
        for (i = 0; i < cnt; i++)
          if (ns_msl->r_list[i]->l_removed == 0)
            {
              if (i != j)
                ns_msl->r_list[j] = ns_msl->r_list[i];
              j++;
            }
      ns_msl->r_nlist = j;
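
      /* Illustration: with r_list = {A, B, C, D} where B and D were just
         removed, trimming the tail leaves cnt == 3, the copy loop compacts
         the survivors to {A, C}, and r_nlist drops to 2.  When every
         removed object sits at the tail, the copy is skipped and only
         r_nlist is lowered.  */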
    }

  if (!RTLD_SINGLE_THREAD_P
      && (unload_global
          || scope_mem_left
          || (GL(dl_scope_free_list) != NULL
              && GL(dl_scope_free_list)->count)))
    {
      THREAD_GSCOPE_WAIT ();

      /* Now we can free any queued old scopes.  */
      struct dl_scope_free_list *fsl = GL(dl_scope_free_list);
      if (fsl != NULL)
        while (fsl->count > 0)
          free (fsl->list[--fsl->count]);
    }
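
  /* Why the wait matters: another thread may still be inside a symbol
     lookup that is walking one of the scope arrays replaced above.
     THREAD_GSCOPE_WAIT returns only once every thread has left that
     critical section, after which the old arrays queued on
     dl_scope_free_list can be freed without racing a concurrent dlsym or
     lazy relocation.  */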

  size_t tls_free_start;
  size_t tls_free_end;
  tls_free_start = tls_free_end = NO_TLS_OFFSET;

  /* We modify the list of loaded objects.  */
  __rtld_lock_lock_recursive (GL(dl_load_write_lock));

  /* Check each element of the search list to see if all references to
     it are gone.  */
  for (unsigned int i = first_loaded; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];
      if (!used[i])
        {
          assert (imap->l_type == lt_loaded);

          /* That was the last reference, and this was a dlopen-loaded
             object.  We can unmap it.  */

          /* Remove the object from the dtv slotinfo array if it uses TLS.  */
          if (__builtin_expect (imap->l_tls_blocksize > 0, 0))
            {
              any_tls = true;

              if (GL(dl_tls_dtv_slotinfo_list) != NULL
                  && ! remove_slotinfo (imap->l_tls_modid,
                                        GL(dl_tls_dtv_slotinfo_list), 0,
                                        imap->l_init_called))
                /* All dynamically loaded modules with TLS are unloaded.  */
                GL(dl_tls_max_dtv_idx) = GL(dl_tls_static_nelem);

              if (imap->l_tls_offset != NO_TLS_OFFSET
                  && imap->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
                {
                  /* Collect a contiguous chunk built from the objects in
                     this search list, going in either direction.  When the
                     whole chunk is at the end of the used area then we can
                     reclaim it.  */
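                  /* Layout reminder (as the bookkeeping below treats it):
                     under TLS_TCB_AT_TP the static TLS area grows downward
                     from the thread control block, so this module's block
                     covers offsets [l_tls_offset - l_tls_blocksize,
                     l_tls_offset); under TLS_DTV_AT_TP it lies above the
                     thread pointer and covers [l_tls_firstbyte_offset,
                     l_tls_offset + l_tls_blocksize).  Either way, adjacent
                     freed blocks are merged and GL(dl_tls_static_used) is
                     lowered only when the merged chunk ends at the current
                     watermark.  */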
#if TLS_TCB_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET
                      || (size_t) imap->l_tls_offset == tls_free_start)
                    {
                      /* Extend the contiguous chunk being reclaimed.  */
                      tls_free_start
                        = imap->l_tls_offset - imap->l_tls_blocksize;

                      if (tls_free_end == NO_TLS_OFFSET)
                        tls_free_end = imap->l_tls_offset;
                    }
                  else if (imap->l_tls_offset - imap->l_tls_blocksize
                           == tls_free_end)
                    /* Extend the chunk backwards.  */
                    tls_free_end = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked unless we can free
                         one block right away.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        {
                          GL(dl_tls_static_used) = tls_free_start;
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                      else if ((size_t) imap->l_tls_offset
                               == GL(dl_tls_static_used))
                        GL(dl_tls_static_used)
                          = imap->l_tls_offset - imap->l_tls_blocksize;
                      else if (tls_free_end < (size_t) imap->l_tls_offset)
                        {
                          /* We pick the later block.  It has a chance to
                             be freed.  */
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                    }
#elif TLS_DTV_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET)
                    {
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = (imap->l_tls_offset
                                      + imap->l_tls_blocksize);
                    }
                  else if (imap->l_tls_firstbyte_offset == tls_free_end)
                    /* Extend the contiguous chunk being reclaimed.  */
                    tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == tls_free_start)
                    /* Extend the chunk backwards.  */
                    tls_free_start = imap->l_tls_firstbyte_offset;
                  /* This isn't contiguous with the last chunk freed.
                     One of them will be leaked unless we can free
                     one block right away.  */
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == GL(dl_tls_static_used))
                    GL(dl_tls_static_used) = imap->l_tls_firstbyte_offset;
                  else if (tls_free_end == GL(dl_tls_static_used))
                    {
                      GL(dl_tls_static_used) = tls_free_start;
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
                  else if (tls_free_end < imap->l_tls_firstbyte_offset)
                    {
                      /* We pick the later block.  It has a chance to
                         be freed.  */
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
                }
            }

          /* We can unmap all the maps at once.  We determined the
             start address and length when we loaded the object and
             the `munmap' call does the rest.  */
          DL_UNMAP (imap);

          /* Finally, unlink the data structure and free it.  */
          if (imap->l_prev != NULL)
            imap->l_prev->l_next = imap->l_next;
          else
            {
#ifdef SHARED
              assert (nsid != LM_ID_BASE);
#endif
              ns->_ns_loaded = imap->l_next;

              /* Update the pointer to the head of the list
                 we leave for debuggers to examine.  */
              r->r_map = (void *) ns->_ns_loaded;
            }

          --ns->_ns_nloaded;
          if (imap->l_next != NULL)
            imap->l_next->l_prev = imap->l_prev;

          free (imap->l_versions);
          if (imap->l_origin != (char *) -1)
            free ((char *) imap->l_origin);

          free (imap->l_reldeps);

          /* Print debugging message.  */
          if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
            _dl_debug_printf ("\nfile=%s [%lu];  destroying link map\n",
                              imap->l_name, imap->l_ns);

          /* This name always is allocated.  */
          free (imap->l_name);

          /* Remove the list with all the names of the shared object.  */
          struct libname_list *lnp = imap->l_libname;
          do
            {
              struct libname_list *this = lnp;
              lnp = lnp->next;
              if (!this->dont_free)
                free (this);
            }
          while (lnp != NULL);

          /* Remove the searchlists.  */
          free (imap->l_initfini);

          /* Remove the scope array if we allocated it.  */
          if (imap->l_scope != imap->l_scope_mem)
            free (imap->l_scope);

          if (imap->l_phdr_allocated)
            free ((void *) imap->l_phdr);

          if (imap->l_rpath_dirs.dirs != (void *) -1)
            free (imap->l_rpath_dirs.dirs);
          if (imap->l_runpath_dirs.dirs != (void *) -1)
            free (imap->l_runpath_dirs.dirs);

          free (imap);
        }
    }

  __rtld_lock_unlock_recursive (GL(dl_load_write_lock));

  /* If we removed any object which uses TLS bump the generation counter.  */
  if (any_tls)
    {
      if (__builtin_expect (++GL(dl_tls_generation) == 0, 0))
        _dl_fatal_printf ("TLS generation counter wrapped!  Please report as described in <http://www.gnu.org/software/libc/bugs.html>.\n");

      if (tls_free_end == GL(dl_tls_static_used))
        GL(dl_tls_static_used) = tls_free_start;
    }

#ifdef SHARED
  /* Auditing checkpoint: we have deleted all objects.  */
  if (__builtin_expect (do_audit, 0))
    {
      struct link_map *head = ns->_ns_loaded;
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          struct audit_ifaces *afct = GLRO(dl_audit);
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie, LA_ACT_CONSISTENT);

              afct = afct->next;
            }
        }
    }
#endif

  if (__builtin_expect (ns->_ns_loaded == NULL, 0)
      && nsid == GL(dl_nns) - 1)
    do
      {
        --GL(dl_nns);
#ifndef SHARED
        if (GL(dl_nns) == 0)
          break;
#endif
      }
    while (GL(dl_ns)[GL(dl_nns) - 1]._ns_loaded == NULL);

  /* Notify the debugger those objects are finalized and gone.  */
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
  LIBC_PROBE (unmap_complete, 2, nsid, r);

  /* Recheck if we need to retry, release the lock.  */
 out:
  if (dl_close_state == rerun)
    goto retry;

  dl_close_state = not_pending;
}


void
_dl_close (void *_map)
{
  struct link_map *map = _map;

  /* First see whether we can remove the object at all.  */
  if (__builtin_expect (map->l_flags_1 & DF_1_NODELETE, 0))
    {
      assert (map->l_init_called);
      /* Nope.  Do nothing.  */
      return;
    }

  if (__builtin_expect (map->l_direct_opencount, 1) == 0)
    GLRO(dl_signal_error) (0, map->l_name, NULL, N_("shared object not open"));

  /* Acquire the lock.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  _dl_close_worker (map);

  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}
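
/* For orientation, the user-visible entry point into the code above is
   dlclose(3).  A minimal, purely illustrative caller (libfoo.so is a
   made-up name) looks like:

     void *handle = dlopen ("libfoo.so", RTLD_NOW);
     ...
     if (handle != NULL)
       dlclose (handle);    <- reaches _dl_close and _dl_close_worker

   The object is only unmapped once its last dlopen reference and every
   reference from other still-loaded objects are gone, which is exactly
   what the mark phase at the top of _dl_close_worker computes.  */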