/* Close a shared object opened by `_dl_open'.
   Copyright (C) 1996-2013 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <bits/libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>
/* Type of the destructor functions called via DT_FINI and
   DT_FINI_ARRAY.  */
typedef void (*fini_t) (void);


/* Special l_idx value used to indicate which objects remain loaded.  */
#define IDX_STILL_USED -1
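
/* Within _dl_close_worker each link map's l_idx temporarily holds its
   position in the local MAPS array built there; maps that are to survive
   the unload get l_idx overwritten with IDX_STILL_USED instead.  */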
/* Returns true if a non-empty entry was found.  */
static bool
remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
		 bool should_be_there)
{
  if (idx - disp >= listp->len)
    {
      if (listp->next == NULL)
	{
	  /* The index is not actually valid in the slotinfo list,
	     because this object was closed before it was fully set
	     up due to some error.  */
	  assert (! should_be_there);
	}
      else
	{
	  if (remove_slotinfo (idx, listp->next, disp + listp->len,
			       should_be_there))
	    return true;

	  /* No non-empty entry.  Search from the end of this element's
	     slotinfo array.  */
	  idx = disp + listp->len;
	}
    }
  else
    {
      struct link_map *old_map = listp->slotinfo[idx - disp].map;

      /* The entry might still be in its unused state if we are closing an
	 object that wasn't fully set up.  */
      if (__builtin_expect (old_map != NULL, 1))
	{
	  assert (old_map->l_tls_modid == idx);

	  /* Mark the entry as unused.  */
	  listp->slotinfo[idx - disp].gen = GL(dl_tls_generation) + 1;
	  listp->slotinfo[idx - disp].map = NULL;
	}

      /* If this is not the last currently used entry no need to look
	 further.  */
      if (idx != GL(dl_tls_max_dtv_idx))
	return true;
    }

  while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
    {
      --idx;

      if (listp->slotinfo[idx - disp].map != NULL)
	{
	  /* Found a new last used index.  */
	  GL(dl_tls_max_dtv_idx) = idx;
	  return true;
	}
    }

  /* No non-empty entry in this list element.  */
  return false;
}
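
/* For orientation (the authoritative definitions live in dl-tls.c and
   ldsodefs.h): the dtv slotinfo is a chained list of fixed-size arrays
   of (generation, map) entries, indexed by TLS module ID.  DISP is the
   number of entries held by all earlier list elements, so module IDX
   lives at listp->slotinfo[idx - disp] in the element covering it.  */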
void
_dl_close_worker (struct link_map *map)
{
  /* One less direct use.  */
  --map->l_direct_opencount;

  /* If _dl_close is called recursively (some destructor calls dlclose),
     just record that the parent _dl_close will need to do garbage collection
     again and return.  */
  static enum { not_pending, pending, rerun } dl_close_state;

  if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
      || dl_close_state != not_pending)
    {
      if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
	dl_close_state = rerun;

      /* There are still references to this object.  Do nothing more.  */
      if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
	_dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
			  map->l_name, map->l_direct_opencount);

      return;
    }

  Lmid_t nsid = map->l_ns;
  struct link_namespaces *ns = &GL(dl_ns)[nsid];

 retry:
  dl_close_state = pending;

  bool any_tls = false;
  const unsigned int nloaded = ns->_ns_nloaded;
  char used[nloaded];
  char done[nloaded];
  struct link_map *maps[nloaded];

  /* Run over the list and assign indexes to the link maps and enter
     them into the MAPS array.  */
  int idx = 0;
  for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
    {
      l->l_idx = idx;
      maps[idx] = l;
      ++idx;
    }
  assert (idx == nloaded);

  /* Prepare the bitmaps.  */
  memset (used, '\0', sizeof (used));
  memset (done, '\0', sizeof (done));
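
  /* What follows is the mark phase of a mark-and-sweep pass over the
     namespace: every map that is still directly opened, is not
     lt_loaded, or carries DF_1_NODELETE acts as a root, and everything
     reachable from a root via l_initfini and l_reldeps is marked used.
     Whatever stays unmarked is garbage and gets unloaded below.  */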
  /* Keep track of the lowest index link map we have covered already.  */
  int done_index = -1;
  while (++done_index < nloaded)
    {
      struct link_map *l = maps[done_index];

      if (done[done_index])
	/* Already handled.  */
	continue;

      /* Check whether this object is still used.  */
      if (l->l_type == lt_loaded
	  && l->l_direct_opencount == 0
	  && (l->l_flags_1 & DF_1_NODELETE) == 0
	  && !used[done_index])
	continue;

      /* We need this object and we handle it now.  */
      done[done_index] = 1;
      used[done_index] = 1;
      /* Signal the object is still needed.  */
      l->l_idx = IDX_STILL_USED;

      /* Mark all dependencies as used.  */
      if (l->l_initfini != NULL)
	{
	  /* We are always the zeroth entry, and since we don't include
	     ourselves in the dependency analysis start at 1.  */
	  struct link_map **lp = &l->l_initfini[1];
	  while (*lp != NULL)
	    {
	      if ((*lp)->l_idx != IDX_STILL_USED)
		{
		  assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);

		  if (!used[(*lp)->l_idx])
		    {
		      used[(*lp)->l_idx] = 1;
		      /* If we marked a new object as used, and we've
			 already processed it, then we need to go back
			 and process again from that point forward to
			 ensure we keep all of its dependencies also.  */
		      if ((*lp)->l_idx - 1 < done_index)
			done_index = (*lp)->l_idx - 1;
		    }
		}

	      ++lp;
	    }
	}
      /* And the same for relocation dependencies.  */
      if (l->l_reldeps != NULL)
	for (unsigned int j = 0; j < l->l_reldeps->act; ++j)
	  {
	    struct link_map *jmap = l->l_reldeps->list[j];

	    if (jmap->l_idx != IDX_STILL_USED)
	      {
		assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);

		if (!used[jmap->l_idx])
		  {
		    used[jmap->l_idx] = 1;
		    if (jmap->l_idx - 1 < done_index)
		      done_index = jmap->l_idx - 1;
		  }
	      }
	  }
    }
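
  /* Rewinding DONE_INDEX in the loop above makes the marking reach a
     fixed point: whenever an already-scanned map is newly marked used,
     scanning resumes from it so its own dependencies get marked too.  */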
  /* Sort the entries.  */
  _dl_sort_fini (maps, nloaded, used, nsid);
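
  /* After sorting, MAPS lists dependent objects before the objects they
     depend on, so each destructor run below can still safely call into
     the libraries its object uses.  */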
  /* Call all termination functions at once.  */
#ifdef SHARED
  bool do_audit = GLRO(dl_naudit) > 0 && !ns->_ns_loaded->l_auditing;
#endif
  bool unload_any = false;
  bool scope_mem_left = false;
  unsigned int unload_global = 0;
  unsigned int first_loaded = ~0;
  for (unsigned int i = 0; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];

      /* All elements must be in the same namespace.  */
      assert (imap->l_ns == nsid);

      if (!used[i])
	{
	  assert (imap->l_type == lt_loaded
		  && (imap->l_flags_1 & DF_1_NODELETE) == 0);

	  /* Call its termination function.  Do not do it for
	     half-cooked objects.  */
	  if (imap->l_init_called)
	    {
	      /* When debugging print a message first.  */
	      if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS,
				    0))
		_dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
				  imap->l_name, nsid);

	      if (imap->l_info[DT_FINI_ARRAY] != NULL)
		{
		  ElfW(Addr) *array =
		    (ElfW(Addr) *) (imap->l_addr
				    + imap->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
		  unsigned int sz = (imap->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
				     / sizeof (ElfW(Addr)));

		  while (sz-- > 0)
		    ((fini_t) array[sz]) ();
		}
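
	      /* Per the ELF gABI, termination functions in DT_FINI_ARRAY
		 are executed in reverse order of their appearance in the
		 array, hence the downward loop above.  */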
	      /* Next try the old-style destructor.  */
	      if (imap->l_info[DT_FINI] != NULL)
		(*(void (*) (void)) DL_DT_FINI_ADDRESS
		 (imap, ((void *) imap->l_addr
			 + imap->l_info[DT_FINI]->d_un.d_ptr))) ();
	    }

#ifdef SHARED
	  /* Auditing checkpoint: we remove an object.  */
	  if (__builtin_expect (do_audit, 0))
	    {
	      struct audit_ifaces *afct = GLRO(dl_audit);
	      for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
		{
		  if (afct->objclose != NULL)
		    /* Return value is ignored.  */
		    (void) afct->objclose (&imap->l_audit[cnt].cookie);

		  afct = afct->next;
		}
	    }
#endif

	  /* This object must not be used anymore.  */
	  imap->l_removed = 1;

	  /* We indeed have an object to remove.  */
	  unload_any = true;

	  if (imap->l_global)
	    ++unload_global;

	  /* Remember where the first dynamically loaded object is.  */
	  if (i < first_loaded)
	    first_loaded = i;
	}
      /* Else used[i].  */
      else if (imap->l_type == lt_loaded)
	{
	  struct r_scope_elem *new_list = NULL;

	  if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
	    {
	      /* The object is still used.  But one of the objects we are
		 unloading right now is responsible for loading it.  If
		 the current object does not have its own scope yet we
		 have to create one.  This has to be done before running
		 the finalizers.

		 To do this count the number of dependencies.  */
	      unsigned int cnt;
	      for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
		;

	      /* We simply reuse the l_initfini list.  */
	      imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
	      imap->l_searchlist.r_nlist = cnt;

	      new_list = &imap->l_searchlist;
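
	      /* This reuse works because _dl_map_object_deps allocates
		 room for two lists in l_initfini: the sorted list first,
		 then a terminating NULL, then a copy in the original
		 search order, which is what r_list points into here.  */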
	    }

	  /* Count the number of scopes which remain after the unload.
	     When we add the local search list count it.  Always add
	     one for the terminating NULL pointer.  */
	  size_t remain = (new_list != NULL) + 1;
	  bool removed_any = false;
	  for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
	    /* This relies on l_scope[] entries being always set either
	       to its own l_symbolic_searchlist address, or some map's
	       l_searchlist address.  */
	    if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
	      {
		struct link_map *tmap = (struct link_map *)
		  ((char *) imap->l_scope[cnt]
		   - offsetof (struct link_map, l_searchlist));
		assert (tmap->l_ns == nsid);
		if (tmap->l_idx == IDX_STILL_USED)
		  ++remain;
		else
		  removed_any = true;
	      }
	    else
	      ++remain;

	  if (removed_any)
	    {
	      /* Always allocate a new array for the scope.  This is
		 necessary since we must be able to determine the last
		 user of the current array.  If possible use the link map's
		 memory.  */
	      size_t new_size;
	      struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

	      if (imap->l_scope != imap->l_scope_mem
		  && remain < SCOPE_ELEMS (imap))
		{
		  new_size = SCOPE_ELEMS (imap);
		  newp = imap->l_scope_mem;
		}
	      else
		{
		  new_size = imap->l_scope_max;
		  newp = (struct r_scope_elem **)
		    malloc (new_size * sizeof (struct r_scope_elem *));
		  if (newp == NULL)
		    _dl_signal_error (ENOMEM, "dlclose", NULL,
				      N_("cannot create scope list"));
		}

	      /* Copy over the remaining scope elements.  */
	      remain = 0;
	      for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
		{
		  if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
		    {
		      struct link_map *tmap = (struct link_map *)
			((char *) imap->l_scope[cnt]
			 - offsetof (struct link_map, l_searchlist));
		      if (tmap->l_idx != IDX_STILL_USED)
			{
			  /* Remove the scope.  Or replace with own map's
			     scope.  */
			  if (new_list != NULL)
			    {
			      newp[remain++] = new_list;
			      new_list = NULL;
			    }
			  continue;
			}
		    }

		  newp[remain++] = imap->l_scope[cnt];
		}
	      newp[remain] = NULL;

	      struct r_scope_elem **old = imap->l_scope;

	      imap->l_scope = newp;

	      /* No user anymore, we can free it now.  */
	      if (old != imap->l_scope_mem)
		{
		  if (_dl_scope_free (old))
		    /* If _dl_scope_free used THREAD_GSCOPE_WAIT (),
		       no need to repeat it.  */
		    scope_mem_left = false;
		}
	      else
		scope_mem_left = true;

	      imap->l_scope_max = new_size;
	    }
	  else if (new_list != NULL)
	    {
	      /* We didn't change the scope array, so reset the search
		 list.  */
	      imap->l_searchlist.r_list = NULL;
	      imap->l_searchlist.r_nlist = 0;
	    }

	  /* The loader is gone, so mark the object as not having one.
	     Note: l_idx != IDX_STILL_USED -> object will be removed.  */
	  if (imap->l_loader != NULL
	      && imap->l_loader->l_idx != IDX_STILL_USED)
	    imap->l_loader = NULL;

	  /* Remember where the first dynamically loaded object is.  */
	  if (i < first_loaded)
	    first_loaded = i;
	}
    }

  /* If there are no objects to unload, do nothing further.  */
  if (!unload_any)
    goto out;
#ifdef SHARED
  /* Auditing checkpoint: we will start deleting objects.  */
  if (__builtin_expect (do_audit, 0))
    {
      struct link_map *head = ns->_ns_loaded;
      struct audit_ifaces *afct = GLRO(dl_audit);
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
	{
	  for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
	    {
	      if (afct->activity != NULL)
		afct->activity (&head->l_audit[cnt].cookie, LA_ACT_DELETE);

	      afct = afct->next;
	    }
	}
    }
#endif

  /* Notify the debugger we are about to remove some loaded objects.  */
  struct r_debug *r = _dl_debug_initialize (0, nsid);
  r->r_state = RT_DELETE;
  _dl_debug_state ();
  LIBC_PROBE (unmap_start, 2, nsid, r);

  if (unload_global)
    {
      /* Some objects are in the global scope list.  Remove them.  */
      struct r_scope_elem *ns_msl = ns->_ns_main_searchlist;
      unsigned int i;
      unsigned int j = 0;
      unsigned int cnt = ns_msl->r_nlist;

      while (cnt > 0 && ns_msl->r_list[cnt - 1]->l_removed)
	--cnt;

      if (cnt + unload_global == ns_msl->r_nlist)
	/* Speed up removing most recently added objects.  */
	j = cnt;
      else
	for (i = 0; i < cnt; i++)
	  if (ns_msl->r_list[i]->l_removed == 0)
	    {
	      if (i != j)
		ns_msl->r_list[j] = ns_msl->r_list[i];
	      j++;
	    }
      ns_msl->r_nlist = j;
    }
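
  /* In effect this is a stable in-place compaction of r_list.  For
     example, with r_list = {A, B, C, D} and only C being unloaded, CNT
     stays 4 (D is kept), the slow path copies A and B onto themselves,
     moves D down to index 2, and r_nlist drops from 4 to 3.  */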
  if (!RTLD_SINGLE_THREAD_P
      && (unload_global
	  || scope_mem_left
	  || (GL(dl_scope_free_list) != NULL
	      && GL(dl_scope_free_list)->count)))
    {
      THREAD_GSCOPE_WAIT ();

      /* Now we can free any queued old scopes.  */
      struct dl_scope_free_list *fsl = GL(dl_scope_free_list);
      if (fsl != NULL)
	while (fsl->count > 0)
	  free (fsl->list[--fsl->count]);
    }
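
  /* THREAD_GSCOPE_WAIT is the synchronization point with concurrent
     lock-free symbol lookups: it blocks until every thread has left the
     global-scope ("gscope") critical section, after which no thread can
     still be reading the scope arrays we are about to free or unmap.  */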
  size_t tls_free_start;
  size_t tls_free_end;
  tls_free_start = tls_free_end = NO_TLS_OFFSET;
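
  /* Static TLS is in effect a bump allocator whose high-water mark is
     GL(dl_tls_static_used), so freed blocks can only be reclaimed by
     lowering that mark.  The loop below therefore tries to grow one
     contiguous chunk [tls_free_start, tls_free_end) out of the dying
     objects' blocks and gives it back when the chunk ends exactly at
     the high-water mark.  */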
  /* We modify the list of loaded objects.  */
  __rtld_lock_lock_recursive (GL(dl_load_write_lock));

  /* Check each element of the search list to see if all references to
     it are gone.  */
  for (unsigned int i = first_loaded; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];
      if (!used[i])
	{
	  assert (imap->l_type == lt_loaded);

	  /* That was the last reference, and this was a dlopen-loaded
	     object.  We can unmap it.  */

	  /* Remove the object from the dtv slotinfo array if it uses TLS.  */
	  if (__builtin_expect (imap->l_tls_blocksize > 0, 0))
	    {
	      any_tls = true;

	      if (GL(dl_tls_dtv_slotinfo_list) != NULL
		  && ! remove_slotinfo (imap->l_tls_modid,
					GL(dl_tls_dtv_slotinfo_list), 0,
					imap->l_init_called))
		/* All dynamically loaded modules with TLS are unloaded.  */
		GL(dl_tls_max_dtv_idx) = GL(dl_tls_static_nelem);

	      if (imap->l_tls_offset != NO_TLS_OFFSET
		  && imap->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
		{
		  /* Collect a contiguous chunk built from the objects in
		     this search list, going in either direction.  When the
		     whole chunk is at the end of the used area then we can
		     reclaim it.  */
#if TLS_TCB_AT_TP
		  if (tls_free_start == NO_TLS_OFFSET
		      || (size_t) imap->l_tls_offset == tls_free_start)
		    {
		      /* Extend the contiguous chunk being reclaimed.  */
		      tls_free_start
			= imap->l_tls_offset - imap->l_tls_blocksize;

		      if (tls_free_end == NO_TLS_OFFSET)
			tls_free_end = imap->l_tls_offset;
		    }
		  else if (imap->l_tls_offset - imap->l_tls_blocksize
			   == tls_free_end)
		    /* Extend the chunk backwards.  */
		    tls_free_end = imap->l_tls_offset;
		  else
		    {
		      /* This isn't contiguous with the last chunk freed.
			 One of them will be leaked unless we can free
			 one block right away.  */
		      if (tls_free_end == GL(dl_tls_static_used))
			{
			  GL(dl_tls_static_used) = tls_free_start;
			  tls_free_end = imap->l_tls_offset;
			  tls_free_start
			    = tls_free_end - imap->l_tls_blocksize;
			}
		      else if ((size_t) imap->l_tls_offset
			       == GL(dl_tls_static_used))
			GL(dl_tls_static_used)
			  = imap->l_tls_offset - imap->l_tls_blocksize;
		      else if (tls_free_end < (size_t) imap->l_tls_offset)
			{
			  /* We pick the later block.  It has a chance to
			     be freed.  */
			  tls_free_end = imap->l_tls_offset;
			  tls_free_start
			    = tls_free_end - imap->l_tls_blocksize;
			}
		    }
#elif TLS_DTV_AT_TP
		  if (tls_free_start == NO_TLS_OFFSET)
		    {
		      tls_free_start = imap->l_tls_firstbyte_offset;
		      tls_free_end = (imap->l_tls_offset
				      + imap->l_tls_blocksize);
		    }
		  else if (imap->l_tls_firstbyte_offset == tls_free_end)
		    /* Extend the contiguous chunk being reclaimed.  */
		    tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
		  else if (imap->l_tls_offset + imap->l_tls_blocksize
			   == tls_free_start)
		    /* Extend the chunk backwards.  */
		    tls_free_start = imap->l_tls_firstbyte_offset;
		  /* This isn't contiguous with the last chunk freed.
		     One of them will be leaked unless we can free
		     one block right away.  */
		  else if (imap->l_tls_offset + imap->l_tls_blocksize
			   == GL(dl_tls_static_used))
		    GL(dl_tls_static_used) = imap->l_tls_firstbyte_offset;
		  else if (tls_free_end == GL(dl_tls_static_used))
		    {
		      GL(dl_tls_static_used) = tls_free_start;
		      tls_free_start = imap->l_tls_firstbyte_offset;
		      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
		    }
		  else if (tls_free_end < imap->l_tls_firstbyte_offset)
		    {
		      /* We pick the later block.  It has a chance to
			 be freed.  */
		      tls_free_start = imap->l_tls_firstbyte_offset;
		      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
		    }
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
		}
	    }

	  /* We can unmap all the maps at once.  We determined the
	     start address and length when we loaded the object and
	     the `munmap' call does the rest.  */
	  DL_UNMAP (imap);

	  /* Finally, unlink the data structure and free it.  */
	  if (imap->l_prev != NULL)
	    imap->l_prev->l_next = imap->l_next;
	  else
	    {
#ifdef SHARED
	      assert (nsid != LM_ID_BASE);
#endif
	      ns->_ns_loaded = imap->l_next;

	      /* Update the pointer to the head of the list
		 we leave for debuggers to examine.  */
	      r->r_map = (void *) ns->_ns_loaded;
	    }

	  --ns->_ns_nloaded;
	  if (imap->l_next != NULL)
	    imap->l_next->l_prev = imap->l_prev;

	  free (imap->l_versions);
	  if (imap->l_origin != (char *) -1)
	    free ((char *) imap->l_origin);

	  free (imap->l_reldeps);

	  /* Print debugging message.  */
	  if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
	    _dl_debug_printf ("\nfile=%s [%lu]; destroying link map\n",
			      imap->l_name, imap->l_ns);

	  /* This name is always allocated.  */
	  free (imap->l_name);

	  /* Remove the list with all the names of the shared object.  */
	  struct libname_list *lnp = imap->l_libname;
	  do
	    {
	      struct libname_list *this = lnp;
	      lnp = lnp->next;
	      if (!this->dont_free)
		free (this);
	    }
	  while (lnp != NULL);

	  /* Remove the searchlists.  */
	  free (imap->l_initfini);

	  /* Remove the scope array if we allocated it.  */
	  if (imap->l_scope != imap->l_scope_mem)
	    free (imap->l_scope);

	  if (imap->l_phdr_allocated)
	    free ((void *) imap->l_phdr);

	  if (imap->l_rpath_dirs.dirs != (void *) -1)
	    free (imap->l_rpath_dirs.dirs);
	  if (imap->l_runpath_dirs.dirs != (void *) -1)
	    free (imap->l_runpath_dirs.dirs);

	  free (imap);
	}
    }

  __rtld_lock_unlock_recursive (GL(dl_load_write_lock));

  /* If we removed any object which uses TLS bump the generation counter.  */
  if (any_tls)
    {
      if (__builtin_expect (++GL(dl_tls_generation) == 0, 0))
	_dl_fatal_printf ("TLS generation counter wrapped!  Please report as described in "REPORT_BUGS_TO".\n");

      if (tls_free_end == GL(dl_tls_static_used))
	GL(dl_tls_static_used) = tls_free_start;
    }
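
  /* Bumping GL(dl_tls_generation) invalidates stale dtv entries lazily:
     a thread touching TLS through __tls_get_addr notices that its dtv
     generation is out of date and refreshes the dtv before using it.  */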
#ifdef SHARED
  /* Auditing checkpoint: we have deleted all objects.  */
  if (__builtin_expect (do_audit, 0))
    {
      struct link_map *head = ns->_ns_loaded;
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
	{
	  struct audit_ifaces *afct = GLRO(dl_audit);
	  for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
	    {
	      if (afct->activity != NULL)
		afct->activity (&head->l_audit[cnt].cookie, LA_ACT_CONSISTENT);

	      afct = afct->next;
	    }
	}
    }
#endif

  if (__builtin_expect (ns->_ns_loaded == NULL, 0)
      && nsid == GL(dl_nns) - 1)
    do
      {
	--GL(dl_nns);
#ifndef SHARED
	if (GL(dl_nns) == 0)
	  break;
#endif
      }
    while (GL(dl_ns)[GL(dl_nns) - 1]._ns_loaded == NULL);
  /* Notify the debugger those objects are finalized and gone.  */
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
  LIBC_PROBE (unmap_complete, 2, nsid, r);

  /* Recheck if we need to retry, release the lock.  */
 out:
  if (dl_close_state == rerun)
    goto retry;

  dl_close_state = not_pending;
}


void
_dl_close (void *_map)
{
  struct link_map *map = _map;

  /* First see whether we can remove the object at all.  */
  if (__builtin_expect (map->l_flags_1 & DF_1_NODELETE, 0))
    {
      assert (map->l_init_called);
      /* Nope.  Do nothing.  */
      return;
    }

  if (__builtin_expect (map->l_direct_opencount, 1) == 0)
    GLRO(dl_signal_error) (0, map->l_name, NULL, N_("shared object not open"));

  /* Acquire the lock.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  _dl_close_worker (map);

  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}
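
/* A minimal usage sketch (hypothetical, not part of this file), showing
   how this code is reached through the public API:

     void *h = dlopen ("libfoo.so", RTLD_NOW);  // l_direct_opencount == 1
     ...
     dlclose (h);  // dlclose -> _dl_close -> _dl_close_worker: the count
                   // drops to 0, so libfoo.so and any dependencies that
                   // are no longer referenced are finalized and unmapped.

   Objects marked DF_1_NODELETE (e.g. linked with -Wl,-z,nodelete) are
   never unloaded; _dl_close returns for them without doing anything.  */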