elf/dl-close.c
/* Close a shared object opened by `_dl_open'.
   Copyright (C) 1996-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <bits/libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>

#include <dl-unmap-segments.h>


/* Type of the destructor functions.  */
typedef void (*fini_t) (void);


/* Special l_idx value used to indicate which objects remain loaded.  */
#define IDX_STILL_USED -1
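
/* Note: while _dl_close_worker runs, each link map's l_idx field
   temporarily holds the map's position in the local MAPS array; maps
   that must survive the close are re-marked with IDX_STILL_USED so the
   dependency walk below can tell them apart.  */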

/* Returns true if a non-empty entry was found.  */
static bool
remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
                 bool should_be_there)
{
  if (idx - disp >= listp->len)
    {
      if (listp->next == NULL)
        {
          /* The index is not actually valid in the slotinfo list,
             because this object was closed before it was fully set
             up due to some error.  */
          assert (! should_be_there);
        }
      else
        {
          if (remove_slotinfo (idx, listp->next, disp + listp->len,
                               should_be_there))
            return true;

          /* No non-empty entry.  Search from the end of this element's
             slotinfo array.  */
          idx = disp + listp->len;
        }
    }
  else
    {
      struct link_map *old_map = listp->slotinfo[idx - disp].map;

      /* The entry might still be in its unused state if we are closing an
         object that wasn't fully set up.  */
      if (__glibc_likely (old_map != NULL))
        {
          assert (old_map->l_tls_modid == idx);

          /* Mark the entry as unused.  */
          listp->slotinfo[idx - disp].gen = GL(dl_tls_generation) + 1;
          listp->slotinfo[idx - disp].map = NULL;
        }

      /* If this is not the last currently used entry no need to look
         further.  */
      if (idx != GL(dl_tls_max_dtv_idx))
        return true;
    }

  while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
    {
      --idx;

      if (listp->slotinfo[idx - disp].map != NULL)
        {
          /* Found a new last used index.  */
          GL(dl_tls_max_dtv_idx) = idx;
          return true;
        }
    }

  /* No non-empty entry in this list element.  */
  return false;
}


void
_dl_close_worker (struct link_map *map)
{
  /* One less direct use.  */
  --map->l_direct_opencount;

  /* If _dl_close is called recursively (some destructor calls dlclose),
     just record that the parent _dl_close will need to do garbage collection
     again and return.  */
  static enum { not_pending, pending, rerun } dl_close_state;

  if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
      || dl_close_state != not_pending)
    {
      if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
        dl_close_state = rerun;

      /* There are still references to this object.  Do nothing more.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
        _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
                          map->l_name, map->l_direct_opencount);

      return;
    }

  Lmid_t nsid = map->l_ns;
  struct link_namespaces *ns = &GL(dl_ns)[nsid];

 retry:
  dl_close_state = pending;

  bool any_tls = false;
  const unsigned int nloaded = ns->_ns_nloaded;
  char used[nloaded];
  char done[nloaded];
  struct link_map *maps[nloaded];

  /* Run over the list and assign indexes to the link maps and enter
     them into the MAPS array.  */
  int idx = 0;
  for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
    {
      l->l_idx = idx;
      maps[idx] = l;
      ++idx;
    }
  assert (idx == nloaded);

  /* Prepare the bitmaps.  */
  memset (used, '\0', sizeof (used));
  memset (done, '\0', sizeof (done));

  /* Keep track of the lowest index link map we have covered already.  */
  int done_index = -1;
  while (++done_index < nloaded)
    {
      struct link_map *l = maps[done_index];

      if (done[done_index])
        /* Already handled.  */
        continue;

      /* Check whether this object is still used.  */
      if (l->l_type == lt_loaded
          && l->l_direct_opencount == 0
          && (l->l_flags_1 & DF_1_NODELETE) == 0
          && !used[done_index])
        continue;

      /* We need this object and we handle it now.  */
      done[done_index] = 1;
      used[done_index] = 1;
      /* Signal the object is still needed.  */
      l->l_idx = IDX_STILL_USED;

      /* Mark all dependencies as used.  */
      if (l->l_initfini != NULL)
        {
          /* We are always the zeroth entry, and since we don't include
             ourselves in the dependency analysis start at 1.  */
          struct link_map **lp = &l->l_initfini[1];
          while (*lp != NULL)
            {
              if ((*lp)->l_idx != IDX_STILL_USED)
                {
                  assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);

                  if (!used[(*lp)->l_idx])
                    {
                      used[(*lp)->l_idx] = 1;
                      /* If we marked a new object as used, and we've
                         already processed it, then we need to go back
                         and process again from that point forward to
                         ensure we keep all of its dependencies also.  */
                      if ((*lp)->l_idx - 1 < done_index)
                        done_index = (*lp)->l_idx - 1;
                    }
                }

              ++lp;
            }
        }
      /* And the same for relocation dependencies.  */
      if (l->l_reldeps != NULL)
        for (unsigned int j = 0; j < l->l_reldeps->act; ++j)
          {
            struct link_map *jmap = l->l_reldeps->list[j];

            if (jmap->l_idx != IDX_STILL_USED)
              {
                assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);

                if (!used[jmap->l_idx])
                  {
                    used[jmap->l_idx] = 1;
                    if (jmap->l_idx - 1 < done_index)
                      done_index = jmap->l_idx - 1;
                  }
              }
          }
    }
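
  /* At this point used[i] is set exactly for the maps that must stay
     loaded: every map with a nonzero direct reference count or the
     DF_1_NODELETE flag, plus everything reachable from such maps via
     l_initfini and l_reldeps.  The remaining maps will be finalized and
     unmapped below.  */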

  /* Sort the entries.  */
  _dl_sort_fini (maps, nloaded, used, nsid);

  /* Call all termination functions at once.  */
#ifdef SHARED
  bool do_audit = GLRO(dl_naudit) > 0 && !ns->_ns_loaded->l_auditing;
#endif
  bool unload_any = false;
  bool scope_mem_left = false;
  unsigned int unload_global = 0;
  unsigned int first_loaded = ~0;
  for (unsigned int i = 0; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];

      /* All elements must be in the same namespace.  */
      assert (imap->l_ns == nsid);

      if (!used[i])
        {
          assert (imap->l_type == lt_loaded
                  && (imap->l_flags_1 & DF_1_NODELETE) == 0);

          /* Call its termination function.  Do not do it for
             half-cooked objects.  */
          if (imap->l_init_called)
            {
              /* When debugging print a message first.  */
              if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS,
                                    0))
                _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
                                  imap->l_name, nsid);

              if (imap->l_info[DT_FINI_ARRAY] != NULL)
                {
                  ElfW(Addr) *array =
                    (ElfW(Addr) *) (imap->l_addr
                                    + imap->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
                  unsigned int sz = (imap->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
                                     / sizeof (ElfW(Addr)));

                  while (sz-- > 0)
                    ((fini_t) array[sz]) ();
                }

              /* Next try the old-style destructor.  */
              if (imap->l_info[DT_FINI] != NULL)
                DL_CALL_DT_FINI (imap, ((void *) imap->l_addr
                                        + imap->l_info[DT_FINI]->d_un.d_ptr));
            }
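
          /* ELF destructors run in reverse order here: the DT_FINI_ARRAY
             entries are called from last to first, followed by the
             old-style DT_FINI function, mirroring the reverse of the
             constructor order used at load time.  */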

#ifdef SHARED
          /* Auditing checkpoint: we remove an object.  */
          if (__glibc_unlikely (do_audit))
            {
              struct audit_ifaces *afct = GLRO(dl_audit);
              for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
                {
                  if (afct->objclose != NULL)
                    /* Return value is ignored.  */
                    (void) afct->objclose (&imap->l_audit[cnt].cookie);

                  afct = afct->next;
                }
            }
#endif

          /* This object must not be used anymore.  */
          imap->l_removed = 1;

          /* We indeed have an object to remove.  */
          unload_any = true;

          if (imap->l_global)
            ++unload_global;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
      /* Else used[i].  */
      else if (imap->l_type == lt_loaded)
        {
          struct r_scope_elem *new_list = NULL;

          if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
            {
              /* The object is still used.  But one of the objects we are
                 unloading right now is responsible for loading it.  If
                 the current object does not have its own scope yet we
                 have to create one.  This has to be done before running
                 the finalizers.

                 To do this count the number of dependencies.  */
              unsigned int cnt;
              for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
                ;

              /* We simply reuse the l_initfini list.  */
              imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
              imap->l_searchlist.r_nlist = cnt;

              new_list = &imap->l_searchlist;
            }

          /* Count the number of scopes which remain after the unload.
             When we add the local search list count it.  Always add
             one for the terminating NULL pointer.  */
          size_t remain = (new_list != NULL) + 1;
          bool removed_any = false;
          for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
            /* This relies on l_scope[] entries being always set either
               to its own l_symbolic_searchlist address, or some map's
               l_searchlist address.  */
            if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
              {
                struct link_map *tmap = (struct link_map *)
                  ((char *) imap->l_scope[cnt]
                   - offsetof (struct link_map, l_searchlist));
                assert (tmap->l_ns == nsid);
                if (tmap->l_idx == IDX_STILL_USED)
                  ++remain;
                else
                  removed_any = true;
              }
            else
              ++remain;

          if (removed_any)
            {
              /* Always allocate a new array for the scope.  This is
                 necessary since we must be able to determine the last
                 user of the current array.  If possible use the link map's
                 memory.  */
              size_t new_size;
              struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

              if (imap->l_scope != imap->l_scope_mem
                  && remain < SCOPE_ELEMS (imap))
                {
                  new_size = SCOPE_ELEMS (imap);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlclose", NULL,
                                      N_("cannot create scope list"));
                }

              /* Copy over the remaining scope elements.  */
              remain = 0;
              for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
                {
                  if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
                    {
                      struct link_map *tmap = (struct link_map *)
                        ((char *) imap->l_scope[cnt]
                         - offsetof (struct link_map, l_searchlist));
                      if (tmap->l_idx != IDX_STILL_USED)
                        {
                          /* Remove the scope.  Or replace with own map's
                             scope.  */
                          if (new_list != NULL)
                            {
                              newp[remain++] = new_list;
                              new_list = NULL;
                            }
                          continue;
                        }
                    }

                  newp[remain++] = imap->l_scope[cnt];
                }
              newp[remain] = NULL;

              struct r_scope_elem **old = imap->l_scope;

              imap->l_scope = newp;

              /* No user anymore, we can free it now.  */
              if (old != imap->l_scope_mem)
                {
                  if (_dl_scope_free (old))
                    /* If _dl_scope_free used THREAD_GSCOPE_WAIT (),
                       no need to repeat it.  */
                    scope_mem_left = false;
                }
              else
                scope_mem_left = true;

              imap->l_scope_max = new_size;
            }
          else if (new_list != NULL)
            {
              /* We didn't change the scope array, so reset the search
                 list.  */
              imap->l_searchlist.r_list = NULL;
              imap->l_searchlist.r_nlist = 0;
            }

          /* The loader is gone, so mark the object as not having one.
             Note: l_idx != IDX_STILL_USED -> object will be removed.  */
          if (imap->l_loader != NULL
              && imap->l_loader->l_idx != IDX_STILL_USED)
            imap->l_loader = NULL;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
    }

  /* If there are no objects to unload, do nothing further.  */
  if (!unload_any)
    goto out;

#ifdef SHARED
  /* Auditing checkpoint: we will start deleting objects.  */
  if (__glibc_unlikely (do_audit))
    {
      struct link_map *head = ns->_ns_loaded;
      struct audit_ifaces *afct = GLRO(dl_audit);
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie, LA_ACT_DELETE);

              afct = afct->next;
            }
        }
    }
#endif

  /* Notify the debugger we are about to remove some loaded objects.  */
  struct r_debug *r = _dl_debug_initialize (0, nsid);
  r->r_state = RT_DELETE;
  _dl_debug_state ();
  LIBC_PROBE (unmap_start, 2, nsid, r);

  if (unload_global)
    {
      /* Some objects are in the global scope list.  Remove them.  */
      struct r_scope_elem *ns_msl = ns->_ns_main_searchlist;
      unsigned int i;
      unsigned int j = 0;
      unsigned int cnt = ns_msl->r_nlist;

      while (cnt > 0 && ns_msl->r_list[cnt - 1]->l_removed)
        --cnt;

      if (cnt + unload_global == ns_msl->r_nlist)
        /* Speed up removing most recently added objects.  */
        j = cnt;
      else
        for (i = 0; i < cnt; i++)
          if (ns_msl->r_list[i]->l_removed == 0)
            {
              if (i != j)
                ns_msl->r_list[j] = ns_msl->r_list[i];
              j++;
            }
      ns_msl->r_nlist = j;
    }

  if (!RTLD_SINGLE_THREAD_P
      && (unload_global
          || scope_mem_left
          || (GL(dl_scope_free_list) != NULL
              && GL(dl_scope_free_list)->count)))
    {
      THREAD_GSCOPE_WAIT ();

      /* Now we can free any queued old scopes.  */
      struct dl_scope_free_list *fsl = GL(dl_scope_free_list);
      if (fsl != NULL)
        while (fsl->count > 0)
          free (fsl->list[--fsl->count]);
    }
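
  /* THREAD_GSCOPE_WAIT above acts as a grace period: it returns only once
     no thread can still be inside a symbol lookup using one of the old
     scope arrays, so the queued arrays can be freed safely.  */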

  size_t tls_free_start;
  size_t tls_free_end;
  tls_free_start = tls_free_end = NO_TLS_OFFSET;

  /* We modify the list of loaded objects.  */
  __rtld_lock_lock_recursive (GL(dl_load_write_lock));

  /* Check each element of the search list to see if all references to
     it are gone.  */
  for (unsigned int i = first_loaded; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];
      if (!used[i])
        {
          assert (imap->l_type == lt_loaded);

          /* That was the last reference, and this was a dlopen-loaded
             object.  We can unmap it.  */

          /* Remove the object from the dtv slotinfo array if it uses TLS.  */
          if (__glibc_unlikely (imap->l_tls_blocksize > 0))
            {
              any_tls = true;

              if (GL(dl_tls_dtv_slotinfo_list) != NULL
                  && ! remove_slotinfo (imap->l_tls_modid,
                                        GL(dl_tls_dtv_slotinfo_list), 0,
                                        imap->l_init_called))
                /* All dynamically loaded modules with TLS are unloaded.  */
                GL(dl_tls_max_dtv_idx) = GL(dl_tls_static_nelem);

              if (imap->l_tls_offset != NO_TLS_OFFSET
                  && imap->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
                {
                  /* Collect a contiguous chunk built from the objects in
                     this search list, going in either direction.  When the
                     whole chunk is at the end of the used area then we can
                     reclaim it.  */
#if TLS_TCB_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET
                      || (size_t) imap->l_tls_offset == tls_free_start)
                    {
                      /* Extend the contiguous chunk being reclaimed.  */
                      tls_free_start
                        = imap->l_tls_offset - imap->l_tls_blocksize;

                      if (tls_free_end == NO_TLS_OFFSET)
                        tls_free_end = imap->l_tls_offset;
                    }
                  else if (imap->l_tls_offset - imap->l_tls_blocksize
                           == tls_free_end)
                    /* Extend the chunk backwards.  */
                    tls_free_end = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked unless we can free
                         one block right away.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        {
                          GL(dl_tls_static_used) = tls_free_start;
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                      else if ((size_t) imap->l_tls_offset
                               == GL(dl_tls_static_used))
                        GL(dl_tls_static_used)
                          = imap->l_tls_offset - imap->l_tls_blocksize;
                      else if (tls_free_end < (size_t) imap->l_tls_offset)
                        {
                          /* We pick the later block.  It has a chance to
                             be freed.  */
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                    }
#elif TLS_DTV_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET)
                    {
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = (imap->l_tls_offset
                                      + imap->l_tls_blocksize);
                    }
                  else if (imap->l_tls_firstbyte_offset == tls_free_end)
                    /* Extend the contiguous chunk being reclaimed.  */
                    tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == tls_free_start)
                    /* Extend the chunk backwards.  */
                    tls_free_start = imap->l_tls_firstbyte_offset;
                  /* This isn't contiguous with the last chunk freed.
                     One of them will be leaked unless we can free
                     one block right away.  */
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == GL(dl_tls_static_used))
                    GL(dl_tls_static_used) = imap->l_tls_firstbyte_offset;
                  else if (tls_free_end == GL(dl_tls_static_used))
                    {
                      GL(dl_tls_static_used) = tls_free_start;
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
                  else if (tls_free_end < imap->l_tls_firstbyte_offset)
                    {
                      /* We pick the later block.  It has a chance to
                         be freed.  */
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
                }
            }

          /* We can unmap all the maps at once.  We determined the
             start address and length when we loaded the object and
             the `munmap' call does the rest.  */
          DL_UNMAP (imap);

          /* Finally, unlink the data structure and free it.  */
#if DL_NNS == 1
          /* The assert in the (imap->l_prev == NULL) case gives
             the compiler license to warn that NS points outside
             the dl_ns array bounds in that case (as nsid != LM_ID_BASE
             is tantamount to nsid >= DL_NNS).  That should be impossible
             in this configuration, so just assert about it instead.  */
          assert (nsid == LM_ID_BASE);
          assert (imap->l_prev != NULL);
#else
          if (imap->l_prev == NULL)
            {
              assert (nsid != LM_ID_BASE);
              ns->_ns_loaded = imap->l_next;

              /* Update the pointer to the head of the list
                 we leave for debuggers to examine.  */
              r->r_map = (void *) ns->_ns_loaded;
            }
          else
#endif
            imap->l_prev->l_next = imap->l_next;

          --ns->_ns_nloaded;
          if (imap->l_next != NULL)
            imap->l_next->l_prev = imap->l_prev;

          free (imap->l_versions);
          if (imap->l_origin != (char *) -1)
            free ((char *) imap->l_origin);

          free (imap->l_reldeps);

          /* Print debugging message.  */
          if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
            _dl_debug_printf ("\nfile=%s [%lu];  destroying link map\n",
                              imap->l_name, imap->l_ns);

          /* This name is always allocated.  */
          free (imap->l_name);
          /* Remove the list with all the names of the shared object.  */

          struct libname_list *lnp = imap->l_libname;
          do
            {
              struct libname_list *this = lnp;
              lnp = lnp->next;
              if (!this->dont_free)
                free (this);
            }
          while (lnp != NULL);

          /* Remove the searchlists.  */
          free (imap->l_initfini);

          /* Remove the scope array if we allocated it.  */
          if (imap->l_scope != imap->l_scope_mem)
            free (imap->l_scope);

          if (imap->l_phdr_allocated)
            free ((void *) imap->l_phdr);

          if (imap->l_rpath_dirs.dirs != (void *) -1)
            free (imap->l_rpath_dirs.dirs);
          if (imap->l_runpath_dirs.dirs != (void *) -1)
            free (imap->l_runpath_dirs.dirs);

          free (imap);
        }
    }

  __rtld_lock_unlock_recursive (GL(dl_load_write_lock));

  /* If we removed any object which uses TLS, bump the generation counter.  */
  if (any_tls)
    {
      if (__glibc_unlikely (++GL(dl_tls_generation) == 0))
        _dl_fatal_printf ("TLS generation counter wrapped!  Please report as described in "REPORT_BUGS_TO".\n");

      if (tls_free_end == GL(dl_tls_static_used))
        GL(dl_tls_static_used) = tls_free_start;
    }
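
  /* The bumped dl_tls_generation, together with the per-slot generation
     stamps written by remove_slotinfo, makes __tls_get_addr treat the
     DTV entries of the removed modules as stale on their next use.  */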

#ifdef SHARED
  /* Auditing checkpoint: we have deleted all objects.  */
  if (__glibc_unlikely (do_audit))
    {
      struct link_map *head = ns->_ns_loaded;
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          struct audit_ifaces *afct = GLRO(dl_audit);
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie, LA_ACT_CONSISTENT);

              afct = afct->next;
            }
        }
    }
#endif

  if (__builtin_expect (ns->_ns_loaded == NULL, 0)
      && nsid == GL(dl_nns) - 1)
    do
      --GL(dl_nns);
    while (GL(dl_ns)[GL(dl_nns) - 1]._ns_loaded == NULL);

  /* Notify the debugger those objects are finalized and gone.  */
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
  LIBC_PROBE (unmap_complete, 2, nsid, r);

  /* Recheck if we need to retry, release the lock.  */
 out:
  if (dl_close_state == rerun)
    goto retry;

  dl_close_state = not_pending;
}


void
_dl_close (void *_map)
{
  struct link_map *map = _map;

  /* First see whether we can remove the object at all.  */
  if (__glibc_unlikely (map->l_flags_1 & DF_1_NODELETE))
    {
      assert (map->l_init_called);
      /* Nope.  Do nothing.  */
      return;
    }

  if (__builtin_expect (map->l_direct_opencount, 1) == 0)
    GLRO(dl_signal_error) (0, map->l_name, NULL, N_("shared object not open"));

  /* Acquire the lock.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  _dl_close_worker (map);

  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}
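
/* _dl_close is the ld.so entry point behind dlclose: the libdl wrapper
   is expected to run it via _dlerror_run so that an error signaled here
   with _dl_signal_error is reported through dlerror instead of being
   fatal.  */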