Add forced deletion support to _dl_close_worker
[glibc.git] / elf / dl-close.c
/* Close a shared object opened by `_dl_open'.
   Copyright (C) 1996-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <bits/libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>

#include <dl-unmap-segments.h>
/* Type of the destructor functions.  */
typedef void (*fini_t) (void);

/* Special l_idx value used to indicate which objects remain loaded.  */
#define IDX_STILL_USED -1
/* Returns true if a non-empty entry was found.  */
static bool
remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
                 bool should_be_there)
{
  if (idx - disp >= listp->len)
    {
      if (listp->next == NULL)
        {
          /* The index is not actually valid in the slotinfo list,
             because this object was closed before it was fully set
             up due to some error.  */
          assert (! should_be_there);
        }
      else
        {
          if (remove_slotinfo (idx, listp->next, disp + listp->len,
                               should_be_there))
            return true;

          /* No non-empty entry.  Search from the end of this element's
             slotinfo array.  */
          idx = disp + listp->len;
        }
    }
  else
    {
      struct link_map *old_map = listp->slotinfo[idx - disp].map;

      /* The entry might still be in its unused state if we are closing an
         object that wasn't fully set up.  */
      if (__glibc_likely (old_map != NULL))
        {
          assert (old_map->l_tls_modid == idx);

          /* Mark the entry as unused.  */
          listp->slotinfo[idx - disp].gen = GL(dl_tls_generation) + 1;
          listp->slotinfo[idx - disp].map = NULL;
        }

      /* If this is not the last currently used entry no need to look
         further.  */
      if (idx != GL(dl_tls_max_dtv_idx))
        return true;
    }

  while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
    {
      --idx;

      if (listp->slotinfo[idx - disp].map != NULL)
        {
          /* Found a new last used index.  */
          GL(dl_tls_max_dtv_idx) = idx;
          return true;
        }
    }

  /* No non-empty entry in this list element.  */
  return false;
}
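
/* Example: suppose TLS module ids 1-5 are all in the first slotinfo list
   element and none belongs to the initial static TLS set.  If module 4 was
   unloaded earlier and module 5 is closed now, remove_slotinfo clears slot 5
   and, because 5 was GL(dl_tls_max_dtv_idx), scans backwards past the empty
   slot 4 and leaves GL(dl_tls_max_dtv_idx) at 3.  */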
void
_dl_close_worker (struct link_map *map, bool force)
{
  /* One less direct use.  */
  --map->l_direct_opencount;

  /* If _dl_close is called recursively (some destructor calls dlclose),
     just record that the parent _dl_close will need to do garbage collection
     again and return.  */
  static enum { not_pending, pending, rerun } dl_close_state;

  if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
      || dl_close_state != not_pending)
    {
      if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
        dl_close_state = rerun;

      /* There are still references to this object.  Do nothing more.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
        _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
                          map->l_name, map->l_direct_opencount);

      return;
    }

  Lmid_t nsid = map->l_ns;
  struct link_namespaces *ns = &GL(dl_ns)[nsid];

 retry:
  dl_close_state = pending;

  bool any_tls = false;
  const unsigned int nloaded = ns->_ns_nloaded;
  char used[nloaded];
  char done[nloaded];
  struct link_map *maps[nloaded];

  /* Run over the list and assign indexes to the link maps and enter
     them into the MAPS array.  */
  int idx = 0;
  for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
    {
      l->l_idx = idx;
      maps[idx] = l;
      ++idx;

      /* Clear DF_1_NODELETE to force object deletion.  */
      if (force)
        l->l_flags_1 &= ~DF_1_NODELETE;
    }
  assert (idx == nloaded);
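
  /* Objects carrying DF_1_NODELETE -- set for RTLD_NODELETE handles and for
     objects linked with -z nodelete -- normally survive every dlclose.
     Clearing the flag above on a forced close lets the mark-and-sweep below
     reclaim them along with everything else that became unused.  */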
  /* Prepare the bitmaps.  */
  memset (used, '\0', sizeof (used));
  memset (done, '\0', sizeof (done));

  /* Keep track of the lowest index link map we have covered already.  */
  int done_index = -1;
  while (++done_index < nloaded)
    {
      struct link_map *l = maps[done_index];

      if (done[done_index])
        /* Already handled.  */
        continue;

      /* Check whether this object is still used.  */
      if (l->l_type == lt_loaded
          && l->l_direct_opencount == 0
          && (l->l_flags_1 & DF_1_NODELETE) == 0
          && !used[done_index])
        continue;

      /* We need this object and we handle it now.  */
      done[done_index] = 1;
      used[done_index] = 1;
      /* Signal the object is still needed.  */
      l->l_idx = IDX_STILL_USED;

      /* Mark all dependencies as used.  */
      if (l->l_initfini != NULL)
        {
          /* We are always the zeroth entry, and since we don't include
             ourselves in the dependency analysis start at 1.  */
          struct link_map **lp = &l->l_initfini[1];
          while (*lp != NULL)
            {
              if ((*lp)->l_idx != IDX_STILL_USED)
                {
                  assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);

                  if (!used[(*lp)->l_idx])
                    {
                      used[(*lp)->l_idx] = 1;
                      /* If we marked a new object as used, and we've
                         already processed it, then we need to go back
                         and process again from that point forward to
                         ensure we keep all of its dependencies also.  */
                      if ((*lp)->l_idx - 1 < done_index)
                        done_index = (*lp)->l_idx - 1;
                    }
                }

              ++lp;
            }
        }
      /* And the same for relocation dependencies.  */
      if (l->l_reldeps != NULL)
        for (unsigned int j = 0; j < l->l_reldeps->act; ++j)
          {
            struct link_map *jmap = l->l_reldeps->list[j];

            if (jmap->l_idx != IDX_STILL_USED)
              {
                assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);

                if (!used[jmap->l_idx])
                  {
                    used[jmap->l_idx] = 1;
                    if (jmap->l_idx - 1 < done_index)
                      done_index = jmap->l_idx - 1;
                  }
              }
          }
    }

  /* Sort the entries.  */
  _dl_sort_fini (maps, nloaded, used, nsid);
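
  /* After the sort an unloaded object's destructors run before those of the
     objects it depends on, mirroring the reverse of constructor order.  */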
  /* Call all termination functions at once.  */
#ifdef SHARED
  bool do_audit = GLRO(dl_naudit) > 0 && !ns->_ns_loaded->l_auditing;
#endif
  bool unload_any = false;
  bool scope_mem_left = false;
  unsigned int unload_global = 0;
  unsigned int first_loaded = ~0;
  for (unsigned int i = 0; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];

      /* All elements must be in the same namespace.  */
      assert (imap->l_ns == nsid);

      if (!used[i])
        {
          assert (imap->l_type == lt_loaded
                  && (imap->l_flags_1 & DF_1_NODELETE) == 0);

          /* Call its termination function.  Do not do it for
             half-cooked objects.  */
          if (imap->l_init_called)
            {
              /* When debugging print a message first.  */
              if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS,
                                    0))
                _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
                                  imap->l_name, nsid);

              if (imap->l_info[DT_FINI_ARRAY] != NULL)
                {
                  ElfW(Addr) *array =
                    (ElfW(Addr) *) (imap->l_addr
                                    + imap->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
                  unsigned int sz = (imap->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
                                     / sizeof (ElfW(Addr)));

                  while (sz-- > 0)
                    ((fini_t) array[sz]) ();
                }

              /* Next try the old-style destructor.  */
              if (imap->l_info[DT_FINI] != NULL)
                DL_CALL_DT_FINI (imap, ((void *) imap->l_addr
                                        + imap->l_info[DT_FINI]->d_un.d_ptr));
            }

#ifdef SHARED
          /* Auditing checkpoint: we remove an object.  */
          if (__glibc_unlikely (do_audit))
            {
              struct audit_ifaces *afct = GLRO(dl_audit);
              for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
                {
                  if (afct->objclose != NULL)
                    /* Return value is ignored.  */
                    (void) afct->objclose (&imap->l_audit[cnt].cookie);

                  afct = afct->next;
                }
            }
#endif

          /* This object must not be used anymore.  */
          imap->l_removed = 1;

          /* We indeed have an object to remove.  */
          unload_any = true;

          if (imap->l_global)
            ++unload_global;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
      /* Else used[i].  */
      else if (imap->l_type == lt_loaded)
        {
          struct r_scope_elem *new_list = NULL;

          if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
            {
              /* The object is still used.  But one of the objects we are
                 unloading right now is responsible for loading it.  If
                 the current object does not have its own scope yet we
                 have to create one.  This has to be done before running
                 the finalizers.

                 To do this count the number of dependencies.  */
              unsigned int cnt;
              for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
                ;

              /* We simply reuse the l_initfini list.  */
              imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
              imap->l_searchlist.r_nlist = cnt;

              new_list = &imap->l_searchlist;
            }

          /* Count the number of scopes which remain after the unload.
             When we add the local search list count it.  Always add
             one for the terminating NULL pointer.  */
          size_t remain = (new_list != NULL) + 1;
          bool removed_any = false;
          for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
            /* This relies on l_scope[] entries being always set either
               to its own l_symbolic_searchlist address, or some map's
               l_searchlist address.  */
            if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
              {
                struct link_map *tmap = (struct link_map *)
                  ((char *) imap->l_scope[cnt]
                   - offsetof (struct link_map, l_searchlist));
                assert (tmap->l_ns == nsid);
                if (tmap->l_idx == IDX_STILL_USED)
                  ++remain;
                else
                  removed_any = true;
              }
            else
              ++remain;

          if (removed_any)
            {
              /* Always allocate a new array for the scope.  This is
                 necessary since we must be able to determine the last
                 user of the current array.  If possible use the link map's
                 memory.  */
              size_t new_size;
              struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

              if (imap->l_scope != imap->l_scope_mem
                  && remain < SCOPE_ELEMS (imap))
                {
                  new_size = SCOPE_ELEMS (imap);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlclose", NULL,
                                      N_("cannot create scope list"));
                }

              /* Copy over the remaining scope elements.  */
              remain = 0;
              for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
                {
                  if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
                    {
                      struct link_map *tmap = (struct link_map *)
                        ((char *) imap->l_scope[cnt]
                         - offsetof (struct link_map, l_searchlist));
                      if (tmap->l_idx != IDX_STILL_USED)
                        {
                          /* Remove the scope.  Or replace with own map's
                             scope.  */
                          if (new_list != NULL)
                            {
                              newp[remain++] = new_list;
                              new_list = NULL;
                            }
                          continue;
                        }
                    }

                  newp[remain++] = imap->l_scope[cnt];
                }
              newp[remain] = NULL;

              struct r_scope_elem **old = imap->l_scope;

              imap->l_scope = newp;

              /* No user anymore, we can free it now.  */
              if (old != imap->l_scope_mem)
                {
                  if (_dl_scope_free (old))
                    /* If _dl_scope_free used THREAD_GSCOPE_WAIT (),
                       no need to repeat it.  */
                    scope_mem_left = false;
                }
              else
                scope_mem_left = true;

              imap->l_scope_max = new_size;
            }
          else if (new_list != NULL)
            {
              /* We didn't change the scope array, so reset the search
                 list.  */
              imap->l_searchlist.r_list = NULL;
              imap->l_searchlist.r_nlist = 0;
            }

          /* The loader is gone, so mark the object as not having one.
             Note: l_idx != IDX_STILL_USED -> object will be removed.  */
          if (imap->l_loader != NULL
              && imap->l_loader->l_idx != IDX_STILL_USED)
            imap->l_loader = NULL;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
    }
  /* If there are no objects to unload, do nothing further.  */
  if (!unload_any)
    goto out;

#ifdef SHARED
  /* Auditing checkpoint: we will start deleting objects.  */
  if (__glibc_unlikely (do_audit))
    {
      struct link_map *head = ns->_ns_loaded;
      struct audit_ifaces *afct = GLRO(dl_audit);
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie, LA_ACT_DELETE);

              afct = afct->next;
            }
        }
    }
#endif

  /* Notify the debugger we are about to remove some loaded objects.  */
  struct r_debug *r = _dl_debug_initialize (0, nsid);
  r->r_state = RT_DELETE;
  _dl_debug_state ();
  LIBC_PROBE (unmap_start, 2, nsid, r);

  if (unload_global)
    {
      /* Some objects are in the global scope list.  Remove them.  */
      struct r_scope_elem *ns_msl = ns->_ns_main_searchlist;
      unsigned int i;
      unsigned int j = 0;
      unsigned int cnt = ns_msl->r_nlist;

      while (cnt > 0 && ns_msl->r_list[cnt - 1]->l_removed)
        --cnt;

      if (cnt + unload_global == ns_msl->r_nlist)
        /* Speed up removing most recently added objects.  */
        j = cnt;
      else
        for (i = 0; i < cnt; i++)
          if (ns_msl->r_list[i]->l_removed == 0)
            {
              if (i != j)
                ns_msl->r_list[j] = ns_msl->r_list[i];
              j++;
            }
      ns_msl->r_nlist = j;
    }

  if (!RTLD_SINGLE_THREAD_P
      && (unload_global
          || scope_mem_left
          || (GL(dl_scope_free_list) != NULL
              && GL(dl_scope_free_list)->count)))
    {
      THREAD_GSCOPE_WAIT ();
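
      /* THREAD_GSCOPE_WAIT returns only after every thread has left the
         global-scope lookup code, so the retired scope arrays queued on
         dl_scope_free_list can no longer be referenced concurrently.  */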
      /* Now we can free any queued old scopes.  */
      struct dl_scope_free_list *fsl = GL(dl_scope_free_list);
      if (fsl != NULL)
        while (fsl->count > 0)
          free (fsl->list[--fsl->count]);
    }
  size_t tls_free_start;
  size_t tls_free_end;
  tls_free_start = tls_free_end = NO_TLS_OFFSET;

  /* We modify the list of loaded objects.  */
  __rtld_lock_lock_recursive (GL(dl_load_write_lock));

  /* Check each element of the search list to see if all references to
     it are gone.  */
  for (unsigned int i = first_loaded; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];
      if (!used[i])
        {
          assert (imap->l_type == lt_loaded);

          /* That was the last reference, and this was a dlopen-loaded
             object.  We can unmap it.  */

          /* Remove the object from the dtv slotinfo array if it uses TLS.  */
          if (__glibc_unlikely (imap->l_tls_blocksize > 0))
            {
              any_tls = true;

              if (GL(dl_tls_dtv_slotinfo_list) != NULL
                  && ! remove_slotinfo (imap->l_tls_modid,
                                        GL(dl_tls_dtv_slotinfo_list), 0,
                                        imap->l_init_called))
                /* All dynamically loaded modules with TLS are unloaded.  */
                GL(dl_tls_max_dtv_idx) = GL(dl_tls_static_nelem);

              if (imap->l_tls_offset != NO_TLS_OFFSET
                  && imap->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
                {
                  /* Collect a contiguous chunk built from the objects in
                     this search list, going in either direction.  When the
                     whole chunk is at the end of the used area then we can
                     reclaim it.  */
#if TLS_TCB_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET
                      || (size_t) imap->l_tls_offset == tls_free_start)
                    {
                      /* Extend the contiguous chunk being reclaimed.  */
                      tls_free_start
                        = imap->l_tls_offset - imap->l_tls_blocksize;

                      if (tls_free_end == NO_TLS_OFFSET)
                        tls_free_end = imap->l_tls_offset;
                    }
                  else if (imap->l_tls_offset - imap->l_tls_blocksize
                           == tls_free_end)
                    /* Extend the chunk backwards.  */
                    tls_free_end = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked unless we can free
                         one block right away.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        {
                          GL(dl_tls_static_used) = tls_free_start;
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                      else if ((size_t) imap->l_tls_offset
                               == GL(dl_tls_static_used))
                        GL(dl_tls_static_used)
                          = imap->l_tls_offset - imap->l_tls_blocksize;
                      else if (tls_free_end < (size_t) imap->l_tls_offset)
                        {
                          /* We pick the later block.  It has a chance to
                             be freed.  */
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                    }
#elif TLS_DTV_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET)
                    {
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = (imap->l_tls_offset
                                      + imap->l_tls_blocksize);
                    }
                  else if (imap->l_tls_firstbyte_offset == tls_free_end)
                    /* Extend the contiguous chunk being reclaimed.  */
                    tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == tls_free_start)
                    /* Extend the chunk backwards.  */
                    tls_free_start = imap->l_tls_firstbyte_offset;
                  /* This isn't contiguous with the last chunk freed.
                     One of them will be leaked unless we can free
                     one block right away.  */
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == GL(dl_tls_static_used))
                    GL(dl_tls_static_used) = imap->l_tls_firstbyte_offset;
                  else if (tls_free_end == GL(dl_tls_static_used))
                    {
                      GL(dl_tls_static_used) = tls_free_start;
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
                  else if (tls_free_end < imap->l_tls_firstbyte_offset)
                    {
                      /* We pick the later block.  It has a chance to
                         be freed.  */
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
                }
            }
          /* Reset unique symbols if forced.  */
          if (force)
            {
              struct unique_sym_table *tab = &ns->_ns_unique_sym_table;
              __rtld_lock_lock_recursive (tab->lock);
              struct unique_sym *entries = tab->entries;
              if (entries != NULL)
                {
                  size_t idx, size = tab->size;
                  for (idx = 0; idx < size; ++idx)
                    {
                      /* Clear unique symbol entries that belong to this
                         object.  */
                      if (entries[idx].name != NULL
                          && entries[idx].map == imap)
                        {
                          entries[idx].name = NULL;
                          entries[idx].hashval = 0;
                          tab->n_elements--;
                        }
                    }
                }
              __rtld_lock_unlock_recursive (tab->lock);
            }
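
          /* Without this reset the STB_GNU_UNIQUE table would keep pointers
             to the object's symbol names and link map; clearing the entries
             here keeps later lookups from touching memory that the DL_UNMAP
             below is about to release.  */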
          /* We can unmap all the maps at once.  We determined the
             start address and length when we loaded the object and
             the `munmap' call does the rest.  */
          DL_UNMAP (imap);

          /* Finally, unlink the data structure and free it.  */
#if DL_NNS == 1
          /* The assert in the (imap->l_prev == NULL) case gives
             the compiler license to warn that NS points outside
             the dl_ns array bounds in that case (as nsid != LM_ID_BASE
             is tantamount to nsid >= DL_NNS).  That should be impossible
             in this configuration, so just assert about it instead.  */
          assert (nsid == LM_ID_BASE);
          assert (imap->l_prev != NULL);
#else
          if (imap->l_prev == NULL)
            {
              assert (nsid != LM_ID_BASE);
              ns->_ns_loaded = imap->l_next;

              /* Update the pointer to the head of the list
                 we leave for debuggers to examine.  */
              r->r_map = (void *) ns->_ns_loaded;
            }
          else
#endif
            imap->l_prev->l_next = imap->l_next;

          --ns->_ns_nloaded;
          if (imap->l_next != NULL)
            imap->l_next->l_prev = imap->l_prev;

          free (imap->l_versions);
          if (imap->l_origin != (char *) -1)
            free ((char *) imap->l_origin);

          free (imap->l_reldeps);

          /* Print debugging message.  */
          if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
            _dl_debug_printf ("\nfile=%s [%lu]; destroying link map\n",
                              imap->l_name, imap->l_ns);

          /* This name is always allocated.  */
          free (imap->l_name);
          /* Remove the list with all the names of the shared object.  */

          struct libname_list *lnp = imap->l_libname;
          do
            {
              struct libname_list *this = lnp;
              lnp = lnp->next;
              if (!this->dont_free)
                free (this);
            }
          while (lnp != NULL);

          /* Remove the searchlists.  */
          free (imap->l_initfini);

          /* Remove the scope array if we allocated it.  */
          if (imap->l_scope != imap->l_scope_mem)
            free (imap->l_scope);

          if (imap->l_phdr_allocated)
            free ((void *) imap->l_phdr);

          if (imap->l_rpath_dirs.dirs != (void *) -1)
            free (imap->l_rpath_dirs.dirs);
          if (imap->l_runpath_dirs.dirs != (void *) -1)
            free (imap->l_runpath_dirs.dirs);

          free (imap);
        }
    }
  __rtld_lock_unlock_recursive (GL(dl_load_write_lock));

  /* If we removed any object which uses TLS bump the generation counter.  */
  if (any_tls)
    {
      if (__glibc_unlikely (++GL(dl_tls_generation) == 0))
        _dl_fatal_printf ("TLS generation counter wrapped! Please report as described in "REPORT_BUGS_TO".\n");

      if (tls_free_end == GL(dl_tls_static_used))
        GL(dl_tls_static_used) = tls_free_start;
    }

#ifdef SHARED
  /* Auditing checkpoint: we have deleted all objects.  */
  if (__glibc_unlikely (do_audit))
    {
      struct link_map *head = ns->_ns_loaded;
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          struct audit_ifaces *afct = GLRO(dl_audit);
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie, LA_ACT_CONSISTENT);

              afct = afct->next;
            }
        }
    }
#endif

  if (__builtin_expect (ns->_ns_loaded == NULL, 0)
      && nsid == GL(dl_nns) - 1)
    do
      --GL(dl_nns);
    while (GL(dl_ns)[GL(dl_nns) - 1]._ns_loaded == NULL);

  /* Notify the debugger those objects are finalized and gone.  */
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
  LIBC_PROBE (unmap_complete, 2, nsid, r);

  /* Recheck if we need to retry, release the lock.  */
 out:
  if (dl_close_state == rerun)
    goto retry;

  dl_close_state = not_pending;
}
void
_dl_close (void *_map)
{
  struct link_map *map = _map;

  /* First see whether we can remove the object at all.  */
  if (__glibc_unlikely (map->l_flags_1 & DF_1_NODELETE))
    {
      assert (map->l_init_called);
      /* Nope.  Do nothing.  */
      return;
    }

  if (__builtin_expect (map->l_direct_opencount, 1) == 0)
    GLRO(dl_signal_error) (0, map->l_name, NULL, N_("shared object not open"));

  /* Acquire the lock.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  _dl_close_worker (map, false);

  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}
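
/* Usage sketch.  Application code reaches the non-forced path through
   dlclose:

       void *handle = dlopen ("libfoo.so", RTLD_NOW);
       ...
       dlclose (handle);	   => _dl_close => _dl_close_worker (map, false)

   The forced variant is only callable from inside the dynamic loader; a
   hypothetical internal error path that has to tear down a partially set up
   object could, with GL(dl_load_lock) already held, call:

       _dl_close_worker (map, true);   => DF_1_NODELETE objects unload too

   "libfoo.so" and that caller are illustrative; this file defines only the
   worker and the dlclose entry point above.  */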