[glibc.git] / elf / dl-close.c
/* Close a shared object opened by `_dl_open'.
   Copyright (C) 1996-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <bits/libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>

#include <dl-unmap-segments.h>

/* Type of the destructor functions called via DT_FINI and DT_FINI_ARRAY.  */
typedef void (*fini_t) (void);

/* Special l_idx value used to indicate which objects remain loaded.  */
#define IDX_STILL_USED -1
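
/* The TLS slotinfo table is kept as a chained list of fixed-size arrays
   (struct dtv_slotinfo_list).  remove_slotinfo below walks that chain,
   clears the entry belonging to the module being unloaded and, when that
   entry was the highest one in use, lowers GL(dl_tls_max_dtv_idx) to the
   new maximum.  */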
/* Returns true if a non-empty entry was found.  */
static bool
remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
                 bool should_be_there)
{
  if (idx - disp >= listp->len)
    {
      if (listp->next == NULL)
        {
          /* The index is not actually valid in the slotinfo list,
             because this object was closed before it was fully set
             up due to some error.  */
          assert (! should_be_there);
        }
      else
        {
          if (remove_slotinfo (idx, listp->next, disp + listp->len,
                               should_be_there))
            return true;

          /* No non-empty entry.  Search from the end of this element's
             slotinfo array.  */
          idx = disp + listp->len;
        }
    }
  else
    {
      struct link_map *old_map = listp->slotinfo[idx - disp].map;

      /* The entry might still be in its unused state if we are closing an
         object that wasn't fully set up.  */
      if (__glibc_likely (old_map != NULL))
        {
          assert (old_map->l_tls_modid == idx);

          /* Mark the entry as unused.  */
          listp->slotinfo[idx - disp].gen = GL(dl_tls_generation) + 1;
          listp->slotinfo[idx - disp].map = NULL;
        }

      /* If this is not the last currently used entry no need to look
         further.  */
      if (idx != GL(dl_tls_max_dtv_idx))
        return true;
    }

  while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
    {
      --idx;

      if (listp->slotinfo[idx - disp].map != NULL)
        {
          /* Found a new last used index.  */
          GL(dl_tls_max_dtv_idx) = idx;
          return true;
        }
    }
  /* No non-empty entry in this list element.  */
  return false;
}
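
/* Do the real work of dlclose.  Decrement the direct use count of MAP and,
   if nothing references it anymore, run the destructors, repair the scopes
   of the objects that stay, and unmap everything that became unused.  The
   caller must hold GL(dl_load_lock).  FORCE is true only when an object is
   being torn down because an error occurred while it was loaded.  */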
void
_dl_close_worker (struct link_map *map, bool force)
{
  /* One less direct use.  */
  --map->l_direct_opencount;

  /* If _dl_close is called recursively (i.e. some destructor calls dlclose),
     just record that the parent _dl_close will need to do garbage collection
     again and return.  */
  static enum { not_pending, pending, rerun } dl_close_state;

  if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
      || dl_close_state != not_pending)
    {
      if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
        dl_close_state = rerun;

      /* There are still references to this object.  Do nothing more.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
        _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
                          map->l_name, map->l_direct_opencount);

      return;
    }

  Lmid_t nsid = map->l_ns;
  struct link_namespaces *ns = &GL(dl_ns)[nsid];

 retry:
  dl_close_state = pending;

  bool any_tls = false;
  const unsigned int nloaded = ns->_ns_nloaded;
  char used[nloaded];
  char done[nloaded];
  struct link_map *maps[nloaded];
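
  /* MAPS mirrors the namespace's list of link maps as an array so that it
     can be indexed and sorted; USED marks the objects that must stay
     loaded, DONE marks the objects whose dependencies have already been
     propagated into USED.  */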

  /* Run over the list and assign indexes to the link maps and enter
     them into the MAPS array.  */
  int idx = 0;
  for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
    {
      l->l_idx = idx;
      maps[idx] = l;
      ++idx;

      /* Clear DF_1_NODELETE to force object deletion.  We don't need to touch
         l_tls_dtor_count because forced object deletion only happens when an
         error occurs during object load.  Destructor registration for TLS
         non-POD objects should not have happened till then for this
         object.  */
      if (force)
        l->l_flags_1 &= ~DF_1_NODELETE;
    }
  assert (idx == nloaded);

  /* Prepare the bitmaps.  */
  memset (used, '\0', sizeof (used));
  memset (done, '\0', sizeof (done));

  /* Keep track of the lowest index link map we have covered already.  */
  int done_index = -1;
  while (++done_index < nloaded)
    {
      struct link_map *l = maps[done_index];

      if (done[done_index])
        /* Already handled.  */
        continue;

      /* Check whether this object is still used.  */
      if (l->l_type == lt_loaded
          && l->l_direct_opencount == 0
          && (l->l_flags_1 & DF_1_NODELETE) == 0
          /* See CONCURRENCY NOTES in cxa_thread_atexit_impl.c to know why
             acquire is sufficient and correct.  */
          && atomic_load_acquire (&l->l_tls_dtor_count) == 0
          && !used[done_index])
        continue;

      /* We need this object and we handle it now.  */
      done[done_index] = 1;
      used[done_index] = 1;
      /* Signal the object is still needed.  */
      l->l_idx = IDX_STILL_USED;

      /* Mark all dependencies as used.  */
      if (l->l_initfini != NULL)
        {
          /* We are always the zeroth entry, and since we don't include
             ourselves in the dependency analysis start at 1.  */
          struct link_map **lp = &l->l_initfini[1];
          while (*lp != NULL)
            {
              if ((*lp)->l_idx != IDX_STILL_USED)
                {
                  assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);

                  if (!used[(*lp)->l_idx])
                    {
                      used[(*lp)->l_idx] = 1;
                      /* If we marked a new object as used, and we've
                         already processed it, then we need to go back
                         and process again from that point forward to
                         ensure we keep all of its dependencies also.  */
                      if ((*lp)->l_idx - 1 < done_index)
                        done_index = (*lp)->l_idx - 1;
                    }
                }

              ++lp;
            }
        }

      /* And the same for relocation dependencies.  */
      if (l->l_reldeps != NULL)
        for (unsigned int j = 0; j < l->l_reldeps->act; ++j)
          {
            struct link_map *jmap = l->l_reldeps->list[j];

            if (jmap->l_idx != IDX_STILL_USED)
              {
                assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);

                if (!used[jmap->l_idx])
                  {
                    used[jmap->l_idx] = 1;
                    if (jmap->l_idx - 1 < done_index)
                      done_index = jmap->l_idx - 1;
                  }
              }
          }
    }

  /* Sort the entries so that the objects' termination functions are called
     in the correct order.  */
  _dl_sort_fini (maps, nloaded, used, nsid);

  /* Call all termination functions at once.  */
#ifdef SHARED
  bool do_audit = GLRO(dl_naudit) > 0 && !ns->_ns_loaded->l_auditing;
#endif
  bool unload_any = false;
  bool scope_mem_left = false;
  unsigned int unload_global = 0;
  unsigned int first_loaded = ~0;
  for (unsigned int i = 0; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];

      /* All elements must be in the same namespace.  */
      assert (imap->l_ns == nsid);

      if (!used[i])
        {
          assert (imap->l_type == lt_loaded
                  && (imap->l_flags_1 & DF_1_NODELETE) == 0);

          /* Call its termination function.  Do not do it for
             half-cooked objects.  */
          if (imap->l_init_called)
            {
              /* When debugging print a message first.  */
              if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS,
                                    0))
                _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
                                  imap->l_name, nsid);

              if (imap->l_info[DT_FINI_ARRAY] != NULL)
                {
                  ElfW(Addr) *array =
                    (ElfW(Addr) *) (imap->l_addr
                                    + imap->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
                  unsigned int sz = (imap->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
                                     / sizeof (ElfW(Addr)));

                  while (sz-- > 0)
                    ((fini_t) array[sz]) ();
                }

              /* Next try the old-style destructor.  */
              if (imap->l_info[DT_FINI] != NULL)
                DL_CALL_DT_FINI (imap, ((void *) imap->l_addr
                                        + imap->l_info[DT_FINI]->d_un.d_ptr));
            }

#ifdef SHARED
          /* Auditing checkpoint: we remove an object.  */
          if (__glibc_unlikely (do_audit))
            {
              struct audit_ifaces *afct = GLRO(dl_audit);
              for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
                {
                  if (afct->objclose != NULL)
                    /* Return value is ignored.  */
                    (void) afct->objclose (&imap->l_audit[cnt].cookie);

                  afct = afct->next;
                }
            }
#endif

          /* This object must not be used anymore.  */
          imap->l_removed = 1;

          /* We indeed have an object to remove.  */
          unload_any = true;

          if (imap->l_global)
            ++unload_global;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
      /* Else used[i].  */
      else if (imap->l_type == lt_loaded)
        {
          struct r_scope_elem *new_list = NULL;

          if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
            {
              /* The object is still used.  But one of the objects we are
                 unloading right now is responsible for loading it.  If
                 the current object does not have its own scope yet we
                 have to create one.  This has to be done before running
                 the finalizers.

                 To do this count the number of dependencies.  */
              unsigned int cnt;
              for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
                ;

              /* We simply reuse the l_initfini list.  */
              imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
              imap->l_searchlist.r_nlist = cnt;

              new_list = &imap->l_searchlist;
            }

          /* Count the number of scopes which remain after the unload.
             When we add the local search list count it.  Always add
             one for the terminating NULL pointer.  */
          size_t remain = (new_list != NULL) + 1;
          bool removed_any = false;
          for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
            /* This relies on l_scope[] entries being always set either
               to its own l_symbolic_searchlist address, or some map's
               l_searchlist address.  */
            if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
              {
                struct link_map *tmap = (struct link_map *)
                  ((char *) imap->l_scope[cnt]
                   - offsetof (struct link_map, l_searchlist));
                assert (tmap->l_ns == nsid);
                if (tmap->l_idx == IDX_STILL_USED)
                  ++remain;
                else
                  removed_any = true;
              }
            else
              ++remain;

          if (removed_any)
            {
              /* Always allocate a new array for the scope.  This is
                 necessary since we must be able to determine the last
                 user of the current array.  If possible use the link map's
                 memory.  */
              size_t new_size;
              struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

              if (imap->l_scope != imap->l_scope_mem
                  && remain < SCOPE_ELEMS (imap))
                {
                  new_size = SCOPE_ELEMS (imap);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlclose", NULL,
                                      N_("cannot create scope list"));
                }

              /* Copy over the remaining scope elements.  */
              remain = 0;
              for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
                {
                  if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
                    {
                      struct link_map *tmap = (struct link_map *)
                        ((char *) imap->l_scope[cnt]
                         - offsetof (struct link_map, l_searchlist));
                      if (tmap->l_idx != IDX_STILL_USED)
                        {
                          /* Remove the scope.  Or replace with own map's
                             scope.  */
                          if (new_list != NULL)
                            {
                              newp[remain++] = new_list;
                              new_list = NULL;
                            }
                          continue;
                        }
                    }

                  newp[remain++] = imap->l_scope[cnt];
                }
              newp[remain] = NULL;

              struct r_scope_elem **old = imap->l_scope;

              imap->l_scope = newp;

              /* No user anymore, we can free it now.  */
              if (old != imap->l_scope_mem)
                {
                  if (_dl_scope_free (old))
                    /* If _dl_scope_free used THREAD_GSCOPE_WAIT (),
                       no need to repeat it.  */
                    scope_mem_left = false;
                }
              else
                scope_mem_left = true;

              imap->l_scope_max = new_size;
            }
          else if (new_list != NULL)
            {
              /* We didn't change the scope array, so reset the search
                 list.  */
              imap->l_searchlist.r_list = NULL;
              imap->l_searchlist.r_nlist = 0;
            }

          /* The loader is gone, so mark the object as not having one.
             Note: l_idx != IDX_STILL_USED -> object will be removed.  */
          if (imap->l_loader != NULL
              && imap->l_loader->l_idx != IDX_STILL_USED)
            imap->l_loader = NULL;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
    }

  /* If there are no objects to unload, do nothing further.  */
  if (!unload_any)
    goto out;

#ifdef SHARED
  /* Auditing checkpoint: we will start deleting objects.  */
  if (__glibc_unlikely (do_audit))
    {
      struct link_map *head = ns->_ns_loaded;
      struct audit_ifaces *afct = GLRO(dl_audit);
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie, LA_ACT_DELETE);

              afct = afct->next;
            }
        }
    }
#endif

  /* Notify the debugger we are about to remove some loaded objects.  */
  struct r_debug *r = _dl_debug_initialize (0, nsid);
  r->r_state = RT_DELETE;
  _dl_debug_state ();
  LIBC_PROBE (unmap_start, 2, nsid, r);

  if (unload_global)
    {
      /* Some objects are in the global scope list.  Remove them.  */
      struct r_scope_elem *ns_msl = ns->_ns_main_searchlist;
      unsigned int i;
      unsigned int j = 0;
      unsigned int cnt = ns_msl->r_nlist;

      while (cnt > 0 && ns_msl->r_list[cnt - 1]->l_removed)
        --cnt;

      if (cnt + unload_global == ns_msl->r_nlist)
        /* Speed up removing most recently added objects.  */
        j = cnt;
      else
        for (i = 0; i < cnt; i++)
          if (ns_msl->r_list[i]->l_removed == 0)
            {
              if (i != j)
                ns_msl->r_list[j] = ns_msl->r_list[i];
              j++;
            }
      ns_msl->r_nlist = j;
    }

  if (!RTLD_SINGLE_THREAD_P
      && (unload_global
          || scope_mem_left
          || (GL(dl_scope_free_list) != NULL
              && GL(dl_scope_free_list)->count)))
    {
      THREAD_GSCOPE_WAIT ();

      /* Now we can free any queued old scopes.  */
      struct dl_scope_free_list *fsl = GL(dl_scope_free_list);
      if (fsl != NULL)
        while (fsl->count > 0)
          free (fsl->list[--fsl->count]);
    }
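
  /* TLS_FREE_START and TLS_FREE_END delimit one contiguous range of static
     TLS space that belonged to unloaded modules; if that range turns out to
     sit at the end of the area currently in use, it is handed back below by
     lowering GL(dl_tls_static_used).  */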
  size_t tls_free_start;
  size_t tls_free_end;
  tls_free_start = tls_free_end = NO_TLS_OFFSET;

  /* We modify the list of loaded objects.  */
  __rtld_lock_lock_recursive (GL(dl_load_write_lock));

  /* Check each element of the search list to see if all references to
     it are gone.  */
  for (unsigned int i = first_loaded; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];
      if (!used[i])
        {
          assert (imap->l_type == lt_loaded);

          /* That was the last reference, and this was a dlopen-loaded
             object.  We can unmap it.  */

          /* Remove the object from the dtv slotinfo array if it uses TLS.  */
          if (__glibc_unlikely (imap->l_tls_blocksize > 0))
            {
              any_tls = true;

              if (GL(dl_tls_dtv_slotinfo_list) != NULL
                  && ! remove_slotinfo (imap->l_tls_modid,
                                        GL(dl_tls_dtv_slotinfo_list), 0,
                                        imap->l_init_called))
                /* All dynamically loaded modules with TLS are unloaded.  */
                GL(dl_tls_max_dtv_idx) = GL(dl_tls_static_nelem);

              if (imap->l_tls_offset != NO_TLS_OFFSET
                  && imap->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
                {
                  /* Collect a contiguous chunk built from the objects in
                     this search list, going in either direction.  When the
                     whole chunk is at the end of the used area then we can
                     reclaim it.  */
#if TLS_TCB_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET
                      || (size_t) imap->l_tls_offset == tls_free_start)
                    {
                      /* Extend the contiguous chunk being reclaimed.  */
                      tls_free_start
                        = imap->l_tls_offset - imap->l_tls_blocksize;

                      if (tls_free_end == NO_TLS_OFFSET)
                        tls_free_end = imap->l_tls_offset;
                    }
                  else if (imap->l_tls_offset - imap->l_tls_blocksize
                           == tls_free_end)
                    /* Extend the chunk backwards.  */
                    tls_free_end = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked unless we can free
                         one block right away.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        {
                          GL(dl_tls_static_used) = tls_free_start;
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                      else if ((size_t) imap->l_tls_offset
                               == GL(dl_tls_static_used))
                        GL(dl_tls_static_used)
                          = imap->l_tls_offset - imap->l_tls_blocksize;
                      else if (tls_free_end < (size_t) imap->l_tls_offset)
                        {
                          /* We pick the later block.  It has a chance to
                             be freed.  */
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                    }
#elif TLS_DTV_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET)
                    {
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = (imap->l_tls_offset
                                      + imap->l_tls_blocksize);
                    }
                  else if (imap->l_tls_firstbyte_offset == tls_free_end)
                    /* Extend the contiguous chunk being reclaimed.  */
                    tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == tls_free_start)
                    /* Extend the chunk backwards.  */
                    tls_free_start = imap->l_tls_firstbyte_offset;
                  /* This isn't contiguous with the last chunk freed.
                     One of them will be leaked unless we can free
                     one block right away.  */
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == GL(dl_tls_static_used))
                    GL(dl_tls_static_used) = imap->l_tls_firstbyte_offset;
                  else if (tls_free_end == GL(dl_tls_static_used))
                    {
                      GL(dl_tls_static_used) = tls_free_start;
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
                  else if (tls_free_end < imap->l_tls_firstbyte_offset)
                    {
                      /* We pick the later block.  It has a chance to
                         be freed.  */
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
                }
            }

          /* Reset unique symbols if forced.  */
          if (force)
            {
              struct unique_sym_table *tab = &ns->_ns_unique_sym_table;
              __rtld_lock_lock_recursive (tab->lock);
              struct unique_sym *entries = tab->entries;
              if (entries != NULL)
                {
                  size_t idx, size = tab->size;
                  for (idx = 0; idx < size; ++idx)
                    {
                      /* Clear unique symbol entries that belong to this
                         object.  */
                      if (entries[idx].name != NULL
                          && entries[idx].map == imap)
                        {
                          entries[idx].name = NULL;
                          entries[idx].hashval = 0;
                          tab->n_elements--;
                        }
                    }
                }
              __rtld_lock_unlock_recursive (tab->lock);
            }

          /* We can unmap all the maps at once.  We determined the
             start address and length when we loaded the object and
             the `munmap' call does the rest.  */
          DL_UNMAP (imap);

          /* Finally, unlink the data structure and free it.  */
#if DL_NNS == 1
          /* The assert in the (imap->l_prev == NULL) case gives
             the compiler license to warn that NS points outside
             the dl_ns array bounds in that case (as nsid != LM_ID_BASE
             is tantamount to nsid >= DL_NNS).  That should be impossible
             in this configuration, so just assert about it instead.  */
          assert (nsid == LM_ID_BASE);
          assert (imap->l_prev != NULL);
#else
          if (imap->l_prev == NULL)
            {
              assert (nsid != LM_ID_BASE);
              ns->_ns_loaded = imap->l_next;

              /* Update the pointer to the head of the list
                 we leave for debuggers to examine.  */
              r->r_map = (void *) ns->_ns_loaded;
            }
          else
#endif
            imap->l_prev->l_next = imap->l_next;

          --ns->_ns_nloaded;
          if (imap->l_next != NULL)
            imap->l_next->l_prev = imap->l_prev;

          free (imap->l_versions);
          if (imap->l_origin != (char *) -1)
            free ((char *) imap->l_origin);

          free (imap->l_reldeps);

          /* Print debugging message.  */
          if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
            _dl_debug_printf ("\nfile=%s [%lu];  destroying link map\n",
                              imap->l_name, imap->l_ns);

          /* This name always is allocated.  */
          free (imap->l_name);
          /* Remove the list with all the names of the shared object.  */

          struct libname_list *lnp = imap->l_libname;
          do
            {
              struct libname_list *this = lnp;
              lnp = lnp->next;
              if (!this->dont_free)
                free (this);
            }
          while (lnp != NULL);

          /* Remove the searchlists.  */
          free (imap->l_initfini);

          /* Remove the scope array if we allocated it.  */
          if (imap->l_scope != imap->l_scope_mem)
            free (imap->l_scope);

          if (imap->l_phdr_allocated)
            free ((void *) imap->l_phdr);

          if (imap->l_rpath_dirs.dirs != (void *) -1)
            free (imap->l_rpath_dirs.dirs);
          if (imap->l_runpath_dirs.dirs != (void *) -1)
            free (imap->l_runpath_dirs.dirs);

          free (imap);
        }
    }

  __rtld_lock_unlock_recursive (GL(dl_load_write_lock));
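
  /* Unloading a module leaves stale entries in the DTVs of running threads;
     they resynchronize lazily, on their next __tls_get_addr call, by
     comparing their DTV generation with GL(dl_tls_generation).  */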
  /* If we removed any object which uses TLS bump the generation counter.  */
  if (any_tls)
    {
      if (__glibc_unlikely (++GL(dl_tls_generation) == 0))
        _dl_fatal_printf ("TLS generation counter wrapped!  Please report as described in "REPORT_BUGS_TO".\n");

      if (tls_free_end == GL(dl_tls_static_used))
        GL(dl_tls_static_used) = tls_free_start;
    }

#ifdef SHARED
  /* Auditing checkpoint: we have deleted all objects.  */
  if (__glibc_unlikely (do_audit))
    {
      struct link_map *head = ns->_ns_loaded;
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          struct audit_ifaces *afct = GLRO(dl_audit);
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie, LA_ACT_CONSISTENT);

              afct = afct->next;
            }
        }
    }
#endif
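
  /* If the highest-numbered namespaces no longer contain any objects,
     shrink GL(dl_nns) so that it again counts only namespaces that are
     still populated.  */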
  if (__builtin_expect (ns->_ns_loaded == NULL, 0)
      && nsid == GL(dl_nns) - 1)
    do
      --GL(dl_nns);
    while (GL(dl_ns)[GL(dl_nns) - 1]._ns_loaded == NULL);

  /* Notify the debugger those objects are finalized and gone.  */
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
  LIBC_PROBE (unmap_complete, 2, nsid, r);

  /* Recheck if we need to retry, release the lock.  */
 out:
  if (dl_close_state == rerun)
    goto retry;

  dl_close_state = not_pending;
}
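
/* Entry point used to implement dlclose.  Objects marked DF_1_NODELETE are
   never unloaded, closing an object that is not open is an error, and the
   actual work is done by _dl_close_worker with GL(dl_load_lock) held.  */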
void
_dl_close (void *_map)
{
  struct link_map *map = _map;

  /* First see whether we can remove the object at all.  */
  if (__glibc_unlikely (map->l_flags_1 & DF_1_NODELETE))
    {
      assert (map->l_init_called);
      /* Nope.  Do nothing.  */
      return;
    }

  if (__builtin_expect (map->l_direct_opencount, 1) == 0)
    GLRO(dl_signal_error) (0, map->l_name, NULL, N_("shared object not open"));

  /* Acquire the lock.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  _dl_close_worker (map, false);

  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}
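
/* A minimal usage sketch from the application side (not part of this file;
   "libfoo.so" is just a placeholder for some dlopen-able object): each
   successful dlopen of the same object raises its l_direct_opencount, and
   the object is only finalized and unmapped once that count drops back to
   zero in _dl_close_worker.

     #include <dlfcn.h>

     int
     main (void)
     {
       void *h1 = dlopen ("libfoo.so", RTLD_NOW);   // opencount == 1
       void *h2 = dlopen ("libfoo.so", RTLD_NOW);   // opencount == 2
       dlclose (h1);   // count drops to 1; _dl_close_worker returns early
       dlclose (h2);   // count drops to 0; destructors run, object unmapped
       return 0;
     }  */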