/* Close a shared object opened by `_dl_open'.
   Copyright (C) 1996-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>
#include <dl-find_object.h>

#include <dl-unmap-segments.h>

/* Type of the destructor functions called during unload.  */
typedef void (*fini_t) (void);

/* Special l_idx value used to indicate which objects remain loaded.  */
#define IDX_STILL_USED -1

/* Returns true if a non-empty entry was found.  */
static bool
remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
                 bool should_be_there)
{
  if (idx - disp >= listp->len)
    {
      if (listp->next == NULL)
        {
          /* The index is not actually valid in the slotinfo list,
             because this object was closed before it was fully set
             up due to some error.  */
          assert (! should_be_there);
        }
      else
        {
          if (remove_slotinfo (idx, listp->next, disp + listp->len,
                               should_be_there))
            return true;

          /* No non-empty entry.  Search from the end of this element's
             slotinfo array.  */
          idx = disp + listp->len;
        }
    }
  else
    {
      struct link_map *old_map = listp->slotinfo[idx - disp].map;

      /* The entry might still be in its unused state if we are closing an
         object that wasn't fully set up.  */
      if (__glibc_likely (old_map != NULL))
        {
          /* Mark the entry as unused.  These can be read concurrently.  */
          atomic_store_relaxed (&listp->slotinfo[idx - disp].gen,
                                GL(dl_tls_generation) + 1);
          atomic_store_relaxed (&listp->slotinfo[idx - disp].map, NULL);
        }

      /* If this is not the last currently used entry no need to look
         further.  */
      if (idx != GL(dl_tls_max_dtv_idx))
        {
          /* There is an unused dtv entry in the middle.  */
          GL(dl_tls_dtv_gaps) = true;
          return true;
        }
    }

  while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
    {
      --idx;

      if (listp->slotinfo[idx - disp].map != NULL)
        {
          /* Found a new last used index.  This can be read concurrently.  */
          atomic_store_relaxed (&GL(dl_tls_max_dtv_idx), idx);
          return true;
        }
    }

  /* No non-empty entry in this list element.  */
  return false;
}

/* Invoke destructors for CLOSURE (a struct link_map *).  Called with
   exception handling temporarily disabled, to make errors fatal.  */
static void
call_destructors (void *closure)
{
  struct link_map *map = closure;

  if (map->l_info[DT_FINI_ARRAY] != NULL)
    {
      ElfW(Addr) *array =
        (ElfW(Addr) *) (map->l_addr
                        + map->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
      unsigned int sz = (map->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
                         / sizeof (ElfW(Addr)));

      while (sz-- > 0)
        ((fini_t) array[sz]) ();
    }

  /* Next try the old-style destructor.  */
  if (map->l_info[DT_FINI] != NULL)
    DL_CALL_DT_FINI (map, ((void *) map->l_addr
                           + map->l_info[DT_FINI]->d_un.d_ptr));
}

void
_dl_close_worker (struct link_map *map, bool force)
{
  /* One less direct use.  */
  --map->l_direct_opencount;

  /* If _dl_close is called recursively (some destructor calls dlclose),
     just record that the parent _dl_close will need to do garbage collection
     again and return.  */
  static enum { not_pending, pending, rerun } dl_close_state;
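  /* This state is file-local rather than per-map; it relies on the
     caller holding GL(dl_load_lock), which serializes every call to
     _dl_close_worker, including the recursive ones described above.  */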

  if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
      || dl_close_state != not_pending)
    {
      if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
        dl_close_state = rerun;

      /* There are still references to this object.  Do nothing more.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
        _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
                          map->l_name, map->l_direct_opencount);

      return;
    }

  Lmid_t nsid = map->l_ns;
  struct link_namespaces *ns = &GL(dl_ns)[nsid];

 retry:
  dl_close_state = pending;

  bool any_tls = false;
  const unsigned int nloaded = ns->_ns_nloaded;
  struct link_map *maps[nloaded];

  /* Run over the list and assign indexes to the link maps and enter
     them into the MAPS array.  */
  int idx = 0;
  for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
    {
      l->l_map_used = 0;
      l->l_map_done = 0;
      l->l_idx = idx;
      maps[idx] = l;
      ++idx;
    }
  assert (idx == nloaded);
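
  /* Mark phase of the garbage collection: flag every object that must
     stay loaded (anything that is not a dlopened object, is still
     directly opened, is marked NODELETE, has live TLS destructors, or
     is a dependency of such an object).  Whatever remains unmarked
     afterwards can be unloaded below.  */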
  /* Keep track of the lowest index link map we have covered already.  */
  int done_index = -1;
  while (++done_index < nloaded)
    {
      struct link_map *l = maps[done_index];

      if (l->l_map_done)
        /* Already handled.  */
        continue;

      /* Check whether this object is still used.  */
      if (l->l_type == lt_loaded
          && l->l_direct_opencount == 0
          && !l->l_nodelete_active
          /* See CONCURRENCY NOTES in cxa_thread_atexit_impl.c to know why
             acquire is sufficient and correct.  */
          && atomic_load_acquire (&l->l_tls_dtor_count) == 0
          && !l->l_map_used)
        continue;

      /* We need this object and we handle it now.  */
      l->l_map_used = 1;
      l->l_map_done = 1;
      /* Signal the object is still needed.  */
      l->l_idx = IDX_STILL_USED;

      /* Mark all dependencies as used.  */
      if (l->l_initfini != NULL)
        {
          /* We are always the zeroth entry, and since we don't include
             ourselves in the dependency analysis start at 1.  */
          struct link_map **lp = &l->l_initfini[1];
          while (*lp != NULL)
            {
              if ((*lp)->l_idx != IDX_STILL_USED)
                {
                  assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);

                  if (!(*lp)->l_map_used)
                    {
                      (*lp)->l_map_used = 1;
                      /* If we marked a new object as used, and we've
                         already processed it, then we need to go back
                         and process again from that point forward to
                         ensure we keep all of its dependencies also.  */
                      if ((*lp)->l_idx - 1 < done_index)
                        done_index = (*lp)->l_idx - 1;
                    }
                }

              ++lp;
            }
        }
      /* And the same for relocation dependencies.  */
      if (l->l_reldeps != NULL)
        for (unsigned int j = 0; j < l->l_reldeps->act; ++j)
          {
            struct link_map *jmap = l->l_reldeps->list[j];

            if (jmap->l_idx != IDX_STILL_USED)
              {
                assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);

                if (!jmap->l_map_used)
                  {
                    jmap->l_map_used = 1;
                    if (jmap->l_idx - 1 < done_index)
                      done_index = jmap->l_idx - 1;
                  }
              }
          }
    }

  /* Sort the entries.  We can skip looking for the binary itself which is
     at the front of the search list for the main namespace.  */
  _dl_sort_maps (maps, nloaded, (nsid == LM_ID_BASE), true);
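  /* After sorting, an object appears before the objects it depends on,
     so the loop below runs an object's destructors before the
     destructors of its dependencies.  */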

  /* Call all termination functions at once.  */
  bool unload_any = false;
  bool scope_mem_left = false;
  unsigned int unload_global = 0;
  unsigned int first_loaded = ~0;
  for (unsigned int i = 0; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];

      /* All elements must be in the same namespace.  */
      assert (imap->l_ns == nsid);

      if (!imap->l_map_used)
        {
          assert (imap->l_type == lt_loaded && !imap->l_nodelete_active);

          /* Call its termination function.  Do not do it for
             half-cooked objects.  Temporarily disable exception
             handling, so that errors are fatal.  */
          if (imap->l_init_called)
            {
              /* When debugging print a message first.  */
              if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS,
                                    0))
                _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
                                  imap->l_name, nsid);

              if (imap->l_info[DT_FINI_ARRAY] != NULL
                  || imap->l_info[DT_FINI] != NULL)
                _dl_catch_exception (NULL, call_destructors, imap);
            }

#ifdef SHARED
          /* Auditing checkpoint: we remove an object.  */
          _dl_audit_objclose (imap);
#endif

          /* This object must not be used anymore.  */
          imap->l_removed = 1;

          /* We indeed have an object to remove.  */
          unload_any = true;

          if (imap->l_global)
            ++unload_global;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
      /* Else imap->l_map_used.  */
      else if (imap->l_type == lt_loaded)
        {
          struct r_scope_elem *new_list = NULL;

          if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
            {
              /* The object is still used.  But one of the objects we are
                 unloading right now is responsible for loading it.  If
                 the current object does not have its own scope yet we
                 have to create one.  This has to be done before running
                 the finalizers.

                 To do this count the number of dependencies.  */
              unsigned int cnt;
              for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
                ;

              /* We simply reuse the l_initfini list.  */
              imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
              imap->l_searchlist.r_nlist = cnt;

              new_list = &imap->l_searchlist;
            }

          /* Count the number of scopes which remain after the unload.
             When we add the local search list count it.  Always add
             one for the terminating NULL pointer.  */
          size_t remain = (new_list != NULL) + 1;
          bool removed_any = false;
          for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
            /* This relies on l_scope[] entries being always set either
               to its own l_symbolic_searchlist address, or some map's
               l_searchlist address.  */
            if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
              {
                struct link_map *tmap = (struct link_map *)
                  ((char *) imap->l_scope[cnt]
                   - offsetof (struct link_map, l_searchlist));
                assert (tmap->l_ns == nsid);
                if (tmap->l_idx == IDX_STILL_USED)
                  ++remain;
                else
                  removed_any = true;
              }
            else
              ++remain;

          if (removed_any)
            {
              /* Always allocate a new array for the scope.  This is
                 necessary since we must be able to determine the last
                 user of the current array.  If possible use the link map's
                 memory.  */
              size_t new_size;
              struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

              if (imap->l_scope != imap->l_scope_mem
                  && remain < SCOPE_ELEMS (imap))
                {
                  new_size = SCOPE_ELEMS (imap);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlclose", NULL,
                                      N_("cannot create scope list"));
                }

              /* Copy over the remaining scope elements.  */
              remain = 0;
              for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
                {
                  if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
                    {
                      struct link_map *tmap = (struct link_map *)
                        ((char *) imap->l_scope[cnt]
                         - offsetof (struct link_map, l_searchlist));
                      if (tmap->l_idx != IDX_STILL_USED)
                        {
                          /* Remove the scope.  Or replace with own map's
                             scope.  */
                          if (new_list != NULL)
                            {
                              newp[remain++] = new_list;
                              new_list = NULL;
                            }
                          continue;
                        }
                    }

                  newp[remain++] = imap->l_scope[cnt];
                }
              newp[remain] = NULL;

              struct r_scope_elem **old = imap->l_scope;

              imap->l_scope = newp;

              /* No user anymore, we can free it now.  */
              if (old != imap->l_scope_mem)
                {
                  if (_dl_scope_free (old))
                    /* If _dl_scope_free used THREAD_GSCOPE_WAIT (),
                       no need to repeat it.  */
                    scope_mem_left = false;
                }
              else
                scope_mem_left = true;

              imap->l_scope_max = new_size;
            }
          else if (new_list != NULL)
            {
              /* We didn't change the scope array, so reset the search
                 list.  */
              imap->l_searchlist.r_list = NULL;
              imap->l_searchlist.r_nlist = 0;
            }

          /* The loader is gone, so mark the object as not having one.
             Note: l_idx != IDX_STILL_USED -> object will be removed.  */
          if (imap->l_loader != NULL
              && imap->l_loader->l_idx != IDX_STILL_USED)
            imap->l_loader = NULL;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
    }

  /* If there are no objects to unload, do nothing further.  */
  if (!unload_any)
    goto out;

#ifdef SHARED
  /* Auditing checkpoint: we will start deleting objects.  */
  _dl_audit_activity_nsid (nsid, LA_ACT_DELETE);
#endif

  /* Notify the debugger we are about to remove some loaded objects.  */
  struct r_debug *r = _dl_debug_update (nsid);
  r->r_state = RT_DELETE;
  _dl_debug_state ();
  LIBC_PROBE (unmap_start, 2, nsid, r);

  if (unload_global)
    {
      /* Some objects are in the global scope list.  Remove them.  */
      struct r_scope_elem *ns_msl = ns->_ns_main_searchlist;
      unsigned int i;
      unsigned int j = 0;
      unsigned int cnt = ns_msl->r_nlist;

      while (cnt > 0 && ns_msl->r_list[cnt - 1]->l_removed)
        --cnt;

      if (cnt + unload_global == ns_msl->r_nlist)
        /* Speed up removing most recently added objects.  */
        j = cnt;
      else
        for (i = 0; i < cnt; i++)
          if (ns_msl->r_list[i]->l_removed == 0)
            {
              if (i != j)
                ns_msl->r_list[j] = ns_msl->r_list[i];
              j++;
            }
      ns_msl->r_nlist = j;
    }

  if (!RTLD_SINGLE_THREAD_P
      && (unload_global
          || scope_mem_left
          || (GL(dl_scope_free_list) != NULL
              && GL(dl_scope_free_list)->count)))
    {
      THREAD_GSCOPE_WAIT ();

      /* Now we can free any queued old scopes.  */
      struct dl_scope_free_list *fsl = GL(dl_scope_free_list);
      if (fsl != NULL)
        while (fsl->count > 0)
          free (fsl->list[--fsl->count]);
    }

  size_t tls_free_start;
  size_t tls_free_end;
  tls_free_start = tls_free_end = NO_TLS_OFFSET;
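  /* tls_free_start/tls_free_end track one contiguous chunk of static TLS
     space owned by the modules being unloaded; chunks that end up at the
     top of the used area are given back by lowering
     GL(dl_tls_static_used).  */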

  /* Protects global and module-specific TLS state.  */
  __rtld_lock_lock_recursive (GL(dl_load_tls_lock));

  /* We modify the list of loaded objects.  */
  __rtld_lock_lock_recursive (GL(dl_load_write_lock));

  /* Check each element of the search list to see if all references to
     it are gone.  */
  for (unsigned int i = first_loaded; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];
      if (!imap->l_map_used)
        {
          assert (imap->l_type == lt_loaded);

          /* That was the last reference, and this was a dlopen-loaded
             object.  We can unmap it.  */

          /* Remove the object from the dtv slotinfo array if it uses TLS.  */
          if (__glibc_unlikely (imap->l_tls_blocksize > 0))
            {
              any_tls = true;

              if (GL(dl_tls_dtv_slotinfo_list) != NULL
                  && ! remove_slotinfo (imap->l_tls_modid,
                                        GL(dl_tls_dtv_slotinfo_list), 0,
                                        imap->l_init_called))
                /* All dynamically loaded modules with TLS are unloaded.  */
                /* Can be read concurrently.  */
                atomic_store_relaxed (&GL(dl_tls_max_dtv_idx),
                                      GL(dl_tls_static_nelem));

              if (imap->l_tls_offset != NO_TLS_OFFSET
                  && imap->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
                {
                  /* Collect a contiguous chunk built from the objects in
                     this search list, going in either direction.  When the
                     whole chunk is at the end of the used area then we can
                     reclaim it.  */
#if TLS_TCB_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET
                      || (size_t) imap->l_tls_offset == tls_free_start)
                    {
                      /* Extend the contiguous chunk being reclaimed.  */
                      tls_free_start
                        = imap->l_tls_offset - imap->l_tls_blocksize;

                      if (tls_free_end == NO_TLS_OFFSET)
                        tls_free_end = imap->l_tls_offset;
                    }
                  else if (imap->l_tls_offset - imap->l_tls_blocksize
                           == tls_free_end)
                    /* Extend the chunk backwards.  */
                    tls_free_end = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked unless we can free
                         one block right away.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        {
                          GL(dl_tls_static_used) = tls_free_start;
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                      else if ((size_t) imap->l_tls_offset
                               == GL(dl_tls_static_used))
                        GL(dl_tls_static_used)
                          = imap->l_tls_offset - imap->l_tls_blocksize;
                      else if (tls_free_end < (size_t) imap->l_tls_offset)
                        {
                          /* We pick the later block.  It has a chance to
                             be freed.  */
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                    }
#elif TLS_DTV_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET)
                    {
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = (imap->l_tls_offset
                                      + imap->l_tls_blocksize);
                    }
                  else if (imap->l_tls_firstbyte_offset == tls_free_end)
                    /* Extend the contiguous chunk being reclaimed.  */
                    tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == tls_free_start)
                    /* Extend the chunk backwards.  */
                    tls_free_start = imap->l_tls_firstbyte_offset;
                  /* This isn't contiguous with the last chunk freed.
                     One of them will be leaked unless we can free
                     one block right away.  */
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == GL(dl_tls_static_used))
                    GL(dl_tls_static_used) = imap->l_tls_firstbyte_offset;
                  else if (tls_free_end == GL(dl_tls_static_used))
                    {
                      GL(dl_tls_static_used) = tls_free_start;
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
                  else if (tls_free_end < imap->l_tls_firstbyte_offset)
                    {
                      /* We pick the later block.  It has a chance to
                         be freed.  */
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
                }
            }

          /* Reset unique symbols if forced.  */
          if (force)
            {
              struct unique_sym_table *tab = &ns->_ns_unique_sym_table;
              __rtld_lock_lock_recursive (tab->lock);
              struct unique_sym *entries = tab->entries;
              if (entries != NULL)
                {
                  size_t idx, size = tab->size;
                  for (idx = 0; idx < size; ++idx)
                    {
                      /* Clear unique symbol entries that belong to this
                         object.  */
                      if (entries[idx].name != NULL
                          && entries[idx].map == imap)
                        {
                          entries[idx].name = NULL;
                          entries[idx].hashval = 0;
                          tab->n_elements--;
                        }
                    }
                }
              __rtld_lock_unlock_recursive (tab->lock);
            }

          /* We can unmap all the maps at once.  We determined the
             start address and length when we loaded the object and
             the `munmap' call does the rest.  */
          DL_UNMAP (imap);

          /* Finally, unlink the data structure and free it.  */
#if DL_NNS == 1
          /* The assert in the (imap->l_prev == NULL) case gives
             the compiler license to warn that NS points outside
             the dl_ns array bounds in that case (as nsid != LM_ID_BASE
             is tantamount to nsid >= DL_NNS).  That should be impossible
             in this configuration, so just assert about it instead.  */
          assert (nsid == LM_ID_BASE);
          assert (imap->l_prev != NULL);
#else
          if (imap->l_prev == NULL)
            {
              assert (nsid != LM_ID_BASE);
              ns->_ns_loaded = imap->l_next;

              /* Update the pointer to the head of the list
                 we leave for debuggers to examine.  */
              r->r_map = (void *) ns->_ns_loaded;
            }
          else
#endif
            imap->l_prev->l_next = imap->l_next;

          --ns->_ns_nloaded;
          if (imap->l_next != NULL)
            imap->l_next->l_prev = imap->l_prev;

          /* Update the data used by _dl_find_object.  */
          _dl_find_object_dlclose (imap);

          free (imap->l_versions);
          if (imap->l_origin != (char *) -1)
            free ((char *) imap->l_origin);

          free (imap->l_reldeps);

          /* Print debugging message.  */
          if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
            _dl_debug_printf ("\nfile=%s [%lu];  destroying link map\n",
                              imap->l_name, imap->l_ns);

          /* This name is always allocated.  */
          free (imap->l_name);

          /* Remove the list with all the names of the shared object.  */
          struct libname_list *lnp = imap->l_libname;
          do
            {
              struct libname_list *this = lnp;
              lnp = lnp->next;
              if (!this->dont_free)
                free (this);
            }
          while (lnp != NULL);

          /* Remove the searchlists.  */
          free (imap->l_initfini);

          /* Remove the scope array if we allocated it.  */
          if (imap->l_scope != imap->l_scope_mem)
            free (imap->l_scope);

          if (imap->l_phdr_allocated)
            free ((void *) imap->l_phdr);

          if (imap->l_rpath_dirs.dirs != (void *) -1)
            free (imap->l_rpath_dirs.dirs);
          if (imap->l_runpath_dirs.dirs != (void *) -1)
            free (imap->l_runpath_dirs.dirs);

          /* Clear GL(dl_initfirst) when freeing its link_map memory.  */
          if (imap == GL(dl_initfirst))
            GL(dl_initfirst) = NULL;

          free (imap);
        }
    }

  __rtld_lock_unlock_recursive (GL(dl_load_write_lock));

  /* If we removed any object which uses TLS bump the generation counter.  */
  if (any_tls)
    {
      size_t newgen = GL(dl_tls_generation) + 1;
      if (__glibc_unlikely (newgen == 0))
        _dl_fatal_printf ("TLS generation counter wrapped!  Please report as described in "REPORT_BUGS_TO".\n");
      /* Can be read concurrently.  */
      atomic_store_relaxed (&GL(dl_tls_generation), newgen);

      if (tls_free_end == GL(dl_tls_static_used))
        GL(dl_tls_static_used) = tls_free_start;
    }

  /* TLS is cleaned up for the unloaded modules.  */
  __rtld_lock_unlock_recursive (GL(dl_load_tls_lock));

#ifdef SHARED
  /* Auditing checkpoint: we have deleted all objects.  Also, do not notify
     auditors of the cleanup of a failed audit module loading attempt.  */
  _dl_audit_activity_nsid (nsid, LA_ACT_CONSISTENT);
#endif

  /* If the highest namespaces are now empty, shrink the count of active
     namespaces accordingly.  */
  if (__builtin_expect (ns->_ns_loaded == NULL, 0)
      && nsid == GL(dl_nns) - 1)
    do
      --GL(dl_nns);
    while (GL(dl_ns)[GL(dl_nns) - 1]._ns_loaded == NULL);

  /* Notify the debugger that those objects are finalized and gone.  */
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
  LIBC_PROBE (unmap_complete, 2, nsid, r);

  /* Recheck if we need to retry, release the lock.  */
 out:
  if (dl_close_state == rerun)
    goto retry;

  dl_close_state = not_pending;
}


void
_dl_close (void *_map)
{
  struct link_map *map = _map;

  /* We must take the lock to examine the contents of map and avoid
     concurrent dlopens.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  /* At this point we are guaranteed nobody else is touching the list of
     loaded maps, but a concurrent dlclose might have freed our map
     before we took the lock.  There is no way to detect this (see below),
     so we proceed assuming this isn't the case.  First see whether we
     can remove the object at all.  */
  if (__glibc_unlikely (map->l_nodelete_active))
    {
      /* Nope.  Do nothing.  */
      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      return;
    }

  /* At present this is an unreliable check except in the case where the
     caller has recursively called dlclose and we are sure the link map
     has not been freed.  In a non-recursive dlclose the map itself
     might have been freed and this access is potentially a data race
     with whatever other use this memory might have now, or worse we
     might silently corrupt memory if it looks enough like a link map.
     POSIX has language in dlclose that appears to guarantee that this
     should be a detectable case and given that dlclose should be threadsafe
     we need this to be a reliable detection.
     This is bug 20990.  */
  if (__builtin_expect (map->l_direct_opencount, 1) == 0)
    {
      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      _dl_signal_error (0, map->l_name, NULL, N_("shared object not open"));
    }

  _dl_close_worker (map, false);

  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}