elf/dl-close.c (glibc.git)
/* Close a shared object opened by `_dl_open'.
   Copyright (C) 1996-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>

#include <dl-unmap-segments.h>

/* Type of the destructor (finalizer) functions.  */
typedef void (*fini_t) (void);

/* Special l_idx value used to indicate which objects remain loaded.  */
#define IDX_STILL_USED -1

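/* Every module that defines TLS occupies a slot in the chained
   dtv_slotinfo_list.  remove_slotinfo below clears the slot of a module
   being unloaded and, if that slot was the highest one in use, scans
   backwards for the new value of GL(dl_tls_max_dtv_idx).  */
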
/* Returns true if a non-empty entry was found.  */
static bool
remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
                 bool should_be_there)
{
  if (idx - disp >= listp->len)
    {
      if (listp->next == NULL)
        {
          /* The index is not actually valid in the slotinfo list,
             because this object was closed before it was fully set
             up due to some error.  */
          assert (! should_be_there);
        }
      else
        {
          if (remove_slotinfo (idx, listp->next, disp + listp->len,
                               should_be_there))
            return true;

          /* No non-empty entry.  Search from the end of this element's
             slotinfo array.  */
          idx = disp + listp->len;
        }
    }
  else
    {
      struct link_map *old_map = listp->slotinfo[idx - disp].map;

      /* The entry might still be in its unused state if we are closing an
         object that wasn't fully set up.  */
      if (__glibc_likely (old_map != NULL))
        {
          /* Mark the entry as unused.  These can be read concurrently.  */
          atomic_store_relaxed (&listp->slotinfo[idx - disp].gen,
                                GL(dl_tls_generation) + 1);
          atomic_store_relaxed (&listp->slotinfo[idx - disp].map, NULL);
        }

      /* If this is not the last currently used entry no need to look
         further.  */
      if (idx != GL(dl_tls_max_dtv_idx))
        {
          /* There is an unused dtv entry in the middle.  */
          GL(dl_tls_dtv_gaps) = true;
          return true;
        }
    }

  while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
    {
      --idx;

      if (listp->slotinfo[idx - disp].map != NULL)
        {
          /* Found a new last used index.  This can be read concurrently.  */
          atomic_store_relaxed (&GL(dl_tls_max_dtv_idx), idx);
          return true;
        }
    }

  /* No non-empty entry in this list element.  */
  return false;
}

/* Invoke destructors for CLOSURE (a struct link_map *).  Called with
   exception handling temporarily disabled, to make errors fatal.  */
static void
call_destructors (void *closure)
{
  struct link_map *map = closure;

  if (map->l_info[DT_FINI_ARRAY] != NULL)
    {
      ElfW(Addr) *array =
        (ElfW(Addr) *) (map->l_addr
                        + map->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
      unsigned int sz = (map->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
                         / sizeof (ElfW(Addr)));

      while (sz-- > 0)
        ((fini_t) array[sz]) ();
    }

  /* Next try the old-style destructor.  */
  if (map->l_info[DT_FINI] != NULL)
    DL_CALL_DT_FINI (map, ((void *) map->l_addr
                           + map->l_info[DT_FINI]->d_un.d_ptr));
}

void
_dl_close_worker (struct link_map *map, bool force)
{
  /* One less direct use.  */
  --map->l_direct_opencount;

  /* If _dl_close is called recursively (some destructor calls dlclose),
     just record that the parent _dl_close will need to do garbage collection
     again and return.  */
  static enum { not_pending, pending, rerun } dl_close_state;

  if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
      || dl_close_state != not_pending)
    {
      if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
        dl_close_state = rerun;

      /* There are still references to this object.  Do nothing more.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
        _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
                          map->l_name, map->l_direct_opencount);

      return;
    }

  Lmid_t nsid = map->l_ns;
  struct link_namespaces *ns = &GL(dl_ns)[nsid];

 retry:
  dl_close_state = pending;
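
  /* What follows is essentially a mark-and-sweep pass over the namespace:
     first mark every map that is still reachable from a map with direct
     references, then finalize, unlink and unmap the unmarked ones.  */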
  bool any_tls = false;
  const unsigned int nloaded = ns->_ns_nloaded;
  struct link_map *maps[nloaded];

  /* Run over the list and assign indexes to the link maps and enter
     them into the MAPS array.  */
  int idx = 0;
  for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
    {
      l->l_map_used = 0;
      l->l_map_done = 0;
      l->l_idx = idx;
      maps[idx] = l;
      ++idx;
    }
  assert (idx == nloaded);
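
  /* NLOADED is fixed for the rest of this pass; the MAPS array preserves
     the list order so that indexes stay stable while l_idx is repurposed
     below.  */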
  /* Keep track of the lowest index link map we have covered already.  */
  int done_index = -1;
  while (++done_index < nloaded)
    {
      struct link_map *l = maps[done_index];

      if (l->l_map_done)
        /* Already handled.  */
        continue;

      /* Check whether this object is still used.  */
      if (l->l_type == lt_loaded
          && l->l_direct_opencount == 0
          && !l->l_nodelete_active
          /* See CONCURRENCY NOTES in cxa_thread_atexit_impl.c to know why
             acquire is sufficient and correct.  */
          && atomic_load_acquire (&l->l_tls_dtor_count) == 0
          && !l->l_map_used)
        continue;

      /* We need this object and we handle it now.  */
      l->l_map_used = 1;
      l->l_map_done = 1;
      /* Signal the object is still needed.  */
      l->l_idx = IDX_STILL_USED;

      /* Mark all dependencies as used.  */
      if (l->l_initfini != NULL)
        {
          /* We are always the zeroth entry, and since we don't include
             ourselves in the dependency analysis start at 1.  */
          struct link_map **lp = &l->l_initfini[1];
          while (*lp != NULL)
            {
              if ((*lp)->l_idx != IDX_STILL_USED)
                {
                  assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);

                  if (!(*lp)->l_map_used)
                    {
                      (*lp)->l_map_used = 1;
                      /* If we marked a new object as used, and we've
                         already processed it, then we need to go back
                         and process again from that point forward to
                         ensure we keep all of its dependencies also.  */
                      if ((*lp)->l_idx - 1 < done_index)
                        done_index = (*lp)->l_idx - 1;
                    }
                }

              ++lp;
            }
        }
      /* And the same for relocation dependencies.  */
      if (l->l_reldeps != NULL)
        for (unsigned int j = 0; j < l->l_reldeps->act; ++j)
          {
            struct link_map *jmap = l->l_reldeps->list[j];

            if (jmap->l_idx != IDX_STILL_USED)
              {
                assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);

                if (!jmap->l_map_used)
                  {
                    jmap->l_map_used = 1;
                    if (jmap->l_idx - 1 < done_index)
                      done_index = jmap->l_idx - 1;
                  }
              }
          }
    }
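
  /* Every map that is still unmarked (l_map_used == 0) at this point is
     unreachable from all objects that stay loaded and will be unloaded
     below.  */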
  /* Sort the entries.  We can skip looking for the binary itself which is
     at the front of the search list for the main namespace.  */
  _dl_sort_maps (maps, nloaded, (nsid == LM_ID_BASE), true);
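
  /* After sorting, the destructor of an object runs before the
     destructors of the objects it depends on.  */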
  /* Call all termination functions at once.  */
#ifdef SHARED
  bool do_audit = GLRO(dl_naudit) > 0 && !ns->_ns_loaded->l_auditing;
#endif
  bool unload_any = false;
  bool scope_mem_left = false;
  unsigned int unload_global = 0;
  unsigned int first_loaded = ~0;
  for (unsigned int i = 0; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];

      /* All elements must be in the same namespace.  */
      assert (imap->l_ns == nsid);

      if (!imap->l_map_used)
        {
          assert (imap->l_type == lt_loaded && !imap->l_nodelete_active);

          /* Call its termination function.  Do not do it for
             half-cooked objects.  Temporarily disable exception
             handling, so that errors are fatal.  */
          if (imap->l_init_called)
            {
              /* When debugging print a message first.  */
              if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS,
                                    0))
                _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
                                  imap->l_name, nsid);

              if (imap->l_info[DT_FINI_ARRAY] != NULL
                  || imap->l_info[DT_FINI] != NULL)
                _dl_catch_exception (NULL, call_destructors, imap);
            }

#ifdef SHARED
          /* Auditing checkpoint: we remove an object.  */
          if (__glibc_unlikely (do_audit))
            {
              struct audit_ifaces *afct = GLRO(dl_audit);
              for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
                {
                  if (afct->objclose != NULL)
                    {
                      struct auditstate *state
                        = link_map_audit_state (imap, cnt);
                      /* Return value is ignored.  */
                      (void) afct->objclose (&state->cookie);
                    }

                  afct = afct->next;
                }
            }
#endif

          /* This object must not be used anymore.  */
          imap->l_removed = 1;

          /* We indeed have an object to remove.  */
          unload_any = true;

          if (imap->l_global)
            ++unload_global;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
      /* Else imap->l_map_used.  */
      else if (imap->l_type == lt_loaded)
        {
          struct r_scope_elem *new_list = NULL;

          if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
            {
              /* The object is still used.  But one of the objects we are
                 unloading right now is responsible for loading it.  If
                 the current object does not have its own scope yet we
                 have to create one.  This has to be done before running
                 the finalizers.

                 To do this count the number of dependencies.  */
              unsigned int cnt;
              for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
                ;

              /* We simply reuse the l_initfini list.  */
              imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
              imap->l_searchlist.r_nlist = cnt;

              new_list = &imap->l_searchlist;
            }

          /* Count the number of scopes which remain after the unload.
             When we add the local search list count it.  Always add
             one for the terminating NULL pointer.  */
          size_t remain = (new_list != NULL) + 1;
          bool removed_any = false;
          for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
            /* This relies on l_scope[] entries being always set either
               to its own l_symbolic_searchlist address, or some map's
               l_searchlist address.  */
            if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
              {
                struct link_map *tmap = (struct link_map *)
                  ((char *) imap->l_scope[cnt]
                   - offsetof (struct link_map, l_searchlist));
                assert (tmap->l_ns == nsid);
                if (tmap->l_idx == IDX_STILL_USED)
                  ++remain;
                else
                  removed_any = true;
              }
            else
              ++remain;

          if (removed_any)
            {
              /* Always allocate a new array for the scope.  This is
                 necessary since we must be able to determine the last
                 user of the current array.  If possible use the link map's
                 memory.  */
              size_t new_size;
              struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

              if (imap->l_scope != imap->l_scope_mem
                  && remain < SCOPE_ELEMS (imap))
                {
                  new_size = SCOPE_ELEMS (imap);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlclose", NULL,
                                      N_("cannot create scope list"));
                }

              /* Copy over the remaining scope elements.  */
              remain = 0;
              for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
                {
                  if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
                    {
                      struct link_map *tmap = (struct link_map *)
                        ((char *) imap->l_scope[cnt]
                         - offsetof (struct link_map, l_searchlist));
                      if (tmap->l_idx != IDX_STILL_USED)
                        {
                          /* Remove the scope.  Or replace with own map's
                             scope.  */
                          if (new_list != NULL)
                            {
                              newp[remain++] = new_list;
                              new_list = NULL;
                            }
                          continue;
                        }
                    }

                  newp[remain++] = imap->l_scope[cnt];
                }
              newp[remain] = NULL;

              struct r_scope_elem **old = imap->l_scope;

              imap->l_scope = newp;

              /* No user anymore, we can free it now.  */
              if (old != imap->l_scope_mem)
                {
                  if (_dl_scope_free (old))
                    /* If _dl_scope_free used THREAD_GSCOPE_WAIT (),
                       no need to repeat it.  */
                    scope_mem_left = false;
                }
              else
                scope_mem_left = true;

              imap->l_scope_max = new_size;
            }
          else if (new_list != NULL)
            {
              /* We didn't change the scope array, so reset the search
                 list.  */
              imap->l_searchlist.r_list = NULL;
              imap->l_searchlist.r_nlist = 0;
            }

          /* The loader is gone, so mark the object as not having one.
             Note: l_idx != IDX_STILL_USED -> object will be removed.  */
          if (imap->l_loader != NULL
              && imap->l_loader->l_idx != IDX_STILL_USED)
            imap->l_loader = NULL;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
    }
  /* If there are no objects to unload, do nothing further.  */
  if (!unload_any)
    goto out;

#ifdef SHARED
  /* Auditing checkpoint: we will start deleting objects.  */
  if (__glibc_unlikely (do_audit))
    {
      struct link_map *head = ns->_ns_loaded;
      struct audit_ifaces *afct = GLRO(dl_audit);
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                {
                  struct auditstate *state = link_map_audit_state (head, cnt);
                  afct->activity (&state->cookie, LA_ACT_DELETE);
                }

              afct = afct->next;
            }
        }
    }
#endif

  /* Notify the debugger we are about to remove some loaded objects.  */
  struct r_debug *r = _dl_debug_update (nsid);
  r->r_state = RT_DELETE;
  _dl_debug_state ();
  LIBC_PROBE (unmap_start, 2, nsid, r);
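
  /* Debuggers put a breakpoint on _dl_debug_state and re-read the link-map
     list when it is hit; RT_DELETE announces that the list is about to
     change, and the RT_CONSISTENT transition near the end of this function
     signals that it is stable again.  */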
  if (unload_global)
    {
      /* Some objects are in the global scope list.  Remove them.  */
      struct r_scope_elem *ns_msl = ns->_ns_main_searchlist;
      unsigned int i;
      unsigned int j = 0;
      unsigned int cnt = ns_msl->r_nlist;

      while (cnt > 0 && ns_msl->r_list[cnt - 1]->l_removed)
        --cnt;

      if (cnt + unload_global == ns_msl->r_nlist)
        /* Speed up removing most recently added objects.  */
        j = cnt;
      else
        for (i = 0; i < cnt; i++)
          if (ns_msl->r_list[i]->l_removed == 0)
            {
              if (i != j)
                ns_msl->r_list[j] = ns_msl->r_list[i];
              j++;
            }
      ns_msl->r_nlist = j;
    }
  if (!RTLD_SINGLE_THREAD_P
      && (unload_global
          || scope_mem_left
          || (GL(dl_scope_free_list) != NULL
              && GL(dl_scope_free_list)->count)))
    {
      THREAD_GSCOPE_WAIT ();

      /* Now we can free any queued old scopes.  */
      struct dl_scope_free_list *fsl = GL(dl_scope_free_list);
      if (fsl != NULL)
        while (fsl->count > 0)
          free (fsl->list[--fsl->count]);
    }
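
  /* The THREAD_GSCOPE_WAIT above acts as a grace period: once it returns,
     no thread can still be walking one of the old scope arrays, so the
     queued arrays can be freed without further synchronization.  */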
  size_t tls_free_start;
  size_t tls_free_end;
  tls_free_start = tls_free_end = NO_TLS_OFFSET;

  /* Protects global and module specific TLS state.  */
  __rtld_lock_lock_recursive (GL(dl_load_tls_lock));

  /* We modify the list of loaded objects.  */
  __rtld_lock_lock_recursive (GL(dl_load_write_lock));
  /* Check each element of the search list to see if all references to
     it are gone.  */
  for (unsigned int i = first_loaded; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];
      if (!imap->l_map_used)
        {
          assert (imap->l_type == lt_loaded);

          /* That was the last reference, and this was a dlopen-loaded
             object.  We can unmap it.  */

          /* Remove the object from the dtv slotinfo array if it uses TLS.  */
          if (__glibc_unlikely (imap->l_tls_blocksize > 0))
            {
              any_tls = true;

              if (GL(dl_tls_dtv_slotinfo_list) != NULL
                  && ! remove_slotinfo (imap->l_tls_modid,
                                        GL(dl_tls_dtv_slotinfo_list), 0,
                                        imap->l_init_called))
                /* All dynamically loaded modules with TLS are unloaded.  */
                /* Can be read concurrently.  */
                atomic_store_relaxed (&GL(dl_tls_max_dtv_idx),
                                      GL(dl_tls_static_nelem));

              if (imap->l_tls_offset != NO_TLS_OFFSET
                  && imap->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
                {
                  /* Collect a contiguous chunk built from the objects in
                     this search list, going in either direction.  When the
                     whole chunk is at the end of the used area then we can
                     reclaim it.  */
#if TLS_TCB_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET
                      || (size_t) imap->l_tls_offset == tls_free_start)
                    {
                      /* Extend the contiguous chunk being reclaimed.  */
                      tls_free_start
                        = imap->l_tls_offset - imap->l_tls_blocksize;

                      if (tls_free_end == NO_TLS_OFFSET)
                        tls_free_end = imap->l_tls_offset;
                    }
                  else if (imap->l_tls_offset - imap->l_tls_blocksize
                           == tls_free_end)
                    /* Extend the chunk backwards.  */
                    tls_free_end = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked unless we can free
                         one block right away.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        {
                          GL(dl_tls_static_used) = tls_free_start;
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                      else if ((size_t) imap->l_tls_offset
                               == GL(dl_tls_static_used))
                        GL(dl_tls_static_used)
                          = imap->l_tls_offset - imap->l_tls_blocksize;
                      else if (tls_free_end < (size_t) imap->l_tls_offset)
                        {
                          /* We pick the later block.  It has a chance to
                             be freed.  */
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                    }
#elif TLS_DTV_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET)
                    {
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = (imap->l_tls_offset
                                      + imap->l_tls_blocksize);
                    }
                  else if (imap->l_tls_firstbyte_offset == tls_free_end)
                    /* Extend the contiguous chunk being reclaimed.  */
                    tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == tls_free_start)
                    /* Extend the chunk backwards.  */
                    tls_free_start = imap->l_tls_firstbyte_offset;
                  /* This isn't contiguous with the last chunk freed.
                     One of them will be leaked unless we can free
                     one block right away.  */
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == GL(dl_tls_static_used))
                    GL(dl_tls_static_used) = imap->l_tls_firstbyte_offset;
                  else if (tls_free_end == GL(dl_tls_static_used))
                    {
                      GL(dl_tls_static_used) = tls_free_start;
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
                  else if (tls_free_end < imap->l_tls_firstbyte_offset)
                    {
                      /* We pick the later block.  It has a chance to
                         be freed.  */
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
                }
            }
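
          /* STB_GNU_UNIQUE symbols defined by this object may still be
             recorded in the namespace's unique symbol table; when FORCE is
             set the entries are dropped below so they cannot dangle once
             the object is unmapped.  */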
          /* Reset unique symbols if forced.  */
          if (force)
            {
              struct unique_sym_table *tab = &ns->_ns_unique_sym_table;
              __rtld_lock_lock_recursive (tab->lock);
              struct unique_sym *entries = tab->entries;
              if (entries != NULL)
                {
                  size_t idx, size = tab->size;
                  for (idx = 0; idx < size; ++idx)
                    {
                      /* Clear unique symbol entries that belong to this
                         object.  */
                      if (entries[idx].name != NULL
                          && entries[idx].map == imap)
                        {
                          entries[idx].name = NULL;
                          entries[idx].hashval = 0;
                          tab->n_elements--;
                        }
                    }
                }
              __rtld_lock_unlock_recursive (tab->lock);
            }
          /* We can unmap all the maps at once.  We determined the
             start address and length when we loaded the object and
             the `munmap' call does the rest.  */
          DL_UNMAP (imap);

          /* Finally, unlink the data structure and free it.  */
#if DL_NNS == 1
          /* The assert in the (imap->l_prev == NULL) case gives
             the compiler license to warn that NS points outside
             the dl_ns array bounds in that case (as nsid != LM_ID_BASE
             is tantamount to nsid >= DL_NNS).  That should be impossible
             in this configuration, so just assert about it instead.  */
          assert (nsid == LM_ID_BASE);
          assert (imap->l_prev != NULL);
#else
          if (imap->l_prev == NULL)
            {
              assert (nsid != LM_ID_BASE);
              ns->_ns_loaded = imap->l_next;

              /* Update the pointer to the head of the list
                 we leave for debuggers to examine.  */
              r->r_map = (void *) ns->_ns_loaded;
            }
          else
#endif
            imap->l_prev->l_next = imap->l_next;

          --ns->_ns_nloaded;
          if (imap->l_next != NULL)
            imap->l_next->l_prev = imap->l_prev;

          free (imap->l_versions);
          if (imap->l_origin != (char *) -1)
            free ((char *) imap->l_origin);

          free (imap->l_reldeps);

          /* Print debugging message.  */
          if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
            _dl_debug_printf ("\nfile=%s [%lu]; destroying link map\n",
                              imap->l_name, imap->l_ns);

          /* This name always is allocated.  */
          free (imap->l_name);
          /* Remove the list with all the names of the shared object.  */

          struct libname_list *lnp = imap->l_libname;
          do
            {
              struct libname_list *this = lnp;
              lnp = lnp->next;
              if (!this->dont_free)
                free (this);
            }
          while (lnp != NULL);

          /* Remove the searchlists.  */
          free (imap->l_initfini);

          /* Remove the scope array if we allocated it.  */
          if (imap->l_scope != imap->l_scope_mem)
            free (imap->l_scope);

          if (imap->l_phdr_allocated)
            free ((void *) imap->l_phdr);

          if (imap->l_rpath_dirs.dirs != (void *) -1)
            free (imap->l_rpath_dirs.dirs);
          if (imap->l_runpath_dirs.dirs != (void *) -1)
            free (imap->l_runpath_dirs.dirs);

          /* Clear GL(dl_initfirst) when freeing its link_map memory.  */
          if (imap == GL(dl_initfirst))
            GL(dl_initfirst) = NULL;

          free (imap);
        }
    }
  __rtld_lock_unlock_recursive (GL(dl_load_write_lock));

  /* If we removed any object which uses TLS bump the generation counter.  */
  if (any_tls)
    {
      size_t newgen = GL(dl_tls_generation) + 1;
      if (__glibc_unlikely (newgen == 0))
        _dl_fatal_printf ("TLS generation counter wrapped!  Please report as described in "REPORT_BUGS_TO".\n");
      /* Can be read concurrently.  */
      atomic_store_relaxed (&GL(dl_tls_generation), newgen);

      if (tls_free_end == GL(dl_tls_static_used))
        GL(dl_tls_static_used) = tls_free_start;
    }

  /* TLS is cleaned up for the unloaded modules.  */
  __rtld_lock_unlock_recursive (GL(dl_load_tls_lock));
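
  /* Bumping GL(dl_tls_generation) above marks every thread's dtv as stale:
     the next __tls_get_addr call in each thread compares generations and
     reinitializes outdated dtv entries, which is what makes it safe to
     reuse the freed TLS module IDs later.  */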
#ifdef SHARED
  /* Auditing checkpoint: we have deleted all objects.  */
  if (__glibc_unlikely (do_audit))
    {
      struct link_map *head = ns->_ns_loaded;
      /* If head is NULL, the namespace has become empty, and the
         audit interface does not give us a way to signal
         LA_ACT_CONSISTENT for it because the first loaded module is
         used to identify the namespace.

         Furthermore, do not notify auditors of the cleanup of a
         failed audit module loading attempt.  */
      if (head != NULL && head->l_auditing == 0)
        {
          struct audit_ifaces *afct = GLRO(dl_audit);
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                {
                  struct auditstate *state = link_map_audit_state (head, cnt);
                  afct->activity (&state->cookie, LA_ACT_CONSISTENT);
                }

              afct = afct->next;
            }
        }
    }
#endif
  if (__builtin_expect (ns->_ns_loaded == NULL, 0)
      && nsid == GL(dl_nns) - 1)
    do
      --GL(dl_nns);
    while (GL(dl_ns)[GL(dl_nns) - 1]._ns_loaded == NULL);

  /* Notify the debugger those objects are finalized and gone.  */
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
  LIBC_PROBE (unmap_complete, 2, nsid, r);

  /* Recheck if we need to retry, release the lock.  */
 out:
  if (dl_close_state == rerun)
    goto retry;

  dl_close_state = not_pending;
}

void
_dl_close (void *_map)
{
  struct link_map *map = _map;

  /* We must take the lock to examine the contents of map and avoid
     concurrent dlopens.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  /* At this point we are guaranteed nobody else is touching the list of
     loaded maps, but a concurrent dlclose might have freed our map
     before we took the lock.  There is no way to detect this (see below)
     so we proceed assuming this isn't the case.  First see whether we
     can remove the object at all.  */
  if (__glibc_unlikely (map->l_nodelete_active))
    {
      /* Nope.  Do nothing.  */
      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      return;
    }

  /* At present this is an unreliable check except in the case where the
     caller has recursively called dlclose and we are sure the link map
     has not been freed.  In a non-recursive dlclose the map itself
     might have been freed and this access is potentially a data race
     with whatever other use this memory might have now, or worse we
     might silently corrupt memory if it looks enough like a link map.
     POSIX has language in dlclose that appears to guarantee that this
     should be a detectable case and given that dlclose should be threadsafe
     we need this to be a reliable detection.
     This is bug 20990.  */
  if (__builtin_expect (map->l_direct_opencount, 1) == 0)
    {
      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      _dl_signal_error (0, map->l_name, NULL, N_("shared object not open"));
    }

  _dl_close_worker (map, false);

  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}
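
/* Illustrative sketch (not part of the original file): the usual way this
   code is reached from user code is through the public dlfcn interface;
   "libfoo.so" below is only an example name.

     void *h = dlopen ("libfoo.so", RTLD_NOW);
     if (h == NULL)
       fprintf (stderr, "dlopen: %s\n", dlerror ());
     else if (dlclose (h) != 0)
       fprintf (stderr, "dlclose: %s\n", dlerror ());

   Each dlopen of the same object increments l_direct_opencount; dlclose
   ends up in _dl_close above, and the object is only finalized and
   unmapped once that count reaches zero and no other loaded object still
   depends on it.  */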