elf/dl-close.c
1 /* Close a shared object opened by `_dl_open'.
2 Copyright (C) 1996-2024 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <https://www.gnu.org/licenses/>. */
19 #include <assert.h>
20 #include <dlfcn.h>
21 #include <errno.h>
22 #include <libintl.h>
23 #include <stddef.h>
24 #include <stdio.h>
25 #include <stdlib.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <libc-lock.h>
29 #include <ldsodefs.h>
30 #include <sys/types.h>
31 #include <sys/mman.h>
32 #include <sysdep-cancel.h>
33 #include <tls.h>
34 #include <stap-probe.h>
35 #include <dl-find_object.h>
37 #include <dl-unmap-segments.h>
39 /* Special l_idx value used to indicate which objects remain loaded. */
40 #define IDX_STILL_USED -1
43 /* Returns true if a non-empty entry was found. */
44 static bool
45 remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
46 bool should_be_there)
48 if (idx - disp >= listp->len)
50 if (listp->next == NULL)
52 /* The index is not actually valid in the slotinfo list,
53 because this object was closed before it was fully set
54 up due to some error. */
55 assert (! should_be_there);
57 else
59 if (remove_slotinfo (idx, listp->next, disp + listp->len,
60 should_be_there))
61 return true;
63 /* No non-empty entry. Search from the end of this element's
64 slotinfo array. */
65 idx = disp + listp->len;
68 else
70 struct link_map *old_map = listp->slotinfo[idx - disp].map;
72 /* The entry might still be in its unused state if we are closing an
73 object that wasn't fully set up. */
74 if (__glibc_likely (old_map != NULL))
76 /* Mark the entry as unused. These can be read concurrently. */
77 atomic_store_relaxed (&listp->slotinfo[idx - disp].gen,
78 GL(dl_tls_generation) + 1);
79 atomic_store_relaxed (&listp->slotinfo[idx - disp].map, NULL);
82 /* If this is not the last currently used entry, there is no need
83 to look further. */
84 if (idx != GL(dl_tls_max_dtv_idx))
86 /* There is an unused dtv entry in the middle. */
87 GL(dl_tls_dtv_gaps) = true;
88 return true;
92 while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
94 --idx;
96 if (listp->slotinfo[idx - disp].map != NULL)
98 /* Found a new last used index. This can be read concurrently. */
99 atomic_store_relaxed (&GL(dl_tls_max_dtv_idx), idx);
100 return true;
104 /* No non-empty entry in this list element. */
105 return false;
108 void
109 _dl_close_worker (struct link_map *map, bool force)
111 /* One less direct use. */
112 --map->l_direct_opencount;
114 /* If _dl_close is called recursively (some destructor calls dlclose),
115 just record that the parent _dl_close will need to do garbage collection
116 again and return. */
117 static enum { not_pending, pending, rerun } dl_close_state;
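/* For illustration: if a destructor run further below calls dlclose
   itself, the nested _dl_close_worker invocation only sets
   dl_close_state to rerun and returns early; the outermost invocation
   then sees this at the out: label and jumps back to retry to repeat
   the garbage collection.  */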
119 if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
120 || dl_close_state != not_pending)
122 if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
123 dl_close_state = rerun;
125 /* There are still references to this object. Do nothing more. */
126 if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
127 _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
128 map->l_name, map->l_direct_opencount);
130 return;
133 Lmid_t nsid = map->l_ns;
134 struct link_namespaces *ns = &GL(dl_ns)[nsid];
136 retry:
137 dl_close_state = pending;
139 bool any_tls = false;
140 const unsigned int nloaded = ns->_ns_nloaded;
141 struct link_map *maps[nloaded];
143 /* Run over the list, assign an index to each link map, and enter
144 them into the MAPS array. */
145 int idx = 0;
146 for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
148 l->l_map_used = 0;
149 l->l_map_done = 0;
150 l->l_idx = idx;
151 maps[idx] = l;
152 ++idx;
154 assert (idx == nloaded);
156 /* Put the dlclose'd map first, so that its destructor runs first.
157 The map variable is NULL after a retry. */
158 if (map != NULL)
160 maps[map->l_idx] = maps[0];
161 maps[map->l_idx]->l_idx = map->l_idx;
162 maps[0] = map;
163 maps[0]->l_idx = 0;
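/* For example, with a hypothetical namespace list yielding
   maps[] = { A, B, M } and M being the map passed to dlclose, the swap
   above produces { M, B, A }, with the l_idx fields updated to match.  */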
166 /* Keep track of the lowest index link map we have covered already. */
167 int done_index = -1;
168 while (++done_index < nloaded)
170 struct link_map *l = maps[done_index];
172 if (l->l_map_done)
173 /* Already handled. */
174 continue;
176 /* Check whether this object is still used. */
177 if (l->l_type == lt_loaded
178 && l->l_direct_opencount == 0
179 && !l->l_nodelete_active
180 /* See CONCURRENCY NOTES in cxa_thread_atexit_impl.c to know why
181 acquire is sufficient and correct. */
182 && atomic_load_acquire (&l->l_tls_dtor_count) == 0
183 && !l->l_map_used)
184 continue;
186 /* We need this object and we handle it now. */
187 l->l_map_used = 1;
188 l->l_map_done = 1;
189 /* Signal the object is still needed. */
190 l->l_idx = IDX_STILL_USED;
192 /* Mark all dependencies as used. */
193 if (l->l_initfini != NULL)
195 /* We are always the zeroth entry, and since we don't include
196 ourselves in the dependency analysis, start at 1. */
197 struct link_map **lp = &l->l_initfini[1];
198 while (*lp != NULL)
200 if ((*lp)->l_idx != IDX_STILL_USED)
202 assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);
204 if (!(*lp)->l_map_used)
206 (*lp)->l_map_used = 1;
207 /* If we marked a new object as used, and we've
208 already processed it, then we need to go back
209 and process again from that point forward to
210 ensure we keep all of its dependencies also. */
211 if ((*lp)->l_idx - 1 < done_index)
212 done_index = (*lp)->l_idx - 1;
216 ++lp;
219 /* And the same for relocation dependencies. */
220 if (l->l_reldeps != NULL)
221 for (unsigned int j = 0; j < l->l_reldeps->act; ++j)
223 struct link_map *jmap = l->l_reldeps->list[j];
225 if (jmap->l_idx != IDX_STILL_USED)
227 assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);
229 if (!jmap->l_map_used)
231 jmap->l_map_used = 1;
232 if (jmap->l_idx - 1 < done_index)
233 done_index = jmap->l_idx - 1;
239 /* Sort the entries. Unless retrying, the maps[0] object (the
240 original argument to dlclose) needs to remain first, so that its
241 destructor runs first. */
242 _dl_sort_maps (maps, nloaded, /* force_first */ map != NULL, true);
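/* _dl_sort_maps reorders the array by dependencies so that, broadly,
   an object's destructor runs before the destructors of the objects it
   depends on (the reverse of initialization order).  */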
244 /* Call all termination functions at once. */
245 bool unload_any = false;
246 bool scope_mem_left = false;
247 unsigned int unload_global = 0;
248 unsigned int first_loaded = ~0;
249 for (unsigned int i = 0; i < nloaded; ++i)
251 struct link_map *imap = maps[i];
253 /* All elements must be in the same namespace. */
254 assert (imap->l_ns == nsid);
256 if (!imap->l_map_used)
258 assert (imap->l_type == lt_loaded && !imap->l_nodelete_active);
260 /* Call its termination function. Do not do it for
261 half-cooked objects. Temporarily disable exception
262 handling, so that errors are fatal. */
263 if (imap->l_init_called)
264 _dl_catch_exception (NULL, _dl_call_fini, imap);
266 #ifdef SHARED
267 /* Auditing checkpoint: we remove an object. */
268 _dl_audit_objclose (imap);
269 #endif
271 /* This object must not be used anymore. */
272 imap->l_removed = 1;
274 /* We indeed have an object to remove. */
275 unload_any = true;
277 if (imap->l_global)
278 ++unload_global;
280 /* Remember where the first dynamically loaded object is. */
281 if (i < first_loaded)
282 first_loaded = i;
284 /* Else imap->l_map_used. */
285 else if (imap->l_type == lt_loaded)
287 struct r_scope_elem *new_list = NULL;
289 if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
291 /* The object is still used. But one of the objects we are
292 unloading right now is responsible for loading it. If
293 the current object does not have its own scope yet we
294 have to create one. This has to be done before running
295 the finalizers.
297 To do this, count the number of dependencies. */
298 unsigned int cnt;
299 for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
302 /* We simply reuse the l_initfini list. */
303 imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
304 imap->l_searchlist.r_nlist = cnt;
306 new_list = &imap->l_searchlist;
309 /* Count the number of scopes which remain after the unload.
310 When we add the local search list, count it. Always add
311 one for the terminating NULL pointer. */
312 size_t remain = (new_list != NULL) + 1;
313 bool removed_any = false;
314 for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
315 /* This relies on l_scope[] entries being always set either
316 to its own l_symbolic_searchlist address, or some map's
317 l_searchlist address. */
318 if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
320 struct link_map *tmap = (struct link_map *)
321 ((char *) imap->l_scope[cnt]
322 - offsetof (struct link_map, l_searchlist));
323 assert (tmap->l_ns == nsid);
324 if (tmap->l_idx == IDX_STILL_USED)
325 ++remain;
326 else
327 removed_any = true;
329 else
330 ++remain;
332 if (removed_any)
334 /* Always allocate a new array for the scope. This is
335 necessary since we must be able to determine the last
336 user of the current array. If possible use the link map's
337 memory. */
338 size_t new_size;
339 struct r_scope_elem **newp;
341 #define SCOPE_ELEMS(imap) \
342 (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))
344 if (imap->l_scope != imap->l_scope_mem
345 && remain < SCOPE_ELEMS (imap))
347 new_size = SCOPE_ELEMS (imap);
348 newp = imap->l_scope_mem;
350 else
352 new_size = imap->l_scope_max;
353 newp = (struct r_scope_elem **)
354 malloc (new_size * sizeof (struct r_scope_elem *));
355 if (newp == NULL)
356 _dl_signal_error (ENOMEM, "dlclose", NULL,
357 N_("cannot create scope list"));
360 /* Copy over the remaining scope elements. */
361 remain = 0;
362 for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
364 if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
366 struct link_map *tmap = (struct link_map *)
367 ((char *) imap->l_scope[cnt]
368 - offsetof (struct link_map, l_searchlist));
369 if (tmap->l_idx != IDX_STILL_USED)
371 /* Remove the scope. Or replace with own map's
372 scope. */
373 if (new_list != NULL)
375 newp[remain++] = new_list;
376 new_list = NULL;
378 continue;
382 newp[remain++] = imap->l_scope[cnt];
384 newp[remain] = NULL;
386 struct r_scope_elem **old = imap->l_scope;
388 imap->l_scope = newp;
390 /* No user anymore, we can free it now. */
391 if (old != imap->l_scope_mem)
393 if (_dl_scope_free (old))
394 /* If _dl_scope_free used THREAD_GSCOPE_WAIT (),
395 no need to repeat it. */
396 scope_mem_left = false;
398 else
399 scope_mem_left = true;
401 imap->l_scope_max = new_size;
403 else if (new_list != NULL)
405 /* We didn't change the scope array, so reset the search
406 list. */
407 imap->l_searchlist.r_list = NULL;
408 imap->l_searchlist.r_nlist = 0;
411 /* The loader is gone, so mark the object as not having one.
412 Note: l_idx != IDX_STILL_USED -> object will be removed. */
413 if (imap->l_loader != NULL
414 && imap->l_loader->l_idx != IDX_STILL_USED)
415 imap->l_loader = NULL;
417 /* Remember where the first dynamically loaded object is. */
418 if (i < first_loaded)
419 first_loaded = i;
423 /* If there are no objects to unload, do nothing further. */
424 if (!unload_any)
425 goto out;
427 #ifdef SHARED
428 /* Auditing checkpoint: we will start deleting objects. */
429 _dl_audit_activity_nsid (nsid, LA_ACT_DELETE);
430 #endif
432 /* Notify the debugger we are about to remove some loaded objects. */
433 struct r_debug *r = _dl_debug_update (nsid);
434 r->r_state = RT_DELETE;
435 _dl_debug_state ();
436 LIBC_PROBE (unmap_start, 2, nsid, r);
438 if (unload_global)
440 /* Some objects are in the global scope list. Remove them. */
441 struct r_scope_elem *ns_msl = ns->_ns_main_searchlist;
442 unsigned int i;
443 unsigned int j = 0;
444 unsigned int cnt = ns_msl->r_nlist;
446 while (cnt > 0 && ns_msl->r_list[cnt - 1]->l_removed)
447 --cnt;
449 if (cnt + unload_global == ns_msl->r_nlist)
450 /* Speed up removing most recently added objects. */
451 j = cnt;
452 else
453 for (i = 0; i < cnt; i++)
454 if (ns_msl->r_list[i]->l_removed == 0)
456 if (i != j)
457 ns_msl->r_list[j] = ns_msl->r_list[i];
458 j++;
460 ns_msl->r_nlist = j;
463 if (!RTLD_SINGLE_THREAD_P
464 && (unload_global
465 || scope_mem_left
466 || (GL(dl_scope_free_list) != NULL
467 && GL(dl_scope_free_list)->count)))
469 THREAD_GSCOPE_WAIT ();
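/* Once THREAD_GSCOPE_WAIT returns, no thread can still be traversing
   the old scope arrays, so the queued entries below can be freed
   safely.  */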
471 /* Now we can free any queued old scopes. */
472 struct dl_scope_free_list *fsl = GL(dl_scope_free_list);
473 if (fsl != NULL)
474 while (fsl->count > 0)
475 free (fsl->list[--fsl->count]);
478 size_t tls_free_start;
479 size_t tls_free_end;
480 tls_free_start = tls_free_end = NO_TLS_OFFSET;
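/* The loop below tries to coalesce the static TLS ranges of the
   unloaded modules into one contiguous chunk; the space is handed back
   (by lowering GL(dl_tls_static_used)) only when that chunk ends at the
   currently used boundary.  */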
482 /* Protects global and module specific TLS state. */
483 __rtld_lock_lock_recursive (GL(dl_load_tls_lock));
485 /* We modify the list of loaded objects. */
486 __rtld_lock_lock_recursive (GL(dl_load_write_lock));
488 /* Check each element of the search list to see if all references to
489 it are gone. */
490 for (unsigned int i = first_loaded; i < nloaded; ++i)
492 struct link_map *imap = maps[i];
493 if (!imap->l_map_used)
495 assert (imap->l_type == lt_loaded);
497 /* That was the last reference, and this was a dlopen-loaded
498 object. We can unmap it. */
500 /* Remove the object from the dtv slotinfo array if it uses TLS. */
501 if (__glibc_unlikely (imap->l_tls_blocksize > 0))
503 any_tls = true;
505 if (GL(dl_tls_dtv_slotinfo_list) != NULL
506 && ! remove_slotinfo (imap->l_tls_modid,
507 GL(dl_tls_dtv_slotinfo_list), 0,
508 imap->l_init_called))
509 /* All dynamically loaded modules with TLS are unloaded. */
510 /* Can be read concurrently. */
511 atomic_store_relaxed (&GL(dl_tls_max_dtv_idx),
512 GL(dl_tls_static_nelem));
514 if (imap->l_tls_offset != NO_TLS_OFFSET
515 && imap->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
517 /* Collect a contiguous chunk built from the objects in
518 this search list, going in either direction. When the
519 whole chunk is at the end of the used area then we can
520 reclaim it. */
521 #if TLS_TCB_AT_TP
522 if (tls_free_start == NO_TLS_OFFSET
523 || (size_t) imap->l_tls_offset == tls_free_start)
525 /* Extend the contiguous chunk being reclaimed. */
526 tls_free_start
527 = imap->l_tls_offset - imap->l_tls_blocksize;
529 if (tls_free_end == NO_TLS_OFFSET)
530 tls_free_end = imap->l_tls_offset;
532 else if (imap->l_tls_offset - imap->l_tls_blocksize
533 == tls_free_end)
534 /* Extend the chunk backwards. */
535 tls_free_end = imap->l_tls_offset;
536 else
538 /* This isn't contiguous with the last chunk freed.
539 One of them will be leaked unless we can free
540 one block right away. */
541 if (tls_free_end == GL(dl_tls_static_used))
543 GL(dl_tls_static_used) = tls_free_start;
544 tls_free_end = imap->l_tls_offset;
545 tls_free_start
546 = tls_free_end - imap->l_tls_blocksize;
548 else if ((size_t) imap->l_tls_offset
549 == GL(dl_tls_static_used))
550 GL(dl_tls_static_used)
551 = imap->l_tls_offset - imap->l_tls_blocksize;
552 else if (tls_free_end < (size_t) imap->l_tls_offset)
554 /* We pick the later block. It has a chance to
555 be freed. */
556 tls_free_end = imap->l_tls_offset;
557 tls_free_start
558 = tls_free_end - imap->l_tls_blocksize;
561 #elif TLS_DTV_AT_TP
562 if (tls_free_start == NO_TLS_OFFSET)
564 tls_free_start = imap->l_tls_firstbyte_offset;
565 tls_free_end = (imap->l_tls_offset
566 + imap->l_tls_blocksize);
568 else if (imap->l_tls_firstbyte_offset == tls_free_end)
569 /* Extend the contiguous chunk being reclaimed. */
570 tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
571 else if (imap->l_tls_offset + imap->l_tls_blocksize
572 == tls_free_start)
573 /* Extend the chunk backwards. */
574 tls_free_start = imap->l_tls_firstbyte_offset;
575 /* This isn't contiguous with the last chunk freed.
576 One of them will be leaked unless we can free
577 one block right away. */
578 else if (imap->l_tls_offset + imap->l_tls_blocksize
579 == GL(dl_tls_static_used))
580 GL(dl_tls_static_used) = imap->l_tls_firstbyte_offset;
581 else if (tls_free_end == GL(dl_tls_static_used))
583 GL(dl_tls_static_used) = tls_free_start;
584 tls_free_start = imap->l_tls_firstbyte_offset;
585 tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
587 else if (tls_free_end < imap->l_tls_firstbyte_offset)
589 /* We pick the later block. It has a chance to
590 be freed. */
591 tls_free_start = imap->l_tls_firstbyte_offset;
592 tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
594 #else
595 # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
596 #endif
600 /* Reset unique symbols if forced. */
601 if (force)
603 struct unique_sym_table *tab = &ns->_ns_unique_sym_table;
604 __rtld_lock_lock_recursive (tab->lock);
605 struct unique_sym *entries = tab->entries;
606 if (entries != NULL)
608 size_t idx, size = tab->size;
609 for (idx = 0; idx < size; ++idx)
611 /* Clear unique symbol entries that belong to this
612 object. */
613 if (entries[idx].name != NULL
614 && entries[idx].map == imap)
616 entries[idx].name = NULL;
617 entries[idx].hashval = 0;
618 tab->n_elements--;
622 __rtld_lock_unlock_recursive (tab->lock);
625 /* We can unmap all the maps at once. We determined the
626 start address and length when we loaded the object and
627 the `munmap' call does the rest. */
628 DL_UNMAP (imap);
630 /* Finally, unlink the data structure and free it. */
631 #if DL_NNS == 1
632 /* The assert in the (imap->l_prev == NULL) case gives
633 the compiler license to warn that NS points outside
634 the dl_ns array bounds in that case (as nsid != LM_ID_BASE
635 is tantamount to nsid >= DL_NNS). That should be impossible
636 in this configuration, so just assert about it instead. */
637 assert (nsid == LM_ID_BASE);
638 assert (imap->l_prev != NULL);
639 #else
640 if (imap->l_prev == NULL)
642 assert (nsid != LM_ID_BASE);
643 ns->_ns_loaded = imap->l_next;
645 /* Update the pointer to the head of the list
646 we leave for debuggers to examine. */
647 r->r_map = (void *) ns->_ns_loaded;
649 else
650 #endif
651 imap->l_prev->l_next = imap->l_next;
653 --ns->_ns_nloaded;
654 if (imap->l_next != NULL)
655 imap->l_next->l_prev = imap->l_prev;
657 /* Update the data used by _dl_find_object. */
658 _dl_find_object_dlclose (imap);
660 free (imap->l_versions);
661 if (imap->l_origin != (char *) -1)
662 free ((char *) imap->l_origin);
664 free (imap->l_reldeps);
666 /* Print debugging message. */
667 if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
668 _dl_debug_printf ("\nfile=%s [%lu]; destroying link map\n",
669 imap->l_name, imap->l_ns);
671 /* This name is always allocated. */
672 free (imap->l_name);
673 /* Remove the list with all the names of the shared object. */
675 struct libname_list *lnp = imap->l_libname;
678 struct libname_list *this = lnp;
679 lnp = lnp->next;
680 if (!this->dont_free)
681 free (this);
683 while (lnp != NULL);
685 /* Remove the searchlists. */
686 free (imap->l_initfini);
688 /* Remove the scope array if we allocated it. */
689 if (imap->l_scope != imap->l_scope_mem)
690 free (imap->l_scope);
692 if (imap->l_phdr_allocated)
693 free ((void *) imap->l_phdr);
695 if (imap->l_rpath_dirs.dirs != (void *) -1)
696 free (imap->l_rpath_dirs.dirs);
697 if (imap->l_runpath_dirs.dirs != (void *) -1)
698 free (imap->l_runpath_dirs.dirs);
700 /* Clear GL(dl_initfirst) when freeing its link_map memory. */
701 if (imap == GL(dl_initfirst))
702 GL(dl_initfirst) = NULL;
704 free (imap);
708 __rtld_lock_unlock_recursive (GL(dl_load_write_lock));
710 /* If we removed any object which uses TLS, bump the generation counter. */
711 if (any_tls)
713 size_t newgen = GL(dl_tls_generation) + 1;
714 if (__glibc_unlikely (newgen == 0))
715 _dl_fatal_printf ("TLS generation counter wrapped! Please report as described in "REPORT_BUGS_TO".\n");
716 /* Can be read concurrently. */
717 atomic_store_release (&GL(dl_tls_generation), newgen);
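/* Bumping the generation lets later TLS accesses notice the slotinfo
   entries cleared in remove_slotinfo above and refresh their DTVs.  */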
719 if (tls_free_end == GL(dl_tls_static_used))
720 GL(dl_tls_static_used) = tls_free_start;
723 /* TLS is cleaned up for the unloaded modules. */
724 __rtld_lock_unlock_recursive (GL(dl_load_tls_lock));
726 #ifdef SHARED
727 /* Auditing checkpoint: we have deleted all objects. Also, do not notify
728 auditors of the cleanup of a failed audit module loading attempt. */
729 _dl_audit_activity_nsid (nsid, LA_ACT_CONSISTENT);
730 #endif
732 if (__builtin_expect (ns->_ns_loaded == NULL, 0)
733 && nsid == GL(dl_nns) - 1)
735 --GL(dl_nns);
736 while (GL(dl_ns)[GL(dl_nns) - 1]._ns_loaded == NULL);
738 /* Notify the debugger those objects are finalized and gone. */
739 r->r_state = RT_CONSISTENT;
740 _dl_debug_state ();
741 LIBC_PROBE (unmap_complete, 2, nsid, r);
743 /* Recheck if we need to retry, release the lock. */
744 out:
745 if (dl_close_state == rerun)
747 /* The map may have been deallocated. */
748 map = NULL;
749 goto retry;
752 dl_close_state = not_pending;
756 void
757 _dl_close (void *_map)
759 struct link_map *map = _map;
761 /* We must take the lock to examine the contents of map and avoid
762 concurrent dlopens. */
763 __rtld_lock_lock_recursive (GL(dl_load_lock));
765 /* At this point we are guaranteed nobody else is touching the list of
766 loaded maps, but a concurrent dlclose might have freed our map
767 before we took the lock. There is no way to detect this (see below)
768 so we proceed assuming this isn't the case. First see whether we
769 can remove the object at all. */
770 if (__glibc_unlikely (map->l_nodelete_active))
772 /* Nope. Do nothing. */
773 __rtld_lock_unlock_recursive (GL(dl_load_lock));
774 return;
777 /* At present this is an unreliable check except in the case where the
778 caller has recursively called dlclose and we are sure the link map
779 has not been freed. In a non-recursive dlclose the map itself
780 might have been freed and this access is potentially a data race
781 with whatever other use this memory might have now, or worse we
782 might silently corrupt memory if it looks enough like a link map.
783 POSIX has language in dlclose that appears to guarantee that this
784 should be a detectable case and given that dlclose should be threadsafe
785 we need this to be a reliable detection.
786 This is bug 20990. */
787 if (__builtin_expect (map->l_direct_opencount, 1) == 0)
789 __rtld_lock_unlock_recursive (GL(dl_load_lock));
790 _dl_signal_error (0, map->l_name, NULL, N_("shared object not open"));
793 _dl_close_worker (map, false);
795 __rtld_lock_unlock_recursive (GL(dl_load_lock));
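/* Illustrative call path (not part of this file): a user call such as

       void *h = dlopen ("libfoo.so", RTLD_NOW);   hypothetical library name
       ...
       dlclose (h);

   reaches _dl_close through the public dlclose wrapper; _dl_close then
   runs _dl_close_worker while holding GL(dl_load_lock), as above.  */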