/* Close a shared object opened by `_dl_open'.
   Copyright (C) 1996-2023 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>
#include <dl-find_object.h>

#include <dl-unmap-segments.h>

/* Special l_idx value used to indicate which objects remain loaded.  */
#define IDX_STILL_USED -1

/* Returns true if a non-empty entry was found.  */
static bool
remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
                 bool should_be_there)
{
  if (idx - disp >= listp->len)
    {
      if (listp->next == NULL)
        {
          /* The index is not actually valid in the slotinfo list,
             because this object was closed before it was fully set
             up due to some error.  */
          assert (! should_be_there);
        }
      else
        {
          if (remove_slotinfo (idx, listp->next, disp + listp->len,
                               should_be_there))
            return true;

          /* No non-empty entry.  Search from the end of this element's
             slotinfo array.  */
          idx = disp + listp->len;
        }
    }
  else
    {
      struct link_map *old_map = listp->slotinfo[idx - disp].map;

      /* The entry might still be in its unused state if we are closing an
         object that wasn't fully set up.  */
      if (__glibc_likely (old_map != NULL))
        {
          /* Mark the entry as unused.  These can be read concurrently.  */
          atomic_store_relaxed (&listp->slotinfo[idx - disp].gen,
                                GL(dl_tls_generation) + 1);
          atomic_store_relaxed (&listp->slotinfo[idx - disp].map, NULL);
        }
      /* If this is not the last currently used entry no need to look
         further.  */
      if (idx != GL(dl_tls_max_dtv_idx))
        {
          /* There is an unused dtv entry in the middle.  */
          GL(dl_tls_dtv_gaps) = true;
          return true;
        }
    }
  while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
    {
      --idx;

      if (listp->slotinfo[idx - disp].map != NULL)
        {
          /* Found a new last used index.  This can be read concurrently.  */
          atomic_store_relaxed (&GL(dl_tls_max_dtv_idx), idx);
          return true;
        }
    }
  /* No non-empty entry in this list element.  */
  return false;
}

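/* Note: the dtv slotinfo list is a chunked singly linked list.  Each
   element covers LISTP->len consecutive module indices, and DISP is the
   number of indices covered by earlier elements, so IDX - DISP indexes
   into the current chunk.  A rough lookup over this layout would be
   (illustrative sketch only, not used by this file):

     struct link_map *
     slotinfo_map (size_t idx, struct dtv_slotinfo_list *listp)
     {
       size_t disp = 0;
       while (idx - disp >= listp->len)
         {
           disp += listp->len;
           listp = listp->next;
         }
       return listp->slotinfo[idx - disp].map;
     }
*/
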
void
_dl_close_worker (struct link_map *map, bool force)
{
  /* One less direct use.  */
  --map->l_direct_opencount;
  /* If _dl_close is called recursively (some destructor call dlclose),
     just record that the parent _dl_close will need to do garbage collection
     again and return.  */
  static enum { not_pending, pending, rerun } dl_close_state;
  if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
      || dl_close_state != not_pending)
    {
      if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
        dl_close_state = rerun;

      /* There are still references to this object.  Do nothing more.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
        _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
                          map->l_name, map->l_direct_opencount);

      return;
    }
  Lmid_t nsid = map->l_ns;
  struct link_namespaces *ns = &GL(dl_ns)[nsid];

 retry:
  dl_close_state = pending;
  bool any_tls = false;
  const unsigned int nloaded = ns->_ns_nloaded;
  struct link_map *maps[nloaded];
  /* Run over the list and assign indexes to the link maps and enter
     them into the MAPS array.  */
  int idx = 0;
  for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
    {
      l->l_map_used = 0;
      l->l_map_done = 0;
      l->l_idx = idx;
      maps[idx] = l;
      ++idx;
    }
  assert (idx == nloaded);
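
  /* What follows is, in effect, the mark phase of a mark-and-sweep pass
     over the namespace: every map reachable from a still-referenced map
     (through l_initfini and l_reldeps) is flagged with l_map_used, and
     anything left unmarked is garbage that will be unloaded below.  */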
  /* Keep track of the lowest index link map we have covered already.  */
  int done_index = -1;
  while (++done_index < nloaded)
    {
      struct link_map *l = maps[done_index];
      if (l->l_map_done)
        /* Already handled.  */
        continue;

      /* Check whether this object is still used.  */
      if (l->l_type == lt_loaded
          && l->l_direct_opencount == 0
          && !l->l_nodelete_active
          /* See CONCURRENCY NOTES in cxa_thread_atexit_impl.c to know why
             acquire is sufficient and correct.  */
          && atomic_load_acquire (&l->l_tls_dtor_count) == 0
          && !l->l_map_used)
        continue;
      /* We need this object and we handle it now.  */
      l->l_map_used = 1;
      l->l_map_done = 1;
      /* Signal the object is still needed.  */
      l->l_idx = IDX_STILL_USED;
      /* Mark all dependencies as used.  */
      if (l->l_initfini != NULL)
        {
          /* We are always the zeroth entry, and since we don't include
             ourselves in the dependency analysis start at 1.  */
          struct link_map **lp = &l->l_initfini[1];
          while (*lp != NULL)
            {
              if ((*lp)->l_idx != IDX_STILL_USED)
                {
                  assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);

                  if (!(*lp)->l_map_used)
                    {
                      (*lp)->l_map_used = 1;
                      /* If we marked a new object as used, and we've
                         already processed it, then we need to go back
                         and process again from that point forward to
                         ensure we keep all of its dependencies also.  */
                      if ((*lp)->l_idx - 1 < done_index)
                        done_index = (*lp)->l_idx - 1;
                    }
                }

              ++lp;
            }
        }
      /* And the same for relocation dependencies.  */
      if (l->l_reldeps != NULL)
        for (unsigned int j = 0; j < l->l_reldeps->act; ++j)
          {
            struct link_map *jmap = l->l_reldeps->list[j];

            if (jmap->l_idx != IDX_STILL_USED)
              {
                assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);

                if (!jmap->l_map_used)
                  {
                    jmap->l_map_used = 1;
                    if (jmap->l_idx - 1 < done_index)
                      done_index = jmap->l_idx - 1;
                  }
              }
          }
    }
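
  /* The sort below arranges MAPS so that, as far as the dependency graph
     allows, an object precedes the objects it depends on; the destructor
     calls that follow therefore run before the destructors of their
     dependencies.  */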
  /* Sort the entries.  We can skip looking for the binary itself which is
     at the front of the search list for the main namespace.  */
  _dl_sort_maps (maps, nloaded, (nsid == LM_ID_BASE), true);
  /* Call all termination functions at once.  */
  bool unload_any = false;
  bool scope_mem_left = false;
  unsigned int unload_global = 0;
  unsigned int first_loaded = ~0;
  for (unsigned int i = 0; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];

      /* All elements must be in the same namespace.  */
      assert (imap->l_ns == nsid);

      if (!imap->l_map_used)
        {
          assert (imap->l_type == lt_loaded && !imap->l_nodelete_active);
          /* Call its termination function.  Do not do it for
             half-cooked objects.  Temporarily disable exception
             handling, so that errors are fatal.  */
          if (imap->l_init_called)
            _dl_catch_exception (NULL, _dl_call_fini, imap);

#ifdef SHARED
          /* Auditing checkpoint: we remove an object.  */
          _dl_audit_objclose (imap);
#endif
          /* This object must not be used anymore.  */
          imap->l_removed = 1;

          /* We indeed have an object to remove.  */
          unload_any = true;

          if (imap->l_global)
            ++unload_global;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
      /* Else imap->l_map_used.  */
      else if (imap->l_type == lt_loaded)
        {
          struct r_scope_elem *new_list = NULL;

          if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
            {
              /* The object is still used.  But one of the objects we are
                 unloading right now is responsible for loading it.  If
                 the current object does not have its own scope yet we
                 have to create one.  This has to be done before running
                 the finalizers.

                 To do this count the number of dependencies.  */
              unsigned int cnt;
              for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
                ;
              /* We simply reuse the l_initfini list.  */
              imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
              imap->l_searchlist.r_nlist = cnt;

              new_list = &imap->l_searchlist;
            }
          /* Count the number of scopes which remain after the unload.
             When we add the local search list count it.  Always add
             one for the terminating NULL pointer.  */
          size_t remain = (new_list != NULL) + 1;
          bool removed_any = false;
          for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
            /* This relies on l_scope[] entries being always set either
               to its own l_symbolic_searchlist address, or some map's
               l_searchlist address.  */
            if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
              {
                struct link_map *tmap = (struct link_map *)
                  ((char *) imap->l_scope[cnt]
                   - offsetof (struct link_map, l_searchlist));
                assert (tmap->l_ns == nsid);
                if (tmap->l_idx == IDX_STILL_USED)
                  ++remain;
                else
                  removed_any = true;
              }
            else
              ++remain;

          if (removed_any)
            {
              /* Always allocate a new array for the scope.  This is
                 necessary since we must be able to determine the last
                 user of the current array.  If possible use the link map's
                 memory.  */
              size_t new_size;
              struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))
              if (imap->l_scope != imap->l_scope_mem
                  && remain < SCOPE_ELEMS (imap))
                {
                  new_size = SCOPE_ELEMS (imap);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlclose", NULL,
                                      N_("cannot create scope list"));
                }
              /* Copy over the remaining scope elements.  */
              remain = 0;
              for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
                {
                  if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
                    {
                      struct link_map *tmap = (struct link_map *)
                        ((char *) imap->l_scope[cnt]
                         - offsetof (struct link_map, l_searchlist));
                      if (tmap->l_idx != IDX_STILL_USED)
                        {
                          /* Remove the scope.  Or replace with own map's
                             scope.  */
                          if (new_list != NULL)
                            {
                              newp[remain++] = new_list;
                              new_list = NULL;
                            }
                          continue;
                        }
                    }

                  newp[remain++] = imap->l_scope[cnt];
                }
              newp[remain] = NULL;
              struct r_scope_elem **old = imap->l_scope;

              imap->l_scope = newp;

              /* No user anymore, we can free it now.  */
              if (old != imap->l_scope_mem)
                {
                  if (_dl_scope_free (old))
                    /* If _dl_scope_free used THREAD_GSCOPE_WAIT (),
                       no need to repeat it.  */
                    scope_mem_left = false;
                }
              else
                scope_mem_left = true;

              imap->l_scope_max = new_size;
            }
          else if (new_list != NULL)
            {
              /* We didn't change the scope array, so reset the search
                 list.  */
              imap->l_searchlist.r_list = NULL;
              imap->l_searchlist.r_nlist = 0;
            }
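
          /* Note: other threads may be walking l_scope concurrently
             without taking a lock; they are excluded only by the global
             scope ("GSCOPE") protocol.  That is why the old array above
             is retired through _dl_scope_free, which defers the actual
             free until no lock-free reader can still reach it, instead
             of being freed in place.  */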
          /* The loader is gone, so mark the object as not having one.
             Note: l_idx != IDX_STILL_USED -> object will be removed.  */
          if (imap->l_loader != NULL
              && imap->l_loader->l_idx != IDX_STILL_USED)
            imap->l_loader = NULL;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
    }
  /* If there are no objects to unload, do nothing further.  */
  if (!unload_any)
    goto out;
#ifdef SHARED
  /* Auditing checkpoint: we will start deleting objects.  */
  _dl_audit_activity_nsid (nsid, LA_ACT_DELETE);
#endif
  /* Notify the debugger we are about to remove some loaded objects.  */
  struct r_debug *r = _dl_debug_update (nsid);
  r->r_state = RT_DELETE;
  _dl_debug_state ();
  LIBC_PROBE (unmap_start, 2, nsid, r);
  if (unload_global)
    {
      /* Some objects are in the global scope list.  Remove them.  */
      struct r_scope_elem *ns_msl = ns->_ns_main_searchlist;
      unsigned int i;
      unsigned int j = 0;
      unsigned int cnt = ns_msl->r_nlist;

      while (cnt > 0 && ns_msl->r_list[cnt - 1]->l_removed)
        --cnt;

      if (cnt + unload_global == ns_msl->r_nlist)
        /* Speed up removing most recently added objects.  */
        j = cnt;
      else
        for (i = 0; i < cnt; i++)
          if (ns_msl->r_list[i]->l_removed == 0)
            {
              if (i != j)
                ns_msl->r_list[j] = ns_msl->r_list[i];
              j++;
            }
      ns_msl->r_nlist = j;
    }
  if (!RTLD_SINGLE_THREAD_P
      && (unload_global
          || scope_mem_left
          || (GL(dl_scope_free_list) != NULL
              && GL(dl_scope_free_list)->count)))
    {
      THREAD_GSCOPE_WAIT ();

      /* Now we can free any queued old scopes.  */
      struct dl_scope_free_list *fsl = GL(dl_scope_free_list);
      if (fsl != NULL)
        while (fsl->count > 0)
          free (fsl->list[--fsl->count]);
    }
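
  /* THREAD_GSCOPE_WAIT acts like an RCU-style grace period: it blocks
     until every thread has passed through a point at which it holds no
     GSCOPE reference, after which the scope arrays queued on
     dl_scope_free_list can no longer be reached by any lock-free
     reader and are safe to free.  */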
  size_t tls_free_start;
  size_t tls_free_end;
  tls_free_start = tls_free_end = NO_TLS_OFFSET;

  /* Protects global and module specific TLS state.  */
  __rtld_lock_lock_recursive (GL(dl_load_tls_lock));

  /* We modify the list of loaded objects.  */
  __rtld_lock_lock_recursive (GL(dl_load_write_lock));
  /* Check each element of the search list to see if all references to
     it are gone.  */
  for (unsigned int i = first_loaded; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];
      if (!imap->l_map_used)
        {
          assert (imap->l_type == lt_loaded);

          /* That was the last reference, and this was a dlopen-loaded
             object.  We can unmap it.  */

          /* Remove the object from the dtv slotinfo array if it uses TLS.  */
          if (__glibc_unlikely (imap->l_tls_blocksize > 0))
            {
              any_tls = true;
              if (GL(dl_tls_dtv_slotinfo_list) != NULL
                  && ! remove_slotinfo (imap->l_tls_modid,
                                        GL(dl_tls_dtv_slotinfo_list), 0,
                                        imap->l_init_called))
                /* All dynamically loaded modules with TLS are unloaded.  */
                /* Can be read concurrently.  */
                atomic_store_relaxed (&GL(dl_tls_max_dtv_idx),
                                      GL(dl_tls_static_nelem));

              if (imap->l_tls_offset != NO_TLS_OFFSET
                  && imap->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
                {
                  /* Collect a contiguous chunk built from the objects in
                     this search list, going in either direction.  When the
                     whole chunk is at the end of the used area then we can
                     reclaim it.  */
#if TLS_TCB_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET
                      || (size_t) imap->l_tls_offset == tls_free_start)
                    {
                      /* Extend the contiguous chunk being reclaimed.  */
                      tls_free_start
                        = imap->l_tls_offset - imap->l_tls_blocksize;

                      if (tls_free_end == NO_TLS_OFFSET)
                        tls_free_end = imap->l_tls_offset;
                    }
                  else if (imap->l_tls_offset - imap->l_tls_blocksize
                           == tls_free_end)
                    /* Extend the chunk backwards.  */
                    tls_free_end = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked unless we can free
                         one block right away.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        {
                          GL(dl_tls_static_used) = tls_free_start;
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                      else if ((size_t) imap->l_tls_offset
                               == GL(dl_tls_static_used))
                        GL(dl_tls_static_used)
                          = imap->l_tls_offset - imap->l_tls_blocksize;
                      else if (tls_free_end < (size_t) imap->l_tls_offset)
                        {
                          /* We pick the later block.  It has a chance to
                             be freed.  */
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                    }
#elif TLS_DTV_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET)
                    {
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = (imap->l_tls_offset
                                      + imap->l_tls_blocksize);
                    }
                  else if (imap->l_tls_firstbyte_offset == tls_free_end)
                    /* Extend the contiguous chunk being reclaimed.  */
                    tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == tls_free_start)
                    /* Extend the chunk backwards.  */
                    tls_free_start = imap->l_tls_firstbyte_offset;
                  /* This isn't contiguous with the last chunk freed.
                     One of them will be leaked unless we can free
                     one block right away.  */
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == GL(dl_tls_static_used))
                    GL(dl_tls_static_used) = imap->l_tls_firstbyte_offset;
                  else if (tls_free_end == GL(dl_tls_static_used))
                    {
                      GL(dl_tls_static_used) = tls_free_start;
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
                  else if (tls_free_end < imap->l_tls_firstbyte_offset)
                    {
                      /* We pick the later block.  It has a chance to
                         be freed.  */
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
                }
            }
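
          /* Note: TLS_FREE_START/TLS_FREE_END track at most one
             contiguous candidate chunk of static TLS.  Surplus that is
             not adjacent to GL(dl_tls_static_used) cannot be given back
             here; it stays allocated until neighboring blocks are freed
             by a later dlclose.  */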
          /* Reset unique symbols if forced.  */
          if (force)
            {
              struct unique_sym_table *tab = &ns->_ns_unique_sym_table;
              __rtld_lock_lock_recursive (tab->lock);
              struct unique_sym *entries = tab->entries;
              if (entries != NULL)
                {
                  size_t idx, size = tab->size;
                  for (idx = 0; idx < size; ++idx)
                    {
                      /* Clear unique symbol entries that belong to this
                         object.  */
                      if (entries[idx].name != NULL
                          && entries[idx].map == imap)
                        {
                          entries[idx].name = NULL;
                          entries[idx].hashval = 0;
                          tab->n_elements--;
                        }
                    }
                }
              __rtld_lock_unlock_recursive (tab->lock);
            }
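
          /* The unique symbol table holds STB_GNU_UNIQUE bindings for
             this namespace.  FORCE appears to be set on the dlopen
             failure path, where half-initialized bindings into the
             object being discarded must not survive.  */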
          /* We can unmap all the maps at once.  We determined the
             start address and length when we loaded the object and
             the `munmap' call does the rest.  */
          DL_UNMAP (imap);
          /* Finally, unlink the data structure and free it.  */
#if DL_NNS == 1
          /* The assert in the (imap->l_prev == NULL) case gives
             the compiler license to warn that NS points outside
             the dl_ns array bounds in that case (as nsid != LM_ID_BASE
             is tantamount to nsid >= DL_NNS).  That should be impossible
             in this configuration, so just assert about it instead.  */
          assert (nsid == LM_ID_BASE);
          assert (imap->l_prev != NULL);
#else
          if (imap->l_prev == NULL)
            {
              assert (nsid != LM_ID_BASE);
              ns->_ns_loaded = imap->l_next;

              /* Update the pointer to the head of the list
                 we leave for debuggers to examine.  */
              r->r_map = (void *) ns->_ns_loaded;
            }
          else
#endif
            imap->l_prev->l_next = imap->l_next;

          --ns->_ns_nloaded;
          if (imap->l_next != NULL)
            imap->l_next->l_prev = imap->l_prev;

          /* Update the data used by _dl_find_object.  */
          _dl_find_object_dlclose (imap);
          free (imap->l_versions);
          if (imap->l_origin != (char *) -1)
            free ((char *) imap->l_origin);

          free (imap->l_reldeps);
          /* Print debugging message.  */
          if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
            _dl_debug_printf ("\nfile=%s [%lu];  destroying link map\n",
                              imap->l_name, imap->l_ns);
          /* This name is always allocated.  */
          free (imap->l_name);
          /* Remove the list with all the names of the shared object.  */

          struct libname_list *lnp = imap->l_libname;
          do
            {
              struct libname_list *this = lnp;
              lnp = lnp->next;
              if (!this->dont_free)
                free (this);
            }
          while (lnp != NULL);
          /* Remove the searchlists.  */
          free (imap->l_initfini);

          /* Remove the scope array if we allocated it.  */
          if (imap->l_scope != imap->l_scope_mem)
            free (imap->l_scope);
          if (imap->l_phdr_allocated)
            free ((void *) imap->l_phdr);

          if (imap->l_rpath_dirs.dirs != (void *) -1)
            free (imap->l_rpath_dirs.dirs);
          if (imap->l_runpath_dirs.dirs != (void *) -1)
            free (imap->l_runpath_dirs.dirs);
          /* Clear GL(dl_initfirst) when freeing its link_map memory.  */
          if (imap == GL(dl_initfirst))
            GL(dl_initfirst) = NULL;

          free (imap);
        }
    }
  __rtld_lock_unlock_recursive (GL(dl_load_write_lock));
  /* If we removed any object which uses TLS bump the generation counter.  */
  if (any_tls)
    {
      size_t newgen = GL(dl_tls_generation) + 1;
      if (__glibc_unlikely (newgen == 0))
        _dl_fatal_printf ("TLS generation counter wrapped!  Please report as described in "REPORT_BUGS_TO".\n");
      /* Can be read concurrently.  */
      atomic_store_relaxed (&GL(dl_tls_generation), newgen);

      if (tls_free_end == GL(dl_tls_static_used))
        GL(dl_tls_static_used) = tls_free_start;
    }
  /* TLS is cleaned up for the unloaded modules.  */
  __rtld_lock_unlock_recursive (GL(dl_load_tls_lock));
 out:
#ifdef SHARED
  /* Auditing checkpoint: we have deleted all objects.  Also, do not notify
     auditors of the cleanup of a failed audit module loading attempt.  */
  _dl_audit_activity_nsid (nsid, LA_ACT_CONSISTENT);
#endif
  if (__builtin_expect (ns->_ns_loaded == NULL, 0)
      && nsid == GL(dl_nns) - 1)
    do
      --GL(dl_nns);
    while (GL(dl_ns)[GL(dl_nns) - 1]._ns_loaded == NULL);
  /* Notify the debugger those objects are finalized and gone.  */
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
  LIBC_PROBE (unmap_complete, 2, nsid, r);
  /* Recheck if we need to retry, release the lock.  */
  if (dl_close_state == rerun)
    goto retry;

  dl_close_state = not_pending;
}

void
_dl_close (void *_map)
{
  struct link_map *map = _map;
  /* We must take the lock to examine the contents of map and avoid
     concurrent dlopens.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));
  /* At this point we are guaranteed nobody else is touching the list of
     loaded maps, but a concurrent dlclose might have freed our map
     before we took the lock.  There is no way to detect this (see below)
     so we proceed assuming this isn't the case.  First see whether we
     can remove the object at all.  */
  if (__glibc_unlikely (map->l_nodelete_active))
    {
      /* Nope.  Do nothing.  */
      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      return;
    }
  /* At present this is an unreliable check except in the case where the
     caller has recursively called dlclose and we are sure the link map
     has not been freed.  In a non-recursive dlclose the map itself
     might have been freed and this access is potentially a data race
     with whatever other use this memory might have now, or worse we
     might silently corrupt memory if it looks enough like a link map.
     POSIX has language in dlclose that appears to guarantee that this
     should be a detectable case and given that dlclose should be threadsafe
     we need this to be a reliable detection.
     This is bug 20990.  */
  if (__builtin_expect (map->l_direct_opencount, 1) == 0)
    {
      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      _dl_signal_error (0, map->l_name, NULL, N_("shared object not open"));
    }
  _dl_close_worker (map, false);

  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}
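
/* Caller-side sketch (illustrative only): the application-visible path
   that reaches _dl_close goes through the dlfcn wrappers.

     #include <dlfcn.h>
     #include <stdio.h>

     void *h = dlopen ("libfoo.so", RTLD_NOW);
     if (h != NULL && dlclose (h) != 0)
       fprintf (stderr, "%s\n", dlerror ());

   Here "libfoo.so" is a hypothetical library name.  dlclose drops one
   direct reference; the object and any dependencies that are no longer
   needed are finalized and unmapped once l_direct_opencount reaches
   zero.  Errors signaled above via _dl_signal_error are surfaced to the
   caller through dlerror.  */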