/* Close a shared object opened by `_dl_open'.
   Copyright (C) 1996-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <libintl.h>
#include <stdlib.h>
#include <string.h>
#include <libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sysdep-cancel.h>
#include <tls.h>
#include <stap-probe.h>

#include <dl-unmap-segments.h>
/* Type of the destructor functions.  */
typedef void (*fini_t) (void);

/* Special l_idx value used to indicate which objects remain loaded.  */
#define IDX_STILL_USED -1
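
/* _dl_close_worker below stores IDX_STILL_USED into the l_idx field of
   every link map that must stay loaded; a map whose l_idx still holds
   its non-negative array index is one being unloaded.  */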
/* Returns true if a non-empty entry was found.  */
static bool
remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
                 bool should_be_there)
{
  if (idx - disp >= listp->len)
    {
      if (listp->next == NULL)
        {
          /* The index is not actually valid in the slotinfo list,
             because this object was closed before it was fully set
             up due to some error.  */
          assert (! should_be_there);
        }
      else
        {
          if (remove_slotinfo (idx, listp->next, disp + listp->len,
                               should_be_there))
            return true;

          /* No non-empty entry.  Search from the end of this element's
             slotinfo array.  */
          idx = disp + listp->len;
        }
    }
  else
    {
      struct link_map *old_map = listp->slotinfo[idx - disp].map;

      /* The entry might still be in its unused state if we are closing an
         object that wasn't fully set up.  */
      if (__glibc_likely (old_map != NULL))
        {
          assert (old_map->l_tls_modid == idx);

          /* Mark the entry as unused.  */
          listp->slotinfo[idx - disp].gen = GL(dl_tls_generation) + 1;
          listp->slotinfo[idx - disp].map = NULL;
        }

      /* If this is not the last currently used entry no need to look
         further.  */
      if (idx != GL(dl_tls_max_dtv_idx))
        return true;
    }

  while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
    {
      --idx;

      if (listp->slotinfo[idx - disp].map != NULL)
        {
          /* Found a new last used index.  */
          GL(dl_tls_max_dtv_idx) = idx;
          return true;
        }
    }

  /* No non-empty entry in this list element.  */
  return false;
}
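
/* For example, if the first slotinfo list element held 64 entries,
   TLS module index 70 would live in the second element at slot
   70 - 64: the IDX - DISP arithmetic above, with DISP accumulating
   the lengths of the list elements already traversed.  */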
/* Invoke destructors for CLOSURE (a struct link_map *).  Called with
   exception handling temporarily disabled, to make errors fatal.  */
static void
call_destructors (void *closure)
{
  struct link_map *map = closure;

  if (map->l_info[DT_FINI_ARRAY] != NULL)
    {
      ElfW(Addr) *array =
        (ElfW(Addr) *) (map->l_addr
                        + map->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
      unsigned int sz = (map->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
                         / sizeof (ElfW(Addr)));

      while (sz-- > 0)
        ((fini_t) array[sz]) ();
    }

  /* Next try the old-style destructor.  */
  if (map->l_info[DT_FINI] != NULL)
    DL_CALL_DT_FINI (map, ((void *) map->l_addr
                           + map->l_info[DT_FINI]->d_un.d_ptr));
}
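
/* A destructor reaches the DT_FINI_ARRAY path above when user code
   registers it with the GCC attribute, e.g.

     __attribute__ ((destructor))
     static void
     cleanup (void)
     {
       ...
     }

   The array is walked with a decrementing index, so destructors run
   in reverse order of registration; any old-style DT_FINI entry runs
   after the array.  */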
void
_dl_close_worker (struct link_map *map, bool force)
{
  /* One less direct use.  */
  --map->l_direct_opencount;

  /* If _dl_close is called recursively (some destructor calls dlclose),
     just record that the parent _dl_close will need to do garbage collection
     again and return.  */
  static enum { not_pending, pending, rerun } dl_close_state;

  if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
      || dl_close_state != not_pending)
    {
      if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
        dl_close_state = rerun;

      /* There are still references to this object.  Do nothing more.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
        _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
                          map->l_name, map->l_direct_opencount);

      return;
    }
  Lmid_t nsid = map->l_ns;
  struct link_namespaces *ns = &GL(dl_ns)[nsid];

 retry:
  dl_close_state = pending;

  bool any_tls = false;
  const unsigned int nloaded = ns->_ns_nloaded;
  char used[nloaded];
  char done[nloaded];
  struct link_map *maps[nloaded];
  /* Run over the list and assign indexes to the link maps and enter
     them into the MAPS array.  */
  int idx = 0;
  for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
    {
      l->l_idx = idx;
      maps[idx] = l;
      ++idx;
    }
  assert (idx == nloaded);

  /* Prepare the bitmaps.  */
  memset (used, '\0', sizeof (used));
  memset (done, '\0', sizeof (done));

  /* Keep track of the lowest index link map we have covered already.  */
  int done_index = -1;
  while (++done_index < nloaded)
    {
      struct link_map *l = maps[done_index];

      if (done[done_index])
        /* Already handled.  */
        continue;
      /* Check whether this object is still used.  */
      if (l->l_type == lt_loaded
          && l->l_direct_opencount == 0
          && !l->l_nodelete_active
          /* See CONCURRENCY NOTES in cxa_thread_atexit_impl.c to know why
             acquire is sufficient and correct.  */
          && atomic_load_acquire (&l->l_tls_dtor_count) == 0
          && !used[done_index])
        continue;

      /* We need this object and we handle it now.  */
      done[done_index] = 1;
      used[done_index] = 1;
      /* Signal the object is still needed.  */
      l->l_idx = IDX_STILL_USED;
      /* Mark all dependencies as used.  */
      if (l->l_initfini != NULL)
        {
          /* We are always the zeroth entry, and since we don't include
             ourselves in the dependency analysis start at 1.  */
          struct link_map **lp = &l->l_initfini[1];
          while (*lp != NULL)
            {
              if ((*lp)->l_idx != IDX_STILL_USED)
                {
                  assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);

                  if (!used[(*lp)->l_idx])
                    {
                      used[(*lp)->l_idx] = 1;
                      /* If we marked a new object as used, and we've
                         already processed it, then we need to go back
                         and process again from that point forward to
                         ensure we keep all of its dependencies also.  */
                      if ((*lp)->l_idx - 1 < done_index)
                        done_index = (*lp)->l_idx - 1;
                    }
                }

              ++lp;
            }
        }

      /* And the same for relocation dependencies.  */
      if (l->l_reldeps != NULL)
        for (unsigned int j = 0; j < l->l_reldeps->act; ++j)
          {
            struct link_map *jmap = l->l_reldeps->list[j];

            if (jmap->l_idx != IDX_STILL_USED)
              {
                assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);

                if (!used[jmap->l_idx])
                  {
                    used[jmap->l_idx] = 1;
                    if (jmap->l_idx - 1 < done_index)
                      done_index = jmap->l_idx - 1;
                  }
              }
          }
    }
  /* Sort the entries.  We can skip looking for the binary itself which is
     at the front of the search list for the main namespace.  */
  _dl_sort_maps (maps + (nsid == LM_ID_BASE), nloaded - (nsid == LM_ID_BASE),
                 used + (nsid == LM_ID_BASE), true);
  /* Call all termination functions at once.  */
  bool do_audit = GLRO(dl_naudit) > 0 && !ns->_ns_loaded->l_auditing;
  bool unload_any = false;
  bool scope_mem_left = false;
  unsigned int unload_global = 0;
  unsigned int first_loaded = ~0;
  for (unsigned int i = 0; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];

      /* All elements must be in the same namespace.  */
      assert (imap->l_ns == nsid);

      if (!used[i])
        {
          assert (imap->l_type == lt_loaded && !imap->l_nodelete_active);
          /* Call its termination function.  Do not do it for
             half-cooked objects.  Temporarily disable exception
             handling, so that errors are fatal.  */
          if (imap->l_init_called)
            {
              /* When debugging print a message first.  */
              if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS,
                                    0))
                _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
                                  imap->l_name, nsid);

              if (imap->l_info[DT_FINI_ARRAY] != NULL
                  || imap->l_info[DT_FINI] != NULL)
                _dl_catch_exception (NULL, call_destructors, imap);
            }
          /* Auditing checkpoint: we remove an object.  */
          if (__glibc_unlikely (do_audit))
            {
              struct audit_ifaces *afct = GLRO(dl_audit);
              for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
                {
                  if (afct->objclose != NULL)
                    {
                      struct auditstate *state
                        = link_map_audit_state (imap, cnt);
                      /* Return value is ignored.  */
                      (void) afct->objclose (&state->cookie);
                    }

                  afct = afct->next;
                }
            }
          /* This object must not be used anymore.  */
          imap->l_removed = 1;

          /* We indeed have an object to remove.  */
          unload_any = true;

          if (imap->l_global)
            ++unload_global;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
      else if (imap->l_type == lt_loaded)
        {
          struct r_scope_elem *new_list = NULL;

          if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
            {
              /* The object is still used.  But one of the objects we are
                 unloading right now is responsible for loading it.  If
                 the current object does not have its own scope yet we
                 have to create one.  This has to be done before running
                 the finalizers.

                 To do this count the number of dependencies.  */
              unsigned int cnt;
              for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
                ;

              /* We simply reuse the l_initfini list.  */
              imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
              imap->l_searchlist.r_nlist = cnt;

              new_list = &imap->l_searchlist;
            }
          /* Count the number of scopes which remain after the unload.
             When we add the local search list count it.  Always add
             one for the terminating NULL pointer.  */
          size_t remain = (new_list != NULL) + 1;
          bool removed_any = false;
          for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
            /* This relies on l_scope[] entries being always set either
               to its own l_symbolic_searchlist address, or some map's
               l_searchlist address.  */
            if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
              {
                struct link_map *tmap = (struct link_map *)
                  ((char *) imap->l_scope[cnt]
                   - offsetof (struct link_map, l_searchlist));
                assert (tmap->l_ns == nsid);
                if (tmap->l_idx == IDX_STILL_USED)
                  ++remain;
                else
                  removed_any = true;
              }
            else
              ++remain;

          if (removed_any)
            {
              /* Always allocate a new array for the scope.  This is
                 necessary since we must be able to determine the last
                 user of the current array.  If possible use the link map's
                 memory.  */
              size_t new_size;
              struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

              if (imap->l_scope != imap->l_scope_mem
                  && remain < SCOPE_ELEMS (imap))
                {
                  new_size = SCOPE_ELEMS (imap);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlclose", NULL,
                                      N_("cannot create scope list"));
                }
              /* Copy over the remaining scope elements.  */
              remain = 0;
              for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
                {
                  if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
                    {
                      struct link_map *tmap = (struct link_map *)
                        ((char *) imap->l_scope[cnt]
                         - offsetof (struct link_map, l_searchlist));
                      if (tmap->l_idx != IDX_STILL_USED)
                        {
                          /* Remove the scope.  Or replace with own map's
                             scope.  */
                          if (new_list != NULL)
                            {
                              newp[remain++] = new_list;
                              new_list = NULL;
                            }
                          continue;
                        }
                    }

                  newp[remain++] = imap->l_scope[cnt];
                }
              newp[remain] = NULL;

              struct r_scope_elem **old = imap->l_scope;

              imap->l_scope = newp;
              /* No user anymore, we can free it now.  */
              if (old != imap->l_scope_mem)
                {
                  if (_dl_scope_free (old))
                    /* If _dl_scope_free used THREAD_GSCOPE_WAIT (),
                       no need to repeat it.  */
                    scope_mem_left = false;
                }
              else
                scope_mem_left = true;

              imap->l_scope_max = new_size;
            }
          else if (new_list != NULL)
            {
              /* We didn't change the scope array, so reset the search
                 list.  */
              imap->l_searchlist.r_list = NULL;
              imap->l_searchlist.r_nlist = 0;
            }
          /* The loader is gone, so mark the object as not having one.
             Note: l_idx != IDX_STILL_USED -> object will be removed.  */
          if (imap->l_loader != NULL
              && imap->l_loader->l_idx != IDX_STILL_USED)
            imap->l_loader = NULL;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
    }

  /* If there are no objects to unload, do nothing further.  */
  if (!unload_any)
    goto out;
  /* Auditing checkpoint: we will start deleting objects.  */
  if (__glibc_unlikely (do_audit))
    {
      struct link_map *head = ns->_ns_loaded;
      struct audit_ifaces *afct = GLRO(dl_audit);
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                {
                  struct auditstate *state = link_map_audit_state (head, cnt);
                  afct->activity (&state->cookie, LA_ACT_DELETE);
                }

              afct = afct->next;
            }
        }
    }
  /* Notify the debugger we are about to remove some loaded objects.  */
  struct r_debug *r = _dl_debug_initialize (0, nsid);
  r->r_state = RT_DELETE;
  _dl_debug_state ();
  LIBC_PROBE (unmap_start, 2, nsid, r);
  if (unload_global)
    {
      /* Some objects are in the global scope list.  Remove them.  */
      struct r_scope_elem *ns_msl = ns->_ns_main_searchlist;
      unsigned int i;
      unsigned int j = 0;
      unsigned int cnt = ns_msl->r_nlist;

      while (cnt > 0 && ns_msl->r_list[cnt - 1]->l_removed)
        --cnt;

      if (cnt + unload_global == ns_msl->r_nlist)
        /* Speed up removing most recently added objects.  */
        j = cnt;
      else
        for (i = 0; i < cnt; i++)
          if (ns_msl->r_list[i]->l_removed == 0)
            {
              if (i != j)
                ns_msl->r_list[j] = ns_msl->r_list[i];
              j++;
            }
      ns_msl->r_nlist = j;
    }
  if (!RTLD_SINGLE_THREAD_P
      && (unload_global
          || scope_mem_left
          || (GL(dl_scope_free_list) != NULL
              && GL(dl_scope_free_list)->count)))
    {
      THREAD_GSCOPE_WAIT ();

      /* Now we can free any queued old scopes.  */
      struct dl_scope_free_list *fsl = GL(dl_scope_free_list);
      if (fsl != NULL)
        while (fsl->count > 0)
          free (fsl->list[--fsl->count]);
    }

  size_t tls_free_start;
  size_t tls_free_end;
  tls_free_start = tls_free_end = NO_TLS_OFFSET;
  /* We modify the list of loaded objects.  */
  __rtld_lock_lock_recursive (GL(dl_load_write_lock));

  /* Check each element of the search list to see if all references to
     it are gone.  */
  for (unsigned int i = first_loaded; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];
      if (!used[i])
        {
          assert (imap->l_type == lt_loaded);

          /* That was the last reference, and this was a dlopen-loaded
             object.  We can unmap it.  */

          /* Remove the object from the dtv slotinfo array if it uses
             TLS.  */
          if (__glibc_unlikely (imap->l_tls_blocksize > 0))
            {
              any_tls = true;

              if (GL(dl_tls_dtv_slotinfo_list) != NULL
                  && ! remove_slotinfo (imap->l_tls_modid,
                                        GL(dl_tls_dtv_slotinfo_list), 0,
                                        imap->l_init_called))
                /* All dynamically loaded modules with TLS are unloaded.  */
                GL(dl_tls_max_dtv_idx) = GL(dl_tls_static_nelem);
              if (imap->l_tls_offset != NO_TLS_OFFSET
                  && imap->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
                {
                  /* Collect a contiguous chunk built from the objects in
                     this search list, going in either direction.  When the
                     whole chunk is at the end of the used area then we can
                     reclaim it.  */
#if TLS_TCB_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET
                      || (size_t) imap->l_tls_offset == tls_free_start)
                    {
                      /* Extend the contiguous chunk being reclaimed.  */
                      tls_free_start
                        = imap->l_tls_offset - imap->l_tls_blocksize;

                      if (tls_free_end == NO_TLS_OFFSET)
                        tls_free_end = imap->l_tls_offset;
                    }
                  else if (imap->l_tls_offset - imap->l_tls_blocksize
                           == tls_free_end)
                    /* Extend the chunk backwards.  */
                    tls_free_end = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked unless we can free
                         one block right away.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        {
                          GL(dl_tls_static_used) = tls_free_start;
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                      else if ((size_t) imap->l_tls_offset
                               == GL(dl_tls_static_used))
                        GL(dl_tls_static_used)
                          = imap->l_tls_offset - imap->l_tls_blocksize;
                      else if (tls_free_end < (size_t) imap->l_tls_offset)
                        {
                          /* We pick the later block.  It has a chance to
                             be freed.  */
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                    }
#elif TLS_DTV_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET)
                    {
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = (imap->l_tls_offset
                                      + imap->l_tls_blocksize);
                    }
                  else if (imap->l_tls_firstbyte_offset == tls_free_end)
                    /* Extend the contiguous chunk being reclaimed.  */
                    tls_free_end
                      = imap->l_tls_offset + imap->l_tls_blocksize;
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == tls_free_start)
                    /* Extend the chunk backwards.  */
                    tls_free_start = imap->l_tls_firstbyte_offset;
                  /* This isn't contiguous with the last chunk freed.
                     One of them will be leaked unless we can free
                     one block right away.  */
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == GL(dl_tls_static_used))
                    GL(dl_tls_static_used) = imap->l_tls_firstbyte_offset;
                  else if (tls_free_end == GL(dl_tls_static_used))
                    {
                      GL(dl_tls_static_used) = tls_free_start;
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end
                        = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
                  else if (tls_free_end < imap->l_tls_firstbyte_offset)
                    {
                      /* We pick the later block.  It has a chance to
                         be freed.  */
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end
                        = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
                }
            }
          /* Reset unique symbols if forced.  */
          if (force)
            {
              struct unique_sym_table *tab = &ns->_ns_unique_sym_table;
              __rtld_lock_lock_recursive (tab->lock);
              struct unique_sym *entries = tab->entries;
              if (entries != NULL)
                {
                  size_t idx, size = tab->size;
                  for (idx = 0; idx < size; ++idx)
                    {
                      /* Clear unique symbol entries that belong to this
                         object.  */
                      if (entries[idx].name != NULL
                          && entries[idx].map == imap)
                        {
                          entries[idx].name = NULL;
                          entries[idx].hashval = 0;
                          tab->n_elements--;
                        }
                    }
                }
              __rtld_lock_unlock_recursive (tab->lock);
            }
          /* We can unmap all the maps at once.  We determined the
             start address and length when we loaded the object and
             the `munmap' call does the rest.  */
          _dl_unmap (imap);

          /* Finally, unlink the data structure and free it.  */
#if DL_NNS == 1
          /* The assert in the (imap->l_prev == NULL) case gives
             the compiler license to warn that NS points outside
             the dl_ns array bounds in that case (as nsid != LM_ID_BASE
             is tantamount to nsid >= DL_NNS).  That should be impossible
             in this configuration, so just assert about it instead.  */
          assert (nsid == LM_ID_BASE);
          assert (imap->l_prev != NULL);
#else
          if (imap->l_prev == NULL)
            {
              assert (nsid != LM_ID_BASE);
              ns->_ns_loaded = imap->l_next;

              /* Update the pointer to the head of the list
                 we leave for debuggers to examine.  */
              r->r_map = (void *) ns->_ns_loaded;
            }
          else
#endif
            imap->l_prev->l_next = imap->l_next;
          --ns->_ns_nloaded;
          if (imap->l_next != NULL)
            imap->l_next->l_prev = imap->l_prev;

          free (imap->l_versions);
          if (imap->l_origin != (char *) -1)
            free ((char *) imap->l_origin);

          free (imap->l_reldeps);

          /* Print debugging message.  */
          if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
            _dl_debug_printf ("\nfile=%s [%lu]; destroying link map\n",
                              imap->l_name, imap->l_ns);

          /* This name is always allocated.  */
          free (imap->l_name);

          /* Remove the list with all the names of the shared object.  */
          struct libname_list *lnp = imap->l_libname;
          do
            {
              struct libname_list *this = lnp;
              lnp = lnp->next;
              if (!this->dont_free)
                free (this);
            }
          while (lnp != NULL);
          /* Remove the searchlists.  */
          free (imap->l_initfini);

          /* Remove the scope array if we allocated it.  */
          if (imap->l_scope != imap->l_scope_mem)
            free (imap->l_scope);

          if (imap->l_phdr_allocated)
            free ((void *) imap->l_phdr);

          if (imap->l_rpath_dirs.dirs != (void *) -1)
            free (imap->l_rpath_dirs.dirs);
          if (imap->l_runpath_dirs.dirs != (void *) -1)
            free (imap->l_runpath_dirs.dirs);

          /* Clear GL(dl_initfirst) when freeing its link_map memory.  */
          if (imap == GL(dl_initfirst))
            GL(dl_initfirst) = NULL;

          free (imap);
        }
    }
  __rtld_lock_unlock_recursive (GL(dl_load_write_lock));

  /* If we removed any object which uses TLS bump the generation counter.  */
  if (any_tls)
    {
      if (__glibc_unlikely (++GL(dl_tls_generation) == 0))
        _dl_fatal_printf ("TLS generation counter wrapped!  Please report "
                          "as described in "REPORT_BUGS_TO".\n");

      if (tls_free_end == GL(dl_tls_static_used))
        GL(dl_tls_static_used) = tls_free_start;
    }
  /* Auditing checkpoint: we have deleted all objects.  */
  if (__glibc_unlikely (do_audit))
    {
      struct link_map *head = ns->_ns_loaded;
      /* If head is NULL, the namespace has become empty, and the
         audit interface does not give us a way to signal
         LA_ACT_CONSISTENT for it because the first loaded module is
         used to identify the namespace.

         Furthermore, do not notify auditors of the cleanup of a
         failed audit module loading attempt.  */
      if (head != NULL && head->l_auditing == 0)
        {
          struct audit_ifaces *afct = GLRO(dl_audit);
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                {
                  struct auditstate *state = link_map_audit_state (head, cnt);
                  afct->activity (&state->cookie, LA_ACT_CONSISTENT);
                }

              afct = afct->next;
            }
        }
    }

  if (__builtin_expect (ns->_ns_loaded == NULL, 0)
      && nsid == GL(dl_nns) - 1)
    do
      --GL(dl_nns);
    while (GL(dl_ns)[GL(dl_nns) - 1]._ns_loaded == NULL);
  /* Notify the debugger those objects are finalized and gone.  */
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
  LIBC_PROBE (unmap_complete, 2, nsid, r);

  /* Recheck if we need to retry, release the lock.  */
 out:
  if (dl_close_state == rerun)
    goto retry;

  dl_close_state = not_pending;
}
void
_dl_close (void *_map)
{
  struct link_map *map = _map;

  /* We must take the lock to examine the contents of map and avoid
     concurrent dlopens.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  /* At this point we are guaranteed nobody else is touching the list of
     loaded maps, but a concurrent dlclose might have freed our map
     before we took the lock.  There is no way to detect this (see below)
     so we proceed assuming this isn't the case.  First see whether we
     can remove the object at all.  */
  if (__glibc_unlikely (map->l_nodelete_active))
    {
      /* Nope.  Do nothing.  */
      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      return;
    }

  /* At present this is an unreliable check except in the case where the
     caller has recursively called dlclose and we are sure the link map
     has not been freed.  In a non-recursive dlclose the map itself
     might have been freed and this access is potentially a data race
     with whatever other use this memory might have now, or worse we
     might silently corrupt memory if it looks enough like a link map.
     POSIX has language in dlclose that appears to guarantee that this
     should be a detectable case and given that dlclose should be threadsafe
     we need this to be a reliable detection.
     This is bug 20990.  */
  if (__builtin_expect (map->l_direct_opencount, 1) == 0)
    {
      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      _dl_signal_error (0, map->l_name, NULL, N_("shared object not open"));
    }

  _dl_close_worker (map, false);

  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}