/* Close a shared object opened by `_dl_open'.
   Copyright (C) 1996-2007, 2009, 2010, 2011 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <bits/libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sysdep-cancel.h>
#include <tls.h>

/* Type of the destructor ("fini") functions.  */
typedef void (*fini_t) (void);


/* Special l_idx value used to indicate which objects remain loaded.  */
#define IDX_STILL_USED -1
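
/* A module's TLS block is recorded in the dtv slotinfo list, a chunked
   array indexed by module ID.  remove_slotinfo below clears the entry
   of an unloaded module and, when that module held the highest ID in
   use, walks backwards to find the new maximum.  */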

/* Returns true if a non-empty entry was found.  */
static bool
remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
                 bool should_be_there)
{
  if (idx - disp >= listp->len)
    {
      if (listp->next == NULL)
        {
          /* The index is not actually valid in the slotinfo list,
             because this object was closed before it was fully set
             up due to some error.  */
          assert (! should_be_there);
        }
      else
        {
          if (remove_slotinfo (idx, listp->next, disp + listp->len,
                               should_be_there))
            return true;

          /* No non-empty entry found.  Search from the end of this
             element's slotinfo array.  */
          idx = disp + listp->len;
        }
    }
  else
    {
      struct link_map *old_map = listp->slotinfo[idx - disp].map;

      /* The entry might still be in its unused state if we are closing an
         object that wasn't fully set up.  */
      if (__builtin_expect (old_map != NULL, 1))
        {
          assert (old_map->l_tls_modid == idx);

          /* Mark the entry as unused.  */
          listp->slotinfo[idx - disp].gen = GL(dl_tls_generation) + 1;
          listp->slotinfo[idx - disp].map = NULL;
        }

      /* If this is not the last currently used entry there is no need
         to look further.  */
      if (idx != GL(dl_tls_max_dtv_idx))
        return true;
    }
  while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
    {
      --idx;

      if (listp->slotinfo[idx - disp].map != NULL)
        {
          /* Found a new last used index.  */
          GL(dl_tls_max_dtv_idx) = idx;
          return true;
        }
    }

  /* No non-empty entry in this list element.  */
  return false;
}

void
_dl_close_worker (struct link_map *map)
{
  /* One less direct use.  */
  --map->l_direct_opencount;
  /* If _dl_close is called recursively (some destructor call dlclose),
     just record that the parent _dl_close will need to do garbage collection
     again and return.  */
  static enum { not_pending, pending, rerun } dl_close_state;
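  /* NB: dl_close_state being static means a nested call never runs its
     own garbage-collection pass; it merely asks the outermost call to
     rerun the pass once the destructor that triggered it returns.  */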
  if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
      || dl_close_state != not_pending)
    {
      if (map->l_direct_opencount == 0)
        {
          if (map->l_type == lt_loaded)
            dl_close_state = rerun;
          else if (map->l_type == lt_library)
            {
              struct link_map **oldp = map->l_initfini;
              map->l_initfini = map->l_orig_initfini;
              _dl_scope_free (oldp);
            }
        }

      /* There are still references to this object.  Do nothing more.  */
      if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
        _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
                          map->l_name, map->l_direct_opencount);

      return;
    }
  Lmid_t nsid = map->l_ns;
  struct link_namespaces *ns = &GL(dl_ns)[nsid];

 retry:
  dl_close_state = pending;

  bool any_tls = false;
  const unsigned int nloaded = ns->_ns_nloaded;
  char used[nloaded];
  char done[nloaded];
  struct link_map *maps[nloaded];
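
  /* USED and DONE are bitmaps parallel to MAPS: USED marks objects that
     must stay loaded, DONE marks objects whose dependencies have already
     been propagated into USED.  */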
  /* Run over the list and assign indexes to the link maps and enter
     them into the MAPS array.  */
  int idx = 0;
  for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
    {
      l->l_idx = idx;
      maps[idx] = l;
      ++idx;
    }
  assert (idx == nloaded);

  /* Prepare the bitmaps.  */
  memset (used, '\0', sizeof (used));
  memset (done, '\0', sizeof (done));
  /* Keep track of the lowest index link map we have covered already.  */
  int done_index = -1;
  while (++done_index < nloaded)
    {
      struct link_map *l = maps[done_index];

      if (done[done_index])
        /* Already handled.  */
        continue;

      /* Check whether this object is still used.  */
      if (l->l_type == lt_loaded
          && l->l_direct_opencount == 0
          && (l->l_flags_1 & DF_1_NODELETE) == 0
          && !used[done_index])
        continue;
      /* We need this object and we handle it now.  */
      done[done_index] = 1;
      used[done_index] = 1;
      /* Signal the object is still needed.  */
      l->l_idx = IDX_STILL_USED;

      /* Mark all dependencies as used.  */
      if (l->l_initfini != NULL)
        {
          struct link_map **lp = &l->l_initfini[1];
          while (*lp != NULL)
            {
              if ((*lp)->l_idx != IDX_STILL_USED)
                {
                  assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);

                  if (!used[(*lp)->l_idx])
                    {
                      used[(*lp)->l_idx] = 1;
                      if ((*lp)->l_idx - 1 < done_index)
                        done_index = (*lp)->l_idx - 1;
                    }
                }

              ++lp;
            }
        }
      /* And the same for relocation dependencies.  */
      if (l->l_reldeps != NULL)
        for (unsigned int j = 0; j < l->l_reldeps->act; ++j)
          {
            struct link_map *jmap = l->l_reldeps->list[j];

            if (jmap->l_idx != IDX_STILL_USED)
              {
                assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);

                if (!used[jmap->l_idx])
                  {
                    used[jmap->l_idx] = 1;
                    if (jmap->l_idx - 1 < done_index)
                      done_index = jmap->l_idx - 1;
                  }
              }
          }
    }
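
  /* At this point USED marks exactly the objects that must survive:
     everything still explicitly opened or flagged DF_1_NODELETE, plus
     the transitive closure of their dependencies.  Every other map can
     be finalized and unloaded.  */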
  /* Sort the entries.  */
  _dl_sort_fini (maps, nloaded, used, nsid);
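  /* After sorting, an object precedes the objects it depends on, so the
     destructor calls below run in the reverse of initialization order.  */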

  /* Call all termination functions at once.  */
  bool do_audit = GLRO(dl_naudit) > 0 && !ns->_ns_loaded->l_auditing;
  bool unload_any = false;
  bool scope_mem_left = false;
  unsigned int unload_global = 0;
  unsigned int first_loaded = ~0;
  for (unsigned int i = 0; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];

      /* All elements must be in the same namespace.  */
      assert (imap->l_ns == nsid);

      if (!used[i])
        {
          assert (imap->l_type == lt_loaded
                  && (imap->l_flags_1 & DF_1_NODELETE) == 0);
          /* Call its termination function.  Do not do it for
             half-cooked objects.  */
          if (imap->l_init_called)
            {
              /* When debugging print a message first.  */
              if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS,
                                    0))
                _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
                                  imap->l_name, nsid);

              if (imap->l_info[DT_FINI_ARRAY] != NULL)
                {
                  ElfW(Addr) *array =
                    (ElfW(Addr) *) (imap->l_addr
                                    + imap->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
                  unsigned int sz = (imap->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
                                     / sizeof (ElfW(Addr)));

                  while (sz-- > 0)
                    ((fini_t) array[sz]) ();
                }
              /* Next try the old-style destructor.  */
              if (imap->l_info[DT_FINI] != NULL)
                (*(void (*) (void)) DL_DT_FINI_ADDRESS
                 (imap, ((void *) imap->l_addr
                         + imap->l_info[DT_FINI]->d_un.d_ptr))) ();
            }
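          /* Note the order above: DT_FINI_ARRAY entries are invoked from
             the end of the array backwards, and only then the old-style
             DT_FINI destructor, inverting initialization order.  */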
          /* Auditing checkpoint: we remove an object.  */
          if (__builtin_expect (do_audit, 0))
            {
              struct audit_ifaces *afct = GLRO(dl_audit);
              for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
                {
                  if (afct->objclose != NULL)
                    /* Return value is ignored.  */
                    (void) afct->objclose (&imap->l_audit[cnt].cookie);

                  afct = afct->next;
                }
            }

          /* This object must not be used anymore.  */
          imap->l_removed = 1;

          /* We indeed have an object to remove.  */
          unload_any = true;

          if (imap->l_global)
            ++unload_global;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
      else if (imap->l_type == lt_loaded)
        {
          struct r_scope_elem *new_list = NULL;

          if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
            {
              /* The object is still used.  But one of the objects we are
                 unloading right now is responsible for loading it.  If
                 the current object does not have its own scope yet we
                 have to create one.  This has to be done before running
                 the finalizers.

                 To do this count the number of dependencies.  */
              unsigned int cnt;
              for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
                ;

              /* We simply reuse the l_initfini list.  */
              imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
              imap->l_searchlist.r_nlist = cnt;

              new_list = &imap->l_searchlist;
            }
          /* Count the number of scopes which remain after the unload.
             When we add the local search list count it.  Always add
             one for the terminating NULL pointer.  */
          size_t remain = (new_list != NULL) + 1;
          bool removed_any = false;
          for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
            /* This relies on l_scope[] entries being always set either
               to its own l_symbolic_searchlist address, or some map's
               l_searchlist address.  */
            if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
              {
                struct link_map *tmap = (struct link_map *)
                  ((char *) imap->l_scope[cnt]
                   - offsetof (struct link_map, l_searchlist));
                assert (tmap->l_ns == nsid);
                if (tmap->l_idx == IDX_STILL_USED)
                  ++remain;
                else
                  removed_any = true;
              }
            else
              ++remain;

          if (removed_any)
            {
              /* Always allocate a new array for the scope.  This is
                 necessary since we must be able to determine the last
                 user of the current array.  If possible use the link
                 map's own memory.  */
              size_t new_size;
              struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

              if (imap->l_scope != imap->l_scope_mem
                  && remain < SCOPE_ELEMS (imap))
                {
                  new_size = SCOPE_ELEMS (imap);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlclose", NULL,
                                      N_("cannot create scope list"));
                }
              /* Copy over the remaining scope elements.  */
              remain = 0;
              for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
                {
                  if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
                    {
                      struct link_map *tmap = (struct link_map *)
                        ((char *) imap->l_scope[cnt]
                         - offsetof (struct link_map, l_searchlist));
                      if (tmap->l_idx != IDX_STILL_USED)
                        {
                          /* Remove the scope.  Or replace it with the
                             map's own scope.  */
                          if (new_list != NULL)
                            {
                              newp[remain++] = new_list;
                              new_list = NULL;
                            }
                          continue;
                        }
                    }

                  newp[remain++] = imap->l_scope[cnt];
                }
              newp[remain] = NULL;
              struct r_scope_elem **old = imap->l_scope;

              imap->l_scope = newp;

              /* No user anymore, we can free it now.  */
              if (old != imap->l_scope_mem)
                {
                  if (_dl_scope_free (old))
                    /* If _dl_scope_free used THREAD_GSCOPE_WAIT (),
                       no need to repeat it.  */
                    scope_mem_left = false;
                }
              else
                scope_mem_left = true;

              imap->l_scope_max = new_size;
            }
          else if (new_list != NULL)
            {
              /* We didn't change the scope array, so reset the search
                 list.  */
              imap->l_searchlist.r_list = NULL;
              imap->l_searchlist.r_nlist = 0;
            }
          /* The loader is gone, so mark the object as not having one.
             Note: l_idx != IDX_STILL_USED -> object will be removed.  */
          if (imap->l_loader != NULL
              && imap->l_loader->l_idx != IDX_STILL_USED)
            imap->l_loader = NULL;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
    }

  /* If there are no objects to unload, do nothing further.  */
  if (!unload_any)
    goto out;
  /* Auditing checkpoint: we will start deleting objects.  */
  if (__builtin_expect (do_audit, 0))
    {
      struct link_map *head = ns->_ns_loaded;
      struct audit_ifaces *afct = GLRO(dl_audit);
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie, LA_ACT_DELETE);

              afct = afct->next;
            }
        }
    }
  /* Notify the debugger we are about to remove some loaded objects.  */
  struct r_debug *r = _dl_debug_initialize (0, nsid);
  r->r_state = RT_DELETE;
  _dl_debug_state ();

  if (unload_global)
    {
      /* Some objects are in the global scope list.  Remove them.  */
      struct r_scope_elem *ns_msl = ns->_ns_main_searchlist;
      unsigned int i;
      unsigned int j = 0;
      unsigned int cnt = ns_msl->r_nlist;

      while (cnt > 0 && ns_msl->r_list[cnt - 1]->l_removed)
        --cnt;

      if (cnt + unload_global == ns_msl->r_nlist)
        /* Speed up removing most recently added objects.  */
        j = cnt;
      else
        for (i = 0; i < cnt; i++)
          if (ns_msl->r_list[i]->l_removed == 0)
            {
              if (i != j)
                ns_msl->r_list[j] = ns_msl->r_list[i];
              j++;
            }
      ns_msl->r_nlist = j;
    }
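
  /* Lock-free readers may still be walking the scope arrays retired
     above.  Unless the process is single-threaded, wait until every
     thread has left its global-scope critical section before any old
     scope array is freed.  */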
  if (!RTLD_SINGLE_THREAD_P
      && (unload_global
          || scope_mem_left
          || (GL(dl_scope_free_list) != NULL
              && GL(dl_scope_free_list)->count)))
    {
      THREAD_GSCOPE_WAIT ();

      /* Now we can free any queued old scopes.  */
      struct dl_scope_free_list *fsl = GL(dl_scope_free_list);
      if (fsl != NULL)
        while (fsl->count > 0)
          free (fsl->list[--fsl->count]);
    }
  size_t tls_free_start;
  size_t tls_free_end;
  tls_free_start = tls_free_end = NO_TLS_OFFSET;

  /* We modify the list of loaded objects.  */
  __rtld_lock_lock_recursive (GL(dl_load_write_lock));
  /* Check each element of the search list to see if all references to
     it are gone.  */
  for (unsigned int i = first_loaded; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];
      if (!used[i])
        {
          assert (imap->l_type == lt_loaded);

          /* That was the last reference, and this was a dlopen-loaded
             object.  We can unmap it.  */
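          /* Static TLS memory cannot be returned to the system piecemeal;
             the code below merely tracks one contiguous chunk of freed
             offsets so that, if the chunk ends up at the top of the used
             area, GL(dl_tls_static_used) can be lowered to reclaim it.  */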
          /* Remove the object from the dtv slotinfo array if it uses
             TLS.  */
          if (__builtin_expect (imap->l_tls_blocksize > 0, 0))
            {
              any_tls = true;

              if (GL(dl_tls_dtv_slotinfo_list) != NULL
                  && ! remove_slotinfo (imap->l_tls_modid,
                                        GL(dl_tls_dtv_slotinfo_list), 0,
                                        imap->l_init_called))
                /* All dynamically loaded modules with TLS are
                   unloaded.  */
                GL(dl_tls_max_dtv_idx) = GL(dl_tls_static_nelem);

              if (imap->l_tls_offset != NO_TLS_OFFSET
                  && imap->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
                {
                  /* Collect a contiguous chunk built from the objects in
                     this search list, going in either direction.  When the
                     whole chunk is at the end of the used area then we can
                     reclaim it.  */
#if TLS_TCB_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET
                      || (size_t) imap->l_tls_offset == tls_free_start)
                    {
                      /* Extend the contiguous chunk being reclaimed.  */
                      tls_free_start
                        = imap->l_tls_offset - imap->l_tls_blocksize;

                      if (tls_free_end == NO_TLS_OFFSET)
                        tls_free_end = imap->l_tls_offset;
                    }
                  else if (imap->l_tls_offset - imap->l_tls_blocksize
                           == tls_free_end)
                    /* Extend the chunk backwards.  */
                    tls_free_end = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked unless we can free
                         one block right away.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        {
                          GL(dl_tls_static_used) = tls_free_start;
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                      else if ((size_t) imap->l_tls_offset
                               == GL(dl_tls_static_used))
                        GL(dl_tls_static_used)
                          = imap->l_tls_offset - imap->l_tls_blocksize;
                      else if (tls_free_end < (size_t) imap->l_tls_offset)
                        {
                          /* We pick the later block.  It has a chance to
                             be freed.  */
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                    }
#elif TLS_DTV_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET)
                    {
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = (imap->l_tls_offset
                                      + imap->l_tls_blocksize);
                    }
                  else if (imap->l_tls_firstbyte_offset == tls_free_end)
                    /* Extend the contiguous chunk being reclaimed.  */
                    tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == tls_free_start)
                    /* Extend the chunk backwards.  */
                    tls_free_start = imap->l_tls_firstbyte_offset;
                  /* This isn't contiguous with the last chunk freed.
                     One of them will be leaked unless we can free
                     one block right away.  */
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == GL(dl_tls_static_used))
                    GL(dl_tls_static_used) = imap->l_tls_firstbyte_offset;
                  else if (tls_free_end == GL(dl_tls_static_used))
                    {
                      GL(dl_tls_static_used) = tls_free_start;
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = (imap->l_tls_offset
                                      + imap->l_tls_blocksize);
                    }
                  else if (tls_free_end < imap->l_tls_firstbyte_offset)
                    {
                      /* We pick the later block.  It has a chance to
                         be freed.  */
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = (imap->l_tls_offset
                                      + imap->l_tls_blocksize);
                    }
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
                }
            }
          /* We can unmap all the maps at once.  We determined the
             start address and length when we loaded the object and
             the `munmap' call does the rest.  */
          DL_UNMAP (imap);

          /* Finally, unlink the data structure and free it.  */
          if (imap->l_prev != NULL)
            imap->l_prev->l_next = imap->l_next;
          else
            {
              assert (nsid != LM_ID_BASE);
              ns->_ns_loaded = imap->l_next;
            }

          --ns->_ns_nloaded;
          if (imap->l_next != NULL)
            imap->l_next->l_prev = imap->l_prev;
          free (imap->l_versions);
          if (imap->l_origin != (char *) -1)
            free ((char *) imap->l_origin);

          free (imap->l_reldeps);
          /* Print debugging message.  */
          if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
            _dl_debug_printf ("\nfile=%s [%lu];  destroying link map\n",
                              imap->l_name, imap->l_ns);
          /* This name is always allocated.  */
          free (imap->l_name);

          /* Remove the list with all the names of the shared object.  */
          struct libname_list *lnp = imap->l_libname;
          do
            {
              struct libname_list *this = lnp;
              lnp = lnp->next;
              if (!this->dont_free)
                free (this);
            }
          while (lnp != NULL);
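          /* Entries flagged dont_free live inside memory that is not
             individually heap-allocated (e.g. the initial name record),
             so only the heap-allocated list nodes are released here.  */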
          /* Remove the searchlists.  */
          free (imap->l_initfini);

          /* Remove the scope array if we allocated it.  */
          if (imap->l_scope != imap->l_scope_mem)
            free (imap->l_scope);

          if (imap->l_phdr_allocated)
            free ((void *) imap->l_phdr);

          if (imap->l_rpath_dirs.dirs != (void *) -1)
            free (imap->l_rpath_dirs.dirs);
          if (imap->l_runpath_dirs.dirs != (void *) -1)
            free (imap->l_runpath_dirs.dirs);

          free (imap);
        }
    }
  __rtld_lock_unlock_recursive (GL(dl_load_write_lock));
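
  /* Bumping GL(dl_tls_generation) below invalidates cached dtv entries:
     a thread whose dtv carries an older generation must refresh it from
     the slotinfo list before it touches a module ID again.  */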
  /* If we removed any object which uses TLS bump the generation counter.  */
  if (any_tls)
    {
      if (__builtin_expect (++GL(dl_tls_generation) == 0, 0))
        _dl_fatal_printf ("TLS generation counter wrapped!  Please report as described in <http://www.gnu.org/software/libc/bugs.html>.\n");

      if (tls_free_end == GL(dl_tls_static_used))
        GL(dl_tls_static_used) = tls_free_start;
    }
  /* Auditing checkpoint: we have deleted all objects.  */
  if (__builtin_expect (do_audit, 0))
    {
      struct link_map *head = ns->_ns_loaded;
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          struct audit_ifaces *afct = GLRO(dl_audit);
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie,
                                LA_ACT_CONSISTENT);

              afct = afct->next;
            }
        }
    }
  if (__builtin_expect (ns->_ns_loaded == NULL, 0)
      && nsid == GL(dl_nns) - 1)
    do
      --GL(dl_nns);
    while (GL(dl_ns)[GL(dl_nns) - 1]._ns_loaded == NULL);
  /* Notify the debugger those objects are finalized and gone.  */
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();

  /* Recheck if we need to retry, release the lock.  */
 out:
  if (dl_close_state == rerun)
    goto retry;

  dl_close_state = not_pending;
}

void
_dl_close (void *_map)
{
  struct link_map *map = _map;

  /* First see whether we can remove the object at all.  */
  if (__builtin_expect (map->l_flags_1 & DF_1_NODELETE, 0))
    {
      assert (map->l_init_called);
      /* Nope.  Do nothing.  */
      return;
    }
  if (__builtin_expect (map->l_direct_opencount, 1) == 0)
    GLRO(dl_signal_error) (0, map->l_name, NULL,
                           N_("shared object not open"));

  /* Acquire the lock.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  _dl_close_worker (map);

  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}