1 /* Close a shared object opened by `_dl_open'.
2 Copyright (C) 1996-2002, 2003, 2004, 2005 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, write to the Free
17 Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 #include <bits/libc-lock.h>
30 #include <sys/types.h>
34 /* Type of the destructor functions (DT_FINI / DT_FINI_ARRAY entries). */
typedef void (*fini_t) (void);	/* An ELF destructor entry takes no arguments and returns nothing. */
39 /* Returns true if a non-empty entry was found. */
/* NOTE(review): damaged extraction — the embedded upstream line numbers
   (41, 44, 46, ...) skip values, so the function's storage class/return
   type, braces and several statements are missing, and identifiers are
   split across physical lines.  Recover the complete function from
   upstream glibc elf/dl-close.c before modifying this text.  */
/* Marks dtv slotinfo entry IDX as unused (map = NULL, generation bumped),
   recursing into later list elements, and appears to lower
   GL(dl_tls_max_dtv_idx) when IDX was the highest used index —
   TODO(review): confirm the return-value contract against the complete
   upstream source; the return statements are missing here.  */
41 remove_slotinfo (size_t idx
, struct dtv_slotinfo_list
*listp
, size_t disp
,
/* IDX beyond this list element: recurse into the next element.  */
44 if (idx
- disp
>= listp
->len
)
46 if (listp
->next
== NULL
)
48 /* The index is not actually valid in the slotinfo list,
49 because this object was closed before it was fully set
50 up due to some error. */
51 assert (! should_be_there
);
55 if (remove_slotinfo (idx
, listp
->next
, disp
+ listp
->len
,
59 /* No non-empty entry. Search from the end of this element's
61 idx
= disp
+ listp
->len
;
/* Map currently recorded for slot IDX in this list element.  */
66 struct link_map
*old_map
= listp
->slotinfo
[idx
- disp
].map
;
68 /* The entry might still be in its unused state if we are closing an
69 object that wasn't fully set up. */
70 if (__builtin_expect (old_map
!= NULL
, 1))
72 assert (old_map
->l_tls_modid
== idx
);
74 /* Mark the entry as unused. */
75 listp
->slotinfo
[idx
- disp
].gen
= GL(dl_tls_generation
) + 1;
76 listp
->slotinfo
[idx
- disp
].map
= NULL
;
79 /* If this is not the last currently used entry no need to look
81 if (idx
!= GL(dl_tls_max_dtv_idx
))
/* Scan backwards for the new highest in-use slot index.  */
85 while (idx
- disp
> (disp
== 0 ? 1 + GL(dl_tls_static_nelem
) : 0))
89 if (listp
->slotinfo
[idx
- disp
].map
!= NULL
)
91 /* Found a new last used index. */
92 GL(dl_tls_max_dtv_idx
) = idx
;
97 /* No non-entry in this list element. */
/* NOTE(review): damaged extraction — the embedded upstream line numbers
   skip values, so braces, local declarations (e.g. `i', `used', `done',
   `array') and whole statements are missing, and identifiers are split
   across physical lines.  Recover the complete function from upstream
   glibc elf/dl-close.c before modifying this text.  */
/* dlclose implementation: drops one direct reference on MAP; when no
   references remain it marks unreachable maps, runs their
   DT_FINI_ARRAY / DT_FINI destructors, fires auditor callbacks
   (objclose / activity), rebuilds scopes of survivors, reclaims TLS
   slotinfo entries and static-TLS offsets, then unlinks and frees the
   dead link maps.  Recursive calls just set dl_close_state and let the
   outer call redo the garbage collection.  */
104 _dl_close (void *_map
)
106 struct link_map
*map
= _map
;
107 Lmid_t ns
= map
->l_ns
;
109 /* First see whether we can remove the object at all. */
110 if (__builtin_expect (map
->l_flags_1
& DF_1_NODELETE
, 0)
111 && map
->l_init_called
)
112 /* Nope. Do nothing. */
115 if (__builtin_expect (map
->l_direct_opencount
, 1) == 0)
116 GLRO(dl_signal_error
) (0, map
->l_name
, NULL
, N_("shared object not open"));
118 /* Acquire the lock. */
119 __rtld_lock_lock_recursive (GL(dl_load_lock
));
121 /* One less direct use. */
122 --map
->l_direct_opencount
;
124 /* If _dl_close is called recursively (some destructor call dlclose),
125 just record that the parent _dl_close will need to do garbage collection
127 static enum { not_pending
, pending
, rerun
} dl_close_state
;
129 if (map
->l_direct_opencount
> 0 || map
->l_type
!= lt_loaded
130 || dl_close_state
!= not_pending
)
132 if (map
->l_direct_opencount
== 0 && map
->l_type
== lt_loaded
)
133 dl_close_state
= rerun
;
135 /* There are still references to this object. Do nothing more. */
136 if (__builtin_expect (GLRO(dl_debug_mask
) & DL_DEBUG_FILES
, 0))
137 _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
138 map
->l_name
, map
->l_direct_opencount
);
140 __rtld_lock_unlock_recursive (GL(dl_load_lock
));
145 dl_close_state
= pending
;
/* --- Mark phase: decide which maps in namespace NS are still used,
   propagating through l_initfini and l_reldeps dependencies.  --- */
148 bool any_tls
= false;
150 const unsigned int nloaded
= GL(dl_ns
)[ns
]._ns_nloaded
;
153 struct link_map
*maps
[nloaded
];
155 /* Run over the list and assign indexes to the link maps and enter
156 them into the MAPS array. */
158 for (struct link_map
*l
= GL(dl_ns
)[ns
]._ns_loaded
; l
!= NULL
; l
= l
->l_next
)
164 assert (idx
== nloaded
);
166 /* Prepare the bitmaps. */
167 memset (used
, '\0', sizeof (used
));
168 memset (done
, '\0', sizeof (done
));
170 /* Keep track of the lowest index link map we have covered already. */
172 while (++done_index
< nloaded
)
174 struct link_map
*l
= maps
[done_index
];
176 if (done
[done_index
])
177 /* Already handled. */
180 /* Check whether this object is still used. */
181 if (l
->l_type
== lt_loaded
182 && l
->l_direct_opencount
== 0
183 && (l
->l_flags_1
& DF_1_NODELETE
) == 0
184 && !used
[done_index
])
187 /* We need this object and we handle it now. */
188 done
[done_index
] = 1;
189 used
[done_index
] = 1;
190 /* Signal the object is still needed. */
193 /* Mark all dependencies as used. */
194 if (l
->l_initfini
!= NULL
)
196 struct link_map
**lp
= &l
->l_initfini
[1];
199 if ((*lp
)->l_idx
!= -1)
201 assert ((*lp
)->l_idx
>= 0 && (*lp
)->l_idx
< nloaded
);
203 if (!used
[(*lp
)->l_idx
])
205 used
[(*lp
)->l_idx
] = 1;
/* A dependency below DONE_INDEX was newly marked: rewind the scan.  */
206 if ((*lp
)->l_idx
- 1 < done_index
)
207 done_index
= (*lp
)->l_idx
- 1;
214 /* And the same for relocation dependencies. */
215 if (l
->l_reldeps
!= NULL
)
216 for (unsigned int j
= 0; j
< l
->l_reldepsact
; ++j
)
218 struct link_map
*jmap
= l
->l_reldeps
[j
];
220 if (jmap
->l_idx
!= -1)
222 assert (jmap
->l_idx
>= 0 && jmap
->l_idx
< nloaded
);
224 if (!used
[jmap
->l_idx
])
226 used
[jmap
->l_idx
] = 1;
227 if (jmap
->l_idx
- 1 < done_index
)
228 done_index
= jmap
->l_idx
- 1;
234 /* Sort the entries. */
235 _dl_sort_fini (GL(dl_ns
)[ns
]._ns_loaded
, maps
, nloaded
, used
, ns
);
237 /* Call all termination functions at once. */
239 bool do_audit
= GLRO(dl_naudit
) > 0 && !GL(dl_ns
)[ns
]._ns_loaded
->l_auditing
;
241 bool unload_any
= false;
242 unsigned int first_loaded
= ~0;
243 for (i
= 0; i
< nloaded
; ++i
)
245 struct link_map
*imap
= maps
[i
];
247 /* All elements must be in the same namespace. */
248 assert (imap
->l_ns
== ns
);
252 assert (imap
->l_type
== lt_loaded
253 && (imap
->l_flags_1
& DF_1_NODELETE
) == 0);
255 /* Call its termination function. Do not do it for
256 half-cooked objects. */
257 if (imap
->l_init_called
)
259 /* When debugging print a message first. */
260 if (__builtin_expect (GLRO(dl_debug_mask
) & DL_DEBUG_IMPCALLS
,
262 _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
/* DT_FINI_ARRAY entries run first (in reverse order upstream —
   the loop driving array[sz] is missing from this extraction).  */
265 if (imap
->l_info
[DT_FINI_ARRAY
] != NULL
)
268 (ElfW(Addr
) *) (imap
->l_addr
269 + imap
->l_info
[DT_FINI_ARRAY
]->d_un
.d_ptr
);
270 unsigned int sz
= (imap
->l_info
[DT_FINI_ARRAYSZ
]->d_un
.d_val
271 / sizeof (ElfW(Addr
)));
274 ((fini_t
) array
[sz
]) ();
277 /* Next try the old-style destructor. */
278 if (imap
->l_info
[DT_FINI
] != NULL
)
279 (*(void (*) (void)) DL_DT_FINI_ADDRESS
280 (imap
, ((void *) imap
->l_addr
281 + imap
->l_info
[DT_FINI
]->d_un
.d_ptr
))) ();
285 /* Auditing checkpoint: we have a new object. */
286 if (__builtin_expect (do_audit
, 0))
288 struct audit_ifaces
*afct
= GLRO(dl_audit
);
289 for (unsigned int cnt
= 0; cnt
< GLRO(dl_naudit
); ++cnt
)
291 if (afct
->objclose
!= NULL
)
292 /* Return value is ignored. */
293 (void) afct
->objclose (&imap
->l_audit
[cnt
].cookie
);
300 /* This object must not be used anymore. */
303 /* We indeed have an object to remove. */
306 /* Remember where the first dynamically loaded object is. */
307 if (i
< first_loaded
)
311 else if (imap
->l_type
== lt_loaded
)
313 if (imap
->l_searchlist
.r_list
== NULL
314 && imap
->l_initfini
!= NULL
)
316 /* The object is still used. But one of the objects we are
317 unloading right now is responsible for loading it. If
318 the current object does not have it's own scope yet we
319 have to create one. This has to be done before running
322 To do this count the number of dependencies. */
324 for (cnt
= 1; imap
->l_initfini
[cnt
] != NULL
; ++cnt
)
327 /* We simply reuse the l_initfini list. */
328 imap
->l_searchlist
.r_list
= &imap
->l_initfini
[cnt
+ 1];
329 imap
->l_searchlist
.r_nlist
= cnt
;
331 for (cnt
= 0; imap
->l_scope
[cnt
] != NULL
; ++cnt
)
332 /* This relies on l_scope[] entries being always set either
333 to its own l_symbolic_searchlist address, or some other map's
334 l_searchlist address. */
335 if (imap
->l_scope
[cnt
] != &imap
->l_symbolic_searchlist
)
337 struct link_map
*tmap
;
339 tmap
= (struct link_map
*) ((char *) imap
->l_scope
[cnt
]
340 - offsetof (struct link_map
,
342 assert (tmap
->l_ns
== ns
);
343 if (tmap
->l_idx
!= -1)
345 imap
->l_scope
[cnt
] = &imap
->l_searchlist
;
351 /* The loader is gone, so mark the object as not having one.
352 Note: l_idx != -1 -> object will be removed. */
353 if (imap
->l_loader
!= NULL
&& imap
->l_loader
->l_idx
!= -1)
354 imap
->l_loader
= NULL
;
356 /* Remember where the first dynamically loaded object is. */
357 if (i
< first_loaded
)
362 /* If there are no objects to unload, do nothing further. */
367 /* Auditing checkpoint: we will start deleting objects. */
368 if (__builtin_expect (do_audit
, 0))
370 struct link_map
*head
= GL(dl_ns
)[ns
]._ns_loaded
;
371 struct audit_ifaces
*afct
= GLRO(dl_audit
);
372 /* Do not call the functions for any auditing object. */
373 if (head
->l_auditing
== 0)
375 for (unsigned int cnt
= 0; cnt
< GLRO(dl_naudit
); ++cnt
)
377 if (afct
->activity
!= NULL
)
378 afct
->activity (&head
->l_audit
[cnt
].cookie
, LA_ACT_DELETE
);
386 /* Notify the debugger we are about to remove some loaded objects. */
387 struct r_debug
*r
= _dl_debug_initialize (0, ns
);
388 r
->r_state
= RT_DELETE
;
/* --- Sweep phase: static-TLS reclamation bookkeeping.  The two
   branches below correspond to TLS_TCB_AT_TP vs TLS_DTV_AT_TP layouts
   (the #if/#elif directives are missing from this extraction).  --- */
392 size_t tls_free_start
;
394 tls_free_start
= tls_free_end
= NO_TLS_OFFSET
;
397 /* Check each element of the search list to see if all references to
399 for (i
= first_loaded
; i
< nloaded
; ++i
)
401 struct link_map
*imap
= maps
[i
];
404 assert (imap
->l_type
== lt_loaded
);
406 /* That was the last reference, and this was a dlopen-loaded
407 object. We can unmap it. */
408 if (__builtin_expect (imap
->l_global
, 0))
410 /* This object is in the global scope list. Remove it. */
411 unsigned int cnt
= GL(dl_ns
)[ns
]._ns_main_searchlist
->r_nlist
;
415 while (GL(dl_ns
)[ns
]._ns_main_searchlist
->r_list
[cnt
] != imap
);
417 /* The object was already correctly registered. */
419 < GL(dl_ns
)[ns
]._ns_main_searchlist
->r_nlist
)
420 GL(dl_ns
)[ns
]._ns_main_searchlist
->r_list
[cnt
- 1]
421 = GL(dl_ns
)[ns
]._ns_main_searchlist
->r_list
[cnt
];
423 --GL(dl_ns
)[ns
]._ns_main_searchlist
->r_nlist
;
427 /* Remove the object from the dtv slotinfo array if it uses TLS. */
428 if (__builtin_expect (imap
->l_tls_blocksize
> 0, 0))
432 if (GL(dl_tls_dtv_slotinfo_list
) != NULL
433 && ! remove_slotinfo (imap
->l_tls_modid
,
434 GL(dl_tls_dtv_slotinfo_list
), 0,
435 imap
->l_init_called
))
436 /* All dynamically loaded modules with TLS are unloaded. */
437 GL(dl_tls_max_dtv_idx
) = GL(dl_tls_static_nelem
);
439 if (imap
->l_tls_offset
!= NO_TLS_OFFSET
)
441 /* Collect a contiguous chunk built from the objects in
442 this search list, going in either direction. When the
443 whole chunk is at the end of the used area then we can
446 if (tls_free_start
== NO_TLS_OFFSET
447 || (size_t) imap
->l_tls_offset
== tls_free_start
)
449 /* Extend the contiguous chunk being reclaimed. */
451 = imap
->l_tls_offset
- imap
->l_tls_blocksize
;
453 if (tls_free_end
== NO_TLS_OFFSET
)
454 tls_free_end
= imap
->l_tls_offset
;
456 else if (imap
->l_tls_offset
- imap
->l_tls_blocksize
458 /* Extend the chunk backwards. */
459 tls_free_end
= imap
->l_tls_offset
;
462 /* This isn't contiguous with the last chunk freed.
463 One of them will be leaked unless we can free
464 one block right away. */
465 if (tls_free_end
== GL(dl_tls_static_used
))
467 GL(dl_tls_static_used
) = tls_free_start
;
468 tls_free_end
= imap
->l_tls_offset
;
470 = tls_free_end
- imap
->l_tls_blocksize
;
472 else if ((size_t) imap
->l_tls_offset
473 == GL(dl_tls_static_used
))
474 GL(dl_tls_static_used
)
475 = imap
->l_tls_offset
- imap
->l_tls_blocksize
;
476 else if (tls_free_end
< (size_t) imap
->l_tls_offset
)
478 /* We pick the later block. It has a chance to
480 tls_free_end
= imap
->l_tls_offset
;
482 = tls_free_end
- imap
->l_tls_blocksize
;
486 if ((size_t) imap
->l_tls_offset
== tls_free_end
)
487 /* Extend the contiguous chunk being reclaimed. */
488 tls_free_end
-= imap
->l_tls_blocksize
;
489 else if (imap
->l_tls_offset
+ imap
->l_tls_blocksize
491 /* Extend the chunk backwards. */
492 tls_free_start
= imap
->l_tls_offset
;
495 /* This isn't contiguous with the last chunk freed.
496 One of them will be leaked. */
497 if (tls_free_end
== GL(dl_tls_static_used
))
498 GL(dl_tls_static_used
) = tls_free_start
;
499 tls_free_start
= imap
->l_tls_offset
;
500 tls_free_end
= tls_free_start
+ imap
->l_tls_blocksize
;
503 # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
509 /* We can unmap all the maps at once. We determined the
510 start address and length when we loaded the object and
511 the `munmap' call does the rest. */
514 /* Finally, unlink the data structure and free it. */
515 if (imap
->l_prev
!= NULL
)
516 imap
->l_prev
->l_next
= imap
->l_next
;
520 assert (ns
!= LM_ID_BASE
);
522 GL(dl_ns
)[ns
]._ns_loaded
= imap
->l_next
;
525 --GL(dl_ns
)[ns
]._ns_nloaded
;
526 if (imap
->l_next
!= NULL
)
527 imap
->l_next
->l_prev
= imap
->l_prev
;
529 free (imap
->l_versions
);
530 if (imap
->l_origin
!= (char *) -1)
531 free ((char *) imap
->l_origin
);
533 free (imap
->l_reldeps
);
535 /* Print debugging message. */
536 if (__builtin_expect (GLRO(dl_debug_mask
) & DL_DEBUG_FILES
, 0))
537 _dl_debug_printf ("\nfile=%s [%lu]; destroying link map\n",
538 imap
->l_name
, imap
->l_ns
);
540 /* This name always is allocated. */
542 /* Remove the list with all the names of the shared object. */
544 struct libname_list
*lnp
= imap
->l_libname
;
547 struct libname_list
*this = lnp
;
549 if (!this->dont_free
)
554 /* Remove the searchlists. */
555 free (imap
->l_initfini
);
557 /* Remove the scope array if we allocated it. */
558 if (imap
->l_scope
!= imap
->l_scope_mem
)
559 free (imap
->l_scope
);
561 if (imap
->l_phdr_allocated
)
562 free ((void *) imap
->l_phdr
);
564 if (imap
->l_rpath_dirs
.dirs
!= (void *) -1)
565 free (imap
->l_rpath_dirs
.dirs
);
566 if (imap
->l_runpath_dirs
.dirs
!= (void *) -1)
567 free (imap
->l_runpath_dirs
.dirs
);
574 /* If we removed any object which uses TLS bump the generation counter. */
577 if (__builtin_expect (++GL(dl_tls_generation
) == 0, 0))
578 _dl_fatal_printf ("TLS generation counter wrapped! Please report as described in <http://www.gnu.org/software/libc/bugs.html>.\n");
580 if (tls_free_end
== GL(dl_tls_static_used
))
581 GL(dl_tls_static_used
) = tls_free_start
;
586 /* Auditing checkpoint: we have deleted all objects. */
587 if (__builtin_expect (do_audit
, 0))
589 struct link_map
*head
= GL(dl_ns
)[ns
]._ns_loaded
;
590 /* Do not call the functions for any auditing object. */
591 if (head
->l_auditing
== 0)
593 struct audit_ifaces
*afct
= GLRO(dl_audit
);
594 for (unsigned int cnt
= 0; cnt
< GLRO(dl_naudit
); ++cnt
)
596 if (afct
->activity
!= NULL
)
597 afct
->activity (&head
->l_audit
[cnt
].cookie
, LA_ACT_CONSISTENT
);
605 /* Notify the debugger those objects are finalized and gone. */
606 r
->r_state
= RT_CONSISTENT
;
609 /* Recheck if we need to retry, release the lock. */
611 if (dl_close_state
== rerun
)
614 dl_close_state
= not_pending
;
615 __rtld_lock_unlock_recursive (GL(dl_load_lock
));
/* NOTE(review): damaged extraction — upstream line numbers skip values
   (return statements, braces and the free() of the list element are
   missing) and identifiers are split across physical lines.  Recover
   the complete function from upstream glibc elf/dl-close.c before
   modifying this text.  */
/* Frees the dtv_slotinfo_list element *ELEMP (recursively freeing later
   elements first); a slot whose .map is still non-NULL apparently keeps
   the element alive — TODO(review): confirm the true/false return
   contract against the complete upstream source.  */
620 static bool __libc_freeres_fn_section
621 free_slotinfo (struct dtv_slotinfo_list
**elemp
)
626 /* Nothing here, all is removed (or there never was anything). */
629 if (!free_slotinfo (&(*elemp
)->next
))
630 /* We cannot free the entry. */
633 /* That cleared our next pointer for us. */
635 for (cnt
= 0; cnt
< (*elemp
)->len
; ++cnt
)
636 if ((*elemp
)->slotinfo
[cnt
].map
!= NULL
)
640 /* We can remove the list element. */
649 libc_freeres_fn (free_mem
)
651 for (Lmid_t ns
= 0; ns
< DL_NNS
; ++ns
)
652 if (__builtin_expect (GL(dl_ns
)[ns
]._ns_global_scope_alloc
, 0) != 0
653 && (GL(dl_ns
)[ns
]._ns_main_searchlist
->r_nlist
654 // XXX Check whether we need NS-specific initial_searchlist
655 == GLRO(dl_initial_searchlist
).r_nlist
))
657 /* All object dynamically loaded by the program are unloaded. Free
658 the memory allocated for the global scope variable. */
659 struct link_map
**old
= GL(dl_ns
)[ns
]._ns_main_searchlist
->r_list
;
661 /* Put the old map in. */
662 GL(dl_ns
)[ns
]._ns_main_searchlist
->r_list
663 // XXX Check whether we need NS-specific initial_searchlist
664 = GLRO(dl_initial_searchlist
).r_list
;
665 /* Signal that the original map is used. */
666 GL(dl_ns
)[ns
]._ns_global_scope_alloc
= 0;
668 /* Now free the old map. */
673 if (USE___THREAD
|| GL(dl_tls_dtv_slotinfo_list
) != NULL
)
675 /* Free the memory allocated for the dtv slotinfo array. We can do
676 this only if all modules which used this memory are unloaded. */
678 if (GL(dl_initial_dtv
) == NULL
)
679 /* There was no initial TLS setup, it was set up later when
680 it used the normal malloc. */
681 free_slotinfo (&GL(dl_tls_dtv_slotinfo_list
));
684 /* The first element of the list does not have to be deallocated.
685 It was allocated in the dynamic linker (i.e., with a different
686 malloc), and in the static library it's in .bss space. */
687 free_slotinfo (&GL(dl_tls_dtv_slotinfo_list
)->next
);