/* Close a shared object opened by `_dl_open'.
   Copyright (C) 1996-2002, 2003, 2004 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <assert.h>
#include <dlfcn.h>
#include <libintl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <bits/libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>

/* Type of the finalizer ("fini") functions called below.  */
typedef void (*fini_t) (void);

#ifdef USE_TLS
/* Returns true if a non-empty entry was found.  */
static bool
remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
                 bool should_be_there)
{
  if (idx - disp >= listp->len)
    {
      if (listp->next == NULL)
        {
          /* The index is not actually valid in the slotinfo list,
             because this object was closed before it was fully set
             up due to some error.  */
          assert (! should_be_there);
        }
      else
        {
          if (remove_slotinfo (idx, listp->next, disp + listp->len,
                               should_be_there))
            return true;

          /* No non-empty entry.  Search from the end of this element's
             slotinfo array.  */
          idx = disp + listp->len;
        }
    }
  else
    {
      struct link_map *old_map = listp->slotinfo[idx - disp].map;

      /* The entry might still be in its unused state if we are closing an
         object that wasn't fully set up.  */
      if (__builtin_expect (old_map != NULL, 1))
        {
          assert (old_map->l_tls_modid == idx);

          /* Mark the entry as unused.  */
          listp->slotinfo[idx - disp].gen = GL(dl_tls_generation) + 1;
          listp->slotinfo[idx - disp].map = NULL;
        }

      /* If this is not the last currently used entry no need to look
         further.  */
      if (idx != GL(dl_tls_max_dtv_idx))
        return true;
    }

  while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
    {
      --idx;

      if (listp->slotinfo[idx - disp].map != NULL)
        {
          /* Found a new last used index.  */
          GL(dl_tls_max_dtv_idx) = idx;
          return true;
        }
    }

  /* No non-empty entry in this list element.  */
  return false;
}
#endif

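
/* _dl_close undoes the effect of a `_dl_open' call.  In outline it
   1) drops the reference counts of MAP and of everything MAP pulled in,
   2) runs the finalizers (DT_FINI_ARRAY, then DT_FINI) of every object
      whose count reached zero,
   3) removes those objects from the scopes and from the TLS slotinfo
      list, unmaps them and frees their bookkeeping data, and
   4) finally closes relocation dependencies that are no longer needed.  */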
void
internal_function
_dl_close (void *_map)
{
  struct reldep_list
  {
    struct link_map **rellist;
    unsigned int nrellist;
    unsigned int nhandled;
    struct reldep_list *next;
    bool handled[0];
  } *reldeps = NULL;
  struct link_map **list;
  struct link_map *map = _map;
  unsigned int i;
  unsigned int *new_opencount;
#ifdef USE_TLS
  bool any_tls = false;
#endif

  /* First see whether we can remove the object at all.  */
  if (__builtin_expect (map->l_flags_1 & DF_1_NODELETE, 0)
      && map->l_init_called)
    /* Nope.  Do nothing.  */
    return;

  if (__builtin_expect (map->l_opencount, 1) == 0)
    GLRO(dl_signal_error) (0, map->l_name, NULL, N_("shared object not open"));

  /* Acquire the lock.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  /* One less direct use.  */
  assert (map->l_direct_opencount > 0);
  --map->l_direct_opencount;

  /* Decrement the reference count.  */
  if (map->l_opencount > 1 || map->l_type != lt_loaded)
    {
      /* There are still references to this object.  Do nothing more.  */
      if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
        GLRO(dl_debug_printf) ("\nclosing file=%s; opencount == %u\n",
                               map->l_name, map->l_opencount);

      /* Decrement the object's reference counter, not the dependencies'.  */
      --map->l_opencount;

      /* If the direct use counter reaches zero we have to decrement
         all the dependencies' usage counter.  */
      if (map->l_direct_opencount == 0)
        for (i = 1; i < map->l_searchlist.r_nlist; ++i)
          --map->l_searchlist.r_list[i]->l_opencount;

      __rtld_lock_unlock_recursive (GL(dl_load_lock));

      return;
    }
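
  /* From here on MAP is really going away.  Work out, in a scratch
     array, what the reference count of every object on MAP's dependency
     list will be once MAP is gone; entries that drop to zero are the
     candidates for unloading.  */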
  list = map->l_initfini;

  /* Compute the new l_opencount values.  */
  i = map->l_searchlist.r_nlist;
  if (__builtin_expect (i == 0, 0))
    /* This can happen if we handle relocation dependencies for an
       object which wasn't loaded directly.  */
    for (i = 1; list[i] != NULL; ++i)
      ;

  unsigned int nopencount = i;
  new_opencount = (unsigned int *) alloca (i * sizeof (unsigned int));

  for (i = 0; list[i] != NULL; ++i)
    {
      list[i]->l_idx = i;
      new_opencount[i] = list[i]->l_opencount;
    }
  --new_opencount[0];
  for (i = 1; list[i] != NULL; ++i)
    if ((list[i]->l_flags_1 & DF_1_NODELETE) == 0
        /* Decrement counter.  */
        && (assert (new_opencount[i] > 0), --new_opencount[i] == 0))
      {
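        /* MARK_REMOVED is a nested function (a GCC extension used freely
           inside ld.so).  It walks REMMAP's dependencies and relocation
           dependencies and decrements their NEW_OPENCOUNT entries,
           recursing into every object that becomes unreferenced in the
           process.  */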
        void mark_removed (struct link_map *remmap)
          {
            /* Test whether this object was also loaded directly.  */
            if (remmap->l_searchlist.r_list != NULL
                && remmap->l_direct_opencount > 0)
              {
                /* In this case we have to decrement all the dependencies of
                   this object.  They are all in MAP's dependency list.  */
                unsigned int j;
                struct link_map **dep_list = remmap->l_searchlist.r_list;

                for (j = 1; j < remmap->l_searchlist.r_nlist; ++j)
                  if (! (dep_list[j]->l_flags_1 & DF_1_NODELETE)
                      || ! dep_list[j]->l_init_called)
                    {
                      assert (dep_list[j]->l_idx < map->l_searchlist.r_nlist);
                      assert (new_opencount[dep_list[j]->l_idx] > 0);
                      if (--new_opencount[dep_list[j]->l_idx] == 0)
                        {
                          assert (dep_list[j]->l_type == lt_loaded);
                          mark_removed (dep_list[j]);
                        }
                    }
              }

            if (remmap->l_reldeps != NULL)
              {
                unsigned int j;
                for (j = 0; j < remmap->l_reldepsact; ++j)
                  {
                    struct link_map *depmap = remmap->l_reldeps[j];

                    /* Find out whether this object is in our list.  */
                    if (depmap->l_idx < nopencount
                        && list[depmap->l_idx] == depmap)
                      {
                        /* Yes, it is.  If it has a search list, make a
                           recursive call to handle this.  */
                        if (depmap->l_searchlist.r_list != NULL)
                          {
                            assert (new_opencount[depmap->l_idx] > 0);
                            if (--new_opencount[depmap->l_idx] == 0)
                              {
                                /* This one is now gone, too.  */
                                assert (depmap->l_type == lt_loaded);
                                mark_removed (depmap);
                              }
                          }
                        else
                          {
                            /* Otherwise we have to handle the dependency
                               deallocation here.  */
                            unsigned int k;
                            for (k = 0; depmap->l_initfini[k] != NULL; ++k)
                              {
                                struct link_map *rl = depmap->l_initfini[k];

                                if (rl->l_idx < nopencount
                                    && list[rl->l_idx] == rl)
                                  {
                                    assert (new_opencount[rl->l_idx] > 0);
                                    if (--new_opencount[rl->l_idx] == 0)
                                      {
                                        /* Another module to remove.  */
                                        assert (rl->l_type == lt_loaded);
                                        mark_removed (rl);
                                      }
                                  }
                                else
                                  {
                                    assert (rl->l_opencount > 0);
                                    if (--rl->l_opencount == 0)
                                      mark_removed (rl);
                                  }
                              }
                          }
                      }
                  }
              }
          }

        mark_removed (list[i]);
      }
  assert (new_opencount[0] == 0);
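
  /* Run the destructors before anything is actually unmapped so that a
     finalizer can still safely call into its (not yet unloaded)
     dependencies.  */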
  /* Call all termination functions at once.  */
  for (i = 0; list[i] != NULL; ++i)
    {
      struct link_map *imap = list[i];
      if (new_opencount[i] == 0 && imap->l_type == lt_loaded
          && (imap->l_flags_1 & DF_1_NODELETE) == 0)
        {
          /* When debugging print a message first.  */
          if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS, 0))
            GLRO(dl_debug_printf) ("\ncalling fini: %s [%lu]\n\n",
                                   imap->l_name, imap->l_ns);

          /* Call its termination function.  Do not do it for
             half-cooked objects.  */
          if (imap->l_init_called)
            {
              if (imap->l_info[DT_FINI_ARRAY] != NULL)
                {
                  ElfW(Addr) *array =
                    (ElfW(Addr) *) (imap->l_addr
                                    + imap->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
                  unsigned int sz = (imap->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
                                     / sizeof (ElfW(Addr)));

                  while (sz-- > 0)
                    ((fini_t) array[sz]) ();
                }
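              /* Note: the DT_FINI_ARRAY entries are invoked in reverse
                 order of their appearance, mirroring the init array, and
                 only afterwards is the old-style DT_FINI routine run.  */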
              /* Next try the old-style destructor.  */
              if (imap->l_info[DT_FINI] != NULL)
                (*(void (*) (void)) DL_DT_FINI_ADDRESS
                 (imap, ((void *) imap->l_addr
                         + imap->l_info[DT_FINI]->d_un.d_ptr))) ();
            }
          /* This object must not be used anymore.  We must remove the
             reference from the scope.  */
          unsigned int j;
          struct link_map **searchlist = map->l_searchlist.r_list;
          unsigned int nsearchlist = map->l_searchlist.r_nlist;

          for (j = 0; j < nsearchlist; ++j)
            if (imap == searchlist[j])
              {
                /* This is the object to remove.  Copy all the
                   following ones.  */
                while (++j < nsearchlist)
                  searchlist[j - 1] = searchlist[j];

                searchlist[j - 1] = NULL;

                --map->l_searchlist.r_nlist;

                break;
              }
        }
      else if (new_opencount[i] != 0 && imap->l_type == lt_loaded
               && imap->l_searchlist.r_list == NULL
               && imap->l_initfini != NULL)
        {
          /* The object is still used.  But the object we are
             unloading right now is responsible for loading it.  If
             the current object does not have its own scope yet we
             have to create one.  This has to be done before running
             the finalizers.

             To do this count the number of dependencies.  */
          unsigned int cnt;
          for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
            if (imap->l_initfini[cnt]->l_idx >= i
                && imap->l_initfini[cnt]->l_idx < nopencount)
              ++new_opencount[imap->l_initfini[cnt]->l_idx];
            else
              ++imap->l_initfini[cnt]->l_opencount;

          /* We simply reuse the l_initfini list.  */
          imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
          imap->l_searchlist.r_nlist = cnt;

          for (cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
            if (imap->l_scope[cnt] == &map->l_searchlist)
              {
                imap->l_scope[cnt] = &imap->l_searchlist;
                break;
              }
        }
      /* Store the new l_opencount value.  */
      imap->l_opencount = new_opencount[i];

      /* Just a sanity check.  */
      assert (imap->l_type == lt_loaded || imap->l_opencount > 0);
    }

  /* Notify the debugger we are about to remove some loaded objects.  */
  _r_debug.r_state = RT_DELETE;
  GLRO(dl_debug_state) ();

#ifdef USE_TLS
  size_t tls_free_start;
  size_t tls_free_end;
  tls_free_start = tls_free_end = NO_TLS_OFFSET;
#endif
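
  /* Second pass: everything whose reference count dropped to zero is
     now actually removed.  Along the way we also give back the static
     TLS space and the dtv slotinfo entries the unloaded objects were
     using.  */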
  /* Check each element of the search list to see if all references to
     it are gone.  */
  for (i = 0; list[i] != NULL; ++i)
    {
      struct link_map *imap = list[i];
      if (imap->l_opencount == 0 && imap->l_type == lt_loaded)
        {
          struct libname_list *lnp;

          /* That was the last reference, and this was a dlopen-loaded
             object.  We can unmap it.  */
          if (__builtin_expect (imap->l_global, 0))
            {
              /* This object is in the global scope list.  Remove it.  */
              unsigned int cnt
                = GL(dl_ns)[imap->l_ns]._ns_main_searchlist->r_nlist;

              do
                --cnt;
              while (GL(dl_ns)[imap->l_ns]._ns_main_searchlist->r_list[cnt]
                     != imap);

              /* The object was already correctly registered.  */
              while (++cnt
                     < GL(dl_ns)[imap->l_ns]._ns_main_searchlist->r_nlist)
                GL(dl_ns)[imap->l_ns]._ns_main_searchlist->r_list[cnt - 1]
                  = GL(dl_ns)[imap->l_ns]._ns_main_searchlist->r_list[cnt];

              --GL(dl_ns)[imap->l_ns]._ns_main_searchlist->r_nlist;
            }
#ifdef USE_TLS
          /* Remove the object from the dtv slotinfo array if it uses TLS.  */
          if (__builtin_expect (imap->l_tls_blocksize > 0, 0))
            {
              any_tls = true;

              if (! remove_slotinfo (imap->l_tls_modid,
                                     GL(dl_tls_dtv_slotinfo_list), 0,
                                     imap->l_init_called))
                /* All dynamically loaded modules with TLS are unloaded.  */
                GL(dl_tls_max_dtv_idx) = GL(dl_tls_static_nelem);

              if (imap->l_tls_offset != NO_TLS_OFFSET)
                {
                  /* Collect a contiguous chunk built from the objects in
                     this search list, going in either direction.  When the
                     whole chunk is at the end of the used area then we can
                     reclaim it.  */
# if TLS_TCB_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET
                      || (size_t) imap->l_tls_offset == tls_free_start)
                    {
                      /* Extend the contiguous chunk being reclaimed.  */
                      tls_free_start
                        = imap->l_tls_offset - imap->l_tls_blocksize;

                      if (tls_free_end == NO_TLS_OFFSET)
                        tls_free_end = imap->l_tls_offset;
                    }
                  else if (imap->l_tls_offset - imap->l_tls_blocksize
                           == tls_free_end)
                    /* Extend the chunk backwards.  */
                    tls_free_end = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked unless we can free
                         one block right away.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        {
                          GL(dl_tls_static_used) = tls_free_start;
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                      else if ((size_t) imap->l_tls_offset
                               == GL(dl_tls_static_used))
                        GL(dl_tls_static_used)
                          = imap->l_tls_offset - imap->l_tls_blocksize;
                      else if (tls_free_end < (size_t) imap->l_tls_offset)
                        {
                          /* We pick the later block.  It has a chance to
                             be freed.  */
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                    }
# elif TLS_DTV_AT_TP
                  if ((size_t) imap->l_tls_offset == tls_free_end)
                    /* Extend the contiguous chunk being reclaimed.  */
                    tls_free_end -= imap->l_tls_blocksize;
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == tls_free_start)
                    /* Extend the chunk backwards.  */
                    tls_free_start = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        GL(dl_tls_static_used) = tls_free_start;
                      tls_free_start = imap->l_tls_offset;
                      tls_free_end = tls_free_start + imap->l_tls_blocksize;
                    }
# else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif
                }
            }
#endif
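
          /* The two layouts above differ in which way a module's block
             extends from l_tls_offset: with TLS_TCB_AT_TP the static TLS
             blocks sit below the thread pointer, so a block covers
             [l_tls_offset - l_tls_blocksize, l_tls_offset); with
             TLS_DTV_AT_TP they grow upwards, covering
             [l_tls_offset, l_tls_offset + l_tls_blocksize).  */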
          /* We can unmap all the maps at once.  We determined the
             start address and length when we loaded the object and
             the `munmap' call does the rest.  */
          DL_UNMAP (imap);

          /* Finally, unlink the data structure and free it.  */
          if (imap->l_prev != NULL)
            imap->l_prev->l_next = imap->l_next;
          else
            {
#ifdef SHARED
              assert (imap->l_ns != LM_ID_BASE);
#endif
              GL(dl_ns)[imap->l_ns]._ns_loaded = imap->l_next;
            }

          --GL(dl_ns)[imap->l_ns]._ns_nloaded;
          if (imap->l_next != NULL)
            imap->l_next->l_prev = imap->l_prev;

          free (imap->l_versions);
          if (imap->l_origin != (char *) -1)
            free ((char *) imap->l_origin);
          /* If the object has relocation dependencies save this
             information for later.  */
          if (__builtin_expect (imap->l_reldeps != NULL, 0))
            {
              struct reldep_list *newrel;

              newrel = (struct reldep_list *) alloca (sizeof (*reldeps)
                                                      + (imap->l_reldepsact
                                                         * sizeof (bool)));
              newrel->rellist = imap->l_reldeps;
              newrel->nrellist = imap->l_reldepsact;
              newrel->next = reldeps;

              newrel->nhandled = imap->l_reldepsact;

              unsigned int j;
              for (j = 0; j < imap->l_reldepsact; ++j)
                {
                  /* Find out whether this object is in our list.  */
                  if (imap->l_reldeps[j]->l_idx < nopencount
                      && list[imap->l_reldeps[j]->l_idx] == imap->l_reldeps[j])
                    /* Yes, it is.  */
                    newrel->handled[j] = true;
                  else
                    newrel->handled[j] = false;
                }

              reldeps = newrel;
            }
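
          /* Note that newrel->rellist keeps the l_reldeps array itself
             alive past this point; the objects recorded there (minus the
             ones already on our own dependency list, marked "handled")
             are closed in a final pass once this loop is done.  */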
          /* This name always is allocated.  */
          free (imap->l_name);
          /* Remove the list with all the names of the shared object.  */
          lnp = imap->l_libname;
          do
            {
              struct libname_list *this = lnp;
              lnp = lnp->next;
              if (!this->dont_free)
                free (this);
            }
          while (lnp != NULL);

          /* Remove the searchlists.  */
          if (imap != map)
            free (imap->l_initfini);

          /* Remove the scope array if we allocated it.  */
          if (imap->l_scope != imap->l_scope_mem)
            free (imap->l_scope);

          if (imap->l_phdr_allocated)
            free ((void *) imap->l_phdr);

          if (imap->l_rpath_dirs.dirs != (void *) -1)
            free (imap->l_rpath_dirs.dirs);
          if (imap->l_runpath_dirs.dirs != (void *) -1)
            free (imap->l_runpath_dirs.dirs);

          free (imap);
        }
    }
#ifdef USE_TLS
  /* If we removed any object which uses TLS bump the generation counter.  */
  if (any_tls)
    {
      if (__builtin_expect (++GL(dl_tls_generation) == 0, 0))
        __libc_fatal (_("TLS generation counter wrapped!  Please report as described in <http://www.gnu.org/software/libc/bugs.html>."));

      if (tls_free_end == GL(dl_tls_static_used))
        GL(dl_tls_static_used) = tls_free_start;
    }
#endif
  /* Notify the debugger those objects are finalized and gone.  */
  _r_debug.r_state = RT_CONSISTENT;
  GLRO(dl_debug_state) ();

  /* Now we can perhaps also remove the modules for which we had
     dependencies because of symbol lookup.  */
  while (__builtin_expect (reldeps != NULL, 0))
    {
      while (reldeps->nrellist-- > 0)
        /* Some of the relocation dependencies might be on the
           dependency list of the object we are closing right now.
           They were already handled.  Do not close them again.  */
        if (reldeps->nrellist < reldeps->nhandled
            && ! reldeps->handled[reldeps->nrellist])
          _dl_close (reldeps->rellist[reldeps->nrellist]);

      free (reldeps->rellist);

      reldeps = reldeps->next;
    }

  free (list);

  /* Release the lock.  */
  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}
libc_hidden_def (_dl_close)
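
/* The remaining functions are registered with the `__libc_freeres'
   mechanism (used by memory-debugging tools such as mtrace or valgrind)
   to release memory the dynamic linker still holds when the process
   exits.  */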

#ifdef USE_TLS
static bool __libc_freeres_fn_section
free_slotinfo (struct dtv_slotinfo_list **elemp)
{
  size_t cnt;

  if (*elemp == NULL)
    /* Nothing here, all is removed (or there never was anything).  */
    return true;

  if (!free_slotinfo (&(*elemp)->next))
    /* We cannot free the entry.  */
    return false;

  /* That cleared our next pointer for us.  */

  for (cnt = 0; cnt < (*elemp)->len; ++cnt)
    if ((*elemp)->slotinfo[cnt].map != NULL)
      /* Still used.  */
      return false;

  /* We can remove the list element.  */
  free (*elemp);
  *elemp = NULL;

  return true;
}
#endif

libc_freeres_fn (free_mem)
{
  for (Lmid_t ns = 0; ns < DL_NNS; ++ns)
    if (__builtin_expect (GL(dl_ns)[ns]._ns_global_scope_alloc, 0) != 0
        && (GL(dl_ns)[ns]._ns_main_searchlist->r_nlist
            // XXX Check whether we need NS-specific initial_searchlist
            == GLRO(dl_initial_searchlist).r_nlist))
      {
        /* All objects dynamically loaded by the program are unloaded.  Free
           the memory allocated for the global scope variable.  */
        struct link_map **old = GL(dl_ns)[ns]._ns_main_searchlist->r_list;

        /* Put the old map in.  */
        GL(dl_ns)[ns]._ns_main_searchlist->r_list
          // XXX Check whether we need NS-specific initial_searchlist
          = GLRO(dl_initial_searchlist).r_list;
        /* Signal that the original map is used.  */
        GL(dl_ns)[ns]._ns_global_scope_alloc = 0;

        /* Now free the old map.  */
        free (old);
      }

#ifdef USE_TLS
  if (USE___THREAD || GL(dl_tls_dtv_slotinfo_list) != NULL)
    {
      /* Free the memory allocated for the dtv slotinfo array.  We can do
         this only if all modules which used this memory are unloaded.  */
# ifdef SHARED
      if (GL(dl_initial_dtv) == NULL)
        /* There was no initial TLS setup; it was set up later using the
           normal malloc.  */
        free_slotinfo (&GL(dl_tls_dtv_slotinfo_list));
      else
# endif
        /* The first element of the list does not have to be deallocated.
           It was allocated in the dynamic linker (i.e., with a different
           malloc), and in the static library it's in .bss space.  */
        free_slotinfo (&GL(dl_tls_dtv_slotinfo_list)->next);
    }
#endif
}