Updated to fedora-glibc-2_3-20051023T0123
[glibc.git] / elf / dl-close.c
/* Close a shared object opened by `_dl_open'.
   Copyright (C) 1996-2002, 2003, 2004 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
#include <assert.h>
#include <dlfcn.h>
#include <libintl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <bits/libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>
/* Type of the destructor functions.  */
typedef void (*fini_t) (void);
#ifdef USE_TLS
/* Returns true if a non-empty entry was found.  */
static bool
remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
                 bool should_be_there)
{
  if (idx - disp >= listp->len)
    {
      if (listp->next == NULL)
        {
          /* The index is not actually valid in the slotinfo list,
             because this object was closed before it was fully set
             up due to some error.  */
          assert (! should_be_there);
        }
      else
        {
          if (remove_slotinfo (idx, listp->next, disp + listp->len,
                               should_be_there))
            return true;

          /* No non-empty entry.  Search from the end of this element's
             slotinfo array.  */
          idx = disp + listp->len;
        }
    }
  else
    {
      struct link_map *old_map = listp->slotinfo[idx - disp].map;

      /* The entry might still be in its unused state if we are closing an
         object that wasn't fully set up.  */
      if (__builtin_expect (old_map != NULL, 1))
        {
          assert (old_map->l_tls_modid == idx);

          /* Mark the entry as unused.  */
          listp->slotinfo[idx - disp].gen = GL(dl_tls_generation) + 1;
          listp->slotinfo[idx - disp].map = NULL;
        }

      /* If this is not the last currently used entry no need to look
         further.  */
      if (idx != GL(dl_tls_max_dtv_idx))
        return true;
    }

  while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
    {
      --idx;

      if (listp->slotinfo[idx - disp].map != NULL)
        {
          /* Found a new last used index.  */
          GL(dl_tls_max_dtv_idx) = idx;
          return true;
        }
    }

  /* No non-empty entry in this list element.  */
  return false;
}
#endif
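
/* A sketch of the bookkeeping remove_slotinfo operates on (illustrative
   only; the authoritative definitions live in <ldsodefs.h>):

     struct dtv_slotinfo_list
     {
       size_t len;                      -- entries in this chunk
       struct dtv_slotinfo_list *next;  -- overflow chunk
       struct dtv_slotinfo
       {
         size_t gen;                    -- generation of last change
         struct link_map *map;          -- owning module, NULL if unused
       } slotinfo[];                    -- indexed by TLS module ID
     };

   Bumping `gen' past the current generation and clearing `map', as done
   above, forces every thread to recompute its dtv entry for that module
   ID on the next access instead of trusting a stale cached pointer.  */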
void
internal_function
_dl_close (void *_map)
{
  struct link_map *map = _map;
  unsigned int i;
  Lmid_t ns = map->l_ns;

  /* First see whether we can remove the object at all.  */
  if (__builtin_expect (map->l_flags_1 & DF_1_NODELETE, 0)
      && map->l_init_called)
    /* Nope.  Do nothing.  */
    return;

  if (__builtin_expect (map->l_direct_opencount, 1) == 0)
    GLRO(dl_signal_error) (0, map->l_name, NULL, N_("shared object not open"));

  /* Acquire the lock.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  /* One less direct use.  */
  --map->l_direct_opencount;

  /* If _dl_close is called recursively (some destructor calls dlclose),
     just record that the parent _dl_close will need to do garbage collection
     again and return.  */
  static enum { not_pending, pending, rerun } dl_close_state;

  if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
      || dl_close_state != not_pending)
    {
      if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
        dl_close_state = rerun;

      /* There are still references to this object.  Do nothing more.  */
      if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
        GLRO(dl_debug_printf) ("\nclosing file=%s; direct_opencount == %u\n",
                               map->l_name, map->l_direct_opencount);

      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      return;
    }

 retry:
  dl_close_state = pending;
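
  /* An illustrative walk through the state machine above: suppose the
     FINI pass below runs libA's destructor and that destructor calls
     dlclose on libB.  The nested _dl_close finds dl_close_state ==
     pending, so it only drops libB's direct_opencount, records `rerun',
     and returns.  When the outer pass is done, the check at the `out'
     label sends control back to `retry' so libB is collected too.  */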
#ifdef USE_TLS
  bool any_tls = false;
#endif
  const unsigned int nloaded = GL(dl_ns)[ns]._ns_nloaded;
  char used[nloaded];
  char done[nloaded];
  struct link_map *maps[nloaded];

  /* Run over the list and assign indexes to the link maps and enter
     them into the MAPS array.  */
  int idx = 0;
  for (struct link_map *l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
    {
      l->l_idx = idx;
      maps[idx] = l;
      ++idx;
    }
  assert (idx == nloaded);

  /* Prepare the bitmaps.  */
  memset (used, '\0', sizeof (used));
  memset (done, '\0', sizeof (done));

  /* Keep track of the lowest index link map we have covered already.  */
  int done_index = -1;
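
  /* The loop below is in effect the mark phase of a mark-and-sweep
     collection over this namespace: every object that is still opened
     directly, carries DF_1_NODELETE, or is reachable from such an
     object through l_initfini or l_reldeps ends up with used[] set.
     Rewinding done_index whenever a lower-numbered map is newly marked
     guarantees that map's own dependencies are (re)visited as well.  */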
  while (++done_index < nloaded)
    {
      struct link_map *l = maps[done_index];

      if (done[done_index])
        /* Already handled.  */
        continue;

      /* Check whether this object is still used.  */
      if (l->l_type == lt_loaded
          && l->l_direct_opencount == 0
          && (l->l_flags_1 & DF_1_NODELETE) == 0
          && !used[done_index])
        continue;

      /* We need this object and we handle it now.  */
      done[done_index] = 1;
      used[done_index] = 1;
      /* Signal the object is still needed.  */
      l->l_idx = -1;

      /* Mark all dependencies as used.  */
      if (l->l_initfini != NULL)
        {
          struct link_map **lp = &l->l_initfini[1];
          while (*lp != NULL)
            {
              if ((*lp)->l_idx != -1)
                {
                  assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);

                  if (!used[(*lp)->l_idx])
                    {
                      used[(*lp)->l_idx] = 1;
                      if ((*lp)->l_idx - 1 < done_index)
                        done_index = (*lp)->l_idx - 1;
                    }
                }

              ++lp;
            }
        }
      /* And the same for relocation dependencies.  */
      if (l->l_reldeps != NULL)
        for (unsigned int j = 0; j < l->l_reldepsact; ++j)
          {
            struct link_map *jmap = l->l_reldeps[j];

            if (jmap->l_idx != -1)
              {
                assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);

                if (!used[jmap->l_idx])
                  {
                    used[jmap->l_idx] = 1;
                    if (jmap->l_idx - 1 < done_index)
                      done_index = jmap->l_idx - 1;
                  }
              }
          }
    }
  /* Sort the entries.  */
  _dl_sort_fini (GL(dl_ns)[ns]._ns_loaded, maps, nloaded, used, ns);
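
  /* After sorting, MAPS is arranged so the destructors below run in
     reverse dependency order: each object is finalized before the
     objects it depends on, the mirror image of the constructor order
     used at load time.  */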
  bool unload_any = false;
  unsigned int first_loaded = ~0;
  for (i = 0; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];

      if (!used[i])
        {
          assert (imap->l_type == lt_loaded
                  && (imap->l_flags_1 & DF_1_NODELETE) == 0);

          /* Call its termination function.  Do not do it for
             half-cooked objects.  */
          if (imap->l_init_called)
            {
              /* When debugging print a message first.  */
              if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS, 0))
                GLRO(dl_debug_printf) ("\ncalling fini: %s [%lu]\n\n",
                                       imap->l_name, ns);

              if (imap->l_info[DT_FINI_ARRAY] != NULL)
                {
                  ElfW(Addr) *array =
                    (ElfW(Addr) *) (imap->l_addr
                                    + imap->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
                  unsigned int sz = (imap->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
                                     / sizeof (ElfW(Addr)));

                  while (sz-- > 0)
                    ((fini_t) array[sz]) ();
                }

              /* Next try the old-style destructor.  */
              if (imap->l_info[DT_FINI] != NULL)
                (*(void (*) (void)) DL_DT_FINI_ADDRESS
                 (imap, ((void *) imap->l_addr
                         + imap->l_info[DT_FINI]->d_un.d_ptr))) ();
            }

          /* This object must not be used anymore.  */
          imap->l_removed = 1;

          /* We indeed have an object to remove.  */
          unload_any = true;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
      /* Else used[i].  */
      else if (imap->l_type == lt_loaded)
        {
          if (imap->l_searchlist.r_list == NULL
              && imap->l_initfini != NULL)
            {
              /* The object is still used.  But one of the objects we are
                 unloading right now is responsible for loading it.  If
                 the current object does not have its own scope yet we
                 have to create one.  This has to be done before running
                 the finalizers.

                 To do this count the number of dependencies.  */
              unsigned int cnt;
              for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
                ;

              /* We simply reuse the l_initfini list.  */
              imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
              imap->l_searchlist.r_nlist = cnt;

              for (cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
                /* This relies on l_scope[] entries always being set either
                   to the map's own l_symbolic_searchlist address, or to some
                   other map's l_searchlist address.  */
                if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
                  {
                    struct link_map *tmap;

                    tmap = (struct link_map *) ((char *) imap->l_scope[cnt]
                                                - offsetof (struct link_map,
                                                            l_searchlist));
                    assert (tmap->l_ns == ns);
                    if (tmap->l_idx != -1)
                      {
                        imap->l_scope[cnt] = &imap->l_searchlist;
                        break;
                      }
                  }
            }

          /* The loader is gone, so mark the object as not having one.
             Note: l_idx != -1 -> object will be removed.  */
          if (imap->l_loader != NULL && imap->l_loader->l_idx != -1)
            imap->l_loader = NULL;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
    }
  /* If there are no objects to unload, do nothing further.  */
  if (!unload_any)
    goto out;

  /* Notify the debugger we are about to remove some loaded objects.  */
  _r_debug.r_state = RT_DELETE;
  GLRO(dl_debug_state) ();
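
  /* Per the r_debug protocol, the debugger has planted a breakpoint in
     the function reached through dl_debug_state (it found the address
     via _r_debug.r_brk), so the RT_DELETE/RT_CONSISTENT pair brackets
     the window in which the link-map list is inconsistent.  */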
#ifdef USE_TLS
  size_t tls_free_start;
  size_t tls_free_end;
  tls_free_start = tls_free_end = NO_TLS_OFFSET;
#endif

  /* Check each element of the search list to see if all references to
     it are gone.  */
  for (i = first_loaded; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];
      if (!used[i])
        {
          assert (imap->l_type == lt_loaded);

          /* That was the last reference, and this was a dlopen-loaded
             object.  We can unmap it.  */
          if (__builtin_expect (imap->l_global, 0))
            {
              /* This object is in the global scope list.  Remove it.  */
              unsigned int cnt
                = GL(dl_ns)[imap->l_ns]._ns_main_searchlist->r_nlist;

              do
                --cnt;
              while (GL(dl_ns)[imap->l_ns]._ns_main_searchlist->r_list[cnt]
                     != imap);

              /* The object was already correctly registered.  */
              while (++cnt
                     < GL(dl_ns)[imap->l_ns]._ns_main_searchlist->r_nlist)
                GL(dl_ns)[imap->l_ns]._ns_main_searchlist->r_list[cnt - 1]
                  = GL(dl_ns)[imap->l_ns]._ns_main_searchlist->r_list[cnt];

              --GL(dl_ns)[imap->l_ns]._ns_main_searchlist->r_nlist;
            }
#ifdef USE_TLS
          /* Remove the object from the dtv slotinfo array if it uses TLS.  */
          if (__builtin_expect (imap->l_tls_blocksize > 0, 0))
            {
              any_tls = true;

              if (! remove_slotinfo (imap->l_tls_modid,
                                     GL(dl_tls_dtv_slotinfo_list), 0,
                                     imap->l_init_called))
                /* All dynamically loaded modules with TLS are unloaded.  */
                GL(dl_tls_max_dtv_idx) = GL(dl_tls_static_nelem);

              if (imap->l_tls_offset != NO_TLS_OFFSET)
                {
                  /* Collect a contiguous chunk built from the objects in
                     this search list, going in either direction.  When the
                     whole chunk is at the end of the used area then we can
                     reclaim it.  */
# if TLS_TCB_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET
                      || (size_t) imap->l_tls_offset == tls_free_start)
                    {
                      /* Extend the contiguous chunk being reclaimed.  */
                      tls_free_start
                        = imap->l_tls_offset - imap->l_tls_blocksize;

                      if (tls_free_end == NO_TLS_OFFSET)
                        tls_free_end = imap->l_tls_offset;
                    }
                  else if (imap->l_tls_offset - imap->l_tls_blocksize
                           == tls_free_end)
                    /* Extend the chunk backwards.  */
                    tls_free_end = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked unless we can free
                         one block right away.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        {
                          GL(dl_tls_static_used) = tls_free_start;
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                      else if ((size_t) imap->l_tls_offset
                               == GL(dl_tls_static_used))
                        GL(dl_tls_static_used)
                          = imap->l_tls_offset - imap->l_tls_blocksize;
                      else if (tls_free_end < (size_t) imap->l_tls_offset)
                        {
                          /* We pick the later block.  It has a chance to
                             be freed.  */
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                    }
# elif TLS_DTV_AT_TP
                  if ((size_t) imap->l_tls_offset == tls_free_end)
                    /* Extend the contiguous chunk being reclaimed.  */
                    tls_free_end -= imap->l_tls_blocksize;
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == tls_free_start)
                    /* Extend the chunk backwards.  */
                    tls_free_start = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        GL(dl_tls_static_used) = tls_free_start;
                      tls_free_start = imap->l_tls_offset;
                      tls_free_end = tls_free_start + imap->l_tls_blocksize;
                    }
# else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif
                }
            }
#endif
          /* We can unmap all the maps at once.  We determined the
             start address and length when we loaded the object and
             the `munmap' call does the rest.  */
          DL_UNMAP (imap);

          /* Finally, unlink the data structure and free it.  */
          if (imap->l_prev != NULL)
            imap->l_prev->l_next = imap->l_next;
          else
            {
#ifdef SHARED
              assert (imap->l_ns != LM_ID_BASE);
#endif
              GL(dl_ns)[imap->l_ns]._ns_loaded = imap->l_next;
            }

          --GL(dl_ns)[imap->l_ns]._ns_nloaded;
          if (imap->l_next != NULL)
            imap->l_next->l_prev = imap->l_prev;

          free (imap->l_versions);
          if (imap->l_origin != (char *) -1)
            free ((char *) imap->l_origin);

          free (imap->l_reldeps);

          /* This name is always allocated.  */
          free (imap->l_name);

          /* Remove the list with all the names of the shared object.  */
          struct libname_list *lnp = imap->l_libname;
          do
            {
              struct libname_list *this = lnp;
              lnp = lnp->next;
              if (!this->dont_free)
                free (this);
            }
          while (lnp != NULL);

          /* Remove the searchlists.  */
          free (imap->l_initfini);

          /* Remove the scope array if we allocated it.  */
          if (imap->l_scope != imap->l_scope_mem)
            free (imap->l_scope);

          if (imap->l_phdr_allocated)
            free ((void *) imap->l_phdr);

          if (imap->l_rpath_dirs.dirs != (void *) -1)
            free (imap->l_rpath_dirs.dirs);
          if (imap->l_runpath_dirs.dirs != (void *) -1)
            free (imap->l_runpath_dirs.dirs);

          free (imap);
        }
    }
#ifdef USE_TLS
  /* If we removed any object which uses TLS bump the generation counter.  */
  if (any_tls)
    {
      if (__builtin_expect (++GL(dl_tls_generation) == 0, 0))
        __libc_fatal (_("TLS generation counter wrapped!  Please report as described in <http://www.gnu.org/software/libc/bugs.html>."));

      if (tls_free_end == GL(dl_tls_static_used))
        GL(dl_tls_static_used) = tls_free_start;
    }
#endif
  /* Notify the debugger those objects are finalized and gone.  */
  _r_debug.r_state = RT_CONSISTENT;
  GLRO(dl_debug_state) ();

  /* Recheck whether we need to retry; otherwise release the lock.  */
 out:
  if (dl_close_state == rerun)
    goto retry;

  dl_close_state = not_pending;
  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}
libc_hidden_def (_dl_close)
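
/* Usage sketch (illustrative): _dl_close is the engine behind dlclose,
   and l_direct_opencount is what gives dlopen its reference-counting
   semantics:

     void *h1 = dlopen ("libfoo.so", RTLD_NOW);
     void *h2 = dlopen ("libfoo.so", RTLD_NOW);  -- same map, count now 2
     dlclose (h1);  -- merely decrements l_direct_opencount
     dlclose (h2);  -- count reaches 0: FINI runs, mappings are released

   Objects marked DF_1_NODELETE, and objects still needed as a
   dependency of a surviving map, stay loaded even at count zero.  */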
#ifdef USE_TLS
static bool __libc_freeres_fn_section
free_slotinfo (struct dtv_slotinfo_list **elemp)
{
  size_t cnt;

  if (*elemp == NULL)
    /* Nothing here, all is removed (or there never was anything).  */
    return true;

  if (!free_slotinfo (&(*elemp)->next))
    /* We cannot free the entry.  */
    return false;

  /* That cleared our next pointer for us.  */

  for (cnt = 0; cnt < (*elemp)->len; ++cnt)
    if ((*elemp)->slotinfo[cnt].map != NULL)
      /* Still used.  */
      return false;

  /* We can remove the list element.  */
  free (*elemp);
  *elemp = NULL;

  return true;
}
#endif
libc_freeres_fn (free_mem)
{
  for (Lmid_t ns = 0; ns < DL_NNS; ++ns)
    if (__builtin_expect (GL(dl_ns)[ns]._ns_global_scope_alloc, 0) != 0
        && (GL(dl_ns)[ns]._ns_main_searchlist->r_nlist
            // XXX Check whether we need NS-specific initial_searchlist
            == GLRO(dl_initial_searchlist).r_nlist))
      {
        /* All objects dynamically loaded by the program are unloaded.  Free
           the memory allocated for the global scope variable.  */
        struct link_map **old = GL(dl_ns)[ns]._ns_main_searchlist->r_list;

        /* Put the old map in.  */
        GL(dl_ns)[ns]._ns_main_searchlist->r_list
          // XXX Check whether we need NS-specific initial_searchlist
          = GLRO(dl_initial_searchlist).r_list;
        /* Signal that the original map is used.  */
        GL(dl_ns)[ns]._ns_global_scope_alloc = 0;

        /* Now free the old map.  */
        free (old);
      }
#ifdef USE_TLS
  if (USE___THREAD || GL(dl_tls_dtv_slotinfo_list) != NULL)
    {
      /* Free the memory allocated for the dtv slotinfo array.  We can do
         this only if all modules which used this memory are unloaded.  */
# ifdef SHARED
      if (GL(dl_initial_dtv) == NULL)
        /* There was no initial TLS setup; it was set up later and
           used the normal malloc.  */
        free_slotinfo (&GL(dl_tls_dtv_slotinfo_list));
      else
# endif
        /* The first element of the list does not have to be deallocated.
           It was allocated in the dynamic linker (i.e., with a different
           malloc), and in the static library it's in .bss space.  */
        free_slotinfo (&GL(dl_tls_dtv_slotinfo_list)->next);
    }
#endif
}
#ifdef SHARED
#include "dl-fini.c"
#endif