elf/dl-close.c
/* Close a shared object opened by `_dl_open'.
   Copyright (C) 1996-2002, 2003, 2004 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <assert.h>
#include <dlfcn.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <bits/libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>

/* Type of the destructor functions.  */
typedef void (*fini_t) (void);
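
/* remove_slotinfo below marks the dtv slotinfo entry of the module being
   unloaded as unused and, if that module held the highest in-use index,
   walks backwards to find the new value for GL(dl_tls_max_dtv_idx).  */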
#ifdef USE_TLS
/* Returns true if a non-empty entry was found.  */
static bool
remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
                 bool should_be_there)
{
  if (idx - disp >= listp->len)
    {
      if (listp->next == NULL)
        {
          /* The index is not actually valid in the slotinfo list,
             because this object was closed before it was fully set
             up due to some error.  */
          assert (! should_be_there);
        }
      else
        {
          if (remove_slotinfo (idx, listp->next, disp + listp->len,
                               should_be_there))
            return true;

          /* No non-empty entry.  Search from the end of this element's
             slotinfo array.  */
          idx = disp + listp->len;
        }
    }
  else
    {
      struct link_map *old_map = listp->slotinfo[idx - disp].map;

      /* The entry might still be in its unused state if we are closing an
         object that wasn't fully set up.  */
      if (__builtin_expect (old_map != NULL, 1))
        {
          assert (old_map->l_tls_modid == idx);

          /* Mark the entry as unused.  */
          listp->slotinfo[idx - disp].gen = GL(dl_tls_generation) + 1;
          listp->slotinfo[idx - disp].map = NULL;
        }

      /* If this is not the last currently used entry no need to look
         further.  */
      if (idx != GL(dl_tls_max_dtv_idx))
        return true;
    }

  while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
    {
      --idx;

      if (listp->slotinfo[idx - disp].map != NULL)
        {
          /* Found a new last used index.  */
          GL(dl_tls_max_dtv_idx) = idx;
          return true;
        }
    }

  /* No non-empty entry in this list element.  */
  return false;
}
#endif
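
/* _dl_close is the dynamic linker side of dlclose: it drops one reference
   from MAP, and if that was the last one it runs the destructors of MAP and
   of every dependency that becomes unused, removes them from the scopes and
   the list of loaded objects, releases their TLS, and unmaps them.  */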
void
internal_function
_dl_close (void *_map)
{
  struct reldep_list
  {
    struct link_map **rellist;
    unsigned int nrellist;
    unsigned int nhandled;
    struct reldep_list *next;
    bool handled[0];
  } *reldeps = NULL;
  struct link_map **list;
  struct link_map *map = _map;
  unsigned int i;
  unsigned int *new_opencount;
#ifdef USE_TLS
  bool any_tls = false;
#endif

  /* First see whether we can remove the object at all.  */
  if (__builtin_expect (map->l_flags_1 & DF_1_NODELETE, 0)
      && map->l_init_called)
    /* Nope.  Do nothing.  */
    return;

  if (__builtin_expect (map->l_opencount, 1) == 0)
    GLRO(dl_signal_error) (0, map->l_name, NULL, N_("shared object not open"));

  /* Acquire the lock.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  /* Decrement the reference count.  */
  if (map->l_opencount > 1 || map->l_type != lt_loaded)
    {
      /* There are still references to this object.  Do nothing more.  */
      if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
        GLRO(dl_debug_printf) ("\nclosing file=%s; opencount == %u\n",
                               map->l_name, map->l_opencount);

      /* Decrement the object's reference counter, not the dependencies'.  */
      --map->l_opencount;

      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      return;
    }

  list = map->l_initfini;

  /* Compute the new l_opencount values.  */
  i = map->l_searchlist.r_nlist;
  if (__builtin_expect (i == 0, 0))
    /* This can happen if we handle relocation dependencies for an
       object which wasn't loaded directly.  */
    for (i = 1; list[i] != NULL; ++i)
      ;

  unsigned int nopencount = i;
  new_opencount = (unsigned int *) alloca (i * sizeof (unsigned int));
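
  /* Walk MAP's dependency list and compute the reference count every object
     will have once this reference to MAP is dropped.  Objects whose count
     reaches zero are marked for removal, together with the dependencies and
     relocation dependencies they in turn were keeping alive.  */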
  for (i = 0; list[i] != NULL; ++i)
    {
      list[i]->l_idx = i;
      new_opencount[i] = list[i]->l_opencount;
    }
  --new_opencount[0];
  for (i = 1; list[i] != NULL; ++i)
    if ((list[i]->l_flags_1 & DF_1_NODELETE) == 0
        /* Decrement counter.  */
        && --new_opencount[i] == 0)
      {
        void mark_removed (struct link_map *remmap)
        {
          /* Test whether this object was also loaded directly.  */
          if (remmap->l_searchlist.r_list != NULL)
            {
              /* In this case we have to decrement all the dependencies of
                 this object.  They are all in MAP's dependency list.  */
              unsigned int j;
              struct link_map **dep_list = remmap->l_searchlist.r_list;

              for (j = 1; j < remmap->l_searchlist.r_nlist; ++j)
                if (! (dep_list[j]->l_flags_1 & DF_1_NODELETE)
                    || ! dep_list[j]->l_init_called)
                  {
                    assert (dep_list[j]->l_idx < map->l_searchlist.r_nlist);
                    if (--new_opencount[dep_list[j]->l_idx] == 0)
                      {
                        assert (dep_list[j]->l_type == lt_loaded);
                        mark_removed (dep_list[j]);
                      }
                  }
            }

          if (remmap->l_reldeps != NULL)
            {
              unsigned int j;
              for (j = 0; j < remmap->l_reldepsact; ++j)
                {
                  /* Find out whether this object is in our list.  */
                  if (remmap->l_reldeps[j]->l_idx < nopencount
                      && (list[remmap->l_reldeps[j]->l_idx]
                          == remmap->l_reldeps[j]))
                    /* Yes, it is.  */
                    if (--new_opencount[remmap->l_reldeps[j]->l_idx] == 0)
                      {
                        /* This one is now gone, too.  */
                        assert (remmap->l_reldeps[j]->l_type == lt_loaded);
                        mark_removed (remmap->l_reldeps[j]);
                      }
                }
            }
        }

        mark_removed (list[i]);
      }
  assert (new_opencount[0] == 0);
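
  /* Now run the destructors of all objects that will go away, and for
     objects which stay but whose scope we are about to destroy build a
     replacement scope out of their own dependency list.  */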
  /* Call all termination functions at once.  */
  for (i = 0; list[i] != NULL; ++i)
    {
      struct link_map *imap = list[i];
      if (new_opencount[i] == 0 && imap->l_type == lt_loaded
          && (imap->l_flags_1 & DF_1_NODELETE) == 0)
        {
          /* When debugging print a message first.  */
          if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS, 0))
            GLRO(dl_debug_printf) ("\ncalling fini: %s\n\n", imap->l_name);

          /* Call its termination function.  Do not do it for
             half-cooked objects.  */
          if (imap->l_init_called)
            {
              if (imap->l_info[DT_FINI_ARRAY] != NULL)
                {
                  ElfW(Addr) *array =
                    (ElfW(Addr) *) (imap->l_addr
                                    + imap->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
                  unsigned int sz = (imap->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
                                     / sizeof (ElfW(Addr)));

                  while (sz-- > 0)
                    ((fini_t) array[sz]) ();
                }

              /* Next try the old-style destructor.  */
              if (imap->l_info[DT_FINI] != NULL)
                (*(void (*) (void)) DL_DT_FINI_ADDRESS
                 (imap, ((void *) imap->l_addr
                         + imap->l_info[DT_FINI]->d_un.d_ptr))) ();
            }

          /* This object must not be used anymore.  We must remove the
             reference from the scope.  */
          unsigned int j;
          struct link_map **searchlist = map->l_searchlist.r_list;
          unsigned int nsearchlist = map->l_searchlist.r_nlist;

#ifndef NDEBUG
          bool found = false;
#endif
          for (j = 0; j < nsearchlist; ++j)
            if (imap == searchlist[j])
              {
                /* This is the object to remove.  Copy all the
                   following ones.  */
                while (++j < nsearchlist)
                  searchlist[j - 1] = searchlist[j];

                searchlist[j - 1] = NULL;

                --map->l_searchlist.r_nlist;

#ifndef NDEBUG
                found = true;
#endif
                break;
              }
          assert (found);
        }
      else if (new_opencount[i] != 0 && imap->l_type == lt_loaded
               && imap->l_searchlist.r_list == NULL
               && imap->l_initfini != NULL)
        {
          /* The object is still used.  But the object we are
             unloading right now is responsible for loading it.  If
             the current object does not have its own scope yet we
             have to create one.  This has to be done before running
             the finalizers.

             To do this count the number of dependencies.  */
          unsigned int cnt;
          for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
            if (imap->l_initfini[cnt]->l_idx >= i
                && imap->l_initfini[cnt]->l_idx < nopencount)
              ++new_opencount[imap->l_initfini[cnt]->l_idx];
            else
              ++imap->l_initfini[cnt]->l_opencount;

          /* We simply reuse the l_initfini list.  */
          imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
          imap->l_searchlist.r_nlist = cnt;

          for (cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
            if (imap->l_scope[cnt] == &map->l_searchlist)
              {
                imap->l_scope[cnt] = &imap->l_searchlist;
                break;
              }
        }

      /* Store the new l_opencount value.  */
      imap->l_opencount = new_opencount[i];

      /* Just a sanity check.  */
      assert (imap->l_type == lt_loaded || imap->l_opencount > 0);
    }

  /* Notify the debugger we are about to remove some loaded objects.  */
  _r_debug.r_state = RT_DELETE;
  GLRO(dl_debug_state) ();

#ifdef USE_TLS
  size_t tls_free_start;
  size_t tls_free_end;
  tls_free_start = tls_free_end = NO_TLS_OFFSET;
#endif
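
  /* In the pass below, TLS_FREE_START and TLS_FREE_END accumulate one
     contiguous range of static TLS space belonging to unloaded objects; if
     that range ends up at the top of the area currently in use it is given
     back by lowering GL(dl_tls_static_used).  */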

  /* Check each element of the search list to see if all references to
     it are gone.  */
  for (i = 0; list[i] != NULL; ++i)
    {
      struct link_map *imap = list[i];
      if (imap->l_opencount == 0 && imap->l_type == lt_loaded)
        {
          struct libname_list *lnp;

          /* That was the last reference, and this was a dlopen-loaded
             object.  We can unmap it.  */
          if (__builtin_expect (imap->l_global, 0))
            {
              /* This object is in the global scope list.  Remove it.  */
              unsigned int cnt = GL(dl_main_searchlist)->r_nlist;

              do
                --cnt;
              while (GL(dl_main_searchlist)->r_list[cnt] != imap);

              /* The object was already correctly registered.  */
              while (++cnt < GL(dl_main_searchlist)->r_nlist)
                GL(dl_main_searchlist)->r_list[cnt - 1]
                  = GL(dl_main_searchlist)->r_list[cnt];

              --GL(dl_main_searchlist)->r_nlist;
            }

#ifdef USE_TLS
          /* Remove the object from the dtv slotinfo array if it uses TLS.  */
          if (__builtin_expect (imap->l_tls_blocksize > 0, 0))
            {
              any_tls = true;

              if (! remove_slotinfo (imap->l_tls_modid,
                                     GL(dl_tls_dtv_slotinfo_list), 0,
                                     imap->l_init_called))
                /* All dynamically loaded modules with TLS are unloaded.  */
                GL(dl_tls_max_dtv_idx) = GL(dl_tls_static_nelem);

              if (imap->l_tls_offset != NO_TLS_OFFSET)
                {
                  /* Collect a contiguous chunk built from the objects in
                     this search list, going in either direction.  When the
                     whole chunk is at the end of the used area then we can
                     reclaim it.  */
# if TLS_TCB_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET
                      || (size_t) imap->l_tls_offset == tls_free_start)
                    {
                      /* Extend the contiguous chunk being reclaimed.  */
                      tls_free_start
                        = imap->l_tls_offset - imap->l_tls_blocksize;

                      if (tls_free_end == NO_TLS_OFFSET)
                        tls_free_end = imap->l_tls_offset;
                    }
                  else if (imap->l_tls_offset - imap->l_tls_blocksize
                           == tls_free_end)
                    /* Extend the chunk backwards.  */
                    tls_free_end = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked unless we can free
                         one block right away.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        {
                          GL(dl_tls_static_used) = tls_free_start;
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                      else if ((size_t) imap->l_tls_offset
                               == GL(dl_tls_static_used))
                        GL(dl_tls_static_used)
                          = imap->l_tls_offset - imap->l_tls_blocksize;
                      else if (tls_free_end < (size_t) imap->l_tls_offset)
                        {
                          /* We pick the later block.  It has a chance to
                             be freed.  */
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                    }
# elif TLS_DTV_AT_TP
                  if ((size_t) imap->l_tls_offset == tls_free_end)
                    /* Extend the contiguous chunk being reclaimed.  */
                    tls_free_end -= imap->l_tls_blocksize;
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == tls_free_start)
                    /* Extend the chunk backwards.  */
                    tls_free_start = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        GL(dl_tls_static_used) = tls_free_start;
                      tls_free_start = imap->l_tls_offset;
                      tls_free_end = tls_free_start + imap->l_tls_blocksize;
                    }
# else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif
                }
            }
#endif

          /* We can unmap all the maps at once.  We determined the
             start address and length when we loaded the object and
             the `munmap' call does the rest.  */
          DL_UNMAP (imap);

          /* Finally, unlink the data structure and free it.  */
#ifdef SHARED
          /* We will unlink the first object only if this is a statically
             linked program.  */
          assert (imap->l_prev != NULL);
          imap->l_prev->l_next = imap->l_next;
#else
          if (imap->l_prev != NULL)
            imap->l_prev->l_next = imap->l_next;
          else
            GL(dl_loaded) = imap->l_next;
#endif
          --GL(dl_nloaded);
          if (imap->l_next)
            imap->l_next->l_prev = imap->l_prev;

          free (imap->l_versions);
          if (imap->l_origin != (char *) -1)
            free ((char *) imap->l_origin);

          /* If the object has relocation dependencies, save this
             information for later.  */
          if (__builtin_expect (imap->l_reldeps != NULL, 0))
            {
              struct reldep_list *newrel;

              newrel = (struct reldep_list *) alloca (sizeof (*reldeps)
                                                      + (imap->l_reldepsact
                                                         * sizeof (bool)));
              newrel->rellist = imap->l_reldeps;
              newrel->nrellist = imap->l_reldepsact;
              newrel->next = reldeps;

              newrel->nhandled = imap->l_reldepsact;
              unsigned int j;
              for (j = 0; j < imap->l_reldepsact; ++j)
                {
                  /* Find out whether this object is in our list.  */
                  if (imap->l_reldeps[j]->l_idx < nopencount
                      && list[imap->l_reldeps[j]->l_idx] == imap->l_reldeps[j])
                    /* Yes, it is.  */
                    newrel->handled[j] = true;
                  else
                    newrel->handled[j] = false;
                }

              reldeps = newrel;
            }

          /* This name is always allocated.  */
          free (imap->l_name);
          /* Remove the list with all the names of the shared object.  */
          lnp = imap->l_libname;
          do
            {
              struct libname_list *this = lnp;
              lnp = lnp->next;
              if (!this->dont_free)
                free (this);
            }
          while (lnp != NULL);

          /* Remove the searchlists.  */
          if (imap != map)
            free (imap->l_initfini);

          /* Remove the scope array if we allocated it.  */
          if (imap->l_scope != imap->l_scope_mem)
            free (imap->l_scope);

          if (imap->l_phdr_allocated)
            free ((void *) imap->l_phdr);

          if (imap->l_rpath_dirs.dirs != (void *) -1)
            free (imap->l_rpath_dirs.dirs);
          if (imap->l_runpath_dirs.dirs != (void *) -1)
            free (imap->l_runpath_dirs.dirs);

          free (imap);
        }
    }

#ifdef USE_TLS
  /* If we removed any object which uses TLS bump the generation counter.  */
  if (any_tls)
    {
      if (__builtin_expect (++GL(dl_tls_generation) == 0, 0))
        __libc_fatal (_("TLS generation counter wrapped!  Please send report with the 'glibcbug' script."));

      if (tls_free_end == GL(dl_tls_static_used))
        GL(dl_tls_static_used) = tls_free_start;
    }
#endif

  /* Notify the debugger that those objects are finalized and gone.  */
  _r_debug.r_state = RT_CONSISTENT;
  GLRO(dl_debug_state) ();

  /* Now we can perhaps also remove the modules for which we had
     dependencies because of symbol lookup.  */
  while (__builtin_expect (reldeps != NULL, 0))
    {
      while (reldeps->nrellist-- > 0)
        /* Some of the relocation dependencies might be on the
           dependency list of the object we are closing right now.
           They were already handled.  Do not close them again.  */
        if (reldeps->nrellist < reldeps->nhandled
            && ! reldeps->handled[reldeps->nrellist])
          _dl_close (reldeps->rellist[reldeps->nrellist]);

      free (reldeps->rellist);

      reldeps = reldeps->next;
    }

  free (list);

  /* Release the lock.  */
  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}
libc_hidden_def (_dl_close)
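
/* _dl_close is not called directly by user code; the public entry point is
   dlclose (), which (roughly) forwards the handle it got from dlopen () to
   this function.  A minimal usage sketch, with error handling omitted and
   "libfoo.so" standing in for any real library name:

     void *handle = dlopen ("libfoo.so", RTLD_NOW);
     ... use dlsym (handle, ...) ...
     dlclose (handle);    // eventually reaches _dl_close (handle)

   The handle is the `struct link_map *' that _dl_open returned.  */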

#ifdef USE_TLS
static bool __libc_freeres_fn_section
free_slotinfo (struct dtv_slotinfo_list **elemp)
{
  size_t cnt;

  if (*elemp == NULL)
    /* Nothing here, all is removed (or there never was anything).  */
    return true;

  if (!free_slotinfo (&(*elemp)->next))
    /* We cannot free the entry.  */
    return false;

  /* That cleared our next pointer for us.  */

  for (cnt = 0; cnt < (*elemp)->len; ++cnt)
    if ((*elemp)->slotinfo[cnt].map != NULL)
      /* Still used.  */
      return false;

  /* We can remove the list element.  */
  free (*elemp);
  *elemp = NULL;

  return true;
}
#endif
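
/* free_mem is registered with libc_freeres_fn, so it runs from
   __libc_freeres when a memory-debugging tool asks libc to release its
   internal allocations.  It returns the global scope array and, when no
   dynamically loaded TLS module is left, the dtv slotinfo list.  */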
libc_freeres_fn (free_mem)
{
  if (__builtin_expect (GL(dl_global_scope_alloc), 0) != 0
      && (GL(dl_main_searchlist)->r_nlist
          == GLRO(dl_initial_searchlist).r_nlist))
    {
      /* All objects dynamically loaded by the program are unloaded.  Free
         the memory allocated for the global scope variable.  */
      struct link_map **old = GL(dl_main_searchlist)->r_list;

      /* Put the old map in.  */
      GL(dl_main_searchlist)->r_list = GLRO(dl_initial_searchlist).r_list;
      /* Signal that the original map is used.  */
      GL(dl_global_scope_alloc) = 0;

      /* Now free the old map.  */
      free (old);
    }

#ifdef USE_TLS
  if (USE___THREAD || GL(dl_tls_dtv_slotinfo_list) != NULL)
    {
      /* Free the memory allocated for the dtv slotinfo array.  We can do
         this only if all modules which used this memory are unloaded.  */
# ifdef SHARED
      if (GL(dl_initial_dtv) == NULL)
        /* There was no initial TLS setup, it was set up later when
           it used the normal malloc.  */
        free_slotinfo (&GL(dl_tls_dtv_slotinfo_list));
      else
# endif
        /* The first element of the list does not have to be deallocated.
           It was allocated in the dynamic linker (i.e., with a different
           malloc), and in the static library it's in .bss space.  */
        free_slotinfo (&GL(dl_tls_dtv_slotinfo_list)->next);
    }
#endif
}