[glibc.git] / elf / dl-close.c
/* Close a shared object opened by `_dl_open'.
   Copyright (C) 1996-2001, 2002 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <assert.h>
#include <dlfcn.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <bits/libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>

/* Type of the destructor (fini) functions.  */
typedef void (*fini_t) (void);
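
/* The dtv slotinfo table is kept as a linked list of fixed-size
   chunks.  remove_slotinfo below walks that list: IDX is the TLS
   module ID of the object being unloaded and DISP counts the slots
   covered by the chunks already skipped, so within the current chunk
   the entry for IDX lives at slotinfo[idx - disp].  For example, with
   chunks of 64 slots, module ID 70 would be found in the second chunk
   at offset 6.  (The chunk size here is only illustrative.)  */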
#ifdef USE_TLS
/* Returns true if a non-empty entry was found.  */
static bool
remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
		 bool should_be_there)
{
  if (idx - disp >= listp->len)
    {
      if (listp->next == NULL)
	{
	  /* The index is not actually valid in the slotinfo list,
	     because this object was closed before it was fully set
	     up due to some error.  */
	  assert (! should_be_there);
	}
      else
	{
	  if (remove_slotinfo (idx, listp->next, disp + listp->len,
			       should_be_there))
	    return true;

	  /* No non-empty entry.  Search from the end of this element's
	     slotinfo array.  */
	  idx = disp + listp->len;
	}
    }
  else
    {
      struct link_map *old_map = listp->slotinfo[idx - disp].map;

      /* The entry might still be in its unused state if we are closing an
	 object that wasn't fully set up.  */
      if (__builtin_expect (old_map != NULL, 1))
	{
	  assert (old_map->l_tls_modid == idx);

	  /* Mark the entry as unused.  */
	  listp->slotinfo[idx - disp].gen = GL(dl_tls_generation) + 1;
	  listp->slotinfo[idx - disp].map = NULL;
	}

      /* If this is not the last currently used entry no need to look
	 further.  */
      if (idx != GL(dl_tls_max_dtv_idx))
	return true;
    }

  while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
    {
      --idx;

      if (listp->slotinfo[idx - disp].map != NULL)
	{
	  /* Found a new last used index.  */
	  GL(dl_tls_max_dtv_idx) = idx;
	  return true;
	}
    }

  /* No non-empty entry in this list element.  */
  return false;
}
#endif
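
/* _dl_close proceeds in phases: recompute the reference counts of MAP
   and everything on its dependency list, run the DT_FINI_ARRAY/DT_FINI
   destructors of objects whose count drops to zero, detach MAP's
   search list from the scopes of objects that survive, unmap and free
   each dead object (including its TLS slot), and finally close any
   relocation dependencies recorded for the freed objects.  */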
void
internal_function
_dl_close (void *_map)
{
  struct reldep_list
  {
    struct link_map **rellist;
    unsigned int nrellist;
    struct reldep_list *next;
  } *reldeps = NULL;
  struct link_map **list;
  struct link_map *map = _map;
  unsigned int i;
  unsigned int *new_opencount;
#ifdef USE_TLS
  bool any_tls = false;
#endif

  /* First see whether we can remove the object at all.  */
  if (__builtin_expect (map->l_flags_1 & DF_1_NODELETE, 0)
      && map->l_init_called)
    /* Nope.  Do nothing.  */
    return;

  if (__builtin_expect (map->l_opencount, 1) == 0)
    _dl_signal_error (0, map->l_name, NULL, N_("shared object not open"));

  /* Acquire the lock.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  /* Decrement the reference count.  */
  if (map->l_opencount > 1 || map->l_type != lt_loaded)
    {
      /* There are still references to this object.  Do nothing more.  */
      if (__builtin_expect (GL(dl_debug_mask) & DL_DEBUG_FILES, 0))
	_dl_debug_printf ("\nclosing file=%s; opencount == %u\n",
			  map->l_name, map->l_opencount);

      /* Only decrement the object itself, not the dependencies.  */
      --map->l_opencount;

      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      return;
    }

  list = map->l_initfini;

  /* Compute the new l_opencount values.  */
  i = map->l_searchlist.r_nlist;
  if (__builtin_expect (i == 0, 0))
    /* This can happen if we handle relocation dependencies for an
       object which wasn't loaded directly.  */
    for (i = 1; list[i] != NULL; ++i)
      ;

  new_opencount = (unsigned int *) alloca (i * sizeof (unsigned int));

  for (i = 0; list[i] != NULL; ++i)
    {
      list[i]->l_idx = i;
      new_opencount[i] = list[i]->l_opencount;
    }
  --new_opencount[0];
  for (i = 1; list[i] != NULL; ++i)
    if ((! (list[i]->l_flags_1 & DF_1_NODELETE) || ! list[i]->l_init_called)
	/* Decrement counter.  */
	&& --new_opencount[i] == 0
	/* Test whether this object was also loaded directly.  */
	&& list[i]->l_searchlist.r_list != NULL)
      {
	/* In this case we have to decrement all the dependencies of
	   this object.  They are all in MAP's dependency list.  */
	unsigned int j;
	struct link_map **dep_list = list[i]->l_searchlist.r_list;

	for (j = 1; j < list[i]->l_searchlist.r_nlist; ++j)
	  if (! (dep_list[j]->l_flags_1 & DF_1_NODELETE)
	      || ! dep_list[j]->l_init_called)
	    {
	      assert (dep_list[j]->l_idx < map->l_searchlist.r_nlist);
	      --new_opencount[dep_list[j]->l_idx];
	    }
      }
  assert (new_opencount[0] == 0);
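
  /* The assert above holds because MAP reached this point with
     l_opencount exactly 1: a count of 0 was rejected as an error and a
     count greater than 1 took the early-return path that only
     decrements MAP itself.  */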

  /* Call all termination functions at once.  */
  for (i = 0; list[i] != NULL; ++i)
    {
      struct link_map *imap = list[i];

      if (new_opencount[i] == 0 && imap->l_type == lt_loaded
	  && (imap->l_info[DT_FINI] || imap->l_info[DT_FINI_ARRAY])
	  && (! (imap->l_flags_1 & DF_1_NODELETE) || ! imap->l_init_called)
	  /* Skip any half-cooked objects that were never initialized.  */
	  && imap->l_init_called)
	{
	  /* When debugging print a message first.  */
	  if (__builtin_expect (GL(dl_debug_mask) & DL_DEBUG_IMPCALLS, 0))
	    _dl_debug_printf ("\ncalling fini: %s\n\n", imap->l_name);

	  /* Call its termination function.  */
	  if (imap->l_info[DT_FINI_ARRAY] != NULL)
	    {
	      ElfW(Addr) *array =
		(ElfW(Addr) *) (imap->l_addr
				+ imap->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
	      unsigned int sz = (imap->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
				 / sizeof (ElfW(Addr)));
	      unsigned int cnt;

	      for (cnt = 0; cnt < sz; ++cnt)
		((fini_t) (imap->l_addr + array[cnt])) ();
	    }

	  /* Next try the old-style destructor.  */
	  if (imap->l_info[DT_FINI] != NULL)
	    (*(void (*) (void)) DL_DT_FINI_ADDRESS
	     (imap, (void *) imap->l_addr
	      + imap->l_info[DT_FINI]->d_un.d_ptr)) ();
	}
      else if (new_opencount[i] != 0 && imap->l_type == lt_loaded)
	{
	  /* The object is still used.  But the object we are unloading
	     right now is responsible for loading it and therefore we
	     have the search list of the current object in its scope.
	     Remove it.  */
	  struct r_scope_elem **runp = imap->l_scope;

	  while (*runp != NULL)
	    if (*runp == &map->l_searchlist)
	      {
		/* Copy all later elements.  */
		while ((runp[0] = runp[1]) != NULL)
		  ++runp;
		break;
	      }
	    else
	      ++runp;
	}

      /* Store the new l_opencount value.  */
      imap->l_opencount = new_opencount[i];

      /* Just a sanity check.  */
      assert (imap->l_type == lt_loaded || imap->l_opencount > 0);
    }

  /* Notify the debugger we are about to remove some loaded objects.  */
  _r_debug.r_state = RT_DELETE;
  _dl_debug_state ();

#ifdef USE_TLS
  size_t tls_free_start, tls_free_end;
  tls_free_start = tls_free_end = GL(dl_tls_static_used);
#endif
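
  /* tls_free_start/tls_free_end track a single contiguous range of
     static TLS space released by the objects freed below; only a range
     that reaches up to GL(dl_tls_static_used) can actually be returned
     at the end.  */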

  /* Check each element of the search list to see if all references to
     it are gone.  */
  for (i = 0; list[i] != NULL; ++i)
    {
      struct link_map *imap = list[i];
      if (imap->l_opencount == 0 && imap->l_type == lt_loaded)
	{
	  struct libname_list *lnp;

	  /* That was the last reference, and this was a dlopen-loaded
	     object.  We can unmap it.  */
	  if (__builtin_expect (imap->l_global, 0))
	    {
	      /* This object is in the global scope list.  Remove it.  */
	      unsigned int cnt = GL(dl_main_searchlist)->r_nlist;

	      do
		--cnt;
	      while (GL(dl_main_searchlist)->r_list[cnt] != imap);

	      /* The object was already correctly registered.  */
	      while (++cnt < GL(dl_main_searchlist)->r_nlist)
		GL(dl_main_searchlist)->r_list[cnt - 1]
		  = GL(dl_main_searchlist)->r_list[cnt];

	      --GL(dl_main_searchlist)->r_nlist;
	    }

#ifdef USE_TLS
	  /* Remove the object from the dtv slotinfo array if it uses TLS.  */
	  if (__builtin_expect (imap->l_tls_blocksize > 0, 0))
	    {
	      any_tls = true;

	      if (! remove_slotinfo (imap->l_tls_modid,
				     GL(dl_tls_dtv_slotinfo_list), 0,
				     imap->l_init_called))
		/* All dynamically loaded modules with TLS are unloaded.  */
		GL(dl_tls_max_dtv_idx) = GL(dl_tls_static_nelem);

	      if (imap->l_tls_offset != 0)
		{
		  /* Collect a contiguous chunk built from the objects in
		     this search list, going in either direction.  When the
		     whole chunk is at the end of the used area then we can
		     reclaim it.  */
		  if (imap->l_tls_offset == tls_free_end)
		    /* Extend the contiguous chunk being reclaimed.  */
		    tls_free_end += imap->l_tls_blocksize;
		  else if (imap->l_tls_offset + imap->l_tls_blocksize
			   == tls_free_start)
		    /* Extend the chunk backwards.  */
		    tls_free_start = imap->l_tls_offset;
		  else
		    {
		      /* This isn't contiguous with the last chunk freed.
			 One of them will be leaked.  */
		      if (tls_free_end == GL(dl_tls_static_used))
			GL(dl_tls_static_used) = tls_free_start;
		      tls_free_start = imap->l_tls_offset;
		      tls_free_end = tls_free_start + imap->l_tls_blocksize;
		    }
		}
	    }
#endif

	  /* We can unmap all the maps at once.  We determined the
	     start address and length when we loaded the object and
	     the `munmap' call does the rest.  */
	  DL_UNMAP (imap);

	  /* Finally, unlink the data structure and free it.  */
#ifdef SHARED
	  /* We will unlink the first object only if this is a statically
	     linked program.  */
	  assert (imap->l_prev != NULL);
	  imap->l_prev->l_next = imap->l_next;
#else
	  if (imap->l_prev != NULL)
	    imap->l_prev->l_next = imap->l_next;
	  else
	    GL(dl_loaded) = imap->l_next;
#endif

	  --GL(dl_nloaded);
	  if (imap->l_next)
	    imap->l_next->l_prev = imap->l_prev;

	  if (imap->l_versions != NULL)
	    free (imap->l_versions);
	  if (imap->l_origin != NULL && imap->l_origin != (char *) -1)
	    free ((char *) imap->l_origin);

	  /* If the object has relocation dependencies save this
	     information for later.  */
	  if (__builtin_expect (imap->l_reldeps != NULL, 0))
	    {
	      struct reldep_list *newrel;

	      newrel = (struct reldep_list *) alloca (sizeof (*reldeps));
	      newrel->rellist = imap->l_reldeps;
	      newrel->nrellist = imap->l_reldepsact;
	      newrel->next = reldeps;

	      reldeps = newrel;
	    }

	  /* This name always is allocated.  */
	  free (imap->l_name);

	  /* Remove the list with all the names of the shared object.  */
	  lnp = imap->l_libname;
	  do
	    {
	      struct libname_list *this = lnp;
	      lnp = lnp->next;
	      if (!this->dont_free)
		free (this);
	    }
	  while (lnp != NULL);

	  /* Remove the searchlists.  */
	  if (imap != map)
	    free (imap->l_initfini);

	  /* Remove the scope array if we allocated it.  */
	  if (imap->l_scope != imap->l_scope_mem)
	    free (imap->l_scope);

	  if (imap->l_phdr_allocated)
	    free ((void *) imap->l_phdr);

	  if (imap->l_rpath_dirs.dirs != (void *) -1)
	    free (imap->l_rpath_dirs.dirs);
	  if (imap->l_runpath_dirs.dirs != (void *) -1)
	    free (imap->l_runpath_dirs.dirs);

	  free (imap);
	}
    }

#ifdef USE_TLS
  /* If we removed any object which uses TLS bump the generation counter.  */
  if (any_tls)
    {
      if (__builtin_expect (++GL(dl_tls_generation) == 0, 0))
	__libc_fatal (_("TLS generation counter wrapped!  Please send report with the 'glibcbug' script."));

      if (tls_free_end == GL(dl_tls_static_used))
	GL(dl_tls_static_used) = tls_free_start;
    }
#endif

  /* Notify the debugger those objects are finalized and gone.  */
  _r_debug.r_state = RT_CONSISTENT;
  _dl_debug_state ();
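
  /* The reldep_list records were alloca'd in this frame, so only the
     rellist arrays they point to need freeing below.  Closing a
     relocation dependency may recurse back into _dl_close.  */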

  /* Now we can perhaps also remove the modules for which we had
     dependencies because of symbol lookup.  */
  while (__builtin_expect (reldeps != NULL, 0))
    {
      while (reldeps->nrellist-- > 0)
	_dl_close (reldeps->rellist[reldeps->nrellist]);

      free (reldeps->rellist);

      reldeps = reldeps->next;
    }

  free (list);

  /* Release the lock.  */
  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}
libc_hidden_def (_dl_close)
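
/* _dl_close is the engine behind the public dlclose().  A typical user
   sequence (libfoo.so is just an illustrative name):

       void *h = dlopen ("libfoo.so", RTLD_LAZY);
       ...
       dlclose (h);

   ends up here, and the library is only unmapped once every dlopen
   reference and every recorded relocation dependency has been dropped
   and the object is not marked DF_1_NODELETE.  */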

#ifdef USE_TLS
static bool
free_slotinfo (struct dtv_slotinfo_list **elemp)
{
  size_t cnt;

  if (*elemp == NULL)
    /* Nothing here, all is removed (or there never was anything).  */
    return true;

  if (!free_slotinfo (&(*elemp)->next))
    /* We cannot free the entry.  */
    return false;

  /* That cleared our next pointer for us.  */

  for (cnt = 0; cnt < (*elemp)->len; ++cnt)
    if ((*elemp)->slotinfo[cnt].map != NULL)
      /* Still used.  */
      return false;

  /* We can remove the list element.  */
  free (*elemp);
  *elemp = NULL;

  return true;
}
#endif
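
/* free_mem below is registered via libc_freeres_fn, so it runs from
   __libc_freeres (typically triggered by memory-debugging tools at
   process exit) to release the dynamic linker's remaining
   bookkeeping.  */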
libc_freeres_fn (free_mem)
{
  if (__builtin_expect (GL(dl_global_scope_alloc), 0) != 0
      && GL(dl_main_searchlist)->r_nlist == GL(dl_initial_searchlist).r_nlist)
    {
      /* All objects dynamically loaded by the program are unloaded.  Free
	 the memory allocated for the global scope variable.  */
      struct link_map **old = GL(dl_main_searchlist)->r_list;

      /* Put the old map in.  */
      GL(dl_main_searchlist)->r_list = GL(dl_initial_searchlist).r_list;

      /* Signal that the original map is used.  */
      GL(dl_global_scope_alloc) = 0;

      /* Now free the old map.  */
      free (old);
    }

#ifdef USE_TLS
  if (USE___THREAD || GL(dl_tls_dtv_slotinfo_list) != NULL)
    {
      /* Free the memory allocated for the dtv slotinfo array.  We can do
	 this only if all modules which used this memory are unloaded.  */
# ifdef SHARED
      if (GL(dl_initial_dtv) == NULL)
	/* There was no initial TLS setup, it was set up later when
	   it used the normal malloc.  */
	free_slotinfo (&GL(dl_tls_dtv_slotinfo_list));
# endif

      /* The first element of the list does not have to be deallocated.
	 It was allocated in the dynamic linker (i.e., with a different
	 malloc), and in the static library it's in .bss space.  */
      free_slotinfo (&GL(dl_tls_dtv_slotinfo_list)->next);
    }
#endif
}