Fix memory leak in dlopen with RTLD_NOLOAD.
[glibc.git] / elf / dl-close.c
blob efb2b584f2a13556428155189ce17d40c5acafba
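The commit subject refers to the reference-counting path in _dl_close_worker below; presumably the lt_library branch near the top of that function, which restores l_orig_initfini and hands the replacement list to _dl_scope_free, is the fix. As a minimal reproducer sketch, not taken from the glibc tree: dlopen with RTLD_NOLOAD only returns a handle to an object that is already resident, yet before this change each dlopen/dlclose pair on an object that had been loaded at program startup could leave behind the dependency list dlopen rebuilt for it. The soname and link flags are illustrative assumptions; build with something like "cc noload-leak.c -o noload-leak -lm -ldl" and run it under a leak checker (e.g. valgrind --leak-check=full) to compare behaviour before and after the fix.

/* noload-leak.c: hypothetical reproducer sketch.  Pass the soname of a
   library that is already loaded at startup (a DT_NEEDED dependency of
   the program), e.g. "libm.so.6" if the program is linked with -lm.  */
#include <dlfcn.h>
#include <stdio.h>

int
main (int argc, char *argv[])
{
  const char *soname = argc > 1 ? argv[1] : "libm.so.6";

  for (int i = 0; i < 1000; ++i)
    {
      /* RTLD_NOLOAD never maps a new object; dlopen only returns a
         handle (and bumps the reference count) if SONAME is already
         loaded.  */
      void *h = dlopen (soname, RTLD_LAZY | RTLD_NOLOAD);
      if (h == NULL)
        {
          fprintf (stderr, "%s is not already loaded\n", soname);
          return 1;
        }
      /* Before this fix, every dlclose of such a handle could leak the
         dependency list dlopen had rebuilt for the object.  */
      dlclose (h);
    }
  return 0;
}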
/* Close a shared object opened by `_dl_open'.
   Copyright (C) 1996-2007, 2009, 2010, 2011 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <bits/libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sysdep-cancel.h>
#include <tls.h>
/* Type of the destructor functions.  */
typedef void (*fini_t) (void);


/* Special l_idx value used to indicate which objects remain loaded.  */
#define IDX_STILL_USED -1
/* Returns true if a non-empty entry was found.  */
static bool
remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
                 bool should_be_there)
{
  if (idx - disp >= listp->len)
    {
      if (listp->next == NULL)
        {
          /* The index is not actually valid in the slotinfo list,
             because this object was closed before it was fully set
             up due to some error.  */
          assert (! should_be_there);
        }
      else
        {
          if (remove_slotinfo (idx, listp->next, disp + listp->len,
                               should_be_there))
            return true;

          /* No non-empty entry.  Search from the end of this element's
             slotinfo array.  */
          idx = disp + listp->len;
        }
    }
  else
    {
      struct link_map *old_map = listp->slotinfo[idx - disp].map;

      /* The entry might still be in its unused state if we are closing an
         object that wasn't fully set up.  */
      if (__builtin_expect (old_map != NULL, 1))
        {
          assert (old_map->l_tls_modid == idx);

          /* Mark the entry as unused.  */
          listp->slotinfo[idx - disp].gen = GL(dl_tls_generation) + 1;
          listp->slotinfo[idx - disp].map = NULL;
        }

      /* If this is not the last currently used entry no need to look
         further.  */
      if (idx != GL(dl_tls_max_dtv_idx))
        return true;
    }

  while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
    {
      --idx;

      if (listp->slotinfo[idx - disp].map != NULL)
        {
          /* Found a new last used index.  */
          GL(dl_tls_max_dtv_idx) = idx;
          return true;
        }
    }

  /* No non-empty entry in this list element.  */
  return false;
}
void
_dl_close_worker (struct link_map *map)
{
  /* One less direct use.  */
  --map->l_direct_opencount;

  /* If _dl_close is called recursively (some destructor calls dlclose),
     just record that the parent _dl_close will need to do garbage collection
     again and return.  */
  static enum { not_pending, pending, rerun } dl_close_state;

  if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
      || dl_close_state != not_pending)
    {
      if (map->l_direct_opencount == 0)
        {
          if (map->l_type == lt_loaded)
            dl_close_state = rerun;
          else if (map->l_type == lt_library)
            {
              /* A later dlopen (e.g. with RTLD_NOLOAD) may have replaced
                 the dependency list; restore the original one and free
                 the replacement so it is not leaked.  */
              struct link_map **oldp = map->l_initfini;
              map->l_initfini = map->l_orig_initfini;
              _dl_scope_free (oldp);
            }
        }

      /* There are still references to this object.  Do nothing more.  */
      if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
        _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
                          map->l_name, map->l_direct_opencount);

      return;
    }
  Lmid_t nsid = map->l_ns;
  struct link_namespaces *ns = &GL(dl_ns)[nsid];

 retry:
  dl_close_state = pending;

  bool any_tls = false;
  const unsigned int nloaded = ns->_ns_nloaded;
  char used[nloaded];
  char done[nloaded];
  struct link_map *maps[nloaded];

  /* Run over the list and assign indexes to the link maps and enter
     them into the MAPS array.  */
  int idx = 0;
  for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
    {
      l->l_idx = idx;
      maps[idx] = l;
      ++idx;
    }
  assert (idx == nloaded);

  /* Prepare the bitmaps.  */
  memset (used, '\0', sizeof (used));
  memset (done, '\0', sizeof (done));

  /* Keep track of the lowest index link map we have covered already.  */
  int done_index = -1;
  while (++done_index < nloaded)
    {
      struct link_map *l = maps[done_index];

      if (done[done_index])
        /* Already handled.  */
        continue;

      /* Check whether this object is still used.  */
      if (l->l_type == lt_loaded
          && l->l_direct_opencount == 0
          && (l->l_flags_1 & DF_1_NODELETE) == 0
          && !used[done_index])
        continue;

      /* We need this object and we handle it now.  */
      done[done_index] = 1;
      used[done_index] = 1;
      /* Signal the object is still needed.  */
      l->l_idx = IDX_STILL_USED;

      /* Mark all dependencies as used.  */
      if (l->l_initfini != NULL)
        {
          struct link_map **lp = &l->l_initfini[1];
          while (*lp != NULL)
            {
              if ((*lp)->l_idx != IDX_STILL_USED)
                {
                  assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);

                  if (!used[(*lp)->l_idx])
                    {
                      used[(*lp)->l_idx] = 1;
                      if ((*lp)->l_idx - 1 < done_index)
                        done_index = (*lp)->l_idx - 1;
                    }
                }

              ++lp;
            }
        }
      /* And the same for relocation dependencies.  */
      if (l->l_reldeps != NULL)
        for (unsigned int j = 0; j < l->l_reldeps->act; ++j)
          {
            struct link_map *jmap = l->l_reldeps->list[j];

            if (jmap->l_idx != IDX_STILL_USED)
              {
                assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);

                if (!used[jmap->l_idx])
                  {
                    used[jmap->l_idx] = 1;
                    if (jmap->l_idx - 1 < done_index)
                      done_index = jmap->l_idx - 1;
                  }
              }
          }
    }

  /* Sort the entries.  */
  _dl_sort_fini (ns->_ns_loaded, maps, nloaded, used, nsid);
  /* Call all termination functions at once.  */
#ifdef SHARED
  bool do_audit = GLRO(dl_naudit) > 0 && !ns->_ns_loaded->l_auditing;
#endif
  bool unload_any = false;
  bool scope_mem_left = false;
  unsigned int unload_global = 0;
  unsigned int first_loaded = ~0;
  for (unsigned int i = 0; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];

      /* All elements must be in the same namespace.  */
      assert (imap->l_ns == nsid);

      if (!used[i])
        {
          assert (imap->l_type == lt_loaded
                  && (imap->l_flags_1 & DF_1_NODELETE) == 0);

          /* Call its termination function.  Do not do it for
             half-cooked objects.  */
          if (imap->l_init_called)
            {
              /* When debugging print a message first.  */
              if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS,
                                    0))
                _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
                                  imap->l_name, nsid);

              if (imap->l_info[DT_FINI_ARRAY] != NULL)
                {
                  ElfW(Addr) *array =
                    (ElfW(Addr) *) (imap->l_addr
                                    + imap->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
                  unsigned int sz = (imap->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
                                     / sizeof (ElfW(Addr)));

                  while (sz-- > 0)
                    ((fini_t) array[sz]) ();
                }

              /* Next try the old-style destructor.  */
              if (imap->l_info[DT_FINI] != NULL)
                (*(void (*) (void)) DL_DT_FINI_ADDRESS
                 (imap, ((void *) imap->l_addr
                         + imap->l_info[DT_FINI]->d_un.d_ptr))) ();
            }

#ifdef SHARED
          /* Auditing checkpoint: we remove an object.  */
          if (__builtin_expect (do_audit, 0))
            {
              struct audit_ifaces *afct = GLRO(dl_audit);
              for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
                {
                  if (afct->objclose != NULL)
                    /* Return value is ignored.  */
                    (void) afct->objclose (&imap->l_audit[cnt].cookie);

                  afct = afct->next;
                }
            }
#endif

          /* This object must not be used anymore.  */
          imap->l_removed = 1;

          /* We indeed have an object to remove.  */
          unload_any = true;

          if (imap->l_global)
            ++unload_global;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
      /* Else used[i].  */
      else if (imap->l_type == lt_loaded)
        {
          struct r_scope_elem *new_list = NULL;

          if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
            {
              /* The object is still used.  But one of the objects we are
                 unloading right now is responsible for loading it.  If
                 the current object does not have its own scope yet we
                 have to create one.  This has to be done before running
                 the finalizers.

                 To do this count the number of dependencies.  */
              unsigned int cnt;
              for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
                ;

              /* We simply reuse the l_initfini list.  */
              imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
              imap->l_searchlist.r_nlist = cnt;

              new_list = &imap->l_searchlist;
            }

          /* Count the number of scopes which remain after the unload.
             When we add the local search list count it.  Always add
             one for the terminating NULL pointer.  */
          size_t remain = (new_list != NULL) + 1;
          bool removed_any = false;
          for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
            /* This relies on l_scope[] entries being always set either
               to its own l_symbolic_searchlist address, or some map's
               l_searchlist address.  */
            if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
              {
                struct link_map *tmap = (struct link_map *)
                  ((char *) imap->l_scope[cnt]
                   - offsetof (struct link_map, l_searchlist));
                assert (tmap->l_ns == nsid);
                if (tmap->l_idx == IDX_STILL_USED)
                  ++remain;
                else
                  removed_any = true;
              }
            else
              ++remain;

          if (removed_any)
            {
              /* Always allocate a new array for the scope.  This is
                 necessary since we must be able to determine the last
                 user of the current array.  If possible use the link map's
                 memory.  */
              size_t new_size;
              struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

              if (imap->l_scope != imap->l_scope_mem
                  && remain < SCOPE_ELEMS (imap))
                {
                  new_size = SCOPE_ELEMS (imap);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlclose", NULL,
                                      N_("cannot create scope list"));
                }

              /* Copy over the remaining scope elements.  */
              remain = 0;
              for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
                {
                  if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
                    {
                      struct link_map *tmap = (struct link_map *)
                        ((char *) imap->l_scope[cnt]
                         - offsetof (struct link_map, l_searchlist));
                      if (tmap->l_idx != IDX_STILL_USED)
                        {
                          /* Remove the scope.  Or replace with own map's
                             scope.  */
                          if (new_list != NULL)
                            {
                              newp[remain++] = new_list;
                              new_list = NULL;
                            }
                          continue;
                        }
                    }

                  newp[remain++] = imap->l_scope[cnt];
                }
              newp[remain] = NULL;

              struct r_scope_elem **old = imap->l_scope;

              imap->l_scope = newp;

              /* No user anymore, we can free it now.  */
              if (old != imap->l_scope_mem)
                {
                  if (_dl_scope_free (old))
                    /* If _dl_scope_free used THREAD_GSCOPE_WAIT (),
                       no need to repeat it.  */
                    scope_mem_left = false;
                }
              else
                scope_mem_left = true;

              imap->l_scope_max = new_size;
            }

          /* The loader is gone, so mark the object as not having one.
             Note: l_idx != IDX_STILL_USED -> object will be removed.  */
          if (imap->l_loader != NULL
              && imap->l_loader->l_idx != IDX_STILL_USED)
            imap->l_loader = NULL;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
    }
  /* If there are no objects to unload, do nothing further.  */
  if (!unload_any)
    goto out;

#ifdef SHARED
  /* Auditing checkpoint: we will start deleting objects.  */
  if (__builtin_expect (do_audit, 0))
    {
      struct link_map *head = ns->_ns_loaded;
      struct audit_ifaces *afct = GLRO(dl_audit);
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie, LA_ACT_DELETE);

              afct = afct->next;
            }
        }
    }
#endif

  /* Notify the debugger we are about to remove some loaded objects.  */
  struct r_debug *r = _dl_debug_initialize (0, nsid);
  r->r_state = RT_DELETE;
  _dl_debug_state ();

  if (unload_global)
    {
      /* Some objects are in the global scope list.  Remove them.  */
      struct r_scope_elem *ns_msl = ns->_ns_main_searchlist;
      unsigned int i;
      unsigned int j = 0;
      unsigned int cnt = ns_msl->r_nlist;

      while (cnt > 0 && ns_msl->r_list[cnt - 1]->l_removed)
        --cnt;

      if (cnt + unload_global == ns_msl->r_nlist)
        /* Speed up removing most recently added objects.  */
        j = cnt;
      else
        for (i = 0; i < cnt; i++)
          if (ns_msl->r_list[i]->l_removed == 0)
            {
              if (i != j)
                ns_msl->r_list[j] = ns_msl->r_list[i];
              j++;
            }
      ns_msl->r_nlist = j;
    }

  if (!RTLD_SINGLE_THREAD_P
      && (unload_global
          || scope_mem_left
          || (GL(dl_scope_free_list) != NULL
              && GL(dl_scope_free_list)->count)))
    {
      THREAD_GSCOPE_WAIT ();

      /* Now we can free any queued old scopes.  */
      struct dl_scope_free_list *fsl = GL(dl_scope_free_list);
      if (fsl != NULL)
        while (fsl->count > 0)
          free (fsl->list[--fsl->count]);
    }
  size_t tls_free_start;
  size_t tls_free_end;
  tls_free_start = tls_free_end = NO_TLS_OFFSET;

  /* We modify the list of loaded objects.  */
  __rtld_lock_lock_recursive (GL(dl_load_write_lock));

  /* Check each element of the search list to see if all references to
     it are gone.  */
  for (unsigned int i = first_loaded; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];
      if (!used[i])
        {
          assert (imap->l_type == lt_loaded);

          /* That was the last reference, and this was a dlopen-loaded
             object.  We can unmap it.  */

          /* Remove the object from the dtv slotinfo array if it uses TLS.  */
          if (__builtin_expect (imap->l_tls_blocksize > 0, 0))
            {
              any_tls = true;

              if (GL(dl_tls_dtv_slotinfo_list) != NULL
                  && ! remove_slotinfo (imap->l_tls_modid,
                                        GL(dl_tls_dtv_slotinfo_list), 0,
                                        imap->l_init_called))
                /* All dynamically loaded modules with TLS are unloaded.  */
                GL(dl_tls_max_dtv_idx) = GL(dl_tls_static_nelem);

              if (imap->l_tls_offset != NO_TLS_OFFSET
                  && imap->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
                {
                  /* Collect a contiguous chunk built from the objects in
                     this search list, going in either direction.  When the
                     whole chunk is at the end of the used area then we can
                     reclaim it.  */
#if TLS_TCB_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET
                      || (size_t) imap->l_tls_offset == tls_free_start)
                    {
                      /* Extend the contiguous chunk being reclaimed.  */
                      tls_free_start
                        = imap->l_tls_offset - imap->l_tls_blocksize;

                      if (tls_free_end == NO_TLS_OFFSET)
                        tls_free_end = imap->l_tls_offset;
                    }
                  else if (imap->l_tls_offset - imap->l_tls_blocksize
                           == tls_free_end)
                    /* Extend the chunk backwards.  */
                    tls_free_end = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked unless we can free
                         one block right away.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        {
                          GL(dl_tls_static_used) = tls_free_start;
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                      else if ((size_t) imap->l_tls_offset
                               == GL(dl_tls_static_used))
                        GL(dl_tls_static_used)
                          = imap->l_tls_offset - imap->l_tls_blocksize;
                      else if (tls_free_end < (size_t) imap->l_tls_offset)
                        {
                          /* We pick the later block.  It has a chance to
                             be freed.  */
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                    }
#elif TLS_DTV_AT_TP
                  if ((size_t) imap->l_tls_offset == tls_free_end)
                    /* Extend the contiguous chunk being reclaimed.  */
                    tls_free_end -= imap->l_tls_blocksize;
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == tls_free_start)
                    /* Extend the chunk backwards.  */
                    tls_free_start = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        GL(dl_tls_static_used) = tls_free_start;
                      tls_free_start = imap->l_tls_offset;
                      tls_free_end = tls_free_start + imap->l_tls_blocksize;
                    }
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
                }
            }

          /* We can unmap all the maps at once.  We determined the
             start address and length when we loaded the object and
             the `munmap' call does the rest.  */
          DL_UNMAP (imap);
          /* Finally, unlink the data structure and free it.  */
          if (imap->l_prev != NULL)
            imap->l_prev->l_next = imap->l_next;
          else
            {
#ifdef SHARED
              assert (nsid != LM_ID_BASE);
#endif
              ns->_ns_loaded = imap->l_next;
            }

          --ns->_ns_nloaded;
          if (imap->l_next != NULL)
            imap->l_next->l_prev = imap->l_prev;

          free (imap->l_versions);
          if (imap->l_origin != (char *) -1)
            free ((char *) imap->l_origin);

          free (imap->l_reldeps);

          /* Print debugging message.  */
          if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
            _dl_debug_printf ("\nfile=%s [%lu];  destroying link map\n",
                              imap->l_name, imap->l_ns);

          /* This name is always allocated.  */
          free (imap->l_name);

          /* Remove the list with all the names of the shared object.  */
          struct libname_list *lnp = imap->l_libname;
          do
            {
              struct libname_list *this = lnp;
              lnp = lnp->next;
              if (!this->dont_free)
                free (this);
            }
          while (lnp != NULL);

          /* Remove the searchlists.  */
          free (imap->l_initfini);

          /* Remove the scope array if we allocated it.  */
          if (imap->l_scope != imap->l_scope_mem)
            free (imap->l_scope);

          if (imap->l_phdr_allocated)
            free ((void *) imap->l_phdr);

          if (imap->l_rpath_dirs.dirs != (void *) -1)
            free (imap->l_rpath_dirs.dirs);
          if (imap->l_runpath_dirs.dirs != (void *) -1)
            free (imap->l_runpath_dirs.dirs);

          free (imap);
        }
    }

  __rtld_lock_unlock_recursive (GL(dl_load_write_lock));
  /* If we removed any object which uses TLS bump the generation counter.  */
  if (any_tls)
    {
      if (__builtin_expect (++GL(dl_tls_generation) == 0, 0))
        _dl_fatal_printf ("TLS generation counter wrapped!  Please report as described in <http://www.gnu.org/software/libc/bugs.html>.\n");

      if (tls_free_end == GL(dl_tls_static_used))
        GL(dl_tls_static_used) = tls_free_start;
    }

#ifdef SHARED
  /* Auditing checkpoint: we have deleted all objects.  */
  if (__builtin_expect (do_audit, 0))
    {
      struct link_map *head = ns->_ns_loaded;
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          struct audit_ifaces *afct = GLRO(dl_audit);
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie, LA_ACT_CONSISTENT);

              afct = afct->next;
            }
        }
    }
#endif

  if (__builtin_expect (ns->_ns_loaded == NULL, 0)
      && nsid == GL(dl_nns) - 1)
    do
      {
        --GL(dl_nns);
#ifndef SHARED
        if (GL(dl_nns) == 0)
          break;
#endif
      }
    while (GL(dl_ns)[GL(dl_nns) - 1]._ns_loaded == NULL);

  /* Notify the debugger those objects are finalized and gone.  */
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();

  /* Recheck if we need to retry, release the lock.  */
 out:
  if (dl_close_state == rerun)
    goto retry;

  dl_close_state = not_pending;
}
void
_dl_close (void *_map)
{
  struct link_map *map = _map;

  /* First see whether we can remove the object at all.  */
  if (__builtin_expect (map->l_flags_1 & DF_1_NODELETE, 0))
    {
      assert (map->l_init_called);
      /* Nope.  Do nothing.  */
      return;
    }

  if (__builtin_expect (map->l_direct_opencount, 1) == 0)
    GLRO(dl_signal_error) (0, map->l_name, NULL, N_("shared object not open"));

  /* Acquire the lock.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  _dl_close_worker (map);

  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}