/* Close a shared object opened by `_dl_open'.
   Copyright (C) 1996-2005, 2006, 2007 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <bits/libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sysdep-cancel.h>
#include <tls.h>


/* Type of the destructor functions called via DT_FINI/DT_FINI_ARRAY.  */
typedef void (*fini_t) (void);


/* Special l_idx value used to indicate which objects remain loaded.  */
#define IDX_STILL_USED -1
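

/* The dtv slotinfo is kept as a chained list of fixed-size arrays
   (struct dtv_slotinfo_list); a module index IDX is located by walking
   the chain while accumulating the displacement DISP of the arrays
   already passed.  remove_slotinfo below clears the entry for IDX and,
   if IDX was the highest index in use, searches backwards for the new
   GL(dl_tls_max_dtv_idx).  */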

#ifdef USE_TLS
/* Returns true if a non-empty entry was found.  */
static bool
remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
                 bool should_be_there)
{
  if (idx - disp >= listp->len)
    {
      if (listp->next == NULL)
        {
          /* The index is not actually valid in the slotinfo list,
             because this object was closed before it was fully set
             up due to some error.  */
          assert (! should_be_there);
        }
      else
        {
          if (remove_slotinfo (idx, listp->next, disp + listp->len,
                               should_be_there))
            return true;

          /* No non-empty entry.  Search from the end of this element's
             slotinfo array.  */
          idx = disp + listp->len;
        }
    }
  else
    {
      struct link_map *old_map = listp->slotinfo[idx - disp].map;

      /* The entry might still be in its unused state if we are closing an
         object that wasn't fully set up.  */
      if (__builtin_expect (old_map != NULL, 1))
        {
          assert (old_map->l_tls_modid == idx);

          /* Mark the entry as unused.  */
          listp->slotinfo[idx - disp].gen = GL(dl_tls_generation) + 1;
          listp->slotinfo[idx - disp].map = NULL;
        }

      /* If this is not the last currently used entry no need to look
         further.  */
      if (idx != GL(dl_tls_max_dtv_idx))
        return true;
    }

  while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
    {
      --idx;

      if (listp->slotinfo[idx - disp].map != NULL)
        {
          /* Found a new last used index.  */
          GL(dl_tls_max_dtv_idx) = idx;
          return true;
        }
    }

  /* No non-empty entry in this list element.  */
  return false;
}
#endif


void
_dl_close_worker (struct link_map *map)
{
  /* One less direct use.  */
  --map->l_direct_opencount;

  /* If _dl_close is called recursively (some destructor calls dlclose),
     just record that the parent _dl_close will need to do garbage collection
     again and return.  */
  static enum { not_pending, pending, rerun } dl_close_state;

  if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
      || dl_close_state != not_pending)
    {
      if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
        dl_close_state = rerun;

      /* There are still references to this object.  Do nothing more.  */
      if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
        _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
                          map->l_name, map->l_direct_opencount);

      return;
    }

  Lmid_t nsid = map->l_ns;
  struct link_namespaces *ns = &GL(dl_ns)[nsid];

 retry:
  dl_close_state = pending;

#ifdef USE_TLS
  bool any_tls = false;
#endif
  const unsigned int nloaded = ns->_ns_nloaded;
  char used[nloaded];
  char done[nloaded];
  struct link_map *maps[nloaded];

  /* Run over the list and assign indexes to the link maps and enter
     them into the MAPS array.  */
  int idx = 0;
  for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
    {
      l->l_idx = idx;
      maps[idx] = l;
      ++idx;
    }
  assert (idx == nloaded);

  /* Prepare the bitmaps.  */
  memset (used, '\0', sizeof (used));
  memset (done, '\0', sizeof (done));
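
  /* What follows is the mark phase of a small mark-and-sweep collector:
     every object that is still directly opened, is not lt_loaded, or is
     marked DF_1_NODELETE is a root; everything reachable from a root via
     l_initfini (dependencies) or l_reldeps (relocation dependencies) is
     marked used.  When a dependency with a lower index than the current
     position is newly marked, done_index is pulled back so that object's
     own dependencies get processed as well.  */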
  /* Keep track of the lowest index link map we have covered already.  */
  int done_index = -1;
  while (++done_index < nloaded)
    {
      struct link_map *l = maps[done_index];

      if (done[done_index])
        /* Already handled.  */
        continue;

      /* Check whether this object is still used.  */
      if (l->l_type == lt_loaded
          && l->l_direct_opencount == 0
          && (l->l_flags_1 & DF_1_NODELETE) == 0
          && !used[done_index])
        continue;

      /* We need this object and we handle it now.  */
      done[done_index] = 1;
      used[done_index] = 1;
      /* Signal the object is still needed.  */
      l->l_idx = IDX_STILL_USED;

      /* Mark all dependencies as used.  */
      if (l->l_initfini != NULL)
        {
          struct link_map **lp = &l->l_initfini[1];
          while (*lp != NULL)
            {
              if ((*lp)->l_idx != IDX_STILL_USED)
                {
                  assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);

                  if (!used[(*lp)->l_idx])
                    {
                      used[(*lp)->l_idx] = 1;
                      if ((*lp)->l_idx - 1 < done_index)
                        done_index = (*lp)->l_idx - 1;
                    }
                }

              ++lp;
            }
        }
      /* And the same for relocation dependencies.  */
      if (l->l_reldeps != NULL)
        for (unsigned int j = 0; j < l->l_reldepsact; ++j)
          {
            struct link_map *jmap = l->l_reldeps[j];

            if (jmap->l_idx != IDX_STILL_USED)
              {
                assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);

                if (!used[jmap->l_idx])
                  {
                    used[jmap->l_idx] = 1;
                    if (jmap->l_idx - 1 < done_index)
                      done_index = jmap->l_idx - 1;
                  }
              }
          }
    }

  /* Sort the entries.  */
  _dl_sort_fini (ns->_ns_loaded, maps, nloaded, used, nsid);
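
  /* After sorting, MAPS is in destructor order: an object comes before
     the objects it depends on, so a DSO's finalizers run before those of
     its dependencies.  */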

  /* Call all termination functions at once.  */
#ifdef SHARED
  bool do_audit = GLRO(dl_naudit) > 0 && !ns->_ns_loaded->l_auditing;
#endif
  bool unload_any = false;
  bool scope_mem_left = false;
  unsigned int unload_global = 0;
  unsigned int first_loaded = ~0;
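
  /* This pass does two things: for objects that are going away it runs
     the DT_FINI_ARRAY/DT_FINI destructors and notifies any auditors; for
     objects that stay but whose scope lists reference dying objects it
     rebuilds those scope lists before the memory disappears.  */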
  for (unsigned int i = 0; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];

      /* All elements must be in the same namespace.  */
      assert (imap->l_ns == nsid);

      if (!used[i])
        {
          assert (imap->l_type == lt_loaded
                  && (imap->l_flags_1 & DF_1_NODELETE) == 0);

          /* Call its termination function.  Do not do it for
             half-cooked objects.  */
          if (imap->l_init_called)
            {
              /* When debugging print a message first.  */
              if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS,
                                    0))
                _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
                                  imap->l_name, nsid);

              if (imap->l_info[DT_FINI_ARRAY] != NULL)
                {
                  ElfW(Addr) *array =
                    (ElfW(Addr) *) (imap->l_addr
                                    + imap->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
                  unsigned int sz = (imap->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
                                     / sizeof (ElfW(Addr)));

                  /* The DT_FINI_ARRAY functions are called in reverse
                     order.  */
                  while (sz-- > 0)
                    ((fini_t) array[sz]) ();
                }

              /* Next try the old-style destructor.  */
              if (imap->l_info[DT_FINI] != NULL)
                (*(void (*) (void)) DL_DT_FINI_ADDRESS
                 (imap, ((void *) imap->l_addr
                         + imap->l_info[DT_FINI]->d_un.d_ptr))) ();
            }

#ifdef SHARED
          /* Auditing checkpoint: this object is about to be closed.  */
          if (__builtin_expect (do_audit, 0))
            {
              struct audit_ifaces *afct = GLRO(dl_audit);
              for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
                {
                  if (afct->objclose != NULL)
                    /* Return value is ignored.  */
                    (void) afct->objclose (&imap->l_audit[cnt].cookie);

                  afct = afct->next;
                }
            }
#endif

          /* This object must not be used anymore.  */
          imap->l_removed = 1;

          /* We indeed have an object to remove.  */
          unload_any = true;

          if (imap->l_global)
            ++unload_global;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
      /* Else used[i].  */
      else if (imap->l_type == lt_loaded)
        {
          struct r_scope_elem *new_list = NULL;

          if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
            {
              /* The object is still used.  But one of the objects we are
                 unloading right now is responsible for loading it.  If
                 the current object does not have its own scope yet we
                 have to create one.  This has to be done before running
                 the finalizers.

                 To do this count the number of dependencies.  */
              unsigned int cnt;
              for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
                ;

              /* We simply reuse the l_initfini list.  */
              imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
              imap->l_searchlist.r_nlist = cnt;

              new_list = &imap->l_searchlist;
            }

          /* Count the number of scopes which remain after the unload.
             When we add the local search list count it.  Always add
             one for the terminating NULL pointer.  */
          size_t remain = (new_list != NULL) + 1;
          bool removed_any = false;
          for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
            /* This relies on l_scope[] entries being always set either
               to its own l_symbolic_searchlist address, or some map's
               l_searchlist address.  */
            if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
              {
                struct link_map *tmap = (struct link_map *)
                  ((char *) imap->l_scope[cnt]
                   - offsetof (struct link_map, l_searchlist));
                assert (tmap->l_ns == nsid);
                if (tmap->l_idx == IDX_STILL_USED)
                  ++remain;
                else
                  removed_any = true;
              }
            else
              ++remain;

          if (removed_any)
            {
              /* Always allocate a new array for the scope.  This is
                 necessary since we must be able to determine the last
                 user of the current array.  If possible use the link map's
                 memory.  */
              size_t new_size;
              struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

              if (imap->l_scope != imap->l_scope_mem
                  && remain < SCOPE_ELEMS (imap))
                {
                  new_size = SCOPE_ELEMS (imap);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlclose", NULL,
                                      N_("cannot create scope list"));
                }

              /* Copy over the remaining scope elements.  */
              remain = 0;
              for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
                {
                  if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
                    {
                      struct link_map *tmap = (struct link_map *)
                        ((char *) imap->l_scope[cnt]
                         - offsetof (struct link_map, l_searchlist));
                      if (tmap->l_idx != IDX_STILL_USED)
                        {
                          /* Remove the scope.  Or replace with own map's
                             scope.  */
                          if (new_list != NULL)
                            {
                              newp[remain++] = new_list;
                              new_list = NULL;
                            }
                          continue;
                        }
                    }

                  newp[remain++] = imap->l_scope[cnt];
                }
              newp[remain] = NULL;

              struct r_scope_elem **old = imap->l_scope;

              imap->l_scope = newp;

              /* No user anymore, we can free it now.  */
              if (old != imap->l_scope_mem)
                {
                  if (_dl_scope_free (old))
                    /* If _dl_scope_free used THREAD_GSCOPE_WAIT (),
                       no need to repeat it.  */
                    scope_mem_left = false;
                }
              else
                scope_mem_left = true;

              imap->l_scope_max = new_size;
            }
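
          /* Readers walk l_scope without taking a lock; they announce
             themselves through the thread's global-scope ("GSCOPE") flag
             instead.  A retired scope array must therefore stay valid
             until every reader is known to have left it: _dl_scope_free
             either frees it after THREAD_GSCOPE_WAIT or queues it on
             dl_scope_free_list, and scope_mem_left records that a wait
             is still owed for arrays living in l_scope_mem.  */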

          /* The loader is gone, so mark the object as not having one.
             Note: l_idx != IDX_STILL_USED -> object will be removed.  */
          if (imap->l_loader != NULL
              && imap->l_loader->l_idx != IDX_STILL_USED)
            imap->l_loader = NULL;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
    }

  /* If there are no objects to unload, do nothing further.  */
  if (!unload_any)
    goto out;

#ifdef SHARED
  /* Auditing checkpoint: we will start deleting objects.  */
  if (__builtin_expect (do_audit, 0))
    {
      struct link_map *head = ns->_ns_loaded;
      struct audit_ifaces *afct = GLRO(dl_audit);
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie, LA_ACT_DELETE);

              afct = afct->next;
            }
        }
    }
#endif

  /* Notify the debugger we are about to remove some loaded objects.  */
  struct r_debug *r = _dl_debug_initialize (0, nsid);
  r->r_state = RT_DELETE;
  _dl_debug_state ();

  if (unload_global)
    {
      /* Some objects are in the global scope list.  Remove them.  */
      struct r_scope_elem *ns_msl = ns->_ns_main_searchlist;
      unsigned int i;
      unsigned int j = 0;
      unsigned int cnt = ns_msl->r_nlist;

      while (cnt > 0 && ns_msl->r_list[cnt - 1]->l_removed)
        --cnt;

      if (cnt + unload_global == ns_msl->r_nlist)
        /* Speed up removing most recently added objects.  */
        j = cnt;
      else
        for (i = 0; i < cnt; i++)
          if (ns_msl->r_list[i]->l_removed == 0)
            {
              if (i != j)
                ns_msl->r_list[j] = ns_msl->r_list[i];
              j++;
            }
      ns_msl->r_nlist = j;
    }

  if (!RTLD_SINGLE_THREAD_P
      && (unload_global
          || scope_mem_left
          || (GL(dl_scope_free_list) != NULL
              && GL(dl_scope_free_list)->count)))
    {
      struct dl_scope_free_list *fsl;

      THREAD_GSCOPE_WAIT ();
      /* Now we can free any queued old scopes.  */
      if ((fsl = GL(dl_scope_free_list)) != NULL)
        while (fsl->count > 0)
          free (fsl->list[--fsl->count]);
    }
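
  /* THREAD_GSCOPE_WAIT blocks until every thread has passed through a
     point where it holds no reference to any search scope.  Only after
     that is it safe to free the old scope arrays queued above and to
     reuse the l_scope_mem arrays accounted for by scope_mem_left.  */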

#ifdef USE_TLS
  size_t tls_free_start;
  size_t tls_free_end;
  tls_free_start = tls_free_end = NO_TLS_OFFSET;
#endif
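
  /* Static TLS is carved out of one block by a simple bump allocator
     whose high-water mark is GL(dl_tls_static_used).  The loop below
     therefore tracks only one contiguous candidate chunk delimited by
     tls_free_start and tls_free_end; it can give memory back only when
     the chunk ends exactly at the high-water mark.  Anything else may
     be leaked.  */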

  /* Check each element of the search list to see if all references to
     it are gone.  */
  for (unsigned int i = first_loaded; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];
      if (!used[i])
        {
          assert (imap->l_type == lt_loaded);

          /* That was the last reference, and this was a dlopen-loaded
             object.  We can unmap it.  */

#ifdef USE_TLS
          /* Remove the object from the dtv slotinfo array if it uses TLS.  */
          if (__builtin_expect (imap->l_tls_blocksize > 0, 0))
            {
              any_tls = true;

              if (GL(dl_tls_dtv_slotinfo_list) != NULL
                  && ! remove_slotinfo (imap->l_tls_modid,
                                        GL(dl_tls_dtv_slotinfo_list), 0,
                                        imap->l_init_called))
                /* All dynamically loaded modules with TLS are unloaded.  */
                GL(dl_tls_max_dtv_idx) = GL(dl_tls_static_nelem);

              if (imap->l_tls_offset != NO_TLS_OFFSET)
                {
                  /* Collect a contiguous chunk built from the objects in
                     this search list, going in either direction.  When the
                     whole chunk is at the end of the used area then we can
                     reclaim it.  */
# if TLS_TCB_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET
                      || (size_t) imap->l_tls_offset == tls_free_start)
                    {
                      /* Extend the contiguous chunk being reclaimed.  */
                      tls_free_start
                        = imap->l_tls_offset - imap->l_tls_blocksize;

                      if (tls_free_end == NO_TLS_OFFSET)
                        tls_free_end = imap->l_tls_offset;
                    }
                  else if (imap->l_tls_offset - imap->l_tls_blocksize
                           == tls_free_end)
                    /* Extend the chunk backwards.  */
                    tls_free_end = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked unless we can free
                         one block right away.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        {
                          GL(dl_tls_static_used) = tls_free_start;
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                      else if ((size_t) imap->l_tls_offset
                               == GL(dl_tls_static_used))
                        GL(dl_tls_static_used)
                          = imap->l_tls_offset - imap->l_tls_blocksize;
                      else if (tls_free_end < (size_t) imap->l_tls_offset)
                        {
                          /* We pick the later block.  It has a chance to
                             be freed.  */
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                    }
# elif TLS_DTV_AT_TP
                  if ((size_t) imap->l_tls_offset == tls_free_end)
                    /* Extend the contiguous chunk being reclaimed.  */
                    tls_free_end -= imap->l_tls_blocksize;
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == tls_free_start)
                    /* Extend the chunk backwards.  */
                    tls_free_start = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        GL(dl_tls_static_used) = tls_free_start;
                      tls_free_start = imap->l_tls_offset;
                      tls_free_end = tls_free_start + imap->l_tls_blocksize;
                    }
# else
#  error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif
                }
            }
#endif

          /* We can unmap all the maps at once.  We determined the
             start address and length when we loaded the object and
             the `munmap' call does the rest.  */
          DL_UNMAP (imap);

          /* Finally, unlink the data structure and free it.  */
          if (imap->l_prev != NULL)
            imap->l_prev->l_next = imap->l_next;
          else
            {
#ifdef SHARED
              assert (nsid != LM_ID_BASE);
#endif
              ns->_ns_loaded = imap->l_next;
            }

          --ns->_ns_nloaded;
          if (imap->l_next != NULL)
            imap->l_next->l_prev = imap->l_prev;

          free (imap->l_versions);
          if (imap->l_origin != (char *) -1)
            free ((char *) imap->l_origin);

          free (imap->l_reldeps);

          /* Print debugging message.  */
          if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
            _dl_debug_printf ("\nfile=%s [%lu]; destroying link map\n",
                              imap->l_name, imap->l_ns);

          /* This name always is allocated.  */
          free (imap->l_name);
          /* Remove the list with all the names of the shared object.  */

          struct libname_list *lnp = imap->l_libname;
          do
            {
              struct libname_list *this = lnp;
              lnp = lnp->next;
              if (!this->dont_free)
                free (this);
            }
          while (lnp != NULL);

          /* Remove the searchlists.  */
          free (imap->l_initfini);

          /* Remove the scope array if we allocated it.  */
          if (imap->l_scope != imap->l_scope_mem)
            free (imap->l_scope);

          if (imap->l_phdr_allocated)
            free ((void *) imap->l_phdr);

          if (imap->l_rpath_dirs.dirs != (void *) -1)
            free (imap->l_rpath_dirs.dirs);
          if (imap->l_runpath_dirs.dirs != (void *) -1)
            free (imap->l_runpath_dirs.dirs);

          free (imap);
        }
    }

#ifdef USE_TLS
  /* If we removed any object which uses TLS bump the generation counter.  */
  if (any_tls)
    {
      if (__builtin_expect (++GL(dl_tls_generation) == 0, 0))
        _dl_fatal_printf ("TLS generation counter wrapped!  Please report as described in <http://www.gnu.org/software/libc/bugs.html>.\n");

      if (tls_free_end == GL(dl_tls_static_used))
        GL(dl_tls_static_used) = tls_free_start;
    }
#endif

#ifdef SHARED
  /* Auditing checkpoint: we have deleted all objects.  */
  if (__builtin_expect (do_audit, 0))
    {
      struct link_map *head = ns->_ns_loaded;
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          struct audit_ifaces *afct = GLRO(dl_audit);
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie, LA_ACT_CONSISTENT);

              afct = afct->next;
            }
        }
    }
#endif

  /* Notify the debugger those objects are finalized and gone.  */
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();

  /* Recheck whether we need to retry; the lock is released by the caller.  */
 out:
  if (dl_close_state == rerun)
    goto retry;

  dl_close_state = not_pending;
}


void
_dl_close (void *_map)
{
  struct link_map *map = _map;

  /* First see whether we can remove the object at all.  */
  if (__builtin_expect (map->l_flags_1 & DF_1_NODELETE, 0))
    {
      assert (map->l_init_called);
      /* Nope.  Do nothing.  */
      return;
    }

  if (__builtin_expect (map->l_direct_opencount, 1) == 0)
    GLRO(dl_signal_error) (0, map->l_name, NULL, N_("shared object not open"));

  /* Acquire the lock.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  _dl_close_worker (map);

  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}
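
/* Illustrative sketch, not part of this file: the reference counting
   above is what makes nested dlopen/dlclose pairs balance.  Assuming a
   hypothetical DSO "libfoo.so", both handles below refer to the same
   link map with l_direct_opencount == 2, and only the second dlclose
   reaches _dl_close_worker's garbage collection:

     #include <dlfcn.h>

     int
     main (void)
     {
       void *h1 = dlopen ("libfoo.so", RTLD_NOW);
       void *h2 = dlopen ("libfoo.so", RTLD_NOW);  // same object again
       dlclose (h2);  // count drops to 1; object stays mapped
       dlclose (h1);  // count drops to 0; destructors run, object unmapped
       return 0;
     }
*/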

#ifdef USE_TLS
static bool __libc_freeres_fn_section
free_slotinfo (struct dtv_slotinfo_list **elemp)
{
  size_t cnt;

  if (*elemp == NULL)
    /* Nothing here, all is removed (or there never was anything).  */
    return true;

  if (!free_slotinfo (&(*elemp)->next))
    /* We cannot free the entry.  */
    return false;

  /* That cleared our next pointer for us.  */

  for (cnt = 0; cnt < (*elemp)->len; ++cnt)
    if ((*elemp)->slotinfo[cnt].map != NULL)
      /* Still used.  */
      return false;

  /* We can remove the list element.  */
  free (*elemp);
  *elemp = NULL;

  return true;
}
#endif
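
/* free_mem is registered via libc_freeres_fn and runs from
   __libc_freeres, which tools such as valgrind can invoke at process
   exit so that deliberately retained allocations are not reported as
   leaks.  It may only return the global search list and the dtv
   slotinfo chain to malloc when no dynamically loaded object still
   uses them.  */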

libc_freeres_fn (free_mem)
{
  for (Lmid_t nsid = 0; nsid < DL_NNS; ++nsid)
    if (__builtin_expect (GL(dl_ns)[nsid]._ns_global_scope_alloc, 0) != 0
        && (GL(dl_ns)[nsid]._ns_main_searchlist->r_nlist
            // XXX Check whether we need NS-specific initial_searchlist
            == GLRO(dl_initial_searchlist).r_nlist))
      {
        /* All objects dynamically loaded by the program are unloaded.  Free
           the memory allocated for the global scope variable.  */
        struct link_map **old = GL(dl_ns)[nsid]._ns_main_searchlist->r_list;

        /* Put the old map in.  */
        GL(dl_ns)[nsid]._ns_main_searchlist->r_list
          // XXX Check whether we need NS-specific initial_searchlist
          = GLRO(dl_initial_searchlist).r_list;
        /* Signal that the original map is used.  */
        GL(dl_ns)[nsid]._ns_global_scope_alloc = 0;

        /* Now free the old map.  */
        free (old);
      }

#ifdef USE_TLS
  if (USE___THREAD || GL(dl_tls_dtv_slotinfo_list) != NULL)
    {
      /* Free the memory allocated for the dtv slotinfo array.  We can do
         this only if all modules which used this memory are unloaded.  */
# ifdef SHARED
      if (GL(dl_initial_dtv) == NULL)
        /* There was no initial TLS setup, it was set up later when
           it used the normal malloc.  */
        free_slotinfo (&GL(dl_tls_dtv_slotinfo_list));
      else
# endif
        /* The first element of the list does not have to be deallocated.
           It was allocated in the dynamic linker (i.e., with a different
           malloc), and in the static library it's in .bss space.  */
        free_slotinfo (&GL(dl_tls_dtv_slotinfo_list)->next);
    }
#endif

  void *scope_free_list = GL(dl_scope_free_list);
  GL(dl_scope_free_list) = NULL;
  free (scope_free_list);
}