/* Close a shared object opened by `_dl_open'.
   Copyright (C) 1996-2005, 2006, 2007 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <bits/libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sysdep-cancel.h>
#include <tls.h>

/* Type of the destructor functions called on dlclose.  */
typedef void (*fini_t) (void);

/* Special l_idx value used to indicate which objects remain loaded.  */
#define IDX_STILL_USED -1
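
/* While the worker below runs, each link map's l_idx field is
   temporarily repurposed as an index into the local MAPS array built
   there; maps which turn out to still be needed are marked with
   IDX_STILL_USED instead.  */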

/* Returns true if a non-empty entry was found.  */
static bool
remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
                 bool should_be_there)
{
  if (idx - disp >= listp->len)
    {
      if (listp->next == NULL)
        {
          /* The index is not actually valid in the slotinfo list,
             because this object was closed before it was fully set
             up due to some error.  */
          assert (! should_be_there);
        }
      else
        {
          if (remove_slotinfo (idx, listp->next, disp + listp->len,
                               should_be_there))
            return true;

          /* No non-empty entry.  Search from the end of this element's
             slotinfo array.  */
          idx = disp + listp->len;
        }
    }
  else
    {
      struct link_map *old_map = listp->slotinfo[idx - disp].map;

      /* The entry might still be in its unused state if we are closing an
         object that wasn't fully set up.  */
      if (__builtin_expect (old_map != NULL, 1))
        {
          assert (old_map->l_tls_modid == idx);

          /* Mark the entry as unused.  */
          listp->slotinfo[idx - disp].gen = GL(dl_tls_generation) + 1;
          listp->slotinfo[idx - disp].map = NULL;
        }

      /* If this is not the last currently used entry no need to look
         further.  */
      if (idx != GL(dl_tls_max_dtv_idx))
        return true;
    }

  while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
    {
      --idx;

      if (listp->slotinfo[idx - disp].map != NULL)
        {
          /* Found a new last used index.  */
          GL(dl_tls_max_dtv_idx) = idx;
          return true;
        }
    }

  /* No non-empty entry in this list element.  */
  return false;
}
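
/* Illustration of the walk above: with slotinfo list elements of, say,
   64 entries each, closing the module with TLS modid 70 clears the
   matching entry in the second list element (disp 64).  If modid 70
   was dl_tls_max_dtv_idx, the loop then scans backwards for the
   highest slot still in use and lowers dl_tls_max_dtv_idx; if no
   dynamically loaded TLS module is left at all, the caller resets it
   to dl_tls_static_nelem instead.  */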


void
_dl_close_worker (struct link_map *map)
{
  /* One less direct use.  */
  --map->l_direct_opencount;

  /* If _dl_close is called recursively (some destructor calls dlclose),
     just record that the parent _dl_close will need to do garbage collection
     again and return.  */
  static enum { not_pending, pending, rerun } dl_close_state;

  if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
      || dl_close_state != not_pending)
    {
      if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
        dl_close_state = rerun;

      /* There are still references to this object.  Do nothing more.  */
      if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
        _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
                          map->l_name, map->l_direct_opencount);

      return;
    }
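
  /* Example of the recursion guard: if a destructor run further down
     calls dlclose itself, that nested call sees dl_close_state ==
     pending, flips it to rerun, and returns immediately; once the
     outer invocation finishes its sweep it jumps back to `retry' to
     collect whatever the nested call released.  */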

  Lmid_t nsid = map->l_ns;
  struct link_namespaces *ns = &GL(dl_ns)[nsid];

 retry:
  dl_close_state = pending;

  bool any_tls = false;
  const unsigned int nloaded = ns->_ns_nloaded;
  char used[nloaded];
  char done[nloaded];
  struct link_map *maps[nloaded];

  /* Run over the list and assign indexes to the link maps and enter
     them into the MAPS array.  */
  int idx = 0;
  for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
    {
      l->l_idx = idx;
      maps[idx] = l;
      ++idx;
    }
  assert (idx == nloaded);

  /* Prepare the bitmaps.  */
  memset (used, '\0', sizeof (used));
  memset (done, '\0', sizeof (done));

  /* Keep track of the lowest index link map we have covered already.  */
  int done_index = -1;
  while (++done_index < nloaded)
    {
      struct link_map *l = maps[done_index];

      if (done[done_index])
        /* Already handled.  */
        continue;

      /* Check whether this object is still used.  */
      if (l->l_type == lt_loaded
          && l->l_direct_opencount == 0
          && (l->l_flags_1 & DF_1_NODELETE) == 0
          && !used[done_index])
        continue;

      /* We need this object and we handle it now.  */
      done[done_index] = 1;
      used[done_index] = 1;
      /* Signal the object is still needed.  */
      l->l_idx = IDX_STILL_USED;

      /* Mark all dependencies as used.  */
      if (l->l_initfini != NULL)
        {
          struct link_map **lp = &l->l_initfini[1];
          while (*lp != NULL)
            {
              if ((*lp)->l_idx != IDX_STILL_USED)
                {
                  assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);

                  if (!used[(*lp)->l_idx])
                    {
                      used[(*lp)->l_idx] = 1;
                      if ((*lp)->l_idx - 1 < done_index)
                        done_index = (*lp)->l_idx - 1;
                    }
                }

              ++lp;
            }
        }
      /* And the same for relocation dependencies.  */
      if (l->l_reldeps != NULL)
        for (unsigned int j = 0; j < l->l_reldepsact; ++j)
          {
            struct link_map *jmap = l->l_reldeps[j];

            if (jmap->l_idx != IDX_STILL_USED)
              {
                assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);

                if (!used[jmap->l_idx])
                  {
                    used[jmap->l_idx] = 1;
                    if (jmap->l_idx - 1 < done_index)
                      done_index = jmap->l_idx - 1;
                  }
              }
          }
    }
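
  /* The loop above is the mark phase of a mark-and-sweep pass over the
     namespace: anything still opened, non-deletable, or reachable from
     such an object through l_initfini or l_reldeps keeps its used[]
     flag, and rewinding done_index re-examines maps whose dependencies
     were marked late.  Everything left unmarked is swept below.  */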

  /* Sort the entries.  */
  _dl_sort_fini (ns->_ns_loaded, maps, nloaded, used, nsid);

  /* Call all termination functions at once.  */
#ifdef SHARED
  bool do_audit = GLRO(dl_naudit) > 0 && !ns->_ns_loaded->l_auditing;
#endif
  bool unload_any = false;
  unsigned int unload_global = 0;
  unsigned int first_loaded = ~0;
  for (unsigned int i = 0; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];

      /* All elements must be in the same namespace.  */
      assert (imap->l_ns == nsid);

      if (!used[i])
        {
          assert (imap->l_type == lt_loaded
                  && (imap->l_flags_1 & DF_1_NODELETE) == 0);

          /* Call its termination function.  Do not do it for
             half-cooked objects.  */
          if (imap->l_init_called)
            {
              /* When debugging print a message first.  */
              if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS,
                                    0))
                _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
                                  imap->l_name, nsid);

              if (imap->l_info[DT_FINI_ARRAY] != NULL)
                {
                  ElfW(Addr) *array =
                    (ElfW(Addr) *) (imap->l_addr
                                    + imap->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
                  unsigned int sz = (imap->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
                                     / sizeof (ElfW(Addr)));

                  while (sz-- > 0)
                    ((fini_t) array[sz]) ();
                }

              /* Next try the old-style destructor.  */
              if (imap->l_info[DT_FINI] != NULL)
                (*(void (*) (void)) DL_DT_FINI_ADDRESS
                 (imap, ((void *) imap->l_addr
                         + imap->l_info[DT_FINI]->d_un.d_ptr))) ();
            }
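
          /* Note the ordering mandated by the ELF spec: the
             DT_FINI_ARRAY entries run first, in reverse array order
             (hence the sz-- loop above), and only then the single
             old-style DT_FINI function.  */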

#ifdef SHARED
          /* Auditing checkpoint: we are closing an object.  */
          if (__builtin_expect (do_audit, 0))
            {
              struct audit_ifaces *afct = GLRO(dl_audit);
              for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
                {
                  if (afct->objclose != NULL)
                    /* Return value is ignored.  */
                    (void) afct->objclose (&imap->l_audit[cnt].cookie);

                  afct = afct->next;
                }
            }
#endif

          /* This object must not be used anymore.  */
          imap->l_removed = 1;

          /* We indeed have an object to remove.  */
          unload_any = true;

          if (imap->l_global)
            ++unload_global;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
      /* Else used[i].  */
      else if (imap->l_type == lt_loaded)
        {
          struct r_scope_elem *new_list = NULL;

          if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
            {
              /* The object is still used.  But one of the objects we are
                 unloading right now is responsible for loading it.  If
                 the current object does not have its own scope yet we
                 have to create one.  This has to be done before running
                 the finalizers.

                 To do this count the number of dependencies.  */
              unsigned int cnt;
              for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
                ;

              /* We simply reuse the l_initfini list.  */
              imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
              imap->l_searchlist.r_nlist = cnt;

              new_list = &imap->l_searchlist;
            }
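
          /* The reuse above relies on the layout _dl_map_object_deps
             (elf/dl-deps.c) creates: l_initfini[0] is the map itself,
             the dependencies follow, entry cnt is the terminating
             NULL, and starting at cnt + 1 the same list is stored a
             second time in the original search order, which is what
             the search list can point at without a new allocation.  */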

          /* Count the number of scopes which remain after the unload.
             If we add the local search list, count it too.  Always add
             one for the terminating NULL pointer.  */
          size_t remain = (new_list != NULL) + 1;
          bool removed_any = false;
          for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
            /* This relies on l_scope[] entries being always set either
               to its own l_symbolic_searchlist address, or some map's
               l_searchlist address.  */
            if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
              {
                struct link_map *tmap = (struct link_map *)
                  ((char *) imap->l_scope[cnt]
                   - offsetof (struct link_map, l_searchlist));
                assert (tmap->l_ns == nsid);
                if (tmap->l_idx == IDX_STILL_USED)
                  ++remain;
                else
                  removed_any = true;
              }
            else
              ++remain;

          if (removed_any)
            {
              /* Always allocate a new array for the scope.  This is
                 necessary since we must be able to determine the last
                 user of the current array.  If possible use the link map's
                 memory.  */
              size_t new_size;
              struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

              if (imap->l_scope != imap->l_scope_mem
                  && remain < SCOPE_ELEMS (imap))
                {
                  new_size = SCOPE_ELEMS (imap);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlclose", NULL,
                                      N_("cannot create scope list"));
                }

              /* Copy over the remaining scope elements.  */
              remain = 0;
              for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
                {
                  if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
                    {
                      struct link_map *tmap = (struct link_map *)
                        ((char *) imap->l_scope[cnt]
                         - offsetof (struct link_map, l_searchlist));
                      if (tmap->l_idx != IDX_STILL_USED)
                        {
                          /* Remove the scope.  Or replace with own map's
                             scope.  */
                          if (new_list != NULL)
                            {
                              newp[remain++] = new_list;
                              new_list = NULL;
                            }
                          continue;
                        }
                    }

                  newp[remain++] = imap->l_scope[cnt];
                }
              newp[remain] = NULL;

              struct r_scope_elem **old = imap->l_scope;

              if (RTLD_SINGLE_THREAD_P)
                imap->l_scope = newp;
              else
                {
                  __rtld_mrlock_change (imap->l_scope_lock);
                  imap->l_scope = newp;
                  __rtld_mrlock_done (imap->l_scope_lock);
                }

              /* No user anymore, we can free it now.  */
              if (old != imap->l_scope_mem)
                free (old);

              imap->l_scope_max = new_size;
            }
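
          /* The swap above never edits the scope array in place:
             another thread may be walking the old array during a
             symbol lookup, so a fresh array is published and the old
             one is freed only after the mr-lock hand-off (or right
             away when RTLD_SINGLE_THREAD_P says no other thread can
             be looking).  */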

          /* The loader is gone, so mark the object as not having one.
             Note: l_idx != IDX_STILL_USED -> object will be removed.  */
          if (imap->l_loader != NULL
              && imap->l_loader->l_idx != IDX_STILL_USED)
            imap->l_loader = NULL;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
    }

  /* If there are no objects to unload, do nothing further.  */
  if (!unload_any)
    goto out;

#ifdef SHARED
  /* Auditing checkpoint: we will start deleting objects.  */
  if (__builtin_expect (do_audit, 0))
    {
      struct link_map *head = ns->_ns_loaded;
      struct audit_ifaces *afct = GLRO(dl_audit);
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie, LA_ACT_DELETE);

              afct = afct->next;
            }
        }
    }
#endif

  /* Notify the debugger we are about to remove some loaded objects.  */
  struct r_debug *r = _dl_debug_initialize (0, nsid);
  r->r_state = RT_DELETE;
  _dl_debug_state ();

  if (unload_global)
    {
      /* Some objects are in the global scope list.  Remove them.  */
      struct r_scope_elem *ns_msl = ns->_ns_main_searchlist;
      unsigned int i;
      unsigned int j = 0;
      unsigned int cnt = ns_msl->r_nlist;

      while (cnt > 0 && ns_msl->r_list[cnt - 1]->l_removed)
        --cnt;

      if (cnt + unload_global == ns_msl->r_nlist)
        /* Speed up removing most recently added objects.  */
        j = cnt;
      else
        for (i = 0; i < cnt; i++)
          if (ns_msl->r_list[i]->l_removed == 0)
            {
              if (i != j)
                ns_msl->r_list[j] = ns_msl->r_list[i];
              j++;
            }
      ns_msl->r_nlist = j;

      if (!RTLD_SINGLE_THREAD_P)
        THREAD_GSCOPE_WAIT ();
    }
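
  /* THREAD_GSCOPE_WAIT blocks until every thread has left its
     global-scope lookup section, so past this point no symbol lookup
     can still be iterating over the entries that were just removed
     from the global search list.  */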

  size_t tls_free_start;
  size_t tls_free_end;
  tls_free_start = tls_free_end = NO_TLS_OFFSET;

  /* Check each element of the search list to see if all references to
     it are gone.  */
  for (unsigned int i = first_loaded; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];
      if (!used[i])
        {
          assert (imap->l_type == lt_loaded);

          /* That was the last reference, and this was a dlopen-loaded
             object.  We can unmap it.  */

          /* Remove the object from the dtv slotinfo array if it uses TLS.  */
          if (__builtin_expect (imap->l_tls_blocksize > 0, 0))
            {
              any_tls = true;

              if (GL(dl_tls_dtv_slotinfo_list) != NULL
                  && ! remove_slotinfo (imap->l_tls_modid,
                                        GL(dl_tls_dtv_slotinfo_list), 0,
                                        imap->l_init_called))
                /* All dynamically loaded modules with TLS are unloaded.  */
                GL(dl_tls_max_dtv_idx) = GL(dl_tls_static_nelem);

              if (imap->l_tls_offset != NO_TLS_OFFSET)
                {
                  /* Collect a contiguous chunk built from the objects in
                     this search list, going in either direction.  When the
                     whole chunk is at the end of the used area then we can
                     reclaim it.  */
#if TLS_TCB_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET
                      || (size_t) imap->l_tls_offset == tls_free_start)
                    {
                      /* Extend the contiguous chunk being reclaimed.  */
                      tls_free_start
                        = imap->l_tls_offset - imap->l_tls_blocksize;

                      if (tls_free_end == NO_TLS_OFFSET)
                        tls_free_end = imap->l_tls_offset;
                    }
                  else if (imap->l_tls_offset - imap->l_tls_blocksize
                           == tls_free_end)
                    /* Extend the chunk backwards.  */
                    tls_free_end = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked unless we can free
                         one block right away.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        {
                          GL(dl_tls_static_used) = tls_free_start;
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                      else if ((size_t) imap->l_tls_offset
                               == GL(dl_tls_static_used))
                        GL(dl_tls_static_used)
                          = imap->l_tls_offset - imap->l_tls_blocksize;
                      else if (tls_free_end < (size_t) imap->l_tls_offset)
                        {
                          /* We pick the later block.  It has a chance to
                             be freed.  */
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                    }
#elif TLS_DTV_AT_TP
                  if ((size_t) imap->l_tls_offset == tls_free_end)
                    /* Extend the contiguous chunk being reclaimed.  */
                    tls_free_end -= imap->l_tls_blocksize;
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == tls_free_start)
                    /* Extend the chunk backwards.  */
                    tls_free_start = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        GL(dl_tls_static_used) = tls_free_start;
                      tls_free_start = imap->l_tls_offset;
                      tls_free_end = tls_free_start + imap->l_tls_blocksize;
                    }
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
                }
            }
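
          /* Worked example (TLS_TCB_AT_TP, where blocks live below the
             thread pointer and l_tls_offset is the block's upper end):
             say dl_tls_static_used is 0x180 and we free a module whose
             block spans [0x100, 0x180).  The chunk ends exactly at the
             high-water mark, so dl_tls_static_used can drop to 0x100
             and the space becomes reusable by a later dlopen; a block
             in the middle of the used area can only be merged with
             neighbouring freed blocks or leaked.  (The offsets here
             are made up for illustration.)  */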

          /* We can unmap all the maps at once.  We determined the
             start address and length when we loaded the object and
             the `munmap' call does the rest.  */
          DL_UNMAP (imap);

          /* Finally, unlink the data structure and free it.  */
          if (imap->l_prev != NULL)
            imap->l_prev->l_next = imap->l_next;
          else
            {
#ifdef SHARED
              assert (nsid != LM_ID_BASE);
#endif
              ns->_ns_loaded = imap->l_next;
            }

          --ns->_ns_nloaded;
          if (imap->l_next != NULL)
            imap->l_next->l_prev = imap->l_prev;

          free (imap->l_versions);
          if (imap->l_origin != (char *) -1)
            free ((char *) imap->l_origin);

          free (imap->l_reldeps);

          /* Print debugging message.  */
          if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
            _dl_debug_printf ("\nfile=%s [%lu]; destroying link map\n",
                              imap->l_name, imap->l_ns);

          /* This name is always allocated.  */
          free (imap->l_name);

          /* Remove the list with all the names of the shared object.  */
          struct libname_list *lnp = imap->l_libname;
          do
            {
              struct libname_list *this = lnp;
              lnp = lnp->next;
              if (!this->dont_free)
                free (this);
            }
          while (lnp != NULL);

          /* Remove the searchlists.  */
          free (imap->l_initfini);

          /* Remove the scope array if we allocated it.  */
          if (imap->l_scope != imap->l_scope_mem)
            free (imap->l_scope);

          if (imap->l_phdr_allocated)
            free ((void *) imap->l_phdr);

          if (imap->l_rpath_dirs.dirs != (void *) -1)
            free (imap->l_rpath_dirs.dirs);
          if (imap->l_runpath_dirs.dirs != (void *) -1)
            free (imap->l_runpath_dirs.dirs);

          free (imap);
        }
    }

  /* If we removed any object which uses TLS bump the generation counter.  */
  if (any_tls)
    {
      if (__builtin_expect (++GL(dl_tls_generation) == 0, 0))
        _dl_fatal_printf ("TLS generation counter wrapped!  Please report as described in <http://www.gnu.org/software/libc/bugs.html>.\n");

      if (tls_free_end == GL(dl_tls_static_used))
        GL(dl_tls_static_used) = tls_free_start;
    }

#ifdef SHARED
  /* Auditing checkpoint: we have deleted all objects.  */
  if (__builtin_expect (do_audit, 0))
    {
      struct link_map *head = ns->_ns_loaded;
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          struct audit_ifaces *afct = GLRO(dl_audit);
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie, LA_ACT_CONSISTENT);

              afct = afct->next;
            }
        }
    }
#endif

  /* Notify the debugger those objects are finalized and gone.  */
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();

  /* Recheck if we need to retry, release the lock.  */
 out:
  if (dl_close_state == rerun)
    goto retry;

  dl_close_state = not_pending;
}


void
_dl_close (void *_map)
{
  struct link_map *map = _map;

  /* First see whether we can remove the object at all.  */
  if (__builtin_expect (map->l_flags_1 & DF_1_NODELETE, 0))
    {
      assert (map->l_init_called);
      /* Nope.  Do nothing.  */
      return;
    }

  if (__builtin_expect (map->l_direct_opencount, 1) == 0)
    GLRO(dl_signal_error) (0, map->l_name, NULL, N_("shared object not open"));

  /* Acquire the lock.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  _dl_close_worker (map);

  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}
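
/* For orientation, a sketch of how user code reaches this entry point
   (the dlclose wrapper lives in dlfcn/dlclose.c and funnels the handle
   here through the libdl error machinery):

     void *h = dlopen ("libfoo.so", RTLD_LAZY);   // made-up library name
     ...
     dlclose (h);   // ends up calling _dl_close (h)
*/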


static bool __libc_freeres_fn_section
free_slotinfo (struct dtv_slotinfo_list **elemp)
{
  size_t cnt;

  if (*elemp == NULL)
    /* Nothing here, all is removed (or there never was anything).  */
    return true;

  if (!free_slotinfo (&(*elemp)->next))
    /* We cannot free the entry.  */
    return false;

  /* That cleared our next pointer for us.  */

  for (cnt = 0; cnt < (*elemp)->len; ++cnt)
    if ((*elemp)->slotinfo[cnt].map != NULL)
      /* Still used.  */
      return false;

  /* We can remove the list element.  */
  free (*elemp);
  *elemp = NULL;

  return true;
}


libc_freeres_fn (free_mem)
{
  for (Lmid_t nsid = 0; nsid < DL_NNS; ++nsid)
    if (__builtin_expect (GL(dl_ns)[nsid]._ns_global_scope_alloc, 0) != 0
        && (GL(dl_ns)[nsid]._ns_main_searchlist->r_nlist
            // XXX Check whether we need NS-specific initial_searchlist
            == GLRO(dl_initial_searchlist).r_nlist))
      {
        /* All objects dynamically loaded by the program are unloaded.  Free
           the memory allocated for the global scope variable.  */
        struct link_map **old = GL(dl_ns)[nsid]._ns_main_searchlist->r_list;

        /* Put the old map in.  */
        GL(dl_ns)[nsid]._ns_main_searchlist->r_list
          // XXX Check whether we need NS-specific initial_searchlist
          = GLRO(dl_initial_searchlist).r_list;

        /* Signal that the original map is used.  */
        GL(dl_ns)[nsid]._ns_global_scope_alloc = 0;

        /* Now free the old map.  */
        free (old);
      }

  if (USE___THREAD || GL(dl_tls_dtv_slotinfo_list) != NULL)
    {
      /* Free the memory allocated for the dtv slotinfo array.  We can do
         this only if all modules which used this memory are unloaded.  */
#ifdef SHARED
      if (GL(dl_initial_dtv) == NULL)
        /* There was no initial TLS setup, it was set up later when
           it used the normal malloc.  */
        free_slotinfo (&GL(dl_tls_dtv_slotinfo_list));
      else
#endif
        /* The first element of the list does not have to be deallocated.
           It was allocated in the dynamic linker (i.e., with a different
           malloc), and in the static library it's in .bss space.  */
        free_slotinfo (&GL(dl_tls_dtv_slotinfo_list)->next);
    }
}