/* Close a shared object opened by `_dl_open'.
   Copyright (C) 1996-2005, 2006 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <bits/libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sysdep-cancel.h>

/* Type of the destructor functions.  */
typedef void (*fini_t) (void);

/* Special l_idx value used to indicate which objects remain loaded.  */
#define IDX_STILL_USED -1

#ifdef USE_TLS
/* Returns true if a non-empty entry was found.  */
static bool
remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
                 bool should_be_there)
{
  if (idx - disp >= listp->len)
    {
      if (listp->next == NULL)
        {
          /* The index is not actually valid in the slotinfo list,
             because this object was closed before it was fully set
             up due to some error.  */
          assert (! should_be_there);
        }
      else
        {
          if (remove_slotinfo (idx, listp->next, disp + listp->len,
                               should_be_there))
            return true;

          /* No non-empty entry.  Search from the end of this element's
             slotinfo array.  */
          idx = disp + listp->len;
        }
    }
  else
    {
      struct link_map *old_map = listp->slotinfo[idx - disp].map;

      /* The entry might still be in its unused state if we are closing an
         object that wasn't fully set up.  */
      if (__builtin_expect (old_map != NULL, 1))
        {
          assert (old_map->l_tls_modid == idx);

          /* Mark the entry as unused.  */
          listp->slotinfo[idx - disp].gen = GL(dl_tls_generation) + 1;
          listp->slotinfo[idx - disp].map = NULL;
        }

      /* If this is not the last currently used entry no need to look
         further.  */
      if (idx != GL(dl_tls_max_dtv_idx))
        return true;
    }
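  /* We just cleared the highest used DTV index.  Scan backwards for the
     next-highest slot that is still in use; the static module set at the
     start of the first list element is never a candidate.  */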
  while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
    {
      --idx;

      if (listp->slotinfo[idx - disp].map != NULL)
        {
          /* Found a new last used index.  */
          GL(dl_tls_max_dtv_idx) = idx;
          return true;
        }
    }

  /* No non-empty entry in this list element.  */
  return false;
}
#endif

void
_dl_close_worker (struct link_map *map)
{
  Lmid_t ns = map->l_ns;

  /* One less direct use.  */
  --map->l_direct_opencount;

  /* If _dl_close is called recursively (some destructor calls dlclose),
     just record that the parent _dl_close will need to do garbage collection
     again and return.  */
  static enum { not_pending, pending, rerun } dl_close_state;

  if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
      || dl_close_state != not_pending)
    {
      if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
        dl_close_state = rerun;

      /* There are still references to this object.  Do nothing more.  */
      if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
        _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
                          map->l_name, map->l_direct_opencount);

      return;
    }

 retry:
  dl_close_state = pending;
#ifdef USE_TLS
  bool any_tls = false;
#endif
  const unsigned int nloaded = GL(dl_ns)[ns]._ns_nloaded;
  char used[nloaded];
  char done[nloaded];
  struct link_map *maps[nloaded];

  /* Run over the list and assign indexes to the link maps and enter
     them into the MAPS array.  */
  int idx = 0;
  for (struct link_map *l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
    {
      l->l_idx = idx;
      maps[idx] = l;
      ++idx;
    }
  assert (idx == nloaded);
  /* Prepare the bitmaps.  */
  memset (used, '\0', sizeof (used));
  memset (done, '\0', sizeof (done));
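  /* What follows is the mark phase of a simple garbage collection over
     the namespace: any map that is still directly opened, not deletable,
     or reachable from such a map is marked used.  When marking a
     dependency whose index lies below DONE_INDEX we rewind DONE_INDEX so
     that the dependency's own dependencies get marked as well.  */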
  /* Keep track of the lowest index link map we have covered already.  */
  int done_index = -1;
  while (++done_index < nloaded)
    {
      struct link_map *l = maps[done_index];

      if (done[done_index])
        /* Already handled.  */
        continue;

      /* Check whether this object is still used.  */
      if (l->l_type == lt_loaded
          && l->l_direct_opencount == 0
          && (l->l_flags_1 & DF_1_NODELETE) == 0
          && !used[done_index])
        continue;

      /* We need this object and we handle it now.  */
      done[done_index] = 1;
      used[done_index] = 1;
      /* Signal the object is still needed.  */
      l->l_idx = IDX_STILL_USED;

      /* Mark all dependencies as used.  */
      if (l->l_initfini != NULL)
        {
          struct link_map **lp = &l->l_initfini[1];
          while (*lp != NULL)
            {
              if ((*lp)->l_idx != IDX_STILL_USED)
                {
                  assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);

                  if (!used[(*lp)->l_idx])
                    {
                      used[(*lp)->l_idx] = 1;
                      if ((*lp)->l_idx - 1 < done_index)
                        done_index = (*lp)->l_idx - 1;
                    }
                }

              ++lp;
            }
        }
      /* And the same for relocation dependencies.  */
      if (l->l_reldeps != NULL)
        for (unsigned int j = 0; j < l->l_reldepsact; ++j)
          {
            struct link_map *jmap = l->l_reldeps[j];

            if (jmap->l_idx != IDX_STILL_USED)
              {
                assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);

                if (!used[jmap->l_idx])
                  {
                    used[jmap->l_idx] = 1;
                    if (jmap->l_idx - 1 < done_index)
                      done_index = jmap->l_idx - 1;
                  }
              }
          }
    }
  /* Sort the entries.  */
  _dl_sort_fini (GL(dl_ns)[ns]._ns_loaded, maps, nloaded, used, ns);
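  /* After sorting, MAPS is in the order in which destructors may safely
     run: objects appear before the objects they depend on, so dependents
     are finalized first.  */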
  /* Call all termination functions at once.  */
#ifdef SHARED
  bool do_audit = GLRO(dl_naudit) > 0 && !GL(dl_ns)[ns]._ns_loaded->l_auditing;
#endif
  bool unload_any = false;
  unsigned int first_loaded = ~0;
  for (unsigned int i = 0; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];

      /* All elements must be in the same namespace.  */
      assert (imap->l_ns == ns);

      if (!used[i])
        {
          assert (imap->l_type == lt_loaded
                  && (imap->l_flags_1 & DF_1_NODELETE) == 0);

          /* Call its termination function.  Do not do it for
             half-cooked objects.  */
          if (imap->l_init_called)
            {
              /* When debugging print a message first.  */
              if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS,
                                    0))
                _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
                                  imap->l_name, ns);
              if (imap->l_info[DT_FINI_ARRAY] != NULL)
                {
                  ElfW(Addr) *array =
                    (ElfW(Addr) *) (imap->l_addr
                                    + imap->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
                  unsigned int sz = (imap->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
                                     / sizeof (ElfW(Addr)));
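                  /* The ELF spec requires DT_FINI_ARRAY entries to run in
                     reverse order of their appearance in the array, hence
                     the downward loop.  */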
                  while (sz-- > 0)
                    ((fini_t) array[sz]) ();
                }

              /* Next try the old-style destructor.  */
              if (imap->l_info[DT_FINI] != NULL)
                (*(void (*) (void)) DL_DT_FINI_ADDRESS
                 (imap, ((void *) imap->l_addr
                         + imap->l_info[DT_FINI]->d_un.d_ptr))) ();
            }
#ifdef SHARED
          /* Auditing checkpoint: we are about to remove this object.  */
          if (__builtin_expect (do_audit, 0))
            {
              struct audit_ifaces *afct = GLRO(dl_audit);
              for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
                {
                  if (afct->objclose != NULL)
                    /* Return value is ignored.  */
                    (void) afct->objclose (&imap->l_audit[cnt].cookie);

                  afct = afct->next;
                }
            }
#endif
          /* This object must not be used anymore.  */
          imap->l_removed = 1;

          /* We indeed have an object to remove.  */
          unload_any = true;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
      /* Else used[i].  */
      else if (imap->l_type == lt_loaded)
        {
          struct r_scope_elem *new_list = NULL;

          if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
            {
              /* The object is still used.  But one of the objects we are
                 unloading right now is responsible for loading it.  If
                 the current object does not have its own scope yet we
                 have to create one.  This has to be done before running
                 the finalizers.

                 To do this count the number of dependencies.  */
              unsigned int cnt;
              for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
                ;

              /* We simply reuse the l_initfini list.  */
              imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
              imap->l_searchlist.r_nlist = cnt;

              new_list = &imap->l_searchlist;
            }
          /* Count the number of scopes which remain after the unload.
             When we add the local search list count it.  Always add
             one for the terminating NULL pointer.  */
          size_t remain = (new_list != NULL) + 1;
          bool removed_any = false;
          for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
            /* This relies on l_scope[] entries being always set either
               to its own l_symbolic_searchlist address, or some map's
               l_searchlist address.  */
            if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
              {
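                /* The entry points at an l_searchlist embedded in some
                   link map; container-of arithmetic recovers the owning
                   map so we can check whether it survives the unload.  */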
                struct link_map *tmap = (struct link_map *)
                  ((char *) imap->l_scope[cnt]
                   - offsetof (struct link_map, l_searchlist));
                assert (tmap->l_ns == ns);
                if (tmap->l_idx == IDX_STILL_USED)
                  ++remain;
                else
                  removed_any = true;
              }
            else
              ++remain;
          if (removed_any)
            {
              /* Always allocate a new array for the scope.  This is
                 necessary since we must be able to determine the last
                 user of the current array.  If possible use the link map's
                 memory.  */
              size_t new_size;
              struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

              if (imap->l_scope != imap->l_scope_mem
                  && remain < SCOPE_ELEMS (imap))
                {
                  new_size = SCOPE_ELEMS (imap);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlclose", NULL,
                                      N_("cannot create scope list"));
                }
              /* Copy over the remaining scope elements.  */
              remain = 0;
              for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
                {
                  if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
                    {
                      struct link_map *tmap = (struct link_map *)
                        ((char *) imap->l_scope[cnt]
                         - offsetof (struct link_map, l_searchlist));
                      if (tmap->l_idx != IDX_STILL_USED)
                        {
                          /* Remove the scope.  Or replace with own map's
                             scope.  */
                          if (new_list != NULL)
                            {
                              newp[remain++] = new_list;
                              new_list = NULL;
                            }
                          continue;
                        }
                    }

                  newp[remain++] = imap->l_scope[cnt];
                }
              newp[remain] = NULL;
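              /* Publish the new scope array.  In the multi-threaded case
                 the mrlock ensures no other thread is still walking the
                 old array before it is freed below.  */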
              struct r_scope_elem **old = imap->l_scope;

              if (SINGLE_THREAD_P)
                imap->l_scope = newp;
              else
                {
                  __rtld_mrlock_change (imap->l_scope_lock);
                  imap->l_scope = newp;
                  __rtld_mrlock_done (imap->l_scope_lock);
                }

              /* No user anymore, we can free it now.  */
              if (old != imap->l_scope_mem)
                free (old);

              imap->l_scope_max = new_size;
            }
          /* The loader is gone, so mark the object as not having one.
             Note: l_idx != IDX_STILL_USED -> object will be removed.  */
          if (imap->l_loader != NULL
              && imap->l_loader->l_idx != IDX_STILL_USED)
            imap->l_loader = NULL;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
    }
  /* If there are no objects to unload, do nothing further.  */
  if (!unload_any)
    goto out;
#ifdef SHARED
  /* Auditing checkpoint: we will start deleting objects.  */
  if (__builtin_expect (do_audit, 0))
    {
      struct link_map *head = GL(dl_ns)[ns]._ns_loaded;
      struct audit_ifaces *afct = GLRO(dl_audit);
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie, LA_ACT_DELETE);

              afct = afct->next;
            }
        }
    }
#endif
  /* Notify the debugger we are about to remove some loaded objects.  */
  struct r_debug *r = _dl_debug_initialize (0, ns);
  r->r_state = RT_DELETE;
  _dl_debug_state ();
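  /* Debuggers place a breakpoint in _dl_debug_state; with r_state set to
     RT_DELETE they know the link map list is about to change and can
     re-examine it before the mappings disappear.  */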
#ifdef USE_TLS
  size_t tls_free_start;
  size_t tls_free_end;
  tls_free_start = tls_free_end = NO_TLS_OFFSET;
#endif
  /* Check each element of the search list to see if all references to
     it are gone.  */
  for (unsigned int i = first_loaded; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];
      if (!used[i])
        {
          assert (imap->l_type == lt_loaded);

          /* That was the last reference, and this was a dlopen-loaded
             object.  We can unmap it.  */
          if (__builtin_expect (imap->l_global, 0))
            {
              /* This object is in the global scope list.  Remove it.  */
              unsigned int cnt = GL(dl_ns)[ns]._ns_main_searchlist->r_nlist;

              do
                --cnt;
              while (GL(dl_ns)[ns]._ns_main_searchlist->r_list[cnt] != imap);

              /* The object was already correctly registered.  */
              while (++cnt
                     < GL(dl_ns)[ns]._ns_main_searchlist->r_nlist)
                GL(dl_ns)[ns]._ns_main_searchlist->r_list[cnt - 1]
                  = GL(dl_ns)[ns]._ns_main_searchlist->r_list[cnt];

              --GL(dl_ns)[ns]._ns_main_searchlist->r_nlist;
            }
#ifdef USE_TLS
          /* Remove the object from the dtv slotinfo array if it uses
             TLS.  */
          if (__builtin_expect (imap->l_tls_blocksize > 0, 0))
            {
              any_tls = true;

              if (GL(dl_tls_dtv_slotinfo_list) != NULL
                  && ! remove_slotinfo (imap->l_tls_modid,
                                        GL(dl_tls_dtv_slotinfo_list), 0,
                                        imap->l_init_called))
                /* All dynamically loaded modules with TLS are unloaded.  */
                GL(dl_tls_max_dtv_idx) = GL(dl_tls_static_nelem);
              if (imap->l_tls_offset != NO_TLS_OFFSET)
                {
                  /* Collect a contiguous chunk built from the objects in
                     this search list, going in either direction.  When the
                     whole chunk is at the end of the used area then we can
                     reclaim it.  */
# if TLS_TCB_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET
                      || (size_t) imap->l_tls_offset == tls_free_start)
                    {
                      /* Extend the contiguous chunk being reclaimed.  */
                      tls_free_start
                        = imap->l_tls_offset - imap->l_tls_blocksize;

                      if (tls_free_end == NO_TLS_OFFSET)
                        tls_free_end = imap->l_tls_offset;
                    }
                  else if (imap->l_tls_offset - imap->l_tls_blocksize
                           == tls_free_end)
                    /* Extend the chunk backwards.  */
                    tls_free_end = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked unless we can free
                         one block right away.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        {
                          GL(dl_tls_static_used) = tls_free_start;
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                      else if ((size_t) imap->l_tls_offset
                               == GL(dl_tls_static_used))
                        GL(dl_tls_static_used)
                          = imap->l_tls_offset - imap->l_tls_blocksize;
                      else if (tls_free_end < (size_t) imap->l_tls_offset)
                        {
                          /* We pick the later block.  It has a chance to
                             be freed.  */
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                    }
# elif TLS_DTV_AT_TP
                  if ((size_t) imap->l_tls_offset == tls_free_end)
                    /* Extend the contiguous chunk being reclaimed.  */
                    tls_free_end -= imap->l_tls_blocksize;
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == tls_free_start)
                    /* Extend the chunk backwards.  */
                    tls_free_start = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        GL(dl_tls_static_used) = tls_free_start;
                      tls_free_start = imap->l_tls_offset;
                      tls_free_end = tls_free_start + imap->l_tls_blocksize;
                    }
# else
#  error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif
                }
            }
#endif
          /* We can unmap all the maps at once.  We determined the
             start address and length when we loaded the object and
             the `munmap' call does the rest.  */
          DL_UNMAP (imap);

          /* Finally, unlink the data structure and free it.  */
          if (imap->l_prev != NULL)
            imap->l_prev->l_next = imap->l_next;
          else
            {
#ifdef SHARED
              assert (ns != LM_ID_BASE);
#endif
              GL(dl_ns)[ns]._ns_loaded = imap->l_next;
            }

          --GL(dl_ns)[ns]._ns_nloaded;
          if (imap->l_next != NULL)
            imap->l_next->l_prev = imap->l_prev;
          free (imap->l_versions);
          if (imap->l_origin != (char *) -1)
            free ((char *) imap->l_origin);

          free (imap->l_reldeps);

          /* Print debugging message.  */
          if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
            _dl_debug_printf ("\nfile=%s [%lu];  destroying link map\n",
                              imap->l_name, imap->l_ns);

          /* This name always is allocated.  */
          free (imap->l_name);

          /* Remove the list with all the names of the shared object.  */
          struct libname_list *lnp = imap->l_libname;
          do
            {
              struct libname_list *this = lnp;
              lnp = lnp->next;
              if (!this->dont_free)
                free (this);
            }
          while (lnp != NULL);
          /* Remove the searchlists.  */
          free (imap->l_initfini);

          /* Remove the scope array if we allocated it.  */
          if (imap->l_scope != imap->l_scope_mem)
            free (imap->l_scope);

          if (imap->l_phdr_allocated)
            free ((void *) imap->l_phdr);

          if (imap->l_rpath_dirs.dirs != (void *) -1)
            free (imap->l_rpath_dirs.dirs);
          if (imap->l_runpath_dirs.dirs != (void *) -1)
            free (imap->l_runpath_dirs.dirs);

          free (imap);
        }
    }
#ifdef USE_TLS
  /* If we removed any object which uses TLS bump the generation counter.  */
  if (any_tls)
    {
      if (__builtin_expect (++GL(dl_tls_generation) == 0, 0))
        _dl_fatal_printf ("TLS generation counter wrapped!  Please report as described in <http://www.gnu.org/software/libc/bugs.html>.\n");

      if (tls_free_end == GL(dl_tls_static_used))
        GL(dl_tls_static_used) = tls_free_start;
    }
#endif
#ifdef SHARED
  /* Auditing checkpoint: we have deleted all objects.  */
  if (__builtin_expect (do_audit, 0))
    {
      struct link_map *head = GL(dl_ns)[ns]._ns_loaded;
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          struct audit_ifaces *afct = GLRO(dl_audit);
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie,
                                LA_ACT_CONSISTENT);

              afct = afct->next;
            }
        }
    }
#endif
  /* Notify the debugger those objects are finalized and gone.  */
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();

  /* Recheck if we need to retry, release the lock.  */
 out:
  if (dl_close_state == rerun)
    goto retry;

  dl_close_state = not_pending;
}

void
_dl_close (void *_map)
{
  struct link_map *map = _map;

  /* First see whether we can remove the object at all.  */
  if (__builtin_expect (map->l_flags_1 & DF_1_NODELETE, 0))
    {
      assert (map->l_init_called);
      /* Nope.  Do nothing.  */
      return;
    }

  if (__builtin_expect (map->l_direct_opencount, 1) == 0)
    GLRO(dl_signal_error) (0, map->l_name, NULL,
                           N_("shared object not open"));

  /* Acquire the lock.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  _dl_close_worker (map);

  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}

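/* Illustrative only: the public dlclose in dlfcn funnels into the
   _dl_close entry point above, so a user-level sequence like

       void *h = dlopen ("libfoo.so", RTLD_NOW);   -- "libfoo.so" is a
       ...                                            hypothetical library
       dlclose (h);

   drives the whole garbage-collection pass in _dl_close_worker.  */
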
#ifdef USE_TLS
static bool __libc_freeres_fn_section
free_slotinfo (struct dtv_slotinfo_list **elemp)
{
  size_t cnt;

  if (*elemp == NULL)
    /* Nothing here, all is removed (or there never was anything).  */
    return true;

  if (!free_slotinfo (&(*elemp)->next))
    /* We cannot free the entry.  */
    return false;

  /* That cleared our next pointer for us.  */

  for (cnt = 0; cnt < (*elemp)->len; ++cnt)
    if ((*elemp)->slotinfo[cnt].map != NULL)
      /* Still used.  */
      return false;

  /* We can remove the list element.  */
  free (*elemp);
  *elemp = NULL;

  return true;
}
#endif

libc_freeres_fn (free_mem)
{
  for (Lmid_t ns = 0; ns < DL_NNS; ++ns)
    if (__builtin_expect (GL(dl_ns)[ns]._ns_global_scope_alloc, 0) != 0
        && (GL(dl_ns)[ns]._ns_main_searchlist->r_nlist
            // XXX Check whether we need NS-specific initial_searchlist
            == GLRO(dl_initial_searchlist).r_nlist))
      {
        /* All objects dynamically loaded by the program are unloaded.  Free
           the memory allocated for the global scope variable.  */
        struct link_map **old = GL(dl_ns)[ns]._ns_main_searchlist->r_list;

        /* Put the old map in.  */
        GL(dl_ns)[ns]._ns_main_searchlist->r_list
          // XXX Check whether we need NS-specific initial_searchlist
          = GLRO(dl_initial_searchlist).r_list;
        /* Signal that the original map is used.  */
        GL(dl_ns)[ns]._ns_global_scope_alloc = 0;

        /* Now free the old map.  */
        free (old);
      }
#ifdef USE_TLS
  if (USE___THREAD || GL(dl_tls_dtv_slotinfo_list) != NULL)
    {
      /* Free the memory allocated for the dtv slotinfo array.  We can do
         this only if all modules which used this memory are unloaded.  */
# ifdef SHARED
      if (GL(dl_initial_dtv) == NULL)
        /* There was no initial TLS setup, it was set up later when
           it used the normal malloc.  */
        free_slotinfo (&GL(dl_tls_dtv_slotinfo_list));
      else
# endif
        /* The first element of the list does not have to be deallocated.
           It was allocated in the dynamic linker (i.e., with a different
           malloc), and in the static library it's in .bss space.  */
        free_slotinfo (&GL(dl_tls_dtv_slotinfo_list)->next);
    }
#endif
}