/* Close a shared object opened by `_dl_open'.
   Copyright (C) 1996-2005, 2006 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <bits/libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sysdep-cancel.h>

/* Type of the destructor functions.  */
typedef void (*fini_t) (void);


/* Special l_idx value used to indicate which objects remain loaded.  */
#define IDX_STILL_USED -1
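/* While _dl_close_worker runs, each link map's l_idx field is temporarily
   repurposed as an index into the local maps[]/used[]/done[] arrays it
   builds; maps that must survive the unload are marked with
   IDX_STILL_USED instead.  */
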
/* Returns true if a non-empty entry was found.  */
static bool
remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
                 bool should_be_there)
{
  if (idx - disp >= listp->len)
    {
      if (listp->next == NULL)
        {
          /* The index is not actually valid in the slotinfo list,
             because this object was closed before it was fully set
             up due to some error.  */
          assert (! should_be_there);
        }
      else
        {
          if (remove_slotinfo (idx, listp->next, disp + listp->len,
                               should_be_there))
            return true;

          /* No non-empty entry.  Search from the end of this element's
             slotinfo array.  */
          idx = disp + listp->len;
        }
    }
  else
    {
      struct link_map *old_map = listp->slotinfo[idx - disp].map;

      /* The entry might still be in its unused state if we are closing an
         object that wasn't fully set up.  */
      if (__builtin_expect (old_map != NULL, 1))
        {
          assert (old_map->l_tls_modid == idx);

          /* Mark the entry as unused.  */
          listp->slotinfo[idx - disp].gen = GL(dl_tls_generation) + 1;
          listp->slotinfo[idx - disp].map = NULL;
        }

      /* If this is not the last currently used entry no need to look
         further.  */
      if (idx != GL(dl_tls_max_dtv_idx))
        return true;
    }

  while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
    {
      --idx;

      if (listp->slotinfo[idx - disp].map != NULL)
        {
          /* Found a new last used index.  */
          GL(dl_tls_max_dtv_idx) = idx;
          return true;
        }
    }

  /* No non-empty entry in this list element.  */
  return false;
}
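
/* The worker below does the real job of dlclose: after MAP loses one
   direct reference it determines, with a mark phase over the whole
   namespace, which objects have become unreferenced, runs their
   destructors, repairs the scopes and loader links of the survivors,
   releases the TLS bookkeeping, and finally unmaps the dead objects and
   frees their link maps.  The caller must hold GL(dl_load_lock).  */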
void
_dl_close_worker (struct link_map *map)
{
  Lmid_t ns = map->l_ns;

  /* One less direct use.  */
  --map->l_direct_opencount;

  /* If _dl_close is called recursively (some destructor calls dlclose),
     just record that the parent _dl_close will need to do garbage collection
     again and return.  */
  static enum { not_pending, pending, rerun } dl_close_state;

  if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
      || dl_close_state != not_pending)
    {
      if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
        dl_close_state = rerun;

      /* There are still references to this object.  Do nothing more.  */
      if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
        _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
                          map->l_name, map->l_direct_opencount);

      return;
    }

 retry:
  dl_close_state = pending;

  bool any_tls = false;
  const unsigned int nloaded = GL(dl_ns)[ns]._ns_nloaded;
  char used[nloaded];
  char done[nloaded];
  struct link_map *maps[nloaded];
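  /* These three variable-length arrays run parallel to the namespace's
     link-map list: maps[i] is the i-th map, used[i] records that it must
     survive the unload, done[i] that its dependencies have already been
     traversed.  */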

  /* Run over the list and assign indexes to the link maps and enter
     them into the MAPS array.  */
  int idx = 0;
  for (struct link_map *l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
    {
      l->l_idx = idx;
      maps[idx] = l;
      ++idx;
    }
  assert (idx == nloaded);

  /* Prepare the bitmaps.  */
  memset (used, '\0', sizeof (used));
  memset (done, '\0', sizeof (done));

  /* Keep track of the lowest index link map we have covered already.  */
  int done_index = -1;
  while (++done_index < nloaded)
    {
      struct link_map *l = maps[done_index];

      if (done[done_index])
        /* Already handled.  */
        continue;

      /* Check whether this object is still used.  */
      if (l->l_type == lt_loaded
          && l->l_direct_opencount == 0
          && (l->l_flags_1 & DF_1_NODELETE) == 0
          && !used[done_index])
        continue;

      /* We need this object and we handle it now.  */
      done[done_index] = 1;
      used[done_index] = 1;
      /* Signal the object is still needed.  */
      l->l_idx = IDX_STILL_USED;

      /* Mark all dependencies as used.  */
      if (l->l_initfini != NULL)
        {
          struct link_map **lp = &l->l_initfini[1];
          while (*lp != NULL)
            {
              if ((*lp)->l_idx != IDX_STILL_USED)
                {
                  assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);

                  if (!used[(*lp)->l_idx])
                    {
                      used[(*lp)->l_idx] = 1;
                      if ((*lp)->l_idx - 1 < done_index)
                        done_index = (*lp)->l_idx - 1;
                    }
                }

              ++lp;
            }
        }
      /* And the same for relocation dependencies.  */
      if (l->l_reldeps != NULL)
        for (unsigned int j = 0; j < l->l_reldepsact; ++j)
          {
            struct link_map *jmap = l->l_reldeps[j];

            if (jmap->l_idx != IDX_STILL_USED)
              {
                assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);

                if (!used[jmap->l_idx])
                  {
                    used[jmap->l_idx] = 1;
                    if (jmap->l_idx - 1 < done_index)
                      done_index = jmap->l_idx - 1;
                  }
              }
          }
    }
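  /* Note that marking a dependency can move done_index backwards: when a
     map at a lower index becomes newly used, the loop above backs up so
     that this map's own dependencies get marked as well.  */
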
  /* Sort the entries.  */
  _dl_sort_fini (GL(dl_ns)[ns]._ns_loaded, maps, nloaded, used, ns);

  /* Call all termination functions at once.  */
#ifdef SHARED
  bool do_audit = GLRO(dl_naudit) > 0 && !GL(dl_ns)[ns]._ns_loaded->l_auditing;
#endif
  bool unload_any = false;
  unsigned int first_loaded = ~0;
  for (unsigned int i = 0; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];

      /* All elements must be in the same namespace.  */
      assert (imap->l_ns == ns);

      if (!used[i])
        {
          assert (imap->l_type == lt_loaded
                  && (imap->l_flags_1 & DF_1_NODELETE) == 0);

          /* Call its termination function.  Do not do it for
             half-cooked objects.  */
          if (imap->l_init_called)
            {
              /* When debugging print a message first.  */
              if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS,
                                    0))
                _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
                                  imap->l_name, ns);

              if (imap->l_info[DT_FINI_ARRAY] != NULL)
                {
                  ElfW(Addr) *array =
                    (ElfW(Addr) *) (imap->l_addr
                                    + imap->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
                  unsigned int sz = (imap->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
                                     / sizeof (ElfW(Addr)));

                  while (sz-- > 0)
                    ((fini_t) array[sz]) ();
                }

              /* Next try the old-style destructor.  */
              if (imap->l_info[DT_FINI] != NULL)
                (*(void (*) (void)) DL_DT_FINI_ADDRESS
                 (imap, ((void *) imap->l_addr
                         + imap->l_info[DT_FINI]->d_un.d_ptr))) ();
            }

#ifdef SHARED
          /* Auditing checkpoint: we are about to remove this object.  */
          if (__builtin_expect (do_audit, 0))
            {
              struct audit_ifaces *afct = GLRO(dl_audit);
              for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
                {
                  if (afct->objclose != NULL)
                    /* Return value is ignored.  */
                    (void) afct->objclose (&imap->l_audit[cnt].cookie);

                  afct = afct->next;
                }
            }
#endif

          /* This object must not be used anymore.  */
          imap->l_removed = 1;

          /* We indeed have an object to remove.  */
          unload_any = true;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
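      /* The branch below handles objects which survive the unload but
         whose scope arrays may still point into objects about to be
         removed; those scopes must be rebuilt before anything is
         freed.  */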
      /* Else used[i].  */
      else if (imap->l_type == lt_loaded)
        {
          struct r_scope_elem *new_list = NULL;

          if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
            {
              /* The object is still used.  But one of the objects we are
                 unloading right now is responsible for loading it.  If
                 the current object does not have its own scope yet we
                 have to create one.  This has to be done before running
                 the finalizers.

                 To do this count the number of dependencies.  */
              unsigned int cnt;
              for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
                ;

              /* We simply reuse the l_initfini list.  */
              imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
              imap->l_searchlist.r_nlist = cnt;

              new_list = &imap->l_searchlist;
            }

          /* Count the number of scopes which remain after the unload.
             When we add the local search list count it.  Always add
             one for the terminating NULL pointer.  */
          size_t remain = (new_list != NULL) + 1;
          bool removed_any = false;
          for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
            /* This relies on each l_scope[] entry being set either to the
               map's own l_symbolic_searchlist address, or to some map's
               l_searchlist address.  */
            if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
              {
                struct link_map *tmap = (struct link_map *)
                  ((char *) imap->l_scope[cnt]
                   - offsetof (struct link_map, l_searchlist));
                assert (tmap->l_ns == ns);
                if (tmap->l_idx == IDX_STILL_USED)
                  ++remain;
                else
                  removed_any = true;
              }
            else
              ++remain;

          if (removed_any)
            {
              /* Always allocate a new array for the scope.  This is
                 necessary since we must be able to determine the last
                 user of the current array.  If possible use the link map's
                 memory.  */
              size_t new_size;
              struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

              if (imap->l_scope != imap->l_scope_mem
                  && remain < SCOPE_ELEMS (imap))
                {
                  new_size = SCOPE_ELEMS (imap);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlclose", NULL,
                                      N_("cannot create scope list"));
                }

              /* Copy over the remaining scope elements.  */
              remain = 0;
              for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
                {
                  if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
                    {
                      struct link_map *tmap = (struct link_map *)
                        ((char *) imap->l_scope[cnt]
                         - offsetof (struct link_map, l_searchlist));
                      if (tmap->l_idx != IDX_STILL_USED)
                        {
                          /* Remove the scope, or replace it with the
                             map's own scope.  */
                          if (new_list != NULL)
                            {
                              newp[remain++] = new_list;
                              new_list = NULL;
                            }
                          continue;
                        }
                    }

                  newp[remain++] = imap->l_scope[cnt];
                }
              newp[remain] = NULL;

              struct r_scope_elem **old = imap->l_scope;

              if (RTLD_SINGLE_THREAD_P)
                imap->l_scope = newp;
              else
                {
                  __rtld_mrlock_change (imap->l_scope_lock);
                  imap->l_scope = newp;
                  __rtld_mrlock_done (imap->l_scope_lock);
                }

              /* No user anymore, we can free it now.  */
              if (old != imap->l_scope_mem)
                free (old);

              imap->l_scope_max = new_size;
            }

          /* The loader is gone, so mark the object as not having one.
             Note: l_idx != IDX_STILL_USED -> object will be removed.  */
          if (imap->l_loader != NULL
              && imap->l_loader->l_idx != IDX_STILL_USED)
            imap->l_loader = NULL;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
    }

  /* If there are no objects to unload, do nothing further.  */
  if (!unload_any)
    goto out;

#ifdef SHARED
  /* Auditing checkpoint: we will start deleting objects.  */
  if (__builtin_expect (do_audit, 0))
    {
      struct link_map *head = GL(dl_ns)[ns]._ns_loaded;
      struct audit_ifaces *afct = GLRO(dl_audit);
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie, LA_ACT_DELETE);

              afct = afct->next;
            }
        }
    }
#endif

  /* Notify the debugger we are about to remove some loaded objects.  */
  struct r_debug *r = _dl_debug_initialize (0, ns);
  r->r_state = RT_DELETE;
  _dl_debug_state ();

  size_t tls_free_start;
  size_t tls_free_end;
  tls_free_start = tls_free_end = NO_TLS_OFFSET;
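
  /* A sketch of the reclaim logic below: each dying module's static TLS
     block is merged into a single candidate chunk delimited by
     tls_free_start/tls_free_end.  Only when that chunk ends exactly at
     the GL(dl_tls_static_used) watermark can the watermark be rolled
     back and the space reused; a second, non-contiguous chunk has to be
     leaked.  The two preprocessor arms exist because with TLS_TCB_AT_TP
     offsets count downwards from the thread pointer while with
     TLS_DTV_AT_TP they count upwards.  */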

  /* Check each element of the search list to see if all references to
     it are gone.  */
  for (unsigned int i = first_loaded; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];
      if (!used[i])
        {
          assert (imap->l_type == lt_loaded);

          /* That was the last reference, and this was a dlopen-loaded
             object.  We can unmap it.  */
          if (__builtin_expect (imap->l_global, 0))
            {
              /* This object is in the global scope list.  Remove it.  */
              unsigned int cnt = GL(dl_ns)[ns]._ns_main_searchlist->r_nlist;

              do
                --cnt;
              while (GL(dl_ns)[ns]._ns_main_searchlist->r_list[cnt] != imap);

              /* The object was already correctly registered.  */
              while (++cnt
                     < GL(dl_ns)[ns]._ns_main_searchlist->r_nlist)
                GL(dl_ns)[ns]._ns_main_searchlist->r_list[cnt - 1]
                  = GL(dl_ns)[ns]._ns_main_searchlist->r_list[cnt];

              --GL(dl_ns)[ns]._ns_main_searchlist->r_nlist;
            }

          /* Remove the object from the dtv slotinfo array if it uses TLS.  */
          if (__builtin_expect (imap->l_tls_blocksize > 0, 0))
            {
              any_tls = true;

              if (GL(dl_tls_dtv_slotinfo_list) != NULL
                  && ! remove_slotinfo (imap->l_tls_modid,
                                        GL(dl_tls_dtv_slotinfo_list), 0,
                                        imap->l_init_called))
                /* All dynamically loaded modules with TLS are unloaded.  */
                GL(dl_tls_max_dtv_idx) = GL(dl_tls_static_nelem);

              if (imap->l_tls_offset != NO_TLS_OFFSET)
                {
                  /* Collect a contiguous chunk built from the objects in
                     this search list, going in either direction.  When the
                     whole chunk is at the end of the used area then we can
                     reclaim it.  */
#if TLS_TCB_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET
                      || (size_t) imap->l_tls_offset == tls_free_start)
                    {
                      /* Extend the contiguous chunk being reclaimed.  */
                      tls_free_start
                        = imap->l_tls_offset - imap->l_tls_blocksize;

                      if (tls_free_end == NO_TLS_OFFSET)
                        tls_free_end = imap->l_tls_offset;
                    }
                  else if (imap->l_tls_offset - imap->l_tls_blocksize
                           == tls_free_end)
                    /* Extend the chunk backwards.  */
                    tls_free_end = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked unless we can free
                         one block right away.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        {
                          GL(dl_tls_static_used) = tls_free_start;
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                      else if ((size_t) imap->l_tls_offset
                               == GL(dl_tls_static_used))
                        GL(dl_tls_static_used)
                          = imap->l_tls_offset - imap->l_tls_blocksize;
                      else if (tls_free_end < (size_t) imap->l_tls_offset)
                        {
                          /* We pick the later block.  It has a chance to
                             be freed.  */
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                    }
#elif TLS_DTV_AT_TP
                  if ((size_t) imap->l_tls_offset == tls_free_end)
                    /* Extend the contiguous chunk being reclaimed.  */
                    tls_free_end -= imap->l_tls_blocksize;
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == tls_free_start)
                    /* Extend the chunk backwards.  */
                    tls_free_start = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        GL(dl_tls_static_used) = tls_free_start;
                      tls_free_start = imap->l_tls_offset;
                      tls_free_end = tls_free_start + imap->l_tls_blocksize;
                    }
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
                }
            }

          /* We can unmap all the maps at once.  We determined the
             start address and length when we loaded the object and
             the `munmap' call does the rest.  */
          DL_UNMAP (imap);

          /* Finally, unlink the data structure and free it.  */
          if (imap->l_prev != NULL)
            imap->l_prev->l_next = imap->l_next;
          else
            {
#ifdef SHARED
              assert (ns != LM_ID_BASE);
#endif
              GL(dl_ns)[ns]._ns_loaded = imap->l_next;
            }

          --GL(dl_ns)[ns]._ns_nloaded;
          if (imap->l_next != NULL)
            imap->l_next->l_prev = imap->l_prev;

          free (imap->l_versions);
          if (imap->l_origin != (char *) -1)
            free ((char *) imap->l_origin);

          free (imap->l_reldeps);

          /* Print debugging message.  */
          if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
            _dl_debug_printf ("\nfile=%s [%lu];  destroying link map\n",
                              imap->l_name, imap->l_ns);

          /* This name is always allocated.  */
          free (imap->l_name);

          /* Remove the list with all the names of the shared object.  */
          struct libname_list *lnp = imap->l_libname;
          do
            {
              struct libname_list *this = lnp;
              lnp = lnp->next;
              if (!this->dont_free)
                free (this);
            }
          while (lnp != NULL);

          /* Remove the searchlists.  */
          free (imap->l_initfini);

          /* Remove the scope array if we allocated it.  */
          if (imap->l_scope != imap->l_scope_mem)
            free (imap->l_scope);

          if (imap->l_phdr_allocated)
            free ((void *) imap->l_phdr);

          if (imap->l_rpath_dirs.dirs != (void *) -1)
            free (imap->l_rpath_dirs.dirs);
          if (imap->l_runpath_dirs.dirs != (void *) -1)
            free (imap->l_runpath_dirs.dirs);

          free (imap);
        }
    }

  /* If we removed any object which uses TLS bump the generation counter.  */
  if (any_tls)
    {
      if (__builtin_expect (++GL(dl_tls_generation) == 0, 0))
        _dl_fatal_printf ("TLS generation counter wrapped!  Please report as described in <http://www.gnu.org/software/libc/bugs.html>.\n");

      if (tls_free_end == GL(dl_tls_static_used))
        GL(dl_tls_static_used) = tls_free_start;
    }

#ifdef SHARED
  /* Auditing checkpoint: we have deleted all objects.  */
  if (__builtin_expect (do_audit, 0))
    {
      struct link_map *head = GL(dl_ns)[ns]._ns_loaded;
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          struct audit_ifaces *afct = GLRO(dl_audit);
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie, LA_ACT_CONSISTENT);

              afct = afct->next;
            }
        }
    }
#endif

  /* Notify the debugger those objects are finalized and gone.  */
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();

  /* Recheck whether we need to retry; the lock is released by the
     caller.  */
 out:
  if (dl_close_state == rerun)
    goto retry;

  dl_close_state = not_pending;
}
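
/* This is the entry point reached from dlclose: it only validates the
   handle and serializes on the loader lock around _dl_close_worker.  */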
void
_dl_close (void *_map)
{
  struct link_map *map = _map;

  /* First see whether we can remove the object at all.  */
  if (__builtin_expect (map->l_flags_1 & DF_1_NODELETE, 0))
    {
      assert (map->l_init_called);
      /* Nope.  Do nothing.  */
      return;
    }

  if (__builtin_expect (map->l_direct_opencount, 1) == 0)
    GLRO(dl_signal_error) (0, map->l_name, NULL, N_("shared object not open"));

  /* Acquire the lock.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  _dl_close_worker (map);

  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}
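
/* Illustrative usage (not part of the original file): how this code is
   typically reached from an application.  "libfoo.so" is a placeholder
   name.

     #include <dlfcn.h>
     #include <stdio.h>

     int
     main (void)
     {
       void *h = dlopen ("libfoo.so", RTLD_NOW);
       if (h == NULL)
         {
           fprintf (stderr, "%s\n", dlerror ());
           return 1;
         }
       // ... look up symbols with dlsym (h, "name") and use them ...
       dlclose (h);  // drops the last reference: _dl_close runs the
                     // worker, which calls destructors and unmaps the
                     // object and anything only it depended on
       return 0;
     }
*/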

static bool __libc_freeres_fn_section
free_slotinfo (struct dtv_slotinfo_list **elemp)
{
  size_t cnt;

  if (*elemp == NULL)
    /* Nothing here, all is removed (or there never was anything).  */
    return true;

  if (!free_slotinfo (&(*elemp)->next))
    /* We cannot free the entry.  */
    return false;

  /* That cleared our next pointer for us.  */

  for (cnt = 0; cnt < (*elemp)->len; ++cnt)
    if ((*elemp)->slotinfo[cnt].map != NULL)
      /* Still used.  */
      return false;

  /* We can remove the list element.  */
  free (*elemp);
  *elemp = NULL;

  return true;
}
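
/* free_mem below is registered via libc_freeres_fn and runs from
   __libc_freeres, which memory-debugging tools (for example valgrind)
   can invoke at process exit so that memory still referenced by ld.so
   bookkeeping is not reported as leaked.  */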
libc_freeres_fn (free_mem)
{
  for (Lmid_t ns = 0; ns < DL_NNS; ++ns)
    if (__builtin_expect (GL(dl_ns)[ns]._ns_global_scope_alloc, 0) != 0
        && (GL(dl_ns)[ns]._ns_main_searchlist->r_nlist
            // XXX Check whether we need NS-specific initial_searchlist
            == GLRO(dl_initial_searchlist).r_nlist))
      {
        /* All objects dynamically loaded by the program are unloaded.  Free
           the memory allocated for the global scope variable.  */
        struct link_map **old = GL(dl_ns)[ns]._ns_main_searchlist->r_list;

        /* Put the old map in.  */
        GL(dl_ns)[ns]._ns_main_searchlist->r_list
          // XXX Check whether we need NS-specific initial_searchlist
          = GLRO(dl_initial_searchlist).r_list;

        /* Signal that the original map is used.  */
        GL(dl_ns)[ns]._ns_global_scope_alloc = 0;

        /* Now free the old map.  */
        free (old);
      }

  if (USE___THREAD || GL(dl_tls_dtv_slotinfo_list) != NULL)
    {
      /* Free the memory allocated for the dtv slotinfo array.  We can do
         this only if all modules which used this memory are unloaded.  */
#ifdef SHARED
      if (GL(dl_initial_dtv) == NULL)
        /* There was no initial TLS setup, it was set up later when
           it used the normal malloc.  */
        free_slotinfo (&GL(dl_tls_dtv_slotinfo_list));
      else
#endif
        /* The first element of the list does not have to be deallocated.
           It was allocated in the dynamic linker (i.e., with a different
           malloc), and in the static library it's in .bss space.  */
        free_slotinfo (&GL(dl_tls_dtv_slotinfo_list)->next);
    }
}