1 /* Close a shared object opened by `_dl_open'.
2 Copyright (C) 1996-2007, 2009, 2010 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <bits/libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sysdep-cancel.h>
#include <tls.h>
/* Type of the destructor functions called via DT_FINI and the
   entries of DT_FINI_ARRAY.  (The old comment said "constructor",
   but this type is only ever used to invoke termination code.)  */
typedef void (*fini_t) (void);
/* Special l_idx value used to indicate which objects remain loaded.
   Parenthesized so the expansion is safe inside any expression.  */
#define IDX_STILL_USED (-1)
45 /* Returns true we an non-empty was found. */
47 remove_slotinfo (size_t idx
, struct dtv_slotinfo_list
*listp
, size_t disp
,
50 if (idx
- disp
>= listp
->len
)
52 if (listp
->next
== NULL
)
54 /* The index is not actually valid in the slotinfo list,
55 because this object was closed before it was fully set
56 up due to some error. */
57 assert (! should_be_there
);
61 if (remove_slotinfo (idx
, listp
->next
, disp
+ listp
->len
,
65 /* No non-empty entry. Search from the end of this element's
67 idx
= disp
+ listp
->len
;
72 struct link_map
*old_map
= listp
->slotinfo
[idx
- disp
].map
;
74 /* The entry might still be in its unused state if we are closing an
75 object that wasn't fully set up. */
76 if (__builtin_expect (old_map
!= NULL
, 1))
78 assert (old_map
->l_tls_modid
== idx
);
80 /* Mark the entry as unused. */
81 listp
->slotinfo
[idx
- disp
].gen
= GL(dl_tls_generation
) + 1;
82 listp
->slotinfo
[idx
- disp
].map
= NULL
;
85 /* If this is not the last currently used entry no need to look
87 if (idx
!= GL(dl_tls_max_dtv_idx
))
91 while (idx
- disp
> (disp
== 0 ? 1 + GL(dl_tls_static_nelem
) : 0))
95 if (listp
->slotinfo
[idx
- disp
].map
!= NULL
)
97 /* Found a new last used index. */
98 GL(dl_tls_max_dtv_idx
) = idx
;
103 /* No non-entry in this list element. */
109 _dl_close_worker (struct link_map
*map
)
111 /* One less direct use. */
112 --map
->l_direct_opencount
;
114 /* If _dl_close is called recursively (some destructor call dlclose),
115 just record that the parent _dl_close will need to do garbage collection
117 static enum { not_pending
, pending
, rerun
} dl_close_state
;
119 if (map
->l_direct_opencount
> 0 || map
->l_type
!= lt_loaded
120 || dl_close_state
!= not_pending
)
122 if (map
->l_direct_opencount
== 0 && map
->l_type
== lt_loaded
)
123 dl_close_state
= rerun
;
125 /* There are still references to this object. Do nothing more. */
126 if (__builtin_expect (GLRO(dl_debug_mask
) & DL_DEBUG_FILES
, 0))
127 _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
128 map
->l_name
, map
->l_direct_opencount
);
133 Lmid_t nsid
= map
->l_ns
;
134 struct link_namespaces
*ns
= &GL(dl_ns
)[nsid
];
137 dl_close_state
= pending
;
139 bool any_tls
= false;
140 const unsigned int nloaded
= ns
->_ns_nloaded
;
143 struct link_map
*maps
[nloaded
];
145 /* Run over the list and assign indexes to the link maps and enter
146 them into the MAPS array. */
148 for (struct link_map
*l
= ns
->_ns_loaded
; l
!= NULL
; l
= l
->l_next
)
154 assert (idx
== nloaded
);
156 /* Prepare the bitmaps. */
157 memset (used
, '\0', sizeof (used
));
158 memset (done
, '\0', sizeof (done
));
160 /* Keep track of the lowest index link map we have covered already. */
162 while (++done_index
< nloaded
)
164 struct link_map
*l
= maps
[done_index
];
166 if (done
[done_index
])
167 /* Already handled. */
170 /* Check whether this object is still used. */
171 if (l
->l_type
== lt_loaded
172 && l
->l_direct_opencount
== 0
173 && (l
->l_flags_1
& DF_1_NODELETE
) == 0
174 && !used
[done_index
])
177 /* We need this object and we handle it now. */
178 done
[done_index
] = 1;
179 used
[done_index
] = 1;
180 /* Signal the object is still needed. */
181 l
->l_idx
= IDX_STILL_USED
;
183 /* Mark all dependencies as used. */
184 if (l
->l_initfini
!= NULL
)
186 struct link_map
**lp
= &l
->l_initfini
[1];
189 if ((*lp
)->l_idx
!= IDX_STILL_USED
)
191 assert ((*lp
)->l_idx
>= 0 && (*lp
)->l_idx
< nloaded
);
193 if (!used
[(*lp
)->l_idx
])
195 used
[(*lp
)->l_idx
] = 1;
196 if ((*lp
)->l_idx
- 1 < done_index
)
197 done_index
= (*lp
)->l_idx
- 1;
204 /* And the same for relocation dependencies. */
205 if (l
->l_reldeps
!= NULL
)
206 for (unsigned int j
= 0; j
< l
->l_reldeps
->act
; ++j
)
208 struct link_map
*jmap
= l
->l_reldeps
->list
[j
];
210 if (jmap
->l_idx
!= IDX_STILL_USED
)
212 assert (jmap
->l_idx
>= 0 && jmap
->l_idx
< nloaded
);
214 if (!used
[jmap
->l_idx
])
216 used
[jmap
->l_idx
] = 1;
217 if (jmap
->l_idx
- 1 < done_index
)
218 done_index
= jmap
->l_idx
- 1;
224 /* Sort the entries. */
225 _dl_sort_fini (ns
->_ns_loaded
, maps
, nloaded
, used
, nsid
);
227 /* Call all termination functions at once. */
229 bool do_audit
= GLRO(dl_naudit
) > 0 && !ns
->_ns_loaded
->l_auditing
;
231 bool unload_any
= false;
232 bool scope_mem_left
= false;
233 unsigned int unload_global
= 0;
234 unsigned int first_loaded
= ~0;
235 for (unsigned int i
= 0; i
< nloaded
; ++i
)
237 struct link_map
*imap
= maps
[i
];
239 /* All elements must be in the same namespace. */
240 assert (imap
->l_ns
== nsid
);
244 assert (imap
->l_type
== lt_loaded
245 && (imap
->l_flags_1
& DF_1_NODELETE
) == 0);
247 /* Call its termination function. Do not do it for
248 half-cooked objects. */
249 if (imap
->l_init_called
)
251 /* When debugging print a message first. */
252 if (__builtin_expect (GLRO(dl_debug_mask
) & DL_DEBUG_IMPCALLS
,
254 _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
257 if (imap
->l_info
[DT_FINI_ARRAY
] != NULL
)
260 (ElfW(Addr
) *) (imap
->l_addr
261 + imap
->l_info
[DT_FINI_ARRAY
]->d_un
.d_ptr
);
262 unsigned int sz
= (imap
->l_info
[DT_FINI_ARRAYSZ
]->d_un
.d_val
263 / sizeof (ElfW(Addr
)));
266 ((fini_t
) array
[sz
]) ();
269 /* Next try the old-style destructor. */
270 if (imap
->l_info
[DT_FINI
] != NULL
)
271 (*(void (*) (void)) DL_DT_FINI_ADDRESS
272 (imap
, ((void *) imap
->l_addr
273 + imap
->l_info
[DT_FINI
]->d_un
.d_ptr
))) ();
277 /* Auditing checkpoint: we remove an object. */
278 if (__builtin_expect (do_audit
, 0))
280 struct audit_ifaces
*afct
= GLRO(dl_audit
);
281 for (unsigned int cnt
= 0; cnt
< GLRO(dl_naudit
); ++cnt
)
283 if (afct
->objclose
!= NULL
)
284 /* Return value is ignored. */
285 (void) afct
->objclose (&imap
->l_audit
[cnt
].cookie
);
292 /* This object must not be used anymore. */
295 /* We indeed have an object to remove. */
301 /* Remember where the first dynamically loaded object is. */
302 if (i
< first_loaded
)
306 else if (imap
->l_type
== lt_loaded
)
308 struct r_scope_elem
*new_list
= NULL
;
310 if (imap
->l_searchlist
.r_list
== NULL
&& imap
->l_initfini
!= NULL
)
312 /* The object is still used. But one of the objects we are
313 unloading right now is responsible for loading it. If
314 the current object does not have it's own scope yet we
315 have to create one. This has to be done before running
318 To do this count the number of dependencies. */
320 for (cnt
= 1; imap
->l_initfini
[cnt
] != NULL
; ++cnt
)
323 /* We simply reuse the l_initfini list. */
324 imap
->l_searchlist
.r_list
= &imap
->l_initfini
[cnt
+ 1];
325 imap
->l_searchlist
.r_nlist
= cnt
;
327 new_list
= &imap
->l_searchlist
;
330 /* Count the number of scopes which remain after the unload.
331 When we add the local search list count it. Always add
332 one for the terminating NULL pointer. */
333 size_t remain
= (new_list
!= NULL
) + 1;
334 bool removed_any
= false;
335 for (size_t cnt
= 0; imap
->l_scope
[cnt
] != NULL
; ++cnt
)
336 /* This relies on l_scope[] entries being always set either
337 to its own l_symbolic_searchlist address, or some map's
338 l_searchlist address. */
339 if (imap
->l_scope
[cnt
] != &imap
->l_symbolic_searchlist
)
341 struct link_map
*tmap
= (struct link_map
*)
342 ((char *) imap
->l_scope
[cnt
]
343 - offsetof (struct link_map
, l_searchlist
));
344 assert (tmap
->l_ns
== nsid
);
345 if (tmap
->l_idx
== IDX_STILL_USED
)
355 /* Always allocate a new array for the scope. This is
356 necessary since we must be able to determine the last
357 user of the current array. If possible use the link map's
360 struct r_scope_elem
**newp
;
362 #define SCOPE_ELEMS(imap) \
363 (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))
365 if (imap
->l_scope
!= imap
->l_scope_mem
366 && remain
< SCOPE_ELEMS (imap
))
368 new_size
= SCOPE_ELEMS (imap
);
369 newp
= imap
->l_scope_mem
;
373 new_size
= imap
->l_scope_max
;
374 newp
= (struct r_scope_elem
**)
375 malloc (new_size
* sizeof (struct r_scope_elem
*));
377 _dl_signal_error (ENOMEM
, "dlclose", NULL
,
378 N_("cannot create scope list"));
381 /* Copy over the remaining scope elements. */
383 for (size_t cnt
= 0; imap
->l_scope
[cnt
] != NULL
; ++cnt
)
385 if (imap
->l_scope
[cnt
] != &imap
->l_symbolic_searchlist
)
387 struct link_map
*tmap
= (struct link_map
*)
388 ((char *) imap
->l_scope
[cnt
]
389 - offsetof (struct link_map
, l_searchlist
));
390 if (tmap
->l_idx
!= IDX_STILL_USED
)
392 /* Remove the scope. Or replace with own map's
394 if (new_list
!= NULL
)
396 newp
[remain
++] = new_list
;
403 newp
[remain
++] = imap
->l_scope
[cnt
];
407 struct r_scope_elem
**old
= imap
->l_scope
;
409 imap
->l_scope
= newp
;
411 /* No user anymore, we can free it now. */
412 if (old
!= imap
->l_scope_mem
)
414 if (_dl_scope_free (old
))
415 /* If _dl_scope_free used THREAD_GSCOPE_WAIT (),
416 no need to repeat it. */
417 scope_mem_left
= false;
420 scope_mem_left
= true;
422 imap
->l_scope_max
= new_size
;
424 else if (new_list
!= NULL
)
426 /* We didn't change the scope array, so reset the search
428 imap
->l_searchlist
.r_list
= NULL
;
429 imap
->l_searchlist
.r_nlist
= 0;
432 /* The loader is gone, so mark the object as not having one.
433 Note: l_idx != IDX_STILL_USED -> object will be removed. */
434 if (imap
->l_loader
!= NULL
435 && imap
->l_loader
->l_idx
!= IDX_STILL_USED
)
436 imap
->l_loader
= NULL
;
438 /* Remember where the first dynamically loaded object is. */
439 if (i
< first_loaded
)
444 /* If there are no objects to unload, do nothing further. */
449 /* Auditing checkpoint: we will start deleting objects. */
450 if (__builtin_expect (do_audit
, 0))
452 struct link_map
*head
= ns
->_ns_loaded
;
453 struct audit_ifaces
*afct
= GLRO(dl_audit
);
454 /* Do not call the functions for any auditing object. */
455 if (head
->l_auditing
== 0)
457 for (unsigned int cnt
= 0; cnt
< GLRO(dl_naudit
); ++cnt
)
459 if (afct
->activity
!= NULL
)
460 afct
->activity (&head
->l_audit
[cnt
].cookie
, LA_ACT_DELETE
);
468 /* Notify the debugger we are about to remove some loaded objects. */
469 struct r_debug
*r
= _dl_debug_initialize (0, nsid
);
470 r
->r_state
= RT_DELETE
;
475 /* Some objects are in the global scope list. Remove them. */
476 struct r_scope_elem
*ns_msl
= ns
->_ns_main_searchlist
;
479 unsigned int cnt
= ns_msl
->r_nlist
;
481 while (cnt
> 0 && ns_msl
->r_list
[cnt
- 1]->l_removed
)
484 if (cnt
+ unload_global
== ns_msl
->r_nlist
)
485 /* Speed up removing most recently added objects. */
488 for (i
= 0; i
< cnt
; i
++)
489 if (ns_msl
->r_list
[i
]->l_removed
== 0)
492 ns_msl
->r_list
[j
] = ns_msl
->r_list
[i
];
498 if (!RTLD_SINGLE_THREAD_P
501 || (GL(dl_scope_free_list
) != NULL
502 && GL(dl_scope_free_list
)->count
)))
504 THREAD_GSCOPE_WAIT ();
506 /* Now we can free any queued old scopes. */
507 struct dl_scope_free_list
*fsl
= GL(dl_scope_free_list
);
509 while (fsl
->count
> 0)
510 free (fsl
->list
[--fsl
->count
]);
513 size_t tls_free_start
;
515 tls_free_start
= tls_free_end
= NO_TLS_OFFSET
;
517 /* We modify the list of loaded objects. */
518 __rtld_lock_lock_recursive (GL(dl_load_write_lock
));
520 /* Check each element of the search list to see if all references to
522 for (unsigned int i
= first_loaded
; i
< nloaded
; ++i
)
524 struct link_map
*imap
= maps
[i
];
527 assert (imap
->l_type
== lt_loaded
);
529 /* That was the last reference, and this was a dlopen-loaded
530 object. We can unmap it. */
532 /* Remove the object from the dtv slotinfo array if it uses TLS. */
533 if (__builtin_expect (imap
->l_tls_blocksize
> 0, 0))
537 if (GL(dl_tls_dtv_slotinfo_list
) != NULL
538 && ! remove_slotinfo (imap
->l_tls_modid
,
539 GL(dl_tls_dtv_slotinfo_list
), 0,
540 imap
->l_init_called
))
541 /* All dynamically loaded modules with TLS are unloaded. */
542 GL(dl_tls_max_dtv_idx
) = GL(dl_tls_static_nelem
);
544 if (imap
->l_tls_offset
!= NO_TLS_OFFSET
545 && imap
->l_tls_offset
!= FORCED_DYNAMIC_TLS_OFFSET
)
547 /* Collect a contiguous chunk built from the objects in
548 this search list, going in either direction. When the
549 whole chunk is at the end of the used area then we can
552 if (tls_free_start
== NO_TLS_OFFSET
553 || (size_t) imap
->l_tls_offset
== tls_free_start
)
555 /* Extend the contiguous chunk being reclaimed. */
557 = imap
->l_tls_offset
- imap
->l_tls_blocksize
;
559 if (tls_free_end
== NO_TLS_OFFSET
)
560 tls_free_end
= imap
->l_tls_offset
;
562 else if (imap
->l_tls_offset
- imap
->l_tls_blocksize
564 /* Extend the chunk backwards. */
565 tls_free_end
= imap
->l_tls_offset
;
568 /* This isn't contiguous with the last chunk freed.
569 One of them will be leaked unless we can free
570 one block right away. */
571 if (tls_free_end
== GL(dl_tls_static_used
))
573 GL(dl_tls_static_used
) = tls_free_start
;
574 tls_free_end
= imap
->l_tls_offset
;
576 = tls_free_end
- imap
->l_tls_blocksize
;
578 else if ((size_t) imap
->l_tls_offset
579 == GL(dl_tls_static_used
))
580 GL(dl_tls_static_used
)
581 = imap
->l_tls_offset
- imap
->l_tls_blocksize
;
582 else if (tls_free_end
< (size_t) imap
->l_tls_offset
)
584 /* We pick the later block. It has a chance to
586 tls_free_end
= imap
->l_tls_offset
;
588 = tls_free_end
- imap
->l_tls_blocksize
;
592 if ((size_t) imap
->l_tls_offset
== tls_free_end
)
593 /* Extend the contiguous chunk being reclaimed. */
594 tls_free_end
-= imap
->l_tls_blocksize
;
595 else if (imap
->l_tls_offset
+ imap
->l_tls_blocksize
597 /* Extend the chunk backwards. */
598 tls_free_start
= imap
->l_tls_offset
;
601 /* This isn't contiguous with the last chunk freed.
602 One of them will be leaked. */
603 if (tls_free_end
== GL(dl_tls_static_used
))
604 GL(dl_tls_static_used
) = tls_free_start
;
605 tls_free_start
= imap
->l_tls_offset
;
606 tls_free_end
= tls_free_start
+ imap
->l_tls_blocksize
;
609 # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
614 /* We can unmap all the maps at once. We determined the
615 start address and length when we loaded the object and
616 the `munmap' call does the rest. */
619 /* Finally, unlink the data structure and free it. */
620 if (imap
->l_prev
!= NULL
)
621 imap
->l_prev
->l_next
= imap
->l_next
;
625 assert (nsid
!= LM_ID_BASE
);
627 ns
->_ns_loaded
= imap
->l_next
;
631 if (imap
->l_next
!= NULL
)
632 imap
->l_next
->l_prev
= imap
->l_prev
;
634 free (imap
->l_versions
);
635 if (imap
->l_origin
!= (char *) -1)
636 free ((char *) imap
->l_origin
);
638 free (imap
->l_reldeps
);
640 /* Print debugging message. */
641 if (__builtin_expect (GLRO(dl_debug_mask
) & DL_DEBUG_FILES
, 0))
642 _dl_debug_printf ("\nfile=%s [%lu]; destroying link map\n",
643 imap
->l_name
, imap
->l_ns
);
645 /* This name always is allocated. */
647 /* Remove the list with all the names of the shared object. */
649 struct libname_list
*lnp
= imap
->l_libname
;
652 struct libname_list
*this = lnp
;
654 if (!this->dont_free
)
659 /* Remove the searchlists. */
660 free (imap
->l_initfini
);
662 /* Remove the scope array if we allocated it. */
663 if (imap
->l_scope
!= imap
->l_scope_mem
)
664 free (imap
->l_scope
);
666 if (imap
->l_phdr_allocated
)
667 free ((void *) imap
->l_phdr
);
669 if (imap
->l_rpath_dirs
.dirs
!= (void *) -1)
670 free (imap
->l_rpath_dirs
.dirs
);
671 if (imap
->l_runpath_dirs
.dirs
!= (void *) -1)
672 free (imap
->l_runpath_dirs
.dirs
);
678 __rtld_lock_unlock_recursive (GL(dl_load_write_lock
));
680 /* If we removed any object which uses TLS bump the generation counter. */
683 if (__builtin_expect (++GL(dl_tls_generation
) == 0, 0))
684 _dl_fatal_printf ("TLS generation counter wrapped! Please report as described in <http://www.gnu.org/software/libc/bugs.html>.\n");
686 if (tls_free_end
== GL(dl_tls_static_used
))
687 GL(dl_tls_static_used
) = tls_free_start
;
691 /* Auditing checkpoint: we have deleted all objects. */
692 if (__builtin_expect (do_audit
, 0))
694 struct link_map
*head
= ns
->_ns_loaded
;
695 /* Do not call the functions for any auditing object. */
696 if (head
->l_auditing
== 0)
698 struct audit_ifaces
*afct
= GLRO(dl_audit
);
699 for (unsigned int cnt
= 0; cnt
< GLRO(dl_naudit
); ++cnt
)
701 if (afct
->activity
!= NULL
)
702 afct
->activity (&head
->l_audit
[cnt
].cookie
, LA_ACT_CONSISTENT
);
710 if (__builtin_expect (ns
->_ns_loaded
== NULL
, 0)
711 && nsid
== GL(dl_nns
) - 1)
720 while (GL(dl_ns
)[GL(dl_nns
) - 1]._ns_loaded
== NULL
);
722 /* Notify the debugger those objects are finalized and gone. */
723 r
->r_state
= RT_CONSISTENT
;
726 /* Recheck if we need to retry, release the lock. */
728 if (dl_close_state
== rerun
)
731 dl_close_state
= not_pending
;
736 _dl_close (void *_map
)
738 struct link_map
*map
= _map
;
740 /* First see whether we can remove the object at all. */
741 if (__builtin_expect (map
->l_flags_1
& DF_1_NODELETE
, 0))
743 assert (map
->l_init_called
);
744 /* Nope. Do nothing. */
748 if (__builtin_expect (map
->l_direct_opencount
, 1) == 0)
749 GLRO(dl_signal_error
) (0, map
->l_name
, NULL
, N_("shared object not open"));
751 /* Acquire the lock. */
752 __rtld_lock_lock_recursive (GL(dl_load_lock
));
754 _dl_close_worker (map
);
756 __rtld_lock_unlock_recursive (GL(dl_load_lock
));
760 static bool __libc_freeres_fn_section
761 free_slotinfo (struct dtv_slotinfo_list
**elemp
)
766 /* Nothing here, all is removed (or there never was anything). */
769 if (!free_slotinfo (&(*elemp
)->next
))
770 /* We cannot free the entry. */
773 /* That cleared our next pointer for us. */
775 for (cnt
= 0; cnt
< (*elemp
)->len
; ++cnt
)
776 if ((*elemp
)->slotinfo
[cnt
].map
!= NULL
)
780 /* We can remove the list element. */
788 libc_freeres_fn (free_mem
)
790 for (Lmid_t nsid
= 0; nsid
< GL(dl_nns
); ++nsid
)
791 if (__builtin_expect (GL(dl_ns
)[nsid
]._ns_global_scope_alloc
, 0) != 0
792 && (GL(dl_ns
)[nsid
]._ns_main_searchlist
->r_nlist
793 // XXX Check whether we need NS-specific initial_searchlist
794 == GLRO(dl_initial_searchlist
).r_nlist
))
796 /* All object dynamically loaded by the program are unloaded. Free
797 the memory allocated for the global scope variable. */
798 struct link_map
**old
= GL(dl_ns
)[nsid
]._ns_main_searchlist
->r_list
;
800 /* Put the old map in. */
801 GL(dl_ns
)[nsid
]._ns_main_searchlist
->r_list
802 // XXX Check whether we need NS-specific initial_searchlist
803 = GLRO(dl_initial_searchlist
).r_list
;
804 /* Signal that the original map is used. */
805 GL(dl_ns
)[nsid
]._ns_global_scope_alloc
= 0;
807 /* Now free the old map. */
811 if (USE___THREAD
|| GL(dl_tls_dtv_slotinfo_list
) != NULL
)
813 /* Free the memory allocated for the dtv slotinfo array. We can do
814 this only if all modules which used this memory are unloaded. */
816 if (GL(dl_initial_dtv
) == NULL
)
817 /* There was no initial TLS setup, it was set up later when
818 it used the normal malloc. */
819 free_slotinfo (&GL(dl_tls_dtv_slotinfo_list
));
822 /* The first element of the list does not have to be deallocated.
823 It was allocated in the dynamic linker (i.e., with a different
824 malloc), and in the static library it's in .bss space. */
825 free_slotinfo (&GL(dl_tls_dtv_slotinfo_list
)->next
);
828 void *scope_free_list
= GL(dl_scope_free_list
);
829 GL(dl_scope_free_list
) = NULL
;
830 free (scope_free_list
);