/* Close a shared object opened by `_dl_open'.
   Copyright (C) 1996-2007, 2009, 2010, 2011 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <bits/libc-lock.h>
#include <ldsodefs.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sysdep-cancel.h>
#include <tls.h>


/* Type of the destructor functions.  */
typedef void (*fini_t) (void);


/* Special l_idx value used to indicate which objects remain loaded.  */
#define IDX_STILL_USED -1
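
/* Background note: GL(dl_tls_dtv_slotinfo_list) is a chained array of
   (generation, link_map) pairs indexed by TLS module ID.  The helper
   below walks that chain recursively to clear the entry of a module
   being unloaded and, when that entry was the highest one in use, to
   lower GL(dl_tls_max_dtv_idx) again.  */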

/* Returns true if a non-empty entry was found.  */
static bool
remove_slotinfo (size_t idx, struct dtv_slotinfo_list *listp, size_t disp,
                 bool should_be_there)
{
  if (idx - disp >= listp->len)
    {
      if (listp->next == NULL)
        {
          /* The index is not actually valid in the slotinfo list,
             because this object was closed before it was fully set
             up due to some error.  */
          assert (! should_be_there);
        }
      else
        {
          if (remove_slotinfo (idx, listp->next, disp + listp->len,
                               should_be_there))
            return true;

          /* No non-empty entry.  Search from the end of this element's
             slotinfo array.  */
          idx = disp + listp->len;
        }
    }
  else
    {
      struct link_map *old_map = listp->slotinfo[idx - disp].map;

      /* The entry might still be in its unused state if we are closing an
         object that wasn't fully set up.  */
      if (__builtin_expect (old_map != NULL, 1))
        {
          assert (old_map->l_tls_modid == idx);

          /* Mark the entry as unused.  */
          listp->slotinfo[idx - disp].gen = GL(dl_tls_generation) + 1;
          listp->slotinfo[idx - disp].map = NULL;
        }

      /* If this is not the last currently used entry no need to look
         further.  */
      if (idx != GL(dl_tls_max_dtv_idx))
        return true;
    }

  while (idx - disp > (disp == 0 ? 1 + GL(dl_tls_static_nelem) : 0))
    {
      --idx;

      if (listp->slotinfo[idx - disp].map != NULL)
        {
          /* Found a new last used index.  */
          GL(dl_tls_max_dtv_idx) = idx;
          return true;
        }
    }

  /* No non-empty entry in this list element.  */
  return false;
}
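
/* Overview of the worker below: after accounting for the dropped
   reference it marks every object in the namespace that is still
   reachable, runs the destructors of the unreachable ones, rebuilds
   scopes and the global search list, waits for concurrent lookups to
   drain, and only then unmaps and frees the dead link maps.  */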
void
_dl_close_worker (struct link_map *map)
{
  /* One less direct use.  */
  --map->l_direct_opencount;

  /* If _dl_close is called recursively (a destructor called dlclose),
     just record that the parent _dl_close will need to do garbage
     collection again and return.  */
  static enum { not_pending, pending, rerun } dl_close_state;

  if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
      || dl_close_state != not_pending)
    {
      if (map->l_direct_opencount == 0)
        {
          if (map->l_type == lt_loaded)
            dl_close_state = rerun;
          else if (map->l_type == lt_library)
            {
              struct link_map **oldp = map->l_initfini;
              map->l_initfini = map->l_orig_initfini;
              _dl_scope_free (oldp);
            }
        }

      /* There are still references to this object.  Do nothing more.  */
      if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
        _dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
                          map->l_name, map->l_direct_opencount);

      return;
    }

  Lmid_t nsid = map->l_ns;
  struct link_namespaces *ns = &GL(dl_ns)[nsid];

 retry:
  dl_close_state = pending;

  bool any_tls = false;
  const unsigned int nloaded = ns->_ns_nloaded;
  char used[nloaded];
  char done[nloaded];
  struct link_map *maps[nloaded];

  /* Run over the list and assign indexes to the link maps and enter
     them into the MAPS array.  */
  int idx = 0;
  for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
    {
      l->l_idx = idx;
      maps[idx] = l;
      ++idx;
    }
  assert (idx == nloaded);

  /* Prepare the bitmaps.  */
  memset (used, '\0', sizeof (used));
  memset (done, '\0', sizeof (done));
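
  /* The loop below is a mark phase over the namespace: whenever marking
     an object also marks a dependency with a lower index, done_index is
     pulled back so that dependency is revisited.  The loop terminates
     once the used[] marks reach a fixed point.  */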
  /* Keep track of the lowest index link map we have covered already.  */
  int done_index = -1;
  while (++done_index < nloaded)
    {
      struct link_map *l = maps[done_index];

      if (done[done_index])
        /* Already handled.  */
        continue;

      /* Check whether this object is still used.  */
      if (l->l_type == lt_loaded
          && l->l_direct_opencount == 0
          && (l->l_flags_1 & DF_1_NODELETE) == 0
          && !used[done_index])
        continue;

      /* We need this object and we handle it now.  */
      done[done_index] = 1;
      used[done_index] = 1;
      /* Signal the object is still needed.  */
      l->l_idx = IDX_STILL_USED;

      /* Mark all dependencies as used.  */
      if (l->l_initfini != NULL)
        {
          struct link_map **lp = &l->l_initfini[1];
          while (*lp != NULL)
            {
              if ((*lp)->l_idx != IDX_STILL_USED)
                {
                  assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded);

                  if (!used[(*lp)->l_idx])
                    {
                      used[(*lp)->l_idx] = 1;
                      if ((*lp)->l_idx - 1 < done_index)
                        done_index = (*lp)->l_idx - 1;
                    }
                }

              ++lp;
            }
        }
      /* And the same for relocation dependencies.  */
      if (l->l_reldeps != NULL)
        for (unsigned int j = 0; j < l->l_reldeps->act; ++j)
          {
            struct link_map *jmap = l->l_reldeps->list[j];

            if (jmap->l_idx != IDX_STILL_USED)
              {
                assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded);

                if (!used[jmap->l_idx])
                  {
                    used[jmap->l_idx] = 1;
                    if (jmap->l_idx - 1 < done_index)
                      done_index = jmap->l_idx - 1;
                  }
              }
          }
    }

  /* Sort the entries.  */
  _dl_sort_fini (maps, nloaded, used, nsid);
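
  /* After sorting, each object in maps[] precedes the objects it
     depends on, so the loop below runs an object's destructors before
     those of its dependencies.  */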
  /* Call all termination functions at once.  */
#ifdef SHARED
  bool do_audit = GLRO(dl_naudit) > 0 && !ns->_ns_loaded->l_auditing;
#endif
  bool unload_any = false;
  bool scope_mem_left = false;
  unsigned int unload_global = 0;
  unsigned int first_loaded = ~0;
  for (unsigned int i = 0; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];

      /* All elements must be in the same namespace.  */
      assert (imap->l_ns == nsid);

      if (!used[i])
        {
          assert (imap->l_type == lt_loaded
                  && (imap->l_flags_1 & DF_1_NODELETE) == 0);

          /* Call its termination function.  Do not do it for
             half-cooked objects.  */
          if (imap->l_init_called)
            {
              /* When debugging print a message first.  */
              if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS,
                                    0))
                _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
                                  imap->l_name, nsid);

              if (imap->l_info[DT_FINI_ARRAY] != NULL)
                {
                  ElfW(Addr) *array =
                    (ElfW(Addr) *) (imap->l_addr
                                    + imap->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
                  unsigned int sz = (imap->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
                                     / sizeof (ElfW(Addr)));

                  while (sz-- > 0)
                    ((fini_t) array[sz]) ();
                }

              /* Next try the old-style destructor.  */
              if (imap->l_info[DT_FINI] != NULL)
                (*(void (*) (void)) DL_DT_FINI_ADDRESS
                 (imap, ((void *) imap->l_addr
                         + imap->l_info[DT_FINI]->d_un.d_ptr))) ();
            }

#ifdef SHARED
          /* Auditing checkpoint: we remove an object.  */
          if (__builtin_expect (do_audit, 0))
            {
              struct audit_ifaces *afct = GLRO(dl_audit);
              for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
                {
                  if (afct->objclose != NULL)
                    /* Return value is ignored.  */
                    (void) afct->objclose (&imap->l_audit[cnt].cookie);

                  afct = afct->next;
                }
            }
#endif

          /* This object must not be used anymore.  */
          imap->l_removed = 1;

          /* We indeed have an object to remove.  */
          unload_any = true;

          if (imap->l_global)
            ++unload_global;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
      /* Else used[i].  */
      else if (imap->l_type == lt_loaded)
        {
          struct r_scope_elem *new_list = NULL;

          if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL)
            {
              /* The object is still used.  But one of the objects we are
                 unloading right now is responsible for loading it.  If
                 the current object does not have its own scope yet we
                 have to create one.  This has to be done before running
                 the finalizers.

                 To do this count the number of dependencies.  */
              unsigned int cnt;
              for (cnt = 1; imap->l_initfini[cnt] != NULL; ++cnt)
                ;

              /* We simply reuse the l_initfini list: it stores the
                 dependency list a second time, in search order, right
                 after the terminating NULL entry.  */
              imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1];
              imap->l_searchlist.r_nlist = cnt;

              new_list = &imap->l_searchlist;
            }

          /* Count the number of scopes which remain after the unload.
             When we add the local search list count it.  Always add
             one for the terminating NULL pointer.  */
          size_t remain = (new_list != NULL) + 1;
          bool removed_any = false;
          for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
            /* This relies on l_scope[] entries always being set either
               to the object's own l_symbolic_searchlist address, or to
               some map's l_searchlist address.  */
            if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
              {
                struct link_map *tmap = (struct link_map *)
                  ((char *) imap->l_scope[cnt]
                   - offsetof (struct link_map, l_searchlist));
                assert (tmap->l_ns == nsid);
                if (tmap->l_idx == IDX_STILL_USED)
                  ++remain;
                else
                  removed_any = true;
              }
            else
              ++remain;
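
          /* Note on the pointer arithmetic above: any l_scope entry that
             is not the object's own l_symbolic_searchlist is the
             l_searchlist member embedded in some link map, so subtracting
             offsetof (struct link_map, l_searchlist) recovers that map.  */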
          if (removed_any)
            {
              /* Always allocate a new array for the scope.  This is
                 necessary since we must be able to determine the last
                 user of the current array.  If possible use the link map's
                 memory.  */
              size_t new_size;
              struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

              if (imap->l_scope != imap->l_scope_mem
                  && remain < SCOPE_ELEMS (imap))
                {
                  new_size = SCOPE_ELEMS (imap);
                  newp = imap->l_scope_mem;
                }
              else
                {
                  new_size = imap->l_scope_max;
                  newp = (struct r_scope_elem **)
                    malloc (new_size * sizeof (struct r_scope_elem *));
                  if (newp == NULL)
                    _dl_signal_error (ENOMEM, "dlclose", NULL,
                                      N_("cannot create scope list"));
                }

              /* Copy over the remaining scope elements.  */
              remain = 0;
              for (size_t cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
                {
                  if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
                    {
                      struct link_map *tmap = (struct link_map *)
                        ((char *) imap->l_scope[cnt]
                         - offsetof (struct link_map, l_searchlist));
                      if (tmap->l_idx != IDX_STILL_USED)
                        {
                          /* Remove the scope, or replace it with the
                             map's own scope.  */
                          if (new_list != NULL)
                            {
                              newp[remain++] = new_list;
                              new_list = NULL;
                            }
                          continue;
                        }
                    }

                  newp[remain++] = imap->l_scope[cnt];
                }
              newp[remain] = NULL;

              struct r_scope_elem **old = imap->l_scope;

              imap->l_scope = newp;

              /* No user anymore, we can free it now.  */
              if (old != imap->l_scope_mem)
                {
                  if (_dl_scope_free (old))
                    /* If _dl_scope_free used THREAD_GSCOPE_WAIT (),
                       no need to repeat it.  */
                    scope_mem_left = false;
                }
              else
                scope_mem_left = true;

              imap->l_scope_max = new_size;
            }
          else if (new_list != NULL)
            {
              /* We didn't change the scope array, so reset the search
                 list.  */
              imap->l_searchlist.r_list = NULL;
              imap->l_searchlist.r_nlist = 0;
            }

          /* The loader is gone, so mark the object as not having one.
             Note: l_idx != IDX_STILL_USED -> object will be removed.  */
          if (imap->l_loader != NULL
              && imap->l_loader->l_idx != IDX_STILL_USED)
            imap->l_loader = NULL;

          /* Remember where the first dynamically loaded object is.  */
          if (i < first_loaded)
            first_loaded = i;
        }
    }

  /* If there are no objects to unload, do nothing further.  */
  if (!unload_any)
    goto out;

#ifdef SHARED
  /* Auditing checkpoint: we will start deleting objects.  */
  if (__builtin_expect (do_audit, 0))
    {
      struct link_map *head = ns->_ns_loaded;
      struct audit_ifaces *afct = GLRO(dl_audit);
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie, LA_ACT_DELETE);

              afct = afct->next;
            }
        }
    }
#endif

  /* Notify the debugger we are about to remove some loaded objects.  */
  struct r_debug *r = _dl_debug_initialize (0, nsid);
  r->r_state = RT_DELETE;
  _dl_debug_state ();

  if (unload_global)
    {
      /* Some objects are in the global scope list.  Remove them.  */
      struct r_scope_elem *ns_msl = ns->_ns_main_searchlist;
      unsigned int i;
      unsigned int j = 0;
      unsigned int cnt = ns_msl->r_nlist;

      while (cnt > 0 && ns_msl->r_list[cnt - 1]->l_removed)
        --cnt;

      if (cnt + unload_global == ns_msl->r_nlist)
        /* Speed up removing most recently added objects.  */
        j = cnt;
      else
        for (i = 0; i < cnt; i++)
          if (ns_msl->r_list[i]->l_removed == 0)
            {
              if (i != j)
                ns_msl->r_list[j] = ns_msl->r_list[i];
              j++;
            }
      ns_msl->r_nlist = j;
    }

  if (!RTLD_SINGLE_THREAD_P
      && (unload_global
          || scope_mem_left
          || (GL(dl_scope_free_list) != NULL
              && GL(dl_scope_free_list)->count)))
    {
      THREAD_GSCOPE_WAIT ();

      /* Now we can free any queued old scopes.  */
      struct dl_scope_free_list *fsl = GL(dl_scope_free_list);
      if (fsl != NULL)
        while (fsl->count > 0)
          free (fsl->list[--fsl->count]);
    }
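
  /* THREAD_GSCOPE_WAIT () above blocks until every thread has left the
     dynamic linker's global-scope critical section, after which the old
     scope arrays queued on dl_scope_free_list can no longer be seen by
     concurrent symbol lookups and are safe to free.  */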

  size_t tls_free_start;
  size_t tls_free_end;
  tls_free_start = tls_free_end = NO_TLS_OFFSET;
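
  /* tls_free_start/tls_free_end track one candidate chunk of static TLS
     space to give back; NO_TLS_OFFSET means nothing has been collected
     yet.  Only a chunk that ends up adjacent to GL(dl_tls_static_used)
     can actually be reclaimed.  */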

  /* We modify the list of loaded objects.  */
  __rtld_lock_lock_recursive (GL(dl_load_write_lock));

  /* Check each element of the search list to see if all references to
     it are gone.  */
  for (unsigned int i = first_loaded; i < nloaded; ++i)
    {
      struct link_map *imap = maps[i];
      if (!used[i])
        {
          assert (imap->l_type == lt_loaded);

          /* That was the last reference, and this was a dlopen-loaded
             object.  We can unmap it.  */

          /* Remove the object from the dtv slotinfo array if it uses TLS.  */
          if (__builtin_expect (imap->l_tls_blocksize > 0, 0))
            {
              any_tls = true;

              if (GL(dl_tls_dtv_slotinfo_list) != NULL
                  && ! remove_slotinfo (imap->l_tls_modid,
                                        GL(dl_tls_dtv_slotinfo_list), 0,
                                        imap->l_init_called))
                /* All dynamically loaded modules with TLS are unloaded.  */
                GL(dl_tls_max_dtv_idx) = GL(dl_tls_static_nelem);

              if (imap->l_tls_offset != NO_TLS_OFFSET
                  && imap->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
                {
                  /* Collect a contiguous chunk built from the objects in
                     this search list, going in either direction.  When the
                     whole chunk is at the end of the used area then we can
                     reclaim it.  */
#if TLS_TCB_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET
                      || (size_t) imap->l_tls_offset == tls_free_start)
                    {
                      /* Extend the contiguous chunk being reclaimed.  */
                      tls_free_start
                        = imap->l_tls_offset - imap->l_tls_blocksize;

                      if (tls_free_end == NO_TLS_OFFSET)
                        tls_free_end = imap->l_tls_offset;
                    }
                  else if (imap->l_tls_offset - imap->l_tls_blocksize
                           == tls_free_end)
                    /* Extend the chunk backwards.  */
                    tls_free_end = imap->l_tls_offset;
                  else
                    {
                      /* This isn't contiguous with the last chunk freed.
                         One of them will be leaked unless we can free
                         one block right away.  */
                      if (tls_free_end == GL(dl_tls_static_used))
                        {
                          GL(dl_tls_static_used) = tls_free_start;
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                      else if ((size_t) imap->l_tls_offset
                               == GL(dl_tls_static_used))
                        GL(dl_tls_static_used)
                          = imap->l_tls_offset - imap->l_tls_blocksize;
                      else if (tls_free_end < (size_t) imap->l_tls_offset)
                        {
                          /* We pick the later block.  It has a chance to
                             be freed.  */
                          tls_free_end = imap->l_tls_offset;
                          tls_free_start
                            = tls_free_end - imap->l_tls_blocksize;
                        }
                    }
#elif TLS_DTV_AT_TP
                  if (tls_free_start == NO_TLS_OFFSET)
                    {
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = (imap->l_tls_offset
                                      + imap->l_tls_blocksize);
                    }
                  else if (imap->l_tls_firstbyte_offset == tls_free_end)
                    /* Extend the contiguous chunk being reclaimed.  */
                    tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == tls_free_start)
                    /* Extend the chunk backwards.  */
                    tls_free_start = imap->l_tls_firstbyte_offset;
                  /* This isn't contiguous with the last chunk freed.
                     One of them will be leaked unless we can free
                     one block right away.  */
                  else if (imap->l_tls_offset + imap->l_tls_blocksize
                           == GL(dl_tls_static_used))
                    GL(dl_tls_static_used) = imap->l_tls_firstbyte_offset;
                  else if (tls_free_end == GL(dl_tls_static_used))
                    {
                      GL(dl_tls_static_used) = tls_free_start;
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
                  else if (tls_free_end < imap->l_tls_firstbyte_offset)
                    {
                      /* We pick the later block.  It has a chance to
                         be freed.  */
                      tls_free_start = imap->l_tls_firstbyte_offset;
                      tls_free_end = imap->l_tls_offset + imap->l_tls_blocksize;
                    }
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
                }
            }

          /* We can unmap all the maps at once.  We determined the
             start address and length when we loaded the object and
             the `munmap' call does the rest.  */
          DL_UNMAP (imap);

          /* Finally, unlink the data structure and free it.  */
          if (imap->l_prev != NULL)
            imap->l_prev->l_next = imap->l_next;
          else
            {
#ifdef SHARED
              assert (nsid != LM_ID_BASE);
#endif
              ns->_ns_loaded = imap->l_next;
            }

          --ns->_ns_nloaded;
          if (imap->l_next != NULL)
            imap->l_next->l_prev = imap->l_prev;

          free (imap->l_versions);
          if (imap->l_origin != (char *) -1)
            free ((char *) imap->l_origin);

          free (imap->l_reldeps);

          /* Print debugging message.  */
          if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
            _dl_debug_printf ("\nfile=%s [%lu];  destroying link map\n",
                              imap->l_name, imap->l_ns);

          /* This name is always allocated.  */
          free (imap->l_name);
          /* Remove the list with all the names of the shared object.  */

          struct libname_list *lnp = imap->l_libname;
          do
            {
              struct libname_list *this = lnp;
              lnp = lnp->next;
              if (!this->dont_free)
                free (this);
            }
          while (lnp != NULL);

          /* Remove the searchlists.  */
          free (imap->l_initfini);

          /* Remove the scope array if we allocated it.  */
          if (imap->l_scope != imap->l_scope_mem)
            free (imap->l_scope);

          if (imap->l_phdr_allocated)
            free ((void *) imap->l_phdr);

          if (imap->l_rpath_dirs.dirs != (void *) -1)
            free (imap->l_rpath_dirs.dirs);
          if (imap->l_runpath_dirs.dirs != (void *) -1)
            free (imap->l_runpath_dirs.dirs);

          free (imap);
        }
    }

  __rtld_lock_unlock_recursive (GL(dl_load_write_lock));

  /* If we removed any object which uses TLS, bump the generation
     counter.  */
  if (any_tls)
    {
      if (__builtin_expect (++GL(dl_tls_generation) == 0, 0))
        _dl_fatal_printf ("TLS generation counter wrapped!  Please report as described in <http://www.gnu.org/software/libc/bugs.html>.\n");
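
      /* The new generation tells __tls_get_addr that dtv entries
         referring to the removed modules are stale and must be
         revalidated against the slotinfo list.  */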
      if (tls_free_end == GL(dl_tls_static_used))
        GL(dl_tls_static_used) = tls_free_start;
    }

#ifdef SHARED
  /* Auditing checkpoint: we have deleted all objects.  */
  if (__builtin_expect (do_audit, 0))
    {
      struct link_map *head = ns->_ns_loaded;
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
        {
          struct audit_ifaces *afct = GLRO(dl_audit);
          for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
            {
              if (afct->activity != NULL)
                afct->activity (&head->l_audit[cnt].cookie, LA_ACT_CONSISTENT);

              afct = afct->next;
            }
        }
    }
#endif

  /* If the highest-numbered namespace is now empty, shrink GL(dl_nns).  */
  if (__builtin_expect (ns->_ns_loaded == NULL, 0)
      && nsid == GL(dl_nns) - 1)
    do
      {
        --GL(dl_nns);
#ifndef SHARED
        if (GL(dl_nns) == 0)
          break;
#endif
      }
    while (GL(dl_ns)[GL(dl_nns) - 1]._ns_loaded == NULL);

  /* Notify the debugger that those objects are finalized and gone.  */
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();

  /* Recheck whether we need to retry, release the lock.  */
 out:
  if (dl_close_state == rerun)
    goto retry;

  dl_close_state = not_pending;
}
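
/* A rough sketch of how user code reaches _dl_close below, assuming the
   usual libdl wrapper:

     void *h = dlopen ("libfoo.so", RTLD_NOW);  // h is a link_map *
     ...
     dlclose (h);                               // ends up in _dl_close

   Errors raised here via _dl_signal_error become the string later
   returned by dlerror.  */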
void
_dl_close (void *_map)
{
  struct link_map *map = _map;

  /* First see whether we can remove the object at all.  */
  if (__builtin_expect (map->l_flags_1 & DF_1_NODELETE, 0))
    {
      assert (map->l_init_called);
      /* Nope.  Do nothing.  */
      return;
    }

  if (__builtin_expect (map->l_direct_opencount, 1) == 0)
    GLRO(dl_signal_error) (0, map->l_name, NULL, N_("shared object not open"));

  /* Acquire the lock.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  _dl_close_worker (map);

  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}