1 /* Close a shared object opened by `_dl_open'.
2 Copyright (C) 1996-2002, 2003, 2004 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
26 #include <bits/libc-lock.h>
28 #include <sys/types.h>
/* Type of the destructor (fini) functions called when unloading.  */
typedef void (*fini_t) (void);
37 /* Returns true we an non-empty was found. */
39 remove_slotinfo (size_t idx
, struct dtv_slotinfo_list
*listp
, size_t disp
,
42 if (idx
- disp
>= listp
->len
)
44 if (listp
->next
== NULL
)
46 /* The index is not actually valid in the slotinfo list,
47 because this object was closed before it was fully set
48 up due to some error. */
49 assert (! should_be_there
);
53 if (remove_slotinfo (idx
, listp
->next
, disp
+ listp
->len
,
57 /* No non-empty entry. Search from the end of this element's
59 idx
= disp
+ listp
->len
;
64 struct link_map
*old_map
= listp
->slotinfo
[idx
- disp
].map
;
66 /* The entry might still be in its unused state if we are closing an
67 object that wasn't fully set up. */
68 if (__builtin_expect (old_map
!= NULL
, 1))
70 assert (old_map
->l_tls_modid
== idx
);
72 /* Mark the entry as unused. */
73 listp
->slotinfo
[idx
- disp
].gen
= GL(dl_tls_generation
) + 1;
74 listp
->slotinfo
[idx
- disp
].map
= NULL
;
77 /* If this is not the last currently used entry no need to look
79 if (idx
!= GL(dl_tls_max_dtv_idx
))
83 while (idx
- disp
> (disp
== 0 ? 1 + GL(dl_tls_static_nelem
) : 0))
87 if (listp
->slotinfo
[idx
- disp
].map
!= NULL
)
89 /* Found a new last used index. */
90 GL(dl_tls_max_dtv_idx
) = idx
;
95 /* No non-entry in this list element. */
103 _dl_close (void *_map
)
105 struct link_map
*map
= _map
;
107 Lmid_t ns
= map
->l_ns
;
109 bool any_tls
= false;
112 /* First see whether we can remove the object at all. */
113 if (__builtin_expect (map
->l_flags_1
& DF_1_NODELETE
, 0)
114 && map
->l_init_called
)
115 /* Nope. Do nothing. */
118 if (__builtin_expect (map
->l_direct_opencount
, 1) == 0)
119 GLRO(dl_signal_error
) (0, map
->l_name
, NULL
, N_("shared object not open"));
121 /* Acquire the lock. */
122 __rtld_lock_lock_recursive (GL(dl_load_lock
));
124 /* One less direct use. */
125 --map
->l_direct_opencount
;
127 /* Decrement the reference count. */
128 if (map
->l_direct_opencount
> 1 || map
->l_type
!= lt_loaded
)
130 /* There are still references to this object. Do nothing more. */
131 if (__builtin_expect (GLRO(dl_debug_mask
) & DL_DEBUG_FILES
, 0))
132 GLRO(dl_debug_printf
) ("\nclosing file=%s; direct_opencount == %u\n",
133 map
->l_name
, map
->l_direct_opencount
);
135 __rtld_lock_unlock_recursive (GL(dl_load_lock
));
139 const unsigned int nloaded
= GL(dl_ns
)[ns
]._ns_nloaded
;
142 struct link_map
*maps
[nloaded
];
144 /* Run over the list and assign indeces to the link maps and enter
145 them into the MAPS array. */
147 for (struct link_map
*l
= GL(dl_ns
)[ns
]._ns_loaded
; l
!= NULL
; l
= l
->l_next
)
153 assert (idx
== nloaded
);
155 /* Prepare the bitmaps. */
156 memset (used
, '\0', sizeof (used
));
157 memset (done
, '\0', sizeof (done
));
159 /* Keep track of the lowest index link map we have covered already. */
161 while (++done_index
< nloaded
)
163 struct link_map
*l
= maps
[done_index
];
165 if (done
[done_index
])
166 /* Already handled. */
169 /* Check whether this object is still used. */
170 if (l
->l_type
== lt_loaded
171 && l
->l_direct_opencount
== 0
172 && (l
->l_flags_1
& DF_1_NODELETE
) == 0
173 && !used
[done_index
])
176 /* We need this object and we handle it now. */
177 done
[done_index
] = 1;
178 used
[done_index
] = 1;
179 /* Signal the object is still needed. */
182 /* Mark all dependencies as used. */
183 if (l
->l_initfini
!= NULL
)
185 struct link_map
**lp
= &l
->l_initfini
[1];
188 if ((*lp
)->l_idx
!= -1)
190 assert ((*lp
)->l_idx
>= 0 && (*lp
)->l_idx
< nloaded
);
192 if (!used
[(*lp
)->l_idx
])
194 used
[(*lp
)->l_idx
] = 1;
195 if ((*lp
)->l_idx
- 1 < done_index
)
196 done_index
= (*lp
)->l_idx
- 1;
203 /* And the same for relocation dependencies. */
204 if (l
->l_reldeps
!= NULL
)
205 for (unsigned int j
= 0; j
< l
->l_reldepsact
; ++j
)
207 struct link_map
*jmap
= l
->l_reldeps
[j
];
209 if (jmap
->l_idx
!= -1)
211 assert (jmap
->l_idx
>= 0 && jmap
->l_idx
< nloaded
);
213 if (!used
[jmap
->l_idx
])
215 used
[jmap
->l_idx
] = 1;
216 if (jmap
->l_idx
- 1 < done_index
)
217 done_index
= jmap
->l_idx
- 1;
223 /* Sort the entries. */
224 _dl_sort_fini (GL(dl_ns
)[ns
]._ns_loaded
, maps
, nloaded
, used
, ns
);
226 bool unload_any
= false;
227 unsigned int first_loaded
= ~0;
228 for (i
= 0; i
< nloaded
; ++i
)
230 struct link_map
*imap
= maps
[i
];
234 assert (imap
->l_type
== lt_loaded
235 && (imap
->l_flags_1
& DF_1_NODELETE
) == 0);
237 /* Call its termination function. Do not do it for
238 half-cooked objects. */
239 if (imap
->l_init_called
)
241 /* When debugging print a message first. */
242 if (__builtin_expect (GLRO(dl_debug_mask
) & DL_DEBUG_IMPCALLS
, 0))
243 GLRO(dl_debug_printf
) ("\ncalling fini: %s [%lu]\n\n",
246 if (imap
->l_info
[DT_FINI_ARRAY
] != NULL
)
249 (ElfW(Addr
) *) (imap
->l_addr
250 + imap
->l_info
[DT_FINI_ARRAY
]->d_un
.d_ptr
);
251 unsigned int sz
= (imap
->l_info
[DT_FINI_ARRAYSZ
]->d_un
.d_val
252 / sizeof (ElfW(Addr
)));
255 ((fini_t
) array
[sz
]) ();
258 /* Next try the old-style destructor. */
259 if (imap
->l_info
[DT_FINI
] != NULL
)
260 (*(void (*) (void)) DL_DT_FINI_ADDRESS
261 (imap
, ((void *) imap
->l_addr
262 + imap
->l_info
[DT_FINI
]->d_un
.d_ptr
))) ();
265 /* This object must not be used anymore. */
268 /* We indeed have an object to remove. */
271 /* Remember where the first dynamically loaded object is. */
272 if (i
< first_loaded
)
276 else if (imap
->l_type
== lt_loaded
)
278 if (imap
->l_searchlist
.r_list
== NULL
279 && imap
->l_initfini
!= NULL
)
281 /* The object is still used. But the object we are
282 unloading right now is responsible for loading it. If
283 the current object does not have it's own scope yet we
284 have to create one. This has to be done before running
287 To do this count the number of dependencies. */
289 for (cnt
= 1; imap
->l_initfini
[cnt
] != NULL
; ++cnt
)
292 /* We simply reuse the l_initfini list. */
293 imap
->l_searchlist
.r_list
= &imap
->l_initfini
[cnt
+ 1];
294 imap
->l_searchlist
.r_nlist
= cnt
;
296 for (cnt
= 0; imap
->l_scope
[cnt
] != NULL
; ++cnt
)
297 if (imap
->l_scope
[cnt
] == &map
->l_searchlist
)
299 imap
->l_scope
[cnt
] = &imap
->l_searchlist
;
304 /* The loader is gone, so mark the object as not having one.
305 Note: l_idx == -1 -> object will be removed. */
306 if (imap
->l_loader
!= NULL
&& imap
->l_loader
->l_idx
!= -1)
307 imap
->l_loader
= NULL
;
309 /* Remember where the first dynamically loaded object is. */
310 if (i
< first_loaded
)
315 /* If there are no objects to unload, do nothing further. */
319 /* Notify the debugger we are about to remove some loaded objects. */
320 _r_debug
.r_state
= RT_DELETE
;
321 GLRO(dl_debug_state
) ();
324 size_t tls_free_start
;
326 tls_free_start
= tls_free_end
= NO_TLS_OFFSET
;
329 /* Check each element of the search list to see if all references to
331 for (i
= first_loaded
; i
< nloaded
; ++i
)
333 struct link_map
*imap
= maps
[i
];
336 assert (imap
->l_type
== lt_loaded
);
338 /* That was the last reference, and this was a dlopen-loaded
339 object. We can unmap it. */
340 if (__builtin_expect (imap
->l_global
, 0))
342 /* This object is in the global scope list. Remove it. */
344 = GL(dl_ns
)[imap
->l_ns
]._ns_main_searchlist
->r_nlist
;
348 while (GL(dl_ns
)[imap
->l_ns
]._ns_main_searchlist
->r_list
[cnt
]
351 /* The object was already correctly registered. */
353 < GL(dl_ns
)[imap
->l_ns
]._ns_main_searchlist
->r_nlist
)
354 GL(dl_ns
)[imap
->l_ns
]._ns_main_searchlist
->r_list
[cnt
- 1]
355 = GL(dl_ns
)[imap
->l_ns
]._ns_main_searchlist
->r_list
[cnt
];
357 --GL(dl_ns
)[imap
->l_ns
]._ns_main_searchlist
->r_nlist
;
361 /* Remove the object from the dtv slotinfo array if it uses TLS. */
362 if (__builtin_expect (imap
->l_tls_blocksize
> 0, 0))
366 if (! remove_slotinfo (imap
->l_tls_modid
,
367 GL(dl_tls_dtv_slotinfo_list
), 0,
368 imap
->l_init_called
))
369 /* All dynamically loaded modules with TLS are unloaded. */
370 GL(dl_tls_max_dtv_idx
) = GL(dl_tls_static_nelem
);
372 if (imap
->l_tls_offset
!= NO_TLS_OFFSET
)
374 /* Collect a contiguous chunk built from the objects in
375 this search list, going in either direction. When the
376 whole chunk is at the end of the used area then we can
379 if (tls_free_start
== NO_TLS_OFFSET
380 || (size_t) imap
->l_tls_offset
== tls_free_start
)
382 /* Extend the contiguous chunk being reclaimed. */
384 = imap
->l_tls_offset
- imap
->l_tls_blocksize
;
386 if (tls_free_end
== NO_TLS_OFFSET
)
387 tls_free_end
= imap
->l_tls_offset
;
389 else if (imap
->l_tls_offset
- imap
->l_tls_blocksize
391 /* Extend the chunk backwards. */
392 tls_free_end
= imap
->l_tls_offset
;
395 /* This isn't contiguous with the last chunk freed.
396 One of them will be leaked unless we can free
397 one block right away. */
398 if (tls_free_end
== GL(dl_tls_static_used
))
400 GL(dl_tls_static_used
) = tls_free_start
;
401 tls_free_end
= imap
->l_tls_offset
;
403 = tls_free_end
- imap
->l_tls_blocksize
;
405 else if ((size_t) imap
->l_tls_offset
406 == GL(dl_tls_static_used
))
407 GL(dl_tls_static_used
)
408 = imap
->l_tls_offset
- imap
->l_tls_blocksize
;
409 else if (tls_free_end
< (size_t) imap
->l_tls_offset
)
411 /* We pick the later block. It has a chance to
413 tls_free_end
= imap
->l_tls_offset
;
415 = tls_free_end
- imap
->l_tls_blocksize
;
419 if ((size_t) imap
->l_tls_offset
== tls_free_end
)
420 /* Extend the contiguous chunk being reclaimed. */
421 tls_free_end
-= imap
->l_tls_blocksize
;
422 else if (imap
->l_tls_offset
+ imap
->l_tls_blocksize
424 /* Extend the chunk backwards. */
425 tls_free_start
= imap
->l_tls_offset
;
428 /* This isn't contiguous with the last chunk freed.
429 One of them will be leaked. */
430 if (tls_free_end
== GL(dl_tls_static_used
))
431 GL(dl_tls_static_used
) = tls_free_start
;
432 tls_free_start
= imap
->l_tls_offset
;
433 tls_free_end
= tls_free_start
+ imap
->l_tls_blocksize
;
436 # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
442 /* We can unmap all the maps at once. We determined the
443 start address and length when we loaded the object and
444 the `munmap' call does the rest. */
447 /* Finally, unlink the data structure and free it. */
448 if (imap
->l_prev
!= NULL
)
449 imap
->l_prev
->l_next
= imap
->l_next
;
453 assert (imap
->l_ns
!= LM_ID_BASE
);
455 GL(dl_ns
)[imap
->l_ns
]._ns_loaded
= imap
->l_next
;
458 --GL(dl_ns
)[imap
->l_ns
]._ns_nloaded
;
459 if (imap
->l_next
!= NULL
)
460 imap
->l_next
->l_prev
= imap
->l_prev
;
462 free (imap
->l_versions
);
463 if (imap
->l_origin
!= (char *) -1)
464 free ((char *) imap
->l_origin
);
466 free (imap
->l_reldeps
);
468 /* This name always is allocated. */
470 /* Remove the list with all the names of the shared object. */
472 struct libname_list
*lnp
= imap
->l_libname
;
475 struct libname_list
*this = lnp
;
477 if (!this->dont_free
)
482 /* Remove the searchlists. */
483 free (imap
->l_initfini
);
485 /* Remove the scope array if we allocated it. */
486 if (imap
->l_scope
!= imap
->l_scope_mem
)
487 free (imap
->l_scope
);
489 if (imap
->l_phdr_allocated
)
490 free ((void *) imap
->l_phdr
);
492 if (imap
->l_rpath_dirs
.dirs
!= (void *) -1)
493 free (imap
->l_rpath_dirs
.dirs
);
494 if (imap
->l_runpath_dirs
.dirs
!= (void *) -1)
495 free (imap
->l_runpath_dirs
.dirs
);
502 /* If we removed any object which uses TLS bump the generation counter. */
505 if (__builtin_expect (++GL(dl_tls_generation
) == 0, 0))
506 __libc_fatal (_("TLS generation counter wrapped! Please report as described in <http://www.gnu.org/software/libc/bugs.html>."));
508 if (tls_free_end
== GL(dl_tls_static_used
))
509 GL(dl_tls_static_used
) = tls_free_start
;
513 /* Notify the debugger those objects are finalized and gone. */
514 _r_debug
.r_state
= RT_CONSISTENT
;
515 GLRO(dl_debug_state
) ();
517 /* Release the lock. */
519 __rtld_lock_unlock_recursive (GL(dl_load_lock
));
521 libc_hidden_def (_dl_close
)
525 static bool __libc_freeres_fn_section
526 free_slotinfo (struct dtv_slotinfo_list
**elemp
)
531 /* Nothing here, all is removed (or there never was anything). */
534 if (!free_slotinfo (&(*elemp
)->next
))
535 /* We cannot free the entry. */
538 /* That cleared our next pointer for us. */
540 for (cnt
= 0; cnt
< (*elemp
)->len
; ++cnt
)
541 if ((*elemp
)->slotinfo
[cnt
].map
!= NULL
)
545 /* We can remove the list element. */
554 libc_freeres_fn (free_mem
)
556 for (Lmid_t ns
= 0; ns
< DL_NNS
; ++ns
)
557 if (__builtin_expect (GL(dl_ns
)[ns
]._ns_global_scope_alloc
, 0) != 0
558 && (GL(dl_ns
)[ns
]._ns_main_searchlist
->r_nlist
559 // XXX Check whether we need NS-specific initial_searchlist
560 == GLRO(dl_initial_searchlist
).r_nlist
))
562 /* All object dynamically loaded by the program are unloaded. Free
563 the memory allocated for the global scope variable. */
564 struct link_map
**old
= GL(dl_ns
)[ns
]._ns_main_searchlist
->r_list
;
566 /* Put the old map in. */
567 GL(dl_ns
)[ns
]._ns_main_searchlist
->r_list
568 // XXX Check whether we need NS-specific initial_searchlist
569 = GLRO(dl_initial_searchlist
).r_list
;
570 /* Signal that the original map is used. */
571 GL(dl_ns
)[ns
]._ns_global_scope_alloc
= 0;
573 /* Now free the old map. */
578 if (USE___THREAD
|| GL(dl_tls_dtv_slotinfo_list
) != NULL
)
580 /* Free the memory allocated for the dtv slotinfo array. We can do
581 this only if all modules which used this memory are unloaded. */
583 if (GL(dl_initial_dtv
) == NULL
)
584 /* There was no initial TLS setup, it was set up later when
585 it used the normal malloc. */
586 free_slotinfo (&GL(dl_tls_dtv_slotinfo_list
));
589 /* The first element of the list does not have to be deallocated.
590 It was allocated in the dynamic linker (i.e., with a different
591 malloc), and in the static library it's in .bss space. */
592 free_slotinfo (&GL(dl_tls_dtv_slotinfo_list
)->next
);