/* Call the termination functions of loaded shared objects.
   Copyright (C) 1995,96,1998-2002,2004,2005 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <alloca.h>
#include <assert.h>
#include <string.h>
#include <ldsodefs.h>

/* Type of the termination (destructor) functions.  */
typedef void (*fini_t) (void);
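
/* Sort the array MAPS (NMAPS link maps from namespace NS) so that
   every object comes before the objects it depends on; walking the
   array from the front then runs each destructor before the
   destructors of its dependencies.  If USED is not NULL it is a
   parallel array of flags which is kept in step with MAPS while
   sorting.  The helper is separate from _dl_fini so that callers such
   as dl-close.c can reuse it with their own USED bookkeeping.  */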
void
internal_function
_dl_sort_fini (struct link_map *l, struct link_map **maps, size_t nmaps,
               char *used, Lmid_t ns)
{
  if (ns == LM_ID_BASE)
    /* The main executable always comes first.  */
    l = l->l_next;

  for (; l != NULL; l = l->l_next)
    /* Do not handle ld.so in secondary namespaces and objects which
       are not removed.  */
    if (l == l->l_real && l->l_idx != -1)
      {
        /* Find the place in the 'maps' array.  */
        unsigned int j;
        for (j = ns == LM_ID_BASE ? 1 : 0; maps[j] != l; ++j)
          assert (j < nmaps);

        /* Find all objects for which the current one is a dependency
           and move the found objects (if necessary) in front.  */
        for (unsigned int k = j + 1; k < nmaps; ++k)
          {
            struct link_map **runp = maps[k]->l_initfini;
            if (runp != NULL)
              {
                while (*runp != NULL)
                  if (*runp == l)
                    {
                      struct link_map *here = maps[k];

                      /* Move it now.  */
                      memmove (&maps[j] + 1,
                               &maps[j], (k - j) * sizeof (struct link_map *));
                      maps[j] = here;

                      if (used != NULL)
                        {
                          char here_used = used[k];

                          memmove (&used[j] + 1,
                                   &used[j], (k - j) * sizeof (char));
                          used[j] = here_used;
                        }

                      ++j;

                      break;
                    }
                  else
                    ++runp;
              }
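
            /* Dependencies recorded at run time (while resolving
               relocations or through dlsym) are kept in l_reldeps; they
               constrain the destructor order just like the regular
               l_initfini dependencies scanned above.  */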
            if (__builtin_expect (maps[k]->l_reldeps != NULL, 0))
              {
                unsigned int m = maps[k]->l_reldeps->act;
                struct link_map **relmaps = &maps[k]->l_reldeps->list[0];

                while (m-- > 0)
                  {
                    if (relmaps[m] == l)
                      {
                        struct link_map *here = maps[k];

                        /* Move it now.  */
                        memmove (&maps[j] + 1,
                                 &maps[j],
                                 (k - j) * sizeof (struct link_map *));
                        maps[j] = here;

                        if (used != NULL)
                          {
                            char here_used = used[k];

                            memmove (&used[j] + 1,
                                     &used[j], (k - j) * sizeof (char));
                            used[j] = here_used;
                          }

                        break;
                      }
                  }
              }
          }
      }
}
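
/* The shutdown counterpart of _dl_init: run the destructors of all
   objects still loaded, in all namespaces.  The dynamic linker passes
   the address of this function to the program's entry point, and
   __libc_start_main registers it with __cxa_atexit so that it is
   invoked during exit.  */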
void
internal_function
_dl_fini (void)
{
  /* Lots of fun ahead.  We have to call the destructors for all still
     loaded objects, in all namespaces.  The problem is that the ELF
     specification now demands that dependencies between the modules
     are taken into account.  I.e., the destructor for a module is
     called before the ones for any of its dependencies.

     To make things more complicated, we cannot simply use the reverse
     order of the constructors.  Since the user might have loaded objects
     using `dlopen' there are possibly several other modules with their
     dependencies to be taken into account.  Therefore we have to start
     determining the order of the modules once again from the beginning.  */
  struct link_map **maps = NULL;
  size_t maps_size = 0;

  /* We run the destructors of the main namespace last.  As for the
     other namespaces, we run the destructors in them in reverse order
     of the namespace ID.  */
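  /* When auditing is enabled the namespace loop is executed twice: the
     first pass (do_audit == 0) handles the application's namespaces,
     the second pass handles the namespaces of the audit libraries.  */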
#ifdef SHARED
  int do_audit = 0;
 again:
#endif
  for (Lmid_t ns = DL_NNS - 1; ns >= 0; --ns)
    {
      /* Protect against concurrent loads and unloads.  */
      __rtld_lock_lock_recursive (GL(dl_load_lock));

      unsigned int nmaps = 0;
      unsigned int nloaded = GL(dl_ns)[ns]._ns_nloaded;
      /* No need to do anything for empty namespaces or those used for
         auditing DSOs.  */
      if (nloaded == 0
#ifdef SHARED
          || GL(dl_ns)[ns]._ns_loaded->l_auditing != do_audit
#endif
          )
        goto out;

      /* XXX Could it be (in static binaries) that there is no object
         loaded?  */
      assert (ns != LM_ID_BASE || nloaded > 0);
      /* Now we can allocate an array to hold all the pointers and copy
         the pointers in.  */
      if (maps_size < nloaded * sizeof (struct link_map *))
        {
          if (maps_size == 0)
            {
              maps_size = nloaded * sizeof (struct link_map *);
              maps = (struct link_map **) alloca (maps_size);
            }
          else
            maps = (struct link_map **)
              extend_alloca (maps, maps_size,
                             nloaded * sizeof (struct link_map *));
        }
      unsigned int i;
      struct link_map *l;
      assert (nloaded != 0 || GL(dl_ns)[ns]._ns_loaded == NULL);
      for (l = GL(dl_ns)[ns]._ns_loaded, i = 0; l != NULL; l = l->l_next)
        /* Do not handle ld.so in secondary namespaces.  */
        if (l == l->l_real)
          {
            assert (i < nloaded);

            maps[i] = l;
            l->l_idx = i;
            ++i;

            /* Bump l_direct_opencount of all objects so that they are
               not dlclose()ed from underneath us.  */
            ++l->l_direct_opencount;
          }
      /* In secondary namespaces the entry for ld.so may have been
         skipped above, so ending up one short of NLOADED is fine.  */
      assert (ns != LM_ID_BASE || i == nloaded);
      assert (ns == LM_ID_BASE || i == nloaded || i == nloaded - 1);
      nmaps = i;
      if (nmaps != 0)
        /* Now we have to do the sorting.  */
        _dl_sort_fini (GL(dl_ns)[ns]._ns_loaded, maps, nmaps, NULL, ns);

      /* We do not rely on the linked list of loaded objects anymore
         from this point on.  We have our own list here (maps).  The
         various members of this list cannot vanish since the open count
         is too high and will be decremented in this loop.  So we release
         the lock so that some code which might be called from a
         destructor can directly or indirectly access the lock.  */
    out:
      __rtld_lock_unlock_recursive (GL(dl_load_lock));
      /* 'maps' now contains the objects in the right order.  Now call
         the destructors.  We have to process this array from the
         front.  */
      for (i = 0; i < nmaps; ++i)
        {
          l = maps[i];

          if (l->l_init_called)
            {
              /* Make sure nothing happens if we are called twice.  */
              l->l_init_called = 0;

              /* Is there a destructor function?  */
              if (l->l_info[DT_FINI_ARRAY] != NULL
                  || l->l_info[DT_FINI] != NULL)
                {
                  /* When debugging print a message first.  */
                  if (__builtin_expect (GLRO(dl_debug_mask)
                                        & DL_DEBUG_IMPCALLS, 0))
                    _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
                                      l->l_name[0] ? l->l_name : rtld_progname,
                                      ns);
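
                  /* Run the functions from DT_FINI_ARRAY in reverse
                     order, followed by the old-style DT_FINI function,
                     as the ELF gABI specifies.  */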
                  /* First see whether an array is given.  */
                  if (l->l_info[DT_FINI_ARRAY] != NULL)
                    {
                      ElfW(Addr) *array =
                        (ElfW(Addr) *) (l->l_addr
                                        + l->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
                      unsigned int i = (l->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
                                        / sizeof (ElfW(Addr)));
                      while (i-- > 0)
                        ((fini_t) array[i]) ();
                    }

                  /* Next try the old-style destructor.  */
                  if (l->l_info[DT_FINI] != NULL)
                    ((fini_t) DL_DT_FINI_ADDRESS (l, l->l_addr + l->l_info[DT_FINI]->d_un.d_ptr)) ();
                }
#ifdef SHARED
              /* Auditing checkpoint: another object closed.  */
              if (!do_audit && __builtin_expect (GLRO(dl_naudit) > 0, 0))
                {
                  struct audit_ifaces *afct = GLRO(dl_audit);
                  for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
                    {
                      if (afct->objclose != NULL)
                        /* Return value is ignored.  */
                        (void) afct->objclose (&l->l_audit[cnt].cookie);

                      afct = afct->next;
                    }
                }
#endif
            }
          /* Correct the previous increment.  */
          --l->l_direct_opencount;
        }
    }
#ifdef SHARED
  if (! do_audit && GLRO(dl_naudit) > 0)
    {
      do_audit = 1;
      goto again;
    }

  if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_STATISTICS, 0))
    _dl_debug_printf ("\nruntime linker statistics:\n"
                      "           final number of relocations: %lu\n"
                      "final number of relocations from cache: %lu\n",
                      GL(dl_num_relocations),
                      GL(dl_num_cache_relocations));
#endif
}