Updated to fedora-glibc-20050208T0948
[glibc.git] / elf / dl-fini.c
blobb3282089a933ce5dca3ba8b0e34b292d745e6a9f
1 /* Call the termination functions of loaded shared objects.
2 Copyright (C) 1995,96,1998-2002,2004, 2005 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, write to the Free
17 Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
18 02111-1307 USA. */
20 #include <alloca.h>
21 #include <assert.h>
22 #include <string.h>
23 #include <ldsodefs.h>
26 /* Type of the termination functions invoked below: entries of
   DT_FINI_ARRAY and the DT_FINI routine are cast to and called
   through this pointer type.  (The upstream comment said
   "constructor", but in this file the type is only used for
   destructors.)  */
27 typedef void (*fini_t) (void);
/* NOTE(review): this copy comes from a web rendering of glibc's
   elf/dl-fini.c.  The leading decimal on each line is the rendered
   source line number, and lines that contained only braces appear to
   have been dropped by the extraction — the block is therefore not
   compilable as shown.  Compare against upstream before building.  */
/* _dl_fini: called at process termination to run the destructors of
   every still-loaded object, in every namespace, in dependency order:
   an object's destructor must run before the destructors of anything
   it depends on (see the memmove-based reordering below).  */
30 void
31 internal_function
32 _dl_fini (void)
34 /* Lots of fun ahead. We have to call the destructors for all still
35 loaded objects, in all namespaces. The problem is that the ELF
36 specification now demands that dependencies between the modules
37 are taken into account. I.e., the destructor for a module is
38 called before the ones for any of its dependencies.
40 To make things more complicated, we cannot simply use the reverse
41 order of the constructors. Since the user might have loaded objects
42 using `dlopen' there are possibly several other modules with its
43 dependencies to be taken into account. Therefore we have to start
44 determining the order of the modules once again from the beginning. */
45 struct link_map **maps = NULL;
46 size_t maps_size = 0;
48 /* We run the destructors of the main namespaces last. As for the
49 other namespaces, we pick run the destructors in them in reverse
50 order of the namespace ID. */
/* Two passes when auditing support is compiled in: the first pass
   (do_audit == 0) handles ordinary namespaces; the "goto again" near
   the bottom repeats everything with do_audit == 1 so the auditing
   DSOs themselves are finalized last (see the l_auditing test).  */
51 #ifdef SHARED
52 int do_audit = 0;
53 again:
54 #endif
55 for (Lmid_t cnt = DL_NNS - 1; cnt >= 0; --cnt)
57 /* Protect against concurrent loads and unloads. */
58 __rtld_lock_lock_recursive (GL(dl_load_lock));
60 unsigned int nmaps = 0;
61 unsigned int nloaded = GL(dl_ns)[cnt]._ns_nloaded;
62 /* No need to do anything for empty namespaces or those used for
63 auditing DSOs. */
64 if (nloaded == 0
65 #ifdef SHARED
66 || GL(dl_ns)[cnt]._ns_loaded->l_auditing != do_audit
67 #endif
69 goto out;
71 /* XXX Could it be (in static binaries) that there is no object
72 loaded? */
73 assert (cnt != LM_ID_BASE || nloaded > 0);
75 /* Now we can allocate an array to hold all the pointers and copy
76 the pointers in. */
/* The scratch array lives on the stack (alloca/extend_alloca) and is
   reused — possibly grown — across namespace iterations.  */
77 if (maps_size < nloaded * sizeof (struct link_map *))
79 if (maps_size == 0)
81 maps_size = nloaded * sizeof (struct link_map *);
82 maps = (struct link_map **) alloca (maps_size);
84 else
85 maps = (struct link_map **)
86 extend_alloca (maps, maps_size,
87 nloaded * sizeof (struct link_map *));
90 unsigned int i;
91 struct link_map *l;
92 assert (nloaded != 0 || GL(dl_ns)[cnt]._ns_loaded == NULL);
93 for (l = GL(dl_ns)[cnt]._ns_loaded, i = 0; l != NULL; l = l->l_next)
94 /* Do not handle ld.so in secondary namespaces. */
95 if (l == l->l_real)
97 assert (i < nloaded);
99 maps[i++] = l;
101 /* Bump l_opencount of all objects so that they are not
102 dlclose()ed from underneath us. */
103 ++l->l_opencount;
105 assert (cnt != LM_ID_BASE || i == nloaded);
106 assert (cnt == LM_ID_BASE || i == nloaded || i == nloaded - 1);
107 nmaps = i;
/* Dependency sort: for each object l (in link-list order), any later
   maps[] entry that lists l among its dependencies (l_initfini, or
   the relocation dependencies l_reldeps) is moved in front of l via
   memmove, so dependents are finalized before their dependencies.  */
109 if (nmaps != 0)
111 /* Now we have to do the sorting. */
112 l = GL(dl_ns)[cnt]._ns_loaded;
113 if (cnt == LM_ID_BASE)
114 /* The main executable always comes first. */
115 l = l->l_next;
116 for (; l != NULL; l = l->l_next)
117 /* Do not handle ld.so in secondary namespaces. */
118 if (l == l->l_real)
120 /* Find the place in the 'maps' array. */
121 unsigned int j;
122 for (j = cnt == LM_ID_BASE ? 1 : 0; maps[j] != l; ++j)
123 assert (j < nmaps);
125 /* Find all object for which the current one is a dependency
126 and move the found object (if necessary) in front. */
127 for (unsigned int k = j + 1; k < nmaps; ++k)
129 struct link_map **runp = maps[k]->l_initfini;
130 if (runp != NULL)
132 while (*runp != NULL)
133 if (*runp == l)
135 struct link_map *here = maps[k];
137 /* Move it now. */
138 memmove (&maps[j] + 1,
139 &maps[j],
140 (k - j) * sizeof (struct link_map *));
141 maps[j++] = here;
143 break;
145 else
146 ++runp;
/* Also honor dependencies discovered at relocation time.  */
149 if (__builtin_expect (maps[k]->l_reldeps != NULL, 0))
151 unsigned int m = maps[k]->l_reldepsact;
152 struct link_map **relmaps = maps[k]->l_reldeps;
154 while (m-- > 0)
156 if (relmaps[m] == l)
158 struct link_map *here = maps[k];
160 /* Move it now. */
161 memmove (&maps[j] + 1,
162 &maps[j],
163 (k - j) * sizeof (struct link_map *));
164 maps[j] = here;
/* NOTE(review): unlike the l_initfini path above, j is NOT
   incremented after this move.  Verify against upstream that the
   asymmetry is intentional before changing anything here.  */
166 break;
174 /* We do not rely on the linked list of loaded object anymore from
175 this point on. We have our own list here (maps). The various
176 members of this list cannot vanish since the open count is too
177 high and will be decremented in this loop. So we release the
178 lock so that some code which might be called from a destructor
179 can directly or indirectly access the lock. */
180 out:
181 __rtld_lock_unlock_recursive (GL(dl_load_lock));
183 /* 'maps' now contains the objects in the right order. Now call the
184 destructors. We have to process this array from the front. */
/* Destructors run without the load lock held, so a destructor may
   itself call dlopen/dlclose; the opencount bump above keeps the
   maps[] entries alive meanwhile.  */
185 for (i = 0; i < nmaps; ++i)
187 l = maps[i];
189 if (l->l_init_called)
191 /* Make sure nothing happens if we are called twice. */
192 l->l_init_called = 0;
194 /* Is there a destructor function? */
195 if (l->l_info[DT_FINI_ARRAY] != NULL
196 || l->l_info[DT_FINI] != NULL)
198 /* When debugging print a message first. */
199 if (__builtin_expect (GLRO(dl_debug_mask)
200 & DL_DEBUG_IMPCALLS, 0))
201 _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
202 l->l_name[0] ? l->l_name : rtld_progname,
203 cnt);
205 /* First see whether an array is given. */
206 if (l->l_info[DT_FINI_ARRAY] != NULL)
208 ElfW(Addr) *array =
209 (ElfW(Addr) *) (l->l_addr
210 + l->l_info[DT_FINI_ARRAY]->d_un.d_ptr)
211 unsigned int i = (l->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
212 / sizeof (ElfW(Addr)));
/* The array entries are invoked back to front.  */
213 while (i-- > 0)
214 ((fini_t) array[i]) ();
217 /* Next try the old-style destructor. */
218 if (l->l_info[DT_FINI] != NULL)
219 ((fini_t) DL_DT_FINI_ADDRESS (l, l->l_addr + l->l_info[DT_FINI]->d_un.d_ptr)) ();
222 #ifdef SHARED
223 /* Auditing checkpoint: another object closed. */
224 if (!do_audit && __builtin_expect (GLRO(dl_naudit) > 0, 0))
226 struct audit_ifaces *afct = GLRO(dl_audit);
227 for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
229 if (afct->objclose != NULL)
230 /* Return value is ignored. */
231 (void) afct->objclose (&l->l_audit[cnt].cookie);
233 afct = afct->next;
236 #endif
239 /* Correct the previous increment. */
240 --l->l_opencount;
/* Second pass: if LD_AUDIT modules are loaded, rerun the whole
   procedure for the auditing namespaces themselves.  */
244 #ifdef SHARED
245 if (! do_audit && GLRO(dl_naudit) > 0)
247 do_audit = 1;
248 goto again;
250 #endif
252 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_STATISTICS, 0))
253 _dl_debug_printf ("\nruntime linker statistics:\n"
254 " final number of relocations: %lu\n"
255 "final number of relocations from cache: %lu\n",
256 GL(dl_num_relocations),
257 GL(dl_num_cache_relocations));