(gaih_inet): If NAME is a numerical IP address and AI_CANONNAME is set, return copy...
[glibc.git] / elf / dl-fini.c
blob dd405de415ed624439645427ca19800f05ea4007
1 /* Call the termination functions of loaded shared objects.
2 Copyright (C) 1995,96,1998-2002,2004 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, write to the Free
17 Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
18 02111-1307 USA. */
20 #include <alloca.h>
21 #include <assert.h>
22 #include <string.h>
23 #include <ldsodefs.h>
/* Type of the termination (DT_FINI / DT_FINI_ARRAY) functions.  They
   take no arguments and return nothing.  */
typedef void (*fini_t) (void);
30 void
31 internal_function
32 _dl_fini (void)
34 /* Lots of fun ahead. We have to call the destructors for all still
35 loaded objects. The problem is that the ELF specification now
36 demands that dependencies between the modules are taken into account.
37 I.e., the destructor for a module is called before the ones for any
38 of its dependencies.
40 To make things more complicated, we cannot simply use the reverse
41 order of the constructors. Since the user might have loaded objects
42 using `dlopen' there are possibly several other modules with its
43 dependencies to be taken into account. Therefore we have to start
44 determining the order of the modules once again from the beginning. */
45 unsigned int i;
46 unsigned int nloaded;
47 struct link_map *l;
48 struct link_map **maps;
50 /* Protect against concurrent loads and unloads. */
51 __rtld_lock_lock_recursive (GL(dl_load_lock));
53 nloaded = GL(dl_nloaded);
55 /* XXX Could it be (in static binaries) that there is no object loaded? */
56 assert (nloaded > 0);
58 /* Now we can allocate an array to hold all the pointers and copy
59 the pointers in. */
60 maps = (struct link_map **) alloca (nloaded * sizeof (struct link_map *));
61 for (l = GL(dl_loaded), i = 0; l != NULL; l = l->l_next)
63 assert (i < nloaded);
65 maps[i++] = l;
67 /* Bump l_opencount of all objects so that they are not dlclose()ed
68 from underneath us. */
69 ++l->l_opencount;
71 assert (i == nloaded);
73 /* Now we have to do the sorting. */
74 for (l = GL(dl_loaded)->l_next; l != NULL; l = l->l_next)
76 unsigned int j;
77 unsigned int k;
79 /* Find the place in the 'maps' array. */
80 for (j = 1; maps[j] != l; ++j)
83 /* Find all object for which the current one is a dependency and
84 move the found object (if necessary) in front. */
85 for (k = j + 1; k < nloaded; ++k)
87 struct link_map **runp = maps[k]->l_initfini;
88 if (runp != NULL)
90 while (*runp != NULL)
91 if (*runp == l)
93 struct link_map *here = maps[k];
95 /* Move it now. */
96 memmove (&maps[j] + 1,
97 &maps[j],
98 (k - j) * sizeof (struct link_map *));
99 maps[j++] = here;
101 break;
103 else
104 ++runp;
107 if (__builtin_expect (maps[k]->l_reldeps != NULL, 0))
109 unsigned int m = maps[k]->l_reldepsact;
110 struct link_map **relmaps = maps[k]->l_reldeps;
112 while (m-- > 0)
114 if (relmaps[m] == l)
116 struct link_map *here = maps[k];
118 /* Move it now. */
119 memmove (&maps[j] + 1,
120 &maps[j],
121 (k - j) * sizeof (struct link_map *));
122 maps[j] = here;
124 break;
131 /* We do not rely on the linked list of loaded object anymore from
132 this point on. We have our own list here (maps). The various
133 members of this list cannot vanish since the open count is too
134 high and will be decremented in this loop. So we release the
135 lock so that some code which might be called from a destructor
136 can directly or indirectly access the lock. */
137 __rtld_lock_unlock_recursive (GL(dl_load_lock));
139 /* 'maps' now contains the objects in the right order. Now call the
140 destructors. We have to process this array from the front. */
141 for (i = 0; i < nloaded; ++i)
143 l = maps[i];
145 if (l->l_init_called)
147 /* Make sure nothing happens if we are called twice. */
148 l->l_init_called = 0;
150 /* Don't call the destructors for objects we are not supposed to. */
151 if (l->l_name[0] == '\0' && l->l_type == lt_executable)
152 continue;
154 /* Is there a destructor function? */
155 if (l->l_info[DT_FINI_ARRAY] == NULL && l->l_info[DT_FINI] == NULL)
156 continue;
158 /* When debugging print a message first. */
159 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS, 0))
160 _dl_debug_printf ("\ncalling fini: %s\n\n",
161 l->l_name[0] ? l->l_name : rtld_progname);
163 /* First see whether an array is given. */
164 if (l->l_info[DT_FINI_ARRAY] != NULL)
166 ElfW(Addr) *array =
167 (ElfW(Addr) *) (l->l_addr
168 + l->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
169 unsigned int i = (l->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
170 / sizeof (ElfW(Addr)));
171 while (i-- > 0)
172 ((fini_t) array[i]) ();
175 /* Next try the old-style destructor. */
176 if (l->l_info[DT_FINI] != NULL)
177 ((fini_t) DL_DT_FINI_ADDRESS (l, l->l_addr + l->l_info[DT_FINI]->d_un.d_ptr)) ();
180 /* Correct the previous increment. */
181 --l->l_opencount;
184 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_STATISTICS, 0))
185 _dl_debug_printf ("\nruntime linker statistics:\n"
186 " final number of relocations: %lu\n"
187 "final number of relocations from cache: %lu\n",
188 GL(dl_num_relocations),
189 GL(dl_num_cache_relocations));