/* Call the termination functions of loaded shared objects.
   Copyright (C) 1995, 1996, 1998-2002, 2004-2005, 2009, 2011-2012
   Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
/* Type of the termination (fini) functions of loaded objects: both
   DT_FINI entry points and DT_FINI_ARRAY elements are called through
   this signature.  (The old comment said "constructor", but this type
   is only used to invoke destructors in this file.)  */
typedef void (*fini_t) (void);
32 _dl_sort_fini (struct link_map
**maps
, size_t nmaps
, char *used
, Lmid_t ns
)
34 /* A list of one element need not be sorted. */
38 /* We can skip looking for the binary itself which is at the front
39 of the search list for the main namespace. */
40 unsigned int i
= ns
== LM_ID_BASE
;
42 memset (seen
, 0, nmaps
* sizeof (seen
[0]));
45 /* Keep track of which object we looked at this round. */
47 struct link_map
*thisp
= maps
[i
];
49 /* Do not handle ld.so in secondary namespaces and object which
51 if (thisp
!= thisp
->l_real
|| thisp
->l_idx
== -1)
54 /* Find the last object in the list for which the current one is
55 a dependency and move the current object behind the object
56 with the dependency. */
57 unsigned int k
= nmaps
- 1;
60 struct link_map
**runp
= maps
[k
]->l_initfini
;
62 /* Look through the dependencies of the object. */
64 if (__builtin_expect (*runp
++ == thisp
, 0))
67 /* Move the current object to the back past the last
68 object with it as the dependency. */
69 memmove (&maps
[i
], &maps
[i
+ 1],
70 (k
- i
) * sizeof (maps
[0]));
75 char here_used
= used
[i
];
76 memmove (&used
[i
], &used
[i
+ 1],
77 (k
- i
) * sizeof (used
[0]));
81 if (seen
[i
+ 1] > nmaps
- i
)
87 uint16_t this_seen
= seen
[i
];
88 memmove (&seen
[i
], &seen
[i
+ 1], (k
- i
) * sizeof (seen
[0]));
94 if (__builtin_expect (maps
[k
]->l_reldeps
!= NULL
, 0))
96 unsigned int m
= maps
[k
]->l_reldeps
->act
;
97 struct link_map
**relmaps
= &maps
[k
]->l_reldeps
->list
[0];
99 /* Look through the relocation dependencies of the object. */
101 if (__builtin_expect (relmaps
[m
] == thisp
, 0))
103 /* If a cycle exists with a link time dependency,
104 preserve the latter. */
105 struct link_map
**runp
= thisp
->l_initfini
;
107 while (*runp
!= NULL
)
108 if (__builtin_expect (*runp
++ == maps
[k
], 0))
122 memset (&seen
[i
], 0, (nmaps
- i
) * sizeof (seen
[0]));
133 /* Lots of fun ahead. We have to call the destructors for all still
134 loaded objects, in all namespaces. The problem is that the ELF
135 specification now demands that dependencies between the modules
136 are taken into account. I.e., the destructor for a module is
137 called before the ones for any of its dependencies.
139 To make things more complicated, we cannot simply use the reverse
140 order of the constructors. Since the user might have loaded objects
141 using `dlopen' there are possibly several other modules with its
142 dependencies to be taken into account. Therefore we have to start
143 determining the order of the modules once again from the beginning. */
144 struct link_map
**maps
= NULL
;
145 size_t maps_size
= 0;
147 /* We run the destructors of the main namespaces last. As for the
148 other namespaces, we pick run the destructors in them in reverse
149 order of the namespace ID. */
154 for (Lmid_t ns
= GL(dl_nns
) - 1; ns
>= 0; --ns
)
156 /* Protect against concurrent loads and unloads. */
157 __rtld_lock_lock_recursive (GL(dl_load_lock
));
159 unsigned int nmaps
= 0;
160 unsigned int nloaded
= GL(dl_ns
)[ns
]._ns_nloaded
;
161 /* No need to do anything for empty namespaces or those used for
165 || GL(dl_ns
)[ns
]._ns_loaded
->l_auditing
!= do_audit
170 /* XXX Could it be (in static binaries) that there is no object
172 assert (ns
!= LM_ID_BASE
|| nloaded
> 0);
174 /* Now we can allocate an array to hold all the pointers and copy
176 if (maps_size
< nloaded
* sizeof (struct link_map
*))
180 maps_size
= nloaded
* sizeof (struct link_map
*);
181 maps
= (struct link_map
**) alloca (maps_size
);
184 maps
= (struct link_map
**)
185 extend_alloca (maps
, maps_size
,
186 nloaded
* sizeof (struct link_map
*));
191 assert (nloaded
!= 0 || GL(dl_ns
)[ns
]._ns_loaded
== NULL
);
192 for (l
= GL(dl_ns
)[ns
]._ns_loaded
, i
= 0; l
!= NULL
; l
= l
->l_next
)
193 /* Do not handle ld.so in secondary namespaces. */
196 assert (i
< nloaded
);
202 /* Bump l_direct_opencount of all objects so that they are
203 not dlclose()ed from underneath us. */
204 ++l
->l_direct_opencount
;
206 assert (ns
!= LM_ID_BASE
|| i
== nloaded
);
207 assert (ns
== LM_ID_BASE
|| i
== nloaded
|| i
== nloaded
- 1);
210 /* Now we have to do the sorting. */
211 _dl_sort_fini (maps
, nmaps
, NULL
, ns
);
213 /* We do not rely on the linked list of loaded object anymore from
214 this point on. We have our own list here (maps). The various
215 members of this list cannot vanish since the open count is too
216 high and will be decremented in this loop. So we release the
217 lock so that some code which might be called from a destructor
218 can directly or indirectly access the lock. */
220 __rtld_lock_unlock_recursive (GL(dl_load_lock
));
222 /* 'maps' now contains the objects in the right order. Now call the
223 destructors. We have to process this array from the front. */
224 for (i
= 0; i
< nmaps
; ++i
)
228 if (l
->l_init_called
)
230 /* Make sure nothing happens if we are called twice. */
231 l
->l_init_called
= 0;
233 /* Is there a destructor function? */
234 if (l
->l_info
[DT_FINI_ARRAY
] != NULL
235 || l
->l_info
[DT_FINI
] != NULL
)
237 /* When debugging print a message first. */
238 if (__builtin_expect (GLRO(dl_debug_mask
)
239 & DL_DEBUG_IMPCALLS
, 0))
240 _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
241 l
->l_name
[0] ? l
->l_name
: rtld_progname
,
244 /* First see whether an array is given. */
245 if (l
->l_info
[DT_FINI_ARRAY
] != NULL
)
248 (ElfW(Addr
) *) (l
->l_addr
249 + l
->l_info
[DT_FINI_ARRAY
]->d_un
.d_ptr
);
250 unsigned int i
= (l
->l_info
[DT_FINI_ARRAYSZ
]->d_un
.d_val
251 / sizeof (ElfW(Addr
)));
253 ((fini_t
) array
[i
]) ();
256 /* Next try the old-style destructor. */
257 if (l
->l_info
[DT_FINI
] != NULL
)
258 ((fini_t
) DL_DT_FINI_ADDRESS (l
, l
->l_addr
+ l
->l_info
[DT_FINI
]->d_un
.d_ptr
)) ();
262 /* Auditing checkpoint: another object closed. */
263 if (!do_audit
&& __builtin_expect (GLRO(dl_naudit
) > 0, 0))
265 struct audit_ifaces
*afct
= GLRO(dl_audit
);
266 for (unsigned int cnt
= 0; cnt
< GLRO(dl_naudit
); ++cnt
)
268 if (afct
->objclose
!= NULL
)
269 /* Return value is ignored. */
270 (void) afct
->objclose (&l
->l_audit
[cnt
].cookie
);
278 /* Correct the previous increment. */
279 --l
->l_direct_opencount
;
284 if (! do_audit
&& GLRO(dl_naudit
) > 0)
290 if (__builtin_expect (GLRO(dl_debug_mask
) & DL_DEBUG_STATISTICS
, 0))
291 _dl_debug_printf ("\nruntime linker statistics:\n"
292 " final number of relocations: %lu\n"
293 "final number of relocations from cache: %lu\n",
294 GL(dl_num_relocations
),
295 GL(dl_num_cache_relocations
));