/* Call the termination functions of loaded shared objects.
   Copyright (C) 1995,96,1998-2002,2004-2005,2009,2011
   Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <alloca.h>
#include <assert.h>
#include <string.h>
#include <ldsodefs.h>

/* Type of the constructor functions.  */
typedef void (*fini_t) (void);


void
internal_function
_dl_sort_fini (struct link_map **maps, size_t nmaps, char *used, Lmid_t ns)
{
  /* A list of one element need not be sorted.  */
  if (nmaps == 1)
    return;

  /* We can skip looking for the binary itself which is at the front
     of the search list for the main namespace.  */
  unsigned int i = ns == LM_ID_BASE;
  char seen[nmaps];
  memset (seen, 0, nmaps * sizeof (seen[0]));
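
  /* Repeatedly scan the list: each round looks at the object at
     position I and, if some later object depends on it, moves it
     behind the last such object.  The SEEN counters keep the scan
     from looping forever on cyclic dependencies.  */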
  while (1)
    {
      /* Keep track of which object we looked at this round.  */
      ++seen[i];
      struct link_map *thisp = maps[i];

      /* Do not handle ld.so in secondary namespaces and objects which
         are not removed.  */
      if (thisp != thisp->l_real || thisp->l_idx == -1)
        goto skip;

      /* Find the last object in the list for which the current one is
         a dependency and move the current object behind the object
         with the dependency.  */
      unsigned int k = nmaps - 1;
      while (k > i)
        {
          struct link_map **runp = maps[k]->l_initfini;
          if (runp != NULL)
            /* Look through the dependencies of the object.  */
            while (*runp != NULL)
              if (__builtin_expect (*runp++ == thisp, 0))
                {
                move:
                  /* Move the current object to the back past the last
                     object with it as the dependency.  */
                  memmove (&maps[i], &maps[i + 1],
                           (k - i) * sizeof (maps[0]));
                  maps[k] = thisp;

                  if (used != NULL)
                    {
                      char here_used = used[i];
                      memmove (&used[i], &used[i + 1],
                               (k - i) * sizeof (used[0]));
                      used[k] = here_used;
                    }

                  if (seen[i + 1] > 1)
                    {
                      ++i;
                      goto next_clear;
                    }

                  char this_seen = seen[i];
                  memmove (&seen[i], &seen[i + 1], (k - i) * sizeof (seen[0]));
                  seen[k] = this_seen;

                  goto next;
                }

          if (__builtin_expect (maps[k]->l_reldeps != NULL, 0))
            {
              unsigned int m = maps[k]->l_reldeps->act;
              struct link_map **relmaps = &maps[k]->l_reldeps->list[0];

              /* Look through the relocation dependencies of the object.  */
              while (m-- > 0)
                if (__builtin_expect (relmaps[m] == thisp, 0))
                  {
                    /* If a cycle exists with a link time dependency,
                       preserve the latter.  */
                    struct link_map **runp = thisp->l_initfini;
                    if (runp != NULL)
                      while (*runp != NULL)
                        if (__builtin_expect (*runp++ == maps[k], 0))
                          goto ignore;
                    goto move;
                  }
            ignore:;
            }

          --k;
        }

    skip:
      if (++i == nmaps)
        break;

    next_clear:
      memset (&seen[i], 0, (nmaps - i) * sizeof (seen[0]));

    next:;
    }
}


void
internal_function
_dl_fini (void)
{
  /* Lots of fun ahead.  We have to call the destructors for all still
     loaded objects, in all namespaces.  The problem is that the ELF
     specification now demands that dependencies between the modules
     are taken into account.  I.e., the destructor for a module is
     called before the ones for any of its dependencies.

     To make things more complicated, we cannot simply use the reverse
     order of the constructors.  Since the user might have loaded objects
     using `dlopen', there may be other modules whose dependencies also
     have to be taken into account.  Therefore we have to start
     determining the order of the modules once again from the beginning.  */
  struct link_map **maps = NULL;
  size_t maps_size = 0;

  /* We run the destructors of the main namespace last.  As for the
     other namespaces, we run the destructors in them in reverse
     order of the namespace ID.  */
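  /* When auditing is in use the loop below runs twice: the first
     pass (do_audit == 0) handles the application's objects, the
     second pass handles the auditing DSOs themselves.  */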
#ifdef SHARED
  int do_audit = 0;
 again:
#endif
  for (Lmid_t ns = GL(dl_nns) - 1; ns >= 0; --ns)
    {
      /* Protect against concurrent loads and unloads.  */
      __rtld_lock_lock_recursive (GL(dl_load_lock));

      unsigned int nmaps = 0;
      unsigned int nloaded = GL(dl_ns)[ns]._ns_nloaded;
      /* No need to do anything for empty namespaces or those used for
         auditing DSOs.  */
      if (nloaded == 0
#ifdef SHARED
          || GL(dl_ns)[ns]._ns_loaded->l_auditing != do_audit
#endif
          )
        __rtld_lock_unlock_recursive (GL(dl_load_lock));
      else
        {
          /* XXX Could it be (in static binaries) that there is no object
             loaded at all?  */
          assert (ns != LM_ID_BASE || nloaded > 0);

          /* Now we can allocate an array to hold all the pointers and copy
             the pointers in.  */
          if (maps_size < nloaded * sizeof (struct link_map *))
            {
              if (maps_size == 0)
                {
                  maps_size = nloaded * sizeof (struct link_map *);
                  maps = (struct link_map **) alloca (maps_size);
                }
              else
                maps = (struct link_map **)
                  extend_alloca (maps, maps_size,
                                 nloaded * sizeof (struct link_map *));
            }

          unsigned int i;
          struct link_map *l;
          assert (nloaded != 0 || GL(dl_ns)[ns]._ns_loaded == NULL);
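
          /* Collect the still-loaded objects of this namespace into
             MAPS and record each object's index in l_idx.  */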
          for (l = GL(dl_ns)[ns]._ns_loaded, i = 0; l != NULL; l = l->l_next)
            /* Do not handle ld.so in secondary namespaces.  */
            if (l == l->l_real)
              {
                assert (i < nloaded);

                maps[i] = l;
                l->l_idx = i;
                ++i;

                /* Bump l_direct_opencount of all objects so that they are
                   not dlclose()ed from underneath us.  */
                ++l->l_direct_opencount;
              }
          assert (ns != LM_ID_BASE || i == nloaded);
          assert (ns == LM_ID_BASE || i == nloaded || i == nloaded - 1);
          nmaps = i;

          /* Now we have to do the sorting.  */
          _dl_sort_fini (maps, nmaps, NULL, ns);

          /* We do not rely on the linked list of loaded objects anymore
             from this point on.  We have our own list here (maps).  The
             various members of this list cannot vanish since the open
             count is too high and will be decremented in this loop.  So
             we release the lock so that some code which might be called
             from a destructor can directly or indirectly access the
             lock.  */
          __rtld_lock_unlock_recursive (GL(dl_load_lock));

          /* 'maps' now contains the objects in the right order.  Now
             call the destructors.  We have to process this array from
             the front.  */
          for (i = 0; i < nmaps; ++i)
            {
              l = maps[i];

              if (l->l_init_called)
                {
                  /* Make sure nothing happens if we are called twice.  */
                  l->l_init_called = 0;

                  /* Is there a destructor function?  */
                  if (l->l_info[DT_FINI_ARRAY] != NULL
                      || l->l_info[DT_FINI] != NULL)
                    {
                      /* When debugging print a message first.  */
                      if (__builtin_expect (GLRO(dl_debug_mask)
                                            & DL_DEBUG_IMPCALLS, 0))
                        _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
                                          l->l_name[0] ? l->l_name
                                          : rtld_progname,
                                          ns);

                      /* First see whether an array is given.  */
                      if (l->l_info[DT_FINI_ARRAY] != NULL)
                        {
                          ElfW(Addr) *array =
                            (ElfW(Addr) *) (l->l_addr
                                            + l->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
                          unsigned int i = (l->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
                                            / sizeof (ElfW(Addr)));
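                          /* The functions registered in DT_FINI_ARRAY
                             are run in reverse order.  */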
                          while (i-- > 0)
                            ((fini_t) array[i]) ();
                        }

                      /* Next try the old-style destructor.  */
                      if (l->l_info[DT_FINI] != NULL)
                        ((fini_t) DL_DT_FINI_ADDRESS (l, l->l_addr
                                                      + l->l_info[DT_FINI]->d_un.d_ptr)) ();
                    }

#ifdef SHARED
                  /* Auditing checkpoint: another object closed.  */
                  if (!do_audit && __builtin_expect (GLRO(dl_naudit) > 0, 0))
                    {
                      struct audit_ifaces *afct = GLRO(dl_audit);
                      for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
                        {
                          if (afct->objclose != NULL)
                            /* Return value is ignored.  */
                            (void) afct->objclose (&l->l_audit[cnt].cookie);

                          afct = afct->next;
                        }
                    }
#endif
                }

              /* Correct the previous increment.  */
              --l->l_direct_opencount;
            }
        }
    }
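
  /* If auditing DSOs are loaded, make a second pass so that their
     own destructors are run as well.  */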
#ifdef SHARED
  if (! do_audit && GLRO(dl_naudit) > 0)
    {
      do_audit = 1;
      goto again;
    }
#endif

  if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_STATISTICS, 0))
    _dl_debug_printf ("\nruntime linker statistics:\n"
                      "           final number of relocations: %lu\n"
                      "final number of relocations from cache: %lu\n",
                      GL(dl_num_relocations),
                      GL(dl_num_cache_relocations));
}