1 /* Call the termination functions of loaded shared objects.
2 Copyright (C) 1995,96,1998-2002,2004 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, write to the Free
17 Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
/* Type of the termination functions invoked via DT_FINI and
   DT_FINI_ARRAY entries: they take no arguments and return nothing.
   (The original comment said "constructor functions", but this type is
   used below only for destructor calls.)  */
typedef void (*fini_t) (void);
/* NOTE(review): interior of the run-time linker's termination routine
   (calls destructors of still-loaded shared objects).  The enclosing
   function header and many intermediate source lines are elided from
   this chunk, and extraction has embedded stray line numbers and split
   tokens across lines.  Code kept byte-identical; comments only.  */
34 /* Lots of fun ahead. We have to call the destructors for all still
35 loaded objects, in all namespaces. The problem is that the ELF
36 specification now demands that dependencies between the modules
37 are taken into account. I.e., the destructor for a module is
38 called before the ones for any of its dependencies.
40 To make things more complicated, we cannot simply use the reverse
41 order of the constructors. Since the user might have loaded objects
42 using `dlopen' there are possibly several other modules with its
43 dependencies to be taken into account. Therefore we have to start
44 determining the order of the modules once again from the beginning. */
/* Scratch array of still-loaded objects for the namespace currently
   being processed; (re)allocated below with alloca/extend_alloca.  */
48 struct link_map
**maps
= NULL
;
51 /* We first run the destructors of the main namespaces, then the
52 other ones. The order should not matter since the namespace
53 content is supposed to be independent. But we can have auditing
54 code in an auxiliary namespace and we want it to monitor the
56 for (Lmid_t cnt
= 0; cnt
< DL_NNS
; ++cnt
)
58 /* Protect against concurrent loads and unloads. */
59 __rtld_lock_lock_recursive (GL(dl_load_lock
));
61 nloaded
= GL(dl_ns
)[cnt
]._ns_nloaded
;
/* The base namespace must still hold at least one object at this
   point (enforced by the assert just below).  */
63 /* XXX Could it be (in static binaries) that there is no object
65 assert (cnt
!= LM_ID_BASE
|| nloaded
> 0);
67 /* Now we can allocate an array to hold all the pointers and copy
69 if (maps_size
< nloaded
* sizeof (struct link_map
*))
73 maps_size
= nloaded
* sizeof (struct link_map
*);
74 maps
= (struct link_map
**) alloca (maps_size
);
77 maps
= (struct link_map
**)
78 extend_alloca (maps
, maps_size
,
79 nloaded
* sizeof (struct link_map
*));
82 for (l
= GL(dl_ns
)[cnt
]._ns_loaded
, i
= 0; l
!= NULL
; l
= l
->l_next
)
86 /* Do not handle ld.so in secondary namespaces. */
91 /* Bump l_opencount of all objects so that they are not
92 dlclose()ed from underneath us. */
/* After the walk above, 'i' holds the number of objects copied into
   'maps'; it must match the recorded count (exactly for the base
   namespace, possibly one short elsewhere for the skipped ld.so).  */
96 assert (cnt
!= LM_ID_BASE
|| i
== nloaded
);
97 assert (cnt
== LM_ID_BASE
|| i
== nloaded
|| i
== nloaded
- 1);
/* Number of entries actually stored in 'maps'.  */
98 unsigned int nmaps
= i
;
102 /* Now we have to do the sorting. */
103 l
= GL(dl_ns
)[cnt
]._ns_loaded
;
104 if (cnt
== LM_ID_BASE
)
105 /* The main executable always comes first. */
107 for (; l
!= NULL
; l
= l
->l_next
)
112 /* Find the place in the 'maps' array. */
113 for (j
= 1; maps
[j
] != l
; ++j
)
116 /* Find all object for which the current one is a dependency and
117 move the found object (if necessary) in front. */
118 for (k
= j
+ 1; k
< nmaps
; ++k
)
120 struct link_map
**runp
= maps
[k
]->l_initfini
;
123 while (*runp
!= NULL
)
126 struct link_map
*here
= maps
[k
];
/* NOTE(review): this memmove shifts maps[j..k-1] up one slot so the
   dependent object can be placed at index j; its middle argument is
   on an elided line.  */
129 memmove (&maps
[j
] + 1,
131 (k
- j
) * sizeof (struct link_map
*));
140 if (__builtin_expect (maps
[k
]->l_reldeps
!= NULL
, 0))
142 unsigned int m
= maps
[k
]->l_reldepsact
;
143 struct link_map
**relmaps
= maps
[k
]->l_reldeps
;
/* Same reordering as above, but driven by the dynamically recorded
   relocation dependencies (l_reldeps).  */
149 struct link_map
*here
= maps
[k
];
152 memmove (&maps
[j
] + 1,
154 (k
- j
) * sizeof (struct link_map
*));
165 /* We do not rely on the linked list of loaded object anymore from
166 this point on. We have our own list here (maps). The various
167 members of this list cannot vanish since the open count is too
168 high and will be decremented in this loop. So we release the
169 lock so that some code which might be called from a destructor
170 can directly or indirectly access the lock. */
171 __rtld_lock_unlock_recursive (GL(dl_load_lock
));
173 /* 'maps' now contains the objects in the right order. Now call the
174 destructors. We have to process this array from the front. */
175 for (i
= 0; i
< nmaps
; ++i
)
179 if (l
->l_init_called
)
181 /* Make sure nothing happens if we are called twice. */
182 l
->l_init_called
= 0;
184 /* Don't call the destructors for objects we are not
186 if (l
->l_name
[0] == '\0' && l
->l_type
== lt_executable
)
189 /* Is there a destructor function? */
190 if (l
->l_info
[DT_FINI_ARRAY
] == NULL
191 && l
->l_info
[DT_FINI
] == NULL
)
194 /* When debugging print a message first. */
195 if (__builtin_expect (GLRO(dl_debug_mask
) & DL_DEBUG_IMPCALLS
,
197 _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
198 l
->l_name
[0] ? l
->l_name
: rtld_progname
,
201 /* First see whether an array is given. */
202 if (l
->l_info
[DT_FINI_ARRAY
] != NULL
)
205 (ElfW(Addr
) *) (l
->l_addr
206 + l
->l_info
[DT_FINI_ARRAY
]->d_un
.d_ptr
);
207 unsigned int i
= (l
->l_info
[DT_FINI_ARRAYSZ
]->d_un
.d_val
208 / sizeof (ElfW(Addr
)));
/* NOTE(review): the loop header driving 'i' over the fini array is on
   elided lines; each entry is invoked through the fini_t type.  */
210 ((fini_t
) array
[i
]) ();
213 /* Next try the old-style destructor. */
214 if (l
->l_info
[DT_FINI
] != NULL
)
215 ((fini_t
) DL_DT_FINI_ADDRESS (l
, l
->l_addr
+ l
->l_info
[DT_FINI
]->d_un
.d_ptr
)) ();
218 /* Correct the previous increment. */
/* Optionally dump relocation statistics when the DL_DEBUG_STATISTICS
   debug-mask bit is set.  */
223 if (__builtin_expect (GLRO(dl_debug_mask
) & DL_DEBUG_STATISTICS
, 0))
224 _dl_debug_printf ("\nruntime linker statistics:\n"
225 " final number of relocations: %lu\n"
226 "final number of relocations from cache: %lu\n",
227 GL(dl_num_relocations
),
228 GL(dl_num_cache_relocations
));