1 /* Call the termination functions of loaded shared objects.
2 Copyright (C) 1995-2017 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <http://www.gnu.org/licenses/>. */
24 /* Type of the constructor functions. */
/* NOTE(review): despite the comment above (copied from dl-init.c), this
   function-pointer type is used below to invoke DT_FINI_ARRAY destructor
   entries: `((fini_t) array[i]) ();'.  */
25 typedef void (*fini_t
) (void);
31 /* Lots of fun ahead. We have to call the destructors for all still
32 loaded objects, in all namespaces. The problem is that the ELF
33 specification now demands that dependencies between the modules
34 are taken into account. I.e., the destructor for a module is
35 called before the ones for any of its dependencies.
37 To make things more complicated, we cannot simply use the reverse
38 order of the constructors. Since the user might have loaded objects
39 using `dlopen' there are possibly several other modules with its
40 dependencies to be taken into account. Therefore we have to start
41 determining the order of the modules once again from the beginning. */
43 /* We run the destructors of the main namespaces last. As for the
44 other namespaces, we run the destructors in them in reverse
45 order of the namespace ID. */
/* NOTE(review): interior of _dl_fini (glibc elf/dl-fini.c), recovered from a
   lossy extraction: expressions are split across lines and original source
   lines are missing wherever the embedded line numbers jump (56->60, 74->83,
   94->97, 126->129, 156->164, ...).  Text kept byte-identical; comments
   below flag each gap.  Restore from upstream glibc before compiling.  */
/* Walk the namespaces from the highest ID down to 0 so the base (main)
   namespace is processed last.  */
50 for (Lmid_t ns
= GL(dl_nns
) - 1; ns
>= 0; --ns
)
52 /* Protect against concurrent loads and unloads. */
53 __rtld_lock_lock_recursive (GL(dl_load_lock
));
/* Snapshot the object count for this namespace while the lock is held.  */
55 unsigned int nloaded
= GL(dl_ns
)[ns
]._ns_nloaded
;
/* NOTE(review): the opening of the guard condition (original lines 57-59,
   presumably testing nloaded == 0 and the auditing state) is missing; only
   its tail survives below -- confirm against upstream.  */
56 /* No need to do anything for empty namespaces or those used for
60 || GL(dl_ns
)[ns
]._ns_loaded
->l_auditing
!= do_audit
/* Skipped namespace: release the lock (a `continue;', original line ~64,
   presumably follows but is missing here).  */
63 __rtld_lock_unlock_recursive (GL(dl_load_lock
));
66 /* Now we can allocate an array to hold all the pointers and
67 copy the pointers in. */
/* VLA sized by the snapshot taken above, still under the lock.  */
68 struct link_map
*maps
[nloaded
];
/* NOTE(review): the declarations of `i' and `l' (original lines ~69-71)
   are missing from this extraction.  */
72 assert (nloaded
!= 0 || GL(dl_ns
)[ns
]._ns_loaded
== NULL
);
73 for (l
= GL(dl_ns
)[ns
]._ns_loaded
, i
= 0; l
!= NULL
; l
= l
->l_next
)
74 /* Do not handle ld.so in secondary namespaces. */
/* NOTE(review): original lines 75-82 -- the guard this comment refers to,
   plus the code that stores `l' into maps[i] and advances `i' -- are
   missing here.  */
83 /* Bump l_direct_opencount of all objects so that they
84 are not dlclose()ed from underneath us. */
85 ++l
->l_direct_opencount
;
/* In the base namespace every object must have been collected; elsewhere
   one object (per the comment at line 74, ld.so) may have been skipped.  */
87 assert (ns
!= LM_ID_BASE
|| i
== nloaded
);
88 assert (ns
== LM_ID_BASE
|| i
== nloaded
|| i
== nloaded
- 1);
89 unsigned int nmaps
= i
;
91 /* Now we have to do the sorting. We can skip looking for the
92 binary itself which is at the front of the search list for
93 the main namespace. */
94 _dl_sort_maps (maps
+ (ns
== LM_ID_BASE
), nmaps
- (ns
== LM_ID_BASE
),
/* NOTE(review): the remaining arguments of this _dl_sort_maps call
   (original lines 95-96) are missing.  */
97 /* We do not rely on the linked list of loaded object anymore
98 from this point on. We have our own list here (maps). The
99 various members of this list cannot vanish since the open
100 count is too high and will be decremented in this loop. So
101 we release the lock so that some code which might be called
102 from a destructor can directly or indirectly access the
104 __rtld_lock_unlock_recursive (GL(dl_load_lock
));
106 /* 'maps' now contains the objects in the right order. Now
107 call the destructors. We have to process this array from
109 for (i
= 0; i
< nmaps
; ++i
)
111 struct link_map
*l
= maps
[i
];
/* Only run destructors for objects whose constructors ran.  */
113 if (l
->l_init_called
)
115 /* Make sure nothing happens if we are called twice. */
116 l
->l_init_called
= 0;
118 /* Is there a destructor function? */
119 if (l
->l_info
[DT_FINI_ARRAY
] != NULL
120 || l
->l_info
[DT_FINI
] != NULL
)
122 /* When debugging print a message first. */
123 if (__builtin_expect (GLRO(dl_debug_mask
)
124 & DL_DEBUG_IMPCALLS
, 0))
125 _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
126 DSO_FILENAME (l
->l_name
),
/* NOTE(review): the final argument of this _dl_debug_printf call
   (original lines 127-128; the format expects a %lu, presumably the
   namespace ID) is missing.  */
129 /* First see whether an array is given. */
130 if (l
->l_info
[DT_FINI_ARRAY
] != NULL
)
/* NOTE(review): the left-hand side of this initialization (original
   lines 131-132, presumably `ElfW(Addr) *array =') is missing -- the
   result is used as `array' below.  */
133 (ElfW(Addr
) *) (l
->l_addr
134 + l
->l_info
[DT_FINI_ARRAY
]->d_un
.d_ptr
);
/* Entry count of the fini array, from its byte size.  */
135 unsigned int i
= (l
->l_info
[DT_FINI_ARRAYSZ
]->d_un
.d_val
136 / sizeof (ElfW(Addr
)));
/* NOTE(review): the loop header driving this call (original line 137,
   presumably `while (i-- > 0)' so entries run in reverse order) is
   missing.  */
138 ((fini_t
) array
[i
]) ();
141 /* Next try the old-style destructor. */
142 if (l
->l_info
[DT_FINI
] != NULL
)
/* NOTE(review): the callee of this invocation (original line 143,
   presumably the DL_CALL_DT_FINI macro) is missing.  */
144 (l
, l
->l_addr
+ l
->l_info
[DT_FINI
]->d_un
.d_ptr
);
148 /* Auditing checkpoint: another object closed. */
149 if (!do_audit
&& __builtin_expect (GLRO(dl_naudit
) > 0, 0))
151 struct audit_ifaces
*afct
= GLRO(dl_audit
);
152 for (unsigned int cnt
= 0; cnt
< GLRO(dl_naudit
); ++cnt
)
154 if (afct
->objclose
!= NULL
)
155 /* Return value is ignored. */
156 (void) afct
->objclose (&l
->l_audit
[cnt
].cookie
);
/* NOTE(review): the advance to the next audit interface (original
   lines ~157-161, presumably `afct = afct->next;') is missing.  */
164 /* Correct the previous increment. */
165 --l
->l_direct_opencount
;
/* NOTE(review): original lines 166-170 (closing braces ending the
   destructor loop and the namespace loop) are missing.  The body of the
   following `if' (original lines 172-176, presumably the auditors'
   final-consistency notification) is also missing.  */
171 if (! do_audit
&& GLRO(dl_naudit
) > 0)
/* Dump relocation statistics when requested via the debug mask.  */
177 if (__glibc_unlikely (GLRO(dl_debug_mask
) & DL_DEBUG_STATISTICS
))
178 _dl_debug_printf ("\nruntime linker statistics:\n"
179 " final number of relocations: %lu\n"
180 "final number of relocations from cache: %lu\n",
181 GL(dl_num_relocations
),
182 GL(dl_num_cache_relocations
));