/* Call the termination functions of loaded shared objects.
   Copyright (C) 1995-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <ldsodefs.h>
#include <elf-initfini.h>


/* Type of the termination functions.  */
typedef void (*fini_t) (void);
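
/* Both DT_FINI and every DT_FINI_ARRAY entry point to a function that
   takes no arguments and returns nothing, which is what fini_t
   describes.  */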

void
_dl_fini (void)
{
  /* Lots of fun ahead.  We have to call the destructors for all still
     loaded objects, in all namespaces.  The problem is that the ELF
     specification now demands that dependencies between the modules
     are taken into account.  I.e., the destructor for a module is
     called before the ones for any of its dependencies.

     To make things more complicated, we cannot simply use the reverse
     order of the constructors.  Since the user might have loaded objects
     using `dlopen' there are possibly several other modules with their
     dependencies to be taken into account.  Therefore we have to start
     determining the order of the modules once again from the beginning.  */
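
  /* As a (hypothetical) illustration: if a plugin DSO was dlopen'ed and
     links against a utility DSO that the main program also uses, the
     plugin's destructors have to run before the utility library's, no
     matter in which order the two objects were initialized.  */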

  /* We run the destructors of the main namespace last.  As for the
     other namespaces, we run the destructors in them in reverse
     order of the namespace ID.  */
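  /* The main (base) namespace has ID LM_ID_BASE, i.e. 0, so counting
     ns down to zero visits it last.  */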
#ifdef SHARED
  int do_audit = 0;
 again:
#endif
  for (Lmid_t ns = GL(dl_nns) - 1; ns >= 0; --ns)
    {
      /* Protect against concurrent loads and unloads.  */
      __rtld_lock_lock_recursive (GL(dl_load_lock));

      unsigned int nloaded = GL(dl_ns)[ns]._ns_nloaded;
      /* No need to do anything for empty namespaces or those used for
         auditing DSOs.  */
      if (nloaded == 0
#ifdef SHARED
          || GL(dl_ns)[ns]._ns_loaded->l_auditing != do_audit
#endif
          )
        __rtld_lock_unlock_recursive (GL(dl_load_lock));
      else
        {
          /* Now we can allocate an array to hold all the pointers and
             copy the pointers in.  */
          struct link_map *maps[nloaded];

          unsigned int i;
          struct link_map *l;
          assert (nloaded != 0 || GL(dl_ns)[ns]._ns_loaded == NULL);
          for (l = GL(dl_ns)[ns]._ns_loaded, i = 0; l != NULL; l = l->l_next)
            /* Do not handle ld.so in secondary namespaces.  */
            if (l == l->l_real)
              {
                assert (i < nloaded);

                maps[i] = l;
                l->l_idx = i;
                ++i;

                /* Bump l_direct_opencount of all objects so that they
                   are not dlclose()ed from underneath us.  */
                ++l->l_direct_opencount;
              }
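
          /* In a secondary namespace the dynamic linker itself is
             represented by a proxy link map with l != l->l_real, so the
             loop above skips it; the asserts below account for that.  */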
          assert (ns != LM_ID_BASE || i == nloaded);
          assert (ns == LM_ID_BASE || i == nloaded || i == nloaded - 1);
          unsigned int nmaps = i;

          /* Now we have to do the sorting.  We can skip looking for the
             binary itself which is at the front of the search list for
             the main namespace.  */
          _dl_sort_maps (maps + (ns == LM_ID_BASE), nmaps - (ns == LM_ID_BASE),
                         NULL, true);
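
          /* After the sort every object precedes the objects it depends
             on, which is exactly the order in which the destructors have
             to be called.  */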

          /* We do not rely on the linked list of loaded objects anymore
             from this point on.  We have our own list here (maps).  The
             various members of this list cannot vanish since the open
             count is too high and will be decremented in this loop.  So
             we release the lock so that some code which might be called
             from a destructor can directly or indirectly access the
             lock.  */
          __rtld_lock_unlock_recursive (GL(dl_load_lock));

          /* 'maps' now contains the objects in the right order.  Now
             call the destructors.  We have to process this array from
             the front.  */
          for (i = 0; i < nmaps; ++i)
            {
              struct link_map *l = maps[i];

              if (l->l_init_called)
                {
                  /* Make sure nothing happens if we are called twice.  */
                  l->l_init_called = 0;

                  /* Is there a destructor function?  */
                  if (l->l_info[DT_FINI_ARRAY] != NULL
                      || (ELF_INITFINI && l->l_info[DT_FINI] != NULL))
                    {
                      /* When debugging print a message first.  */
                      if (__builtin_expect (GLRO(dl_debug_mask)
                                            & DL_DEBUG_IMPCALLS, 0))
                        _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
                                          DSO_FILENAME (l->l_name),
                                          l->l_ns);

                      /* First see whether an array is given.  */
                      if (l->l_info[DT_FINI_ARRAY] != NULL)
                        {
                          ElfW(Addr) *array =
                            (ElfW(Addr) *) (l->l_addr
                                            + l->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
                          unsigned int i = (l->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
                                            / sizeof (ElfW(Addr)));
                          while (i-- > 0)
                            ((fini_t) array[i]) ();
                        }
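
                      /* Note the downward-counting loop: DT_FINI_ARRAY
                         entries are executed in reverse order of their
                         appearance in the array, mirroring the forward
                         order used for DT_INIT_ARRAY at startup.  */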

                      /* Next try the old-style destructor.  */
                      if (ELF_INITFINI && l->l_info[DT_FINI] != NULL)
                        DL_CALL_DT_FINI
                          (l, l->l_addr + l->l_info[DT_FINI]->d_un.d_ptr);
                    }

#ifdef SHARED
                  /* Auditing checkpoint: another object closed.  */
                  if (!do_audit && __builtin_expect (GLRO(dl_naudit) > 0, 0))
                    {
                      struct audit_ifaces *afct = GLRO(dl_audit);
                      for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
                        {
                          if (afct->objclose != NULL)
                            {
                              struct auditstate *state
                                = link_map_audit_state (l, cnt);
                              /* Return value is ignored.  */
                              (void) afct->objclose (&state->cookie);
                            }
                          afct = afct->next;
                        }
                    }
#endif
                }

              /* Correct the previous increment.  */
              --l->l_direct_opencount;
            }
        }
    }

#ifdef SHARED
  if (! do_audit && GLRO(dl_naudit) > 0)
    {
      do_audit = 1;
      goto again;
    }
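
  /* These counters are reported when the process was started with
     LD_DEBUG=statistics in the environment.  */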
  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_STATISTICS))
    _dl_debug_printf ("\nruntime linker statistics:\n"
                      "           final number of relocations: %lu\n"
                      "final number of relocations from cache: %lu\n",
                      GL(dl_num_relocations),
                      GL(dl_num_cache_relocations));
#endif
}