elf/dl-find_object.c
/* Locating objects in the process image.  ld.so implementation.
   Copyright (C) 2021-2024 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <atomic.h>
#include <atomic_wide_counter.h>
#include <dl-find_object.h>
#include <dlfcn.h>
#include <ldsodefs.h>
#include <link.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Fallback implementation of _dl_find_object.  It uses a linear
   search, needs locking, and is not async-signal-safe.  It is used in
   _dl_find_object prior to initialization, when called from audit
   modules.  It also serves as the reference implementation for
   _dl_find_object.  */
static int
_dl_find_object_slow (void *pc, struct dl_find_object *result)
{
  ElfW(Addr) addr = (ElfW(Addr)) pc;
  for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
    for (struct link_map *l = GL(dl_ns)[ns]._ns_loaded; l != NULL;
         l = l->l_next)
      if (addr >= l->l_map_start && addr < l->l_map_end
          && (l->l_contiguous || _dl_addr_inside_object (l, addr)))
        {
          assert (ns == l->l_ns);
          struct dl_find_object_internal internal;
          _dl_find_object_from_map (l, &internal);
          _dl_find_object_to_external (&internal, result);
          return 0;
        }

  /* Object not found.  */
  return -1;
}

/* Data for the main executable.  There is usually a large gap between
   the main executable and initially loaded shared objects.  Record
   the main executable separately, to increase the chance that the
   range for the non-closeable mappings below covers only the shared
   objects (and not also the gap between main executable and shared
   objects).  */
static struct dl_find_object_internal _dlfo_main attribute_relro;

/* Data for initially loaded shared objects that cannot be unloaded.
   (This may also contain non-contiguous mappings from the main
   executable.)  The mappings are stored in address order in the
   _dlfo_nodelete_mappings array (containing
   _dlfo_nodelete_mappings_size elements).  It is not modified after
   initialization.  */
static uintptr_t _dlfo_nodelete_mappings_end attribute_relro;
static size_t _dlfo_nodelete_mappings_size attribute_relro;
static struct dl_find_object_internal *_dlfo_nodelete_mappings
  attribute_relro;

/* Mappings created by dlopen can go away with dlclose, so a dynamic
   data structure with some synchronization is needed.  Individual
   segments are similar to the _dlfo_nodelete_mappings array above.
   The previous segment contains lower addresses and is at most half
   as long.  Checking the base address of the first element during a
   lookup can therefore approximate a binary search over all
   segments, even though the data is not stored in one contiguous
   array.

   During updates, the segments are overwritten in place.  A software
   transactional memory construct (involving the
   _dlfo_loaded_mappings_version variable) is used to detect
   concurrent modification, and retry as necessary.  (This approach is
   similar to seqlocks, except that two copies are used, and there is
   only one writer, ever, due to the loader lock.)  Technically,
   relaxed MO loads and stores need to be used for the shared TM data,
   to avoid data races.

   The memory allocations are never deallocated, but slots used for
   objects that have been dlclose'd can be reused by dlopen.  The
   memory can live in the regular C malloc heap.

   The segments are populated from the start of the list, with the
   mappings with the highest addresses.  Only if this segment is full,
   previous segments are used for mappings at lower addresses.  The
   remaining segments are populated as needed, but after allocating
   further segments, some of the initial segments (at the end of the
   linked list) can be empty (with size 0).

   Adding new elements to this data structure is another source of
   quadratic behavior for dlopen.  If the other causes of quadratic
   behavior are eliminated, a more complicated data structure will be
   needed.  */
struct dlfo_mappings_segment
{
  /* The previous segment has lower base addresses.  Constant after
     initialization; read in the TM region.  */
  struct dlfo_mappings_segment *previous;

  /* Used by __libc_freeres to deallocate malloc'ed memory.  */
  void *to_free;

  /* Count of array elements in use and allocated.  */
  size_t size;                  /* Read in the TM region.  */
  size_t allocated;

  struct dl_find_object_internal objects[]; /* Read in the TM region.  */
};

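/* Illustrative layout sketch (added for exposition; the numbers are
   examples only).  With the initial size of 63 and the doubling
   policy in _dlfo_mappings_segment_allocate below, the active chain
   after a few dlopen-heavy updates might look like:

     _dlfo_loaded_mappings[version & 1]
       -> segment { allocated = 252, highest map_start values }
       -> previous { allocated = 126, lower addresses }
       -> previous { allocated = 63, lowest addresses, possibly size 0 }

   A lookup compares PC against objects[0].map_start of each segment
   and only descends to ->previous when PC is smaller, so the
   per-segment binary searches approximate one search over the whole
   address-sorted set.  */
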
/* To achieve async-signal-safety, two copies of the data structure
   are used, so that a signal handler can still use this data even if
   dlopen or dlclose modify the other copy.  The least significant
   bit in _dlfo_loaded_mappings_version determines which array element
   is the currently active region.  */
static struct dlfo_mappings_segment *_dlfo_loaded_mappings[2];

/* Returns the number of actually used elements in all segments
   starting at SEG.  */
static inline size_t
_dlfo_mappings_segment_count_used (struct dlfo_mappings_segment *seg)
{
  size_t count = 0;
  for (; seg != NULL && seg->size > 0; seg = seg->previous)
    for (size_t i = 0; i < seg->size; ++i)
      /* Exclude elements which have been dlclose'd.  */
      count += seg->objects[i].map != NULL;
  return count;
}

/* Compute the total number of allocated array elements in the
   segments linked from SEG.  */
static inline size_t
_dlfo_mappings_segment_count_allocated (struct dlfo_mappings_segment *seg)
{
  size_t count = 0;
  for (; seg != NULL; seg = seg->previous)
    count += seg->allocated;
  return count;
}

/* This is essentially an arbitrary value.  dlopen allocates plenty of
   memory anyway, so over-allocating a bit does not hurt.  Not having
   many small-ish segments helps to avoid many small binary searches.
   Not using a power of 2 means that we do not waste an extra page
   just for the malloc header if a mapped allocation is used in the
   glibc allocator.  */
enum { dlfo_mappings_initial_segment_size = 63 };

/* Allocate an empty segment.  This is used for the first ever
   allocation.  */
static struct dlfo_mappings_segment *
_dlfo_mappings_segment_allocate_unpadded (size_t size)
{
  if (size < dlfo_mappings_initial_segment_size)
    size = dlfo_mappings_initial_segment_size;
  /* No overflow checks here because the size is a mapping count, and
     struct link_map is larger than what we allocate here.  */
  enum
    {
      element_size = sizeof ((struct dlfo_mappings_segment) {}.objects[0])
    };
  size_t to_allocate = (sizeof (struct dlfo_mappings_segment)
                        + size * element_size);
  struct dlfo_mappings_segment *result = malloc (to_allocate);
  if (result != NULL)
    {
      result->previous = NULL;
      result->to_free = NULL; /* Minimal malloc memory cannot be freed.  */
      result->size = 0;
      result->allocated = size;
    }
  return result;
}

/* Allocate an empty segment that is at least SIZE large.  PREVIOUS
   points to the chain of previously allocated segments and can be
   NULL.  */
static struct dlfo_mappings_segment *
_dlfo_mappings_segment_allocate (size_t size,
                                 struct dlfo_mappings_segment * previous)
{
  /* Exponential sizing policy, so that lookup approximates a binary
     search.  */
  {
    size_t minimum_growth;
    if (previous == NULL)
      minimum_growth = dlfo_mappings_initial_segment_size;
    else
      minimum_growth = 2 * previous->allocated;
    if (size < minimum_growth)
      size = minimum_growth;
  }
  enum { cache_line_size_estimate = 128 };
  /* No overflow checks here because the size is a mapping count, and
     struct link_map is larger than what we allocate here.  */
  enum
    {
      element_size = sizeof ((struct dlfo_mappings_segment) {}.objects[0])
    };
  size_t to_allocate = (sizeof (struct dlfo_mappings_segment)
                        + size * element_size
                        + 2 * cache_line_size_estimate);
  char *ptr = malloc (to_allocate);
  if (ptr == NULL)
    return NULL;
  char *original_ptr = ptr;
  /* Start and end at a (conservative) 128-byte cache line boundary.
     Do not use memalign for compatibility with partially interposing
     malloc implementations.  */
  char *end = PTR_ALIGN_DOWN (ptr + to_allocate, cache_line_size_estimate);
  ptr = PTR_ALIGN_UP (ptr, cache_line_size_estimate);
  struct dlfo_mappings_segment *result
    = (struct dlfo_mappings_segment *) ptr;
  result->previous = previous;
  result->to_free = original_ptr;
  result->size = 0;
  /* We may have obtained slightly more space if malloc happened
     to provide an over-aligned pointer.  */
  result->allocated = (((uintptr_t) (end - ptr)
                        - sizeof (struct dlfo_mappings_segment))
                       / element_size);
  assert (result->allocated >= size);
  return result;
}

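/* Worked example (added for exposition; the concrete sizes are
   assumptions, since element_size depends on the target).  Assuming a
   32-byte segment header and a 40-byte struct dl_find_object_internal,
   a request for 63 elements allocates 32 + 63 * 40 + 2 * 128 = 2808
   bytes.  Aligning PTR up and END down each discards at most 127
   bytes, so at least 2554 bytes remain, which still holds the header
   plus 63 elements; the final assert checks exactly this.  */
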
/* Monotonic counter for software transactional memory.  The lowest
   bit indicates which element of the _dlfo_loaded_mappings array
   contains up-to-date data.  */
static __atomic_wide_counter _dlfo_loaded_mappings_version;

/* TM version at the start of the read operation.  */
static inline uint64_t
_dlfo_read_start_version (void)
{
  /* Acquire MO load synchronizes with the fences at the beginning and
     end of the TM update region in _dlfo_mappings_begin_update,
     _dlfo_mappings_end_update.  */
  return __atomic_wide_counter_load_acquire (&_dlfo_loaded_mappings_version);
}

/* Optimized variant of _dlfo_read_start_version which can be called
   when the loader is write-locked.  */
static inline uint64_t
_dlfo_read_version_locked (void)
{
  return __atomic_wide_counter_load_relaxed (&_dlfo_loaded_mappings_version);
}

/* Begin an update cycle.  This does not change the version or the
   bit that controls the active segment chain.  */
static inline void
_dlfo_mappings_begin_update (void)
{
  /* The fence synchronizes with loads in _dlfo_read_start_version
     (also called from _dlfo_read_success).  */
  atomic_thread_fence_release ();
}

/* Installs the just-updated version as the active version.  */
static inline void
_dlfo_mappings_end_update (void)
{
  /* The fence synchronizes with loads in _dlfo_read_start_version
     (also called from _dlfo_read_success).  */
  atomic_thread_fence_release ();
  /* No atomic read-modify-write update needed because of the loader
     lock.  */
  __atomic_wide_counter_add_relaxed (&_dlfo_loaded_mappings_version, 1);
}

/* Return true if the read was successful, given the start
   version.  */
static inline bool
_dlfo_read_success (uint64_t start_version)
{
  /* See Hans Boehm, Can Seqlocks Get Along with Programming Language
     Memory Models?, Section 4.  This is necessary so that loads in
     the TM region are not ordered past the version check below.  */
  atomic_thread_fence_acquire ();

  /* Synchronizes with the fences in _dlfo_mappings_begin_update,
     _dlfo_mappings_end_update.  It is important that all stores from
     the last update have become visible, and that stores from the
     next update to this copy are not performed before the version
     number is updated.

     Unlike with seqlocks, there is no check for odd versions here
     because we have read the unmodified copy (confirmed to be
     unmodified by the unchanged version).  */
  return _dlfo_read_start_version () == start_version;
}

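/* Reader pattern used by __dl_find_object below (sketch added for
   exposition):

     uint64_t v = _dlfo_read_start_version ();
     ... relaxed loads from _dlfo_loaded_mappings[v & 1] ...
     ... copy the interesting element to private storage ...
     if (!_dlfo_read_success (v))
       goto retry;

   The copy must happen before the success check so that the data
   handed to the caller cannot be torn by a later update.  */
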
/* Returns the active segment identified by the specified start
   version.  */
static struct dlfo_mappings_segment *
_dlfo_mappings_active_segment (uint64_t start_version)
{
  return _dlfo_loaded_mappings[start_version & 1];
}

/* Searches PC among the address-sorted array [FIRST1, FIRST1 +
   SIZE).  Assumes PC >= FIRST1->map_start.  Returns a pointer to the
   element that contains PC, or NULL if there is no such element.  */
static inline struct dl_find_object_internal *
_dlfo_lookup (uintptr_t pc, struct dl_find_object_internal *first1, size_t size)
{
  struct dl_find_object_internal *end = first1 + size;

  /* Search for a lower bound in first.  */
  struct dl_find_object_internal *first = first1;
  while (size > 0)
    {
      size_t half = size >> 1;
      struct dl_find_object_internal *middle = first + half;
      if (atomic_load_relaxed (&middle->map_start) < pc)
        {
          first = middle + 1;
          size -= half + 1;
        }
      else
        size = half;
    }

  if (first != end && pc == atomic_load_relaxed (&first->map_start))
    {
      if (pc < atomic_load_relaxed (&first->map_end))
        return first;
      else
        /* Zero-length mapping after dlclose.  */
        return NULL;
    }
  else
    {
      /* Check to see if PC is in the previous mapping.  */
      --first;
      if (pc < atomic_load_relaxed (&first->map_end))
        /* pc >= first->map_start implied by the search above.  */
        return first;
      else
        return NULL;
    }
}

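/* Worked example (added for exposition): for map_start values
   { 0x1000, 0x5000, 0x9000 } and PC == 0x6000, the loop above leaves
   FIRST at the 0x9000 element (the lower bound).  Since PC does not
   equal that map_start, the code steps back to the 0x5000 element and
   reports a match only if PC is still below its map_end.  */
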
int
__dl_find_object (void *pc1, struct dl_find_object *result)
{
  uintptr_t pc = (uintptr_t) pc1;

  if (__glibc_unlikely (_dlfo_main.map_end == 0))
    {
      /* Not initialized.  No locking is needed here because this can
         only be called from audit modules, which cannot create
         threads.  */
      return _dl_find_object_slow (pc1, result);
    }

  /* Main executable.  */
  if (pc >= _dlfo_main.map_start && pc < _dlfo_main.map_end)
    {
      _dl_find_object_to_external (&_dlfo_main, result);
      return 0;
    }

  /* Other initially loaded objects.  */
  if (pc >= _dlfo_nodelete_mappings->map_start
      && pc < _dlfo_nodelete_mappings_end)
    {
      struct dl_find_object_internal *obj
        = _dlfo_lookup (pc, _dlfo_nodelete_mappings,
                        _dlfo_nodelete_mappings_size);
      if (obj != NULL)
        {
          _dl_find_object_to_external (obj, result);
          return 0;
        }
      /* Fall through to the full search.  The kernel may have mapped
         the initial mappings with gaps that are later filled by
         dlopen with other mappings.  */
    }

  /* Handle audit modules and dlopen'ed objects.  This uses software
     transactional memory, with a retry loop in case the version
     changes during execution.  */
  while (true)
    {
    retry:
      ;
      uint64_t start_version = _dlfo_read_start_version ();

      /* The read through seg->previous assumes that the CPU
         recognizes the load dependency, so that no invalid size
         value is read.  Furthermore, the code assumes that no
         out-of-thin-air value for seg->size is observed.  Together,
         this ensures that the observed seg->size value is always less
         than seg->allocated, so that _dlfo_lookup does not
         read out-of-bounds.  (This avoids intermediate TM version
         verification.  A concurrent version update will lead to
         invalid lookup results, but not to out-of-bounds accesses.)

         Either seg == NULL or seg->size == 0 terminates the segment
         list.  _dl_find_object_update does not bother to clear the
         size on earlier unused segments.  */
      for (struct dlfo_mappings_segment *seg
             = _dlfo_mappings_active_segment (start_version);
           seg != NULL;
           seg = atomic_load_acquire (&seg->previous))
        {
          size_t seg_size = atomic_load_relaxed (&seg->size);
          if (seg_size == 0)
            break;

          if (pc >= atomic_load_relaxed (&seg->objects[0].map_start))
            {
              /* PC may lie within this segment.  If it is less than the
                 segment start address, it can only lie in a previous
                 segment, due to the base address sorting.  */
              struct dl_find_object_internal *obj
                = _dlfo_lookup (pc, seg->objects, seg_size);

              if (obj != NULL)
                {
                  /* Found the right mapping.  Copy out the data prior to
                     checking if the read transaction was successful.  */
                  struct dl_find_object_internal copy;
                  _dl_find_object_internal_copy (obj, &copy);
                  if (_dlfo_read_success (start_version))
                    {
                      _dl_find_object_to_external (&copy, result);
                      return 0;
                    }
                  else
                    /* Read transaction failure.  */
                    goto retry;
                }
              else
                {
                  /* PC is not covered by this mapping.  */
                  if (_dlfo_read_success (start_version))
                    return -1;
                  else
                    /* Read transaction failure.  */
                    goto retry;
                }
            } /* if: PC might lie within the current seg.  */
        }

      /* PC is not covered by any segment.  */
      if (_dlfo_read_success (start_version))
        return -1;
    } /* Transaction retry loop.  */
}
hidden_def (__dl_find_object)
weak_alias (__dl_find_object, _dl_find_object)

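/* Usage sketch (added for exposition): an unwinder or signal handler
   can call the public alias declared in <dlfcn.h> to map a code
   address back to its object:

     struct dl_find_object dlfo;
     if (_dl_find_object ((void *) pc, &dlfo) == 0)
       ... use dlfo.dlfo_link_map, dlfo.dlfo_eh_frame ...

   After _dl_find_object_init has run, the fast path above performs no
   locking and no memory allocation, which is what makes the call
   async-signal-safe.  */
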
/* _dlfo_process_initial is called twice.  First to compute the array
   sizes from the initial loaded mappings.  Second to fill in the
   bases and infos arrays with the (still unsorted) data.  Returns the
   number of loaded (non-nodelete) mappings.  */
static size_t
_dlfo_process_initial (void)
{
  struct link_map *main_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;

  size_t nodelete = 0;
  if (!main_map->l_contiguous)
    {
      struct dl_find_object_internal dlfo;
      _dl_find_object_from_map (main_map, &dlfo);

      /* PT_LOAD segments for a non-contiguous main map are added to
         the non-closeable mappings.  */
      for (const ElfW(Phdr) *ph = main_map->l_phdr,
             *ph_end = main_map->l_phdr + main_map->l_phnum;
           ph < ph_end; ++ph)
        if (ph->p_type == PT_LOAD)
          {
            if (_dlfo_nodelete_mappings != NULL)
              {
                /* Second pass only.  */
                _dlfo_nodelete_mappings[nodelete] = dlfo;
                _dlfo_nodelete_mappings[nodelete].map_start
                  = ph->p_vaddr + main_map->l_addr;
                _dlfo_nodelete_mappings[nodelete].map_end
                  = _dlfo_nodelete_mappings[nodelete].map_start + ph->p_memsz;
              }
            ++nodelete;
          }
    }

  size_t loaded = 0;
  for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
    for (struct link_map *l = GL(dl_ns)[ns]._ns_loaded; l != NULL;
         l = l->l_next)
      /* Skip the main map processed above, and proxy maps.  */
      if (l != main_map && l == l->l_real)
        {
          /* lt_library link maps are implicitly NODELETE.  */
          if (l->l_type == lt_library || l->l_nodelete_active)
            {
              if (_dlfo_nodelete_mappings != NULL)
                /* Second pass only.  */
                _dl_find_object_from_map
                  (l, _dlfo_nodelete_mappings + nodelete);
              ++nodelete;
            }
          else if (l->l_type == lt_loaded)
            {
              if (_dlfo_loaded_mappings[0] != NULL)
                /* Second pass only.  */
                _dl_find_object_from_map
                  (l, &_dlfo_loaded_mappings[0]->objects[loaded]);
              ++loaded;
            }
        }

  _dlfo_nodelete_mappings_size = nodelete;
  return loaded;
}

/* Selection sort based on mapping start address.  */
void
_dlfo_sort_mappings (struct dl_find_object_internal *objects, size_t size)
{
  if (size < 2)
    return;

  for (size_t i = 0; i < size - 1; ++i)
    {
      /* Find minimum.  */
      size_t min_idx = i;
      uintptr_t min_val = objects[i].map_start;
      for (size_t j = i + 1; j < size; ++j)
        if (objects[j].map_start < min_val)
          {
            min_idx = j;
            min_val = objects[j].map_start;
          }

      /* Swap into place.  */
      struct dl_find_object_internal tmp = objects[min_idx];
      objects[min_idx] = objects[i];
      objects[i] = tmp;
    }
}

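/* Note (added for exposition): a selection sort is used here,
   presumably because it needs no allocation or recursion and performs
   at most SIZE - 1 swaps; both call sites sort only the initially
   loaded mappings in _dl_find_object_init, so the quadratic number of
   comparisons is not a concern.  */
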
void
_dl_find_object_init (void)
{
  /* Cover the main mapping.  */
  {
    struct link_map *main_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;

    if (main_map->l_contiguous)
      _dl_find_object_from_map (main_map, &_dlfo_main);
    else
      {
        /* Non-contiguous main maps are handled in
           _dlfo_process_initial.  Mark as initialized, but not
           covering any valid PC.  */
        _dlfo_main.map_start = -1;
        _dlfo_main.map_end = -1;
      }
  }

  /* Allocate the data structures.  */
  size_t loaded_size = _dlfo_process_initial ();
  _dlfo_nodelete_mappings = malloc (_dlfo_nodelete_mappings_size
                                    * sizeof (*_dlfo_nodelete_mappings));
  if (loaded_size > 0)
    _dlfo_loaded_mappings[0]
      = _dlfo_mappings_segment_allocate_unpadded (loaded_size);
  if (_dlfo_nodelete_mappings == NULL
      || (loaded_size > 0 && _dlfo_loaded_mappings[0] == NULL))
    _dl_fatal_printf ("\
Fatal glibc error: cannot allocate memory for find-object data\n");
  /* Fill in the data with the second call.  */
  _dlfo_nodelete_mappings_size = 0;
  _dlfo_process_initial ();

  /* Sort both arrays.  */
  if (_dlfo_nodelete_mappings_size > 0)
    {
      _dlfo_sort_mappings (_dlfo_nodelete_mappings,
                           _dlfo_nodelete_mappings_size);
      size_t last_idx = _dlfo_nodelete_mappings_size - 1;
      _dlfo_nodelete_mappings_end = _dlfo_nodelete_mappings[last_idx].map_end;
    }
  if (loaded_size > 0)
    _dlfo_sort_mappings (_dlfo_loaded_mappings[0]->objects,
                         _dlfo_loaded_mappings[0]->size);
}

static void
_dl_find_object_link_map_sort (struct link_map **loaded, size_t size)
{
  /* Selection sort based on map_start.  */
  if (size < 2)
    return;
  for (size_t i = 0; i < size - 1; ++i)
    {
      /* Find minimum.  */
      size_t min_idx = i;
      ElfW(Addr) min_val = loaded[i]->l_map_start;
      for (size_t j = i + 1; j < size; ++j)
        if (loaded[j]->l_map_start < min_val)
          {
            min_idx = j;
            min_val = loaded[j]->l_map_start;
          }

      /* Swap into place.  */
      struct link_map *tmp = loaded[min_idx];
      loaded[min_idx] = loaded[i];
      loaded[i] = tmp;
    }
}

/* Initializes the segment for writing.  Returns the target write
   index (plus 1) in this segment.  The index is chosen so that a
   partially filled segment still has data at index 0.  */
static inline size_t
_dlfo_update_init_seg (struct dlfo_mappings_segment *seg,
                       size_t remaining_to_add)
{
  size_t new_seg_size;
  if (remaining_to_add < seg->allocated)
    /* Partially filled segment.  */
    new_seg_size = remaining_to_add;
  else
    new_seg_size = seg->allocated;
  atomic_store_relaxed (&seg->size, new_seg_size);
  return new_seg_size;
}

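/* Example (added for exposition): with REMAINING_TO_ADD == 10 and
   seg->allocated == 63, the size becomes 10 and the caller writes
   objects[9] first, moving down to objects[0], so a partially filled
   segment keeps its data anchored at index 0.  */
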
/* Invoked from _dl_find_object_update after sorting.  Stores to the
   shared data need to use relaxed MO.  But plain loads can be used
   because the loader lock prevents concurrent stores.  */
static bool
_dl_find_object_update_1 (struct link_map **loaded, size_t count)
{
  int active_idx = _dlfo_read_version_locked () & 1;

  struct dlfo_mappings_segment *current_seg
    = _dlfo_loaded_mappings[active_idx];
  size_t current_used = _dlfo_mappings_segment_count_used (current_seg);

  struct dlfo_mappings_segment *target_seg
    = _dlfo_loaded_mappings[!active_idx];
  size_t remaining_to_add = current_used + count;

  /* Ensure that the new segment chain has enough space.  */
  {
    size_t new_allocated
      = _dlfo_mappings_segment_count_allocated (target_seg);
    if (new_allocated < remaining_to_add)
      {
        size_t more = remaining_to_add - new_allocated;
        target_seg = _dlfo_mappings_segment_allocate (more, target_seg);
        if (target_seg == NULL)
          /* Out of memory.  Do not end the update and keep the
             current version unchanged.  */
          return false;

        /* Start update cycle.  */
        _dlfo_mappings_begin_update ();

        /* The barrier ensures that a concurrent TM read or fork does
           not see a partially initialized segment.  */
        atomic_store_release (&_dlfo_loaded_mappings[!active_idx], target_seg);
      }
    else
      /* Start update cycle without allocation.  */
      _dlfo_mappings_begin_update ();
  }

  size_t target_seg_index1 = _dlfo_update_init_seg (target_seg,
                                                    remaining_to_add);

  /* Merge the current_seg segment list with the loaded array into the
     target_seg list.  Merging occurs backwards, in decreasing
     l_map_start order.  */
  size_t loaded_index1 = count;
  size_t current_seg_index1;
  if (current_seg == NULL)
    current_seg_index1 = 0;
  else
    current_seg_index1 = current_seg->size;
  while (true)
    {
      if (current_seg_index1 == 0)
        {
          /* Switch to the previous segment.  */
          if (current_seg != NULL)
            current_seg = current_seg->previous;
          if (current_seg != NULL)
            {
              current_seg_index1 = current_seg->size;
              if (current_seg_index1 == 0)
                /* No more data in previous segments.  */
                current_seg = NULL;
            }
        }

      if (current_seg != NULL
          && (current_seg->objects[current_seg_index1 - 1].map == NULL))
        {
          /* This mapping has been dlclose'd.  Do not copy it.  */
          --current_seg_index1;
          continue;
        }

      if (loaded_index1 == 0 && current_seg == NULL)
        /* No more data in either source.  */
        break;

      /* Make room for another mapping.  */
      assert (remaining_to_add > 0);
      if (target_seg_index1 == 0)
        {
          /* Switch segments and set the size of the segment.  */
          target_seg = target_seg->previous;
          target_seg_index1 = _dlfo_update_init_seg (target_seg,
                                                     remaining_to_add);
        }

      /* Determine where to store the data.  */
      struct dl_find_object_internal *dlfo
        = &target_seg->objects[target_seg_index1 - 1];

      if (loaded_index1 == 0
          || (current_seg != NULL
              && (loaded[loaded_index1 - 1]->l_map_start
                  < current_seg->objects[current_seg_index1 - 1].map_start)))
        {
          /* Prefer mapping in current_seg.  */
          assert (current_seg_index1 > 0);
          _dl_find_object_internal_copy
            (&current_seg->objects[current_seg_index1 - 1], dlfo);
          --current_seg_index1;
        }
      else
        {
          /* Prefer newly loaded link map.  */
          assert (loaded_index1 > 0);
          _dl_find_object_from_map (loaded[loaded_index1 - 1], dlfo);
          loaded[loaded_index1 - 1]->l_find_object_processed = 1;
          --loaded_index1;
        }

      /* Consume space in target segment.  */
      --target_seg_index1;

      --remaining_to_add;
    }

  /* Everything has been added.  */
  assert (remaining_to_add == 0);

  /* The segment must have been filled up to the beginning.  */
  assert (target_seg_index1 == 0);

  /* Prevent searching further into unused segments.  */
  if (target_seg->previous != NULL)
    atomic_store_relaxed (&target_seg->previous->size, 0);

  _dlfo_mappings_end_update ();
  return true;
}

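/* Note (added for exposition): the merge above walks both sources
   from their highest map_start values downwards, filling each target
   segment from its last slot towards index 0.  Combined with
   _dlfo_update_init_seg, this guarantees that every non-empty segment
   has its data starting at index 0, which the lookup code relies on
   when it checks objects[0].map_start.  */
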
bool
_dl_find_object_update (struct link_map *new_map)
{
  /* Copy the newly-loaded link maps into an array for sorting.  */
  size_t count = 0;
  for (struct link_map *l = new_map; l != NULL; l = l->l_next)
    /* Skip proxy maps and already-processed maps.  */
    count += l == l->l_real && !l->l_find_object_processed;
  if (count == 0)
    return true;

  struct link_map **map_array = malloc (count * sizeof (*map_array));
  if (map_array == NULL)
    return false;
  {
    size_t i = 0;
    for (struct link_map *l = new_map; l != NULL; l = l->l_next)
      if (l == l->l_real && !l->l_find_object_processed)
        map_array[i++] = l;
  }

  _dl_find_object_link_map_sort (map_array, count);
  bool ok = _dl_find_object_update_1 (map_array, count);
  free (map_array);
  return ok;
}

void
_dl_find_object_dlclose (struct link_map *map)
{
  uint64_t start_version = _dlfo_read_version_locked ();
  uintptr_t map_start = map->l_map_start;

  /* Directly patch the size information in the mapping to mark it as
     unused.  See the parallel lookup logic in _dl_find_object.  Do
     not check for previous dlclose at the same mapping address
     because that cannot happen (there would have to be an
     intermediate dlopen, which drops size-zero mappings).  */
  for (struct dlfo_mappings_segment *seg
         = _dlfo_mappings_active_segment (start_version);
       seg != NULL && seg->size > 0; seg = seg->previous)
    if (map_start >= seg->objects[0].map_start)
      {
        struct dl_find_object_internal *obj
          = _dlfo_lookup (map_start, seg->objects, seg->size);
        if (obj == NULL)
          /* Ignore missing link maps because of potential shutdown
             issues around __libc_freeres.  */
          return;

        /* Mark as closed.  This does not change the overall data
           structure, so no TM cycle is needed.  */
        atomic_store_relaxed (&obj->map_end, obj->map_start);
        atomic_store_relaxed (&obj->map, NULL);
        return;
      }
}

void
_dl_find_object_freeres (void)
{
  for (int idx = 0; idx < 2; ++idx)
    {
      for (struct dlfo_mappings_segment *seg = _dlfo_loaded_mappings[idx];
           seg != NULL; )
        {
          struct dlfo_mappings_segment *previous = seg->previous;
          free (seg->to_free);
          seg = previous;
        }
      /* Stop searching in shared objects.  */
      _dlfo_loaded_mappings[idx] = 0;
    }
}