/* Locating objects in the process image.  ld.so implementation.
   Copyright (C) 2021-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <atomic.h>
#include <atomic_wide_counter.h>
#include <dl-find_object.h>
#include <dlfcn.h>
#include <ldsodefs.h>
#include <link.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
/* Fallback implementation of _dl_find_object.  It uses a linear
   search, needs locking, and is not async-signal-safe.  It is used in
   _dl_find_object prior to initialization, when called from audit
   modules.  It also serves as the reference implementation for
   _dl_find_object.  */
static int
_dl_find_object_slow (void *pc, struct dl_find_object *result)
{
  ElfW(Addr) addr = (ElfW(Addr)) pc;
  for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
    for (struct link_map *l = GL(dl_ns)[ns]._ns_loaded; l != NULL;
         l = l->l_next)
      if (addr >= l->l_map_start && addr < l->l_map_end
          && (l->l_contiguous || _dl_addr_inside_object (l, addr)))
        {
          assert (ns == l->l_ns);
          struct dl_find_object_internal internal;
          _dl_find_object_from_map (l, &internal);
          _dl_find_object_to_external (&internal, result);
          return 0;
        }

  /* Object not found.  */
  return -1;
}
/* Data for the main executable.  There is usually a large gap between
   the main executable and initially loaded shared objects.  Record
   the main executable separately, to increase the chance that the
   range for the non-closeable mappings below covers only the shared
   objects (and not also the gap between main executable and shared
   objects).  */
static struct dl_find_object_internal _dlfo_main attribute_relro;
/* Data for initially loaded shared objects that cannot be unloaded.
   (This may also contain non-contiguous mappings from the main
   executable.)  The mappings are stored in address order in the
   _dlfo_nodelete_mappings array (containing
   _dlfo_nodelete_mappings_size elements).  It is not modified after
   initialization.  */
static uintptr_t _dlfo_nodelete_mappings_end attribute_relro;
static size_t _dlfo_nodelete_mappings_size attribute_relro;
static struct dl_find_object_internal *_dlfo_nodelete_mappings
  attribute_relro;
/* Mappings created by dlopen can go away with dlclose, so a dynamic
   data structure with some synchronization is needed.  Individual
   segments are similar to the _dlfo_nodelete_mappings array above.
   The previous segment contains lower addresses and is at most half
   as long.  Checking the base address of the first element during a
   lookup can therefore approximate a binary search over all segments,
   even though the data is not stored in one contiguous array.

   During updates, the segments are overwritten in place.  A software
   transactional memory construct (involving the
   _dlfo_loaded_mappings_version variable) is used to detect
   concurrent modification, and retry as necessary.  (This approach is
   similar to seqlocks, except that two copies are used, and there is
   only one writer, ever, due to the loader lock.)  Technically,
   relaxed MO loads and stores need to be used for the shared TM data,
   to avoid data races.

   The memory allocations are never deallocated, but slots used for
   objects that have been dlclose'd can be reused by dlopen.  The
   memory can live in the regular C malloc heap.

   The segments are populated from the start of the list, with the
   mappings with the highest addresses.  Only if this segment is full,
   previous segments are used for mappings at lower addresses.  The
   remaining segments are populated as needed, but after allocating
   further segments, some of the initial segments (at the end of the
   linked list) can be empty (with size 0).

   Adding new elements to this data structure is another source of
   quadratic behavior for dlopen.  If the other causes of quadratic
   behavior are eliminated, a more complicated data structure will be
   needed.  */
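/* For illustration (the sizes are hypothetical): with a segment chain
   of allocated sizes 252 -> 126 -> 63, newest to oldest, a lookup
   compares PC against the lowest base address of each segment in turn
   and performs at most one binary search, in the first segment whose
   lowest base address does not exceed PC.  */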
struct dlfo_mappings_segment
{
  /* The previous segment has lower base addresses.  Constant after
     initialization; read in the TM region.  */
  struct dlfo_mappings_segment *previous;

  /* Used by __libc_freeres to deallocate malloc'ed memory.  */
  void *to_free;

  /* Count of array elements in use and allocated.  */
  size_t size;                  /* Read in the TM region.  */
  size_t allocated;

  struct dl_find_object_internal objects[]; /* Read in the TM region.  */
};
/* To achieve async-signal-safety, two copies of the data structure
   are used, so that a signal handler can still use this data even if
   dlopen or dlclose modify the other copy.  The least significant
   bit in _dlfo_loaded_mappings_version determines which array element
   is the currently active region.  */
static struct dlfo_mappings_segment *_dlfo_loaded_mappings[2];
/* Returns the number of actually used elements in all segments
   starting at SEG.  */
static size_t
_dlfo_mappings_segment_count_used (struct dlfo_mappings_segment *seg)
{
  size_t count = 0;
  for (; seg != NULL && seg->size > 0; seg = seg->previous)
    for (size_t i = 0; i < seg->size; ++i)
      /* Exclude elements which have been dlclose'd.  */
      count += seg->objects[i].map != NULL;
  return count;
}
/* Compute the total number of available allocated segments linked
   from SEG.  */
static size_t
_dlfo_mappings_segment_count_allocated (struct dlfo_mappings_segment *seg)
{
  size_t count = 0;
  for (; seg != NULL; seg = seg->previous)
    count += seg->allocated;
  return count;
}
/* This is essentially an arbitrary value.  dlopen allocates plenty of
   memory anyway, so over-allocating a bit does not hurt.  Not having
   many small-ish segments helps to avoid many small binary searches.
   Not using a power of 2 means that we do not waste an extra page
   just for the malloc header if a mapped allocation is used in the
   glibc allocator.  */
enum { dlfo_mappings_initial_segment_size = 63 };
/* Allocate an empty segment.  This is used for the first ever
   allocation.  */
static struct dlfo_mappings_segment *
_dlfo_mappings_segment_allocate_unpadded (size_t size)
{
  if (size < dlfo_mappings_initial_segment_size)
    size = dlfo_mappings_initial_segment_size;
  /* No overflow checks here because the size is a mapping count, and
     struct link_map is larger than what we allocate here.  */
  enum
    {
      element_size = sizeof ((struct dlfo_mappings_segment) {}.objects[0])
    };
  size_t to_allocate = (sizeof (struct dlfo_mappings_segment)
                        + size * element_size);
  struct dlfo_mappings_segment *result = malloc (to_allocate);
  if (result != NULL)
    {
      result->previous = NULL;
      result->to_free = NULL; /* Minimal malloc memory cannot be freed.  */
      result->size = 0;
      result->allocated = size;
    }
  return result;
}
/* Allocate an empty segment that is at least SIZE large.  PREVIOUS
   points to the chain of previously allocated segments and can be
   NULL.  */
static struct dlfo_mappings_segment *
_dlfo_mappings_segment_allocate (size_t size,
                                 struct dlfo_mappings_segment * previous)
{
  /* Exponential sizing policy, so that lookup approximates a binary
     search.  */
  {
    size_t minimum_growth;
    if (previous == NULL)
      minimum_growth = dlfo_mappings_initial_segment_size;
    else
      minimum_growth = 2 * previous->allocated;
    if (size < minimum_growth)
      size = minimum_growth;
  }
  enum { cache_line_size_estimate = 128 };
  /* No overflow checks here because the size is a mapping count, and
     struct link_map is larger than what we allocate here.  */
  enum
    {
      element_size = sizeof ((struct dlfo_mappings_segment) {}.objects[0])
    };
  size_t to_allocate = (sizeof (struct dlfo_mappings_segment)
                        + size * element_size
                        + 2 * cache_line_size_estimate);
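  /* The 2 * cache_line_size_estimate of slack covers the worst case of
     the alignment adjustments below: rounding PTR up and END down can
     each discard up to cache_line_size_estimate - 1 bytes, so at least
     SIZE elements still fit (see the assert below).  */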
  char *ptr = malloc (to_allocate);
  if (ptr == NULL)
    return NULL;
  char *original_ptr = ptr;
  /* Start and end at a (conservative) 128-byte cache line boundary.
     Do not use memalign for compatibility with partially interposing
     malloc implementations.  */
  char *end = PTR_ALIGN_DOWN (ptr + to_allocate, cache_line_size_estimate);
  ptr = PTR_ALIGN_UP (ptr, cache_line_size_estimate);
  struct dlfo_mappings_segment *result
    = (struct dlfo_mappings_segment *) ptr;
  result->previous = previous;
  result->to_free = original_ptr;
  result->size = 0;
  /* We may have obtained slightly more space if malloc happened
     to provide an over-aligned pointer.  */
  result->allocated = (((uintptr_t) (end - ptr)
                        - sizeof (struct dlfo_mappings_segment))
                       / element_size);
  assert (result->allocated >= size);
  return result;
}
/* Monotonic counter for software transactional memory.  The lowest
   bit indicates which element of the _dlfo_loaded_mappings array
   contains up-to-date data.  */
static __atomic_wide_counter _dlfo_loaded_mappings_version;
/* TM version at the start of the read operation.  */
static inline uint64_t
_dlfo_read_start_version (void)
{
  /* Acquire MO load synchronizes with the fences at the beginning and
     end of the TM update region in _dlfo_mappings_begin_update,
     _dlfo_mappings_end_update.  */
  return __atomic_wide_counter_load_acquire (&_dlfo_loaded_mappings_version);
}
/* Optimized variant of _dlfo_read_start_version which can be called
   when the loader is write-locked.  */
static inline uint64_t
_dlfo_read_version_locked (void)
{
  return __atomic_wide_counter_load_relaxed (&_dlfo_loaded_mappings_version);
}
/* Update the version to reflect that an update is happening.  This
   does not change the bit that controls the active segment chain.  */
static void
_dlfo_mappings_begin_update (void)
{
  /* The fence synchronizes with loads in _dlfo_read_start_version
     (also called from _dlfo_read_success).  */
  atomic_thread_fence_release ();
}
/* Installs the just-updated version as the active version.  */
static void
_dlfo_mappings_end_update (void)
{
  /* The fence synchronizes with loads in _dlfo_read_start_version
     (also called from _dlfo_read_success).  */
  atomic_thread_fence_release ();
  /* No atomic read-modify-write update needed because of the loader
     lock.  */
  __atomic_wide_counter_add_relaxed (&_dlfo_loaded_mappings_version, 1);
}
/* Return true if the read was successful, given the start
   version.  */
static bool
_dlfo_read_success (uint64_t start_version)
{
  /* See Hans Boehm, Can Seqlocks Get Along with Programming Language
     Memory Models?, Section 4.  This is necessary so that loads in
     the TM region are not ordered past the version check below.  */
  atomic_thread_fence_acquire ();

  /* Synchronizes with the fences in _dlfo_mappings_begin_update,
     _dlfo_mappings_end_update.  It is important that all stores from
     the last update have become visible, and that stores from the
     next update to this version are not ordered before the version
     number is updated.

     Unlike with seqlocks, there is no check for odd versions here
     because we have read the unmodified copy (confirmed to be
     unmodified by the unchanged version).  */
  return _dlfo_read_start_version () == start_version;
}
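/* Reader-side sketch of the protocol built from the functions above
   (for illustration; _dl_find_object below is the actual reader):

     uint64_t version = _dlfo_read_start_version ();
     ... relaxed MO loads from _dlfo_loaded_mappings[version & 1] ...
     if (_dlfo_read_success (version))
       the values read are consistent and can be used;
     else
       retry with a freshly read version.  */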
/* Returns the active segment identified by the specified start
   version.  */
static struct dlfo_mappings_segment *
_dlfo_mappings_active_segment (uint64_t start_version)
{
  return _dlfo_loaded_mappings[start_version & 1];
}
/* Searches PC among the address-sorted array [FIRST1, FIRST1 +
   SIZE).  Assumes PC >= FIRST1->map_start.  Returns a pointer to the
   element that contains PC, or NULL if there is no such element.  */
static inline struct dl_find_object_internal *
_dlfo_lookup (uintptr_t pc, struct dl_find_object_internal *first1, size_t size)
{
  struct dl_find_object_internal *end = first1 + size;

  /* Search for a lower bound in first.  */
  struct dl_find_object_internal *first = first1;
  while (size > 0)
    {
      size_t half = size >> 1;
      struct dl_find_object_internal *middle = first + half;
      if (atomic_load_relaxed (&middle->map_start) < pc)
        {
          first = middle + 1;
          size -= half + 1;
        }
      else
        size = half;
    }

  if (first != end && pc == atomic_load_relaxed (&first->map_start))
    {
      if (pc < atomic_load_relaxed (&first->map_end))
        return first;
      else
        /* Zero-length mapping after dlclose.  */
        return NULL;
    }
  else
    {
      /* Check to see if PC is in the previous mapping.  */
      --first;
      if (pc < atomic_load_relaxed (&first->map_end))
        /* pc >= first->map_start implied by the search above.  */
        return first;
      else
        return NULL;
    }
}
int
_dl_find_object (void *pc1, struct dl_find_object *result)
{
  uintptr_t pc = (uintptr_t) pc1;

  if (__glibc_unlikely (_dlfo_main.map_end == 0))
    {
      /* Not initialized.  No locking is needed here because this can
         only be called from audit modules, which cannot create
         threads.  */
      return _dl_find_object_slow (pc1, result);
    }

  /* Main executable.  */
  if (pc >= _dlfo_main.map_start && pc < _dlfo_main.map_end)
    {
      _dl_find_object_to_external (&_dlfo_main, result);
      return 0;
    }

  /* Other initially loaded objects.  */
  if (pc >= _dlfo_nodelete_mappings->map_start
      && pc < _dlfo_nodelete_mappings_end)
    {
      struct dl_find_object_internal *obj
        = _dlfo_lookup (pc, _dlfo_nodelete_mappings,
                        _dlfo_nodelete_mappings_size);
      if (obj != NULL)
        {
          _dl_find_object_to_external (obj, result);
          return 0;
        }
      /* Fall through to the full search.  The kernel may have mapped
         the initial mappings with gaps that are later filled by
         dlopen with other mappings.  */
    }

  /* Handle audit modules and objects loaded with dlopen.  This uses
     software transactional memory, with a retry loop in case the
     version changes during execution.  */
  while (true)
    {
    retry:
      ;
      uint64_t start_version = _dlfo_read_start_version ();

      /* The read through seg->previous assumes that the CPU
         recognizes the load dependency, so that no invalid size
         value is read.  Furthermore, the code assumes that no
         out-of-thin-air value for seg->size is observed.  Together,
         this ensures that the observed seg->size value is always less
         than seg->allocated, so that _dlfo_lookup does not read
         out-of-bounds.  (This avoids intermediate TM version
         verification.  A concurrent version update will lead to
         invalid lookup results, but not to out-of-memory access.)

         Either seg == NULL or seg->size == 0 terminates the segment
         list.  _dl_find_object_update does not bother to clear the
         size on earlier unused segments.  */
      for (struct dlfo_mappings_segment *seg
             = _dlfo_mappings_active_segment (start_version);
           seg != NULL;
           seg = atomic_load_acquire (&seg->previous))
        {
          size_t seg_size = atomic_load_relaxed (&seg->size);
          if (seg_size == 0)
            break;

          if (pc >= atomic_load_relaxed (&seg->objects[0].map_start))
            {
              /* PC may lie within this segment.  If it is less than the
                 segment start address, it can only lie in a previous
                 segment, due to the base address sorting.  */
              struct dl_find_object_internal *obj
                = _dlfo_lookup (pc, seg->objects, seg_size);

              if (obj != NULL)
                {
                  /* Found the right mapping.  Copy out the data prior to
                     checking if the read transaction was successful.  */
                  struct dl_find_object_internal copy;
                  _dl_find_object_internal_copy (obj, &copy);
                  if (_dlfo_read_success (start_version))
                    {
                      _dl_find_object_to_external (&copy, result);
                      return 0;
                    }
                  else
                    /* Read transaction failure.  */
                    goto retry;
                }
              else
                {
                  /* PC is not covered by this mapping.  */
                  if (_dlfo_read_success (start_version))
                    return -1;
                  else
                    /* Read transaction failure.  */
                    goto retry;
                }
            } /* if: PC might lie within the current seg.  */
        }

      /* PC is not covered by any segment.  */
      if (_dlfo_read_success (start_version))
        return -1;
    } /* Transaction retry loop.  */
}
rtld_hidden_def (_dl_find_object)
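/* Illustrative sketch only (not part of ld.so): a caller such as an
   unwinder would use the public <dlfcn.h> entry point implemented
   above roughly like this.  The dlfo_* member names come from the
   public struct dl_find_object; frob is a placeholder.

     struct dl_find_object dlfo;
     if (_dl_find_object (pc, &dlfo) == 0)
       frob (dlfo.dlfo_map_start, dlfo.dlfo_map_end,
             dlfo.dlfo_link_map, dlfo.dlfo_eh_frame);  */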
/* _dlfo_process_initial is called twice.  First to compute the array
   sizes from the initial loaded mappings.  Second to fill in the
   bases and infos arrays with the (still unsorted) data.  Returns the
   number of loaded (non-nodelete) mappings.  */
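/* (In the implementation below, the second pass is recognized by
   _dlfo_nodelete_mappings and _dlfo_loaded_mappings[0] being non-NULL;
   the first pass merely counts.)  */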
static size_t
_dlfo_process_initial (void)
{
  struct link_map *main_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;

  size_t nodelete = 0;
  if (!main_map->l_contiguous)
    {
      struct dl_find_object_internal dlfo;
      _dl_find_object_from_map (main_map, &dlfo);

      /* PT_LOAD segments for a non-contiguous main map are added to
         the non-closeable mappings.  */
      for (const ElfW(Phdr) *ph = main_map->l_phdr,
             *ph_end = main_map->l_phdr + main_map->l_phnum;
           ph < ph_end; ++ph)
        if (ph->p_type == PT_LOAD)
          {
            if (_dlfo_nodelete_mappings != NULL)
              {
                /* Second pass only.  */
                _dlfo_nodelete_mappings[nodelete] = dlfo;
                _dlfo_nodelete_mappings[nodelete].map_start
                  = ph->p_vaddr + main_map->l_addr;
                _dlfo_nodelete_mappings[nodelete].map_end
                  = _dlfo_nodelete_mappings[nodelete].map_start + ph->p_memsz;
              }
            ++nodelete;
          }
    }

  size_t loaded = 0;
  for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
    for (struct link_map *l = GL(dl_ns)[ns]._ns_loaded; l != NULL;
         l = l->l_next)
      /* Skip the main map processed above, and proxy maps.  */
      if (l != main_map && l == l->l_real)
        {
          /* lt_library link maps are implicitly NODELETE.  */
          if (l->l_type == lt_library || l->l_nodelete_active)
            {
              if (_dlfo_nodelete_mappings != NULL)
                /* Second pass only.  */
                _dl_find_object_from_map
                  (l, _dlfo_nodelete_mappings + nodelete);
              ++nodelete;
            }
          else if (l->l_type == lt_loaded)
            {
              if (_dlfo_loaded_mappings[0] != NULL)
                /* Second pass only.  */
                _dl_find_object_from_map
                  (l, &_dlfo_loaded_mappings[0]->objects[loaded]);
              ++loaded;
            }
        }

  _dlfo_nodelete_mappings_size = nodelete;
  return loaded;
}
/* Selection sort based on mapping start address.  */
static void
_dlfo_sort_mappings (struct dl_find_object_internal *objects, size_t size)
{
  if (size < 2)
    return;

  for (size_t i = 0; i < size - 1; ++i)
    {
      /* Find minimum.  */
      size_t min_idx = i;
      uintptr_t min_val = objects[i].map_start;
      for (size_t j = i + 1; j < size; ++j)
        if (objects[j].map_start < min_val)
          {
            min_idx = j;
            min_val = objects[j].map_start;
          }

      /* Swap into place.  */
      struct dl_find_object_internal tmp = objects[min_idx];
      objects[min_idx] = objects[i];
      objects[i] = tmp;
    }
}
void
_dl_find_object_init (void)
{
  /* Cover the main mapping.  */
  {
    struct link_map *main_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;

    if (main_map->l_contiguous)
      _dl_find_object_from_map (main_map, &_dlfo_main);
    else
      {
        /* Non-contiguous main maps are handled in
           _dlfo_process_initial.  Mark as initialized, but not
           covering any valid PC.  */
        _dlfo_main.map_start = -1;
        _dlfo_main.map_end = -1;
      }
  }

  /* Allocate the data structures.  */
  size_t loaded_size = _dlfo_process_initial ();
  _dlfo_nodelete_mappings = malloc (_dlfo_nodelete_mappings_size
                                    * sizeof (*_dlfo_nodelete_mappings));
  if (loaded_size > 0)
    _dlfo_loaded_mappings[0]
      = _dlfo_mappings_segment_allocate_unpadded (loaded_size);
  if (_dlfo_nodelete_mappings == NULL
      || (loaded_size > 0 && _dlfo_loaded_mappings[0] == NULL))
    _dl_fatal_printf ("\
Fatal glibc error: cannot allocate memory for find-object data\n");
  /* Fill in the data with the second call.  */
  _dlfo_nodelete_mappings_size = 0;
  _dlfo_process_initial ();

  /* Sort both arrays.  */
  if (_dlfo_nodelete_mappings_size > 0)
    {
      _dlfo_sort_mappings (_dlfo_nodelete_mappings,
                           _dlfo_nodelete_mappings_size);
      size_t last_idx = _dlfo_nodelete_mappings_size - 1;
      _dlfo_nodelete_mappings_end = _dlfo_nodelete_mappings[last_idx].map_end;
    }
  if (loaded_size > 0)
    _dlfo_sort_mappings (_dlfo_loaded_mappings[0]->objects,
                         _dlfo_loaded_mappings[0]->size);
}
static void
_dl_find_object_link_map_sort (struct link_map **loaded, size_t size)
{
  /* Selection sort based on map_start.  */
  if (size < 2)
    return;
  for (size_t i = 0; i < size - 1; ++i)
    {
      /* Find minimum.  */
      size_t min_idx = i;
      ElfW(Addr) min_val = loaded[i]->l_map_start;
      for (size_t j = i + 1; j < size; ++j)
        if (loaded[j]->l_map_start < min_val)
          {
            min_idx = j;
            min_val = loaded[j]->l_map_start;
          }

      /* Swap into place.  */
      struct link_map *tmp = loaded[min_idx];
      loaded[min_idx] = loaded[i];
      loaded[i] = tmp;
    }
}
/* Initializes the segment for writing.  Returns the target write
   index (plus 1) in this segment.  The index is chosen so that a
   partially filled segment still has data at index 0.  */
static size_t
_dlfo_update_init_seg (struct dlfo_mappings_segment *seg,
                       size_t remaining_to_add)
{
  size_t new_seg_size;
  if (remaining_to_add < seg->allocated)
    /* Partially filled segment.  */
    new_seg_size = remaining_to_add;
  else
    new_seg_size = seg->allocated;
  atomic_store_relaxed (&seg->size, new_seg_size);
  return new_seg_size;
}
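/* Illustration of the index-plus-1 convention (hypothetical numbers):
   with seg->allocated == 8 and remaining_to_add == 5, the size is set
   to 5 and _dl_find_object_update_1 writes at indices 4, 3, 2, 1, 0,
   so a partially filled segment still has data at index 0.  */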
/* Invoked from _dl_find_object_update after sorting.  Stores to the
   shared data need to use relaxed MO.  But plain loads can be used
   because the loader lock prevents concurrent stores.  */
static bool
_dl_find_object_update_1 (struct link_map **loaded, size_t count)
{
  int active_idx = _dlfo_read_version_locked () & 1;

  struct dlfo_mappings_segment *current_seg
    = _dlfo_loaded_mappings[active_idx];
  size_t current_used = _dlfo_mappings_segment_count_used (current_seg);

  struct dlfo_mappings_segment *target_seg
    = _dlfo_loaded_mappings[!active_idx];
  size_t remaining_to_add = current_used + count;

  /* Ensure that the new segment chain has enough space.  */
  {
    size_t new_allocated
      = _dlfo_mappings_segment_count_allocated (target_seg);
    if (new_allocated < remaining_to_add)
      {
        size_t more = remaining_to_add - new_allocated;
        target_seg = _dlfo_mappings_segment_allocate (more, target_seg);
        if (target_seg == NULL)
          /* Out of memory.  Do not end the update and keep the
             current version unchanged.  */
          return false;

        /* Start update cycle.  */
        _dlfo_mappings_begin_update ();

        /* The barrier ensures that a concurrent TM read or fork does
           not see a partially initialized segment.  */
        atomic_store_release (&_dlfo_loaded_mappings[!active_idx], target_seg);
      }
    else
      /* Start update cycle without allocation.  */
      _dlfo_mappings_begin_update ();
  }

  size_t target_seg_index1 = _dlfo_update_init_seg (target_seg,
                                                    remaining_to_add);

  /* Merge the current_seg segment list with the loaded array into the
     target segment list.  Merging occurs backwards, in decreasing
     l_map_start order.  */
  size_t loaded_index1 = count;
  size_t current_seg_index1;
  if (current_seg == NULL)
    current_seg_index1 = 0;
  else
    current_seg_index1 = current_seg->size;
  while (true)
    {
      if (current_seg_index1 == 0)
        {
          /* Switch to the previous segment.  */
          if (current_seg != NULL)
            current_seg = current_seg->previous;
          if (current_seg != NULL)
            {
              current_seg_index1 = current_seg->size;
              if (current_seg_index1 == 0)
                /* No more data in previous segments.  */
                current_seg = NULL;
            }
        }

      if (current_seg != NULL
          && (current_seg->objects[current_seg_index1 - 1].map == NULL))
        {
          /* This mapping has been dlclose'd.  Do not copy it.  */
          --current_seg_index1;
          continue;
        }

      if (loaded_index1 == 0 && current_seg == NULL)
        /* No more data in either source.  */
        break;

      /* Make room for another mapping.  */
      assert (remaining_to_add > 0);
      if (target_seg_index1 == 0)
        {
          /* Switch segments and set the size of the segment.  */
          target_seg = target_seg->previous;
          target_seg_index1 = _dlfo_update_init_seg (target_seg,
                                                     remaining_to_add);
        }

      /* Determine where to store the data.  */
      struct dl_find_object_internal *dlfo
        = &target_seg->objects[target_seg_index1 - 1];

      if (loaded_index1 == 0
          || (current_seg != NULL
              && (loaded[loaded_index1 - 1]->l_map_start
                  < current_seg->objects[current_seg_index1 - 1].map_start)))
        {
          /* Prefer the mapping in current_seg.  */
          assert (current_seg_index1 > 0);
          _dl_find_object_internal_copy
            (&current_seg->objects[current_seg_index1 - 1], dlfo);
          --current_seg_index1;
        }
      else
        {
          /* Prefer the newly loaded link map.  */
          assert (loaded_index1 > 0);
          _dl_find_object_from_map (loaded[loaded_index1 - 1], dlfo);
          loaded[loaded_index1 - 1]->l_find_object_processed = 1;
          --loaded_index1;
        }

      /* Consume space in the target segment.  */
      --target_seg_index1;

      --remaining_to_add;
    }

  /* Everything has been added.  */
  assert (remaining_to_add == 0);

  /* The segment must have been filled up to the beginning.  */
  assert (target_seg_index1 == 0);

  /* Prevent searching further into unused segments.  */
  if (target_seg->previous != NULL)
    atomic_store_relaxed (&target_seg->previous->size, 0);

  _dlfo_mappings_end_update ();
  return true;
}
bool
_dl_find_object_update (struct link_map *new_map)
{
  /* Copy the newly-loaded link maps into an array for sorting.  */
  size_t count = 0;
  for (struct link_map *l = new_map; l != NULL; l = l->l_next)
    /* Skip proxy maps and already-processed maps.  */
    count += l == l->l_real && !l->l_find_object_processed;
  struct link_map **map_array = malloc (count * sizeof (*map_array));
  if (map_array == NULL)
    return false;
  {
    size_t i = 0;
    for (struct link_map *l = new_map; l != NULL; l = l->l_next)
      if (l == l->l_real && !l->l_find_object_processed)
        map_array[i++] = l;
  }
  if (count == 0)
    return true;

  _dl_find_object_link_map_sort (map_array, count);
  bool ok = _dl_find_object_update_1 (map_array, count);
  free (map_array);
  return ok;
}
*map
)
812 uint64_t start_version
= _dlfo_read_version_locked ();
813 uintptr_t map_start
= map
->l_map_start
;
816 /* Directly patch the size information in the mapping to mark it as
817 unused. See the parallel lookup logic in _dl_find_object. Do
818 not check for previous dlclose at the same mapping address
819 because that cannot happen (there would have to be an
820 intermediate dlopen, which drops size-zero mappings). */
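  /* (Storing map_end == map_start below produces the zero-length
     mapping that _dlfo_lookup detects, and the NULL map is what
     _dlfo_mappings_segment_count_used and the merge loop in
     _dl_find_object_update_1 skip.)  */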
  for (struct dlfo_mappings_segment *seg
         = _dlfo_mappings_active_segment (start_version);
       seg != NULL && seg->size > 0; seg = seg->previous)
    if (map_start >= seg->objects[0].map_start)
      {
        struct dl_find_object_internal *obj
          = _dlfo_lookup (map_start, seg->objects, seg->size);
        if (obj == NULL)
          /* Ignore missing link maps because of potential shutdown
             issues around __libc_freeres.  */
          return;

        /* Mark as closed.  This does not change the overall data
           structure, so no TM cycle is needed.  */
        atomic_store_relaxed (&obj->map_end, obj->map_start);
        atomic_store_relaxed (&obj->map, NULL);
        return;
      }
}
void
_dl_find_object_freeres (void)
{
  for (int idx = 0; idx < 2; ++idx)
    {
      for (struct dlfo_mappings_segment *seg = _dlfo_loaded_mappings[idx];
           seg != NULL; )
        {
          struct dlfo_mappings_segment *previous = seg->previous;
          free (seg->to_free);
          seg = previous;
        }
      /* Stop searching in shared objects.  */
      _dlfo_loaded_mappings[idx] = 0;
    }
}