1 /* Thread-local storage handling in the ELF dynamic linker. Generic version.
2 Copyright (C) 2002-2015 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <http://www.gnu.org/licenses/>. */
25 #include <sys/param.h>
/* Amount of excess space to allocate in the static TLS area
   to allow dynamic loading of modules defining IE-model TLS data.
   Parenthesized so the macro expands safely inside any expression
   (the unparenthesized form would misgroup next to higher-precedence
   operators at a use site).  */
#define TLS_STATIC_SURPLUS	(64 + DL_NNS * 100)
/* Out-of-memory handler: abort the process with a diagnostic.  Used
   when TLS bookkeeping allocations fail and no recovery is possible.  */
static void
__attribute__ ((__noreturn__))
oom (void)
{
  _dl_fatal_printf ("cannot allocate memory for thread-local data: ABORT\n");
}
48 _dl_next_tls_modid (void)
52 if (__builtin_expect (GL(dl_tls_dtv_gaps
), false))
55 struct dtv_slotinfo_list
*runp
= GL(dl_tls_dtv_slotinfo_list
);
57 /* Note that this branch will never be executed during program
58 start since there are no gaps at that time. Therefore it
59 does not matter that the dl_tls_dtv_slotinfo is not allocated
60 yet when the function is called for the first times.
62 NB: the offset +1 is due to the fact that DTV[0] is used
63 for something else. */
64 result
= GL(dl_tls_static_nelem
) + 1;
65 if (result
<= GL(dl_tls_max_dtv_idx
))
68 while (result
- disp
< runp
->len
)
70 if (runp
->slotinfo
[result
- disp
].map
== NULL
)
74 assert (result
<= GL(dl_tls_max_dtv_idx
) + 1);
77 if (result
- disp
< runp
->len
)
82 while ((runp
= runp
->next
) != NULL
);
84 if (result
> GL(dl_tls_max_dtv_idx
))
86 /* The new index must indeed be exactly one higher than the
88 assert (result
== GL(dl_tls_max_dtv_idx
) + 1);
89 /* There is no gap anymore. */
90 GL(dl_tls_dtv_gaps
) = false;
97 /* No gaps, allocate a new entry. */
100 result
= ++GL(dl_tls_max_dtv_idx
);
109 _dl_count_modids (void)
111 /* It is rare that we have gaps; see elf/dl-open.c (_dl_open) where
112 we fail to load a module and unload it leaving a gap. If we don't
113 have gaps then the number of modids is the current maximum so
115 if (__glibc_likely (!GL(dl_tls_dtv_gaps
)))
116 return GL(dl_tls_max_dtv_idx
);
118 /* We have gaps and are forced to count the non-NULL entries. */
120 struct dtv_slotinfo_list
*runp
= GL(dl_tls_dtv_slotinfo_list
);
123 for (size_t i
= 0; i
< runp
->len
; ++i
)
124 if (runp
->slotinfo
[i
].map
!= NULL
)
137 _dl_determine_tlsoffset (void)
139 size_t max_align
= TLS_TCB_ALIGN
;
141 size_t freebottom
= 0;
143 /* The first element of the dtv slot info list is allocated. */
144 assert (GL(dl_tls_dtv_slotinfo_list
) != NULL
);
145 /* There is at this point only one element in the
146 dl_tls_dtv_slotinfo_list list. */
147 assert (GL(dl_tls_dtv_slotinfo_list
)->next
== NULL
);
149 struct dtv_slotinfo
*slotinfo
= GL(dl_tls_dtv_slotinfo_list
)->slotinfo
;
151 /* Determining the offset of the various parts of the static TLS
152 block has several dependencies. In addition we have to work
153 around bugs in some toolchains.
155 Each TLS block from the objects available at link time has a size
156 and an alignment requirement. The GNU ld computes the alignment
157 requirements for the data at the positions *in the file*, though.
158 I.e, it is not simply possible to allocate a block with the size
159 of the TLS program header entry. The data is layed out assuming
160 that the first byte of the TLS block fulfills
162 p_vaddr mod p_align == &TLS_BLOCK mod p_align
164 This means we have to add artificial padding at the beginning of
165 the TLS block. These bytes are never used for the TLS data in
166 this module but the first byte allocated must be aligned
167 according to mod p_align == 0 so that the first byte of the TLS
168 block is aligned according to p_vaddr mod p_align. This is ugly
169 and the linker can help by computing the offsets in the TLS block
170 assuming the first byte of the TLS block is aligned according to
173 The extra space which might be allocated before the first byte of
174 the TLS block need not go unused. The code below tries to use
175 that memory for the next TLS block. This can work if the total
176 memory requirement for the next TLS block is smaller than the
180 /* We simply start with zero. */
183 for (size_t cnt
= 0; slotinfo
[cnt
].map
!= NULL
; ++cnt
)
185 assert (cnt
< GL(dl_tls_dtv_slotinfo_list
)->len
);
187 size_t firstbyte
= (-slotinfo
[cnt
].map
->l_tls_firstbyte_offset
188 & (slotinfo
[cnt
].map
->l_tls_align
- 1));
190 max_align
= MAX (max_align
, slotinfo
[cnt
].map
->l_tls_align
);
192 if (freebottom
- freetop
>= slotinfo
[cnt
].map
->l_tls_blocksize
)
194 off
= roundup (freetop
+ slotinfo
[cnt
].map
->l_tls_blocksize
195 - firstbyte
, slotinfo
[cnt
].map
->l_tls_align
)
197 if (off
<= freebottom
)
201 /* XXX For some architectures we perhaps should store the
203 slotinfo
[cnt
].map
->l_tls_offset
= off
;
208 off
= roundup (offset
+ slotinfo
[cnt
].map
->l_tls_blocksize
- firstbyte
,
209 slotinfo
[cnt
].map
->l_tls_align
) + firstbyte
;
210 if (off
> offset
+ slotinfo
[cnt
].map
->l_tls_blocksize
211 + (freebottom
- freetop
))
214 freebottom
= off
- slotinfo
[cnt
].map
->l_tls_blocksize
;
218 /* XXX For some architectures we perhaps should store the
220 slotinfo
[cnt
].map
->l_tls_offset
= off
;
223 GL(dl_tls_static_used
) = offset
;
224 GL(dl_tls_static_size
) = (roundup (offset
+ TLS_STATIC_SURPLUS
, max_align
)
227 /* The TLS blocks start right after the TCB. */
228 size_t offset
= TLS_TCB_SIZE
;
230 for (size_t cnt
= 0; slotinfo
[cnt
].map
!= NULL
; ++cnt
)
232 assert (cnt
< GL(dl_tls_dtv_slotinfo_list
)->len
);
234 size_t firstbyte
= (-slotinfo
[cnt
].map
->l_tls_firstbyte_offset
235 & (slotinfo
[cnt
].map
->l_tls_align
- 1));
237 max_align
= MAX (max_align
, slotinfo
[cnt
].map
->l_tls_align
);
239 if (slotinfo
[cnt
].map
->l_tls_blocksize
<= freetop
- freebottom
)
241 off
= roundup (freebottom
, slotinfo
[cnt
].map
->l_tls_align
);
242 if (off
- freebottom
< firstbyte
)
243 off
+= slotinfo
[cnt
].map
->l_tls_align
;
244 if (off
+ slotinfo
[cnt
].map
->l_tls_blocksize
- firstbyte
<= freetop
)
246 slotinfo
[cnt
].map
->l_tls_offset
= off
- firstbyte
;
247 freebottom
= (off
+ slotinfo
[cnt
].map
->l_tls_blocksize
253 off
= roundup (offset
, slotinfo
[cnt
].map
->l_tls_align
);
254 if (off
- offset
< firstbyte
)
255 off
+= slotinfo
[cnt
].map
->l_tls_align
;
257 slotinfo
[cnt
].map
->l_tls_offset
= off
- firstbyte
;
258 if (off
- firstbyte
- offset
> freetop
- freebottom
)
261 freetop
= off
- firstbyte
;
264 offset
= off
+ slotinfo
[cnt
].map
->l_tls_blocksize
- firstbyte
;
267 GL(dl_tls_static_used
) = offset
;
268 GL(dl_tls_static_size
) = roundup (offset
+ TLS_STATIC_SURPLUS
,
271 # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
274 /* The alignment requirement for the static TLS block. */
275 GL(dl_tls_static_align
) = max_align
;
279 /* This is called only when the data structure setup was skipped at startup,
280 when there was no need for it then. Now we have dynamically loaded
281 something needing TLS, or libpthread needs it. */
286 assert (GL(dl_tls_dtv_slotinfo_list
) == NULL
);
287 assert (GL(dl_tls_max_dtv_idx
) == 0);
289 const size_t nelem
= 2 + TLS_SLOTINFO_SURPLUS
;
291 GL(dl_tls_dtv_slotinfo_list
)
292 = calloc (1, (sizeof (struct dtv_slotinfo_list
)
293 + nelem
* sizeof (struct dtv_slotinfo
)));
294 if (GL(dl_tls_dtv_slotinfo_list
) == NULL
)
297 GL(dl_tls_dtv_slotinfo_list
)->len
= nelem
;
299 /* Number of elements in the static TLS block. It can't be zero
300 because of various assumptions. The one element is null. */
301 GL(dl_tls_static_nelem
) = GL(dl_tls_max_dtv_idx
) = 1;
303 /* This initializes more variables for us. */
304 _dl_determine_tlsoffset ();
308 rtld_hidden_def (_dl_tls_setup
)
313 allocate_dtv (void *result
)
318 /* We allocate a few more elements in the dtv than are needed for the
319 initial set of modules. This should avoid in most cases expansions
321 dtv_length
= GL(dl_tls_max_dtv_idx
) + DTV_SURPLUS
;
322 dtv
= calloc (dtv_length
+ 2, sizeof (dtv_t
));
325 /* This is the initial length of the dtv. */
326 dtv
[0].counter
= dtv_length
;
328 /* The rest of the dtv (including the generation counter) is
329 Initialize with zero to indicate nothing there. */
331 /* Add the dtv to the thread data structures. */
332 INSTALL_DTV (result
, dtv
);
341 /* Get size and alignment requirements of the static TLS block. */
344 _dl_get_tls_static_info (size_t *sizep
, size_t *alignp
)
346 *sizep
= GL(dl_tls_static_size
);
347 *alignp
= GL(dl_tls_static_align
);
353 _dl_allocate_tls_storage (void)
356 size_t size
= GL(dl_tls_static_size
);
360 [ TLS_PRE_TCB_SIZE ] [ TLS_TCB_SIZE ] [ TLS blocks ]
361 ^ This should be returned. */
362 size
+= (TLS_PRE_TCB_SIZE
+ GL(dl_tls_static_align
) - 1)
363 & ~(GL(dl_tls_static_align
) - 1);
366 /* Allocate a correctly aligned chunk of memory. */
367 result
= __libc_memalign (GL(dl_tls_static_align
), size
);
368 if (__builtin_expect (result
!= NULL
, 1))
370 /* Allocate the DTV. */
371 void *allocated
= result
;
374 /* The TCB follows the TLS blocks. */
375 result
= (char *) result
+ size
- TLS_TCB_SIZE
;
377 /* Clear the TCB data structure. We can't ask the caller (i.e.
378 libpthread) to do it, because we will initialize the DTV et al. */
379 memset (result
, '\0', TLS_TCB_SIZE
);
381 result
= (char *) result
+ size
- GL(dl_tls_static_size
);
383 /* Clear the TCB data structure and TLS_PRE_TCB_SIZE bytes before it.
384 We can't ask the caller (i.e. libpthread) to do it, because we will
385 initialize the DTV et al. */
386 memset ((char *) result
- TLS_PRE_TCB_SIZE
, '\0',
387 TLS_PRE_TCB_SIZE
+ TLS_TCB_SIZE
);
390 result
= allocate_dtv (result
);
400 extern dtv_t _dl_static_dtv
[];
401 # define _dl_initial_dtv (&_dl_static_dtv[1])
405 _dl_resize_dtv (dtv_t
*dtv
)
407 /* Resize the dtv. */
409 /* Load GL(dl_tls_max_dtv_idx) atomically since it may be written to by
410 other threads concurrently. */
412 = atomic_load_acquire (&GL(dl_tls_max_dtv_idx
)) + DTV_SURPLUS
;
413 size_t oldsize
= dtv
[-1].counter
;
415 if (dtv
== GL(dl_initial_dtv
))
417 /* This is the initial dtv that was either statically allocated in
418 __libc_setup_tls or allocated during rtld startup using the
419 dl-minimal.c malloc instead of the real malloc. We can't free
420 it, we have to abandon the old storage. */
422 newp
= malloc ((2 + newsize
) * sizeof (dtv_t
));
425 memcpy (newp
, &dtv
[-1], (2 + oldsize
) * sizeof (dtv_t
));
429 newp
= realloc (&dtv
[-1],
430 (2 + newsize
) * sizeof (dtv_t
));
435 newp
[0].counter
= newsize
;
437 /* Clear the newly allocated part. */
438 memset (newp
+ 2 + oldsize
, '\0',
439 (newsize
- oldsize
) * sizeof (dtv_t
));
441 /* Return the generation counter. */
448 _dl_allocate_tls_init (void *result
)
451 /* The memory allocation failed. */
454 dtv_t
*dtv
= GET_DTV (result
);
455 struct dtv_slotinfo_list
*listp
;
459 /* Check if the current dtv is big enough. */
460 if (dtv
[-1].counter
< GL(dl_tls_max_dtv_idx
))
462 /* Resize the dtv. */
463 dtv
= _dl_resize_dtv (dtv
);
465 /* Install this new dtv in the thread data structures. */
466 INSTALL_DTV (result
, &dtv
[-1]);
469 /* We have to prepare the dtv for all currently loaded modules using
470 TLS. For those which are dynamically loaded we add the values
471 indicating deferred allocation. */
472 listp
= GL(dl_tls_dtv_slotinfo_list
);
477 for (cnt
= total
== 0 ? 1 : 0; cnt
< listp
->len
; ++cnt
)
479 struct link_map
*map
;
482 /* Check for the total number of used slots. */
483 if (total
+ cnt
> GL(dl_tls_max_dtv_idx
))
486 map
= listp
->slotinfo
[cnt
].map
;
491 /* Keep track of the maximum generation number. This might
492 not be the generation counter. */
493 assert (listp
->slotinfo
[cnt
].gen
<= GL(dl_tls_generation
));
494 maxgen
= MAX (maxgen
, listp
->slotinfo
[cnt
].gen
);
496 if (map
->l_tls_offset
== NO_TLS_OFFSET
497 || map
->l_tls_offset
== FORCED_DYNAMIC_TLS_OFFSET
)
499 /* For dynamically loaded modules we simply store
500 the value indicating deferred allocation. */
501 dtv
[map
->l_tls_modid
].pointer
.val
= TLS_DTV_UNALLOCATED
;
502 dtv
[map
->l_tls_modid
].pointer
.is_static
= false;
506 assert (map
->l_tls_modid
== cnt
);
507 assert (map
->l_tls_blocksize
>= map
->l_tls_initimage_size
);
509 assert ((size_t) map
->l_tls_offset
>= map
->l_tls_blocksize
);
510 dest
= (char *) result
- map
->l_tls_offset
;
512 dest
= (char *) result
+ map
->l_tls_offset
;
514 # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
517 /* Copy the initialization image and clear the BSS part. */
518 dtv
[map
->l_tls_modid
].pointer
.val
= dest
;
519 dtv
[map
->l_tls_modid
].pointer
.is_static
= true;
520 memset (__mempcpy (dest
, map
->l_tls_initimage
,
521 map
->l_tls_initimage_size
), '\0',
522 map
->l_tls_blocksize
- map
->l_tls_initimage_size
);
526 if (total
>= GL(dl_tls_max_dtv_idx
))
530 assert (listp
!= NULL
);
533 /* The DTV version is up-to-date now. */
534 dtv
[0].counter
= maxgen
;
538 rtld_hidden_def (_dl_allocate_tls_init
)
542 _dl_allocate_tls (void *mem
)
544 return _dl_allocate_tls_init (mem
== NULL
545 ? _dl_allocate_tls_storage ()
546 : allocate_dtv (mem
));
548 rtld_hidden_def (_dl_allocate_tls
)
553 _dl_deallocate_tls (void *tcb
, bool dealloc_tcb
)
555 dtv_t
*dtv
= GET_DTV (tcb
);
557 /* We need to free the memory allocated for non-static TLS. */
558 for (size_t cnt
= 0; cnt
< dtv
[-1].counter
; ++cnt
)
559 if (! dtv
[1 + cnt
].pointer
.is_static
560 && dtv
[1 + cnt
].pointer
.val
!= TLS_DTV_UNALLOCATED
)
561 free (dtv
[1 + cnt
].pointer
.val
);
563 /* The array starts with dtv[-1]. */
564 if (dtv
!= GL(dl_initial_dtv
))
570 /* The TCB follows the TLS blocks. Back up to free the whole block. */
571 tcb
-= GL(dl_tls_static_size
) - TLS_TCB_SIZE
;
573 /* Back up the TLS_PRE_TCB_SIZE bytes. */
574 tcb
-= (TLS_PRE_TCB_SIZE
+ GL(dl_tls_static_align
) - 1)
575 & ~(GL(dl_tls_static_align
) - 1);
580 rtld_hidden_def (_dl_deallocate_tls
)
584 /* The __tls_get_addr function has two basic forms which differ in the
585 arguments. The IA-64 form takes two parameters, the module ID and
586 offset. The form used, among others, on IA-32 takes a reference to
587 a special structure which contain the same information. The second
588 form seems to be more often used (in the moment) so we default to
589 it. Users of the IA-64 form have to provide adequate definitions
590 of the following macros. */
591 # ifndef GET_ADDR_ARGS
592 # define GET_ADDR_ARGS tls_index *ti
593 # define GET_ADDR_PARAM ti
595 # ifndef GET_ADDR_MODULE
596 # define GET_ADDR_MODULE ti->ti_module
598 # ifndef GET_ADDR_OFFSET
599 # define GET_ADDR_OFFSET ti->ti_offset
604 allocate_and_init (struct link_map
*map
)
608 newp
= __libc_memalign (map
->l_tls_align
, map
->l_tls_blocksize
);
612 /* Initialize the memory. */
613 memset (__mempcpy (newp
, map
->l_tls_initimage
, map
->l_tls_initimage_size
),
614 '\0', map
->l_tls_blocksize
- map
->l_tls_initimage_size
);
621 _dl_update_slotinfo (unsigned long int req_modid
)
623 struct link_map
*the_map
= NULL
;
624 dtv_t
*dtv
= THREAD_DTV ();
626 /* The global dl_tls_dtv_slotinfo array contains for each module
627 index the generation counter current when the entry was created.
628 This array never shrinks so that all module indices which were
629 valid at some time can be used to access it. Before the first
630 use of a new module index in this function the array was extended
631 appropriately. Access also does not have to be guarded against
632 modifications of the array. It is assumed that pointer-size
633 values can be read atomically even in SMP environments. It is
634 possible that other threads at the same time dynamically load
635 code and therefore add to the slotinfo list. This is a problem
636 since we must not pick up any information about incomplete work.
637 The solution to this is to ignore all dtv slots which were
638 created after the one we are currently interested. We know that
639 dynamic loading for this module is completed and this is the last
640 load operation we know finished. */
641 unsigned long int idx
= req_modid
;
642 struct dtv_slotinfo_list
*listp
= GL(dl_tls_dtv_slotinfo_list
);
644 while (idx
>= listp
->len
)
650 if (dtv
[0].counter
< listp
->slotinfo
[idx
].gen
)
652 /* The generation counter for the slot is higher than what the
653 current dtv implements. We have to update the whole dtv but
654 only those entries with a generation counter <= the one for
655 the entry we need. */
656 size_t new_gen
= listp
->slotinfo
[idx
].gen
;
659 /* We have to look through the entire dtv slotinfo list. */
660 listp
= GL(dl_tls_dtv_slotinfo_list
);
663 for (size_t cnt
= total
== 0 ? 1 : 0; cnt
< listp
->len
; ++cnt
)
665 size_t gen
= listp
->slotinfo
[cnt
].gen
;
668 /* This is a slot for a generation younger than the
669 one we are handling now. It might be incompletely
670 set up so ignore it. */
673 /* If the entry is older than the current dtv layout we
674 know we don't have to handle it. */
675 if (gen
<= dtv
[0].counter
)
678 /* If there is no map this means the entry is empty. */
679 struct link_map
*map
= listp
->slotinfo
[cnt
].map
;
682 /* If this modid was used at some point the memory
683 might still be allocated. */
684 if (! dtv
[total
+ cnt
].pointer
.is_static
685 && dtv
[total
+ cnt
].pointer
.val
!= TLS_DTV_UNALLOCATED
)
687 free (dtv
[total
+ cnt
].pointer
.val
);
688 dtv
[total
+ cnt
].pointer
.val
= TLS_DTV_UNALLOCATED
;
694 /* Check whether the current dtv array is large enough. */
695 size_t modid
= map
->l_tls_modid
;
696 assert (total
+ cnt
== modid
);
697 if (dtv
[-1].counter
< modid
)
699 /* Resize the dtv. */
700 dtv
= _dl_resize_dtv (dtv
);
702 assert (modid
<= dtv
[-1].counter
);
704 /* Install this new dtv in the thread data
706 INSTALL_NEW_DTV (dtv
);
709 /* If there is currently memory allocate for this
710 dtv entry free it. */
711 /* XXX Ideally we will at some point create a memory
713 if (! dtv
[modid
].pointer
.is_static
714 && dtv
[modid
].pointer
.val
!= TLS_DTV_UNALLOCATED
)
715 /* Note that free is called for NULL is well. We
716 deallocate even if it is this dtv entry we are
717 supposed to load. The reason is that we call
718 memalign and not malloc. */
719 free (dtv
[modid
].pointer
.val
);
721 /* This module is loaded dynamically- We defer memory
723 dtv
[modid
].pointer
.is_static
= false;
724 dtv
[modid
].pointer
.val
= TLS_DTV_UNALLOCATED
;
726 if (modid
== req_modid
)
732 while ((listp
= listp
->next
) != NULL
);
734 /* This will be the new maximum generation counter. */
735 dtv
[0].counter
= new_gen
;
743 __attribute_noinline__
744 tls_get_addr_tail (GET_ADDR_ARGS
, dtv_t
*dtv
, struct link_map
*the_map
)
746 /* The allocation was deferred. Do it now. */
749 /* Find the link map for this module. */
750 size_t idx
= GET_ADDR_MODULE
;
751 struct dtv_slotinfo_list
*listp
= GL(dl_tls_dtv_slotinfo_list
);
753 while (idx
>= listp
->len
)
759 the_map
= listp
->slotinfo
[idx
].map
;
763 /* Make sure that, if a dlopen running in parallel forces the
764 variable into static storage, we'll wait until the address in the
765 static TLS block is set up, and use that. If we're undecided
766 yet, make sure we make the decision holding the lock as well. */
767 if (__builtin_expect (the_map
->l_tls_offset
768 != FORCED_DYNAMIC_TLS_OFFSET
, 0))
770 __rtld_lock_lock_recursive (GL(dl_load_lock
));
771 if (__glibc_likely (the_map
->l_tls_offset
== NO_TLS_OFFSET
))
773 the_map
->l_tls_offset
= FORCED_DYNAMIC_TLS_OFFSET
;
774 __rtld_lock_unlock_recursive (GL(dl_load_lock
));
778 __rtld_lock_unlock_recursive (GL(dl_load_lock
));
779 if (__builtin_expect (the_map
->l_tls_offset
780 != FORCED_DYNAMIC_TLS_OFFSET
, 1))
782 void *p
= dtv
[GET_ADDR_MODULE
].pointer
.val
;
783 if (__glibc_unlikely (p
== TLS_DTV_UNALLOCATED
))
786 return (char *) p
+ GET_ADDR_OFFSET
;
790 void *p
= dtv
[GET_ADDR_MODULE
].pointer
.val
= allocate_and_init (the_map
);
791 dtv
[GET_ADDR_MODULE
].pointer
.is_static
= false;
793 return (char *) p
+ GET_ADDR_OFFSET
;
797 static struct link_map
*
798 __attribute_noinline__
799 update_get_addr (GET_ADDR_ARGS
)
801 struct link_map
*the_map
= _dl_update_slotinfo (GET_ADDR_MODULE
);
802 dtv_t
*dtv
= THREAD_DTV ();
804 void *p
= dtv
[GET_ADDR_MODULE
].pointer
.val
;
806 if (__glibc_unlikely (p
== TLS_DTV_UNALLOCATED
))
807 return tls_get_addr_tail (GET_ADDR_PARAM
, dtv
, the_map
);
809 return (void *) p
+ GET_ADDR_OFFSET
;
812 /* For all machines that have a non-macro version of __tls_get_addr, we
813 want to use rtld_hidden_proto/rtld_hidden_def in order to call the
814 internal alias for __tls_get_addr from ld.so. This avoids a PLT entry
815 in ld.so for __tls_get_addr. */
817 #ifndef __tls_get_addr
818 extern void * __tls_get_addr (GET_ADDR_ARGS
);
819 rtld_hidden_proto (__tls_get_addr
)
820 rtld_hidden_def (__tls_get_addr
)
823 /* The generic dynamic and local dynamic model cannot be used in
824 statically linked applications. */
826 __tls_get_addr (GET_ADDR_ARGS
)
828 dtv_t
*dtv
= THREAD_DTV ();
830 if (__glibc_unlikely (dtv
[0].counter
!= GL(dl_tls_generation
)))
831 return update_get_addr (GET_ADDR_PARAM
);
833 void *p
= dtv
[GET_ADDR_MODULE
].pointer
.val
;
835 if (__glibc_unlikely (p
== TLS_DTV_UNALLOCATED
))
836 return tls_get_addr_tail (GET_ADDR_PARAM
, dtv
, NULL
);
838 return (char *) p
+ GET_ADDR_OFFSET
;
843 /* Look up the module's TLS block as for __tls_get_addr,
844 but never touch anything. Return null if it's not allocated yet. */
846 _dl_tls_get_addr_soft (struct link_map
*l
)
848 if (__glibc_unlikely (l
->l_tls_modid
== 0))
849 /* This module has no TLS segment. */
852 dtv_t
*dtv
= THREAD_DTV ();
853 if (__glibc_unlikely (dtv
[0].counter
!= GL(dl_tls_generation
)))
855 /* This thread's DTV is not completely current,
856 but it might already cover this module. */
858 if (l
->l_tls_modid
>= dtv
[-1].counter
)
862 size_t idx
= l
->l_tls_modid
;
863 struct dtv_slotinfo_list
*listp
= GL(dl_tls_dtv_slotinfo_list
);
864 while (idx
>= listp
->len
)
870 /* We've reached the slot for this module.
871 If its generation counter is higher than the DTV's,
872 this thread does not know about this module yet. */
873 if (dtv
[0].counter
< listp
->slotinfo
[idx
].gen
)
877 void *data
= dtv
[l
->l_tls_modid
].pointer
.val
;
878 if (__glibc_unlikely (data
== TLS_DTV_UNALLOCATED
))
879 /* The DTV is current, but this thread has not yet needed
880 to allocate this module's segment. */
888 _dl_add_to_slotinfo (struct link_map
*l
)
890 /* Now that we know the object is loaded successfully add
891 modules containing TLS data to the dtv info table. We
892 might have to increase its size. */
893 struct dtv_slotinfo_list
*listp
;
894 struct dtv_slotinfo_list
*prevp
;
895 size_t idx
= l
->l_tls_modid
;
897 /* Find the place in the dtv slotinfo list. */
898 listp
= GL(dl_tls_dtv_slotinfo_list
);
899 prevp
= NULL
; /* Needed to shut up gcc. */
902 /* Does it fit in the array of this list element? */
903 if (idx
< listp
->len
)
909 while (listp
!= NULL
);
913 /* When we come here it means we have to add a new element
914 to the slotinfo list. And the new module must be in
918 listp
= prevp
->next
= (struct dtv_slotinfo_list
*)
919 malloc (sizeof (struct dtv_slotinfo_list
)
920 + TLS_SLOTINFO_SURPLUS
* sizeof (struct dtv_slotinfo
));
923 /* We ran out of memory. We will simply fail this
924 call but don't undo anything we did so far. The
925 application will crash or be terminated anyway very
928 /* We have to do this since some entries in the dtv
929 slotinfo array might already point to this
931 ++GL(dl_tls_generation
);
933 _dl_signal_error (ENOMEM
, "dlopen", NULL
, N_("\
934 cannot create TLS data structures"));
937 listp
->len
= TLS_SLOTINFO_SURPLUS
;
939 memset (listp
->slotinfo
, '\0',
940 TLS_SLOTINFO_SURPLUS
* sizeof (struct dtv_slotinfo
));
943 /* Add the information into the slotinfo data structure. */
944 listp
->slotinfo
[idx
].map
= l
;
945 listp
->slotinfo
[idx
].gen
= GL(dl_tls_generation
) + 1;