1 /* Thread-local storage handling in the ELF dynamic linker. Generic version.
2 Copyright (C) 2002-2014 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <http://www.gnu.org/licenses/>. */
#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/param.h>
/* Amount of excess space to allocate in the static TLS area
   to allow dynamic loading of modules defining IE-model TLS data.
   Parenthesized so the macro expands safely inside arbitrary
   expressions (the unparenthesized form would bind wrongly under
   a surrounding '*' or unary operator).  */
#define TLS_STATIC_SURPLUS	(64 + DL_NNS * 100)
/* Out-of-memory handler.  Called when a TLS block or DTV cannot be
   allocated; there is no way to recover, so terminate the process.  */
static void
__attribute__ ((__noreturn__))
oom (void)
{
  _dl_fatal_printf ("cannot allocate memory for thread-local data: ABORT\n");
}
49 _dl_next_tls_modid (void)
53 if (__builtin_expect (GL(dl_tls_dtv_gaps
), false))
56 struct dtv_slotinfo_list
*runp
= GL(dl_tls_dtv_slotinfo_list
);
58 /* Note that this branch will never be executed during program
59 start since there are no gaps at that time. Therefore it
60 does not matter that the dl_tls_dtv_slotinfo is not allocated
61 yet when the function is called for the first times.
63 NB: the offset +1 is due to the fact that DTV[0] is used
64 for something else. */
65 result
= GL(dl_tls_static_nelem
) + 1;
66 if (result
<= GL(dl_tls_max_dtv_idx
))
69 while (result
- disp
< runp
->len
)
71 if (runp
->slotinfo
[result
- disp
].map
== NULL
)
75 assert (result
<= GL(dl_tls_max_dtv_idx
) + 1);
78 if (result
- disp
< runp
->len
)
83 while ((runp
= runp
->next
) != NULL
);
85 if (result
> GL(dl_tls_max_dtv_idx
))
87 /* The new index must indeed be exactly one higher than the
89 assert (result
== GL(dl_tls_max_dtv_idx
) + 1);
90 /* There is no gap anymore. */
91 GL(dl_tls_dtv_gaps
) = false;
98 /* No gaps, allocate a new entry. */
101 result
= ++GL(dl_tls_max_dtv_idx
);
110 _dl_count_modids (void)
112 /* It is rare that we have gaps; see elf/dl-open.c (_dl_open) where
113 we fail to load a module and unload it leaving a gap. If we don't
114 have gaps then the number of modids is the current maximum so
116 if (__glibc_likely (!GL(dl_tls_dtv_gaps
)))
117 return GL(dl_tls_max_dtv_idx
);
119 /* We have gaps and are forced to count the non-NULL entries. */
121 struct dtv_slotinfo_list
*runp
= GL(dl_tls_dtv_slotinfo_list
);
124 for (size_t i
= 0; i
< runp
->len
; ++i
)
125 if (runp
->slotinfo
[i
].map
!= NULL
)
138 _dl_determine_tlsoffset (void)
140 size_t max_align
= TLS_TCB_ALIGN
;
142 size_t freebottom
= 0;
144 /* The first element of the dtv slot info list is allocated. */
145 assert (GL(dl_tls_dtv_slotinfo_list
) != NULL
);
146 /* There is at this point only one element in the
147 dl_tls_dtv_slotinfo_list list. */
148 assert (GL(dl_tls_dtv_slotinfo_list
)->next
== NULL
);
150 struct dtv_slotinfo
*slotinfo
= GL(dl_tls_dtv_slotinfo_list
)->slotinfo
;
152 /* Determining the offset of the various parts of the static TLS
153 block has several dependencies. In addition we have to work
154 around bugs in some toolchains.
156 Each TLS block from the objects available at link time has a size
157 and an alignment requirement. The GNU ld computes the alignment
158 requirements for the data at the positions *in the file*, though.
159 I.e, it is not simply possible to allocate a block with the size
160 of the TLS program header entry. The data is layed out assuming
161 that the first byte of the TLS block fulfills
163 p_vaddr mod p_align == &TLS_BLOCK mod p_align
165 This means we have to add artificial padding at the beginning of
166 the TLS block. These bytes are never used for the TLS data in
167 this module but the first byte allocated must be aligned
168 according to mod p_align == 0 so that the first byte of the TLS
169 block is aligned according to p_vaddr mod p_align. This is ugly
170 and the linker can help by computing the offsets in the TLS block
171 assuming the first byte of the TLS block is aligned according to
174 The extra space which might be allocated before the first byte of
175 the TLS block need not go unused. The code below tries to use
176 that memory for the next TLS block. This can work if the total
177 memory requirement for the next TLS block is smaller than the
181 /* We simply start with zero. */
184 for (size_t cnt
= 0; slotinfo
[cnt
].map
!= NULL
; ++cnt
)
186 assert (cnt
< GL(dl_tls_dtv_slotinfo_list
)->len
);
188 size_t firstbyte
= (-slotinfo
[cnt
].map
->l_tls_firstbyte_offset
189 & (slotinfo
[cnt
].map
->l_tls_align
- 1));
191 max_align
= MAX (max_align
, slotinfo
[cnt
].map
->l_tls_align
);
193 if (freebottom
- freetop
>= slotinfo
[cnt
].map
->l_tls_blocksize
)
195 off
= roundup (freetop
+ slotinfo
[cnt
].map
->l_tls_blocksize
196 - firstbyte
, slotinfo
[cnt
].map
->l_tls_align
)
198 if (off
<= freebottom
)
202 /* XXX For some architectures we perhaps should store the
204 slotinfo
[cnt
].map
->l_tls_offset
= off
;
209 off
= roundup (offset
+ slotinfo
[cnt
].map
->l_tls_blocksize
- firstbyte
,
210 slotinfo
[cnt
].map
->l_tls_align
) + firstbyte
;
211 if (off
> offset
+ slotinfo
[cnt
].map
->l_tls_blocksize
212 + (freebottom
- freetop
))
215 freebottom
= off
- slotinfo
[cnt
].map
->l_tls_blocksize
;
219 /* XXX For some architectures we perhaps should store the
221 slotinfo
[cnt
].map
->l_tls_offset
= off
;
224 GL(dl_tls_static_used
) = offset
;
225 GL(dl_tls_static_size
) = (roundup (offset
+ TLS_STATIC_SURPLUS
, max_align
)
228 /* The TLS blocks start right after the TCB. */
229 size_t offset
= TLS_TCB_SIZE
;
231 for (size_t cnt
= 0; slotinfo
[cnt
].map
!= NULL
; ++cnt
)
233 assert (cnt
< GL(dl_tls_dtv_slotinfo_list
)->len
);
235 size_t firstbyte
= (-slotinfo
[cnt
].map
->l_tls_firstbyte_offset
236 & (slotinfo
[cnt
].map
->l_tls_align
- 1));
238 max_align
= MAX (max_align
, slotinfo
[cnt
].map
->l_tls_align
);
240 if (slotinfo
[cnt
].map
->l_tls_blocksize
<= freetop
- freebottom
)
242 off
= roundup (freebottom
, slotinfo
[cnt
].map
->l_tls_align
);
243 if (off
- freebottom
< firstbyte
)
244 off
+= slotinfo
[cnt
].map
->l_tls_align
;
245 if (off
+ slotinfo
[cnt
].map
->l_tls_blocksize
- firstbyte
<= freetop
)
247 slotinfo
[cnt
].map
->l_tls_offset
= off
- firstbyte
;
248 freebottom
= (off
+ slotinfo
[cnt
].map
->l_tls_blocksize
254 off
= roundup (offset
, slotinfo
[cnt
].map
->l_tls_align
);
255 if (off
- offset
< firstbyte
)
256 off
+= slotinfo
[cnt
].map
->l_tls_align
;
258 slotinfo
[cnt
].map
->l_tls_offset
= off
- firstbyte
;
259 if (off
- firstbyte
- offset
> freetop
- freebottom
)
262 freetop
= off
- firstbyte
;
265 offset
= off
+ slotinfo
[cnt
].map
->l_tls_blocksize
- firstbyte
;
268 GL(dl_tls_static_used
) = offset
;
269 GL(dl_tls_static_size
) = roundup (offset
+ TLS_STATIC_SURPLUS
,
272 # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
275 /* The alignment requirement for the static TLS block. */
276 GL(dl_tls_static_align
) = max_align
;
280 /* This is called only when the data structure setup was skipped at startup,
281 when there was no need for it then. Now we have dynamically loaded
282 something needing TLS, or libpthread needs it. */
287 assert (GL(dl_tls_dtv_slotinfo_list
) == NULL
);
288 assert (GL(dl_tls_max_dtv_idx
) == 0);
290 const size_t nelem
= 2 + TLS_SLOTINFO_SURPLUS
;
292 GL(dl_tls_dtv_slotinfo_list
)
293 = calloc (1, (sizeof (struct dtv_slotinfo_list
)
294 + nelem
* sizeof (struct dtv_slotinfo
)));
295 if (GL(dl_tls_dtv_slotinfo_list
) == NULL
)
298 GL(dl_tls_dtv_slotinfo_list
)->len
= nelem
;
300 /* Number of elements in the static TLS block. It can't be zero
301 because of various assumptions. The one element is null. */
302 GL(dl_tls_static_nelem
) = GL(dl_tls_max_dtv_idx
) = 1;
304 /* This initializes more variables for us. */
305 _dl_determine_tlsoffset ();
309 rtld_hidden_def (_dl_tls_setup
)
314 allocate_dtv (void *result
)
319 /* We allocate a few more elements in the dtv than are needed for the
320 initial set of modules. This should avoid in most cases expansions
322 dtv_length
= GL(dl_tls_max_dtv_idx
) + DTV_SURPLUS
;
323 dtv
= calloc (dtv_length
+ 2, sizeof (dtv_t
));
326 /* This is the initial length of the dtv. */
327 dtv
[0].counter
= dtv_length
;
329 /* The rest of the dtv (including the generation counter) is
330 Initialize with zero to indicate nothing there. */
332 /* Add the dtv to the thread data structures. */
333 INSTALL_DTV (result
, dtv
);
342 /* Get size and alignment requirements of the static TLS block. */
345 _dl_get_tls_static_info (size_t *sizep
, size_t *alignp
)
347 *sizep
= GL(dl_tls_static_size
);
348 *alignp
= GL(dl_tls_static_align
);
354 _dl_allocate_tls_storage (void)
357 size_t size
= GL(dl_tls_static_size
);
361 [ TLS_PRE_TCB_SIZE ] [ TLS_TCB_SIZE ] [ TLS blocks ]
362 ^ This should be returned. */
363 size
+= (TLS_PRE_TCB_SIZE
+ GL(dl_tls_static_align
) - 1)
364 & ~(GL(dl_tls_static_align
) - 1);
367 /* Allocate a correctly aligned chunk of memory. */
368 result
= __libc_memalign (GL(dl_tls_static_align
), size
);
369 if (__builtin_expect (result
!= NULL
, 1))
371 /* Allocate the DTV. */
372 void *allocated
= result
;
375 /* The TCB follows the TLS blocks. */
376 result
= (char *) result
+ size
- TLS_TCB_SIZE
;
378 /* Clear the TCB data structure. We can't ask the caller (i.e.
379 libpthread) to do it, because we will initialize the DTV et al. */
380 memset (result
, '\0', TLS_TCB_SIZE
);
382 result
= (char *) result
+ size
- GL(dl_tls_static_size
);
384 /* Clear the TCB data structure and TLS_PRE_TCB_SIZE bytes before it.
385 We can't ask the caller (i.e. libpthread) to do it, because we will
386 initialize the DTV et al. */
387 memset ((char *) result
- TLS_PRE_TCB_SIZE
, '\0',
388 TLS_PRE_TCB_SIZE
+ TLS_TCB_SIZE
);
391 result
= allocate_dtv (result
);
402 _dl_allocate_tls_init (void *result
)
405 /* The memory allocation failed. */
408 dtv_t
*dtv
= GET_DTV (result
);
409 struct dtv_slotinfo_list
*listp
;
413 /* We have to prepare the dtv for all currently loaded modules using
414 TLS. For those which are dynamically loaded we add the values
415 indicating deferred allocation. */
416 listp
= GL(dl_tls_dtv_slotinfo_list
);
421 for (cnt
= total
== 0 ? 1 : 0; cnt
< listp
->len
; ++cnt
)
423 struct link_map
*map
;
426 /* Check for the total number of used slots. */
427 if (total
+ cnt
> GL(dl_tls_max_dtv_idx
))
430 map
= listp
->slotinfo
[cnt
].map
;
435 /* Keep track of the maximum generation number. This might
436 not be the generation counter. */
437 assert (listp
->slotinfo
[cnt
].gen
<= GL(dl_tls_generation
));
438 maxgen
= MAX (maxgen
, listp
->slotinfo
[cnt
].gen
);
440 if (map
->l_tls_offset
== NO_TLS_OFFSET
441 || map
->l_tls_offset
== FORCED_DYNAMIC_TLS_OFFSET
)
443 /* For dynamically loaded modules we simply store
444 the value indicating deferred allocation. */
445 dtv
[map
->l_tls_modid
].pointer
.val
= TLS_DTV_UNALLOCATED
;
446 dtv
[map
->l_tls_modid
].pointer
.is_static
= false;
450 assert (map
->l_tls_modid
== cnt
);
451 assert (map
->l_tls_blocksize
>= map
->l_tls_initimage_size
);
453 assert ((size_t) map
->l_tls_offset
>= map
->l_tls_blocksize
);
454 dest
= (char *) result
- map
->l_tls_offset
;
456 dest
= (char *) result
+ map
->l_tls_offset
;
458 # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
461 /* Copy the initialization image and clear the BSS part. */
462 dtv
[map
->l_tls_modid
].pointer
.val
= dest
;
463 dtv
[map
->l_tls_modid
].pointer
.is_static
= true;
464 memset (__mempcpy (dest
, map
->l_tls_initimage
,
465 map
->l_tls_initimage_size
), '\0',
466 map
->l_tls_blocksize
- map
->l_tls_initimage_size
);
470 if (total
>= GL(dl_tls_max_dtv_idx
))
474 assert (listp
!= NULL
);
477 /* The DTV version is up-to-date now. */
478 dtv
[0].counter
= maxgen
;
482 rtld_hidden_def (_dl_allocate_tls_init
)
486 _dl_allocate_tls (void *mem
)
488 return _dl_allocate_tls_init (mem
== NULL
489 ? _dl_allocate_tls_storage ()
490 : allocate_dtv (mem
));
492 rtld_hidden_def (_dl_allocate_tls
)
496 extern dtv_t _dl_static_dtv
[];
497 # define _dl_initial_dtv (&_dl_static_dtv[1])
502 _dl_deallocate_tls (void *tcb
, bool dealloc_tcb
)
504 dtv_t
*dtv
= GET_DTV (tcb
);
506 /* We need to free the memory allocated for non-static TLS. */
507 for (size_t cnt
= 0; cnt
< dtv
[-1].counter
; ++cnt
)
508 if (! dtv
[1 + cnt
].pointer
.is_static
509 && dtv
[1 + cnt
].pointer
.val
!= TLS_DTV_UNALLOCATED
)
510 free (dtv
[1 + cnt
].pointer
.val
);
512 /* The array starts with dtv[-1]. */
513 if (dtv
!= GL(dl_initial_dtv
))
519 /* The TCB follows the TLS blocks. Back up to free the whole block. */
520 tcb
-= GL(dl_tls_static_size
) - TLS_TCB_SIZE
;
522 /* Back up the TLS_PRE_TCB_SIZE bytes. */
523 tcb
-= (TLS_PRE_TCB_SIZE
+ GL(dl_tls_static_align
) - 1)
524 & ~(GL(dl_tls_static_align
) - 1);
529 rtld_hidden_def (_dl_deallocate_tls
)
/* The __tls_get_addr function has two basic forms which differ in the
   arguments.  The IA-64 form takes two parameters, the module ID and
   offset.  The form used, among others, on IA-32 takes a reference to
   a special structure which contain the same information.  The second
   form seems to be more often used (in the moment) so we default to
   it.  Users of the IA-64 form have to provide adequate definitions
   of the following macros.  */
# ifndef GET_ADDR_ARGS
#  define GET_ADDR_ARGS tls_index *ti
#  define GET_ADDR_PARAM ti
# endif
# ifndef GET_ADDR_MODULE
#  define GET_ADDR_MODULE ti->ti_module
# endif
# ifndef GET_ADDR_OFFSET
#  define GET_ADDR_OFFSET ti->ti_offset
# endif
553 allocate_and_init (struct link_map
*map
)
557 newp
= __libc_memalign (map
->l_tls_align
, map
->l_tls_blocksize
);
561 /* Initialize the memory. */
562 memset (__mempcpy (newp
, map
->l_tls_initimage
, map
->l_tls_initimage_size
),
563 '\0', map
->l_tls_blocksize
- map
->l_tls_initimage_size
);
570 _dl_update_slotinfo (unsigned long int req_modid
)
572 struct link_map
*the_map
= NULL
;
573 dtv_t
*dtv
= THREAD_DTV ();
575 /* The global dl_tls_dtv_slotinfo array contains for each module
576 index the generation counter current when the entry was created.
577 This array never shrinks so that all module indices which were
578 valid at some time can be used to access it. Before the first
579 use of a new module index in this function the array was extended
580 appropriately. Access also does not have to be guarded against
581 modifications of the array. It is assumed that pointer-size
582 values can be read atomically even in SMP environments. It is
583 possible that other threads at the same time dynamically load
584 code and therefore add to the slotinfo list. This is a problem
585 since we must not pick up any information about incomplete work.
586 The solution to this is to ignore all dtv slots which were
587 created after the one we are currently interested. We know that
588 dynamic loading for this module is completed and this is the last
589 load operation we know finished. */
590 unsigned long int idx
= req_modid
;
591 struct dtv_slotinfo_list
*listp
= GL(dl_tls_dtv_slotinfo_list
);
593 while (idx
>= listp
->len
)
599 if (dtv
[0].counter
< listp
->slotinfo
[idx
].gen
)
601 /* The generation counter for the slot is higher than what the
602 current dtv implements. We have to update the whole dtv but
603 only those entries with a generation counter <= the one for
604 the entry we need. */
605 size_t new_gen
= listp
->slotinfo
[idx
].gen
;
608 /* We have to look through the entire dtv slotinfo list. */
609 listp
= GL(dl_tls_dtv_slotinfo_list
);
612 for (size_t cnt
= total
== 0 ? 1 : 0; cnt
< listp
->len
; ++cnt
)
614 size_t gen
= listp
->slotinfo
[cnt
].gen
;
617 /* This is a slot for a generation younger than the
618 one we are handling now. It might be incompletely
619 set up so ignore it. */
622 /* If the entry is older than the current dtv layout we
623 know we don't have to handle it. */
624 if (gen
<= dtv
[0].counter
)
627 /* If there is no map this means the entry is empty. */
628 struct link_map
*map
= listp
->slotinfo
[cnt
].map
;
631 /* If this modid was used at some point the memory
632 might still be allocated. */
633 if (! dtv
[total
+ cnt
].pointer
.is_static
634 && dtv
[total
+ cnt
].pointer
.val
!= TLS_DTV_UNALLOCATED
)
636 free (dtv
[total
+ cnt
].pointer
.val
);
637 dtv
[total
+ cnt
].pointer
.val
= TLS_DTV_UNALLOCATED
;
643 /* Check whether the current dtv array is large enough. */
644 size_t modid
= map
->l_tls_modid
;
645 assert (total
+ cnt
== modid
);
646 if (dtv
[-1].counter
< modid
)
648 /* Reallocate the dtv. */
650 size_t newsize
= GL(dl_tls_max_dtv_idx
) + DTV_SURPLUS
;
651 size_t oldsize
= dtv
[-1].counter
;
653 assert (map
->l_tls_modid
<= newsize
);
655 if (dtv
== GL(dl_initial_dtv
))
657 /* This is the initial dtv that was allocated
658 during rtld startup using the dl-minimal.c
659 malloc instead of the real malloc. We can't
660 free it, we have to abandon the old storage. */
662 newp
= malloc ((2 + newsize
) * sizeof (dtv_t
));
665 memcpy (newp
, &dtv
[-1], (2 + oldsize
) * sizeof (dtv_t
));
669 newp
= realloc (&dtv
[-1],
670 (2 + newsize
) * sizeof (dtv_t
));
675 newp
[0].counter
= newsize
;
677 /* Clear the newly allocated part. */
678 memset (newp
+ 2 + oldsize
, '\0',
679 (newsize
- oldsize
) * sizeof (dtv_t
));
681 /* Point dtv to the generation counter. */
684 /* Install this new dtv in the thread data
686 INSTALL_NEW_DTV (dtv
);
689 /* If there is currently memory allocate for this
690 dtv entry free it. */
691 /* XXX Ideally we will at some point create a memory
693 if (! dtv
[modid
].pointer
.is_static
694 && dtv
[modid
].pointer
.val
!= TLS_DTV_UNALLOCATED
)
695 /* Note that free is called for NULL is well. We
696 deallocate even if it is this dtv entry we are
697 supposed to load. The reason is that we call
698 memalign and not malloc. */
699 free (dtv
[modid
].pointer
.val
);
701 /* This module is loaded dynamically- We defer memory
703 dtv
[modid
].pointer
.is_static
= false;
704 dtv
[modid
].pointer
.val
= TLS_DTV_UNALLOCATED
;
706 if (modid
== req_modid
)
712 while ((listp
= listp
->next
) != NULL
);
714 /* This will be the new maximum generation counter. */
715 dtv
[0].counter
= new_gen
;
723 __attribute_noinline__
724 tls_get_addr_tail (GET_ADDR_ARGS
, dtv_t
*dtv
, struct link_map
*the_map
)
726 /* The allocation was deferred. Do it now. */
729 /* Find the link map for this module. */
730 size_t idx
= GET_ADDR_MODULE
;
731 struct dtv_slotinfo_list
*listp
= GL(dl_tls_dtv_slotinfo_list
);
733 while (idx
>= listp
->len
)
739 the_map
= listp
->slotinfo
[idx
].map
;
743 /* Make sure that, if a dlopen running in parallel forces the
744 variable into static storage, we'll wait until the address in the
745 static TLS block is set up, and use that. If we're undecided
746 yet, make sure we make the decision holding the lock as well. */
747 if (__builtin_expect (the_map
->l_tls_offset
748 != FORCED_DYNAMIC_TLS_OFFSET
, 0))
750 __rtld_lock_lock_recursive (GL(dl_load_lock
));
751 if (__glibc_likely (the_map
->l_tls_offset
== NO_TLS_OFFSET
))
753 the_map
->l_tls_offset
= FORCED_DYNAMIC_TLS_OFFSET
;
754 __rtld_lock_unlock_recursive (GL(dl_load_lock
));
758 __rtld_lock_unlock_recursive (GL(dl_load_lock
));
759 if (__builtin_expect (the_map
->l_tls_offset
760 != FORCED_DYNAMIC_TLS_OFFSET
, 1))
762 void *p
= dtv
[GET_ADDR_MODULE
].pointer
.val
;
763 if (__glibc_unlikely (p
== TLS_DTV_UNALLOCATED
))
766 return (char *) p
+ GET_ADDR_OFFSET
;
770 void *p
= dtv
[GET_ADDR_MODULE
].pointer
.val
= allocate_and_init (the_map
);
771 dtv
[GET_ADDR_MODULE
].pointer
.is_static
= false;
773 return (char *) p
+ GET_ADDR_OFFSET
;
777 static struct link_map
*
778 __attribute_noinline__
779 update_get_addr (GET_ADDR_ARGS
)
781 struct link_map
*the_map
= _dl_update_slotinfo (GET_ADDR_MODULE
);
782 dtv_t
*dtv
= THREAD_DTV ();
784 void *p
= dtv
[GET_ADDR_MODULE
].pointer
.val
;
786 if (__glibc_unlikely (p
== TLS_DTV_UNALLOCATED
))
787 return tls_get_addr_tail (GET_ADDR_PARAM
, dtv
, the_map
);
789 return (void *) p
+ GET_ADDR_OFFSET
;
793 /* The generic dynamic and local dynamic model cannot be used in
794 statically linked applications. */
796 __tls_get_addr (GET_ADDR_ARGS
)
798 dtv_t
*dtv
= THREAD_DTV ();
800 if (__glibc_unlikely (dtv
[0].counter
!= GL(dl_tls_generation
)))
801 return update_get_addr (GET_ADDR_PARAM
);
803 void *p
= dtv
[GET_ADDR_MODULE
].pointer
.val
;
805 if (__glibc_unlikely (p
== TLS_DTV_UNALLOCATED
))
806 return tls_get_addr_tail (GET_ADDR_PARAM
, dtv
, NULL
);
808 return (char *) p
+ GET_ADDR_OFFSET
;
813 /* Look up the module's TLS block as for __tls_get_addr,
814 but never touch anything. Return null if it's not allocated yet. */
816 _dl_tls_get_addr_soft (struct link_map
*l
)
818 if (__glibc_unlikely (l
->l_tls_modid
== 0))
819 /* This module has no TLS segment. */
822 dtv_t
*dtv
= THREAD_DTV ();
823 if (__glibc_unlikely (dtv
[0].counter
!= GL(dl_tls_generation
)))
825 /* This thread's DTV is not completely current,
826 but it might already cover this module. */
828 if (l
->l_tls_modid
>= dtv
[-1].counter
)
832 size_t idx
= l
->l_tls_modid
;
833 struct dtv_slotinfo_list
*listp
= GL(dl_tls_dtv_slotinfo_list
);
834 while (idx
>= listp
->len
)
840 /* We've reached the slot for this module.
841 If its generation counter is higher than the DTV's,
842 this thread does not know about this module yet. */
843 if (dtv
[0].counter
< listp
->slotinfo
[idx
].gen
)
847 void *data
= dtv
[l
->l_tls_modid
].pointer
.val
;
848 if (__glibc_unlikely (data
== TLS_DTV_UNALLOCATED
))
849 /* The DTV is current, but this thread has not yet needed
850 to allocate this module's segment. */
858 _dl_add_to_slotinfo (struct link_map
*l
)
860 /* Now that we know the object is loaded successfully add
861 modules containing TLS data to the dtv info table. We
862 might have to increase its size. */
863 struct dtv_slotinfo_list
*listp
;
864 struct dtv_slotinfo_list
*prevp
;
865 size_t idx
= l
->l_tls_modid
;
867 /* Find the place in the dtv slotinfo list. */
868 listp
= GL(dl_tls_dtv_slotinfo_list
);
869 prevp
= NULL
; /* Needed to shut up gcc. */
872 /* Does it fit in the array of this list element? */
873 if (idx
< listp
->len
)
879 while (listp
!= NULL
);
883 /* When we come here it means we have to add a new element
884 to the slotinfo list. And the new module must be in
888 listp
= prevp
->next
= (struct dtv_slotinfo_list
*)
889 malloc (sizeof (struct dtv_slotinfo_list
)
890 + TLS_SLOTINFO_SURPLUS
* sizeof (struct dtv_slotinfo
));
893 /* We ran out of memory. We will simply fail this
894 call but don't undo anything we did so far. The
895 application will crash or be terminated anyway very
898 /* We have to do this since some entries in the dtv
899 slotinfo array might already point to this
901 ++GL(dl_tls_generation
);
903 _dl_signal_error (ENOMEM
, "dlopen", NULL
, N_("\
904 cannot create TLS data structures"));
907 listp
->len
= TLS_SLOTINFO_SURPLUS
;
909 memset (listp
->slotinfo
, '\0',
910 TLS_SLOTINFO_SURPLUS
* sizeof (struct dtv_slotinfo
));
913 /* Add the information into the slotinfo data structure. */
914 listp
->slotinfo
[idx
].map
= l
;
915 listp
->slotinfo
[idx
].gen
= GL(dl_tls_generation
) + 1;