1 /* Thread-local storage handling in the ELF dynamic linker. Generic version.
2 Copyright (C) 2002-2014 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <http://www.gnu.org/licenses/>. */
26 #include <sys/param.h>
/* Amount of excess space to allocate in the static TLS area
   to allow dynamic loading of modules defining IE-model TLS data.
   Parenthesized so the expansion groups correctly in any expression
   context (the unparenthesized form would misassociate under a
   neighboring higher-precedence operator).  */
#define TLS_STATIC_SURPLUS	(64 + DL_NNS * 100)
/* Out-of-memory handler: abort the process with a diagnostic.  Used
   when TLS bookkeeping structures cannot be allocated, since there is
   no way to recover.  */
static void
__attribute__ ((__noreturn__))
oom (void)
{
  _dl_fatal_printf ("cannot allocate memory for thread-local data: ABORT\n");
}
50 _dl_next_tls_modid (void)
54 if (__builtin_expect (GL(dl_tls_dtv_gaps
), false))
57 struct dtv_slotinfo_list
*runp
= GL(dl_tls_dtv_slotinfo_list
);
59 /* Note that this branch will never be executed during program
60 start since there are no gaps at that time. Therefore it
61 does not matter that the dl_tls_dtv_slotinfo is not allocated
62 yet when the function is called for the first times.
64 NB: the offset +1 is due to the fact that DTV[0] is used
65 for something else. */
66 result
= GL(dl_tls_static_nelem
) + 1;
67 if (result
<= GL(dl_tls_max_dtv_idx
))
70 while (result
- disp
< runp
->len
)
72 if (runp
->slotinfo
[result
- disp
].map
== NULL
)
76 assert (result
<= GL(dl_tls_max_dtv_idx
) + 1);
79 if (result
- disp
< runp
->len
)
84 while ((runp
= runp
->next
) != NULL
);
86 if (result
> GL(dl_tls_max_dtv_idx
))
88 /* The new index must indeed be exactly one higher than the
90 assert (result
== GL(dl_tls_max_dtv_idx
) + 1);
91 /* There is no gap anymore. */
92 GL(dl_tls_dtv_gaps
) = false;
99 /* No gaps, allocate a new entry. */
102 result
= ++GL(dl_tls_max_dtv_idx
);
112 _dl_determine_tlsoffset (void)
114 size_t max_align
= TLS_TCB_ALIGN
;
116 size_t freebottom
= 0;
118 /* The first element of the dtv slot info list is allocated. */
119 assert (GL(dl_tls_dtv_slotinfo_list
) != NULL
);
120 /* There is at this point only one element in the
121 dl_tls_dtv_slotinfo_list list. */
122 assert (GL(dl_tls_dtv_slotinfo_list
)->next
== NULL
);
124 struct dtv_slotinfo
*slotinfo
= GL(dl_tls_dtv_slotinfo_list
)->slotinfo
;
126 /* Determining the offset of the various parts of the static TLS
127 block has several dependencies. In addition we have to work
128 around bugs in some toolchains.
130 Each TLS block from the objects available at link time has a size
131 and an alignment requirement. The GNU ld computes the alignment
132 requirements for the data at the positions *in the file*, though.
133 I.e, it is not simply possible to allocate a block with the size
134 of the TLS program header entry. The data is layed out assuming
135 that the first byte of the TLS block fulfills
137 p_vaddr mod p_align == &TLS_BLOCK mod p_align
139 This means we have to add artificial padding at the beginning of
140 the TLS block. These bytes are never used for the TLS data in
141 this module but the first byte allocated must be aligned
142 according to mod p_align == 0 so that the first byte of the TLS
143 block is aligned according to p_vaddr mod p_align. This is ugly
144 and the linker can help by computing the offsets in the TLS block
145 assuming the first byte of the TLS block is aligned according to
148 The extra space which might be allocated before the first byte of
149 the TLS block need not go unused. The code below tries to use
150 that memory for the next TLS block. This can work if the total
151 memory requirement for the next TLS block is smaller than the
155 /* We simply start with zero. */
158 for (size_t cnt
= 0; slotinfo
[cnt
].map
!= NULL
; ++cnt
)
160 assert (cnt
< GL(dl_tls_dtv_slotinfo_list
)->len
);
162 size_t firstbyte
= (-slotinfo
[cnt
].map
->l_tls_firstbyte_offset
163 & (slotinfo
[cnt
].map
->l_tls_align
- 1));
165 max_align
= MAX (max_align
, slotinfo
[cnt
].map
->l_tls_align
);
167 if (freebottom
- freetop
>= slotinfo
[cnt
].map
->l_tls_blocksize
)
169 off
= roundup (freetop
+ slotinfo
[cnt
].map
->l_tls_blocksize
170 - firstbyte
, slotinfo
[cnt
].map
->l_tls_align
)
172 if (off
<= freebottom
)
176 /* XXX For some architectures we perhaps should store the
178 slotinfo
[cnt
].map
->l_tls_offset
= off
;
183 off
= roundup (offset
+ slotinfo
[cnt
].map
->l_tls_blocksize
- firstbyte
,
184 slotinfo
[cnt
].map
->l_tls_align
) + firstbyte
;
185 if (off
> offset
+ slotinfo
[cnt
].map
->l_tls_blocksize
186 + (freebottom
- freetop
))
189 freebottom
= off
- slotinfo
[cnt
].map
->l_tls_blocksize
;
193 /* XXX For some architectures we perhaps should store the
195 slotinfo
[cnt
].map
->l_tls_offset
= off
;
198 GL(dl_tls_static_used
) = offset
;
199 GL(dl_tls_static_size
) = (roundup (offset
+ TLS_STATIC_SURPLUS
, max_align
)
202 /* The TLS blocks start right after the TCB. */
203 size_t offset
= TLS_TCB_SIZE
;
205 for (size_t cnt
= 0; slotinfo
[cnt
].map
!= NULL
; ++cnt
)
207 assert (cnt
< GL(dl_tls_dtv_slotinfo_list
)->len
);
209 size_t firstbyte
= (-slotinfo
[cnt
].map
->l_tls_firstbyte_offset
210 & (slotinfo
[cnt
].map
->l_tls_align
- 1));
212 max_align
= MAX (max_align
, slotinfo
[cnt
].map
->l_tls_align
);
214 if (slotinfo
[cnt
].map
->l_tls_blocksize
<= freetop
- freebottom
)
216 off
= roundup (freebottom
, slotinfo
[cnt
].map
->l_tls_align
);
217 if (off
- freebottom
< firstbyte
)
218 off
+= slotinfo
[cnt
].map
->l_tls_align
;
219 if (off
+ slotinfo
[cnt
].map
->l_tls_blocksize
- firstbyte
<= freetop
)
221 slotinfo
[cnt
].map
->l_tls_offset
= off
- firstbyte
;
222 freebottom
= (off
+ slotinfo
[cnt
].map
->l_tls_blocksize
228 off
= roundup (offset
, slotinfo
[cnt
].map
->l_tls_align
);
229 if (off
- offset
< firstbyte
)
230 off
+= slotinfo
[cnt
].map
->l_tls_align
;
232 slotinfo
[cnt
].map
->l_tls_offset
= off
- firstbyte
;
233 if (off
- firstbyte
- offset
> freetop
- freebottom
)
236 freetop
= off
- firstbyte
;
239 offset
= off
+ slotinfo
[cnt
].map
->l_tls_blocksize
- firstbyte
;
242 GL(dl_tls_static_used
) = offset
;
243 GL(dl_tls_static_size
) = roundup (offset
+ TLS_STATIC_SURPLUS
,
246 # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
249 /* The alignment requirement for the static TLS block. */
250 GL(dl_tls_static_align
) = max_align
;
254 /* This is called only when the data structure setup was skipped at startup,
255 when there was no need for it then. Now we have dynamically loaded
256 something needing TLS, or libpthread needs it. */
261 assert (GL(dl_tls_dtv_slotinfo_list
) == NULL
);
262 assert (GL(dl_tls_max_dtv_idx
) == 0);
264 const size_t nelem
= 2 + TLS_SLOTINFO_SURPLUS
;
266 GL(dl_tls_dtv_slotinfo_list
)
267 = calloc (1, (sizeof (struct dtv_slotinfo_list
)
268 + nelem
* sizeof (struct dtv_slotinfo
)));
269 if (GL(dl_tls_dtv_slotinfo_list
) == NULL
)
272 GL(dl_tls_dtv_slotinfo_list
)->len
= nelem
;
274 /* Number of elements in the static TLS block. It can't be zero
275 because of various assumptions. The one element is null. */
276 GL(dl_tls_static_nelem
) = GL(dl_tls_max_dtv_idx
) = 1;
278 /* This initializes more variables for us. */
279 _dl_determine_tlsoffset ();
283 rtld_hidden_def (_dl_tls_setup
)
288 allocate_dtv (void *result
)
293 /* We allocate a few more elements in the dtv than are needed for the
294 initial set of modules. This should avoid in most cases expansions
296 dtv_length
= GL(dl_tls_max_dtv_idx
) + DTV_SURPLUS
;
297 dtv
= __signal_safe_calloc (dtv_length
+ 2, sizeof (dtv_t
));
300 /* This is the initial length of the dtv. */
301 dtv
[0].counter
= dtv_length
;
303 /* The rest of the dtv (including the generation counter) is
304 Initialize with zero to indicate nothing there. */
306 /* Add the dtv to the thread data structures. */
307 INSTALL_DTV (result
, dtv
);
316 /* Get size and alignment requirements of the static TLS block. */
319 _dl_get_tls_static_info (size_t *sizep
, size_t *alignp
)
321 *sizep
= GL(dl_tls_static_size
);
322 *alignp
= GL(dl_tls_static_align
);
328 _dl_allocate_tls_storage (void)
331 size_t size
= GL(dl_tls_static_size
);
335 [ TLS_PRE_TCB_SIZE ] [ TLS_TCB_SIZE ] [ TLS blocks ]
336 ^ This should be returned. */
337 size
+= (TLS_PRE_TCB_SIZE
+ GL(dl_tls_static_align
) - 1)
338 & ~(GL(dl_tls_static_align
) - 1);
341 /* Allocate a correctly aligned chunk of memory. */
342 result
= __libc_memalign (GL(dl_tls_static_align
), size
);
343 if (__builtin_expect (result
!= NULL
, 1))
345 /* Allocate the DTV. */
346 void *allocated
= result
;
349 /* The TCB follows the TLS blocks. */
350 result
= (char *) result
+ size
- TLS_TCB_SIZE
;
352 /* Clear the TCB data structure. We can't ask the caller (i.e.
353 libpthread) to do it, because we will initialize the DTV et al. */
354 memset (result
, '\0', TLS_TCB_SIZE
);
356 result
= (char *) result
+ size
- GL(dl_tls_static_size
);
358 /* Clear the TCB data structure and TLS_PRE_TCB_SIZE bytes before it.
359 We can't ask the caller (i.e. libpthread) to do it, because we will
360 initialize the DTV et al. */
361 memset ((char *) result
- TLS_PRE_TCB_SIZE
, '\0',
362 TLS_PRE_TCB_SIZE
+ TLS_TCB_SIZE
);
365 result
= allocate_dtv (result
);
376 _dl_allocate_tls_init (void *result
)
379 /* The memory allocation failed. */
382 dtv_t
*dtv
= GET_DTV (result
);
383 struct dtv_slotinfo_list
*listp
;
387 /* We have to prepare the dtv for all currently loaded modules using
388 TLS. For those which are dynamically loaded we add the values
389 indicating deferred allocation. */
390 listp
= GL(dl_tls_dtv_slotinfo_list
);
395 for (cnt
= total
== 0 ? 1 : 0; cnt
< listp
->len
; ++cnt
)
397 struct link_map
*map
;
400 /* Check for the total number of used slots. */
401 if (total
+ cnt
> GL(dl_tls_max_dtv_idx
))
404 map
= listp
->slotinfo
[cnt
].map
;
409 /* Keep track of the maximum generation number. This might
410 not be the generation counter. */
411 maxgen
= MAX (maxgen
, listp
->slotinfo
[cnt
].gen
);
413 if (map
->l_tls_offset
== NO_TLS_OFFSET
414 || map
->l_tls_offset
== FORCED_DYNAMIC_TLS_OFFSET
)
416 /* For dynamically loaded modules we simply store
417 the value indicating deferred allocation. */
418 dtv
[map
->l_tls_modid
].pointer
.val
= TLS_DTV_UNALLOCATED
;
419 dtv
[map
->l_tls_modid
].pointer
.is_static
= false;
423 assert (map
->l_tls_modid
== cnt
);
424 assert (map
->l_tls_blocksize
>= map
->l_tls_initimage_size
);
426 assert ((size_t) map
->l_tls_offset
>= map
->l_tls_blocksize
);
427 dest
= (char *) result
- map
->l_tls_offset
;
429 dest
= (char *) result
+ map
->l_tls_offset
;
431 # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
434 /* Copy the initialization image and clear the BSS part. */
435 dtv
[map
->l_tls_modid
].pointer
.val
= dest
;
436 dtv
[map
->l_tls_modid
].pointer
.is_static
= true;
437 memset (__mempcpy (dest
, map
->l_tls_initimage
,
438 map
->l_tls_initimage_size
), '\0',
439 map
->l_tls_blocksize
- map
->l_tls_initimage_size
);
443 if (total
>= GL(dl_tls_max_dtv_idx
))
447 assert (listp
!= NULL
);
450 /* The DTV version is up-to-date now. */
451 dtv
[0].counter
= maxgen
;
455 rtld_hidden_def (_dl_allocate_tls_init
)
459 _dl_allocate_tls (void *mem
)
461 return _dl_allocate_tls_init (mem
== NULL
462 ? _dl_allocate_tls_storage ()
463 : allocate_dtv (mem
));
465 rtld_hidden_def (_dl_allocate_tls
)
469 _dl_clear_dtv (dtv_t
*dtv
)
471 for (size_t cnt
= 0; cnt
< dtv
[-1].counter
; ++cnt
)
472 if (! dtv
[1 + cnt
].pointer
.is_static
473 && dtv
[1 + cnt
].pointer
.val
!= TLS_DTV_UNALLOCATED
)
474 __signal_safe_free (dtv
[1 + cnt
].pointer
.val
);
475 memset (dtv
, '\0', (dtv
[-1].counter
+ 1) * sizeof (dtv_t
));
478 rtld_hidden_def (_dl_clear_dtv
)
/* Statically-linked case: the initial DTV is the _dl_static_dtv array
   defined elsewhere; _dl_initial_dtv points at its entry [1] (entry [0]
   presumably holds the length slot, as with heap-allocated DTVs —
   NOTE(review): confirm against the definition site).  */
481 extern dtv_t _dl_static_dtv
[];
482 # define _dl_initial_dtv (&_dl_static_dtv[1])
487 _dl_deallocate_tls (void *tcb
, bool dealloc_tcb
)
489 dtv_t
*dtv
= GET_DTV (tcb
);
491 /* We need to free the memory allocated for non-static TLS. */
492 for (size_t cnt
= 0; cnt
< dtv
[-1].counter
; ++cnt
)
493 if (! dtv
[1 + cnt
].pointer
.is_static
494 && dtv
[1 + cnt
].pointer
.val
!= TLS_DTV_UNALLOCATED
)
495 __signal_safe_free (dtv
[1 + cnt
].pointer
.val
);
497 /* The array starts with dtv[-1]. */
498 if (dtv
!= GL(dl_initial_dtv
))
499 __signal_safe_free (dtv
- 1);
504 /* The TCB follows the TLS blocks. Back up to free the whole block. */
505 tcb
-= GL(dl_tls_static_size
) - TLS_TCB_SIZE
;
507 /* Back up the TLS_PRE_TCB_SIZE bytes. */
508 tcb
-= (TLS_PRE_TCB_SIZE
+ GL(dl_tls_static_align
) - 1)
509 & ~(GL(dl_tls_static_align
) - 1);
514 rtld_hidden_def (_dl_deallocate_tls
)
/* The __tls_get_addr function has two basic forms which differ in the
   arguments.  The IA-64 form takes two parameters, the module ID and
   offset.  The form used, among others, on IA-32 takes a reference to
   a special structure which contain the same information.  The second
   form seems to be more often used (in the moment) so we default to
   it.  Users of the IA-64 form have to provide adequate definitions
   of the following macros.  */
# ifndef GET_ADDR_ARGS
#  define GET_ADDR_ARGS tls_index *ti
#  define GET_ADDR_PARAM ti
# endif
# ifndef GET_ADDR_MODULE
#  define GET_ADDR_MODULE ti->ti_module
# endif
# ifndef GET_ADDR_OFFSET
#  define GET_ADDR_OFFSET ti->ti_offset
# endif
538 allocate_and_init (dtv_t
*dtv
, struct link_map
*map
)
541 newp
= __signal_safe_memalign (map
->l_tls_align
, map
->l_tls_blocksize
);
545 /* Initialize the memory. Since this is our thread's space, we are
546 under a signal mask, and no one has touched this section before,
547 we can safely just overwrite whatever's there. */
548 memset (__mempcpy (newp
, map
->l_tls_initimage
, map
->l_tls_initimage_size
),
549 '\0', map
->l_tls_blocksize
- map
->l_tls_initimage_size
);
551 dtv
->pointer
.val
= newp
;
556 _dl_update_slotinfo (unsigned long int req_modid
)
558 struct link_map
*the_map
= NULL
;
559 dtv_t
*dtv
= THREAD_DTV ();
561 /* The global dl_tls_dtv_slotinfo array contains for each module
562 index the generation counter current when the entry was created.
563 This array never shrinks so that all module indices which were
564 valid at some time can be used to access it. Before the first
565 use of a new module index in this function the array was extended
566 appropriately. Access also does not have to be guarded against
567 modifications of the array. It is assumed that pointer-size
568 values can be read atomically even in SMP environments. It is
569 possible that other threads at the same time dynamically load
570 code and therefore add to the slotinfo list. This is a problem
571 since we must not pick up any information about incomplete work.
572 The solution to this is to ignore all dtv slots which were
573 created after the one we are currently interested. We know that
574 dynamic loading for this module is completed and this is the last
575 load operation we know finished. */
576 unsigned long int idx
= req_modid
;
577 struct dtv_slotinfo_list
*listp
= GL(dl_tls_dtv_slotinfo_list
);
579 while (idx
>= listp
->len
)
585 if (dtv
[0].counter
< listp
->slotinfo
[idx
].gen
)
587 /* The generation counter for the slot is higher than what the
588 current dtv implements. We have to update the whole dtv but
589 only those entries with a generation counter <= the one for
590 the entry we need. */
591 size_t new_gen
= listp
->slotinfo
[idx
].gen
;
595 _dl_mask_all_signals (&old
);
596 /* We use the signal mask as a lock against reentrancy here.
597 Check that a signal taken before the lock didn't already
600 if (dtv
[0].counter
>= listp
->slotinfo
[idx
].gen
)
602 /* We have to look through the entire dtv slotinfo list. */
603 listp
= GL(dl_tls_dtv_slotinfo_list
);
606 for (size_t cnt
= total
== 0 ? 1 : 0; cnt
< listp
->len
; ++cnt
)
608 size_t gen
= listp
->slotinfo
[cnt
].gen
;
611 /* This is a slot for a generation younger than the
612 one we are handling now. It might be incompletely
613 set up so ignore it. */
616 /* If the entry is older than the current dtv layout we
617 know we don't have to handle it. */
618 if (gen
<= dtv
[0].counter
)
621 size_t modid
= total
+ cnt
;
623 /* If there is no map this means the entry is empty. */
624 struct link_map
*map
= listp
->slotinfo
[cnt
].map
;
627 /* If this modid was used at some point the memory
628 might still be allocated. */
629 if (dtv
[-1].counter
>= modid
630 && !dtv
[modid
].pointer
.is_static
631 && dtv
[modid
].pointer
.val
!= TLS_DTV_UNALLOCATED
)
633 __signal_safe_free (dtv
[modid
].pointer
.val
);
634 dtv
[modid
].pointer
.val
= TLS_DTV_UNALLOCATED
;
640 assert (modid
== map
->l_tls_modid
);
641 /* Check whether the current dtv array is large enough. */
642 if (dtv
[-1].counter
< modid
)
644 /* Reallocate the dtv. */
646 size_t newsize
= GL(dl_tls_max_dtv_idx
) + DTV_SURPLUS
;
647 size_t oldsize
= dtv
[-1].counter
;
649 assert (map
->l_tls_modid
<= newsize
);
651 if (dtv
== GL(dl_initial_dtv
))
653 /* This is the initial dtv that was allocated
654 during rtld startup using the dl-minimal.c
655 malloc instead of the real allocator. We can't
656 free it, we have to abandon the old storage. */
658 newp
= __signal_safe_malloc (
659 (2 + newsize
) * sizeof (dtv_t
));
662 memcpy (newp
, &dtv
[-1], (2 + oldsize
) * sizeof (dtv_t
));
666 newp
= __signal_safe_realloc (&dtv
[-1],
667 (2 + newsize
) * sizeof (dtv_t
));
672 newp
[0].counter
= newsize
;
674 /* Clear the newly allocated part. */
675 memset (newp
+ 2 + oldsize
, '\0',
676 (newsize
- oldsize
) * sizeof (dtv_t
));
678 /* Point dtv to the generation counter. */
681 /* Install this new dtv in the thread data
683 INSTALL_NEW_DTV (dtv
);
686 /* If there is currently memory allocate for this
687 dtv entry free it. */
688 /* XXX Ideally we will at some point create a memory
690 if (! dtv
[modid
].pointer
.is_static
691 && dtv
[modid
].pointer
.val
!= TLS_DTV_UNALLOCATED
)
692 /* Note that free is called for NULL is well. We
693 deallocate even if it is this dtv entry we are
694 supposed to load. The reason is that we call
695 memalign and not malloc. */
696 __signal_safe_free (dtv
[modid
].pointer
.val
);
698 /* This module is loaded dynamically- We defer memory
700 dtv
[modid
].pointer
.is_static
= false;
701 dtv
[modid
].pointer
.val
= TLS_DTV_UNALLOCATED
;
703 if (modid
== req_modid
)
709 while ((listp
= listp
->next
) != NULL
);
711 /* This will be the new maximum generation counter. */
712 dtv
[0].counter
= new_gen
;
714 _dl_unmask_signals (&old
);
722 __attribute_noinline__
723 tls_get_addr_tail (GET_ADDR_ARGS
, dtv_t
*dtv
, struct link_map
*the_map
)
725 /* The allocation was deferred. Do it now. */
728 /* Find the link map for this module. */
729 size_t idx
= GET_ADDR_MODULE
;
730 struct dtv_slotinfo_list
*listp
= GL(dl_tls_dtv_slotinfo_list
);
732 while (idx
>= listp
->len
)
738 the_map
= listp
->slotinfo
[idx
].map
;
741 _dl_mask_all_signals (&old
);
743 /* As with update_slotinfo, we use the sigmask as a check against
745 if (dtv
[GET_ADDR_MODULE
].pointer
.val
!= TLS_DTV_UNALLOCATED
)
748 /* Synchronize against a parallel dlopen() forcing this variable
749 into static storage. If that happens, we have to be more careful
750 about initializing the area, as that dlopen() will be iterating
751 the threads to do so itself. */
753 if ((offset
= the_map
->l_tls_offset
) == NO_TLS_OFFSET
)
755 /* l_tls_offset starts out at NO_TLS_OFFSET, and all attempts to
756 change it go from NO_TLS_OFFSET to some other value. We use
757 compare_and_exchange to ensure only one attempt succeeds. We
758 don't actually need any memory ordering here, but _acq is the
759 weakest available. */
760 (void) atomic_compare_and_exchange_bool_acq (&the_map
->l_tls_offset
,
761 FORCED_DYNAMIC_TLS_OFFSET
,
763 offset
= the_map
->l_tls_offset
;
764 assert (offset
!= NO_TLS_OFFSET
);
766 if (offset
== FORCED_DYNAMIC_TLS_OFFSET
)
768 allocate_and_init (&dtv
[GET_ADDR_MODULE
], the_map
);
772 void **pp
= &dtv
[GET_ADDR_MODULE
].pointer
.val
;
773 while (atomic_forced_read (*pp
) == TLS_DTV_UNALLOCATED
)
775 /* for lack of a better (safe) thing to do, just spin.
776 Someone else (not us; it's done under a signal mask) set
777 this map to a static TLS offset, and they'll iterate all
778 threads to initialize it. They'll eventually write
779 to pointer.val, at which point we know they've fully
780 completed initialization. */
783 /* Make sure we've picked up their initialization of the actual
784 block; this pairs against the write barrier in
785 init_one_static_tls, guaranteeing that we see their write of
786 the tls_initimage into the static region. */
787 atomic_read_barrier ();
790 assert (dtv
[GET_ADDR_MODULE
].pointer
.val
!= TLS_DTV_UNALLOCATED
);
791 _dl_unmask_signals (&old
);
793 return (char *) dtv
[GET_ADDR_MODULE
].pointer
.val
+ GET_ADDR_OFFSET
;
797 static struct link_map
*
798 __attribute_noinline__
799 update_get_addr (GET_ADDR_ARGS
)
801 struct link_map
*the_map
= _dl_update_slotinfo (GET_ADDR_MODULE
);
802 dtv_t
*dtv
= THREAD_DTV ();
804 void *p
= dtv
[GET_ADDR_MODULE
].pointer
.val
;
806 if (__builtin_expect (p
== TLS_DTV_UNALLOCATED
, 0))
807 return tls_get_addr_tail (GET_ADDR_PARAM
, dtv
, the_map
);
809 return (void *) p
+ GET_ADDR_OFFSET
;
813 /* The generic dynamic and local dynamic model cannot be used in
814 statically linked applications. */
816 __tls_get_addr (GET_ADDR_ARGS
)
818 dtv_t
*dtv
= THREAD_DTV ();
820 if (__builtin_expect (dtv
[0].counter
!= GL(dl_tls_generation
), 0))
821 return update_get_addr (GET_ADDR_PARAM
);
823 void *p
= dtv
[GET_ADDR_MODULE
].pointer
.val
;
825 if (__builtin_expect (p
== TLS_DTV_UNALLOCATED
, 0))
826 return tls_get_addr_tail (GET_ADDR_PARAM
, dtv
, NULL
);
828 return (char *) p
+ GET_ADDR_OFFSET
;
833 /* Look up the module's TLS block as for __tls_get_addr,
834 but never touch anything. Return null if it's not allocated yet. */
836 _dl_tls_get_addr_soft (struct link_map
*l
)
838 if (__builtin_expect (l
->l_tls_modid
== 0, 0))
839 /* This module has no TLS segment. */
842 dtv_t
*dtv
= THREAD_DTV ();
843 if (__builtin_expect (dtv
[0].counter
!= GL(dl_tls_generation
), 0))
845 /* This thread's DTV is not completely current,
846 but it might already cover this module. */
848 if (l
->l_tls_modid
>= dtv
[-1].counter
)
852 size_t idx
= l
->l_tls_modid
;
853 struct dtv_slotinfo_list
*listp
= GL(dl_tls_dtv_slotinfo_list
);
854 while (idx
>= listp
->len
)
860 /* We've reached the slot for this module.
861 If its generation counter is higher than the DTV's,
862 this thread does not know about this module yet. */
863 if (dtv
[0].counter
< listp
->slotinfo
[idx
].gen
)
867 void *data
= dtv
[l
->l_tls_modid
].pointer
.val
;
868 if (__builtin_expect (data
== TLS_DTV_UNALLOCATED
, 0))
869 /* The DTV is current, but this thread has not yet needed
870 to allocate this module's segment. */
878 _dl_add_to_slotinfo (struct link_map
*l
)
880 /* Now that we know the object is loaded successfully add
881 modules containing TLS data to the dtv info table. We
882 might have to increase its size. */
883 struct dtv_slotinfo_list
*listp
;
884 struct dtv_slotinfo_list
*prevp
;
885 size_t idx
= l
->l_tls_modid
;
887 /* Find the place in the dtv slotinfo list. */
888 listp
= GL(dl_tls_dtv_slotinfo_list
);
889 prevp
= NULL
; /* Needed to shut up gcc. */
892 /* Does it fit in the array of this list element? */
893 if (idx
< listp
->len
)
899 while (listp
!= NULL
);
903 /* When we come here it means we have to add a new element
904 to the slotinfo list. And the new module must be in
908 listp
= prevp
->next
= (struct dtv_slotinfo_list
*)
909 malloc (sizeof (struct dtv_slotinfo_list
)
910 + TLS_SLOTINFO_SURPLUS
* sizeof (struct dtv_slotinfo
));
913 /* We ran out of memory. We will simply fail this
914 call but don't undo anything we did so far. The
915 application will crash or be terminated anyway very
918 /* We have to do this since some entries in the dtv
919 slotinfo array might already point to this
921 ++GL(dl_tls_generation
);
923 _dl_signal_error (ENOMEM
, "dlopen", NULL
, N_("\
924 cannot create TLS data structures"));
927 listp
->len
= TLS_SLOTINFO_SURPLUS
;
929 memset (listp
->slotinfo
, '\0',
930 TLS_SLOTINFO_SURPLUS
* sizeof (struct dtv_slotinfo
));
933 /* Add the information into the slotinfo data structure. */
934 listp
->slotinfo
[idx
].map
= l
;
935 listp
->slotinfo
[idx
].gen
= GL(dl_tls_generation
) + 1;