elf/dl-tls.c

/* Thread-local storage handling in the ELF dynamic linker.  Generic version.
   Copyright (C) 2002-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <libintl.h>
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/param.h>

#include <tls.h>
#include <dl-tls.h>
#include <ldsodefs.h>

/* Amount of excess space to allocate in the static TLS area
   to allow dynamic loading of modules defining IE-model TLS data.  */
#define TLS_STATIC_SURPLUS	(64 + DL_NNS * 100)
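
/* For instance, with DL_NNS at its usual value of 16 this reserves
   64 + 16 * 100 = 1664 bytes of static TLS beyond what the objects
   present at startup need, giving later dlopen'ed modules that use
   the IE model a chance to be placed in the static block.  */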

/* Out-of-memory handler.  */
#ifdef SHARED
static void
__attribute__ ((__noreturn__))
oom (void)
{
  _dl_fatal_printf ("cannot allocate memory for thread-local data: ABORT\n");
}
#endif

size_t
internal_function
_dl_next_tls_modid (void)
{
  size_t result;

  if (__builtin_expect (GL(dl_tls_dtv_gaps), false))
    {
      size_t disp = 0;
      struct dtv_slotinfo_list *runp = GL(dl_tls_dtv_slotinfo_list);

      /* Note that this branch will never be executed during program
	 start since there are no gaps at that time.  Therefore it
	 does not matter that the dl_tls_dtv_slotinfo is not allocated
	 yet when the function is called for the first time.

	 NB: the offset +1 is due to the fact that DTV[0] is used
	 for something else.  */
      result = GL(dl_tls_static_nelem) + 1;
      if (result <= GL(dl_tls_max_dtv_idx))
	do
	  {
	    while (result - disp < runp->len)
	      {
		if (runp->slotinfo[result - disp].map == NULL)
		  break;

		++result;
		assert (result <= GL(dl_tls_max_dtv_idx) + 1);
	      }

	    if (result - disp < runp->len)
	      break;

	    disp += runp->len;
	  }
	while ((runp = runp->next) != NULL);

      if (result > GL(dl_tls_max_dtv_idx))
	{
	  /* The new index must indeed be exactly one higher than the
	     previous high.  */
	  assert (result == GL(dl_tls_max_dtv_idx) + 1);
	  /* There is no gap anymore.  */
	  GL(dl_tls_dtv_gaps) = false;

	  goto nogaps;
	}
    }
  else
    {
      /* No gaps, allocate a new entry.  */
    nogaps:

      result = ++GL(dl_tls_max_dtv_idx);
    }

  return result;
}
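
/* Worked example of the search above: if ids 1..5 were assigned and
   the module with id 3 has since been unloaded, the scan (starting
   at dl_tls_static_nelem + 1) finds slot 3 with a NULL map and
   reuses id 3.  A scan that finds no free slot instead proves the
   gaps are gone, clears dl_tls_dtv_gaps, and falls through to
   handing out dl_tls_max_dtv_idx + 1.  */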

size_t
internal_function
_dl_count_modids (void)
{
  /* It is rare that we have gaps; see elf/dl-open.c (_dl_open) where
     we fail to load a module and unload it leaving a gap.  If we don't
     have gaps then the number of modids is the current maximum so
     return that.  */
  if (__glibc_likely (!GL(dl_tls_dtv_gaps)))
    return GL(dl_tls_max_dtv_idx);

  /* We have gaps and are forced to count the non-NULL entries.  */
  size_t n = 0;
  struct dtv_slotinfo_list *runp = GL(dl_tls_dtv_slotinfo_list);
  while (runp != NULL)
    {
      for (size_t i = 0; i < runp->len; ++i)
	if (runp->slotinfo[i].map != NULL)
	  ++n;

      runp = runp->next;
    }

  return n;
}

#ifdef SHARED
void
internal_function
_dl_determine_tlsoffset (void)
{
  size_t max_align = TLS_TCB_ALIGN;
  size_t freetop = 0;
  size_t freebottom = 0;

  /* The first element of the dtv slot info list is allocated.  */
  assert (GL(dl_tls_dtv_slotinfo_list) != NULL);
  /* There is at this point only one element in the
     dl_tls_dtv_slotinfo_list list.  */
  assert (GL(dl_tls_dtv_slotinfo_list)->next == NULL);

  struct dtv_slotinfo *slotinfo = GL(dl_tls_dtv_slotinfo_list)->slotinfo;

  /* Determining the offset of the various parts of the static TLS
     block has several dependencies.  In addition we have to work
     around bugs in some toolchains.

     Each TLS block from the objects available at link time has a size
     and an alignment requirement.  The GNU ld computes the alignment
     requirements for the data at the positions *in the file*, though.
     I.e., it is not simply possible to allocate a block with the size
     of the TLS program header entry.  The data is laid out assuming
     that the first byte of the TLS block fulfills

       p_vaddr mod p_align == &TLS_BLOCK mod p_align

     This means we have to add artificial padding at the beginning of
     the TLS block.  These bytes are never used for the TLS data in
     this module but the first byte allocated must be aligned
     according to mod p_align == 0 so that the first byte of the TLS
     block is aligned according to p_vaddr mod p_align.  This is ugly
     and the linker can help by computing the offsets in the TLS block
     assuming the first byte of the TLS block is aligned according to
     p_align.

     The extra space which might be allocated before the first byte of
     the TLS block need not go unused.  The code below tries to use
     that memory for the next TLS block.  This can work if the total
     memory requirement for the next TLS block is smaller than the
     gap.  */

#if TLS_TCB_AT_TP
  /* We simply start with zero.  */
  size_t offset = 0;

  for (size_t cnt = 0; slotinfo[cnt].map != NULL; ++cnt)
    {
      assert (cnt < GL(dl_tls_dtv_slotinfo_list)->len);

      size_t firstbyte = (-slotinfo[cnt].map->l_tls_firstbyte_offset
			  & (slotinfo[cnt].map->l_tls_align - 1));
      size_t off;
      max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);

      if (freebottom - freetop >= slotinfo[cnt].map->l_tls_blocksize)
	{
	  off = roundup (freetop + slotinfo[cnt].map->l_tls_blocksize
			 - firstbyte, slotinfo[cnt].map->l_tls_align)
		+ firstbyte;
	  if (off <= freebottom)
	    {
	      freetop = off;

	      /* XXX For some architectures we perhaps should store the
		 negative offset.  */
	      slotinfo[cnt].map->l_tls_offset = off;
	      continue;
	    }
	}

      off = roundup (offset + slotinfo[cnt].map->l_tls_blocksize - firstbyte,
		     slotinfo[cnt].map->l_tls_align) + firstbyte;
      if (off > offset + slotinfo[cnt].map->l_tls_blocksize
		+ (freebottom - freetop))
	{
	  freetop = offset;
	  freebottom = off - slotinfo[cnt].map->l_tls_blocksize;
	}
      offset = off;

      /* XXX For some architectures we perhaps should store the
	 negative offset.  */
      slotinfo[cnt].map->l_tls_offset = off;
    }

  GL(dl_tls_static_used) = offset;
  GL(dl_tls_static_size) = (roundup (offset + TLS_STATIC_SURPLUS, max_align)
			    + TLS_TCB_SIZE);
#elif TLS_DTV_AT_TP
  /* The TLS blocks start right after the TCB.  */
  size_t offset = TLS_TCB_SIZE;

  for (size_t cnt = 0; slotinfo[cnt].map != NULL; ++cnt)
    {
      assert (cnt < GL(dl_tls_dtv_slotinfo_list)->len);

      size_t firstbyte = (-slotinfo[cnt].map->l_tls_firstbyte_offset
			  & (slotinfo[cnt].map->l_tls_align - 1));
      size_t off;
      max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);

      if (slotinfo[cnt].map->l_tls_blocksize <= freetop - freebottom)
	{
	  off = roundup (freebottom, slotinfo[cnt].map->l_tls_align);
	  if (off - freebottom < firstbyte)
	    off += slotinfo[cnt].map->l_tls_align;
	  if (off + slotinfo[cnt].map->l_tls_blocksize - firstbyte <= freetop)
	    {
	      slotinfo[cnt].map->l_tls_offset = off - firstbyte;
	      freebottom = (off + slotinfo[cnt].map->l_tls_blocksize
			    - firstbyte);
	      continue;
	    }
	}

      off = roundup (offset, slotinfo[cnt].map->l_tls_align);
      if (off - offset < firstbyte)
	off += slotinfo[cnt].map->l_tls_align;

      slotinfo[cnt].map->l_tls_offset = off - firstbyte;
      if (off - firstbyte - offset > freetop - freebottom)
	{
	  freebottom = offset;
	  freetop = off - firstbyte;
	}

      offset = off + slotinfo[cnt].map->l_tls_blocksize - firstbyte;
    }

  GL(dl_tls_static_used) = offset;
  GL(dl_tls_static_size) = roundup (offset + TLS_STATIC_SURPLUS,
				    TLS_TCB_ALIGN);
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif

  /* The alignment requirement for the static TLS block.  */
  GL(dl_tls_static_align) = max_align;
}
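
/* A concrete instance of the firstbyte padding computed above: for a
   module whose PT_TLS segment has p_align == 16 and p_vaddr mod 16
   == 4, l_tls_firstbyte_offset is 4, so firstbyte = (-4 & 15) == 12.
   Placing the block 12 bytes before a 16-byte boundary makes its
   first byte fall on offset 4 mod 16, reproducing the congruence
   p_vaddr mod p_align that the linker assumed when laying out the
   section.  */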

/* This is called only when the data structure setup was skipped at startup,
   when there was no need for it then.  Now we have dynamically loaded
   something needing TLS, or libpthread needs it.  */
int
internal_function
_dl_tls_setup (void)
{
  assert (GL(dl_tls_dtv_slotinfo_list) == NULL);
  assert (GL(dl_tls_max_dtv_idx) == 0);

  const size_t nelem = 2 + TLS_SLOTINFO_SURPLUS;

  GL(dl_tls_dtv_slotinfo_list)
    = calloc (1, (sizeof (struct dtv_slotinfo_list)
		  + nelem * sizeof (struct dtv_slotinfo)));
  if (GL(dl_tls_dtv_slotinfo_list) == NULL)
    return -1;

  GL(dl_tls_dtv_slotinfo_list)->len = nelem;

  /* Number of elements in the static TLS block.  It can't be zero
     because of various assumptions.  The one element is null.  */
  GL(dl_tls_static_nelem) = GL(dl_tls_max_dtv_idx) = 1;

  /* This initializes more variables for us.  */
  _dl_determine_tlsoffset ();

  return 0;
}
rtld_hidden_def (_dl_tls_setup)
#endif

static void *
internal_function
allocate_dtv (void *result)
{
  dtv_t *dtv;
  size_t dtv_length;

  /* We allocate a few more elements in the dtv than are needed for the
     initial set of modules.  This should avoid in most cases expansions
     of the dtv.  */
  dtv_length = GL(dl_tls_max_dtv_idx) + DTV_SURPLUS;
  dtv = calloc (dtv_length + 2, sizeof (dtv_t));
  if (dtv != NULL)
    {
      /* This is the initial length of the dtv.  */
      dtv[0].counter = dtv_length;

      /* The rest of the dtv (including the generation counter) is
	 initialized with zero to indicate nothing there.  */

      /* Add the dtv to the thread data structures.  */
      INSTALL_DTV (result, dtv);
    }
  else
    result = NULL;

  return result;
}
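
/* Layout note: element 0 of the freshly calloc'd array holds the
   length, and INSTALL_DTV (defined in the per-architecture tls.h) is
   expected to install the array advanced by one element.  All later
   accesses in this file therefore see dtv[-1].counter as the
   capacity, dtv[0].counter as the generation, and dtv[1] onward as
   the per-module slots; this is what the "+ 2" in the calloc call
   accounts for.  */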

/* Get size and alignment requirements of the static TLS block.  */
void
internal_function
_dl_get_tls_static_info (size_t *sizep, size_t *alignp)
{
  *sizep = GL(dl_tls_static_size);
  *alignp = GL(dl_tls_static_align);
}

void *
internal_function
_dl_allocate_tls_storage (void)
{
  void *result;
  size_t size = GL(dl_tls_static_size);

#if TLS_DTV_AT_TP
  /* Memory layout is:
     [ TLS_PRE_TCB_SIZE ] [ TLS_TCB_SIZE ] [ TLS blocks ]
			  ^ This should be returned.  */
  size += (TLS_PRE_TCB_SIZE + GL(dl_tls_static_align) - 1)
	  & ~(GL(dl_tls_static_align) - 1);
#endif

  /* Allocate a correctly aligned chunk of memory.  */
  result = __libc_memalign (GL(dl_tls_static_align), size);
  if (__builtin_expect (result != NULL, 1))
    {
      /* Allocate the DTV.  */
      void *allocated = result;

#if TLS_TCB_AT_TP
      /* The TCB follows the TLS blocks.  */
      result = (char *) result + size - TLS_TCB_SIZE;

      /* Clear the TCB data structure.  We can't ask the caller (i.e.
	 libpthread) to do it, because we will initialize the DTV et al.  */
      memset (result, '\0', TLS_TCB_SIZE);
#elif TLS_DTV_AT_TP
      result = (char *) result + size - GL(dl_tls_static_size);

      /* Clear the TCB data structure and TLS_PRE_TCB_SIZE bytes before it.
	 We can't ask the caller (i.e. libpthread) to do it, because we will
	 initialize the DTV et al.  */
      memset ((char *) result - TLS_PRE_TCB_SIZE, '\0',
	      TLS_PRE_TCB_SIZE + TLS_TCB_SIZE);
#endif

      result = allocate_dtv (result);
      if (result == NULL)
	free (allocated);
    }

  return result;
}
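
/* In the TLS_TCB_AT_TP case the picture is mirrored relative to the
   TLS_DTV_AT_TP diagram above:
       [ TLS blocks ] [ TLS_TCB_SIZE ]
		      ^ result points here, at the TCB placed at the
			end of the allocation; module offsets are
			subtracted from it.  */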

void *
internal_function
_dl_allocate_tls_init (void *result)
{
  if (result == NULL)
    /* The memory allocation failed.  */
    return NULL;

  dtv_t *dtv = GET_DTV (result);
  struct dtv_slotinfo_list *listp;
  size_t total = 0;
  size_t maxgen = 0;

  /* We have to prepare the dtv for all currently loaded modules using
     TLS.  For those which are dynamically loaded we add the values
     indicating deferred allocation.  */
  listp = GL(dl_tls_dtv_slotinfo_list);
  while (1)
    {
      size_t cnt;

      for (cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt)
	{
	  struct link_map *map;
	  void *dest;

	  /* Check for the total number of used slots.  */
	  if (total + cnt > GL(dl_tls_max_dtv_idx))
	    break;

	  map = listp->slotinfo[cnt].map;
	  if (map == NULL)
	    /* Unused entry.  */
	    continue;

	  /* Keep track of the maximum generation number.  This might
	     not be the generation counter.  */
	  assert (listp->slotinfo[cnt].gen <= GL(dl_tls_generation));
	  maxgen = MAX (maxgen, listp->slotinfo[cnt].gen);

	  if (map->l_tls_offset == NO_TLS_OFFSET
	      || map->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET)
	    {
	      /* For dynamically loaded modules we simply store
		 the value indicating deferred allocation.  */
	      dtv[map->l_tls_modid].pointer.val = TLS_DTV_UNALLOCATED;
	      dtv[map->l_tls_modid].pointer.is_static = false;
	      continue;
	    }

	  assert (map->l_tls_modid == cnt);
	  assert (map->l_tls_blocksize >= map->l_tls_initimage_size);
#if TLS_TCB_AT_TP
	  assert ((size_t) map->l_tls_offset >= map->l_tls_blocksize);
	  dest = (char *) result - map->l_tls_offset;
#elif TLS_DTV_AT_TP
	  dest = (char *) result + map->l_tls_offset;
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif

	  /* Copy the initialization image and clear the BSS part.  */
	  dtv[map->l_tls_modid].pointer.val = dest;
	  dtv[map->l_tls_modid].pointer.is_static = true;
	  memset (__mempcpy (dest, map->l_tls_initimage,
			     map->l_tls_initimage_size), '\0',
		  map->l_tls_blocksize - map->l_tls_initimage_size);
	}

      total += cnt;
      if (total >= GL(dl_tls_max_dtv_idx))
	break;

      listp = listp->next;
      assert (listp != NULL);
    }

  /* The DTV version is up-to-date now.  */
  dtv[0].counter = maxgen;

  return result;
}
rtld_hidden_def (_dl_allocate_tls_init)

void *
internal_function
_dl_allocate_tls (void *mem)
{
  return _dl_allocate_tls_init (mem == NULL
				? _dl_allocate_tls_storage ()
				: allocate_dtv (mem));
}
rtld_hidden_def (_dl_allocate_tls)

#ifndef SHARED
extern dtv_t _dl_static_dtv[];
# define _dl_initial_dtv (&_dl_static_dtv[1])
#endif

void
internal_function
_dl_deallocate_tls (void *tcb, bool dealloc_tcb)
{
  dtv_t *dtv = GET_DTV (tcb);

  /* We need to free the memory allocated for non-static TLS.  */
  for (size_t cnt = 0; cnt < dtv[-1].counter; ++cnt)
    if (! dtv[1 + cnt].pointer.is_static
	&& dtv[1 + cnt].pointer.val != TLS_DTV_UNALLOCATED)
      free (dtv[1 + cnt].pointer.val);

  /* The array starts with dtv[-1].  */
  if (dtv != GL(dl_initial_dtv))
    free (dtv - 1);

  if (dealloc_tcb)
    {
#if TLS_TCB_AT_TP
      /* The TCB follows the TLS blocks.  Back up to free the whole block.  */
      tcb -= GL(dl_tls_static_size) - TLS_TCB_SIZE;
#elif TLS_DTV_AT_TP
      /* Back up the TLS_PRE_TCB_SIZE bytes.  */
      tcb -= (TLS_PRE_TCB_SIZE + GL(dl_tls_static_align) - 1)
	     & ~(GL(dl_tls_static_align) - 1);
#endif
      free (tcb);
    }
}
rtld_hidden_def (_dl_deallocate_tls)

#ifdef SHARED
/* The __tls_get_addr function has two basic forms which differ in the
   arguments.  The IA-64 form takes two parameters, the module ID and
   offset.  The form used, among others, on IA-32 takes a reference to
   a special structure which contains the same information.  The second
   form seems to be more often used (at the moment) so we default to
   it.  Users of the IA-64 form have to provide adequate definitions
   of the following macros.  */
# ifndef GET_ADDR_ARGS
#  define GET_ADDR_ARGS tls_index *ti
#  define GET_ADDR_PARAM ti
# endif
# ifndef GET_ADDR_MODULE
#  define GET_ADDR_MODULE ti->ti_module
# endif
# ifndef GET_ADDR_OFFSET
#  define GET_ADDR_OFFSET ti->ti_offset
# endif
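
/* Under the default form the structure referenced by GET_ADDR_ARGS
   is the tls_index declared in the per-architecture dl-tls.h,
   typically along the lines of

       typedef struct
       {
	 unsigned long int ti_module;
	 unsigned long int ti_offset;
       } tls_index;

   so a general dynamic access compiles down to passing the address
   of a GOT entry holding this {module, offset} pair.  */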

static void *
allocate_and_init (struct link_map *map)
{
  void *newp;

  newp = __libc_memalign (map->l_tls_align, map->l_tls_blocksize);
  if (newp == NULL)
    oom ();

  /* Initialize the memory.  */
  memset (__mempcpy (newp, map->l_tls_initimage, map->l_tls_initimage_size),
	  '\0', map->l_tls_blocksize - map->l_tls_initimage_size);

  return newp;
}

struct link_map *
_dl_update_slotinfo (unsigned long int req_modid)
{
  struct link_map *the_map = NULL;
  dtv_t *dtv = THREAD_DTV ();

  /* The global dl_tls_dtv_slotinfo array contains for each module
     index the generation counter current when the entry was created.
     This array never shrinks so that all module indices which were
     valid at some time can be used to access it.  Before the first
     use of a new module index in this function the array was extended
     appropriately.  Access also does not have to be guarded against
     modifications of the array.  It is assumed that pointer-size
     values can be read atomically even in SMP environments.  It is
     possible that other threads at the same time dynamically load
     code and therefore add to the slotinfo list.  This is a problem
     since we must not pick up any information about incomplete work.
     The solution to this is to ignore all dtv slots which were
     created after the one we are currently interested in.  We know
     that dynamic loading for this module is completed and this is the
     last load operation we know finished.  */
  unsigned long int idx = req_modid;
  struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);

  while (idx >= listp->len)
    {
      idx -= listp->len;
      listp = listp->next;
    }

  if (dtv[0].counter < listp->slotinfo[idx].gen)
    {
      /* The generation counter for the slot is higher than what the
	 current dtv implements.  We have to update the whole dtv but
	 only those entries with a generation counter <= the one for
	 the entry we need.  */
      size_t new_gen = listp->slotinfo[idx].gen;
      size_t total = 0;

      /* We have to look through the entire dtv slotinfo list.  */
      listp = GL(dl_tls_dtv_slotinfo_list);
      do
	{
	  for (size_t cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt)
	    {
	      size_t gen = listp->slotinfo[cnt].gen;

	      if (gen > new_gen)
		/* This is a slot for a generation younger than the
		   one we are handling now.  It might be incompletely
		   set up so ignore it.  */
		continue;

	      /* If the entry is older than the current dtv layout we
		 know we don't have to handle it.  */
	      if (gen <= dtv[0].counter)
		continue;

	      /* If there is no map this means the entry is empty.  */
	      struct link_map *map = listp->slotinfo[cnt].map;
	      if (map == NULL)
		{
		  /* If this modid was used at some point the memory
		     might still be allocated.  */
		  if (! dtv[total + cnt].pointer.is_static
		      && (dtv[total + cnt].pointer.val
			  != TLS_DTV_UNALLOCATED))
		    {
		      free (dtv[total + cnt].pointer.val);
		      dtv[total + cnt].pointer.val = TLS_DTV_UNALLOCATED;
		    }

		  continue;
		}

	      /* Check whether the current dtv array is large enough.  */
	      size_t modid = map->l_tls_modid;
	      assert (total + cnt == modid);
	      if (dtv[-1].counter < modid)
		{
		  /* Reallocate the dtv.  */
		  dtv_t *newp;
		  size_t newsize = GL(dl_tls_max_dtv_idx) + DTV_SURPLUS;
		  size_t oldsize = dtv[-1].counter;

		  assert (map->l_tls_modid <= newsize);

		  if (dtv == GL(dl_initial_dtv))
		    {
		      /* This is the initial dtv that was allocated
			 during rtld startup using the dl-minimal.c
			 malloc instead of the real malloc.  We can't
			 free it, we have to abandon the old storage.  */

		      newp = malloc ((2 + newsize) * sizeof (dtv_t));
		      if (newp == NULL)
			oom ();
		      memcpy (newp, &dtv[-1], (2 + oldsize) * sizeof (dtv_t));
		    }
		  else
		    {
		      newp = realloc (&dtv[-1],
				      (2 + newsize) * sizeof (dtv_t));
		      if (newp == NULL)
			oom ();
		    }

		  newp[0].counter = newsize;

		  /* Clear the newly allocated part.  */
		  memset (newp + 2 + oldsize, '\0',
			  (newsize - oldsize) * sizeof (dtv_t));

		  /* Point dtv to the generation counter.  */
		  dtv = &newp[1];

		  /* Install this new dtv in the thread data
		     structures.  */
		  INSTALL_NEW_DTV (dtv);
		}

	      /* If there is currently memory allocated for this
		 dtv entry, free it.  */
	      /* XXX Ideally we will at some point create a memory
		 pool.  */
	      if (! dtv[modid].pointer.is_static
		  && dtv[modid].pointer.val != TLS_DTV_UNALLOCATED)
		/* Note that free is called for NULL as well.  We
		   deallocate even if it is this dtv entry we are
		   supposed to load.  The reason is that we call
		   memalign and not malloc.  */
		free (dtv[modid].pointer.val);

	      /* This module is loaded dynamically.  We defer memory
		 allocation.  */
	      dtv[modid].pointer.is_static = false;
	      dtv[modid].pointer.val = TLS_DTV_UNALLOCATED;

	      if (modid == req_modid)
		the_map = map;
	    }

	  total += listp->len;
	}
      while ((listp = listp->next) != NULL);

      /* This will be the new maximum generation counter.  */
      dtv[0].counter = new_gen;
    }

  return the_map;
}
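
/* Example of the cut-off rule implemented above: if this thread's dtv
   is at generation 3, the requested module's slot carries generation
   5, and another module's slot already carries generation 6 from a
   dlopen still in progress, then every slot with 3 < gen <= 5 is
   applied, the gen-6 slot is skipped, and dtv[0].counter becomes 5
   rather than the global generation counter.  */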

static void *
__attribute_noinline__
tls_get_addr_tail (GET_ADDR_ARGS, dtv_t *dtv, struct link_map *the_map)
{
  /* The allocation was deferred.  Do it now.  */
  if (the_map == NULL)
    {
      /* Find the link map for this module.  */
      size_t idx = GET_ADDR_MODULE;
      struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);

      while (idx >= listp->len)
	{
	  idx -= listp->len;
	  listp = listp->next;
	}

      the_map = listp->slotinfo[idx].map;
    }

 again:
  /* Make sure that, if a dlopen running in parallel forces the
     variable into static storage, we'll wait until the address in the
     static TLS block is set up, and use that.  If we're undecided
     yet, make sure we make the decision holding the lock as well.  */
  if (__builtin_expect (the_map->l_tls_offset
			!= FORCED_DYNAMIC_TLS_OFFSET, 0))
    {
      __rtld_lock_lock_recursive (GL(dl_load_lock));
      if (__glibc_likely (the_map->l_tls_offset == NO_TLS_OFFSET))
	{
	  the_map->l_tls_offset = FORCED_DYNAMIC_TLS_OFFSET;
	  __rtld_lock_unlock_recursive (GL(dl_load_lock));
	}
      else
	{
	  __rtld_lock_unlock_recursive (GL(dl_load_lock));
	  if (__builtin_expect (the_map->l_tls_offset
				!= FORCED_DYNAMIC_TLS_OFFSET, 1))
	    {
	      void *p = dtv[GET_ADDR_MODULE].pointer.val;
	      if (__glibc_unlikely (p == TLS_DTV_UNALLOCATED))
		goto again;

	      return (char *) p + GET_ADDR_OFFSET;
	    }
	}
    }
  void *p = dtv[GET_ADDR_MODULE].pointer.val = allocate_and_init (the_map);
  dtv[GET_ADDR_MODULE].pointer.is_static = false;

  return (char *) p + GET_ADDR_OFFSET;
}

static void *
__attribute_noinline__
update_get_addr (GET_ADDR_ARGS)
{
  struct link_map *the_map = _dl_update_slotinfo (GET_ADDR_MODULE);
  dtv_t *dtv = THREAD_DTV ();

  void *p = dtv[GET_ADDR_MODULE].pointer.val;

  if (__glibc_unlikely (p == TLS_DTV_UNALLOCATED))
    return tls_get_addr_tail (GET_ADDR_PARAM, dtv, the_map);

  return (void *) p + GET_ADDR_OFFSET;
}

/* The generic dynamic and local dynamic model cannot be used in
   statically linked applications.  */
void *
__tls_get_addr (GET_ADDR_ARGS)
{
  dtv_t *dtv = THREAD_DTV ();

  if (__glibc_unlikely (dtv[0].counter != GL(dl_tls_generation)))
    return update_get_addr (GET_ADDR_PARAM);

  void *p = dtv[GET_ADDR_MODULE].pointer.val;

  if (__glibc_unlikely (p == TLS_DTV_UNALLOCATED))
    return tls_get_addr_tail (GET_ADDR_PARAM, dtv, NULL);

  return (char *) p + GET_ADDR_OFFSET;
}
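
/* The common case thus costs only two checks: the dtv generation
   matches and the module's slot is already allocated.  Only a miss
   on either takes the out-of-line update_get_addr or
   tls_get_addr_tail paths defined above.  */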

#endif

/* Look up the module's TLS block as for __tls_get_addr,
   but never touch anything.  Return null if it's not allocated yet.  */
void *
_dl_tls_get_addr_soft (struct link_map *l)
{
  if (__glibc_unlikely (l->l_tls_modid == 0))
    /* This module has no TLS segment.  */
    return NULL;

  dtv_t *dtv = THREAD_DTV ();
  if (__glibc_unlikely (dtv[0].counter != GL(dl_tls_generation)))
    {
      /* This thread's DTV is not completely current,
	 but it might already cover this module.  */

      if (l->l_tls_modid >= dtv[-1].counter)
	/* Nope.  */
	return NULL;

      size_t idx = l->l_tls_modid;
      struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);
      while (idx >= listp->len)
	{
	  idx -= listp->len;
	  listp = listp->next;
	}

      /* We've reached the slot for this module.
	 If its generation counter is higher than the DTV's,
	 this thread does not know about this module yet.  */
      if (dtv[0].counter < listp->slotinfo[idx].gen)
	return NULL;
    }

  void *data = dtv[l->l_tls_modid].pointer.val;
  if (__glibc_unlikely (data == TLS_DTV_UNALLOCATED))
    /* The DTV is current, but this thread has not yet needed
       to allocate this module's segment.  */
    data = NULL;

  return data;
}

void
_dl_add_to_slotinfo (struct link_map *l)
{
  /* Now that we know the object is loaded successfully add
     modules containing TLS data to the dtv info table.  We
     might have to increase its size.  */
  struct dtv_slotinfo_list *listp;
  struct dtv_slotinfo_list *prevp;
  size_t idx = l->l_tls_modid;

  /* Find the place in the dtv slotinfo list.  */
  listp = GL(dl_tls_dtv_slotinfo_list);
  prevp = NULL;		/* Needed to shut up gcc.  */
  do
    {
      /* Does it fit in the array of this list element?  */
      if (idx < listp->len)
	break;
      idx -= listp->len;
      prevp = listp;
      listp = listp->next;
    }
  while (listp != NULL);

  if (listp == NULL)
    {
      /* When we come here it means we have to add a new element
	 to the slotinfo list.  And the new module must be in
	 the first slot.  */
      assert (idx == 0);

      listp = prevp->next = (struct dtv_slotinfo_list *)
	malloc (sizeof (struct dtv_slotinfo_list)
		+ TLS_SLOTINFO_SURPLUS * sizeof (struct dtv_slotinfo));
      if (listp == NULL)
	{
	  /* We ran out of memory.  We will simply fail this
	     call but don't undo anything we did so far.  The
	     application will crash or be terminated anyway very
	     soon.  */

	  /* We have to do this since some entries in the dtv
	     slotinfo array might already point to this
	     generation.  */
	  ++GL(dl_tls_generation);

	  _dl_signal_error (ENOMEM, "dlopen", NULL, N_("\
cannot create TLS data structures"));
	}

      listp->len = TLS_SLOTINFO_SURPLUS;
      listp->next = NULL;
      memset (listp->slotinfo, '\0',
	      TLS_SLOTINFO_SURPLUS * sizeof (struct dtv_slotinfo));
    }
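
  /* The assignment below stamps the new slot with the current
     generation plus one: the caller (see elf/dl-open.c) increments
     dl_tls_generation only once all newly loaded objects have been
     added here, so other threads ignore these entries until the
     dlopen as a whole has succeeded.  */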

  /* Add the information into the slotinfo data structure.  */
  listp->slotinfo[idx].map = l;
  listp->slotinfo[idx].gen = GL(dl_tls_generation) + 1;
}