Remove _dl_initial_dtv
[glibc.git] / elf / dl-tls.c
blob 9d896b7bdd891dd6db2102b9055b9037244874f3
/* Thread-local storage handling in the ELF dynamic linker.  Generic version.
   Copyright (C) 2002-2006,2008,2011,2012 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <libintl.h>
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/param.h>

#include <tls.h>
#include <dl-tls.h>
#include <ldsodefs.h>
/* Amount of excess space to allocate in the static TLS area
   to allow dynamic loading of modules defining IE-model TLS data.  */
#define TLS_STATIC_SURPLUS	64 + DL_NNS * 100


/* Out-of-memory handler.  */
#ifdef SHARED
static void
__attribute__ ((__noreturn__))
oom (void)
{
  _dl_fatal_printf ("cannot allocate memory for thread-local data: ABORT\n");
}
#endif
size_t
internal_function
_dl_next_tls_modid (void)
{
  size_t result;

  if (__builtin_expect (GL(dl_tls_dtv_gaps), false))
    {
      size_t disp = 0;
      struct dtv_slotinfo_list *runp = GL(dl_tls_dtv_slotinfo_list);

      /* Note that this branch will never be executed during program
	 start since there are no gaps at that time.  Therefore it
	 does not matter that the dl_tls_dtv_slotinfo is not allocated
	 yet when the function is called for the first times.

	 NB: the offset +1 is due to the fact that DTV[0] is used
	 for something else.  */
      result = GL(dl_tls_static_nelem) + 1;
      if (result <= GL(dl_tls_max_dtv_idx))
	do
	  {
	    while (result - disp < runp->len)
	      {
		if (runp->slotinfo[result - disp].map == NULL)
		  break;

		++result;
		assert (result <= GL(dl_tls_max_dtv_idx) + 1);
	      }

	    if (result - disp < runp->len)
	      break;

	    disp += runp->len;
	  }
	while ((runp = runp->next) != NULL);

      if (result > GL(dl_tls_max_dtv_idx))
	{
	  /* The new index must indeed be exactly one higher than the
	     previous high.  */
	  assert (result == GL(dl_tls_max_dtv_idx) + 1);
	  /* There is no gap anymore.  */
	  GL(dl_tls_dtv_gaps) = false;

	  goto nogaps;
	}
    }
  else
    {
      /* No gaps, allocate a new entry.  */
    nogaps:

      result = ++GL(dl_tls_max_dtv_idx);
    }

  return result;
}
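
/* For illustration (a scenario, not code in this file): after
   "dlopen (A); dlopen (B); dlclose (A);" the slotinfo entry for A's module
   ID has its map cleared and GL(dl_tls_dtv_gaps) is set, so a subsequent
   "dlopen (C)" of a module with TLS is handed A's old ID by the scan above
   instead of growing GL(dl_tls_max_dtv_idx).  */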
#ifdef SHARED
void
internal_function
_dl_determine_tlsoffset (void)
{
  size_t max_align = TLS_TCB_ALIGN;
  size_t freetop = 0;
  size_t freebottom = 0;

  /* The first element of the dtv slot info list is allocated.  */
  assert (GL(dl_tls_dtv_slotinfo_list) != NULL);
  /* There is at this point only one element in the
     dl_tls_dtv_slotinfo_list list.  */
  assert (GL(dl_tls_dtv_slotinfo_list)->next == NULL);

  struct dtv_slotinfo *slotinfo = GL(dl_tls_dtv_slotinfo_list)->slotinfo;

  /* Determining the offset of the various parts of the static TLS
     block has several dependencies.  In addition we have to work
     around bugs in some toolchains.

     Each TLS block from the objects available at link time has a size
     and an alignment requirement.  The GNU ld computes the alignment
     requirements for the data at the positions *in the file*, though.
     I.e., it is not simply possible to allocate a block with the size
     of the TLS program header entry.  The data is laid out assuming
     that the first byte of the TLS block fulfills

       p_vaddr mod p_align == &TLS_BLOCK mod p_align

     This means we have to add artificial padding at the beginning of
     the TLS block.  These bytes are never used for the TLS data in
     this module but the first byte allocated must be aligned
     according to mod p_align == 0 so that the first byte of the TLS
     block is aligned according to p_vaddr mod p_align.  This is ugly
     and the linker can help by computing the offsets in the TLS block
     assuming the first byte of the TLS block is aligned according to
     p_align.

     The extra space which might be allocated before the first byte of
     the TLS block need not go unused.  The code below tries to use
     that memory for the next TLS block.  This can work if the total
     memory requirement for the next TLS block is smaller than the
     gap.  */
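
  /* Worked example (illustrative numbers only, not taken from any real
     binary): assume a module whose PT_TLS segment has p_align == 16 and
     p_vaddr % p_align == 4, i.e. l_tls_firstbyte_offset == 4.  The code
     below computes

       firstbyte = -4 & (16 - 1) == 12

     and then places the block at an offset OFF with OFF % 16 == 12 (for
     TLS_TCB_AT_TP the data ends up at TP - OFF), so the first byte of the
     block again satisfies &TLS_BLOCK % 16 == 4 == p_vaddr % 16, which is
     the invariant quoted above.  Up to FIRSTBYTE bytes of padding may be
     introduced; the freetop/freebottom bookkeeping lets a later, smaller
     block reuse that gap.  */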
#if TLS_TCB_AT_TP
  /* We simply start with zero.  */
  size_t offset = 0;

  for (size_t cnt = 0; slotinfo[cnt].map != NULL; ++cnt)
    {
      assert (cnt < GL(dl_tls_dtv_slotinfo_list)->len);

      size_t firstbyte = (-slotinfo[cnt].map->l_tls_firstbyte_offset
			  & (slotinfo[cnt].map->l_tls_align - 1));
      size_t off;
      max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);

      if (freebottom - freetop >= slotinfo[cnt].map->l_tls_blocksize)
	{
	  off = roundup (freetop + slotinfo[cnt].map->l_tls_blocksize
			 - firstbyte, slotinfo[cnt].map->l_tls_align)
		+ firstbyte;
	  if (off <= freebottom)
	    {
	      freetop = off;

	      /* XXX For some architectures we perhaps should store the
		 negative offset.  */
	      slotinfo[cnt].map->l_tls_offset = off;
	      continue;
	    }
	}

      off = roundup (offset + slotinfo[cnt].map->l_tls_blocksize - firstbyte,
		     slotinfo[cnt].map->l_tls_align) + firstbyte;
      if (off > offset + slotinfo[cnt].map->l_tls_blocksize
		+ (freebottom - freetop))
	{
	  freetop = offset;
	  freebottom = off - slotinfo[cnt].map->l_tls_blocksize;
	}
      offset = off;

      /* XXX For some architectures we perhaps should store the
	 negative offset.  */
      slotinfo[cnt].map->l_tls_offset = off;
    }

  GL(dl_tls_static_used) = offset;
  GL(dl_tls_static_size) = (roundup (offset + TLS_STATIC_SURPLUS, max_align)
			    + TLS_TCB_SIZE);
#elif TLS_DTV_AT_TP
  /* The TLS blocks start right after the TCB.  */
  size_t offset = TLS_TCB_SIZE;

  for (size_t cnt = 0; slotinfo[cnt].map != NULL; ++cnt)
    {
      assert (cnt < GL(dl_tls_dtv_slotinfo_list)->len);

      size_t firstbyte = (-slotinfo[cnt].map->l_tls_firstbyte_offset
			  & (slotinfo[cnt].map->l_tls_align - 1));
      size_t off;
      max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);

      if (slotinfo[cnt].map->l_tls_blocksize <= freetop - freebottom)
	{
	  off = roundup (freebottom, slotinfo[cnt].map->l_tls_align);
	  if (off - freebottom < firstbyte)
	    off += slotinfo[cnt].map->l_tls_align;
	  if (off + slotinfo[cnt].map->l_tls_blocksize - firstbyte <= freetop)
	    {
	      slotinfo[cnt].map->l_tls_offset = off - firstbyte;
	      freebottom = (off + slotinfo[cnt].map->l_tls_blocksize
			    - firstbyte);
	      continue;
	    }
	}

      off = roundup (offset, slotinfo[cnt].map->l_tls_align);
      if (off - offset < firstbyte)
	off += slotinfo[cnt].map->l_tls_align;

      slotinfo[cnt].map->l_tls_offset = off - firstbyte;
      if (off - firstbyte - offset > freetop - freebottom)
	{
	  freebottom = offset;
	  freetop = off - firstbyte;
	}

      offset = off + slotinfo[cnt].map->l_tls_blocksize - firstbyte;
    }

  GL(dl_tls_static_used) = offset;
  GL(dl_tls_static_size) = roundup (offset + TLS_STATIC_SURPLUS,
				    TLS_TCB_ALIGN);
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif

  /* The alignment requirement for the static TLS block.  */
  GL(dl_tls_static_align) = max_align;
}
/* This is called only when the data structure setup was skipped at startup,
   when there was no need for it then.  Now we have dynamically loaded
   something needing TLS, or libpthread needs it.  */
int
internal_function
_dl_tls_setup (void)
{
  assert (GL(dl_tls_dtv_slotinfo_list) == NULL);
  assert (GL(dl_tls_max_dtv_idx) == 0);

  const size_t nelem = 2 + TLS_SLOTINFO_SURPLUS;

  GL(dl_tls_dtv_slotinfo_list)
    = calloc (1, (sizeof (struct dtv_slotinfo_list)
		  + nelem * sizeof (struct dtv_slotinfo)));
  if (GL(dl_tls_dtv_slotinfo_list) == NULL)
    return -1;

  GL(dl_tls_dtv_slotinfo_list)->len = nelem;

  /* Number of elements in the static TLS block.  It can't be zero
     because of various assumptions.  The one element is null.  */
  GL(dl_tls_static_nelem) = GL(dl_tls_max_dtv_idx) = 1;

  /* This initializes more variables for us.  */
  _dl_determine_tlsoffset ();

  return 0;
}
rtld_hidden_def (_dl_tls_setup)
#endif
static void *
internal_function
allocate_dtv (void *result)
{
  dtv_t *dtv;
  size_t dtv_length;

  /* We allocate a few more elements in the dtv than are needed for the
     initial set of modules.  This should avoid in most cases expansions
     of the dtv.  */
  dtv_length = GL(dl_tls_max_dtv_idx) + DTV_SURPLUS;
  dtv = calloc (dtv_length + 2, sizeof (dtv_t));
  if (dtv != NULL)
    {
      /* This is the initial length of the dtv.  */
      dtv[0].counter = dtv_length;

      /* The rest of the dtv (including the generation counter) is
	 initialized with zero to indicate nothing there.  */

      /* Add the dtv to the thread data structures.  */
      INSTALL_DTV (result, dtv);
    }
  else
    result = NULL;

  return result;
}
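
/* For reference, the DTV layout assumed throughout this file: relative to
   the pointer handed out by GET_DTV, dtv[-1].counter holds the number of
   usable module slots (set from dtv_length above), dtv[0].counter holds the
   generation the vector is up to date with, and dtv[1], dtv[2], ... hold
   one pointer/is_static pair per TLS module ID.  The "+ 2" in the calloc
   call above accounts for the two header entries; the off-by-one adjustment
   itself lives in the per-architecture INSTALL_DTV/GET_DTV macros, not
   here.  */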
/* Get size and alignment requirements of the static TLS block.  */
void
internal_function
_dl_get_tls_static_info (size_t *sizep, size_t *alignp)
{
  *sizep = GL(dl_tls_static_size);
  *alignp = GL(dl_tls_static_align);
}
void *
internal_function
_dl_allocate_tls_storage (void)
{
  void *result;
  size_t size = GL(dl_tls_static_size);

#if TLS_DTV_AT_TP
  /* Memory layout is:
     [ TLS_PRE_TCB_SIZE ] [ TLS_TCB_SIZE ] [ TLS blocks ]
			  ^ This should be returned.  */
  size += (TLS_PRE_TCB_SIZE + GL(dl_tls_static_align) - 1)
	  & ~(GL(dl_tls_static_align) - 1);
#endif

  /* Allocate a correctly aligned chunk of memory.  */
  result = __libc_memalign (GL(dl_tls_static_align), size);
  if (__builtin_expect (result != NULL, 1))
    {
      /* Allocate the DTV.  */
      void *allocated = result;

#if TLS_TCB_AT_TP
      /* The TCB follows the TLS blocks.  */
      result = (char *) result + size - TLS_TCB_SIZE;

      /* Clear the TCB data structure.  We can't ask the caller (i.e.
	 libpthread) to do it, because we will initialize the DTV et al.  */
      memset (result, '\0', TLS_TCB_SIZE);
#elif TLS_DTV_AT_TP
      result = (char *) result + size - GL(dl_tls_static_size);

      /* Clear the TCB data structure and TLS_PRE_TCB_SIZE bytes before it.
	 We can't ask the caller (i.e. libpthread) to do it, because we will
	 initialize the DTV et al.  */
      memset ((char *) result - TLS_PRE_TCB_SIZE, '\0',
	      TLS_PRE_TCB_SIZE + TLS_TCB_SIZE);
#endif

      result = allocate_dtv (result);
      if (result == NULL)
	free (allocated);
    }

  return result;
}
void *
internal_function
_dl_allocate_tls_init (void *result)
{
  if (result == NULL)
    /* The memory allocation failed.  */
    return NULL;

  dtv_t *dtv = GET_DTV (result);
  struct dtv_slotinfo_list *listp;
  size_t total = 0;
  size_t maxgen = 0;

  /* We have to prepare the dtv for all currently loaded modules using
     TLS.  For those which are dynamically loaded we add the values
     indicating deferred allocation.  */
  listp = GL(dl_tls_dtv_slotinfo_list);
  while (1)
    {
      size_t cnt;

      for (cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt)
	{
	  struct link_map *map;
	  void *dest;

	  /* Check for the total number of used slots.  */
	  if (total + cnt > GL(dl_tls_max_dtv_idx))
	    break;

	  map = listp->slotinfo[cnt].map;
	  if (map == NULL)
	    /* Unused entry.  */
	    continue;

	  /* Keep track of the maximum generation number.  This might
	     not be the generation counter.  */
	  maxgen = MAX (maxgen, listp->slotinfo[cnt].gen);

	  if (map->l_tls_offset == NO_TLS_OFFSET
	      || map->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET)
	    {
	      /* For dynamically loaded modules we simply store
		 the value indicating deferred allocation.  */
	      dtv[map->l_tls_modid].pointer.val = TLS_DTV_UNALLOCATED;
	      dtv[map->l_tls_modid].pointer.is_static = false;
	      continue;
	    }

	  assert (map->l_tls_modid == cnt);
	  assert (map->l_tls_blocksize >= map->l_tls_initimage_size);
#if TLS_TCB_AT_TP
	  assert ((size_t) map->l_tls_offset >= map->l_tls_blocksize);
	  dest = (char *) result - map->l_tls_offset;
#elif TLS_DTV_AT_TP
	  dest = (char *) result + map->l_tls_offset;
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif

	  /* Copy the initialization image and clear the BSS part.  */
	  dtv[map->l_tls_modid].pointer.val = dest;
	  dtv[map->l_tls_modid].pointer.is_static = true;
	  memset (__mempcpy (dest, map->l_tls_initimage,
			     map->l_tls_initimage_size), '\0',
		  map->l_tls_blocksize - map->l_tls_initimage_size);
	}

      total += cnt;
      if (total >= GL(dl_tls_max_dtv_idx))
	break;

      listp = listp->next;
      assert (listp != NULL);
    }

  /* The DTV version is up-to-date now.  */
  dtv[0].counter = maxgen;

  return result;
}
rtld_hidden_def (_dl_allocate_tls_init)
void *
internal_function
_dl_allocate_tls (void *mem)
{
  return _dl_allocate_tls_init (mem == NULL
				? _dl_allocate_tls_storage ()
				: allocate_dtv (mem));
}
rtld_hidden_def (_dl_allocate_tls)
#ifndef SHARED
extern dtv_t _dl_static_dtv[];
# define DL_INITIAL_DTV (&_dl_static_dtv[1])
#else
# define DL_INITIAL_DTV GL(dl_initial_dtv)
#endif
void
internal_function
_dl_deallocate_tls (void *tcb, bool dealloc_tcb)
{
  dtv_t *dtv = GET_DTV (tcb);

  /* We need to free the memory allocated for non-static TLS.  */
  for (size_t cnt = 0; cnt < dtv[-1].counter; ++cnt)
    if (! dtv[1 + cnt].pointer.is_static
	&& dtv[1 + cnt].pointer.val != TLS_DTV_UNALLOCATED)
      free (dtv[1 + cnt].pointer.val);

  /* The array starts with dtv[-1].  */
  if (dtv != DL_INITIAL_DTV)
    free (dtv - 1);

  if (dealloc_tcb)
    {
#if TLS_TCB_AT_TP
      /* The TCB follows the TLS blocks.  Back up to free the whole block.  */
      tcb -= GL(dl_tls_static_size) - TLS_TCB_SIZE;
#elif TLS_DTV_AT_TP
      /* Back up the TLS_PRE_TCB_SIZE bytes.  */
      tcb -= (TLS_PRE_TCB_SIZE + GL(dl_tls_static_align) - 1)
	     & ~(GL(dl_tls_static_align) - 1);
#endif
      free (tcb);
    }
}
rtld_hidden_def (_dl_deallocate_tls)
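
/* Illustrative sketch only (hypothetical code, not part of this file): a
   thread library such as libpthread is expected to pair the interfaces
   above roughly as follows when it creates and destroys a thread.  */
#if 0
static void *
example_setup_thread_tls (void)
{
  /* Passing NULL makes _dl_allocate_tls also allocate the static TLS
     block and the TCB via _dl_allocate_tls_storage.  */
  void *tcb = _dl_allocate_tls (NULL);
  /* ... initialize the rest of the thread descriptor around TCB ...  */
  return tcb;
}

static void
example_teardown_thread_tls (void *tcb)
{
  /* DEALLOC_TCB == true: also free the static TLS block and TCB that
     were allocated above.  */
  _dl_deallocate_tls (tcb, true);
}
#endif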
#ifdef SHARED
/* The __tls_get_addr function has two basic forms which differ in the
   arguments.  The IA-64 form takes two parameters, the module ID and
   offset.  The form used, among others, on IA-32 takes a reference to
   a special structure which contains the same information.  The second
   form seems to be more often used (at the moment) so we default to
   it.  Users of the IA-64 form have to provide adequate definitions
   of the following macros.  */
# ifndef GET_ADDR_ARGS
#  define GET_ADDR_ARGS tls_index *ti
#  define GET_ADDR_PARAM ti
# endif
# ifndef GET_ADDR_MODULE
#  define GET_ADDR_MODULE ti->ti_module
# endif
# ifndef GET_ADDR_OFFSET
#  define GET_ADDR_OFFSET ti->ti_offset
# endif


static void *
allocate_and_init (struct link_map *map)
{
  void *newp;

  newp = __libc_memalign (map->l_tls_align, map->l_tls_blocksize);
  if (newp == NULL)
    oom ();

  /* Initialize the memory.  */
  memset (__mempcpy (newp, map->l_tls_initimage, map->l_tls_initimage_size),
	  '\0', map->l_tls_blocksize - map->l_tls_initimage_size);

  return newp;
}
struct link_map *
_dl_update_slotinfo (unsigned long int req_modid)
{
  struct link_map *the_map = NULL;
  dtv_t *dtv = THREAD_DTV ();

  /* The global dl_tls_dtv_slotinfo array contains for each module
     index the generation counter current when the entry was created.
     This array never shrinks so that all module indices which were
     valid at some time can be used to access it.  Before the first
     use of a new module index in this function the array was extended
     appropriately.  Access also does not have to be guarded against
     modifications of the array.  It is assumed that pointer-size
     values can be read atomically even in SMP environments.  It is
     possible that other threads at the same time dynamically load
     code and therefore add to the slotinfo list.  This is a problem
     since we must not pick up any information about incomplete work.
     The solution to this is to ignore all dtv slots which were
     created after the one we are currently interested in.  We know
     that dynamic loading for this module is completed and this is the
     last load operation we know finished.  */
  unsigned long int idx = req_modid;
  struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);

  while (idx >= listp->len)
    {
      idx -= listp->len;
      listp = listp->next;
    }

  if (dtv[0].counter < listp->slotinfo[idx].gen)
    {
      /* The generation counter for the slot is higher than what the
	 current dtv implements.  We have to update the whole dtv but
	 only those entries with a generation counter <= the one for
	 the entry we need.  */
      size_t new_gen = listp->slotinfo[idx].gen;
      size_t total = 0;

      /* We have to look through the entire dtv slotinfo list.  */
      listp = GL(dl_tls_dtv_slotinfo_list);
      do
	{
	  for (size_t cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt)
	    {
	      size_t gen = listp->slotinfo[cnt].gen;

	      if (gen > new_gen)
		/* This is a slot for a generation younger than the
		   one we are handling now.  It might be incompletely
		   set up so ignore it.  */
		continue;

	      /* If the entry is older than the current dtv layout we
		 know we don't have to handle it.  */
	      if (gen <= dtv[0].counter)
		continue;

	      /* If there is no map this means the entry is empty.  */
	      struct link_map *map = listp->slotinfo[cnt].map;
	      if (map == NULL)
		{
		  /* If this modid was used at some point the memory
		     might still be allocated.  */
		  if (! dtv[total + cnt].pointer.is_static
		      && dtv[total + cnt].pointer.val != TLS_DTV_UNALLOCATED)
		    {
		      free (dtv[total + cnt].pointer.val);
		      dtv[total + cnt].pointer.val = TLS_DTV_UNALLOCATED;
		    }

		  continue;
		}

	      /* Check whether the current dtv array is large enough.  */
	      size_t modid = map->l_tls_modid;
	      assert (total + cnt == modid);
	      if (dtv[-1].counter < modid)
		{
		  /* Reallocate the dtv.  */
		  dtv_t *newp;
		  size_t newsize = GL(dl_tls_max_dtv_idx) + DTV_SURPLUS;
		  size_t oldsize = dtv[-1].counter;

		  assert (map->l_tls_modid <= newsize);

		  if (dtv == GL(dl_initial_dtv))
		    {
		      /* This is the initial dtv that was allocated
			 during rtld startup using the dl-minimal.c
			 malloc instead of the real malloc.  We can't
			 free it, we have to abandon the old storage.  */

		      newp = malloc ((2 + newsize) * sizeof (dtv_t));
		      if (newp == NULL)
			oom ();
		      memcpy (newp, &dtv[-1], (2 + oldsize) * sizeof (dtv_t));
		    }
		  else
		    {
		      newp = realloc (&dtv[-1],
				      (2 + newsize) * sizeof (dtv_t));
		      if (newp == NULL)
			oom ();
		    }

		  newp[0].counter = newsize;

		  /* Clear the newly allocated part.  */
		  memset (newp + 2 + oldsize, '\0',
			  (newsize - oldsize) * sizeof (dtv_t));

		  /* Point dtv to the generation counter.  */
		  dtv = &newp[1];

		  /* Install this new dtv in the thread data
		     structures.  */
		  INSTALL_NEW_DTV (dtv);
		}

	      /* If there is currently memory allocated for this
		 dtv entry, free it.  */
	      /* XXX Ideally we will at some point create a memory
		 pool.  */
	      if (! dtv[modid].pointer.is_static
		  && dtv[modid].pointer.val != TLS_DTV_UNALLOCATED)
		/* Note that free is called for NULL as well.  We
		   deallocate even if it is this dtv entry we are
		   supposed to load.  The reason is that we call
		   memalign and not malloc.  */
		free (dtv[modid].pointer.val);

	      /* This module is loaded dynamically.  We defer memory
		 allocation.  */
	      dtv[modid].pointer.is_static = false;
	      dtv[modid].pointer.val = TLS_DTV_UNALLOCATED;

	      if (modid == req_modid)
		the_map = map;
	    }

	  total += listp->len;
	}
      while ((listp = listp->next) != NULL);

      /* This will be the new maximum generation counter.  */
      dtv[0].counter = new_gen;
    }

  return the_map;
}
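
/* To summarize the protocol implemented above: GL(dl_tls_generation) is
   bumped whenever dlopen/dlclose changes the set of modules with TLS, each
   slotinfo entry records the generation in which its module ID became
   valid, and dtv[0].counter records the generation a thread's DTV has
   caught up with.  _dl_update_slotinfo replays every slotinfo change with a
   generation newer than dtv[0].counter but no newer than the one for the
   requested module, so partially completed dlopen calls running in other
   threads are never observed.  */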
static void *
__attribute_noinline__
tls_get_addr_tail (GET_ADDR_ARGS, dtv_t *dtv, struct link_map *the_map)
{
  /* The allocation was deferred.  Do it now.  */
  if (the_map == NULL)
    {
      /* Find the link map for this module.  */
      size_t idx = GET_ADDR_MODULE;
      struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);

      while (idx >= listp->len)
	{
	  idx -= listp->len;
	  listp = listp->next;
	}

      the_map = listp->slotinfo[idx].map;
    }

 again:
  /* Make sure that, if a dlopen running in parallel forces the
     variable into static storage, we'll wait until the address in the
     static TLS block is set up, and use that.  If we're undecided
     yet, make sure we make the decision holding the lock as well.  */
  if (__builtin_expect (the_map->l_tls_offset
			!= FORCED_DYNAMIC_TLS_OFFSET, 0))
    {
      __rtld_lock_lock_recursive (GL(dl_load_lock));
      if (__builtin_expect (the_map->l_tls_offset == NO_TLS_OFFSET, 1))
	{
	  the_map->l_tls_offset = FORCED_DYNAMIC_TLS_OFFSET;
	  __rtld_lock_unlock_recursive (GL(dl_load_lock));
	}
      else
	{
	  __rtld_lock_unlock_recursive (GL(dl_load_lock));
	  if (__builtin_expect (the_map->l_tls_offset
				!= FORCED_DYNAMIC_TLS_OFFSET, 1))
	    {
	      void *p = dtv[GET_ADDR_MODULE].pointer.val;
	      if (__builtin_expect (p == TLS_DTV_UNALLOCATED, 0))
		goto again;

	      return (char *) p + GET_ADDR_OFFSET;
	    }
	}
    }
  void *p = dtv[GET_ADDR_MODULE].pointer.val = allocate_and_init (the_map);
  dtv[GET_ADDR_MODULE].pointer.is_static = false;

  return (char *) p + GET_ADDR_OFFSET;
}
static void *
__attribute_noinline__
update_get_addr (GET_ADDR_ARGS)
{
  struct link_map *the_map = _dl_update_slotinfo (GET_ADDR_MODULE);
  dtv_t *dtv = THREAD_DTV ();

  void *p = dtv[GET_ADDR_MODULE].pointer.val;

  if (__builtin_expect (p == TLS_DTV_UNALLOCATED, 0))
    return tls_get_addr_tail (GET_ADDR_PARAM, dtv, the_map);

  return (void *) p + GET_ADDR_OFFSET;
}
/* The generic dynamic and local dynamic model cannot be used in
   statically linked applications.  */
void *
__tls_get_addr (GET_ADDR_ARGS)
{
  dtv_t *dtv = THREAD_DTV ();

  if (__builtin_expect (dtv[0].counter != GL(dl_tls_generation), 0))
    return update_get_addr (GET_ADDR_PARAM);

  void *p = dtv[GET_ADDR_MODULE].pointer.val;

  if (__builtin_expect (p == TLS_DTV_UNALLOCATED, 0))
    return tls_get_addr_tail (GET_ADDR_PARAM, dtv, NULL);

  return (char *) p + GET_ADDR_OFFSET;
}
#endif
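
/* Illustrative note (not part of this file): in the general dynamic model
   the compiler and linker materialize a tls_index { ti_module, ti_offset }
   pair in the GOT for each accessed variable, and the generated code passes
   its address to __tls_get_addr.  A hypothetical access such as

     extern __thread int foo;   // defined in a dlopen'ed module
     int get_foo (void) { return foo; }

   therefore compiles, conceptually, into
   "return *(int *) __tls_get_addr (&foo_tls_index);", where the dynamic
   linker fills in ti_module and ti_offset through the DTPMOD/DTPOFF style
   relocations rather than the program computing them by hand.  */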
/* Look up the module's TLS block as for __tls_get_addr,
   but never touch anything.  Return null if it's not allocated yet.  */
void *
_dl_tls_get_addr_soft (struct link_map *l)
{
  if (__builtin_expect (l->l_tls_modid == 0, 0))
    /* This module has no TLS segment.  */
    return NULL;

  dtv_t *dtv = THREAD_DTV ();
  if (__builtin_expect (dtv[0].counter != GL(dl_tls_generation), 0))
    {
      /* This thread's DTV is not completely current,
	 but it might already cover this module.  */

      if (l->l_tls_modid >= dtv[-1].counter)
	/* Nope.  */
	return NULL;

      size_t idx = l->l_tls_modid;
      struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);
      while (idx >= listp->len)
	{
	  idx -= listp->len;
	  listp = listp->next;
	}

      /* We've reached the slot for this module.
	 If its generation counter is higher than the DTV's,
	 this thread does not know about this module yet.  */
      if (dtv[0].counter < listp->slotinfo[idx].gen)
	return NULL;
    }

  void *data = dtv[l->l_tls_modid].pointer.val;
  if (__builtin_expect (data == TLS_DTV_UNALLOCATED, 0))
    /* The DTV is current, but this thread has not yet needed
       to allocate this module's segment.  */
    data = NULL;

  return data;
}
void
_dl_add_to_slotinfo (struct link_map *l)
{
  /* Now that we know the object is loaded successfully add
     modules containing TLS data to the dtv info table.  We
     might have to increase its size.  */
  struct dtv_slotinfo_list *listp;
  struct dtv_slotinfo_list *prevp;
  size_t idx = l->l_tls_modid;

  /* Find the place in the dtv slotinfo list.  */
  listp = GL(dl_tls_dtv_slotinfo_list);
  prevp = NULL;		/* Needed to shut up gcc.  */
  do
    {
      /* Does it fit in the array of this list element?  */
      if (idx < listp->len)
	break;
      idx -= listp->len;
      prevp = listp;
      listp = listp->next;
    }
  while (listp != NULL);

  if (listp == NULL)
    {
      /* When we come here it means we have to add a new element
	 to the slotinfo list.  And the new module must be in
	 the first slot.  */
      assert (idx == 0);

      listp = prevp->next = (struct dtv_slotinfo_list *)
	malloc (sizeof (struct dtv_slotinfo_list)
		+ TLS_SLOTINFO_SURPLUS * sizeof (struct dtv_slotinfo));
      if (listp == NULL)
	{
	  /* We ran out of memory.  We will simply fail this
	     call but don't undo anything we did so far.  The
	     application will crash or be terminated anyway very
	     soon.  */

	  /* We have to do this since some entries in the dtv
	     slotinfo array might already point to this
	     generation.  */
	  ++GL(dl_tls_generation);

	  _dl_signal_error (ENOMEM, "dlopen", NULL, N_("\
cannot create TLS data structures"));
	}

      listp->len = TLS_SLOTINFO_SURPLUS;
      listp->next = NULL;
      memset (listp->slotinfo, '\0',
	      TLS_SLOTINFO_SURPLUS * sizeof (struct dtv_slotinfo));
    }

  /* Add the information into the slotinfo data structure.  */
  listp->slotinfo[idx].map = l;
  listp->slotinfo[idx].gen = GL(dl_tls_generation) + 1;
}