elf/dl-tls.c

/* Thread-local storage handling in the ELF dynamic linker.  Generic version.
   Copyright (C) 2002,2003,2004,2005,2006,2008 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <assert.h>
#include <errno.h>
#include <libintl.h>
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/param.h>

#include <tls.h>
#include <dl-tls.h>
#include <ldsodefs.h>

/* Amount of excess space to allocate in the static TLS area
   to allow dynamic loading of modules defining IE-model TLS data.  */
#define TLS_STATIC_SURPLUS 64 + DL_NNS * 100

/* Value used for dtv entries for which the allocation is delayed.  */
#define TLS_DTV_UNALLOCATED ((void *) -1l)


/* Out-of-memory handler.  */
#ifdef SHARED
static void
__attribute__ ((__noreturn__))
oom (void)
{
  _dl_fatal_printf ("cannot allocate memory for thread-local data: ABORT\n");
}
#endif

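/* Return the TLS module ID to use for the next object with a TLS
   segment.  Gaps left behind by unloaded modules are reused while
   dl_tls_dtv_gaps is set; otherwise dl_tls_max_dtv_idx is simply
   incremented.  */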
size_t
internal_function
_dl_next_tls_modid (void)
{
  size_t result;

  if (__builtin_expect (GL(dl_tls_dtv_gaps), false))
    {
      size_t disp = 0;
      struct dtv_slotinfo_list *runp = GL(dl_tls_dtv_slotinfo_list);

      /* Note that this branch will never be executed during program
         start since there are no gaps at that time.  Therefore it
         does not matter that the dl_tls_dtv_slotinfo is not allocated
         yet when the function is called for the first time.

         NB: the offset +1 is due to the fact that DTV[0] is used
         for something else.  */
      result = GL(dl_tls_static_nelem) + 1;
      if (result <= GL(dl_tls_max_dtv_idx))
        do
          {
            while (result - disp < runp->len)
              {
                if (runp->slotinfo[result - disp].map == NULL)
                  break;

                ++result;
                assert (result <= GL(dl_tls_max_dtv_idx) + 1);
              }

            if (result - disp < runp->len)
              break;

            disp += runp->len;
          }
        while ((runp = runp->next) != NULL);

      if (result > GL(dl_tls_max_dtv_idx))
        {
          /* The new index must indeed be exactly one higher than the
             previous high.  */
          assert (result == GL(dl_tls_max_dtv_idx) + 1);
          /* There is no gap anymore.  */
          GL(dl_tls_dtv_gaps) = false;

          goto nogaps;
        }
    }
  else
    {
      /* No gaps, allocate a new entry.  */
    nogaps:

      result = ++GL(dl_tls_max_dtv_idx);
    }

  return result;
}

#ifdef SHARED
void
internal_function
_dl_determine_tlsoffset (void)
{
  size_t max_align = TLS_TCB_ALIGN;
  size_t freetop = 0;
  size_t freebottom = 0;

  /* The first element of the dtv slot info list is allocated.  */
  assert (GL(dl_tls_dtv_slotinfo_list) != NULL);
  /* There is at this point only one element in the
     dl_tls_dtv_slotinfo_list list.  */
  assert (GL(dl_tls_dtv_slotinfo_list)->next == NULL);

  struct dtv_slotinfo *slotinfo = GL(dl_tls_dtv_slotinfo_list)->slotinfo;

  /* Determining the offset of the various parts of the static TLS
     block has several dependencies.  In addition we have to work
     around bugs in some toolchains.

     Each TLS block from the objects available at link time has a size
     and an alignment requirement.  The GNU ld computes the alignment
     requirements for the data at the positions *in the file*, though.
     I.e., it is not simply possible to allocate a block with the size
     of the TLS program header entry.  The data is laid out assuming
     that the first byte of the TLS block fulfills

       p_vaddr mod p_align == &TLS_BLOCK mod p_align

     This means we have to add artificial padding at the beginning of
     the TLS block.  These bytes are never used for the TLS data in
     this module but the first byte allocated must be aligned
     according to mod p_align == 0 so that the first byte of the TLS
     block is aligned according to p_vaddr mod p_align.  This is ugly
     and the linker can help by computing the offsets in the TLS block
     assuming the first byte of the TLS block is aligned according to
     p_align.

     The extra space which might be allocated before the first byte of
     the TLS block need not go unused.  The code below tries to use
     that memory for the next TLS block.  This can work if the total
     memory requirement for the next TLS block is smaller than the
     gap.  */

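  /* For illustration: if l_tls_align is 16 and l_tls_firstbyte_offset
     (i.e. p_vaddr mod p_align) is 4, the `firstbyte' value computed
     below is (-4) & 15 == 12.  The block is then placed 12 bytes
     before a 16-byte boundary, so its first byte again lands at an
     address congruent to 4 mod 16, and the 12 padding bytes in front
     of it can be reused for a later, smaller block.  */
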
#if TLS_TCB_AT_TP
  /* We simply start with zero.  */
  size_t offset = 0;

  for (size_t cnt = 0; slotinfo[cnt].map != NULL; ++cnt)
    {
      assert (cnt < GL(dl_tls_dtv_slotinfo_list)->len);

      size_t firstbyte = (-slotinfo[cnt].map->l_tls_firstbyte_offset
                          & (slotinfo[cnt].map->l_tls_align - 1));
      size_t off;
      max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);

      if (freebottom - freetop >= slotinfo[cnt].map->l_tls_blocksize)
        {
          off = roundup (freetop + slotinfo[cnt].map->l_tls_blocksize
                         - firstbyte, slotinfo[cnt].map->l_tls_align)
                + firstbyte;
          if (off <= freebottom)
            {
              freetop = off;

              /* XXX For some architectures we perhaps should store the
                 negative offset.  */
              slotinfo[cnt].map->l_tls_offset = off;
              continue;
            }
        }

      off = roundup (offset + slotinfo[cnt].map->l_tls_blocksize - firstbyte,
                     slotinfo[cnt].map->l_tls_align) + firstbyte;
      if (off > offset + slotinfo[cnt].map->l_tls_blocksize
                + (freebottom - freetop))
        {
          freetop = offset;
          freebottom = off - slotinfo[cnt].map->l_tls_blocksize;
        }
      offset = off;

      /* XXX For some architectures we perhaps should store the
         negative offset.  */
      slotinfo[cnt].map->l_tls_offset = off;
    }

  GL(dl_tls_static_used) = offset;
  GL(dl_tls_static_size) = (roundup (offset + TLS_STATIC_SURPLUS, max_align)
                            + TLS_TCB_SIZE);
#elif TLS_DTV_AT_TP
  /* The TLS blocks start right after the TCB.  */
  size_t offset = TLS_TCB_SIZE;

  for (size_t cnt = 0; slotinfo[cnt].map != NULL; ++cnt)
    {
      assert (cnt < GL(dl_tls_dtv_slotinfo_list)->len);

      size_t firstbyte = (-slotinfo[cnt].map->l_tls_firstbyte_offset
                          & (slotinfo[cnt].map->l_tls_align - 1));
      size_t off;
      max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);

      if (slotinfo[cnt].map->l_tls_blocksize <= freetop - freebottom)
        {
          off = roundup (freebottom, slotinfo[cnt].map->l_tls_align);
          if (off - freebottom < firstbyte)
            off += slotinfo[cnt].map->l_tls_align;
          if (off + slotinfo[cnt].map->l_tls_blocksize - firstbyte <= freetop)
            {
              slotinfo[cnt].map->l_tls_offset = off - firstbyte;
              freebottom = (off + slotinfo[cnt].map->l_tls_blocksize
                            - firstbyte);
              continue;
            }
        }

      off = roundup (offset, slotinfo[cnt].map->l_tls_align);
      if (off - offset < firstbyte)
        off += slotinfo[cnt].map->l_tls_align;

      slotinfo[cnt].map->l_tls_offset = off - firstbyte;
      if (off - firstbyte - offset > freetop - freebottom)
        {
          freebottom = offset;
          freetop = off - firstbyte;
        }

      offset = off + slotinfo[cnt].map->l_tls_blocksize - firstbyte;
    }

  GL(dl_tls_static_used) = offset;
  GL(dl_tls_static_size) = roundup (offset + TLS_STATIC_SURPLUS,
                                    TLS_TCB_ALIGN);
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif

  /* The alignment requirement for the static TLS block.  */
  GL(dl_tls_static_align) = max_align;
}

/* This is called only when the data structure setup was skipped at startup,
   when there was no need for it then.  Now we have dynamically loaded
   something needing TLS, or libpthread needs it.  */
int
internal_function
_dl_tls_setup (void)
{
  assert (GL(dl_tls_dtv_slotinfo_list) == NULL);
  assert (GL(dl_tls_max_dtv_idx) == 0);

  const size_t nelem = 2 + TLS_SLOTINFO_SURPLUS;

  GL(dl_tls_dtv_slotinfo_list)
    = calloc (1, (sizeof (struct dtv_slotinfo_list)
                  + nelem * sizeof (struct dtv_slotinfo)));
  if (GL(dl_tls_dtv_slotinfo_list) == NULL)
    return -1;

  GL(dl_tls_dtv_slotinfo_list)->len = nelem;

  /* Number of elements in the static TLS block.  It can't be zero
     because of various assumptions.  The one element is null.  */
  GL(dl_tls_static_nelem) = GL(dl_tls_max_dtv_idx) = 1;

  /* This initializes more variables for us.  */
  _dl_determine_tlsoffset ();

  return 0;
}
rtld_hidden_def (_dl_tls_setup)
#endif

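/* Allocate the DTV for the thread whose TCB or thread pointer area is
   RESULT and install it there.  Returns RESULT, or NULL if the DTV
   could not be allocated.  */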
static void *
internal_function
allocate_dtv (void *result)
{
  dtv_t *dtv;
  size_t dtv_length;

  /* We allocate a few more elements in the dtv than are needed for the
     initial set of modules.  This should avoid in most cases expansions
     of the dtv.  */
  dtv_length = GL(dl_tls_max_dtv_idx) + DTV_SURPLUS;
  dtv = calloc (dtv_length + 2, sizeof (dtv_t));
  if (dtv != NULL)
    {
      /* This is the initial length of the dtv.  */
      dtv[0].counter = dtv_length;

      /* The rest of the dtv (including the generation counter) is
         initialized with zero to indicate nothing there.  */

      /* Add the dtv to the thread data structures.  */
      INSTALL_DTV (result, dtv);
    }
  else
    result = NULL;

  return result;
}

/* Get size and alignment requirements of the static TLS block.  */
void
internal_function
_dl_get_tls_static_info (size_t *sizep, size_t *alignp)
{
  *sizep = GL(dl_tls_static_size);
  *alignp = GL(dl_tls_static_align);
}

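/* Allocate the static TLS area and TCB for a new thread and attach a
   freshly allocated DTV to it.  Returns the pointer to be installed as
   the thread's TCB/thread pointer, or NULL on failure.  */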
void *
internal_function
_dl_allocate_tls_storage (void)
{
  void *result;
  size_t size = GL(dl_tls_static_size);

#if TLS_DTV_AT_TP
  /* Memory layout is:
     [ TLS_PRE_TCB_SIZE ] [ TLS_TCB_SIZE ] [ TLS blocks ]
                          ^ This should be returned.  */
  size += (TLS_PRE_TCB_SIZE + GL(dl_tls_static_align) - 1)
          & ~(GL(dl_tls_static_align) - 1);
#endif

  /* Allocate a correctly aligned chunk of memory.  */
  result = __libc_memalign (GL(dl_tls_static_align), size);
  if (__builtin_expect (result != NULL, 1))
    {
      /* Allocate the DTV.  */
      void *allocated = result;

#if TLS_TCB_AT_TP
      /* The TCB follows the TLS blocks.  */
      result = (char *) result + size - TLS_TCB_SIZE;

      /* Clear the TCB data structure.  We can't ask the caller (i.e.
         libpthread) to do it, because we will initialize the DTV et al.  */
      memset (result, '\0', TLS_TCB_SIZE);
#elif TLS_DTV_AT_TP
      result = (char *) result + size - GL(dl_tls_static_size);

      /* Clear the TCB data structure and TLS_PRE_TCB_SIZE bytes before it.
         We can't ask the caller (i.e. libpthread) to do it, because we will
         initialize the DTV et al.  */
      memset ((char *) result - TLS_PRE_TCB_SIZE, '\0',
              TLS_PRE_TCB_SIZE + TLS_TCB_SIZE);
#endif

      result = allocate_dtv (result);
      if (result == NULL)
        free (allocated);
    }

  return result;
}

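/* Initialize the DTV of the thread whose TCB is RESULT and copy the
   TLS initialization images of all modules that already have a static
   TLS offset assigned; dynamically loaded modules are marked for
   deferred allocation.  Returns RESULT.  */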
void *
internal_function
_dl_allocate_tls_init (void *result)
{
  if (result == NULL)
    /* The memory allocation failed.  */
    return NULL;

  dtv_t *dtv = GET_DTV (result);
  struct dtv_slotinfo_list *listp;
  size_t total = 0;
  size_t maxgen = 0;

  /* We have to prepare the dtv for all currently loaded modules using
     TLS.  For those which are dynamically loaded we add the values
     indicating deferred allocation.  */
  listp = GL(dl_tls_dtv_slotinfo_list);
  while (1)
    {
      size_t cnt;

      for (cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt)
        {
          struct link_map *map;
          void *dest;

          /* Check for the total number of used slots.  */
          if (total + cnt > GL(dl_tls_max_dtv_idx))
            break;

          map = listp->slotinfo[cnt].map;
          if (map == NULL)
            /* Unused entry.  */
            continue;

          /* Keep track of the maximum generation number.  This might
             not be the generation counter.  */
          maxgen = MAX (maxgen, listp->slotinfo[cnt].gen);

          if (map->l_tls_offset == NO_TLS_OFFSET
              || map->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET)
            {
              /* For dynamically loaded modules we simply store
                 the value indicating deferred allocation.  */
              dtv[map->l_tls_modid].pointer.val = TLS_DTV_UNALLOCATED;
              dtv[map->l_tls_modid].pointer.is_static = false;
              continue;
            }

          assert (map->l_tls_modid == cnt);
          assert (map->l_tls_blocksize >= map->l_tls_initimage_size);
#if TLS_TCB_AT_TP
          assert ((size_t) map->l_tls_offset >= map->l_tls_blocksize);
          dest = (char *) result - map->l_tls_offset;
#elif TLS_DTV_AT_TP
          dest = (char *) result + map->l_tls_offset;
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif

          /* Copy the initialization image and clear the BSS part.  */
          dtv[map->l_tls_modid].pointer.val = dest;
          dtv[map->l_tls_modid].pointer.is_static = true;
          memset (__mempcpy (dest, map->l_tls_initimage,
                             map->l_tls_initimage_size), '\0',
                  map->l_tls_blocksize - map->l_tls_initimage_size);
        }

      total += cnt;
      if (total >= GL(dl_tls_max_dtv_idx))
        break;

      listp = listp->next;
      assert (listp != NULL);
    }

  /* The DTV version is up-to-date now.  */
  dtv[0].counter = maxgen;

  return result;
}
rtld_hidden_def (_dl_allocate_tls_init)

void *
internal_function
_dl_allocate_tls (void *mem)
{
  return _dl_allocate_tls_init (mem == NULL
                                ? _dl_allocate_tls_storage ()
                                : allocate_dtv (mem));
}
rtld_hidden_def (_dl_allocate_tls)

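/* Free the dynamically allocated TLS blocks and the DTV of the thread
   whose TCB is TCB; if DEALLOC_TCB is true the static TLS block and
   TCB storage are freed as well.  */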
void
internal_function
_dl_deallocate_tls (void *tcb, bool dealloc_tcb)
{
  dtv_t *dtv = GET_DTV (tcb);

  /* We need to free the memory allocated for non-static TLS.  */
  for (size_t cnt = 0; cnt < dtv[-1].counter; ++cnt)
    if (! dtv[1 + cnt].pointer.is_static
        && dtv[1 + cnt].pointer.val != TLS_DTV_UNALLOCATED)
      free (dtv[1 + cnt].pointer.val);

  /* The array starts with dtv[-1].  */
#ifdef SHARED
  if (dtv != GL(dl_initial_dtv))
#endif
    free (dtv - 1);

  if (dealloc_tcb)
    {
#if TLS_TCB_AT_TP
      /* The TCB follows the TLS blocks.  Back up to free the whole block.  */
      tcb -= GL(dl_tls_static_size) - TLS_TCB_SIZE;
#elif TLS_DTV_AT_TP
      /* Back up the TLS_PRE_TCB_SIZE bytes.  */
      tcb -= (TLS_PRE_TCB_SIZE + GL(dl_tls_static_align) - 1)
             & ~(GL(dl_tls_static_align) - 1);
#endif
      free (tcb);
    }
}
rtld_hidden_def (_dl_deallocate_tls)

#ifdef SHARED
/* The __tls_get_addr function has two basic forms which differ in the
   arguments.  The IA-64 form takes two parameters, the module ID and
   offset.  The form used, among others, on IA-32 takes a reference to
   a special structure which contains the same information.  The second
   form seems to be more often used (at the moment) so we default to
   it.  Users of the IA-64 form have to provide adequate definitions
   of the following macros.  */
# ifndef GET_ADDR_ARGS
#  define GET_ADDR_ARGS tls_index *ti
# endif
# ifndef GET_ADDR_MODULE
#  define GET_ADDR_MODULE ti->ti_module
# endif
# ifndef GET_ADDR_OFFSET
#  define GET_ADDR_OFFSET ti->ti_offset
# endif

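/* Allocate the dynamic TLS block for module MAP, copy its
   initialization image and clear the remainder.  Calls oom () if the
   allocation fails.  */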
static void *
allocate_and_init (struct link_map *map)
{
  void *newp;

  newp = __libc_memalign (map->l_tls_align, map->l_tls_blocksize);
  if (newp == NULL)
    oom ();

  /* Initialize the memory.  */
  memset (__mempcpy (newp, map->l_tls_initimage, map->l_tls_initimage_size),
          '\0', map->l_tls_blocksize - map->l_tls_initimage_size);

  return newp;
}

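/* Bring the calling thread's DTV up to date with the global slotinfo
   list, up to the generation recorded for module REQ_MODID.  Returns
   the link map of REQ_MODID if its DTV entry was reset during the
   update (so the caller still has to allocate its block), otherwise
   NULL.  */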
struct link_map *
_dl_update_slotinfo (unsigned long int req_modid)
{
  struct link_map *the_map = NULL;
  dtv_t *dtv = THREAD_DTV ();

  /* The global dl_tls_dtv_slotinfo array contains for each module
     index the generation counter current when the entry was created.
     This array never shrinks so that all module indices which were
     valid at some time can be used to access it.  Before the first
     use of a new module index in this function the array was extended
     appropriately.  Access also does not have to be guarded against
     modifications of the array.  It is assumed that pointer-size
     values can be read atomically even in SMP environments.  It is
     possible that other threads at the same time dynamically load
     code and therefore add to the slotinfo list.  This is a problem
     since we must not pick up any information about incomplete work.
     The solution to this is to ignore all dtv slots which were
     created after the one we are currently interested in.  We know that
     dynamic loading for this module is completed and this is the last
     load operation we know finished.  */
  unsigned long int idx = req_modid;
  struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);

  while (idx >= listp->len)
    {
      idx -= listp->len;
      listp = listp->next;
    }

  if (dtv[0].counter < listp->slotinfo[idx].gen)
    {
      /* The generation counter for the slot is higher than what the
         current dtv implements.  We have to update the whole dtv but
         only those entries with a generation counter <= the one for
         the entry we need.  */
      size_t new_gen = listp->slotinfo[idx].gen;
      size_t total = 0;

      /* We have to look through the entire dtv slotinfo list.  */
      listp = GL(dl_tls_dtv_slotinfo_list);
      do
        {
          for (size_t cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt)
            {
              size_t gen = listp->slotinfo[cnt].gen;

              if (gen > new_gen)
                /* This is a slot for a generation younger than the
                   one we are handling now.  It might be incompletely
                   set up so ignore it.  */
                continue;

              /* If the entry is older than the current dtv layout we
                 know we don't have to handle it.  */
              if (gen <= dtv[0].counter)
                continue;

              /* If there is no map this means the entry is empty.  */
              struct link_map *map = listp->slotinfo[cnt].map;
              if (map == NULL)
                {
                  /* If this modid was used at some point the memory
                     might still be allocated.  */
                  if (! dtv[total + cnt].pointer.is_static
                      && dtv[total + cnt].pointer.val != TLS_DTV_UNALLOCATED)
                    {
                      free (dtv[total + cnt].pointer.val);
                      dtv[total + cnt].pointer.val = TLS_DTV_UNALLOCATED;
                    }

                  continue;
                }

              /* Check whether the current dtv array is large enough.  */
              size_t modid = map->l_tls_modid;
              assert (total + cnt == modid);
              if (dtv[-1].counter < modid)
                {
                  /* Reallocate the dtv.  */
                  dtv_t *newp;
                  size_t newsize = GL(dl_tls_max_dtv_idx) + DTV_SURPLUS;
                  size_t oldsize = dtv[-1].counter;

                  assert (map->l_tls_modid <= newsize);

                  if (dtv == GL(dl_initial_dtv))
                    {
                      /* This is the initial dtv that was allocated
                         during rtld startup using the dl-minimal.c
                         malloc instead of the real malloc.  We can't
                         free it, we have to abandon the old storage.  */

                      newp = malloc ((2 + newsize) * sizeof (dtv_t));
                      if (newp == NULL)
                        oom ();
                      memcpy (newp, &dtv[-1], (2 + oldsize) * sizeof (dtv_t));
                    }
                  else
                    {
                      newp = realloc (&dtv[-1],
                                      (2 + newsize) * sizeof (dtv_t));
                      if (newp == NULL)
                        oom ();
                    }

                  newp[0].counter = newsize;

                  /* Clear the newly allocated part.  */
                  memset (newp + 2 + oldsize, '\0',
                          (newsize - oldsize) * sizeof (dtv_t));

                  /* Point dtv to the generation counter.  */
                  dtv = &newp[1];

                  /* Install this new dtv in the thread data
                     structures.  */
                  INSTALL_NEW_DTV (dtv);
                }

              /* If there is currently memory allocated for this
                 dtv entry, free it.  */
              /* XXX Ideally we will at some point create a memory
                 pool.  */
              if (! dtv[modid].pointer.is_static
                  && dtv[modid].pointer.val != TLS_DTV_UNALLOCATED)
                /* Note that free is called for NULL as well.  We
                   deallocate even if it is this dtv entry we are
                   supposed to load.  The reason is that we call
                   memalign and not malloc.  */
                free (dtv[modid].pointer.val);

              /* This module is loaded dynamically.  We defer memory
                 allocation.  */
              dtv[modid].pointer.is_static = false;
              dtv[modid].pointer.val = TLS_DTV_UNALLOCATED;

              if (modid == req_modid)
                the_map = map;
            }

          total += listp->len;
        }
      while ((listp = listp->next) != NULL);

      /* This will be the new maximum generation counter.  */
      dtv[0].counter = new_gen;
    }

  return the_map;
}

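/* Slow path of __tls_get_addr: the DTV entry for MODULE is still
   unallocated.  Allocate the block now, unless a concurrently running
   dlopen forces the module into static TLS, in which case wait for
   the static block's address to show up in the DTV and return that.  */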
static void *
__attribute_noinline__
tls_get_addr_tail (dtv_t *dtv, struct link_map *the_map, size_t module)
{
  /* The allocation was deferred.  Do it now.  */
  if (the_map == NULL)
    {
      /* Find the link map for this module.  */
      size_t idx = module;
      struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);

      while (idx >= listp->len)
        {
          idx -= listp->len;
          listp = listp->next;
        }

      the_map = listp->slotinfo[idx].map;
    }

 again:
  /* Make sure that, if a dlopen running in parallel forces the
     variable into static storage, we'll wait until the address in the
     static TLS block is set up, and use that.  If we're undecided
     yet, make sure we make the decision holding the lock as well.  */
  if (__builtin_expect (the_map->l_tls_offset
                        != FORCED_DYNAMIC_TLS_OFFSET, 0))
    {
      __rtld_lock_lock_recursive (GL(dl_load_lock));
      if (__builtin_expect (the_map->l_tls_offset == NO_TLS_OFFSET, 1))
        {
          the_map->l_tls_offset = FORCED_DYNAMIC_TLS_OFFSET;
          __rtld_lock_unlock_recursive (GL(dl_load_lock));
        }
      else
        {
          __rtld_lock_unlock_recursive (GL(dl_load_lock));
          if (__builtin_expect (the_map->l_tls_offset
                                != FORCED_DYNAMIC_TLS_OFFSET, 1))
            {
              void *p = dtv[module].pointer.val;
              if (__builtin_expect (p == TLS_DTV_UNALLOCATED, 0))
                goto again;

              return p;
            }
        }
    }
  void *p = dtv[module].pointer.val = allocate_and_init (the_map);
  dtv[module].pointer.is_static = false;

  return p;
}

/* The generic dynamic and local dynamic model cannot be used in
   statically linked applications.  */
void *
__tls_get_addr (GET_ADDR_ARGS)
{
  dtv_t *dtv = THREAD_DTV ();
  struct link_map *the_map = NULL;
  void *p;

  if (__builtin_expect (dtv[0].counter != GL(dl_tls_generation), 0))
    {
      the_map = _dl_update_slotinfo (GET_ADDR_MODULE);
      dtv = THREAD_DTV ();
    }

  p = dtv[GET_ADDR_MODULE].pointer.val;

  if (__builtin_expect (p == TLS_DTV_UNALLOCATED, 0))
    p = tls_get_addr_tail (dtv, the_map, GET_ADDR_MODULE);

  return (char *) p + GET_ADDR_OFFSET;
}
#endif

/* Look up the module's TLS block as for __tls_get_addr,
   but never touch anything.  Return null if it's not allocated yet.  */
void *
_dl_tls_get_addr_soft (struct link_map *l)
{
  if (__builtin_expect (l->l_tls_modid == 0, 0))
    /* This module has no TLS segment.  */
    return NULL;

  dtv_t *dtv = THREAD_DTV ();
  if (__builtin_expect (dtv[0].counter != GL(dl_tls_generation), 0))
    {
      /* This thread's DTV is not completely current,
         but it might already cover this module.  */

      if (l->l_tls_modid >= dtv[-1].counter)
        /* Nope.  */
        return NULL;

      size_t idx = l->l_tls_modid;
      struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);
      while (idx >= listp->len)
        {
          idx -= listp->len;
          listp = listp->next;
        }

      /* We've reached the slot for this module.
         If its generation counter is higher than the DTV's,
         this thread does not know about this module yet.  */
      if (dtv[0].counter < listp->slotinfo[idx].gen)
        return NULL;
    }

  void *data = dtv[l->l_tls_modid].pointer.val;
  if (__builtin_expect (data == TLS_DTV_UNALLOCATED, 0))
    /* The DTV is current, but this thread has not yet needed
       to allocate this module's segment.  */
    data = NULL;

  return data;
}

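/* Record the link map L in the global dtv slotinfo list at index
   l_tls_modid, growing the list by another element if necessary.  The
   entry is stamped with one more than the current generation
   counter.  */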
void
_dl_add_to_slotinfo (struct link_map *l)
{
  /* Now that we know the object is loaded successfully add
     modules containing TLS data to the dtv info table.  We
     might have to increase its size.  */
  struct dtv_slotinfo_list *listp;
  struct dtv_slotinfo_list *prevp;
  size_t idx = l->l_tls_modid;

  /* Find the place in the dtv slotinfo list.  */
  listp = GL(dl_tls_dtv_slotinfo_list);
  prevp = NULL;		/* Needed to shut up gcc.  */
  do
    {
      /* Does it fit in the array of this list element?  */
      if (idx < listp->len)
        break;
      idx -= listp->len;
      prevp = listp;
      listp = listp->next;
    }
  while (listp != NULL);

  if (listp == NULL)
    {
      /* When we come here it means we have to add a new element
         to the slotinfo list.  And the new module must be in
         the first slot.  */
      assert (idx == 0);

      listp = prevp->next = (struct dtv_slotinfo_list *)
        malloc (sizeof (struct dtv_slotinfo_list)
                + TLS_SLOTINFO_SURPLUS * sizeof (struct dtv_slotinfo));
      if (listp == NULL)
        {
          /* We ran out of memory.  We will simply fail this
             call but don't undo anything we did so far.  The
             application will crash or be terminated anyway very
             soon.  */

          /* We have to do this since some entries in the dtv
             slotinfo array might already point to this
             generation.  */
          ++GL(dl_tls_generation);

          _dl_signal_error (ENOMEM, "dlopen", NULL, N_("\
cannot create TLS data structures"));
        }

      listp->len = TLS_SLOTINFO_SURPLUS;
      listp->next = NULL;
      memset (listp->slotinfo, '\0',
              TLS_SLOTINFO_SURPLUS * sizeof (struct dtv_slotinfo));
    }

  /* Add the information into the slotinfo data structure.  */
  listp->slotinfo[idx].map = l;
  listp->slotinfo[idx].gen = GL(dl_tls_generation) + 1;
}