[glibc.git] / elf / dl-tls.c
1 /* Thread-local storage handling in the ELF dynamic linker. Generic version.
2 Copyright (C) 2002,2003,2004,2005,2006 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, write to the Free
17 Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
18 02111-1307 USA. */
20 #include <assert.h>
21 #include <errno.h>
22 #include <libintl.h>
23 #include <signal.h>
24 #include <stdlib.h>
25 #include <unistd.h>
26 #include <sys/param.h>
28 #include <tls.h>
30 /* We don't need any of this if TLS is not supported. */
31 #ifdef USE_TLS
33 # include <dl-tls.h>
34 # include <ldsodefs.h>
36 /* Amount of excess space to allocate in the static TLS area
37 to allow dynamic loading of modules defining IE-model TLS data. */
38 # define TLS_STATIC_SURPLUS 64 + DL_NNS * 100
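/* [Editorial note, not part of the original file: with the usual value of
   DL_NNS (16 link-map namespaces) this surplus works out to
   64 + 16 * 100 = 1664 bytes of spare static TLS kept available for
   dlopen'ed objects that use the initial-exec (IE) access model.] */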
40 /* Value used for dtv entries for which the allocation is delayed. */
41 # define TLS_DTV_UNALLOCATED ((void *) -1l)
44 /* Out-of-memory handler. */
45 # ifdef SHARED
46 static void
47 __attribute__ ((__noreturn__))
48 oom (void)
50 _dl_fatal_printf ("cannot allocate memory for thread-local data: ABORT\n");
52 # endif
55 size_t
56 internal_function
57 _dl_next_tls_modid (void)
59 size_t result;
61 if (__builtin_expect (GL(dl_tls_dtv_gaps), false))
63 size_t disp = 0;
64 struct dtv_slotinfo_list *runp = GL(dl_tls_dtv_slotinfo_list);
66 /* Note that this branch will never be executed during program
67 start since there are no gaps at that time. Therefore it
68 does not matter that the dl_tls_dtv_slotinfo is not allocated
 69        yet when the function is called for the first time.
71 NB: the offset +1 is due to the fact that DTV[0] is used
72 for something else. */
73 result = GL(dl_tls_static_nelem) + 1;
74 if (result <= GL(dl_tls_max_dtv_idx))
77 while (result - disp < runp->len)
79 if (runp->slotinfo[result - disp].map == NULL)
80 break;
82 ++result;
83 assert (result <= GL(dl_tls_max_dtv_idx) + 1);
86 if (result - disp < runp->len)
87 break;
89 disp += runp->len;
91 while ((runp = runp->next) != NULL);
93 if (result > GL(dl_tls_max_dtv_idx))
95 /* The new index must indeed be exactly one higher than the
96 previous high. */
97 assert (result == GL(dl_tls_max_dtv_idx) + 1);
98 /* There is no gap anymore. */
99 GL(dl_tls_dtv_gaps) = false;
101 goto nogaps;
104 else
106 /* No gaps, allocate a new entry. */
107 nogaps:
109 result = ++GL(dl_tls_max_dtv_idx);
112 return result;
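/* [Editorial example, not part of the original file: if TLS modules
   currently occupy ids 1-5 and the module with id 3 has since been
   unloaded, dl_tls_dtv_gaps is set and the scan above returns 3 again
   before ever handing out 6.  With no gaps the fast path simply
   increments dl_tls_max_dtv_idx.] */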
116 # ifdef SHARED
117 void
118 internal_function
119 _dl_determine_tlsoffset (void)
121 size_t max_align = TLS_TCB_ALIGN;
122 size_t freetop = 0;
123 size_t freebottom = 0;
125 /* The first element of the dtv slot info list is allocated. */
126 assert (GL(dl_tls_dtv_slotinfo_list) != NULL);
127 /* There is at this point only one element in the
128 dl_tls_dtv_slotinfo_list list. */
129 assert (GL(dl_tls_dtv_slotinfo_list)->next == NULL);
131 struct dtv_slotinfo *slotinfo = GL(dl_tls_dtv_slotinfo_list)->slotinfo;
133 /* Determining the offset of the various parts of the static TLS
134 block has several dependencies. In addition we have to work
135 around bugs in some toolchains.
137 Each TLS block from the objects available at link time has a size
138 and an alignment requirement. The GNU ld computes the alignment
139 requirements for the data at the positions *in the file*, though.
140    I.e., it is not simply possible to allocate a block with the size
141    of the TLS program header entry. The data is laid out assuming
142 that the first byte of the TLS block fulfills
144 p_vaddr mod p_align == &TLS_BLOCK mod p_align
146 This means we have to add artificial padding at the beginning of
147 the TLS block. These bytes are never used for the TLS data in
148 this module but the first byte allocated must be aligned
149 according to mod p_align == 0 so that the first byte of the TLS
150 block is aligned according to p_vaddr mod p_align. This is ugly
151 and the linker can help by computing the offsets in the TLS block
152 assuming the first byte of the TLS block is aligned according to
153 p_align.
155 The extra space which might be allocated before the first byte of
156 the TLS block need not go unused. The code below tries to use
157 that memory for the next TLS block. This can work if the total
158 memory requirement for the next TLS block is smaller than the
159 gap. */
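/* [Editorial example, not part of the original file: the artificial
   padding described above is what l_tls_firstbyte_offset captures.
   Suppose a module's TLS segment has p_align == 16 and
   p_vaddr mod 16 == 4.  Then l_tls_firstbyte_offset is 4 and the code
   below computes

       firstbyte = (-4) & (16 - 1) = 12

   so in both layouts the block's first byte ends up at an address
   congruent to 4 mod 16, i.e. to p_vaddr mod p_align, with up to 12
   bytes of padding in front of it that the freetop/freebottom
   bookkeeping tries to reuse for a later, smaller block.] */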
161 # if TLS_TCB_AT_TP
162 /* We simply start with zero. */
163 size_t offset = 0;
165 for (size_t cnt = 0; slotinfo[cnt].map != NULL; ++cnt)
167 assert (cnt < GL(dl_tls_dtv_slotinfo_list)->len);
169 size_t firstbyte = (-slotinfo[cnt].map->l_tls_firstbyte_offset
170 & (slotinfo[cnt].map->l_tls_align - 1));
171 size_t off;
172 max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);
174 if (freebottom - freetop >= slotinfo[cnt].map->l_tls_blocksize)
176 off = roundup (freetop + slotinfo[cnt].map->l_tls_blocksize
177 - firstbyte, slotinfo[cnt].map->l_tls_align)
178 + firstbyte;
179 if (off <= freebottom)
181 freetop = off;
183 /* XXX For some architectures we perhaps should store the
184 negative offset. */
185 slotinfo[cnt].map->l_tls_offset = off;
186 continue;
190 off = roundup (offset + slotinfo[cnt].map->l_tls_blocksize - firstbyte,
191 slotinfo[cnt].map->l_tls_align) + firstbyte;
192 if (off > offset + slotinfo[cnt].map->l_tls_blocksize
193 + (freebottom - freetop))
195 freetop = offset;
196 freebottom = off - slotinfo[cnt].map->l_tls_blocksize;
198 offset = off;
200 /* XXX For some architectures we perhaps should store the
201 negative offset. */
202 slotinfo[cnt].map->l_tls_offset = off;
205 GL(dl_tls_static_used) = offset;
206 GL(dl_tls_static_size) = (roundup (offset + TLS_STATIC_SURPLUS, max_align)
207 + TLS_TCB_SIZE);
208 # elif TLS_DTV_AT_TP
209 /* The TLS blocks start right after the TCB. */
210 size_t offset = TLS_TCB_SIZE;
212 for (size_t cnt = 0; slotinfo[cnt].map != NULL; ++cnt)
214 assert (cnt < GL(dl_tls_dtv_slotinfo_list)->len);
216 size_t firstbyte = (-slotinfo[cnt].map->l_tls_firstbyte_offset
217 & (slotinfo[cnt].map->l_tls_align - 1));
218 size_t off;
219 max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);
221 if (slotinfo[cnt].map->l_tls_blocksize <= freetop - freebottom)
223 off = roundup (freebottom, slotinfo[cnt].map->l_tls_align);
224 if (off - freebottom < firstbyte)
225 off += slotinfo[cnt].map->l_tls_align;
226 if (off + slotinfo[cnt].map->l_tls_blocksize - firstbyte <= freetop)
228 slotinfo[cnt].map->l_tls_offset = off - firstbyte;
229 freebottom = (off + slotinfo[cnt].map->l_tls_blocksize
230 - firstbyte);
231 continue;
235 off = roundup (offset, slotinfo[cnt].map->l_tls_align);
236 if (off - offset < firstbyte)
237 off += slotinfo[cnt].map->l_tls_align;
239 slotinfo[cnt].map->l_tls_offset = off - firstbyte;
240 if (off - firstbyte - offset > freetop - freebottom)
242 freebottom = offset;
243 freetop = off - firstbyte;
246 offset = off + slotinfo[cnt].map->l_tls_blocksize - firstbyte;
249 GL(dl_tls_static_used) = offset;
250 GL(dl_tls_static_size) = roundup (offset + TLS_STATIC_SURPLUS,
251 TLS_TCB_ALIGN);
252 # else
253 # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
254 # endif
256 /* The alignment requirement for the static TLS block. */
257 GL(dl_tls_static_align) = max_align;
261 /* This is called only when the data structure setup was skipped at startup,
262 when there was no need for it then. Now we have dynamically loaded
263 something needing TLS, or libpthread needs it. */
264 int
265 internal_function
266 _dl_tls_setup (void)
268 assert (GL(dl_tls_dtv_slotinfo_list) == NULL);
269 assert (GL(dl_tls_max_dtv_idx) == 0);
271 const size_t nelem = 2 + TLS_SLOTINFO_SURPLUS;
273 GL(dl_tls_dtv_slotinfo_list)
274 = calloc (1, (sizeof (struct dtv_slotinfo_list)
275 + nelem * sizeof (struct dtv_slotinfo)));
276 if (GL(dl_tls_dtv_slotinfo_list) == NULL)
277 return -1;
279 GL(dl_tls_dtv_slotinfo_list)->len = nelem;
281 /* Number of elements in the static TLS block. It can't be zero
282 because of various assumptions. The one element is null. */
283 GL(dl_tls_static_nelem) = GL(dl_tls_max_dtv_idx) = 1;
285 /* This initializes more variables for us. */
286 _dl_determine_tlsoffset ();
288 return 0;
290 rtld_hidden_def (_dl_tls_setup)
291 # endif
293 static void *
294 internal_function
295 allocate_dtv (void *result)
297 dtv_t *dtv;
298 size_t dtv_length;
300 /* We allocate a few more elements in the dtv than are needed for the
301 initial set of modules. This should avoid in most cases expansions
302 of the dtv. */
303 dtv_length = GL(dl_tls_max_dtv_idx) + DTV_SURPLUS;
304 dtv = calloc (dtv_length + 2, sizeof (dtv_t));
305 if (dtv != NULL)
307 /* This is the initial length of the dtv. */
308 dtv[0].counter = dtv_length;
310 /* The rest of the dtv (including the generation counter) is
311        initialized with zero to indicate that nothing is there.  */
313 /* Add the dtv to the thread data structures. */
314 INSTALL_DTV (result, dtv);
316 else
317 result = NULL;
319 return result;
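/* [Editorial sketch, not part of the original file: assuming the common
   INSTALL_DTV convention of installing DTV+1, the array allocated above
   is used as

       installed dtv[-1].counter   number of usable entries (dtv_length)
       installed dtv[0].counter    generation count of this dtv
       installed dtv[1..N]         one entry per TLS module id

   which is why _dl_deallocate_tls below frees dtv - 1 and why module
   ids start at 1.] */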
323 /* Get size and alignment requirements of the static TLS block. */
324 void
325 internal_function
326 _dl_get_tls_static_info (size_t *sizep, size_t *alignp)
328 *sizep = GL(dl_tls_static_size);
329 *alignp = GL(dl_tls_static_align);
333 void *
334 internal_function
335 _dl_allocate_tls_storage (void)
337 void *result;
338 size_t size = GL(dl_tls_static_size);
340 # if TLS_DTV_AT_TP
341 /* Memory layout is:
342 [ TLS_PRE_TCB_SIZE ] [ TLS_TCB_SIZE ] [ TLS blocks ]
343 ^ This should be returned. */
344 size += (TLS_PRE_TCB_SIZE + GL(dl_tls_static_align) - 1)
345 & ~(GL(dl_tls_static_align) - 1);
346 # endif
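/* [Editorial example, not part of the original file, using hypothetical
   numbers: if TLS_PRE_TCB_SIZE were 1184 and dl_tls_static_align 64,
   the expression above rounds the pre-TCB area up to the next multiple
   of the alignment,

       (1184 + 64 - 1) & ~(64 - 1) == 1216

   so the TCB pointer returned further down stays correctly aligned.] */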
348 /* Allocate a correctly aligned chunk of memory. */
349 result = __libc_memalign (GL(dl_tls_static_align), size);
350 if (__builtin_expect (result != NULL, 1))
352 /* Allocate the DTV. */
353 void *allocated = result;
355 # if TLS_TCB_AT_TP
356 /* The TCB follows the TLS blocks. */
357 result = (char *) result + size - TLS_TCB_SIZE;
359 /* Clear the TCB data structure. We can't ask the caller (i.e.
360 libpthread) to do it, because we will initialize the DTV et al. */
361 memset (result, '\0', TLS_TCB_SIZE);
362 # elif TLS_DTV_AT_TP
363 result = (char *) result + size - GL(dl_tls_static_size);
365 /* Clear the TCB data structure and TLS_PRE_TCB_SIZE bytes before it.
366 We can't ask the caller (i.e. libpthread) to do it, because we will
367 initialize the DTV et al. */
368 memset ((char *) result - TLS_PRE_TCB_SIZE, '\0',
369 TLS_PRE_TCB_SIZE + TLS_TCB_SIZE);
370 # endif
372 result = allocate_dtv (result);
373 if (result == NULL)
374 free (allocated);
377 return result;
381 void *
382 internal_function
383 _dl_allocate_tls_init (void *result)
385 if (result == NULL)
386 /* The memory allocation failed. */
387 return NULL;
389 dtv_t *dtv = GET_DTV (result);
390 struct dtv_slotinfo_list *listp;
391 size_t total = 0;
392 size_t maxgen = 0;
394 /* We have to prepare the dtv for all currently loaded modules using
395 TLS. For those which are dynamically loaded we add the values
396 indicating deferred allocation. */
397 listp = GL(dl_tls_dtv_slotinfo_list);
398 while (1)
400 size_t cnt;
402 for (cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt)
404 struct link_map *map;
405 void *dest;
407 /* Check for the total number of used slots. */
408 if (total + cnt > GL(dl_tls_max_dtv_idx))
409 break;
411 map = listp->slotinfo[cnt].map;
412 if (map == NULL)
413 /* Unused entry. */
414 continue;
416 /* Keep track of the maximum generation number. This might
417 not be the generation counter. */
418 maxgen = MAX (maxgen, listp->slotinfo[cnt].gen);
420 if (map->l_tls_offset == NO_TLS_OFFSET)
422 /* For dynamically loaded modules we simply store
423 the value indicating deferred allocation. */
424 dtv[map->l_tls_modid].pointer.val = TLS_DTV_UNALLOCATED;
425 dtv[map->l_tls_modid].pointer.is_static = false;
426 continue;
429 assert (map->l_tls_modid == cnt);
430 assert (map->l_tls_blocksize >= map->l_tls_initimage_size);
431 # if TLS_TCB_AT_TP
432 assert ((size_t) map->l_tls_offset >= map->l_tls_blocksize);
433 dest = (char *) result - map->l_tls_offset;
434 # elif TLS_DTV_AT_TP
435 dest = (char *) result + map->l_tls_offset;
436 # else
437 # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
438 # endif
440 /* Copy the initialization image and clear the BSS part. */
441 dtv[map->l_tls_modid].pointer.val = dest;
442 dtv[map->l_tls_modid].pointer.is_static = true;
443 memset (__mempcpy (dest, map->l_tls_initimage,
444 map->l_tls_initimage_size), '\0',
445 map->l_tls_blocksize - map->l_tls_initimage_size);
448 total += cnt;
449 if (total >= GL(dl_tls_max_dtv_idx))
450 break;
452 listp = listp->next;
453 assert (listp != NULL);
456 /* The DTV version is up-to-date now. */
457 dtv[0].counter = maxgen;
459 return result;
461 rtld_hidden_def (_dl_allocate_tls_init)
463 void *
464 internal_function
465 _dl_allocate_tls (void *mem)
467 return _dl_allocate_tls_init (mem == NULL
468 ? _dl_allocate_tls_storage ()
469 : allocate_dtv (mem));
471 rtld_hidden_def (_dl_allocate_tls)
474 void
475 internal_function
476 _dl_deallocate_tls (void *tcb, bool dealloc_tcb)
478 dtv_t *dtv = GET_DTV (tcb);
480 /* We need to free the memory allocated for non-static TLS. */
481 for (size_t cnt = 0; cnt < dtv[-1].counter; ++cnt)
482 if (! dtv[1 + cnt].pointer.is_static
483 && dtv[1 + cnt].pointer.val != TLS_DTV_UNALLOCATED)
484 free (dtv[1 + cnt].pointer.val);
486 /* The array starts with dtv[-1]. */
487 #ifdef SHARED
488 if (dtv != GL(dl_initial_dtv))
489 #endif
490 free (dtv - 1);
492 if (dealloc_tcb)
494 # if TLS_TCB_AT_TP
495 /* The TCB follows the TLS blocks. Back up to free the whole block. */
496 tcb -= GL(dl_tls_static_size) - TLS_TCB_SIZE;
497 # elif TLS_DTV_AT_TP
498 /* Back up the TLS_PRE_TCB_SIZE bytes. */
499 tcb -= (TLS_PRE_TCB_SIZE + GL(dl_tls_static_align) - 1)
500 & ~(GL(dl_tls_static_align) - 1);
501 # endif
502 free (tcb);
505 rtld_hidden_def (_dl_deallocate_tls)
508 # ifdef SHARED
509 /* The __tls_get_addr function has two basic forms which differ in the
510 arguments. The IA-64 form takes two parameters, the module ID and
511 offset. The form used, among others, on IA-32 takes a reference to
512    a special structure which contains the same information. The second
513    form seems to be more often used (at the moment) so we default to
514 it. Users of the IA-64 form have to provide adequate definitions
515 of the following macros. */
516 # ifndef GET_ADDR_ARGS
517 # define GET_ADDR_ARGS tls_index *ti
518 # endif
519 # ifndef GET_ADDR_MODULE
520 # define GET_ADDR_MODULE ti->ti_module
521 # endif
522 # ifndef GET_ADDR_OFFSET
523 # define GET_ADDR_OFFSET ti->ti_offset
524 # endif
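/* [Editorial sketch, not part of the original file: the default
   GET_ADDR_ARGS above assumes a structure along the lines of

       typedef struct
       {
         unsigned long int ti_module;   module id, i.e. the dtv index
         unsigned long int ti_offset;   offset within that module's block
       } tls_index;

   as provided by the per-architecture dl-tls.h; the IA-64 form passes
   the two values as separate arguments instead.] */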
527 static void *
528 allocate_and_init (struct link_map *map)
530 void *newp;
532 newp = __libc_memalign (map->l_tls_align, map->l_tls_blocksize);
533 if (newp == NULL)
534 oom ();
536 /* Initialize the memory. */
537 memset (__mempcpy (newp, map->l_tls_initimage, map->l_tls_initimage_size),
538 '\0', map->l_tls_blocksize - map->l_tls_initimage_size);
540 return newp;
544 struct link_map *
545 _dl_update_slotinfo (unsigned long int req_modid)
547 struct link_map *the_map = NULL;
548 dtv_t *dtv = THREAD_DTV ();
550 /* The global dl_tls_dtv_slotinfo array contains for each module
551 index the generation counter current when the entry was created.
552 This array never shrinks so that all module indices which were
553 valid at some time can be used to access it. Before the first
554 use of a new module index in this function the array was extended
555 appropriately. Access also does not have to be guarded against
556 modifications of the array. It is assumed that pointer-size
557 values can be read atomically even in SMP environments. It is
558 possible that other threads at the same time dynamically load
559 code and therefore add to the slotinfo list. This is a problem
560 since we must not pick up any information about incomplete work.
561 The solution to this is to ignore all dtv slots which were
562    created after the one we are currently interested in.  We know that
563 dynamic loading for this module is completed and this is the last
564 load operation we know finished. */
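/* [Editorial example, not part of the original file: suppose this
   thread's dtv[0].counter is 3 and the slotinfo entry for req_modid
   carries generation 5.  The code below then refreshes every slot whose
   generation is 4 or 5 and deliberately skips slots stamped 6 or later,
   because the dlopen that created those may still be in progress.] */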
565 unsigned long int idx = req_modid;
566 struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);
568 while (idx >= listp->len)
570 idx -= listp->len;
571 listp = listp->next;
574 if (dtv[0].counter < listp->slotinfo[idx].gen)
576 /* The generation counter for the slot is higher than what the
577 current dtv implements. We have to update the whole dtv but
578 only those entries with a generation counter <= the one for
579 the entry we need. */
580 size_t new_gen = listp->slotinfo[idx].gen;
581 size_t total = 0;
583 /* We have to look through the entire dtv slotinfo list. */
584 listp = GL(dl_tls_dtv_slotinfo_list);
587 for (size_t cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt)
589 size_t gen = listp->slotinfo[cnt].gen;
591 if (gen > new_gen)
592 /* This is a slot for a generation younger than the
593 one we are handling now. It might be incompletely
594 set up so ignore it. */
595 continue;
597 /* If the entry is older than the current dtv layout we
598 know we don't have to handle it. */
599 if (gen <= dtv[0].counter)
600 continue;
602 /* If there is no map this means the entry is empty. */
603 struct link_map *map = listp->slotinfo[cnt].map;
604 if (map == NULL)
606 /* If this modid was used at some point the memory
607 might still be allocated. */
608 if (! dtv[total + cnt].pointer.is_static
609 && dtv[total + cnt].pointer.val != TLS_DTV_UNALLOCATED)
611 free (dtv[total + cnt].pointer.val);
612 dtv[total + cnt].pointer.val = TLS_DTV_UNALLOCATED;
615 continue;
618 /* Check whether the current dtv array is large enough. */
619 size_t modid = map->l_tls_modid;
620 assert (total + cnt == modid);
621 if (dtv[-1].counter < modid)
623 /* Reallocate the dtv. */
624 dtv_t *newp;
625 size_t newsize = GL(dl_tls_max_dtv_idx) + DTV_SURPLUS;
626 size_t oldsize = dtv[-1].counter;
628 assert (map->l_tls_modid <= newsize);
630 if (dtv == GL(dl_initial_dtv))
632 /* This is the initial dtv that was allocated
633 during rtld startup using the dl-minimal.c
634 malloc instead of the real malloc. We can't
635 free it, we have to abandon the old storage. */
637 newp = malloc ((2 + newsize) * sizeof (dtv_t));
638 if (newp == NULL)
639 oom ();
640 memcpy (newp, &dtv[-1], oldsize * sizeof (dtv_t));
642 else
644 newp = realloc (&dtv[-1],
645 (2 + newsize) * sizeof (dtv_t));
646 if (newp == NULL)
647 oom ();
650 newp[0].counter = newsize;
652 /* Clear the newly allocated part. */
653 memset (newp + 2 + oldsize, '\0',
654 (newsize - oldsize) * sizeof (dtv_t));
656 /* Point dtv to the generation counter. */
657 dtv = &newp[1];
659 /* Install this new dtv in the thread data
660 structures. */
661 INSTALL_NEW_DTV (dtv);
664              /* If there is currently memory allocated for this
665                 dtv entry, free it.  */
666 /* XXX Ideally we will at some point create a memory
667 pool. */
668 if (! dtv[modid].pointer.is_static
669 && dtv[modid].pointer.val != TLS_DTV_UNALLOCATED)
670                /* Note that free is called for NULL as well.  We
671 deallocate even if it is this dtv entry we are
672 supposed to load. The reason is that we call
673 memalign and not malloc. */
674 free (dtv[modid].pointer.val);
676              /* This module is loaded dynamically.  We defer memory
677 allocation. */
678 dtv[modid].pointer.is_static = false;
679 dtv[modid].pointer.val = TLS_DTV_UNALLOCATED;
681 if (modid == req_modid)
682 the_map = map;
685 total += listp->len;
687 while ((listp = listp->next) != NULL);
689 /* This will be the new maximum generation counter. */
690 dtv[0].counter = new_gen;
693 return the_map;
697 /* The generic dynamic and local dynamic model cannot be used in
698 statically linked applications. */
699 void *
700 __tls_get_addr (GET_ADDR_ARGS)
702 dtv_t *dtv = THREAD_DTV ();
703 struct link_map *the_map = NULL;
704 void *p;
706 if (__builtin_expect (dtv[0].counter != GL(dl_tls_generation), 0))
707 the_map = _dl_update_slotinfo (GET_ADDR_MODULE);
709 p = dtv[GET_ADDR_MODULE].pointer.val;
711 if (__builtin_expect (p == TLS_DTV_UNALLOCATED, 0))
713 /* The allocation was deferred. Do it now. */
714 if (the_map == NULL)
716 /* Find the link map for this module. */
717 size_t idx = GET_ADDR_MODULE;
718 struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);
720 while (idx >= listp->len)
722 idx -= listp->len;
723 listp = listp->next;
726 the_map = listp->slotinfo[idx].map;
729 p = dtv[GET_ADDR_MODULE].pointer.val = allocate_and_init (the_map);
730 dtv[GET_ADDR_MODULE].pointer.is_static = false;
733 return (char *) p + GET_ADDR_OFFSET;
735 # endif
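/* [Editorial sketch, not part of the original file: how a general-dynamic
   access conceptually reaches __tls_get_addr above, assuming the
   tls_index form of the interface.  In reality the compiler and dynamic
   linker arrange this through GOT entries and DTPMOD/DTPOFF relocations;
   the C rendering below is only an illustration.] */
#if 0
static int *
example_general_dynamic_access (void)
{
  /* Hypothetical descriptor; the dynamic linker would fill in the module
     id and offset of the __thread variable being accessed.  */
  static tls_index example_index;

  return (int *) __tls_get_addr (&example_index);
}
#endif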
738 /* Look up the module's TLS block as for __tls_get_addr,
739 but never touch anything. Return null if it's not allocated yet. */
740 void *
741 internal_function
742 _dl_tls_get_addr_soft (struct link_map *l)
744 if (__builtin_expect (l->l_tls_modid == 0, 0))
745 /* This module has no TLS segment. */
746 return NULL;
748 dtv_t *dtv = THREAD_DTV ();
749 if (__builtin_expect (dtv[0].counter != GL(dl_tls_generation), 0))
751 /* This thread's DTV is not completely current,
752 but it might already cover this module. */
754 if (l->l_tls_modid >= dtv[-1].counter)
755 /* Nope. */
756 return NULL;
758 size_t idx = l->l_tls_modid;
759 struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);
760 while (idx >= listp->len)
762 idx -= listp->len;
763 listp = listp->next;
766 /* We've reached the slot for this module.
767 If its generation counter is higher than the DTV's,
768 this thread does not know about this module yet. */
769 if (dtv[0].counter < listp->slotinfo[idx].gen)
770 return NULL;
773 void *data = dtv[l->l_tls_modid].pointer.val;
774 if (__builtin_expect (data == TLS_DTV_UNALLOCATED, 0))
775 /* The DTV is current, but this thread has not yet needed
776 to allocate this module's segment. */
777 data = NULL;
779 return data;
783 void
784 _dl_add_to_slotinfo (struct link_map *l)
786   /* Now that we know the object is loaded successfully, add
787 modules containing TLS data to the dtv info table. We
788 might have to increase its size. */
789 struct dtv_slotinfo_list *listp;
790 struct dtv_slotinfo_list *prevp;
791 size_t idx = l->l_tls_modid;
793 /* Find the place in the dtv slotinfo list. */
794 listp = GL(dl_tls_dtv_slotinfo_list);
795 prevp = NULL; /* Needed to shut up gcc. */
798 /* Does it fit in the array of this list element? */
799 if (idx < listp->len)
800 break;
801 idx -= listp->len;
802 prevp = listp;
803 listp = listp->next;
805 while (listp != NULL);
807 if (listp == NULL)
809 /* When we come here it means we have to add a new element
810 to the slotinfo list. And the new module must be in
811 the first slot. */
812 assert (idx == 0);
814 listp = prevp->next = (struct dtv_slotinfo_list *)
815 malloc (sizeof (struct dtv_slotinfo_list)
816 + TLS_SLOTINFO_SURPLUS * sizeof (struct dtv_slotinfo));
817 if (listp == NULL)
819 /* We ran out of memory. We will simply fail this
820 call but don't undo anything we did so far. The
821 application will crash or be terminated anyway very
822 soon. */
824 /* We have to do this since some entries in the dtv
825 slotinfo array might already point to this
826 generation. */
827 ++GL(dl_tls_generation);
829 _dl_signal_error (ENOMEM, "dlopen", NULL, N_("\
830 cannot create TLS data structures"));
833 listp->len = TLS_SLOTINFO_SURPLUS;
834 listp->next = NULL;
835 memset (listp->slotinfo, '\0',
836 TLS_SLOTINFO_SURPLUS * sizeof (struct dtv_slotinfo));
839 /* Add the information into the slotinfo data structure. */
840 listp->slotinfo[idx].map = l;
841 listp->slotinfo[idx].gen = GL(dl_tls_generation) + 1;
843 #endif /* use TLS */