/* Thread-local storage handling in the ELF dynamic linker.  Generic version.
   Copyright (C) 2002-2006,2008,2011 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
#include <assert.h>
#include <errno.h>
#include <libintl.h>
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/param.h>

#include <tls.h>
#include <dl-tls.h>
#include <ldsodefs.h>
/* Amount of excess space to allocate in the static TLS area
   to allow dynamic loading of modules defining IE-model TLS data.  */
#define TLS_STATIC_SURPLUS	64 + DL_NNS * 100
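
/* Illustrative sketch, not part of the original file: assuming the usual
   DL_NNS of 16 link namespaces, the surplus above works out to
   64 + 16 * 100 = 1664 bytes.  A minimal check of that arithmetic, kept
   out of the build; the DL_NNS value is an assumption here.  */
#if 0
static void
tls_static_surplus_example (void)
{
  size_t assumed_dl_nns = 16;	/* assumption, not read from ldsodefs.h */
  assert (64 + assumed_dl_nns * 100 == 1664);
}
#endif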
/* Out-of-memory handler.  */
#ifdef SHARED
static void
__attribute__ ((__noreturn__))
oom (void)
{
  _dl_fatal_printf ("cannot allocate memory for thread-local data: ABORT\n");
}
#endif
size_t
internal_function
_dl_next_tls_modid (void)
{
  size_t result;

  if (__builtin_expect (GL(dl_tls_dtv_gaps), false))
    {
      size_t disp = 0;
      struct dtv_slotinfo_list *runp = GL(dl_tls_dtv_slotinfo_list);

      /* Note that this branch will never be executed during program
         start since there are no gaps at that time.  Therefore it
         does not matter that the dl_tls_dtv_slotinfo is not allocated
         yet when the function is called for the first time.

         NB: the offset +1 is due to the fact that DTV[0] is used
         for something else.  */
      result = GL(dl_tls_static_nelem) + 1;
      if (result <= GL(dl_tls_max_dtv_idx))
        do
          {
            while (result - disp < runp->len)
              {
                if (runp->slotinfo[result - disp].map == NULL)
                  break;

                ++result;
                assert (result <= GL(dl_tls_max_dtv_idx) + 1);
              }

            if (result - disp < runp->len)
              break;

            disp += runp->len;
          }
        while ((runp = runp->next) != NULL);

      if (result > GL(dl_tls_max_dtv_idx))
        {
          /* The new index must indeed be exactly one higher than the
             previous high.  */
          assert (result == GL(dl_tls_max_dtv_idx) + 1);
          /* There is no gap anymore.  */
          GL(dl_tls_dtv_gaps) = false;

          goto nogaps;
        }
    }
  else
    {
      /* No gaps, allocate a new entry.  */
    nogaps:

      result = ++GL(dl_tls_max_dtv_idx);
    }

  return result;
}
void
internal_function
_dl_determine_tlsoffset (void)
{
  size_t max_align = TLS_TCB_ALIGN;
  size_t freetop = 0;
  size_t freebottom = 0;

  /* The first element of the dtv slot info list is allocated.  */
  assert (GL(dl_tls_dtv_slotinfo_list) != NULL);
  /* There is at this point only one element in the
     dl_tls_dtv_slotinfo_list list.  */
  assert (GL(dl_tls_dtv_slotinfo_list)->next == NULL);

  struct dtv_slotinfo *slotinfo = GL(dl_tls_dtv_slotinfo_list)->slotinfo;

  /* Determining the offset of the various parts of the static TLS
     block has several dependencies.  In addition we have to work
     around bugs in some toolchains.

     Each TLS block from the objects available at link time has a size
     and an alignment requirement.  The GNU ld computes the alignment
     requirements for the data at the positions *in the file*, though.
     I.e., it is not simply possible to allocate a block with the size
     of the TLS program header entry.  The data is laid out assuming
     that the first byte of the TLS block fulfills

       p_vaddr mod p_align == &TLS_BLOCK mod p_align

     This means we have to add artificial padding at the beginning of
     the TLS block.  These bytes are never used for the TLS data in
     this module but the first byte allocated must be aligned
     according to mod p_align == 0 so that the first byte of the TLS
     block is aligned according to p_vaddr mod p_align.  This is ugly
     and the linker can help by computing the offsets in the TLS block
     assuming the first byte of the TLS block is aligned according to
     p_align.

     The extra space which might be allocated before the first byte of
     the TLS block need not go unused.  The code below tries to use
     that memory for the next TLS block.  This can work if the total
     memory requirement for the next TLS block is smaller than the
     gap.  */

#if TLS_TCB_AT_TP
  /* We simply start with zero.  */
  size_t offset = 0;

  for (size_t cnt = 0; slotinfo[cnt].map != NULL; ++cnt)
    {
      assert (cnt < GL(dl_tls_dtv_slotinfo_list)->len);

      size_t firstbyte = (-slotinfo[cnt].map->l_tls_firstbyte_offset
                          & (slotinfo[cnt].map->l_tls_align - 1));
      size_t off;
      max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);

      if (freebottom - freetop >= slotinfo[cnt].map->l_tls_blocksize)
        {
          off = roundup (freetop + slotinfo[cnt].map->l_tls_blocksize
                         - firstbyte, slotinfo[cnt].map->l_tls_align)
                + firstbyte;
          if (off <= freebottom)
            {
              freetop = off;

              /* XXX For some architectures we perhaps should store the
                 negative offset.  */
              slotinfo[cnt].map->l_tls_offset = off;
              continue;
            }
        }

      off = roundup (offset + slotinfo[cnt].map->l_tls_blocksize - firstbyte,
                     slotinfo[cnt].map->l_tls_align) + firstbyte;
      if (off > offset + slotinfo[cnt].map->l_tls_blocksize
                + (freebottom - freetop))
        {
          freetop = offset;
          freebottom = off - slotinfo[cnt].map->l_tls_blocksize;
        }
      offset = off;

      /* XXX For some architectures we perhaps should store the
         negative offset.  */
      slotinfo[cnt].map->l_tls_offset = off;
    }

  GL(dl_tls_static_used) = offset;
  GL(dl_tls_static_size) = (roundup (offset + TLS_STATIC_SURPLUS, max_align)
                            + TLS_TCB_SIZE);
#elif TLS_DTV_AT_TP
  /* The TLS blocks start right after the TCB.  */
  size_t offset = TLS_TCB_SIZE;

  for (size_t cnt = 0; slotinfo[cnt].map != NULL; ++cnt)
    {
      assert (cnt < GL(dl_tls_dtv_slotinfo_list)->len);

      size_t firstbyte = (-slotinfo[cnt].map->l_tls_firstbyte_offset
                          & (slotinfo[cnt].map->l_tls_align - 1));
      size_t off;
      max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);

      if (slotinfo[cnt].map->l_tls_blocksize <= freetop - freebottom)
        {
          off = roundup (freebottom, slotinfo[cnt].map->l_tls_align);
          if (off - freebottom < firstbyte)
            off += slotinfo[cnt].map->l_tls_align;
          if (off + slotinfo[cnt].map->l_tls_blocksize - firstbyte <= freetop)
            {
              slotinfo[cnt].map->l_tls_offset = off - firstbyte;
              freebottom = (off + slotinfo[cnt].map->l_tls_blocksize
                            - firstbyte);
              continue;
            }
        }

      off = roundup (offset, slotinfo[cnt].map->l_tls_align);
      if (off - offset < firstbyte)
        off += slotinfo[cnt].map->l_tls_align;

      slotinfo[cnt].map->l_tls_offset = off - firstbyte;
      if (off - firstbyte - offset > freetop - freebottom)
        {
          freebottom = offset;
          freetop = off - firstbyte;
        }

      offset = off + slotinfo[cnt].map->l_tls_blocksize - firstbyte;
    }

  GL(dl_tls_static_used) = offset;
  GL(dl_tls_static_size) = roundup (offset + TLS_STATIC_SURPLUS,
                                    TLS_TCB_ALIGN);
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif

  /* The alignment requirement for the static TLS block.  */
  GL(dl_tls_static_align) = max_align;
}
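
/* Illustrative sketch, not part of the original file: the TLS_TCB_AT_TP
   arithmetic above applied to one hypothetical module.  All numbers are
   made up for the example; roundup() is the sys/param.h macro used
   above.  Kept out of the build.  */
#if 0
static void
tlsoffset_example (void)
{
  /* Hypothetical module: 88-byte TLS block, 16-byte alignment, and
     p_vaddr % p_align == 8 recorded as l_tls_firstbyte_offset.  */
  size_t blocksize = 88, align = 16, firstbyte_offset = 8;
  size_t offset = 0;		/* running end of the static block */

  size_t firstbyte = -firstbyte_offset & (align - 1);	/* == 8 */
  size_t off = roundup (offset + blocksize - firstbyte, align) + firstbyte;

  /* off == 88: the module's data lives at tp - 88 ... tp - 1, and
     tp - 88 is congruent to 8 mod 16, matching p_vaddr % p_align.  */
  assert (firstbyte == 8 && off == 88);
}
#endif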
/* This is called only when the data structure setup was skipped at startup,
   when there was no need for it then.  Now we have dynamically loaded
   something needing TLS, or libpthread needs it.  */
int
internal_function
_dl_tls_setup (void)
{
  assert (GL(dl_tls_dtv_slotinfo_list) == NULL);
  assert (GL(dl_tls_max_dtv_idx) == 0);

  const size_t nelem = 2 + TLS_SLOTINFO_SURPLUS;

  GL(dl_tls_dtv_slotinfo_list)
    = calloc (1, (sizeof (struct dtv_slotinfo_list)
                  + nelem * sizeof (struct dtv_slotinfo)));
  if (GL(dl_tls_dtv_slotinfo_list) == NULL)
    return -1;

  GL(dl_tls_dtv_slotinfo_list)->len = nelem;

  /* Number of elements in the static TLS block.  It can't be zero
     because of various assumptions.  The one element is null.  */
  GL(dl_tls_static_nelem) = GL(dl_tls_max_dtv_idx) = 1;

  /* This initializes more variables for us.  */
  _dl_determine_tlsoffset ();

  return 0;
}
rtld_hidden_def (_dl_tls_setup)
static void *
allocate_dtv (void *result)
{
  dtv_t *dtv;
  size_t dtv_length;

  /* We allocate a few more elements in the dtv than are needed for the
     initial set of modules.  This should avoid in most cases expansions
     of the dtv.  */
  dtv_length = GL(dl_tls_max_dtv_idx) + DTV_SURPLUS;
  dtv = calloc (dtv_length + 2, sizeof (dtv_t));
  if (dtv != NULL)
    {
      /* This is the initial length of the dtv.  */
      dtv[0].counter = dtv_length;

      /* The rest of the dtv (including the generation counter) is
         initialized with zero to indicate nothing there.  */

      /* Add the dtv to the thread data structures.  */
      INSTALL_DTV (result, dtv);
    }
  else
    result = NULL;

  return result;
}
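
/* Illustrative sketch, not part of the original file: how the array built
   above is indexed once installed.  INSTALL_DTV typically stores &dtv[1]
   in the TCB (this is target specific), so from the thread's point of
   view dtv[-1].counter is the capacity set above, dtv[0].counter is the
   generation, and dtv[1], dtv[2], ... hold the per-module entries
   (module IDs start at 1).  Kept out of the build.  */
#if 0
static void
dtv_layout_example (dtv_t *installed_dtv)
{
  size_t capacity = installed_dtv[-1].counter;	/* written as dtv[0] above */
  size_t generation = installed_dtv[0].counter;	/* bumped by dlopen/dlclose */
  void *module_1_block = installed_dtv[1].pointer.val;
  (void) capacity; (void) generation; (void) module_1_block;
}
#endif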
/* Get size and alignment requirements of the static TLS block.  */
void
internal_function
_dl_get_tls_static_info (size_t *sizep, size_t *alignp)
{
  *sizep = GL(dl_tls_static_size);
  *alignp = GL(dl_tls_static_align);
}
void *
internal_function
_dl_allocate_tls_storage (void)
{
  void *result;
  size_t size = GL(dl_tls_static_size);

#if TLS_DTV_AT_TP
  /* Memory layout is:
     [ TLS_PRE_TCB_SIZE ] [ TLS_TCB_SIZE ] [ TLS blocks ]
                          ^ This should be returned.  */
  size += (TLS_PRE_TCB_SIZE + GL(dl_tls_static_align) - 1)
          & ~(GL(dl_tls_static_align) - 1);
#endif

  /* Allocate a correctly aligned chunk of memory.  */
  result = __libc_memalign (GL(dl_tls_static_align), size);
  if (__builtin_expect (result != NULL, 1))
    {
      /* Allocate the DTV.  */
      void *allocated = result;

#if TLS_TCB_AT_TP
      /* The TCB follows the TLS blocks.  */
      result = (char *) result + size - TLS_TCB_SIZE;

      /* Clear the TCB data structure.  We can't ask the caller (i.e.
         libpthread) to do it, because we will initialize the DTV et al.  */
      memset (result, '\0', TLS_TCB_SIZE);
#elif TLS_DTV_AT_TP
      result = (char *) result + size - GL(dl_tls_static_size);

      /* Clear the TCB data structure and TLS_PRE_TCB_SIZE bytes before it.
         We can't ask the caller (i.e. libpthread) to do it, because we will
         initialize the DTV et al.  */
      memset ((char *) result - TLS_PRE_TCB_SIZE, '\0',
              TLS_PRE_TCB_SIZE + TLS_TCB_SIZE);
#endif

      result = allocate_dtv (result);
      if (result == NULL)
        free (allocated);
    }

  return result;
}
void *
internal_function
_dl_allocate_tls_init (void *result)
{
  if (result == NULL)
    /* The memory allocation failed.  */
    return NULL;

  dtv_t *dtv = GET_DTV (result);
  struct dtv_slotinfo_list *listp;
  size_t total = 0;
  size_t maxgen = 0;

  /* We have to prepare the dtv for all currently loaded modules using
     TLS.  For those which are dynamically loaded we add the values
     indicating deferred allocation.  */
  listp = GL(dl_tls_dtv_slotinfo_list);
  while (1)
    {
      size_t cnt;

      for (cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt)
        {
          struct link_map *map;
          void *dest;

          /* Check for the total number of used slots.  */
          if (total + cnt > GL(dl_tls_max_dtv_idx))
            break;

          map = listp->slotinfo[cnt].map;
          if (map == NULL)
            /* Unused entry.  */
            continue;

          /* Keep track of the maximum generation number.  This might
             not be the generation counter.  */
          maxgen = MAX (maxgen, listp->slotinfo[cnt].gen);

          if (map->l_tls_offset == NO_TLS_OFFSET
              || map->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET)
            {
              /* For dynamically loaded modules we simply store
                 the value indicating deferred allocation.  */
              dtv[map->l_tls_modid].pointer.val = TLS_DTV_UNALLOCATED;
              dtv[map->l_tls_modid].pointer.is_static = false;
              continue;
            }

          assert (map->l_tls_modid == cnt);
          assert (map->l_tls_blocksize >= map->l_tls_initimage_size);
#if TLS_TCB_AT_TP
          assert ((size_t) map->l_tls_offset >= map->l_tls_blocksize);
          dest = (char *) result - map->l_tls_offset;
#elif TLS_DTV_AT_TP
          dest = (char *) result + map->l_tls_offset;
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif

          /* Copy the initialization image and clear the BSS part.  */
          dtv[map->l_tls_modid].pointer.val = dest;
          dtv[map->l_tls_modid].pointer.is_static = true;
          memset (__mempcpy (dest, map->l_tls_initimage,
                             map->l_tls_initimage_size), '\0',
                  map->l_tls_blocksize - map->l_tls_initimage_size);
        }

      total += cnt;
      if (total >= GL(dl_tls_max_dtv_idx))
        break;

      listp = listp->next;
      assert (listp != NULL);
    }

  /* The DTV version is up-to-date now.  */
  dtv[0].counter = maxgen;

  return result;
}
rtld_hidden_def (_dl_allocate_tls_init)
void *
internal_function
_dl_allocate_tls (void *mem)
{
  return _dl_allocate_tls_init (mem == NULL
                                ? _dl_allocate_tls_storage ()
                                : allocate_dtv (mem));
}
rtld_hidden_def (_dl_allocate_tls)
void
internal_function
_dl_deallocate_tls (void *tcb, bool dealloc_tcb)
{
  dtv_t *dtv = GET_DTV (tcb);

  /* We need to free the memory allocated for non-static TLS.  */
  for (size_t cnt = 0; cnt < dtv[-1].counter; ++cnt)
    if (! dtv[1 + cnt].pointer.is_static
        && dtv[1 + cnt].pointer.val != TLS_DTV_UNALLOCATED)
      free (dtv[1 + cnt].pointer.val);

  /* The array starts with dtv[-1].  */
#ifdef SHARED
  if (dtv != GL(dl_initial_dtv))
#endif
    free (dtv - 1);

  if (dealloc_tcb)
    {
#if TLS_TCB_AT_TP
      /* The TCB follows the TLS blocks.  Back up to free the whole block.  */
      tcb -= GL(dl_tls_static_size) - TLS_TCB_SIZE;
#elif TLS_DTV_AT_TP
      /* Back up the TLS_PRE_TCB_SIZE bytes.  */
      tcb -= (TLS_PRE_TCB_SIZE + GL(dl_tls_static_align) - 1)
             & ~(GL(dl_tls_static_align) - 1);
#endif
      free (tcb);
    }
}
rtld_hidden_def (_dl_deallocate_tls)
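
/* Illustrative sketch, not part of the original file: the intended pairing
   of the allocation entry points above, roughly what a thread library does
   for each new thread.  Error handling omitted; kept out of the build.  */
#if 0
static void
tls_alloc_example (void)
{
  /* Reserve static TLS + TCB storage and a dtv, then fill in the
     initialization images of all currently loaded modules.  */
  void *tcb = _dl_allocate_tls (NULL);

  /* ... run the thread ...  */

  /* Free dynamic TLS blocks, the dtv, and (here) the TCB block too.  */
  _dl_deallocate_tls (tcb, true);
}
#endif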
#ifdef SHARED

/* The __tls_get_addr function has two basic forms which differ in the
   arguments.  The IA-64 form takes two parameters, the module ID and
   offset.  The form used, among others, on IA-32 takes a reference to
   a special structure which contains the same information.  The second
   form seems to be more often used (at the moment) so we default to
   it.  Users of the IA-64 form have to provide adequate definitions
   of the following macros.  */
# ifndef GET_ADDR_ARGS
#  define GET_ADDR_ARGS tls_index *ti
# endif
# ifndef GET_ADDR_MODULE
#  define GET_ADDR_MODULE ti->ti_module
# endif
# ifndef GET_ADDR_OFFSET
#  define GET_ADDR_OFFSET ti->ti_offset
# endif
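
/* Illustrative sketch, not part of the original file: the two calling
   conventions the comment above describes.  With the default macros the
   caller passes a pointer to a tls_index pair; IA-64 style ports instead
   pass the module ID and offset as separate arguments and override the
   GET_ADDR_* macros accordingly.  Kept out of the build.  */
#if 0
/* Default (e.g. IA-32) form: one structure argument.  */
extern void *__tls_get_addr (tls_index *ti);

static void *
get_addr_struct_form (size_t modid, size_t offset)
{
  tls_index ti = { .ti_module = modid, .ti_offset = offset };
  return __tls_get_addr (&ti);
}

/* The IA-64 style form, selected by redefining the macros, corresponds
   to a prototype like:
     extern void *__tls_get_addr (size_t module, size_t offset);  */
#endif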
static void *
allocate_and_init (struct link_map *map)
{
  void *newp;

  newp = __libc_memalign (map->l_tls_align, map->l_tls_blocksize);
  if (newp == NULL)
    oom ();

  /* Initialize the memory.  */
  memset (__mempcpy (newp, map->l_tls_initimage, map->l_tls_initimage_size),
          '\0', map->l_tls_blocksize - map->l_tls_initimage_size);

  return newp;
}
struct link_map *
internal_function
_dl_update_slotinfo (unsigned long int req_modid)
{
  struct link_map *the_map = NULL;
  dtv_t *dtv = THREAD_DTV ();

  /* The global dl_tls_dtv_slotinfo array contains for each module
     index the generation counter current when the entry was created.
     This array never shrinks so that all module indices which were
     valid at some time can be used to access it.  Before the first
     use of a new module index in this function the array was extended
     appropriately.  Access also does not have to be guarded against
     modifications of the array.  It is assumed that pointer-size
     values can be read atomically even in SMP environments.  It is
     possible that other threads at the same time dynamically load
     code and therefore add to the slotinfo list.  This is a problem
     since we must not pick up any information about incomplete work.
     The solution to this is to ignore all dtv slots which were
     created after the one we are currently interested in.  We know
     that dynamic loading for this module is completed and this is the
     last load operation we know finished.  */
  unsigned long int idx = req_modid;
  struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);

  while (idx >= listp->len)
    {
      idx -= listp->len;
      listp = listp->next;
    }

  if (dtv[0].counter < listp->slotinfo[idx].gen)
    {
      /* The generation counter for the slot is higher than what the
         current dtv implements.  We have to update the whole dtv but
         only those entries with a generation counter <= the one for
         the entry we need.  */
      size_t new_gen = listp->slotinfo[idx].gen;
      size_t total = 0;

      /* We have to look through the entire dtv slotinfo list.  */
      listp = GL(dl_tls_dtv_slotinfo_list);
      do
        {
          for (size_t cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt)
            {
              size_t gen = listp->slotinfo[cnt].gen;

              if (gen > new_gen)
                /* This is a slot for a generation younger than the
                   one we are handling now.  It might be incompletely
                   set up so ignore it.  */
                continue;

              /* If the entry is older than the current dtv layout we
                 know we don't have to handle it.  */
              if (gen <= dtv[0].counter)
                continue;

              /* If there is no map this means the entry is empty.  */
              struct link_map *map = listp->slotinfo[cnt].map;
              if (map == NULL)
                {
                  /* If this modid was used at some point the memory
                     might still be allocated.  */
                  if (! dtv[total + cnt].pointer.is_static
                      && dtv[total + cnt].pointer.val != TLS_DTV_UNALLOCATED)
                    {
                      free (dtv[total + cnt].pointer.val);
                      dtv[total + cnt].pointer.val = TLS_DTV_UNALLOCATED;
                    }

                  continue;
                }

              /* Check whether the current dtv array is large enough.  */
              size_t modid = map->l_tls_modid;
              assert (total + cnt == modid);
              if (dtv[-1].counter < modid)
                {
                  /* Reallocate the dtv.  */
                  dtv_t *newp;
                  size_t newsize = GL(dl_tls_max_dtv_idx) + DTV_SURPLUS;
                  size_t oldsize = dtv[-1].counter;

                  assert (map->l_tls_modid <= newsize);

                  if (dtv == GL(dl_initial_dtv))
                    {
                      /* This is the initial dtv that was allocated
                         during rtld startup using the dl-minimal.c
                         malloc instead of the real malloc.  We can't
                         free it, we have to abandon the old storage.  */

                      newp = malloc ((2 + newsize) * sizeof (dtv_t));
                      if (newp == NULL)
                        oom ();
                      memcpy (newp, &dtv[-1], (2 + oldsize) * sizeof (dtv_t));
                    }
                  else
                    {
                      newp = realloc (&dtv[-1],
                                      (2 + newsize) * sizeof (dtv_t));
                      if (newp == NULL)
                        oom ();
                    }

                  newp[0].counter = newsize;

                  /* Clear the newly allocated part.  */
                  memset (newp + 2 + oldsize, '\0',
                          (newsize - oldsize) * sizeof (dtv_t));

                  /* Point dtv to the generation counter.  */
                  dtv = &newp[1];

                  /* Install this new dtv in the thread data
                     structures.  */
                  INSTALL_NEW_DTV (dtv);
                }

              /* If there is currently memory allocated for this
                 dtv entry, free it.  */
              /* XXX Ideally we will at some point create a memory
                 pool.  */
              if (! dtv[modid].pointer.is_static
                  && dtv[modid].pointer.val != TLS_DTV_UNALLOCATED)
                /* Note that free is called for NULL as well.  We
                   deallocate even if it is this dtv entry we are
                   supposed to load.  The reason is that we call
                   memalign and not malloc.  */
                free (dtv[modid].pointer.val);

              /* This module is loaded dynamically.  We defer memory
                 allocation.  */
              dtv[modid].pointer.is_static = false;
              dtv[modid].pointer.val = TLS_DTV_UNALLOCATED;

              if (modid == req_modid)
                the_map = map;
            }

          total += listp->len;
        }
      while ((listp = listp->next) != NULL);

      /* This will be the new maximum generation counter.  */
      dtv[0].counter = new_gen;
    }

  return the_map;
}
static void *
__attribute_noinline__
tls_get_addr_tail (dtv_t *dtv, struct link_map *the_map, size_t module)
{
  /* The allocation was deferred.  Do it now.  */
  if (the_map == NULL)
    {
      /* Find the link map for this module.  */
      size_t idx = module;
      struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);

      while (idx >= listp->len)
        {
          idx -= listp->len;
          listp = listp->next;
        }

      the_map = listp->slotinfo[idx].map;
    }

 again:
  /* Make sure that, if a dlopen running in parallel forces the
     variable into static storage, we'll wait until the address in the
     static TLS block is set up, and use that.  If we're undecided
     yet, make sure we make the decision holding the lock as well.  */
  if (__builtin_expect (the_map->l_tls_offset
                        != FORCED_DYNAMIC_TLS_OFFSET, 0))
    {
      __rtld_lock_lock_recursive (GL(dl_load_lock));
      if (__builtin_expect (the_map->l_tls_offset == NO_TLS_OFFSET, 1))
        {
          the_map->l_tls_offset = FORCED_DYNAMIC_TLS_OFFSET;
          __rtld_lock_unlock_recursive (GL(dl_load_lock));
        }
      else
        {
          __rtld_lock_unlock_recursive (GL(dl_load_lock));
          if (__builtin_expect (the_map->l_tls_offset
                                != FORCED_DYNAMIC_TLS_OFFSET, 1))
            {
              /* The module was given static TLS; wait until the dtv
                 entry has been published and use that address.  */
              void *p = dtv[module].pointer.val;
              if (__builtin_expect (p == TLS_DTV_UNALLOCATED, 0))
                goto again;

              return p;
            }
        }
    }

  void *p = dtv[module].pointer.val = allocate_and_init (the_map);
  dtv[module].pointer.is_static = false;

  return p;
}
/* The generic dynamic and local dynamic model cannot be used in
   statically linked applications.  */
void *
__tls_get_addr (GET_ADDR_ARGS)
{
  dtv_t *dtv = THREAD_DTV ();
  struct link_map *the_map = NULL;
  void *p;

  if (__builtin_expect (dtv[0].counter != GL(dl_tls_generation), 0))
    the_map = _dl_update_slotinfo (GET_ADDR_MODULE);

  p = dtv[GET_ADDR_MODULE].pointer.val;

  if (__builtin_expect (p == TLS_DTV_UNALLOCATED, 0))
    p = tls_get_addr_tail (dtv, the_map, GET_ADDR_MODULE);

  return (char *) p + GET_ADDR_OFFSET;
}
#endif
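
/* Illustrative sketch, not part of the original file: what a
   general-dynamic access to a __thread variable boils down to on ports
   using the structure form above.  Real compiled code loads the address
   of a linker-built (module, offset) GOT pair instead of constructing
   one by hand; the numbers below are made up.  Kept out of the build.  */
#if 0
static int
general_dynamic_access_example (void)
{
  tls_index ti = { .ti_module = 1,	/* module ID of the defining object */
                   .ti_offset = 0 };	/* offset of the variable in its block */
  return *(int *) __tls_get_addr (&ti);
}
#endif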
/* Look up the module's TLS block as for __tls_get_addr,
   but never touch anything.  Return null if it's not allocated yet.  */
void *
_dl_tls_get_addr_soft (struct link_map *l)
{
  if (__builtin_expect (l->l_tls_modid == 0, 0))
    /* This module has no TLS segment.  */
    return NULL;

  dtv_t *dtv = THREAD_DTV ();
  if (__builtin_expect (dtv[0].counter != GL(dl_tls_generation), 0))
    {
      /* This thread's DTV is not completely current,
         but it might already cover this module.  */

      if (l->l_tls_modid >= dtv[-1].counter)
        /* Not allocated in the DTV at all.  */
        return NULL;

      size_t idx = l->l_tls_modid;
      struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);
      while (idx >= listp->len)
        {
          idx -= listp->len;
          listp = listp->next;
        }

      /* We've reached the slot for this module.
         If its generation counter is higher than the DTV's,
         this thread does not know about this module yet.  */
      if (dtv[0].counter < listp->slotinfo[idx].gen)
        return NULL;
    }

  void *data = dtv[l->l_tls_modid].pointer.val;
  if (__builtin_expect (data == TLS_DTV_UNALLOCATED, 0))
    /* The DTV is current, but this thread has not yet needed
       to allocate this module's segment.  */
    data = NULL;

  return data;
}
void
_dl_add_to_slotinfo (struct link_map *l)
{
  /* Now that we know the object is loaded successfully add
     modules containing TLS data to the dtv info table.  We
     might have to increase its size.  */
  struct dtv_slotinfo_list *listp;
  struct dtv_slotinfo_list *prevp;
  size_t idx = l->l_tls_modid;

  /* Find the place in the dtv slotinfo list.  */
  listp = GL(dl_tls_dtv_slotinfo_list);
  prevp = NULL;		/* Needed to shut up gcc.  */
  do
    {
      /* Does it fit in the array of this list element?  */
      if (idx < listp->len)
        break;
      idx -= listp->len;
      prevp = listp;
      listp = listp->next;
    }
  while (listp != NULL);

  if (listp == NULL)
    {
      /* When we come here it means we have to add a new element
         to the slotinfo list.  And the new module must be in
         the first slot.  */
      assert (idx == 0);

      listp = prevp->next = (struct dtv_slotinfo_list *)
        malloc (sizeof (struct dtv_slotinfo_list)
                + TLS_SLOTINFO_SURPLUS * sizeof (struct dtv_slotinfo));
      if (listp == NULL)
        {
          /* We ran out of memory.  We will simply fail this
             call but don't undo anything we did so far.  The
             application will crash or be terminated anyway very
             soon.  */

          /* We have to do this since some entries in the dtv
             slotinfo array might already point to this
             generation.  */
          ++GL(dl_tls_generation);

          _dl_signal_error (ENOMEM, "dlopen", NULL, N_("\
cannot create TLS data structures"));
        }

      listp->len = TLS_SLOTINFO_SURPLUS;
      listp->next = NULL;
      memset (listp->slotinfo, '\0',
              TLS_SLOTINFO_SURPLUS * sizeof (struct dtv_slotinfo));
    }

  /* Add the information into the slotinfo data structure.  */
  listp->slotinfo[idx].map = l;
  listp->slotinfo[idx].gen = GL(dl_tls_generation) + 1;
}