/* Thread-local storage handling in the ELF dynamic linker.  Generic version.
   Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#if defined SHARED || defined NOT_IN_libc
# error in buildsystem: This file is for libc.a
#endif

#include <signal.h>
#include <stdlib.h>
#include <sys/param.h>
#include <tls.h>
#include <dl-tls.h>
#include <ldsodefs.h>
#include <dl-elf.h>
#include <dl-hash.h>

#include <assert.h>
#include <link.h>
#include <string.h>
#include <unistd.h>
#include <stdio.h>

#define _dl_malloc	malloc
#define _dl_memset	memset
#define _dl_mempcpy	mempcpy
#define _dl_dprintf	fprintf
#define _dl_debug_file	stderr
#define _dl_exit	exit

/* Amount of excess space to allocate in the static TLS area
   to allow dynamic loading of modules defining IE-model TLS data.  */
# define TLS_STATIC_SURPLUS	64 + DL_NNS * 100
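
/* Note: the expression above is intentionally left unparenthesized, which
   appears to match the glibc definition this file was derived from.  It is
   only ever used in additive contexts such as "offset + TLS_STATIC_SURPLUS"
   below, where operator precedence is harmless.  DL_NNS comes from
   ldsodefs.h and its value is port-dependent.  */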

/* Value used for dtv entries for which the allocation is delayed.  */
# define TLS_DTV_UNALLOCATED	((void *) -1l)

/* Out-of-memory handler.  */
# ifdef SHARED
static void
__attribute__ ((__noreturn__))
oom (void)
{
  do {
    _dl_dprintf (_dl_debug_file,
                 "cannot allocate thread-local memory: ABORT\n");
    _dl_exit (127);
  } while (1);
}
# endif
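
/* Static-libc stand-in for the dynamic linker's memalign: the requested
   alignment is ignored and plain malloc is used instead.  This presumably
   relies on malloc's natural alignment being sufficient for the TLS blocks
   needed in the static case (an assumption; nothing in this file checks it).  */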
void *_dl_memalign (size_t alignment, size_t bytes);
void *_dl_memalign (size_t alignment, size_t bytes)
{
  return _dl_malloc (bytes);
}

/*
 * We are trying to perform a static TLS relocation in MAP, but it was
 * dynamically loaded.  This can only work if there is enough surplus in
 * the static TLS area already allocated for each running thread.  If this
 * object's TLS segment is too big to fit, we fail.  If it fits,
 * we set MAP->l_tls_offset and return.
 * This function intentionally does not return any value but signals error
 * directly, as static TLS should be rare and the code handling it should,
 * as far as possible, not be inlined.
 */

void
internal_function __attribute_noinline__
_dl_allocate_static_tls (struct link_map *map)
{
  /* If the alignment requirements are too high fail.  */
  if (map->l_tls_align > _dl_tls_static_align)
    {
    fail:
      _dl_dprintf (_dl_debug_file, "cannot allocate memory in static TLS block");
      _dl_exit (30);
    }

# if defined(TLS_TCB_AT_TP)
  size_t freebytes;
  size_t n;
  size_t blsize;

  freebytes = _dl_tls_static_size - _dl_tls_static_used - TLS_TCB_SIZE;

  blsize = map->l_tls_blocksize + map->l_tls_firstbyte_offset;
  if (freebytes < blsize)
    goto fail;

  n = (freebytes - blsize) / map->l_tls_align;

  size_t offset = _dl_tls_static_used + (freebytes - n * map->l_tls_align
                                         - map->l_tls_firstbyte_offset);

  map->l_tls_offset = _dl_tls_static_used = offset;
# elif defined(TLS_DTV_AT_TP)
  size_t used;
  size_t check;

  size_t offset = roundup (_dl_tls_static_used, map->l_tls_align);
  used = offset + map->l_tls_blocksize;
  check = used;

  /* dl_tls_static_used includes the TCB at the beginning.  */
  if (check > _dl_tls_static_size)
    goto fail;

  map->l_tls_offset = offset;
  _dl_tls_static_used = used;
# else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif

  /*
   * If the object is not yet relocated we cannot initialize the
   * static TLS region.  Delay it.
   */
  if (((struct elf_resolve *) map)->init_flag & RELOCS_DONE)
    {
#ifdef SHARED
      /*
       * Update the slot information data for at least the generation of
       * the DSO we are allocating data for.
       */
      if (__builtin_expect (THREAD_DTV()[0].counter != _dl_tls_generation, 0))
        (void) _dl_update_slotinfo (map->l_tls_modid);
#endif
      _dl_init_static_tls (map);
    }
  else
    map->l_need_tls_init = 1;
}
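
/* Return the next unused TLS module ID.  If modules have been unloaded
   (dl_tls_dtv_gaps is set), the slotinfo list is scanned for a reusable
   index; otherwise the running maximum is simply incremented.  */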
size_t
internal_function
_dl_next_tls_modid (void)
{
  size_t result;

  if (__builtin_expect (GL(dl_tls_dtv_gaps), false))
    {
      size_t disp = 0;
      struct dtv_slotinfo_list *runp = GL(dl_tls_dtv_slotinfo_list);

      /* Note that this branch will never be executed during program
         start since there are no gaps at that time.  Therefore it
         does not matter that the dl_tls_dtv_slotinfo is not allocated
         yet when the function is called for the first times.

         NB: the offset +1 is due to the fact that DTV[0] is used
         for something else.  */
      result = GL(dl_tls_static_nelem) + 1;
      if (result <= GL(dl_tls_max_dtv_idx))
        do
          {
            while (result - disp < runp->len)
              {
                if (runp->slotinfo[result - disp].map == NULL)
                  break;

                ++result;
                assert (result <= GL(dl_tls_max_dtv_idx) + 1);
              }

            if (result - disp < runp->len)
              break;

            disp += runp->len;
          }
        while ((runp = runp->next) != NULL);

      if (result > GL(dl_tls_max_dtv_idx))
        {
          /* The new index must indeed be exactly one higher than the
             previous high.  */
          assert (result == GL(dl_tls_max_dtv_idx) + 1);
          /* There is no gap anymore.  */
          GL(dl_tls_dtv_gaps) = false;

          goto nogaps;
        }
    }
  else
    {
      /* No gaps, allocate a new entry.  */
    nogaps:

      result = ++GL(dl_tls_max_dtv_idx);
    }

  return result;
}

# ifdef SHARED
void
internal_function
_dl_determine_tlsoffset (void)
{
  size_t max_align = TLS_TCB_ALIGN;
  size_t freetop = 0;
  size_t freebottom = 0;

  /* The first element of the dtv slot info list is allocated.  */
  assert (GL(dl_tls_dtv_slotinfo_list) != NULL);
  /* There is at this point only one element in the
     dl_tls_dtv_slotinfo_list list.  */
  assert (GL(dl_tls_dtv_slotinfo_list)->next == NULL);

  struct dtv_slotinfo *slotinfo = GL(dl_tls_dtv_slotinfo_list)->slotinfo;

  /* Determining the offset of the various parts of the static TLS
     block has several dependencies.  In addition we have to work
     around bugs in some toolchains.

     Each TLS block from the objects available at link time has a size
     and an alignment requirement.  The GNU ld computes the alignment
     requirements for the data at the positions *in the file*, though.
     I.e., it is not simply possible to allocate a block with the size
     of the TLS program header entry.  The data is laid out assuming
     that the first byte of the TLS block fulfills

       p_vaddr mod p_align == &TLS_BLOCK mod p_align

     This means we have to add artificial padding at the beginning of
     the TLS block.  These bytes are never used for the TLS data in
     this module but the first byte allocated must be aligned
     according to mod p_align == 0 so that the first byte of the TLS
     block is aligned according to p_vaddr mod p_align.  This is ugly
     and the linker can help by computing the offsets in the TLS block
     assuming the first byte of the TLS block is aligned according to
     p_align.

     The extra space which might be allocated before the first byte of
     the TLS block need not go unused.  The code below tries to use
     that memory for the next TLS block.  This can work if the total
     memory requirement for the next TLS block is smaller than the
     gap.  */
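
  /* Illustrative, hypothetical numbers (assuming l_tls_firstbyte_offset is
     p_vaddr modulo p_align, as set up when the PT_TLS header is processed):
     a module with p_align == 16 and p_vaddr % 16 == 4 must have its block
     start at an address congruent to 4 mod 16, so the 4 bytes in front of
     that first byte are the artificial padding described above, which the
     code below tries to recycle for a later, smaller block.  */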

# if defined(TLS_TCB_AT_TP)
  /* We simply start with zero.  */
  size_t offset = 0;

  size_t cnt;
  for (cnt = 0; slotinfo[cnt].map != NULL; ++cnt)
    {
      assert (cnt < GL(dl_tls_dtv_slotinfo_list)->len);

      size_t firstbyte = (-slotinfo[cnt].map->l_tls_firstbyte_offset
                          & (slotinfo[cnt].map->l_tls_align - 1));
      size_t off;
      max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);

      if (freebottom - freetop >= slotinfo[cnt].map->l_tls_blocksize)
        {
          off = roundup (freetop + slotinfo[cnt].map->l_tls_blocksize
                         - firstbyte, slotinfo[cnt].map->l_tls_align)
                + firstbyte;
          if (off <= freebottom)
            {
              freetop = off;

              /* XXX For some architectures we perhaps should store the
                 negative offset.  */
              slotinfo[cnt].map->l_tls_offset = off;
              continue;
            }
        }

      off = roundup (offset + slotinfo[cnt].map->l_tls_blocksize - firstbyte,
                     slotinfo[cnt].map->l_tls_align) + firstbyte;
      if (off > offset + slotinfo[cnt].map->l_tls_blocksize
                + (freebottom - freetop))
        {
          freetop = offset;
          freebottom = off - slotinfo[cnt].map->l_tls_blocksize;
        }
      offset = off;

      /* XXX For some architectures we perhaps should store the
         negative offset.  */
      slotinfo[cnt].map->l_tls_offset = off;
    }

  GL(dl_tls_static_used) = offset;
  GL(dl_tls_static_size) = (roundup (offset + TLS_STATIC_SURPLUS, max_align)
                            + TLS_TCB_SIZE);
# elif defined(TLS_DTV_AT_TP)
  /* The TLS blocks start right after the TCB.  */
  size_t offset = TLS_TCB_SIZE;
  size_t cnt;

  for (cnt = 0; slotinfo[cnt].map != NULL; ++cnt)
    {
      assert (cnt < GL(dl_tls_dtv_slotinfo_list)->len);

      size_t firstbyte = (-slotinfo[cnt].map->l_tls_firstbyte_offset
                          & (slotinfo[cnt].map->l_tls_align - 1));
      size_t off;
      max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);

      if (slotinfo[cnt].map->l_tls_blocksize <= freetop - freebottom)
        {
          off = roundup (freebottom, slotinfo[cnt].map->l_tls_align);
          if (off - freebottom < firstbyte)
            off += slotinfo[cnt].map->l_tls_align;
          if (off + slotinfo[cnt].map->l_tls_blocksize - firstbyte <= freetop)
            {
              slotinfo[cnt].map->l_tls_offset = off - firstbyte;
              freebottom = (off + slotinfo[cnt].map->l_tls_blocksize
                            - firstbyte);
              continue;
            }
        }

      off = roundup (offset, slotinfo[cnt].map->l_tls_align);
      if (off - offset < firstbyte)
        off += slotinfo[cnt].map->l_tls_align;

      slotinfo[cnt].map->l_tls_offset = off - firstbyte;
      if (off - firstbyte - offset > freetop - freebottom)
        {
          freebottom = offset;
          freetop = off - firstbyte;
        }

      offset = off + slotinfo[cnt].map->l_tls_blocksize - firstbyte;
    }

  GL(dl_tls_static_used) = offset;
  GL(dl_tls_static_size) = roundup (offset + TLS_STATIC_SURPLUS,
                                    TLS_TCB_ALIGN);
# else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif

  /* The alignment requirement for the static TLS block.  */
  GL(dl_tls_static_align) = max_align;
}

/* This is called only when the data structure setup was skipped at startup,
   when there was no need for it then.  Now we have dynamically loaded
   something needing TLS, or libpthread needs it.  */
int
internal_function
_dl_tls_setup (void)
{
  assert (GL(dl_tls_dtv_slotinfo_list) == NULL);
  assert (GL(dl_tls_max_dtv_idx) == 0);

  const size_t nelem = 2 + TLS_SLOTINFO_SURPLUS;

  GL(dl_tls_dtv_slotinfo_list)
    = calloc (1, (sizeof (struct dtv_slotinfo_list)
                  + nelem * sizeof (struct dtv_slotinfo)));
  if (GL(dl_tls_dtv_slotinfo_list) == NULL)
    return -1;

  GL(dl_tls_dtv_slotinfo_list)->len = nelem;

  /* Number of elements in the static TLS block.  It can't be zero
     because of various assumptions.  The one element is null.  */
  GL(dl_tls_static_nelem) = GL(dl_tls_max_dtv_idx) = 1;

  /* This initializes more variables for us.  */
  _dl_determine_tlsoffset ();

  return 0;
}
# endif
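
/* dtv layout assumed throughout this file: allocate_dtv () reserves two
   extra elements, [capacity][generation][per-module entries ...], and the
   per-port INSTALL_DTV/GET_DTV macros are expected to hand back a pointer
   to the generation element.  From that view dtv[-1].counter is the
   capacity, dtv[0].counter the generation the dtv reflects, and module
   entries live at dtv[1] .. dtv[capacity], indexed by module ID.  */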
static void *
internal_function
allocate_dtv (void *result)
{
  dtv_t *dtv;
  size_t dtv_length;

  /* We allocate a few more elements in the dtv than are needed for the
     initial set of modules.  This should avoid in most cases expansions
     of the dtv.  */
  dtv_length = GL(dl_tls_max_dtv_idx) + DTV_SURPLUS;
  dtv = calloc (dtv_length + 2, sizeof (dtv_t));
  if (dtv != NULL)
    {
      /* This is the initial length of the dtv.  */
      dtv[0].counter = dtv_length;

      /* The rest of the dtv (including the generation counter) is
         initialized with zero to indicate nothing there.  */

      /* Add the dtv to the thread data structures.  */
      INSTALL_DTV (result, dtv);
    }
  else
    result = NULL;

  return result;
}

/* Get size and alignment requirements of the static TLS block.  */
void
internal_function
_dl_get_tls_static_info (size_t *sizep, size_t *alignp)
{
  *sizep = GL(dl_tls_static_size);
  *alignp = GL(dl_tls_static_align);
}

void *
internal_function
_dl_allocate_tls_storage (void)
{
  void *result;
  size_t size = GL(dl_tls_static_size);

# if defined(TLS_DTV_AT_TP)
  /* Memory layout is:
     [ TLS_PRE_TCB_SIZE ] [ TLS_TCB_SIZE ] [ TLS blocks ]
                          ^ This should be returned.  */
  size += (TLS_PRE_TCB_SIZE + GL(dl_tls_static_align) - 1)
          & ~(GL(dl_tls_static_align) - 1);
# endif

  /* Allocate a correctly aligned chunk of memory.  */
  result = _dl_memalign (GL(dl_tls_static_align), size);
  if (__builtin_expect (result != NULL, 1))
    {
      /* Allocate the DTV.  */
      void *allocated = result;

# if defined(TLS_TCB_AT_TP)
      /* The TCB follows the TLS blocks.  */
      result = (char *) result + size - TLS_TCB_SIZE;

      /* Clear the TCB data structure.  We can't ask the caller (i.e.
         libpthread) to do it, because we will initialize the DTV et al.  */
      _dl_memset (result, '\0', TLS_TCB_SIZE);
# elif defined(TLS_DTV_AT_TP)
      result = (char *) result + size - GL(dl_tls_static_size);

      /* Clear the TCB data structure and TLS_PRE_TCB_SIZE bytes before it.
         We can't ask the caller (i.e. libpthread) to do it, because we will
         initialize the DTV et al.  */
      _dl_memset ((char *) result - TLS_PRE_TCB_SIZE, '\0',
                  TLS_PRE_TCB_SIZE + TLS_TCB_SIZE);
# endif

      result = allocate_dtv (result);
      if (result == NULL)
        free (allocated);
    }

  return result;
}

void *
internal_function
_dl_allocate_tls_init (void *result)
{
  if (result == NULL)
    /* The memory allocation failed.  */
    return NULL;

  dtv_t *dtv = GET_DTV (result);
  struct dtv_slotinfo_list *listp;
  size_t total = 0;
  size_t maxgen = 0;

  /* We have to prepare the dtv for all currently loaded modules using
     TLS.  For those which are dynamically loaded we add the values
     indicating deferred allocation.  */
  listp = GL(dl_tls_dtv_slotinfo_list);
  while (1)
    {
      size_t cnt;

      for (cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt)
        {
          struct link_map *map;
          void *dest;

          /* Check for the total number of used slots.  */
          if (total + cnt > GL(dl_tls_max_dtv_idx))
            break;

          map = listp->slotinfo[cnt].map;
          if (map == NULL)
            /* Unused entry.  */
            continue;

          /* Keep track of the maximum generation number.  This might
             not be the generation counter.  */
          maxgen = MAX (maxgen, listp->slotinfo[cnt].gen);

          if (map->l_tls_offset == NO_TLS_OFFSET)
            {
              /* For dynamically loaded modules we simply store
                 the value indicating deferred allocation.  */
              dtv[map->l_tls_modid].pointer.val = TLS_DTV_UNALLOCATED;
              dtv[map->l_tls_modid].pointer.is_static = false;
              continue;
            }

          assert (map->l_tls_modid == cnt);
          assert (map->l_tls_blocksize >= map->l_tls_initimage_size);
# if defined(TLS_TCB_AT_TP)
          assert ((size_t) map->l_tls_offset >= map->l_tls_blocksize);
          dest = (char *) result - map->l_tls_offset;
# elif defined(TLS_DTV_AT_TP)
          dest = (char *) result + map->l_tls_offset;
# else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif

          /* Copy the initialization image and clear the BSS part.  */
          dtv[map->l_tls_modid].pointer.val = dest;
          dtv[map->l_tls_modid].pointer.is_static = true;
          _dl_memset (_dl_mempcpy (dest, map->l_tls_initimage,
                                   map->l_tls_initimage_size), '\0',
                      map->l_tls_blocksize - map->l_tls_initimage_size);
        }

      total += cnt;
      if (total >= GL(dl_tls_max_dtv_idx))
        break;

      listp = listp->next;
      assert (listp != NULL);
    }

  /* The DTV version is up-to-date now.  */
  dtv[0].counter = maxgen;

  return result;
}
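
/* Convenience entry point: with MEM == NULL a fresh, suitably aligned static
   TLS block plus dtv is allocated; otherwise a dtv is installed into the
   caller-provided memory.  Either way the block is then initialized from the
   modules' TLS init images by _dl_allocate_tls_init.  */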
void *
internal_function
_dl_allocate_tls (void *mem)
{
  return _dl_allocate_tls_init (mem == NULL
                                ? _dl_allocate_tls_storage ()
                                : allocate_dtv (mem));
}

void
internal_function
_dl_deallocate_tls (void *tcb, bool dealloc_tcb)
{
  dtv_t *dtv = GET_DTV (tcb);
  size_t cnt;

  /* We need to free the memory allocated for non-static TLS.  */
  for (cnt = 0; cnt < dtv[-1].counter; ++cnt)
    if (! dtv[1 + cnt].pointer.is_static
        && dtv[1 + cnt].pointer.val != TLS_DTV_UNALLOCATED)
      free (dtv[1 + cnt].pointer.val);

  /* The array starts with dtv[-1].  */
#ifdef SHARED
  if (dtv != GL(dl_initial_dtv))
#endif
    free (dtv - 1);

  if (dealloc_tcb)
    {
# if defined(TLS_TCB_AT_TP)
      /* The TCB follows the TLS blocks.  Back up to free the whole block.  */
      tcb -= GL(dl_tls_static_size) - TLS_TCB_SIZE;
# elif defined(TLS_DTV_AT_TP)
      /* Back up the TLS_PRE_TCB_SIZE bytes.  */
      tcb -= (TLS_PRE_TCB_SIZE + GL(dl_tls_static_align) - 1)
             & ~(GL(dl_tls_static_align) - 1);
# endif
      free (tcb);
    }
}

# ifdef SHARED
/* The __tls_get_addr function has two basic forms which differ in the
   arguments.  The IA-64 form takes two parameters, the module ID and
   offset.  The form used, among others, on IA-32 takes a reference to
   a special structure which contains the same information.  The second
   form seems to be more often used (at the moment) so we default to
   it.  Users of the IA-64 form have to provide adequate definitions
   of the following macros.  */
# ifndef GET_ADDR_ARGS
#  define GET_ADDR_ARGS		tls_index *ti
# endif
# ifndef GET_ADDR_MODULE
#  define GET_ADDR_MODULE	ti->ti_module
# endif
# ifndef GET_ADDR_OFFSET
#  define GET_ADDR_OFFSET	ti->ti_offset
# endif
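
/* For reference, the structure behind the default form is typically declared
   in dl-tls.h roughly as follows (the exact field types are per-port, so
   treat this as a sketch rather than the authoritative definition):

     typedef struct
     {
       unsigned long int ti_module;
       unsigned long int ti_offset;
     } tls_index;  */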

static void *
allocate_and_init (struct link_map *map)
{
  void *newp;

  newp = _dl_memalign (map->l_tls_align, map->l_tls_blocksize);
  if (newp == NULL)
    oom ();

  /* Initialize the memory.  */
  _dl_memset (_dl_mempcpy (newp, map->l_tls_initimage, map->l_tls_initimage_size),
              '\0', map->l_tls_blocksize - map->l_tls_initimage_size);

  return newp;
}

struct link_map *
_dl_update_slotinfo (unsigned long int req_modid)
{
  struct link_map *the_map = NULL;
  dtv_t *dtv = THREAD_DTV ();

  /* The global dl_tls_dtv_slotinfo array contains for each module
     index the generation counter current when the entry was created.
     This array never shrinks so that all module indices which were
     valid at some time can be used to access it.  Before the first
     use of a new module index in this function the array was extended
     appropriately.  Access also does not have to be guarded against
     modifications of the array.  It is assumed that pointer-size
     values can be read atomically even in SMP environments.  It is
     possible that other threads at the same time dynamically load
     code and therefore add to the slotinfo list.  This is a problem
     since we must not pick up any information about incomplete work.
     The solution to this is to ignore all dtv slots which were
     created after the one we are currently interested in.  We know that
     dynamic loading for this module is completed and this is the last
     load operation we know finished.  */
  unsigned long int idx = req_modid;
  struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);

  while (idx >= listp->len)
    {
      idx -= listp->len;
      listp = listp->next;
    }

  if (dtv[0].counter < listp->slotinfo[idx].gen)
    {
      /* The generation counter for the slot is higher than what the
         current dtv implements.  We have to update the whole dtv but
         only those entries with a generation counter <= the one for
         the entry we need.  */
      size_t new_gen = listp->slotinfo[idx].gen;
      size_t total = 0;

      /* We have to look through the entire dtv slotinfo list.  */
      listp = GL(dl_tls_dtv_slotinfo_list);
      do
        {
          size_t cnt;

          for (cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt)
            {
              size_t gen = listp->slotinfo[cnt].gen;

              if (gen > new_gen)
                /* This is a slot for a generation younger than the
                   one we are handling now.  It might be incompletely
                   set up so ignore it.  */
                continue;

              /* If the entry is older than the current dtv layout we
                 know we don't have to handle it.  */
              if (gen <= dtv[0].counter)
                continue;

              /* If there is no map this means the entry is empty.  */
              struct link_map *map = listp->slotinfo[cnt].map;
              if (map == NULL)
                {
                  /* If this modid was used at some point the memory
                     might still be allocated.  */
                  if (! dtv[total + cnt].pointer.is_static
                      && dtv[total + cnt].pointer.val != TLS_DTV_UNALLOCATED)
                    {
                      free (dtv[total + cnt].pointer.val);
                      dtv[total + cnt].pointer.val = TLS_DTV_UNALLOCATED;
                    }

                  continue;
                }

              /* Check whether the current dtv array is large enough.  */
              size_t modid = map->l_tls_modid;
              assert (total + cnt == modid);
              if (dtv[-1].counter < modid)
                {
                  /* Reallocate the dtv.  */
                  dtv_t *newp;
                  size_t newsize = GL(dl_tls_max_dtv_idx) + DTV_SURPLUS;
                  size_t oldsize = dtv[-1].counter;

                  assert (map->l_tls_modid <= newsize);

                  if (dtv == GL(dl_initial_dtv))
                    {
                      /* This is the initial dtv that was allocated
                         during rtld startup using the dl-minimal.c
                         malloc instead of the real malloc.  We can't
                         free it, we have to abandon the old storage.  */

                      newp = malloc ((2 + newsize) * sizeof (dtv_t));
                      if (newp == NULL)
                        oom ();
                      _dl_memcpy (newp, &dtv[-1], oldsize * sizeof (dtv_t));
                    }
                  else
                    {
                      newp = realloc (&dtv[-1],
                                      (2 + newsize) * sizeof (dtv_t));
                      if (newp == NULL)
                        oom ();
                    }

                  newp[0].counter = newsize;

                  /* Clear the newly allocated part.  */
                  _dl_memset (newp + 2 + oldsize, '\0',
                              (newsize - oldsize) * sizeof (dtv_t));

                  /* Point dtv to the generation counter.  */
                  dtv = &newp[1];

                  /* Install this new dtv in the thread data
                     structures.  */
                  INSTALL_NEW_DTV (dtv);
                }

              /* If there is currently memory allocated for this
                 dtv entry, free it.  */
              /* XXX Ideally we will at some point create a memory
                 pool.  */
              if (! dtv[modid].pointer.is_static
                  && dtv[modid].pointer.val != TLS_DTV_UNALLOCATED)
                /* Note that free is called for NULL as well.  We
                   deallocate even if it is this dtv entry we are
                   supposed to load.  The reason is that we call
                   memalign and not malloc.  */
                free (dtv[modid].pointer.val);

              /* This module is loaded dynamically.  We defer memory
                 allocation.  */
              dtv[modid].pointer.is_static = false;
              dtv[modid].pointer.val = TLS_DTV_UNALLOCATED;

              if (modid == req_modid)
                the_map = map;
            }

          total += listp->len;
        }
      while ((listp = listp->next) != NULL);

      /* This will be the new maximum generation counter.  */
      dtv[0].counter = new_gen;
    }

  return the_map;
}
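
/* Sketch of how a general-dynamic TLS access reaches this code (the actual
   call sequence is emitted by the compiler and is architecture specific):
   the relocated GOT holds a tls_index { module, offset }, and the access
   effectively performs

     void *addr = __tls_get_addr (&ti);

   which below resolves to the dtv entry for ti_module plus ti_offset,
   allocating the module's block on first use.  */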

/* The generic dynamic and local dynamic model cannot be used in
   statically linked applications.  */
void *
__tls_get_addr (GET_ADDR_ARGS)
{
  dtv_t *dtv = THREAD_DTV ();
  struct link_map *the_map = NULL;
  void *p;

  if (__builtin_expect (dtv[0].counter != GL(dl_tls_generation), 0))
    the_map = _dl_update_slotinfo (GET_ADDR_MODULE);

  p = dtv[GET_ADDR_MODULE].pointer.val;

  if (__builtin_expect (p == TLS_DTV_UNALLOCATED, 0))
    {
      /* The allocation was deferred.  Do it now.  */
      if (the_map == NULL)
        {
          /* Find the link map for this module.  */
          size_t idx = GET_ADDR_MODULE;
          struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);

          while (idx >= listp->len)
            {
              idx -= listp->len;
              listp = listp->next;
            }

          the_map = listp->slotinfo[idx].map;
        }

      p = dtv[GET_ADDR_MODULE].pointer.val = allocate_and_init (the_map);
      dtv[GET_ADDR_MODULE].pointer.is_static = false;
    }

  return (char *) p + GET_ADDR_OFFSET;
}
# endif
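
/* Record a newly loaded module in the dtv slotinfo table.  The entry's
   generation is set to dl_tls_generation + 1; the caller is expected to bump
   the global generation counter once after all new modules have been entered
   (an assumption based on how the gen field is set here and consumed in
   _dl_update_slotinfo).  */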
void _dl_add_to_slotinfo (struct link_map *l);
void
_dl_add_to_slotinfo (struct link_map *l)
{
  /* Now that we know the object is loaded successfully add
     modules containing TLS data to the dtv info table.  We
     might have to increase its size.  */
  struct dtv_slotinfo_list *listp;
  struct dtv_slotinfo_list *prevp;
  size_t idx = l->l_tls_modid;

  /* Find the place in the dtv slotinfo list.  */
  listp = GL(dl_tls_dtv_slotinfo_list);
  prevp = NULL;		/* Needed to shut up gcc.  */
  do
    {
      /* Does it fit in the array of this list element?  */
      if (idx < listp->len)
        break;
      idx -= listp->len;
      prevp = listp;
      listp = listp->next;
    }
  while (listp != NULL);

  if (listp == NULL)
    {
      /* When we come here it means we have to add a new element
         to the slotinfo list.  And the new module must be in
         the first slot.  */
      assert (idx == 0);

      listp = prevp->next = (struct dtv_slotinfo_list *)
        malloc (sizeof (struct dtv_slotinfo_list)
                + TLS_SLOTINFO_SURPLUS * sizeof (struct dtv_slotinfo));
      if (listp == NULL)
        {
          /* We ran out of memory.  We will simply fail this
             call but don't undo anything we did so far.  The
             application will crash or be terminated anyway very
             soon.  */

          /* We have to do this since some entries in the dtv
             slotinfo array might already point to this
             generation.  */
          ++GL(dl_tls_generation);

          _dl_dprintf (_dl_debug_file,
                       "cannot create TLS data structures: ABORT\n");
          _dl_exit (127);
        }

      listp->len = TLS_SLOTINFO_SURPLUS;
      listp->next = NULL;
      _dl_memset (listp->slotinfo, '\0',
                  TLS_SLOTINFO_SURPLUS * sizeof (struct dtv_slotinfo));
    }

  /* Add the information into the slotinfo data structure.  */
  listp->slotinfo[idx].map = l;
  listp->slotinfo[idx].gen = GL(dl_tls_generation) + 1;
}