/*
 * Copyright (C) 2017 Waldemar Brodkorb <wbx@uclibc-ng.org>
 * Ported from GNU C Library
 * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball.
 */
/* Thread-local storage handling in the ELF dynamic linker.

   Copyright (C) 2011-2017 Free Software Foundation, Inc.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>. */
#if defined __UCLIBC_HAS_TLS__

/* PTR_REG(n) expands to the 64-bit general-purpose register x<n>, so
   the code below can be written in terms of pointer-sized registers.  */
#define PTR_REG(n) x##n
/* Pointer size on AArch64: log2 is 3, i.e. PTR_SIZE is 8 bytes.  */
#define PTR_LOG_SIZE 3
#define PTR_SIZE (1<<PTR_LOG_SIZE)
/* Number of q-register pairs pushed by SAVE_Q_REGISTERS: 16 pairs
   cover all 32 SIMD/FP registers q0..q31 (32 bytes per pair).  */
#define NSAVEDQREGPAIRS 16

/* Spill q0..q31 into a freshly allocated 32*NSAVEDQREGPAIRS-byte stack
   frame (pre-indexed push of the first pair allocates the whole frame);
   the CFA is adjusted to keep unwind info correct.  Used around the
   slow-path call so the callee cannot clobber the caller's SIMD state.  */
#define SAVE_Q_REGISTERS \
  stp q0, q1, [sp, #-32*NSAVEDQREGPAIRS]!; \
  cfi_adjust_cfa_offset (32*NSAVEDQREGPAIRS); \
  stp q2, q3, [sp, #32*1]; \
  stp q4, q5, [sp, #32*2]; \
  stp q6, q7, [sp, #32*3]; \
  stp q8, q9, [sp, #32*4]; \
  stp q10, q11, [sp, #32*5]; \
  stp q12, q13, [sp, #32*6]; \
  stp q14, q15, [sp, #32*7]; \
  stp q16, q17, [sp, #32*8]; \
  stp q18, q19, [sp, #32*9]; \
  stp q20, q21, [sp, #32*10]; \
  stp q22, q23, [sp, #32*11]; \
  stp q24, q25, [sp, #32*12]; \
  stp q26, q27, [sp, #32*13]; \
  stp q28, q29, [sp, #32*14]; \
  stp q30, q31, [sp, #32*15];
/* Reload q0..q31 from the frame pushed by SAVE_Q_REGISTERS; the final
   post-indexed load of q0/q1 also deallocates the whole frame, and the
   CFA adjustment mirrors the one made on entry.  Restore order does not
   matter except that q0/q1 must come last (their load pops the frame).  */
#define RESTORE_Q_REGISTERS \
  ldp q2, q3, [sp, #32*1]; \
  ldp q4, q5, [sp, #32*2]; \
  ldp q6, q7, [sp, #32*3]; \
  ldp q8, q9, [sp, #32*4]; \
  ldp q10, q11, [sp, #32*5]; \
  ldp q12, q13, [sp, #32*6]; \
  ldp q14, q15, [sp, #32*7]; \
  ldp q16, q17, [sp, #32*8]; \
  ldp q18, q19, [sp, #32*9]; \
  ldp q20, q21, [sp, #32*10]; \
  ldp q22, q23, [sp, #32*11]; \
  ldp q24, q25, [sp, #32*12]; \
  ldp q26, q27, [sp, #32*13]; \
  ldp q28, q29, [sp, #32*14]; \
  ldp q30, q31, [sp, #32*15]; \
  ldp q0, q1, [sp], #32*NSAVEDQREGPAIRS; \
  cfi_adjust_cfa_offset (-32*NSAVEDQREGPAIRS);
/* Compute the thread pointer offset for symbols in the static
   TLS block. The offset is the same for all threads.

   _dl_tlsdesc_return (tlsdesc *) ;  */
.hidden _dl_tlsdesc_return
.global _dl_tlsdesc_return
.type _dl_tlsdesc_return,%function
/* NOTE(review): the instruction body of _dl_tlsdesc_return (between
   the .type and .size directives) is not visible in this excerpt of
   the file — confirm against the full source.  */
.size _dl_tlsdesc_return, .-_dl_tlsdesc_return
/* Handler for dynamic TLS symbols.

   _dl_tlsdesc_dynamic (tlsdesc *) ;

   The second word of the descriptor points to a
   tlsdesc_dynamic_arg structure.

   Returns the offset between the thread pointer and the
   object referenced by the argument.

   __attribute__ ((__regparm__ (1)))
   _dl_tlsdesc_dynamic (struct tlsdesc *tdp)
   {
     struct tlsdesc_dynamic_arg *td = tdp->arg;
     dtv_t *dtv = *(dtv_t **)((char *)__thread_pointer + TCBHEAD_DTV);
     if (__builtin_expect (td->gen_count <= dtv[0].counter
			   && (dtv[td->tlsinfo.ti_module].pointer.val
			       != TLS_DTV_UNALLOCATED),
			   1))
       return dtv[td->tlsinfo.ti_module].pointer.val
	      + td->tlsinfo.ti_offset;

     return ___tls_get_addr (&td->tlsinfo) - __thread_pointer;
   }  */
.hidden _dl_tlsdesc_dynamic
.global _dl_tlsdesc_dynamic
.type _dl_tlsdesc_dynamic,%function
/* Fast path: two x-register pairs saved above the 32-byte frame area.  */
# define NSAVEXREGPAIRS 2
stp x29, x30, [sp,#-(32+16*NSAVEXREGPAIRS)]!
cfi_adjust_cfa_offset (32+16*NSAVEXREGPAIRS)

/* Save just enough registers to support fast path, if we fall
   into slow path we will save additional registers. */
stp x1, x2, [sp, #32+16*0]
stp x3, x4, [sp, #32+16*1]

/* The ldar here happens after the load from [x0] at the call site
   (that is generated by the compiler as part of the TLS access ABI),
   so it reads the same value (this function is the final value of
   td->entry) and thus it synchronizes with the release store to
   td->entry in _dl_tlsdesc_resolve_rela_fixup ensuring that the load
   from [x0,#PTR_SIZE] here happens after the initialization of td->arg. */
ldar PTR_REG (zr), [x0]		/* Acquire-ordered load, value discarded.  */
ldr PTR_REG (1), [x0,#TLSDESC_ARG]	/* x1 = td = tdp->arg.  */
ldr PTR_REG (0), [x4,#TCBHEAD_DTV]	/* x0 = dtv.  NOTE(review): x4 is
					   presumably the thread pointer,
					   loaded by an mrs that is not
					   visible in this excerpt — confirm.  */
ldr PTR_REG (3), [x1,#TLSDESC_GEN_COUNT]	/* x3 = td->gen_count.  */
ldr PTR_REG (2), [x0,#DTV_COUNTER]		/* x2 = dtv[0].counter.  */
cmp PTR_REG (3), PTR_REG (2)
/* NOTE(review): the conditional branch to the slow path taken after this
   generation-count compare is not visible in this excerpt.  */
ldr PTR_REG (2), [x1,#TLSDESC_MODID]
/* Index dtv[ti_module]; each dtv_t entry is two pointers wide, hence
   the shift by PTR_LOG_SIZE + 1.  */
add PTR_REG (0), PTR_REG (0), PTR_REG (2), lsl #(PTR_LOG_SIZE + 1)
ldr PTR_REG (0), [x0] /* Load val member of DTV entry. */
cmp x0, #TLS_DTV_UNALLOCATED
/* NOTE(review): the branch taken when the entry is unallocated is not
   visible in this excerpt.  */
ldr PTR_REG (1), [x1,#TLSDESC_MODOFF]
add PTR_REG (0), PTR_REG (0), PTR_REG (1)	/* x0 = val + ti_offset.  */
sub PTR_REG (0), PTR_REG (0), PTR_REG (4)	/* Make x0 TP-relative.  */

/* Restore the fast-path registers and pop the frame.  */
ldp x1, x2, [sp, #32+16*0]
ldp x3, x4, [sp, #32+16*1]
ldp x29, x30, [sp], #(32+16*NSAVEXREGPAIRS)
cfi_adjust_cfa_offset (-32-16*NSAVEXREGPAIRS)
# undef NSAVEXREGPAIRS

/* This is the slow path. We need to call __tls_get_addr() which
   means we need to save and restore all the register that the
   callee will trash. */

/* Save the remaining registers that we must treat as caller save. */
# define NSAVEXREGPAIRS 7
stp x5, x6, [sp, #-16*NSAVEXREGPAIRS]!
cfi_adjust_cfa_offset (16*NSAVEXREGPAIRS)
stp x7, x8, [sp, #16*1]
stp x9, x10, [sp, #16*2]
stp x11, x12, [sp, #16*3]
stp x13, x14, [sp, #16*4]
stp x15, x16, [sp, #16*5]
stp x17, x18, [sp, #16*6]

/* NOTE(review): the call into the TLS resolver (and the SIMD register
   save/restore via SAVE_Q_REGISTERS/RESTORE_Q_REGISTERS) around this
   point is not visible in this excerpt — confirm against the full
   source.  x0 presumably holds the resolver's result and x1 the thread
   pointer here.  */
sub PTR_REG (0), PTR_REG (0), PTR_REG (1)	/* Make result TP-relative.  */

ldp x7, x8, [sp, #16*1]
ldp x9, x10, [sp, #16*2]
ldp x11, x12, [sp, #16*3]
ldp x13, x14, [sp, #16*4]
ldp x15, x16, [sp, #16*5]
ldp x17, x18, [sp, #16*6]
ldp x5, x6, [sp], #16*NSAVEXREGPAIRS	/* Final pop deallocates frame.  */
cfi_adjust_cfa_offset (-16*NSAVEXREGPAIRS)
.size _dl_tlsdesc_dynamic, .-_dl_tlsdesc_dynamic
# undef NSAVEXREGPAIRS
207 #endif // __UCLIBC_HAS_TLS__