i386: Change offset of __private_ss to 0x30 [BZ #23250]
[glibc.git] sysdeps/i386/nptl/tls.h
blob afb71ce4310d8aa78b9c70ae799d262b3c73462b
/* Definition for thread-local data handling.  nptl/i386 version.
   Copyright (C) 2002-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef _TLS_H
#define _TLS_H  1

#include <dl-sysdep.h>

#ifndef __ASSEMBLER__
# include <stdbool.h>
# include <stddef.h>
# include <stdint.h>
# include <stdlib.h>
# include <sysdep.h>
# include <libc-pointer-arith.h> /* For cast_to_integer.  */
# include <kernel-features.h>
# include <dl-dtv.h>

typedef struct
{
  void *tcb;            /* Pointer to the TCB.  Not necessarily the
                           thread descriptor used by libpthread.  */
  dtv_t *dtv;
  void *self;           /* Pointer to the thread descriptor.  */
  int multiple_threads;
  uintptr_t sysinfo;
  uintptr_t stack_guard;
  uintptr_t pointer_guard;
  int gscope_flag;
  int __glibc_reserved1;
  /* Reservation of some values for the TM ABI.  */
  void *__private_tm[3];
  /* GCC split stack support.  */
  void *__private_ss;
  void *__glibc_reserved2;
} tcbhead_t;

/* morestack.S in libgcc uses offset 0x30 to access __private_ss.  */
_Static_assert (offsetof (tcbhead_t, __private_ss) == 0x30,
                "offset of __private_ss != 0x30");

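/* For reference: with the usual i386 ABI (4-byte ints and pointers, no
   padding) the member offsets above are tcb 0x00, dtv 0x04, self 0x08,
   multiple_threads 0x0c, sysinfo 0x10, stack_guard 0x14,
   pointer_guard 0x18, gscope_flag 0x1c, __glibc_reserved1 0x20,
   __private_tm 0x24-0x2c, __private_ss 0x30 and __glibc_reserved2 0x34,
   which is what the assertion checks.  */
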
# define TLS_MULTIPLE_THREADS_IN_TCB 1

#else /* __ASSEMBLER__ */
# include <tcb-offsets.h>
#endif

/* Alignment requirement for the stack.  For IA-32 this is governed by
   the SSE memory functions.  */
#define STACK_ALIGN 16

#ifndef __ASSEMBLER__
/* Get system call information.  */
# include <sysdep.h>

/* The old way: using LDT.  */

/* Structure passed to `modify_ldt', 'set_thread_area', and 'clone' calls.  */
struct user_desc
{
  unsigned int entry_number;
  unsigned long int base_addr;
  unsigned int limit;
  unsigned int seg_32bit:1;
  unsigned int contents:2;
  unsigned int read_exec_only:1;
  unsigned int limit_in_pages:1;
  unsigned int seg_not_present:1;
  unsigned int useable:1;
  unsigned int empty:25;
};

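/* This layout mirrors the kernel's struct user_desc from <asm/ldt.h>.
   Passing an entry_number of -1 to set_thread_area asks the kernel to
   pick a free TLS slot in the GDT and to write the chosen index back
   into this field.  */
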
/* Initializing bit fields is slow.  We speed it up by using a union.  */
union user_desc_init
{
  struct user_desc desc;
  unsigned int vals[4];
};

/* This is the size of the initial TCB.  Can't be just sizeof (tcbhead_t),
   because NPTL getpid, __libc_alloca_cutoff etc. need (almost) the whole
   struct pthread even when not linked with -lpthread.  */
# define TLS_INIT_TCB_SIZE sizeof (struct pthread)

/* Alignment requirements for the initial TCB.  */
# define TLS_INIT_TCB_ALIGN __alignof__ (struct pthread)

/* This is the size of the TCB.  */
# define TLS_TCB_SIZE sizeof (struct pthread)

/* Alignment requirements for the TCB.  */
# define TLS_TCB_ALIGN __alignof__ (struct pthread)

/* The TCB can have any size and the memory following the address the
   thread pointer points to is unspecified.  Allocate the TCB there.  */
# define TLS_TCB_AT_TP  1
# define TLS_DTV_AT_TP  0

/* Get the thread descriptor definition.  */
# include <nptl/descr.h>

/* Install the dtv pointer.  The pointer passed is to the element with
   index -1 which contains the length.  */
# define INSTALL_DTV(descr, dtvp) \
  ((tcbhead_t *) (descr))->dtv = (dtvp) + 1

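/* That is, the stored pointer is dtvp + 1, so the length/counter element
   at dtvp[0] remains reachable as ((tcbhead_t *) (descr))->dtv[-1].  */
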
/* Install new dtv for current thread.  */
# define INSTALL_NEW_DTV(dtvp) \
  ({ struct pthread *__pd; \
     THREAD_SETMEM (__pd, header.dtv, (dtvp)); })

/* Return dtv of given thread descriptor.  */
# define GET_DTV(descr) \
  (((tcbhead_t *) (descr))->dtv)

/* Macros to load from and store into segment registers.  */
# ifndef TLS_GET_GS
#  define TLS_GET_GS() \
  ({ int __seg; __asm ("movw %%gs, %w0" : "=q" (__seg)); __seg & 0xffff; })
# endif
# ifndef TLS_SET_GS
#  define TLS_SET_GS(val) \
  __asm ("movw %w0, %%gs" :: "q" (val))
# endif

#ifdef NEED_DL_SYSINFO
# define INIT_SYSINFO \
  _head->sysinfo = GLRO(dl_sysinfo)
# define SETUP_THREAD_SYSINFO(pd) \
  ((pd)->header.sysinfo = THREAD_GETMEM (THREAD_SELF, header.sysinfo))
# define CHECK_THREAD_SYSINFO(pd) \
  assert ((pd)->header.sysinfo == THREAD_GETMEM (THREAD_SELF, header.sysinfo))
#else
# define INIT_SYSINFO
#endif

#ifndef LOCK_PREFIX
# ifdef UP
#  define LOCK_PREFIX   /* nothing */
# else
#  define LOCK_PREFIX   "lock;"
# endif
#endif

static inline void __attribute__ ((unused, always_inline))
tls_fill_user_desc (union user_desc_init *desc,
                    unsigned int entry_number,
                    void *pd)
{
  desc->vals[0] = entry_number;
  /* The 'base_addr' field.  Pointer to the TCB.  */
  desc->vals[1] = (unsigned long int) pd;
  /* The 'limit' field.  We use 4GB which is 0xfffff pages.  */
  desc->vals[2] = 0xfffff;
  /* Collapsed value of the bitfield:
       .seg_32bit = 1
       .contents = 0
       .read_exec_only = 0
       .limit_in_pages = 1
       .seg_not_present = 0
       .useable = 1 */
  desc->vals[3] = 0x51;
}

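/* A quick check of the collapsed value above, assuming the usual i386
   low-to-high bit-field allocation that the union trick relies on:
   seg_32bit is bit 0 (0x01), limit_in_pages is bit 4 (0x10) and useable
   is bit 6 (0x40); all other fields are zero, giving 0x51.  */
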
/* Code to initially initialize the thread pointer.  This might need
   special attention since 'errno' is not yet available and if the
   operation can cause a failure 'errno' must not be touched.  */
# define TLS_INIT_TP(thrdescr) \
  ({ void *_thrdescr = (thrdescr); \
     tcbhead_t *_head = _thrdescr; \
     union user_desc_init _segdescr; \
     int _result; \
\
     _head->tcb = _thrdescr; \
     /* For now the thread descriptor is at the same address.  */ \
     _head->self = _thrdescr; \
     /* New syscall handling support.  */ \
     INIT_SYSINFO; \
\
     /* Let the kernel pick a value for the 'entry_number' field.  */ \
     tls_fill_user_desc (&_segdescr, -1, _thrdescr); \
\
     /* Install the TLS.  */ \
     INTERNAL_SYSCALL_DECL (err); \
     _result = INTERNAL_SYSCALL (set_thread_area, err, 1, &_segdescr.desc); \
\
     if (_result == 0) \
       /* We know the index in the GDT, now load the segment register. \
          The use of the GDT is described by the value 3 in the lower \
          three bits of the segment descriptor value. \
\
          Note that we have to do this even if the numeric value of \
          the descriptor does not change.  Loading the segment register \
          causes the segment information from the GDT to be loaded \
          which is necessary since we have changed it.  */ \
       TLS_SET_GS (_segdescr.desc.entry_number * 8 + 3); \
\
     _result == 0 ? NULL \
     : "set_thread_area failed when setting up thread-local storage\n"; })

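/* The value loaded into %gs is a segment selector: bits 3 and up hold the
   GDT index returned by the kernel in entry_number, bit 2 (TI = 0) selects
   the GDT rather than the LDT, and the low two bits request privilege
   level 3.  With the usual first TLS slot of 6, for example, the selector
   would be 6 * 8 + 3 = 0x33.  */
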
# define TLS_DEFINE_INIT_TP(tp, pd) \
  union user_desc_init _segdescr; \
  /* Find the 'entry_number' field that the kernel selected in TLS_INIT_TP. \
     The first three bits of the segment register value select the GDT, \
     ignore them.  We get the index from the value of the %gs register in \
     the current thread.  */ \
  tls_fill_user_desc (&_segdescr, TLS_GET_GS () >> 3, pd); \
  const struct user_desc *tp = &_segdescr.desc

/* Return the address of the dtv for the current thread.  */
# define THREAD_DTV() \
  ({ struct pthread *__pd; \
     THREAD_GETMEM (__pd, header.dtv); })

/* Return the thread descriptor for the current thread.

   The contained asm must *not* be marked volatile since otherwise
   assignments like
        pthread_descr self = thread_self();
   do not get optimized away.  */
# define THREAD_SELF \
  ({ struct pthread *__self; \
     asm ("movl %%gs:%c1,%0" : "=r" (__self) \
          : "i" (offsetof (struct pthread, header.self))); \
     __self; })

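/* TLS_INIT_TP stored the descriptor address in the 'self' member of
   tcbhead_t, which the header of struct pthread overlays, so the lookup
   above is a single movl from %gs:8.  */
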
/* Magic for libthread_db to know how to do THREAD_SELF.  */
# define DB_THREAD_SELF \
  REGISTER_THREAD_AREA (32, offsetof (struct user_regs_struct, xgs), 3) \
  REGISTER_THREAD_AREA (64, 26 * 8, 3) /* x86-64's user_regs_struct->gs */

/* Read member of the thread descriptor directly.  */
# define THREAD_GETMEM(descr, member) \
  ({ __typeof (descr->member) __value; \
     if (sizeof (__value) == 1) \
       asm volatile ("movb %%gs:%P2,%b0" \
                     : "=q" (__value) \
                     : "0" (0), "i" (offsetof (struct pthread, member))); \
     else if (sizeof (__value) == 4) \
       asm volatile ("movl %%gs:%P1,%0" \
                     : "=r" (__value) \
                     : "i" (offsetof (struct pthread, member))); \
     else \
       { \
         if (sizeof (__value) != 8) \
           /* There should not be any value with a size other than 1, \
              4 or 8.  */ \
           abort (); \
\
         asm volatile ("movl %%gs:%P1,%%eax\n\t" \
                       "movl %%gs:%P2,%%edx" \
                       : "=A" (__value) \
                       : "i" (offsetof (struct pthread, member)), \
                         "i" (offsetof (struct pthread, member) + 4)); \
       } \
     __value; })

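/* Notes on the asm above: the "%P1"/"%P2" operand modifier prints the
   offsetof constant as a bare number so it can be used as a %gs-relative
   displacement, and the "=A" constraint in the 8-byte case names the
   %edx:%eax register pair, matching the two 32-bit loads.  */
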
/* Same as THREAD_GETMEM, but the member offset can be non-constant.  */
# define THREAD_GETMEM_NC(descr, member, idx) \
  ({ __typeof (descr->member[0]) __value; \
     if (sizeof (__value) == 1) \
       asm volatile ("movb %%gs:%P2(%3),%b0" \
                     : "=q" (__value) \
                     : "0" (0), "i" (offsetof (struct pthread, member[0])), \
                       "r" (idx)); \
     else if (sizeof (__value) == 4) \
       asm volatile ("movl %%gs:%P1(,%2,4),%0" \
                     : "=r" (__value) \
                     : "i" (offsetof (struct pthread, member[0])), \
                       "r" (idx)); \
     else \
       { \
         if (sizeof (__value) != 8) \
           /* There should not be any value with a size other than 1, \
              4 or 8.  */ \
           abort (); \
\
         asm volatile ("movl %%gs:%P1(,%2,8),%%eax\n\t" \
                       "movl %%gs:4+%P1(,%2,8),%%edx" \
                       : "=&A" (__value) \
                       : "i" (offsetof (struct pthread, member[0])), \
                         "r" (idx)); \
       } \
     __value; })

/* Set member of the thread descriptor directly.  */
# define THREAD_SETMEM(descr, member, value) \
  ({ if (sizeof (descr->member) == 1) \
       asm volatile ("movb %b0,%%gs:%P1" : \
                     : "iq" (value), \
                       "i" (offsetof (struct pthread, member))); \
     else if (sizeof (descr->member) == 4) \
       asm volatile ("movl %0,%%gs:%P1" : \
                     : "ir" (value), \
                       "i" (offsetof (struct pthread, member))); \
     else \
       { \
         if (sizeof (descr->member) != 8) \
           /* There should not be any value with a size other than 1, \
              4 or 8.  */ \
           abort (); \
\
         asm volatile ("movl %%eax,%%gs:%P1\n\t" \
                       "movl %%edx,%%gs:%P2" : \
                       : "A" ((uint64_t) cast_to_integer (value)), \
                         "i" (offsetof (struct pthread, member)), \
                         "i" (offsetof (struct pthread, member) + 4)); \
       }})

/* Same as THREAD_SETMEM, but the member offset can be non-constant.  */
# define THREAD_SETMEM_NC(descr, member, idx, value) \
  ({ if (sizeof (descr->member[0]) == 1) \
       asm volatile ("movb %b0,%%gs:%P1(%2)" : \
                     : "iq" (value), \
                       "i" (offsetof (struct pthread, member)), \
                       "r" (idx)); \
     else if (sizeof (descr->member[0]) == 4) \
       asm volatile ("movl %0,%%gs:%P1(,%2,4)" : \
                     : "ir" (value), \
                       "i" (offsetof (struct pthread, member)), \
                       "r" (idx)); \
     else \
       { \
         if (sizeof (descr->member[0]) != 8) \
           /* There should not be any value with a size other than 1, \
              4 or 8.  */ \
           abort (); \
\
         asm volatile ("movl %%eax,%%gs:%P1(,%2,8)\n\t" \
                       "movl %%edx,%%gs:4+%P1(,%2,8)" : \
                       : "A" ((uint64_t) cast_to_integer (value)), \
                         "i" (offsetof (struct pthread, member)), \
                         "r" (idx)); \
       }})

/* Atomic compare and exchange on TLS, returning old value.  */
#define THREAD_ATOMIC_CMPXCHG_VAL(descr, member, newval, oldval) \
  ({ __typeof (descr->member) __ret; \
     __typeof (oldval) __old = (oldval); \
     if (sizeof (descr->member) == 4) \
       asm volatile (LOCK_PREFIX "cmpxchgl %2, %%gs:%P3" \
                     : "=a" (__ret) \
                     : "0" (__old), "r" (newval), \
                       "i" (offsetof (struct pthread, member))); \
     else \
       /* Not necessary for other sizes at the moment.  */ \
       abort (); \
     __ret; })

/* Atomic logical and.  */
#define THREAD_ATOMIC_AND(descr, member, val) \
  (void) ({ if (sizeof ((descr)->member) == 4) \
              asm volatile (LOCK_PREFIX "andl %1, %%gs:%P0" \
                            :: "i" (offsetof (struct pthread, member)), \
                               "ir" (val)); \
            else \
              /* Not necessary for other sizes at the moment.  */ \
              abort (); })

/* Atomic set bit.  */
#define THREAD_ATOMIC_BIT_SET(descr, member, bit) \
  (void) ({ if (sizeof ((descr)->member) == 4) \
              asm volatile (LOCK_PREFIX "orl %1, %%gs:%P0" \
                            :: "i" (offsetof (struct pthread, member)), \
                               "ir" (1 << (bit))); \
            else \
              /* Not necessary for other sizes at the moment.  */ \
              abort (); })

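/* Note that these atomic helpers address %gs:offset directly, i.e. they
   always act on the current thread's descriptor; the descr argument is
   only used for its type.  A hypothetical use would be
   THREAD_ATOMIC_BIT_SET (THREAD_SELF, header.gscope_flag, 0) to set bit 0
   of an int-sized member.  */
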
/* Set the stack guard field in TCB head.  */
#define THREAD_SET_STACK_GUARD(value) \
  THREAD_SETMEM (THREAD_SELF, header.stack_guard, value)
#define THREAD_COPY_STACK_GUARD(descr) \
  ((descr)->header.stack_guard \
   = THREAD_GETMEM (THREAD_SELF, header.stack_guard))

/* Set the pointer guard field in the TCB head.  */
#define THREAD_SET_POINTER_GUARD(value) \
  THREAD_SETMEM (THREAD_SELF, header.pointer_guard, value)
#define THREAD_COPY_POINTER_GUARD(descr) \
  ((descr)->header.pointer_guard \
   = THREAD_GETMEM (THREAD_SELF, header.pointer_guard))

/* Get and set the global scope generation counter in the TCB head.  */
#define THREAD_GSCOPE_IN_TCB      1
#define THREAD_GSCOPE_FLAG_UNUSED 0
#define THREAD_GSCOPE_FLAG_USED   1
#define THREAD_GSCOPE_FLAG_WAIT   2
#define THREAD_GSCOPE_RESET_FLAG() \
  do \
    { int __res; \
      asm volatile ("xchgl %0, %%gs:%P1" \
                    : "=r" (__res) \
                    : "i" (offsetof (struct pthread, header.gscope_flag)), \
                      "0" (THREAD_GSCOPE_FLAG_UNUSED)); \
      if (__res == THREAD_GSCOPE_FLAG_WAIT) \
        lll_futex_wake (&THREAD_SELF->header.gscope_flag, 1, LLL_PRIVATE); \
    } \
  while (0)
#define THREAD_GSCOPE_SET_FLAG() \
  THREAD_SETMEM (THREAD_SELF, header.gscope_flag, THREAD_GSCOPE_FLAG_USED)
#define THREAD_GSCOPE_WAIT() \
  GL(dl_wait_lookup_done) ()

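/* Roughly, the protocol is: a thread doing a lookup sets the flag to USED
   (THREAD_GSCOPE_SET_FLAG); the dynamic linker, when it needs to wait,
   changes a USED flag to WAIT and blocks on the futex; RESET_FLAG then
   swaps in UNUSED atomically with xchgl and, if the old value was WAIT,
   wakes the waiter via lll_futex_wake.  */
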
#endif /* __ASSEMBLER__ */

#endif /* tls.h */