/* Definitions for thread-local data handling.  Hurd/i386 version.
   Copyright (C) 2003-2024 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#ifndef _I386_TLS_H
#define _I386_TLS_H

/* Some things really need not be machine-dependent.  */
#include <sysdeps/mach/hurd/tls.h>

#ifndef __ASSEMBLER__
# include <dl-dtv.h>

/* Type of the TCB.  */
typedef struct
{
  void *tcb;			/* Points to this structure.  */
  dtv_t *dtv;			/* Vector of pointers to TLS data.  */
  thread_t self_do_not_use;	/* This thread's control port.  */
  int multiple_threads;
  uintptr_t sysinfo;
  uintptr_t stack_guard;
  uintptr_t pointer_guard;
  int gscope_flag;
  unsigned int feature_1;
  /* Reservation of some values for the TM ABI.  */
  void *__private_tm[3];
  /* GCC split stack support.  */
  void *__private_ss;
  void *__glibc_padding1;

  /* Keep these fields last, so offsets of fields above can continue being
     compatible with the i386 Linux version.  */
  mach_port_t reply_port;	/* This thread's reply port.  */
  struct hurd_sigstate *_hurd_sigstate;

  /* Used by the exception handling implementation in the dynamic loader.  */
  struct rtld_catch *rtld_catch;
} tcbhead_t;

/* GCC generates %gs:0x14 to access the stack guard.  */
_Static_assert (offsetof (tcbhead_t, stack_guard) == 0x14,
                "stack guard offset");
/* libgcc uses %gs:0x30 to access the split stack pointer.  */
_Static_assert (offsetof (tcbhead_t, __private_ss) == 0x30,
                "split stack pointer offset");

/* Return tcbhead_t from a TLS segment descriptor.  */
# define HURD_DESC_TLS(desc) \
  ({ \
    (tcbhead_t *) (  (desc->low_word >> 16) \
                   | ((desc->high_word & 0xff) << 16) \
                   |  (desc->high_word & 0xff000000)); \
  })
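
/* Note: an i386 segment descriptor scatters the 32-bit base address across
   its two words: base bits 0..15 sit in the upper half of the low word,
   bits 16..23 in the lowest byte of the high word, and bits 24..31 in its
   top byte.  The macro above merely reassembles those three pieces into the
   TCB pointer.  */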

#endif

/* The TCB can have any size and the memory following the address the
   thread pointer points to is unspecified.  Allocate the TCB there.  */
#define TLS_TCB_AT_TP 1
#define TLS_DTV_AT_TP 0

/* Alignment requirement for TCB.

   Some processors such as Intel Atom pay a big penalty on every
   access using a segment override if that segment's base is not
   aligned to the size of a cache line.  (See Intel 64 and IA-32
   Architectures Optimization Reference Manual, section 13.3.3.3,
   "Segment Base".)  On such machines, a cache line is 64 bytes.  */
#define TCB_ALIGNMENT 64

#ifndef __ASSEMBLER__

/* Use i386-specific RPCs to arrange that %gs segment register prefix
   addresses the TCB in each thread.  */
# include <mach/i386/mach_i386.h>

# ifndef HAVE_I386_SET_GDT
#  define __i386_set_gdt(thr, sel, desc) ((void) (thr), (void) (sel), (void) (desc), MIG_BAD_ID)
# endif

# include <errno.h>
# include <assert.h>

# define HURD_TLS_DESC_DECL(desc, tcb) \
  struct descriptor desc = \
    {				/* low word: */ \
      0xffff			/* limit 0..15 */ \
      | (((unsigned int) (tcb)) << 16) /* base 0..15 */ \
      ,				/* high word: */ \
      ((((unsigned int) (tcb)) >> 16) & 0xff) /* base 16..23 */ \
      | ((0x12 | 0x60 | 0x80) << 8) /* access = ACC_DATA_W|ACC_PL_U|ACC_P */ \
      | (0xf << 16)		/* limit 16..19 */ \
      | ((4 | 8) << 20)		/* granularity = SZ_32|SZ_G */ \
      | (((unsigned int) (tcb)) & 0xff000000) /* base 24..31 */ \
    }
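
/* The descriptor declared above is a present, writable, DPL-3 data segment
   with 4 KiB granularity and a 0xfffff limit (i.e. the whole 4 GiB address
   space), whose base is the TCB address; only the base really matters,
   since a %gs-relative access simply adds it to the given offset.  */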

# define HURD_SEL_LDT(sel) (__builtin_expect ((sel) & 4, 0))

#ifndef SHARED
extern unsigned short __init1_desc;
# define __HURD_DESC_INITIAL(gs, ds) ((gs) == (ds) || (gs) == __init1_desc)
#else
# define __HURD_DESC_INITIAL(gs, ds) ((gs) == (ds))
#endif

#if !defined (SHARED) || IS_IN (rtld)
/* Return 1 if TLS is not initialized yet.  */
extern inline bool __attribute__ ((unused))
__LIBC_NO_TLS (void)
{
  unsigned short ds, gs;
  asm ("movw %%ds, %w0\n"
       "movw %%gs, %w1"
       : "=q" (ds), "=q" (gs));
  return __glibc_unlikely (__HURD_DESC_INITIAL (gs, ds));
}
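
/* Rationale: before _hurd_tls_init installs a dedicated TCB descriptor, %gs
   still holds the ordinary data-segment selector (or, in static builds, the
   preliminary selector recorded in __init1_desc by the early non-"full"
   initialization), so comparing %gs with %ds is enough to tell whether TLS
   is usable yet.  */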

/* Code to initially initialize the thread pointer.  This might need
   special attention since 'errno' is not yet available and if the
   operation can cause a failure 'errno' must not be touched.  */
static inline bool __attribute__ ((unused))
_hurd_tls_init (tcbhead_t *tcb, bool full)
{
  HURD_TLS_DESC_DECL (desc, tcb);
  thread_t self = __mach_thread_self ();
  bool success = true;
  extern mach_port_t __hurd_reply_port0;

  /* This field is used by TLS accesses to get our "thread pointer"
     from the TLS point of view.  */
  tcb->tcb = tcb;
  /* We always at least start the sigthread anyway.  */
  tcb->multiple_threads = 1;
  if (full)
    /* Take over the reply port we've been using.  */
    tcb->reply_port = __hurd_reply_port0;

  /* Get the first available selector.  */
  int sel = -1;
  error_t err = __i386_set_gdt (self, &sel, desc);
  if (err == MIG_BAD_ID)
    {
      /* Old kernel, use a per-thread LDT.  */
      sel = 0x27;
      err = __i386_set_ldt (self, sel, &desc, 1);
      assert_perror (err);
      if (err)
        {
          success = false;
          goto out;
        }
    }
  else if (err)
    {
      assert_perror (err);	/* Separate from above with different line #.  */
      success = false;
      goto out;
    }

  /* Now install the new selector.  */
  asm volatile ("mov %w0, %%gs" :: "q" (sel));
  if (full)
    /* This port is now owned by the TCB.  */
    __hurd_reply_port0 = MACH_PORT_NULL;
#ifndef SHARED
  else
    __init1_desc = sel;
#endif

out:
  __mach_port_deallocate (__mach_task_self (), self);
  return success;
}

# define TLS_INIT_TP(descr) _hurd_tls_init ((tcbhead_t *) (descr), 1)
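
/* TLS_INIT_TP is the hook the generic glibc startup code uses to install the
   initial thread's TCB: it is handed the freshly set up tcbhead_t and runs
   the "full" initialization above, after which THREAD_SELF and the
   THREAD_*MEM accessors below become usable.  */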
#else /* defined (SHARED) && !IS_IN (rtld) */
# define __LIBC_NO_TLS() 0
#endif

# if __GNUC_PREREQ (6, 0)

#  define THREAD_SELF \
  (*(tcbhead_t * __seg_gs *) offsetof (tcbhead_t, tcb))
#  define THREAD_GETMEM(descr, member) \
  (*(__typeof (descr->member) __seg_gs *) offsetof (tcbhead_t, member))
#  define THREAD_GETMEM_NC(descr, member, idx) \
  (*(__typeof (descr->member[0]) __seg_gs *) \
   (offsetof (tcbhead_t, member) + (idx) * sizeof (descr->member[0])))
#  define THREAD_SETMEM(descr, member, value) \
  (*(__typeof (descr->member) __seg_gs *) offsetof (tcbhead_t, member) = value)
#  define THREAD_SETMEM_NC(descr, member, idx, value) \
  (*(__typeof (descr->member[0]) __seg_gs *) \
   (offsetof (tcbhead_t, member) + (idx) * sizeof (descr->member[0])) \
   = value)
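
/* The definitions above rely on GCC's __seg_gs named address space
   (available since GCC 6): the compiler itself emits %gs-relative loads and
   stores, so e.g. THREAD_GETMEM (THREAD_SELF, reply_port) compiles down to a
   single mov from %gs:offsetof (tcbhead_t, reply_port) with no inline asm.  */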

# else

/* Return the TCB address of the current thread.  */
#  define THREAD_SELF \
  ({ tcbhead_t *__tcb; \
     __asm__ ("movl %%gs:%c1,%0" : "=r" (__tcb) \
              : "i" (offsetof (tcbhead_t, tcb))); \
     __tcb;})

/* Read member of the thread descriptor directly.  */
#  define THREAD_GETMEM(descr, member) \
  ({ __typeof (descr->member) __value; \
     _Static_assert (sizeof (__value) == 1 \
                     || sizeof (__value) == 4 \
                     || sizeof (__value) == 8, \
                     "size of per-thread data"); \
     if (sizeof (__value) == 1) \
       asm volatile ("movb %%gs:%P2,%b0" \
                     : "=q" (__value) \
                     : "0" (0), "i" (offsetof (tcbhead_t, member))); \
     else if (sizeof (__value) == 4) \
       asm volatile ("movl %%gs:%P1,%0" \
                     : "=r" (__value) \
                     : "i" (offsetof (tcbhead_t, member))); \
     else /* 8 */ \
       { \
         asm volatile ("movl %%gs:%P1,%%eax\n\t" \
                       "movl %%gs:%P2,%%edx" \
                       : "=A" (__value) \
                       : "i" (offsetof (tcbhead_t, member)), \
                         "i" (offsetof (tcbhead_t, member) + 4)); \
       } \
     __value; })

/* Same as THREAD_GETMEM, but the member offset can be non-constant.  */
#  define THREAD_GETMEM_NC(descr, member, idx) \
  ({ __typeof (descr->member[0]) __value; \
     _Static_assert (sizeof (__value) == 1 \
                     || sizeof (__value) == 4 \
                     || sizeof (__value) == 8, \
                     "size of per-thread data"); \
     if (sizeof (__value) == 1) \
       asm volatile ("movb %%gs:%P2(%3),%b0" \
                     : "=q" (__value) \
                     : "0" (0), "i" (offsetof (tcbhead_t, member[0])), \
                       "r" (idx)); \
     else if (sizeof (__value) == 4) \
       asm volatile ("movl %%gs:%P1(,%2,4),%0" \
                     : "=r" (__value) \
                     : "i" (offsetof (tcbhead_t, member[0])), \
                       "r" (idx)); \
     else /* 8 */ \
       { \
         asm volatile ("movl %%gs:%P1(,%2,8),%%eax\n\t" \
                       "movl %%gs:4+%P1(,%2,8),%%edx" \
                       : "=&A" (__value) \
                       : "i" (offsetof (tcbhead_t, member[0])), \
                         "r" (idx)); \
       } \
     __value; })

/* Set member of the thread descriptor directly.  */
#  define THREAD_SETMEM(descr, member, value) \
  ({ \
     _Static_assert (sizeof (descr->member) == 1 \
                     || sizeof (descr->member) == 4 \
                     || sizeof (descr->member) == 8, \
                     "size of per-thread data"); \
     if (sizeof (descr->member) == 1) \
       asm volatile ("movb %b0,%%gs:%P1" : \
                     : "iq" (value), \
                       "i" (offsetof (tcbhead_t, member))); \
     else if (sizeof (descr->member) == 4) \
       asm volatile ("movl %0,%%gs:%P1" : \
                     : "ir" (value), \
                       "i" (offsetof (tcbhead_t, member))); \
     else /* 8 */ \
       { \
         asm volatile ("movl %%eax,%%gs:%P1\n\t" \
                       "movl %%edx,%%gs:%P2" : \
                       : "A" ((uint64_t) cast_to_integer (value)), \
                         "i" (offsetof (tcbhead_t, member)), \
                         "i" (offsetof (tcbhead_t, member) + 4)); \
       } })

/* Same as THREAD_SETMEM, but the member offset can be non-constant.  */
#  define THREAD_SETMEM_NC(descr, member, idx, value) \
  ({ \
     _Static_assert (sizeof (descr->member[0]) == 1 \
                     || sizeof (descr->member[0]) == 4 \
                     || sizeof (descr->member[0]) == 8, \
                     "size of per-thread data"); \
     if (sizeof (descr->member[0]) == 1) \
       asm volatile ("movb %b0,%%gs:%P1(%2)" : \
                     : "iq" (value), \
                       "i" (offsetof (tcbhead_t, member)), \
                       "r" (idx)); \
     else if (sizeof (descr->member[0]) == 4) \
       asm volatile ("movl %0,%%gs:%P1(,%2,4)" : \
                     : "ir" (value), \
                       "i" (offsetof (tcbhead_t, member)), \
                       "r" (idx)); \
     else /* 8 */ \
       { \
         asm volatile ("movl %%eax,%%gs:%P1(,%2,8)\n\t" \
                       "movl %%edx,%%gs:4+%P1(,%2,8)" : \
                       : "A" ((uint64_t) cast_to_integer (value)), \
                         "i" (offsetof (tcbhead_t, member)), \
                         "r" (idx)); \
       } })

# endif /* __GNUC_PREREQ (6, 0) */

/* Return the TCB address of a thread given its state.
   Note: this is expensive.  */
# define THREAD_TCB(thread, thread_state) \
  ({ int __sel = (thread_state)->basic.gs; \
     struct descriptor __desc, *___desc = &__desc; \
     unsigned int __count = 1; \
     kern_return_t __err; \
     if (HURD_SEL_LDT (__sel)) \
       __err = __i386_get_ldt ((thread), __sel, 1, &___desc, &__count); \
     else \
       __err = __i386_get_gdt ((thread), __sel, &__desc); \
     assert_perror (__err); \
     assert (__count == 1); \
     HURD_DESC_TLS (___desc);})
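
/* It is expensive because fetching the descriptor behind the thread's %gs
   selector (__i386_get_ldt / __i386_get_gdt) is an RPC to the kernel;
   HURD_DESC_TLS then just decodes the segment base out of the returned
   descriptor.  */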

/* Install new dtv for current thread.  */
# define INSTALL_NEW_DTV(dtvp) THREAD_SETMEM (THREAD_SELF, dtv, dtvp)

/* Return the address of the dtv for the current thread.  */
# define THREAD_DTV() THREAD_GETMEM (THREAD_SELF, dtv)

/* Set the stack guard field in TCB head.  */
#define THREAD_SET_STACK_GUARD(value) \
  THREAD_SETMEM (THREAD_SELF, stack_guard, value)
#define THREAD_COPY_STACK_GUARD(descr) \
  ((descr)->stack_guard \
   = THREAD_GETMEM (THREAD_SELF, stack_guard))

/* Set the pointer guard field in the TCB head.  */
#define THREAD_SET_POINTER_GUARD(value) \
  THREAD_SETMEM (THREAD_SELF, pointer_guard, value)
#define THREAD_COPY_POINTER_GUARD(descr) \
  ((descr)->pointer_guard \
   = THREAD_GETMEM (THREAD_SELF, pointer_guard))

# include <mach/machine/thread_status.h>

/* Set up TLS in the new thread of a fork child, copying from the original.  */
static inline kern_return_t __attribute__ ((unused))
_hurd_tls_fork (thread_t child, thread_t orig, struct i386_thread_state *state)
{
  /* Fetch the selector set by _hurd_tls_init.  */
  int sel;
  asm ("mov %%gs, %w0" : "=q" (sel) : "0" (0));
  if (sel == state->ds)		/* _hurd_tls_init was never called.  */
    return 0;

  struct descriptor desc, *_desc = &desc;
  error_t err;
  unsigned int count = 1;

  if (HURD_SEL_LDT (sel))
    err = __i386_get_ldt (orig, sel, 1, &_desc, &count);
  else
    err = __i386_get_gdt (orig, sel, &desc);

  assert_perror (err);
  if (err)
    return err;

  if (HURD_SEL_LDT (sel))
    err = __i386_set_ldt (child, sel, &desc, 1);
  else
    err = __i386_set_gdt (child, &sel, desc);

  state->gs = sel;
  return err;
}
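
/* In other words, the fork child gets a copy of the parent's TLS descriptor
   installed for its own thread, and its saved register state is patched so
   that %gs already names that descriptor when the child starts running; the
   TCB memory itself was duplicated along with the rest of the address
   space.  */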

static inline kern_return_t __attribute__ ((unused))
_hurd_tls_new (thread_t child, tcbhead_t *tcb)
{
  error_t err;
  /* Fetch the target thread's state.  */
  struct i386_thread_state state;
  mach_msg_type_number_t state_count = i386_THREAD_STATE_COUNT;
  err = __thread_get_state (child, i386_REGS_SEGS_STATE,
                            (thread_state_t) &state,
                            &state_count);
  if (err)
    return err;
  assert (state_count == i386_THREAD_STATE_COUNT);
  /* Fetch the selector set by _hurd_tls_init.  */
  int sel;
  asm ("mov %%gs, %w0" : "=q" (sel) : "0" (0));
  if (sel == state.ds)		/* _hurd_tls_init was never called.  */
    return 0;

  HURD_TLS_DESC_DECL (desc, tcb);

  tcb->tcb = tcb;

  if (HURD_SEL_LDT (sel))
    err = __i386_set_ldt (child, sel, &desc, 1);
  else
    err = __i386_set_gdt (child, &sel, desc);

  if (err)
    return err;

  /* Update gs to use the selector.  */
  state.gs = sel;
  return __thread_set_state (child, i386_REGS_SEGS_STATE,
                             (thread_state_t) &state,
                             state_count);
}

/* Global scope switch support.  */
# define THREAD_GSCOPE_FLAG_UNUSED 0
# define THREAD_GSCOPE_FLAG_USED   1
# define THREAD_GSCOPE_FLAG_WAIT   2

# define THREAD_GSCOPE_SET_FLAG() \
  THREAD_SETMEM (THREAD_SELF, gscope_flag, THREAD_GSCOPE_FLAG_USED)

# define THREAD_GSCOPE_RESET_FLAG() \
  ({ \
     int __flag; \
     asm volatile ("xchgl %0, %%gs:%P1" \
                   : "=r" (__flag) \
                   : "i" (offsetof (tcbhead_t, gscope_flag)), \
                     "0" (THREAD_GSCOPE_FLAG_UNUSED)); \
     if (__flag == THREAD_GSCOPE_FLAG_WAIT) \
       lll_wake (THREAD_SELF->gscope_flag, LLL_PRIVATE); \
   })
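
/* The xchgl above atomically resets gscope_flag to UNUSED while retrieving
   its previous value, so if the dynamic loader had marked this thread as
   THREAD_GSCOPE_FLAG_WAIT while waiting for it to leave the global scope,
   the sleeping waiter is woken via lll_wake.  */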

#endif /* !__ASSEMBLER__ */

#endif /* i386/tls.h */