/* Copyright (C) 2002-2023 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <limits.h>
#include <sched.h>
#include <setjmp.h>
#include <stdbool.h>
#include <sys/types.h>
#include <hp-timing.h>
#include <list_t.h>
#include <lowlevellock.h>
#include <pthreaddef.h>
#include <dl-sysdep.h>
#include <thread_db.h>
#include <tls.h>
#include <unwind.h>
#include <bits/types/res_state.h>
#include <kernel-features.h>
#include <tls-internal-struct.h>
#include <internal-sigset.h>

#ifndef TCB_ALIGNMENT
# define TCB_ALIGNMENT 32
#elif TCB_ALIGNMENT < 32
# error TCB_ALIGNMENT must be at least 32
#endif

/* We keep thread specific data in a special data structure, a two-level
   array.  The top-level array contains pointers to dynamically allocated
   arrays of a certain number of data pointers.  So we can implement a
   sparse array.  Each dynamic second-level array has
	PTHREAD_KEY_2NDLEVEL_SIZE
   entries.  This value shouldn't be too large.  */
#define PTHREAD_KEY_2NDLEVEL_SIZE 32

/* We need to address PTHREAD_KEYS_MAX keys, with PTHREAD_KEY_2NDLEVEL_SIZE
   keys in each subarray.  */
#define PTHREAD_KEY_1STLEVEL_SIZE \
  ((PTHREAD_KEYS_MAX + PTHREAD_KEY_2NDLEVEL_SIZE - 1) \
   / PTHREAD_KEY_2NDLEVEL_SIZE)
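
/* Illustrative example (not in the original header): a key K splits into
   a first-level slot and a second-level offset.  With
   PTHREAD_KEY_2NDLEVEL_SIZE == 32, key 70 lands in subarray 70 / 32 == 2
   at offset 70 % 32 == 6:

     size_t idx1 = K / PTHREAD_KEY_2NDLEVEL_SIZE;
     size_t idx2 = K % PTHREAD_KEY_2NDLEVEL_SIZE;

   Only subarrays actually used need to be allocated, which is what makes
   the two-level array sparse.  */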

/* Internal version of the buffer to store cancellation handler
   information.  */
struct pthread_unwind_buf
{
  struct
  {
    __jmp_buf jmp_buf;
    int mask_was_saved;
  } cancel_jmp_buf[1];

  union
  {
    /* This is the placeholder of the public version.  */
    void *pad[4];

    struct
    {
      /* Pointer to the previous cleanup buffer.  */
      struct pthread_unwind_buf *prev;

      /* Backward compatibility: state of the old-style cleanup
	 handler at the time of the previous new-style cleanup handler
	 installment.  */
      struct _pthread_cleanup_buffer *cleanup;

      /* Cancellation type before the push call.  */
      int canceltype;
    } data;
  } priv;
};
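
/* Illustrative sketch (an assumption, not code from this header): the
   unwind buffers form a stack threaded through priv.data.prev.  A push,
   as done when registering a cancellation handler, looks roughly like

     buf->priv.data.prev = self->cleanup_jmp_buf;
     buf->priv.data.cleanup = self->cleanup;
     self->cleanup_jmp_buf = buf;

   where self is the current thread descriptor (struct pthread below).  */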

/* Opcodes and data types for communication with the signal handler to
   change user/group IDs.  */
struct xid_command
{
  int syscall_no;
  /* Enforce zero-extension for the pointer argument in

     int setgroups (size_t size, const gid_t *list);

     The kernel XID arguments are unsigned and do not require sign
     extension.  */
  unsigned long int id[3];
  volatile int cntr;
  volatile int error; /* -1: no call yet, 0: success seen, >0: error seen.  */
};
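
/* Illustrative sketch (an assumption): on a setuid/setgid-style call,
   each thread's signal handler replays the recorded syscall with the
   saved arguments, roughly

     long int result = syscall (cmd->syscall_no,
				cmd->id[0], cmd->id[1], cmd->id[2]);

   and then updates cmd->error so the initiating thread can collect a
   consistent result across all threads.  */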

/* Data structure used by the kernel to find robust futexes.  */
struct robust_list_head
{
  void *list;
  long int futex_offset;
  void *list_op_pending;
};
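
/* Illustrative note (an assumption about the kernel protocol): on thread
   exit the kernel walks 'list' and finds each lock word at a fixed
   distance from the list entry,

     futex_word = (uint32_t *) ((char *) entry + head->futex_offset);

   which is why futex_offset is recorded next to the list head.  */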

/* Data structure used to handle thread priority protection.  */
struct priority_protection_data
{
  int frozen;
  unsigned int priomap[];
};

/* Thread descriptor data structure.  */
struct pthread
{
  union
  {
#if !TLS_DTV_AT_TP
    /* This overlaps the TCB as used for TLS without threads (see tls.h).  */
    tcbhead_t header;
#else
    struct
    {
      /* multiple_threads is enabled either when the process has spawned at
	 least one thread or when a single-threaded process cancels itself.
	 This enables additional code to introduce locking before doing some
	 compare_and_exchange operations and also enables cancellation points.
	 The concepts of multiple threads and cancellation points ideally
	 should be separate, since it is not necessary for multiple threads
	 to have been created for cancellation points to be enabled, as is
	 the case when a single-threaded process cancels itself.

	 Since enabling multiple_threads enables additional code in
	 cancellation points and compare_and_exchange operations, there is a
	 potential for an unneeded performance hit when it is enabled in a
	 single-threaded, self-canceling process.  This is OK though, since a
	 single-threaded process will enable async cancellation only when it
	 looks to cancel itself and is hence going to end anyway.  */
      int multiple_threads;
      int gscope_flag;
    } header;
#endif
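
    /* Illustrative sketch (an assumption): fast paths test this flag to
       decide whether thread-safe code is required, along the lines of

	 if (THREAD_GETMEM (THREAD_SELF, header.multiple_threads))
	   ... take the locking/cancellable path ...

       keeping the single-threaded case cheap.  */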

    /* This extra padding has no special purpose, and this structure layout
       is private and subject to change without affecting the official ABI.
       We just have it here in case it might be convenient for some
       implementation-specific instrumentation hack or suchlike.  */
    void *__padding[24];
  };

  /* This descriptor's link on the GL (dl_stack_used) or
     GL (dl_stack_user) list.  */
  list_t list;

  /* Thread ID - which also serves as an 'is this thread descriptor (and
     therefore stack) in use' flag.  */
  pid_t tid;

  /* List of robust mutexes the thread is holding.  */
#if __PTHREAD_MUTEX_HAVE_PREV
  void *robust_prev;
  struct robust_list_head robust_head;

  /* The list above is strange.  It is basically a doubly linked list
     but the pointer to the next/previous element of the list points
     in the middle of the object, the __next element.  Whenever
     casting to __pthread_list_t we need to adjust the pointer
     first.

     These operations are effectively concurrent code in that the thread
     can get killed at any point in time and the kernel takes over.  Thus,
     the __next elements are a kind of concurrent list and we need to
     enforce using compiler barriers that the individual operations happen
     in such a way that the kernel always sees a consistent list.  The
     backward links (i.e., the __prev elements) are not used by the kernel.
     FIXME We should use relaxed MO atomic operations here and signal fences
     because this kind of concurrency is similar to synchronizing with a
     signal handler.  */
# define QUEUE_PTR_ADJUST (offsetof (__pthread_list_t, __next))

# define ENQUEUE_MUTEX_BOTH(mutex, val) \
  do { \
    __pthread_list_t *next = (__pthread_list_t *) \
      ((((uintptr_t) THREAD_GETMEM (THREAD_SELF, robust_head.list)) & ~1ul) \
       - QUEUE_PTR_ADJUST); \
    next->__prev = (void *) &mutex->__data.__list.__next; \
    mutex->__data.__list.__next = THREAD_GETMEM (THREAD_SELF, \
						 robust_head.list); \
    mutex->__data.__list.__prev = (void *) &THREAD_SELF->robust_head; \
    /* Ensure that the new list entry is ready before we insert it.  */ \
    __asm ("" ::: "memory"); \
    THREAD_SETMEM (THREAD_SELF, robust_head.list, \
		   (void *) (((uintptr_t) &mutex->__data.__list.__next) \
			     | val)); \
  } while (0)
# define DEQUEUE_MUTEX(mutex) \
  do { \
    __pthread_list_t *next = (__pthread_list_t *) \
      ((char *) (((uintptr_t) mutex->__data.__list.__next) & ~1ul) \
       - QUEUE_PTR_ADJUST); \
    next->__prev = mutex->__data.__list.__prev; \
    __pthread_list_t *prev = (__pthread_list_t *) \
      ((char *) (((uintptr_t) mutex->__data.__list.__prev) & ~1ul) \
       - QUEUE_PTR_ADJUST); \
    prev->__next = mutex->__data.__list.__next; \
    /* Ensure that we remove the entry from the list before we change the \
       __next pointer of the entry, which is read by the kernel.  */ \
    __asm ("" ::: "memory"); \
    mutex->__data.__list.__prev = NULL; \
    mutex->__data.__list.__next = NULL; \
  } while (0)
#else
  union
  {
    __pthread_slist_t robust_list;
    struct robust_list_head robust_head;
  };

# define ENQUEUE_MUTEX_BOTH(mutex, val) \
  do { \
    mutex->__data.__list.__next \
      = THREAD_GETMEM (THREAD_SELF, robust_list.__next); \
    /* Ensure that the new list entry is ready before we insert it.  */ \
    __asm ("" ::: "memory"); \
    THREAD_SETMEM (THREAD_SELF, robust_list.__next, \
		   (void *) (((uintptr_t) &mutex->__data.__list) | val)); \
  } while (0)
# define DEQUEUE_MUTEX(mutex) \
  do { \
    __pthread_slist_t *runp = (__pthread_slist_t *) \
      (((uintptr_t) THREAD_GETMEM (THREAD_SELF, robust_list.__next)) & ~1ul); \
    if (runp == &mutex->__data.__list) \
      THREAD_SETMEM (THREAD_SELF, robust_list.__next, runp->__next); \
    else \
      { \
	__pthread_slist_t *next = (__pthread_slist_t *) \
	  (((uintptr_t) runp->__next) & ~1ul); \
	while (next != &mutex->__data.__list) \
	  { \
	    runp = next; \
	    next = (__pthread_slist_t *) (((uintptr_t) runp->__next) & ~1ul); \
	  } \
	runp->__next = next->__next; \
	/* Ensure that we remove the entry from the list before we change \
	   the __next pointer of the entry, which is read by the kernel.  */ \
	__asm ("" ::: "memory"); \
	mutex->__data.__list.__next = NULL; \
      } \
  } while (0)
#endif
#define ENQUEUE_MUTEX(mutex) ENQUEUE_MUTEX_BOTH (mutex, 0)
#define ENQUEUE_MUTEX_PI(mutex) ENQUEUE_MUTEX_BOTH (mutex, 1)
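
/* Illustrative sketch (an assumption): a robust mutex lock path records
   the pending operation before touching the list, roughly

     THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
		    (void *) &mutex->__data.__list.__next);
     ... acquire the lock word ...
     ENQUEUE_MUTEX (mutex);
     THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

   so the kernel can complete or undo the update if the thread dies in
   the middle of it.  */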

  /* List of cleanup buffers.  */
  struct _pthread_cleanup_buffer *cleanup;

  /* Unwind information.  */
  struct pthread_unwind_buf *cleanup_jmp_buf;
#define HAVE_CLEANUP_JMP_BUF

  /* Flags determining processing of cancellation.  */
  int cancelhandling;
  /* Bit set if cancellation is disabled.  */
#define CANCELSTATE_BIT		0
#define CANCELSTATE_BITMASK	(1 << CANCELSTATE_BIT)
  /* Bit set if asynchronous cancellation mode is selected.  */
#define CANCELTYPE_BIT		1
#define CANCELTYPE_BITMASK	(1 << CANCELTYPE_BIT)
  /* Bit set if canceling has been initiated.  */
#define CANCELING_BIT		2
#define CANCELING_BITMASK	(1 << CANCELING_BIT)
  /* Bit set if canceled.  */
#define CANCELED_BIT		3
#define CANCELED_BITMASK	(1 << CANCELED_BIT)
  /* Bit set if thread is exiting.  */
#define EXITING_BIT		4
#define EXITING_BITMASK		(1 << EXITING_BIT)
  /* Bit set if thread terminated and TCB is freed.  */
#define TERMINATED_BIT		5
#define TERMINATED_BITMASK	(1 << TERMINATED_BIT)
  /* Bit set if thread is supposed to change XID.  */
#define SETXID_BIT		6
#define SETXID_BITMASK		(1 << SETXID_BIT)

  /* Flags.  Including those copied from the thread attribute.  */
  int flags;

  /* We allocate one block of references here.  This should be enough
     to avoid allocating any memory dynamically for most applications.  */
  struct pthread_key_data
  {
    /* Sequence number.  We use uintptr_t to not require padding on
       32- and 64-bit machines.  On 64-bit machines it helps to avoid
       wrapping, too.  */
    uintptr_t seq;

    /* Data pointer.  */
    void *data;
  } specific_1stblock[PTHREAD_KEY_2NDLEVEL_SIZE];

  /* Two-level array for the thread-specific data.  */
  struct pthread_key_data *specific[PTHREAD_KEY_1STLEVEL_SIZE];
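
  /* Illustrative sketch (an assumption): a getspecific-style lookup goes
     through the first block for small keys and through the two-level
     array otherwise, roughly

       struct pthread_key_data *level2
	 = self->specific[key / PTHREAD_KEY_2NDLEVEL_SIZE];
       void *value = level2 == NULL
		     ? NULL : level2[key % PTHREAD_KEY_2NDLEVEL_SIZE].data;

     with seq compared against the key's global sequence number to reject
     values left over from a deleted and reused key.  */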

  /* Flag which is set when specific data is set.  */
  bool specific_used;

  /* True if events must be reported.  */
  bool report_events;

  /* True if the user provided the stack.  */
  bool user_stack;

  /* True if thread must stop at startup time.  */
  bool stopped_start;

  /* Indicate that a thread creation setup has failed (for instance the
     scheduler or affinity).  */
  int setup_failed;

  /* Lock to synchronize access to the descriptor.  */
  int lock;

  /* Lock for synchronizing setxid calls.  */
  unsigned int setxid_futex;

  /* If the thread waits to join another one the ID of the latter is
     stored here.

     In case a thread is detached this field contains a pointer to the
     TCB of the thread itself.  This is something which cannot happen
     in normal operation.  */
  struct pthread *joinid;
  /* Check whether a thread is detached.  */
#define IS_DETACHED(pd) ((pd)->joinid == (pd))

  /* The result of the thread function.  */
  void *result;

  /* Scheduling parameters for the new thread.  */
  struct sched_param schedparam;
  int schedpolicy;

  /* Start position of the code to be executed and the argument passed
     to the function.  */
  void *(*start_routine) (void *);
  void *arg;

  /* Debug state.  */
  td_eventbuf_t eventbuf;
  /* Next descriptor with a pending event.  */
  struct pthread *nextevent;

  /* Machine-specific unwind info.  */
  struct _Unwind_Exception exc;

  /* If nonzero, pointer to the area allocated for the stack and guard.  */
  void *stackblock;
  /* Size of the stackblock area including the guard.  */
  size_t stackblock_size;
  /* Size of the included guard area.  */
  size_t guardsize;
  /* This is what the user specified and what we will report.  */
  size_t reported_guardsize;

  /* Thread Priority Protection data.  */
  struct priority_protection_data *tpp;

  /* Resolver state.  */
  struct __res_state res;

  /* Signal mask for the new thread.  Used during thread startup to
     restore the signal mask.  (Threads are launched with all signals
     blocked.)  */
  internal_sigset_t sigmask;

  /* Used by the exception handling implementation in the dynamic loader.  */
  struct rtld_catch *rtld_catch;

  /* Indicates whether this is a C11 thread created by thrd_create.  */
  bool c11;

  /* Used in __pthread_kill_internal to detect a thread that has
     exited or is about to exit.  exit_lock must only be acquired
     after blocking signals.  */
  int exit_lock; /* A low-level lock (for use with __libc_lock_init etc).  */

  /* Used by strsignal.  */
  struct tls_internal_t tls_state;

  /* rseq area registered with the kernel.  Use a custom definition
     here to isolate from kernel struct rseq changes.  The
     implementation of sched_getcpu needs access to the cpu_id field;
     the other fields are unused and not included here.  */
  union
  {
    struct
    {
      uint32_t cpu_id_start;
      uint32_t cpu_id;
    };
    char pad[32];		/* Original rseq area size.  */
  } rseq_area __attribute__ ((aligned (32)));
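
  /* Illustrative sketch (an assumption): sched_getcpu can read the
     kernel-maintained CPU number straight from this area, e.g.

       int cpu = THREAD_GETMEM_VOLATILE (THREAD_SELF, rseq_area.cpu_id);

     falling back to the getcpu syscall when rseq registration is
     unavailable.  */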

  /* Amount of end padding, if any, in this structure.
     This definition relies on rseq_area being last.  */
#define PTHREAD_STRUCT_END_PADDING \
  (sizeof (struct pthread) - offsetof (struct pthread, rseq_area) \
   - sizeof ((struct pthread) {}.rseq_area))
} __attribute ((aligned (TCB_ALIGNMENT)));
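
/* Worked example (illustrative numbers only): if rseq_area starts at
   offset 2272 and occupies 32 bytes, a total size of 2304 gives

     PTHREAD_STRUCT_END_PADDING == 2304 - 2272 - 32 == 0

   i.e. no end padding; any excess beyond offset plus member size is
   padding inserted to satisfy TCB_ALIGNMENT.  */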

static inline bool
cancel_enabled_and_canceled (int value)
{
  return (value & (CANCELSTATE_BITMASK | CANCELED_BITMASK | EXITING_BITMASK
		   | TERMINATED_BITMASK)) == CANCELED_BITMASK;
}

static inline bool
cancel_enabled_and_canceled_and_async (int value)
{
  return ((value) & (CANCELSTATE_BITMASK | CANCELTYPE_BITMASK
		     | CANCELED_BITMASK | EXITING_BITMASK
		     | TERMINATED_BITMASK))
    == (CANCELTYPE_BITMASK | CANCELED_BITMASK);
}
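
/* Illustrative sketch (an assumption): a cancellation point tests the
   combined state, e.g.

     int value = atomic_load_relaxed (&THREAD_SELF->cancelhandling);
     if (cancel_enabled_and_canceled (value))
       __do_cancel ();

   so cancellation only fires when it is enabled and pending, and never
   for an exiting or already-terminated thread.  */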

/* This yields the pointer that TLS support code calls the thread pointer.  */
#if TLS_TCB_AT_TP
# define TLS_TPADJ(pd) (pd)
#elif TLS_DTV_AT_TP
# define TLS_TPADJ(pd) ((struct pthread *)((char *) (pd) + TLS_PRE_TCB_SIZE))
#endif
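
/* Illustrative note (an assumption): on TLS_DTV_AT_TP targets the
   descriptor sits TLS_PRE_TCB_SIZE bytes before the thread pointer, so
   TLS_TPADJ moves from the descriptor to the thread pointer,

     void *tp_view = (void *) TLS_TPADJ (pd);

   whereas on TLS_TCB_AT_TP targets the two coincide and TLS_TPADJ (pd)
   is just pd.  */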