/* Copyright (C) 2002-2023 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <dl-sysdep.h>
#include <dl-tls.h>
#include <tls.h>
#include <list.h>
#include <lowlevellock.h>
#include <futex-internal.h>
#include <kernel-features.h>
#include <nptl-stack.h>
#include <libc-lock.h>
#include <tls-internal.h>
#include <intprops.h>
#include <setvmaname.h>
/* Default alignment of stack.  */
#ifndef STACK_ALIGN
# define STACK_ALIGN __alignof__ (long double)
#endif

/* Default value for minimal stack size after allocating thread
   descriptor and guard.  */
#ifndef MINIMAL_REST_STACK
# define MINIMAL_REST_STACK 4096
#endif

/* Newer kernels have the MAP_STACK flag to indicate a mapping is used for
   a stack.  Use it when possible.  */
#ifndef MAP_STACK
# define MAP_STACK 0
#endif
/* Get a stack frame from the cache.  We have to match by size since
   some blocks might be too small or far too large.  */
static struct pthread *
get_cached_stack (size_t *sizep, void **memp)
{
  size_t size = *sizep;
  struct pthread *result = NULL;
  list_t *entry;

  lll_lock (GL (dl_stack_cache_lock), LLL_PRIVATE);

  /* Search the cache for a matching entry.  We search for the
     smallest stack which has at least the required size.  Note that
     in normal situations the size of all allocated stacks is the
     same.  At the very least there are only a few different sizes.
     Therefore this loop will exit early most of the time with an
     exact match.  */
  list_for_each (entry, &GL (dl_stack_cache))
    {
      struct pthread *curr;

      curr = list_entry (entry, struct pthread, list);
      if (__nptl_stack_in_use (curr) && curr->stackblock_size >= size)
        {
          if (curr->stackblock_size == size)
            {
              result = curr;
              break;
            }

          if (result == NULL
              || result->stackblock_size > curr->stackblock_size)
            result = curr;
        }
    }
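
  /* NB: the "4 * size" cap below means a cached block is reused only if
     it is at most four times the requested size; e.g. with the common
     8 MiB default request, a stray 64 MiB cached stack is skipped rather
     than handed out, so one oversized thread cannot inflate every later
     allocation.  (The 8 MiB figure is a typical Linux default, not
     something this file controls.)  */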
  if (__builtin_expect (result == NULL, 0)
      /* Make sure the size difference is not too excessive.  In that
         case we do not use the block.  */
      || __builtin_expect (result->stackblock_size > 4 * size, 0))
    {
      /* Release the lock.  */
      lll_unlock (GL (dl_stack_cache_lock), LLL_PRIVATE);

      return NULL;
    }

  /* Don't allow setxid until cloned.  */
  result->setxid_futex = -1;

  /* Dequeue the entry.  */
  __nptl_stack_list_del (&result->list);

  /* And add to the list of stacks in use.  */
  __nptl_stack_list_add (&result->list, &GL (dl_stack_used));

  /* And decrease the cache size.  */
  GL (dl_stack_cache_actsize) -= result->stackblock_size;

  /* Release the lock early.  */
  lll_unlock (GL (dl_stack_cache_lock), LLL_PRIVATE);

  /* Report size and location of the stack to the caller.  */
  *sizep = result->stackblock_size;
  *memp = result->stackblock;

  /* Cancellation handling is back to the default.  */
  result->cancelhandling = 0;
  result->cleanup = NULL;
  result->setup_failed = 0;

  /* No pending event.  */
  result->nextevent = NULL;

  result->exiting = false;
  __libc_lock_init (result->exit_lock);
  memset (&result->tls_state, 0, sizeof result->tls_state);

  /* Clear the DTV.  */
  dtv_t *dtv = GET_DTV (TLS_TPADJ (result));
  for (size_t cnt = 0; cnt < dtv[-1].counter; ++cnt)
    free (dtv[1 + cnt].pointer.to_free);
  memset (dtv, '\0', (dtv[-1].counter + 1) * sizeof (dtv_t));

  /* Re-initialize the TLS.  */
  _dl_allocate_tls_init (TLS_TPADJ (result), true);

  return result;
}
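
/* Note that, unlike a freshly mmap'ed block, a block that comes out of
   the cache keeps its old guard area; allocate_stack below grows or
   shrinks the guard with mprotect if the new request differs.  */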
/* Return the guard page position on allocated stack.  */
static inline char *
__attribute ((always_inline))
guard_position (void *mem, size_t size, size_t guardsize, struct pthread *pd,
                size_t pagesize_m1)
{
#ifdef NEED_SEPARATE_REGISTER_STACK
  return mem + (((size - guardsize) / 2) & ~pagesize_m1);
#elif _STACK_GROWS_DOWN
  return mem;
#elif _STACK_GROWS_UP
  return (char *) (((uintptr_t) pd - guardsize) & ~pagesize_m1);
#endif
}
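
/* A concrete reading of guard_position: with 4 KiB pages
   (pagesize_m1 == 0xfff) and a downward-growing stack, the guard is
   simply the lowest part of the mapping, starting at MEM; for an
   upward-growing stack it is carved out just below the thread
   descriptor; and for the ia64-style separate register stack it sits
   in the middle, rounded down to a page boundary so both halves stay
   page-aligned.  */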
/* Based on stack allocated with PROT_NONE, setup the required portions with
   'prot' flags based on the guard page position.  */
static inline int
setup_stack_prot (char *mem, size_t size, char *guard, size_t guardsize,
                  const int prot)
{
  char *guardend = guard + guardsize;
#if _STACK_GROWS_DOWN && !defined(NEED_SEPARATE_REGISTER_STACK)
  /* As defined at guard_position, for architectures with downward stack
     the guard page is always at start of the allocated area.  */
  if (__mprotect (guardend, size - guardsize, prot) != 0)
    return errno;
#else
  size_t mprots1 = (uintptr_t) guard - (uintptr_t) mem;
  if (__mprotect (mem, mprots1, prot) != 0)
    return errno;
  size_t mprots2 = ((uintptr_t) mem + size) - (uintptr_t) guardend;
  if (__mprotect (guardend, mprots2, prot) != 0)
    return errno;
#endif
  return 0;
}
/* Mark the memory of the stack as usable to the kernel.  It frees everything
   except for the space used for the TCB itself.  */
static __always_inline void
advise_stack_range (void *mem, size_t size, uintptr_t pd, size_t guardsize)
{
  uintptr_t sp = (uintptr_t) CURRENT_STACK_FRAME;
  size_t pagesize_m1 = __getpagesize () - 1;
#if _STACK_GROWS_DOWN && !defined(NEED_SEPARATE_REGISTER_STACK)
  size_t freesize = (sp - (uintptr_t) mem) & ~pagesize_m1;
  assert (freesize < size);
  if (freesize > PTHREAD_STACK_MIN)
    __madvise (mem, freesize - PTHREAD_STACK_MIN, MADV_DONTNEED);
#else
  /* Page aligned start of memory to free (higher than or equal
     to current sp plus the minimum stack size).  */
  uintptr_t freeblock = (sp + PTHREAD_STACK_MIN + pagesize_m1) & ~pagesize_m1;
  uintptr_t free_end = (pd - guardsize) & ~pagesize_m1;
  if (free_end > freeblock)
    {
      size_t freesize = free_end - freeblock;
      assert (freesize < size);
      __madvise ((void *) freeblock, freesize, MADV_DONTNEED);
    }
#endif
}
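
/* MADV_DONTNEED only tells the kernel it may reclaim the pages; the
   mapping itself stays intact, and anonymous pages read back as zero if
   touched again, which is safe here because the advised range lies
   entirely outside the live stack frames and the TCB.  */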
/* Returns a usable stack for a new thread either by allocating a
   new stack or reusing a cached stack of sufficient size.
   ATTR must be non-NULL and point to a valid pthread_attr.
   PDP must be non-NULL.  */
static int
allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
                void **stack, size_t *stacksize)
{
  struct pthread *pd;
  size_t size;
  size_t pagesize_m1 = __getpagesize () - 1;
  size_t tls_static_size_for_stack = __nptl_tls_static_size_for_stack ();
  size_t tls_static_align_m1 = GLRO (dl_tls_static_align) - 1;

  assert (powerof2 (pagesize_m1 + 1));
  assert (TCB_ALIGNMENT >= STACK_ALIGN);

  /* Get the stack size from the attribute if it is set.  Otherwise we
     use the default we determined at start time.  */
  if (attr->stacksize != 0)
    size = attr->stacksize;
  else
    {
      lll_lock (__default_pthread_attr_lock, LLL_PRIVATE);
      size = __default_pthread_attr.internal.stacksize;
      lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);
    }
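
  /* (__default_pthread_attr is filled in at libc startup, typically from
     the RLIMIT_STACK soft limit, which is what "determined at start
     time" refers to.)  */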
  /* Get memory for the stack.  */
  if (__glibc_unlikely (attr->flags & ATTR_FLAG_STACKADDR))
    {
      uintptr_t adj;
      char *stackaddr = (char *) attr->stackaddr;

      /* Assume the same layout as the _STACK_GROWS_DOWN case, with struct
         pthread at the top of the stack block.  Later we adjust the guard
         location and stack address to match the _STACK_GROWS_UP case.  */
      if (_STACK_GROWS_UP)
        stackaddr += attr->stacksize;

      /* If the user also specified the size of the stack make sure it
         is large enough.  */
      if (attr->stacksize != 0
          && attr->stacksize < (tls_static_size_for_stack
                                + MINIMAL_REST_STACK))
        return EINVAL;

      /* Adjust stack size for alignment of the TLS block.  */
#if TLS_TCB_AT_TP
      adj = ((uintptr_t) stackaddr - TLS_TCB_SIZE)
            & tls_static_align_m1;
      assert (size > adj + TLS_TCB_SIZE);
#elif TLS_DTV_AT_TP
      adj = ((uintptr_t) stackaddr - tls_static_size_for_stack)
            & tls_static_align_m1;
      assert (size > adj);
#endif

      /* The user provided some memory.  Let's hope it matches the
         size...  We do not allocate guard pages if the user provided
         the stack.  It is the user's responsibility to do this if it
         is wanted.  */
#if TLS_TCB_AT_TP
      pd = (struct pthread *) ((uintptr_t) stackaddr
                               - TLS_TCB_SIZE - adj);
#elif TLS_DTV_AT_TP
      pd = (struct pthread *) (((uintptr_t) stackaddr
                                - tls_static_size_for_stack - adj)
                               - TLS_PRE_TCB_SIZE);
#endif
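
      /* To make the two layouts concrete: with TLS_TCB_AT_TP (e.g.
         x86_64) the TCB sits at the thread pointer with the static TLS
         block below it, so PD is placed TLS_TCB_SIZE below the aligned
         top.  With TLS_DTV_AT_TP (e.g. AArch64, PowerPC) the static TLS
         block starts at the thread pointer and struct pthread lives
         TLS_PRE_TCB_SIZE below it.  */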
      /* The user provided stack memory needs to be cleared.  */
      memset (pd, '\0', sizeof (struct pthread));

      /* The first TSD block is included in the TCB.  */
      pd->specific[0] = pd->specific_1stblock;

      /* Remember the stack-related values.  */
      pd->stackblock = (char *) stackaddr - size;
      pd->stackblock_size = size;

      /* This is a user-provided stack.  It will not be queued in the
         stack cache nor will the memory (except the TLS memory) be freed.  */
      pd->user_stack = true;

      /* This is at least the second thread.  */
      pd->header.multiple_threads = 1;

#ifdef NEED_DL_SYSINFO
      SETUP_THREAD_SYSINFO (pd);
#endif

      /* Don't allow setxid until cloned.  */
      pd->setxid_futex = -1;

      /* Allocate the DTV for this thread.  */
      if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
        {
          /* Something went wrong.  */
          assert (errno == ENOMEM);
          return errno;
        }

      /* Prepare to modify global data.  */
      lll_lock (GL (dl_stack_cache_lock), LLL_PRIVATE);

      /* And add to the list of stacks in use.  */
      list_add (&pd->list, &GL (dl_stack_user));

      lll_unlock (GL (dl_stack_cache_lock), LLL_PRIVATE);
    }
  else
    {
      /* Allocate some anonymous memory.  If possible use the cache.  */
      size_t guardsize;
      size_t reported_guardsize;
      size_t reqsize;
      void *mem;
      const int prot = (PROT_READ | PROT_WRITE
                        | ((GL(dl_stack_flags) & PF_X) ? PROT_EXEC : 0));

      /* Adjust the stack size for alignment.  */
      size &= ~tls_static_align_m1;
      assert (size != 0);

      /* Make sure the size of the stack is enough for the guard and,
         possibly, the thread descriptor.  On some targets there is
         a minimum guard size requirement, ARCH_MIN_GUARD_SIZE, so
         internally enforce it (unless the guard was disabled), but
         report the original guard size for backward compatibility:
         before POSIX 2008 the guardsize was specified to be one page
         by default, which is observable via pthread_attr_getguardsize
         and pthread_getattr_np.  */
      guardsize = (attr->guardsize + pagesize_m1) & ~pagesize_m1;
      reported_guardsize = guardsize;
      if (guardsize > 0 && guardsize < ARCH_MIN_GUARD_SIZE)
        guardsize = ARCH_MIN_GUARD_SIZE;
      if (guardsize < attr->guardsize || size + guardsize < guardsize)
        /* Arithmetic overflow.  */
        return EINVAL;
      size += guardsize;
      if (__builtin_expect (size < ((guardsize + tls_static_size_for_stack
                                     + MINIMAL_REST_STACK + pagesize_m1)
                                    & ~pagesize_m1),
                            0))
        /* The stack is too small (or the guard too large).  */
        return EINVAL;
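
      /* Worked example of the checks above, assuming 4 KiB pages and
         ARCH_MIN_GUARD_SIZE == 4096: attr->guardsize == 1 is first
         rounded up to 4096, which is also what gets reported; a request
         for a 128 KiB stack then maps 128 KiB + 4 KiB and fails with
         EINVAL only if the remaining space cannot hold the static TLS
         data plus MINIMAL_REST_STACK.  (Illustrative numbers only.)  */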
      /* Try to get a stack from the cache.  */
      reqsize = size;
      pd = get_cached_stack (&size, &mem);
      if (pd == NULL)
        {
          /* If a guard page is required, avoid committing memory by
             first allocating with PROT_NONE and then setting the
             required permission, excluding the guard page.  */
          mem = __mmap (NULL, size, (guardsize == 0) ? prot : PROT_NONE,
                        MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);

          if (__glibc_unlikely (mem == MAP_FAILED))
            return errno;

          /* Do madvise in case the tunable glibc.pthread.stack_hugetlb is
             set to 0, disabling hugetlb.  */
          if (__glibc_unlikely (__nptl_stack_hugetlb == 0)
              && __madvise (mem, size, MADV_NOHUGEPAGE) != 0)
            return errno;

          /* SIZE is guaranteed to be greater than zero.
             So we can never get a null pointer back from mmap.  */
          assert (mem != NULL);
          /* Place the thread descriptor at the end of the stack.  */
#if TLS_TCB_AT_TP
          pd = (struct pthread *) ((((uintptr_t) mem + size)
                                    - TLS_TCB_SIZE)
                                   & ~tls_static_align_m1);
#elif TLS_DTV_AT_TP
          pd = (struct pthread *) ((((uintptr_t) mem + size
                                     - tls_static_size_for_stack)
                                    & ~tls_static_align_m1)
                                   - TLS_PRE_TCB_SIZE);
#endif
          /* Now mprotect the required region excluding the guard area.  */
          if (__glibc_likely (guardsize > 0))
            {
              char *guard = guard_position (mem, size, guardsize, pd,
                                            pagesize_m1);
              if (setup_stack_prot (mem, size, guard, guardsize, prot) != 0)
                {
                  __munmap (mem, size);
                  return errno;
                }
            }
          /* Remember the stack-related values.  */
          pd->stackblock = mem;
          pd->stackblock_size = size;
          /* Update guardsize for newly allocated guardsize to avoid
             an mprotect in guard resize below.  */
          pd->guardsize = guardsize;

          /* We allocated the first block thread-specific data array.
             This address will not change for the lifetime of this
             object.  */
          pd->specific[0] = pd->specific_1stblock;

          /* This is at least the second thread.  */
          pd->header.multiple_threads = 1;
#ifdef NEED_DL_SYSINFO
          SETUP_THREAD_SYSINFO (pd);
#endif

          /* Don't allow setxid until cloned.  */
          pd->setxid_futex = -1;

          /* Allocate the DTV for this thread.  */
          if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
            {
              /* Something went wrong.  */
              assert (errno == ENOMEM);

              /* Free the stack memory we just allocated.  */
              (void) __munmap (mem, size);

              return errno;
            }

          /* Prepare to modify global data.  */
          lll_lock (GL (dl_stack_cache_lock), LLL_PRIVATE);

          /* And add to the list of stacks in use.  */
          __nptl_stack_list_add (&pd->list, &GL (dl_stack_used));

          lll_unlock (GL (dl_stack_cache_lock), LLL_PRIVATE);
          /* There might have been a race.  Another thread might have
             caused the stacks to get exec permission while this new
             stack was prepared.  Detect if this was possible and
             change the permission if necessary.  */
          if (__builtin_expect ((GL(dl_stack_flags) & PF_X) != 0
                                && (prot & PROT_EXEC) == 0, 0))
            {
              int err = __nptl_change_stack_perm (pd);
              if (err != 0)
                {
                  /* Free the stack memory we just allocated.  */
                  (void) __munmap (mem, size);

                  return err;
                }
            }
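
          /* (The PF_X recheck above matters because a concurrent dlopen
             of an object that requires an executable stack can set the
             flag in GL(dl_stack_flags) after PROT was computed from
             it.)  */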
          /* Note that all of the stack and the thread descriptor is
             zeroed.  This means we do not have to initialize fields
             with initial value zero.  This is specifically true for
             the 'tid' field which is always set back to zero once the
             stack is not used anymore and for the 'guardsize' field
             which will be read next.  */
        }
      /* Create or resize the guard area if necessary.  */
      if (__glibc_unlikely (guardsize > pd->guardsize))
        {
          char *guard = guard_position (mem, size, guardsize, pd,
                                        pagesize_m1);
          if (__mprotect (guard, guardsize, PROT_NONE) != 0)
            {
            mprot_error:
              lll_lock (GL (dl_stack_cache_lock), LLL_PRIVATE);

              /* Remove the thread from the list.  */
              __nptl_stack_list_del (&pd->list);

              lll_unlock (GL (dl_stack_cache_lock), LLL_PRIVATE);

              /* Get rid of the TLS block we allocated.  */
              _dl_deallocate_tls (TLS_TPADJ (pd), false);

              /* Free the stack memory regardless of whether the size
                 of the cache is over the limit or not.  If this piece
                 of memory caused problems we better do not use it
                 anymore.  Uh, and we ignore possible errors.  There
                 is nothing we could do.  */
              (void) __munmap (mem, size);

              return errno;
            }

          pd->guardsize = guardsize;
        }
      else if (__builtin_expect (pd->guardsize - guardsize > size - reqsize,
                                 0))
        {
          /* The old guard area is too large.  */

#ifdef NEED_SEPARATE_REGISTER_STACK
          char *guard = mem + (((size - guardsize) / 2) & ~pagesize_m1);
          char *oldguard = mem + (((size - pd->guardsize) / 2)
                                  & ~pagesize_m1);

          if (oldguard < guard
              && __mprotect (oldguard, guard - oldguard, prot) != 0)
            goto mprot_error;

          if (__mprotect (guard + guardsize,
                          oldguard + pd->guardsize - guard - guardsize,
                          prot) != 0)
            goto mprot_error;
#elif _STACK_GROWS_DOWN
          if (__mprotect ((char *) mem + guardsize,
                          pd->guardsize - guardsize, prot) != 0)
            goto mprot_error;
#elif _STACK_GROWS_UP
          char *new_guard = (char *) (((uintptr_t) pd - guardsize)
                                      & ~pagesize_m1);
          char *old_guard = (char *) (((uintptr_t) pd - pd->guardsize)
                                      & ~pagesize_m1);
          /* The guard size difference might be > 0, but once rounded
             to the nearest page the size difference might be zero.  */
          if (new_guard > old_guard
              && __mprotect (old_guard, new_guard - old_guard, prot) != 0)
            goto mprot_error;
#endif

          pd->guardsize = guardsize;
        }
      /* The pthread_getattr_np() calls need to get passed the size
         requested in the attribute, regardless of how large the
         actually used guardsize is.  */
      pd->reported_guardsize = reported_guardsize;
    }
  /* Initialize the lock.  We have to do this unconditionally since the
     stillborn thread could be canceled while the lock is taken.  */
  pd->lock = LLL_LOCK_INITIALIZER;

  /* The robust mutex lists also need to be initialized
     unconditionally because the cleanup for the previous stack owner
     might have happened in the kernel.  */
  pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
                                  - offsetof (pthread_mutex_t,
                                              __data.__list.__next));
  pd->robust_head.list_op_pending = NULL;
#if __PTHREAD_MUTEX_HAVE_PREV
  pd->robust_prev = &pd->robust_head;
#endif
  pd->robust_head.list = &pd->robust_head;
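
  /* The futex_offset computed above is the constant distance from a
     robust mutex's list member (__data.__list.__next) to its futex word
     (__data.__lock); the kernel adds it to each list entry when walking
     the robust list at thread exit to find the futexes it must mark
     with FUTEX_OWNER_DIED.  */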
  /* We place the thread descriptor at the end of the stack.  */
  *pdp = pd;

  void *stacktop;

#if TLS_TCB_AT_TP
  /* The stack begins before the TCB and the static TLS block.  */
  stacktop = ((char *) (pd + 1) - tls_static_size_for_stack);
#elif TLS_DTV_AT_TP
  stacktop = (char *) (pd - 1);
#endif

  *stacksize = stacktop - pd->stackblock;
  *stack = pd->stackblock;

  return 0;
}
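
/* allocate_stack is the workhorse behind pthread_create: on success,
   *PDP is the new thread descriptor embedded in the stack block, and
   *STACK/*STACKSIZE describe the usable stack range that the caller
   hands on to the clone of the new thread.  */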
/* Maximum supported name from initial kernel support, not exported
   by user API.  */
#define ANON_VMA_NAME_MAX_LEN 80

#define SET_STACK_NAME(__prefix, __stack, __stacksize, __tid)          \
  ({                                                                    \
    char __stack_name[sizeof (__prefix) +                               \
                      INT_BUFSIZE_BOUND (unsigned int)];                \
    _Static_assert (sizeof __stack_name <= ANON_VMA_NAME_MAX_LEN,       \
                    "VMA name size larger than maximum supported");     \
    __snprintf (__stack_name, sizeof (__stack_name), __prefix "%u",     \
                (unsigned int) __tid);                                  \
    __set_vma_name (__stack, __stacksize, __stack_name);                \
  })
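
/* For a thread with TID 1234 the macro produces the name
   " glibc: pthread stack: 1234"; with kernel support for
   PR_SET_VMA_ANON_NAME the region then shows up in /proc/<pid>/maps
   as "[anon: glibc: pthread stack: 1234]".  */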
/* Set or clear the name associated with PD's stack VMA.  */
static void
name_stack_maps (struct pthread *pd, bool set)
{
#if _STACK_GROWS_DOWN && !defined(NEED_SEPARATE_REGISTER_STACK)
  void *stack = pd->stackblock + pd->guardsize;
#else
  void *stack = pd->stackblock;
#endif
  size_t stacksize = pd->stackblock_size - pd->guardsize;

  if (!set)
    __set_vma_name (stack, stacksize, NULL);
  else
    {
      unsigned int tid = pd->tid;
      if (pd->user_stack)
        SET_STACK_NAME (" glibc: pthread user stack: ", stack, stacksize,
                        tid);
      else
        SET_STACK_NAME (" glibc: pthread stack: ", stack, stacksize, tid);
    }
}