/* Copyright (C) 2002-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <dl-sysdep.h>
#include <dl-tls.h>
#include <tls.h>
#include <list.h>
#include <lowlevellock.h>
#include <futex-internal.h>
#include <kernel-features.h>
#include <stack-aliasing.h>
#ifndef NEED_SEPARATE_REGISTER_STACK

/* Most architectures have exactly one stack pointer.  Some have more.  */
# define STACK_VARIABLES void *stackaddr = NULL

/* How to pass the values to the 'create_thread' function.  */
# define STACK_VARIABLES_ARGS stackaddr

/* How to declare the function that receives these parameters.  */
# define STACK_VARIABLES_PARMS void *stackaddr

/* How to declare allocate_stack.  */
# define ALLOCATE_STACK_PARMS void **stack

/* This is how the function is called.  We do it this way to allow
   other variants of the function to have more parameters.  */
# define ALLOCATE_STACK(attr, pd) allocate_stack (attr, pd, &stackaddr)

#else
/* We need two stacks.  The kernel will place them but we have to tell
   the kernel about the size of the reserved address space.  */
# define STACK_VARIABLES void *stackaddr = NULL; size_t stacksize = 0

/* How to pass the values to the 'create_thread' function.  */
# define STACK_VARIABLES_ARGS stackaddr, stacksize

/* How to declare the function that receives these parameters.  */
# define STACK_VARIABLES_PARMS void *stackaddr, size_t stacksize

/* How to declare allocate_stack.  */
# define ALLOCATE_STACK_PARMS void **stack, size_t *stacksize

/* This is how the function is called.  We do it this way to allow
   other variants of the function to have more parameters.  */
# define ALLOCATE_STACK(attr, pd) \
  allocate_stack (attr, pd, &stackaddr, &stacksize)

#endif
/* Default alignment of stack.  */
#ifndef STACK_ALIGN
# define STACK_ALIGN __alignof__ (long double)
#endif
/* Default value for minimal stack size after allocating thread
   descriptor and guard.  */
#ifndef MINIMAL_REST_STACK
# define MINIMAL_REST_STACK 4096
#endif
/* Newer kernels have the MAP_STACK flag to indicate a mapping is used for
   a stack.  Use it when possible.  */
#ifndef MAP_STACK
# define MAP_STACK 0
#endif
/* This yields the pointer that TLS support code calls the thread pointer.  */
#if TLS_TCB_AT_TP
# define TLS_TPADJ(pd) (pd)
#elif TLS_DTV_AT_TP
# define TLS_TPADJ(pd) ((struct pthread *)((char *) (pd) + TLS_PRE_TCB_SIZE))
#endif
/* Cache handling for not-yet-freed stacks.  */

/* Maximum size of the cache, in bytes.  */
static size_t stack_cache_maxsize = 40 * 1024 * 1024; /* 40 MiB by default.  */
static size_t stack_cache_actsize;
/* Mutex protecting the stack cache variables and the lists below.  */
static int stack_cache_lock = LLL_LOCK_INITIALIZER;
/* List of queued stack frames.  */
static LIST_HEAD (stack_cache);

/* List of the stacks in use.  */
static LIST_HEAD (stack_used);
/* We need to record what list operations we are going to do so that,
   in case of an asynchronous interruption due to a fork() call, we
   can correct for the work.  */
static uintptr_t in_flight_stack;
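/* The value encodes both the element and the operation: stack_list_add
   sets the low bit, stack_list_del leaves it clear.  __reclaim_stacks
   decodes this after fork () to replay or undo the interrupted list
   operation.  */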
/* List of the threads with user provided stacks in use.  No need to
   initialize this, since it's done in __pthread_initialize_minimal.  */
list_t __stack_user __attribute__ ((nocommon));
hidden_data_def (__stack_user)
/* Check whether the stack is still used or not.  */
#define FREE_P(descr) ((descr)->tid <= 0)
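/* The TID is cleared by the kernel via the CLONE_CHILD_CLEARTID futex
   when the thread exits, and __reclaim_stacks below resets it to zero
   after fork (), so a non-positive value means no live thread owns the
   descriptor.  */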
static void
stack_list_del (list_t *elem)
{
  in_flight_stack = (uintptr_t) elem;

  atomic_write_barrier ();

  list_del (elem);

  atomic_write_barrier ();

  in_flight_stack = 0;
}
static void
stack_list_add (list_t *elem, list_t *list)
{
  in_flight_stack = (uintptr_t) elem | 1;

  atomic_write_barrier ();

  list_add (elem, list);

  atomic_write_barrier ();

  in_flight_stack = 0;
}
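/* The write barriers in the two helpers above order the in_flight_stack
   updates against the actual list manipulation: a fork () arriving at
   any instant sees either a consistent list or a recorded in-flight
   element, never a half-linked node without a record of it.  */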
/* We create a doubly linked list of all cache entries.  Doubly linked
   because this allows removing entries from the end.  */
/* Get a stack frame from the cache.  We have to match by size since
   some blocks might be too small or far too large.  */
static struct pthread *
get_cached_stack (size_t *sizep, void **memp)
{
  size_t size = *sizep;
  struct pthread *result = NULL;
  list_t *entry;

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  /* Search the cache for a matching entry.  We search for the
     smallest stack which has at least the required size.  Note that
     in normal situations the size of all allocated stacks is the
     same.  At the very least there are only a few different sizes.
     Therefore this loop will exit early most of the time with an
     exact match.  */
  list_for_each (entry, &stack_cache)
    {
      struct pthread *curr;

      curr = list_entry (entry, struct pthread, list);
      if (FREE_P (curr) && curr->stackblock_size >= size)
        {
          if (curr->stackblock_size == size)
            {
              result = curr;
              break;
            }

          if (result == NULL
              || result->stackblock_size > curr->stackblock_size)
            result = curr;
        }
    }

  if (__builtin_expect (result == NULL, 0)
      /* Make sure the size difference is not too excessive.  In that
         case we do not use the block.  */
      || __builtin_expect (result->stackblock_size > 4 * size, 0))
    {
      /* Release the lock.  */
      lll_unlock (stack_cache_lock, LLL_PRIVATE);

      return NULL;
    }

  /* Don't allow setxid until cloned.  */
  result->setxid_futex = -1;

  /* Dequeue the entry.  */
  stack_list_del (&result->list);

  /* And add to the list of stacks in use.  */
  stack_list_add (&result->list, &stack_used);

  /* And decrease the cache size.  */
  stack_cache_actsize -= result->stackblock_size;

  /* Release the lock early.  */
  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  /* Report size and location of the stack to the caller.  */
  *sizep = result->stackblock_size;
  *memp = result->stackblock;

  /* Cancellation handling is back to the default.  */
  result->cancelhandling = 0;
  result->cleanup = NULL;

  /* No pending event.  */
  result->nextevent = NULL;

  /* Clear the DTV.  */
  dtv_t *dtv = GET_DTV (TLS_TPADJ (result));
  for (size_t cnt = 0; cnt < dtv[-1].counter; ++cnt)
    free (dtv[1 + cnt].pointer.to_free);
  memset (dtv, '\0', (dtv[-1].counter + 1) * sizeof (dtv_t));

  /* Re-initialize the TLS.  */
  _dl_allocate_tls_init (TLS_TPADJ (result));

  return result;
}
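/* On success the caller gets a descriptor whose DTV and static TLS have
   already been re-initialized; *SIZEP and *MEMP describe the reused
   block, which may be up to four times larger than requested due to the
   size check above.  */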
/* Free stacks until cache size is lower than LIMIT.  */
static void
free_stacks (size_t limit)
{
  /* We reduce the size of the cache.  Remove the last entries until
     the size is below the limit.  */
  list_t *entry;
  list_t *prev;

  /* Search from the end of the list.  */
  list_for_each_prev_safe (entry, prev, &stack_cache)
    {
      struct pthread *curr;

      curr = list_entry (entry, struct pthread, list);
      if (FREE_P (curr))
        {
          /* Unlink the block.  */
          stack_list_del (entry);

          /* Account for the freed memory.  */
          stack_cache_actsize -= curr->stackblock_size;

          /* Free the memory associated with the ELF TLS.  */
          _dl_deallocate_tls (TLS_TPADJ (curr), false);

          /* Remove this block.  This should never fail.  If it does
             something is really wrong.  */
          if (__munmap (curr->stackblock, curr->stackblock_size) != 0)
            abort ();

          /* Maybe we have freed enough.  */
          if (stack_cache_actsize <= limit)
            break;
        }
    }
}
/* Free all the stacks on cleanup.  */
void
__nptl_stacks_freeres (void)
{
  free_stacks (0);
}
/* Add a stack frame which is not used anymore to the stack cache.
   Must be called with the cache lock held.  */
static inline void
__attribute ((always_inline))
queue_stack (struct pthread *stack)
{
  /* We unconditionally add the stack to the list.  The memory may
     still be in use but it will not be reused until the kernel marks
     the stack as not used anymore.  */
  stack_list_add (&stack->list, &stack_cache);

  stack_cache_actsize += stack->stackblock_size;
  if (__glibc_unlikely (stack_cache_actsize > stack_cache_maxsize))
    free_stacks (stack_cache_maxsize);
}
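/* The cache is trimmed here on the queueing path rather than in
   get_cached_stack: queueing is the only operation that grows
   stack_cache_actsize, so it is the only place the limit can newly be
   exceeded.  */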
static int
change_stack_perm (struct pthread *pd
#ifdef NEED_SEPARATE_REGISTER_STACK
                   , size_t pagemask
#endif
                   )
{
#ifdef NEED_SEPARATE_REGISTER_STACK
  void *stack = (pd->stackblock
                 + (((((pd->stackblock_size - pd->guardsize) / 2)
                      & pagemask) + pd->guardsize) & pagemask));
  size_t len = pd->stackblock + pd->stackblock_size - stack;
#elif _STACK_GROWS_DOWN
  void *stack = pd->stackblock + pd->guardsize;
  size_t len = pd->stackblock_size - pd->guardsize;
#elif _STACK_GROWS_UP
  void *stack = pd->stackblock;
  size_t len = (uintptr_t) pd - pd->guardsize - (uintptr_t) pd->stackblock;
#else
# error "Define either _STACK_GROWS_DOWN or _STACK_GROWS_UP"
#endif
  if (__mprotect (stack, len, PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
    return errno;

  return 0;
}
/* Return the guard page position on allocated stack.  */
static inline char *
__attribute ((always_inline))
guard_position (void *mem, size_t size, size_t guardsize, struct pthread *pd,
                size_t pagesize_m1)
{
#ifdef NEED_SEPARATE_REGISTER_STACK
  return mem + (((size - guardsize) / 2) & ~pagesize_m1);
#elif _STACK_GROWS_DOWN
  return mem;
#elif _STACK_GROWS_UP
  return (char *) (((uintptr_t) pd - guardsize) & ~pagesize_m1);
#endif
}
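/* Worked example for the separate-register-stack case (assumed here to
   mean ia64): with size = 2 MiB, guardsize = 4 KiB and 4 KiB pages, the
   guard lands at mem + ((2097152 - 4096) / 2 & ~4095) = mem + 1044480,
   i.e. roughly in the middle of the block, separating the
   downward-growing memory stack from the upward-growing register
   backing store.  */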
/* Based on stack allocated with PROT_NONE, setup the required portions with
   'prot' flags based on the guard page position.  */
static inline int
setup_stack_prot (char *mem, size_t size, char *guard, size_t guardsize,
                  const int prot)
{
  char *guardend = guard + guardsize;
#if _STACK_GROWS_DOWN && !defined(NEED_SEPARATE_REGISTER_STACK)
  /* As defined at guard_position, for architectures with downward stack
     the guard page is always at start of the allocated area.  */
  if (__mprotect (guardend, size - guardsize, prot) != 0)
    return errno;
#else
  size_t mprots1 = (uintptr_t) guard - (uintptr_t) mem;
  if (__mprotect (mem, mprots1, prot) != 0)
    return errno;
  size_t mprots2 = ((uintptr_t) mem + size) - (uintptr_t) guardend;
  if (__mprotect (guardend, mprots2, prot) != 0)
    return errno;
#endif
  return 0;
}
/* Mark the memory of the stack as usable to the kernel.  It frees everything
   except for the space used for the TCB itself.  */
static __always_inline void
advise_stack_range (void *mem, size_t size, uintptr_t pd, size_t guardsize)
{
  uintptr_t sp = (uintptr_t) CURRENT_STACK_FRAME;
  size_t pagesize_m1 = __getpagesize () - 1;
#if _STACK_GROWS_DOWN && !defined(NEED_SEPARATE_REGISTER_STACK)
  size_t freesize = (sp - (uintptr_t) mem) & ~pagesize_m1;
  assert (freesize < size);
  if (freesize > PTHREAD_STACK_MIN)
    __madvise (mem, freesize - PTHREAD_STACK_MIN, MADV_DONTNEED);
#else
  /* Page aligned start of memory to free (higher than or equal
     to current sp plus the minimum stack size).  */
  uintptr_t freeblock = (sp + PTHREAD_STACK_MIN + pagesize_m1) & ~pagesize_m1;
  uintptr_t free_end = (pd - guardsize) & ~pagesize_m1;
  if (free_end > freeblock)
    {
      size_t freesize = free_end - freeblock;
      assert (freesize < size);
      __madvise ((void *) freeblock, freesize, MADV_DONTNEED);
    }
#endif
}
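/* MADV_DONTNEED lets the kernel reclaim the backing pages immediately;
   the mapping itself stays valid, and the pages read back as zeros if
   the range is ever touched again.  This keeps cached stacks cheap in
   terms of resident memory.  */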
/* Returns a usable stack for a new thread either by allocating a
   new stack or reusing a cached stack of sufficient size.
   ATTR must be non-NULL and point to a valid pthread_attr.
   PDP must be non-NULL.  */
static int
allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
                ALLOCATE_STACK_PARMS)
{
  struct pthread *pd;
  size_t size;
  size_t pagesize_m1 = __getpagesize () - 1;

  assert (powerof2 (pagesize_m1 + 1));
  assert (TCB_ALIGNMENT >= STACK_ALIGN);

  /* Get the stack size from the attribute if it is set.  Otherwise we
     use the default we determined at start time.  */
  if (attr->stacksize != 0)
    size = attr->stacksize;
  else
    {
      lll_lock (__default_pthread_attr_lock, LLL_PRIVATE);
      size = __default_pthread_attr.stacksize;
      lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);
    }
  /* Get memory for the stack.  */
  if (__glibc_unlikely (attr->flags & ATTR_FLAG_STACKADDR))
    {
      uintptr_t adj;
      char *stackaddr = (char *) attr->stackaddr;

      /* Assume the same layout as the _STACK_GROWS_DOWN case, with struct
         pthread at the top of the stack block.  Later we adjust the guard
         location and stack address to match the _STACK_GROWS_UP case.  */
      if (_STACK_GROWS_UP)
        stackaddr += attr->stacksize;

      /* If the user also specified the size of the stack make sure it
         is large enough.  */
      if (attr->stacksize != 0
          && attr->stacksize < (__static_tls_size + MINIMAL_REST_STACK))
        return EINVAL;

      /* Adjust stack size for alignment of the TLS block.  */
#if TLS_TCB_AT_TP
      adj = ((uintptr_t) stackaddr - TLS_TCB_SIZE)
            & __static_tls_align_m1;
      assert (size > adj + TLS_TCB_SIZE);
#elif TLS_DTV_AT_TP
      adj = ((uintptr_t) stackaddr - __static_tls_size)
            & __static_tls_align_m1;
      assert (size > adj);
#endif

      /* The user provided some memory.  Let's hope it matches the
         size...  We do not allocate guard pages if the user provided
         the stack.  It is the user's responsibility to do this if it
         is wanted.  */
#if TLS_TCB_AT_TP
      pd = (struct pthread *) ((uintptr_t) stackaddr
                               - TLS_TCB_SIZE - adj);
#elif TLS_DTV_AT_TP
      pd = (struct pthread *) (((uintptr_t) stackaddr
                                - __static_tls_size - adj)
                               - TLS_PRE_TCB_SIZE);
#endif

      /* The user provided stack memory needs to be cleared.  */
      memset (pd, '\0', sizeof (struct pthread));

      /* The first TSD block is included in the TCB.  */
      pd->specific[0] = pd->specific_1stblock;

      /* Remember the stack-related values.  */
      pd->stackblock = (char *) stackaddr - size;
      pd->stackblock_size = size;

      /* This is a user-provided stack.  It will not be queued in the
         stack cache nor will the memory (except the TLS memory) be freed.  */
      pd->user_stack = true;

      /* This is at least the second thread.  */
      pd->header.multiple_threads = 1;
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
      __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
#endif

#ifdef NEED_DL_SYSINFO
      SETUP_THREAD_SYSINFO (pd);
#endif

      /* Don't allow setxid until cloned.  */
      pd->setxid_futex = -1;

      /* Allocate the DTV for this thread.  */
      if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
        {
          /* Something went wrong.  */
          assert (errno == ENOMEM);
          return errno;
        }

      /* Prepare to modify global data.  */
      lll_lock (stack_cache_lock, LLL_PRIVATE);

      /* And add to the list of stacks in use.  */
      list_add (&pd->list, &__stack_user);

      lll_unlock (stack_cache_lock, LLL_PRIVATE);
    }
  else
    {
      /* Allocate some anonymous memory.  If possible use the cache.  */
      size_t guardsize;
      size_t reqsize;
      void *mem;
      const int prot = (PROT_READ | PROT_WRITE
                        | ((GL(dl_stack_flags) & PF_X) ? PROT_EXEC : 0));

      /* Adjust the stack size for alignment.  */
      size &= ~__static_tls_align_m1;
      assert (size != 0);

      /* Make sure the size of the stack is enough for the guard and
         eventually the thread descriptor.  */
      guardsize = (attr->guardsize + pagesize_m1) & ~pagesize_m1;
      if (guardsize < attr->guardsize || size + guardsize < guardsize)
        /* Arithmetic overflow.  */
        return EINVAL;
      size += guardsize;
      if (__builtin_expect (size < ((guardsize + __static_tls_size
                                     + MINIMAL_REST_STACK + pagesize_m1)
                                    & ~pagesize_m1),
                            0))
        /* The stack is too small (or the guard too large).  */
        return EINVAL;
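      /* Example of the checks above: with 4 KiB pages an attr->guardsize
         of 1 rounds up to 4096, while a huge value near SIZE_MAX would
         make "size + guardsize" wrap around and is rejected as
         overflow.  */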
      /* Try to get a stack from the cache.  */
      reqsize = size;
      pd = get_cached_stack (&size, &mem);
      if (pd == NULL)
        {
          /* To avoid aliasing effects on a larger scale than pages we
             adjust the allocated stack size if necessary.  This way
             allocations directly following each other will not have
             aliasing problems.  */
#if MULTI_PAGE_ALIASING != 0
          if ((size % MULTI_PAGE_ALIASING) == 0)
            size += pagesize_m1 + 1;
#endif

          /* If a guard page is required, avoid committing memory by first
             allocating with PROT_NONE and then setting the required
             permission on everything but the guard page.  */
          mem = __mmap (NULL, size, (guardsize == 0) ? prot : PROT_NONE,
                        MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);

          if (__glibc_unlikely (mem == MAP_FAILED))
            return errno;

          /* SIZE is guaranteed to be greater than zero.
             So we can never get a null pointer back from mmap.  */
          assert (mem != NULL);

          /* Place the thread descriptor at the end of the stack.  */
#if TLS_TCB_AT_TP
          pd = (struct pthread *) ((((uintptr_t) mem + size)
                                    - TLS_TCB_SIZE)
                                   & ~__static_tls_align_m1);
#elif TLS_DTV_AT_TP
          pd = (struct pthread *) ((((uintptr_t) mem + size
                                     - __static_tls_size)
                                    & ~__static_tls_align_m1)
                                   - TLS_PRE_TCB_SIZE);
#endif

          /* Now mprotect the required region excluding the guard area.  */
          if (__glibc_likely (guardsize > 0))
            {
              char *guard = guard_position (mem, size, guardsize, pd,
                                            pagesize_m1);
              if (setup_stack_prot (mem, size, guard, guardsize, prot) != 0)
                {
                  __munmap (mem, size);
                  return errno;
                }
            }
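          /* Resulting layout for the common _STACK_GROWS_DOWN plus
             TLS_TCB_AT_TP configuration (e.g. x86_64):
               mem .. mem+guardsize   guard pages (PROT_NONE)
               mem+guardsize ..       usable stack, growing downward
               .. pd                  static TLS block
               pd .. mem+size         TCB (struct pthread).  */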
          /* Remember the stack-related values.  */
          pd->stackblock = mem;
          pd->stackblock_size = size;
          /* Update guardsize for newly allocated guardsize to avoid
             an mprotect in guard resize below.  */
          pd->guardsize = guardsize;

          /* We allocated the first block of the thread-specific data
             array.  This address will not change for the lifetime of
             this object.  */
          pd->specific[0] = pd->specific_1stblock;

          /* This is at least the second thread.  */
          pd->header.multiple_threads = 1;
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
          __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
#endif

#ifdef NEED_DL_SYSINFO
          SETUP_THREAD_SYSINFO (pd);
#endif

          /* Don't allow setxid until cloned.  */
          pd->setxid_futex = -1;

          /* Allocate the DTV for this thread.  */
          if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
            {
              /* Something went wrong.  */
              assert (errno == ENOMEM);

              /* Free the stack memory we just allocated.  */
              (void) __munmap (mem, size);

              return errno;
            }

          /* Prepare to modify global data.  */
          lll_lock (stack_cache_lock, LLL_PRIVATE);

          /* And add to the list of stacks in use.  */
          stack_list_add (&pd->list, &stack_used);

          lll_unlock (stack_cache_lock, LLL_PRIVATE);

          /* There might have been a race.  Another thread might have
             caused the stacks to get exec permission while this new
             stack was prepared.  Detect if this was possible and
             change the permission if necessary.  */
          if (__builtin_expect ((GL(dl_stack_flags) & PF_X) != 0
                                && (prot & PROT_EXEC) == 0, 0))
            {
              int err = change_stack_perm (pd
#ifdef NEED_SEPARATE_REGISTER_STACK
                                           , ~pagesize_m1
#endif
                                           );
              if (err != 0)
                {
                  /* Free the stack memory we just allocated.  */
                  (void) __munmap (mem, size);

                  return err;
                }
            }

          /* Note that all of the stack and the thread descriptor is
             zeroed.  This means we do not have to initialize fields
             with initial value zero.  This is specifically true for
             the 'tid' field which is always set back to zero once the
             stack is not used anymore and for the 'guardsize' field
             which will be read next.  */
        }
      /* Create or resize the guard area if necessary.  */
      if (__glibc_unlikely (guardsize > pd->guardsize))
        {
          char *guard = guard_position (mem, size, guardsize, pd,
                                        pagesize_m1);
          if (__mprotect (guard, guardsize, PROT_NONE) != 0)
            {
            mprot_error:
              lll_lock (stack_cache_lock, LLL_PRIVATE);

              /* Remove the thread from the list.  */
              stack_list_del (&pd->list);

              lll_unlock (stack_cache_lock, LLL_PRIVATE);

              /* Get rid of the TLS block we allocated.  */
              _dl_deallocate_tls (TLS_TPADJ (pd), false);

              /* Free the stack memory regardless of whether the size
                 of the cache is over the limit or not.  If this piece
                 of memory caused problems we better do not use it
                 anymore.  Uh, and we ignore possible errors.  There
                 is nothing we could do.  */
              (void) __munmap (mem, size);

              return errno;
            }

          pd->guardsize = guardsize;
        }
      else if (__builtin_expect (pd->guardsize - guardsize > size - reqsize,
                                 0))
        {
          /* The old guard area is too large.  */

#ifdef NEED_SEPARATE_REGISTER_STACK
          char *guard = mem + (((size - guardsize) / 2) & ~pagesize_m1);
          char *oldguard = mem + (((size - pd->guardsize) / 2) & ~pagesize_m1);

          if (oldguard < guard
              && __mprotect (oldguard, guard - oldguard, prot) != 0)
            goto mprot_error;

          if (__mprotect (guard + guardsize,
                          oldguard + pd->guardsize - guard - guardsize,
                          prot) != 0)
            goto mprot_error;
#elif _STACK_GROWS_DOWN
          if (__mprotect ((char *) mem + guardsize, pd->guardsize - guardsize,
                          prot) != 0)
            goto mprot_error;
#elif _STACK_GROWS_UP
          char *new_guard = (char *) (((uintptr_t) pd - guardsize)
                                      & ~pagesize_m1);
          char *old_guard = (char *) (((uintptr_t) pd - pd->guardsize)
                                      & ~pagesize_m1);
          /* The guard size difference might be > 0, but once rounded
             to the nearest page the size difference might be zero.  */
          if (new_guard > old_guard
              && __mprotect (old_guard, new_guard - old_guard, prot) != 0)
            goto mprot_error;
#endif

          pd->guardsize = guardsize;
        }
      /* The pthread_getattr_np() calls need to get passed the size
         requested in the attribute, regardless of how large the
         actually used guardsize is.  */
      pd->reported_guardsize = guardsize;
    }
  /* Initialize the lock.  We have to do this unconditionally since the
     stillborn thread could be canceled while the lock is taken.  */
  pd->lock = LLL_LOCK_INITIALIZER;

  /* The robust mutex lists also need to be initialized
     unconditionally because the cleanup for the previous stack owner
     might have happened in the kernel.  */
  pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
                                  - offsetof (pthread_mutex_t,
                                              __data.__list.__next));
  pd->robust_head.list_op_pending = NULL;
#if __PTHREAD_MUTEX_HAVE_PREV
  pd->robust_prev = &pd->robust_head;
#endif
  pd->robust_head.list = &pd->robust_head;
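  /* futex_offset is part of the kernel's robust-futex ABI: given a
     pointer to a mutex's list entry, the kernel adds this offset to
     locate the futex word it has to clean up when the owner dies.  A
     head pointing to itself means the list is empty.  */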
  /* We place the thread descriptor at the end of the stack.  */
  *pdp = pd;

#if _STACK_GROWS_DOWN
  void *stacktop;

# if TLS_TCB_AT_TP
  /* The stack begins before the TCB and the static TLS block.  */
  stacktop = ((char *) (pd + 1) - __static_tls_size);
# elif TLS_DTV_AT_TP
  stacktop = (char *) (pd - 1);
# endif

# ifdef NEED_SEPARATE_REGISTER_STACK
  *stack = pd->stackblock;
  *stacksize = stacktop - *stack;
# else
  *stack = stacktop;
# endif
#else
  *stack = pd->stackblock;
#endif

  return 0;
}
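/* Usage sketch (hypothetical caller, mirroring how pthread_create uses
   the macros defined at the top of this file):

     STACK_VARIABLES;
     struct pthread *pd;
     int err = ALLOCATE_STACK (attr, &pd);
     if (err != 0)
       return err;        / * e.g. EINVAL or an mmap failure code * /
     / * hand STACK_VARIABLES_ARGS and PD to create_thread * /

   On success PD is fully initialized (TLS, lock, robust list) and the
   stack variables carry the initial stack arguments for clone.  */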
void
__deallocate_stack (struct pthread *pd)
{
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  /* Remove the thread from the list of threads with user defined
     stacks.  */
  stack_list_del (&pd->list);

  /* Not much to do.  Just free the mmap()ed memory.  Note that we do
     not reset the 'used' flag in the 'tid' field.  This is done by
     the kernel.  If no thread has been created yet this field is
     still zero.  */
  if (__glibc_likely (! pd->user_stack))
    (void) queue_stack (pd);
  else
    /* Free the memory associated with the ELF TLS.  */
    _dl_deallocate_tls (TLS_TPADJ (pd), false);

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
}
int
__make_stacks_executable (void **stack_endp)
{
  /* First the main thread's stack.  */
  int err = _dl_make_stack_executable (stack_endp);
  if (err != 0)
    return err;

#ifdef NEED_SEPARATE_REGISTER_STACK
  const size_t pagemask = ~(__getpagesize () - 1);
#endif

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      err = change_stack_perm (list_entry (runp, struct pthread, list)
#ifdef NEED_SEPARATE_REGISTER_STACK
                               , pagemask
#endif
                               );
      if (err != 0)
        break;
    }

  /* Also change the permission for the currently unused stacks.  This
     might be wasted time but better spend it here than adding a check
     in the fast path.  */
  if (err == 0)
    list_for_each (runp, &stack_cache)
      {
        err = change_stack_perm (list_entry (runp, struct pthread, list)
#ifdef NEED_SEPARATE_REGISTER_STACK
                                 , pagemask
#endif
                                 );
        if (err != 0)
          break;
      }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  return err;
}
/* In case of a fork() call the memory allocation in the child will be
   the same but only one thread is running.  All stacks except that of
   the one running thread are not used anymore.  We have to recycle
   them.  */
void
__reclaim_stacks (void)
{
  struct pthread *self = (struct pthread *) THREAD_SELF;

  /* No locking necessary.  The caller is the only stack in use.  But
     we have to be aware that we might have interrupted a list
     operation.  */

  if (in_flight_stack != 0)
    {
      bool add_p = in_flight_stack & 1;
      list_t *elem = (list_t *) (in_flight_stack & ~(uintptr_t) 1);

      if (add_p)
        {
          /* We always add at the beginning of the list.  So in this case we
             only need to check the beginning of these lists to see if the
             pointers at the head of the list are inconsistent.  */
          list_t *l = NULL;

          if (stack_used.next->prev != &stack_used)
            l = &stack_used;
          else if (stack_cache.next->prev != &stack_cache)
            l = &stack_cache;

          if (l != NULL)
            {
              assert (l->next->prev == elem);
              elem->next = l->next;
              elem->prev = l;
              l->next = elem;
            }
        }
      else
        {
          /* We can simply always replay the delete operation.  */
          elem->next->prev = elem->prev;
          elem->prev->next = elem->next;
        }
    }

  /* Mark all stacks except the still running one as free.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *curp = list_entry (runp, struct pthread, list);
      if (curp != self)
        {
          /* This marks the stack as free.  */
          curp->tid = 0;

          /* Account for the size of the stack.  */
          stack_cache_actsize += curp->stackblock_size;

          if (curp->specific_used)
            {
              /* Clear the thread-specific data.  */
              memset (curp->specific_1stblock, '\0',
                      sizeof (curp->specific_1stblock));

              curp->specific_used = false;

              for (size_t cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
                if (curp->specific[cnt] != NULL)
                  {
                    memset (curp->specific[cnt], '\0',
                            sizeof (curp->specific_1stblock));

                    /* We have allocated the block which we do not
                       free here so re-set the bit.  */
                    curp->specific_used = true;
                  }
            }
        }
    }

  /* Add the stack of all running threads to the cache.  */
  list_splice (&stack_used, &stack_cache);

  /* Remove the entry for the current thread from the cache list
     and add it to the list of running threads.  Which of the two
     lists is decided by the user_stack flag.  */
  stack_list_del (&self->list);

  /* Re-initialize the lists for all the threads.  */
  INIT_LIST_HEAD (&stack_used);
  INIT_LIST_HEAD (&__stack_user);

  if (__glibc_unlikely (THREAD_GETMEM (self, user_stack)))
    list_add (&self->list, &__stack_user);
  else
    list_add (&self->list, &stack_used);

  /* There is one thread running.  */
  __nptl_nthreads = 1;

  in_flight_stack = 0;

  /* Initialize locks.  */
  stack_cache_lock = LLL_LOCK_INITIALIZER;
  __default_pthread_attr_lock = LLL_LOCK_INITIALIZER;
}
static void
setxid_mark_thread (struct xid_command *cmdp, struct pthread *t)
{
  int ch;

  /* Wait until this thread is cloned.  */
  if (t->setxid_futex == -1
      && ! atomic_compare_and_exchange_bool_acq (&t->setxid_futex, -2, -1))
    do
      futex_wait_simple (&t->setxid_futex, -2, FUTEX_PRIVATE);
    while (t->setxid_futex == -2);

  /* Don't let the thread exit before the setxid handler runs.  */
  t->setxid_futex = 0;

  do
    {
      ch = t->cancelhandling;

      /* If the thread is exiting right now, ignore it.  */
      if ((ch & EXITING_BITMASK) != 0)
        {
          /* Release the futex if there is no other setxid in
             progress.  */
          if ((ch & SETXID_BITMASK) == 0)
            {
              t->setxid_futex = 1;
              futex_wake (&t->setxid_futex, 1, FUTEX_PRIVATE);
            }
          return;
        }
    }
  while (atomic_compare_and_exchange_bool_acq (&t->cancelhandling,
                                               ch | SETXID_BITMASK, ch));
}
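/* State protocol for setxid_futex, as inferred from this function and
   its callers: -1 thread not yet fully cloned, -2 a setxid operation is
   waiting for the clone to finish, 0 marked (the thread must not exit
   before the handler runs), 1 released.  */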
static void
setxid_unmark_thread (struct xid_command *cmdp, struct pthread *t)
{
  int ch;

  do
    {
      ch = t->cancelhandling;
      if ((ch & SETXID_BITMASK) == 0)
        return;
    }
  while (atomic_compare_and_exchange_bool_acq (&t->cancelhandling,
                                               ch & ~SETXID_BITMASK, ch));

  /* Release the futex just in case.  */
  t->setxid_futex = 1;
  futex_wake (&t->setxid_futex, 1, FUTEX_PRIVATE);
}
static int
setxid_signal_thread (struct xid_command *cmdp, struct pthread *t)
{
  if ((t->cancelhandling & SETXID_BITMASK) == 0)
    return 0;

  int val;
  pid_t pid = __getpid ();
  INTERNAL_SYSCALL_DECL (err);
  val = INTERNAL_SYSCALL_CALL (tgkill, err, pid, t->tid, SIGSETXID);

  /* If this failed, the thread must not have started yet or has
     already exited.  */
  if (!INTERNAL_SYSCALL_ERROR_P (val, err))
    {
      atomic_increment (&cmdp->cntr);
      return 1;
    }

  return 0;
}
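/* Background: the Linux set*id system calls change the credentials of
   the calling task only, while POSIX requires the change to apply to
   the whole process.  glibc therefore broadcasts SIGSETXID to every
   thread; each handler repeats the system call and acknowledges via
   cmdp->cntr.  */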
/* Check for consistency across set*id system call results.  The abort
   should not happen as long as all privilege changes happen through
   the glibc wrappers.  ERROR must be 0 (no error) or an errno
   code.  */
void
__nptl_setxid_error (struct xid_command *cmdp, int error)
{
  do
    {
      int olderror = cmdp->error;
      if (olderror == error)
        break;

      if (olderror != -1)
        {
          /* Mismatch between current and previous results.  Save the
             error value to memory so that it is not clobbered by the
             abort function and is preserved in coredumps.  */
          volatile int xid_err __attribute__ ((unused)) = error;
          abort ();
        }
    }
  while (atomic_compare_and_exchange_bool_acq (&cmdp->error, error, -1));
}
int
__nptl_setxid (struct xid_command *cmdp)
{
  int signalled;
  int result;
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  __xidcmd = cmdp;
  cmdp->cntr = 0;
  cmdp->error = -1;

  struct pthread *self = THREAD_SELF;

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_mark_thread (cmdp, t);
    }

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_mark_thread (cmdp, t);
    }

  /* Iterate until we don't succeed in signalling anyone.  That means
     we have gotten all running threads, and their children will be
     automatically correct once started.  */
  do
    {
      signalled = 0;

      list_for_each (runp, &stack_used)
        {
          struct pthread *t = list_entry (runp, struct pthread, list);
          if (t == self)
            continue;

          signalled += setxid_signal_thread (cmdp, t);
        }

      list_for_each (runp, &__stack_user)
        {
          struct pthread *t = list_entry (runp, struct pthread, list);
          if (t == self)
            continue;

          signalled += setxid_signal_thread (cmdp, t);
        }

      int cur = cmdp->cntr;
      while (cur != 0)
        {
          futex_wait_simple ((unsigned int *) &cmdp->cntr, cur,
                             FUTEX_PRIVATE);
          cur = cmdp->cntr;
        }
    }
  while (signalled != 0);

  /* Clean up flags, so that no thread blocks during exit waiting
     for a signal which will never come.  */
  list_for_each (runp, &stack_used)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_unmark_thread (cmdp, t);
    }

  list_for_each (runp, &__stack_user)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_unmark_thread (cmdp, t);
    }

  /* This must be last, otherwise the current thread might not have
     permission to send SIGSETXID to the other threads.  */
  INTERNAL_SYSCALL_DECL (err);
  result = INTERNAL_SYSCALL_NCS (cmdp->syscall_no, err, 3,
                                 cmdp->id[0], cmdp->id[1], cmdp->id[2]);
  int error = 0;
  if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (result, err)))
    {
      error = INTERNAL_SYSCALL_ERRNO (result, err);
      __set_errno (error);
      result = -1;
    }
  __nptl_setxid_error (cmdp, error);

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
  return result;
}
static inline void __attribute__ ((always_inline))
init_one_static_tls (struct pthread *curp, struct link_map *map)
{
# if TLS_TCB_AT_TP
  void *dest = (char *) curp - map->l_tls_offset;
# elif TLS_DTV_AT_TP
  void *dest = (char *) curp + map->l_tls_offset + TLS_PRE_TCB_SIZE;
# else
#  error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif

  /* Initialize the memory.  */
  memset (__mempcpy (dest, map->l_tls_initimage, map->l_tls_initimage_size),
          '\0', map->l_tls_blocksize - map->l_tls_initimage_size);
}
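/* The two cases mirror the two static TLS layouts: with TLS_TCB_AT_TP
   (e.g. x86_64) the static TLS block sits below the TCB and module
   offsets are subtracted from the thread pointer; with TLS_DTV_AT_TP
   (e.g. AArch64 or PowerPC) it sits above, so offsets are added beyond
   TLS_PRE_TCB_SIZE.  */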
void
__pthread_init_static_tls (struct link_map *map)
{
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    init_one_static_tls (list_entry (runp, struct pthread, list), map);

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    init_one_static_tls (list_entry (runp, struct pthread, list), map);

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
}
void
__wait_lookup_done (void)
{
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  struct pthread *self = THREAD_SELF;

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self || t->header.gscope_flag == THREAD_GSCOPE_FLAG_UNUSED)
        continue;

      int *const gscope_flagp = &t->header.gscope_flag;

      /* We have to wait until this thread is done with the global
         scope.  First tell the thread that we are waiting and
         possibly have to be woken.  */
      if (atomic_compare_and_exchange_bool_acq (gscope_flagp,
                                                THREAD_GSCOPE_FLAG_WAIT,
                                                THREAD_GSCOPE_FLAG_USED))
        continue;

      do
        futex_wait_simple ((unsigned int *) gscope_flagp,
                           THREAD_GSCOPE_FLAG_WAIT, FUTEX_PRIVATE);
      while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT);
    }

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self || t->header.gscope_flag == THREAD_GSCOPE_FLAG_UNUSED)
        continue;

      int *const gscope_flagp = &t->header.gscope_flag;

      /* We have to wait until this thread is done with the global
         scope.  First tell the thread that we are waiting and
         possibly have to be woken.  */
      if (atomic_compare_and_exchange_bool_acq (gscope_flagp,
                                                THREAD_GSCOPE_FLAG_WAIT,
                                                THREAD_GSCOPE_FLAG_USED))
        continue;

      do
        futex_wait_simple ((unsigned int *) gscope_flagp,
                           THREAD_GSCOPE_FLAG_WAIT, FUTEX_PRIVATE);
      while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT);
    }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
}
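/* "Global scope" refers to the dynamic linker's GSCOPE mechanism: a
   thread raises its gscope_flag while walking the symbol lookup scope,
   and scope changes (e.g. dlclose) call this function to be sure no
   thread can still reference the old scope before it is freed.  */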