/* Copyright (C) 2002-2017 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <dl-sysdep.h>
#include <dl-tls.h>
#include <tls.h>
#include <list.h>
#include <lowlevellock.h>
#include <futex-internal.h>
#include <kernel-features.h>
#include <stack-aliasing.h>

#ifndef NEED_SEPARATE_REGISTER_STACK

/* Most architectures have exactly one stack pointer.  Some have more.  */
# define STACK_VARIABLES void *stackaddr = NULL

/* How to pass the values to the 'create_thread' function.  */
# define STACK_VARIABLES_ARGS stackaddr

/* How to declare the function that receives these parameters.  */
# define STACK_VARIABLES_PARMS void *stackaddr

/* How to declare allocate_stack.  */
# define ALLOCATE_STACK_PARMS void **stack

/* This is how the function is called.  We do it this way to allow
   other variants of the function to have more parameters.  */
# define ALLOCATE_STACK(attr, pd) allocate_stack (attr, pd, &stackaddr)

#else

/* We need two stacks.  The kernel will place them but we have to tell
   the kernel about the size of the reserved address space.  */
# define STACK_VARIABLES void *stackaddr = NULL; size_t stacksize = 0

/* How to pass the values to the 'create_thread' function.  */
# define STACK_VARIABLES_ARGS stackaddr, stacksize

/* How to declare the function that receives these parameters.  */
# define STACK_VARIABLES_PARMS void *stackaddr, size_t stacksize

/* How to declare allocate_stack.  */
# define ALLOCATE_STACK_PARMS void **stack, size_t *stacksize

/* This is how the function is called.  We do it this way to allow
   other variants of the function to have more parameters.  */
# define ALLOCATE_STACK(attr, pd) \
  allocate_stack (attr, pd, &stackaddr, &stacksize)

#endif

/* Default alignment of stack.  */
#ifndef STACK_ALIGN
# define STACK_ALIGN __alignof__ (long double)
#endif

/* Default value for minimal stack size after allocating thread
   descriptor and guard.  */
#ifndef MINIMAL_REST_STACK
# define MINIMAL_REST_STACK 4096
#endif


/* Newer kernels have the MAP_STACK flag to indicate a mapping is used for
   a stack.  Use it when possible.  */
#ifndef MAP_STACK
# define MAP_STACK 0
#endif

/* This yields the pointer that TLS support code calls the thread pointer.  */
#if TLS_TCB_AT_TP
# define TLS_TPADJ(pd) (pd)
#elif TLS_DTV_AT_TP
# define TLS_TPADJ(pd) ((struct pthread *)((char *) (pd) + TLS_PRE_TCB_SIZE))
#endif

/* Cache handling for not-yet-freed stacks.  */

/* Maximum size of the cache, in bytes.  */
static size_t stack_cache_maxsize = 40 * 1024 * 1024; /* 40 MiB by default.  */
static size_t stack_cache_actsize;

/* Lock protecting the stack cache and the lists below.  */
static int stack_cache_lock = LLL_LOCK_INITIALIZER;

/* List of queued stack frames.  */
static LIST_HEAD (stack_cache);

/* List of the stacks in use.  */
static LIST_HEAD (stack_used);

/* We need to record which list operation is in progress so that, in
   case of an asynchronous interruption due to a fork() call, we can
   correct for the partially completed work.  */
static uintptr_t in_flight_stack;

/* List of the threads with user provided stacks in use.  No need to
   initialize this, since it's done in __pthread_initialize_minimal.  */
list_t __stack_user __attribute__ ((nocommon));
hidden_data_def (__stack_user)

/* Check whether the stack is still used or not.  */
#define FREE_P(descr) ((descr)->tid <= 0)

static void
stack_list_del (list_t *elem)
{
  in_flight_stack = (uintptr_t) elem;

  atomic_write_barrier ();

  list_del (elem);

  atomic_write_barrier ();

  in_flight_stack = 0;
}

static void
stack_list_add (list_t *elem, list_t *list)
{
  in_flight_stack = (uintptr_t) elem | 1;

  atomic_write_barrier ();

  list_add (elem, list);

  atomic_write_barrier ();

  in_flight_stack = 0;
}

/* We create a doubly linked list of all cache entries.  Doubly linked
   because this allows removing entries from the end.  */

/* Get a stack frame from the cache.  We have to match by size since
   some blocks might be too small or far too large.  */
static struct pthread *
get_cached_stack (size_t *sizep, void **memp)
{
  size_t size = *sizep;
  struct pthread *result = NULL;
  list_t *entry;

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  /* Search the cache for a matching entry.  We search for the
     smallest stack which has at least the required size.  Note that
     in normal situations the size of all allocated stacks is the
     same.  At the very least there are only a few different sizes.
     Therefore this loop will exit early most of the time with an
     exact match.  */
  list_for_each (entry, &stack_cache)
    {
      struct pthread *curr;

      curr = list_entry (entry, struct pthread, list);
      if (FREE_P (curr) && curr->stackblock_size >= size)
        {
          if (curr->stackblock_size == size)
            {
              result = curr;
              break;
            }

          if (result == NULL
              || result->stackblock_size > curr->stackblock_size)
            result = curr;
        }
    }

  if (__builtin_expect (result == NULL, 0)
      /* Make sure the size difference is not excessive.  In that
         case we do not use the block.  */
      || __builtin_expect (result->stackblock_size > 4 * size, 0))
    {
      /* Release the lock.  */
      lll_unlock (stack_cache_lock, LLL_PRIVATE);

      return NULL;
    }

  /* Don't allow setxid until cloned.  */
  result->setxid_futex = -1;

  /* Dequeue the entry.  */
  stack_list_del (&result->list);

  /* And add to the list of stacks in use.  */
  stack_list_add (&result->list, &stack_used);

  /* And decrease the cache size.  */
  stack_cache_actsize -= result->stackblock_size;

  /* Release the lock early.  */
  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  /* Report size and location of the stack to the caller.  */
  *sizep = result->stackblock_size;
  *memp = result->stackblock;

  /* Cancellation handling is back to the default.  */
  result->cancelhandling = 0;
  result->cleanup = NULL;

  /* No pending event.  */
  result->nextevent = NULL;

  /* Clear the DTV.  */
  dtv_t *dtv = GET_DTV (TLS_TPADJ (result));
  for (size_t cnt = 0; cnt < dtv[-1].counter; ++cnt)
    free (dtv[1 + cnt].pointer.to_free);
  memset (dtv, '\0', (dtv[-1].counter + 1) * sizeof (dtv_t));

  /* Re-initialize the TLS.  */
  _dl_allocate_tls_init (TLS_TPADJ (result));

  return result;
}

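
/* Worked example (added for illustration; not in the original sources):
   with cached free blocks of 512 KiB, 1 MiB and 8 MiB, a request for
   600 KiB selects the 1 MiB block (the smallest block that is large
   enough), while a request for exactly 512 KiB leaves the loop early
   with the exact match.  A request for 100 KiB against a cache that
   only holds the 8 MiB block is rejected by the "> 4 * size" check
   above, so the caller falls back to a fresh mapping.  */
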
/* Free stacks until cache size is lower than LIMIT.  */
void
__free_stacks (size_t limit)
{
  /* We reduce the size of the cache.  Remove the last entries until
     the size is below the limit.  */
  list_t *entry;
  list_t *prev;

  /* Search from the end of the list.  */
  list_for_each_prev_safe (entry, prev, &stack_cache)
    {
      struct pthread *curr;

      curr = list_entry (entry, struct pthread, list);
      if (FREE_P (curr))
        {
          /* Unlink the block.  */
          stack_list_del (entry);

          /* Account for the freed memory.  */
          stack_cache_actsize -= curr->stackblock_size;

          /* Free the memory associated with the ELF TLS.  */
          _dl_deallocate_tls (TLS_TPADJ (curr), false);

          /* Remove this block.  This should never fail.  If it does
             something is really wrong.  */
          if (__munmap (curr->stackblock, curr->stackblock_size) != 0)
            abort ();

          /* Maybe we have freed enough.  */
          if (stack_cache_actsize <= limit)
            break;
        }
    }
}

/* Add a stack frame which is not used anymore to the stack cache.
   Must be called with the cache lock held.  */
static inline void
__attribute ((always_inline))
queue_stack (struct pthread *stack)
{
  /* We unconditionally add the stack to the list.  The memory may
     still be in use but it will not be reused until the kernel marks
     the stack as not used anymore.  */
  stack_list_add (&stack->list, &stack_cache);

  stack_cache_actsize += stack->stackblock_size;
  if (__glibc_unlikely (stack_cache_actsize > stack_cache_maxsize))
    __free_stacks (stack_cache_maxsize);
}

static int
change_stack_perm (struct pthread *pd
#ifdef NEED_SEPARATE_REGISTER_STACK
                   , size_t pagemask
#endif
                   )
{
#ifdef NEED_SEPARATE_REGISTER_STACK
  void *stack = (pd->stackblock
                 + (((((pd->stackblock_size - pd->guardsize) / 2)
                      & pagemask) + pd->guardsize) & pagemask));
  size_t len = pd->stackblock + pd->stackblock_size - stack;
#elif _STACK_GROWS_DOWN
  void *stack = pd->stackblock + pd->guardsize;
  size_t len = pd->stackblock_size - pd->guardsize;
#elif _STACK_GROWS_UP
  void *stack = pd->stackblock;
  size_t len = (uintptr_t) pd - pd->guardsize - (uintptr_t) pd->stackblock;
#else
# error "Define either _STACK_GROWS_DOWN or _STACK_GROWS_UP"
#endif
  if (__mprotect (stack, len, PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
    return errno;

  return 0;
}

/* Return the guard page position on allocated stack.  */
static inline char *
__attribute ((always_inline))
guard_position (void *mem, size_t size, size_t guardsize, struct pthread *pd,
                size_t pagesize_m1)
{
#ifdef NEED_SEPARATE_REGISTER_STACK
  return mem + (((size - guardsize) / 2) & ~pagesize_m1);
#elif _STACK_GROWS_DOWN
  return mem;
#elif _STACK_GROWS_UP
  return (char *) (((uintptr_t) pd - guardsize) & ~pagesize_m1);
#endif
}

/* Based on stack allocated with PROT_NONE, set up the required portions with
   'prot' flags based on the guard page position.  */
static inline int
setup_stack_prot (char *mem, size_t size, char *guard, size_t guardsize,
                  const int prot)
{
  char *guardend = guard + guardsize;
#if _STACK_GROWS_DOWN
  /* As defined at guard_position, for architectures with downward stack
     the guard page is always at start of the allocated area.  */
  if (__mprotect (guardend, size - guardsize, prot) != 0)
    return errno;
#else
  size_t mprots1 = (uintptr_t) guard - (uintptr_t) mem;
  if (__mprotect (mem, mprots1, prot) != 0)
    return errno;
  size_t mprots2 = ((uintptr_t) mem + size) - (uintptr_t) guardend;
  if (__mprotect (guardend, mprots2, prot) != 0)
    return errno;
#endif
  return 0;
}

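
/* Illustrative sketch (added for this edit; not part of the original
   sources): the same reserve-then-enable technique in isolation.
   Reserving the whole block with PROT_NONE and then enabling access on
   everything except the guard keeps the guard pages from ever being
   committed.  All names below are hypothetical, and a downward-growing
   stack with the guard at the low end is assumed.  */
#if 0
# include <sys/mman.h>

static void *
example_reserve_stack (size_t size, size_t guardsize)
{
  /* Reserve address space only; no page is accessible yet.  */
  void *mem = mmap (NULL, size, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (mem == MAP_FAILED)
    return NULL;

  /* Enable access above the guard area; the first GUARDSIZE bytes stay
     PROT_NONE and act as the guard.  */
  if (mprotect ((char *) mem + guardsize, size - guardsize,
                PROT_READ | PROT_WRITE) != 0)
    {
      munmap (mem, size);
      return NULL;
    }

  /* The usable stack runs from mem + guardsize up to mem + size.  */
  return (char *) mem + size;
}
#endif
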
/* Returns a usable stack for a new thread either by allocating a
   new stack or reusing a cached stack of sufficient size.
   ATTR must be non-NULL and point to a valid pthread_attr.
   PDP must be non-NULL.  */
static int
allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
                ALLOCATE_STACK_PARMS)
{
  struct pthread *pd;
  size_t size;
  size_t pagesize_m1 = __getpagesize () - 1;

  assert (powerof2 (pagesize_m1 + 1));
  assert (TCB_ALIGNMENT >= STACK_ALIGN);

  /* Get the stack size from the attribute if it is set.  Otherwise we
     use the default we determined at start time.  */
  if (attr->stacksize != 0)
    size = attr->stacksize;
  else
    {
      lll_lock (__default_pthread_attr_lock, LLL_PRIVATE);
      size = __default_pthread_attr.stacksize;
      lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);
    }
  /* Get memory for the stack.  */
  if (__glibc_unlikely (attr->flags & ATTR_FLAG_STACKADDR))
    {
      uintptr_t adj;
      char *stackaddr = (char *) attr->stackaddr;

      /* Assume the same layout as the _STACK_GROWS_DOWN case, with struct
         pthread at the top of the stack block.  Later we adjust the guard
         location and stack address to match the _STACK_GROWS_UP case.  */
      if (_STACK_GROWS_UP)
        stackaddr += attr->stacksize;

      /* If the user also specified the size of the stack make sure it
         is large enough.  */
      if (attr->stacksize != 0
          && attr->stacksize < (__static_tls_size + MINIMAL_REST_STACK))
        return EINVAL;

      /* Adjust stack size for alignment of the TLS block.  */
#if TLS_TCB_AT_TP
      adj = ((uintptr_t) stackaddr - TLS_TCB_SIZE)
            & __static_tls_align_m1;
      assert (size > adj + TLS_TCB_SIZE);
#elif TLS_DTV_AT_TP
      adj = ((uintptr_t) stackaddr - __static_tls_size)
            & __static_tls_align_m1;
      assert (size > adj);
#endif

      /* The user provided some memory.  Let's hope it matches the
         size...  We do not allocate guard pages if the user provided
         the stack.  It is the user's responsibility to do this if it
         is wanted.  */
#if TLS_TCB_AT_TP
      pd = (struct pthread *) ((uintptr_t) stackaddr
                               - TLS_TCB_SIZE - adj);
#elif TLS_DTV_AT_TP
      pd = (struct pthread *) (((uintptr_t) stackaddr
                                - __static_tls_size - adj)
                               - TLS_PRE_TCB_SIZE);
#endif

      /* The user-provided stack memory needs to be cleared.  */
      memset (pd, '\0', sizeof (struct pthread));

      /* The first TSD block is included in the TCB.  */
      pd->specific[0] = pd->specific_1stblock;

      /* Remember the stack-related values.  */
      pd->stackblock = (char *) stackaddr - size;
      pd->stackblock_size = size;

      /* This is a user-provided stack.  It will not be queued in the
         stack cache nor will the memory (except the TLS memory) be freed.  */
      pd->user_stack = true;

      /* This is at least the second thread.  */
      pd->header.multiple_threads = 1;
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
      __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
#endif

#ifndef __ASSUME_PRIVATE_FUTEX
      /* The thread must know when private futexes are supported.  */
      pd->header.private_futex = THREAD_GETMEM (THREAD_SELF,
                                                header.private_futex);
#endif

#ifdef NEED_DL_SYSINFO
      SETUP_THREAD_SYSINFO (pd);
#endif

      /* Don't allow setxid until cloned.  */
      pd->setxid_futex = -1;

      /* Allocate the DTV for this thread.  */
      if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
        {
          /* Something went wrong.  */
          assert (errno == ENOMEM);
          return errno;
        }


      /* Prepare to modify global data.  */
      lll_lock (stack_cache_lock, LLL_PRIVATE);

      /* And add to the list of stacks in use.  */
      list_add (&pd->list, &__stack_user);

      lll_unlock (stack_cache_lock, LLL_PRIVATE);
    }
  else
    {
      /* Allocate some anonymous memory.  If possible use the cache.  */
      size_t guardsize;
      size_t reqsize;
      void *mem;
      const int prot = (PROT_READ | PROT_WRITE
                        | ((GL(dl_stack_flags) & PF_X) ? PROT_EXEC : 0));

      /* Adjust the stack size for alignment.  */
      size &= ~__static_tls_align_m1;
      assert (size != 0);

      /* Make sure the size of the stack is enough for the guard and,
         if necessary, the thread descriptor.  */
      guardsize = (attr->guardsize + pagesize_m1) & ~pagesize_m1;
      if (__builtin_expect (size < ((guardsize + __static_tls_size
                                     + MINIMAL_REST_STACK + pagesize_m1)
                                    & ~pagesize_m1),
                            0))
        /* The stack is too small (or the guard too large).  */
        return EINVAL;

      /* Try to get a stack from the cache.  */
      reqsize = size;
      pd = get_cached_stack (&size, &mem);
      if (pd == NULL)
        {
          /* To avoid aliasing effects on a larger scale than pages we
             adjust the allocated stack size if necessary.  This way
             allocations directly following each other will not have
             aliasing problems.  */
#if MULTI_PAGE_ALIASING != 0
          if ((size % MULTI_PAGE_ALIASING) == 0)
            size += pagesize_m1 + 1;
#endif
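          /* Worked example (added for illustration; not in the original
             sources): if MULTI_PAGE_ALIASING is 64 KiB and the rounded
             request is an exact multiple of it, back-to-back stack
             allocations would start at addresses whose low bits match and
             therefore tend to collide in virtually indexed caches; adding
             one extra page (pagesize_m1 + 1) staggers the thread
             descriptors of consecutive allocations.  */
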
          /* If a guard page is required, avoid committing memory by first
             allocating with PROT_NONE and then reserving with required
             permission excluding the guard page.  */
          mem = __mmap (NULL, size, (guardsize == 0) ? prot : PROT_NONE,
                        MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);

          if (__glibc_unlikely (mem == MAP_FAILED))
            return errno;

          /* SIZE is guaranteed to be greater than zero.
             So we can never get a null pointer back from mmap.  */
          assert (mem != NULL);

          /* Place the thread descriptor at the end of the stack.  */
#if TLS_TCB_AT_TP
          pd = (struct pthread *) ((char *) mem + size) - 1;
#elif TLS_DTV_AT_TP
          pd = (struct pthread *) ((((uintptr_t) mem + size
                                     - __static_tls_size)
                                    & ~__static_tls_align_m1)
                                   - TLS_PRE_TCB_SIZE);
#endif

          /* Now mprotect the required region excluding the guard area.  */
          if (__glibc_likely (guardsize > 0))
            {
              char *guard = guard_position (mem, size, guardsize, pd,
                                            pagesize_m1);
              if (setup_stack_prot (mem, size, guard, guardsize, prot) != 0)
                {
                  __munmap (mem, size);
                  return errno;
                }
            }

          /* Remember the stack-related values.  */
          pd->stackblock = mem;
          pd->stackblock_size = size;
          /* Record the newly allocated guard size so that the guard-resize
             code below does not issue another mprotect.  */
          pd->guardsize = guardsize;

          /* We allocated the first block of the thread-specific data
             array.  This address will not change for the lifetime of
             this object.  */
          pd->specific[0] = pd->specific_1stblock;

          /* This is at least the second thread.  */
          pd->header.multiple_threads = 1;
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
          __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
#endif

#ifndef __ASSUME_PRIVATE_FUTEX
          /* The thread must know when private futexes are supported.  */
          pd->header.private_futex = THREAD_GETMEM (THREAD_SELF,
                                                    header.private_futex);
#endif

#ifdef NEED_DL_SYSINFO
          SETUP_THREAD_SYSINFO (pd);
#endif

          /* Don't allow setxid until cloned.  */
          pd->setxid_futex = -1;

          /* Allocate the DTV for this thread.  */
          if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
            {
              /* Something went wrong.  */
              assert (errno == ENOMEM);

              /* Free the stack memory we just allocated.  */
              (void) __munmap (mem, size);

              return errno;
            }


          /* Prepare to modify global data.  */
          lll_lock (stack_cache_lock, LLL_PRIVATE);

          /* And add to the list of stacks in use.  */
          stack_list_add (&pd->list, &stack_used);

          lll_unlock (stack_cache_lock, LLL_PRIVATE);


          /* There might have been a race.  Another thread might have
             caused the stacks to get exec permission while this new
             stack was prepared.  Detect if this was possible and
             change the permission if necessary.  */
          if (__builtin_expect ((GL(dl_stack_flags) & PF_X) != 0
                                && (prot & PROT_EXEC) == 0, 0))
            {
              int err = change_stack_perm (pd
#ifdef NEED_SEPARATE_REGISTER_STACK
                                           , ~pagesize_m1
#endif
                                           );
              if (err != 0)
                {
                  /* Free the stack memory we just allocated.  */
                  (void) __munmap (mem, size);

                  return err;
                }
            }


          /* Note that all of the stack and the thread descriptor are
             zeroed.  This means we do not have to initialize fields
             with initial value zero.  This is specifically true for
             the 'tid' field which is always set back to zero once the
             stack is not used anymore and for the 'guardsize' field
             which will be read next.  */
        }
      /* Create or resize the guard area if necessary.  */
      if (__glibc_unlikely (guardsize > pd->guardsize))
        {
          char *guard = guard_position (mem, size, guardsize, pd,
                                        pagesize_m1);
          if (__mprotect (guard, guardsize, PROT_NONE) != 0)
            {
            mprot_error:
              lll_lock (stack_cache_lock, LLL_PRIVATE);

              /* Remove the thread from the list.  */
              stack_list_del (&pd->list);

              lll_unlock (stack_cache_lock, LLL_PRIVATE);

              /* Get rid of the TLS block we allocated.  */
              _dl_deallocate_tls (TLS_TPADJ (pd), false);

              /* Free the stack memory regardless of whether the size
                 of the cache is over the limit or not.  If this piece
                 of memory caused problems we better do not use it
                 anymore.  Uh, and we ignore possible errors.  There
                 is nothing we could do.  */
              (void) __munmap (mem, size);

              return errno;
            }

          pd->guardsize = guardsize;
        }
      else if (__builtin_expect (pd->guardsize - guardsize > size - reqsize,
                                 0))
        {
          /* The old guard area is too large.  */

#ifdef NEED_SEPARATE_REGISTER_STACK
          char *guard = mem + (((size - guardsize) / 2) & ~pagesize_m1);
          char *oldguard = mem + (((size - pd->guardsize) / 2) & ~pagesize_m1);

          if (oldguard < guard
              && __mprotect (oldguard, guard - oldguard, prot) != 0)
            goto mprot_error;

          if (__mprotect (guard + guardsize,
                          oldguard + pd->guardsize - guard - guardsize,
                          prot) != 0)
            goto mprot_error;
#elif _STACK_GROWS_DOWN
          if (__mprotect ((char *) mem + guardsize, pd->guardsize - guardsize,
                          prot) != 0)
            goto mprot_error;
#elif _STACK_GROWS_UP
          char *new_guard = (char *)(((uintptr_t) pd - guardsize)
                                     & ~pagesize_m1);
          char *old_guard = (char *)(((uintptr_t) pd - pd->guardsize)
                                     & ~pagesize_m1);
          /* The guard size difference might be > 0, but once rounded
             to the nearest page the size difference might be zero.  */
          if (new_guard > old_guard
              && mprotect (old_guard, new_guard - old_guard, prot) != 0)
            goto mprot_error;
#endif

          pd->guardsize = guardsize;
        }
      /* The pthread_getattr_np() calls need to get passed the size
         requested in the attribute, regardless of how large the
         actually used guardsize is.  */
      pd->reported_guardsize = guardsize;
    }
  /* Initialize the lock.  We have to do this unconditionally since the
     stillborn thread could be canceled while the lock is taken.  */
  pd->lock = LLL_LOCK_INITIALIZER;

  /* The robust mutex lists also need to be initialized
     unconditionally because the cleanup for the previous stack owner
     might have happened in the kernel.  */
  pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
                                  - offsetof (pthread_mutex_t,
                                              __data.__list.__next));
  pd->robust_head.list_op_pending = NULL;
#ifdef __PTHREAD_MUTEX_HAVE_PREV
  pd->robust_prev = &pd->robust_head;
#endif
  pd->robust_head.list = &pd->robust_head;

  /* We place the thread descriptor at the end of the stack.  */
  *pdp = pd;

#if _STACK_GROWS_DOWN
  void *stacktop;

# if TLS_TCB_AT_TP
  /* The stack begins before the TCB and the static TLS block.  */
  stacktop = ((char *) (pd + 1) - __static_tls_size);
# elif TLS_DTV_AT_TP
  stacktop = (char *) (pd - 1);
# endif

# ifdef NEED_SEPARATE_REGISTER_STACK
  *stack = pd->stackblock;
  *stacksize = stacktop - *stack;
# else
  *stack = stacktop;
# endif
#else
  *stack = pd->stackblock;
#endif

  return 0;
}

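
/* Illustrative sketch (added for this edit; not part of the original
   sources): how the STACK_VARIABLES/ALLOCATE_STACK macros defined at the
   top of this file are meant to be combined by a pthread_create-style
   caller.  Apart from the macros, every name below is hypothetical.  */
#if 0
static int
example_create_thread (const struct pthread_attr *iattr)
{
  STACK_VARIABLES;      /* Declares stackaddr (and stacksize on targets
                           with a separate register stack).  */
  struct pthread *pd;

  int err = ALLOCATE_STACK (iattr, &pd);
  if (err != 0)
    /* Out of memory or unusable attributes.  */
    return err;

  /* STACK_VARIABLES_ARGS would now be forwarded to the low-level thread
     creation code, and __deallocate_stack (pd) would give the block back
     if that step failed.  */
  return 0;
}
#endif
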
void
__deallocate_stack (struct pthread *pd)
{
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  /* Remove the thread from the list of threads with user defined
     stacks.  */
  stack_list_del (&pd->list);

  /* Not much to do.  Just free the mmap()ed memory.  Note that we do
     not reset the 'used' flag in the 'tid' field.  This is done by
     the kernel.  If no thread has been created yet this field is
     still zero.  */
  if (__glibc_likely (! pd->user_stack))
    (void) queue_stack (pd);
  else
    /* Free the memory associated with the ELF TLS.  */
    _dl_deallocate_tls (TLS_TPADJ (pd), false);

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
}

int
__make_stacks_executable (void **stack_endp)
{
  /* First the main thread's stack.  */
  int err = _dl_make_stack_executable (stack_endp);
  if (err != 0)
    return err;

#ifdef NEED_SEPARATE_REGISTER_STACK
  const size_t pagemask = ~(__getpagesize () - 1);
#endif

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      err = change_stack_perm (list_entry (runp, struct pthread, list)
#ifdef NEED_SEPARATE_REGISTER_STACK
                               , pagemask
#endif
                               );
      if (err != 0)
        break;
    }

  /* Also change the permission for the currently unused stacks.  This
     might be wasted time but better spend it here than adding a check
     in the fast path.  */
  if (err == 0)
    list_for_each (runp, &stack_cache)
      {
        err = change_stack_perm (list_entry (runp, struct pthread, list)
#ifdef NEED_SEPARATE_REGISTER_STACK
                                 , pagemask
#endif
                                 );
        if (err != 0)
          break;
      }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  return err;
}

/* In case of a fork() call the memory allocation in the child will be
   the same but only one thread is running.  All stacks except that of
   the one running thread are not used anymore.  We have to recycle
   them.  */
void
__reclaim_stacks (void)
{
  struct pthread *self = (struct pthread *) THREAD_SELF;

  /* No locking necessary.  The caller is the only stack in use.  But
     we have to be aware that we might have interrupted a list
     operation.  */

  if (in_flight_stack != 0)
    {
      bool add_p = in_flight_stack & 1;
      list_t *elem = (list_t *) (in_flight_stack & ~(uintptr_t) 1);

      if (add_p)
        {
          /* We always add at the beginning of the list.  So in this case we
             only need to check the beginning of these lists to see if the
             pointers at the head of the list are inconsistent.  */
          list_t *l = NULL;

          if (stack_used.next->prev != &stack_used)
            l = &stack_used;
          else if (stack_cache.next->prev != &stack_cache)
            l = &stack_cache;

          if (l != NULL)
            {
              assert (l->next->prev == elem);
              elem->next = l->next;
              elem->prev = l;
              l->next = elem;
            }
        }
      else
        {
          /* We can simply always replay the delete operation.  */
          elem->next->prev = elem->prev;
          elem->prev->next = elem->next;
        }
    }

  /* Mark all stacks except the still running one as free.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *curp = list_entry (runp, struct pthread, list);
      if (curp != self)
        {
          /* This marks the stack as free.  */
          curp->tid = 0;

          /* Account for the size of the stack.  */
          stack_cache_actsize += curp->stackblock_size;

          if (curp->specific_used)
            {
              /* Clear the thread-specific data.  */
              memset (curp->specific_1stblock, '\0',
                      sizeof (curp->specific_1stblock));

              curp->specific_used = false;

              for (size_t cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
                if (curp->specific[cnt] != NULL)
                  {
                    memset (curp->specific[cnt], '\0',
                            sizeof (curp->specific_1stblock));

                    /* We have allocated the block which we do not
                       free here so re-set the bit.  */
                    curp->specific_used = true;
                  }
            }
        }
    }

  /* Add the stacks of the threads that are no longer running to the
     cache.  */
  list_splice (&stack_used, &stack_cache);

  /* Remove the entry for the current thread from the cache list
     and add it to the list of running threads.  Which of the two
     lists is decided by the user_stack flag.  */
  stack_list_del (&self->list);

  /* Re-initialize the lists for all the threads.  */
  INIT_LIST_HEAD (&stack_used);
  INIT_LIST_HEAD (&__stack_user);

  if (__glibc_unlikely (THREAD_GETMEM (self, user_stack)))
    list_add (&self->list, &__stack_user);
  else
    list_add (&self->list, &stack_used);

  /* There is one thread running.  */
  __nptl_nthreads = 1;

  in_flight_stack = 0;

  /* Initialize locks.  */
  stack_cache_lock = LLL_LOCK_INITIALIZER;
  __default_pthread_attr_lock = LLL_LOCK_INITIALIZER;
}

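
/* Worked example (added for illustration; not in the original sources):
   the in_flight_stack encoding recovered above.  stack_list_add records
   the element pointer with the low bit set, stack_list_del records it
   with the low bit clear.  If fork() interrupts the parent between the
   write barriers, the child may see e.g. in_flight_stack ==
   (uintptr_t) elem | 1; it strips the low bit to recover ELEM and
   finishes the interrupted list_add by re-linking ELEM at the head of
   whichever list has an inconsistent head pointer, while an interrupted
   delete is simply replayed.  */
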
#if HP_TIMING_AVAIL
# undef __find_thread_by_id
/* Find a thread given the thread ID.  */
attribute_hidden
struct pthread *
__find_thread_by_id (pid_t tid)
{
  struct pthread *result = NULL;

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *curp;

      curp = list_entry (runp, struct pthread, list);

      if (curp->tid == tid)
        {
          result = curp;
          goto out;
        }
    }

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    {
      struct pthread *curp;

      curp = list_entry (runp, struct pthread, list);

      if (curp->tid == tid)
        {
          result = curp;
          goto out;
        }
    }

 out:
  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  return result;
}
#endif

#ifdef SIGSETXID
static void
setxid_mark_thread (struct xid_command *cmdp, struct pthread *t)
{
  int ch;

  /* Wait until this thread is cloned.  */
  if (t->setxid_futex == -1
      && ! atomic_compare_and_exchange_bool_acq (&t->setxid_futex, -2, -1))
    do
      futex_wait_simple (&t->setxid_futex, -2, FUTEX_PRIVATE);
    while (t->setxid_futex == -2);

  /* Don't let the thread exit before the setxid handler runs.  */
  t->setxid_futex = 0;

  do
    {
      ch = t->cancelhandling;

      /* If the thread is exiting right now, ignore it.  */
      if ((ch & EXITING_BITMASK) != 0)
        {
          /* Release the futex if there is no other setxid in
             progress.  */
          if ((ch & SETXID_BITMASK) == 0)
            {
              t->setxid_futex = 1;
              futex_wake (&t->setxid_futex, 1, FUTEX_PRIVATE);
            }
          return;
        }
    }
  while (atomic_compare_and_exchange_bool_acq (&t->cancelhandling,
                                               ch | SETXID_BITMASK, ch));
}

static void
setxid_unmark_thread (struct xid_command *cmdp, struct pthread *t)
{
  int ch;

  do
    {
      ch = t->cancelhandling;
      if ((ch & SETXID_BITMASK) == 0)
        return;
    }
  while (atomic_compare_and_exchange_bool_acq (&t->cancelhandling,
                                               ch & ~SETXID_BITMASK, ch));

  /* Release the futex just in case.  */
  t->setxid_futex = 1;
  futex_wake (&t->setxid_futex, 1, FUTEX_PRIVATE);
}

static int
setxid_signal_thread (struct xid_command *cmdp, struct pthread *t)
{
  if ((t->cancelhandling & SETXID_BITMASK) == 0)
    return 0;

  int val;
  pid_t pid = __getpid ();
  INTERNAL_SYSCALL_DECL (err);
  val = INTERNAL_SYSCALL_CALL (tgkill, err, pid, t->tid, SIGSETXID);

  /* If this failed, the thread must not have started yet or it has
     already exited.  */
  if (!INTERNAL_SYSCALL_ERROR_P (val, err))
    {
      atomic_increment (&cmdp->cntr);
      return 1;
    }

  return 0;
}

/* Check for consistency across set*id system call results.  The abort
   should not happen as long as all privilege changes happen through
   the glibc wrappers.  ERROR must be 0 (no error) or an errno
   code.  */
void
attribute_hidden
__nptl_setxid_error (struct xid_command *cmdp, int error)
{
  do
    {
      int olderror = cmdp->error;
      if (olderror == error)
        break;
      if (olderror != -1)
        /* Mismatch between current and previous results.  */
        abort ();
    }
  while (atomic_compare_and_exchange_bool_acq (&cmdp->error, error, -1));
}

int
attribute_hidden
__nptl_setxid (struct xid_command *cmdp)
{
  int signalled;
  int result;
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  __xidcmd = cmdp;
  cmdp->cntr = 0;
  cmdp->error = -1;

  struct pthread *self = THREAD_SELF;

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_mark_thread (cmdp, t);
    }

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_mark_thread (cmdp, t);
    }

  /* Iterate until we don't succeed in signalling anyone.  That means
     we have gotten all running threads, and their children will be
     automatically correct once started.  */
  do
    {
      signalled = 0;

      list_for_each (runp, &stack_used)
        {
          struct pthread *t = list_entry (runp, struct pthread, list);
          if (t == self)
            continue;

          signalled += setxid_signal_thread (cmdp, t);
        }

      list_for_each (runp, &__stack_user)
        {
          struct pthread *t = list_entry (runp, struct pthread, list);
          if (t == self)
            continue;

          signalled += setxid_signal_thread (cmdp, t);
        }

      int cur = cmdp->cntr;
      while (cur != 0)
        {
          futex_wait_simple ((unsigned int *) &cmdp->cntr, cur,
                             FUTEX_PRIVATE);
          cur = cmdp->cntr;
        }
    }
  while (signalled != 0);

  /* Clean up flags, so that no thread blocks during exit waiting
     for a signal which will never come.  */
  list_for_each (runp, &stack_used)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_unmark_thread (cmdp, t);
    }

  list_for_each (runp, &__stack_user)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_unmark_thread (cmdp, t);
    }

  /* This must be last, otherwise the current thread might not have
     permissions to send SIGSETXID syscall to the other threads.  */
  INTERNAL_SYSCALL_DECL (err);
  result = INTERNAL_SYSCALL_NCS (cmdp->syscall_no, err, 3,
                                 cmdp->id[0], cmdp->id[1], cmdp->id[2]);
  int error = 0;
  if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (result, err)))
    {
      error = INTERNAL_SYSCALL_ERRNO (result, err);
      __set_errno (error);
      result = -1;
    }
  __nptl_setxid_error (cmdp, error);

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
  return result;
}
#endif  /* SIGSETXID.  */

static inline void __attribute__((always_inline))
init_one_static_tls (struct pthread *curp, struct link_map *map)
{
# if TLS_TCB_AT_TP
  void *dest = (char *) curp - map->l_tls_offset;
# elif TLS_DTV_AT_TP
  void *dest = (char *) curp + map->l_tls_offset + TLS_PRE_TCB_SIZE;
# else
#  error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif

  /* Initialize the memory.  */
  memset (__mempcpy (dest, map->l_tls_initimage, map->l_tls_initimage_size),
          '\0', map->l_tls_blocksize - map->l_tls_initimage_size);
}

void
attribute_hidden
__pthread_init_static_tls (struct link_map *map)
{
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    init_one_static_tls (list_entry (runp, struct pthread, list), map);

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    init_one_static_tls (list_entry (runp, struct pthread, list), map);

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
}

void
attribute_hidden
__wait_lookup_done (void)
{
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  struct pthread *self = THREAD_SELF;

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self || t->header.gscope_flag == THREAD_GSCOPE_FLAG_UNUSED)
        continue;

      int *const gscope_flagp = &t->header.gscope_flag;

      /* We have to wait until this thread is done with the global
         scope.  First tell the thread that we are waiting and
         possibly have to be woken.  */
      if (atomic_compare_and_exchange_bool_acq (gscope_flagp,
                                                THREAD_GSCOPE_FLAG_WAIT,
                                                THREAD_GSCOPE_FLAG_USED))
        continue;

      do
        futex_wait_simple ((unsigned int *) gscope_flagp,
                           THREAD_GSCOPE_FLAG_WAIT, FUTEX_PRIVATE);
      while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT);
    }

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self || t->header.gscope_flag == THREAD_GSCOPE_FLAG_UNUSED)
        continue;

      int *const gscope_flagp = &t->header.gscope_flag;

      /* We have to wait until this thread is done with the global
         scope.  First tell the thread that we are waiting and
         possibly have to be woken.  */
      if (atomic_compare_and_exchange_bool_acq (gscope_flagp,
                                                THREAD_GSCOPE_FLAG_WAIT,
                                                THREAD_GSCOPE_FLAG_USED))
        continue;

      do
        futex_wait_simple ((unsigned int *) gscope_flagp,
                           THREAD_GSCOPE_FLAG_WAIT, FUTEX_PRIVATE);
      while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT);
    }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
}
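
/* Illustrative sketch (added for this edit; not part of the original
   sources): the other half of the handshake above, written as a generic
   flag-and-wake protocol.  A thread entering the global scope sets its
   flag to USED; when leaving it resets the flag and, if a waiter
   advertised itself by changing USED to WAIT, wakes that waiter.  This
   is a simplified stand-in using C11 atomics and a caller-supplied wake
   callback, not glibc's actual THREAD_GSCOPE_* macros.  */
#if 0
# include <stdatomic.h>

enum { FLAG_UNUSED = 0, FLAG_USED = 1, FLAG_WAIT = 2 };

static void
example_gscope_reset (atomic_int *flag, void (*wake) (atomic_int *))
{
  /* Leave the global scope; if a waiter registered itself, wake it.  */
  if (atomic_exchange (flag, FLAG_UNUSED) == FLAG_WAIT)
    wake (flag);
}
#endif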