/* Copyright (C) 2002-2016 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <dl-sysdep.h>
#include <tls.h>
#include <list.h>
#include <lowlevellock.h>
#include <futex-internal.h>
#include <kernel-features.h>
#include <stack-aliasing.h>

#ifndef NEED_SEPARATE_REGISTER_STACK

/* Most architectures have exactly one stack pointer.  Some have more.  */
# define STACK_VARIABLES void *stackaddr = NULL

/* How to pass the values to the 'create_thread' function.  */
# define STACK_VARIABLES_ARGS stackaddr

/* How to declare the function that receives these parameters.  */
# define STACK_VARIABLES_PARMS void *stackaddr

/* How to declare allocate_stack.  */
# define ALLOCATE_STACK_PARMS void **stack

/* This is how the function is called.  We do it this way to allow
   other variants of the function to have more parameters.  */
# define ALLOCATE_STACK(attr, pd) allocate_stack (attr, pd, &stackaddr)

#else

/* We need two stacks.  The kernel will place them but we have to tell
   the kernel about the size of the reserved address space.  */
# define STACK_VARIABLES void *stackaddr = NULL; size_t stacksize = 0

/* How to pass the values to the 'create_thread' function.  */
# define STACK_VARIABLES_ARGS stackaddr, stacksize

/* How to declare the function that receives these parameters.  */
# define STACK_VARIABLES_PARMS void *stackaddr, size_t stacksize

/* How to declare allocate_stack.  */
# define ALLOCATE_STACK_PARMS void **stack, size_t *stacksize

/* This is how the function is called.  We do it this way to allow
   other variants of the function to have more parameters.  */
# define ALLOCATE_STACK(attr, pd) \
  allocate_stack (attr, pd, &stackaddr, &stacksize)

#endif

/* Default alignment of stack.  */
#ifndef STACK_ALIGN
# define STACK_ALIGN __alignof__ (long double)
#endif

/* Default value for minimal stack size after allocating thread
   descriptor and guard.  */
#ifndef MINIMAL_REST_STACK
# define MINIMAL_REST_STACK 4096
#endif

/* Newer kernels have the MAP_STACK flag to indicate a mapping is used for
   a stack.  Use it when possible.  */
#ifndef MAP_STACK
# define MAP_STACK 0
#endif

/* This yields the pointer that TLS support code calls the thread pointer.  */
#if TLS_TCB_AT_TP
# define TLS_TPADJ(pd) (pd)
#elif TLS_DTV_AT_TP
# define TLS_TPADJ(pd) ((struct pthread *)((char *) (pd) + TLS_PRE_TCB_SIZE))
#endif
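
/* Illustrative note (added; not part of the original source): under the
   two TLS layouts the thread pointer and the thread descriptor PD relate
   as follows:

     TLS_TCB_AT_TP:  thread pointer == pd
     TLS_DTV_AT_TP:  thread pointer == (char *) pd + TLS_PRE_TCB_SIZE

   so TLS_TPADJ (pd) always yields the value that the TLS helper routines
   used below (GET_DTV, _dl_allocate_tls, _dl_deallocate_tls) expect.  */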

/* Cache handling for not-yet freed stacks.  */

/* Maximum size of the cache, in bytes.  */
static size_t stack_cache_maxsize = 40 * 1024 * 1024; /* 40 MiB by default.  */
static size_t stack_cache_actsize;

/* Mutex protecting the stack cache and the stack lists below.  */
static int stack_cache_lock = LLL_LOCK_INITIALIZER;

/* List of queued stack frames.  */
static LIST_HEAD (stack_cache);

/* List of the stacks in use.  */
static LIST_HEAD (stack_used);

/* We need to record what list operations we are going to do so that,
   in case of an asynchronous interruption due to a fork() call, we
   can correct for the work.  */
static uintptr_t in_flight_stack;

/* List of the threads with user provided stacks in use.  No need to
   initialize this, since it's done in __pthread_initialize_minimal.  */
list_t __stack_user __attribute__ ((nocommon));
hidden_data_def (__stack_user)

#if COLORING_INCREMENT != 0
/* Number of threads created.  */
static unsigned int nptl_ncreated;
#endif


/* Check whether the stack is still used or not.  */
#define FREE_P(descr) ((descr)->tid <= 0)


static void
stack_list_del (list_t *elem)
{
  in_flight_stack = (uintptr_t) elem;

  atomic_write_barrier ();

  list_del (elem);

  atomic_write_barrier ();

  in_flight_stack = 0;
}


static void
stack_list_add (list_t *elem, list_t *list)
{
  in_flight_stack = (uintptr_t) elem | 1;

  atomic_write_barrier ();

  list_add (elem, list);

  atomic_write_barrier ();

  in_flight_stack = 0;
}
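
/* Explanatory note (added; not in the original source): IN_FLIGHT_STACK
   records both the list element being manipulated and the kind of
   operation.  The low bit is set for an add and clear for a delete,
   which is how __reclaim_stacks below decides whether to replay or
   complete an interrupted list operation after fork:

     bool add_p = in_flight_stack & 1;
     list_t *elem = (list_t *) (in_flight_stack & ~(uintptr_t) 1);  */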

/* We create a doubly linked list of all cache entries.  Doubly linked
   because this allows removing entries from the end.  */


/* Get a stack frame from the cache.  We have to match by size since
   some blocks might be too small or far too large.  */
static struct pthread *
get_cached_stack (size_t *sizep, void **memp)
{
  size_t size = *sizep;
  struct pthread *result = NULL;
  list_t *entry;

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  /* Search the cache for a matching entry.  We search for the
     smallest stack which has at least the required size.  Note that
     in normal situations the size of all allocated stacks is the
     same.  At the very least there are only a few different sizes.
     Therefore this loop will exit early most of the time with an
     exact match.  */
  list_for_each (entry, &stack_cache)
    {
      struct pthread *curr;

      curr = list_entry (entry, struct pthread, list);
      if (FREE_P (curr) && curr->stackblock_size >= size)
        {
          if (curr->stackblock_size == size)
            {
              result = curr;
              break;
            }

          if (result == NULL
              || result->stackblock_size > curr->stackblock_size)
            result = curr;
        }
    }

  if (__builtin_expect (result == NULL, 0)
      /* Make sure the size difference is not too excessive.  In that
         case we do not use the block.  */
      || __builtin_expect (result->stackblock_size > 4 * size, 0))
    {
      /* Release the lock.  */
      lll_unlock (stack_cache_lock, LLL_PRIVATE);

      return NULL;
    }

  /* Don't allow setxid until cloned.  */
  result->setxid_futex = -1;

  /* Dequeue the entry.  */
  stack_list_del (&result->list);

  /* And add to the list of stacks in use.  */
  stack_list_add (&result->list, &stack_used);

  /* And decrease the cache size.  */
  stack_cache_actsize -= result->stackblock_size;

  /* Release the lock early.  */
  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  /* Report size and location of the stack to the caller.  */
  *sizep = result->stackblock_size;
  *memp = result->stackblock;

  /* Cancellation handling is back to the default.  */
  result->cancelhandling = 0;
  result->cleanup = NULL;

  /* No pending event.  */
  result->nextevent = NULL;

  /* Clear the DTV.  */
  dtv_t *dtv = GET_DTV (TLS_TPADJ (result));
  for (size_t cnt = 0; cnt < dtv[-1].counter; ++cnt)
    free (dtv[1 + cnt].pointer.to_free);
  memset (dtv, '\0', (dtv[-1].counter + 1) * sizeof (dtv_t));

  /* Re-initialize the TLS.  */
  _dl_allocate_tls_init (TLS_TPADJ (result));

  return result;
}
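
/* Illustrative usage sketch (added; not part of the original source):
   the caller passes the required size in *SIZEP and, on success, gets
   back the actual (possibly larger) size of the reused block plus its
   base address, just as allocate_stack does below:

     size_t size = required_size;
     void *mem;
     struct pthread *pd = get_cached_stack (&size, &mem);
     if (pd == NULL)
       ... fall back to mmap'ing a fresh block of SIZE bytes ...  */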


/* Free stacks until cache size is lower than LIMIT.  */
void
__free_stacks (size_t limit)
{
  /* We reduce the size of the cache.  Remove the last entries until
     the size is below the limit.  */
  list_t *entry;
  list_t *prev;

  /* Search from the end of the list.  */
  list_for_each_prev_safe (entry, prev, &stack_cache)
    {
      struct pthread *curr;

      curr = list_entry (entry, struct pthread, list);
      if (FREE_P (curr))
        {
          /* Unlink the block.  */
          stack_list_del (entry);

          /* Account for the freed memory.  */
          stack_cache_actsize -= curr->stackblock_size;

          /* Free the memory associated with the ELF TLS.  */
          _dl_deallocate_tls (TLS_TPADJ (curr), false);

          /* Remove this block.  This should never fail.  If it does
             something is really wrong.  */
          if (munmap (curr->stackblock, curr->stackblock_size) != 0)
            abort ();

          /* Maybe we have freed enough.  */
          if (stack_cache_actsize <= limit)
            break;
        }
    }
}


/* Add a stack frame which is not used anymore to the stack cache.  Must be
   called with the cache lock held.  */
static inline void
__attribute ((always_inline))
queue_stack (struct pthread *stack)
{
  /* We unconditionally add the stack to the list.  The memory may
     still be in use but it will not be reused until the kernel marks
     the stack as not used anymore.  */
  stack_list_add (&stack->list, &stack_cache);

  stack_cache_actsize += stack->stackblock_size;
  if (__glibc_unlikely (stack_cache_actsize > stack_cache_maxsize))
    __free_stacks (stack_cache_maxsize);
}


static int
change_stack_perm (struct pthread *pd
#ifdef NEED_SEPARATE_REGISTER_STACK
                   , size_t pagemask
#endif
                   )
{
#ifdef NEED_SEPARATE_REGISTER_STACK
  void *stack = (pd->stackblock
                 + (((((pd->stackblock_size - pd->guardsize) / 2)
                      & pagemask) + pd->guardsize) & pagemask));
  size_t len = pd->stackblock + pd->stackblock_size - stack;
#elif _STACK_GROWS_DOWN
  void *stack = pd->stackblock + pd->guardsize;
  size_t len = pd->stackblock_size - pd->guardsize;
#elif _STACK_GROWS_UP
  void *stack = pd->stackblock;
  size_t len = (uintptr_t) pd - pd->guardsize - (uintptr_t) pd->stackblock;
#else
# error "Define either _STACK_GROWS_DOWN or _STACK_GROWS_UP"
#endif
  if (mprotect (stack, len, PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
    return errno;

  return 0;
}


/* Returns a usable stack for a new thread either by allocating a
   new stack or reusing a cached stack of sufficient size.
   ATTR must be non-NULL and point to a valid pthread_attr.
   PDP must be non-NULL.  */
static int
allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
                ALLOCATE_STACK_PARMS)
{
  struct pthread *pd;
  size_t size;
  size_t pagesize_m1 = __getpagesize () - 1;

  assert (powerof2 (pagesize_m1 + 1));
  assert (TCB_ALIGNMENT >= STACK_ALIGN);

  /* Get the stack size from the attribute if it is set.  Otherwise we
     use the default we determined at start time.  */
  if (attr->stacksize != 0)
    size = attr->stacksize;
  else
    {
      lll_lock (__default_pthread_attr_lock, LLL_PRIVATE);
      size = __default_pthread_attr.stacksize;
      lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);
    }

  /* Get memory for the stack.  */
  if (__glibc_unlikely (attr->flags & ATTR_FLAG_STACKADDR))
    {
      uintptr_t adj;
      char *stackaddr = (char *) attr->stackaddr;

      /* Assume the same layout as the _STACK_GROWS_DOWN case, with struct
         pthread at the top of the stack block.  Later we adjust the guard
         location and stack address to match the _STACK_GROWS_UP case.  */
      if (_STACK_GROWS_UP)
        stackaddr += attr->stacksize;

      /* If the user also specified the size of the stack make sure it
         is large enough.  */
      if (attr->stacksize != 0
          && attr->stacksize < (__static_tls_size + MINIMAL_REST_STACK))
        return EINVAL;

      /* Adjust stack size for alignment of the TLS block.  */
#if TLS_TCB_AT_TP
      adj = ((uintptr_t) stackaddr - TLS_TCB_SIZE)
            & __static_tls_align_m1;
      assert (size > adj + TLS_TCB_SIZE);
#elif TLS_DTV_AT_TP
      adj = ((uintptr_t) stackaddr - __static_tls_size)
            & __static_tls_align_m1;
      assert (size > adj);
#endif
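
      /* Worked example (added; not part of the original source), assuming
         TLS_TCB_AT_TP with a hypothetical TLS_TCB_SIZE of 0x700 and a
         static TLS alignment of 64 bytes (__static_tls_align_m1 == 0x3f):

           stackaddr                = 0x7f0000012358
           stackaddr - TLS_TCB_SIZE = 0x7f0000011c58
           adj = 0x7f0000011c58 & 0x3f = 0x18

         ADJ is the number of bytes the TCB must be moved down so that it
         lands on a __static_tls_align boundary; PD below ends up at
         stackaddr - TLS_TCB_SIZE - adj = 0x7f0000011c40, which is
         64-byte aligned.  */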

      /* The user provided some memory.  Let's hope it matches the
         size...  We do not allocate guard pages if the user provided
         the stack.  It is the user's responsibility to do this if it
         is wanted.  */
#if TLS_TCB_AT_TP
      pd = (struct pthread *) ((uintptr_t) stackaddr
                               - TLS_TCB_SIZE - adj);
#elif TLS_DTV_AT_TP
      pd = (struct pthread *) (((uintptr_t) stackaddr
                                - __static_tls_size - adj)
                               - TLS_PRE_TCB_SIZE);
#endif

      /* The user provided stack memory needs to be cleared.  */
      memset (pd, '\0', sizeof (struct pthread));

      /* The first TSD block is included in the TCB.  */
      pd->specific[0] = pd->specific_1stblock;

      /* Remember the stack-related values.  */
      pd->stackblock = (char *) stackaddr - size;
      pd->stackblock_size = size;

      /* This is a user-provided stack.  It will not be queued in the
         stack cache nor will the memory (except the TLS memory) be freed.  */
      pd->user_stack = true;

      /* This is at least the second thread.  */
      pd->header.multiple_threads = 1;
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
      __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
#endif

#ifndef __ASSUME_PRIVATE_FUTEX
      /* The thread must know when private futexes are supported.  */
      pd->header.private_futex = THREAD_GETMEM (THREAD_SELF,
                                                header.private_futex);
#endif

#ifdef NEED_DL_SYSINFO
      SETUP_THREAD_SYSINFO (pd);
#endif

      /* The process ID is also the same as that of the caller.  */
      pd->pid = THREAD_GETMEM (THREAD_SELF, pid);

      /* Don't allow setxid until cloned.  */
      pd->setxid_futex = -1;

      /* Allocate the DTV for this thread.  */
      if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
        {
          /* Something went wrong.  */
          assert (errno == ENOMEM);
          return errno;
        }

      /* Prepare to modify global data.  */
      lll_lock (stack_cache_lock, LLL_PRIVATE);

      /* And add to the list of stacks in use.  */
      list_add (&pd->list, &__stack_user);

      lll_unlock (stack_cache_lock, LLL_PRIVATE);
    }
  else
    {
      /* Allocate some anonymous memory.  If possible use the cache.  */
      size_t guardsize;
      size_t reqsize;
      void *mem;
      const int prot = (PROT_READ | PROT_WRITE
                        | ((GL(dl_stack_flags) & PF_X) ? PROT_EXEC : 0));

#if COLORING_INCREMENT != 0
      /* Add one more page for stack coloring.  Don't do it for stacks
         with 16 times pagesize or larger.  This might just cause
         unnecessary misalignment.  */
      if (size <= 16 * pagesize_m1)
        size += pagesize_m1 + 1;
#endif

      /* Adjust the stack size for alignment.  */
      size &= ~__static_tls_align_m1;
      assert (size != 0);

      /* Make sure the size of the stack is enough for the guard and
         eventually the thread descriptor.  */
      guardsize = (attr->guardsize + pagesize_m1) & ~pagesize_m1;
      if (__builtin_expect (size < ((guardsize + __static_tls_size
                                     + MINIMAL_REST_STACK + pagesize_m1)
                                    & ~pagesize_m1),
                            0))
        /* The stack is too small (or the guard too large).  */
        return EINVAL;

      /* Try to get a stack from the cache.  */
      reqsize = size;
      pd = get_cached_stack (&size, &mem);
      if (pd == NULL)
        {
          /* To avoid aliasing effects on a larger scale than pages we
             adjust the allocated stack size if necessary.  This way
             allocations directly following each other will not have
             aliasing problems.  */
#if MULTI_PAGE_ALIASING != 0
          if ((size % MULTI_PAGE_ALIASING) == 0)
            size += pagesize_m1 + 1;
#endif

          mem = mmap (NULL, size, prot,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);

          if (__glibc_unlikely (mem == MAP_FAILED))
            return errno;

          /* SIZE is guaranteed to be greater than zero.
             So we can never get a null pointer back from mmap.  */
          assert (mem != NULL);

#if COLORING_INCREMENT != 0
          /* Atomically increment NCREATED.  */
          unsigned int ncreated = atomic_increment_val (&nptl_ncreated);

          /* We chose the offset for coloring by incrementing it for
             every new thread by a fixed amount.  The offset is used
             modulo the page size.  Even if coloring would be better
             relative to higher alignment values it makes no sense to
             do it since the mmap() interface does not allow us to
             specify any alignment for the returned memory block.  */
          size_t coloring = (ncreated * COLORING_INCREMENT) & pagesize_m1;

          /* Make sure the coloring offset does not disturb the alignment
             of the TCB and static TLS block.  */
          if (__glibc_unlikely ((coloring & __static_tls_align_m1) != 0))
            coloring = (((coloring + __static_tls_align_m1)
                         & ~(__static_tls_align_m1))
                        & ~pagesize_m1);
#else
          /* Unless specified we do not make any adjustments.  */
# define coloring 0
#endif
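
          /* Worked example (added; not part of the original source),
             assuming a 4096-byte page (pagesize_m1 == 0xfff) and a
             hypothetical COLORING_INCREMENT of 128:

               ncreated = 37
               coloring = (37 * 128) & 0xfff = 0x1280 & 0xfff = 0x280

             i.e. successive thread descriptors are staggered within the
             page so that their hot fields do not all map to the same
             cache sets.  */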

          /* Place the thread descriptor at the end of the stack.  */
#if TLS_TCB_AT_TP
          pd = (struct pthread *) ((char *) mem + size - coloring) - 1;
#elif TLS_DTV_AT_TP
          pd = (struct pthread *) ((((uintptr_t) mem + size - coloring
                                     - __static_tls_size)
                                    & ~__static_tls_align_m1)
                                   - TLS_PRE_TCB_SIZE);
#endif

          /* Remember the stack-related values.  */
          pd->stackblock = mem;
          pd->stackblock_size = size;

          /* The first block of the thread-specific data array is
             allocated within the TCB.  This address will not change
             for the lifetime of this object.  */
          pd->specific[0] = pd->specific_1stblock;

          /* This is at least the second thread.  */
          pd->header.multiple_threads = 1;
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
          __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
#endif

#ifndef __ASSUME_PRIVATE_FUTEX
          /* The thread must know when private futexes are supported.  */
          pd->header.private_futex = THREAD_GETMEM (THREAD_SELF,
                                                    header.private_futex);
#endif

#ifdef NEED_DL_SYSINFO
          SETUP_THREAD_SYSINFO (pd);
#endif

          /* Don't allow setxid until cloned.  */
          pd->setxid_futex = -1;

          /* The process ID is also the same as that of the caller.  */
          pd->pid = THREAD_GETMEM (THREAD_SELF, pid);

          /* Allocate the DTV for this thread.  */
          if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
            {
              /* Something went wrong.  */
              assert (errno == ENOMEM);

              /* Free the stack memory we just allocated.  */
              (void) munmap (mem, size);

              return errno;
            }

          /* Prepare to modify global data.  */
          lll_lock (stack_cache_lock, LLL_PRIVATE);

          /* And add to the list of stacks in use.  */
          stack_list_add (&pd->list, &stack_used);

          lll_unlock (stack_cache_lock, LLL_PRIVATE);

          /* There might have been a race.  Another thread might have
             caused the stacks to get exec permission while this new
             stack was prepared.  Detect if this was possible and
             change the permission if necessary.  */
          if (__builtin_expect ((GL(dl_stack_flags) & PF_X) != 0
                                && (prot & PROT_EXEC) == 0, 0))
            {
              int err = change_stack_perm (pd
#ifdef NEED_SEPARATE_REGISTER_STACK
                                           , ~pagesize_m1
#endif
                                           );
              if (err != 0)
                {
                  /* Free the stack memory we just allocated.  */
                  (void) munmap (mem, size);

                  return err;
                }
            }
        }

      /* Note that all of the stack and the thread descriptor is
         zeroed.  This means we do not have to initialize fields
         with initial value zero.  This is specifically true for
         the 'tid' field which is always set back to zero once the
         stack is not used anymore and for the 'guardsize' field
         which will be read next.  */

      /* Create or resize the guard area if necessary.  */
      if (__glibc_unlikely (guardsize > pd->guardsize))
        {
#ifdef NEED_SEPARATE_REGISTER_STACK
          char *guard = mem + (((size - guardsize) / 2) & ~pagesize_m1);
#elif _STACK_GROWS_DOWN
          char *guard = mem;
#elif _STACK_GROWS_UP
          char *guard = (char *) (((uintptr_t) pd - guardsize) & ~pagesize_m1);
#endif
          if (mprotect (guard, guardsize, PROT_NONE) != 0)
            {
            mprot_error:
              lll_lock (stack_cache_lock, LLL_PRIVATE);

              /* Remove the thread from the list.  */
              stack_list_del (&pd->list);

              lll_unlock (stack_cache_lock, LLL_PRIVATE);

              /* Get rid of the TLS block we allocated.  */
              _dl_deallocate_tls (TLS_TPADJ (pd), false);

              /* Free the stack memory regardless of whether the size
                 of the cache is over the limit or not.  If this piece
                 of memory caused problems we better do not use it
                 anymore.  Uh, and we ignore possible errors.  There
                 is nothing we could do.  */
              (void) munmap (mem, size);

              return errno;
            }

          pd->guardsize = guardsize;
        }
      else if (__builtin_expect (pd->guardsize - guardsize > size - reqsize,
                                 0))
        {
          /* The old guard area is too large.  */

#ifdef NEED_SEPARATE_REGISTER_STACK
          char *guard = mem + (((size - guardsize) / 2) & ~pagesize_m1);
          char *oldguard = mem + (((size - pd->guardsize) / 2) & ~pagesize_m1);

          if (oldguard < guard
              && mprotect (oldguard, guard - oldguard, prot) != 0)
            goto mprot_error;

          if (mprotect (guard + guardsize,
                        oldguard + pd->guardsize - guard - guardsize,
                        prot) != 0)
            goto mprot_error;
#elif _STACK_GROWS_DOWN
          if (mprotect ((char *) mem + guardsize, pd->guardsize - guardsize,
                        prot) != 0)
            goto mprot_error;
#elif _STACK_GROWS_UP
          if (mprotect ((char *) pd - pd->guardsize,
                        pd->guardsize - guardsize, prot) != 0)
            goto mprot_error;
#endif

          pd->guardsize = guardsize;
        }

      /* The pthread_getattr_np() calls need to get passed the size
         requested in the attribute, regardless of how large the
         actually used guardsize is.  */
      pd->reported_guardsize = guardsize;
    }

  /* Initialize the lock.  We have to do this unconditionally since the
     stillborn thread could be canceled while the lock is taken.  */
  pd->lock = LLL_LOCK_INITIALIZER;

  /* The robust mutex lists also need to be initialized
     unconditionally because the cleanup for the previous stack owner
     might have happened in the kernel.  */
  pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
                                  - offsetof (pthread_mutex_t,
                                              __data.__list.__next));
  pd->robust_head.list_op_pending = NULL;
#ifdef __PTHREAD_MUTEX_HAVE_PREV
  pd->robust_prev = &pd->robust_head;
#endif
  pd->robust_head.list = &pd->robust_head;
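
  /* Explanatory note (added; not part of the original source):
     FUTEX_OFFSET tells the kernel's robust-futex code how to get from
     the list node that is linked into the robust list (the
     __data.__list.__next field inside a pthread_mutex_t) to the futex
     word (__data.__lock) of that same mutex:

       futex address = (char *) list_node
                       + offsetof (pthread_mutex_t, __data.__lock)
                       - offsetof (pthread_mutex_t, __data.__list.__next)

     which is exactly the difference computed above.  */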

  /* We place the thread descriptor at the end of the stack.  */
  *pdp = pd;

#if _STACK_GROWS_DOWN
  void *stacktop;

# if TLS_TCB_AT_TP
  /* The stack begins before the TCB and the static TLS block.  */
  stacktop = ((char *) (pd + 1) - __static_tls_size);
# elif TLS_DTV_AT_TP
  stacktop = (char *) (pd - 1);
# endif

# ifdef NEED_SEPARATE_REGISTER_STACK
  *stack = pd->stackblock;
  *stacksize = stacktop - *stack;
# else
  *stack = stacktop;
# endif
#else
  *stack = pd->stackblock;
#endif

  return 0;
}
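
/* Illustrative usage sketch (added; not part of the original source):
   pthread_create obtains the new thread's stack roughly like this, with
   STACK_VARIABLES expanding to the per-architecture output variables
   declared at the top of this file:

     STACK_VARIABLES;
     struct pthread *pd = NULL;
     int err = ALLOCATE_STACK (iattr, &pd);
     if (__glibc_unlikely (err != 0))
       return err == ENOMEM ? EAGAIN : err;

   where IATTR is the (possibly default) thread attribute in use.  */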


void
__deallocate_stack (struct pthread *pd)
{
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  /* Remove the thread from the list of threads with user defined
     stacks.  */
  stack_list_del (&pd->list);

  /* Not much to do.  Just free the mmap()ed memory.  Note that we do
     not reset the 'used' flag in the 'tid' field.  This is done by
     the kernel.  If no thread has been created yet this field is
     still zero.  */
  if (__glibc_likely (! pd->user_stack))
    (void) queue_stack (pd);
  else
    /* Free the memory associated with the ELF TLS.  */
    _dl_deallocate_tls (TLS_TPADJ (pd), false);

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
}


int
__make_stacks_executable (void **stack_endp)
{
  /* First the main thread's stack.  */
  int err = _dl_make_stack_executable (stack_endp);
  if (err != 0)
    return err;

#ifdef NEED_SEPARATE_REGISTER_STACK
  const size_t pagemask = ~(__getpagesize () - 1);
#endif

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      err = change_stack_perm (list_entry (runp, struct pthread, list)
#ifdef NEED_SEPARATE_REGISTER_STACK
                               , pagemask
#endif
                               );
      if (err != 0)
        break;
    }

  /* Also change the permission for the currently unused stacks.  This
     might be wasted time but better spend it here than adding a check
     in the fast path.  */
  if (err == 0)
    list_for_each (runp, &stack_cache)
      {
        err = change_stack_perm (list_entry (runp, struct pthread, list)
#ifdef NEED_SEPARATE_REGISTER_STACK
                                 , pagemask
#endif
                                 );
        if (err != 0)
          break;
      }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  return err;
}


/* In case of a fork() call the memory allocation in the child will be
   the same but only one thread is running.  All stacks except that of
   the one running thread are not used anymore.  We have to recycle
   them.  */
void
__reclaim_stacks (void)
{
  struct pthread *self = (struct pthread *) THREAD_SELF;

  /* No locking necessary.  The caller is the only stack in use.  But
     we have to be aware that we might have interrupted a list
     operation.  */

  if (in_flight_stack != 0)
    {
      bool add_p = in_flight_stack & 1;
      list_t *elem = (list_t *) (in_flight_stack & ~(uintptr_t) 1);

      if (add_p)
        {
          /* We always add at the beginning of the list.  So in this case we
             only need to check the beginning of these lists to see if the
             pointers at the head of the list are inconsistent.  */
          list_t *l = NULL;

          if (stack_used.next->prev != &stack_used)
            l = &stack_used;
          else if (stack_cache.next->prev != &stack_cache)
            l = &stack_cache;

          if (l != NULL)
            {
              assert (l->next->prev == elem);
              elem->next = l->next;
              elem->prev = l;
              l->next = elem;
            }
        }
      else
        {
          /* We can simply always replay the delete operation.  */
          elem->next->prev = elem->prev;
          elem->prev->next = elem->next;
        }
    }

  /* Mark all stacks except the still running one as free.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *curp = list_entry (runp, struct pthread, list);
      if (curp != self)
        {
          /* This marks the stack as free.  */
          curp->tid = 0;

          /* The PID field must be initialized for the new process.  */
          curp->pid = self->pid;

          /* Account for the size of the stack.  */
          stack_cache_actsize += curp->stackblock_size;

          if (curp->specific_used)
            {
              /* Clear the thread-specific data.  */
              memset (curp->specific_1stblock, '\0',
                      sizeof (curp->specific_1stblock));

              curp->specific_used = false;

              for (size_t cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
                if (curp->specific[cnt] != NULL)
                  {
                    memset (curp->specific[cnt], '\0',
                            sizeof (curp->specific_1stblock));

                    /* We have allocated the block which we do not
                       free here so re-set the bit.  */
                    curp->specific_used = true;
                  }
            }
        }
    }

  /* Reset the PIDs in any cached stacks.  */
  list_for_each (runp, &stack_cache)
    {
      struct pthread *curp = list_entry (runp, struct pthread, list);
      curp->pid = self->pid;
    }

  /* Add the stack of all running threads to the cache.  */
  list_splice (&stack_used, &stack_cache);

  /* Remove the entry for the current thread from the cache list
     and add it to the list of running threads.  Which of the two
     lists is decided by the user_stack flag.  */
  stack_list_del (&self->list);

  /* Re-initialize the lists for all the threads.  */
  INIT_LIST_HEAD (&stack_used);
  INIT_LIST_HEAD (&__stack_user);

  if (__glibc_unlikely (THREAD_GETMEM (self, user_stack)))
    list_add (&self->list, &__stack_user);
  else
    list_add (&self->list, &stack_used);

  /* There is one thread running.  */
  __nptl_nthreads = 1;

  in_flight_stack = 0;

  /* Initialize locks.  */
  stack_cache_lock = LLL_LOCK_INITIALIZER;
  __default_pthread_attr_lock = LLL_LOCK_INITIALIZER;
}


#if HP_TIMING_AVAIL
# undef __find_thread_by_id
/* Find a thread given the thread ID.  */
attribute_hidden
struct pthread *
__find_thread_by_id (pid_t tid)
{
  struct pthread *result = NULL;

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *curp;

      curp = list_entry (runp, struct pthread, list);

      if (curp->tid == tid)
        {
          result = curp;
          goto out;
        }
    }

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    {
      struct pthread *curp;

      curp = list_entry (runp, struct pthread, list);

      if (curp->tid == tid)
        {
          result = curp;
          goto out;
        }
    }

 out:
  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  return result;
}
#endif


#ifdef SIGSETXID
static void
setxid_mark_thread (struct xid_command *cmdp, struct pthread *t)
{
  int ch;

  /* Wait until this thread is cloned.  */
  if (t->setxid_futex == -1
      && ! atomic_compare_and_exchange_bool_acq (&t->setxid_futex, -2, -1))
    do
      futex_wait_simple (&t->setxid_futex, -2, FUTEX_PRIVATE);
    while (t->setxid_futex == -2);

  /* Don't let the thread exit before the setxid handler runs.  */
  t->setxid_futex = 0;

  do
    {
      ch = t->cancelhandling;

      /* If the thread is exiting right now, ignore it.  */
      if ((ch & EXITING_BITMASK) != 0)
        {
          /* Release the futex if there is no other setxid in
             progress.  */
          if ((ch & SETXID_BITMASK) == 0)
            {
              t->setxid_futex = 1;
              futex_wake (&t->setxid_futex, 1, FUTEX_PRIVATE);
            }
          return;
        }
    }
  while (atomic_compare_and_exchange_bool_acq (&t->cancelhandling,
                                               ch | SETXID_BITMASK, ch));
}


static void
setxid_unmark_thread (struct xid_command *cmdp, struct pthread *t)
{
  int ch;

  do
    {
      ch = t->cancelhandling;
      if ((ch & SETXID_BITMASK) == 0)
        return;
    }
  while (atomic_compare_and_exchange_bool_acq (&t->cancelhandling,
                                               ch & ~SETXID_BITMASK, ch));

  /* Release the futex just in case.  */
  t->setxid_futex = 1;
  futex_wake (&t->setxid_futex, 1, FUTEX_PRIVATE);
}


static int
setxid_signal_thread (struct xid_command *cmdp, struct pthread *t)
{
  if ((t->cancelhandling & SETXID_BITMASK) == 0)
    return 0;

  int val;
  INTERNAL_SYSCALL_DECL (err);
  val = INTERNAL_SYSCALL (tgkill, err, 3, THREAD_GETMEM (THREAD_SELF, pid),
                          t->tid, SIGSETXID);

  /* If this failed, the thread must not have started yet or else it
     has already exited.  */
  if (!INTERNAL_SYSCALL_ERROR_P (val, err))
    {
      atomic_increment (&cmdp->cntr);
      return 1;
    }

  return 0;
}


/* Check for consistency across set*id system call results.  The abort
   should not happen as long as all privilege changes happen through
   the glibc wrappers.  ERROR must be 0 (no error) or an errno
   code.  */
void
attribute_hidden
__nptl_setxid_error (struct xid_command *cmdp, int error)
{
  do
    {
      int olderror = cmdp->error;
      if (olderror == error)
        break;
      if (olderror != -1)
        /* Mismatch between current and previous results.  */
        abort ();
    }
  while (atomic_compare_and_exchange_bool_acq (&cmdp->error, error, -1));
}


int
attribute_hidden
__nptl_setxid (struct xid_command *cmdp)
{
  int signalled;
  int result;
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  __xidcmd = cmdp;
  cmdp->cntr = 0;
  cmdp->error = -1;

  struct pthread *self = THREAD_SELF;

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_mark_thread (cmdp, t);
    }

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_mark_thread (cmdp, t);
    }

  /* Iterate until we don't succeed in signalling anyone.  That means
     we have gotten all running threads, and their children will be
     automatically correct once started.  */
  do
    {
      signalled = 0;

      list_for_each (runp, &stack_used)
        {
          struct pthread *t = list_entry (runp, struct pthread, list);
          if (t == self)
            continue;

          signalled += setxid_signal_thread (cmdp, t);
        }

      list_for_each (runp, &__stack_user)
        {
          struct pthread *t = list_entry (runp, struct pthread, list);
          if (t == self)
            continue;

          signalled += setxid_signal_thread (cmdp, t);
        }

      int cur = cmdp->cntr;
      while (cur != 0)
        {
          futex_wait_simple ((unsigned int *) &cmdp->cntr, cur,
                             FUTEX_PRIVATE);
          cur = cmdp->cntr;
        }
    }
  while (signalled != 0);

  /* Clean up flags, so that no thread blocks during exit waiting
     for a signal which will never come.  */
  list_for_each (runp, &stack_used)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_unmark_thread (cmdp, t);
    }

  list_for_each (runp, &__stack_user)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_unmark_thread (cmdp, t);
    }

  /* This must be last, otherwise the current thread might not have
     permissions to send the SIGSETXID signal to the other threads.  */
  INTERNAL_SYSCALL_DECL (err);
  result = INTERNAL_SYSCALL_NCS (cmdp->syscall_no, err, 3,
                                 cmdp->id[0], cmdp->id[1], cmdp->id[2]);
  int error = 0;
  if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (result, err)))
    {
      error = INTERNAL_SYSCALL_ERRNO (result, err);
      __set_errno (error);
      result = -1;
    }
  __nptl_setxid_error (cmdp, error);

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
  return result;
}
#endif  /* SIGSETXID.  */
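
/* Illustrative sketch (added; not part of the original source) of how a
   set*id wrapper drives __nptl_setxid.  The fields follow the struct
   xid_command usage above; the snippet is a simplified stand-in for the
   real INLINE_SETXID_SYSCALL machinery:

     struct xid_command cmd;
     cmd.syscall_no = __NR_setuid;
     cmd.id[0] = uid;
     cmd.id[1] = cmd.id[2] = 0;
     int result = __nptl_setxid (&cmd);

   __nptl_setxid marks every other thread, signals it with SIGSETXID so
   that its handler performs the same syscall, waits for all of them to
   report back through cmd.cntr, and finally performs the syscall in the
   calling thread itself.  */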


static inline void __attribute__((always_inline))
init_one_static_tls (struct pthread *curp, struct link_map *map)
{
# if TLS_TCB_AT_TP
  void *dest = (char *) curp - map->l_tls_offset;
# elif TLS_DTV_AT_TP
  void *dest = (char *) curp + map->l_tls_offset + TLS_PRE_TCB_SIZE;
# else
#  error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif

  /* Fill in the DTV slot so that a later LD/GD access will find it.  */
  dtv_t *dtv = GET_DTV (TLS_TPADJ (curp));
  dtv[map->l_tls_modid].pointer.to_free = NULL;
  dtv[map->l_tls_modid].pointer.val = dest;

  /* Initialize the memory.  */
  memset (__mempcpy (dest, map->l_tls_initimage, map->l_tls_initimage_size),
          '\0', map->l_tls_blocksize - map->l_tls_initimage_size);
}

void
attribute_hidden
__pthread_init_static_tls (struct link_map *map)
{
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    init_one_static_tls (list_entry (runp, struct pthread, list), map);

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    init_one_static_tls (list_entry (runp, struct pthread, list), map);

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
}


void
attribute_hidden
__wait_lookup_done (void)
{
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  struct pthread *self = THREAD_SELF;

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self || t->header.gscope_flag == THREAD_GSCOPE_FLAG_UNUSED)
        continue;

      int *const gscope_flagp = &t->header.gscope_flag;

      /* We have to wait until this thread is done with the global
         scope.  First tell the thread that we are waiting and
         possibly have to be woken.  */
      if (atomic_compare_and_exchange_bool_acq (gscope_flagp,
                                                THREAD_GSCOPE_FLAG_WAIT,
                                                THREAD_GSCOPE_FLAG_USED))
        continue;

      do
        futex_wait_simple ((unsigned int *) gscope_flagp,
                           THREAD_GSCOPE_FLAG_WAIT, FUTEX_PRIVATE);
      while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT);
    }

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self || t->header.gscope_flag == THREAD_GSCOPE_FLAG_UNUSED)
        continue;

      int *const gscope_flagp = &t->header.gscope_flag;

      /* We have to wait until this thread is done with the global
         scope.  First tell the thread that we are waiting and
         possibly have to be woken.  */
      if (atomic_compare_and_exchange_bool_acq (gscope_flagp,
                                                THREAD_GSCOPE_FLAG_WAIT,
                                                THREAD_GSCOPE_FLAG_USED))
        continue;

      do
        futex_wait_simple ((unsigned int *) gscope_flagp,
                           THREAD_GSCOPE_FLAG_WAIT, FUTEX_PRIVATE);
      while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT);
    }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
}