[glibc.git] / nptl / allocatestack.c
/* Copyright (C) 2002-2016 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <dl-sysdep.h>
#include <dl-tls.h>
#include <tls.h>
#include <list.h>
#include <lowlevellock.h>
#include <futex-internal.h>
#include <kernel-features.h>
#include <stack-aliasing.h>

#ifndef NEED_SEPARATE_REGISTER_STACK

/* Most architectures have exactly one stack pointer.  Some have more.  */
# define STACK_VARIABLES void *stackaddr = NULL

/* How to pass the values to the 'create_thread' function.  */
# define STACK_VARIABLES_ARGS stackaddr

/* How to declare a function which gets these parameters.  */
# define STACK_VARIABLES_PARMS void *stackaddr

/* How to declare allocate_stack.  */
# define ALLOCATE_STACK_PARMS void **stack

/* This is how the function is called.  We do it this way to allow
   other variants of the function to have more parameters.  */
# define ALLOCATE_STACK(attr, pd) allocate_stack (attr, pd, &stackaddr)

#else

/* We need two stacks.  The kernel will place them but we have to tell
   the kernel about the size of the reserved address space.  */
# define STACK_VARIABLES void *stackaddr = NULL; size_t stacksize = 0

/* How to pass the values to the 'create_thread' function.  */
# define STACK_VARIABLES_ARGS stackaddr, stacksize

/* How to declare a function which gets these parameters.  */
# define STACK_VARIABLES_PARMS void *stackaddr, size_t stacksize

/* How to declare allocate_stack.  */
# define ALLOCATE_STACK_PARMS void **stack, size_t *stacksize

/* This is how the function is called.  We do it this way to allow
   other variants of the function to have more parameters.  */
# define ALLOCATE_STACK(attr, pd) \
  allocate_stack (attr, pd, &stackaddr, &stacksize)

#endif

/* Default alignment of stack.  */
#ifndef STACK_ALIGN
# define STACK_ALIGN __alignof__ (long double)
#endif

/* Default value for minimal stack size after allocating thread
   descriptor and guard.  */
#ifndef MINIMAL_REST_STACK
# define MINIMAL_REST_STACK 4096
#endif


/* Newer kernels have the MAP_STACK flag to indicate a mapping is used for
   a stack.  Use it when possible.  */
#ifndef MAP_STACK
# define MAP_STACK 0
#endif

/* This yields the pointer that TLS support code calls the thread pointer.  */
#if TLS_TCB_AT_TP
# define TLS_TPADJ(pd) (pd)
#elif TLS_DTV_AT_TP
# define TLS_TPADJ(pd) ((struct pthread *)((char *) (pd) + TLS_PRE_TCB_SIZE))
#endif

/* Cache handling for not-yet free stacks.  */

/* Maximum size of the cache, in bytes.  */
static size_t stack_cache_maxsize = 40 * 1024 * 1024; /* 40 MiB by default.  */
static size_t stack_cache_actsize;

/* Lock protecting the cache size accounting and the lists below.  */
static int stack_cache_lock = LLL_LOCK_INITIALIZER;

/* List of queued stack frames.  */
static LIST_HEAD (stack_cache);

/* List of the stacks in use.  */
static LIST_HEAD (stack_used);

/* We need to record what list operations we are going to do so that,
   in case of an asynchronous interruption due to a fork() call, we
   can correct for the work.  */
static uintptr_t in_flight_stack;

/* List of the threads with user provided stacks in use.  No need to
   initialize this, since it's done in __pthread_initialize_minimal.  */
list_t __stack_user __attribute__ ((nocommon));
hidden_data_def (__stack_user)

#if COLORING_INCREMENT != 0
/* Number of threads created.  */
static unsigned int nptl_ncreated;
#endif

/* Check whether the stack is still used or not.  */
#define FREE_P(descr) ((descr)->tid <= 0)


/* Unlink ELEM from the list it is on.  The operation is recorded in
   in_flight_stack so that __reclaim_stacks can repair the lists if a
   fork() interrupts it half-way.  */
static void
stack_list_del (list_t *elem)
{
  in_flight_stack = (uintptr_t) elem;

  atomic_write_barrier ();

  list_del (elem);

  atomic_write_barrier ();

  in_flight_stack = 0;
}


/* Add ELEM to the front of LIST.  The operation is recorded in
   in_flight_stack, with the low bit set to mark an addition, for the
   benefit of __reclaim_stacks.  */
static void
stack_list_add (list_t *elem, list_t *list)
{
  in_flight_stack = (uintptr_t) elem | 1;

  atomic_write_barrier ();

  list_add (elem, list);

  atomic_write_barrier ();

  in_flight_stack = 0;
}

/* We create a doubly linked list of all cache entries.  Doubly linked
   because this allows removing entries from the end.  */


/* Get a stack frame from the cache.  We have to match by size since
   some blocks might be too small or far too large.  */
static struct pthread *
get_cached_stack (size_t *sizep, void **memp)
{
  size_t size = *sizep;
  struct pthread *result = NULL;
  list_t *entry;

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  /* Search the cache for a matching entry.  We search for the
     smallest stack which has at least the required size.  Note that
     in normal situations the size of all allocated stacks is the
     same.  At the very least there are only a few different sizes.
     Therefore this loop will exit early most of the time with an
     exact match.  */
  list_for_each (entry, &stack_cache)
    {
      struct pthread *curr;

      curr = list_entry (entry, struct pthread, list);
      if (FREE_P (curr) && curr->stackblock_size >= size)
        {
          if (curr->stackblock_size == size)
            {
              result = curr;
              break;
            }

          if (result == NULL
              || result->stackblock_size > curr->stackblock_size)
            result = curr;
        }
    }

  if (__builtin_expect (result == NULL, 0)
      /* Make sure the size difference is not too excessive.  In that
         case we do not use the block.  */
      || __builtin_expect (result->stackblock_size > 4 * size, 0))
    {
      /* Release the lock.  */
      lll_unlock (stack_cache_lock, LLL_PRIVATE);

      return NULL;
    }

  /* Don't allow setxid until cloned.  */
  result->setxid_futex = -1;

  /* Dequeue the entry.  */
  stack_list_del (&result->list);

  /* And add to the list of stacks in use.  */
  stack_list_add (&result->list, &stack_used);

  /* And decrease the cache size.  */
  stack_cache_actsize -= result->stackblock_size;

  /* Release the lock early.  */
  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  /* Report size and location of the stack to the caller.  */
  *sizep = result->stackblock_size;
  *memp = result->stackblock;

  /* Cancellation handling is back to the default.  */
  result->cancelhandling = 0;
  result->cleanup = NULL;

  /* No pending event.  */
  result->nextevent = NULL;

  /* Clear the DTV.  */
  dtv_t *dtv = GET_DTV (TLS_TPADJ (result));
  for (size_t cnt = 0; cnt < dtv[-1].counter; ++cnt)
    free (dtv[1 + cnt].pointer.to_free);
  memset (dtv, '\0', (dtv[-1].counter + 1) * sizeof (dtv_t));

  /* Re-initialize the TLS.  */
  _dl_allocate_tls_init (TLS_TPADJ (result));

  return result;
}

/* Free stacks until cache size is lower than LIMIT.  */
void
__free_stacks (size_t limit)
{
  /* We reduce the size of the cache.  Remove the last entries until
     the size is below the limit.  */
  list_t *entry;
  list_t *prev;

  /* Search from the end of the list.  */
  list_for_each_prev_safe (entry, prev, &stack_cache)
    {
      struct pthread *curr;

      curr = list_entry (entry, struct pthread, list);
      if (FREE_P (curr))
        {
          /* Unlink the block.  */
          stack_list_del (entry);

          /* Account for the freed memory.  */
          stack_cache_actsize -= curr->stackblock_size;

          /* Free the memory associated with the ELF TLS.  */
          _dl_deallocate_tls (TLS_TPADJ (curr), false);

          /* Remove this block.  This should never fail.  If it does
             something is really wrong.  */
          if (munmap (curr->stackblock, curr->stackblock_size) != 0)
            abort ();

          /* Maybe we have freed enough.  */
          if (stack_cache_actsize <= limit)
            break;
        }
    }
}

/* Add a stack frame which is not used anymore to the stack cache.
   Must be called with the cache lock held.  */
static inline void
__attribute ((always_inline))
queue_stack (struct pthread *stack)
{
  /* We unconditionally add the stack to the list.  The memory may
     still be in use but it will not be reused until the kernel marks
     the stack as not used anymore.  */
  stack_list_add (&stack->list, &stack_cache);

  stack_cache_actsize += stack->stackblock_size;
  if (__glibc_unlikely (stack_cache_actsize > stack_cache_maxsize))
    __free_stacks (stack_cache_maxsize);
}

/* Make the stack area of PD (excluding the guard area) readable,
   writable and executable.  Returns 0 on success, an errno value
   otherwise.  */
static int
internal_function
change_stack_perm (struct pthread *pd
#ifdef NEED_SEPARATE_REGISTER_STACK
                   , size_t pagemask
#endif
                   )
{
#ifdef NEED_SEPARATE_REGISTER_STACK
  void *stack = (pd->stackblock
                 + (((((pd->stackblock_size - pd->guardsize) / 2)
                      & pagemask) + pd->guardsize) & pagemask));
  size_t len = pd->stackblock + pd->stackblock_size - stack;
#elif _STACK_GROWS_DOWN
  void *stack = pd->stackblock + pd->guardsize;
  size_t len = pd->stackblock_size - pd->guardsize;
#elif _STACK_GROWS_UP
  void *stack = pd->stackblock;
  size_t len = (uintptr_t) pd - pd->guardsize - (uintptr_t) pd->stackblock;
#else
# error "Define either _STACK_GROWS_DOWN or _STACK_GROWS_UP"
#endif
  if (mprotect (stack, len, PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
    return errno;

  return 0;
}

/* Returns a usable stack for a new thread either by allocating a
   new stack or reusing a cached stack of sufficient size.
   ATTR must be non-NULL and point to a valid pthread_attr.
   PDP must be non-NULL.  */
static int
allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
                ALLOCATE_STACK_PARMS)
{
  struct pthread *pd;
  size_t size;
  size_t pagesize_m1 = __getpagesize () - 1;

  assert (powerof2 (pagesize_m1 + 1));
  assert (TCB_ALIGNMENT >= STACK_ALIGN);

  /* Get the stack size from the attribute if it is set.  Otherwise we
     use the default we determined at start time.  */
  if (attr->stacksize != 0)
    size = attr->stacksize;
  else
    {
      lll_lock (__default_pthread_attr_lock, LLL_PRIVATE);
      size = __default_pthread_attr.stacksize;
      lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);
    }

  /* Get memory for the stack.  */
  if (__glibc_unlikely (attr->flags & ATTR_FLAG_STACKADDR))
    {
      uintptr_t adj;
      char *stackaddr = (char *) attr->stackaddr;

      /* Assume the same layout as the _STACK_GROWS_DOWN case, with struct
         pthread at the top of the stack block.  Later we adjust the guard
         location and stack address to match the _STACK_GROWS_UP case.  */
      if (_STACK_GROWS_UP)
        stackaddr += attr->stacksize;

      /* If the user also specified the size of the stack make sure it
         is large enough.  */
      if (attr->stacksize != 0
          && attr->stacksize < (__static_tls_size + MINIMAL_REST_STACK))
        return EINVAL;

      /* Adjust stack size for alignment of the TLS block.  */
#if TLS_TCB_AT_TP
      adj = ((uintptr_t) stackaddr - TLS_TCB_SIZE)
            & __static_tls_align_m1;
      assert (size > adj + TLS_TCB_SIZE);
#elif TLS_DTV_AT_TP
      adj = ((uintptr_t) stackaddr - __static_tls_size)
            & __static_tls_align_m1;
      assert (size > adj);
#endif

      /* The user provided some memory.  Let's hope it matches the
         size...  We do not allocate guard pages if the user provided
         the stack.  It is the user's responsibility to do this if it
         is wanted.  */
#if TLS_TCB_AT_TP
      pd = (struct pthread *) ((uintptr_t) stackaddr
                               - TLS_TCB_SIZE - adj);
#elif TLS_DTV_AT_TP
      pd = (struct pthread *) (((uintptr_t) stackaddr
                                - __static_tls_size - adj)
                               - TLS_PRE_TCB_SIZE);
#endif

      /* The user provided stack memory needs to be cleared.  */
      memset (pd, '\0', sizeof (struct pthread));

      /* The first TSD block is included in the TCB.  */
      pd->specific[0] = pd->specific_1stblock;

      /* Remember the stack-related values.  */
      pd->stackblock = (char *) stackaddr - size;
      pd->stackblock_size = size;

      /* This is a user-provided stack.  It will not be queued in the
         stack cache nor will the memory (except the TLS memory) be freed.  */
      pd->user_stack = true;

      /* This is at least the second thread.  */
      pd->header.multiple_threads = 1;
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
      __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
#endif

#ifndef __ASSUME_PRIVATE_FUTEX
      /* The thread must know when private futexes are supported.  */
      pd->header.private_futex = THREAD_GETMEM (THREAD_SELF,
                                                header.private_futex);
#endif

#ifdef NEED_DL_SYSINFO
      SETUP_THREAD_SYSINFO (pd);
#endif

      /* Don't allow setxid until cloned.  */
      pd->setxid_futex = -1;

      /* Allocate the DTV for this thread.  */
      if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
        {
          /* Something went wrong.  */
          assert (errno == ENOMEM);
          return errno;
        }


      /* Prepare to modify global data.  */
      lll_lock (stack_cache_lock, LLL_PRIVATE);

      /* And add to the list of stacks in use.  */
      list_add (&pd->list, &__stack_user);

      lll_unlock (stack_cache_lock, LLL_PRIVATE);
    }
  else
    {
      /* Allocate some anonymous memory.  If possible use the cache.  */
      size_t guardsize;
      size_t reqsize;
      void *mem;
      const int prot = (PROT_READ | PROT_WRITE
                        | ((GL(dl_stack_flags) & PF_X) ? PROT_EXEC : 0));

#if COLORING_INCREMENT != 0
      /* Add one more page for stack coloring.  Don't do it for stacks
         with 16 times pagesize or larger.  This might just cause
         unnecessary misalignment.  */
      if (size <= 16 * pagesize_m1)
        size += pagesize_m1 + 1;
#endif

      /* Adjust the stack size for alignment.  */
      size &= ~__static_tls_align_m1;
      assert (size != 0);

      /* Make sure the size of the stack is enough for the guard and
         possibly the thread descriptor.  */
      guardsize = (attr->guardsize + pagesize_m1) & ~pagesize_m1;
      if (__builtin_expect (size < ((guardsize + __static_tls_size
                                     + MINIMAL_REST_STACK + pagesize_m1)
                                    & ~pagesize_m1),
                            0))
        /* The stack is too small (or the guard too large).  */
        return EINVAL;

      /* Try to get a stack from the cache.  */
      reqsize = size;
      pd = get_cached_stack (&size, &mem);
      if (pd == NULL)
        {
          /* To avoid aliasing effects on a larger scale than pages we
             adjust the allocated stack size if necessary.  This way
             allocations directly following each other will not have
             aliasing problems.  */
#if MULTI_PAGE_ALIASING != 0
          if ((size % MULTI_PAGE_ALIASING) == 0)
            size += pagesize_m1 + 1;
#endif

          mem = mmap (NULL, size, prot,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);

          if (__glibc_unlikely (mem == MAP_FAILED))
            return errno;

          /* SIZE is guaranteed to be greater than zero.
             So we can never get a null pointer back from mmap.  */
          assert (mem != NULL);

#if COLORING_INCREMENT != 0
          /* Atomically increment NCREATED.  */
          unsigned int ncreated = atomic_increment_val (&nptl_ncreated);

          /* We choose the offset for coloring by incrementing it for
             every new thread by a fixed amount.  The offset is used
             modulo the page size.  Even if coloring would be better
             relative to higher alignment values it makes no sense to
             do it since the mmap() interface does not allow us to
             specify any alignment for the returned memory block.  */
          size_t coloring = (ncreated * COLORING_INCREMENT) & pagesize_m1;

          /* Make sure the coloring offset does not disturb the alignment
             of the TCB and static TLS block.  */
          if (__glibc_unlikely ((coloring & __static_tls_align_m1) != 0))
            coloring = (((coloring + __static_tls_align_m1)
                         & ~(__static_tls_align_m1))
                        & ~pagesize_m1);
#else
          /* Unless specified we do not make any adjustments.  */
# define coloring 0
#endif

          /* Place the thread descriptor at the end of the stack.  */
#if TLS_TCB_AT_TP
          pd = (struct pthread *) ((char *) mem + size - coloring) - 1;
#elif TLS_DTV_AT_TP
          pd = (struct pthread *) ((((uintptr_t) mem + size - coloring
                                     - __static_tls_size)
                                    & ~__static_tls_align_m1)
                                   - TLS_PRE_TCB_SIZE);
#endif

          /* Remember the stack-related values.  */
          pd->stackblock = mem;
          pd->stackblock_size = size;

          /* We allocated the first block of the thread-specific data
             array.  This address will not change for the lifetime of
             this descriptor.  */
          pd->specific[0] = pd->specific_1stblock;

          /* This is at least the second thread.  */
          pd->header.multiple_threads = 1;
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
          __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
#endif

#ifndef __ASSUME_PRIVATE_FUTEX
          /* The thread must know when private futexes are supported.  */
          pd->header.private_futex = THREAD_GETMEM (THREAD_SELF,
                                                    header.private_futex);
#endif

#ifdef NEED_DL_SYSINFO
          SETUP_THREAD_SYSINFO (pd);
#endif

          /* Don't allow setxid until cloned.  */
          pd->setxid_futex = -1;

          /* Allocate the DTV for this thread.  */
          if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
            {
              /* Something went wrong.  */
              assert (errno == ENOMEM);

              /* Free the stack memory we just allocated.  */
              (void) munmap (mem, size);

              return errno;
            }


          /* Prepare to modify global data.  */
          lll_lock (stack_cache_lock, LLL_PRIVATE);

          /* And add to the list of stacks in use.  */
          stack_list_add (&pd->list, &stack_used);

          lll_unlock (stack_cache_lock, LLL_PRIVATE);


          /* There might have been a race.  Another thread might have
             caused the stacks to get exec permission while this new
             stack was prepared.  Detect if this was possible and
             change the permission if necessary.  */
          if (__builtin_expect ((GL(dl_stack_flags) & PF_X) != 0
                                && (prot & PROT_EXEC) == 0, 0))
            {
              int err = change_stack_perm (pd
#ifdef NEED_SEPARATE_REGISTER_STACK
                                           , ~pagesize_m1
#endif
                                           );
              if (err != 0)
                {
                  /* Free the stack memory we just allocated.  */
                  (void) munmap (mem, size);

                  return err;
                }
            }


          /* Note that all of the stack and the thread descriptor is
             zeroed.  This means we do not have to initialize fields
             with initial value zero.  This is specifically true for
             the 'tid' field which is always set back to zero once the
             stack is not used anymore and for the 'guardsize' field
             which will be read next.  */
        }

      /* Create or resize the guard area if necessary.  */
      if (__glibc_unlikely (guardsize > pd->guardsize))
        {
#ifdef NEED_SEPARATE_REGISTER_STACK
          char *guard = mem + (((size - guardsize) / 2) & ~pagesize_m1);
#elif _STACK_GROWS_DOWN
          char *guard = mem;
#elif _STACK_GROWS_UP
          char *guard = (char *) (((uintptr_t) pd - guardsize) & ~pagesize_m1);
#endif
          if (mprotect (guard, guardsize, PROT_NONE) != 0)
            {
            mprot_error:
              lll_lock (stack_cache_lock, LLL_PRIVATE);

              /* Remove the thread from the list.  */
              stack_list_del (&pd->list);

              lll_unlock (stack_cache_lock, LLL_PRIVATE);

              /* Get rid of the TLS block we allocated.  */
              _dl_deallocate_tls (TLS_TPADJ (pd), false);

              /* Free the stack memory regardless of whether the size
                 of the cache is over the limit or not.  If this piece
                 of memory caused problems we better do not use it
                 anymore.  Uh, and we ignore possible errors.  There
                 is nothing we could do.  */
              (void) munmap (mem, size);

              return errno;
            }

          pd->guardsize = guardsize;
        }
      else if (__builtin_expect (pd->guardsize - guardsize > size - reqsize,
                                 0))
        {
          /* The old guard area is too large.  */

#ifdef NEED_SEPARATE_REGISTER_STACK
          char *guard = mem + (((size - guardsize) / 2) & ~pagesize_m1);
          char *oldguard = mem + (((size - pd->guardsize) / 2) & ~pagesize_m1);

          if (oldguard < guard
              && mprotect (oldguard, guard - oldguard, prot) != 0)
            goto mprot_error;

          if (mprotect (guard + guardsize,
                        oldguard + pd->guardsize - guard - guardsize,
                        prot) != 0)
            goto mprot_error;
#elif _STACK_GROWS_DOWN
          if (mprotect ((char *) mem + guardsize, pd->guardsize - guardsize,
                        prot) != 0)
            goto mprot_error;
#elif _STACK_GROWS_UP
          if (mprotect ((char *) pd - pd->guardsize,
                        pd->guardsize - guardsize, prot) != 0)
            goto mprot_error;
#endif

          pd->guardsize = guardsize;
        }
      /* The pthread_getattr_np() calls need to get passed the size
         requested in the attribute, regardless of how large the
         actually used guardsize is.  */
      pd->reported_guardsize = guardsize;
    }

  /* Initialize the lock.  We have to do this unconditionally since the
     stillborn thread could be canceled while the lock is taken.  */
  pd->lock = LLL_LOCK_INITIALIZER;

  /* The robust mutex lists also need to be initialized
     unconditionally because the cleanup for the previous stack owner
     might have happened in the kernel.  */
  pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
                                  - offsetof (pthread_mutex_t,
                                              __data.__list.__next));
  pd->robust_head.list_op_pending = NULL;
#ifdef __PTHREAD_MUTEX_HAVE_PREV
  pd->robust_prev = &pd->robust_head;
#endif
  pd->robust_head.list = &pd->robust_head;

  /* We place the thread descriptor at the end of the stack.  */
  *pdp = pd;

#if _STACK_GROWS_DOWN
  void *stacktop;

# if TLS_TCB_AT_TP
  /* The stack begins before the TCB and the static TLS block.  */
  stacktop = ((char *) (pd + 1) - __static_tls_size);
# elif TLS_DTV_AT_TP
  stacktop = (char *) (pd - 1);
# endif

# ifdef NEED_SEPARATE_REGISTER_STACK
  *stack = pd->stackblock;
  *stacksize = stacktop - *stack;
# else
  *stack = stacktop;
# endif
#else
  *stack = pd->stackblock;
#endif

  return 0;
}
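
/* Return the stack of the terminated thread PD to the implementation:
   unlink it from the list of stacks in use and either queue the
   mmap()ed block in the stack cache or, for a user-supplied stack,
   free only the associated TLS memory.  */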
void
internal_function
__deallocate_stack (struct pthread *pd)
{
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  /* Remove the thread from the list it is on (stacks in use or
     user-provided stacks).  */
  stack_list_del (&pd->list);

  /* Not much to do.  Just free the mmap()ed memory.  Note that we do
     not reset the 'used' flag in the 'tid' field.  This is done by
     the kernel.  If no thread has been created yet this field is
     still zero.  */
  if (__glibc_likely (! pd->user_stack))
    (void) queue_stack (pd);
  else
    /* Free the memory associated with the ELF TLS.  */
    _dl_deallocate_tls (TLS_TPADJ (pd), false);

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
}
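
/* Switch the main thread's stack and every cached or in-use thread
   stack to executable permission.  Called when a newly loaded object
   requires executable stacks.  */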
int
internal_function
__make_stacks_executable (void **stack_endp)
{
  /* First the main thread's stack.  */
  int err = _dl_make_stack_executable (stack_endp);
  if (err != 0)
    return err;

#ifdef NEED_SEPARATE_REGISTER_STACK
  const size_t pagemask = ~(__getpagesize () - 1);
#endif

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      err = change_stack_perm (list_entry (runp, struct pthread, list)
#ifdef NEED_SEPARATE_REGISTER_STACK
                               , pagemask
#endif
                               );
      if (err != 0)
        break;
    }

  /* Also change the permission for the currently unused stacks.  This
     might be wasted time but better spend it here than adding a check
     in the fast path.  */
  if (err == 0)
    list_for_each (runp, &stack_cache)
      {
        err = change_stack_perm (list_entry (runp, struct pthread, list)
#ifdef NEED_SEPARATE_REGISTER_STACK
                                 , pagemask
#endif
                                 );
        if (err != 0)
          break;
      }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  return err;
}

/* In case of a fork() call the memory allocation in the child will be
   the same but only one thread is running.  All stacks except that of
   the one running thread are not used anymore.  We have to recycle
   them.  */
void
__reclaim_stacks (void)
{
  struct pthread *self = (struct pthread *) THREAD_SELF;

  /* No locking necessary.  The caller is the only stack in use.  But
     we have to be aware that we might have interrupted a list
     operation.  */

  if (in_flight_stack != 0)
    {
      bool add_p = in_flight_stack & 1;
      list_t *elem = (list_t *) (in_flight_stack & ~(uintptr_t) 1);

      if (add_p)
        {
          /* We always add at the beginning of the list.  So in this case we
             only need to check the beginning of these lists to see if the
             pointers at the head of the list are inconsistent.  */
          list_t *l = NULL;

          if (stack_used.next->prev != &stack_used)
            l = &stack_used;
          else if (stack_cache.next->prev != &stack_cache)
            l = &stack_cache;

          if (l != NULL)
            {
              assert (l->next->prev == elem);
              elem->next = l->next;
              elem->prev = l;
              l->next = elem;
            }
        }
      else
        {
          /* We can simply always replay the delete operation.  */
          elem->next->prev = elem->prev;
          elem->prev->next = elem->next;
        }
    }

  /* Mark all stacks except the still running one as free.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *curp = list_entry (runp, struct pthread, list);
      if (curp != self)
        {
          /* This marks the stack as free.  */
          curp->tid = 0;

          /* Account for the size of the stack.  */
          stack_cache_actsize += curp->stackblock_size;

          if (curp->specific_used)
            {
              /* Clear the thread-specific data.  */
              memset (curp->specific_1stblock, '\0',
                      sizeof (curp->specific_1stblock));

              curp->specific_used = false;

              for (size_t cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
                if (curp->specific[cnt] != NULL)
                  {
                    memset (curp->specific[cnt], '\0',
                            sizeof (curp->specific_1stblock));

                    /* We have allocated the block which we do not
                       free here so re-set the bit.  */
                    curp->specific_used = true;
                  }
            }
        }
    }

  /* Add the stack of all running threads to the cache.  */
  list_splice (&stack_used, &stack_cache);

  /* Remove the entry for the current thread from the cache list and
     add it to the list of running threads.  Which of the two lists it
     is added to is decided by the user_stack flag.  */
  stack_list_del (&self->list);

  /* Re-initialize the lists for all the threads.  */
  INIT_LIST_HEAD (&stack_used);
  INIT_LIST_HEAD (&__stack_user);

  if (__glibc_unlikely (THREAD_GETMEM (self, user_stack)))
    list_add (&self->list, &__stack_user);
  else
    list_add (&self->list, &stack_used);

  /* There is one thread running.  */
  __nptl_nthreads = 1;

  in_flight_stack = 0;

  /* Initialize locks.  */
  stack_cache_lock = LLL_LOCK_INITIALIZER;
  __default_pthread_attr_lock = LLL_LOCK_INITIALIZER;
}

#if HP_TIMING_AVAIL
# undef __find_thread_by_id
/* Find a thread given the thread ID.  */
attribute_hidden
struct pthread *
__find_thread_by_id (pid_t tid)
{
  struct pthread *result = NULL;

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *curp;

      curp = list_entry (runp, struct pthread, list);

      if (curp->tid == tid)
        {
          result = curp;
          goto out;
        }
    }

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    {
      struct pthread *curp;

      curp = list_entry (runp, struct pthread, list);

      if (curp->tid == tid)
        {
          result = curp;
          goto out;
        }
    }

 out:
  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  return result;
}
#endif

#ifdef SIGSETXID
/* Mark T so that it cannot exit before the pending set*id request has
   been handled: wait until the thread is fully cloned, then set
   SETXID_BITMASK in its cancelhandling word.  Exiting threads are
   left alone.  */
static void
internal_function
setxid_mark_thread (struct xid_command *cmdp, struct pthread *t)
{
  int ch;

  /* Wait until this thread is cloned.  */
  if (t->setxid_futex == -1
      && ! atomic_compare_and_exchange_bool_acq (&t->setxid_futex, -2, -1))
    do
      futex_wait_simple (&t->setxid_futex, -2, FUTEX_PRIVATE);
    while (t->setxid_futex == -2);

  /* Don't let the thread exit before the setxid handler runs.  */
  t->setxid_futex = 0;

  do
    {
      ch = t->cancelhandling;

      /* If the thread is exiting right now, ignore it.  */
      if ((ch & EXITING_BITMASK) != 0)
        {
          /* Release the futex if there is no other setxid in
             progress.  */
          if ((ch & SETXID_BITMASK) == 0)
            {
              t->setxid_futex = 1;
              futex_wake (&t->setxid_futex, 1, FUTEX_PRIVATE);
            }
          return;
        }
    }
  while (atomic_compare_and_exchange_bool_acq (&t->cancelhandling,
                                               ch | SETXID_BITMASK, ch));
}

/* Clear SETXID_BITMASK in T's cancelhandling word again and release
   the setxid futex in case the thread is waiting on it.  */
static void
internal_function
setxid_unmark_thread (struct xid_command *cmdp, struct pthread *t)
{
  int ch;

  do
    {
      ch = t->cancelhandling;
      if ((ch & SETXID_BITMASK) == 0)
        return;
    }
  while (atomic_compare_and_exchange_bool_acq (&t->cancelhandling,
                                               ch & ~SETXID_BITMASK, ch));

  /* Release the futex just in case.  */
  t->setxid_futex = 1;
  futex_wake (&t->setxid_futex, 1, FUTEX_PRIVATE);
}

/* Send SIGSETXID to T so that it runs the set*id syscall itself.
   Returns 1 if the signal was delivered, 0 otherwise.  */
static int
internal_function
setxid_signal_thread (struct xid_command *cmdp, struct pthread *t)
{
  if ((t->cancelhandling & SETXID_BITMASK) == 0)
    return 0;

  int val;
  pid_t pid = __getpid ();
  INTERNAL_SYSCALL_DECL (err);
  val = INTERNAL_SYSCALL_CALL (tgkill, err, pid, t->tid, SIGSETXID);

  /* If this failed, the thread must not have started yet or must
     already have exited.  */
  if (!INTERNAL_SYSCALL_ERROR_P (val, err))
    {
      atomic_increment (&cmdp->cntr);
      return 1;
    }
  else
    return 0;
}

/* Check for consistency across set*id system call results.  The abort
   should not happen as long as all privilege changes happen through
   the glibc wrappers.  ERROR must be 0 (no error) or an errno
   code.  */
void
attribute_hidden
__nptl_setxid_error (struct xid_command *cmdp, int error)
{
  do
    {
      int olderror = cmdp->error;
      if (olderror == error)
        break;
      if (olderror != -1)
        /* Mismatch between current and previous results.  */
        abort ();
    }
  while (atomic_compare_and_exchange_bool_acq (&cmdp->error, error, -1));
}
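
/* Perform the set*id operation described by CMDP in every thread of
   the process: mark all other threads, signal them with SIGSETXID so
   they run the same syscall in their own context, wait until all of
   them have done so, and finally run the syscall in the calling
   thread and return its result.  */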
int
attribute_hidden
__nptl_setxid (struct xid_command *cmdp)
{
  int signalled;
  int result;
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  __xidcmd = cmdp;
  cmdp->cntr = 0;
  cmdp->error = -1;

  struct pthread *self = THREAD_SELF;

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_mark_thread (cmdp, t);
    }

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_mark_thread (cmdp, t);
    }

  /* Iterate until we don't succeed in signalling anyone.  That means
     we have gotten all running threads, and their children will be
     automatically correct once started.  */
  do
    {
      signalled = 0;

      list_for_each (runp, &stack_used)
        {
          struct pthread *t = list_entry (runp, struct pthread, list);
          if (t == self)
            continue;

          signalled += setxid_signal_thread (cmdp, t);
        }

      list_for_each (runp, &__stack_user)
        {
          struct pthread *t = list_entry (runp, struct pthread, list);
          if (t == self)
            continue;

          signalled += setxid_signal_thread (cmdp, t);
        }

      int cur = cmdp->cntr;
      while (cur != 0)
        {
          futex_wait_simple ((unsigned int *) &cmdp->cntr, cur,
                             FUTEX_PRIVATE);
          cur = cmdp->cntr;
        }
    }
  while (signalled != 0);

  /* Clean up flags, so that no thread blocks during exit waiting
     for a signal which will never come.  */
  list_for_each (runp, &stack_used)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_unmark_thread (cmdp, t);
    }

  list_for_each (runp, &__stack_user)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_unmark_thread (cmdp, t);
    }

  /* This must be last, otherwise the current thread might not have
     permissions to send SIGSETXID syscall to the other threads.  */
  INTERNAL_SYSCALL_DECL (err);
  result = INTERNAL_SYSCALL_NCS (cmdp->syscall_no, err, 3,
                                 cmdp->id[0], cmdp->id[1], cmdp->id[2]);
  int error = 0;
  if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (result, err)))
    {
      error = INTERNAL_SYSCALL_ERRNO (result, err);
      __set_errno (error);
      result = -1;
    }
  __nptl_setxid_error (cmdp, error);

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
  return result;
}
#endif  /* SIGSETXID.  */

/* Initialize the static TLS block of MAP in the thread descriptor
   CURP and register it in CURP's DTV.  */
static inline void __attribute__((always_inline))
init_one_static_tls (struct pthread *curp, struct link_map *map)
{
# if TLS_TCB_AT_TP
  void *dest = (char *) curp - map->l_tls_offset;
# elif TLS_DTV_AT_TP
  void *dest = (char *) curp + map->l_tls_offset + TLS_PRE_TCB_SIZE;
# else
#  error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif

  /* Fill in the DTV slot so that a later LD/GD access will find it.  */
  dtv_t *dtv = GET_DTV (TLS_TPADJ (curp));
  dtv[map->l_tls_modid].pointer.to_free = NULL;
  dtv[map->l_tls_modid].pointer.val = dest;

  /* Initialize the memory.  */
  memset (__mempcpy (dest, map->l_tls_initimage, map->l_tls_initimage_size),
          '\0', map->l_tls_blocksize - map->l_tls_initimage_size);
}
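
/* Initialize the static TLS block of the newly loaded object MAP in
   every existing thread.  Called by the dynamic linker when an object
   using static TLS is loaded after threads have been created.  */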
void
attribute_hidden
__pthread_init_static_tls (struct link_map *map)
{
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    init_one_static_tls (list_entry (runp, struct pthread, list), map);

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    init_one_static_tls (list_entry (runp, struct pthread, list), map);

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
}
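
/* Wait until no thread is in a THREAD_GSCOPE critical section anymore,
   so that the dynamic linker can safely modify the global scope.  */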
void
attribute_hidden
__wait_lookup_done (void)
{
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  struct pthread *self = THREAD_SELF;

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self || t->header.gscope_flag == THREAD_GSCOPE_FLAG_UNUSED)
        continue;

      int *const gscope_flagp = &t->header.gscope_flag;

      /* We have to wait until this thread is done with the global
         scope.  First tell the thread that we are waiting and
         possibly have to be woken.  */
      if (atomic_compare_and_exchange_bool_acq (gscope_flagp,
                                                THREAD_GSCOPE_FLAG_WAIT,
                                                THREAD_GSCOPE_FLAG_USED))
        continue;

      do
        futex_wait_simple ((unsigned int *) gscope_flagp,
                           THREAD_GSCOPE_FLAG_WAIT, FUTEX_PRIVATE);
      while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT);
    }

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self || t->header.gscope_flag == THREAD_GSCOPE_FLAG_UNUSED)
        continue;

      int *const gscope_flagp = &t->header.gscope_flag;

      /* We have to wait until this thread is done with the global
         scope.  First tell the thread that we are waiting and
         possibly have to be woken.  */
      if (atomic_compare_and_exchange_bool_acq (gscope_flagp,
                                                THREAD_GSCOPE_FLAG_WAIT,
                                                THREAD_GSCOPE_FLAG_USED))
        continue;

      do
        futex_wait_simple ((unsigned int *) gscope_flagp,
                           THREAD_GSCOPE_FLAG_WAIT, FUTEX_PRIVATE);
      while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT);
    }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
}