nptl/allocatestack.c
/* Copyright (C) 2002-2016 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <dl-sysdep.h>
#include <dl-tls.h>
#include <tls.h>
#include <list.h>
#include <lowlevellock.h>
#include <futex-internal.h>
#include <kernel-features.h>
#include <stack-aliasing.h>

#ifndef NEED_SEPARATE_REGISTER_STACK

/* Most architectures have exactly one stack pointer.  Some have more.  */
# define STACK_VARIABLES void *stackaddr = NULL

/* How to pass the values to the 'create_thread' function.  */
# define STACK_VARIABLES_ARGS stackaddr

/* How to declare the function which gets these parameters.  */
# define STACK_VARIABLES_PARMS void *stackaddr

/* How to declare allocate_stack.  */
# define ALLOCATE_STACK_PARMS void **stack

/* This is how the function is called.  We do it this way to allow
   other variants of the function to have more parameters.  */
# define ALLOCATE_STACK(attr, pd) allocate_stack (attr, pd, &stackaddr)

#else

/* We need two stacks.  The kernel will place them but we have to tell
   the kernel about the size of the reserved address space.  */
# define STACK_VARIABLES void *stackaddr = NULL; size_t stacksize = 0

/* How to pass the values to the 'create_thread' function.  */
# define STACK_VARIABLES_ARGS stackaddr, stacksize

/* How to declare the function which gets these parameters.  */
# define STACK_VARIABLES_PARMS void *stackaddr, size_t stacksize

/* How to declare allocate_stack.  */
# define ALLOCATE_STACK_PARMS void **stack, size_t *stacksize

/* This is how the function is called.  We do it this way to allow
   other variants of the function to have more parameters.  */
# define ALLOCATE_STACK(attr, pd) \
  allocate_stack (attr, pd, &stackaddr, &stacksize)

#endif
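
/* Note (editorial): NEED_SEPARATE_REGISTER_STACK is defined by
   configurations whose ABI uses a second, separately growing register
   backing store next to the ordinary memory stack (historically ia64);
   in that case both areas share one reserved mapping of STACKSIZE
   bytes.  */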

/* Default alignment of stack.  */
#ifndef STACK_ALIGN
# define STACK_ALIGN __alignof__ (long double)
#endif

/* Default value for minimal stack size after allocating thread
   descriptor and guard.  */
#ifndef MINIMAL_REST_STACK
# define MINIMAL_REST_STACK 4096
#endif


/* Newer kernels have the MAP_STACK flag to indicate a mapping is used for
   a stack.  Use it when possible.  */
#ifndef MAP_STACK
# define MAP_STACK 0
#endif

/* This yields the pointer that TLS support code calls the thread pointer.  */
#if TLS_TCB_AT_TP
# define TLS_TPADJ(pd) (pd)
#elif TLS_DTV_AT_TP
# define TLS_TPADJ(pd) ((struct pthread *)((char *) (pd) + TLS_PRE_TCB_SIZE))
#endif
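
/* Note: with TLS_TCB_AT_TP the thread descriptor itself serves as the
   TCB, so the thread pointer is simply PD.  With TLS_DTV_AT_TP the TCB
   lives TLS_PRE_TCB_SIZE bytes above the descriptor, so TLS_TPADJ
   converts a descriptor pointer into the value the TLS machinery
   (GET_DTV, _dl_allocate_tls, _dl_deallocate_tls) expects.  */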

/* Cache handling for not-yet free stacks.  */

/* Maximum size in bytes of the stack cache.  */
static size_t stack_cache_maxsize = 40 * 1024 * 1024; /* 40 MiB by default.  */
static size_t stack_cache_actsize;

/* Mutex protecting the stack cache and the lists below.  */
static int stack_cache_lock = LLL_LOCK_INITIALIZER;

/* List of queued stacks.  */
static LIST_HEAD (stack_cache);

/* List of the stacks in use.  */
static LIST_HEAD (stack_used);

/* We need to record what list operations we are going to do so that,
   in case of an asynchronous interruption due to a fork() call, we
   can correct for the work.  */
static uintptr_t in_flight_stack;
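
/* The low bit of IN_FLIGHT_STACK distinguishes a pending list_add (bit
   set) from a pending list_del (bit clear); the remaining bits hold the
   address of the list element being (un)linked.  __reclaim_stacks uses
   this encoding after fork to replay or complete the interrupted list
   operation.  */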

/* List of the threads with user provided stacks in use.  No need to
   initialize this, since it's done in __pthread_initialize_minimal.  */
list_t __stack_user __attribute__ ((nocommon));
hidden_data_def (__stack_user)

#if COLORING_INCREMENT != 0
/* Number of threads created.  */
static unsigned int nptl_ncreated;
#endif

/* Check whether the stack is still used or not.  */
#define FREE_P(descr) ((descr)->tid <= 0)


static void
stack_list_del (list_t *elem)
{
  in_flight_stack = (uintptr_t) elem;

  atomic_write_barrier ();

  list_del (elem);

  atomic_write_barrier ();

  in_flight_stack = 0;
}


static void
stack_list_add (list_t *elem, list_t *list)
{
  in_flight_stack = (uintptr_t) elem | 1;

  atomic_write_barrier ();

  list_add (elem, list);

  atomic_write_barrier ();

  in_flight_stack = 0;
}

/* We create a double linked list of all cache entries.  Double linked
   because this allows removing entries from the end.  */


/* Get a stack from the cache.  We have to match by size since
   some blocks might be too small or far too large.  */
static struct pthread *
get_cached_stack (size_t *sizep, void **memp)
{
  size_t size = *sizep;
  struct pthread *result = NULL;
  list_t *entry;

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  /* Search the cache for a matching entry.  We search for the
     smallest stack which has at least the required size.  Note that
     in normal situations the size of all allocated stacks is the
     same.  At the very least there are only a few different sizes.
     Therefore this loop will exit early most of the time with an
     exact match.  */
  list_for_each (entry, &stack_cache)
    {
      struct pthread *curr;

      curr = list_entry (entry, struct pthread, list);
      if (FREE_P (curr) && curr->stackblock_size >= size)
        {
          if (curr->stackblock_size == size)
            {
              result = curr;
              break;
            }

          if (result == NULL
              || result->stackblock_size > curr->stackblock_size)
            result = curr;
        }
    }

  if (__builtin_expect (result == NULL, 0)
      /* Make sure the size difference is not too excessive.  In that
         case we do not use the block.  */
      || __builtin_expect (result->stackblock_size > 4 * size, 0))
    {
      /* Release the lock.  */
      lll_unlock (stack_cache_lock, LLL_PRIVATE);

      return NULL;
    }

  /* Don't allow setxid until cloned.  */
  result->setxid_futex = -1;

  /* Dequeue the entry.  */
  stack_list_del (&result->list);

  /* And add to the list of stacks in use.  */
  stack_list_add (&result->list, &stack_used);

  /* And decrease the cache size.  */
  stack_cache_actsize -= result->stackblock_size;

  /* Release the lock early.  */
  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  /* Report size and location of the stack to the caller.  */
  *sizep = result->stackblock_size;
  *memp = result->stackblock;

  /* Cancellation handling is back to the default.  */
  result->cancelhandling = 0;
  result->cleanup = NULL;

  /* No pending event.  */
  result->nextevent = NULL;

  /* Clear the DTV.  */
  dtv_t *dtv = GET_DTV (TLS_TPADJ (result));
  for (size_t cnt = 0; cnt < dtv[-1].counter; ++cnt)
    free (dtv[1 + cnt].pointer.to_free);
  memset (dtv, '\0', (dtv[-1].counter + 1) * sizeof (dtv_t));

  /* Re-initialize the TLS.  */
  _dl_allocate_tls_init (TLS_TPADJ (result));

  return result;
}


/* Free stacks until cache size is lower than LIMIT.  */
void
__free_stacks (size_t limit)
{
  /* We reduce the size of the cache.  Remove the last entries until
     the size is below the limit.  */
  list_t *entry;
  list_t *prev;

  /* Search from the end of the list.  */
  list_for_each_prev_safe (entry, prev, &stack_cache)
    {
      struct pthread *curr;

      curr = list_entry (entry, struct pthread, list);
      if (FREE_P (curr))
        {
          /* Unlink the block.  */
          stack_list_del (entry);

          /* Account for the freed memory.  */
          stack_cache_actsize -= curr->stackblock_size;

          /* Free the memory associated with the ELF TLS.  */
          _dl_deallocate_tls (TLS_TPADJ (curr), false);

          /* Remove this block.  This should never fail.  If it does
             something is really wrong.  */
          if (munmap (curr->stackblock, curr->stackblock_size) != 0)
            abort ();

          /* Maybe we have freed enough.  */
          if (stack_cache_actsize <= limit)
            break;
        }
    }
}


/* Add a stack which is not used anymore to the cache.  Must be
   called with the cache lock held.  */
static inline void
__attribute ((always_inline))
queue_stack (struct pthread *stack)
{
  /* We unconditionally add the stack to the list.  The memory may
     still be in use but it will not be reused until the kernel marks
     the stack as not used anymore.  */
  stack_list_add (&stack->list, &stack_cache);

  stack_cache_actsize += stack->stackblock_size;
  if (__glibc_unlikely (stack_cache_actsize > stack_cache_maxsize))
    __free_stacks (stack_cache_maxsize);
}


static int
internal_function
change_stack_perm (struct pthread *pd
#ifdef NEED_SEPARATE_REGISTER_STACK
                   , size_t pagemask
#endif
                   )
{
#ifdef NEED_SEPARATE_REGISTER_STACK
  void *stack = (pd->stackblock
                 + (((((pd->stackblock_size - pd->guardsize) / 2)
                      & pagemask) + pd->guardsize) & pagemask));
  size_t len = pd->stackblock + pd->stackblock_size - stack;
#elif _STACK_GROWS_DOWN
  void *stack = pd->stackblock + pd->guardsize;
  size_t len = pd->stackblock_size - pd->guardsize;
#elif _STACK_GROWS_UP
  void *stack = pd->stackblock;
  size_t len = (uintptr_t) pd - pd->guardsize - (uintptr_t) pd->stackblock;
#else
# error "Define either _STACK_GROWS_DOWN or _STACK_GROWS_UP"
#endif
  if (mprotect (stack, len, PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
    return errno;

  return 0;
}
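
/* change_stack_perm is used by __make_stacks_executable below when the
   thread stacks have to be made executable because a loaded object
   requires it (PF_X set in GL(dl_stack_flags)); it simply remaps the
   stack area of PD with PROT_EXEC added.  */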


/* Returns a usable stack for a new thread either by allocating a
   new stack or reusing a cached stack of sufficient size.
   ATTR must be non-NULL and point to a valid pthread_attr.
   PDP must be non-NULL.  */
static int
allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
                ALLOCATE_STACK_PARMS)
{
  struct pthread *pd;
  size_t size;
  size_t pagesize_m1 = __getpagesize () - 1;

  assert (powerof2 (pagesize_m1 + 1));
  assert (TCB_ALIGNMENT >= STACK_ALIGN);

  /* Get the stack size from the attribute if it is set.  Otherwise we
     use the default we determined at start time.  */
  if (attr->stacksize != 0)
    size = attr->stacksize;
  else
    {
      lll_lock (__default_pthread_attr_lock, LLL_PRIVATE);
      size = __default_pthread_attr.stacksize;
      lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);
    }

  /* Get memory for the stack.  */
  if (__glibc_unlikely (attr->flags & ATTR_FLAG_STACKADDR))
    {
      uintptr_t adj;
      char *stackaddr = (char *) attr->stackaddr;

      /* Assume the same layout as the _STACK_GROWS_DOWN case, with struct
         pthread at the top of the stack block.  Later we adjust the guard
         location and stack address to match the _STACK_GROWS_UP case.  */
      if (_STACK_GROWS_UP)
        stackaddr += attr->stacksize;

      /* If the user also specified the size of the stack make sure it
         is large enough.  */
      if (attr->stacksize != 0
          && attr->stacksize < (__static_tls_size + MINIMAL_REST_STACK))
        return EINVAL;

      /* Adjust stack size for alignment of the TLS block.  */
#if TLS_TCB_AT_TP
      adj = ((uintptr_t) stackaddr - TLS_TCB_SIZE)
            & __static_tls_align_m1;
      assert (size > adj + TLS_TCB_SIZE);
#elif TLS_DTV_AT_TP
      adj = ((uintptr_t) stackaddr - __static_tls_size)
            & __static_tls_align_m1;
      assert (size > adj);
#endif
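
      /* ADJ is the misalignment of the natural TCB position (STACKADDR
         minus the TCB size or the static TLS size); subtracting it below
         places the TCB, and with it the static TLS block, on a
         __static_tls_align boundary inside the user-supplied memory.  */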

      /* The user provided stack memory needs to be cleared.  */
      memset (pd, '\0', sizeof (struct pthread));

      /* The first TSD block is included in the TCB.  */
      pd->specific[0] = pd->specific_1stblock;

      /* Remember the stack-related values.  */
      pd->stackblock = (char *) stackaddr - size;
      pd->stackblock_size = size;

      /* This is a user-provided stack.  It will not be queued in the
         stack cache nor will the memory (except the TLS memory) be freed.  */
      pd->user_stack = true;

      /* This is at least the second thread.  */
      pd->header.multiple_threads = 1;
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
      __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
#endif

#ifndef __ASSUME_PRIVATE_FUTEX
      /* The thread must know whether private futexes are supported.  */
      pd->header.private_futex = THREAD_GETMEM (THREAD_SELF,
                                                header.private_futex);
#endif

#ifdef NEED_DL_SYSINFO
      SETUP_THREAD_SYSINFO (pd);
#endif

      /* The process ID is also the same as that of the caller.  */
      pd->pid = THREAD_GETMEM (THREAD_SELF, pid);

      /* Don't allow setxid until cloned.  */
      pd->setxid_futex = -1;

      /* Allocate the DTV for this thread.  */
      if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
        {
          /* Something went wrong.  */
          assert (errno == ENOMEM);
          return errno;
        }


      /* Prepare to modify global data.  */
      lll_lock (stack_cache_lock, LLL_PRIVATE);

      /* And add to the list of stacks in use.  */
      list_add (&pd->list, &__stack_user);

      lll_unlock (stack_cache_lock, LLL_PRIVATE);
    }
  else
    {
      /* Allocate some anonymous memory.  If possible use the cache.  */
      size_t guardsize;
      size_t reqsize;
      void *mem;
      const int prot = (PROT_READ | PROT_WRITE
                        | ((GL(dl_stack_flags) & PF_X) ? PROT_EXEC : 0));

#if COLORING_INCREMENT != 0
      /* Add one more page for stack coloring.  Don't do it for stacks
         with 16 times pagesize or larger.  This might just cause
         unnecessary misalignment.  */
      if (size <= 16 * pagesize_m1)
        size += pagesize_m1 + 1;
#endif

      /* Adjust the stack size for alignment.  */
      size &= ~__static_tls_align_m1;
      assert (size != 0);

      /* Make sure the size of the stack is enough for the guard and
         eventually the thread descriptor.  */
      guardsize = (attr->guardsize + pagesize_m1) & ~pagesize_m1;
      if (__builtin_expect (size < ((guardsize + __static_tls_size
                                     + MINIMAL_REST_STACK + pagesize_m1)
                                    & ~pagesize_m1),
                            0))
        /* The stack is too small (or the guard too large).  */
        return EINVAL;

      /* Try to get a stack from the cache.  */
      reqsize = size;
      pd = get_cached_stack (&size, &mem);
      if (pd == NULL)
        {
          /* To avoid aliasing effects on a larger scale than pages we
             adjust the allocated stack size if necessary.  This way
             allocations directly following each other will not have
             aliasing problems.  */
#if MULTI_PAGE_ALIASING != 0
          if ((size % MULTI_PAGE_ALIASING) == 0)
            size += pagesize_m1 + 1;
#endif

          mem = mmap (NULL, size, prot,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);

          if (__glibc_unlikely (mem == MAP_FAILED))
            return errno;

          /* SIZE is guaranteed to be greater than zero.
             So we can never get a null pointer back from mmap.  */
          assert (mem != NULL);

#if COLORING_INCREMENT != 0
          /* Atomically increment NCREATED.  */
          unsigned int ncreated = atomic_increment_val (&nptl_ncreated);

          /* We chose the offset for coloring by incrementing it for
             every new thread by a fixed amount.  The offset is used
             modulo the page size.  Even if coloring would be better
             relative to higher alignment values it makes no sense to
             do it since the mmap() interface does not allow us to
             specify any alignment for the returned memory block.  */
          size_t coloring = (ncreated * COLORING_INCREMENT) & pagesize_m1;

          /* Make sure the coloring offset does not disturb the alignment
             of the TCB and static TLS block.  */
          if (__glibc_unlikely ((coloring & __static_tls_align_m1) != 0))
            coloring = (((coloring + __static_tls_align_m1)
                         & ~(__static_tls_align_m1))
                        & ~pagesize_m1);
#else
          /* Unless specified we do not make any adjustments.  */
# define coloring 0
#endif

          /* Place the thread descriptor at the end of the stack.  */
#if TLS_TCB_AT_TP
          pd = (struct pthread *) ((char *) mem + size - coloring) - 1;
#elif TLS_DTV_AT_TP
          pd = (struct pthread *) ((((uintptr_t) mem + size - coloring
                                     - __static_tls_size)
                                    & ~__static_tls_align_m1)
                                   - TLS_PRE_TCB_SIZE);
#endif
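
          /* At this point, with TLS_TCB_AT_TP, the descriptor (which
             doubles as the TCB) sits at the very top of the new mapping
             with the static TLS block directly below it; with
             TLS_DTV_AT_TP the descriptor sits just below the static TLS
             block instead.  For a downward-growing stack everything
             beneath that, apart from the guard area set up later, is
             usable stack.  */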

          /* Remember the stack-related values.  */
          pd->stackblock = mem;
          pd->stackblock_size = size;

          /* The first block of the thread-specific data array is
             contained in the thread descriptor.  This address will not
             change for the lifetime of this descriptor.  */
          pd->specific[0] = pd->specific_1stblock;

          /* This is at least the second thread.  */
          pd->header.multiple_threads = 1;
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
          __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
#endif

#ifndef __ASSUME_PRIVATE_FUTEX
          /* The thread must know whether private futexes are supported.  */
          pd->header.private_futex = THREAD_GETMEM (THREAD_SELF,
                                                    header.private_futex);
#endif

#ifdef NEED_DL_SYSINFO
          SETUP_THREAD_SYSINFO (pd);
#endif

          /* Don't allow setxid until cloned.  */
          pd->setxid_futex = -1;

          /* The process ID is also the same as that of the caller.  */
          pd->pid = THREAD_GETMEM (THREAD_SELF, pid);

          /* Allocate the DTV for this thread.  */
          if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
            {
              /* Something went wrong.  */
              assert (errno == ENOMEM);

              /* Free the stack memory we just allocated.  */
              (void) munmap (mem, size);

              return errno;
            }


          /* Prepare to modify global data.  */
          lll_lock (stack_cache_lock, LLL_PRIVATE);

          /* And add to the list of stacks in use.  */
          stack_list_add (&pd->list, &stack_used);

          lll_unlock (stack_cache_lock, LLL_PRIVATE);


          /* There might have been a race.  Another thread might have
             caused the stacks to get exec permission while this new
             stack was prepared.  Detect if this was possible and
             change the permission if necessary.  */
          if (__builtin_expect ((GL(dl_stack_flags) & PF_X) != 0
                                && (prot & PROT_EXEC) == 0, 0))
            {
              int err = change_stack_perm (pd
#ifdef NEED_SEPARATE_REGISTER_STACK
                                           , ~pagesize_m1
#endif
                                           );
              if (err != 0)
                {
                  /* Free the stack memory we just allocated.  */
                  (void) munmap (mem, size);

                  return err;
                }
            }
        }

      /* Note that all of the stack and the thread descriptor is
         zeroed.  This means we do not have to initialize fields
         with initial value zero.  This is specifically true for
         the 'tid' field which is always set back to zero once the
         stack is not used anymore and for the 'guardsize' field
         which will be read next.  */
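
      /* The guard area is a PROT_NONE window placed at the end the stack
         grows towards (or, for a split register/memory stack, in the
         middle of the block), so that overrunning the usable stack faults
         instead of silently corrupting neighbouring memory.  */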

      /* Create or resize the guard area if necessary.  */
      if (__glibc_unlikely (guardsize > pd->guardsize))
        {
#ifdef NEED_SEPARATE_REGISTER_STACK
          char *guard = mem + (((size - guardsize) / 2) & ~pagesize_m1);
#elif _STACK_GROWS_DOWN
          char *guard = mem;
#elif _STACK_GROWS_UP
          char *guard = (char *) (((uintptr_t) pd - guardsize) & ~pagesize_m1);
#endif
          if (mprotect (guard, guardsize, PROT_NONE) != 0)
            {
            mprot_error:
              lll_lock (stack_cache_lock, LLL_PRIVATE);

              /* Remove the thread from the list.  */
              stack_list_del (&pd->list);

              lll_unlock (stack_cache_lock, LLL_PRIVATE);

              /* Get rid of the TLS block we allocated.  */
              _dl_deallocate_tls (TLS_TPADJ (pd), false);

              /* Free the stack memory regardless of whether the size
                 of the cache is over the limit or not.  If this piece
                 of memory caused problems we better do not use it
                 anymore.  Uh, and we ignore possible errors.  There
                 is nothing we could do.  */
              (void) munmap (mem, size);

              return errno;
            }

          pd->guardsize = guardsize;
        }
      else if (__builtin_expect (pd->guardsize - guardsize > size - reqsize,
                                 0))
        {
          /* The old guard area is too large.  */

#ifdef NEED_SEPARATE_REGISTER_STACK
          char *guard = mem + (((size - guardsize) / 2) & ~pagesize_m1);
          char *oldguard = mem + (((size - pd->guardsize) / 2) & ~pagesize_m1);

          if (oldguard < guard
              && mprotect (oldguard, guard - oldguard, prot) != 0)
            goto mprot_error;

          if (mprotect (guard + guardsize,
                        oldguard + pd->guardsize - guard - guardsize,
                        prot) != 0)
            goto mprot_error;
#elif _STACK_GROWS_DOWN
          if (mprotect ((char *) mem + guardsize, pd->guardsize - guardsize,
                        prot) != 0)
            goto mprot_error;
#elif _STACK_GROWS_UP
          if (mprotect ((char *) pd - pd->guardsize,
                        pd->guardsize - guardsize, prot) != 0)
            goto mprot_error;
#endif

          pd->guardsize = guardsize;
        }
      /* The pthread_getattr_np() calls need to get passed the size
         requested in the attribute, regardless of how large the
         actually used guardsize is.  */
      pd->reported_guardsize = guardsize;
    }

  /* Initialize the lock.  We have to do this unconditionally since the
     stillborn thread could be canceled while the lock is taken.  */
  pd->lock = LLL_LOCK_INITIALIZER;

  /* The robust mutex lists also need to be initialized
     unconditionally because the cleanup for the previous stack owner
     might have happened in the kernel.  */
  pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
                                  - offsetof (pthread_mutex_t,
                                              __data.__list.__next));
  pd->robust_head.list_op_pending = NULL;
#ifdef __PTHREAD_MUTEX_HAVE_PREV
  pd->robust_prev = &pd->robust_head;
#endif
  pd->robust_head.list = &pd->robust_head;
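
  /* An empty robust mutex list is represented by the head pointing to
     itself, and FUTEX_OFFSET tells the kernel's robust futex handling
     where the lock word sits relative to each list entry, so waiters can
     be woken if this thread dies while holding robust mutexes.  */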

  /* We place the thread descriptor at the end of the stack.  */
  *pdp = pd;

#if _STACK_GROWS_DOWN
  void *stacktop;

# if TLS_TCB_AT_TP
  /* The stack begins before the TCB and the static TLS block.  */
  stacktop = ((char *) (pd + 1) - __static_tls_size);
# elif TLS_DTV_AT_TP
  stacktop = (char *) (pd - 1);
# endif

# ifdef NEED_SEPARATE_REGISTER_STACK
  *stack = pd->stackblock;
  *stacksize = stacktop - *stack;
# else
  *stack = stacktop;
# endif
#else
  *stack = pd->stackblock;
#endif

  return 0;
}
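
/* allocate_stack is reached from pthread_create via the ALLOCATE_STACK
   macro defined at the top of this file.  The descriptor returned in
   *PDP always lives inside the stack block it describes, which is why
   __deallocate_stack below can recycle descriptor and stack in one
   go.  */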


void
internal_function
__deallocate_stack (struct pthread *pd)
{
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  /* Remove the thread from the list of threads with user defined
     stacks.  */
  stack_list_del (&pd->list);

  /* Not much to do.  Just free the mmap()ed memory.  Note that we do
     not reset the 'used' flag in the 'tid' field.  This is done by
     the kernel.  If no thread has been created yet this field is
     still zero.  */
  if (__glibc_likely (! pd->user_stack))
    (void) queue_stack (pd);
  else
    /* Free the memory associated with the ELF TLS.  */
    _dl_deallocate_tls (TLS_TPADJ (pd), false);

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
}


int
internal_function
__make_stacks_executable (void **stack_endp)
{
  /* First the main thread's stack.  */
  int err = _dl_make_stack_executable (stack_endp);
  if (err != 0)
    return err;

#ifdef NEED_SEPARATE_REGISTER_STACK
  const size_t pagemask = ~(__getpagesize () - 1);
#endif

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      err = change_stack_perm (list_entry (runp, struct pthread, list)
#ifdef NEED_SEPARATE_REGISTER_STACK
                               , pagemask
#endif
                               );
      if (err != 0)
        break;
    }

  /* Also change the permission for the currently unused stacks.  This
     might be wasted time but better spend it here than adding a check
     in the fast path.  */
  if (err == 0)
    list_for_each (runp, &stack_cache)
      {
        err = change_stack_perm (list_entry (runp, struct pthread, list)
#ifdef NEED_SEPARATE_REGISTER_STACK
                                 , pagemask
#endif
                                 );
        if (err != 0)
          break;
      }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  return err;
}


/* In case of a fork() call the memory allocation in the child will be
   the same but only one thread is running.  All stacks except that of
   the one running thread are not used anymore.  We have to recycle
   them.  */
void
__reclaim_stacks (void)
{
  struct pthread *self = (struct pthread *) THREAD_SELF;

  /* No locking necessary.  The caller is the only stack in use.  But
     we have to be aware that we might have interrupted a list
     operation.  */

  if (in_flight_stack != 0)
    {
      bool add_p = in_flight_stack & 1;
      list_t *elem = (list_t *) (in_flight_stack & ~(uintptr_t) 1);

      if (add_p)
        {
          /* We always add at the beginning of the list.  So in this case we
             only need to check the beginning of these lists to see if the
             pointers at the head of the list are inconsistent.  */
          list_t *l = NULL;

          if (stack_used.next->prev != &stack_used)
            l = &stack_used;
          else if (stack_cache.next->prev != &stack_cache)
            l = &stack_cache;

          if (l != NULL)
            {
              assert (l->next->prev == elem);
              elem->next = l->next;
              elem->prev = l;
              l->next = elem;
            }
        }
      else
        {
          /* We can simply always replay the delete operation.  */
          elem->next->prev = elem->prev;
          elem->prev->next = elem->next;
        }
    }

  /* Mark all stacks except the still running one as free.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *curp = list_entry (runp, struct pthread, list);
      if (curp != self)
        {
          /* This marks the stack as free.  */
          curp->tid = 0;

          /* The PID field must be initialized for the new process.  */
          curp->pid = self->pid;

          /* Account for the size of the stack.  */
          stack_cache_actsize += curp->stackblock_size;

          if (curp->specific_used)
            {
              /* Clear the thread-specific data.  */
              memset (curp->specific_1stblock, '\0',
                      sizeof (curp->specific_1stblock));

              curp->specific_used = false;

              for (size_t cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
                if (curp->specific[cnt] != NULL)
                  {
                    memset (curp->specific[cnt], '\0',
                            sizeof (curp->specific_1stblock));

                    /* We have allocated the block which we do not
                       free here so re-set the bit.  */
                    curp->specific_used = true;
                  }
            }
        }
    }

  /* Reset the PIDs in any cached stacks.  */
  list_for_each (runp, &stack_cache)
    {
      struct pthread *curp = list_entry (runp, struct pthread, list);
      curp->pid = self->pid;
    }

  /* Add the stack of all running threads to the cache.  */
  list_splice (&stack_used, &stack_cache);

  /* Remove the entry for the current thread from the cache list
     and add it to the list of running threads.  Which of the two
     lists is decided by the user_stack flag.  */
  stack_list_del (&self->list);

  /* Re-initialize the lists for all the threads.  */
  INIT_LIST_HEAD (&stack_used);
  INIT_LIST_HEAD (&__stack_user);

  if (__glibc_unlikely (THREAD_GETMEM (self, user_stack)))
    list_add (&self->list, &__stack_user);
  else
    list_add (&self->list, &stack_used);

  /* There is one thread running.  */
  __nptl_nthreads = 1;

  in_flight_stack = 0;

  /* Initialize locks.  */
  stack_cache_lock = LLL_LOCK_INITIALIZER;
  __default_pthread_attr_lock = LLL_LOCK_INITIALIZER;
}


#if HP_TIMING_AVAIL
# undef __find_thread_by_id
/* Find a thread given the thread ID.  */
attribute_hidden
struct pthread *
__find_thread_by_id (pid_t tid)
{
  struct pthread *result = NULL;

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *curp;

      curp = list_entry (runp, struct pthread, list);

      if (curp->tid == tid)
        {
          result = curp;
          goto out;
        }
    }

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    {
      struct pthread *curp;

      curp = list_entry (runp, struct pthread, list);

      if (curp->tid == tid)
        {
          result = curp;
          goto out;
        }
    }

 out:
  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  return result;
}
#endif
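

/* The kernel's set*id system calls change the credentials of the
   calling thread only, while POSIX requires the change to apply to the
   whole process.  The SIGSETXID machinery below therefore marks every
   other thread, signals it so that it executes the same system call
   itself, and waits until all of them have reported back.  */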
#ifdef SIGSETXID
static void
internal_function
setxid_mark_thread (struct xid_command *cmdp, struct pthread *t)
{
  int ch;

  /* Wait until this thread is cloned.  */
  if (t->setxid_futex == -1
      && ! atomic_compare_and_exchange_bool_acq (&t->setxid_futex, -2, -1))
    do
      futex_wait_simple (&t->setxid_futex, -2, FUTEX_PRIVATE);
    while (t->setxid_futex == -2);

  /* Don't let the thread exit before the setxid handler runs.  */
  t->setxid_futex = 0;

  do
    {
      ch = t->cancelhandling;

      /* If the thread is exiting right now, ignore it.  */
      if ((ch & EXITING_BITMASK) != 0)
        {
          /* Release the futex if there is no other setxid in
             progress.  */
          if ((ch & SETXID_BITMASK) == 0)
            {
              t->setxid_futex = 1;
              futex_wake (&t->setxid_futex, 1, FUTEX_PRIVATE);
            }
          return;
        }
    }
  while (atomic_compare_and_exchange_bool_acq (&t->cancelhandling,
                                               ch | SETXID_BITMASK, ch));
}
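
/* SETXID_FUTEX states as used here: -1 means the thread is not yet
   fully cloned, -2 means a setxid operation is waiting for the clone to
   finish, 0 keeps the thread from exiting while the handler runs, and 1
   releases it again.  */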

static void
internal_function
setxid_unmark_thread (struct xid_command *cmdp, struct pthread *t)
{
  int ch;

  do
    {
      ch = t->cancelhandling;
      if ((ch & SETXID_BITMASK) == 0)
        return;
    }
  while (atomic_compare_and_exchange_bool_acq (&t->cancelhandling,
                                               ch & ~SETXID_BITMASK, ch));

  /* Release the futex just in case.  */
  t->setxid_futex = 1;
  futex_wake (&t->setxid_futex, 1, FUTEX_PRIVATE);
}


static int
internal_function
setxid_signal_thread (struct xid_command *cmdp, struct pthread *t)
{
  if ((t->cancelhandling & SETXID_BITMASK) == 0)
    return 0;

  int val;
  INTERNAL_SYSCALL_DECL (err);
  val = INTERNAL_SYSCALL (tgkill, err, 3, THREAD_GETMEM (THREAD_SELF, pid),
                          t->tid, SIGSETXID);

  /* If this failed, the thread must not have started yet or else have
     already exited.  */
  if (!INTERNAL_SYSCALL_ERROR_P (val, err))
    {
      atomic_increment (&cmdp->cntr);
      return 1;
    }
  else
    return 0;
}


/* Check for consistency across set*id system call results.  The abort
   should not happen as long as all privilege changes happen through
   the glibc wrappers.  ERROR must be 0 (no error) or an errno
   code.  */
void
attribute_hidden
__nptl_setxid_error (struct xid_command *cmdp, int error)
{
  do
    {
      int olderror = cmdp->error;
      if (olderror == error)
        break;
      if (olderror != -1)
        /* Mismatch between current and previous results.  */
        abort ();
    }
  while (atomic_compare_and_exchange_bool_acq (&cmdp->error, error, -1));
}


int
attribute_hidden
__nptl_setxid (struct xid_command *cmdp)
{
  int signalled;
  int result;
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  __xidcmd = cmdp;
  cmdp->cntr = 0;
  cmdp->error = -1;

  struct pthread *self = THREAD_SELF;

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_mark_thread (cmdp, t);
    }

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_mark_thread (cmdp, t);
    }

  /* Iterate until we don't succeed in signalling anyone.  That means
     we have gotten all running threads, and their children will be
     automatically correct once started.  */
  do
    {
      signalled = 0;

      list_for_each (runp, &stack_used)
        {
          struct pthread *t = list_entry (runp, struct pthread, list);
          if (t == self)
            continue;

          signalled += setxid_signal_thread (cmdp, t);
        }

      list_for_each (runp, &__stack_user)
        {
          struct pthread *t = list_entry (runp, struct pthread, list);
          if (t == self)
            continue;

          signalled += setxid_signal_thread (cmdp, t);
        }

      int cur = cmdp->cntr;
      while (cur != 0)
        {
          futex_wait_simple ((unsigned int *) &cmdp->cntr, cur,
                             FUTEX_PRIVATE);
          cur = cmdp->cntr;
        }
    }
  while (signalled != 0);

  /* Clean up flags, so that no thread blocks during exit waiting
     for a signal which will never come.  */
  list_for_each (runp, &stack_used)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_unmark_thread (cmdp, t);
    }

  list_for_each (runp, &__stack_user)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_unmark_thread (cmdp, t);
    }

  /* This must be last, otherwise the current thread might not have
     permissions to send the SIGSETXID signal to the other threads.  */
  INTERNAL_SYSCALL_DECL (err);
  result = INTERNAL_SYSCALL_NCS (cmdp->syscall_no, err, 3,
                                 cmdp->id[0], cmdp->id[1], cmdp->id[2]);
  int error = 0;
  if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (result, err)))
    {
      error = INTERNAL_SYSCALL_ERRNO (result, err);
      __set_errno (error);
      result = -1;
    }
  __nptl_setxid_error (cmdp, error);

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
  return result;
}
#endif  /* SIGSETXID.  */
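

/* When dlopen loads an object whose TLS is accessed with the static
   (initial-exec or local-exec) model, the new TLS initialization image
   has to be copied into the static TLS area of every thread that
   already exists.  __pthread_init_static_tls below walks both thread
   lists and does that for each of them.  */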

static inline void __attribute__((always_inline))
init_one_static_tls (struct pthread *curp, struct link_map *map)
{
# if TLS_TCB_AT_TP
  void *dest = (char *) curp - map->l_tls_offset;
# elif TLS_DTV_AT_TP
  void *dest = (char *) curp + map->l_tls_offset + TLS_PRE_TCB_SIZE;
# else
#  error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif

  /* We cannot delay the initialization of the Static TLS area, since
     it can be accessed with LE or IE, but since the DTV is only used
     by GD and LD, we can delay its update to avoid a race.  */
  memset (__mempcpy (dest, map->l_tls_initimage, map->l_tls_initimage_size),
          '\0', map->l_tls_blocksize - map->l_tls_initimage_size);
}

void
attribute_hidden
__pthread_init_static_tls (struct link_map *map)
{
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    init_one_static_tls (list_entry (runp, struct pthread, list), map);

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    init_one_static_tls (list_entry (runp, struct pthread, list), map);

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
}
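

/* __wait_lookup_done implements the waiting side of the dynamic
   linker's global scope (GSCOPE) protocol: before an old global search
   scope may be freed, we wait here until every thread that published
   THREAD_GSCOPE_FLAG_USED has finished its lookup.  Flipping the flag
   to THREAD_GSCOPE_FLAG_WAIT tells that thread to wake us via the futex
   when it is done.  */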
void
attribute_hidden
__wait_lookup_done (void)
{
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  struct pthread *self = THREAD_SELF;

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self || t->header.gscope_flag == THREAD_GSCOPE_FLAG_UNUSED)
        continue;

      int *const gscope_flagp = &t->header.gscope_flag;

      /* We have to wait until this thread is done with the global
         scope.  First tell the thread that we are waiting and
         possibly have to be woken.  */
      if (atomic_compare_and_exchange_bool_acq (gscope_flagp,
                                                THREAD_GSCOPE_FLAG_WAIT,
                                                THREAD_GSCOPE_FLAG_USED))
        continue;

      do
        futex_wait_simple ((unsigned int *) gscope_flagp,
                           THREAD_GSCOPE_FLAG_WAIT, FUTEX_PRIVATE);
      while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT);
    }

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self || t->header.gscope_flag == THREAD_GSCOPE_FLAG_UNUSED)
        continue;

      int *const gscope_flagp = &t->header.gscope_flag;

      /* We have to wait until this thread is done with the global
         scope.  First tell the thread that we are waiting and
         possibly have to be woken.  */
      if (atomic_compare_and_exchange_bool_acq (gscope_flagp,
                                                THREAD_GSCOPE_FLAG_WAIT,
                                                THREAD_GSCOPE_FLAG_USED))
        continue;

      do
        futex_wait_simple ((unsigned int *) gscope_flagp,
                           THREAD_GSCOPE_FLAG_WAIT, FUTEX_PRIVATE);
      while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT);
    }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
}