* allocatestack.c (allocate_stack): Initialize robust_list.
/* Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <dl-sysdep.h>
#include <tls.h>
#include <lowlevellock.h>
#ifndef NEED_SEPARATE_REGISTER_STACK

/* Most architectures have exactly one stack pointer.  Some have more.  */
# define STACK_VARIABLES void *stackaddr = NULL

/* How to pass the values to the 'create_thread' function.  */
# define STACK_VARIABLES_ARGS stackaddr

/* How to declare the function which gets these parameters.  */
# define STACK_VARIABLES_PARMS void *stackaddr

/* How to declare allocate_stack.  */
# define ALLOCATE_STACK_PARMS void **stack

/* This is how the function is called.  We do it this way to allow
   other variants of the function to have more parameters.  */
# define ALLOCATE_STACK(attr, pd) allocate_stack (attr, pd, &stackaddr)

#else

/* We need two stacks.  The kernel will place them but we have to tell
   the kernel about the size of the reserved address space.  */
# define STACK_VARIABLES void *stackaddr = NULL; size_t stacksize = 0

/* How to pass the values to the 'create_thread' function.  */
# define STACK_VARIABLES_ARGS stackaddr, stacksize

/* How to declare the function which gets these parameters.  */
# define STACK_VARIABLES_PARMS void *stackaddr, size_t stacksize

/* How to declare allocate_stack.  */
# define ALLOCATE_STACK_PARMS void **stack, size_t *stacksize

/* This is how the function is called.  We do it this way to allow
   other variants of the function to have more parameters.  */
# define ALLOCATE_STACK(attr, pd) \
  allocate_stack (attr, pd, &stackaddr, &stacksize)

#endif
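
/* The two-stack variant above exists for targets such as IA-64, where
   besides the normal memory stack the processor spills stacked
   registers to a separate register backing store.  Both areas live in
   a single mmap'ed reservation, which is why only a base address and a
   total size have to be passed around.  */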
/* Default alignment of stack.  */
#ifndef STACK_ALIGN
# define STACK_ALIGN __alignof__ (long double)
#endif

/* Default value for minimal stack size after allocating thread
   descriptor and guard.  */
#ifndef MINIMAL_REST_STACK
# define MINIMAL_REST_STACK 4096
#endif

/* Let the architecture add some flags to the mmap() call used to
   allocate stacks.  */
#ifndef ARCH_MAP_FLAGS
# define ARCH_MAP_FLAGS 0
#endif

/* This yields the pointer that TLS support code calls the thread pointer.  */
#if TLS_TCB_AT_TP
# define TLS_TPADJ(pd) (pd)
#elif TLS_DTV_AT_TP
# define TLS_TPADJ(pd) ((struct pthread *)((char *) (pd) + TLS_PRE_TCB_SIZE))
#endif
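
/* In the TLS_TCB_AT_TP layout (TLS variant II; x86, for example) the
   thread pointer points directly at the struct pthread and the static
   TLS block lies below it, so no adjustment is needed.  In the
   TLS_DTV_AT_TP layout (variant I; PowerPC, for example) struct pthread
   lives TLS_PRE_TCB_SIZE bytes below the thread pointer, with the
   static TLS block above it, hence the offset added above.  */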
/* Cache handling for not-yet freed stacks.  */

/* Maximum size of the cache, in bytes.  */
static size_t stack_cache_maxsize = 40 * 1024 * 1024; /* 40 MiB by default.  */
static size_t stack_cache_actsize;

/* Mutex protecting the stack cache, the stack lists, and the cache
   size accounting.  */
static lll_lock_t stack_cache_lock = LLL_LOCK_INITIALIZER;

/* List of queued stack frames.  */
static LIST_HEAD (stack_cache);

/* List of the stacks in use.  */
static LIST_HEAD (stack_used);

/* List of the threads with user provided stacks in use.  No need to
   initialize this, since it's done in __pthread_initialize_minimal.  */
list_t __stack_user __attribute__ ((nocommon));
hidden_data_def (__stack_user)

#if COLORING_INCREMENT != 0
/* Number of threads created.  */
static unsigned int nptl_ncreated;
#endif
/* Check whether the stack is still used or not.  */
#define FREE_P(descr) ((descr)->tid <= 0)


/* We create a doubly linked list of all cache entries.  Doubly linked
   because this allows removing entries from the end.  */


/* Get a stack frame from the cache.  We have to match by size since
   some blocks might be too small or far too large.  */
static struct pthread *
get_cached_stack (size_t *sizep, void **memp)
{
  size_t size = *sizep;
  struct pthread *result = NULL;
  list_t *entry;

  lll_lock (stack_cache_lock);

  /* Search the cache for a matching entry.  We search for the
     smallest stack which has at least the required size.  Note that
     in normal situations the size of all allocated stacks is the
     same.  At the very least there are only a few different sizes.
     Therefore this loop will exit early most of the time with an
     exact match.  */
  list_for_each (entry, &stack_cache)
    {
      struct pthread *curr;

      curr = list_entry (entry, struct pthread, list);
      if (FREE_P (curr) && curr->stackblock_size >= size)
	{
	  if (curr->stackblock_size == size)
	    {
	      result = curr;
	      break;
	    }

	  if (result == NULL
	      || result->stackblock_size > curr->stackblock_size)
	    result = curr;
	}
    }

  if (__builtin_expect (result == NULL, 0)
      /* Make sure the size difference is not too excessive.  In that
	 case we do not use the block.  */
      || __builtin_expect (result->stackblock_size > 4 * size, 0))
    {
      /* Release the lock.  */
      lll_unlock (stack_cache_lock);

      return NULL;
    }

  /* Dequeue the entry.  */
  list_del (&result->list);

  /* And add to the list of stacks in use.  */
  list_add (&result->list, &stack_used);

  /* And decrease the cache size.  */
  stack_cache_actsize -= result->stackblock_size;

  /* Release the lock early.  */
  lll_unlock (stack_cache_lock);

  /* Report size and location of the stack to the caller.  */
  *sizep = result->stackblock_size;
  *memp = result->stackblock;

  /* Cancellation handling is back to the default.  */
  result->cancelhandling = 0;
  result->cleanup = NULL;

  /* No pending event.  */
  result->nextevent = NULL;

  /* Clear the DTV.  */
  dtv_t *dtv = GET_DTV (TLS_TPADJ (result));
  memset (dtv, '\0', (dtv[-1].counter + 1) * sizeof (dtv_t));

  /* Re-initialize the TLS.  */
  _dl_allocate_tls_init (TLS_TPADJ (result));

  return result;
}
/* Add a stack frame which is not used anymore to the cache.  Must be
   called with the cache lock held.  */
static inline void
__attribute ((always_inline))
queue_stack (struct pthread *stack)
{
  /* We unconditionally add the stack to the list.  The memory may
     still be in use but it will not be reused until the kernel marks
     the stack as not used anymore.  */
  list_add (&stack->list, &stack_cache);

  stack_cache_actsize += stack->stackblock_size;
  if (__builtin_expect (stack_cache_actsize > stack_cache_maxsize, 0))
    {
      /* We reduce the size of the cache.  Remove the last entries
	 until the size is below the limit.  */
      list_t *entry;
      list_t *prev;

      /* Search from the end of the list.  */
      list_for_each_prev_safe (entry, prev, &stack_cache)
	{
	  struct pthread *curr;

	  curr = list_entry (entry, struct pthread, list);
	  if (FREE_P (curr))
	    {
	      /* Unlink the block.  */
	      list_del (entry);

	      /* Account for the freed memory.  */
	      stack_cache_actsize -= curr->stackblock_size;

	      /* Free the memory associated with the ELF TLS.  */
	      _dl_deallocate_tls (TLS_TPADJ (curr), false);

	      /* Remove this block.  This should never fail.  If it
		 does something is really wrong.  */
	      if (munmap (curr->stackblock, curr->stackblock_size) != 0)
		abort ();

	      /* Maybe we have freed enough.  */
	      if (stack_cache_actsize <= stack_cache_maxsize)
		break;
	    }
	}
    }
}
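
/* A note on the NEED_SEPARATE_REGISTER_STACK case in change_stack_perm
   below: the guard is placed in the middle of the stack block (see the
   guard handling in allocate_stack), presumably because the memory
   stack grows downward from the top of the block while the register
   backing store grows upward from the bottom.  The computation below
   therefore re-protects the region above the mid-block guard.  */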
static int
internal_function
change_stack_perm (struct pthread *pd
#ifdef NEED_SEPARATE_REGISTER_STACK
		   , size_t pagemask
#endif
		   )
{
#ifdef NEED_SEPARATE_REGISTER_STACK
  void *stack = (pd->stackblock
		 + (((((pd->stackblock_size - pd->guardsize) / 2)
		      & pagemask) + pd->guardsize) & pagemask));
  size_t len = pd->stackblock + pd->stackblock_size - stack;
#else
  void *stack = pd->stackblock + pd->guardsize;
  size_t len = pd->stackblock_size - pd->guardsize;
#endif
  if (mprotect (stack, len, PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
    return errno;

  return 0;
}
static int
allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
		ALLOCATE_STACK_PARMS)
{
  struct pthread *pd;
  size_t size;
  size_t pagesize_m1 = __getpagesize () - 1;
  void *stacktop;

  assert (attr != NULL);
  assert (powerof2 (pagesize_m1 + 1));
  assert (TCB_ALIGNMENT >= STACK_ALIGN);

  /* Get the stack size from the attribute if it is set.  Otherwise we
     use the default we determined at start time.  */
  size = attr->stacksize ?: __default_stacksize;

  /* Get memory for the stack.  */
  if (__builtin_expect (attr->flags & ATTR_FLAG_STACKADDR, 0))
    {
      uintptr_t adj;

      /* If the user also specified the size of the stack make sure it
	 is large enough.  */
      if (attr->stacksize != 0
	  && attr->stacksize < (__static_tls_size + MINIMAL_REST_STACK))
	return EINVAL;

      /* Adjust stack size for alignment of the TLS block.  */
#if TLS_TCB_AT_TP
      adj = ((uintptr_t) attr->stackaddr - TLS_TCB_SIZE)
	    & __static_tls_align_m1;
      assert (size > adj + TLS_TCB_SIZE);
#elif TLS_DTV_AT_TP
      adj = ((uintptr_t) attr->stackaddr - __static_tls_size)
	    & __static_tls_align_m1;
      assert (size > adj);
#endif

      /* The user provided some memory.  Let's hope it matches the
	 size...  We do not allocate guard pages if the user provided
	 the stack.  It is the user's responsibility to do this if it
	 is wanted.  */
#if TLS_TCB_AT_TP
      pd = (struct pthread *) ((uintptr_t) attr->stackaddr
			       - TLS_TCB_SIZE - adj);
#elif TLS_DTV_AT_TP
      pd = (struct pthread *) (((uintptr_t) attr->stackaddr
				- __static_tls_size - adj)
			       - TLS_PRE_TCB_SIZE);
#endif

      /* The user-provided stack memory needs to be cleared.  */
      memset (pd, '\0', sizeof (struct pthread));

      /* The first TSD block is included in the TCB.  */
      pd->specific[0] = pd->specific_1stblock;

      /* Remember the stack-related values.  */
      pd->stackblock = (char *) attr->stackaddr - size;
      pd->stackblock_size = size;

      /* This is a user-provided stack.  It will not be queued in the
	 stack cache nor will the memory (except the TLS memory) be freed.  */
      pd->user_stack = true;

      /* This is at least the second thread.  */
      pd->header.multiple_threads = 1;
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
      __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
#endif

#ifdef NEED_DL_SYSINFO
      /* Copy the sysinfo value from the parent.  */
      THREAD_SYSINFO(pd) = THREAD_SELF_SYSINFO;
#endif

      /* The process ID is also the same as that of the caller.  */
      pd->pid = THREAD_GETMEM (THREAD_SELF, pid);

      /* List of robust mutexes.  */
#ifdef __PTHREAD_MUTEX_HAVE_PREV
      pd->robust_list.__prev = &pd->robust_list;
#endif
      pd->robust_list.__next = &pd->robust_list;
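
      /* NOTE: A robust list whose head points to itself is the empty
	 list in the kernel's robust-futex ABI.  The thread presumably
	 registers this head with the kernel via the set_robust_list
	 system call once it starts running; on thread exit the kernel
	 walks the list and marks any mutexes still linked into it as
	 owner-dead.  The anonymous-stack branch below performs the
	 same initialization.  */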
      /* Allocate the DTV for this thread.  */
      if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
	{
	  /* Something went wrong.  */
	  assert (errno == ENOMEM);
	  return EAGAIN;
	}


      /* Prepare to modify global data.  */
      lll_lock (stack_cache_lock);

      /* And add to the list of stacks in use.  */
      list_add (&pd->list, &__stack_user);

      lll_unlock (stack_cache_lock);
    }
  else
    {
      /* Allocate some anonymous memory.  If possible use the cache.  */
      size_t guardsize;
      size_t reqsize;
      void *mem;
      const int prot = (PROT_READ | PROT_WRITE
			| ((GL(dl_stack_flags) & PF_X) ? PROT_EXEC : 0));

#if COLORING_INCREMENT != 0
      /* Add one more page for stack coloring.  Don't do it for stacks
	 with 16 times pagesize or larger.  This might just cause
	 unnecessary misalignment.  */
      if (size <= 16 * pagesize_m1)
	size += pagesize_m1 + 1;
#endif

      /* Adjust the stack size for alignment.  */
      size &= ~__static_tls_align_m1;
      assert (size != 0);

      /* Make sure the size of the stack is enough for the guard and
	 possibly the thread descriptor.  */
      guardsize = (attr->guardsize + pagesize_m1) & ~pagesize_m1;
      if (__builtin_expect (size < ((guardsize + __static_tls_size
				     + MINIMAL_REST_STACK + pagesize_m1)
				    & ~pagesize_m1),
			    0))
	/* The stack is too small (or the guard too large).  */
	return EINVAL;

      /* Try to get a stack from the cache.  */
      reqsize = size;
      pd = get_cached_stack (&size, &mem);
      if (pd == NULL)
	{
	  /* To avoid aliasing effects on a larger scale than pages we
	     adjust the allocated stack size if necessary.  This way
	     allocations directly following each other will not have
	     aliasing problems.  */
#if MULTI_PAGE_ALIASING != 0
	  if ((size % MULTI_PAGE_ALIASING) == 0)
	    size += pagesize_m1 + 1;
#endif

	  mem = mmap (NULL, size, prot,
		      MAP_PRIVATE | MAP_ANONYMOUS | ARCH_MAP_FLAGS, -1, 0);

	  if (__builtin_expect (mem == MAP_FAILED, 0))
	    {
#ifdef ARCH_RETRY_MMAP
	      mem = ARCH_RETRY_MMAP (size);
	      if (__builtin_expect (mem == MAP_FAILED, 0))
#endif
		return errno;
	    }

	  /* SIZE is guaranteed to be greater than zero.
	     So we can never get a null pointer back from mmap.  */
	  assert (mem != NULL);

#if COLORING_INCREMENT != 0
	  /* Atomically increment NCREATED.  */
	  unsigned int ncreated = atomic_increment_val (&nptl_ncreated);

	  /* We choose the offset for coloring by incrementing it for
	     every new thread by a fixed amount.  The offset is used
	     modulo the page size.  Even if coloring would be better
	     relative to higher alignment values it makes no sense to
	     do it since the mmap() interface does not allow us to
	     specify any alignment for the returned memory block.  */
	  size_t coloring = (ncreated * COLORING_INCREMENT) & pagesize_m1;

	  /* Make sure the coloring offset does not disturb the alignment
	     of the TCB and static TLS block.  */
	  if (__builtin_expect ((coloring & __static_tls_align_m1) != 0, 0))
	    coloring = (((coloring + __static_tls_align_m1)
			 & ~(__static_tls_align_m1))
			& ~pagesize_m1);
#else
	  /* Unless specified we do not make any adjustments.  */
# define coloring 0
#endif
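
	  /* The rationale for the coloring offset computed above: all
	     thread stacks share one size and alignment, so without the
	     shift the hot spots of every stack (the descriptor and the
	     topmost frames) would compete for the same cache lines.
	     Staggering each new stack by COLORING_INCREMENT spreads
	     these accesses across the cache.  */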
	  /* Place the thread descriptor at the end of the stack.  */
#if TLS_TCB_AT_TP
	  pd = (struct pthread *) ((char *) mem + size - coloring) - 1;
#elif TLS_DTV_AT_TP
	  pd = (struct pthread *) ((((uintptr_t) mem + size - coloring
				    - __static_tls_size)
				    & ~__static_tls_align_m1)
				   - TLS_PRE_TCB_SIZE);
#endif

	  /* Remember the stack-related values.  */
	  pd->stackblock = mem;
	  pd->stackblock_size = size;

	  /* We allocated the first block of the thread-specific data
	     array.  This address will not change for the lifetime of
	     this descriptor.  */
	  pd->specific[0] = pd->specific_1stblock;

	  /* This is at least the second thread.  */
	  pd->header.multiple_threads = 1;
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
	  __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
#endif

#ifdef NEED_DL_SYSINFO
	  /* Copy the sysinfo value from the parent.  */
	  THREAD_SYSINFO(pd) = THREAD_SELF_SYSINFO;
#endif

	  /* The process ID is also the same as that of the caller.  */
	  pd->pid = THREAD_GETMEM (THREAD_SELF, pid);

	  /* List of robust mutexes.  */
#ifdef __PTHREAD_MUTEX_HAVE_PREV
	  pd->robust_list.__prev = &pd->robust_list;
#endif
	  pd->robust_list.__next = &pd->robust_list;

	  /* Allocate the DTV for this thread.  */
	  if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
	    {
	      /* Something went wrong.  */
	      assert (errno == ENOMEM);

	      /* Free the stack memory we just allocated.  */
	      (void) munmap (mem, size);

	      return EAGAIN;
	    }


	  /* Prepare to modify global data.  */
	  lll_lock (stack_cache_lock);

	  /* And add to the list of stacks in use.  */
	  list_add (&pd->list, &stack_used);

	  lll_unlock (stack_cache_lock);


	  /* There might have been a race.  Another thread might have
	     caused the stacks to get exec permission while this new
	     stack was prepared.  Detect if this was possible and
	     change the permission if necessary.  */
	  if (__builtin_expect ((GL(dl_stack_flags) & PF_X) != 0
				&& (prot & PROT_EXEC) == 0, 0))
	    {
	      int err = change_stack_perm (pd
#ifdef NEED_SEPARATE_REGISTER_STACK
					   , ~pagesize_m1
#endif
					   );
	      if (err != 0)
		{
		  /* Free the stack memory we just allocated.  */
		  (void) munmap (mem, size);

		  return err;
		}
	    }


	  /* Note that all of the stack and the thread descriptor are
	     zeroed.  This means we do not have to initialize fields
	     with initial value zero.  This is specifically true for
	     the 'tid' field which is always set back to zero once the
	     stack is not used anymore and for the 'guardsize' field
	     which will be read next.  */
	}
      /* Create or resize the guard area if necessary.  */
      if (__builtin_expect (guardsize > pd->guardsize, 0))
	{
#ifdef NEED_SEPARATE_REGISTER_STACK
	  char *guard = mem + (((size - guardsize) / 2) & ~pagesize_m1);
#else
	  char *guard = mem;
#endif
	  if (mprotect (guard, guardsize, PROT_NONE) != 0)
	    {
	      int err;
	    mprot_error:
	      err = errno;

	      lll_lock (stack_cache_lock);

	      /* Remove the thread from the list.  */
	      list_del (&pd->list);

	      lll_unlock (stack_cache_lock);

	      /* Get rid of the TLS block we allocated.  */
	      _dl_deallocate_tls (TLS_TPADJ (pd), false);

	      /* Free the stack memory regardless of whether the size
		 of the cache is over the limit or not.  If this piece
		 of memory caused problems we had better not use it
		 anymore.  We also ignore possible errors here; there
		 is nothing we could do about them.  */
	      (void) munmap (mem, size);

	      return err;
	    }

	  pd->guardsize = guardsize;
	}
      else if (__builtin_expect (pd->guardsize - guardsize > size - reqsize,
				 0))
	{
	  /* The old guard area is too large.  */

#ifdef NEED_SEPARATE_REGISTER_STACK
	  char *guard = mem + (((size - guardsize) / 2) & ~pagesize_m1);
	  char *oldguard = mem + (((size - pd->guardsize) / 2) & ~pagesize_m1);

	  if (oldguard < guard
	      && mprotect (oldguard, guard - oldguard, prot) != 0)
	    goto mprot_error;

	  if (mprotect (guard + guardsize,
			oldguard + pd->guardsize - guard - guardsize,
			prot) != 0)
	    goto mprot_error;
#else
	  if (mprotect ((char *) mem + guardsize, pd->guardsize - guardsize,
			prot) != 0)
	    goto mprot_error;
#endif

	  pd->guardsize = guardsize;
	}
      /* The pthread_getattr_np() calls need to get passed the size
	 requested in the attribute, regardless of how large the
	 actually used guardsize is.  */
      pd->reported_guardsize = guardsize;
    }

  /* Initialize the lock.  We have to do this unconditionally since the
     stillborn thread could be canceled while the lock is taken.  */
  pd->lock = LLL_LOCK_INITIALIZER;

  /* We place the thread descriptor at the end of the stack.  */
  *pdp = pd;

#if TLS_TCB_AT_TP
  /* The stack begins before the TCB and the static TLS block.  */
  stacktop = ((char *) (pd + 1) - __static_tls_size);
#elif TLS_DTV_AT_TP
  stacktop = (char *) (pd - 1);
#endif

#ifdef NEED_SEPARATE_REGISTER_STACK
  *stack = pd->stackblock;
  *stacksize = stacktop - *stack;
#else
  *stack = stacktop;
#endif

  return 0;
}
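
/* A sketch of the caller side (not part of this file): pthread_create
   uses the macro family from the top of the file roughly like

     STACK_VARIABLES;
     struct pthread *pd;
     int err = ALLOCATE_STACK (iattr, &pd);
     if (err != 0)
       return err;
     ...
     err = create_thread (pd, iattr, STACK_VARIABLES_ARGS);

   so the call site stays the same no matter whether one or two stack
   values have to be passed through to create_thread.  */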
void
internal_function
__deallocate_stack (struct pthread *pd)
{
  lll_lock (stack_cache_lock);

  /* Remove the thread from the list of threads with user defined
     stacks.  */
  list_del (&pd->list);

  /* Not much to do.  Just free the mmap()ed memory.  Note that we do
     not reset the 'used' flag in the 'tid' field.  This is done by
     the kernel.  If no thread has been created yet this field is
     still zero.  */
  if (__builtin_expect (! pd->user_stack, 1))
    (void) queue_stack (pd);
  else
    /* Free the memory associated with the ELF TLS.  */
    _dl_deallocate_tls (TLS_TPADJ (pd), false);

  lll_unlock (stack_cache_lock);
}
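
/* The next function runs when the dynamic linker finds out, typically
   during dlopen, that a newly loaded object requires executable
   stacks: the main thread's stack is handled by
   _dl_make_stack_executable, and every thread stack, live or cached,
   is re-protected via change_stack_perm.  */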
int
internal_function
__make_stacks_executable (void **stack_endp)
{
  /* First the main thread's stack.  */
  int err = _dl_make_stack_executable (stack_endp);
  if (err != 0)
    return err;

#ifdef NEED_SEPARATE_REGISTER_STACK
  const size_t pagemask = ~(__getpagesize () - 1);
#endif

  lll_lock (stack_cache_lock);

  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      err = change_stack_perm (list_entry (runp, struct pthread, list)
#ifdef NEED_SEPARATE_REGISTER_STACK
			       , pagemask
#endif
			       );
      if (err != 0)
	break;
    }

  /* Also change the permission for the currently unused stacks.  This
     might be wasted time but better spend it here than adding a check
     in the fast path.  */
  if (err == 0)
    list_for_each (runp, &stack_cache)
      {
	err = change_stack_perm (list_entry (runp, struct pthread, list)
#ifdef NEED_SEPARATE_REGISTER_STACK
				 , pagemask
#endif
				 );
	if (err != 0)
	  break;
      }

  lll_unlock (stack_cache_lock);

  return err;
}
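
/* A note on the caller (not visible in this file): __reclaim_stacks is
   run in the new child process after a fork, where the copied
   descriptors of all other threads are stale.  Re-initializing
   stack_cache_lock at the end also covers the case that some thread
   held the lock at the moment of the fork.  */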
/* In case of a fork() call the memory allocation in the child will be
   the same but only one thread is running.  All stacks except that of
   the one running thread are not used anymore.  We have to recycle
   them.  */
void
__reclaim_stacks (void)
{
  struct pthread *self = (struct pthread *) THREAD_SELF;

  /* No locking necessary.  The caller's thread owns the only stack in
     use.  */

  /* Mark all stacks except the still running one as free.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *curp;

      curp = list_entry (runp, struct pthread, list);
      if (curp != self)
	{
	  /* This marks the stack as free.  */
	  curp->tid = 0;

	  /* The PID field must be initialized for the new process.  */
	  curp->pid = self->pid;

	  /* Account for the size of the stack.  */
	  stack_cache_actsize += curp->stackblock_size;
	}
    }

  /* Add the stacks of all no-longer-running threads to the cache.  */
  list_splice (&stack_used, &stack_cache);

  /* Remove the entry for the current thread from the cache list
     and add it to the list of running threads.  Which of the two
     lists it ends up on is decided by the user_stack flag.  */
  list_del (&self->list);

  /* Re-initialize the lists for all the threads.  */
  INIT_LIST_HEAD (&stack_used);
  INIT_LIST_HEAD (&__stack_user);

  if (__builtin_expect (THREAD_GETMEM (self, user_stack), 0))
    list_add (&self->list, &__stack_user);
  else
    list_add (&self->list, &stack_used);

  /* There is one thread running.  */
  __nptl_nthreads = 1;

  /* Initialize the lock.  */
  stack_cache_lock = LLL_LOCK_INITIALIZER;
}
#if HP_TIMING_AVAIL
# undef __find_thread_by_id
/* Find a thread given the thread ID.  */
attribute_hidden
struct pthread *
__find_thread_by_id (pid_t tid)
{
  struct pthread *result = NULL;

  lll_lock (stack_cache_lock);

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *curp;

      curp = list_entry (runp, struct pthread, list);

      if (curp->tid == tid)
	{
	  result = curp;
	  goto out;
	}
    }

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    {
      struct pthread *curp;

      curp = list_entry (runp, struct pthread, list);

      if (curp->tid == tid)
	{
	  result = curp;
	  goto out;
	}
    }

 out:
  lll_unlock (stack_cache_lock);

  return result;
}
#endif
static void
internal_function
setxid_signal_thread (struct xid_command *cmdp, struct pthread *t)
{
  if (! IS_DETACHED (t))
    {
      int ch;

      do
	{
	  ch = t->cancelhandling;

	  /* If the thread is exiting right now, ignore it.  */
	  if ((ch & EXITING_BITMASK) != 0)
	    return;
	}
      while (atomic_compare_and_exchange_bool_acq (&t->cancelhandling,
						   ch | SETXID_BITMASK, ch));
    }

  int val;
  INTERNAL_SYSCALL_DECL (err);
#if __ASSUME_TGKILL
  val = INTERNAL_SYSCALL (tgkill, err, 3, THREAD_GETMEM (THREAD_SELF, pid),
			  t->tid, SIGSETXID);
#else
# ifdef __NR_tgkill
  val = INTERNAL_SYSCALL (tgkill, err, 3, THREAD_GETMEM (THREAD_SELF, pid),
			  t->tid, SIGSETXID);
  if (INTERNAL_SYSCALL_ERROR_P (val, err)
      && INTERNAL_SYSCALL_ERRNO (val, err) == ENOSYS)
# endif
    val = INTERNAL_SYSCALL (tkill, err, 2, t->tid, SIGSETXID);
#endif

  if (!INTERNAL_SYSCALL_ERROR_P (val, err))
    atomic_increment (&cmdp->cntr);
}
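
/* __nptl_setxid below implements the POSIX requirement that setuid()
   and friends change the credentials of the whole process: every other
   thread is signaled with SIGSETXID, and the handler installed during
   NPTL initialization performs the same id-changing system call,
   decrements cmdp->cntr and wakes the futex the initiating thread is
   waiting on.  Only then does the initiator issue the syscall for
   itself.  */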
int
attribute_hidden
__nptl_setxid (struct xid_command *cmdp)
{
  int result;
  lll_lock (stack_cache_lock);

  __xidcmd = cmdp;
  cmdp->cntr = 0;

  struct pthread *self = THREAD_SELF;

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
	continue;

      setxid_signal_thread (cmdp, t);
    }

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
	continue;

      setxid_signal_thread (cmdp, t);
    }

  int cur = cmdp->cntr;
  while (cur != 0)
    {
      lll_futex_wait (&cmdp->cntr, cur);
      cur = cmdp->cntr;
    }

  /* This must be done last, otherwise the current thread might no
     longer have the permissions needed to send SIGSETXID to the other
     threads.  */
  INTERNAL_SYSCALL_DECL (err);
  result = INTERNAL_SYSCALL_NCS (cmdp->syscall_no, err, 3,
				 cmdp->id[0], cmdp->id[1], cmdp->id[2]);
  if (INTERNAL_SYSCALL_ERROR_P (result, err))
    {
      __set_errno (INTERNAL_SYSCALL_ERRNO (result, err));
      result = -1;
    }

  lll_unlock (stack_cache_lock);
  return result;
}
static inline void __attribute__((always_inline))
init_one_static_tls (struct pthread *curp, struct link_map *map)
{
  dtv_t *dtv = GET_DTV (TLS_TPADJ (curp));
# if TLS_TCB_AT_TP
  void *dest = (char *) curp - map->l_tls_offset;
# elif TLS_DTV_AT_TP
  void *dest = (char *) curp + map->l_tls_offset + TLS_PRE_TCB_SIZE;
# else
#  error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif

  /* Fill in the DTV slot so that a later LD/GD access will find it.  */
  dtv[map->l_tls_modid].pointer.val = dest;
  dtv[map->l_tls_modid].pointer.is_static = true;

  /* Initialize the memory.  */
  memset (__mempcpy (dest, map->l_tls_initimage, map->l_tls_initimage_size),
	  '\0', map->l_tls_blocksize - map->l_tls_initimage_size);
}
void
attribute_hidden
__pthread_init_static_tls (struct link_map *map)
{
  lll_lock (stack_cache_lock);

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    init_one_static_tls (list_entry (runp, struct pthread, list), map);

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    init_one_static_tls (list_entry (runp, struct pthread, list), map);

  lll_unlock (stack_cache_lock);
}