/* Copyright (C) 2002,2003,2004,2005,2006,2007 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <dl-sysdep.h>
#include <tls.h>
#include <lowlevellock.h>
#ifndef NEED_SEPARATE_REGISTER_STACK

/* Most architectures have exactly one stack pointer.  Some have more.  */
# define STACK_VARIABLES void *stackaddr = NULL

/* How to pass the values to the 'create_thread' function.  */
# define STACK_VARIABLES_ARGS stackaddr

/* How to declare the function that gets these parameters.  */
# define STACK_VARIABLES_PARMS void *stackaddr

/* How to declare allocate_stack.  */
# define ALLOCATE_STACK_PARMS void **stack

/* This is how the function is called.  We do it this way to allow
   other variants of the function to have more parameters.  */
# define ALLOCATE_STACK(attr, pd) allocate_stack (attr, pd, &stackaddr)

#else
/* We need two stacks.  The kernel will place them but we have to tell
   the kernel about the size of the reserved address space.  */
# define STACK_VARIABLES void *stackaddr = NULL; size_t stacksize = 0

/* How to pass the values to the 'create_thread' function.  */
# define STACK_VARIABLES_ARGS stackaddr, stacksize

/* How to declare the function that gets these parameters.  */
# define STACK_VARIABLES_PARMS void *stackaddr, size_t stacksize

/* How to declare allocate_stack.  */
# define ALLOCATE_STACK_PARMS void **stack, size_t *stacksize

/* This is how the function is called.  We do it this way to allow
   other variants of the function to have more parameters.  */
# define ALLOCATE_STACK(attr, pd) \
  allocate_stack (attr, pd, &stackaddr, &stacksize)

#endif
/* Default alignment of stack.  */
#ifndef STACK_ALIGN
# define STACK_ALIGN __alignof__ (long double)
#endif
/* Default value for minimal stack size after allocating thread
   descriptor and guard.  */
#ifndef MINIMAL_REST_STACK
# define MINIMAL_REST_STACK 4096
#endif
/* Let the architecture add some flags to the mmap() call used to
   allocate the memory.  */
#ifndef ARCH_MAP_FLAGS
# define ARCH_MAP_FLAGS 0
#endif
/* This yields the pointer that TLS support code calls the thread pointer.  */
#if TLS_TCB_AT_TP
# define TLS_TPADJ(pd) (pd)
#elif TLS_DTV_AT_TP
# define TLS_TPADJ(pd) ((struct pthread *)((char *) (pd) + TLS_PRE_TCB_SIZE))
#endif
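/* Background note: TLS_TCB_AT_TP and TLS_DTV_AT_TP name the two static
   TLS layouts.  With TLS_TCB_AT_TP (e.g. x86) the thread descriptor is
   the TCB and sits right at the thread pointer, so no adjustment is
   needed.  With TLS_DTV_AT_TP the descriptor lives TLS_PRE_TCB_SIZE
   bytes below the thread pointer, hence the addition above.  */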
/* Cache handling for not-yet free stacks.  */

/* Maximum size of the cache, in bytes.  */
static size_t stack_cache_maxsize = 40 * 1024 * 1024; /* 40 MiB by default.  */
static size_t stack_cache_actsize;

/* Mutex protecting the stack cache lists and the size accounting.  */
static lll_lock_t stack_cache_lock = LLL_LOCK_INITIALIZER;
/* List of queued stack frames.  */
static LIST_HEAD (stack_cache);

/* List of the stacks in use.  */
static LIST_HEAD (stack_used);

/* List of the threads with user provided stacks in use.  No need to
   initialize this, since it's done in __pthread_initialize_minimal.  */
list_t __stack_user __attribute__ ((nocommon));
hidden_data_def (__stack_user)
#if COLORING_INCREMENT != 0
/* Number of threads created.  */
static unsigned int nptl_ncreated;
#endif
/* Check whether the stack is still used or not.  */
#define FREE_P(descr) ((descr)->tid <= 0)
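/* The kernel clears 'tid' (via the CLONE_CHILD_CLEARTID mechanism)
   once the thread has exited, so a value <= 0 means the stack is no
   longer in use and may be recycled.  */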
/* We create a doubly linked list of all cache entries.  Doubly linked
   because this allows removing entries from the end.  */
/* Get a stack frame from the cache.  We have to match by size since
   some blocks might be too small or far too large.  */
static struct pthread *
get_cached_stack (size_t *sizep, void **memp)
{
  size_t size = *sizep;
  struct pthread *result = NULL;
  list_t *entry;

  lll_lock (stack_cache_lock);

  /* Search the cache for a matching entry.  We search for the
     smallest stack which has at least the required size.  Note that
     in normal situations the size of all allocated stacks is the
     same.  At the very least there are only a few different sizes.
     Therefore this loop will exit early most of the time with an
     exact match.  */
  list_for_each (entry, &stack_cache)
    {
      struct pthread *curr;

      curr = list_entry (entry, struct pthread, list);
      if (FREE_P (curr) && curr->stackblock_size >= size)
        {
          if (curr->stackblock_size == size)
            {
              result = curr;
              break;
            }

          if (result == NULL
              || result->stackblock_size > curr->stackblock_size)
            result = curr;
        }
    }

  if (__builtin_expect (result == NULL, 0)
      /* Make sure the size difference is not too excessive.  In that
         case we do not use the block.  */
      || __builtin_expect (result->stackblock_size > 4 * size, 0))
    {
      /* Release the lock.  */
      lll_unlock (stack_cache_lock);

      return NULL;
    }

  /* Dequeue the entry.  */
  list_del (&result->list);

  /* And add to the list of stacks in use.  */
  list_add (&result->list, &stack_used);

  /* And decrease the cache size.  */
  stack_cache_actsize -= result->stackblock_size;

  /* Release the lock early.  */
  lll_unlock (stack_cache_lock);

  /* Report size and location of the stack to the caller.  */
  *sizep = result->stackblock_size;
  *memp = result->stackblock;

  /* Cancellation handling is back to the default.  */
  result->cancelhandling = 0;
  result->cleanup = NULL;

  /* No pending event.  */
  result->nextevent = NULL;

  /* Clear the DTV.  */
  dtv_t *dtv = GET_DTV (TLS_TPADJ (result));
  memset (dtv, '\0', (dtv[-1].counter + 1) * sizeof (dtv_t));

  /* Re-initialize the TLS.  */
  _dl_allocate_tls_init (TLS_TPADJ (result));

  return result;
}
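/* Example of the best-fit policy above (hypothetical sizes): with
   cached blocks of 512 KiB, 1 MiB and 8 MiB, a request for 1 MiB
   reuses the exact match; a request for 600 KiB reuses the 1 MiB
   block, the smallest one that is large enough; a request for
   1.5 MiB finds only the 8 MiB block, which exceeds the 4x limit and
   is rejected, forcing a fresh mmap.  */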
/* Free stacks until cache size is lower than LIMIT.  */
static void
free_stacks (size_t limit)
{
  /* We reduce the size of the cache.  Remove the last entries until
     the size is below the limit.  */
  list_t *entry;
  list_t *prev;

  /* Search from the end of the list.  */
  list_for_each_prev_safe (entry, prev, &stack_cache)
    {
      struct pthread *curr;

      curr = list_entry (entry, struct pthread, list);
      if (FREE_P (curr))
        {
          /* Unlink the block.  */
          list_del (entry);

          /* Account for the freed memory.  */
          stack_cache_actsize -= curr->stackblock_size;

          /* Free the memory associated with the ELF TLS.  */
          _dl_deallocate_tls (TLS_TPADJ (curr), false);

          /* Remove this block.  This should never fail.  If it does
             something is really wrong.  */
          if (munmap (curr->stackblock, curr->stackblock_size) != 0)
            abort ();

          /* Maybe we have freed enough.  */
          if (stack_cache_actsize <= limit)
            break;
        }
    }
}
/* Add a stack frame which is not used anymore to the stack cache.
   Must be called with the cache lock held.  */
static inline void
__attribute ((always_inline))
queue_stack (struct pthread *stack)
{
  /* We unconditionally add the stack to the list.  The memory may
     still be in use but it will not be reused until the kernel marks
     the stack as not used anymore.  */
  list_add (&stack->list, &stack_cache);

  stack_cache_actsize += stack->stackblock_size;
  if (__builtin_expect (stack_cache_actsize > stack_cache_maxsize, 0))
    free_stacks (stack_cache_maxsize);
}
/* This function is called indirectly from the freeres code in libc.  */
void
__free_stack_cache (void)
{
  free_stacks (0);
}
static int
internal_function
change_stack_perm (struct pthread *pd
#ifdef NEED_SEPARATE_REGISTER_STACK
                   , size_t pagemask
#endif
                   )
{
#ifdef NEED_SEPARATE_REGISTER_STACK
  void *stack = (pd->stackblock
                 + (((((pd->stackblock_size - pd->guardsize) / 2)
                      & pagemask) + pd->guardsize) & pagemask));
  size_t len = pd->stackblock + pd->stackblock_size - stack;
#elif _STACK_GROWS_DOWN
  void *stack = pd->stackblock + pd->guardsize;
  size_t len = pd->stackblock_size - pd->guardsize;
#elif _STACK_GROWS_UP
  void *stack = pd->stackblock;
  size_t len = (uintptr_t) pd - pd->guardsize - (uintptr_t) pd->stackblock;
#else
# error "Define either _STACK_GROWS_DOWN or _STACK_GROWS_UP"
#endif
  if (mprotect (stack, len, PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
    return errno;

  return 0;
}
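/* On NEED_SEPARATE_REGISTER_STACK targets (ia64 being the canonical
   case) one mapping holds two stacks: the register backing store
   grows up from the bottom half while the memory stack grows down
   from the top.  That is why the re-protection above starts at the
   page-aligned midpoint of the block instead of at its base.  */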
/* Return a usable stack for a new thread, either by allocating fresh
   memory or by reusing a cached stack of sufficient size.  */
static int
allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
                ALLOCATE_STACK_PARMS)
{
  struct pthread *pd;
  size_t size;
  size_t pagesize_m1 = __getpagesize () - 1;
  void *stacktop;

  assert (attr != NULL);
  assert (powerof2 (pagesize_m1 + 1));
  assert (TCB_ALIGNMENT >= STACK_ALIGN);
  /* Get the stack size from the attribute if it is set.  Otherwise we
     use the default we determined at start time.  */
  size = attr->stacksize ?: __default_stacksize;
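  /* Note: 'x ?: y' is the GNU C conditional with omitted middle
     operand; it evaluates to x if x is nonzero and to y otherwise,
     without evaluating x twice.  */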
  /* Get memory for the stack.  */
  if (__builtin_expect (attr->flags & ATTR_FLAG_STACKADDR, 0))
    {
      uintptr_t adj;

      /* If the user also specified the size of the stack make sure it
         is large enough.  */
      if (attr->stacksize != 0
          && attr->stacksize < (__static_tls_size + MINIMAL_REST_STACK))
        return EINVAL;
      /* Adjust stack size for alignment of the TLS block.  */
#if TLS_TCB_AT_TP
      adj = ((uintptr_t) attr->stackaddr - TLS_TCB_SIZE)
            & __static_tls_align_m1;
      assert (size > adj + TLS_TCB_SIZE);
#elif TLS_DTV_AT_TP
      adj = ((uintptr_t) attr->stackaddr - __static_tls_size)
            & __static_tls_align_m1;
      assert (size > adj);
#endif
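      /* Worked example (hypothetical values): with a 64-byte TLS
         alignment requirement, __static_tls_align_m1 == 63.  If
         stackaddr == 0x40001030 and TLS_TCB_SIZE == 0x700, then
         0x40001030 - 0x700 == 0x40000930, and 0x40000930 & 63 == 48,
         so ADJ backs the descriptor off by 48 bytes to land the TCB
         on a 64-byte boundary.  */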
      /* The user provided some memory.  Let's hope it matches the
         size...  We do not allocate guard pages if the user provided
         the stack.  It is the user's responsibility to do this if it
         is wanted.  */
#if TLS_TCB_AT_TP
      pd = (struct pthread *) ((uintptr_t) attr->stackaddr
                               - TLS_TCB_SIZE - adj);
#elif TLS_DTV_AT_TP
      pd = (struct pthread *) (((uintptr_t) attr->stackaddr
                                - __static_tls_size - adj)
                               - TLS_PRE_TCB_SIZE);
#endif

      /* The user provided stack memory needs to be cleared.  */
      memset (pd, '\0', sizeof (struct pthread));

      /* The first TSD block is included in the TCB.  */
      pd->specific[0] = pd->specific_1stblock;
      /* Remember the stack-related values.  */
      pd->stackblock = (char *) attr->stackaddr - size;
      pd->stackblock_size = size;

      /* This is a user-provided stack.  It will not be queued in the
         stack cache nor will the memory (except the TLS memory) be freed.  */
      pd->user_stack = true;

      /* This is at least the second thread.  */
      pd->header.multiple_threads = 1;
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
      __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
#endif

#ifdef NEED_DL_SYSINFO
      /* Copy the sysinfo value from the parent.  */
      THREAD_SYSINFO(pd) = THREAD_SELF_SYSINFO;
#endif

      /* The process ID is also the same as that of the caller.  */
      pd->pid = THREAD_GETMEM (THREAD_SELF, pid);
      /* Allocate the DTV for this thread.  */
      if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
        {
          /* Something went wrong.  */
          assert (errno == ENOMEM);
          return EAGAIN;
        }


      /* Prepare to modify global data.  */
      lll_lock (stack_cache_lock);

      /* And add to the list of stacks in use.  */
      list_add (&pd->list, &__stack_user);

      lll_unlock (stack_cache_lock);
    }
  else
    {
      /* Allocate some anonymous memory.  If possible use the cache.  */
      size_t guardsize;
      size_t reqsize;
      void *mem;
      const int prot = (PROT_READ | PROT_WRITE
                        | ((GL(dl_stack_flags) & PF_X) ? PROT_EXEC : 0));

#if COLORING_INCREMENT != 0
      /* Add one more page for stack coloring.  Don't do it for stacks
         with 16 times pagesize or larger.  This might just cause
         unnecessary misalignment.  */
      if (size <= 16 * pagesize_m1)
        size += pagesize_m1 + 1;
#endif
      /* Adjust the stack size for alignment.  */
      size &= ~__static_tls_align_m1;
      assert (size != 0);

      /* Make sure the size of the stack is enough for the guard and
         eventually the thread descriptor.  */
      guardsize = (attr->guardsize + pagesize_m1) & ~pagesize_m1;
      if (__builtin_expect (size < ((guardsize + __static_tls_size
                                     + MINIMAL_REST_STACK + pagesize_m1)
                                    & ~pagesize_m1),
                            0))
        /* The stack is too small (or the guard too large).  */
        return EINVAL;
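      /* Rounding example with 4 KiB pages (pagesize_m1 == 0xfff): a
         requested guardsize of 1 byte becomes (1 + 0xfff) & ~0xfff ==
         0x1000, i.e. one full page, since mprotect can only operate
         on whole pages.  */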
      /* Try to get a stack from the cache.  */
      reqsize = size;
      pd = get_cached_stack (&size, &mem);
      if (pd == NULL)
        {
          /* To avoid aliasing effects on a larger scale than pages we
             adjust the allocated stack size if necessary.  This way
             allocations directly following each other will not have
             aliasing problems.  */
#if MULTI_PAGE_ALIASING != 0
          if ((size % MULTI_PAGE_ALIASING) == 0)
            size += pagesize_m1 + 1;
#endif

          mem = mmap (NULL, size, prot,
                      MAP_PRIVATE | MAP_ANONYMOUS | ARCH_MAP_FLAGS, -1, 0);

          if (__builtin_expect (mem == MAP_FAILED, 0))
            {
#ifdef ARCH_RETRY_MMAP
              mem = ARCH_RETRY_MMAP (size);
              if (__builtin_expect (mem == MAP_FAILED, 0))
#endif
                return errno;
            }

          /* SIZE is guaranteed to be greater than zero.
             So we can never get a null pointer back from mmap.  */
          assert (mem != NULL);
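          /* The aliasing tweak above assumes caches indexed by low
             address bits: if every stack size were an exact multiple
             of MULTI_PAGE_ALIASING, the hot top-of-stack words of
             consecutively mapped stacks would compete for the same
             cache sets; the extra page staggers them.  */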
#if COLORING_INCREMENT != 0
          /* Atomically increment NCREATED.  */
          unsigned int ncreated = atomic_increment_val (&nptl_ncreated);

          /* We choose the offset for coloring by incrementing it for
             every new thread by a fixed amount.  The offset is used
             modulo the page size.  Even if coloring would be better
             relative to higher alignment values it makes no sense to
             do it since the mmap() interface does not allow us to
             specify any alignment for the returned memory block.  */
          size_t coloring = (ncreated * COLORING_INCREMENT) & pagesize_m1;

          /* Make sure the coloring offset does not disturb the alignment
             of the TCB and static TLS block.  */
          if (__builtin_expect ((coloring & __static_tls_align_m1) != 0, 0))
            coloring = (((coloring + __static_tls_align_m1)
                         & ~(__static_tls_align_m1))
                        & ~pagesize_m1);
#else
          /* Unless specified we do not make any adjustments.  */
# define coloring 0
#endif
          /* Place the thread descriptor at the end of the stack.  */
#if TLS_TCB_AT_TP
          pd = (struct pthread *) ((char *) mem + size - coloring) - 1;
#elif TLS_DTV_AT_TP
          pd = (struct pthread *) ((((uintptr_t) mem + size - coloring
                                     - __static_tls_size)
                                    & ~__static_tls_align_m1)
                                   - TLS_PRE_TCB_SIZE);
#endif
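          /* Resulting layout, sketched for TLS_TCB_AT_TP with a
             downward-growing stack and no coloring (addresses grow to
             the right):

             mem                                           mem + size
              | guard | ... usable stack ... | static TLS | TCB (pd) |

             The stack grows down from just below the static TLS block
             toward the guard page(s) at the base of the mapping.  */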
          /* Remember the stack-related values.  */
          pd->stackblock = mem;
          pd->stackblock_size = size;

          /* We allocated the first block of the thread-specific data
             array within the TCB.  This address will not change for
             the lifetime of this object.  */
          pd->specific[0] = pd->specific_1stblock;

          /* This is at least the second thread.  */
          pd->header.multiple_threads = 1;
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
          __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
#endif

#ifdef NEED_DL_SYSINFO
          /* Copy the sysinfo value from the parent.  */
          THREAD_SYSINFO(pd) = THREAD_SELF_SYSINFO;
#endif

          /* The process ID is also the same as that of the caller.  */
          pd->pid = THREAD_GETMEM (THREAD_SELF, pid);
          /* Allocate the DTV for this thread.  */
          if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
            {
              /* Something went wrong.  */
              assert (errno == ENOMEM);

              /* Free the stack memory we just allocated.  */
              (void) munmap (mem, size);

              return EAGAIN;
            }


          /* Prepare to modify global data.  */
          lll_lock (stack_cache_lock);

          /* And add to the list of stacks in use.  */
          list_add (&pd->list, &stack_used);

          lll_unlock (stack_cache_lock);
          /* There might have been a race.  Another thread might have
             caused the stacks to get exec permission while this new
             stack was prepared.  Detect if this was possible and
             change the permission if necessary.  */
          if (__builtin_expect ((GL(dl_stack_flags) & PF_X) != 0
                                && (prot & PROT_EXEC) == 0, 0))
            {
              int err = change_stack_perm (pd
#ifdef NEED_SEPARATE_REGISTER_STACK
                                           , ~pagesize_m1
#endif
                                           );
              if (err != 0)
                {
                  /* Free the stack memory we just allocated.  */
                  (void) munmap (mem, size);

                  return err;
                }
            }
          /* Note that all of the stack and the thread descriptor is
             zeroed.  This means we do not have to initialize fields
             with initial value zero.  This is specifically true for
             the 'tid' field which is always set back to zero once the
             stack is not used anymore and for the 'guardsize' field
             which will be read next.  */
        }
      /* Create or resize the guard area if necessary.  */
      if (__builtin_expect (guardsize > pd->guardsize, 0))
        {
#ifdef NEED_SEPARATE_REGISTER_STACK
          char *guard = mem + (((size - guardsize) / 2) & ~pagesize_m1);
#elif _STACK_GROWS_DOWN
          char *guard = mem;
#elif _STACK_GROWS_UP
          char *guard = (char *) (((uintptr_t) pd - guardsize) & ~pagesize_m1);
#endif
          if (mprotect (guard, guardsize, PROT_NONE) != 0)
            {
              int err;
            mprot_error:
              err = errno;

              lll_lock (stack_cache_lock);

              /* Remove the thread from the list.  */
              list_del (&pd->list);

              lll_unlock (stack_cache_lock);

              /* Get rid of the TLS block we allocated.  */
              _dl_deallocate_tls (TLS_TPADJ (pd), false);

              /* Free the stack memory regardless of whether the size
                 of the cache is over the limit or not.  If this piece
                 of memory caused problems we better do not use it
                 anymore.  Uh, and we ignore possible errors.  There
                 is nothing we could do.  */
              (void) munmap (mem, size);

              return err;
            }

          pd->guardsize = guardsize;
        }
      else if (__builtin_expect (pd->guardsize - guardsize > size - reqsize,
                                 0))
        {
          /* The old guard area is too large.  */

#ifdef NEED_SEPARATE_REGISTER_STACK
          char *guard = mem + (((size - guardsize) / 2) & ~pagesize_m1);
          char *oldguard = mem + (((size - pd->guardsize) / 2) & ~pagesize_m1);

          if (oldguard < guard
              && mprotect (oldguard, guard - oldguard, prot) != 0)
            goto mprot_error;

          if (mprotect (guard + guardsize,
                        oldguard + pd->guardsize - guard - guardsize,
                        prot) != 0)
            goto mprot_error;
#elif _STACK_GROWS_DOWN
          if (mprotect ((char *) mem + guardsize, pd->guardsize - guardsize,
                        prot) != 0)
            goto mprot_error;
#elif _STACK_GROWS_UP
          if (mprotect ((char *) pd - pd->guardsize,
                        pd->guardsize - guardsize, prot) != 0)
            goto mprot_error;
#endif

          pd->guardsize = guardsize;
        }
      /* The pthread_getattr_np() calls need to get passed the size
         requested in the attribute, regardless of how large the
         actually used guardsize is.  */
      pd->reported_guardsize = guardsize;
    }
  /* Initialize the lock.  We have to do this unconditionally since the
     stillborn thread could be canceled while the lock is taken.  */
  pd->lock = LLL_LOCK_INITIALIZER;

  /* The robust mutex lists also need to be initialized
     unconditionally because the cleanup for the previous stack owner
     might have happened in the kernel.  */
  pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
                                  - offsetof (pthread_mutex_t,
                                              __data.__list.__next));
  pd->robust_head.list_op_pending = NULL;
#ifdef __PTHREAD_MUTEX_HAVE_PREV
  pd->robust_prev = &pd->robust_head;
#endif
  pd->robust_head.list = &pd->robust_head;
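  /* The kernel walks the robust list on thread death: each list entry
     points at a mutex's __list.__next field, and the kernel adds
     futex_offset to that address to locate the futex word
     (__data.__lock) it has to wake.  The offset is therefore simply
     the distance between those two fields inside pthread_mutex_t.  */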
  /* We place the thread descriptor at the end of the stack.  */
  *pdp = pd;

#if TLS_TCB_AT_TP
  /* The stack begins before the TCB and the static TLS block.  */
  stacktop = ((char *) (pd + 1) - __static_tls_size);
#elif TLS_DTV_AT_TP
  stacktop = (char *) (pd - 1);
#endif

#ifdef NEED_SEPARATE_REGISTER_STACK
  *stack = pd->stackblock;
  *stacksize = stacktop - *stack;
#elif _STACK_GROWS_DOWN
  *stack = stacktop;
#elif _STACK_GROWS_UP
  *stack = pd->stackblock;
#endif

  return 0;
}
void
internal_function
__deallocate_stack (struct pthread *pd)
{
  lll_lock (stack_cache_lock);

  /* Remove the thread from the list of threads with user defined
     stacks.  */
  list_del (&pd->list);

  /* Not much to do.  Just free the mmap()ed memory.  Note that we do
     not reset the 'used' flag in the 'tid' field.  This is done by
     the kernel.  If no thread has been created yet this field is
     still zero.  */
  if (__builtin_expect (! pd->user_stack, 1))
    (void) queue_stack (pd);
  else
    /* Free the memory associated with the ELF TLS.  */
    _dl_deallocate_tls (TLS_TPADJ (pd), false);

  lll_unlock (stack_cache_lock);
}
int
internal_function
__make_stacks_executable (void **stack_endp)
{
  /* First the main thread's stack.  */
  int err = _dl_make_stack_executable (stack_endp);
  if (err != 0)
    return err;

#ifdef NEED_SEPARATE_REGISTER_STACK
  const size_t pagemask = ~(__getpagesize () - 1);
#endif

  lll_lock (stack_cache_lock);

  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      err = change_stack_perm (list_entry (runp, struct pthread, list)
#ifdef NEED_SEPARATE_REGISTER_STACK
                               , pagemask
#endif
                               );
      if (err != 0)
        break;
    }

  /* Also change the permission for the currently unused stacks.  This
     might be wasted time but better spend it here than adding a check
     in the fast path.  */
  if (err == 0)
    list_for_each (runp, &stack_cache)
      {
        err = change_stack_perm (list_entry (runp, struct pthread, list)
#ifdef NEED_SEPARATE_REGISTER_STACK
                                 , pagemask
#endif
                                 );
        if (err != 0)
          break;
      }

  lll_unlock (stack_cache_lock);

  return err;
}
/* In case of a fork() call the memory allocation in the child will be
   the same but only one thread is running.  All stacks except that of
   the one running thread are not used anymore.  We have to recycle
   them.  */
void
__reclaim_stacks (void)
{
  struct pthread *self = (struct pthread *) THREAD_SELF;

  /* No locking necessary.  The caller is the only stack in use.  */

  /* Mark all stacks except the still running one as free.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *curp = list_entry (runp, struct pthread, list);
      if (curp != self)
        {
          /* This marks the stack as free.  */
          curp->tid = 0;

          /* The PID field must be initialized for the new process.  */
          curp->pid = self->pid;

          /* Account for the size of the stack.  */
          stack_cache_actsize += curp->stackblock_size;
        }
    }

  /* Reset the PIDs in any cached stacks.  */
  list_for_each (runp, &stack_cache)
    {
      struct pthread *curp = list_entry (runp, struct pthread, list);
      curp->pid = self->pid;
    }

  /* Add the stacks of all running threads to the cache.  */
  list_splice (&stack_used, &stack_cache);

  /* Remove the entry for the current thread from the cache list
     and add it to the list of running threads.  Which of the two
     lists is decided by the user_stack flag.  */
  list_del (&self->list);

  /* Re-initialize the lists for all the threads.  */
  INIT_LIST_HEAD (&stack_used);
  INIT_LIST_HEAD (&__stack_user);

  if (__builtin_expect (THREAD_GETMEM (self, user_stack), 0))
    list_add (&self->list, &__stack_user);
  else
    list_add (&self->list, &stack_used);

  /* There is one thread running.  */
  __nptl_nthreads = 1;

  /* Initialize the lock.  */
  stack_cache_lock = LLL_LOCK_INITIALIZER;
}
#if HP_TIMING_AVAIL
# undef __find_thread_by_id
/* Find a thread given the thread ID.  */
attribute_hidden
struct pthread *
__find_thread_by_id (pid_t tid)
{
  struct pthread *result = NULL;

  lll_lock (stack_cache_lock);

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *curp;

      curp = list_entry (runp, struct pthread, list);

      if (curp->tid == tid)
        {
          result = curp;
          goto out;
        }
    }

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    {
      struct pthread *curp;

      curp = list_entry (runp, struct pthread, list);

      if (curp->tid == tid)
        {
          result = curp;
          break;
        }
    }

 out:
  lll_unlock (stack_cache_lock);

  return result;
}
#endif
static void
internal_function
setxid_signal_thread (struct xid_command *cmdp, struct pthread *t)
{
  if (! IS_DETACHED (t))
    {
      int ch;
      do
        {
          ch = t->cancelhandling;

          /* If the thread is exiting right now, ignore it.  */
          if ((ch & EXITING_BITMASK) != 0)
            return;
        }
      while (atomic_compare_and_exchange_bool_acq (&t->cancelhandling,
                                                   ch | SETXID_BITMASK, ch));
    }

  int val;
  INTERNAL_SYSCALL_DECL (err);
#if __ASSUME_TGKILL
  val = INTERNAL_SYSCALL (tgkill, err, 3, THREAD_GETMEM (THREAD_SELF, pid),
                          t->tid, SIGSETXID);
#else
# ifdef __NR_tgkill
  val = INTERNAL_SYSCALL (tgkill, err, 3, THREAD_GETMEM (THREAD_SELF, pid),
                          t->tid, SIGSETXID);
  if (INTERNAL_SYSCALL_ERROR_P (val, err)
      && INTERNAL_SYSCALL_ERRNO (val, err) == ENOSYS)
# endif
    val = INTERNAL_SYSCALL (tkill, err, 2, t->tid, SIGSETXID);
#endif

  if (!INTERNAL_SYSCALL_ERROR_P (val, err))
    atomic_increment (&cmdp->cntr);
}
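/* Rationale: POSIX requires that set*id() affect the whole process,
   but the Linux kernel keeps credentials per task.  The id change is
   therefore broadcast by sending SIGSETXID to every thread; each
   signalled thread repeats the syscall on itself and decrements
   cmdp->cntr, which the initiating thread waits on below.  */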
int
attribute_hidden
__nptl_setxid (struct xid_command *cmdp)
{
  int result;
  lll_lock (stack_cache_lock);

  __xidcmd = cmdp;
  cmdp->cntr = 0;

  struct pthread *self = THREAD_SELF;

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_signal_thread (cmdp, t);
    }

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_signal_thread (cmdp, t);
    }

  int cur = cmdp->cntr;
  while (cur != 0)
    {
      lll_futex_wait (&cmdp->cntr, cur);
      cur = cmdp->cntr;
    }

  /* This must be last, otherwise the current thread might not have
     permissions to send the SIGSETXID signal to the other threads.  */
  INTERNAL_SYSCALL_DECL (err);
  result = INTERNAL_SYSCALL_NCS (cmdp->syscall_no, err, 3,
                                 cmdp->id[0], cmdp->id[1], cmdp->id[2]);
  if (INTERNAL_SYSCALL_ERROR_P (result, err))
    {
      __set_errno (INTERNAL_SYSCALL_ERRNO (result, err));
      result = -1;
    }

  lll_unlock (stack_cache_lock);
  return result;
}
static inline void __attribute__((always_inline))
init_one_static_tls (struct pthread *curp, struct link_map *map)
{
  dtv_t *dtv = GET_DTV (TLS_TPADJ (curp));
# if TLS_TCB_AT_TP
  void *dest = (char *) curp - map->l_tls_offset;
# elif TLS_DTV_AT_TP
  void *dest = (char *) curp + map->l_tls_offset + TLS_PRE_TCB_SIZE;
# else
#  error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif

  /* Fill in the DTV slot so that a later LD/GD access will find it.  */
  dtv[map->l_tls_modid].pointer.val = dest;
  dtv[map->l_tls_modid].pointer.is_static = true;

  /* Initialize the memory.  */
  memset (__mempcpy (dest, map->l_tls_initimage, map->l_tls_initimage_size),
          '\0', map->l_tls_blocksize - map->l_tls_initimage_size);
}
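/* __mempcpy returns a pointer just past the copied bytes, so the
   combined call above initializes the block in one pass: the first
   l_tls_initimage_size bytes receive the module's TLS init image and
   the remaining l_tls_blocksize - l_tls_initimage_size bytes (the
   zero-initialized part of the TLS segment) are cleared.  */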
void
attribute_hidden
__pthread_init_static_tls (struct link_map *map)
{
  lll_lock (stack_cache_lock);

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    init_one_static_tls (list_entry (runp, struct pthread, list), map);

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    init_one_static_tls (list_entry (runp, struct pthread, list), map);

  lll_unlock (stack_cache_lock);
}