/* Copyright (C) 2002-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <ctype.h>
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include "pthreadP.h"
#include <hp-timing.h>
#include <ldsodefs.h>
#include <atomic.h>
#include <libc-internal.h>
#include <resolv.h>
#include <kernel-features.h>
#include <exit-thread.h>
#include <default-sched.h>

#include <shlib-compat.h>

#include <stap-probe.h>

/* Nonzero if debugging mode is enabled.  */
int __pthread_debug;

/* Globally enabled events.  */
static td_thr_events_t __nptl_threads_events __attribute_used__;

/* Pointer to descriptor with the last event.  */
static struct pthread *__nptl_last_event __attribute_used__;

/* Number of threads running.  */
unsigned int __nptl_nthreads = 1;

/* Code to allocate and deallocate a stack.  */
#include "allocatestack.c"

/* createthread.c defines this function, and two macros:
   START_THREAD_DEFN and START_THREAD_SELF (see below).

   create_thread is obliged to initialize PD->stopped_start.  It
   should be true if the STOPPED_START parameter is true, or if
   create_thread needs the new thread to synchronize at startup for
   some other implementation reason.  If PD->stopped_start will be
   true, then create_thread is obliged to perform the operation
   "lll_lock (PD->lock, LLL_PRIVATE)" before starting the thread.

   The return value is zero for success or an errno code for failure.
   If the return value is ENOMEM, that will be translated to EAGAIN,
   so create_thread need not do that.  On failure, *THREAD_RAN should
   be set to true iff the thread actually started up and then got
   cancelled before calling user code (*PD->start_routine), in which
   case it is responsible for doing its own cleanup.  */
static int create_thread (struct pthread *pd, const struct pthread_attr *attr,
                          bool stopped_start, STACK_VARIABLES_PARMS,
                          bool *thread_ran);

#include <createthread.c>
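
/* Illustration only, not part of the original source: the stopped-start
   handshake described in the contract above.  The parent takes PD->lock
   before the thread exists; the child's first action in start_thread
   (below) is to block on that lock, so it cannot reach user code until
   the parent releases it:

     parent                                 child (start_thread)
     ------                                 --------------------
     lll_lock (pd->lock, LLL_PRIVATE);
     create_thread (pd, ..., true, ...);
     ... report TD_CREATE event, etc. ...   lll_lock (pd->lock, LLL_PRIVATE);
     lll_unlock (pd->lock, LLL_PRIVATE);      (blocks here)
                                            lll_unlock (pd->lock, LLL_PRIVATE);
                                            pd->start_routine (pd->arg);  */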

struct pthread *
internal_function
__find_in_stack_list (struct pthread *pd)
{
  list_t *entry;
  struct pthread *result = NULL;

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  list_for_each (entry, &stack_used)
    {
      struct pthread *curp;

      curp = list_entry (entry, struct pthread, list);
      if (curp == pd)
        {
          result = curp;
          break;
        }
    }

  if (result == NULL)
    list_for_each (entry, &__stack_user)
      {
        struct pthread *curp;

        curp = list_entry (entry, struct pthread, list);
        if (curp == pd)
          {
            result = curp;
            break;
          }
      }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  return result;
}

/* Deallocate POSIX thread-local-storage.  */
void
attribute_hidden
__nptl_deallocate_tsd (void)
{
  struct pthread *self = THREAD_SELF;

  /* Maybe no data was ever allocated.  This happens often so we have
     a flag for this.  */
  if (THREAD_GETMEM (self, specific_used))
    {
      size_t round;
      size_t cnt;

      round = 0;
      do
        {
          size_t idx;

          /* So far no new nonzero data entry.  */
          THREAD_SETMEM (self, specific_used, false);

          for (cnt = idx = 0; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
            {
              struct pthread_key_data *level2;

              level2 = THREAD_GETMEM_NC (self, specific, cnt);

              if (level2 != NULL)
                {
                  size_t inner;

                  for (inner = 0; inner < PTHREAD_KEY_2NDLEVEL_SIZE;
                       ++inner, ++idx)
                    {
                      void *data = level2[inner].data;

                      if (data != NULL)
                        {
                          /* Always clear the data.  */
                          level2[inner].data = NULL;

                          /* Make sure the data corresponds to a valid
                             key.  This test fails if the key was
                             deallocated and also if it was
                             re-allocated.  It is the user's
                             responsibility to free the memory in this
                             case.  */
                          if (level2[inner].seq
                              == __pthread_keys[idx].seq
                              /* It is not necessary to register a destructor
                                 function.  */
                              && __pthread_keys[idx].destr != NULL)
                            /* Call the user-provided destructor.  */
                            __pthread_keys[idx].destr (data);
                        }
                    }
                }
              else
                /* Skip a whole unallocated second-level block.  */
                idx += PTHREAD_KEY_2NDLEVEL_SIZE;
            }

          if (THREAD_GETMEM (self, specific_used) == 0)
            /* No data has been modified.  */
            goto just_free;
        }
      /* We only repeat the process a fixed number of times.  */
      while (__builtin_expect (++round < PTHREAD_DESTRUCTOR_ITERATIONS, 0));

      /* Just clear the memory of the first block for reuse.  */
      memset (&THREAD_SELF->specific_1stblock, '\0',
              sizeof (self->specific_1stblock));

    just_free:
      /* Free the memory for the other blocks.  */
      for (cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
        {
          struct pthread_key_data *level2;

          level2 = THREAD_GETMEM_NC (self, specific, cnt);
          if (level2 != NULL)
            {
              /* The first block is allocated as part of the thread
                 descriptor.  */
              free (level2);
              THREAD_SETMEM_NC (self, specific, cnt, NULL);
            }
        }

      THREAD_SETMEM (self, specific_used, false);
    }
}
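
/* Illustration only, not part of the original source: the destructor
   iteration above is what runs, at thread exit, the destructors that
   user code registers via pthread_key_create.  A minimal sketch of the
   user-visible behavior:

     #include <pthread.h>
     #include <stdlib.h>

     static pthread_key_t key;

     static void destr (void *data)      // invoked by __nptl_deallocate_tsd
     { free (data); }

     static void *tf (void *arg)
     {
       pthread_key_create (&key, destr);
       pthread_setspecific (key, malloc (16));
       return NULL;                      // destr (value) runs on thread exit
     }

   If a destructor installs a new nonzero value, the loop above repeats,
   but at most PTHREAD_DESTRUCTOR_ITERATIONS times.  */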

/* Deallocate a thread's stack after optionally making sure the thread
   descriptor is still valid.  */
void
internal_function
__free_tcb (struct pthread *pd)
{
  /* The thread is exiting now.  */
  if (__builtin_expect (atomic_bit_test_set (&pd->cancelhandling,
                                             TERMINATED_BIT) == 0, 1))
    {
      /* Remove the descriptor from the list.  */
      if (DEBUGGING_P && __find_in_stack_list (pd) == NULL)
        /* Something is really wrong.  The descriptor for a still
           running thread is gone.  */
        abort ();

      /* Free TPP data.  */
      if (__glibc_unlikely (pd->tpp != NULL))
        {
          struct priority_protection_data *tpp = pd->tpp;

          pd->tpp = NULL;
          free (tpp);
        }

      /* Queue the stack memory block for reuse and exit the process.  The
         kernel will signal via writing to the address returned by
         QUEUE-STACK when the stack is available.  */
      __deallocate_stack (pd);
    }
}
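
/* Illustration only, not part of the original source: both the exiting
   thread itself (for detached threads, see start_thread below) and a
   joiner may end up calling __free_tcb.  The atomic_bit_test_set on
   TERMINATED_BIT above makes that race-free: whichever caller sets the
   bit first (test_set returns 0) performs the deallocation, and any
   later caller does nothing.  */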

/* Local function to start thread and handle cleanup.
   createthread.c defines the macro START_THREAD_DEFN to the
   declaration that its create_thread function will refer to, and
   START_THREAD_SELF to the expression to optimally deliver the new
   thread's THREAD_SELF value.  */
START_THREAD_DEFN
{
  struct pthread *pd = START_THREAD_SELF;

#if HP_TIMING_AVAIL
  /* Remember the time when the thread was started.  */
  hp_timing_t now;
  HP_TIMING_NOW (now);
  THREAD_SETMEM (pd, cpuclock_offset, now);
#endif

  /* Initialize resolver state pointer.  */
  __resp = &pd->res;

  /* Initialize pointers to locale data.  */
  __ctype_init ();

  /* Allow setxid from now onwards.  */
  if (__glibc_unlikely (atomic_exchange_acq (&pd->setxid_futex, 0) == -2))
    lll_futex_wake (&pd->setxid_futex, 1, LLL_PRIVATE);

#ifdef __NR_set_robust_list
# ifndef __ASSUME_SET_ROBUST_LIST
  if (__set_robust_list_avail >= 0)
# endif
    {
      INTERNAL_SYSCALL_DECL (err);
      /* This call should never fail because the initial call in init.c
         succeeded.  */
      INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
                        sizeof (struct robust_list_head));
    }
#endif

  /* If the parent was running cancellation handlers while creating
     the thread, the new thread inherited the signal mask.  Reset the
     cancellation signal mask.  */
  if (__glibc_unlikely (pd->parent_cancelhandling & CANCELING_BITMASK))
    {
      INTERNAL_SYSCALL_DECL (err);
      sigset_t mask;
      __sigemptyset (&mask);
      __sigaddset (&mask, SIGCANCEL);
      (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &mask,
                               NULL, _NSIG / 8);
    }

  /* This is where the try/finally block should be created.  For
     compilers without that support we do use setjmp.  */
  struct pthread_unwind_buf unwind_buf;

  /* No previous handlers.  */
  unwind_buf.priv.data.prev = NULL;
  unwind_buf.priv.data.cleanup = NULL;

  int not_first_call;
  not_first_call = setjmp ((struct __jmp_buf_tag *) unwind_buf.cancel_jmp_buf);
  if (__glibc_likely (! not_first_call))
    {
      /* Store the new cleanup handler info.  */
      THREAD_SETMEM (pd, cleanup_jmp_buf, &unwind_buf);

      if (__glibc_unlikely (pd->stopped_start))
        {
          int oldtype = CANCEL_ASYNC ();

          /* Get the lock the parent locked to force synchronization.  */
          lll_lock (pd->lock, LLL_PRIVATE);
          /* And give it up right away.  */
          lll_unlock (pd->lock, LLL_PRIVATE);

          CANCEL_RESET (oldtype);
        }

      LIBC_PROBE (pthread_start, 3, (pthread_t) pd, pd->start_routine,
                  pd->arg);

      /* Run the code the user provided.  */
#ifdef CALL_THREAD_FCT
      THREAD_SETMEM (pd, result, CALL_THREAD_FCT (pd));
#else
      THREAD_SETMEM (pd, result, pd->start_routine (pd->arg));
#endif
    }

  /* Call destructors for the thread_local TLS variables.  */
#ifndef SHARED
  if (&__call_tls_dtors != NULL)
#endif
    __call_tls_dtors ();

  /* Run the destructor for the thread-local data.  */
  __nptl_deallocate_tsd ();

  /* Clean up any state libc stored in thread-local variables.  */
  __libc_thread_freeres ();

  /* If this is the last thread we terminate the process now.  We
     do not notify the debugger, it might just irritate it if there
     is no thread left.  */
  if (__glibc_unlikely (atomic_decrement_and_test (&__nptl_nthreads)))
    /* This was the last thread.  */
    exit (0);

  /* Report the death of the thread if this is wanted.  */
  if (__glibc_unlikely (pd->report_events))
    {
      /* See whether TD_DEATH is in any of the masks.  */
      const int idx = __td_eventword (TD_DEATH);
      const uint32_t mask = __td_eventmask (TD_DEATH);

      if ((mask & (__nptl_threads_events.event_bits[idx]
                   | pd->eventbuf.eventmask.event_bits[idx])) != 0)
        {
          /* Yep, we have to signal the death.  Add the descriptor to
             the list but only if it is not already on it.  */
          if (pd->nextevent == NULL)
            {
              pd->eventbuf.eventnum = TD_DEATH;
              pd->eventbuf.eventdata = pd;

              do
                pd->nextevent = __nptl_last_event;
              while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
                                                           pd, pd->nextevent));
            }

          /* Now call the function to signal the event.  */
          __nptl_death_event ();
        }
    }

  /* The thread is exiting now.  Don't set this bit until after we've hit
     the event-reporting breakpoint, so that td_thr_get_info on us while at
     the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE.  */
  atomic_bit_set (&pd->cancelhandling, EXITING_BIT);

#ifndef __ASSUME_SET_ROBUST_LIST
  /* If this thread has any robust mutexes locked, handle them now.  */
# ifdef __PTHREAD_MUTEX_HAVE_PREV
  void *robust = pd->robust_head.list;
# else
  __pthread_slist_t *robust = pd->robust_list.__next;
# endif
  /* We let the kernel do the notification if it is able to do so.
     If we have to do it here, there are certainly no PI mutexes
     involved, since kernel support for them is even more recent.  */
  if (__set_robust_list_avail < 0
      && __builtin_expect (robust != (void *) &pd->robust_head, 0))
    {
      do
        {
          struct __pthread_mutex_s *this = (struct __pthread_mutex_s *)
            ((char *) robust - offsetof (struct __pthread_mutex_s,
                                         __list.__next));
          robust = *((void **) robust);

# ifdef __PTHREAD_MUTEX_HAVE_PREV
          this->__list.__prev = NULL;
# endif
          this->__list.__next = NULL;

          atomic_or (&this->__lock, FUTEX_OWNER_DIED);
          lll_futex_wake (&this->__lock, 1, /* XYZ */ LLL_SHARED);
        }
      while (robust != (void *) &pd->robust_head);
    }
#endif
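
/* Illustration only, not part of the original source: the
   FUTEX_OWNER_DIED marking above is what a surviving thread observes as
   EOWNERDEAD when it next acquires a robust mutex the dead thread held:

     #include <errno.h>
     #include <pthread.h>

     static pthread_mutex_t m;   // initialized with PTHREAD_MUTEX_ROBUST

     static void lock_robust (void)
     {
       int err = pthread_mutex_lock (&m);
       if (err == EOWNERDEAD)
         {
           // Previous owner died; repair the protected state, then:
           pthread_mutex_consistent (&m);
         }
     }

   The mutex must have been initialized with an attribute on which
   pthread_mutexattr_setrobust (&a, PTHREAD_MUTEX_ROBUST) was called.  */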

  /* Mark the memory of the stack as usable to the kernel.  We free
     everything except for the space used for the TCB itself.  */
  size_t pagesize_m1 = __getpagesize () - 1;
#ifdef _STACK_GROWS_DOWN
  char *sp = CURRENT_STACK_FRAME;
  size_t freesize = (sp - (char *) pd->stackblock) & ~pagesize_m1;
#else
# error "to do"
#endif
  assert (freesize < pd->stackblock_size);
  if (freesize > PTHREAD_STACK_MIN)
    __madvise (pd->stackblock, freesize - PTHREAD_STACK_MIN, MADV_DONTNEED);

  /* If the thread is detached free the TCB.  */
  if (IS_DETACHED (pd))
    /* Free the TCB.  */
    __free_tcb (pd);
  else if (__glibc_unlikely (pd->cancelhandling & SETXID_BITMASK))
    {
      /* Some other thread might call any of the setXid functions and expect
         us to reply.  In this case wait until we did that.  */
      do
        lll_futex_wait (&pd->setxid_futex, 0, LLL_PRIVATE);
      while (pd->cancelhandling & SETXID_BITMASK);

      /* Reset the value so that the stack can be reused.  */
      pd->setxid_futex = 0;
    }

  /* We cannot call '_exit' here.  '_exit' will terminate the process.

     The 'exit' implementation in the kernel will signal when the
     process is really dead since 'clone' got passed the CLONE_CHILD_CLEARTID
     flag.  The 'tid' field in the TCB will be set to zero.

     The exit code is zero since in case all threads exit by calling
     'pthread_exit' the exit status must be 0 (zero).  */
  __exit_thread ();

  /* NOTREACHED */
}
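
/* Illustration only, not part of the original source: the
   CLONE_CHILD_CLEARTID behavior mentioned above is what a joiner relies
   on.  Once this thread is really gone, the kernel writes 0 to pd->tid
   and performs a futex wake on it, so a joiner can wait roughly like
   this hypothetical sketch (pthread_join uses lll_wait_tid for this):

     int tid;
     while ((tid = pd->tid) != 0)
       lll_futex_wait (&pd->tid, tid, LLL_SHARED);  */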

/* Return true iff obliged to report TD_CREATE events.  */
static bool
report_thread_creation (struct pthread *pd)
{
  if (__glibc_unlikely (THREAD_GETMEM (THREAD_SELF, report_events)))
    {
      /* The parent thread is supposed to report events.
         Check whether the TD_CREATE event is needed, too.  */
      const size_t idx = __td_eventword (TD_CREATE);
      const uint32_t mask = __td_eventmask (TD_CREATE);

      return ((mask & (__nptl_threads_events.event_bits[idx]
                       | pd->eventbuf.eventmask.event_bits[idx])) != 0);
    }
  return false;
}

int
__pthread_create_2_1 (pthread_t *newthread, const pthread_attr_t *attr,
                      void *(*start_routine) (void *), void *arg)
{
  STACK_VARIABLES;

  const struct pthread_attr *iattr = (struct pthread_attr *) attr;
  struct pthread_attr default_attr;
  bool free_cpuset = false;
  if (iattr == NULL)
    {
      lll_lock (__default_pthread_attr_lock, LLL_PRIVATE);
      default_attr = __default_pthread_attr;
      size_t cpusetsize = default_attr.cpusetsize;
      if (cpusetsize > 0)
        {
          cpu_set_t *cpuset;
          if (__glibc_likely (__libc_use_alloca (cpusetsize)))
            cpuset = __alloca (cpusetsize);
          else
            {
              cpuset = malloc (cpusetsize);
              if (cpuset == NULL)
                {
                  lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);
                  return EAGAIN;
                }
              free_cpuset = true;
            }
          memcpy (cpuset, default_attr.cpuset, cpusetsize);
          default_attr.cpuset = cpuset;
        }
      lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);
      iattr = &default_attr;
    }

  struct pthread *pd = NULL;
  int err = ALLOCATE_STACK (iattr, &pd);
  int retval = 0;

  if (__glibc_unlikely (err != 0))
    /* Something went wrong.  Maybe a parameter of the attributes is
       invalid or we could not allocate memory.  Note we have to
       translate error codes.  */
    {
      retval = err == ENOMEM ? EAGAIN : err;
      goto out;
    }


  /* Initialize the TCB.  All initializations with zero should be
     performed in 'get_cached_stack'.  This way we avoid doing this if
     the stack was freshly allocated with 'mmap'.  */

#if TLS_TCB_AT_TP
  /* Reference to the TCB itself.  */
  pd->header.self = pd;

  /* Self-reference for TLS.  */
  pd->header.tcb = pd;
#endif

  /* Store the address of the start routine and the parameter.  Since
     we do not start the function directly the stillborn thread will
     get the information from its thread descriptor.  */
  pd->start_routine = start_routine;
  pd->arg = arg;

  /* Copy the thread attribute flags.  */
  struct pthread *self = THREAD_SELF;
  pd->flags = ((iattr->flags & ~(ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
               | (self->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)));

  /* Initialize the field for the ID of the thread which is waiting
     for us.  This is a self-reference in case the thread is created
     detached.  */
  pd->joinid = iattr->flags & ATTR_FLAG_DETACHSTATE ? pd : NULL;

  /* The debug events are inherited from the parent.  */
  pd->eventbuf = self->eventbuf;


  /* Copy the parent's scheduling parameters.  The flags will say what
     is valid and what is not.  */
  pd->schedpolicy = self->schedpolicy;
  pd->schedparam = self->schedparam;

  /* Copy the stack guard canary.  */
#ifdef THREAD_COPY_STACK_GUARD
  THREAD_COPY_STACK_GUARD (pd);
#endif

  /* Copy the pointer guard value.  */
#ifdef THREAD_COPY_POINTER_GUARD
  THREAD_COPY_POINTER_GUARD (pd);
#endif

  /* Verify the sysinfo bits were copied in allocate_stack if needed.  */
#ifdef NEED_DL_SYSINFO
  CHECK_THREAD_SYSINFO (pd);
#endif

  /* Inform start_thread (above) about cancellation state that might
     translate into inherited signal state.  */
  pd->parent_cancelhandling = THREAD_GETMEM (THREAD_SELF, cancelhandling);

  /* Determine scheduling parameters for the thread.  */
  if (__builtin_expect ((iattr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0, 0)
      && (iattr->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)) != 0)
    {
      /* Use the scheduling parameters the user provided.  */
      if (iattr->flags & ATTR_FLAG_POLICY_SET)
        {
          pd->schedpolicy = iattr->schedpolicy;
          pd->flags |= ATTR_FLAG_POLICY_SET;
        }
      if (iattr->flags & ATTR_FLAG_SCHED_SET)
        {
          /* The values were validated in pthread_attr_setschedparam.  */
          pd->schedparam = iattr->schedparam;
          pd->flags |= ATTR_FLAG_SCHED_SET;
        }

      if ((pd->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
          != (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
        collect_default_sched (pd);
    }

  /* Pass the descriptor to the caller.  */
  *newthread = (pthread_t) pd;

  LIBC_PROBE (pthread_create, 4, newthread, attr, start_routine, arg);

  /* One more thread.  We cannot have the thread do this itself, since it
     might exist but not have been scheduled yet by the time we've returned
     and need to check the value to behave correctly.  We must do it before
     creating the thread, in case it does get scheduled first and then
     might mistakenly think it was the only thread.  In the failure case,
     we momentarily store a false value; this doesn't matter because there
     is no kosher thing a signal handler interrupting us right here can do
     that cares whether the thread count is correct.  */
  atomic_increment (&__nptl_nthreads);

  bool thread_ran = false;

  /* Start the thread.  */
  if (__glibc_unlikely (report_thread_creation (pd)))
    {
      /* Create the thread.  We always create the thread stopped
         so that it does not get far before we tell the debugger.  */
      retval = create_thread (pd, iattr, true, STACK_VARIABLES_ARGS,
                              &thread_ran);
      if (retval == 0)
        {
          /* create_thread should have set this so that the logic below can
             test it.  */
          assert (pd->stopped_start);

          /* Now fill in the information about the new thread in
             the newly created thread's data structure.  We cannot let
             the new thread do this since we don't know whether it was
             already scheduled when we send the event.  */
          pd->eventbuf.eventnum = TD_CREATE;
          pd->eventbuf.eventdata = pd;

          /* Enqueue the descriptor.  */
          do
            pd->nextevent = __nptl_last_event;
          while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
                                                       pd, pd->nextevent));

          /* Now call the function which signals the event.  */
          __nptl_create_event ();
        }
    }
  else
    retval = create_thread (pd, iattr, false, STACK_VARIABLES_ARGS,
                            &thread_ran);

  if (__glibc_unlikely (retval != 0))
    {
      /* If thread creation "failed", that might mean that the thread got
         created and ran a little--short of running user code--but then
         create_thread cancelled it.  In that case, the thread will do all
         its own cleanup just like a normal thread exit after a successful
         creation would do.  */

      if (thread_ran)
        assert (pd->stopped_start);
      else
        {
          /* Oops, we lied for a second.  */
          atomic_decrement (&__nptl_nthreads);

          /* Perhaps a thread wants to change the IDs and is waiting for this
             stillborn thread.  */
          if (__glibc_unlikely (atomic_exchange_acq (&pd->setxid_futex, 0)
                                == -2))
            lll_futex_wake (&pd->setxid_futex, 1, LLL_PRIVATE);

          /* Free the resources.  */
          __deallocate_stack (pd);
        }

      /* We have to translate error codes.  */
      if (retval == ENOMEM)
        retval = EAGAIN;
    }
  else
    {
      if (pd->stopped_start)
        /* The thread blocked on this lock either because we're doing TD_CREATE
           event reporting, or for some other reason that create_thread chose.
           Now let it run free.  */
        lll_unlock (pd->lock, LLL_PRIVATE);

      /* We now have for sure more than one thread.  The main thread might
         not yet have the flag set.  No need to set the global variable
         again if this is what we use.  */
      THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);
    }

 out:
  if (__glibc_unlikely (free_cpuset))
    free (default_attr.cpuset);

  return retval;
}
versioned_symbol (libpthread, __pthread_create_2_1, pthread_create, GLIBC_2_1);
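
/* Illustration only, not part of the original source: a minimal caller
   of the function above via the public pthread_create alias:

     #include <pthread.h>
     #include <stdio.h>

     static void *tf (void *arg)
     { return arg; }

     int main (void)
     {
       pthread_t th;
       void *res;
       if (pthread_create (&th, NULL, tf, (void *) 42l) != 0)
         return 1;
       pthread_join (th, &res);       // reaps the thread; see __free_tcb
       printf ("%ld\n", (long) res);  // prints 42
       return 0;
     }
*/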

#if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
int
__pthread_create_2_0 (pthread_t *newthread, const pthread_attr_t *attr,
                      void *(*start_routine) (void *), void *arg)
{
  /* The ATTR attribute is not really of type `pthread_attr_t *'.  It has
     the old size and access to the new members might crash the program.
     We convert the struct now.  */
  struct pthread_attr new_attr;

  if (attr != NULL)
    {
      struct pthread_attr *iattr = (struct pthread_attr *) attr;
      size_t ps = __getpagesize ();

      /* Copy values from the user-provided attributes.  */
      new_attr.schedparam = iattr->schedparam;
      new_attr.schedpolicy = iattr->schedpolicy;
      new_attr.flags = iattr->flags;

      /* Fill in default values for the fields not present in the old
         implementation.  */
      new_attr.guardsize = ps;
      new_attr.stackaddr = NULL;
      new_attr.stacksize = 0;
      new_attr.cpuset = NULL;
      new_attr.cpusetsize = 0;

      /* We will pass this value on to the real implementation.  */
      attr = (pthread_attr_t *) &new_attr;
    }

  return __pthread_create_2_1 (newthread, attr, start_routine, arg);
}
compat_symbol (libpthread, __pthread_create_2_0, pthread_create,
               GLIBC_2_0);
#endif

/* Information for libthread_db.  */
#include "../nptl_db/db_info.c"

/* If pthread_create is present, libgcc_eh.a and libsupc++.a expect some
   other POSIX thread functions to be present as well.  */
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_lock)
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_trylock)
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_unlock)

PTHREAD_STATIC_FN_REQUIRE (pthread_once)
PTHREAD_STATIC_FN_REQUIRE (pthread_cancel)

PTHREAD_STATIC_FN_REQUIRE (pthread_key_create)
PTHREAD_STATIC_FN_REQUIRE (pthread_key_delete)
PTHREAD_STATIC_FN_REQUIRE (pthread_setspecific)
PTHREAD_STATIC_FN_REQUIRE (pthread_getspecific)