/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <ctype.h>
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include "pthreadP.h"
#include <hp-timing.h>
#include <ldsodefs.h>
#include <atomic.h>
#include <libc-internal.h>
#include <resolv.h>
#include <kernel-features.h>
#include <exit-thread.h>

#include <shlib-compat.h>

#include <stap-probe.h>
/* Nonzero if debugging mode is enabled.  */
int __pthread_debug;

/* Globally enabled events.  */
static td_thr_events_t __nptl_threads_events __attribute_used__;

/* Pointer to descriptor with the last event.  */
static struct pthread *__nptl_last_event __attribute_used__;

/* Number of threads running.  */
unsigned int __nptl_nthreads = 1;

/* Code to allocate and deallocate a stack.  */
#include "allocatestack.c"
/* createthread.c defines this function, and two macros:
   START_THREAD_DEFN and START_THREAD_SELF (see below).

   create_thread is obliged to initialize PD->stopped_start.  It
   should be true if the STOPPED_START parameter is true, or if
   create_thread needs the new thread to synchronize at startup for
   some other implementation reason.  If PD->stopped_start will be
   true, then create_thread is obliged to perform the operation
   "lll_lock (PD->lock, LLL_PRIVATE)" before starting the thread.

   The return value is zero for success or an errno code for failure.
   If the return value is ENOMEM, that will be translated to EAGAIN,
   so create_thread need not do that.  On failure, *THREAD_RAN should
   be set to true iff the thread actually started up and then got
   cancelled before calling user code (*PD->start_routine), in which
   case it is responsible for doing its own cleanup.  */

static int create_thread (struct pthread *pd, const struct pthread_attr *attr,
                          bool stopped_start, STACK_VARIABLES_PARMS,
                          bool *thread_ran);

#include <createthread.c>
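
/* Illustrative sketch only (the real Linux implementation lives in
   createthread.c): a conforming create_thread could be shaped like

     static int
     create_thread (struct pthread *pd, const struct pthread_attr *attr,
                    bool stopped_start, STACK_VARIABLES_PARMS,
                    bool *thread_ran)
     {
       pd->stopped_start = stopped_start;
       if (pd->stopped_start)
         lll_lock (pd->lock, LLL_PRIVATE);  // new thread will block on this
       ...start the thread, e.g. via clone...
       *thread_ran = true;  // set only once the thread really exists
       return 0;            // or an errno code; returning ENOMEM is fine
     }
*/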
struct pthread *
internal_function
__find_in_stack_list (pd)
     struct pthread *pd;
{
  list_t *entry;
  struct pthread *result = NULL;

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  list_for_each (entry, &stack_used)
    {
      struct pthread *curp;

      curp = list_entry (entry, struct pthread, list);
      if (curp == pd)
        {
          result = curp;
          break;
        }
    }

  if (result == NULL)
    list_for_each (entry, &__stack_user)
      {
        struct pthread *curp;

        curp = list_entry (entry, struct pthread, list);
        if (curp == pd)
          {
            result = curp;
            break;
          }
      }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  return result;
}
/* Deallocate POSIX thread-local-storage.  */
void
attribute_hidden
__nptl_deallocate_tsd (void)
{
  struct pthread *self = THREAD_SELF;

  /* Maybe no data was ever allocated.  This happens often so we have
     a flag for this.  */
  if (THREAD_GETMEM (self, specific_used))
    {
      size_t round;
      size_t cnt;

      round = 0;
      do
        {
          size_t idx;

          /* So far no new nonzero data entry.  */
          THREAD_SETMEM (self, specific_used, false);

          for (cnt = idx = 0; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
            {
              struct pthread_key_data *level2;

              level2 = THREAD_GETMEM_NC (self, specific, cnt);

              if (level2 != NULL)
                {
                  size_t inner;
                  for (inner = 0; inner < PTHREAD_KEY_2NDLEVEL_SIZE;
                       ++inner, ++idx)
                    {
                      void *data = level2[inner].data;

                      if (data != NULL)
                        {
                          /* Always clear the data.  */
                          level2[inner].data = NULL;

                          /* Make sure the data corresponds to a valid
                             key.  This test fails if the key was
                             deallocated and also if it was
                             re-allocated.  It is the user's
                             responsibility to free the memory in this
                             case.  */
                          if (level2[inner].seq
                              == __pthread_keys[idx].seq
                              /* It is not necessary to register a destructor
                                 function.  */
                              && __pthread_keys[idx].destr != NULL)
                            /* Call the user-provided destructor.  */
                            __pthread_keys[idx].destr (data);
                        }
                    }
                }
              else
                /* The two level-size constants are equal, so this skips
                   exactly one full second-level block.  */
                idx += PTHREAD_KEY_1STLEVEL_SIZE;
            }

          if (THREAD_GETMEM (self, specific_used) == 0)
            /* No data has been modified.  */
            break;
        }
      /* We only repeat the process a fixed number of times.  */
      while (__builtin_expect (++round < PTHREAD_DESTRUCTOR_ITERATIONS, 0));

      /* Just clear the memory of the first block for reuse.  */
      memset (&THREAD_SELF->specific_1stblock, '\0',
              sizeof (self->specific_1stblock));

      /* Free the memory for the other blocks.  */
      for (cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
        {
          struct pthread_key_data *level2;

          level2 = THREAD_GETMEM_NC (self, specific, cnt);
          if (level2 != NULL)
            {
              /* The first block is allocated as part of the thread
                 descriptor.  */
              free (level2);
              THREAD_SETMEM_NC (self, specific, cnt, NULL);
            }
        }

      THREAD_SETMEM (self, specific_used, false);
    }
}
/* Deallocate a thread's stack after optionally making sure the thread
   descriptor is still valid.  */
void
internal_function
__free_tcb (struct pthread *pd)
{
  /* The thread is exiting now.  */
  if (__builtin_expect (atomic_bit_test_set (&pd->cancelhandling,
                                             TERMINATED_BIT) == 0, 1))
    {
      /* Remove the descriptor from the list.  */
      if (DEBUGGING_P && __find_in_stack_list (pd) == NULL)
        /* Something is really wrong.  The descriptor for a still
           running thread is gone.  */
        abort ();

      /* Free TPP data.  */
      if (__glibc_unlikely (pd->tpp != NULL))
        {
          struct priority_protection_data *tpp = pd->tpp;

          pd->tpp = NULL;
          free (tpp);
        }

      /* Queue the stack memory block for reuse and exit the process.  The
         kernel will signal via writing to the address returned by
         QUEUE-STACK when the stack is available.  */
      __deallocate_stack (pd);
    }
}
/* Local function to start thread and handle cleanup.
   createthread.c defines the macro START_THREAD_DEFN to the
   declaration that its create_thread function will refer to, and
   START_THREAD_SELF to the expression to optimally deliver the new
   thread's THREAD_SELF value.  */
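/* For illustration (the actual definitions are platform-specific and
   live in createthread.c), the Linux version defines roughly

     #define START_THREAD_DEFN \
       static int __attribute__ ((noreturn)) start_thread (void *arg)
     #define START_THREAD_SELF arg

   so the function below becomes the entry point handed to clone, and
   it recovers its own struct pthread from the clone argument.  */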
START_THREAD_DEFN
{
  struct pthread *pd = START_THREAD_SELF;

#if HP_TIMING_AVAIL
  /* Remember the time when the thread was started.  */
  hp_timing_t now;
  HP_TIMING_NOW (now);
  THREAD_SETMEM (pd, cpuclock_offset, now);
#endif

  /* Initialize resolver state pointer.  */
  __resp = &pd->res;

  /* Initialize pointers to locale data.  */
  __ctype_init ();

  /* Allow setxid from now onwards.  */
  if (__glibc_unlikely (atomic_exchange_acq (&pd->setxid_futex, 0) == -2))
    lll_futex_wake (&pd->setxid_futex, 1, LLL_PRIVATE);
#ifdef __NR_set_robust_list
# ifndef __ASSUME_SET_ROBUST_LIST
  if (__set_robust_list_avail >= 0)
# endif
    {
      INTERNAL_SYSCALL_DECL (err);
      /* This call should never fail because the initial call in init.c
         succeeded.  */
      INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
                        sizeof (struct robust_list_head));
    }
#endif
  /* If the parent was running cancellation handlers while creating
     the thread, the new thread inherited the signal mask.  Reset the
     cancellation signal mask.  */
  if (__glibc_unlikely (pd->parent_cancelhandling & CANCELING_BITMASK))
    {
      INTERNAL_SYSCALL_DECL (err);
      sigset_t mask;
      __sigemptyset (&mask);
      __sigaddset (&mask, SIGCANCEL);
      (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &mask,
                               NULL, _NSIG / 8);
    }
  /* This is where the try/finally block should be created.  For
     compilers without that support we do use setjmp.  */
  struct pthread_unwind_buf unwind_buf;

  /* No previous handlers.  */
  unwind_buf.priv.data.prev = NULL;
  unwind_buf.priv.data.cleanup = NULL;

  int not_first_call;
  not_first_call = setjmp ((struct __jmp_buf_tag *) unwind_buf.cancel_jmp_buf);
  if (__glibc_likely (! not_first_call))
    {
      /* Store the new cleanup handler info.  */
      THREAD_SETMEM (pd, cleanup_jmp_buf, &unwind_buf);

      if (__glibc_unlikely (pd->stopped_start))
        {
          int oldtype = CANCEL_ASYNC ();

          /* Get the lock the parent locked to force synchronization.  */
          lll_lock (pd->lock, LLL_PRIVATE);
          /* And give it up right away.  */
          lll_unlock (pd->lock, LLL_PRIVATE);

          CANCEL_RESET (oldtype);
        }

      LIBC_PROBE (pthread_start, 3, (pthread_t) pd, pd->start_routine,
                  pd->arg);

      /* Run the code the user provided.  */
#ifdef CALL_THREAD_FCT
      THREAD_SETMEM (pd, result, CALL_THREAD_FCT (pd));
#else
      THREAD_SETMEM (pd, result, pd->start_routine (pd->arg));
#endif
    }
  /* Call destructors for the thread_local TLS variables.  */
#ifndef SHARED
  if (&__call_tls_dtors != NULL)
#endif
    __call_tls_dtors ();

  /* Run the destructor for the thread-local data.  */
  __nptl_deallocate_tsd ();

  /* Clean up any state libc stored in thread-local variables.  */
  __libc_thread_freeres ();
  /* If this is the last thread we terminate the process now.  We
     do not notify the debugger, it might just irritate it if there
     is no thread left.  */
  if (__glibc_unlikely (atomic_decrement_and_test (&__nptl_nthreads)))
    /* This was the last thread.  */
    exit (0);

  /* Report the death of the thread if this is wanted.  */
  if (__glibc_unlikely (pd->report_events))
    {
      /* See whether TD_DEATH is in any of the masks.  */
      const int idx = __td_eventword (TD_DEATH);
      const uint32_t mask = __td_eventmask (TD_DEATH);

      if ((mask & (__nptl_threads_events.event_bits[idx]
                   | pd->eventbuf.eventmask.event_bits[idx])) != 0)
        {
          /* Yep, we have to signal the death.  Add the descriptor to
             the list but only if it is not already on it.  */
          if (pd->nextevent == NULL)
            {
              pd->eventbuf.eventnum = TD_DEATH;
              pd->eventbuf.eventdata = pd;

              do
                pd->nextevent = __nptl_last_event;
              while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
                                                           pd, pd->nextevent));
            }

          /* Now call the function to signal the event.  */
          __nptl_death_event ();
        }
    }
  /* The thread is exiting now.  Don't set this bit until after we've hit
     the event-reporting breakpoint, so that td_thr_get_info on us while at
     the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE.  */
  atomic_bit_set (&pd->cancelhandling, EXITING_BIT);
#ifndef __ASSUME_SET_ROBUST_LIST
  /* If this thread has any robust mutexes locked, handle them now.  */
# ifdef __PTHREAD_MUTEX_HAVE_PREV
  void *robust = pd->robust_head.list;
# else
  __pthread_slist_t *robust = pd->robust_list.__next;
# endif
  /* We let the kernel do the notification if it is able to do so.
     If we have to do it here, there are certainly no PI mutexes involved,
     since the kernel support for them is even more recent.  */
  if (__set_robust_list_avail < 0
      && __builtin_expect (robust != (void *) &pd->robust_head, 0))
    {
      do
        {
          struct __pthread_mutex_s *this = (struct __pthread_mutex_s *)
            ((char *) robust - offsetof (struct __pthread_mutex_s,
                                         __list.__next));
          robust = *((void **) robust);

# ifdef __PTHREAD_MUTEX_HAVE_PREV
          this->__list.__prev = NULL;
# endif
          this->__list.__next = NULL;

          atomic_or (&this->__lock, FUTEX_OWNER_DIED);
          lll_futex_wake (this->__lock, 1, /* XYZ */ LLL_SHARED);
        }
      while (robust != (void *) &pd->robust_head);
    }
#endif
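
/* Application-side view of the protocol above (illustrative only):
   a thread that later acquires one of these mutexes sees EOWNERDEAD
   because FUTEX_OWNER_DIED was set, and must mark the state consistent:

     int r = pthread_mutex_lock (&m);  // m is a PTHREAD_MUTEX_ROBUST mutex
     if (r == EOWNERDEAD)
       {
         ...repair the data the mutex protects...
         pthread_mutex_consistent (&m);
       }
*/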
  /* Mark the memory of the stack as usable to the kernel.  We free
     everything except for the space used for the TCB itself.  */
  size_t pagesize_m1 = __getpagesize () - 1;
#ifdef _STACK_GROWS_DOWN
  char *sp = CURRENT_STACK_FRAME;
  size_t freesize = (sp - (char *) pd->stackblock) & ~pagesize_m1;
#else
# error "to do"
#endif
  assert (freesize < pd->stackblock_size);
  if (freesize > PTHREAD_STACK_MIN)
    __madvise (pd->stackblock, freesize - PTHREAD_STACK_MIN, MADV_DONTNEED);
  /* If the thread is detached free the TCB.  */
  if (IS_DETACHED (pd))
    /* Free the TCB.  */
    __free_tcb (pd);
  else if (__glibc_unlikely (pd->cancelhandling & SETXID_BITMASK))
    {
      /* Some other thread might call any of the setXid functions and expect
         us to reply.  In this case wait until we did that.  */
      do
        lll_futex_wait (&pd->setxid_futex, 0, LLL_PRIVATE);
      while (pd->cancelhandling & SETXID_BITMASK);

      /* Reset the value so that the stack can be reused.  */
      pd->setxid_futex = 0;
    }

  /* We cannot call '_exit' here.  '_exit' will terminate the process.

     The 'exit' implementation in the kernel will signal when the
     process is really dead since 'clone' got passed the CLONE_CHILD_CLEARTID
     flag.  The 'tid' field in the TCB will be set to zero.

     The exit code is zero since in case all threads exit by calling
     'pthread_exit' the exit status must be 0 (zero).  */
  __exit_thread ();

  /* NOTREACHED */
}
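
/* Context for the comment above (hedged sketch, not this file's code):
   the tid-clearing handshake is what makes joining cheap.  create_thread
   passes CLONE_CHILD_CLEARTID with &pd->tid, so the kernel zeroes pd->tid
   and wakes its futex when the thread has truly died; pthread_join can
   then simply wait for pd->tid to become zero, along the lines of

     while ((tid = pd->tid) != 0)
       lll_futex_wait (&pd->tid, tid, LLL_SHARED);
*/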
/* Return true iff obliged to report TD_CREATE events.  */
static bool
report_thread_creation (struct pthread *pd)
{
  if (__glibc_unlikely (THREAD_GETMEM (THREAD_SELF, report_events)))
    {
      /* The parent thread is supposed to report events.
         Check whether the TD_CREATE event is needed, too.  */
      const size_t idx = __td_eventword (TD_CREATE);
      const uint32_t mask = __td_eventmask (TD_CREATE);

      return ((mask & (__nptl_threads_events.event_bits[idx]
                       | pd->eventbuf.eventmask.event_bits[idx])) != 0);
    }

  return false;
}
int
__pthread_create_2_1 (newthread, attr, start_routine, arg)
     pthread_t *newthread;
     const pthread_attr_t *attr;
     void *(*start_routine) (void *);
     void *arg;
{
  STACK_VARIABLES;

  const struct pthread_attr *iattr = (struct pthread_attr *) attr;
  struct pthread_attr default_attr;
  bool free_cpuset = false;
  if (iattr == NULL)
    {
      lll_lock (__default_pthread_attr_lock, LLL_PRIVATE);
      default_attr = __default_pthread_attr;
      size_t cpusetsize = default_attr.cpusetsize;
      if (cpusetsize > 0)
        {
          cpu_set_t *cpuset;
          if (__glibc_likely (__libc_use_alloca (cpusetsize)))
            cpuset = __alloca (cpusetsize);
          else
            {
              cpuset = malloc (cpusetsize);
              if (cpuset == NULL)
                {
                  lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);
                  return ENOMEM;
                }
              free_cpuset = true;
            }
          memcpy (cpuset, default_attr.cpuset, cpusetsize);
          default_attr.cpuset = cpuset;
        }
      lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);
      iattr = &default_attr;
    }
  struct pthread *pd = NULL;
  int err = ALLOCATE_STACK (iattr, &pd);
  int retval = 0;

  if (__glibc_unlikely (err != 0))
    /* Something went wrong.  Maybe a parameter of the attributes is
       invalid or we could not allocate memory.  Note we have to
       translate error codes.  */
    {
      retval = err == ENOMEM ? EAGAIN : err;
      goto out;
    }
  /* Initialize the TCB.  All initializations with zero should be
     performed in 'get_cached_stack'.  This way we avoid doing this if
     the stack was freshly allocated with 'mmap'.  */

#if TLS_TCB_AT_TP
  /* Reference to the TCB itself.  */
  pd->header.self = pd;

  /* Self-reference for TLS.  */
  pd->header.tcb = pd;
#endif

  /* Store the address of the start routine and the parameter.  Since
     we do not start the function directly the stillborn thread will
     get the information from its thread descriptor.  */
  pd->start_routine = start_routine;
  pd->arg = arg;

  /* Copy the thread attribute flags.  */
  struct pthread *self = THREAD_SELF;
  pd->flags = ((iattr->flags & ~(ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
               | (self->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)));

  /* Initialize the field for the ID of the thread which is waiting
     for us.  This is a self-reference in case the thread is created
     detached.  */
  pd->joinid = iattr->flags & ATTR_FLAG_DETACHSTATE ? pd : NULL;

  /* The debug events are inherited from the parent.  */
  pd->eventbuf = self->eventbuf;
  /* Copy the parent's scheduling parameters.  The flags will say what
     is valid and what is not.  */
  pd->schedpolicy = self->schedpolicy;
  pd->schedparam = self->schedparam;

  /* Copy the stack guard canary.  */
#ifdef THREAD_COPY_STACK_GUARD
  THREAD_COPY_STACK_GUARD (pd);
#endif

  /* Copy the pointer guard value.  */
#ifdef THREAD_COPY_POINTER_GUARD
  THREAD_COPY_POINTER_GUARD (pd);
#endif

  /* Verify the sysinfo bits were copied in allocate_stack if needed.  */
#ifdef NEED_DL_SYSINFO
  CHECK_THREAD_SYSINFO (pd);
#endif

  /* Inform start_thread (above) about cancellation state that might
     translate into inherited signal state.  */
  pd->parent_cancelhandling = THREAD_GETMEM (THREAD_SELF, cancelhandling);
  /* Determine scheduling parameters for the thread.  */
  if (__builtin_expect ((iattr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0, 0)
      && (iattr->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)) != 0)
    {
      INTERNAL_SYSCALL_DECL (scerr);

      /* Use the scheduling parameters the user provided.  */
      if (iattr->flags & ATTR_FLAG_POLICY_SET)
        pd->schedpolicy = iattr->schedpolicy;
      else if ((pd->flags & ATTR_FLAG_POLICY_SET) == 0)
        {
          pd->schedpolicy = INTERNAL_SYSCALL (sched_getscheduler, scerr, 1, 0);
          pd->flags |= ATTR_FLAG_POLICY_SET;
        }

      if (iattr->flags & ATTR_FLAG_SCHED_SET)
        memcpy (&pd->schedparam, &iattr->schedparam,
                sizeof (struct sched_param));
      else if ((pd->flags & ATTR_FLAG_SCHED_SET) == 0)
        {
          INTERNAL_SYSCALL (sched_getparam, scerr, 2, 0, &pd->schedparam);
          pd->flags |= ATTR_FLAG_SCHED_SET;
        }

      /* Check for valid priorities.  */
      int minprio = INTERNAL_SYSCALL (sched_get_priority_min, scerr, 1,
                                      pd->schedpolicy);
      int maxprio = INTERNAL_SYSCALL (sched_get_priority_max, scerr, 1,
                                      pd->schedpolicy);
      if (pd->schedparam.sched_priority < minprio
          || pd->schedparam.sched_priority > maxprio)
        {
          /* Perhaps a thread wants to change the IDs and is waiting
             for this stillborn thread.  */
          if (__builtin_expect (atomic_exchange_acq (&pd->setxid_futex, 0)
                                == -2, 0))
            lll_futex_wake (&pd->setxid_futex, 1, LLL_PRIVATE);

          __deallocate_stack (pd);

          retval = EINVAL;
          goto out;
        }
    }
  /* Pass the descriptor to the caller.  */
  *newthread = (pthread_t) pd;

  LIBC_PROBE (pthread_create, 4, newthread, attr, start_routine, arg);

  /* One more thread.  We cannot have the thread do this itself, since it
     might exist but not have been scheduled yet by the time we've returned
     and need to check the value to behave correctly.  We must do it before
     creating the thread, in case it does get scheduled first and then
     might mistakenly think it was the only thread.  In the failure case,
     we momentarily store a false value; this doesn't matter because there
     is no kosher thing a signal handler interrupting us right here can do
     that cares whether the thread count is correct.  */
  atomic_increment (&__nptl_nthreads);

  bool thread_ran = false;
  /* Start the thread.  */
  if (__glibc_unlikely (report_thread_creation (pd)))
    {
      /* Create the thread.  We always create the thread stopped
         so that it does not get far before we tell the debugger.  */
      retval = create_thread (pd, iattr, true, STACK_VARIABLES_ARGS,
                              &thread_ran);
      if (retval == 0)
        {
          /* create_thread should have set this so that the logic below can
             use it.  */
          assert (pd->stopped_start);

          /* Now fill in the information about the new thread in
             the newly created thread's data structure.  We cannot let
             the new thread do this since we don't know whether it was
             already scheduled when we send the event.  */
          pd->eventbuf.eventnum = TD_CREATE;
          pd->eventbuf.eventdata = pd;

          /* Enqueue the descriptor.  */
          do
            pd->nextevent = __nptl_last_event;
          while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
                                                       pd, pd->nextevent));

          /* Now call the function which signals the event.  */
          __nptl_create_event ();
        }
    }
  else
    retval = create_thread (pd, iattr, false, STACK_VARIABLES_ARGS,
                            &thread_ran);
  if (__glibc_unlikely (retval != 0))
    {
      /* If thread creation "failed", that might mean that the thread got
         created and ran a little--short of running user code--but then
         create_thread cancelled it.  In that case, the thread will do all
         its own cleanup just like a normal thread exit after a successful
         creation would do.  */

      if (thread_ran)
        assert (pd->stopped_start);
      else
        {
          /* Oops, we lied for a second.  */
          atomic_decrement (&__nptl_nthreads);

          /* Perhaps a thread wants to change the IDs and is waiting for this
             stillborn thread.  */
          if (__glibc_unlikely (atomic_exchange_acq (&pd->setxid_futex, 0)
                                == -2))
            lll_futex_wake (&pd->setxid_futex, 1, LLL_PRIVATE);

          /* Free the resources.  */
          __deallocate_stack (pd);
        }

      /* We have to translate error codes.  */
      if (retval == ENOMEM)
        retval = EAGAIN;
    }
  else
    {
      if (pd->stopped_start)
        /* The thread blocked on this lock either because we're doing TD_CREATE
           event reporting, or for some other reason that create_thread chose.
           Now let it run free.  */
        lll_unlock (pd->lock, LLL_PRIVATE);

      /* We now have for sure more than one thread.  The main thread might
         not yet have the flag set.  No need to set the global variable
         again if this is what we use.  */
      THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);
    }
 out:
  if (__glibc_unlikely (free_cpuset))
    free (default_attr.cpuset);

  return retval;
}
versioned_symbol (libpthread, __pthread_create_2_1, pthread_create, GLIBC_2_1);
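
/* Caller-side sketch (illustrative, not part of this file): the error
   translation above is why applications see EAGAIN rather than ENOMEM
   when thread or stack allocation fails:

     pthread_t t;
     int err = pthread_create (&t, NULL, worker, arg);
     if (err == EAGAIN)
       ...no memory for a stack or too many threads; retry or fail...
*/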
#if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
int
__pthread_create_2_0 (newthread, attr, start_routine, arg)
     pthread_t *newthread;
     const pthread_attr_t *attr;
     void *(*start_routine) (void *);
     void *arg;
{
  /* The ATTR attribute is not really of type `pthread_attr_t *'.  It has
     the old size and access to the new members might crash the program.
     We convert the struct now.  */
  struct pthread_attr new_attr;

  if (attr != NULL)
    {
      struct pthread_attr *iattr = (struct pthread_attr *) attr;
      size_t ps = __getpagesize ();
      /* Copy values from the user-provided attributes.  */
      new_attr.schedparam = iattr->schedparam;
      new_attr.schedpolicy = iattr->schedpolicy;
      new_attr.flags = iattr->flags;

      /* Fill in default values for the fields not present in the old
         implementation.  */
      new_attr.guardsize = ps;
      new_attr.stackaddr = NULL;
      new_attr.stacksize = 0;
      new_attr.cpuset = NULL;
      new_attr.cpusetsize = 0;

      /* We will pass this value on to the real implementation.  */
      attr = (pthread_attr_t *) &new_attr;
    }

  return __pthread_create_2_1 (newthread, attr, start_routine, arg);
}
compat_symbol (libpthread, __pthread_create_2_0, pthread_create,
               GLIBC_2_0);
#endif
/* Information for libthread_db.  */
#include "../nptl_db/db_info.c"

/* If pthread_create is present, libgcc_eh.a and libsupc++.a expect some
   other POSIX thread functions to be present as well.  */
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_lock)
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_trylock)
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_unlock)

PTHREAD_STATIC_FN_REQUIRE (pthread_once)
PTHREAD_STATIC_FN_REQUIRE (pthread_cancel)

PTHREAD_STATIC_FN_REQUIRE (pthread_key_create)
PTHREAD_STATIC_FN_REQUIRE (pthread_key_delete)
PTHREAD_STATIC_FN_REQUIRE (pthread_setspecific)
PTHREAD_STATIC_FN_REQUIRE (pthread_getspecific)