/* Copyright (C) 2002-2013 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <ctype.h>
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include "pthreadP.h"
#include <hp-timing.h>
#include <ldsodefs.h>
#include <atomic.h>
#include <libc-internal.h>
#include <resolv.h>
#include <kernel-features.h>

#include <shlib-compat.h>

#include <stap-probe.h>
/* Local function to start thread and handle cleanup.  */
static int start_thread (void *arg);
/* Nonzero if debugging mode is enabled.  */
int __pthread_debug;
/* Globally enabled events.  */
static td_thr_events_t __nptl_threads_events __attribute_used__;

/* Pointer to descriptor with the last event.  */
static struct pthread *__nptl_last_event __attribute_used__;

/* Number of threads running.  */
unsigned int __nptl_nthreads = 1;
/* Code to allocate and deallocate a stack.  */
#include "allocatestack.c"

/* Code to create the thread.  */
#include <createthread.c>
struct pthread *
internal_function
__find_in_stack_list (pd)
     struct pthread *pd;
{
  list_t *entry;
  struct pthread *result = NULL;

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  list_for_each (entry, &stack_used)
    {
      struct pthread *curp;

      curp = list_entry (entry, struct pthread, list);
      if (curp == pd)
        {
          result = curp;
          break;
        }
    }

  if (result == NULL)
    list_for_each (entry, &__stack_user)
      {
        struct pthread *curp;

        curp = list_entry (entry, struct pthread, list);
        if (curp == pd)
          {
            result = curp;
            break;
          }
      }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  return result;
}
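/* list_entry above is the classic intrusive-list idiom: it recovers the
   enclosing descriptor from a pointer to the embedded `list' member.  A
   freestanding sketch of the same computation, using hypothetical names
   (struct widget, widget_entry); kept out of the build with #if 0.  */
#if 0
#include <assert.h>
#include <stddef.h>

struct node { struct node *next; };

struct widget
{
  int id;
  struct node link;		/* embedded list member, like `list' above */
};

/* Equivalent of list_entry (PTR, struct widget, link).  */
#define widget_entry(ptr) \
  ((struct widget *) ((char *) (ptr) - offsetof (struct widget, link)))

int
main (void)
{
  struct widget w = { .id = 7 };
  assert (widget_entry (&w.link)->id == 7);
  return 0;
}
#endif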
/* Deallocate POSIX thread-local-storage.  */
void
attribute_hidden
__nptl_deallocate_tsd (void)
{
  struct pthread *self = THREAD_SELF;

  /* Maybe no data was ever allocated.  This happens often so we have
     a flag for this.  */
  if (THREAD_GETMEM (self, specific_used))
    {
      size_t round;
      size_t cnt;

      round = 0;
      do
        {
          size_t idx;

          /* So far no new nonzero data entry.  */
          THREAD_SETMEM (self, specific_used, false);

          for (cnt = idx = 0; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
            {
              struct pthread_key_data *level2;

              level2 = THREAD_GETMEM_NC (self, specific, cnt);

              if (level2 != NULL)
                {
                  size_t inner;

                  for (inner = 0; inner < PTHREAD_KEY_2NDLEVEL_SIZE;
                       ++inner, ++idx)
                    {
                      void *data = level2[inner].data;

                      if (data != NULL)
                        {
                          /* Always clear the data.  */
                          level2[inner].data = NULL;

                          /* Make sure the data corresponds to a valid
                             key.  This test fails if the key was
                             deallocated and also if it was
                             re-allocated.  It is the user's
                             responsibility to free the memory in this
                             case.  */
                          if (level2[inner].seq
                              == __pthread_keys[idx].seq
                              /* It is not necessary to register a destructor
                                 function.  */
                              && __pthread_keys[idx].destr != NULL)
                            /* Call the user-provided destructor.  */
                            __pthread_keys[idx].destr (data);
                        }
                    }
                }
              else
                idx += PTHREAD_KEY_2NDLEVEL_SIZE;
            }

          if (THREAD_GETMEM (self, specific_used) == 0)
            /* No data has been modified.  */
            goto just_free;
        }
      /* We only repeat the process a fixed number of times.  */
      while (__builtin_expect (++round < PTHREAD_DESTRUCTOR_ITERATIONS, 0));

      /* Just clear the memory of the first block for reuse.  */
      memset (&THREAD_SELF->specific_1stblock, '\0',
              sizeof (self->specific_1stblock));

    just_free:
      /* Free the memory for the other blocks.  */
      for (cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
        {
          struct pthread_key_data *level2;

          level2 = THREAD_GETMEM_NC (self, specific, cnt);
          if (level2 != NULL)
            {
              /* The first block is allocated as part of the thread
                 descriptor.  */
              free (level2);
              THREAD_SETMEM_NC (self, specific, cnt, NULL);
            }
        }

      THREAD_SETMEM (self, specific_used, false);
    }
}
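/* A user-level sketch of the destructor protocol implemented above,
   assuming only standard <pthread.h>: a destructor that stores fresh
   data into its own key marks the slot used again and forces another
   round, and the loop above bounds the total at
   PTHREAD_DESTRUCTOR_ITERATIONS rounds.  Kept out of the build with
   #if 0.  */
#if 0
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_key_t key;

static void
destructor (void *data)
{
  int *round = data;
  printf ("destructor round %d\n", *round);
  if (++*round < 3)
    /* Re-storing a value triggers one more destructor round, up to
       PTHREAD_DESTRUCTOR_ITERATIONS in total.  */
    pthread_setspecific (key, round);
  else
    free (round);
}

static void *
thread_func (void *arg)
{
  int *round = calloc (1, sizeof *round);
  pthread_setspecific (key, round);
  return NULL;
}

int
main (void)
{
  pthread_t th;
  pthread_key_create (&key, destructor);
  pthread_create (&th, NULL, thread_func, NULL);
  pthread_join (th, NULL);
  return 0;
}
#endif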
/* Deallocate a thread's stack after optionally making sure the thread
   descriptor is still valid.  */
void
internal_function
__free_tcb (struct pthread *pd)
{
  /* The thread is exiting now.  */
  if (__builtin_expect (atomic_bit_test_set (&pd->cancelhandling,
                                             TERMINATED_BIT) == 0, 1))
    {
      /* Remove the descriptor from the list.  */
      if (DEBUGGING_P && __find_in_stack_list (pd) == NULL)
        /* Something is really wrong.  The descriptor for a still
           running thread is gone.  */
        abort ();

      /* Free TPP data.  */
      if (__builtin_expect (pd->tpp != NULL, 0))
        {
          struct priority_protection_data *tpp = pd->tpp;

          pd->tpp = NULL;
          free (tpp);
        }

      /* Queue the stack memory block for reuse and exit the process.  The
         kernel will signal via writing to the address returned by
         QUEUE-STACK when the stack is available.  */
      __deallocate_stack (pd);
    }
}
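/* The TERMINATED_BIT test-and-set above makes the TCB free-once even if
   the exiting thread and a joiner race into __free_tcb.  A portable
   sketch of the same claim-once pattern, with hypothetical names, using
   C11 atomics; kept out of the build with #if 0.  */
#if 0
#include <stdatomic.h>

#define TERMINATED 0x01

static atomic_int state;

/* Returns nonzero for exactly one caller; everyone else sees the bit
   already set, mirroring atomic_bit_test_set above.  */
static int
claim_termination (void)
{
  return (atomic_fetch_or (&state, TERMINATED) & TERMINATED) == 0;
}
#endif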
static int
start_thread (void *arg)
{
  struct pthread *pd = (struct pthread *) arg;

#if HP_TIMING_AVAIL
  /* Remember the time when the thread was started.  */
  hp_timing_t now;
  HP_TIMING_NOW (now);
  THREAD_SETMEM (pd, cpuclock_offset, now);
#endif

  /* Initialize resolver state pointer.  */
  __resp = &pd->res;

  /* Initialize pointers to locale data.  */
  __ctype_init ();
  /* Allow setxid from now onwards.  */
  if (__builtin_expect (atomic_exchange_acq (&pd->setxid_futex, 0) == -2, 0))
    lll_futex_wake (&pd->setxid_futex, 1, LLL_PRIVATE);
#ifdef __NR_set_robust_list
# ifndef __ASSUME_SET_ROBUST_LIST
  if (__set_robust_list_avail >= 0)
# endif
    {
      INTERNAL_SYSCALL_DECL (err);
      /* This call should never fail because the initial call in init.c
         succeeded.  */
      INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
                        sizeof (struct robust_list_head));
    }
#endif
  /* If the parent was running cancellation handlers while creating
     the thread, the new thread inherited the signal mask.  Reset the
     cancellation signal mask.  */
  if (__builtin_expect (pd->parent_cancelhandling & CANCELING_BITMASK, 0))
    {
      INTERNAL_SYSCALL_DECL (err);
      sigset_t mask;
      __sigemptyset (&mask);
      __sigaddset (&mask, SIGCANCEL);
      (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &mask,
                               NULL, _NSIG / 8);
    }
  /* This is where the try/finally block should be created.  For
     compilers without that support we do use setjmp.  */
  struct pthread_unwind_buf unwind_buf;

  /* No previous handlers.  */
  unwind_buf.priv.data.prev = NULL;
  unwind_buf.priv.data.cleanup = NULL;

  int not_first_call;
  not_first_call = setjmp ((struct __jmp_buf_tag *) unwind_buf.cancel_jmp_buf);
  if (__builtin_expect (! not_first_call, 1))
    {
      /* Store the new cleanup handler info.  */
      THREAD_SETMEM (pd, cleanup_jmp_buf, &unwind_buf);
      if (__builtin_expect (pd->stopped_start, 0))
        {
          int oldtype = CANCEL_ASYNC ();

          /* Get the lock the parent locked to force synchronization.  */
          lll_lock (pd->lock, LLL_PRIVATE);
          /* And give it up right away.  */
          lll_unlock (pd->lock, LLL_PRIVATE);

          CANCEL_RESET (oldtype);
        }
      LIBC_PROBE (pthread_start, 3, (pthread_t) pd, pd->start_routine,
                  pd->arg);
      /* Run the code the user provided.  */
#ifdef CALL_THREAD_FCT
      THREAD_SETMEM (pd, result, CALL_THREAD_FCT (pd));
#else
      THREAD_SETMEM (pd, result, pd->start_routine (pd->arg));
#endif
    }
  /* Call destructors for the thread_local TLS variables.  */
  __call_tls_dtors ();

  /* Run the destructor for the thread-local data.  */
  __nptl_deallocate_tsd ();

  /* Clean up any state libc stored in thread-local variables.  */
  __libc_thread_freeres ();
  /* If this is the last thread we terminate the process now.  We
     do not notify the debugger, it might just irritate it if there
     is no thread left.  */
  if (__builtin_expect (atomic_decrement_and_test (&__nptl_nthreads), 0))
    /* This was the last thread.  */
    exit (0);
  /* Report the death of the thread if this is wanted.  */
  if (__builtin_expect (pd->report_events, 0))
    {
      /* See whether TD_DEATH is in any of the masks.  */
      const int idx = __td_eventword (TD_DEATH);
      const uint32_t mask = __td_eventmask (TD_DEATH);

      if ((mask & (__nptl_threads_events.event_bits[idx]
                   | pd->eventbuf.eventmask.event_bits[idx])) != 0)
        {
          /* Yep, we have to signal the death.  Add the descriptor to
             the list but only if it is not already on it.  */
          if (pd->nextevent == NULL)
            {
              pd->eventbuf.eventnum = TD_DEATH;
              pd->eventbuf.eventdata = pd;

              do
                pd->nextevent = __nptl_last_event;
              while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
                                                           pd,
                                                           pd->nextevent));
            }

          /* Now call the function to signal the event.  */
          __nptl_death_event ();
        }
    }
  /* The thread is exiting now.  Don't set this bit until after we've hit
     the event-reporting breakpoint, so that td_thr_get_info on us while at
     the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE.  */
  atomic_bit_set (&pd->cancelhandling, EXITING_BIT);
#ifndef __ASSUME_SET_ROBUST_LIST
  /* If this thread has any robust mutexes locked, handle them now.  */
# ifdef __PTHREAD_MUTEX_HAVE_PREV
  void *robust = pd->robust_head.list;
# else
  __pthread_slist_t *robust = pd->robust_list.__next;
# endif
  /* We let the kernel do the notification if it is able to do so.
     If we have to do it here, there are certainly no PI mutexes involved,
     since the kernel support for them is even more recent.  */
  if (__set_robust_list_avail < 0
      && __builtin_expect (robust != (void *) &pd->robust_head, 0))
    {
      do
        {
          struct __pthread_mutex_s *this = (struct __pthread_mutex_s *)
            ((char *) robust - offsetof (struct __pthread_mutex_s,
                                         __list.__next));
          robust = *((void **) robust);

# ifdef __PTHREAD_MUTEX_HAVE_PREV
          this->__list.__prev = NULL;
# endif
          this->__list.__next = NULL;

          lll_robust_dead (this->__lock, /* XYZ */ LLL_SHARED);
        }
      while (robust != (void *) &pd->robust_head);
    }
#endif
  /* Mark the memory of the stack as usable to the kernel.  We free
     everything except for the space used for the TCB itself.  */
  size_t pagesize_m1 = __getpagesize () - 1;
#ifdef _STACK_GROWS_DOWN
  char *sp = CURRENT_STACK_FRAME;
  size_t freesize = (sp - (char *) pd->stackblock) & ~pagesize_m1;
#else
# error "to do"
#endif
  assert (freesize < pd->stackblock_size);
  if (freesize > PTHREAD_STACK_MIN)
    __madvise (pd->stackblock, freesize - PTHREAD_STACK_MIN, MADV_DONTNEED);
  /* If the thread is detached free the TCB.  */
  if (IS_DETACHED (pd))
    /* Free the TCB.  */
    __free_tcb (pd);
  else if (__builtin_expect (pd->cancelhandling & SETXID_BITMASK, 0))
    {
      /* Some other thread might call any of the setXid functions and expect
         us to reply.  In this case wait until we did that.  */
      do
        lll_futex_wait (&pd->setxid_futex, 0, LLL_PRIVATE);
      while (pd->cancelhandling & SETXID_BITMASK);

      /* Reset the value so that the stack can be reused.  */
      pd->setxid_futex = 0;
    }
  /* We cannot call '_exit' here.  '_exit' will terminate the process.

     The 'exit' implementation in the kernel will signal when the
     process is really dead since 'clone' got passed the CLONE_CHILD_CLEARTID
     flag.  The 'tid' field in the TCB will be set to zero.

     The exit code is zero since in case all threads exit by calling
     'pthread_exit' the exit status must be 0 (zero).  */
  __exit_thread_inline (0);

  /* NOTREACHED */
  return 0;
}
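/* What the bookkeeping above means for a caller, as a sketch against
   standard <pthread.h>: the start routine's return value is what
   THREAD_SETMEM stored in pd->result, and it is what pthread_join later
   reports.  Kept out of the build with #if 0.  */
#if 0
#include <pthread.h>
#include <stdio.h>

static void *
worker (void *arg)
{
  return (void *) 42L;		/* lands in pd->result above */
}

int
main (void)
{
  pthread_t th;
  void *result;
  pthread_create (&th, NULL, worker, NULL);
  pthread_join (th, &result);
  printf ("worker returned %ld\n", (long) result);
  return 0;
}
#endif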
/* Default thread attributes for the case when the user does not
   provide any.  */
static const struct pthread_attr default_attr =
  {
    /* Just some value > 0 which gets rounded to the nearest page size.  */
    .guardsize = 1,
  };
int
__pthread_create_2_1 (newthread, attr, start_routine, arg)
     pthread_t *newthread;
     const pthread_attr_t *attr;
     void *(*start_routine) (void *);
     void *arg;
{
  STACK_VARIABLES;

  const struct pthread_attr *iattr = (struct pthread_attr *) attr;
  if (iattr == NULL)
    /* Is this the best idea?  On NUMA machines this could mean
       accessing far-away memory.  */
    iattr = &default_attr;
  struct pthread *pd = NULL;
  int err = ALLOCATE_STACK (iattr, &pd);
  if (__builtin_expect (err != 0, 0))
    /* Something went wrong.  Maybe a parameter of the attributes is
       invalid or we could not allocate memory.  Note we have to
       translate error codes.  */
    return err == ENOMEM ? EAGAIN : err;
  /* Initialize the TCB.  All initializations with zero should be
     performed in 'get_cached_stack'.  This way we avoid doing this if
     the stack is freshly allocated with 'mmap'.  */

#ifdef TLS_TCB_AT_TP
  /* Reference to the TCB itself.  */
  pd->header.self = pd;

  /* Self-reference for TLS.  */
  pd->header.tcb = pd;
#endif
  /* Store the address of the start routine and the parameter.  Since
     we do not start the function directly the stillborn thread will
     get the information from its thread descriptor.  */
  pd->start_routine = start_routine;
  pd->arg = arg;
  /* Copy the thread attribute flags.  */
  struct pthread *self = THREAD_SELF;
  pd->flags = ((iattr->flags & ~(ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
               | (self->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)));
  /* Initialize the field for the ID of the thread which is waiting
     for us.  This is a self-reference in case the thread is created
     detached.  */
  pd->joinid = iattr->flags & ATTR_FLAG_DETACHSTATE ? pd : NULL;
  /* The debug events are inherited from the parent.  */
  pd->eventbuf = self->eventbuf;
  /* Copy the parent's scheduling parameters.  The flags will say what
     is valid and what is not.  */
  pd->schedpolicy = self->schedpolicy;
  pd->schedparam = self->schedparam;
  /* Copy the stack guard canary.  */
#ifdef THREAD_COPY_STACK_GUARD
  THREAD_COPY_STACK_GUARD (pd);
#endif

  /* Copy the pointer guard value.  */
#ifdef THREAD_COPY_POINTER_GUARD
  THREAD_COPY_POINTER_GUARD (pd);
#endif
  /* Determine scheduling parameters for the thread.  */
  if (attr != NULL
      && __builtin_expect ((iattr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0, 0)
      && (iattr->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)) != 0)
    {
      INTERNAL_SYSCALL_DECL (scerr);
      /* Use the scheduling parameters the user provided.  */
      if (iattr->flags & ATTR_FLAG_POLICY_SET)
        pd->schedpolicy = iattr->schedpolicy;
      else if ((pd->flags & ATTR_FLAG_POLICY_SET) == 0)
        {
          pd->schedpolicy = INTERNAL_SYSCALL (sched_getscheduler, scerr, 1, 0);
          pd->flags |= ATTR_FLAG_POLICY_SET;
        }
      if (iattr->flags & ATTR_FLAG_SCHED_SET)
        memcpy (&pd->schedparam, &iattr->schedparam,
                sizeof (struct sched_param));
      else if ((pd->flags & ATTR_FLAG_SCHED_SET) == 0)
        {
          INTERNAL_SYSCALL (sched_getparam, scerr, 2, 0, &pd->schedparam);
          pd->flags |= ATTR_FLAG_SCHED_SET;
        }
      /* Check for valid priorities.  */
      int minprio = INTERNAL_SYSCALL (sched_get_priority_min, scerr, 1,
                                      iattr->schedpolicy);
      int maxprio = INTERNAL_SYSCALL (sched_get_priority_max, scerr, 1,
                                      iattr->schedpolicy);
      if (pd->schedparam.sched_priority < minprio
          || pd->schedparam.sched_priority > maxprio)
        {
          /* Perhaps a thread wants to change the IDs and is waiting
             for this stillborn thread.  */
          if (__builtin_expect (atomic_exchange_acq (&pd->setxid_futex, 0)
                                == -2, 0))
            lll_futex_wake (&pd->setxid_futex, 1, LLL_PRIVATE);

          __deallocate_stack (pd);

          return EINVAL;
        }
    }
  /* Pass the descriptor to the caller.  */
  *newthread = (pthread_t) pd;
  LIBC_PROBE (pthread_create, 4, newthread, attr, start_routine, arg);
  /* Start the thread.  */
  return create_thread (pd, iattr, STACK_VARIABLES_ARGS);
}
versioned_symbol (libpthread, __pthread_create_2_1, pthread_create, GLIBC_2_1);
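/* A caller's view of the attribute handling above, as a sketch against
   standard <pthread.h>: a detached thread exercises the pd->joinid
   self-reference, and an explicit stack size flows through
   ALLOCATE_STACK.  Kept out of the build with #if 0.  */
#if 0
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void *
task (void *arg)
{
  puts ("detached thread running");
  return NULL;
}

int
main (void)
{
  pthread_t th;
  pthread_attr_t attr;
  pthread_attr_init (&attr);
  pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
  pthread_attr_setstacksize (&attr, 1 << 20);	/* 1 MiB */
  int err = pthread_create (&th, &attr, task, NULL);
  if (err != 0)
    /* Errors come back as positive error codes, e.g. EAGAIN.  */
    fprintf (stderr, "pthread_create failed: %d\n", err);
  pthread_attr_destroy (&attr);
  sleep (1);			/* crude: let the detached thread finish */
  return 0;
}
#endif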
#if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
int
__pthread_create_2_0 (newthread, attr, start_routine, arg)
     pthread_t *newthread;
     const pthread_attr_t *attr;
     void *(*start_routine) (void *);
     void *arg;
{
  /* The ATTR attribute is not really of type `pthread_attr_t *'.  It has
     the old size and access to the new members might crash the program.
     We convert the struct now.  */
  struct pthread_attr new_attr;

  if (attr != NULL)
    {
      struct pthread_attr *iattr = (struct pthread_attr *) attr;
      size_t ps = __getpagesize ();
      /* Copy values from the user-provided attributes.  */
      new_attr.schedparam = iattr->schedparam;
      new_attr.schedpolicy = iattr->schedpolicy;
      new_attr.flags = iattr->flags;
      /* Fill in default values for the fields not present in the old
         implementation.  */
      new_attr.guardsize = ps;
      new_attr.stackaddr = NULL;
      new_attr.stacksize = 0;
      new_attr.cpuset = NULL;
      /* We will pass this value on to the real implementation.  */
      attr = (pthread_attr_t *) &new_attr;
    }

  return __pthread_create_2_1 (newthread, attr, start_routine, arg);
}
compat_symbol (libpthread, __pthread_create_2_0, pthread_create,
               GLIBC_2_0);
#endif
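/* versioned_symbol and compat_symbol above publish both ABIs under one
   name.  A sketch of how an application could bind to a specific version
   with the GNU assembler's .symver directive; the version string is an
   assumption (it varies by architecture, and GLIBC_2.1 exists only on
   32-bit targets).  Kept out of the build with #if 0.  */
#if 0
#include <pthread.h>

/* Force references in this translation unit to the GLIBC_2.1 version.  */
__asm__ (".symver pthread_create, pthread_create@GLIBC_2.1");
#endif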
/* Information for libthread_db.  */

#include "../nptl_db/db_info.c"
/* If pthread_create is present, libgcc_eh.a and libsupc++.a expect some
   other POSIX thread functions to be present as well.  */
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_lock)
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_trylock)
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_unlock)

PTHREAD_STATIC_FN_REQUIRE (pthread_once)
PTHREAD_STATIC_FN_REQUIRE (pthread_cancel)

PTHREAD_STATIC_FN_REQUIRE (pthread_key_create)
PTHREAD_STATIC_FN_REQUIRE (pthread_key_delete)
PTHREAD_STATIC_FN_REQUIRE (pthread_setspecific)
PTHREAD_STATIC_FN_REQUIRE (pthread_getspecific)