/* uclibc-ng: libpthread/nptl/pthread_create.c */
/* Copyright (C) 2002-2007,2008,2009 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include "pthreadP.h"
#include <hp-timing.h>
#include <ldsodefs.h>
#include <atomic.h>
#include <resolv.h>
#include <bits/kernel-features.h>
/* Local function to start thread and handle cleanup.  */
static int start_thread (void *arg);
/* Nonzero if debugging mode is enabled.  */
int __pthread_debug;
/* Globally enabled events.  */
static td_thr_events_t __nptl_threads_events __attribute_used__;

/* Pointer to descriptor with the last event.  */
static struct pthread *__nptl_last_event __attribute_used__;

/* Number of threads running.  */
unsigned int __nptl_nthreads = 1;

/* Code to allocate and deallocate a stack.  */
#include "allocatestack.c"

/* Code to create the thread.  */
#include <createthread.c>
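/* Look up PD on the lists of used stacks (both the dynamically
   allocated stacks and the user-provided ones); return PD if the
   descriptor is still listed there, NULL otherwise.  */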
struct pthread *
internal_function
__find_in_stack_list (
     struct pthread *pd)
{
  list_t *entry;
  struct pthread *result = NULL;

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  list_for_each (entry, &stack_used)
    {
      struct pthread *curp;

      curp = list_entry (entry, struct pthread, list);
      if (curp == pd)
        {
          result = curp;
          break;
        }
    }

  if (result == NULL)
    list_for_each (entry, &__stack_user)
      {
        struct pthread *curp;

        curp = list_entry (entry, struct pthread, list);
        if (curp == pd)
          {
            result = curp;
            break;
          }
      }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  return result;
}
/* Deallocate POSIX thread-local-storage.  */
void
attribute_hidden
__nptl_deallocate_tsd (void)
{
  struct pthread *self = THREAD_SELF;

  /* Maybe no data was ever allocated.  This happens often so we have
     a flag for this.  */
  if (THREAD_GETMEM (self, specific_used))
    {
      size_t round;
      size_t cnt;

      round = 0;
      do
        {
          size_t idx;

          /* So far no new nonzero data entry.  */
          THREAD_SETMEM (self, specific_used, false);

          for (cnt = idx = 0; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
            {
              struct pthread_key_data *level2;

              level2 = THREAD_GETMEM_NC (self, specific, cnt);

              if (level2 != NULL)
                {
                  size_t inner;

                  for (inner = 0; inner < PTHREAD_KEY_2NDLEVEL_SIZE;
                       ++inner, ++idx)
                    {
                      void *data = level2[inner].data;

                      if (data != NULL)
                        {
                          /* Always clear the data.  */
                          level2[inner].data = NULL;

                          /* Make sure the data corresponds to a valid
                             key.  This test fails if the key was
                             deallocated and also if it was
                             re-allocated.  It is the user's
                             responsibility to free the memory in this
                             case.  */
                          if (level2[inner].seq
                              == __pthread_keys[idx].seq
                              /* It is not necessary to register a destructor
                                 function.  */
                              && __pthread_keys[idx].destr != NULL)
                            /* Call the user-provided destructor.  */
                            __pthread_keys[idx].destr (data);
                        }
                    }
                }
              else
                idx += PTHREAD_KEY_1STLEVEL_SIZE;
            }

          if (THREAD_GETMEM (self, specific_used) == 0)
            /* No data has been modified.  */
            goto just_free;
        }
      /* We only repeat the process a fixed number of times.  */
      while (__builtin_expect (++round < PTHREAD_DESTRUCTOR_ITERATIONS, 0));

      /* Just clear the memory of the first block for reuse.  */
      memset (&THREAD_SELF->specific_1stblock, '\0',
              sizeof (self->specific_1stblock));

    just_free:
      /* Free the memory for the other blocks.  */
      for (cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
        {
          struct pthread_key_data *level2;

          level2 = THREAD_GETMEM_NC (self, specific, cnt);
          if (level2 != NULL)
            {
              /* The first block is allocated as part of the thread
                 descriptor.  */
              free (level2);
              THREAD_SETMEM_NC (self, specific, cnt, NULL);
            }
        }

      THREAD_SETMEM (self, specific_used, false);
    }
}
/* Deallocate a thread's stack after optionally making sure the thread
   descriptor is still valid.  */
void
internal_function
__free_tcb (struct pthread *pd)
{
  /* The thread is exiting now.  */
  if (__builtin_expect (atomic_bit_test_set (&pd->cancelhandling,
                                             TERMINATED_BIT) == 0, 1))
    {
      /* Remove the descriptor from the list.  */
      if (DEBUGGING_P && __find_in_stack_list (pd) == NULL)
        /* Something is really wrong.  The descriptor for a still
           running thread is gone.  */
        abort ();

      /* Free TPP data.  */
      if (__builtin_expect (pd->tpp != NULL, 0))
        {
          struct priority_protection_data *tpp = pd->tpp;

          pd->tpp = NULL;
          free (tpp);
        }

      /* Queue the stack memory block for reuse and exit the process.  The
         kernel will signal via writing to the address returned by
         QUEUE-STACK when the stack is available.  */
      __deallocate_stack (pd);
    }
}
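/* Entry point for the new thread: this is the function handed to the
   clone-based create_thread code.  It runs on the new thread's stack,
   invokes the user's start routine, and performs all thread-exit
   cleanup before terminating the kernel thread.  */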
static int
start_thread (void *arg)
{
  struct pthread *pd = (struct pthread *) arg;

#if HP_TIMING_AVAIL
  /* Remember the time when the thread was started.  */
  hp_timing_t now;
  HP_TIMING_NOW (now);
  THREAD_SETMEM (pd, cpuclock_offset, now);
#endif
#if defined __UCLIBC_HAS_RESOLVER_SUPPORT__
  /* Initialize resolver state pointer.  */
  __resp = &pd->res;
#endif
#ifdef __NR_set_robust_list
# ifndef __ASSUME_SET_ROBUST_LIST
  if (__set_robust_list_avail >= 0)
# endif
    {
      INTERNAL_SYSCALL_DECL (err);
      /* This call should never fail because the initial call in init.c
         succeeded.  */
      INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
                        sizeof (struct robust_list_head));
    }
#endif
  /* If the parent was running cancellation handlers while creating
     the thread the new thread inherited the signal mask.  Reset the
     cancellation signal mask.  */
  if (__builtin_expect (pd->parent_cancelhandling & CANCELING_BITMASK, 0))
    {
      INTERNAL_SYSCALL_DECL (err);
      sigset_t mask;
      __sigemptyset (&mask);
      __sigaddset (&mask, SIGCANCEL);
      (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &mask,
                               NULL, _NSIG / 8);
    }
  /* This is where the try/finally block should be created.  For
     compilers without that support we do use setjmp.  */
  struct pthread_unwind_buf unwind_buf;

  /* No previous handlers.  */
  unwind_buf.priv.data.prev = NULL;
  unwind_buf.priv.data.cleanup = NULL;

  int not_first_call;
  not_first_call = setjmp ((struct __jmp_buf_tag *) unwind_buf.cancel_jmp_buf);
  if (__builtin_expect (! not_first_call, 1))
    {
      /* Store the new cleanup handler info.  */
      THREAD_SETMEM (pd, cleanup_jmp_buf, &unwind_buf);

      if (__builtin_expect (pd->stopped_start, 0))
        {
          int oldtype = CANCEL_ASYNC ();

          /* Get the lock the parent locked to force synchronization.  */
          lll_lock (pd->lock, LLL_PRIVATE);
          /* And give it up right away.  */
          lll_unlock (pd->lock, LLL_PRIVATE);

          CANCEL_RESET (oldtype);
        }

      /* Run the code the user provided.  */
#ifdef CALL_THREAD_FCT
      THREAD_SETMEM (pd, result, CALL_THREAD_FCT (pd));
#else
      THREAD_SETMEM (pd, result, pd->start_routine (pd->arg));
#endif
    }
  /* Run the destructor for the thread-local data.  */
  __nptl_deallocate_tsd ();

  /* Clean up any state libc stored in thread-local variables.  */
  /* disable for now
  __libc_thread_freeres ();
  */

  /* If this is the last thread we terminate the process now.  We
     do not notify the debugger, it might just irritate it if there
     is no thread left.  */
  if (__builtin_expect (atomic_decrement_and_test (&__nptl_nthreads), 0))
    /* This was the last thread.  */
    exit (0);
  /* Report the death of the thread if this is wanted.  */
  if (__builtin_expect (pd->report_events, 0))
    {
      /* See whether TD_DEATH is in any of the masks.  */
      const int idx = __td_eventword (TD_DEATH);
      const uint32_t mask = __td_eventmask (TD_DEATH);

      if ((mask & (__nptl_threads_events.event_bits[idx]
                   | pd->eventbuf.eventmask.event_bits[idx])) != 0)
        {
          /* Yep, we have to signal the death.  Add the descriptor to
             the list but only if it is not already on it.  */
          if (pd->nextevent == NULL)
            {
              pd->eventbuf.eventnum = TD_DEATH;
              pd->eventbuf.eventdata = pd;

              do
                pd->nextevent = __nptl_last_event;
              while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
                                                           pd, pd->nextevent));
            }

          /* Now call the function to signal the event.  */
          __nptl_death_event ();
        }
    }

  /* The thread is exiting now.  Don't set this bit until after we've hit
     the event-reporting breakpoint, so that td_thr_get_info on us while at
     the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE.  */
  atomic_bit_set (&pd->cancelhandling, EXITING_BIT);
#ifndef __ASSUME_SET_ROBUST_LIST
  /* If this thread has any robust mutexes locked, handle them now.  */
# if __WORDSIZE == 64
  void *robust = pd->robust_head.list;
# else
  __pthread_slist_t *robust = pd->robust_list.__next;
# endif
  /* We let the kernel do the notification if it is able to do so.
     If we have to do it here, there are for sure no PI mutexes involved,
     since the kernel support for them is even more recent.  */
  if (__set_robust_list_avail < 0
      && __builtin_expect (robust != (void *) &pd->robust_head, 0))
    {
      do
        {
          struct __pthread_mutex_s *this = (struct __pthread_mutex_s *)
            ((char *) robust - offsetof (struct __pthread_mutex_s,
                                         __list.__next));
          robust = *((void **) robust);

# ifdef __PTHREAD_MUTEX_HAVE_PREV
          this->__list.__prev = NULL;
# endif
          this->__list.__next = NULL;

          lll_robust_dead (this->__lock, /* XYZ */ LLL_SHARED);
        }
      while (robust != (void *) &pd->robust_head);
    }
#endif
  /* Mark the memory of the stack as usable to the kernel.  We free
     everything except for the space used for the TCB itself.  */
  size_t pagesize_m1 = __getpagesize () - 1;
  char *sp = CURRENT_STACK_FRAME;
#ifdef _STACK_GROWS_DOWN
  size_t freesize = (sp - (char *) pd->stackblock) & ~pagesize_m1;
#else
  size_t freesize = ((char *) pd->stackblock - sp) & ~pagesize_m1;
#endif
  assert (freesize < pd->stackblock_size);
  if (freesize > PTHREAD_STACK_MIN)
    madvise (pd->stackblock, freesize - PTHREAD_STACK_MIN, MADV_DONTNEED);
  /* If the thread is detached free the TCB.  */
  if (IS_DETACHED (pd))
    /* Free the TCB.  */
    __free_tcb (pd);
  else if (__builtin_expect (pd->cancelhandling & SETXID_BITMASK, 0))
    {
      /* Some other thread might call any of the setXid functions and expect
         us to reply.  In this case wait until we did that.  */
      do
        lll_futex_wait (&pd->setxid_futex, 0, LLL_PRIVATE);
      while (pd->cancelhandling & SETXID_BITMASK);

      /* Reset the value so that the stack can be reused.  */
      pd->setxid_futex = 0;
    }
  /* We cannot call '_exit' here.  '_exit' will terminate the process.

     The 'exit' implementation in the kernel will signal when the
     process is really dead since 'clone' got passed the CLONE_CLEARTID
     flag.  The 'tid' field in the TCB will be set to zero.

     The exit code is zero since in case all threads exit by calling
     'pthread_exit' the exit status must be 0 (zero).  */
  __exit_thread_inline (0);

  /* NOTREACHED */
  return 0;
}
/* Default thread attributes for the case when the user does not
   provide any.  */
static const struct pthread_attr default_attr =
  {
    /* Just some value > 0 which gets rounded to the nearest page size.  */
    .guardsize = 1,
  };
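/* Create a new thread.  This is the implementation behind the public
   pthread_create entry point (see the weak_alias below): it allocates
   the stack and thread descriptor, fills in the descriptor, and asks
   create_thread to clone the new kernel thread.  */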
int
__pthread_create_2_1 (
     pthread_t *newthread,
     const pthread_attr_t *attr,
     void *(*start_routine) (void *),
     void *arg)
{
  STACK_VARIABLES;

  const struct pthread_attr *iattr = (struct pthread_attr *) attr;
  if (iattr == NULL)
    /* Is this the best idea?  On NUMA machines this could mean
       accessing far-away memory.  */
    iattr = &default_attr;

  struct pthread *pd = NULL;
  int err = ALLOCATE_STACK (iattr, &pd);
  if (__builtin_expect (err != 0, 0))
    /* Something went wrong.  Maybe a parameter of the attributes is
       invalid or we could not allocate memory.  */
    return err;
  /* Initialize the TCB.  All initializations with zero should be
     performed in 'get_cached_stack'.  This way we avoid doing this if
     the stack is freshly allocated with 'mmap'.  */

#ifdef TLS_TCB_AT_TP
  /* Reference to the TCB itself.  */
  pd->header.self = pd;

  /* Self-reference for TLS.  */
  pd->header.tcb = pd;
#endif

  /* Store the address of the start routine and the parameter.  Since
     we do not start the function directly the stillborn thread will
     get the information from its thread descriptor.  */
  pd->start_routine = start_routine;
  pd->arg = arg;

  /* Copy the thread attribute flags.  */
  struct pthread *self = THREAD_SELF;
  pd->flags = ((iattr->flags & ~(ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
               | (self->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)));

  /* Initialize the field for the ID of the thread which is waiting
     for us.  This is a self-reference in case the thread is created
     detached.  */
  pd->joinid = iattr->flags & ATTR_FLAG_DETACHSTATE ? pd : NULL;

  /* The debug events are inherited from the parent.  */
  pd->eventbuf = self->eventbuf;

  /* Copy the parent's scheduling parameters.  The flags will say what
     is valid and what is not.  */
  pd->schedpolicy = self->schedpolicy;
  pd->schedparam = self->schedparam;

  /* Copy the stack guard canary.  */
#ifdef THREAD_COPY_STACK_GUARD
  THREAD_COPY_STACK_GUARD (pd);
#endif

  /* Copy the pointer guard value.  */
#ifdef THREAD_COPY_POINTER_GUARD
  THREAD_COPY_POINTER_GUARD (pd);
#endif
  /* Determine scheduling parameters for the thread.  */
  if (attr != NULL
      && __builtin_expect ((iattr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0, 0)
      && (iattr->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)) != 0)
    {
      INTERNAL_SYSCALL_DECL (scerr);

      /* Use the scheduling parameters the user provided.  */
      if (iattr->flags & ATTR_FLAG_POLICY_SET)
        pd->schedpolicy = iattr->schedpolicy;
      else if ((pd->flags & ATTR_FLAG_POLICY_SET) == 0)
        {
          pd->schedpolicy = INTERNAL_SYSCALL (sched_getscheduler, scerr, 1, 0);
          pd->flags |= ATTR_FLAG_POLICY_SET;
        }

      if (iattr->flags & ATTR_FLAG_SCHED_SET)
        memcpy (&pd->schedparam, &iattr->schedparam,
                sizeof (struct sched_param));
      else if ((pd->flags & ATTR_FLAG_SCHED_SET) == 0)
        {
          INTERNAL_SYSCALL (sched_getparam, scerr, 2, 0, &pd->schedparam);
          pd->flags |= ATTR_FLAG_SCHED_SET;
        }

      /* Check for valid priorities.  */
      int minprio = INTERNAL_SYSCALL (sched_get_priority_min, scerr, 1,
                                      iattr->schedpolicy);
      int maxprio = INTERNAL_SYSCALL (sched_get_priority_max, scerr, 1,
                                      iattr->schedpolicy);
      if (pd->schedparam.sched_priority < minprio
          || pd->schedparam.sched_priority > maxprio)
        {
          err = EINVAL;
          goto errout;
        }
    }
  /* Pass the descriptor to the caller.  */
  *newthread = (pthread_t) pd;

  /* Remember whether the thread is detached or not.  In case of an
     error we have to free the stacks of non-detached stillborn
     threads.  */
  bool is_detached = IS_DETACHED (pd);

  /* Start the thread.  */
  err = create_thread (pd, iattr, STACK_VARIABLES_ARGS);
  if (err != 0)
    {
      /* Something went wrong.  Free the resources.  */
      if (!is_detached)
        {
        errout:
          __deallocate_stack (pd);
        }
      return err;
    }

  return 0;
}
weak_alias(__pthread_create_2_1, pthread_create)
/* Information for libthread_db.  */
#include "../nptl_db/db_info.c"

/* If pthread_create is present, libgcc_eh.a and libsupc++.a expect some
   other POSIX thread functions to be present as well.  */
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_lock)
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_trylock)
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_unlock)

PTHREAD_STATIC_FN_REQUIRE (pthread_once)
PTHREAD_STATIC_FN_REQUIRE (pthread_cancel)

PTHREAD_STATIC_FN_REQUIRE (pthread_key_create)
PTHREAD_STATIC_FN_REQUIRE (pthread_key_delete)
PTHREAD_STATIC_FN_REQUIRE (pthread_setspecific)
PTHREAD_STATIC_FN_REQUIRE (pthread_getspecific)

/* The UCLIBC_MUTEX_xxx macros expect these to be present as well.  */
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_init)
PTHREAD_STATIC_FN_REQUIRE (_pthread_cleanup_push_defer)
PTHREAD_STATIC_FN_REQUIRE (_pthread_cleanup_pop_restore)