/* Copyright (C) 2002-2007,2008,2009 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include "pthreadP.h"
#include <hp-timing.h>
#include <ldsodefs.h>
#include <atomic.h>
#include <libc-internal.h>
#include <resolv.h>
#include <kernel-features.h>

#include <shlib-compat.h>


/* Local function to start thread and handle cleanup.  */
static int start_thread (void *arg);


/* Nonzero if debugging mode is enabled.  */
int __pthread_debug;

/* Globally enabled events.  */
static td_thr_events_t __nptl_threads_events __attribute_used__;

/* Pointer to descriptor with the last event.  */
static struct pthread *__nptl_last_event __attribute_used__;

/* Number of threads running.  */
unsigned int __nptl_nthreads = 1;


/* Code to allocate and deallocate a stack.  */
#include "allocatestack.c"

/* Code to create the thread.  */
#include <createthread.c>
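

/* Search the lists of active thread descriptors (both NPTL-allocated
   stacks and user-provided stacks) for PD; return PD if it is found,
   NULL otherwise.  */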
struct pthread *
internal_function
__find_in_stack_list (pd)
     struct pthread *pd;
{
  list_t *entry;
  struct pthread *result = NULL;

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  list_for_each (entry, &stack_used)
    {
      struct pthread *curp;

      curp = list_entry (entry, struct pthread, list);
      if (curp == pd)
        {
          result = curp;
          break;
        }
    }

  if (result == NULL)
    list_for_each (entry, &__stack_user)
      {
        struct pthread *curp;

        curp = list_entry (entry, struct pthread, list);
        if (curp == pd)
          {
            result = curp;
            break;
          }
      }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  return result;
}
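

/* Note: POSIX requires that destructors for thread-specific data be
   re-run, up to PTHREAD_DESTRUCTOR_ITERATIONS rounds, as long as some
   destructor stores a fresh non-NULL value; the specific_used flag
   and the 'round' loop below implement that protocol over NPTL's
   two-level key table.  */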
/* Deallocate POSIX thread-local-storage.  */
void
attribute_hidden
__nptl_deallocate_tsd (void)
{
  struct pthread *self = THREAD_SELF;

  /* Maybe no data was ever allocated.  This happens often so we have
     a flag for this.  */
  if (THREAD_GETMEM (self, specific_used))
    {
      size_t round;
      size_t cnt;

      round = 0;
      do
        {
          size_t idx;

          /* So far no new nonzero data entry.  */
          THREAD_SETMEM (self, specific_used, false);

          for (cnt = idx = 0; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
            {
              struct pthread_key_data *level2;

              level2 = THREAD_GETMEM_NC (self, specific, cnt);

              if (level2 != NULL)
                {
                  size_t inner;

                  for (inner = 0; inner < PTHREAD_KEY_2NDLEVEL_SIZE;
                       ++inner, ++idx)
                    {
                      void *data = level2[inner].data;

                      if (data != NULL)
                        {
                          /* Always clear the data.  */
                          level2[inner].data = NULL;

                          /* Make sure the data corresponds to a valid
                             key.  This test fails if the key was
                             deallocated and also if it was
                             re-allocated.  It is the user's
                             responsibility to free the memory in this
                             case.  */
                          if (level2[inner].seq
                              == __pthread_keys[idx].seq
                              /* It is not necessary to register a destructor
                                 function.  */
                              && __pthread_keys[idx].destr != NULL)
                            /* Call the user-provided destructor.  */
                            __pthread_keys[idx].destr (data);
                        }
                    }
                }
              else
                /* Skip over the key indices covered by the missing
                   second-level block.  */
                idx += PTHREAD_KEY_2NDLEVEL_SIZE;
            }

          if (THREAD_GETMEM (self, specific_used) == 0)
            /* No data has been modified.  */
            goto just_free;
        }
      /* We only repeat the process a fixed number of times.  */
      while (__builtin_expect (++round < PTHREAD_DESTRUCTOR_ITERATIONS, 0));

      /* Just clear the memory of the first block for reuse.  */
      memset (&THREAD_SELF->specific_1stblock, '\0',
              sizeof (self->specific_1stblock));

    just_free:
      /* Free the memory for the other blocks.  */
      for (cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
        {
          struct pthread_key_data *level2;

          level2 = THREAD_GETMEM_NC (self, specific, cnt);
          if (level2 != NULL)
            {
              /* The first block is allocated as part of the thread
                 descriptor.  */
              free (level2);
              THREAD_SETMEM_NC (self, specific, cnt, NULL);
            }
        }

      THREAD_SETMEM (self, specific_used, false);
    }
}


/* Deallocate a thread's stack after optionally making sure the thread
   descriptor is still valid.  */
void
internal_function
__free_tcb (struct pthread *pd)
{
  /* The thread is exiting now.  */
  if (__builtin_expect (atomic_bit_test_set (&pd->cancelhandling,
                                             TERMINATED_BIT) == 0, 1))
    {
      /* Remove the descriptor from the list.  */
      if (DEBUGGING_P && __find_in_stack_list (pd) == NULL)
        /* Something is really wrong.  The descriptor for a still
           running thread is gone.  */
        abort ();

      /* Free TPP data.  */
      if (__builtin_expect (pd->tpp != NULL, 0))
        {
          struct priority_protection_data *tpp = pd->tpp;

          pd->tpp = NULL;
          free (tpp);
        }

      /* Queue the stack memory block for reuse and exit the process.  The
         kernel will signal via writing to the address returned by
         QUEUE-STACK when the stack is available.  */
      __deallocate_stack (pd);
    }
}
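

/* This is the function the new thread actually starts executing; it is
   the entry point handed to 'clone' by create_thread.  It runs the
   user-supplied start routine and afterwards performs all of the
   thread-exit bookkeeping.  */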
static int
start_thread (void *arg)
{
  struct pthread *pd = (struct pthread *) arg;

#if HP_TIMING_AVAIL
  /* Remember the time when the thread was started.  */
  hp_timing_t now;
  HP_TIMING_NOW (now);
  THREAD_SETMEM (pd, cpuclock_offset, now);
#endif

  /* Initialize resolver state pointer.  */
  __resp = &pd->res;

#ifdef __NR_set_robust_list
# ifndef __ASSUME_SET_ROBUST_LIST
  if (__set_robust_list_avail >= 0)
# endif
    {
      INTERNAL_SYSCALL_DECL (err);
      /* This call should never fail because the initial call in init.c
         succeeded.  */
      INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
                        sizeof (struct robust_list_head));
    }
#endif

  /* If the parent was running cancellation handlers while creating
     the thread, the new thread inherited the signal mask.  Reset the
     cancellation signal mask.  */
  if (__builtin_expect (pd->parent_cancelhandling & CANCELING_BITMASK, 0))
    {
      INTERNAL_SYSCALL_DECL (err);
      sigset_t mask;
      __sigemptyset (&mask);
      __sigaddset (&mask, SIGCANCEL);
      (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &mask,
                               NULL, _NSIG / 8);
    }

  /* This is where the try/finally block should be created.  For
     compilers without that support we use setjmp.  */
  struct pthread_unwind_buf unwind_buf;

  /* No previous handlers.  */
  unwind_buf.priv.data.prev = NULL;
  unwind_buf.priv.data.cleanup = NULL;

  int not_first_call;
  not_first_call = setjmp ((struct __jmp_buf_tag *) unwind_buf.cancel_jmp_buf);
  if (__builtin_expect (! not_first_call, 1))
    {
      /* Store the new cleanup handler info.  */
      THREAD_SETMEM (pd, cleanup_jmp_buf, &unwind_buf);

      if (__builtin_expect (pd->stopped_start, 0))
        {
          int oldtype = CANCEL_ASYNC ();

          /* Get the lock the parent locked to force synchronization.  */
          lll_lock (pd->lock, LLL_PRIVATE);
          /* And give it up right away.  */
          lll_unlock (pd->lock, LLL_PRIVATE);

          CANCEL_RESET (oldtype);
        }

      /* Run the code the user provided.  */
#ifdef CALL_THREAD_FCT
      THREAD_SETMEM (pd, result, CALL_THREAD_FCT (pd));
#else
      THREAD_SETMEM (pd, result, pd->start_routine (pd->arg));
#endif
    }
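
  /* If the thread was cancelled or called pthread_exit, the unwinder
     jumped back to the buffer set up above with NOT_FIRST_CALL nonzero,
     so execution continues here in either case.  */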

  /* Run the destructors for the thread-local data.  */
  __nptl_deallocate_tsd ();

  /* Clean up any state libc stored in thread-local variables.  */
  __libc_thread_freeres ();

  /* If this is the last thread we terminate the process now.  We
     do not notify the debugger, it might just irritate it if there
     is no thread left.  */
  if (__builtin_expect (atomic_decrement_and_test (&__nptl_nthreads), 0))
    /* This was the last thread.  */
    exit (0);

  /* Report the death of the thread if this is wanted.  */
  if (__builtin_expect (pd->report_events, 0))
    {
      /* See whether TD_DEATH is in any of the masks.  */
      const int idx = __td_eventword (TD_DEATH);
      const uint32_t mask = __td_eventmask (TD_DEATH);

      if ((mask & (__nptl_threads_events.event_bits[idx]
                   | pd->eventbuf.eventmask.event_bits[idx])) != 0)
        {
          /* Yep, we have to signal the death.  Add the descriptor to
             the list but only if it is not already on it.  */
          if (pd->nextevent == NULL)
            {
              pd->eventbuf.eventnum = TD_DEATH;
              pd->eventbuf.eventdata = pd;

              /* Lock-free push onto the global event list: retry the
                 compare-and-exchange until no other thread has changed
                 the list head in the meantime.  */
              do
                pd->nextevent = __nptl_last_event;
              while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
                                                           pd, pd->nextevent));
            }

          /* Now call the function to signal the event.  */
          __nptl_death_event ();
        }
    }

  /* The thread is exiting now.  Don't set this bit until after we've hit
     the event-reporting breakpoint, so that td_thr_get_info on us while at
     the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE.  */
  atomic_bit_set (&pd->cancelhandling, EXITING_BIT);

#ifndef __ASSUME_SET_ROBUST_LIST
  /* If this thread has any robust mutexes locked, handle them now.  */
# if __WORDSIZE == 64
  void *robust = pd->robust_head.list;
# else
  __pthread_slist_t *robust = pd->robust_list.__next;
# endif
  /* We let the kernel do the notification if it is able to do so.
     If we have to do it here, there are certainly no PI mutexes
     involved, since the kernel support for them is even more
     recent.  */
  if (__set_robust_list_avail < 0
      && __builtin_expect (robust != (void *) &pd->robust_head, 0))
    {
      do
        {
          struct __pthread_mutex_s *this = (struct __pthread_mutex_s *)
            ((char *) robust - offsetof (struct __pthread_mutex_s,
                                         __list.__next));
          robust = *((void **) robust);

# ifdef __PTHREAD_MUTEX_HAVE_PREV
          this->__list.__prev = NULL;
# endif
          this->__list.__next = NULL;

          lll_robust_dead (this->__lock, /* XYZ */ LLL_SHARED);
        }
      while (robust != (void *) &pd->robust_head);
    }
#endif

  /* Mark the memory of the stack as usable to the kernel.  We free
     everything except for the space used for the TCB itself.  */
  size_t pagesize_m1 = __getpagesize () - 1;
#ifdef _STACK_GROWS_DOWN
  char *sp = CURRENT_STACK_FRAME;
  size_t freesize = (sp - (char *) pd->stackblock) & ~pagesize_m1;
#else
# error "to do"
#endif
  assert (freesize < pd->stackblock_size);
  if (freesize > PTHREAD_STACK_MIN)
    madvise (pd->stackblock, freesize - PTHREAD_STACK_MIN, MADV_DONTNEED);

  /* If the thread is detached free the TCB.  */
  if (IS_DETACHED (pd))
    /* Free the TCB.  */
    __free_tcb (pd);
  else if (__builtin_expect (pd->cancelhandling & SETXID_BITMASK, 0))
    {
      /* Some other thread might call any of the setXid functions and expect
         us to reply.  In this case wait until we did that.  */
      do
        lll_futex_wait (&pd->setxid_futex, 0, LLL_PRIVATE);
      while (pd->cancelhandling & SETXID_BITMASK);

      /* Reset the value so that the stack can be reused.  */
      pd->setxid_futex = 0;
    }

  /* We cannot call '_exit' here.  '_exit' will terminate the process.

     The 'exit' implementation in the kernel will signal when the
     process is really dead since 'clone' got passed the CLONE_CLEARTID
     flag.  The 'tid' field in the TCB will be set to zero.

     The exit code is zero since in case all threads exit by calling
     'pthread_exit' the exit status must be 0 (zero).  */
  __exit_thread_inline (0);

  /* NOTREACHED */
  return 0;
}


/* Default thread attributes for the case when the user does not
   provide any.  */
static const struct pthread_attr default_attr =
  {
    /* Just some value > 0 which gets rounded to the nearest page size.  */
    .guardsize = 1,
  };


int
__pthread_create_2_1 (newthread, attr, start_routine, arg)
     pthread_t *newthread;
     const pthread_attr_t *attr;
     void *(*start_routine) (void *);
     void *arg;
{
  STACK_VARIABLES;

  const struct pthread_attr *iattr = (struct pthread_attr *) attr;
  if (iattr == NULL)
    /* Is this the best idea?  On NUMA machines this could mean
       accessing far-away memory.  */
    iattr = &default_attr;

  struct pthread *pd = NULL;
  int err = ALLOCATE_STACK (iattr, &pd);
  if (__builtin_expect (err != 0, 0))
    /* Something went wrong.  Maybe a parameter of the attributes is
       invalid or we could not allocate memory.  */
    return err;


  /* Initialize the TCB.  All initializations with zero should be
     performed in 'get_cached_stack'.  This way we avoid doing this if
     the stack was freshly allocated with 'mmap'.  */

#ifdef TLS_TCB_AT_TP
  /* Reference to the TCB itself.  */
  pd->header.self = pd;

  /* Self-reference for TLS.  */
  pd->header.tcb = pd;
#endif

  /* Store the address of the start routine and the parameter.  Since
     we do not start the function directly, the stillborn thread will
     get the information from its thread descriptor.  */
  pd->start_routine = start_routine;
  pd->arg = arg;

  /* Copy the thread attribute flags.  */
  struct pthread *self = THREAD_SELF;
  pd->flags = ((iattr->flags & ~(ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
               | (self->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)));

  /* Initialize the field for the ID of the thread which is waiting
     for us.  This is a self-reference in case the thread is created
     detached.  */
  pd->joinid = iattr->flags & ATTR_FLAG_DETACHSTATE ? pd : NULL;

  /* The debug events are inherited from the parent.  */
  pd->eventbuf = self->eventbuf;


  /* Copy the parent's scheduling parameters.  The flags will say what
     is valid and what is not.  */
  pd->schedpolicy = self->schedpolicy;
  pd->schedparam = self->schedparam;

  /* Copy the stack guard canary.  */
#ifdef THREAD_COPY_STACK_GUARD
  THREAD_COPY_STACK_GUARD (pd);
#endif

  /* Copy the pointer guard value.  */
#ifdef THREAD_COPY_POINTER_GUARD
  THREAD_COPY_POINTER_GUARD (pd);
#endif

  /* Determine scheduling parameters for the thread.  */
  if (attr != NULL
      && __builtin_expect ((iattr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0, 0)
      && (iattr->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)) != 0)
    {
      INTERNAL_SYSCALL_DECL (scerr);

      /* Use the scheduling parameters the user provided.  */
      if (iattr->flags & ATTR_FLAG_POLICY_SET)
        pd->schedpolicy = iattr->schedpolicy;
      else if ((pd->flags & ATTR_FLAG_POLICY_SET) == 0)
        {
          pd->schedpolicy = INTERNAL_SYSCALL (sched_getscheduler, scerr, 1, 0);
          pd->flags |= ATTR_FLAG_POLICY_SET;
        }

      if (iattr->flags & ATTR_FLAG_SCHED_SET)
        memcpy (&pd->schedparam, &iattr->schedparam,
                sizeof (struct sched_param));
      else if ((pd->flags & ATTR_FLAG_SCHED_SET) == 0)
        {
          INTERNAL_SYSCALL (sched_getparam, scerr, 2, 0, &pd->schedparam);
          pd->flags |= ATTR_FLAG_SCHED_SET;
        }

      /* Check for valid priorities.  */
      int minprio = INTERNAL_SYSCALL (sched_get_priority_min, scerr, 1,
                                      iattr->schedpolicy);
      int maxprio = INTERNAL_SYSCALL (sched_get_priority_max, scerr, 1,
                                      iattr->schedpolicy);
      if (pd->schedparam.sched_priority < minprio
          || pd->schedparam.sched_priority > maxprio)
        {
          err = EINVAL;
          goto errout;
        }
    }

  /* Pass the descriptor to the caller.  */
  *newthread = (pthread_t) pd;

  /* Remember whether the thread is detached or not.  In case of an
     error we have to free the stacks of non-detached stillborn
     threads.  */
  bool is_detached = IS_DETACHED (pd);

  /* Start the thread.  */
  err = create_thread (pd, iattr, STACK_VARIABLES_ARGS);
  if (err != 0)
    {
      /* Something went wrong.  Free the resources.  */
      if (!is_detached)
        {
        errout:
          __deallocate_stack (pd);
        }
      return err;
    }

  return 0;
}
versioned_symbol (libpthread, __pthread_create_2_1, pthread_create, GLIBC_2_1);
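
/* For reference, callers reach the GLIBC_2.1 entry point above through
   the public pthread_create symbol; a minimal, purely illustrative use
   looks like this:

     #include <pthread.h>
     #include <stdio.h>

     static void *worker (void *arg)
     {
       printf ("hello from %s\n", (const char *) arg);
       return NULL;
     }

     int main (void)
     {
       pthread_t t;
       if (pthread_create (&t, NULL, worker, (void *) "worker") != 0)
         return 1;
       return pthread_join (t, NULL);
     }
*/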


#if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
int
__pthread_create_2_0 (newthread, attr, start_routine, arg)
     pthread_t *newthread;
     const pthread_attr_t *attr;
     void *(*start_routine) (void *);
     void *arg;
{
  /* The ATTR attribute is not really of type `pthread_attr_t *'.  It has
     the old size, and access to the new members might crash the program.
     We convert the struct now.  */
  struct pthread_attr new_attr;

  if (attr != NULL)
    {
      struct pthread_attr *iattr = (struct pthread_attr *) attr;
      size_t ps = __getpagesize ();

      /* Copy values from the user-provided attributes.  */
      new_attr.schedparam = iattr->schedparam;
      new_attr.schedpolicy = iattr->schedpolicy;
      new_attr.flags = iattr->flags;

      /* Fill in default values for the fields not present in the old
         implementation.  */
      new_attr.guardsize = ps;
      new_attr.stackaddr = NULL;
      new_attr.stacksize = 0;
      new_attr.cpuset = NULL;

      /* We will pass this value on to the real implementation.  */
      attr = (pthread_attr_t *) &new_attr;
    }

  return __pthread_create_2_1 (newthread, attr, start_routine, arg);
}
compat_symbol (libpthread, __pthread_create_2_0, pthread_create,
               GLIBC_2_0);
#endif

/* Information for libthread_db.  */
#include "../nptl_db/db_info.c"

/* If pthread_create is present, libgcc_eh.a and libsupc++.a expect some
   other POSIX thread functions to be present as well.  */
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_lock)
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_trylock)
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_unlock)

PTHREAD_STATIC_FN_REQUIRE (pthread_once)
PTHREAD_STATIC_FN_REQUIRE (pthread_cancel)

PTHREAD_STATIC_FN_REQUIRE (pthread_key_create)
PTHREAD_STATIC_FN_REQUIRE (pthread_key_delete)
PTHREAD_STATIC_FN_REQUIRE (pthread_setspecific)
PTHREAD_STATIC_FN_REQUIRE (pthread_getspecific)