[glibc.git] / nptl / pthread_create.c
/* Copyright (C) 2002-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <ctype.h>
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include "pthreadP.h"
#include <hp-timing.h>
#include <ldsodefs.h>
#include <atomic.h>
#include <libc-internal.h>
#include <resolv.h>
#include <kernel-features.h>
#include <exit-thread.h>
#include <default-sched.h>
#include <futex-internal.h>

#include <shlib-compat.h>

#include <stap-probe.h>
/* Nonzero if debugging mode is enabled.  */
int __pthread_debug;
/* Globally enabled events.  */
static td_thr_events_t __nptl_threads_events __attribute_used__;

/* Pointer to descriptor with the last event.  */
static struct pthread *__nptl_last_event __attribute_used__;
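/* Note: both objects above are examined by debuggers through
   libthread_db (see nptl_db/), which is why they are marked
   __attribute_used__ even where they look unreferenced.  */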
/* Number of threads running.  */
unsigned int __nptl_nthreads = 1;
/* Code to allocate and deallocate a stack.  */
#include "allocatestack.c"
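/* allocatestack.c is included textually rather than compiled separately
   so that this file and the stack allocator can share static data such
   as stack_cache_lock, stack_used and __stack_user.  */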
/* createthread.c defines this function, and two macros:
   START_THREAD_DEFN and START_THREAD_SELF (see below).

   create_thread is obliged to initialize PD->stopped_start.  It
   should be true if the STOPPED_START parameter is true, or if
   create_thread needs the new thread to synchronize at startup for
   some other implementation reason.  If PD->stopped_start will be
   true, then create_thread is obliged to perform the operation
   "lll_lock (PD->lock, LLL_PRIVATE)" before starting the thread.

   The return value is zero for success or an errno code for failure.
   If the return value is ENOMEM, that will be translated to EAGAIN,
   so create_thread need not do that.  On failure, *THREAD_RAN should
   be set to true iff the thread actually started up and then got
   cancelled before calling user code (*PD->start_routine), in which
   case it is responsible for doing its own cleanup.  */

static int create_thread (struct pthread *pd, const struct pthread_attr *attr,
                          bool stopped_start, STACK_VARIABLES_PARMS,
                          bool *thread_ran);

#include <createthread.c>
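/* Search the list of threads with library-allocated stacks (stack_used)
   and the list of threads with user-provided stacks (__stack_user) for
   the descriptor PD.  Returns PD if it is on one of the lists,
   otherwise NULL.  */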
struct pthread *
internal_function
__find_in_stack_list (pd)
     struct pthread *pd;
{
  list_t *entry;
  struct pthread *result = NULL;

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  list_for_each (entry, &stack_used)
    {
      struct pthread *curp;

      curp = list_entry (entry, struct pthread, list);
      if (curp == pd)
        {
          result = curp;
          break;
        }
    }

  if (result == NULL)
    list_for_each (entry, &__stack_user)
      {
        struct pthread *curp;

        curp = list_entry (entry, struct pthread, list);
        if (curp == pd)
          {
            result = curp;
            break;
          }
      }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  return result;
}
/* Deallocate POSIX thread-local-storage.  */
void
attribute_hidden
__nptl_deallocate_tsd (void)
{
  struct pthread *self = THREAD_SELF;

  /* Maybe no data was ever allocated.  This happens often so we have
     a flag for this.  */
  if (THREAD_GETMEM (self, specific_used))
    {
      size_t round;
      size_t cnt;

      round = 0;
      do
        {
          size_t idx;

          /* So far no new nonzero data entry.  */
          THREAD_SETMEM (self, specific_used, false);

          for (cnt = idx = 0; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
            {
              struct pthread_key_data *level2;

              level2 = THREAD_GETMEM_NC (self, specific, cnt);

              if (level2 != NULL)
                {
                  size_t inner;

                  for (inner = 0; inner < PTHREAD_KEY_2NDLEVEL_SIZE;
                       ++inner, ++idx)
                    {
                      void *data = level2[inner].data;

                      if (data != NULL)
                        {
                          /* Always clear the data.  */
                          level2[inner].data = NULL;

                          /* Make sure the data corresponds to a valid
                             key.  This test fails if the key was
                             deallocated and also if it was
                             re-allocated.  It is the user's
                             responsibility to free the memory in this
                             case.  */
                          if (level2[inner].seq
                              == __pthread_keys[idx].seq
                              /* It is not necessary to register a destructor
                                 function.  */
                              && __pthread_keys[idx].destr != NULL)
                            /* Call the user-provided destructor.  */
                            __pthread_keys[idx].destr (data);
                        }
                    }
                }
              else
                idx += PTHREAD_KEY_1STLEVEL_SIZE;
            }

          if (THREAD_GETMEM (self, specific_used) == 0)
            /* No data has been modified.  */
            goto just_free;
        }
      /* We only repeat the process a fixed number of times.  */
      while (__builtin_expect (++round < PTHREAD_DESTRUCTOR_ITERATIONS, 0));

      /* Just clear the memory of the first block for reuse.  */
      memset (&THREAD_SELF->specific_1stblock, '\0',
              sizeof (self->specific_1stblock));

    just_free:
      /* Free the memory for the other blocks.  */
      for (cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
        {
          struct pthread_key_data *level2;

          level2 = THREAD_GETMEM_NC (self, specific, cnt);
          if (level2 != NULL)
            {
              /* The first block is allocated as part of the thread
                 descriptor.  */
              free (level2);
              THREAD_SETMEM_NC (self, specific, cnt, NULL);
            }
        }

      THREAD_SETMEM (self, specific_used, false);
    }
}
/* Deallocate a thread's stack after optionally making sure the thread
   descriptor is still valid.  */
void
internal_function
__free_tcb (struct pthread *pd)
{
  /* The thread is exiting now.  */
  if (__builtin_expect (atomic_bit_test_set (&pd->cancelhandling,
                                             TERMINATED_BIT) == 0, 1))
    {
      /* Remove the descriptor from the list.  */
      if (DEBUGGING_P && __find_in_stack_list (pd) == NULL)
        /* Something is really wrong.  The descriptor for a still
           running thread is gone.  */
        abort ();

      /* Free TPP data.  */
      if (__glibc_unlikely (pd->tpp != NULL))
        {
          struct priority_protection_data *tpp = pd->tpp;

          pd->tpp = NULL;
          free (tpp);
        }

      /* Queue the stack memory block for reuse and exit the process.  The
         kernel will signal via writing to the address returned by
         QUEUE-STACK when the stack is available.  */
      __deallocate_stack (pd);
    }
}
/* Local function to start thread and handle cleanup.
   createthread.c defines the macro START_THREAD_DEFN to the
   declaration that its create_thread function will refer to, and
   START_THREAD_SELF to the expression to optimally deliver the new
   thread's THREAD_SELF value.  */
START_THREAD_DEFN
{
  struct pthread *pd = START_THREAD_SELF;

#if HP_TIMING_AVAIL
  /* Remember the time when the thread was started.  */
  hp_timing_t now;
  HP_TIMING_NOW (now);
  THREAD_SETMEM (pd, cpuclock_offset, now);
#endif

  /* Initialize resolver state pointer.  */
  __resp = &pd->res;

  /* Initialize pointers to locale data.  */
  __ctype_init ();

  /* Allow setxid from now onwards.  */
  if (__glibc_unlikely (atomic_exchange_acq (&pd->setxid_futex, 0) == -2))
    futex_wake (&pd->setxid_futex, 1, FUTEX_PRIVATE);

#ifdef __NR_set_robust_list
# ifndef __ASSUME_SET_ROBUST_LIST
  if (__set_robust_list_avail >= 0)
# endif
    {
      INTERNAL_SYSCALL_DECL (err);
      /* This call should never fail because the initial call in init.c
         succeeded.  */
      INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
                        sizeof (struct robust_list_head));
    }
#endif
#ifdef SIGCANCEL
  /* If the parent was running cancellation handlers while creating
     the thread the new thread inherited the signal mask.  Reset the
     cancellation signal mask.  */
  if (__glibc_unlikely (pd->parent_cancelhandling & CANCELING_BITMASK))
    {
      INTERNAL_SYSCALL_DECL (err);
      sigset_t mask;
      __sigemptyset (&mask);
      __sigaddset (&mask, SIGCANCEL);
      (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &mask,
                               NULL, _NSIG / 8);
    }
#endif
  /* This is where the try/finally block should be created.  For
     compilers without that support we do use setjmp.  */
  struct pthread_unwind_buf unwind_buf;

  /* No previous handlers.  */
  unwind_buf.priv.data.prev = NULL;
  unwind_buf.priv.data.cleanup = NULL;

  int not_first_call;
  not_first_call = setjmp ((struct __jmp_buf_tag *) unwind_buf.cancel_jmp_buf);
  if (__glibc_likely (! not_first_call))
    {
      /* Store the new cleanup handler info.  */
      THREAD_SETMEM (pd, cleanup_jmp_buf, &unwind_buf);

      if (__glibc_unlikely (pd->stopped_start))
        {
          int oldtype = CANCEL_ASYNC ();

          /* Get the lock the parent locked to force synchronization.  */
          lll_lock (pd->lock, LLL_PRIVATE);
          /* And give it up right away.  */
          lll_unlock (pd->lock, LLL_PRIVATE);

          CANCEL_RESET (oldtype);
        }

      LIBC_PROBE (pthread_start, 3, (pthread_t) pd, pd->start_routine, pd->arg);

      /* Run the code the user provided.  */
#ifdef CALL_THREAD_FCT
      THREAD_SETMEM (pd, result, CALL_THREAD_FCT (pd));
#else
      THREAD_SETMEM (pd, result, pd->start_routine (pd->arg));
#endif
    }
  /* Call destructors for the thread_local TLS variables.  */
#ifndef SHARED
  if (&__call_tls_dtors != NULL)
#endif
    __call_tls_dtors ();

  /* Run the destructor for the thread-local data.  */
  __nptl_deallocate_tsd ();

  /* Clean up any state libc stored in thread-local variables.  */
  __libc_thread_freeres ();

  /* If this is the last thread we terminate the process now.  We
     do not notify the debugger, it might just irritate it if there
     is no thread left.  */
  if (__glibc_unlikely (atomic_decrement_and_test (&__nptl_nthreads)))
    /* This was the last thread.  */
    exit (0);
  /* Report the death of the thread if this is wanted.  */
  if (__glibc_unlikely (pd->report_events))
    {
      /* See whether TD_DEATH is in any of the masks.  */
      const int idx = __td_eventword (TD_DEATH);
      const uint32_t mask = __td_eventmask (TD_DEATH);

      if ((mask & (__nptl_threads_events.event_bits[idx]
                   | pd->eventbuf.eventmask.event_bits[idx])) != 0)
        {
          /* Yep, we have to signal the death.  Add the descriptor to
             the list but only if it is not already on it.  */
          if (pd->nextevent == NULL)
            {
              pd->eventbuf.eventnum = TD_DEATH;
              pd->eventbuf.eventdata = pd;

              do
                pd->nextevent = __nptl_last_event;
              while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
                                                           pd, pd->nextevent));
            }

          /* Now call the function to signal the event.  */
          __nptl_death_event ();
        }
    }

  /* The thread is exiting now.  Don't set this bit until after we've hit
     the event-reporting breakpoint, so that td_thr_get_info on us while at
     the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE.  */
  atomic_bit_set (&pd->cancelhandling, EXITING_BIT);
#ifndef __ASSUME_SET_ROBUST_LIST
  /* If this thread has any robust mutexes locked, handle them now.  */
# ifdef __PTHREAD_MUTEX_HAVE_PREV
  void *robust = pd->robust_head.list;
# else
  __pthread_slist_t *robust = pd->robust_list.__next;
# endif
  /* We let the kernel do the notification if it is able to do so.
     If we have to do it here, there are for sure no PI mutexes involved
     since the kernel support for them is even more recent.  */
  if (__set_robust_list_avail < 0
      && __builtin_expect (robust != (void *) &pd->robust_head, 0))
    {
      do
        {
          struct __pthread_mutex_s *this = (struct __pthread_mutex_s *)
            ((char *) robust - offsetof (struct __pthread_mutex_s,
                                         __list.__next));
          robust = *((void **) robust);

# ifdef __PTHREAD_MUTEX_HAVE_PREV
          this->__list.__prev = NULL;
# endif
          this->__list.__next = NULL;

          atomic_or (&this->__lock, FUTEX_OWNER_DIED);
          futex_wake ((unsigned int *) &this->__lock, 1,
                      /* XYZ */ FUTEX_SHARED);
        }
      while (robust != (void *) &pd->robust_head);
    }
#endif
  /* Mark the memory of the stack as usable to the kernel.  We free
     everything except for the space used for the TCB itself.  */
  size_t pagesize_m1 = __getpagesize () - 1;
#ifdef _STACK_GROWS_DOWN
  char *sp = CURRENT_STACK_FRAME;
  size_t freesize = (sp - (char *) pd->stackblock) & ~pagesize_m1;
#else
# error "to do"
#endif
  assert (freesize < pd->stackblock_size);
  if (freesize > PTHREAD_STACK_MIN)
    __madvise (pd->stackblock, freesize - PTHREAD_STACK_MIN, MADV_DONTNEED);
  /* If the thread is detached free the TCB.  */
  if (IS_DETACHED (pd))
    /* Free the TCB.  */
    __free_tcb (pd);
  else if (__glibc_unlikely (pd->cancelhandling & SETXID_BITMASK))
    {
      /* Some other thread might call any of the setXid functions and expect
         us to reply.  In this case wait until we did that.  */
      do
        /* XXX This differs from the typical futex_wait_simple pattern in that
           the futex_wait condition (setxid_futex) is different from the
           condition used in the surrounding loop (cancelhandling).  We need
           to check and document why this is correct.  */
        futex_wait_simple (&pd->setxid_futex, 0, FUTEX_PRIVATE);
      while (pd->cancelhandling & SETXID_BITMASK);

      /* Reset the value so that the stack can be reused.  */
      pd->setxid_futex = 0;
    }
  /* We cannot call '_exit' here.  '_exit' will terminate the process.

     The 'exit' implementation in the kernel will signal when the
     process is really dead since 'clone' got passed the CLONE_CHILD_CLEARTID
     flag.  The 'tid' field in the TCB will be set to zero.

     The exit code is zero since in case all threads exit by calling
     'pthread_exit' the exit status must be 0 (zero).  */
  __exit_thread ();

  /* NOTREACHED */
}
/* Return true iff obliged to report TD_CREATE events.  */
static bool
report_thread_creation (struct pthread *pd)
{
  if (__glibc_unlikely (THREAD_GETMEM (THREAD_SELF, report_events)))
    {
      /* The parent thread is supposed to report events.
         Check whether the TD_CREATE event is needed, too.  */
      const size_t idx = __td_eventword (TD_CREATE);
      const uint32_t mask = __td_eventmask (TD_CREATE);

      return ((mask & (__nptl_threads_events.event_bits[idx]
                       | pd->eventbuf.eventmask.event_bits[idx])) != 0);
    }

  return false;
}
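/* Create a new thread running START_ROUTINE (ARG) with the attributes
   described by ATTR and store its handle in *NEWTHREAD.  This is the
   implementation exported as pthread_create for GLIBC_2.1 and later
   (see the versioned_symbol call below).  */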
int
__pthread_create_2_1 (newthread, attr, start_routine, arg)
     pthread_t *newthread;
     const pthread_attr_t *attr;
     void *(*start_routine) (void *);
     void *arg;
{
  STACK_VARIABLES;

  const struct pthread_attr *iattr = (struct pthread_attr *) attr;
  struct pthread_attr default_attr;
  bool free_cpuset = false;
  if (iattr == NULL)
    {
      lll_lock (__default_pthread_attr_lock, LLL_PRIVATE);
      default_attr = __default_pthread_attr;
      size_t cpusetsize = default_attr.cpusetsize;
      if (cpusetsize > 0)
        {
          cpu_set_t *cpuset;
          if (__glibc_likely (__libc_use_alloca (cpusetsize)))
            cpuset = __alloca (cpusetsize);
          else
            {
              cpuset = malloc (cpusetsize);
              if (cpuset == NULL)
                {
                  lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);
                  return ENOMEM;
                }
              free_cpuset = true;
            }
          memcpy (cpuset, default_attr.cpuset, cpusetsize);
          default_attr.cpuset = cpuset;
        }
      lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);
      iattr = &default_attr;
    }

  struct pthread *pd = NULL;
  int err = ALLOCATE_STACK (iattr, &pd);
  int retval = 0;

  if (__glibc_unlikely (err != 0))
    /* Something went wrong.  Maybe a parameter of the attributes is
       invalid or we could not allocate memory.  Note we have to
       translate error codes.  */
    {
      retval = err == ENOMEM ? EAGAIN : err;
      goto out;
    }
  /* Initialize the TCB.  All initializations with zero should be
     performed in 'get_cached_stack'.  This way we avoid doing this if
     the stack is freshly allocated with 'mmap'.  */

#if TLS_TCB_AT_TP
  /* Reference to the TCB itself.  */
  pd->header.self = pd;

  /* Self-reference for TLS.  */
  pd->header.tcb = pd;
#endif

  /* Store the address of the start routine and the parameter.  Since
     we do not start the function directly the stillborn thread will
     get the information from its thread descriptor.  */
  pd->start_routine = start_routine;
  pd->arg = arg;

  /* Copy the thread attribute flags.  */
  struct pthread *self = THREAD_SELF;
  pd->flags = ((iattr->flags & ~(ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
               | (self->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)));

  /* Initialize the field for the ID of the thread which is waiting
     for us.  This is a self-reference in case the thread is created
     detached.  */
  pd->joinid = iattr->flags & ATTR_FLAG_DETACHSTATE ? pd : NULL;

  /* The debug events are inherited from the parent.  */
  pd->eventbuf = self->eventbuf;

  /* Copy the parent's scheduling parameters.  The flags will say what
     is valid and what is not.  */
  pd->schedpolicy = self->schedpolicy;
  pd->schedparam = self->schedparam;

  /* Copy the stack guard canary.  */
#ifdef THREAD_COPY_STACK_GUARD
  THREAD_COPY_STACK_GUARD (pd);
#endif

  /* Copy the pointer guard value.  */
#ifdef THREAD_COPY_POINTER_GUARD
  THREAD_COPY_POINTER_GUARD (pd);
#endif

  /* Verify the sysinfo bits were copied in allocate_stack if needed.  */
#ifdef NEED_DL_SYSINFO
  CHECK_THREAD_SYSINFO (pd);
#endif
  /* Inform start_thread (above) about cancellation state that might
     translate into inherited signal state.  */
  pd->parent_cancelhandling = THREAD_GETMEM (THREAD_SELF, cancelhandling);

  /* Determine scheduling parameters for the thread.  */
  if (__builtin_expect ((iattr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0, 0)
      && (iattr->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)) != 0)
    {
      /* Use the scheduling parameters the user provided.  */
      if (iattr->flags & ATTR_FLAG_POLICY_SET)
        {
          pd->schedpolicy = iattr->schedpolicy;
          pd->flags |= ATTR_FLAG_POLICY_SET;
        }
      if (iattr->flags & ATTR_FLAG_SCHED_SET)
        {
          /* The values were validated in pthread_attr_setschedparam.  */
          pd->schedparam = iattr->schedparam;
          pd->flags |= ATTR_FLAG_SCHED_SET;
        }

      if ((pd->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
          != (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
        collect_default_sched (pd);
    }
  /* Pass the descriptor to the caller.  */
  *newthread = (pthread_t) pd;

  LIBC_PROBE (pthread_create, 4, newthread, attr, start_routine, arg);

  /* One more thread.  We cannot have the thread do this itself, since it
     might exist but not have been scheduled yet by the time we've returned
     and need to check the value to behave correctly.  We must do it before
     creating the thread, in case it does get scheduled first and then
     might mistakenly think it was the only thread.  In the failure case,
     we momentarily store a false value; this doesn't matter because there
     is no kosher thing a signal handler interrupting us right here can do
     that cares whether the thread count is correct.  */
  atomic_increment (&__nptl_nthreads);

  bool thread_ran = false;
639 if (__glibc_unlikely (report_thread_creation (pd)))
641 /* Create the thread. We always create the thread stopped
642 so that it does not get far before we tell the debugger. */
643 retval = create_thread (pd, iattr, true, STACK_VARIABLES_ARGS,
644 &thread_ran);
645 if (retval == 0)
647 /* create_thread should have set this so that the logic below can
648 test it. */
649 assert (pd->stopped_start);
651 /* Now fill in the information about the new thread in
652 the newly created thread's data structure. We cannot let
653 the new thread do this since we don't know whether it was
654 already scheduled when we send the event. */
655 pd->eventbuf.eventnum = TD_CREATE;
656 pd->eventbuf.eventdata = pd;
658 /* Enqueue the descriptor. */
660 pd->nextevent = __nptl_last_event;
661 while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
662 pd, pd->nextevent)
663 != 0);
665 /* Now call the function which signals the event. */
666 __nptl_create_event ();
669 else
670 retval = create_thread (pd, iattr, false, STACK_VARIABLES_ARGS,
671 &thread_ran);
  if (__glibc_unlikely (retval != 0))
    {
      /* If thread creation "failed", that might mean that the thread got
         created and ran a little--short of running user code--but then
         create_thread cancelled it.  In that case, the thread will do all
         its own cleanup just like a normal thread exit after a successful
         creation would do.  */

      if (thread_ran)
        assert (pd->stopped_start);
      else
        {
          /* Oops, we lied for a second.  */
          atomic_decrement (&__nptl_nthreads);

          /* Perhaps a thread wants to change the IDs and is waiting for this
             stillborn thread.  */
          if (__glibc_unlikely (atomic_exchange_acq (&pd->setxid_futex, 0)
                                == -2))
            futex_wake (&pd->setxid_futex, 1, FUTEX_PRIVATE);

          /* Free the resources.  */
          __deallocate_stack (pd);
        }

      /* We have to translate error codes.  */
      if (retval == ENOMEM)
        retval = EAGAIN;
    }
  else
    {
      if (pd->stopped_start)
        /* The thread blocked on this lock either because we're doing TD_CREATE
           event reporting, or for some other reason that create_thread chose.
           Now let it run free.  */
        lll_unlock (pd->lock, LLL_PRIVATE);

      /* We now have for sure more than one thread.  The main thread might
         not yet have the flag set.  No need to set the global variable
         again if this is what we use.  */
      THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);
    }
 out:
  if (__glibc_unlikely (free_cpuset))
    free (default_attr.cpuset);

  return retval;
}
versioned_symbol (libpthread, __pthread_create_2_1, pthread_create, GLIBC_2_1);
#if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
int
__pthread_create_2_0 (newthread, attr, start_routine, arg)
     pthread_t *newthread;
     const pthread_attr_t *attr;
     void *(*start_routine) (void *);
     void *arg;
{
  /* The ATTR attribute is not really of type `pthread_attr_t *'.  It has
     the old size and access to the new members might crash the program.
     We convert the struct now.  */
  struct pthread_attr new_attr;

  if (attr != NULL)
    {
      struct pthread_attr *iattr = (struct pthread_attr *) attr;
      size_t ps = __getpagesize ();

      /* Copy values from the user-provided attributes.  */
      new_attr.schedparam = iattr->schedparam;
      new_attr.schedpolicy = iattr->schedpolicy;
      new_attr.flags = iattr->flags;

      /* Fill in default values for the fields not present in the old
         implementation.  */
      new_attr.guardsize = ps;
      new_attr.stackaddr = NULL;
      new_attr.stacksize = 0;
      new_attr.cpuset = NULL;

      /* We will pass this value on to the real implementation.  */
      attr = (pthread_attr_t *) &new_attr;
    }

  return __pthread_create_2_1 (newthread, attr, start_routine, arg);
}
compat_symbol (libpthread, __pthread_create_2_0, pthread_create,
               GLIBC_2_0);
#endif
/* Information for libthread_db.  */
#include "../nptl_db/db_info.c"
/* If pthread_create is present, libgcc_eh.a and libsupc++.a expect some
   other POSIX thread functions to be present as well.  */
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_lock)
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_trylock)
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_unlock)

PTHREAD_STATIC_FN_REQUIRE (pthread_once)
PTHREAD_STATIC_FN_REQUIRE (pthread_cancel)

PTHREAD_STATIC_FN_REQUIRE (pthread_key_create)
PTHREAD_STATIC_FN_REQUIRE (pthread_key_delete)
PTHREAD_STATIC_FN_REQUIRE (pthread_setspecific)
PTHREAD_STATIC_FN_REQUIRE (pthread_getspecific)