/* linuxthreads/pthread.c */

/* Linuxthreads - a simple clone()-based implementation of Posix        */
/* threads for Linux.                                                   */
/* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr)              */
/*                                                                      */
/* This program is free software; you can redistribute it and/or        */
/* modify it under the terms of the GNU Library General Public License  */
/* as published by the Free Software Foundation; either version 2       */
/* of the License, or (at your option) any later version.               */
/*                                                                      */
/* This program is distributed in the hope that it will be useful,      */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of       */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the        */
/* GNU Library General Public License for more details.                 */

/* Thread creation, initialization, and basic low-level routines */
18 #include <errno.h>
19 #include <stddef.h>
20 #include <stdio.h>
21 #include <stdlib.h>
22 #include <string.h>
23 #include <unistd.h>
24 #include <fcntl.h>
25 #include <sys/wait.h>
26 #include <sys/resource.h>
27 #include <sys/time.h>
28 #include <shlib-compat.h>
29 #include "pthread.h"
30 #include "internals.h"
31 #include "spinlock.h"
32 #include "restart.h"
33 #include "smp.h"
34 #include <ldsodefs.h>
35 #include <tls.h>
36 #include <locale.h> /* for __uselocale */
37 #include <version.h>
/* Sanity check.  */
#if __ASSUME_REALTIME_SIGNALS && !defined __SIGRTMIN
# error "This must not happen; new kernel assumed but old headers"
#endif

#if !(USE_TLS && HAVE___THREAD)
/* These variables are used by the setup code.  */
extern int _errno;
extern int _h_errno;

/* We need the global/static resolver state here.  */
# include <resolv.h>
# undef _res

extern struct __res_state _res;
#endif
56 #ifdef USE_TLS
58 /* We need only a few variables. */
59 static pthread_descr manager_thread;
61 #else
63 /* Descriptor of the initial thread */
65 struct _pthread_descr_struct __pthread_initial_thread = {
68 .self = &__pthread_initial_thread /* pthread_descr self */
71 &__pthread_initial_thread, /* pthread_descr p_nextlive */
72 &__pthread_initial_thread, /* pthread_descr p_prevlive */
73 NULL, /* pthread_descr p_nextwaiting */
74 NULL, /* pthread_descr p_nextlock */
75 PTHREAD_THREADS_MAX, /* pthread_t p_tid */
76 0, /* int p_pid */
77 0, /* int p_priority */
78 &__pthread_handles[0].h_lock, /* struct _pthread_fastlock * p_lock */
79 0, /* int p_signal */
80 NULL, /* sigjmp_buf * p_signal_buf */
81 NULL, /* sigjmp_buf * p_cancel_buf */
82 0, /* char p_terminated */
83 0, /* char p_detached */
84 0, /* char p_exited */
85 NULL, /* void * p_retval */
86 0, /* int p_retval */
87 NULL, /* pthread_descr p_joining */
88 NULL, /* struct _pthread_cleanup_buffer * p_cleanup */
89 0, /* char p_cancelstate */
90 0, /* char p_canceltype */
91 0, /* char p_canceled */
92 NULL, /* char * p_in_sighandler */
93 0, /* char p_sigwaiting */
94 PTHREAD_START_ARGS_INITIALIZER(NULL),
95 /* struct pthread_start_args p_start_args */
96 {NULL}, /* void ** p_specific[PTHREAD_KEY_1STLEVEL_SIZE] */
97 {NULL}, /* void * p_libc_specific[_LIBC_TSD_KEY_N] */
98 &_errno, /* int *p_errnop */
99 0, /* int p_errno */
100 &_h_errno, /* int *p_h_errnop */
101 0, /* int p_h_errno */
102 &_res, /* struct __res_state *p_resp */
103 {}, /* struct __res_state p_res */
104 1, /* int p_userstack */
105 NULL, /* void * p_guardaddr */
106 0, /* size_t p_guardsize */
107 0, /* Always index 0 */
108 0, /* int p_report_events */
109 {{{0, }}, 0, NULL}, /* td_eventbuf_t p_eventbuf */
110 __ATOMIC_INITIALIZER, /* struct pthread_atomic p_resume_count */
111 0, /* char p_woken_by_cancel */
112 0, /* char p_condvar_avail */
113 0, /* char p_sem_avail */
114 NULL, /* struct pthread_extricate_if *p_extricate */
115 NULL, /* pthread_readlock_info *p_readlock_list; */
116 NULL, /* pthread_readlock_info *p_readlock_free; */
117 0 /* int p_untracked_readlock_count; */
120 /* Descriptor of the manager thread; none of this is used but the error
121 variables, the p_pid and p_priority fields,
122 and the address for identification. */
124 #define manager_thread (&__pthread_manager_thread)
125 struct _pthread_descr_struct __pthread_manager_thread = {
128 .self = &__pthread_manager_thread /* pthread_descr self */
131 NULL, /* pthread_descr p_nextlive */
132 NULL, /* pthread_descr p_prevlive */
133 NULL, /* pthread_descr p_nextwaiting */
134 NULL, /* pthread_descr p_nextlock */
135 0, /* int p_tid */
136 0, /* int p_pid */
137 0, /* int p_priority */
138 &__pthread_handles[1].h_lock, /* struct _pthread_fastlock * p_lock */
139 0, /* int p_signal */
140 NULL, /* sigjmp_buf * p_signal_buf */
141 NULL, /* sigjmp_buf * p_cancel_buf */
142 0, /* char p_terminated */
143 0, /* char p_detached */
144 0, /* char p_exited */
145 NULL, /* void * p_retval */
146 0, /* int p_retval */
147 NULL, /* pthread_descr p_joining */
148 NULL, /* struct _pthread_cleanup_buffer * p_cleanup */
149 0, /* char p_cancelstate */
150 0, /* char p_canceltype */
151 0, /* char p_canceled */
152 NULL, /* char * p_in_sighandler */
153 0, /* char p_sigwaiting */
154 PTHREAD_START_ARGS_INITIALIZER(__pthread_manager),
155 /* struct pthread_start_args p_start_args */
156 {NULL}, /* void ** p_specific[PTHREAD_KEY_1STLEVEL_SIZE] */
157 {NULL}, /* void * p_libc_specific[_LIBC_TSD_KEY_N] */
158 &__pthread_manager_thread.p_errno, /* int *p_errnop */
159 0, /* int p_errno */
160 NULL, /* int *p_h_errnop */
161 0, /* int p_h_errno */
162 NULL, /* struct __res_state *p_resp */
163 {}, /* struct __res_state p_res */
164 0, /* int p_userstack */
165 NULL, /* void * p_guardaddr */
166 0, /* size_t p_guardsize */
167 1, /* Always index 1 */
168 0, /* int p_report_events */
169 {{{0, }}, 0, NULL}, /* td_eventbuf_t p_eventbuf */
170 __ATOMIC_INITIALIZER, /* struct pthread_atomic p_resume_count */
171 0, /* char p_woken_by_cancel */
172 0, /* char p_condvar_avail */
173 0, /* char p_sem_avail */
174 NULL, /* struct pthread_extricate_if *p_extricate */
175 NULL, /* pthread_readlock_info *p_readlock_list; */
176 NULL, /* pthread_readlock_info *p_readlock_free; */
177 0 /* int p_untracked_readlock_count; */
179 #endif
181 /* Pointer to the main thread (the father of the thread manager thread) */
182 /* Originally, this is the initial thread, but this changes after fork() */
184 #ifdef USE_TLS
185 pthread_descr __pthread_main_thread;
186 #else
187 pthread_descr __pthread_main_thread = &__pthread_initial_thread;
188 #endif
190 /* Limit between the stack of the initial thread (above) and the
191 stacks of other threads (below). Aligned on a STACK_SIZE boundary. */
193 char *__pthread_initial_thread_bos;
195 /* File descriptor for sending requests to the thread manager. */
196 /* Initially -1, meaning that the thread manager is not running. */
198 int __pthread_manager_request = -1;
200 /* Other end of the pipe for sending requests to the thread manager. */
202 int __pthread_manager_reader;
204 /* Limits of the thread manager stack */
206 char *__pthread_manager_thread_bos;
207 char *__pthread_manager_thread_tos;
209 /* For process-wide exit() */
211 int __pthread_exit_requested;
212 int __pthread_exit_code;
214 /* Maximum stack size. */
215 size_t __pthread_max_stacksize;
217 /* Nozero if the machine has more than one processor. */
218 int __pthread_smp_kernel;
221 #if !__ASSUME_REALTIME_SIGNALS
222 /* Pointers that select new or old suspend/resume functions
223 based on availability of rt signals. */
225 void (*__pthread_restart)(pthread_descr) = __pthread_restart_old;
226 void (*__pthread_suspend)(pthread_descr) = __pthread_suspend_old;
227 int (*__pthread_timedsuspend)(pthread_descr, const struct timespec *) = __pthread_timedsuspend_old;
228 #endif /* __ASSUME_REALTIME_SIGNALS */
230 /* Communicate relevant LinuxThreads constants to gdb */
232 const int __pthread_threads_max = PTHREAD_THREADS_MAX;
233 const int __pthread_sizeof_handle = sizeof(struct pthread_handle_struct);
234 const int __pthread_offsetof_descr = offsetof(struct pthread_handle_struct,
235 h_descr);
236 const int __pthread_offsetof_pid = offsetof(struct _pthread_descr_struct,
237 p_pid);
238 const int __linuxthreads_pthread_sizeof_descr
239 = sizeof(struct _pthread_descr_struct);
241 const int __linuxthreads_initial_report_events;
243 const char __linuxthreads_version[] = VERSION;
245 /* Forward declarations */
247 static void pthread_onexit_process(int retcode, void *arg);
248 #ifndef HAVE_Z_NODELETE
249 static void pthread_atexit_process(void *arg, int retcode);
250 static void pthread_atexit_retcode(void *arg, int retcode);
251 #endif
252 static void pthread_handle_sigcancel(int sig);
253 static void pthread_handle_sigrestart(int sig);
254 static void pthread_handle_sigdebug(int sig);
256 /* Signal numbers used for the communication.
257 In these variables we keep track of the used variables. If the
258 platform does not support any real-time signals we will define the
259 values to some unreasonable value which will signal failing of all
260 the functions below. */
261 #ifndef __SIGRTMIN
262 static int current_rtmin = -1;
263 static int current_rtmax = -1;
264 int __pthread_sig_restart = SIGUSR1;
265 int __pthread_sig_cancel = SIGUSR2;
266 int __pthread_sig_debug;
267 #else
268 static int current_rtmin;
269 static int current_rtmax;
271 #if __SIGRTMAX - __SIGRTMIN >= 3
272 int __pthread_sig_restart = __SIGRTMIN;
273 int __pthread_sig_cancel = __SIGRTMIN + 1;
274 int __pthread_sig_debug = __SIGRTMIN + 2;
275 #else
276 int __pthread_sig_restart = SIGUSR1;
277 int __pthread_sig_cancel = SIGUSR2;
278 int __pthread_sig_debug;
279 #endif
281 static int rtsigs_initialized;
283 #if !__ASSUME_REALTIME_SIGNALS
284 # include "testrtsig.h"
285 #endif
287 static void
288 init_rtsigs (void)
290 #if !__ASSUME_REALTIME_SIGNALS
291 if (__builtin_expect (!kernel_has_rtsig (), 0))
293 current_rtmin = -1;
294 current_rtmax = -1;
295 # if __SIGRTMAX - __SIGRTMIN >= 3
296 __pthread_sig_restart = SIGUSR1;
297 __pthread_sig_cancel = SIGUSR2;
298 __pthread_sig_debug = 0;
299 # endif
301 else
302 #endif /* __ASSUME_REALTIME_SIGNALS */
304 #if __SIGRTMAX - __SIGRTMIN >= 3
305 current_rtmin = __SIGRTMIN + 3;
306 # if !__ASSUME_REALTIME_SIGNALS
307 __pthread_restart = __pthread_restart_new;
308 __pthread_suspend = __pthread_wait_for_restart_signal;
309 __pthread_timedsuspend = __pthread_timedsuspend_new;
310 # endif /* __ASSUME_REALTIME_SIGNALS */
311 #else
312 current_rtmin = __SIGRTMIN;
313 #endif
315 current_rtmax = __SIGRTMAX;
318 rtsigs_initialized = 1;
320 #endif
322 /* Return number of available real-time signal with highest priority. */
324 __libc_current_sigrtmin (void)
326 #ifdef __SIGRTMIN
327 if (__builtin_expect (!rtsigs_initialized, 0))
328 init_rtsigs ();
329 #endif
330 return current_rtmin;
333 /* Return number of available real-time signal with lowest priority. */
335 __libc_current_sigrtmax (void)
337 #ifdef __SIGRTMIN
338 if (__builtin_expect (!rtsigs_initialized, 0))
339 init_rtsigs ();
340 #endif
341 return current_rtmax;
/* Allocate real-time signal with highest/lowest available
   priority.  Please note that we don't use a lock since we assume
   this function to be called at program start.  */
int
__libc_allocate_rtsig (int high)
{
#ifndef __SIGRTMIN
  return -1;
#else
  if (__builtin_expect (!rtsigs_initialized, 0))
    init_rtsigs ();
  if (__builtin_expect (current_rtmin == -1, 0)
      || __builtin_expect (current_rtmin > current_rtmax, 0))
    /* We don't have anymore signal available.  */
    return -1;

  return high ? current_rtmin++ : current_rtmax--;
#endif
}
/* Initialize the pthread library.
   Initialization is split in two functions:
   - a constructor function that blocks the __pthread_sig_restart signal
     (must do this very early, since the program could capture the signal
      mask with e.g. sigsetjmp before creating the first thread);
   - a regular function called from pthread_create when needed. */

static void pthread_initialize(void) __attribute__((constructor));

#ifndef HAVE_Z_NODELETE
extern void *__dso_handle __attribute__ ((weak));
#endif


#if defined USE_TLS && !defined SHARED
extern void __libc_setup_tls (size_t tcbsize, size_t tcbalign);
#endif
384 /* Do some minimal initialization which has to be done during the
385 startup of the C library. */
386 void
387 __pthread_initialize_minimal(void)
389 #ifdef USE_TLS
390 pthread_descr self;
392 /* First of all init __pthread_handles[0] and [1] if needed. */
393 # if __LT_SPINLOCK_INIT != 0
394 __pthread_handles[0].h_lock = __LOCK_INITIALIZER;
395 __pthread_handles[1].h_lock = __LOCK_INITIALIZER;
396 # endif
397 # ifndef SHARED
398 /* Unlike in the dynamically linked case the dynamic linker has not
399 taken care of initializing the TLS data structures. */
400 __libc_setup_tls (TLS_TCB_SIZE, TLS_TCB_ALIGN);
401 # endif
403 self = THREAD_SELF;
405 /* The memory for the thread descriptor was allocated elsewhere as
406 part of the TLS allocation. We have to initialize the data
407 structure by hand. This initialization must mirror the struct
408 definition above. */
409 self->p_nextlive = self->p_prevlive = self;
410 self->p_tid = PTHREAD_THREADS_MAX;
411 self->p_lock = &__pthread_handles[0].h_lock;
412 # ifndef HAVE___THREAD
413 self->p_errnop = &_errno;
414 self->p_h_errnop = &_h_errno;
415 # endif
416 /* self->p_start_args need not be initialized, it's all zero. */
417 self->p_userstack = 1;
418 # if __LT_SPINLOCK_INIT != 0
419 self->p_resume_count = (struct pthread_atomic) __ATOMIC_INITIALIZER;
420 # endif
422 /* Another variable which points to the thread descriptor. */
423 __pthread_main_thread = self;
425 /* And fill in the pointer the the thread __pthread_handles array. */
426 __pthread_handles[0].h_descr = self;
427 #else
428 /* First of all init __pthread_handles[0] and [1]. */
429 # if __LT_SPINLOCK_INIT != 0
430 __pthread_handles[0].h_lock = __LOCK_INITIALIZER;
431 __pthread_handles[1].h_lock = __LOCK_INITIALIZER;
432 # endif
433 __pthread_handles[0].h_descr = &__pthread_initial_thread;
434 __pthread_handles[1].h_descr = &__pthread_manager_thread;
436 /* If we have special thread_self processing, initialize that for the
437 main thread now. */
438 # ifdef INIT_THREAD_SELF
439 INIT_THREAD_SELF(&__pthread_initial_thread, 0);
440 # endif
441 #endif
443 #if HP_TIMING_AVAIL
444 # ifdef USE_TLS
445 self->p_cpuclock_offset = GL(dl_cpuclock_offset);
446 # else
447 __pthread_initial_thread.p_cpuclock_offset = GL(dl_cpuclock_offset);
448 # endif
449 #endif
451 #if !(USE_TLS && HAVE___THREAD)
452 /* Initialize thread-locale current locale to point to the global one.
453 With __thread support, the variable's initializer takes care of this. */
454 __uselocale (LC_GLOBAL_LOCALE);
455 #endif
459 void
460 __pthread_init_max_stacksize(void)
462 struct rlimit limit;
463 size_t max_stack;
465 getrlimit(RLIMIT_STACK, &limit);
466 #ifdef FLOATING_STACKS
467 if (limit.rlim_cur == RLIM_INFINITY)
468 limit.rlim_cur = ARCH_STACK_MAX_SIZE;
469 # ifdef NEED_SEPARATE_REGISTER_STACK
470 max_stack = limit.rlim_cur / 2;
471 # else
472 max_stack = limit.rlim_cur;
473 # endif
474 #else
475 /* Play with the stack size limit to make sure that no stack ever grows
476 beyond STACK_SIZE minus one page (to act as a guard page). */
477 # ifdef NEED_SEPARATE_REGISTER_STACK
478 /* STACK_SIZE bytes hold both the main stack and register backing
479 store. The rlimit value applies to each individually. */
480 max_stack = STACK_SIZE/2 - __getpagesize ();
481 # else
482 max_stack = STACK_SIZE - __getpagesize();
483 # endif
484 if (limit.rlim_cur > max_stack) {
485 limit.rlim_cur = max_stack;
486 setrlimit(RLIMIT_STACK, &limit);
488 #endif
489 __pthread_max_stacksize = max_stack;
493 static void pthread_initialize(void)
495 struct sigaction sa;
496 sigset_t mask;
498 /* If already done (e.g. by a constructor called earlier!), bail out */
499 if (__pthread_initial_thread_bos != NULL) return;
500 #ifdef TEST_FOR_COMPARE_AND_SWAP
501 /* Test if compare-and-swap is available */
502 __pthread_has_cas = compare_and_swap_is_available();
503 #endif
504 #ifdef FLOATING_STACKS
505 /* We don't need to know the bottom of the stack. Give the pointer some
506 value to signal that initialization happened. */
507 __pthread_initial_thread_bos = (void *) -1l;
508 #else
509 /* Determine stack size limits . */
510 __pthread_init_max_stacksize ();
511 # ifdef _STACK_GROWS_UP
512 /* The initial thread already has all the stack it needs */
513 __pthread_initial_thread_bos = (char *)
514 ((long)CURRENT_STACK_FRAME &~ (STACK_SIZE - 1));
515 # else
516 /* For the initial stack, reserve at least STACK_SIZE bytes of stack
517 below the current stack address, and align that on a
518 STACK_SIZE boundary. */
519 __pthread_initial_thread_bos =
520 (char *)(((long)CURRENT_STACK_FRAME - 2 * STACK_SIZE) & ~(STACK_SIZE - 1));
521 # endif
522 #endif
523 #ifdef USE_TLS
524 /* Update the descriptor for the initial thread. */
525 THREAD_SETMEM (((pthread_descr) NULL), p_pid, __getpid());
526 # ifndef HAVE___THREAD
527 /* Likewise for the resolver state _res. */
528 THREAD_SETMEM (((pthread_descr) NULL), p_resp, &_res);
529 # endif
530 #else
531 /* Update the descriptor for the initial thread. */
532 __pthread_initial_thread.p_pid = __getpid();
533 /* Likewise for the resolver state _res. */
534 __pthread_initial_thread.p_resp = &_res;
535 #endif
536 #ifdef __SIGRTMIN
537 /* Initialize real-time signals. */
538 init_rtsigs ();
539 #endif
540 /* Setup signal handlers for the initial thread.
541 Since signal handlers are shared between threads, these settings
542 will be inherited by all other threads. */
543 sa.sa_handler = pthread_handle_sigrestart;
544 sigemptyset(&sa.sa_mask);
545 sa.sa_flags = 0;
546 __libc_sigaction(__pthread_sig_restart, &sa, NULL);
547 sa.sa_handler = pthread_handle_sigcancel;
548 // sa.sa_flags = 0;
549 __libc_sigaction(__pthread_sig_cancel, &sa, NULL);
550 if (__pthread_sig_debug > 0) {
551 sa.sa_handler = pthread_handle_sigdebug;
552 sigemptyset(&sa.sa_mask);
553 // sa.sa_flags = 0;
554 __libc_sigaction(__pthread_sig_debug, &sa, NULL);
556 /* Initially, block __pthread_sig_restart. Will be unblocked on demand. */
557 sigemptyset(&mask);
558 sigaddset(&mask, __pthread_sig_restart);
559 sigprocmask(SIG_BLOCK, &mask, NULL);
560 /* Register an exit function to kill all other threads. */
561 /* Do it early so that user-registered atexit functions are called
562 before pthread_*exit_process. */
563 #ifndef HAVE_Z_NODELETE
564 if (__builtin_expect (&__dso_handle != NULL, 1))
565 __cxa_atexit ((void (*) (void *)) pthread_atexit_process, NULL,
566 __dso_handle);
567 else
568 #endif
569 __on_exit (pthread_onexit_process, NULL);
570 /* How many processors. */
571 __pthread_smp_kernel = is_smp_system ();
/* Public entry point that forces library initialization.  */
void __pthread_initialize(void)
{
  pthread_initialize();
}
579 int __pthread_initialize_manager(void)
581 int manager_pipe[2];
582 int pid;
583 struct pthread_request request;
584 int report_events;
585 pthread_descr tcb;
587 #ifndef HAVE_Z_NODELETE
588 if (__builtin_expect (&__dso_handle != NULL, 1))
589 __cxa_atexit ((void (*) (void *)) pthread_atexit_retcode, NULL,
590 __dso_handle);
591 #endif
593 if (__pthread_max_stacksize == 0)
594 __pthread_init_max_stacksize ();
595 /* If basic initialization not done yet (e.g. we're called from a
596 constructor run before our constructor), do it now */
597 if (__pthread_initial_thread_bos == NULL) pthread_initialize();
598 /* Setup stack for thread manager */
599 __pthread_manager_thread_bos = malloc(THREAD_MANAGER_STACK_SIZE);
600 if (__pthread_manager_thread_bos == NULL) return -1;
601 __pthread_manager_thread_tos =
602 __pthread_manager_thread_bos + THREAD_MANAGER_STACK_SIZE;
603 /* Setup pipe to communicate with thread manager */
604 if (pipe(manager_pipe) == -1) {
605 free(__pthread_manager_thread_bos);
606 return -1;
609 #ifdef USE_TLS
610 /* Allocate memory for the thread descriptor and the dtv. */
611 __pthread_handles[1].h_descr = manager_thread = tcb
612 = _dl_allocate_tls (NULL);
613 if (tcb == NULL) {
614 free(__pthread_manager_thread_bos);
615 __libc_close(manager_pipe[0]);
616 __libc_close(manager_pipe[1]);
617 return -1;
620 /* Initialize the descriptor. */
621 tcb->p_header.data.tcb = tcb;
622 tcb->p_header.data.self = tcb;
623 tcb->p_lock = &__pthread_handles[1].h_lock;
624 # ifndef HAVE___THREAD
625 tcb->p_errnop = &tcb->p_errno;
626 # endif
627 tcb->p_start_args = (struct pthread_start_args) PTHREAD_START_ARGS_INITIALIZER(__pthread_manager);
628 tcb->p_nr = 1;
629 # if __LT_SPINLOCK_INIT != 0
630 self->p_resume_count = (struct pthread_atomic) __ATOMIC_INITIALIZER;
631 # endif
632 #else
633 tcb = &__pthread_manager_thread;
634 #endif
636 __pthread_manager_request = manager_pipe[1]; /* writing end */
637 __pthread_manager_reader = manager_pipe[0]; /* reading end */
639 /* Start the thread manager */
640 pid = 0;
641 #ifdef USE_TLS
642 if (__linuxthreads_initial_report_events != 0)
643 THREAD_SETMEM (((pthread_descr) NULL), p_report_events,
644 __linuxthreads_initial_report_events);
645 report_events = THREAD_GETMEM (((pthread_descr) NULL), p_report_events);
646 #else
647 if (__linuxthreads_initial_report_events != 0)
648 __pthread_initial_thread.p_report_events
649 = __linuxthreads_initial_report_events;
650 report_events = __pthread_initial_thread.p_report_events;
651 #endif
652 if (__builtin_expect (report_events, 0))
654 /* It's a bit more complicated. We have to report the creation of
655 the manager thread. */
656 int idx = __td_eventword (TD_CREATE);
657 uint32_t mask = __td_eventmask (TD_CREATE);
658 uint32_t event_bits;
660 #ifdef USE_TLS
661 event_bits = THREAD_GETMEM_NC (((pthread_descr) NULL),
662 p_eventbuf.eventmask.event_bits[idx]);
663 #else
664 event_bits = __pthread_initial_thread.p_eventbuf.eventmask.event_bits[idx];
665 #endif
667 if ((mask & (__pthread_threads_events.event_bits[idx] | event_bits))
668 != 0)
670 __pthread_lock(tcb->p_lock, NULL);
672 #ifdef NEED_SEPARATE_REGISTER_STACK
673 pid = __clone2(__pthread_manager_event,
674 (void **) __pthread_manager_thread_bos,
675 THREAD_MANAGER_STACK_SIZE,
676 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
677 tcb);
678 #elif _STACK_GROWS_UP
679 pid = __clone(__pthread_manager_event,
680 (void **) __pthread_manager_thread_bos,
681 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
682 tcb);
683 #else
684 pid = __clone(__pthread_manager_event,
685 (void **) __pthread_manager_thread_tos,
686 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
687 tcb);
688 #endif
690 if (pid != -1)
692 /* Now fill in the information about the new thread in
693 the newly created thread's data structure. We cannot let
694 the new thread do this since we don't know whether it was
695 already scheduled when we send the event. */
696 tcb->p_eventbuf.eventdata = tcb;
697 tcb->p_eventbuf.eventnum = TD_CREATE;
698 __pthread_last_event = tcb;
699 tcb->p_tid = 2* PTHREAD_THREADS_MAX + 1;
700 tcb->p_pid = pid;
702 /* Now call the function which signals the event. */
703 __linuxthreads_create_event ();
706 /* Now restart the thread. */
707 __pthread_unlock(tcb->p_lock);
711 if (__builtin_expect (pid, 0) == 0)
713 #ifdef NEED_SEPARATE_REGISTER_STACK
714 pid = __clone2(__pthread_manager, (void **) __pthread_manager_thread_bos,
715 THREAD_MANAGER_STACK_SIZE,
716 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND, tcb);
717 #elif _STACK_GROWS_UP
718 pid = __clone(__pthread_manager, (void **) __pthread_manager_thread_bos,
719 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND, tcb);
720 #else
721 pid = __clone(__pthread_manager, (void **) __pthread_manager_thread_tos,
722 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND, tcb);
723 #endif
725 if (__builtin_expect (pid, 0) == -1) {
726 free(__pthread_manager_thread_bos);
727 __libc_close(manager_pipe[0]);
728 __libc_close(manager_pipe[1]);
729 return -1;
731 tcb->p_tid = 2* PTHREAD_THREADS_MAX + 1;
732 tcb->p_pid = pid;
733 /* Make gdb aware of new thread manager */
734 if (__builtin_expect (__pthread_threads_debug, 0) && __pthread_sig_debug > 0)
736 raise(__pthread_sig_debug);
737 /* We suspend ourself and gdb will wake us up when it is
738 ready to handle us. */
739 __pthread_wait_for_restart_signal(thread_self());
741 /* Synchronize debugging of the thread manager */
742 request.req_kind = REQ_DEBUG;
743 TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
744 (char *) &request, sizeof(request)));
745 return 0;
748 /* Thread creation */
750 int __pthread_create_2_1(pthread_t *thread, const pthread_attr_t *attr,
751 void * (*start_routine)(void *), void *arg)
753 pthread_descr self = thread_self();
754 struct pthread_request request;
755 int retval;
756 if (__builtin_expect (__pthread_manager_request, 0) < 0) {
757 if (__pthread_initialize_manager() < 0) return EAGAIN;
759 request.req_thread = self;
760 request.req_kind = REQ_CREATE;
761 request.req_args.create.attr = attr;
762 request.req_args.create.fn = start_routine;
763 request.req_args.create.arg = arg;
764 sigprocmask(SIG_SETMASK, (const sigset_t *) NULL,
765 &request.req_args.create.mask);
766 TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
767 (char *) &request, sizeof(request)));
768 suspend(self);
769 retval = THREAD_GETMEM(self, p_retcode);
770 if (__builtin_expect (retval, 0) == 0)
771 *thread = (pthread_t) THREAD_GETMEM(self, p_retval);
772 return retval;
775 versioned_symbol (libpthread, __pthread_create_2_1, pthread_create, GLIBC_2_1);
777 #if SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_1)
779 int __pthread_create_2_0(pthread_t *thread, const pthread_attr_t *attr,
780 void * (*start_routine)(void *), void *arg)
782 /* The ATTR attribute is not really of type `pthread_attr_t *'. It has
783 the old size and access to the new members might crash the program.
784 We convert the struct now. */
785 pthread_attr_t new_attr;
787 if (attr != NULL)
789 size_t ps = __getpagesize ();
791 memcpy (&new_attr, attr,
792 (size_t) &(((pthread_attr_t*)NULL)->__guardsize));
793 new_attr.__guardsize = ps;
794 new_attr.__stackaddr_set = 0;
795 new_attr.__stackaddr = NULL;
796 new_attr.__stacksize = STACK_SIZE - ps;
797 attr = &new_attr;
799 return __pthread_create_2_1 (thread, attr, start_routine, arg);
801 compat_symbol (libpthread, __pthread_create_2_0, pthread_create, GLIBC_2_0);
802 #endif
804 /* Simple operations on thread identifiers */
806 pthread_t pthread_self(void)
808 pthread_descr self = thread_self();
809 return THREAD_GETMEM(self, p_tid);
/* Two thread identifiers name the same thread iff they are equal.  */
int pthread_equal(pthread_t thread1, pthread_t thread2)
{
  return thread1 == thread2;
}
817 /* Helper function for thread_self in the case of user-provided stacks */
819 #ifndef THREAD_SELF
821 pthread_descr __pthread_find_self(void)
823 char * sp = CURRENT_STACK_FRAME;
824 pthread_handle h;
826 /* __pthread_handles[0] is the initial thread, __pthread_handles[1] is
827 the manager threads handled specially in thread_self(), so start at 2 */
828 h = __pthread_handles + 2;
829 while (! (sp <= (char *) h->h_descr && sp >= h->h_bottom)) h++;
830 return h->h_descr;
833 #else
835 static pthread_descr thread_self_stack(void)
837 char *sp = CURRENT_STACK_FRAME;
838 pthread_handle h;
840 if (sp >= __pthread_manager_thread_bos && sp < __pthread_manager_thread_tos)
841 return manager_thread;
842 h = __pthread_handles + 2;
843 # ifdef USE_TLS
844 while (h->h_descr == NULL
845 || ! (sp <= (char *) h->h_descr->p_stackaddr && sp >= h->h_bottom))
846 h++;
847 # else
848 while (! (sp <= (char *) h->h_descr && sp >= h->h_bottom))
849 h++;
850 # endif
851 return h->h_descr;
854 #endif
856 /* Thread scheduling */
858 int pthread_setschedparam(pthread_t thread, int policy,
859 const struct sched_param *param)
861 pthread_handle handle = thread_handle(thread);
862 pthread_descr th;
864 __pthread_lock(&handle->h_lock, NULL);
865 if (__builtin_expect (invalid_handle(handle, thread), 0)) {
866 __pthread_unlock(&handle->h_lock);
867 return ESRCH;
869 th = handle->h_descr;
870 if (__builtin_expect (__sched_setscheduler(th->p_pid, policy, param) == -1,
871 0)) {
872 __pthread_unlock(&handle->h_lock);
873 return errno;
875 th->p_priority = policy == SCHED_OTHER ? 0 : param->sched_priority;
876 __pthread_unlock(&handle->h_lock);
877 if (__pthread_manager_request >= 0)
878 __pthread_manager_adjust_prio(th->p_priority);
879 return 0;
882 int pthread_getschedparam(pthread_t thread, int *policy,
883 struct sched_param *param)
885 pthread_handle handle = thread_handle(thread);
886 int pid, pol;
888 __pthread_lock(&handle->h_lock, NULL);
889 if (__builtin_expect (invalid_handle(handle, thread), 0)) {
890 __pthread_unlock(&handle->h_lock);
891 return ESRCH;
893 pid = handle->h_descr->p_pid;
894 __pthread_unlock(&handle->h_lock);
895 pol = __sched_getscheduler(pid);
896 if (__builtin_expect (pol, 0) == -1) return errno;
897 if (__sched_getparam(pid, param) == -1) return errno;
898 *policy = pol;
899 return 0;
int __pthread_yield (void)
{
  /* For now this is equivalent with the POSIX call.  */
  return sched_yield ();
}
907 weak_alias (__pthread_yield, pthread_yield)
909 /* Process-wide exit() request */
911 static void pthread_onexit_process(int retcode, void *arg)
913 if (__builtin_expect (__pthread_manager_request, 0) >= 0) {
914 struct pthread_request request;
915 pthread_descr self = thread_self();
917 request.req_thread = self;
918 request.req_kind = REQ_PROCESS_EXIT;
919 request.req_args.exit.code = retcode;
920 TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
921 (char *) &request, sizeof(request)));
922 suspend(self);
923 /* Main thread should accumulate times for thread manager and its
924 children, so that timings for main thread account for all threads. */
925 if (self == __pthread_main_thread)
927 #ifdef USE_TLS
928 waitpid(manager_thread->p_pid, NULL, __WCLONE);
929 #else
930 waitpid(__pthread_manager_thread.p_pid, NULL, __WCLONE);
931 #endif
932 /* Since all threads have been asynchronously terminated
933 (possibly holding locks), free cannot be used any more. */
934 /*free (__pthread_manager_thread_bos);*/
935 __pthread_manager_thread_bos = __pthread_manager_thread_tos = NULL;
#ifndef HAVE_Z_NODELETE
/* Exit status stashed by pthread_atexit_retcode for the later
   pthread_atexit_process call.  */
static int __pthread_atexit_retcode;

static void pthread_atexit_process(void *arg, int retcode)
{
  pthread_onexit_process (retcode ?: __pthread_atexit_retcode, arg);
}

static void pthread_atexit_retcode(void *arg, int retcode)
{
  __pthread_atexit_retcode = retcode;
}
#endif
954 /* The handler for the RESTART signal just records the signal received
955 in the thread descriptor, and optionally performs a siglongjmp
956 (for pthread_cond_timedwait). */
958 static void pthread_handle_sigrestart(int sig)
960 pthread_descr self = thread_self();
961 THREAD_SETMEM(self, p_signal, sig);
962 if (THREAD_GETMEM(self, p_signal_jmp) != NULL)
963 siglongjmp(*THREAD_GETMEM(self, p_signal_jmp), 1);
/* The handler for the CANCEL signal checks for cancellation
   (in asynchronous mode), for process-wide exit and exec requests.
   For the thread manager thread, redirect the signal to
   __pthread_manager_sighandler. */

static void pthread_handle_sigcancel(int sig)
{
  pthread_descr self = thread_self();
  sigjmp_buf * jmpbuf;

  if (self == manager_thread)
    {
#ifdef THREAD_SELF
      /* A new thread might get a cancel signal before it is fully
	 initialized, so that the thread register might still point to the
	 manager thread.  Double check that this is really the manager
	 thread.  */
      pthread_descr real_self = thread_self_stack();
      if (real_self == manager_thread)
	{
	  __pthread_manager_sighandler(sig);
	  return;
	}
      /* Oops, thread_self() isn't working yet..  Fall through and treat
	 the signal as addressed to this (new) thread, after fixing up the
	 thread register.  */
      self = real_self;
# ifdef INIT_THREAD_SELF
      INIT_THREAD_SELF(self, self->p_nr);
# endif
#else
      /* No thread register: thread_self() is reliable, so this really is
	 the manager; hand the signal over.  */
      __pthread_manager_sighandler(sig);
      return;
#endif
    }
  if (__builtin_expect (__pthread_exit_requested, 0)) {
    /* Main thread should accumulate times for thread manager and its
       children, so that timings for main thread account for all threads. */
    if (self == __pthread_main_thread) {
#ifdef USE_TLS
      waitpid(manager_thread->p_pid, NULL, __WCLONE);
#else
      waitpid(__pthread_manager_thread.p_pid, NULL, __WCLONE);
#endif
    }
    /* Process-wide exit was requested: terminate with the agreed code.  */
    _exit(__pthread_exit_code);
  }
  if (__builtin_expect (THREAD_GETMEM(self, p_canceled), 0)
      && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
    /* Asynchronous cancellation: act on the cancel right here.  */
    if (THREAD_GETMEM(self, p_canceltype) == PTHREAD_CANCEL_ASYNCHRONOUS)
      __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
    /* Deferred cancellation with a registered jump target (set around
       cancellation points): clear the target first so a nested signal
       cannot reuse it, then jump out of the interrupted wait.  */
    jmpbuf = THREAD_GETMEM(self, p_cancel_jmp);
    if (jmpbuf != NULL) {
      THREAD_SETMEM(self, p_cancel_jmp, NULL);
      siglongjmp(*jmpbuf, 1);
    }
  }
}
/* Handler for the DEBUG signal.
   The debugging strategy is as follows:
   On reception of a REQ_DEBUG request (sent by new threads created to
   the thread manager under debugging mode), the thread manager throws
   __pthread_sig_debug to itself.  The debugger (if active) intercepts
   this signal, takes into account new threads and continues execution
   of the thread manager by propagating the signal because it doesn't
   know what it is specifically done for.  In the current implementation,
   the thread manager simply discards it. */

static void pthread_handle_sigdebug(int sig)
{
  /* Deliberately empty: the signal exists only as a debugger
     notification hook and is discarded here.  */
  (void) sig;
}
1038 /* Reset the state of the thread machinery after a fork().
1039 Close the pipe used for requests and set the main thread to the forked
1040 thread.
1041 Notice that we can't free the stack segments, as the forked thread
1042 may hold pointers into them. */
1044 void __pthread_reset_main_thread(void)
1046 pthread_descr self = thread_self();
1047 struct rlimit limit;
1049 if (__pthread_manager_request != -1) {
1050 /* Free the thread manager stack */
1051 free(__pthread_manager_thread_bos);
1052 __pthread_manager_thread_bos = __pthread_manager_thread_tos = NULL;
1053 /* Close the two ends of the pipe */
1054 __libc_close(__pthread_manager_request);
1055 __libc_close(__pthread_manager_reader);
1056 __pthread_manager_request = __pthread_manager_reader = -1;
1059 /* Update the pid of the main thread */
1060 THREAD_SETMEM(self, p_pid, __getpid());
1061 /* Make the forked thread the main thread */
1062 __pthread_main_thread = self;
1063 THREAD_SETMEM(self, p_nextlive, self);
1064 THREAD_SETMEM(self, p_prevlive, self);
1065 #if !(USE_TLS && HAVE___THREAD)
1066 /* Now this thread modifies the global variables. */
1067 THREAD_SETMEM(self, p_errnop, &_errno);
1068 THREAD_SETMEM(self, p_h_errnop, &_h_errno);
1069 THREAD_SETMEM(self, p_resp, &_res);
1070 #endif
1072 if (getrlimit (RLIMIT_STACK, &limit) == 0
1073 && limit.rlim_cur != limit.rlim_max) {
1074 limit.rlim_cur = limit.rlim_max;
1075 setrlimit(RLIMIT_STACK, &limit);
1079 /* Process-wide exec() request */
1081 void __pthread_kill_other_threads_np(void)
1083 struct sigaction sa;
1084 /* Terminate all other threads and thread manager */
1085 pthread_onexit_process(0, NULL);
1086 /* Make current thread the main thread in case the calling thread
1087 changes its mind, does not exec(), and creates new threads instead. */
1088 __pthread_reset_main_thread();
1090 /* Reset the signal handlers behaviour for the signals the
1091 implementation uses since this would be passed to the new
1092 process. */
1093 sigemptyset(&sa.sa_mask);
1094 sa.sa_flags = 0;
1095 sa.sa_handler = SIG_DFL;
1096 __libc_sigaction(__pthread_sig_restart, &sa, NULL);
1097 __libc_sigaction(__pthread_sig_cancel, &sa, NULL);
1098 if (__pthread_sig_debug > 0)
1099 __libc_sigaction(__pthread_sig_debug, &sa, NULL);
1101 weak_alias (__pthread_kill_other_threads_np, pthread_kill_other_threads_np)
1103 /* Concurrency symbol level. */
1104 static int current_level;
1106 int __pthread_setconcurrency(int level)
1108 /* We don't do anything unless we have found a useful interpretation. */
1109 current_level = level;
1110 return 0;
1112 weak_alias (__pthread_setconcurrency, pthread_setconcurrency)
1114 int __pthread_getconcurrency(void)
1116 return current_level;
1118 weak_alias (__pthread_getconcurrency, pthread_getconcurrency)
1120 /* Primitives for controlling thread execution */
1122 void __pthread_wait_for_restart_signal(pthread_descr self)
1124 sigset_t mask;
1126 sigprocmask(SIG_SETMASK, NULL, &mask); /* Get current signal mask */
1127 sigdelset(&mask, __pthread_sig_restart); /* Unblock the restart signal */
1128 THREAD_SETMEM(self, p_signal, 0);
1129 do {
1130 sigsuspend(&mask); /* Wait for signal */
1131 } while (THREAD_GETMEM(self, p_signal) !=__pthread_sig_restart);
1133 READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */
#if !__ASSUME_REALTIME_SIGNALS
/* The _old variants are for 2.0 and early 2.1 kernels which don't have RT
   signals.
   On these kernels, we use SIGUSR1 and SIGUSR2 for restart and cancellation.
   Since the restart signal does not queue, we use an atomic counter to create
   queuing semantics. This is needed to resolve a rare race condition in
   pthread_cond_timedwait_relative. */

/* Wake TH.  Bump its resume count; only the transition -2 -> -1
   (increment returning -1, i.e. a suspender is already parked) actually
   sends the restart signal -- otherwise the count alone records the
   pending wakeup.  NOTE(review): exact return convention of
   atomic_increment (old vs. new value) comes from the project headers;
   the comparison below matches the suspend side's decrement.  */
void __pthread_restart_old(pthread_descr th)
{
  if (atomic_increment(&th->p_resume_count) == -1)
    kill(th->p_pid, __pthread_sig_restart);
}

/* Suspend SELF.  If the decremented resume count shows no wakeup was
   banked in advance, park in __pthread_wait_for_restart_signal.  */
void __pthread_suspend_old(pthread_descr self)
{
  if (atomic_decrement(&self->p_resume_count) <= 0)
    __pthread_wait_for_restart_signal(self);
}
/* Timed suspend for pre-RT-signal kernels.  Returns 1 if woken by a
   restart signal (or after consuming one), 0 if the wait ended for any
   other reason and the caller must resolve the resulting race.  */
int
__pthread_timedsuspend_old(pthread_descr self, const struct timespec *abstime)
{
  sigset_t unblock, initial_mask;
  int was_signalled = 0;
  sigjmp_buf jmpbuf;

  /* Only sleep if no wakeup was banked in the resume count.  */
  if (atomic_decrement(&self->p_resume_count) == 0) {
    /* Set up a longjmp handler for the restart signal, unblock
       the signal and sleep. */

    if (sigsetjmp(jmpbuf, 1) == 0) {
      THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
      THREAD_SETMEM(self, p_signal, 0);
      /* Unblock the restart signal */
      sigemptyset(&unblock);
      sigaddset(&unblock, __pthread_sig_restart);
      sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);

      while (1) {
        struct timeval now;
        struct timespec reltime;

        /* Compute a time offset relative to now.  */
        __gettimeofday (&now, NULL);
        reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
        reltime.tv_sec = abstime->tv_sec - now.tv_sec;
        /* One normalization step suffices: the difference above is
           strictly greater than -1000000000.  */
        if (reltime.tv_nsec < 0) {
          reltime.tv_nsec += 1000000000;
          reltime.tv_sec -= 1;
        }

        /* Sleep for the required duration. If woken by a signal,
           resume waiting as required by Single Unix Specification.  */
        if (reltime.tv_sec < 0 || __libc_nanosleep(&reltime, NULL) == 0)
          break;
      }

      /* Block the restart signal again */
      sigprocmask(SIG_SETMASK, &initial_mask, NULL);
      was_signalled = 0;
    } else {
      /* Arrived here via siglongjmp from the restart handler.  */
      was_signalled = 1;
    }
    THREAD_SETMEM(self, p_signal_jmp, NULL);
  }

  /* Now was_signalled is true if we exited the above code
     due to the delivery of a restart signal. In that case,
     we know we have been dequeued and resumed and that the
     resume count is balanced. Otherwise, there are some
     cases to consider. First, try to bump up the resume count
     back to zero. If it goes to 1, it means restart() was
     invoked on this thread. The signal must be consumed
     and the count bumped down and everything is cool. We
     can return a 1 to the caller.
     Otherwise, no restart was delivered yet, so a potential
     race exists; we return a 0 to the caller which must deal
     with this race in an appropriate way; for example by
     atomically removing the thread from consideration for a
     wakeup---if such a thing fails, it means a restart is
     being delivered. */

  if (!was_signalled) {
    if (atomic_increment(&self->p_resume_count) != -1) {
      __pthread_wait_for_restart_signal(self);
      atomic_decrement(&self->p_resume_count); /* should be zero now! */
      /* woke spontaneously and consumed restart signal */
      return 1;
    }
    /* woke spontaneously but did not consume restart---caller must resolve */
    return 0;
  }
  /* woken due to restart signal */
  return 1;
}
#endif /* __ASSUME_REALTIME_SIGNALS */
1234 void __pthread_restart_new(pthread_descr th)
1236 /* The barrier is proabably not needed, in which case it still documents
1237 our assumptions. The intent is to commit previous writes to shared
1238 memory so the woken thread will have a consistent view. Complementary
1239 read barriers are present to the suspend functions. */
1240 WRITE_MEMORY_BARRIER();
1241 kill(th->p_pid, __pthread_sig_restart);
1244 /* There is no __pthread_suspend_new because it would just
1245 be a wasteful wrapper for __pthread_wait_for_restart_signal */
/* Timed suspend for kernels with RT signals.  Returns 1 if woken by the
   restart signal (delivered via siglongjmp from the handler), 0 if the
   wait timed out or ended otherwise -- an ambiguous case the caller
   must resolve, since the thread is still eligible for a restart.  */
int
__pthread_timedsuspend_new(pthread_descr self, const struct timespec *abstime)
{
  sigset_t unblock, initial_mask;
  int was_signalled = 0;
  sigjmp_buf jmpbuf;

  if (sigsetjmp(jmpbuf, 1) == 0) {
    /* Register the longjmp target, then unblock the restart signal and
       sleep until the deadline.  */
    THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
    THREAD_SETMEM(self, p_signal, 0);
    /* Unblock the restart signal */
    sigemptyset(&unblock);
    sigaddset(&unblock, __pthread_sig_restart);
    sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);

    while (1) {
      struct timeval now;
      struct timespec reltime;

      /* Compute a time offset relative to now.  */
      __gettimeofday (&now, NULL);
      reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
      reltime.tv_sec = abstime->tv_sec - now.tv_sec;
      /* One normalization step suffices: the difference above is
         strictly greater than -1000000000.  */
      if (reltime.tv_nsec < 0) {
        reltime.tv_nsec += 1000000000;
        reltime.tv_sec -= 1;
      }

      /* Sleep for the required duration. If woken by a signal,
         resume waiting as required by Single Unix Specification.  */
      if (reltime.tv_sec < 0 || __libc_nanosleep(&reltime, NULL) == 0)
        break;
    }

    /* Block the restart signal again */
    sigprocmask(SIG_SETMASK, &initial_mask, NULL);
    was_signalled = 0;
  } else {
    /* Arrived here via siglongjmp from the restart handler.  */
    was_signalled = 1;
  }
  THREAD_SETMEM(self, p_signal_jmp, NULL);

  /* Now was_signalled is true if we exited the above code
     due to the delivery of a restart signal. In that case,
     everything is cool. We have been removed from whatever
     we were waiting on by the other thread, and consumed its signal.

     Otherwise we this thread woke up spontaneously, or due to a signal other
     than restart. This is an ambiguous case that must be resolved by
     the caller; the thread is still eligible for a restart wakeup
     so there is a race. */

  READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */
  return was_signalled;
}
/* Debugging aid */

#ifdef DEBUG
#include <stdarg.h>

/* Write a printf-style message, prefixed with the pid, to stderr.
   FMT and the variadic arguments follow printf conventions.  Output is
   silently truncated to the local buffer size.  */
void __pthread_message(const char * fmt, ...)
{
  char buffer[1024];
  va_list args;
  int len;
  /* Build the pid prefix with snprintf and use its return value as the
     offset for the payload.  The previous sprintf + hard-coded
     "buffer + 8" assumed "%05d : " always yields 8 bytes, which fails
     for pids wider than five digits and could overwrite the prefix.  */
  len = snprintf(buffer, sizeof(buffer), "%05d : ", __getpid());
  if (len < 0 || (size_t) len >= sizeof(buffer))
    return;
  va_start(args, fmt);
  vsnprintf(buffer + len, sizeof(buffer) - len, fmt, args);
  va_end(args);
  /* Retry the write if interrupted by a signal.  */
  TEMP_FAILURE_RETRY(__libc_write(2, buffer, strlen(buffer)));
}
#endif
#ifndef SHARED
/* We need a hook to force the cancelation wrappers and file locking
   to be linked in when static libpthread is used. */
/* Taking the addresses of these symbols (defined in other objects of
   the static library) creates undefined references that drag the
   corresponding object files into a static link; the variables
   themselves are never read.  */
extern const int __pthread_provide_wrappers;
static const int *const __pthread_require_wrappers =
  &__pthread_provide_wrappers;
extern const int __pthread_provide_lockfile;
static const int *const __pthread_require_lockfile =
  &__pthread_provide_lockfile;
#endif