Updated to fedora-glibc-20050627T0850
[glibc.git] / linuxthreads / pthread.c
blob39863f2b5456a2e10a689819f96fd05221dd657f
2 /* Linuxthreads - a simple clone()-based implementation of Posix */
3 /* threads for Linux. */
4 /* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr) */
5 /* */
6 /* This program is free software; you can redistribute it and/or */
7 /* modify it under the terms of the GNU Library General Public License */
8 /* as published by the Free Software Foundation; either version 2 */
9 /* of the License, or (at your option) any later version. */
10 /* */
11 /* This program is distributed in the hope that it will be useful, */
12 /* but WITHOUT ANY WARRANTY; without even the implied warranty of */
13 /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
14 /* GNU Library General Public License for more details. */
16 /* Thread creation, initialization, and basic low-level routines */
18 #include <errno.h>
19 #include <stddef.h>
20 #include <stdio.h>
21 #include <stdlib.h>
22 #include <string.h>
23 #include <unistd.h>
24 #include <fcntl.h>
25 #include <sys/wait.h>
26 #include <sys/resource.h>
27 #include <sys/time.h>
28 #include <shlib-compat.h>
29 #include "pthread.h"
30 #include "internals.h"
31 #include "spinlock.h"
32 #include "restart.h"
33 #include "smp.h"
34 #include <ldsodefs.h>
35 #include <tls.h>
36 #include <version.h>
37 #include <not-cancel.h>
39 /* Sanity check. */
40 #if !defined __SIGRTMIN || (__SIGRTMAX - __SIGRTMIN) < 3
41 # error "This must not happen"
42 #endif
/* Without both TLS and the __thread keyword, errno, h_errno and the
   resolver state of the initial thread live in libc globals which the
   initial-thread descriptor below points at. */
44 #if !(USE_TLS && HAVE___THREAD)
45 /* These variables are used by the setup code. */
46 extern int _errno;
47 extern int _h_errno;
49 /* We need the global/static resolver state here. */
50 # include <resolv.h>
51 # undef _res
53 extern struct __res_state _res;
54 #endif
/* With TLS, the manager thread's descriptor is reached through a pointer
   filled in at manager startup rather than a static structure. */
56 #ifdef USE_TLS
58 /* We need only a few variables. */
59 #define manager_thread __pthread_manager_threadp
60 pthread_descr __pthread_manager_threadp attribute_hidden;
62 #else
64 /* Descriptor of the initial thread */
/* Statically allocated because nothing can be malloc'd this early.  The
   self-referential p_nextlive/p_prevlive links make it the sole member of
   the circular live-thread list. */
66 struct _pthread_descr_struct __pthread_initial_thread = {
67 .p_header.data.self = &__pthread_initial_thread,
68 .p_nextlive = &__pthread_initial_thread,
69 .p_prevlive = &__pthread_initial_thread,
70 .p_tid = PTHREAD_THREADS_MAX,
71 .p_lock = &__pthread_handles[0].h_lock,
72 .p_start_args = PTHREAD_START_ARGS_INITIALIZER(NULL),
73 #if !(USE_TLS && HAVE___THREAD)
74 .p_errnop = &_errno,
75 .p_h_errnop = &_h_errno,
76 .p_resp = &_res,
77 #endif
78 .p_userstack = 1,
79 .p_resume_count = __ATOMIC_INITIALIZER,
80 .p_alloca_cutoff = __MAX_ALLOCA_CUTOFF
/* NOTE(review): the closing "};" of this initializer appears to have been
   lost in extraction — restore it when repairing the file. */
83 /* Descriptor of the manager thread; none of this is used but the error
84 variables, the p_pid and p_priority fields,
85 and the address for identification. */
87 #define manager_thread (&__pthread_manager_thread)
88 struct _pthread_descr_struct __pthread_manager_thread = {
89 .p_header.data.self = &__pthread_manager_thread,
90 .p_header.data.multiple_threads = 1,
91 .p_lock = &__pthread_handles[1].h_lock,
92 .p_start_args = PTHREAD_START_ARGS_INITIALIZER(__pthread_manager),
93 #if !(USE_TLS && HAVE___THREAD)
94 .p_errnop = &__pthread_manager_thread.p_errno,
95 #endif
96 .p_nr = 1,
97 .p_resume_count = __ATOMIC_INITIALIZER,
98 .p_alloca_cutoff = PTHREAD_STACK_MIN / 4
/* NOTE(review): closing "};" lost in extraction here as well. */
100 #endif
102 /* Pointer to the main thread (the father of the thread manager thread) */
103 /* Originally, this is the initial thread, but this changes after fork() */
105 #ifdef USE_TLS
106 pthread_descr __pthread_main_thread;
107 #else
108 pthread_descr __pthread_main_thread = &__pthread_initial_thread;
109 #endif
111 /* Limit between the stack of the initial thread (above) and the
112 stacks of other threads (below). Aligned on a STACK_SIZE boundary. */
114 char *__pthread_initial_thread_bos;
116 /* File descriptor for sending requests to the thread manager. */
117 /* Initially -1, meaning that the thread manager is not running. */
119 int __pthread_manager_request = -1;
/* Set once more than one thread exists; mirrored into the descriptor and
   libc's own flag in __pthread_initialize_manager. */
121 int __pthread_multiple_threads attribute_hidden;
123 /* Other end of the pipe for sending requests to the thread manager. */
125 int __pthread_manager_reader;
127 /* Limits of the thread manager stack */
129 char *__pthread_manager_thread_bos;
130 char *__pthread_manager_thread_tos;
132 /* For process-wide exit() */
134 int __pthread_exit_requested;
135 int __pthread_exit_code;
137 /* Maximum stack size. */
138 size_t __pthread_max_stacksize;
140 /* Nonzero if the machine has more than one processor. */
141 int __pthread_smp_kernel;
144 #if !__ASSUME_REALTIME_SIGNALS
145 /* Pointers that select new or old suspend/resume functions
146 based on availability of rt signals. */
148 void (*__pthread_restart)(pthread_descr) = __pthread_restart_old;
149 void (*__pthread_suspend)(pthread_descr) = __pthread_suspend_old;
150 int (*__pthread_timedsuspend)(pthread_descr, const struct timespec *) = __pthread_timedsuspend_old;
151 #endif /* __ASSUME_REALTIME_SIGNALS */
153 /* Communicate relevant LinuxThreads constants to gdb */
/* These exported constants let the debugger (libthread_db) locate and walk
   the handle table without hard-coding structure layouts. */
155 const int __pthread_threads_max = PTHREAD_THREADS_MAX;
156 const int __pthread_sizeof_handle = sizeof(struct pthread_handle_struct);
157 const int __pthread_offsetof_descr = offsetof(struct pthread_handle_struct,
158 h_descr);
159 const int __pthread_offsetof_pid = offsetof(struct _pthread_descr_struct,
160 p_pid);
161 const int __linuxthreads_pthread_sizeof_descr
162 = sizeof(struct _pthread_descr_struct);
164 const int __linuxthreads_initial_report_events;
166 const char __linuxthreads_version[] = VERSION;
168 /* Forward declarations */
170 static void pthread_onexit_process(int retcode, void *arg);
171 #ifndef HAVE_Z_NODELETE
172 static void pthread_atexit_process(void *arg, int retcode);
173 static void pthread_atexit_retcode(void *arg, int retcode);
174 #endif
175 static void pthread_handle_sigcancel(int sig);
176 static void pthread_handle_sigrestart(int sig);
177 static void pthread_handle_sigdebug(int sig);
179 /* Signal numbers used for the communication.
180 In these variables we keep track of the used variables. If the
181 platform does not support any real-time signals we will define the
182 values to some unreasonable value which will signal failing of all
183 the functions below. */
/* Defaults assume RT signals exist; init_rtsigs() below rewrites them to
   SIGUSR1/SIGUSR2 (and disables the debug signal) on old kernels. */
184 int __pthread_sig_restart = __SIGRTMIN;
185 int __pthread_sig_cancel = __SIGRTMIN + 1;
186 int __pthread_sig_debug = __SIGRTMIN + 2;
188 extern int __libc_current_sigrtmin_private (void);
190 #if !__ASSUME_REALTIME_SIGNALS
191 static int rtsigs_initialized;
/* Runtime probe for real-time signal support.  If the kernel lacks RT
   signals, fall back to SIGUSR1/SIGUSR2 and disable the debug signal;
   otherwise switch the restart/suspend function pointers to the RT-signal
   implementations.  Idempotent via rtsigs_initialized.
   NOTE(review): the braces of this function body (and of the if/else arms)
   were lost in extraction. */
193 static void
194 init_rtsigs (void)
196 if (rtsigs_initialized)
197 return;
199 if (__libc_current_sigrtmin_private () == -1)
201 __pthread_sig_restart = SIGUSR1;
202 __pthread_sig_cancel = SIGUSR2;
203 __pthread_sig_debug = 0;
205 else
207 __pthread_restart = __pthread_restart_new;
208 __pthread_suspend = __pthread_wait_for_restart_signal;
209 __pthread_timedsuspend = __pthread_timedsuspend_new;
212 rtsigs_initialized = 1;
214 #endif
217 /* Initialize the pthread library.
218 Initialization is split in two functions:
219 - a constructor function that blocks the __pthread_sig_restart signal
220 (must do this very early, since the program could capture the signal
221 mask with e.g. sigsetjmp before creating the first thread);
222 - a regular function called from pthread_create when needed. */
224 static void pthread_initialize(void) __attribute__((constructor));
226 #ifndef HAVE_Z_NODELETE
227 extern void *__dso_handle __attribute__ ((weak));
228 #endif
231 #if defined USE_TLS && !defined SHARED
232 extern void __libc_setup_tls (size_t tcbsize, size_t tcbalign);
233 #endif
/* Jump table handed to libc via __libc_pthread_init() so libc-internal
   code can reach the pthread implementations without linking directly
   against libpthread. */
235 struct pthread_functions __pthread_functions =
237 #if !(USE_TLS && HAVE___THREAD)
238 .ptr_pthread_internal_tsd_set = __pthread_internal_tsd_set,
239 .ptr_pthread_internal_tsd_get = __pthread_internal_tsd_get,
240 .ptr_pthread_internal_tsd_address = __pthread_internal_tsd_address,
241 #endif
242 .ptr_pthread_fork = __pthread_fork,
243 .ptr_pthread_attr_destroy = __pthread_attr_destroy,
244 #if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
245 .ptr___pthread_attr_init_2_0 = __pthread_attr_init_2_0,
246 #endif
247 .ptr___pthread_attr_init_2_1 = __pthread_attr_init_2_1,
248 .ptr_pthread_attr_getdetachstate = __pthread_attr_getdetachstate,
249 .ptr_pthread_attr_setdetachstate = __pthread_attr_setdetachstate,
250 .ptr_pthread_attr_getinheritsched = __pthread_attr_getinheritsched,
251 .ptr_pthread_attr_setinheritsched = __pthread_attr_setinheritsched,
252 .ptr_pthread_attr_getschedparam = __pthread_attr_getschedparam,
253 .ptr_pthread_attr_setschedparam = __pthread_attr_setschedparam,
254 .ptr_pthread_attr_getschedpolicy = __pthread_attr_getschedpolicy,
255 .ptr_pthread_attr_setschedpolicy = __pthread_attr_setschedpolicy,
256 .ptr_pthread_attr_getscope = __pthread_attr_getscope,
257 .ptr_pthread_attr_setscope = __pthread_attr_setscope,
258 .ptr_pthread_condattr_destroy = __pthread_condattr_destroy,
259 .ptr_pthread_condattr_init = __pthread_condattr_init,
260 .ptr___pthread_cond_broadcast = __pthread_cond_broadcast,
261 .ptr___pthread_cond_destroy = __pthread_cond_destroy,
262 .ptr___pthread_cond_init = __pthread_cond_init,
263 .ptr___pthread_cond_signal = __pthread_cond_signal,
264 .ptr___pthread_cond_wait = __pthread_cond_wait,
265 .ptr___pthread_cond_timedwait = __pthread_cond_timedwait,
266 .ptr_pthread_equal = __pthread_equal,
267 .ptr___pthread_exit = __pthread_exit,
268 .ptr_pthread_getschedparam = __pthread_getschedparam,
269 .ptr_pthread_setschedparam = __pthread_setschedparam,
270 .ptr_pthread_mutex_destroy = __pthread_mutex_destroy,
271 .ptr_pthread_mutex_init = __pthread_mutex_init,
272 .ptr_pthread_mutex_lock = __pthread_mutex_lock,
273 .ptr_pthread_mutex_trylock = __pthread_mutex_trylock,
274 .ptr_pthread_mutex_unlock = __pthread_mutex_unlock,
275 .ptr_pthread_self = __pthread_self,
276 .ptr_pthread_setcancelstate = __pthread_setcancelstate,
277 .ptr_pthread_setcanceltype = __pthread_setcanceltype,
278 .ptr_pthread_do_exit = __pthread_do_exit,
279 .ptr_pthread_thread_self = __pthread_thread_self,
280 .ptr_pthread_cleanup_upto = __pthread_cleanup_upto,
281 .ptr_pthread_sigaction = __pthread_sigaction,
282 .ptr_pthread_sigwait = __pthread_sigwait,
283 .ptr_pthread_raise = __pthread_raise,
284 .ptr__pthread_cleanup_push = _pthread_cleanup_push,
285 .ptr__pthread_cleanup_pop = _pthread_cleanup_pop
/* NOTE(review): the "{" after the "=" and the closing "};" of this
   initializer were lost in extraction. */
287 #ifdef SHARED
288 # define ptr_pthread_functions &__pthread_functions
289 #else
290 # define ptr_pthread_functions NULL
291 #endif
/* Location of libc's own multiple-threads flag, returned by
   __libc_pthread_init; set to 1 when the manager starts. */
293 static int *__libc_multiple_threads_ptr;
295 /* Do some minimal initialization which has to be done during the
296 startup of the C library. */
/* Called from libc startup.  With TLS: make sure handle locks are usable,
   set up TLS for static binaries (or, in shared no-__thread builds,
   allocate a TCB by hand), then initialize the initial thread's descriptor
   fields to mirror the static initializer used in the non-TLS case.
   Finally register the function table with libc. */
297 void
298 __pthread_initialize_minimal(void)
300 #ifdef USE_TLS
301 pthread_descr self;
303 /* First of all init __pthread_handles[0] and [1] if needed. */
304 # if __LT_SPINLOCK_INIT != 0
305 __pthread_handles[0].h_lock = __LOCK_INITIALIZER;
306 __pthread_handles[1].h_lock = __LOCK_INITIALIZER;
307 # endif
308 # ifndef SHARED
309 /* Unlike in the dynamically linked case the dynamic linker has not
310 taken care of initializing the TLS data structures. */
311 __libc_setup_tls (TLS_TCB_SIZE, TLS_TCB_ALIGN);
312 # elif !USE___THREAD
313 if (__builtin_expect (GL(dl_tls_dtv_slotinfo_list) == NULL, 0))
315 tcbhead_t *tcbp;
317 /* There is no actual TLS being used, so the thread register
318 was not initialized in the dynamic linker. */
320 /* We need to install special hooks so that the malloc and memalign
321 calls in _dl_tls_setup and _dl_allocate_tls won't cause full
322 malloc initialization that will try to set up its thread state. */
324 extern void __libc_malloc_pthread_startup (bool first_time);
325 __libc_malloc_pthread_startup (true);
327 if (__builtin_expect (_dl_tls_setup (), 0)
328 || __builtin_expect ((tcbp = _dl_allocate_tls (NULL)) == NULL, 0))
330 static const char msg[] = "\
331 cannot allocate TLS data structures for initial thread\n";
332 TEMP_FAILURE_RETRY (write_not_cancel (STDERR_FILENO,
333 msg, sizeof msg - 1));
334 abort ();
336 const char *lossage = TLS_INIT_TP (tcbp, 0);
337 if (__builtin_expect (lossage != NULL, 0))
339 static const char msg[] = "cannot set up thread-local storage: ";
340 const char nl = '\n';
341 TEMP_FAILURE_RETRY (write_not_cancel (STDERR_FILENO,
342 msg, sizeof msg - 1));
343 TEMP_FAILURE_RETRY (write_not_cancel (STDERR_FILENO,
344 lossage, strlen (lossage)));
345 TEMP_FAILURE_RETRY (write_not_cancel (STDERR_FILENO, &nl, 1));
348 /* Though it was allocated with libc's malloc, that was done without
349 the user's __malloc_hook installed. A later realloc that uses
350 the hooks might not work with that block from the plain malloc.
351 So we record this block as unfreeable just as the dynamic linker
352 does when it allocates the DTV before the libc malloc exists. */
353 GL(dl_initial_dtv) = GET_DTV (tcbp);
355 __libc_malloc_pthread_startup (false);
357 # endif
359 self = THREAD_SELF;
361 /* The memory for the thread descriptor was allocated elsewhere as
362 part of the TLS allocation. We have to initialize the data
363 structure by hand. This initialization must mirror the struct
364 definition above. */
365 self->p_nextlive = self->p_prevlive = self;
366 self->p_tid = PTHREAD_THREADS_MAX;
367 self->p_lock = &__pthread_handles[0].h_lock;
368 # ifndef HAVE___THREAD
369 self->p_errnop = &_errno;
370 self->p_h_errnop = &_h_errno;
371 # endif
372 /* self->p_start_args need not be initialized, it's all zero. */
373 self->p_userstack = 1;
374 # if __LT_SPINLOCK_INIT != 0
375 self->p_resume_count = (struct pthread_atomic) __ATOMIC_INITIALIZER;
376 # endif
377 self->p_alloca_cutoff = __MAX_ALLOCA_CUTOFF;
379 /* Another variable which points to the thread descriptor. */
380 __pthread_main_thread = self;
382 /* And fill in the pointer to the thread in the __pthread_handles array. */
383 __pthread_handles[0].h_descr = self;
385 #else /* USE_TLS */
387 /* First of all init __pthread_handles[0] and [1]. */
388 # if __LT_SPINLOCK_INIT != 0
389 __pthread_handles[0].h_lock = __LOCK_INITIALIZER;
390 __pthread_handles[1].h_lock = __LOCK_INITIALIZER;
391 # endif
392 __pthread_handles[0].h_descr = &__pthread_initial_thread;
393 __pthread_handles[1].h_descr = &__pthread_manager_thread;
395 /* If we have special thread_self processing, initialize that for the
396 main thread now. */
397 # ifdef INIT_THREAD_SELF
398 INIT_THREAD_SELF(&__pthread_initial_thread, 0);
399 # endif
400 #endif
402 #if HP_TIMING_AVAIL
403 # ifdef USE_TLS
404 self->p_cpuclock_offset = GL(dl_cpuclock_offset);
405 # else
406 __pthread_initial_thread.p_cpuclock_offset = GL(dl_cpuclock_offset);
407 # endif
408 #endif
/* Hand the function table to libc and remember where its
   multiple-threads flag lives. */
410 __libc_multiple_threads_ptr = __libc_pthread_init (ptr_pthread_functions);
/* Compute the per-thread maximum stack size from RLIMIT_STACK and store
   it in __pthread_max_stacksize.  In the non-floating-stacks case the
   rlimit is also clamped so no stack can grow into the next thread's
   STACK_SIZE slot.  Also lowers the initial thread's alloca cutoff when
   the stack is small.
   NOTE(review): the function's braces were lost in extraction. */
414 void
415 __pthread_init_max_stacksize(void)
417 struct rlimit limit;
418 size_t max_stack;
420 getrlimit(RLIMIT_STACK, &limit);
421 #ifdef FLOATING_STACKS
422 if (limit.rlim_cur == RLIM_INFINITY)
423 limit.rlim_cur = ARCH_STACK_MAX_SIZE;
424 # ifdef NEED_SEPARATE_REGISTER_STACK
425 max_stack = limit.rlim_cur / 2;
426 # else
427 max_stack = limit.rlim_cur;
428 # endif
429 #else
430 /* Play with the stack size limit to make sure that no stack ever grows
431 beyond STACK_SIZE minus one page (to act as a guard page). */
432 # ifdef NEED_SEPARATE_REGISTER_STACK
433 /* STACK_SIZE bytes hold both the main stack and register backing
434 store. The rlimit value applies to each individually. */
435 max_stack = STACK_SIZE/2 - __getpagesize ();
436 # else
437 max_stack = STACK_SIZE - __getpagesize();
438 # endif
439 if (limit.rlim_cur > max_stack) {
440 limit.rlim_cur = max_stack;
441 setrlimit(RLIMIT_STACK, &limit);
443 #endif
444 __pthread_max_stacksize = max_stack;
445 if (max_stack / 4 < __MAX_ALLOCA_CUTOFF)
447 #ifdef USE_TLS
448 pthread_descr self = THREAD_SELF;
449 self->p_alloca_cutoff = max_stack / 4;
450 #else
451 __pthread_initial_thread.p_alloca_cutoff = max_stack / 4;
452 #endif
456 #ifdef SHARED
457 # if USE___THREAD
458 /* When using __thread for this, we do it in libc so as not
459 to give libpthread its own TLS segment just for this. */
460 extern void **__libc_dl_error_tsd (void) __attribute__ ((const));
461 # else
/* Per-thread slot used by ld.so to stash its error-catching state; stored
   in the calling thread's libc-specific TSD array. */
462 static void ** __attribute__ ((const))
463 __libc_dl_error_tsd (void)
465 return &thread_self ()->p_libc_specific[_LIBC_TSD_KEY_DL_ERROR];
467 # endif
468 #endif
470 #ifdef USE_TLS
/* Set up one thread's copy of a statically allocated TLS block for MAP:
   point the DTV slot at the in-TCB location and copy in the initialization
   image, zero-filling the remainder. */
471 static inline void __attribute__((always_inline))
472 init_one_static_tls (pthread_descr descr, struct link_map *map)
474 # if TLS_TCB_AT_TP
475 dtv_t *dtv = GET_DTV (descr);
476 void *dest = (char *) descr - map->l_tls_offset;
477 # elif TLS_DTV_AT_TP
478 dtv_t *dtv = GET_DTV ((pthread_descr) ((char *) descr + TLS_PRE_TCB_SIZE));
479 void *dest = (char *) descr + map->l_tls_offset + TLS_PRE_TCB_SIZE;
480 # else
481 # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
482 # endif
484 /* Fill in the DTV slot so that a later LD/GD access will find it. */
485 dtv[map->l_tls_modid].pointer.val = dest;
486 dtv[map->l_tls_modid].pointer.is_static = true;
488 /* Initialize the memory. */
489 memset (__mempcpy (dest, map->l_tls_initimage, map->l_tls_initimage_size),
490 '\0', map->l_tls_blocksize - map->l_tls_initimage_size);
/* Apply init_one_static_tls to every live thread except the manager
   (handle index 1), taking each handle lock and re-checking the descriptor
   under the lock. */
493 static void
494 __pthread_init_static_tls (struct link_map *map)
496 size_t i;
498 for (i = 0; i < PTHREAD_THREADS_MAX; ++i)
499 if (__pthread_handles[i].h_descr != NULL && i != 1)
501 __pthread_lock (&__pthread_handles[i].h_lock, NULL);
502 if (__pthread_handles[i].h_descr != NULL)
503 init_one_static_tls (__pthread_handles[i].h_descr, map);
504 __pthread_unlock (&__pthread_handles[i].h_lock);
507 #endif
/* One-time library initialization, run as an ELF constructor.  Records the
   bottom of the initial stack, installs the restart/cancel/debug signal
   handlers, adjusts the signal mask, registers the process-exit hook and
   takes over the dynamic linker's error TSD and recursive lock.
   NOTE(review): several brace lines of this function were lost in
   extraction. */
509 static void pthread_initialize(void)
511 struct sigaction sa;
512 sigset_t mask;
514 /* If already done (e.g. by a constructor called earlier!), bail out */
515 if (__pthread_initial_thread_bos != NULL) return;
516 #ifdef TEST_FOR_COMPARE_AND_SWAP
517 /* Test if compare-and-swap is available */
518 __pthread_has_cas = compare_and_swap_is_available();
519 #endif
520 #ifdef FLOATING_STACKS
521 /* We don't need to know the bottom of the stack. Give the pointer some
522 value to signal that initialization happened. */
523 __pthread_initial_thread_bos = (void *) -1l;
524 #else
525 /* Determine stack size limits . */
526 __pthread_init_max_stacksize ();
527 # ifdef _STACK_GROWS_UP
528 /* The initial thread already has all the stack it needs */
529 __pthread_initial_thread_bos = (char *)
530 ((long)CURRENT_STACK_FRAME &~ (STACK_SIZE - 1));
531 # else
532 /* For the initial stack, reserve at least STACK_SIZE bytes of stack
533 below the current stack address, and align that on a
534 STACK_SIZE boundary. */
535 __pthread_initial_thread_bos =
536 (char *)(((long)CURRENT_STACK_FRAME - 2 * STACK_SIZE) & ~(STACK_SIZE - 1));
537 # endif
538 #endif
539 #ifdef USE_TLS
540 /* Update the descriptor for the initial thread. */
541 THREAD_SETMEM (((pthread_descr) NULL), p_pid, __getpid());
542 # ifndef HAVE___THREAD
543 /* Likewise for the resolver state _res. */
544 THREAD_SETMEM (((pthread_descr) NULL), p_resp, &_res);
545 # endif
546 #else
547 /* Update the descriptor for the initial thread. */
548 __pthread_initial_thread.p_pid = __getpid();
549 /* Likewise for the resolver state _res. */
550 __pthread_initial_thread.p_resp = &_res;
551 #endif
552 #if !__ASSUME_REALTIME_SIGNALS
553 /* Initialize real-time signals. */
554 init_rtsigs ();
555 #endif
556 /* Setup signal handlers for the initial thread.
557 Since signal handlers are shared between threads, these settings
558 will be inherited by all other threads. */
559 sa.sa_handler = pthread_handle_sigrestart;
560 sigemptyset(&sa.sa_mask);
561 sa.sa_flags = 0;
562 __libc_sigaction(__pthread_sig_restart, &sa, NULL);
563 sa.sa_handler = pthread_handle_sigcancel;
/* The cancel handler keeps the restart signal blocked while it runs. */
564 sigaddset(&sa.sa_mask, __pthread_sig_restart);
565 // sa.sa_flags = 0;
566 __libc_sigaction(__pthread_sig_cancel, &sa, NULL);
567 if (__pthread_sig_debug > 0) {
568 sa.sa_handler = pthread_handle_sigdebug;
569 sigemptyset(&sa.sa_mask);
570 // sa.sa_flags = 0;
571 __libc_sigaction(__pthread_sig_debug, &sa, NULL);
573 /* Initially, block __pthread_sig_restart. Will be unblocked on demand. */
574 sigemptyset(&mask);
575 sigaddset(&mask, __pthread_sig_restart);
576 sigprocmask(SIG_BLOCK, &mask, NULL);
577 /* And unblock __pthread_sig_cancel if it has been blocked. */
578 sigdelset(&mask, __pthread_sig_restart);
579 sigaddset(&mask, __pthread_sig_cancel);
580 sigprocmask(SIG_UNBLOCK, &mask, NULL);
581 /* Register an exit function to kill all other threads. */
582 /* Do it early so that user-registered atexit functions are called
583 before pthread_*exit_process. */
584 #ifndef HAVE_Z_NODELETE
585 if (__builtin_expect (&__dso_handle != NULL, 1))
586 __cxa_atexit ((void (*) (void *)) pthread_atexit_process, NULL,
587 __dso_handle);
588 else
589 #endif
590 __on_exit (pthread_onexit_process, NULL);
591 /* How many processors. */
592 __pthread_smp_kernel = is_smp_system ();
594 #ifdef SHARED
595 /* Transfer the old value from the dynamic linker's internal location. */
596 *__libc_dl_error_tsd () = *(*GL(dl_error_catch_tsd)) ();
597 GL(dl_error_catch_tsd) = &__libc_dl_error_tsd;
599 /* Make __rtld_lock_{,un}lock_recursive use pthread_mutex_{,un}lock,
600 keep the lock count from the ld.so implementation. */
601 GL(dl_rtld_lock_recursive) = (void *) __pthread_mutex_lock;
602 GL(dl_rtld_unlock_recursive) = (void *) __pthread_mutex_unlock;
603 unsigned int rtld_lock_count = GL(dl_load_lock).mutex.__m_count;
604 GL(dl_load_lock).mutex.__m_count = 0;
605 while (rtld_lock_count-- > 0)
606 __pthread_mutex_lock (&GL(dl_load_lock).mutex);
607 #endif
609 #ifdef USE_TLS
610 GL(dl_init_static_tls) = &__pthread_init_static_tls;
611 #endif
/* Public entry point kept for compatibility: delegates to the internal
   constructor body.  Safe to call more than once — pthread_initialize()
   returns immediately when __pthread_initial_thread_bos is already set. */
void __pthread_initialize(void)
{
  pthread_initialize();
}
/* Start the thread manager: allocate its stack and (with TLS) its TCB,
   create the request pipe, clone the manager (via the event-reporting
   entry point when a debugger asked for TD_CREATE events), and synchronize
   with the debugger.  Returns 0 on success, -1 on failure with all
   partially acquired resources released.
   NOTE(review): many brace lines of this function were lost in
   extraction. */
619 int __pthread_initialize_manager(void)
621 int manager_pipe[2];
622 int pid;
623 struct pthread_request request;
624 int report_events;
625 pthread_descr mgr;
626 #ifdef USE_TLS
627 tcbhead_t *tcbp;
628 #endif
630 __pthread_multiple_threads = 1;
631 #if TLS_MULTIPLE_THREADS_IN_TCB || !defined USE_TLS || !TLS_DTV_AT_TP
632 __pthread_main_thread->p_multiple_threads = 1;
633 #endif
634 *__libc_multiple_threads_ptr = 1;
636 #ifndef HAVE_Z_NODELETE
637 if (__builtin_expect (&__dso_handle != NULL, 1))
638 __cxa_atexit ((void (*) (void *)) pthread_atexit_retcode, NULL,
639 __dso_handle);
640 #endif
642 if (__pthread_max_stacksize == 0)
643 __pthread_init_max_stacksize ();
644 /* If basic initialization not done yet (e.g. we're called from a
645 constructor run before our constructor), do it now */
646 if (__pthread_initial_thread_bos == NULL) pthread_initialize();
647 /* Setup stack for thread manager */
648 __pthread_manager_thread_bos = malloc(THREAD_MANAGER_STACK_SIZE);
649 if (__pthread_manager_thread_bos == NULL) return -1;
650 __pthread_manager_thread_tos =
651 __pthread_manager_thread_bos + THREAD_MANAGER_STACK_SIZE;
652 /* Setup pipe to communicate with thread manager */
653 if (pipe(manager_pipe) == -1) {
654 free(__pthread_manager_thread_bos);
655 return -1;
658 #ifdef USE_TLS
659 /* Allocate memory for the thread descriptor and the dtv. */
660 tcbp = _dl_allocate_tls (NULL);
661 if (tcbp == NULL) {
662 free(__pthread_manager_thread_bos);
663 close_not_cancel(manager_pipe[0]);
664 close_not_cancel(manager_pipe[1]);
665 return -1;
668 # if TLS_TCB_AT_TP
669 mgr = (pthread_descr) tcbp;
670 # elif TLS_DTV_AT_TP
671 /* pthread_descr is located right below tcbhead_t which _dl_allocate_tls
672 returns. */
673 mgr = (pthread_descr) ((char *) tcbp - TLS_PRE_TCB_SIZE);
674 # endif
675 __pthread_handles[1].h_descr = manager_thread = mgr;
677 /* Initialize the descriptor. */
678 #if !defined USE_TLS || !TLS_DTV_AT_TP
679 mgr->p_header.data.tcb = tcbp;
680 mgr->p_header.data.self = mgr;
681 mgr->p_header.data.multiple_threads = 1;
682 #elif TLS_MULTIPLE_THREADS_IN_TCB
683 mgr->p_multiple_threads = 1;
684 #endif
685 mgr->p_lock = &__pthread_handles[1].h_lock;
686 # ifndef HAVE___THREAD
687 mgr->p_errnop = &mgr->p_errno;
688 # endif
689 mgr->p_start_args = (struct pthread_start_args) PTHREAD_START_ARGS_INITIALIZER(__pthread_manager);
690 mgr->p_nr = 1;
691 # if __LT_SPINLOCK_INIT != 0
/* NOTE(review): "self" looks wrong here — there is no "self" in this
   function; this should presumably be mgr->p_resume_count.  Verify against
   upstream. */
692 self->p_resume_count = (struct pthread_atomic) __ATOMIC_INITIALIZER;
693 # endif
694 mgr->p_alloca_cutoff = PTHREAD_STACK_MIN / 4;
695 #else
696 mgr = &__pthread_manager_thread;
697 #endif
699 __pthread_manager_request = manager_pipe[1]; /* writing end */
700 __pthread_manager_reader = manager_pipe[0]; /* reading end */
702 /* Start the thread manager */
703 pid = 0;
704 #ifdef USE_TLS
705 if (__linuxthreads_initial_report_events != 0)
706 THREAD_SETMEM (((pthread_descr) NULL), p_report_events,
707 __linuxthreads_initial_report_events);
708 report_events = THREAD_GETMEM (((pthread_descr) NULL), p_report_events);
709 #else
710 if (__linuxthreads_initial_report_events != 0)
711 __pthread_initial_thread.p_report_events
712 = __linuxthreads_initial_report_events;
713 report_events = __pthread_initial_thread.p_report_events;
714 #endif
715 if (__builtin_expect (report_events, 0))
717 /* It's a bit more complicated. We have to report the creation of
718 the manager thread. */
719 int idx = __td_eventword (TD_CREATE);
720 uint32_t mask = __td_eventmask (TD_CREATE);
721 uint32_t event_bits;
723 #ifdef USE_TLS
724 event_bits = THREAD_GETMEM_NC (((pthread_descr) NULL),
725 p_eventbuf.eventmask.event_bits[idx]);
726 #else
727 event_bits = __pthread_initial_thread.p_eventbuf.eventmask.event_bits[idx];
728 #endif
730 if ((mask & (__pthread_threads_events.event_bits[idx] | event_bits))
731 != 0)
733 __pthread_lock(mgr->p_lock, NULL);
735 #ifdef NEED_SEPARATE_REGISTER_STACK
736 pid = __clone2(__pthread_manager_event,
737 (void **) __pthread_manager_thread_bos,
738 THREAD_MANAGER_STACK_SIZE,
739 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
740 mgr);
741 #elif _STACK_GROWS_UP
742 pid = __clone(__pthread_manager_event,
743 (void **) __pthread_manager_thread_bos,
744 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
745 mgr);
746 #else
747 pid = __clone(__pthread_manager_event,
748 (void **) __pthread_manager_thread_tos,
749 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
750 mgr);
751 #endif
753 if (pid != -1)
755 /* Now fill in the information about the new thread in
756 the newly created thread's data structure. We cannot let
757 the new thread do this since we don't know whether it was
758 already scheduled when we send the event. */
759 mgr->p_eventbuf.eventdata = mgr;
760 mgr->p_eventbuf.eventnum = TD_CREATE;
761 __pthread_last_event = mgr;
762 mgr->p_tid = 2* PTHREAD_THREADS_MAX + 1;
763 mgr->p_pid = pid;
765 /* Now call the function which signals the event. */
766 __linuxthreads_create_event ();
769 /* Now restart the thread. */
770 __pthread_unlock(mgr->p_lock);
774 if (__builtin_expect (pid, 0) == 0)
776 #ifdef NEED_SEPARATE_REGISTER_STACK
777 pid = __clone2(__pthread_manager, (void **) __pthread_manager_thread_bos,
778 THREAD_MANAGER_STACK_SIZE,
779 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND, mgr);
780 #elif _STACK_GROWS_UP
781 pid = __clone(__pthread_manager, (void **) __pthread_manager_thread_bos,
782 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND, mgr);
783 #else
784 pid = __clone(__pthread_manager, (void **) __pthread_manager_thread_tos,
785 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND, mgr);
786 #endif
788 if (__builtin_expect (pid, 0) == -1) {
789 #ifdef USE_TLS
790 _dl_deallocate_tls (tcbp, true);
791 #endif
792 free(__pthread_manager_thread_bos);
793 close_not_cancel(manager_pipe[0]);
794 close_not_cancel(manager_pipe[1]);
795 return -1;
797 mgr->p_tid = 2* PTHREAD_THREADS_MAX + 1;
798 mgr->p_pid = pid;
799 /* Make gdb aware of new thread manager */
800 if (__builtin_expect (__pthread_threads_debug, 0) && __pthread_sig_debug > 0)
802 raise(__pthread_sig_debug);
803 /* We suspend ourself and gdb will wake us up when it is
804 ready to handle us. */
805 __pthread_wait_for_restart_signal(thread_self());
807 /* Synchronize debugging of the thread manager */
808 request.req_kind = REQ_DEBUG;
809 TEMP_FAILURE_RETRY(write_not_cancel(__pthread_manager_request,
810 (char *) &request, sizeof(request)));
811 return 0;
814 /* Thread creation */
/* Create a new thread by sending a REQ_CREATE request to the manager
   (starting the manager first if needed), then sleeping until the manager
   stores the result (p_retcode / p_retval) back in our descriptor.
   Returns 0 and fills *thread on success, else an errno-style code
   (EAGAIN when the manager cannot be started). */
816 int __pthread_create_2_1(pthread_t *thread, const pthread_attr_t *attr,
817 void * (*start_routine)(void *), void *arg)
819 pthread_descr self = thread_self();
820 struct pthread_request request;
821 int retval;
822 if (__builtin_expect (__pthread_manager_request, 0) < 0) {
823 if (__pthread_initialize_manager() < 0) return EAGAIN;
825 request.req_thread = self;
826 request.req_kind = REQ_CREATE;
827 request.req_args.create.attr = attr;
828 request.req_args.create.fn = start_routine;
829 request.req_args.create.arg = arg;
/* The new thread inherits the caller's current signal mask. */
830 sigprocmask(SIG_SETMASK, (const sigset_t *) NULL,
831 &request.req_args.create.mask);
832 TEMP_FAILURE_RETRY(write_not_cancel(__pthread_manager_request,
833 (char *) &request, sizeof(request)));
834 suspend(self);
835 retval = THREAD_GETMEM(self, p_retcode);
836 if (__builtin_expect (retval, 0) == 0)
837 *thread = (pthread_t) THREAD_GETMEM(self, p_retval);
838 return retval;
841 versioned_symbol (libpthread, __pthread_create_2_1, pthread_create, GLIBC_2_1);
843 #if SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_1)
/* glibc 2.0 compatibility entry point: the old pthread_attr_t lacked the
   __guardsize and stack members, so copy only the old-sized prefix and
   fill the new fields with defaults before delegating to the 2.1
   implementation. */
845 int __pthread_create_2_0(pthread_t *thread, const pthread_attr_t *attr,
846 void * (*start_routine)(void *), void *arg)
848 /* The ATTR attribute is not really of type `pthread_attr_t *'. It has
849 the old size and access to the new members might crash the program.
850 We convert the struct now. */
851 pthread_attr_t new_attr;
853 if (attr != NULL)
855 size_t ps = __getpagesize ();
857 memcpy (&new_attr, attr,
858 (size_t) &(((pthread_attr_t*)NULL)->__guardsize));
859 new_attr.__guardsize = ps;
860 new_attr.__stackaddr_set = 0;
861 new_attr.__stackaddr = NULL;
862 new_attr.__stacksize = STACK_SIZE - ps;
863 attr = &new_attr;
865 return __pthread_create_2_1 (thread, attr, start_routine, arg);
867 compat_symbol (libpthread, __pthread_create_2_0, pthread_create, GLIBC_2_0);
868 #endif
870 /* Simple operations on thread identifiers */
872 pthread_descr __pthread_thread_self(void)
874 return thread_self();
877 pthread_t __pthread_self(void)
879 pthread_descr self = thread_self();
880 return THREAD_GETMEM(self, p_tid);
882 strong_alias (__pthread_self, pthread_self);
/* POSIX pthread_equal: return nonzero iff the two identifiers denote the
   same thread.  Thread ids are scalar here, so plain equality suffices. */
int __pthread_equal(pthread_t thread1, pthread_t thread2)
{
  return thread1 == thread2;
}
888 strong_alias (__pthread_equal, pthread_equal);
890 /* Helper function for thread_self in the case of user-provided stacks */
892 #ifndef THREAD_SELF
/* Locate the calling thread's descriptor by scanning the handle table for
   the entry whose stack region contains the current stack pointer.
   NOTE(review): assumes a matching handle exists — the loop has no bound
   check; verify against thread_self()'s callers. */
894 pthread_descr __pthread_find_self(void)
896 char * sp = CURRENT_STACK_FRAME;
897 pthread_handle h;
899 /* __pthread_handles[0] is the initial thread, __pthread_handles[1] is
900 the manager threads handled specially in thread_self(), so start at 2 */
901 h = __pthread_handles + 2;
902 # ifdef _STACK_GROWS_UP
903 while (! (sp >= (char *) h->h_descr && sp < h->h_descr->p_guardaddr)) h++;
904 # else
905 while (! (sp <= (char *) h->h_descr && sp >= h->h_bottom)) h++;
906 # endif
907 return h->h_descr;
910 #else
/* Variant used when THREAD_SELF exists: map the current stack pointer to a
   descriptor, special-casing the manager thread's malloc'd stack, then
   scanning the handle table from index 2. */
912 pthread_descr __pthread_self_stack(void)
914 char *sp = CURRENT_STACK_FRAME;
915 pthread_handle h;
917 if (sp >= __pthread_manager_thread_bos && sp < __pthread_manager_thread_tos)
918 return manager_thread;
919 h = __pthread_handles + 2;
920 # ifdef USE_TLS
921 # ifdef _STACK_GROWS_UP
922 while (h->h_descr == NULL
923 || ! (sp >= h->h_descr->p_stackaddr && sp < h->h_descr->p_guardaddr))
924 h++;
925 # else
926 while (h->h_descr == NULL
927 || ! (sp <= (char *) h->h_descr->p_stackaddr && sp >= h->h_bottom))
928 h++;
929 # endif
930 # else
931 # ifdef _STACK_GROWS_UP
932 while (! (sp >= (char *) h->h_descr && sp < h->h_descr->p_guardaddr))
933 h++;
934 # else
935 while (! (sp <= (char *) h->h_descr && sp >= h->h_bottom))
936 h++;
937 # endif
938 # endif
939 return h->h_descr;
942 #endif
944 /* Thread scheduling */
/* Set scheduling policy and parameters of THREAD via sched_setscheduler on
   its kernel pid, under the handle lock.  Returns 0, ESRCH for a stale
   handle, or errno from the kernel call.  On success the manager's own
   priority is raised if needed so it can always preempt managed threads. */
946 int __pthread_setschedparam(pthread_t thread, int policy,
947 const struct sched_param *param)
949 pthread_handle handle = thread_handle(thread);
950 pthread_descr th;
952 __pthread_lock(&handle->h_lock, NULL);
953 if (__builtin_expect (invalid_handle(handle, thread), 0)) {
954 __pthread_unlock(&handle->h_lock);
955 return ESRCH;
957 th = handle->h_descr;
958 if (__builtin_expect (__sched_setscheduler(th->p_pid, policy, param) == -1,
959 0)) {
960 __pthread_unlock(&handle->h_lock);
961 return errno;
963 th->p_priority = policy == SCHED_OTHER ? 0 : param->sched_priority;
964 __pthread_unlock(&handle->h_lock);
965 if (__pthread_manager_request >= 0)
966 __pthread_manager_adjust_prio(th->p_priority);
967 return 0;
969 strong_alias (__pthread_setschedparam, pthread_setschedparam);
971 int __pthread_getschedparam(pthread_t thread, int *policy,
972 struct sched_param *param)
974 pthread_handle handle = thread_handle(thread);
975 int pid, pol;
977 __pthread_lock(&handle->h_lock, NULL);
978 if (__builtin_expect (invalid_handle(handle, thread), 0)) {
979 __pthread_unlock(&handle->h_lock);
980 return ESRCH;
982 pid = handle->h_descr->p_pid;
983 __pthread_unlock(&handle->h_lock);
984 pol = __sched_getscheduler(pid);
985 if (__builtin_expect (pol, 0) == -1) return errno;
986 if (__sched_getparam(pid, param) == -1) return errno;
987 *policy = pol;
988 return 0;
990 strong_alias (__pthread_getschedparam, pthread_getschedparam);
992 int __pthread_yield (void)
994 /* For now this is equivalent with the POSIX call. */
995 return sched_yield ();
997 weak_alias (__pthread_yield, pthread_yield)
/* Process-wide exit() request */

/* Exit handler: if the manager thread is running, ask it to terminate
   every other thread, then stay suspended until it acknowledges.  */
static void pthread_onexit_process(int retcode, void *arg)
  if (__builtin_expect (__pthread_manager_request, 0) >= 0) {
    struct pthread_request request;
    pthread_descr self = thread_self();

    request.req_thread = self;
    request.req_kind = REQ_PROCESS_EXIT;
    request.req_args.exit.code = retcode;
    TEMP_FAILURE_RETRY(write_not_cancel(__pthread_manager_request,
		       (char *) &request, sizeof(request)));
    /* The manager restarts us once all other threads are dead.  */
    suspend(self);
    /* Main thread should accumulate times for thread manager and its
       children, so that timings for main thread account for all threads. */
    if (self == __pthread_main_thread)
#ifdef USE_TLS
      waitpid(manager_thread->p_pid, NULL, __WCLONE);
#else
      waitpid(__pthread_manager_thread.p_pid, NULL, __WCLONE);
#endif
      /* Since all threads have been asynchronously terminated
	 (possibly holding locks), free cannot be used any more.
	 For mtrace, we'd like to print something though. */
      /* #ifdef USE_TLS
	 tcbhead_t *tcbp = (tcbhead_t *) manager_thread;
	 # if TLS_DTV_AT_TP
	 tcbp = (tcbhead_t) ((char *) tcbp + TLS_PRE_TCB_SIZE);
	 # endif
	 _dl_deallocate_tls (tcbp, true);
	 #endif
	 free (__pthread_manager_thread_bos); */
      __pthread_manager_thread_bos = __pthread_manager_thread_tos = NULL;
#ifndef HAVE_Z_NODELETE
/* NOTE(review): this path presumably exists because, without
   "-z nodelete" support, the handler registration may not survive
   unloading; the return code is stashed here so it is still visible.
   Confirm against the registration code elsewhere in this file.  */
static int __pthread_atexit_retcode;

/* Exit handler: forward to pthread_onexit_process, preferring the live
   RETCODE and falling back to the value saved below.  */
static void pthread_atexit_process(void *arg, int retcode)
  pthread_onexit_process (retcode ?: __pthread_atexit_retcode, arg);

/* Fallback exit handler: only record the exit code.  */
static void pthread_atexit_retcode(void *arg, int retcode)
  __pthread_atexit_retcode = retcode;
#endif
/* The handler for the RESTART signal just records the signal received
   in the thread descriptor, and optionally performs a siglongjmp
   (for pthread_cond_timedwait). */

static void pthread_handle_sigrestart(int sig)
  pthread_descr self = check_thread_self();
  /* Record delivery: __pthread_wait_for_restart_signal loops until
     p_signal equals the restart signal number.  */
  THREAD_SETMEM(self, p_signal, sig);
  /* If the thread registered a jump buffer (the timed suspends do),
     leave the handler via siglongjmp instead of returning.  */
  if (THREAD_GETMEM(self, p_signal_jmp) != NULL)
    siglongjmp(*THREAD_GETMEM(self, p_signal_jmp), 1);
/* The handler for the CANCEL signal checks for cancellation
   (in asynchronous mode), for process-wide exit and exec requests.
   For the thread manager thread, redirect the signal to
   __pthread_manager_sighandler. */

static void pthread_handle_sigcancel(int sig)
  pthread_descr self = check_thread_self();
  sigjmp_buf * jmpbuf;

  /* The manager multiplexes this signal; hand it over untouched.  */
  if (self == manager_thread)
    __pthread_manager_sighandler(sig);
    return;
  /* A process-wide exit is in progress: terminate this thread now.  */
  if (__builtin_expect (__pthread_exit_requested, 0)) {
    /* Main thread should accumulate times for thread manager and its
       children, so that timings for main thread account for all threads. */
    if (self == __pthread_main_thread) {
#ifdef USE_TLS
      waitpid(manager_thread->p_pid, NULL, __WCLONE);
#else
      waitpid(__pthread_manager_thread.p_pid, NULL, __WCLONE);
#endif
    _exit(__pthread_exit_code);
  /* Act on a pending cancellation only if cancellation is enabled.  */
  if (__builtin_expect (THREAD_GETMEM(self, p_canceled), 0)
      && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
    /* Asynchronous mode: exit straight out of the signal handler.  */
    if (THREAD_GETMEM(self, p_canceltype) == PTHREAD_CANCEL_ASYNCHRONOUS)
      __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
    /* Deferred mode with a registered cancellation jump buffer:
       consume the buffer and jump back to the registration point.  */
    jmpbuf = THREAD_GETMEM(self, p_cancel_jmp);
    if (jmpbuf != NULL) {
      THREAD_SETMEM(self, p_cancel_jmp, NULL);
      siglongjmp(*jmpbuf, 1);
/* Handler for the DEBUG signal.
   The debugging strategy is as follows:
   On reception of a REQ_DEBUG request (sent by new threads created to
   the thread manager under debugging mode), the thread manager throws
   __pthread_sig_debug to itself.  The debugger (if active) intercepts
   this signal, takes new threads into account and continues execution
   of the thread manager by propagating the signal, because it doesn't
   know what the signal is specifically for.  In the current
   implementation, the thread manager simply discards it. */

static void pthread_handle_sigdebug(int sig)
  /* Nothing */
/* Reset the state of the thread machinery after a fork().
   Close the pipe used for requests and set the main thread to the forked
   thread.
   Notice that we can't free the stack segments, as the forked thread
   may hold pointers into them. */

void __pthread_reset_main_thread(void)
  pthread_descr self = thread_self();

  if (__pthread_manager_request != -1) {
    /* Free the thread manager stack */
    free(__pthread_manager_thread_bos);
    __pthread_manager_thread_bos = __pthread_manager_thread_tos = NULL;
    /* Close the two ends of the pipe */
    close_not_cancel(__pthread_manager_request);
    close_not_cancel(__pthread_manager_reader);
    __pthread_manager_request = __pthread_manager_reader = -1;

  /* Update the pid of the main thread */
  THREAD_SETMEM(self, p_pid, __getpid());
  /* Make the forked thread the main thread */
  __pthread_main_thread = self;
  /* Collapse the circular list of live threads to just this thread.  */
  THREAD_SETMEM(self, p_nextlive, self);
  THREAD_SETMEM(self, p_prevlive, self);
#if !(USE_TLS && HAVE___THREAD)
  /* Now this thread modifies the global variables. */
  THREAD_SETMEM(self, p_errnop, &_errno);
  THREAD_SETMEM(self, p_h_errnop, &_h_errno);
  THREAD_SETMEM(self, p_resp, &_res);
#endif

#ifndef FLOATING_STACKS
  /* This is to undo the setrlimit call in __pthread_init_max_stacksize.
     XXX This can be wrong if the user set the limit during the run. */
  struct rlimit limit;
  if (getrlimit (RLIMIT_STACK, &limit) == 0
      && limit.rlim_cur != limit.rlim_max)
    limit.rlim_cur = limit.rlim_max;
    setrlimit(RLIMIT_STACK, &limit);
#endif
/* Process-wide exec() request */

/* Called before exec(): terminate every other thread, make the caller
   the main thread again, and restore default dispositions for the
   implementation's signals so the new program image does not inherit
   our handlers.  */
void __pthread_kill_other_threads_np(void)
  struct sigaction sa;
  /* Terminate all other threads and thread manager */
  pthread_onexit_process(0, NULL);
  /* Make current thread the main thread in case the calling thread
     changes its mind, does not exec(), and creates new threads instead. */
  __pthread_reset_main_thread();

  /* Reset the signal handlers behaviour for the signals the
     implementation uses since this would be passed to the new
     process. */
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = 0;
  sa.sa_handler = SIG_DFL;
  __libc_sigaction(__pthread_sig_restart, &sa, NULL);
  __libc_sigaction(__pthread_sig_cancel, &sa, NULL);
  if (__pthread_sig_debug > 0)
    __libc_sigaction(__pthread_sig_debug, &sa, NULL);
weak_alias (__pthread_kill_other_threads_np, pthread_kill_other_threads_np)
/* Concurrency symbol level. */
static int current_level;

/* pthread_setconcurrency: the level is only recorded, never acted on.  */
int __pthread_setconcurrency(int level)
  /* We don't do anything unless we have found a useful interpretation. */
  current_level = level;
  return 0;
weak_alias (__pthread_setconcurrency, pthread_setconcurrency)

/* pthread_getconcurrency: return the last value set (0 initially).  */
int __pthread_getconcurrency(void)
  return current_level;
weak_alias (__pthread_getconcurrency, pthread_getconcurrency)
/* Primitives for controlling thread execution */

/* Block SELF until the restart signal has been delivered to it.  */
void __pthread_wait_for_restart_signal(pthread_descr self)
  sigset_t mask;

  sigprocmask(SIG_SETMASK, NULL, &mask); /* Get current signal mask */
  sigdelset(&mask, __pthread_sig_restart); /* Unblock the restart signal */
  /* p_signal is set by pthread_handle_sigrestart; clear it first so we
     only accept a restart delivered from this point on.  */
  THREAD_SETMEM(self, p_signal, 0);
  do {
    __pthread_sigsuspend(&mask); /* Wait for signal.  Must not be a
				    cancellation point. */
  } while (THREAD_GETMEM(self, p_signal) !=__pthread_sig_restart);

  READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */
1224 #if !__ASSUME_REALTIME_SIGNALS
1225 /* The _old variants are for 2.0 and early 2.1 kernels which don't have RT
1226 signals.
1227 On these kernels, we use SIGUSR1 and SIGUSR2 for restart and cancellation.
1228 Since the restart signal does not queue, we use an atomic counter to create
1229 queuing semantics. This is needed to resolve a rare race condition in
1230 pthread_cond_timedwait_relative. */
/* Wake TH: bump its resume count, and only if TH had already gone to
   sleep (count was -1 before the increment) deliver the restart
   signal.  The counter provides queuing semantics the non-RT signal
   lacks.  */
void __pthread_restart_old(pthread_descr th)
  if (atomic_increment(&th->p_resume_count) == -1)
    kill(th->p_pid, __pthread_sig_restart);
/* Suspend SELF unless a restart was already posted (resume count was
   positive before the decrement).  */
void __pthread_suspend_old(pthread_descr self)
  if (atomic_decrement(&self->p_resume_count) <= 0)
    __pthread_wait_for_restart_signal(self);
/* Timed suspend for the non-RT-signal path.  Returns 1 if the wakeup
   consumed a restart, 0 if the caller must itself resolve the race
   with a potential restart still in flight (see the long comment
   below).  The return-type line precedes this view.  */
__pthread_timedsuspend_old(pthread_descr self, const struct timespec *abstime)
  sigset_t unblock, initial_mask;
  int was_signalled = 0;
  sigjmp_buf jmpbuf;

  /* Only sleep if no restart was already posted.  */
  if (atomic_decrement(&self->p_resume_count) == 0) {
    /* Set up a longjmp handler for the restart signal, unblock
       the signal and sleep. */

    if (sigsetjmp(jmpbuf, 1) == 0) {
      THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
      THREAD_SETMEM(self, p_signal, 0);
      /* Unblock the restart signal */
      sigemptyset(&unblock);
      sigaddset(&unblock, __pthread_sig_restart);
      sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);

      while (1) {
	struct timeval now;
	struct timespec reltime;

	/* Compute a time offset relative to now. */
	__gettimeofday (&now, NULL);
	reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
	reltime.tv_sec = abstime->tv_sec - now.tv_sec;
	if (reltime.tv_nsec < 0) {
	  reltime.tv_nsec += 1000000000;
	  reltime.tv_sec -= 1;

	/* Sleep for the required duration. If woken by a signal,
	   resume waiting as required by Single Unix Specification. */
	if (reltime.tv_sec < 0 || __libc_nanosleep(&reltime, NULL) == 0)
	  break;

      /* Block the restart signal again */
      sigprocmask(SIG_SETMASK, &initial_mask, NULL);
      was_signalled = 0;
    } else {
      /* We got here via siglongjmp from pthread_handle_sigrestart.  */
      was_signalled = 1;
    THREAD_SETMEM(self, p_signal_jmp, NULL);

  /* Now was_signalled is true if we exited the above code
     due to the delivery of a restart signal.  In that case,
     we know we have been dequeued and resumed and that the
     resume count is balanced.  Otherwise, there are some
     cases to consider.  First, try to bump up the resume count
     back to zero.  If it goes to 1, it means restart() was
     invoked on this thread.  The signal must be consumed
     and the count bumped down and everything is cool.  We
     can return a 1 to the caller.
     Otherwise, no restart was delivered yet, so a potential
     race exists; we return a 0 to the caller which must deal
     with this race in an appropriate way; for example by
     atomically removing the thread from consideration for a
     wakeup---if such a thing fails, it means a restart is
     being delivered. */

  if (!was_signalled) {
    if (atomic_increment(&self->p_resume_count) != -1) {
      __pthread_wait_for_restart_signal(self);
      atomic_decrement(&self->p_resume_count); /* should be zero now! */
      /* woke spontaneously and consumed restart signal */
      return 1;
    /* woke spontaneously but did not consume restart---caller must resolve */
    return 0;
  /* woken due to restart signal */
  return 1;
1320 #endif /* __ASSUME_REALTIME_SIGNALS */
/* Wake TH by delivering the restart signal (real-time-signals path,
   where the kernel queues the signal for us).  */
void __pthread_restart_new(pthread_descr th)
  /* The barrier is probably not needed, in which case it still documents
     our assumptions.  The intent is to commit previous writes to shared
     memory so the woken thread will have a consistent view.  Complementary
     read barriers are present in the suspend functions. */
  WRITE_MEMORY_BARRIER();
  kill(th->p_pid, __pthread_sig_restart);
1332 /* There is no __pthread_suspend_new because it would just
1333 be a wasteful wrapper for __pthread_wait_for_restart_signal */
/* Timed suspend for the RT-signals path.  Returns 1 if woken by the
   restart signal, 0 if the wait ended for any other reason (timeout or
   unrelated signal) -- an ambiguous outcome the caller must resolve.
   The return-type line precedes this view.  */
__pthread_timedsuspend_new(pthread_descr self, const struct timespec *abstime)
  sigset_t unblock, initial_mask;
  int was_signalled = 0;
  sigjmp_buf jmpbuf;

  if (sigsetjmp(jmpbuf, 1) == 0) {
    THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
    THREAD_SETMEM(self, p_signal, 0);
    /* Unblock the restart signal */
    sigemptyset(&unblock);
    sigaddset(&unblock, __pthread_sig_restart);
    sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);

    while (1) {
      struct timeval now;
      struct timespec reltime;

      /* Compute a time offset relative to now. */
      __gettimeofday (&now, NULL);
      reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
      reltime.tv_sec = abstime->tv_sec - now.tv_sec;
      if (reltime.tv_nsec < 0) {
	reltime.tv_nsec += 1000000000;
	reltime.tv_sec -= 1;

      /* Sleep for the required duration. If woken by a signal,
	 resume waiting as required by Single Unix Specification. */
      if (reltime.tv_sec < 0 || __libc_nanosleep(&reltime, NULL) == 0)
	break;

    /* Block the restart signal again */
    sigprocmask(SIG_SETMASK, &initial_mask, NULL);
    was_signalled = 0;
  } else {
    /* We got here via siglongjmp from pthread_handle_sigrestart.  */
    was_signalled = 1;
  THREAD_SETMEM(self, p_signal_jmp, NULL);

  /* Now was_signalled is true if we exited the above code
     due to the delivery of a restart signal.  In that case,
     everything is cool.  We have been removed from whatever
     we were waiting on by the other thread, and consumed its signal.

     Otherwise this thread woke up spontaneously, or due to a signal other
     than restart.  This is an ambiguous case that must be resolved by
     the caller; the thread is still eligible for a restart wakeup
     so there is a race. */

  READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */
  return was_signalled;
1392 /* Debugging aid */
1394 #ifdef DEBUG
1395 #include <stdarg.h>
/* Debugging helper: write a formatted message, prefixed with the
   process id, directly to file descriptor 2 (stderr).  */
void __pthread_message(const char * fmt, ...)
{
  char buffer[1024];
  va_list args;
  int len;
  /* Use snprintf and its returned length rather than sprintf plus a
     hard-coded "buffer + 8" offset: the prefix is 8 bytes only while
     the pid fits in 5 digits, which is not guaranteed.  */
  len = snprintf(buffer, sizeof(buffer), "%05d : ", __getpid());
  if (len < 0 || (size_t) len >= sizeof(buffer))
    return;
  va_start(args, fmt);
  vsnprintf(buffer + len, sizeof(buffer) - len, fmt, args);
  va_end(args);
  TEMP_FAILURE_RETRY(write_not_cancel(2, buffer, strlen(buffer)));
}
1408 #endif