Fix parameter name.
[glibc/pb-stable.git] / linuxthreads / pthread.c
blob6211124b313a46f36c388bdc071316496bd61b2e
2 /* Linuxthreads - a simple clone()-based implementation of Posix */
3 /* threads for Linux. */
4 /* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr) */
5 /* */
6 /* This program is free software; you can redistribute it and/or */
7 /* modify it under the terms of the GNU Library General Public License */
8 /* as published by the Free Software Foundation; either version 2 */
9 /* of the License, or (at your option) any later version. */
10 /* */
11 /* This program is distributed in the hope that it will be useful, */
12 /* but WITHOUT ANY WARRANTY; without even the implied warranty of */
13 /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
14 /* GNU Library General Public License for more details. */
16 /* Thread creation, initialization, and basic low-level routines */
18 #include <errno.h>
19 #include <stddef.h>
20 #include <stdio.h>
21 #include <stdlib.h>
22 #include <string.h>
23 #include <unistd.h>
24 #include <fcntl.h>
25 #include <sys/wait.h>
26 #include <sys/resource.h>
27 #include <sys/time.h>
28 #include <shlib-compat.h>
29 #include "pthread.h"
30 #include "internals.h"
31 #include "spinlock.h"
32 #include "restart.h"
33 #include "smp.h"
34 #include <ldsodefs.h>
35 #include <tls.h>
36 #include <version.h>
38 /* Sanity check. */
39 #if !defined __SIGRTMIN || (__SIGRTMAX - __SIGRTMIN) < 3
40 # error "This must not happen"
41 #endif
43 #if !(USE_TLS && HAVE___THREAD)
44 /* These variables are used by the setup code. */
45 extern int _errno;
46 extern int _h_errno;
48 /* We need the global/static resolver state here. */
49 # include <resolv.h>
50 # undef _res
52 extern struct __res_state _res;
53 #endif
55 #ifdef USE_TLS
57 /* We need only a few variables. */
58 static pthread_descr manager_thread;
60 #else
62 /* Descriptor of the initial thread */
64 struct _pthread_descr_struct __pthread_initial_thread = {
65 .p_header.data.self = &__pthread_initial_thread,
66 .p_nextlive = &__pthread_initial_thread,
67 .p_prevlive = &__pthread_initial_thread,
68 .p_tid = PTHREAD_THREADS_MAX,
69 .p_lock = &__pthread_handles[0].h_lock,
70 .p_start_args = PTHREAD_START_ARGS_INITIALIZER(NULL),
71 #if !(USE_TLS && HAVE___THREAD)
72 .p_errnop = &_errno,
73 .p_h_errnop = &_h_errno,
74 .p_resp = &_res,
75 #endif
76 .p_userstack = 1,
77 .p_resume_count = __ATOMIC_INITIALIZER,
78 .p_alloca_cutoff = __MAX_ALLOCA_CUTOFF
81 /* Descriptor of the manager thread; none of this is used but the error
82 variables, the p_pid and p_priority fields,
83 and the address for identification. */
85 #define manager_thread (&__pthread_manager_thread)
86 struct _pthread_descr_struct __pthread_manager_thread = {
87 .p_header.data.self = &__pthread_manager_thread,
88 .p_header.data.multiple_threads = 1,
89 .p_lock = &__pthread_handles[1].h_lock,
90 .p_start_args = PTHREAD_START_ARGS_INITIALIZER(__pthread_manager),
91 #if !(USE_TLS && HAVE___THREAD)
92 .p_errnop = &__pthread_manager_thread.p_errno,
93 #endif
94 .p_nr = 1,
95 .p_resume_count = __ATOMIC_INITIALIZER,
96 .p_alloca_cutoff = PTHREAD_STACK_MIN / 4
98 #endif
100 /* Pointer to the main thread (the father of the thread manager thread) */
101 /* Originally, this is the initial thread, but this changes after fork() */
103 #ifdef USE_TLS
104 pthread_descr __pthread_main_thread;
105 #else
106 pthread_descr __pthread_main_thread = &__pthread_initial_thread;
107 #endif
109 /* Limit between the stack of the initial thread (above) and the
110 stacks of other threads (below). Aligned on a STACK_SIZE boundary. */
112 char *__pthread_initial_thread_bos;
114 /* File descriptor for sending requests to the thread manager. */
115 /* Initially -1, meaning that the thread manager is not running. */
117 int __pthread_manager_request = -1;
119 int __pthread_multiple_threads attribute_hidden;
121 /* Other end of the pipe for sending requests to the thread manager. */
123 int __pthread_manager_reader;
125 /* Limits of the thread manager stack */
127 char *__pthread_manager_thread_bos;
128 char *__pthread_manager_thread_tos;
130 /* For process-wide exit() */
132 int __pthread_exit_requested;
133 int __pthread_exit_code;
135 /* Maximum stack size. */
136 size_t __pthread_max_stacksize;
138 /* Nozero if the machine has more than one processor. */
139 int __pthread_smp_kernel;
142 #if !__ASSUME_REALTIME_SIGNALS
143 /* Pointers that select new or old suspend/resume functions
144 based on availability of rt signals. */
146 void (*__pthread_restart)(pthread_descr) = __pthread_restart_old;
147 void (*__pthread_suspend)(pthread_descr) = __pthread_suspend_old;
148 int (*__pthread_timedsuspend)(pthread_descr, const struct timespec *) = __pthread_timedsuspend_old;
149 #endif /* __ASSUME_REALTIME_SIGNALS */
151 /* Communicate relevant LinuxThreads constants to gdb */
153 const int __pthread_threads_max = PTHREAD_THREADS_MAX;
154 const int __pthread_sizeof_handle = sizeof(struct pthread_handle_struct);
155 const int __pthread_offsetof_descr = offsetof(struct pthread_handle_struct,
156 h_descr);
157 const int __pthread_offsetof_pid = offsetof(struct _pthread_descr_struct,
158 p_pid);
159 const int __linuxthreads_pthread_sizeof_descr
160 = sizeof(struct _pthread_descr_struct);
162 const int __linuxthreads_initial_report_events;
164 const char __linuxthreads_version[] = VERSION;
166 /* Forward declarations */
168 static void pthread_onexit_process(int retcode, void *arg);
169 #ifndef HAVE_Z_NODELETE
170 static void pthread_atexit_process(void *arg, int retcode);
171 static void pthread_atexit_retcode(void *arg, int retcode);
172 #endif
173 static void pthread_handle_sigcancel(int sig);
174 static void pthread_handle_sigrestart(int sig);
175 static void pthread_handle_sigdebug(int sig);
177 /* Signal numbers used for the communication.
178 In these variables we keep track of the used variables. If the
179 platform does not support any real-time signals we will define the
180 values to some unreasonable value which will signal failing of all
181 the functions below. */
182 int __pthread_sig_restart = __SIGRTMIN;
183 int __pthread_sig_cancel = __SIGRTMIN + 1;
184 int __pthread_sig_debug = __SIGRTMIN + 2;
186 extern int __libc_current_sigrtmin_private (void);
188 #if !__ASSUME_REALTIME_SIGNALS
189 static int rtsigs_initialized;
191 static void
192 init_rtsigs (void)
194 if (rtsigs_initialized)
195 return;
197 if (__libc_current_sigrtmin_private () == -1)
199 __pthread_sig_restart = SIGUSR1;
200 __pthread_sig_cancel = SIGUSR2;
201 __pthread_sig_debug = 0;
203 else
205 __pthread_restart = __pthread_restart_new;
206 __pthread_suspend = __pthread_wait_for_restart_signal;
207 __pthread_timedsuspend = __pthread_timedsuspend_new;
210 rtsigs_initialized = 1;
212 #endif
215 /* Initialize the pthread library.
216 Initialization is split in two functions:
217 - a constructor function that blocks the __pthread_sig_restart signal
218 (must do this very early, since the program could capture the signal
219 mask with e.g. sigsetjmp before creating the first thread);
220 - a regular function called from pthread_create when needed. */
222 static void pthread_initialize(void) __attribute__((constructor));
224 #ifndef HAVE_Z_NODELETE
225 extern void *__dso_handle __attribute__ ((weak));
226 #endif
229 #if defined USE_TLS && !defined SHARED
230 extern void __libc_setup_tls (size_t tcbsize, size_t tcbalign);
231 #endif
233 struct pthread_functions __pthread_functions =
235 #if !(USE_TLS && HAVE___THREAD)
236 .ptr_pthread_internal_tsd_set = __pthread_internal_tsd_set,
237 .ptr_pthread_internal_tsd_get = __pthread_internal_tsd_get,
238 .ptr_pthread_internal_tsd_address = __pthread_internal_tsd_address,
239 #endif
240 .ptr_pthread_fork = __pthread_fork,
241 .ptr_pthread_attr_destroy = __pthread_attr_destroy,
242 #if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
243 .ptr___pthread_attr_init_2_0 = __pthread_attr_init_2_0,
244 #endif
245 .ptr___pthread_attr_init_2_1 = __pthread_attr_init_2_1,
246 .ptr_pthread_attr_getdetachstate = __pthread_attr_getdetachstate,
247 .ptr_pthread_attr_setdetachstate = __pthread_attr_setdetachstate,
248 .ptr_pthread_attr_getinheritsched = __pthread_attr_getinheritsched,
249 .ptr_pthread_attr_setinheritsched = __pthread_attr_setinheritsched,
250 .ptr_pthread_attr_getschedparam = __pthread_attr_getschedparam,
251 .ptr_pthread_attr_setschedparam = __pthread_attr_setschedparam,
252 .ptr_pthread_attr_getschedpolicy = __pthread_attr_getschedpolicy,
253 .ptr_pthread_attr_setschedpolicy = __pthread_attr_setschedpolicy,
254 .ptr_pthread_attr_getscope = __pthread_attr_getscope,
255 .ptr_pthread_attr_setscope = __pthread_attr_setscope,
256 .ptr_pthread_condattr_destroy = __pthread_condattr_destroy,
257 .ptr_pthread_condattr_init = __pthread_condattr_init,
258 .ptr___pthread_cond_broadcast = __pthread_cond_broadcast,
259 .ptr___pthread_cond_destroy = __pthread_cond_destroy,
260 .ptr___pthread_cond_init = __pthread_cond_init,
261 .ptr___pthread_cond_signal = __pthread_cond_signal,
262 .ptr___pthread_cond_wait = __pthread_cond_wait,
263 .ptr_pthread_equal = __pthread_equal,
264 .ptr___pthread_exit = __pthread_exit,
265 .ptr_pthread_getschedparam = __pthread_getschedparam,
266 .ptr_pthread_setschedparam = __pthread_setschedparam,
267 .ptr_pthread_mutex_destroy = __pthread_mutex_destroy,
268 .ptr_pthread_mutex_init = __pthread_mutex_init,
269 .ptr_pthread_mutex_lock = __pthread_mutex_lock,
270 .ptr_pthread_mutex_trylock = __pthread_mutex_trylock,
271 .ptr_pthread_mutex_unlock = __pthread_mutex_unlock,
272 .ptr_pthread_self = __pthread_self,
273 .ptr_pthread_setcancelstate = __pthread_setcancelstate,
274 .ptr_pthread_setcanceltype = __pthread_setcanceltype,
275 .ptr_pthread_do_exit = __pthread_do_exit,
276 .ptr_pthread_thread_self = __pthread_thread_self,
277 .ptr_pthread_cleanup_upto = __pthread_cleanup_upto,
278 .ptr_pthread_sigaction = __pthread_sigaction,
279 .ptr_pthread_sigwait = __pthread_sigwait,
280 .ptr_pthread_raise = __pthread_raise
282 #ifdef SHARED
283 # define ptr_pthread_functions &__pthread_functions
284 #else
285 # define ptr_pthread_functions NULL
286 #endif
288 static int *__libc_multiple_threads_ptr;
290 /* Do some minimal initialization which has to be done during the
291 startup of the C library. */
292 void
293 __pthread_initialize_minimal(void)
295 #ifdef USE_TLS
296 pthread_descr self;
298 /* First of all init __pthread_handles[0] and [1] if needed. */
299 # if __LT_SPINLOCK_INIT != 0
300 __pthread_handles[0].h_lock = __LOCK_INITIALIZER;
301 __pthread_handles[1].h_lock = __LOCK_INITIALIZER;
302 # endif
303 # ifndef SHARED
304 /* Unlike in the dynamically linked case the dynamic linker has not
305 taken care of initializing the TLS data structures. */
306 __libc_setup_tls (TLS_TCB_SIZE, TLS_TCB_ALIGN);
307 # elif !USE___THREAD
308 if (__builtin_expect (GL(dl_tls_dtv_slotinfo_list) == NULL, 0))
310 tcbhead_t *tcbp;
312 /* There is no actual TLS being used, so the thread register
313 was not initialized in the dynamic linker. */
315 /* We need to install special hooks so that the malloc and memalign
316 calls in _dl_tls_setup and _dl_allocate_tls won't cause full
317 malloc initialization that will try to set up its thread state. */
319 extern void __libc_malloc_pthread_startup (bool first_time);
320 __libc_malloc_pthread_startup (true);
322 if (__builtin_expect (_dl_tls_setup (), 0)
323 || __builtin_expect ((tcbp = _dl_allocate_tls (NULL)) == NULL, 0))
325 static const char msg[] = "\
326 cannot allocate TLS data structures for initial thread\n";
327 TEMP_FAILURE_RETRY (__libc_write (STDERR_FILENO,
328 msg, sizeof msg - 1));
329 abort ();
331 const char *lossage = TLS_INIT_TP (tcbp, 0);
332 if (__builtin_expect (lossage != NULL, 0))
334 static const char msg[] = "cannot set up thread-local storage: ";
335 const char nl = '\n';
336 TEMP_FAILURE_RETRY (__libc_write (STDERR_FILENO,
337 msg, sizeof msg - 1));
338 TEMP_FAILURE_RETRY (__libc_write (STDERR_FILENO,
339 lossage, strlen (lossage)));
340 TEMP_FAILURE_RETRY (__libc_write (STDERR_FILENO, &nl, 1));
343 /* Though it was allocated with libc's malloc, that was done without
344 the user's __malloc_hook installed. A later realloc that uses
345 the hooks might not work with that block from the plain malloc.
346 So we record this block as unfreeable just as the dynamic linker
347 does when it allocates the DTV before the libc malloc exists. */
348 GL(dl_initial_dtv) = GET_DTV (tcbp);
350 __libc_malloc_pthread_startup (false);
352 # endif
354 self = THREAD_SELF;
356 /* The memory for the thread descriptor was allocated elsewhere as
357 part of the TLS allocation. We have to initialize the data
358 structure by hand. This initialization must mirror the struct
359 definition above. */
360 self->p_nextlive = self->p_prevlive = self;
361 self->p_tid = PTHREAD_THREADS_MAX;
362 self->p_lock = &__pthread_handles[0].h_lock;
363 # ifndef HAVE___THREAD
364 self->p_errnop = &_errno;
365 self->p_h_errnop = &_h_errno;
366 # endif
367 /* self->p_start_args need not be initialized, it's all zero. */
368 self->p_userstack = 1;
369 # if __LT_SPINLOCK_INIT != 0
370 self->p_resume_count = (struct pthread_atomic) __ATOMIC_INITIALIZER;
371 # endif
372 self->p_alloca_cutoff = __MAX_ALLOCA_CUTOFF;
374 /* Another variable which points to the thread descriptor. */
375 __pthread_main_thread = self;
377 /* And fill in the pointer the the thread __pthread_handles array. */
378 __pthread_handles[0].h_descr = self;
380 #else /* USE_TLS */
382 /* First of all init __pthread_handles[0] and [1]. */
383 # if __LT_SPINLOCK_INIT != 0
384 __pthread_handles[0].h_lock = __LOCK_INITIALIZER;
385 __pthread_handles[1].h_lock = __LOCK_INITIALIZER;
386 # endif
387 __pthread_handles[0].h_descr = &__pthread_initial_thread;
388 __pthread_handles[1].h_descr = &__pthread_manager_thread;
390 /* If we have special thread_self processing, initialize that for the
391 main thread now. */
392 # ifdef INIT_THREAD_SELF
393 INIT_THREAD_SELF(&__pthread_initial_thread, 0);
394 # endif
395 #endif
397 #if HP_TIMING_AVAIL
398 # ifdef USE_TLS
399 self->p_cpuclock_offset = GL(dl_cpuclock_offset);
400 # else
401 __pthread_initial_thread.p_cpuclock_offset = GL(dl_cpuclock_offset);
402 # endif
403 #endif
405 __libc_multiple_threads_ptr = __libc_pthread_init (ptr_pthread_functions);
409 void
410 __pthread_init_max_stacksize(void)
412 struct rlimit limit;
413 size_t max_stack;
415 getrlimit(RLIMIT_STACK, &limit);
416 #ifdef FLOATING_STACKS
417 if (limit.rlim_cur == RLIM_INFINITY)
418 limit.rlim_cur = ARCH_STACK_MAX_SIZE;
419 # ifdef NEED_SEPARATE_REGISTER_STACK
420 max_stack = limit.rlim_cur / 2;
421 # else
422 max_stack = limit.rlim_cur;
423 # endif
424 #else
425 /* Play with the stack size limit to make sure that no stack ever grows
426 beyond STACK_SIZE minus one page (to act as a guard page). */
427 # ifdef NEED_SEPARATE_REGISTER_STACK
428 /* STACK_SIZE bytes hold both the main stack and register backing
429 store. The rlimit value applies to each individually. */
430 max_stack = STACK_SIZE/2 - __getpagesize ();
431 # else
432 max_stack = STACK_SIZE - __getpagesize();
433 # endif
434 if (limit.rlim_cur > max_stack) {
435 limit.rlim_cur = max_stack;
436 setrlimit(RLIMIT_STACK, &limit);
438 #endif
439 __pthread_max_stacksize = max_stack;
440 if (max_stack / 4 < __MAX_ALLOCA_CUTOFF)
442 #ifdef USE_TLS
443 pthread_descr self = THREAD_SELF;
444 self->p_alloca_cutoff = max_stack / 4;
445 #else
446 __pthread_initial_thread.p_alloca_cutoff = max_stack / 4;
447 #endif
#ifdef SHARED
# if USE___THREAD
/* When using __thread for this, we do it in libc so as not
   to give libpthread its own TLS segment just for this.  */
extern void **__libc_dl_error_tsd (void) __attribute__ ((const));
# else
/* Return the address of the calling thread's dlerror TSD slot.  */
static void ** __attribute__ ((const))
__libc_dl_error_tsd (void)
{
  return &thread_self ()->p_libc_specific[_LIBC_TSD_KEY_DL_ERROR];
}
# endif
#endif
465 static void pthread_initialize(void)
467 struct sigaction sa;
468 sigset_t mask;
470 /* If already done (e.g. by a constructor called earlier!), bail out */
471 if (__pthread_initial_thread_bos != NULL) return;
472 #ifdef TEST_FOR_COMPARE_AND_SWAP
473 /* Test if compare-and-swap is available */
474 __pthread_has_cas = compare_and_swap_is_available();
475 #endif
476 #ifdef FLOATING_STACKS
477 /* We don't need to know the bottom of the stack. Give the pointer some
478 value to signal that initialization happened. */
479 __pthread_initial_thread_bos = (void *) -1l;
480 #else
481 /* Determine stack size limits . */
482 __pthread_init_max_stacksize ();
483 # ifdef _STACK_GROWS_UP
484 /* The initial thread already has all the stack it needs */
485 __pthread_initial_thread_bos = (char *)
486 ((long)CURRENT_STACK_FRAME &~ (STACK_SIZE - 1));
487 # else
488 /* For the initial stack, reserve at least STACK_SIZE bytes of stack
489 below the current stack address, and align that on a
490 STACK_SIZE boundary. */
491 __pthread_initial_thread_bos =
492 (char *)(((long)CURRENT_STACK_FRAME - 2 * STACK_SIZE) & ~(STACK_SIZE - 1));
493 # endif
494 #endif
495 #ifdef USE_TLS
496 /* Update the descriptor for the initial thread. */
497 THREAD_SETMEM (((pthread_descr) NULL), p_pid, __getpid());
498 # ifndef HAVE___THREAD
499 /* Likewise for the resolver state _res. */
500 THREAD_SETMEM (((pthread_descr) NULL), p_resp, &_res);
501 # endif
502 #else
503 /* Update the descriptor for the initial thread. */
504 __pthread_initial_thread.p_pid = __getpid();
505 /* Likewise for the resolver state _res. */
506 __pthread_initial_thread.p_resp = &_res;
507 #endif
508 #if !__ASSUME_REALTIME_SIGNALS
509 /* Initialize real-time signals. */
510 init_rtsigs ();
511 #endif
512 /* Setup signal handlers for the initial thread.
513 Since signal handlers are shared between threads, these settings
514 will be inherited by all other threads. */
515 sa.sa_handler = pthread_handle_sigrestart;
516 sigemptyset(&sa.sa_mask);
517 sa.sa_flags = 0;
518 __libc_sigaction(__pthread_sig_restart, &sa, NULL);
519 sa.sa_handler = pthread_handle_sigcancel;
520 // sa.sa_flags = 0;
521 __libc_sigaction(__pthread_sig_cancel, &sa, NULL);
522 if (__pthread_sig_debug > 0) {
523 sa.sa_handler = pthread_handle_sigdebug;
524 sigemptyset(&sa.sa_mask);
525 // sa.sa_flags = 0;
526 __libc_sigaction(__pthread_sig_debug, &sa, NULL);
528 /* Initially, block __pthread_sig_restart. Will be unblocked on demand. */
529 sigemptyset(&mask);
530 sigaddset(&mask, __pthread_sig_restart);
531 sigprocmask(SIG_BLOCK, &mask, NULL);
532 /* And unblock __pthread_sig_cancel if it has been blocked. */
533 sigdelset(&mask, __pthread_sig_restart);
534 sigaddset(&mask, __pthread_sig_cancel);
535 sigprocmask(SIG_UNBLOCK, &mask, NULL);
536 /* Register an exit function to kill all other threads. */
537 /* Do it early so that user-registered atexit functions are called
538 before pthread_*exit_process. */
539 #ifndef HAVE_Z_NODELETE
540 if (__builtin_expect (&__dso_handle != NULL, 1))
541 __cxa_atexit ((void (*) (void *)) pthread_atexit_process, NULL,
542 __dso_handle);
543 else
544 #endif
545 __on_exit (pthread_onexit_process, NULL);
546 /* How many processors. */
547 __pthread_smp_kernel = is_smp_system ();
549 #ifdef SHARED
550 /* Transfer the old value from the dynamic linker's internal location. */
551 *__libc_dl_error_tsd () = *(*GL(dl_error_catch_tsd)) ();
552 GL(dl_error_catch_tsd) = &__libc_dl_error_tsd;
553 #endif
/* Exported entry point; simply forwards to the internal initializer.  */
void __pthread_initialize(void)
{
  pthread_initialize();
}
561 int __pthread_initialize_manager(void)
563 int manager_pipe[2];
564 int pid;
565 struct pthread_request request;
566 int report_events;
567 pthread_descr mgr;
568 #ifdef USE_TLS
569 tcbhead_t *tcbp;
570 #endif
572 __pthread_multiple_threads = 1;
573 #if TLS_MULTIPLE_THREADS_IN_TCB || !defined USE_TLS || !TLS_DTV_AT_TP
574 __pthread_main_thread->p_multiple_threads = 1;
575 #endif
576 *__libc_multiple_threads_ptr = 1;
578 #ifndef HAVE_Z_NODELETE
579 if (__builtin_expect (&__dso_handle != NULL, 1))
580 __cxa_atexit ((void (*) (void *)) pthread_atexit_retcode, NULL,
581 __dso_handle);
582 #endif
584 if (__pthread_max_stacksize == 0)
585 __pthread_init_max_stacksize ();
586 /* If basic initialization not done yet (e.g. we're called from a
587 constructor run before our constructor), do it now */
588 if (__pthread_initial_thread_bos == NULL) pthread_initialize();
589 /* Setup stack for thread manager */
590 __pthread_manager_thread_bos = malloc(THREAD_MANAGER_STACK_SIZE);
591 if (__pthread_manager_thread_bos == NULL) return -1;
592 __pthread_manager_thread_tos =
593 __pthread_manager_thread_bos + THREAD_MANAGER_STACK_SIZE;
594 /* Setup pipe to communicate with thread manager */
595 if (pipe(manager_pipe) == -1) {
596 free(__pthread_manager_thread_bos);
597 return -1;
600 #ifdef USE_TLS
601 /* Allocate memory for the thread descriptor and the dtv. */
602 tcbp = _dl_allocate_tls (NULL);
603 if (tcbp == NULL) {
604 free(__pthread_manager_thread_bos);
605 __libc_close(manager_pipe[0]);
606 __libc_close(manager_pipe[1]);
607 return -1;
610 # if TLS_TCB_AT_TP
611 mgr = (pthread_descr) tcbp;
612 # elif TLS_DTV_AT_TP
613 /* pthread_descr is located right below tcbhead_t which _dl_allocate_tls
614 returns. */
615 mgr = (pthread_descr) ((char *) tcbp - TLS_PRE_TCB_SIZE);
616 # endif
617 __pthread_handles[1].h_descr = manager_thread = mgr;
619 /* Initialize the descriptor. */
620 #if !defined USE_TLS || !TLS_DTV_AT_TP
621 mgr->p_header.data.tcb = tcbp;
622 mgr->p_header.data.self = mgr;
623 mgr->p_header.data.multiple_threads = 1;
624 #elif TLS_MULTIPLE_THREADS_IN_TCB
625 mgr->p_multiple_threads = 1;
626 #endif
627 mgr->p_lock = &__pthread_handles[1].h_lock;
628 # ifndef HAVE___THREAD
629 mgr->p_errnop = &mgr->p_errno;
630 # endif
631 mgr->p_start_args = (struct pthread_start_args) PTHREAD_START_ARGS_INITIALIZER(__pthread_manager);
632 mgr->p_nr = 1;
633 # if __LT_SPINLOCK_INIT != 0
634 self->p_resume_count = (struct pthread_atomic) __ATOMIC_INITIALIZER;
635 # endif
636 mgr->p_alloca_cutoff = PTHREAD_STACK_MIN / 4;
637 #else
638 mgr = &__pthread_manager_thread;
639 #endif
641 __pthread_manager_request = manager_pipe[1]; /* writing end */
642 __pthread_manager_reader = manager_pipe[0]; /* reading end */
644 /* Start the thread manager */
645 pid = 0;
646 #ifdef USE_TLS
647 if (__linuxthreads_initial_report_events != 0)
648 THREAD_SETMEM (((pthread_descr) NULL), p_report_events,
649 __linuxthreads_initial_report_events);
650 report_events = THREAD_GETMEM (((pthread_descr) NULL), p_report_events);
651 #else
652 if (__linuxthreads_initial_report_events != 0)
653 __pthread_initial_thread.p_report_events
654 = __linuxthreads_initial_report_events;
655 report_events = __pthread_initial_thread.p_report_events;
656 #endif
657 if (__builtin_expect (report_events, 0))
659 /* It's a bit more complicated. We have to report the creation of
660 the manager thread. */
661 int idx = __td_eventword (TD_CREATE);
662 uint32_t mask = __td_eventmask (TD_CREATE);
663 uint32_t event_bits;
665 #ifdef USE_TLS
666 event_bits = THREAD_GETMEM_NC (((pthread_descr) NULL),
667 p_eventbuf.eventmask.event_bits[idx]);
668 #else
669 event_bits = __pthread_initial_thread.p_eventbuf.eventmask.event_bits[idx];
670 #endif
672 if ((mask & (__pthread_threads_events.event_bits[idx] | event_bits))
673 != 0)
675 __pthread_lock(mgr->p_lock, NULL);
677 #ifdef NEED_SEPARATE_REGISTER_STACK
678 pid = __clone2(__pthread_manager_event,
679 (void **) __pthread_manager_thread_bos,
680 THREAD_MANAGER_STACK_SIZE,
681 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
682 mgr);
683 #elif _STACK_GROWS_UP
684 pid = __clone(__pthread_manager_event,
685 (void **) __pthread_manager_thread_bos,
686 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
687 mgr);
688 #else
689 pid = __clone(__pthread_manager_event,
690 (void **) __pthread_manager_thread_tos,
691 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
692 mgr);
693 #endif
695 if (pid != -1)
697 /* Now fill in the information about the new thread in
698 the newly created thread's data structure. We cannot let
699 the new thread do this since we don't know whether it was
700 already scheduled when we send the event. */
701 mgr->p_eventbuf.eventdata = mgr;
702 mgr->p_eventbuf.eventnum = TD_CREATE;
703 __pthread_last_event = mgr;
704 mgr->p_tid = 2* PTHREAD_THREADS_MAX + 1;
705 mgr->p_pid = pid;
707 /* Now call the function which signals the event. */
708 __linuxthreads_create_event ();
711 /* Now restart the thread. */
712 __pthread_unlock(mgr->p_lock);
716 if (__builtin_expect (pid, 0) == 0)
718 #ifdef NEED_SEPARATE_REGISTER_STACK
719 pid = __clone2(__pthread_manager, (void **) __pthread_manager_thread_bos,
720 THREAD_MANAGER_STACK_SIZE,
721 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND, mgr);
722 #elif _STACK_GROWS_UP
723 pid = __clone(__pthread_manager, (void **) __pthread_manager_thread_bos,
724 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND, mgr);
725 #else
726 pid = __clone(__pthread_manager, (void **) __pthread_manager_thread_tos,
727 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND, mgr);
728 #endif
730 if (__builtin_expect (pid, 0) == -1) {
731 free(__pthread_manager_thread_bos);
732 __libc_close(manager_pipe[0]);
733 __libc_close(manager_pipe[1]);
734 return -1;
736 mgr->p_tid = 2* PTHREAD_THREADS_MAX + 1;
737 mgr->p_pid = pid;
738 /* Make gdb aware of new thread manager */
739 if (__builtin_expect (__pthread_threads_debug, 0) && __pthread_sig_debug > 0)
741 raise(__pthread_sig_debug);
742 /* We suspend ourself and gdb will wake us up when it is
743 ready to handle us. */
744 __pthread_wait_for_restart_signal(thread_self());
746 /* Synchronize debugging of the thread manager */
747 request.req_kind = REQ_DEBUG;
748 TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
749 (char *) &request, sizeof(request)));
750 return 0;
753 /* Thread creation */
755 int __pthread_create_2_1(pthread_t *thread, const pthread_attr_t *attr,
756 void * (*start_routine)(void *), void *arg)
758 pthread_descr self = thread_self();
759 struct pthread_request request;
760 int retval;
761 if (__builtin_expect (__pthread_manager_request, 0) < 0) {
762 if (__pthread_initialize_manager() < 0) return EAGAIN;
764 request.req_thread = self;
765 request.req_kind = REQ_CREATE;
766 request.req_args.create.attr = attr;
767 request.req_args.create.fn = start_routine;
768 request.req_args.create.arg = arg;
769 sigprocmask(SIG_SETMASK, (const sigset_t *) NULL,
770 &request.req_args.create.mask);
771 TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
772 (char *) &request, sizeof(request)));
773 suspend(self);
774 retval = THREAD_GETMEM(self, p_retcode);
775 if (__builtin_expect (retval, 0) == 0)
776 *thread = (pthread_t) THREAD_GETMEM(self, p_retval);
777 return retval;
780 versioned_symbol (libpthread, __pthread_create_2_1, pthread_create, GLIBC_2_1);
782 #if SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_1)
784 int __pthread_create_2_0(pthread_t *thread, const pthread_attr_t *attr,
785 void * (*start_routine)(void *), void *arg)
787 /* The ATTR attribute is not really of type `pthread_attr_t *'. It has
788 the old size and access to the new members might crash the program.
789 We convert the struct now. */
790 pthread_attr_t new_attr;
792 if (attr != NULL)
794 size_t ps = __getpagesize ();
796 memcpy (&new_attr, attr,
797 (size_t) &(((pthread_attr_t*)NULL)->__guardsize));
798 new_attr.__guardsize = ps;
799 new_attr.__stackaddr_set = 0;
800 new_attr.__stackaddr = NULL;
801 new_attr.__stacksize = STACK_SIZE - ps;
802 attr = &new_attr;
804 return __pthread_create_2_1 (thread, attr, start_routine, arg);
806 compat_symbol (libpthread, __pthread_create_2_0, pthread_create, GLIBC_2_0);
807 #endif
809 /* Simple operations on thread identifiers */
811 pthread_descr __pthread_thread_self(void)
813 return thread_self();
816 pthread_t __pthread_self(void)
818 pthread_descr self = thread_self();
819 return THREAD_GETMEM(self, p_tid);
821 strong_alias (__pthread_self, pthread_self);
/* Two thread identifiers are equal iff they are the same value.  */
int __pthread_equal(pthread_t thread1, pthread_t thread2)
{
  return thread1 == thread2;
}
827 strong_alias (__pthread_equal, pthread_equal);
829 /* Helper function for thread_self in the case of user-provided stacks */
831 #ifndef THREAD_SELF
833 pthread_descr __pthread_find_self(void)
835 char * sp = CURRENT_STACK_FRAME;
836 pthread_handle h;
838 /* __pthread_handles[0] is the initial thread, __pthread_handles[1] is
839 the manager threads handled specially in thread_self(), so start at 2 */
840 h = __pthread_handles + 2;
841 while (! (sp <= (char *) h->h_descr && sp >= h->h_bottom)) h++;
842 return h->h_descr;
845 #else
847 static pthread_descr thread_self_stack(void)
849 char *sp = CURRENT_STACK_FRAME;
850 pthread_handle h;
852 if (sp >= __pthread_manager_thread_bos && sp < __pthread_manager_thread_tos)
853 return manager_thread;
854 h = __pthread_handles + 2;
855 # ifdef USE_TLS
856 while (h->h_descr == NULL
857 || ! (sp <= (char *) h->h_descr->p_stackaddr && sp >= h->h_bottom))
858 h++;
859 # else
860 while (! (sp <= (char *) h->h_descr && sp >= h->h_bottom))
861 h++;
862 # endif
863 return h->h_descr;
866 #endif
868 /* Thread scheduling */
870 int __pthread_setschedparam(pthread_t thread, int policy,
871 const struct sched_param *param)
873 pthread_handle handle = thread_handle(thread);
874 pthread_descr th;
876 __pthread_lock(&handle->h_lock, NULL);
877 if (__builtin_expect (invalid_handle(handle, thread), 0)) {
878 __pthread_unlock(&handle->h_lock);
879 return ESRCH;
881 th = handle->h_descr;
882 if (__builtin_expect (__sched_setscheduler(th->p_pid, policy, param) == -1,
883 0)) {
884 __pthread_unlock(&handle->h_lock);
885 return errno;
887 th->p_priority = policy == SCHED_OTHER ? 0 : param->sched_priority;
888 __pthread_unlock(&handle->h_lock);
889 if (__pthread_manager_request >= 0)
890 __pthread_manager_adjust_prio(th->p_priority);
891 return 0;
893 strong_alias (__pthread_setschedparam, pthread_setschedparam);
895 int __pthread_getschedparam(pthread_t thread, int *policy,
896 struct sched_param *param)
898 pthread_handle handle = thread_handle(thread);
899 int pid, pol;
901 __pthread_lock(&handle->h_lock, NULL);
902 if (__builtin_expect (invalid_handle(handle, thread), 0)) {
903 __pthread_unlock(&handle->h_lock);
904 return ESRCH;
906 pid = handle->h_descr->p_pid;
907 __pthread_unlock(&handle->h_lock);
908 pol = __sched_getscheduler(pid);
909 if (__builtin_expect (pol, 0) == -1) return errno;
910 if (__sched_getparam(pid, param) == -1) return errno;
911 *policy = pol;
912 return 0;
914 strong_alias (__pthread_getschedparam, pthread_getschedparam);
int __pthread_yield (void)
{
  /* For now this is equivalent with the POSIX call.  */
  return sched_yield ();
}
921 weak_alias (__pthread_yield, pthread_yield)
923 /* Process-wide exit() request */
925 static void pthread_onexit_process(int retcode, void *arg)
927 if (__builtin_expect (__pthread_manager_request, 0) >= 0) {
928 struct pthread_request request;
929 pthread_descr self = thread_self();
931 request.req_thread = self;
932 request.req_kind = REQ_PROCESS_EXIT;
933 request.req_args.exit.code = retcode;
934 TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
935 (char *) &request, sizeof(request)));
936 suspend(self);
937 /* Main thread should accumulate times for thread manager and its
938 children, so that timings for main thread account for all threads. */
939 if (self == __pthread_main_thread)
941 #ifdef USE_TLS
942 waitpid(manager_thread->p_pid, NULL, __WCLONE);
943 #else
944 waitpid(__pthread_manager_thread.p_pid, NULL, __WCLONE);
945 #endif
946 /* Since all threads have been asynchronously terminated
947 (possibly holding locks), free cannot be used any more. */
948 /*free (__pthread_manager_thread_bos);*/
949 __pthread_manager_thread_bos = __pthread_manager_thread_tos = NULL;
#ifndef HAVE_Z_NODELETE
/* Exit code captured by pthread_atexit_retcode for later use.  */
static int __pthread_atexit_retcode;

/* Destructor run via __cxa_atexit: forward to the common exit handler,
   preferring the explicit retcode over the recorded one.  */
static void pthread_atexit_process(void *arg, int retcode)
{
  pthread_onexit_process (retcode ?: __pthread_atexit_retcode, arg);
}

/* Merely remember the exit code for pthread_atexit_process.  */
static void pthread_atexit_retcode(void *arg, int retcode)
{
  __pthread_atexit_retcode = retcode;
}
#endif
968 /* The handler for the RESTART signal just records the signal received
969 in the thread descriptor, and optionally performs a siglongjmp
970 (for pthread_cond_timedwait). */
972 static void pthread_handle_sigrestart(int sig)
974 pthread_descr self = thread_self();
975 THREAD_SETMEM(self, p_signal, sig);
976 if (THREAD_GETMEM(self, p_signal_jmp) != NULL)
977 siglongjmp(*THREAD_GETMEM(self, p_signal_jmp), 1);
980 /* The handler for the CANCEL signal checks for cancellation
981 (in asynchronous mode), for process-wide exit and exec requests.
982 For the thread manager thread, redirect the signal to
983 __pthread_manager_sighandler. */
985 static void pthread_handle_sigcancel(int sig)
987 pthread_descr self = thread_self();
988 sigjmp_buf * jmpbuf;
990 if (self == manager_thread)
992 #ifdef THREAD_SELF
993 /* A new thread might get a cancel signal before it is fully
994 initialized, so that the thread register might still point to the
995 manager thread. Double check that this is really the manager
996 thread. */
997 pthread_descr real_self = thread_self_stack();
998 if (real_self == manager_thread)
1000 __pthread_manager_sighandler(sig);
1001 return;
1003 /* Oops, thread_self() isn't working yet.. */
1004 self = real_self;
1005 # ifdef INIT_THREAD_SELF
1006 INIT_THREAD_SELF(self, self->p_nr);
1007 # endif
1008 #else
1009 __pthread_manager_sighandler(sig);
1010 return;
1011 #endif
1013 if (__builtin_expect (__pthread_exit_requested, 0)) {
1014 /* Main thread should accumulate times for thread manager and its
1015 children, so that timings for main thread account for all threads. */
1016 if (self == __pthread_main_thread) {
1017 #ifdef USE_TLS
1018 waitpid(manager_thread->p_pid, NULL, __WCLONE);
1019 #else
1020 waitpid(__pthread_manager_thread.p_pid, NULL, __WCLONE);
1021 #endif
1023 _exit(__pthread_exit_code);
1025 if (__builtin_expect (THREAD_GETMEM(self, p_canceled), 0)
1026 && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
1027 if (THREAD_GETMEM(self, p_canceltype) == PTHREAD_CANCEL_ASYNCHRONOUS)
1028 __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
1029 jmpbuf = THREAD_GETMEM(self, p_cancel_jmp);
1030 if (jmpbuf != NULL) {
1031 THREAD_SETMEM(self, p_cancel_jmp, NULL);
1032 siglongjmp(*jmpbuf, 1);
/* Handler for the DEBUG signal.
   The debugging strategy is as follows:
   On reception of a REQ_DEBUG request (sent by new threads created to
   the thread manager under debugging mode), the thread manager throws
   __pthread_sig_debug to itself. The debugger (if active) intercepts
   this signal, takes into account new threads and continue execution
   of the thread manager by propagating the signal because it doesn't
   know what it is specifically done for. In the current implementation,
   the thread manager simply discards it. */

static void pthread_handle_sigdebug(int sig)
{
  /* Nothing */
}
1052 /* Reset the state of the thread machinery after a fork().
1053 Close the pipe used for requests and set the main thread to the forked
1054 thread.
1055 Notice that we can't free the stack segments, as the forked thread
1056 may hold pointers into them. */
1058 void __pthread_reset_main_thread(void)
1060 pthread_descr self = thread_self();
1062 if (__pthread_manager_request != -1) {
1063 /* Free the thread manager stack */
1064 free(__pthread_manager_thread_bos);
1065 __pthread_manager_thread_bos = __pthread_manager_thread_tos = NULL;
1066 /* Close the two ends of the pipe */
1067 __libc_close(__pthread_manager_request);
1068 __libc_close(__pthread_manager_reader);
1069 __pthread_manager_request = __pthread_manager_reader = -1;
1072 /* Update the pid of the main thread */
1073 THREAD_SETMEM(self, p_pid, __getpid());
1074 /* Make the forked thread the main thread */
1075 __pthread_main_thread = self;
1076 THREAD_SETMEM(self, p_nextlive, self);
1077 THREAD_SETMEM(self, p_prevlive, self);
1078 #if !(USE_TLS && HAVE___THREAD)
1079 /* Now this thread modifies the global variables. */
1080 THREAD_SETMEM(self, p_errnop, &_errno);
1081 THREAD_SETMEM(self, p_h_errnop, &_h_errno);
1082 THREAD_SETMEM(self, p_resp, &_res);
1083 #endif
1085 #ifndef FLOATING_STACKS
1086 /* This is to undo the setrlimit call in __pthread_init_max_stacksize.
1087 XXX This can be wrong if the user set the limit during the run. */
1089 struct rlimit limit;
1090 if (getrlimit (RLIMIT_STACK, &limit) == 0
1091 && limit.rlim_cur != limit.rlim_max)
1093 limit.rlim_cur = limit.rlim_max;
1094 setrlimit(RLIMIT_STACK, &limit);
1097 #endif
1100 /* Process-wide exec() request */
1102 void __pthread_kill_other_threads_np(void)
1104 struct sigaction sa;
1105 /* Terminate all other threads and thread manager */
1106 pthread_onexit_process(0, NULL);
1107 /* Make current thread the main thread in case the calling thread
1108 changes its mind, does not exec(), and creates new threads instead. */
1109 __pthread_reset_main_thread();
1111 /* Reset the signal handlers behaviour for the signals the
1112 implementation uses since this would be passed to the new
1113 process. */
1114 sigemptyset(&sa.sa_mask);
1115 sa.sa_flags = 0;
1116 sa.sa_handler = SIG_DFL;
1117 __libc_sigaction(__pthread_sig_restart, &sa, NULL);
1118 __libc_sigaction(__pthread_sig_cancel, &sa, NULL);
1119 if (__pthread_sig_debug > 0)
1120 __libc_sigaction(__pthread_sig_debug, &sa, NULL);
1122 weak_alias (__pthread_kill_other_threads_np, pthread_kill_other_threads_np)
1124 /* Concurrency symbol level. */
1125 static int current_level;
1127 int __pthread_setconcurrency(int level)
1129 /* We don't do anything unless we have found a useful interpretation. */
1130 current_level = level;
1131 return 0;
1133 weak_alias (__pthread_setconcurrency, pthread_setconcurrency)
1135 int __pthread_getconcurrency(void)
1137 return current_level;
1139 weak_alias (__pthread_getconcurrency, pthread_getconcurrency)
1141 /* Primitives for controlling thread execution */
1143 void __pthread_wait_for_restart_signal(pthread_descr self)
1145 sigset_t mask;
1147 sigprocmask(SIG_SETMASK, NULL, &mask); /* Get current signal mask */
1148 sigdelset(&mask, __pthread_sig_restart); /* Unblock the restart signal */
1149 THREAD_SETMEM(self, p_signal, 0);
1150 do {
1151 __pthread_sigsuspend(&mask); /* Wait for signal. Must not be a
1152 cancellation point. */
1153 } while (THREAD_GETMEM(self, p_signal) !=__pthread_sig_restart);
1155 READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */
1158 #if !__ASSUME_REALTIME_SIGNALS
1159 /* The _old variants are for 2.0 and early 2.1 kernels which don't have RT
1160 signals.
1161 On these kernels, we use SIGUSR1 and SIGUSR2 for restart and cancellation.
1162 Since the restart signal does not queue, we use an atomic counter to create
1163 queuing semantics. This is needed to resolve a rare race condition in
1164 pthread_cond_timedwait_relative. */
1166 void __pthread_restart_old(pthread_descr th)
1168 if (atomic_increment(&th->p_resume_count) == -1)
1169 kill(th->p_pid, __pthread_sig_restart);
1172 void __pthread_suspend_old(pthread_descr self)
1174 if (atomic_decrement(&self->p_resume_count) <= 0)
1175 __pthread_wait_for_restart_signal(self);
1179 __pthread_timedsuspend_old(pthread_descr self, const struct timespec *abstime)
1181 sigset_t unblock, initial_mask;
1182 int was_signalled = 0;
1183 sigjmp_buf jmpbuf;
1185 if (atomic_decrement(&self->p_resume_count) == 0) {
1186 /* Set up a longjmp handler for the restart signal, unblock
1187 the signal and sleep. */
1189 if (sigsetjmp(jmpbuf, 1) == 0) {
1190 THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
1191 THREAD_SETMEM(self, p_signal, 0);
1192 /* Unblock the restart signal */
1193 sigemptyset(&unblock);
1194 sigaddset(&unblock, __pthread_sig_restart);
1195 sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);
1197 while (1) {
1198 struct timeval now;
1199 struct timespec reltime;
1201 /* Compute a time offset relative to now. */
1202 __gettimeofday (&now, NULL);
1203 reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
1204 reltime.tv_sec = abstime->tv_sec - now.tv_sec;
1205 if (reltime.tv_nsec < 0) {
1206 reltime.tv_nsec += 1000000000;
1207 reltime.tv_sec -= 1;
1210 /* Sleep for the required duration. If woken by a signal,
1211 resume waiting as required by Single Unix Specification. */
1212 if (reltime.tv_sec < 0 || __libc_nanosleep(&reltime, NULL) == 0)
1213 break;
1216 /* Block the restart signal again */
1217 sigprocmask(SIG_SETMASK, &initial_mask, NULL);
1218 was_signalled = 0;
1219 } else {
1220 was_signalled = 1;
1222 THREAD_SETMEM(self, p_signal_jmp, NULL);
1225 /* Now was_signalled is true if we exited the above code
1226 due to the delivery of a restart signal. In that case,
1227 we know we have been dequeued and resumed and that the
1228 resume count is balanced. Otherwise, there are some
1229 cases to consider. First, try to bump up the resume count
1230 back to zero. If it goes to 1, it means restart() was
1231 invoked on this thread. The signal must be consumed
1232 and the count bumped down and everything is cool. We
1233 can return a 1 to the caller.
1234 Otherwise, no restart was delivered yet, so a potential
1235 race exists; we return a 0 to the caller which must deal
1236 with this race in an appropriate way; for example by
1237 atomically removing the thread from consideration for a
1238 wakeup---if such a thing fails, it means a restart is
1239 being delivered. */
1241 if (!was_signalled) {
1242 if (atomic_increment(&self->p_resume_count) != -1) {
1243 __pthread_wait_for_restart_signal(self);
1244 atomic_decrement(&self->p_resume_count); /* should be zero now! */
1245 /* woke spontaneously and consumed restart signal */
1246 return 1;
1248 /* woke spontaneously but did not consume restart---caller must resolve */
1249 return 0;
1251 /* woken due to restart signal */
1252 return 1;
1254 #endif /* __ASSUME_REALTIME_SIGNALS */
1256 void __pthread_restart_new(pthread_descr th)
1258 /* The barrier is proabably not needed, in which case it still documents
1259 our assumptions. The intent is to commit previous writes to shared
1260 memory so the woken thread will have a consistent view. Complementary
1261 read barriers are present to the suspend functions. */
1262 WRITE_MEMORY_BARRIER();
1263 kill(th->p_pid, __pthread_sig_restart);
1266 /* There is no __pthread_suspend_new because it would just
1267 be a wasteful wrapper for __pthread_wait_for_restart_signal */
1270 __pthread_timedsuspend_new(pthread_descr self, const struct timespec *abstime)
1272 sigset_t unblock, initial_mask;
1273 int was_signalled = 0;
1274 sigjmp_buf jmpbuf;
1276 if (sigsetjmp(jmpbuf, 1) == 0) {
1277 THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
1278 THREAD_SETMEM(self, p_signal, 0);
1279 /* Unblock the restart signal */
1280 sigemptyset(&unblock);
1281 sigaddset(&unblock, __pthread_sig_restart);
1282 sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);
1284 while (1) {
1285 struct timeval now;
1286 struct timespec reltime;
1288 /* Compute a time offset relative to now. */
1289 __gettimeofday (&now, NULL);
1290 reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
1291 reltime.tv_sec = abstime->tv_sec - now.tv_sec;
1292 if (reltime.tv_nsec < 0) {
1293 reltime.tv_nsec += 1000000000;
1294 reltime.tv_sec -= 1;
1297 /* Sleep for the required duration. If woken by a signal,
1298 resume waiting as required by Single Unix Specification. */
1299 if (reltime.tv_sec < 0 || __libc_nanosleep(&reltime, NULL) == 0)
1300 break;
1303 /* Block the restart signal again */
1304 sigprocmask(SIG_SETMASK, &initial_mask, NULL);
1305 was_signalled = 0;
1306 } else {
1307 was_signalled = 1;
1309 THREAD_SETMEM(self, p_signal_jmp, NULL);
1311 /* Now was_signalled is true if we exited the above code
1312 due to the delivery of a restart signal. In that case,
1313 everything is cool. We have been removed from whatever
1314 we were waiting on by the other thread, and consumed its signal.
1316 Otherwise we this thread woke up spontaneously, or due to a signal other
1317 than restart. This is an ambiguous case that must be resolved by
1318 the caller; the thread is still eligible for a restart wakeup
1319 so there is a race. */
1321 READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */
1322 return was_signalled;
/* Debugging aid */

#ifdef DEBUG
#include <stdarg.h>

/* Write a formatted debug message to stderr, prefixed with the pid.
   The write is retried on EINTR; truncation is silently accepted.  */
void __pthread_message(const char * fmt, ...)
{
  char buffer[1024];
  va_list args;
  /* Format the pid prefix with a bounded write, and append at its actual
     length: the original hard-coded "buffer + 8" assumed "%05d : " always
     produces 8 characters, which overwrites part of the prefix once the
     pid exceeds 5 digits.  */
  int len = snprintf(buffer, sizeof(buffer), "%05d : ", __getpid());
  if (len < 0 || (size_t) len >= sizeof(buffer))
    return;
  va_start(args, fmt);
  vsnprintf(buffer + len, sizeof(buffer) - len, fmt, args);
  va_end(args);
  TEMP_FAILURE_RETRY(__libc_write(2, buffer, strlen(buffer)));
}
#endif