2 /* Linuxthreads - a simple clone()-based implementation of Posix */
3 /* threads for Linux. */
4 /* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr) */
6 /* This program is free software; you can redistribute it and/or */
7 /* modify it under the terms of the GNU Library General Public License */
8 /* as published by the Free Software Foundation; either version 2 */
9 /* of the License, or (at your option) any later version. */
11 /* This program is distributed in the hope that it will be useful, */
12 /* but WITHOUT ANY WARRANTY; without even the implied warranty of */
13 /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
14 /* GNU Library General Public License for more details. */
16 /* Thread creation, initialization, and basic low-level routines */
26 #include <sys/resource.h>
28 #include <shlib-compat.h>
30 #include "internals.h"
37 #include <not-cancel.h>
40 #if !defined __SIGRTMIN || (__SIGRTMAX - __SIGRTMIN) < 3
41 # error "This must not happen"
44 #if !(USE_TLS && HAVE___THREAD)
45 /* These variables are used by the setup code. */
49 /* We need the global/static resolver state here. */
53 extern struct __res_state _res
;
58 /* We need only a few variables. */
59 #define manager_thread __pthread_manager_threadp
60 pthread_descr __pthread_manager_threadp attribute_hidden
;
64 /* Descriptor of the initial thread */
/* Statically-allocated descriptor for the program's original thread.
   It is the sole initial member of the circular live-thread list
   (p_nextlive/p_prevlive point back at itself) and owns handle slot 0.
   NOTE(review): the closing brace of this initializer is not visible in
   this chunk — lines appear to have been elided by extraction.  */
66 struct _pthread_descr_struct __pthread_initial_thread
= {
67 .p_header
.data
.self
= &__pthread_initial_thread
,
/* Circular doubly-linked list of live threads: initially just self.  */
68 .p_nextlive
= &__pthread_initial_thread
,
69 .p_prevlive
= &__pthread_initial_thread
,
70 .p_tid
= PTHREAD_THREADS_MAX
,
/* The initial thread uses handle slot 0's lock.  */
71 .p_lock
= &__pthread_handles
[0].h_lock
,
72 .p_start_args
= PTHREAD_START_ARGS_INITIALIZER(NULL
),
73 #if !(USE_TLS && HAVE___THREAD)
75 .p_h_errnop
= &_h_errno
,
79 .p_resume_count
= __ATOMIC_INITIALIZER
,
/* Cap on alloca use inside libpthread on this thread's stack.  */
80 .p_alloca_cutoff
= __MAX_ALLOCA_CUTOFF
83 /* Descriptor of the manager thread; none of this is used but the error
84 variables, the p_pid and p_priority fields,
85 and the address for identification. */
/* Static fallback descriptor for the thread manager; when TLS is in use
   the manager descriptor is allocated dynamically instead (see
   __pthread_initialize_manager).  The manager owns handle slot 1.  */
87 #define manager_thread (&__pthread_manager_thread)
88 struct _pthread_descr_struct __pthread_manager_thread
= {
89 .p_header
.data
.self
= &__pthread_manager_thread
,
/* Mark the process as multi-threaded as soon as the manager exists.  */
90 .p_header
.data
.multiple_threads
= 1,
91 .p_lock
= &__pthread_handles
[1].h_lock
,
/* The manager thread runs __pthread_manager as its start routine.  */
92 .p_start_args
= PTHREAD_START_ARGS_INITIALIZER(__pthread_manager
),
93 #if !(USE_TLS && HAVE___THREAD)
94 .p_errnop
= &__pthread_manager_thread
.p_errno
,
97 .p_resume_count
= __ATOMIC_INITIALIZER
,
/* The manager's stack is small; keep alloca use well below it.  */
98 .p_alloca_cutoff
= PTHREAD_STACK_MIN
/ 4
102 /* Pointer to the main thread (the father of the thread manager thread) */
103 /* Originally, this is the initial thread, but this changes after fork() */
106 pthread_descr __pthread_main_thread
;
108 pthread_descr __pthread_main_thread
= &__pthread_initial_thread
;
111 /* Limit between the stack of the initial thread (above) and the
112 stacks of other threads (below). Aligned on a STACK_SIZE boundary. */
114 char *__pthread_initial_thread_bos
;
116 /* File descriptor for sending requests to the thread manager. */
117 /* Initially -1, meaning that the thread manager is not running. */
119 int __pthread_manager_request
= -1;
121 int __pthread_multiple_threads attribute_hidden
;
123 /* Other end of the pipe for sending requests to the thread manager. */
125 int __pthread_manager_reader
;
127 /* Limits of the thread manager stack */
129 char *__pthread_manager_thread_bos
;
130 char *__pthread_manager_thread_tos
;
132 /* For process-wide exit() */
134 int __pthread_exit_requested
;
135 int __pthread_exit_code
;
137 /* Maximum stack size. */
138 size_t __pthread_max_stacksize
;
140 /* Nonzero if the machine has more than one processor. */
141 int __pthread_smp_kernel
;
144 #if !__ASSUME_REALTIME_SIGNALS
145 /* Pointers that select new or old suspend/resume functions
146 based on availability of rt signals. */
148 void (*__pthread_restart
)(pthread_descr
) = __pthread_restart_old
;
149 void (*__pthread_suspend
)(pthread_descr
) = __pthread_suspend_old
;
150 int (*__pthread_timedsuspend
)(pthread_descr
, const struct timespec
*) = __pthread_timedsuspend_old
;
151 #endif /* __ASSUME_REALTIME_SIGNALS */
153 /* Communicate relevant LinuxThreads constants to gdb */
155 const int __pthread_threads_max
= PTHREAD_THREADS_MAX
;
156 const int __pthread_sizeof_handle
= sizeof(struct pthread_handle_struct
);
157 const int __pthread_offsetof_descr
= offsetof(struct pthread_handle_struct
,
159 const int __pthread_offsetof_pid
= offsetof(struct _pthread_descr_struct
,
161 const int __linuxthreads_pthread_sizeof_descr
162 = sizeof(struct _pthread_descr_struct
);
164 const int __linuxthreads_initial_report_events
;
166 const char __linuxthreads_version
[] = VERSION
;
168 /* Forward declarations */
170 static void pthread_onexit_process(int retcode
, void *arg
);
171 #ifndef HAVE_Z_NODELETE
172 static void pthread_atexit_process(void *arg
, int retcode
);
173 static void pthread_atexit_retcode(void *arg
, int retcode
);
175 static void pthread_handle_sigcancel(int sig
);
176 static void pthread_handle_sigrestart(int sig
);
177 static void pthread_handle_sigdebug(int sig
);
179 /* Signal numbers used for the communication.
180 In these variables we keep track of the used variables. If the
181 platform does not support any real-time signals we will define the
182 values to some unreasonable value which will signal failing of all
183 the functions below. */
184 int __pthread_sig_restart
= __SIGRTMIN
;
185 int __pthread_sig_cancel
= __SIGRTMIN
+ 1;
186 int __pthread_sig_debug
= __SIGRTMIN
+ 2;
188 extern int __libc_current_sigrtmin_private (void);
190 #if !__ASSUME_REALTIME_SIGNALS
/* Set once the run-time check for real-time signal support has run.  */
191 static int rtsigs_initialized
;
/* NOTE(review): the enclosing function header is not visible in this
   chunk (upstream this is the body of init_rtsigs).  It probes whether
   the kernel provides RT signals; if not, it falls back to SIGUSR1/2
   and disables the debug signal, otherwise it switches the
   suspend/restart function pointers to the RT-signal variants.  */
196 if (rtsigs_initialized
)
199 if (__libc_current_sigrtmin_private () == -1)
/* No RT signals available: reuse the two classic user signals.  */
201 __pthread_sig_restart
= SIGUSR1
;
202 __pthread_sig_cancel
= SIGUSR2
;
/* 0 means "no debug signal" — checked by users of __pthread_sig_debug.  */
203 __pthread_sig_debug
= 0;
207 __pthread_restart
= __pthread_restart_new
;
208 __pthread_suspend
= __pthread_wait_for_restart_signal
;
209 __pthread_timedsuspend
= __pthread_timedsuspend_new
;
212 rtsigs_initialized
= 1;
217 /* Initialize the pthread library.
218 Initialization is split in two functions:
219 - a constructor function that blocks the __pthread_sig_restart signal
220 (must do this very early, since the program could capture the signal
221 mask with e.g. sigsetjmp before creating the first thread);
222 - a regular function called from pthread_create when needed. */
224 static void pthread_initialize(void) __attribute__((constructor
));
226 #ifndef HAVE_Z_NODELETE
227 extern void *__dso_handle
__attribute__ ((weak
));
231 #if defined USE_TLS && !defined SHARED
232 extern void __libc_setup_tls (size_t tcbsize
, size_t tcbalign
);
/* Dispatch table handed to libc (via __libc_pthread_init below) so that
   libc can forward pthread functionality to this libpthread.  */
235 struct pthread_functions __pthread_functions
=
237 #if !(USE_TLS && HAVE___THREAD)
238 .ptr_pthread_internal_tsd_set
= __pthread_internal_tsd_set
,
239 .ptr_pthread_internal_tsd_get
= __pthread_internal_tsd_get
,
240 .ptr_pthread_internal_tsd_address
= __pthread_internal_tsd_address
,
242 .ptr_pthread_fork
= __pthread_fork
,
243 .ptr_pthread_attr_destroy
= __pthread_attr_destroy
,
244 #if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
245 .ptr___pthread_attr_init_2_0
= __pthread_attr_init_2_0
,
247 .ptr___pthread_attr_init_2_1
= __pthread_attr_init_2_1
,
248 .ptr_pthread_attr_getdetachstate
= __pthread_attr_getdetachstate
,
249 .ptr_pthread_attr_setdetachstate
= __pthread_attr_setdetachstate
,
250 .ptr_pthread_attr_getinheritsched
= __pthread_attr_getinheritsched
,
251 .ptr_pthread_attr_setinheritsched
= __pthread_attr_setinheritsched
,
252 .ptr_pthread_attr_getschedparam
= __pthread_attr_getschedparam
,
253 .ptr_pthread_attr_setschedparam
= __pthread_attr_setschedparam
,
254 .ptr_pthread_attr_getschedpolicy
= __pthread_attr_getschedpolicy
,
255 .ptr_pthread_attr_setschedpolicy
= __pthread_attr_setschedpolicy
,
256 .ptr_pthread_attr_getscope
= __pthread_attr_getscope
,
257 .ptr_pthread_attr_setscope
= __pthread_attr_setscope
,
258 .ptr_pthread_condattr_destroy
= __pthread_condattr_destroy
,
259 .ptr_pthread_condattr_init
= __pthread_condattr_init
,
260 .ptr___pthread_cond_broadcast
= __pthread_cond_broadcast
,
261 .ptr___pthread_cond_destroy
= __pthread_cond_destroy
,
262 .ptr___pthread_cond_init
= __pthread_cond_init
,
263 .ptr___pthread_cond_signal
= __pthread_cond_signal
,
264 .ptr___pthread_cond_wait
= __pthread_cond_wait
,
265 .ptr___pthread_cond_timedwait
= __pthread_cond_timedwait
,
266 .ptr_pthread_equal
= __pthread_equal
,
267 .ptr___pthread_exit
= __pthread_exit
,
268 .ptr_pthread_getschedparam
= __pthread_getschedparam
,
269 .ptr_pthread_setschedparam
= __pthread_setschedparam
,
270 .ptr_pthread_mutex_destroy
= __pthread_mutex_destroy
,
271 .ptr_pthread_mutex_init
= __pthread_mutex_init
,
272 .ptr_pthread_mutex_lock
= __pthread_mutex_lock
,
273 .ptr_pthread_mutex_trylock
= __pthread_mutex_trylock
,
274 .ptr_pthread_mutex_unlock
= __pthread_mutex_unlock
,
275 .ptr_pthread_self
= __pthread_self
,
276 .ptr_pthread_setcancelstate
= __pthread_setcancelstate
,
277 .ptr_pthread_setcanceltype
= __pthread_setcanceltype
,
278 .ptr_pthread_do_exit
= __pthread_do_exit
,
279 .ptr_pthread_thread_self
= __pthread_thread_self
,
280 .ptr_pthread_cleanup_upto
= __pthread_cleanup_upto
,
281 .ptr_pthread_sigaction
= __pthread_sigaction
,
282 .ptr_pthread_sigwait
= __pthread_sigwait
,
283 .ptr_pthread_raise
= __pthread_raise
,
284 .ptr__pthread_cleanup_push
= _pthread_cleanup_push
,
285 .ptr__pthread_cleanup_pop
= _pthread_cleanup_pop
288 # define ptr_pthread_functions &__pthread_functions
290 # define ptr_pthread_functions NULL
/* Location of libc's "multiple threads" flag, filled in by
   __libc_pthread_init; set to 1 once the manager is started.  */
293 static int *__libc_multiple_threads_ptr
;
295 /* Do some minimal initialization which has to be done during the
296 startup of the C library.  */
/* Runs very early (before main): sets up the two fixed handle slots,
   and on TLS builds allocates/installs the initial thread's TCB and
   DTV by hand, mirroring what the dynamic linker would have done.
   Finally it hands our function table to libc via __libc_pthread_init.  */
298 __pthread_initialize_minimal(void)
303 /* First of all init __pthread_handles[0] and [1] if needed. */
304 # if __LT_SPINLOCK_INIT != 0
305 __pthread_handles
[0].h_lock
= __LOCK_INITIALIZER
;
306 __pthread_handles
[1].h_lock
= __LOCK_INITIALIZER
;
309 /* Unlike in the dynamically linked case the dynamic linker has not
310 taken care of initializing the TLS data structures. */
311 __libc_setup_tls (TLS_TCB_SIZE
, TLS_TCB_ALIGN
);
313 if (__builtin_expect (GL(dl_tls_dtv_slotinfo_list
) == NULL
, 0))
317 /* There is no actual TLS being used, so the thread register
318 was not initialized in the dynamic linker. */
320 /* We need to install special hooks so that the malloc and memalign
321 calls in _dl_tls_setup and _dl_allocate_tls won't cause full
322 malloc initialization that will try to set up its thread state. */
324 extern void __libc_malloc_pthread_startup (bool first_time
);
325 __libc_malloc_pthread_startup (true);
327 if (__builtin_expect (_dl_tls_setup (), 0)
328 || __builtin_expect ((tcbp
= _dl_allocate_tls (NULL
)) == NULL
, 0))
330 static const char msg
[] = "\
331 cannot allocate TLS data structures for initial thread\n";
/* Cannot use stdio this early; write the diagnostic directly.  */
332 TEMP_FAILURE_RETRY (write_not_cancel (STDERR_FILENO
,
333 msg
, sizeof msg
- 1));
336 const char *lossage
= TLS_INIT_TP (tcbp
, 0);
337 if (__builtin_expect (lossage
!= NULL
, 0))
339 static const char msg
[] = "cannot set up thread-local storage: ";
340 const char nl
= '\n';
341 TEMP_FAILURE_RETRY (write_not_cancel (STDERR_FILENO
,
342 msg
, sizeof msg
- 1));
343 TEMP_FAILURE_RETRY (write_not_cancel (STDERR_FILENO
,
344 lossage
, strlen (lossage
)));
345 TEMP_FAILURE_RETRY (write_not_cancel (STDERR_FILENO
, &nl
, 1));
348 /* Though it was allocated with libc's malloc, that was done without
349 the user's __malloc_hook installed. A later realloc that uses
350 the hooks might not work with that block from the plain malloc.
351 So we record this block as unfreeable just as the dynamic linker
352 does when it allocates the DTV before the libc malloc exists. */
353 GL(dl_initial_dtv
) = GET_DTV (tcbp
);
355 __libc_malloc_pthread_startup (false);
361 /* The memory for the thread descriptor was allocated elsewhere as
362 part of the TLS allocation. We have to initialize the data
363 structure by hand. This initialization must mirror the struct
/* Hand-initialization below must match the static initializer of
   __pthread_initial_thread above.  */
365 self
->p_nextlive
= self
->p_prevlive
= self
;
366 self
->p_tid
= PTHREAD_THREADS_MAX
;
367 self
->p_lock
= &__pthread_handles
[0].h_lock
;
368 # ifndef HAVE___THREAD
369 self
->p_errnop
= &_errno
;
370 self
->p_h_errnop
= &_h_errno
;
372 /* self->p_start_args need not be initialized, it's all zero. */
373 self
->p_userstack
= 1;
374 # if __LT_SPINLOCK_INIT != 0
375 self
->p_resume_count
= (struct pthread_atomic
) __ATOMIC_INITIALIZER
;
377 self
->p_alloca_cutoff
= __MAX_ALLOCA_CUTOFF
;
379 /* Another variable which points to the thread descriptor. */
380 __pthread_main_thread
= self
;
382 /* And fill in the pointer to the thread __pthread_handles array. */
383 __pthread_handles
[0].h_descr
= self
;
387 /* First of all init __pthread_handles[0] and [1]. */
388 # if __LT_SPINLOCK_INIT != 0
389 __pthread_handles
[0].h_lock
= __LOCK_INITIALIZER
;
390 __pthread_handles
[1].h_lock
= __LOCK_INITIALIZER
;
392 __pthread_handles
[0].h_descr
= &__pthread_initial_thread
;
393 __pthread_handles
[1].h_descr
= &__pthread_manager_thread
;
395 /* If we have special thread_self processing, initialize that for the
397 # ifdef INIT_THREAD_SELF
398 INIT_THREAD_SELF(&__pthread_initial_thread
, 0);
404 self
->p_cpuclock_offset
= GL(dl_cpuclock_offset
);
406 __pthread_initial_thread
.p_cpuclock_offset
= GL(dl_cpuclock_offset
);
/* Register our function table with libc; the return value is libc's
   "multiple threads" flag location.  */
410 __libc_multiple_threads_ptr
= __libc_pthread_init (ptr_pthread_functions
);
/* Compute the per-thread maximum stack size from RLIMIT_STACK and the
   platform stack layout, store it in __pthread_max_stacksize, and lower
   the current thread's alloca cutoff if the result is small.  */
415 __pthread_init_max_stacksize(void)
420 getrlimit(RLIMIT_STACK
, &limit
);
421 #ifdef FLOATING_STACKS
422 if (limit
.rlim_cur
== RLIM_INFINITY
)
423 limit
.rlim_cur
= ARCH_STACK_MAX_SIZE
;
424 # ifdef NEED_SEPARATE_REGISTER_STACK
/* Half for the normal stack, half for the register backing store.  */
425 max_stack
= limit
.rlim_cur
/ 2;
427 max_stack
= limit
.rlim_cur
;
430 /* Play with the stack size limit to make sure that no stack ever grows
431 beyond STACK_SIZE minus one page (to act as a guard page). */
432 # ifdef NEED_SEPARATE_REGISTER_STACK
433 /* STACK_SIZE bytes hold both the main stack and register backing
434 store. The rlimit value applies to each individually. */
435 max_stack
= STACK_SIZE
/2 - __getpagesize ();
437 max_stack
= STACK_SIZE
- __getpagesize();
439 if (limit
.rlim_cur
> max_stack
) {
440 limit
.rlim_cur
= max_stack
;
441 setrlimit(RLIMIT_STACK
, &limit
);
444 __pthread_max_stacksize
= max_stack
;
/* Keep alloca use within a quarter of the usable stack.  */
445 if (max_stack
/ 4 < __MAX_ALLOCA_CUTOFF
)
448 pthread_descr self
= THREAD_SELF
;
449 self
->p_alloca_cutoff
= max_stack
/ 4;
451 __pthread_initial_thread
.p_alloca_cutoff
= max_stack
/ 4;
458 /* When using __thread for this, we do it in libc so as not
459 to give libpthread its own TLS segment just for this. */
460 extern void **__libc_dl_error_tsd (void) __attribute__ ((const));
/* Per-thread slot used by the dynamic linker to store its error state;
   installed into GL(dl_error_catch_tsd) in pthread_initialize below.  */
462 static void ** __attribute__ ((const))
463 __libc_dl_error_tsd (void)
465 return &thread_self ()->p_libc_specific
[_LIBC_TSD_KEY_DL_ERROR
];
/* Copy MAP's static-TLS initialization image into DESCR's TLS block and
   point the thread's DTV slot for that module at it.  The address
   arithmetic differs between TLS_TCB_AT_TP and TLS_DTV_AT_TP layouts.  */
471 static inline void __attribute__((always_inline
))
472 init_one_static_tls (pthread_descr descr
, struct link_map
*map
)
475 dtv_t
*dtv
= GET_DTV (descr
);
/* TCB-at-TP: static TLS lives below the descriptor.  */
476 void *dest
= (char *) descr
- map
->l_tls_offset
;
478 dtv_t
*dtv
= GET_DTV ((pthread_descr
) ((char *) descr
+ TLS_PRE_TCB_SIZE
));
/* DTV-at-TP: static TLS lives above the descriptor + pre-TCB area.  */
479 void *dest
= (char *) descr
+ map
->l_tls_offset
+ TLS_PRE_TCB_SIZE
;
481 # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
484 /* Fill in the DTV slot so that a later LD/GD access will find it. */
485 dtv
[map
->l_tls_modid
].pointer
= dest
;
487 /* Initialize the memory. */
488 memset (__mempcpy (dest
, map
->l_tls_initimage
, map
->l_tls_initimage_size
),
489 '\0', map
->l_tls_blocksize
- map
->l_tls_initimage_size
);
/* Called by the dynamic linker (via GL(dl_init_static_tls)) after a
   dlopen brings in a module with static TLS: initialize that module's
   TLS block in every live thread except the manager (slot 1).  */
493 __pthread_init_static_tls (struct link_map
*map
)
497 for (i
= 0; i
< PTHREAD_THREADS_MAX
; ++i
)
498 if (__pthread_handles
[i
].h_descr
!= NULL
&& i
!= 1)
/* Re-check under the handle lock: the slot may have been emptied
   between the unlocked test above and taking the lock.  */
500 __pthread_lock (&__pthread_handles
[i
].h_lock
, NULL
);
501 if (__pthread_handles
[i
].h_descr
!= NULL
)
502 init_one_static_tls (__pthread_handles
[i
].h_descr
, map
);
503 __pthread_unlock (&__pthread_handles
[i
].h_lock
);
/* Library constructor: records the initial thread's stack bottom,
   installs the three signal handlers, sets the initial signal mask,
   registers the process-exit hook, and redirects the dynamic linker's
   error-TSD and recursive-lock hooks at libpthread.  Idempotent.  */
508 static void pthread_initialize(void)
513 /* If already done (e.g. by a constructor called earlier!), bail out */
514 if (__pthread_initial_thread_bos
!= NULL
) return;
515 #ifdef TEST_FOR_COMPARE_AND_SWAP
516 /* Test if compare-and-swap is available */
517 __pthread_has_cas
= compare_and_swap_is_available();
519 #ifdef FLOATING_STACKS
520 /* We don't need to know the bottom of the stack. Give the pointer some
521 value to signal that initialization happened. */
522 __pthread_initial_thread_bos
= (void *) -1l;
524 /* Determine stack size limits. */
525 __pthread_init_max_stacksize ();
526 # ifdef _STACK_GROWS_UP
527 /* The initial thread already has all the stack it needs */
528 __pthread_initial_thread_bos
= (char *)
529 ((long)CURRENT_STACK_FRAME
&~ (STACK_SIZE
- 1));
531 /* For the initial stack, reserve at least STACK_SIZE bytes of stack
532 below the current stack address, and align that on a
533 STACK_SIZE boundary. */
534 __pthread_initial_thread_bos
=
535 (char *)(((long)CURRENT_STACK_FRAME
- 2 * STACK_SIZE
) & ~(STACK_SIZE
- 1));
539 /* Update the descriptor for the initial thread. */
540 THREAD_SETMEM (((pthread_descr
) NULL
), p_pid
, __getpid());
541 # ifndef HAVE___THREAD
542 /* Likewise for the resolver state _res. */
543 THREAD_SETMEM (((pthread_descr
) NULL
), p_resp
, &_res
);
546 /* Update the descriptor for the initial thread. */
547 __pthread_initial_thread
.p_pid
= __getpid();
548 /* Likewise for the resolver state _res. */
549 __pthread_initial_thread
.p_resp
= &_res
;
551 #if !__ASSUME_REALTIME_SIGNALS
552 /* Initialize real-time signals. */
555 /* Setup signal handlers for the initial thread.
556 Since signal handlers are shared between threads, these settings
557 will be inherited by all other threads. */
558 sa
.sa_handler
= pthread_handle_sigrestart
;
559 sigemptyset(&sa
.sa_mask
);
561 __libc_sigaction(__pthread_sig_restart
, &sa
, NULL
);
562 sa
.sa_handler
= pthread_handle_sigcancel
;
/* Block the restart signal while the cancel handler runs.  */
563 sigaddset(&sa
.sa_mask
, __pthread_sig_restart
);
565 __libc_sigaction(__pthread_sig_cancel
, &sa
, NULL
);
566 if (__pthread_sig_debug
> 0) {
567 sa
.sa_handler
= pthread_handle_sigdebug
;
568 sigemptyset(&sa
.sa_mask
);
570 __libc_sigaction(__pthread_sig_debug
, &sa
, NULL
);
572 /* Initially, block __pthread_sig_restart. Will be unblocked on demand. */
574 sigaddset(&mask
, __pthread_sig_restart
);
575 sigprocmask(SIG_BLOCK
, &mask
, NULL
);
576 /* And unblock __pthread_sig_cancel if it has been blocked. */
577 sigdelset(&mask
, __pthread_sig_restart
);
578 sigaddset(&mask
, __pthread_sig_cancel
);
579 sigprocmask(SIG_UNBLOCK
, &mask
, NULL
);
580 /* Register an exit function to kill all other threads. */
581 /* Do it early so that user-registered atexit functions are called
582 before pthread_*exit_process. */
583 #ifndef HAVE_Z_NODELETE
584 if (__builtin_expect (&__dso_handle
!= NULL
, 1))
585 __cxa_atexit ((void (*) (void *)) pthread_atexit_process
, NULL
,
589 __on_exit (pthread_onexit_process
, NULL
);
590 /* How many processors. */
591 __pthread_smp_kernel
= is_smp_system ();
594 /* Transfer the old value from the dynamic linker's internal location. */
595 *__libc_dl_error_tsd () = *(*GL(dl_error_catch_tsd
)) ();
596 GL(dl_error_catch_tsd
) = &__libc_dl_error_tsd
;
598 /* Make __rtld_lock_{,un}lock_recursive use pthread_mutex_{,un}lock,
599 keep the lock count from the ld.so implementation. */
600 GL(dl_rtld_lock_recursive
) = (void *) __pthread_mutex_lock
;
601 GL(dl_rtld_unlock_recursive
) = (void *) __pthread_mutex_unlock
;
/* Re-acquire ld.so's load lock the same number of times through our
   mutex so the recursion count carries over.  */
602 unsigned int rtld_lock_count
= GL(dl_load_lock
).mutex
.__m_count
;
603 GL(dl_load_lock
).mutex
.__m_count
= 0;
604 while (rtld_lock_count
-- > 0)
605 __pthread_mutex_lock (&GL(dl_load_lock
).mutex
);
609 GL(dl_init_static_tls
) = &__pthread_init_static_tls
;
/* Public entry point; just forwards to the internal constructor.  */
613 void __pthread_initialize(void)
615 pthread_initialize();
/* Start the thread manager on first pthread_create: allocate its stack
   and (on TLS builds) its descriptor, create the request pipe, clone the
   manager, and synchronize with an attached debugger if one is present.
   Returns 0 on success, -1 on failure (all resources released).  */
618 int __pthread_initialize_manager(void)
622 struct pthread_request request
;
/* From here on the process is officially multi-threaded.  */
629 __pthread_multiple_threads
= 1;
630 #if TLS_MULTIPLE_THREADS_IN_TCB || !defined USE_TLS || !TLS_DTV_AT_TP
631 __pthread_main_thread
->p_multiple_threads
= 1;
633 *__libc_multiple_threads_ptr
= 1;
635 #ifndef HAVE_Z_NODELETE
636 if (__builtin_expect (&__dso_handle
!= NULL
, 1))
637 __cxa_atexit ((void (*) (void *)) pthread_atexit_retcode
, NULL
,
641 if (__pthread_max_stacksize
== 0)
642 __pthread_init_max_stacksize ();
643 /* If basic initialization not done yet (e.g. we're called from a
644 constructor run before our constructor), do it now */
645 if (__pthread_initial_thread_bos
== NULL
) pthread_initialize();
646 /* Setup stack for thread manager */
647 __pthread_manager_thread_bos
= malloc(THREAD_MANAGER_STACK_SIZE
);
648 if (__pthread_manager_thread_bos
== NULL
) return -1;
649 __pthread_manager_thread_tos
=
650 __pthread_manager_thread_bos
+ THREAD_MANAGER_STACK_SIZE
;
651 /* Setup pipe to communicate with thread manager */
652 if (pipe(manager_pipe
) == -1) {
653 free(__pthread_manager_thread_bos
);
658 /* Allocate memory for the thread descriptor and the dtv. */
659 tcbp
= _dl_allocate_tls (NULL
);
/* Allocation failed: undo the stack and pipe before bailing out.  */
661 free(__pthread_manager_thread_bos
);
662 close_not_cancel(manager_pipe
[0]);
663 close_not_cancel(manager_pipe
[1]);
668 mgr
= (pthread_descr
) tcbp
;
670 /* pthread_descr is located right below tcbhead_t which _dl_allocate_tls
672 mgr
= (pthread_descr
) ((char *) tcbp
- TLS_PRE_TCB_SIZE
);
674 __pthread_handles
[1].h_descr
= manager_thread
= mgr
;
676 /* Initialize the descriptor. */
677 #if !defined USE_TLS || !TLS_DTV_AT_TP
678 mgr
->p_header
.data
.tcb
= tcbp
;
679 mgr
->p_header
.data
.self
= mgr
;
680 mgr
->p_header
.data
.multiple_threads
= 1;
681 #elif TLS_MULTIPLE_THREADS_IN_TCB
682 mgr
->p_multiple_threads
= 1;
684 mgr
->p_lock
= &__pthread_handles
[1].h_lock
;
685 # ifndef HAVE___THREAD
686 mgr
->p_errnop
= &mgr
->p_errno
;
688 mgr
->p_start_args
= (struct pthread_start_args
) PTHREAD_START_ARGS_INITIALIZER(__pthread_manager
);
690 # if __LT_SPINLOCK_INIT != 0
691 self
->p_resume_count
= (struct pthread_atomic
) __ATOMIC_INITIALIZER
;
693 mgr
->p_alloca_cutoff
= PTHREAD_STACK_MIN
/ 4;
/* Non-TLS build: use the statically allocated manager descriptor.  */
695 mgr
= &__pthread_manager_thread
;
698 __pthread_manager_request
= manager_pipe
[1]; /* writing end */
699 __pthread_manager_reader
= manager_pipe
[0]; /* reading end */
701 /* Start the thread manager */
704 if (__linuxthreads_initial_report_events
!= 0)
705 THREAD_SETMEM (((pthread_descr
) NULL
), p_report_events
,
706 __linuxthreads_initial_report_events
);
707 report_events
= THREAD_GETMEM (((pthread_descr
) NULL
), p_report_events
);
709 if (__linuxthreads_initial_report_events
!= 0)
710 __pthread_initial_thread
.p_report_events
711 = __linuxthreads_initial_report_events
;
712 report_events
= __pthread_initial_thread
.p_report_events
;
/* Debugger event-reporting path: create via __pthread_manager_event
   and report TD_CREATE before letting the manager run.  */
714 if (__builtin_expect (report_events
, 0))
716 /* It's a bit more complicated. We have to report the creation of
717 the manager thread. */
718 int idx
= __td_eventword (TD_CREATE
);
719 uint32_t mask
= __td_eventmask (TD_CREATE
);
723 event_bits
= THREAD_GETMEM_NC (((pthread_descr
) NULL
),
724 p_eventbuf
.eventmask
.event_bits
[idx
]);
726 event_bits
= __pthread_initial_thread
.p_eventbuf
.eventmask
.event_bits
[idx
];
729 if ((mask
& (__pthread_threads_events
.event_bits
[idx
] | event_bits
))
/* Hold the manager's lock so it cannot proceed until the event has
   been filled in and signalled.  */
732 __pthread_lock(mgr
->p_lock
, NULL
);
734 #ifdef NEED_SEPARATE_REGISTER_STACK
735 pid
= __clone2(__pthread_manager_event
,
736 (void **) __pthread_manager_thread_bos
,
737 THREAD_MANAGER_STACK_SIZE
,
738 CLONE_VM
| CLONE_FS
| CLONE_FILES
| CLONE_SIGHAND
,
740 #elif _STACK_GROWS_UP
741 pid
= __clone(__pthread_manager_event
,
742 (void **) __pthread_manager_thread_bos
,
743 CLONE_VM
| CLONE_FS
| CLONE_FILES
| CLONE_SIGHAND
,
746 pid
= __clone(__pthread_manager_event
,
747 (void **) __pthread_manager_thread_tos
,
748 CLONE_VM
| CLONE_FS
| CLONE_FILES
| CLONE_SIGHAND
,
754 /* Now fill in the information about the new thread in
755 the newly created thread's data structure. We cannot let
756 the new thread do this since we don't know whether it was
757 already scheduled when we send the event. */
758 mgr
->p_eventbuf
.eventdata
= mgr
;
759 mgr
->p_eventbuf
.eventnum
= TD_CREATE
;
760 __pthread_last_event
= mgr
;
/* Manager tid is fixed: 2 * PTHREAD_THREADS_MAX + 1.  */
761 mgr
->p_tid
= 2* PTHREAD_THREADS_MAX
+ 1;
764 /* Now call the function which signals the event. */
765 __linuxthreads_create_event ();
768 /* Now restart the thread. */
769 __pthread_unlock(mgr
->p_lock
);
/* Fallback path: clone the plain manager entry point.  */
773 if (__builtin_expect (pid
, 0) == 0)
775 #ifdef NEED_SEPARATE_REGISTER_STACK
776 pid
= __clone2(__pthread_manager
, (void **) __pthread_manager_thread_bos
,
777 THREAD_MANAGER_STACK_SIZE
,
778 CLONE_VM
| CLONE_FS
| CLONE_FILES
| CLONE_SIGHAND
, mgr
);
779 #elif _STACK_GROWS_UP
780 pid
= __clone(__pthread_manager
, (void **) __pthread_manager_thread_bos
,
781 CLONE_VM
| CLONE_FS
| CLONE_FILES
| CLONE_SIGHAND
, mgr
);
783 pid
= __clone(__pthread_manager
, (void **) __pthread_manager_thread_tos
,
784 CLONE_VM
| CLONE_FS
| CLONE_FILES
| CLONE_SIGHAND
, mgr
);
/* Clone failed: release TLS, stack and pipe.  */
787 if (__builtin_expect (pid
, 0) == -1) {
789 _dl_deallocate_tls (tcbp
, true);
791 free(__pthread_manager_thread_bos
);
792 close_not_cancel(manager_pipe
[0]);
793 close_not_cancel(manager_pipe
[1]);
796 mgr
->p_tid
= 2* PTHREAD_THREADS_MAX
+ 1;
798 /* Make gdb aware of new thread manager */
799 if (__builtin_expect (__pthread_threads_debug
, 0) && __pthread_sig_debug
> 0)
801 raise(__pthread_sig_debug
);
802 /* We suspend ourself and gdb will wake us up when it is
803 ready to handle us. */
804 __pthread_wait_for_restart_signal(thread_self());
806 /* Synchronize debugging of the thread manager */
807 request
.req_kind
= REQ_DEBUG
;
808 TEMP_FAILURE_RETRY(write_not_cancel(__pthread_manager_request
,
809 (char *) &request
, sizeof(request
)));
813 /* Thread creation */
/* pthread_create (GLIBC_2.1 ABI): lazily start the manager, then hand
   it a REQ_CREATE request over the pipe with the attributes, start
   routine, argument and the caller's current signal mask.  The manager
   writes the result back into our descriptor (p_retcode/p_retval).  */
815 int __pthread_create_2_1(pthread_t
*thread
, const pthread_attr_t
*attr
,
816 void * (*start_routine
)(void *), void *arg
)
818 pthread_descr self
= thread_self();
819 struct pthread_request request
;
/* Manager not running yet (request fd is -1): start it now.  */
821 if (__builtin_expect (__pthread_manager_request
, 0) < 0) {
822 if (__pthread_initialize_manager() < 0) return EAGAIN
;
824 request
.req_thread
= self
;
825 request
.req_kind
= REQ_CREATE
;
826 request
.req_args
.create
.attr
= attr
;
827 request
.req_args
.create
.fn
= start_routine
;
828 request
.req_args
.create
.arg
= arg
;
/* Capture our signal mask so the new thread inherits it.  */
829 sigprocmask(SIG_SETMASK
, (const sigset_t
*) NULL
,
830 &request
.req_args
.create
.mask
);
831 TEMP_FAILURE_RETRY(write_not_cancel(__pthread_manager_request
,
832 (char *) &request
, sizeof(request
)));
834 retval
= THREAD_GETMEM(self
, p_retcode
);
835 if (__builtin_expect (retval
, 0) == 0)
836 *thread
= (pthread_t
) THREAD_GETMEM(self
, p_retval
);
840 versioned_symbol (libpthread
, __pthread_create_2_1
, pthread_create
, GLIBC_2_1
);
842 #if SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_1)
/* pthread_create compatibility shim for the GLIBC_2.0 ABI, whose
   pthread_attr_t is smaller: widen the attribute into the current
   layout with default guard/stack settings, then call the 2.1 entry.  */
844 int __pthread_create_2_0(pthread_t
*thread
, const pthread_attr_t
*attr
,
845 void * (*start_routine
)(void *), void *arg
)
847 /* The ATTR attribute is not really of type `pthread_attr_t *'. It has
848 the old size and access to the new members might crash the program.
849 We convert the struct now. */
850 pthread_attr_t new_attr
;
854 size_t ps
= __getpagesize ();
/* Copy only the old-sized prefix (everything before __guardsize).  */
856 memcpy (&new_attr
, attr
,
857 (size_t) &(((pthread_attr_t
*)NULL
)->__guardsize
));
858 new_attr
.__guardsize
= ps
;
859 new_attr
.__stackaddr_set
= 0;
860 new_attr
.__stackaddr
= NULL
;
861 new_attr
.__stacksize
= STACK_SIZE
- ps
;
864 return __pthread_create_2_1 (thread
, attr
, start_routine
, arg
);
866 compat_symbol (libpthread
, __pthread_create_2_0
, pthread_create
, GLIBC_2_0
);
869 /* Simple operations on thread identifiers */
/* Return the calling thread's descriptor pointer.  */
871 pthread_descr
__pthread_thread_self(void)
873 return thread_self();
/* pthread_self: return the calling thread's tid from its descriptor.  */
876 pthread_t
__pthread_self(void)
878 pthread_descr self
= thread_self();
879 return THREAD_GETMEM(self
, p_tid
);
881 strong_alias (__pthread_self
, pthread_self
);
/* pthread_equal: thread identifiers are plain integers here, so
   equality is a direct comparison.  */
883 int __pthread_equal(pthread_t thread1
, pthread_t thread2
)
885 return thread1
== thread2
;
887 strong_alias (__pthread_equal
, pthread_equal
);
889 /* Helper function for thread_self in the case of user-provided stacks */
/* Locate the calling thread's descriptor by scanning the handle table
   for the entry whose stack range contains the current stack pointer.
   NOTE(review): the loop assumes the caller's thread is present in the
   table, so the scan terminates.  */
893 pthread_descr
__pthread_find_self(void)
895 char * sp
= CURRENT_STACK_FRAME
;
898 /* __pthread_handles[0] is the initial thread, __pthread_handles[1] is
899 the manager thread, both handled specially in thread_self(), so start at 2 */
900 h
= __pthread_handles
+ 2;
901 # ifdef _STACK_GROWS_UP
902 while (! (sp
>= (char *) h
->h_descr
&& sp
< h
->h_descr
->p_guardaddr
)) h
++;
904 while (! (sp
<= (char *) h
->h_descr
&& sp
>= h
->h_bottom
)) h
++;
/* Like __pthread_find_self, but also recognizes the manager thread by
   its malloc'd stack range; used where the caller may be the manager.  */
911 pthread_descr
__pthread_self_stack(void)
913 char *sp
= CURRENT_STACK_FRAME
;
916 if (sp
>= __pthread_manager_thread_bos
&& sp
< __pthread_manager_thread_tos
)
917 return manager_thread
;
/* Slots 0 and 1 are the initial and manager threads; start at 2.  */
918 h
= __pthread_handles
+ 2;
920 # ifdef _STACK_GROWS_UP
921 while (h
->h_descr
== NULL
922 || ! (sp
>= h
->h_descr
->p_stackaddr
&& sp
< h
->h_descr
->p_guardaddr
))
925 while (h
->h_descr
== NULL
926 || ! (sp
<= (char *) h
->h_descr
->p_stackaddr
&& sp
>= h
->h_bottom
))
930 # ifdef _STACK_GROWS_UP
931 while (! (sp
>= (char *) h
->h_descr
&& sp
< h
->h_descr
->p_guardaddr
))
934 while (! (sp
<= (char *) h
->h_descr
&& sp
>= h
->h_bottom
))
943 /* Thread scheduling */
/* pthread_setschedparam: validate the handle under its lock, apply the
   policy/params to the target's kernel process, cache the resulting
   priority, and bump the manager's priority if it is running.  */
945 int __pthread_setschedparam(pthread_t thread
, int policy
,
946 const struct sched_param
*param
)
948 pthread_handle handle
= thread_handle(thread
);
951 __pthread_lock(&handle
->h_lock
, NULL
);
952 if (__builtin_expect (invalid_handle(handle
, thread
), 0)) {
953 __pthread_unlock(&handle
->h_lock
);
956 th
= handle
->h_descr
;
957 if (__builtin_expect (__sched_setscheduler(th
->p_pid
, policy
, param
) == -1,
959 __pthread_unlock(&handle
->h_lock
);
/* SCHED_OTHER threads are recorded with priority 0.  */
962 th
->p_priority
= policy
== SCHED_OTHER
? 0 : param
->sched_priority
;
963 __pthread_unlock(&handle
->h_lock
);
/* Keep the manager at least as urgent as the highest-priority thread.  */
964 if (__pthread_manager_request
>= 0)
965 __pthread_manager_adjust_prio(th
->p_priority
);
968 strong_alias (__pthread_setschedparam
, pthread_setschedparam
);
/* pthread_getschedparam: read the target's pid under the handle lock,
   then query the kernel scheduler outside the lock.  */
970 int __pthread_getschedparam(pthread_t thread
, int *policy
,
971 struct sched_param
*param
)
973 pthread_handle handle
= thread_handle(thread
);
976 __pthread_lock(&handle
->h_lock
, NULL
);
977 if (__builtin_expect (invalid_handle(handle
, thread
), 0)) {
978 __pthread_unlock(&handle
->h_lock
);
981 pid
= handle
->h_descr
->p_pid
;
982 __pthread_unlock(&handle
->h_lock
);
983 pol
= __sched_getscheduler(pid
);
984 if (__builtin_expect (pol
, 0) == -1) return errno
;
985 if (__sched_getparam(pid
, param
) == -1) return errno
;
989 strong_alias (__pthread_getschedparam
, pthread_getschedparam
);
/* pthread_yield: defer to the POSIX scheduler yield.  */
991 int __pthread_yield (void)
993 /* For now this is equivalent with the POSIX call. */
994 return sched_yield ();
996 weak_alias (__pthread_yield
, pthread_yield
)
998 /* Process-wide exit() request */
/* exit()-time hook: ask the manager to terminate every other thread
   (REQ_PROCESS_EXIT), then — if we are the main thread — reap the
   manager so its CPU time is accounted to us.  */
1000 static void pthread_onexit_process(int retcode
, void *arg
)
1002 if (__builtin_expect (__pthread_manager_request
, 0) >= 0) {
1003 struct pthread_request request
;
1004 pthread_descr self
= thread_self();
1006 request
.req_thread
= self
;
1007 request
.req_kind
= REQ_PROCESS_EXIT
;
1008 request
.req_args
.exit
.code
= retcode
;
1009 TEMP_FAILURE_RETRY(write_not_cancel(__pthread_manager_request
,
1010 (char *) &request
, sizeof(request
)));
1012 /* Main thread should accumulate times for thread manager and its
1013 children, so that timings for main thread account for all threads. */
1014 if (self
== __pthread_main_thread
)
1017 waitpid(manager_thread
->p_pid
, NULL
, __WCLONE
);
1019 waitpid(__pthread_manager_thread
.p_pid
, NULL
, __WCLONE
);
1021 /* Since all threads have been asynchronously terminated
1022 (possibly holding locks), free cannot be used any more.
1023 For mtrace, we'd like to print something though. */
1025 tcbhead_t *tcbp = (tcbhead_t *) manager_thread;
1027 tcbp = (tcbhead_t) ((char *) tcbp + TLS_PRE_TCB_SIZE);
1029 _dl_deallocate_tls (tcbp, true);
1031 free (__pthread_manager_thread_bos); */
1032 __pthread_manager_thread_bos
= __pthread_manager_thread_tos
= NULL
;
1037 #ifndef HAVE_Z_NODELETE
/* Exit code remembered by pthread_atexit_retcode below, used when the
   __cxa_atexit-registered handler is invoked with retcode 0.  */
1038 static int __pthread_atexit_retcode
;
/* __cxa_atexit-compatible wrapper around pthread_onexit_process
   (argument order is reversed relative to on_exit handlers).  */
1040 static void pthread_atexit_process(void *arg
, int retcode
)
1042 pthread_onexit_process (retcode
?: __pthread_atexit_retcode
, arg
);
1045 static void pthread_atexit_retcode(void *arg
, int retcode
)
1047 __pthread_atexit_retcode
= retcode
;
1051 /* The handler for the RESTART signal just records the signal received
1052 in the thread descriptor, and optionally performs a siglongjmp
1053 (for pthread_cond_timedwait). */
1055 static void pthread_handle_sigrestart(int sig
)
1057 pthread_descr self
= check_thread_self();
1058 THREAD_SETMEM(self
, p_signal
, sig
);
1059 if (THREAD_GETMEM(self
, p_signal_jmp
) != NULL
)
1060 siglongjmp(*THREAD_GETMEM(self
, p_signal_jmp
), 1);
1063 /* The handler for the CANCEL signal checks for cancellation
1064 (in asynchronous mode), for process-wide exit and exec requests.
1065 For the thread manager thread, redirect the signal to
1066 __pthread_manager_sighandler. */
1068 static void pthread_handle_sigcancel(int sig
)
1070 pthread_descr self
= check_thread_self();
1071 sigjmp_buf
* jmpbuf
;
1073 if (self
== manager_thread
)
1075 __pthread_manager_sighandler(sig
);
1078 if (__builtin_expect (__pthread_exit_requested
, 0)) {
1079 /* Main thread should accumulate times for thread manager and its
1080 children, so that timings for main thread account for all threads. */
1081 if (self
== __pthread_main_thread
) {
1083 waitpid(manager_thread
->p_pid
, NULL
, __WCLONE
);
1085 waitpid(__pthread_manager_thread
.p_pid
, NULL
, __WCLONE
);
1088 _exit(__pthread_exit_code
);
1090 if (__builtin_expect (THREAD_GETMEM(self
, p_canceled
), 0)
1091 && THREAD_GETMEM(self
, p_cancelstate
) == PTHREAD_CANCEL_ENABLE
) {
1092 if (THREAD_GETMEM(self
, p_canceltype
) == PTHREAD_CANCEL_ASYNCHRONOUS
)
1093 __pthread_do_exit(PTHREAD_CANCELED
, CURRENT_STACK_FRAME
);
1094 jmpbuf
= THREAD_GETMEM(self
, p_cancel_jmp
);
1095 if (jmpbuf
!= NULL
) {
1096 THREAD_SETMEM(self
, p_cancel_jmp
, NULL
);
1097 siglongjmp(*jmpbuf
, 1);
/* Handler for the DEBUG signal.
   The debugging strategy is as follows:
   On reception of a REQ_DEBUG request (sent by new threads created to
   the thread manager under debugging mode), the thread manager throws
   __pthread_sig_debug to itself. The debugger (if active) intercepts
   this signal, takes into account new threads and continues execution
   of the thread manager by propagating the signal because it doesn't
   know what it is specifically done for. In the current implementation,
   the thread manager simply discards it. */

static void pthread_handle_sigdebug(int sig)
{
  /* Nothing */
}
1117 /* Reset the state of the thread machinery after a fork().
1118 Close the pipe used for requests and set the main thread to the forked
1120 Notice that we can't free the stack segments, as the forked thread
1121 may hold pointers into them. */
1123 void __pthread_reset_main_thread(void)
1125 pthread_descr self
= thread_self();
1127 if (__pthread_manager_request
!= -1) {
1128 /* Free the thread manager stack */
1129 free(__pthread_manager_thread_bos
);
1130 __pthread_manager_thread_bos
= __pthread_manager_thread_tos
= NULL
;
1131 /* Close the two ends of the pipe */
1132 close_not_cancel(__pthread_manager_request
);
1133 close_not_cancel(__pthread_manager_reader
);
1134 __pthread_manager_request
= __pthread_manager_reader
= -1;
1137 /* Update the pid of the main thread */
1138 THREAD_SETMEM(self
, p_pid
, __getpid());
1139 /* Make the forked thread the main thread */
1140 __pthread_main_thread
= self
;
1141 THREAD_SETMEM(self
, p_nextlive
, self
);
1142 THREAD_SETMEM(self
, p_prevlive
, self
);
1143 #if !(USE_TLS && HAVE___THREAD)
1144 /* Now this thread modifies the global variables. */
1145 THREAD_SETMEM(self
, p_errnop
, &_errno
);
1146 THREAD_SETMEM(self
, p_h_errnop
, &_h_errno
);
1147 THREAD_SETMEM(self
, p_resp
, &_res
);
1150 #ifndef FLOATING_STACKS
1151 /* This is to undo the setrlimit call in __pthread_init_max_stacksize.
1152 XXX This can be wrong if the user set the limit during the run. */
1154 struct rlimit limit
;
1155 if (getrlimit (RLIMIT_STACK
, &limit
) == 0
1156 && limit
.rlim_cur
!= limit
.rlim_max
)
1158 limit
.rlim_cur
= limit
.rlim_max
;
1159 setrlimit(RLIMIT_STACK
, &limit
);
1165 /* Process-wide exec() request */
1167 void __pthread_kill_other_threads_np(void)
1169 struct sigaction sa
;
1170 /* Terminate all other threads and thread manager */
1171 pthread_onexit_process(0, NULL
);
1172 /* Make current thread the main thread in case the calling thread
1173 changes its mind, does not exec(), and creates new threads instead. */
1174 __pthread_reset_main_thread();
1176 /* Reset the signal handlers behaviour for the signals the
1177 implementation uses since this would be passed to the new
1179 sigemptyset(&sa
.sa_mask
);
1181 sa
.sa_handler
= SIG_DFL
;
1182 __libc_sigaction(__pthread_sig_restart
, &sa
, NULL
);
1183 __libc_sigaction(__pthread_sig_cancel
, &sa
, NULL
);
1184 if (__pthread_sig_debug
> 0)
1185 __libc_sigaction(__pthread_sig_debug
, &sa
, NULL
);
1187 weak_alias (__pthread_kill_other_threads_np
, pthread_kill_other_threads_np
)
1189 /* Concurrency symbol level. */
1190 static int current_level
;
1192 int __pthread_setconcurrency(int level
)
1194 /* We don't do anything unless we have found a useful interpretation. */
1195 current_level
= level
;
1198 weak_alias (__pthread_setconcurrency
, pthread_setconcurrency
)
1200 int __pthread_getconcurrency(void)
1202 return current_level
;
1204 weak_alias (__pthread_getconcurrency
, pthread_getconcurrency
)
1206 /* Primitives for controlling thread execution */
1208 void __pthread_wait_for_restart_signal(pthread_descr self
)
1212 sigprocmask(SIG_SETMASK
, NULL
, &mask
); /* Get current signal mask */
1213 sigdelset(&mask
, __pthread_sig_restart
); /* Unblock the restart signal */
1214 THREAD_SETMEM(self
, p_signal
, 0);
1216 __pthread_sigsuspend(&mask
); /* Wait for signal. Must not be a
1217 cancellation point. */
1218 } while (THREAD_GETMEM(self
, p_signal
) !=__pthread_sig_restart
);
1220 READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */
1223 #if !__ASSUME_REALTIME_SIGNALS
1224 /* The _old variants are for 2.0 and early 2.1 kernels which don't have RT
1226 On these kernels, we use SIGUSR1 and SIGUSR2 for restart and cancellation.
1227 Since the restart signal does not queue, we use an atomic counter to create
1228 queuing semantics. This is needed to resolve a rare race condition in
1229 pthread_cond_timedwait_relative. */
1231 void __pthread_restart_old(pthread_descr th
)
1233 if (atomic_increment(&th
->p_resume_count
) == -1)
1234 kill(th
->p_pid
, __pthread_sig_restart
);
1237 void __pthread_suspend_old(pthread_descr self
)
1239 if (atomic_decrement(&self
->p_resume_count
) <= 0)
1240 __pthread_wait_for_restart_signal(self
);
1244 __pthread_timedsuspend_old(pthread_descr self
, const struct timespec
*abstime
)
1246 sigset_t unblock
, initial_mask
;
1247 int was_signalled
= 0;
1250 if (atomic_decrement(&self
->p_resume_count
) == 0) {
1251 /* Set up a longjmp handler for the restart signal, unblock
1252 the signal and sleep. */
1254 if (sigsetjmp(jmpbuf
, 1) == 0) {
1255 THREAD_SETMEM(self
, p_signal_jmp
, &jmpbuf
);
1256 THREAD_SETMEM(self
, p_signal
, 0);
1257 /* Unblock the restart signal */
1258 sigemptyset(&unblock
);
1259 sigaddset(&unblock
, __pthread_sig_restart
);
1260 sigprocmask(SIG_UNBLOCK
, &unblock
, &initial_mask
);
1264 struct timespec reltime
;
1266 /* Compute a time offset relative to now. */
1267 __gettimeofday (&now
, NULL
);
1268 reltime
.tv_nsec
= abstime
->tv_nsec
- now
.tv_usec
* 1000;
1269 reltime
.tv_sec
= abstime
->tv_sec
- now
.tv_sec
;
1270 if (reltime
.tv_nsec
< 0) {
1271 reltime
.tv_nsec
+= 1000000000;
1272 reltime
.tv_sec
-= 1;
1275 /* Sleep for the required duration. If woken by a signal,
1276 resume waiting as required by Single Unix Specification. */
1277 if (reltime
.tv_sec
< 0 || __libc_nanosleep(&reltime
, NULL
) == 0)
1281 /* Block the restart signal again */
1282 sigprocmask(SIG_SETMASK
, &initial_mask
, NULL
);
1287 THREAD_SETMEM(self
, p_signal_jmp
, NULL
);
1290 /* Now was_signalled is true if we exited the above code
1291 due to the delivery of a restart signal. In that case,
1292 we know we have been dequeued and resumed and that the
1293 resume count is balanced. Otherwise, there are some
1294 cases to consider. First, try to bump up the resume count
1295 back to zero. If it goes to 1, it means restart() was
1296 invoked on this thread. The signal must be consumed
1297 and the count bumped down and everything is cool. We
1298 can return a 1 to the caller.
1299 Otherwise, no restart was delivered yet, so a potential
1300 race exists; we return a 0 to the caller which must deal
1301 with this race in an appropriate way; for example by
1302 atomically removing the thread from consideration for a
1303 wakeup---if such a thing fails, it means a restart is
1306 if (!was_signalled
) {
1307 if (atomic_increment(&self
->p_resume_count
) != -1) {
1308 __pthread_wait_for_restart_signal(self
);
1309 atomic_decrement(&self
->p_resume_count
); /* should be zero now! */
1310 /* woke spontaneously and consumed restart signal */
1313 /* woke spontaneously but did not consume restart---caller must resolve */
1316 /* woken due to restart signal */
1319 #endif /* __ASSUME_REALTIME_SIGNALS */
1321 void __pthread_restart_new(pthread_descr th
)
1323 /* The barrier is proabably not needed, in which case it still documents
1324 our assumptions. The intent is to commit previous writes to shared
1325 memory so the woken thread will have a consistent view. Complementary
1326 read barriers are present to the suspend functions. */
1327 WRITE_MEMORY_BARRIER();
1328 kill(th
->p_pid
, __pthread_sig_restart
);
1331 /* There is no __pthread_suspend_new because it would just
1332 be a wasteful wrapper for __pthread_wait_for_restart_signal */
1335 __pthread_timedsuspend_new(pthread_descr self
, const struct timespec
*abstime
)
1337 sigset_t unblock
, initial_mask
;
1338 int was_signalled
= 0;
1341 if (sigsetjmp(jmpbuf
, 1) == 0) {
1342 THREAD_SETMEM(self
, p_signal_jmp
, &jmpbuf
);
1343 THREAD_SETMEM(self
, p_signal
, 0);
1344 /* Unblock the restart signal */
1345 sigemptyset(&unblock
);
1346 sigaddset(&unblock
, __pthread_sig_restart
);
1347 sigprocmask(SIG_UNBLOCK
, &unblock
, &initial_mask
);
1351 struct timespec reltime
;
1353 /* Compute a time offset relative to now. */
1354 __gettimeofday (&now
, NULL
);
1355 reltime
.tv_nsec
= abstime
->tv_nsec
- now
.tv_usec
* 1000;
1356 reltime
.tv_sec
= abstime
->tv_sec
- now
.tv_sec
;
1357 if (reltime
.tv_nsec
< 0) {
1358 reltime
.tv_nsec
+= 1000000000;
1359 reltime
.tv_sec
-= 1;
1362 /* Sleep for the required duration. If woken by a signal,
1363 resume waiting as required by Single Unix Specification. */
1364 if (reltime
.tv_sec
< 0 || __libc_nanosleep(&reltime
, NULL
) == 0)
1368 /* Block the restart signal again */
1369 sigprocmask(SIG_SETMASK
, &initial_mask
, NULL
);
1374 THREAD_SETMEM(self
, p_signal_jmp
, NULL
);
1376 /* Now was_signalled is true if we exited the above code
1377 due to the delivery of a restart signal. In that case,
1378 everything is cool. We have been removed from whatever
1379 we were waiting on by the other thread, and consumed its signal.
1381 Otherwise we this thread woke up spontaneously, or due to a signal other
1382 than restart. This is an ambiguous case that must be resolved by
1383 the caller; the thread is still eligible for a restart wakeup
1384 so there is a race. */
1386 READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */
1387 return was_signalled
;
1396 void __pthread_message(const char * fmt
, ...)
1400 sprintf(buffer
, "%05d : ", __getpid());
1401 va_start(args
, fmt
);
1402 vsnprintf(buffer
+ 8, sizeof(buffer
) - 8, fmt
, args
);
1404 TEMP_FAILURE_RETRY(write_not_cancel(2, buffer
, strlen(buffer
)));