/* Linuxthreads - a simple clone()-based implementation of Posix        */
/* threads for Linux.                                                   */
/* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr)              */

/* This program is free software; you can redistribute it and/or        */
/* modify it under the terms of the GNU Library General Public License  */
/* as published by the Free Software Foundation; either version 2       */
/* of the License, or (at your option) any later version.               */

/* This program is distributed in the hope that it will be useful,      */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of       */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the        */
/* GNU Library General Public License for more details.                 */

/* Thread creation, initialization, and basic low-level routines */
26 #include <sys/resource.h>
28 #include <shlib-compat.h>
30 #include "internals.h"
39 #if !defined __SIGRTMIN || (__SIGRTMAX - __SIGRTMIN) < 3
40 # error "This must not happen"
43 #if !(USE_TLS && HAVE___THREAD)
44 /* These variables are used by the setup code. */
48 /* We need the global/static resolver state here. */
52 extern struct __res_state _res
;
57 /* We need only a few variables. */
58 static pthread_descr manager_thread
;
62 /* Descriptor of the initial thread */
64 struct _pthread_descr_struct __pthread_initial_thread
= {
65 .p_header
.data
.self
= &__pthread_initial_thread
,
66 .p_nextlive
= &__pthread_initial_thread
,
67 .p_prevlive
= &__pthread_initial_thread
,
68 .p_tid
= PTHREAD_THREADS_MAX
,
69 .p_lock
= &__pthread_handles
[0].h_lock
,
70 .p_start_args
= PTHREAD_START_ARGS_INITIALIZER(NULL
),
71 #if !(USE_TLS && HAVE___THREAD)
73 .p_h_errnop
= &_h_errno
,
77 .p_resume_count
= __ATOMIC_INITIALIZER
,
78 .p_alloca_cutoff
= __MAX_ALLOCA_CUTOFF
81 /* Descriptor of the manager thread; none of this is used but the error
82 variables, the p_pid and p_priority fields,
83 and the address for identification. */
85 #define manager_thread (&__pthread_manager_thread)
86 struct _pthread_descr_struct __pthread_manager_thread
= {
87 .p_header
.data
.self
= &__pthread_manager_thread
,
88 .p_header
.data
.multiple_threads
= 1,
89 .p_lock
= &__pthread_handles
[1].h_lock
,
90 .p_start_args
= PTHREAD_START_ARGS_INITIALIZER(__pthread_manager
),
91 #if !(USE_TLS && HAVE___THREAD)
92 .p_errnop
= &__pthread_manager_thread
.p_errno
,
95 .p_resume_count
= __ATOMIC_INITIALIZER
,
96 .p_alloca_cutoff
= PTHREAD_STACK_MIN
/ 4
100 /* Pointer to the main thread (the father of the thread manager thread) */
101 /* Originally, this is the initial thread, but this changes after fork() */
104 pthread_descr __pthread_main_thread
;
106 pthread_descr __pthread_main_thread
= &__pthread_initial_thread
;
109 /* Limit between the stack of the initial thread (above) and the
110 stacks of other threads (below). Aligned on a STACK_SIZE boundary. */
112 char *__pthread_initial_thread_bos
;
114 /* File descriptor for sending requests to the thread manager. */
115 /* Initially -1, meaning that the thread manager is not running. */
117 int __pthread_manager_request
= -1;
119 int __pthread_multiple_threads attribute_hidden
;
121 /* Other end of the pipe for sending requests to the thread manager. */
123 int __pthread_manager_reader
;
125 /* Limits of the thread manager stack */
127 char *__pthread_manager_thread_bos
;
128 char *__pthread_manager_thread_tos
;
130 /* For process-wide exit() */
132 int __pthread_exit_requested
;
133 int __pthread_exit_code
;
135 /* Maximum stack size. */
136 size_t __pthread_max_stacksize
;
138 /* Nozero if the machine has more than one processor. */
139 int __pthread_smp_kernel
;
142 #if !__ASSUME_REALTIME_SIGNALS
143 /* Pointers that select new or old suspend/resume functions
144 based on availability of rt signals. */
146 void (*__pthread_restart
)(pthread_descr
) = __pthread_restart_old
;
147 void (*__pthread_suspend
)(pthread_descr
) = __pthread_suspend_old
;
148 int (*__pthread_timedsuspend
)(pthread_descr
, const struct timespec
*) = __pthread_timedsuspend_old
;
149 #endif /* __ASSUME_REALTIME_SIGNALS */
151 /* Communicate relevant LinuxThreads constants to gdb */
153 const int __pthread_threads_max
= PTHREAD_THREADS_MAX
;
154 const int __pthread_sizeof_handle
= sizeof(struct pthread_handle_struct
);
155 const int __pthread_offsetof_descr
= offsetof(struct pthread_handle_struct
,
157 const int __pthread_offsetof_pid
= offsetof(struct _pthread_descr_struct
,
159 const int __linuxthreads_pthread_sizeof_descr
160 = sizeof(struct _pthread_descr_struct
);
162 const int __linuxthreads_initial_report_events
;
164 const char __linuxthreads_version
[] = VERSION
;
166 /* Forward declarations */
168 static void pthread_onexit_process(int retcode
, void *arg
);
169 #ifndef HAVE_Z_NODELETE
170 static void pthread_atexit_process(void *arg
, int retcode
);
171 static void pthread_atexit_retcode(void *arg
, int retcode
);
173 static void pthread_handle_sigcancel(int sig
);
174 static void pthread_handle_sigrestart(int sig
);
175 static void pthread_handle_sigdebug(int sig
);
177 /* Signal numbers used for the communication.
178 In these variables we keep track of the used variables. If the
179 platform does not support any real-time signals we will define the
180 values to some unreasonable value which will signal failing of all
181 the functions below. */
182 int __pthread_sig_restart
= __SIGRTMIN
;
183 int __pthread_sig_cancel
= __SIGRTMIN
+ 1;
184 int __pthread_sig_debug
= __SIGRTMIN
+ 2;
186 extern int __libc_current_sigrtmin_private (void);
188 #if !__ASSUME_REALTIME_SIGNALS
189 static int rtsigs_initialized
;
194 if (rtsigs_initialized
)
197 if (__libc_current_sigrtmin_private () == -1)
199 __pthread_sig_restart
= SIGUSR1
;
200 __pthread_sig_cancel
= SIGUSR2
;
201 __pthread_sig_debug
= 0;
205 __pthread_restart
= __pthread_restart_new
;
206 __pthread_suspend
= __pthread_wait_for_restart_signal
;
207 __pthread_timedsuspend
= __pthread_timedsuspend_new
;
210 rtsigs_initialized
= 1;
215 /* Initialize the pthread library.
216 Initialization is split in two functions:
217 - a constructor function that blocks the __pthread_sig_restart signal
218 (must do this very early, since the program could capture the signal
219 mask with e.g. sigsetjmp before creating the first thread);
220 - a regular function called from pthread_create when needed. */
222 static void pthread_initialize(void) __attribute__((constructor
));
224 #ifndef HAVE_Z_NODELETE
225 extern void *__dso_handle
__attribute__ ((weak
));
229 #if defined USE_TLS && !defined SHARED
230 extern void __libc_setup_tls (size_t tcbsize
, size_t tcbalign
);
233 struct pthread_functions __pthread_functions
=
235 #if !(USE_TLS && HAVE___THREAD)
236 .ptr_pthread_internal_tsd_set
= __pthread_internal_tsd_set
,
237 .ptr_pthread_internal_tsd_get
= __pthread_internal_tsd_get
,
238 .ptr_pthread_internal_tsd_address
= __pthread_internal_tsd_address
,
240 .ptr_pthread_fork
= __pthread_fork
,
241 .ptr_pthread_attr_destroy
= __pthread_attr_destroy
,
242 #if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
243 .ptr___pthread_attr_init_2_0
= __pthread_attr_init_2_0
,
245 .ptr___pthread_attr_init_2_1
= __pthread_attr_init_2_1
,
246 .ptr_pthread_attr_getdetachstate
= __pthread_attr_getdetachstate
,
247 .ptr_pthread_attr_setdetachstate
= __pthread_attr_setdetachstate
,
248 .ptr_pthread_attr_getinheritsched
= __pthread_attr_getinheritsched
,
249 .ptr_pthread_attr_setinheritsched
= __pthread_attr_setinheritsched
,
250 .ptr_pthread_attr_getschedparam
= __pthread_attr_getschedparam
,
251 .ptr_pthread_attr_setschedparam
= __pthread_attr_setschedparam
,
252 .ptr_pthread_attr_getschedpolicy
= __pthread_attr_getschedpolicy
,
253 .ptr_pthread_attr_setschedpolicy
= __pthread_attr_setschedpolicy
,
254 .ptr_pthread_attr_getscope
= __pthread_attr_getscope
,
255 .ptr_pthread_attr_setscope
= __pthread_attr_setscope
,
256 .ptr_pthread_condattr_destroy
= __pthread_condattr_destroy
,
257 .ptr_pthread_condattr_init
= __pthread_condattr_init
,
258 .ptr___pthread_cond_broadcast
= __pthread_cond_broadcast
,
259 .ptr___pthread_cond_destroy
= __pthread_cond_destroy
,
260 .ptr___pthread_cond_init
= __pthread_cond_init
,
261 .ptr___pthread_cond_signal
= __pthread_cond_signal
,
262 .ptr___pthread_cond_wait
= __pthread_cond_wait
,
263 .ptr_pthread_equal
= __pthread_equal
,
264 .ptr___pthread_exit
= __pthread_exit
,
265 .ptr_pthread_getschedparam
= __pthread_getschedparam
,
266 .ptr_pthread_setschedparam
= __pthread_setschedparam
,
267 .ptr_pthread_mutex_destroy
= __pthread_mutex_destroy
,
268 .ptr_pthread_mutex_init
= __pthread_mutex_init
,
269 .ptr_pthread_mutex_lock
= __pthread_mutex_lock
,
270 .ptr_pthread_mutex_trylock
= __pthread_mutex_trylock
,
271 .ptr_pthread_mutex_unlock
= __pthread_mutex_unlock
,
272 .ptr_pthread_self
= __pthread_self
,
273 .ptr_pthread_setcancelstate
= __pthread_setcancelstate
,
274 .ptr_pthread_setcanceltype
= __pthread_setcanceltype
,
275 .ptr_pthread_do_exit
= __pthread_do_exit
,
276 .ptr_pthread_thread_self
= __pthread_thread_self
,
277 .ptr_pthread_cleanup_upto
= __pthread_cleanup_upto
,
278 .ptr_pthread_sigaction
= __pthread_sigaction
,
279 .ptr_pthread_sigwait
= __pthread_sigwait
,
280 .ptr_pthread_raise
= __pthread_raise
283 # define ptr_pthread_functions &__pthread_functions
285 # define ptr_pthread_functions NULL
288 static int *__libc_multiple_threads_ptr
;
290 /* Do some minimal initialization which has to be done during the
291 startup of the C library. */
293 __pthread_initialize_minimal(void)
298 /* First of all init __pthread_handles[0] and [1] if needed. */
299 # if __LT_SPINLOCK_INIT != 0
300 __pthread_handles
[0].h_lock
= __LOCK_INITIALIZER
;
301 __pthread_handles
[1].h_lock
= __LOCK_INITIALIZER
;
304 /* Unlike in the dynamically linked case the dynamic linker has not
305 taken care of initializing the TLS data structures. */
306 __libc_setup_tls (TLS_TCB_SIZE
, TLS_TCB_ALIGN
);
308 if (__builtin_expect (GL(dl_tls_dtv_slotinfo_list
) == NULL
, 0))
312 /* There is no actual TLS being used, so the thread register
313 was not initialized in the dynamic linker. */
315 /* We need to install special hooks so that the malloc and memalign
316 calls in _dl_tls_setup and _dl_allocate_tls won't cause full
317 malloc initialization that will try to set up its thread state. */
319 extern void __libc_malloc_pthread_startup (bool first_time
);
320 __libc_malloc_pthread_startup (true);
322 if (__builtin_expect (_dl_tls_setup (), 0)
323 || __builtin_expect ((tcbp
= _dl_allocate_tls (NULL
)) == NULL
, 0))
325 static const char msg
[] = "\
326 cannot allocate TLS data structures for initial thread\n";
327 TEMP_FAILURE_RETRY (__libc_write (STDERR_FILENO
,
328 msg
, sizeof msg
- 1));
331 const char *lossage
= TLS_INIT_TP (tcbp
, 0);
332 if (__builtin_expect (lossage
!= NULL
, 0))
334 static const char msg
[] = "cannot set up thread-local storage: ";
335 const char nl
= '\n';
336 TEMP_FAILURE_RETRY (__libc_write (STDERR_FILENO
,
337 msg
, sizeof msg
- 1));
338 TEMP_FAILURE_RETRY (__libc_write (STDERR_FILENO
,
339 lossage
, strlen (lossage
)));
340 TEMP_FAILURE_RETRY (__libc_write (STDERR_FILENO
, &nl
, 1));
343 /* Though it was allocated with libc's malloc, that was done without
344 the user's __malloc_hook installed. A later realloc that uses
345 the hooks might not work with that block from the plain malloc.
346 So we record this block as unfreeable just as the dynamic linker
347 does when it allocates the DTV before the libc malloc exists. */
348 GL(dl_initial_dtv
) = GET_DTV (tcbp
);
350 __libc_malloc_pthread_startup (false);
356 /* The memory for the thread descriptor was allocated elsewhere as
357 part of the TLS allocation. We have to initialize the data
358 structure by hand. This initialization must mirror the struct
360 self
->p_nextlive
= self
->p_prevlive
= self
;
361 self
->p_tid
= PTHREAD_THREADS_MAX
;
362 self
->p_lock
= &__pthread_handles
[0].h_lock
;
363 # ifndef HAVE___THREAD
364 self
->p_errnop
= &_errno
;
365 self
->p_h_errnop
= &_h_errno
;
367 /* self->p_start_args need not be initialized, it's all zero. */
368 self
->p_userstack
= 1;
369 # if __LT_SPINLOCK_INIT != 0
370 self
->p_resume_count
= (struct pthread_atomic
) __ATOMIC_INITIALIZER
;
372 self
->p_alloca_cutoff
= __MAX_ALLOCA_CUTOFF
;
374 /* Another variable which points to the thread descriptor. */
375 __pthread_main_thread
= self
;
377 /* And fill in the pointer the the thread __pthread_handles array. */
378 __pthread_handles
[0].h_descr
= self
;
382 /* First of all init __pthread_handles[0] and [1]. */
383 # if __LT_SPINLOCK_INIT != 0
384 __pthread_handles
[0].h_lock
= __LOCK_INITIALIZER
;
385 __pthread_handles
[1].h_lock
= __LOCK_INITIALIZER
;
387 __pthread_handles
[0].h_descr
= &__pthread_initial_thread
;
388 __pthread_handles
[1].h_descr
= &__pthread_manager_thread
;
390 /* If we have special thread_self processing, initialize that for the
392 # ifdef INIT_THREAD_SELF
393 INIT_THREAD_SELF(&__pthread_initial_thread
, 0);
399 self
->p_cpuclock_offset
= GL(dl_cpuclock_offset
);
401 __pthread_initial_thread
.p_cpuclock_offset
= GL(dl_cpuclock_offset
);
405 __libc_multiple_threads_ptr
= __libc_pthread_init (ptr_pthread_functions
);
410 __pthread_init_max_stacksize(void)
415 getrlimit(RLIMIT_STACK
, &limit
);
416 #ifdef FLOATING_STACKS
417 if (limit
.rlim_cur
== RLIM_INFINITY
)
418 limit
.rlim_cur
= ARCH_STACK_MAX_SIZE
;
419 # ifdef NEED_SEPARATE_REGISTER_STACK
420 max_stack
= limit
.rlim_cur
/ 2;
422 max_stack
= limit
.rlim_cur
;
425 /* Play with the stack size limit to make sure that no stack ever grows
426 beyond STACK_SIZE minus one page (to act as a guard page). */
427 # ifdef NEED_SEPARATE_REGISTER_STACK
428 /* STACK_SIZE bytes hold both the main stack and register backing
429 store. The rlimit value applies to each individually. */
430 max_stack
= STACK_SIZE
/2 - __getpagesize ();
432 max_stack
= STACK_SIZE
- __getpagesize();
434 if (limit
.rlim_cur
> max_stack
) {
435 limit
.rlim_cur
= max_stack
;
436 setrlimit(RLIMIT_STACK
, &limit
);
439 __pthread_max_stacksize
= max_stack
;
440 if (max_stack
/ 4 < __MAX_ALLOCA_CUTOFF
)
443 pthread_descr self
= THREAD_SELF
;
444 self
->p_alloca_cutoff
= max_stack
/ 4;
446 __pthread_initial_thread
.p_alloca_cutoff
= max_stack
/ 4;
453 /* When using __thread for this, we do it in libc so as not
454 to give libpthread its own TLS segment just for this. */
455 extern void **__libc_dl_error_tsd (void) __attribute__ ((const));
457 static void ** __attribute__ ((const))
458 __libc_dl_error_tsd (void)
460 return &thread_self ()->p_libc_specific
[_LIBC_TSD_KEY_DL_ERROR
];
466 static inline void __attribute__((always_inline
))
467 init_one_static_tls (pthread_descr descr
, struct link_map
*map
)
470 dtv_t
*dtv
= GET_DTV (descr
);
471 void *dest
= (char *) descr
- map
->l_tls_offset
;
473 dtv_t
*dtv
= GET_DTV ((pthread_descr
) ((char *) descr
+ TLS_PRE_TCB_SIZE
));
474 void *dest
= (char *) descr
+ map
->l_tls_offset
+ TLS_PRE_TCB_SIZE
;
476 # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
479 /* Fill in the DTV slot so that a later LD/GD access will find it. */
480 dtv
[map
->l_tls_modid
].pointer
= dest
;
482 /* Initialize the memory. */
483 memset (__mempcpy (dest
, map
->l_tls_initimage
, map
->l_tls_initimage_size
),
484 '\0', map
->l_tls_blocksize
- map
->l_tls_initimage_size
);
488 __pthread_init_static_tls (struct link_map
*map
)
492 for (i
= 0; i
< PTHREAD_THREADS_MAX
; ++i
)
493 if (__pthread_handles
[i
].h_descr
!= NULL
&& i
!= 1)
495 __pthread_lock (&__pthread_handles
[i
].h_lock
, NULL
);
496 if (__pthread_handles
[i
].h_descr
!= NULL
)
497 init_one_static_tls (__pthread_handles
[i
].h_descr
, map
);
498 __pthread_unlock (&__pthread_handles
[i
].h_lock
);
503 static void pthread_initialize(void)
508 /* If already done (e.g. by a constructor called earlier!), bail out */
509 if (__pthread_initial_thread_bos
!= NULL
) return;
510 #ifdef TEST_FOR_COMPARE_AND_SWAP
511 /* Test if compare-and-swap is available */
512 __pthread_has_cas
= compare_and_swap_is_available();
514 #ifdef FLOATING_STACKS
515 /* We don't need to know the bottom of the stack. Give the pointer some
516 value to signal that initialization happened. */
517 __pthread_initial_thread_bos
= (void *) -1l;
519 /* Determine stack size limits . */
520 __pthread_init_max_stacksize ();
521 # ifdef _STACK_GROWS_UP
522 /* The initial thread already has all the stack it needs */
523 __pthread_initial_thread_bos
= (char *)
524 ((long)CURRENT_STACK_FRAME
&~ (STACK_SIZE
- 1));
526 /* For the initial stack, reserve at least STACK_SIZE bytes of stack
527 below the current stack address, and align that on a
528 STACK_SIZE boundary. */
529 __pthread_initial_thread_bos
=
530 (char *)(((long)CURRENT_STACK_FRAME
- 2 * STACK_SIZE
) & ~(STACK_SIZE
- 1));
534 /* Update the descriptor for the initial thread. */
535 THREAD_SETMEM (((pthread_descr
) NULL
), p_pid
, __getpid());
536 # ifndef HAVE___THREAD
537 /* Likewise for the resolver state _res. */
538 THREAD_SETMEM (((pthread_descr
) NULL
), p_resp
, &_res
);
541 /* Update the descriptor for the initial thread. */
542 __pthread_initial_thread
.p_pid
= __getpid();
543 /* Likewise for the resolver state _res. */
544 __pthread_initial_thread
.p_resp
= &_res
;
546 #if !__ASSUME_REALTIME_SIGNALS
547 /* Initialize real-time signals. */
550 /* Setup signal handlers for the initial thread.
551 Since signal handlers are shared between threads, these settings
552 will be inherited by all other threads. */
553 sa
.sa_handler
= pthread_handle_sigrestart
;
554 sigemptyset(&sa
.sa_mask
);
556 __libc_sigaction(__pthread_sig_restart
, &sa
, NULL
);
557 sa
.sa_handler
= pthread_handle_sigcancel
;
559 __libc_sigaction(__pthread_sig_cancel
, &sa
, NULL
);
560 if (__pthread_sig_debug
> 0) {
561 sa
.sa_handler
= pthread_handle_sigdebug
;
562 sigemptyset(&sa
.sa_mask
);
564 __libc_sigaction(__pthread_sig_debug
, &sa
, NULL
);
566 /* Initially, block __pthread_sig_restart. Will be unblocked on demand. */
568 sigaddset(&mask
, __pthread_sig_restart
);
569 sigprocmask(SIG_BLOCK
, &mask
, NULL
);
570 /* And unblock __pthread_sig_cancel if it has been blocked. */
571 sigdelset(&mask
, __pthread_sig_restart
);
572 sigaddset(&mask
, __pthread_sig_cancel
);
573 sigprocmask(SIG_UNBLOCK
, &mask
, NULL
);
574 /* Register an exit function to kill all other threads. */
575 /* Do it early so that user-registered atexit functions are called
576 before pthread_*exit_process. */
577 #ifndef HAVE_Z_NODELETE
578 if (__builtin_expect (&__dso_handle
!= NULL
, 1))
579 __cxa_atexit ((void (*) (void *)) pthread_atexit_process
, NULL
,
583 __on_exit (pthread_onexit_process
, NULL
);
584 /* How many processors. */
585 __pthread_smp_kernel
= is_smp_system ();
588 /* Transfer the old value from the dynamic linker's internal location. */
589 *__libc_dl_error_tsd () = *(*GL(dl_error_catch_tsd
)) ();
590 GL(dl_error_catch_tsd
) = &__libc_dl_error_tsd
;
592 /* Make __rtld_lock_{,un}lock_recursive use pthread_mutex_{,un}lock,
593 keep the lock count from the ld.so implementation. */
594 GL(dl_rtld_lock_recursive
) = (void *) __pthread_mutex_lock
;
595 GL(dl_rtld_unlock_recursive
) = (void *) __pthread_mutex_unlock
;
596 unsigned int rtld_lock_count
= GL(dl_load_lock
).mutex
.__m_count
;
597 GL(dl_load_lock
).mutex
.__m_count
= 0;
598 while (rtld_lock_count
-- > 0)
599 __pthread_mutex_lock (&GL(dl_load_lock
).mutex
);
603 GL(dl_init_static_tls
) = &__pthread_init_static_tls
;
607 void __pthread_initialize(void)
609 pthread_initialize();
612 int __pthread_initialize_manager(void)
616 struct pthread_request request
;
623 __pthread_multiple_threads
= 1;
624 #if TLS_MULTIPLE_THREADS_IN_TCB || !defined USE_TLS || !TLS_DTV_AT_TP
625 __pthread_main_thread
->p_multiple_threads
= 1;
627 *__libc_multiple_threads_ptr
= 1;
629 #ifndef HAVE_Z_NODELETE
630 if (__builtin_expect (&__dso_handle
!= NULL
, 1))
631 __cxa_atexit ((void (*) (void *)) pthread_atexit_retcode
, NULL
,
635 if (__pthread_max_stacksize
== 0)
636 __pthread_init_max_stacksize ();
637 /* If basic initialization not done yet (e.g. we're called from a
638 constructor run before our constructor), do it now */
639 if (__pthread_initial_thread_bos
== NULL
) pthread_initialize();
640 /* Setup stack for thread manager */
641 __pthread_manager_thread_bos
= malloc(THREAD_MANAGER_STACK_SIZE
);
642 if (__pthread_manager_thread_bos
== NULL
) return -1;
643 __pthread_manager_thread_tos
=
644 __pthread_manager_thread_bos
+ THREAD_MANAGER_STACK_SIZE
;
645 /* Setup pipe to communicate with thread manager */
646 if (pipe(manager_pipe
) == -1) {
647 free(__pthread_manager_thread_bos
);
652 /* Allocate memory for the thread descriptor and the dtv. */
653 tcbp
= _dl_allocate_tls (NULL
);
655 free(__pthread_manager_thread_bos
);
656 __libc_close(manager_pipe
[0]);
657 __libc_close(manager_pipe
[1]);
662 mgr
= (pthread_descr
) tcbp
;
664 /* pthread_descr is located right below tcbhead_t which _dl_allocate_tls
666 mgr
= (pthread_descr
) ((char *) tcbp
- TLS_PRE_TCB_SIZE
);
668 __pthread_handles
[1].h_descr
= manager_thread
= mgr
;
670 /* Initialize the descriptor. */
671 #if !defined USE_TLS || !TLS_DTV_AT_TP
672 mgr
->p_header
.data
.tcb
= tcbp
;
673 mgr
->p_header
.data
.self
= mgr
;
674 mgr
->p_header
.data
.multiple_threads
= 1;
675 #elif TLS_MULTIPLE_THREADS_IN_TCB
676 mgr
->p_multiple_threads
= 1;
678 mgr
->p_lock
= &__pthread_handles
[1].h_lock
;
679 # ifndef HAVE___THREAD
680 mgr
->p_errnop
= &mgr
->p_errno
;
682 mgr
->p_start_args
= (struct pthread_start_args
) PTHREAD_START_ARGS_INITIALIZER(__pthread_manager
);
684 # if __LT_SPINLOCK_INIT != 0
685 self
->p_resume_count
= (struct pthread_atomic
) __ATOMIC_INITIALIZER
;
687 mgr
->p_alloca_cutoff
= PTHREAD_STACK_MIN
/ 4;
689 mgr
= &__pthread_manager_thread
;
692 __pthread_manager_request
= manager_pipe
[1]; /* writing end */
693 __pthread_manager_reader
= manager_pipe
[0]; /* reading end */
695 /* Start the thread manager */
698 if (__linuxthreads_initial_report_events
!= 0)
699 THREAD_SETMEM (((pthread_descr
) NULL
), p_report_events
,
700 __linuxthreads_initial_report_events
);
701 report_events
= THREAD_GETMEM (((pthread_descr
) NULL
), p_report_events
);
703 if (__linuxthreads_initial_report_events
!= 0)
704 __pthread_initial_thread
.p_report_events
705 = __linuxthreads_initial_report_events
;
706 report_events
= __pthread_initial_thread
.p_report_events
;
708 if (__builtin_expect (report_events
, 0))
710 /* It's a bit more complicated. We have to report the creation of
711 the manager thread. */
712 int idx
= __td_eventword (TD_CREATE
);
713 uint32_t mask
= __td_eventmask (TD_CREATE
);
717 event_bits
= THREAD_GETMEM_NC (((pthread_descr
) NULL
),
718 p_eventbuf
.eventmask
.event_bits
[idx
]);
720 event_bits
= __pthread_initial_thread
.p_eventbuf
.eventmask
.event_bits
[idx
];
723 if ((mask
& (__pthread_threads_events
.event_bits
[idx
] | event_bits
))
726 __pthread_lock(mgr
->p_lock
, NULL
);
728 #ifdef NEED_SEPARATE_REGISTER_STACK
729 pid
= __clone2(__pthread_manager_event
,
730 (void **) __pthread_manager_thread_bos
,
731 THREAD_MANAGER_STACK_SIZE
,
732 CLONE_VM
| CLONE_FS
| CLONE_FILES
| CLONE_SIGHAND
,
734 #elif _STACK_GROWS_UP
735 pid
= __clone(__pthread_manager_event
,
736 (void **) __pthread_manager_thread_bos
,
737 CLONE_VM
| CLONE_FS
| CLONE_FILES
| CLONE_SIGHAND
,
740 pid
= __clone(__pthread_manager_event
,
741 (void **) __pthread_manager_thread_tos
,
742 CLONE_VM
| CLONE_FS
| CLONE_FILES
| CLONE_SIGHAND
,
748 /* Now fill in the information about the new thread in
749 the newly created thread's data structure. We cannot let
750 the new thread do this since we don't know whether it was
751 already scheduled when we send the event. */
752 mgr
->p_eventbuf
.eventdata
= mgr
;
753 mgr
->p_eventbuf
.eventnum
= TD_CREATE
;
754 __pthread_last_event
= mgr
;
755 mgr
->p_tid
= 2* PTHREAD_THREADS_MAX
+ 1;
758 /* Now call the function which signals the event. */
759 __linuxthreads_create_event ();
762 /* Now restart the thread. */
763 __pthread_unlock(mgr
->p_lock
);
767 if (__builtin_expect (pid
, 0) == 0)
769 #ifdef NEED_SEPARATE_REGISTER_STACK
770 pid
= __clone2(__pthread_manager
, (void **) __pthread_manager_thread_bos
,
771 THREAD_MANAGER_STACK_SIZE
,
772 CLONE_VM
| CLONE_FS
| CLONE_FILES
| CLONE_SIGHAND
, mgr
);
773 #elif _STACK_GROWS_UP
774 pid
= __clone(__pthread_manager
, (void **) __pthread_manager_thread_bos
,
775 CLONE_VM
| CLONE_FS
| CLONE_FILES
| CLONE_SIGHAND
, mgr
);
777 pid
= __clone(__pthread_manager
, (void **) __pthread_manager_thread_tos
,
778 CLONE_VM
| CLONE_FS
| CLONE_FILES
| CLONE_SIGHAND
, mgr
);
781 if (__builtin_expect (pid
, 0) == -1) {
782 free(__pthread_manager_thread_bos
);
783 __libc_close(manager_pipe
[0]);
784 __libc_close(manager_pipe
[1]);
787 mgr
->p_tid
= 2* PTHREAD_THREADS_MAX
+ 1;
789 /* Make gdb aware of new thread manager */
790 if (__builtin_expect (__pthread_threads_debug
, 0) && __pthread_sig_debug
> 0)
792 raise(__pthread_sig_debug
);
793 /* We suspend ourself and gdb will wake us up when it is
794 ready to handle us. */
795 __pthread_wait_for_restart_signal(thread_self());
797 /* Synchronize debugging of the thread manager */
798 request
.req_kind
= REQ_DEBUG
;
799 TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request
,
800 (char *) &request
, sizeof(request
)));
804 /* Thread creation */
806 int __pthread_create_2_1(pthread_t
*thread
, const pthread_attr_t
*attr
,
807 void * (*start_routine
)(void *), void *arg
)
809 pthread_descr self
= thread_self();
810 struct pthread_request request
;
812 if (__builtin_expect (__pthread_manager_request
, 0) < 0) {
813 if (__pthread_initialize_manager() < 0) return EAGAIN
;
815 request
.req_thread
= self
;
816 request
.req_kind
= REQ_CREATE
;
817 request
.req_args
.create
.attr
= attr
;
818 request
.req_args
.create
.fn
= start_routine
;
819 request
.req_args
.create
.arg
= arg
;
820 sigprocmask(SIG_SETMASK
, (const sigset_t
*) NULL
,
821 &request
.req_args
.create
.mask
);
822 TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request
,
823 (char *) &request
, sizeof(request
)));
825 retval
= THREAD_GETMEM(self
, p_retcode
);
826 if (__builtin_expect (retval
, 0) == 0)
827 *thread
= (pthread_t
) THREAD_GETMEM(self
, p_retval
);
831 versioned_symbol (libpthread
, __pthread_create_2_1
, pthread_create
, GLIBC_2_1
);
833 #if SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_1)
835 int __pthread_create_2_0(pthread_t
*thread
, const pthread_attr_t
*attr
,
836 void * (*start_routine
)(void *), void *arg
)
838 /* The ATTR attribute is not really of type `pthread_attr_t *'. It has
839 the old size and access to the new members might crash the program.
840 We convert the struct now. */
841 pthread_attr_t new_attr
;
845 size_t ps
= __getpagesize ();
847 memcpy (&new_attr
, attr
,
848 (size_t) &(((pthread_attr_t
*)NULL
)->__guardsize
));
849 new_attr
.__guardsize
= ps
;
850 new_attr
.__stackaddr_set
= 0;
851 new_attr
.__stackaddr
= NULL
;
852 new_attr
.__stacksize
= STACK_SIZE
- ps
;
855 return __pthread_create_2_1 (thread
, attr
, start_routine
, arg
);
857 compat_symbol (libpthread
, __pthread_create_2_0
, pthread_create
, GLIBC_2_0
);
860 /* Simple operations on thread identifiers */
862 pthread_descr
__pthread_thread_self(void)
864 return thread_self();
867 pthread_t
__pthread_self(void)
869 pthread_descr self
= thread_self();
870 return THREAD_GETMEM(self
, p_tid
);
872 strong_alias (__pthread_self
, pthread_self
);
874 int __pthread_equal(pthread_t thread1
, pthread_t thread2
)
876 return thread1
== thread2
;
878 strong_alias (__pthread_equal
, pthread_equal
);
880 /* Helper function for thread_self in the case of user-provided stacks */
884 pthread_descr
__pthread_find_self(void)
886 char * sp
= CURRENT_STACK_FRAME
;
889 /* __pthread_handles[0] is the initial thread, __pthread_handles[1] is
890 the manager threads handled specially in thread_self(), so start at 2 */
891 h
= __pthread_handles
+ 2;
892 while (! (sp
<= (char *) h
->h_descr
&& sp
>= h
->h_bottom
)) h
++;
898 static pthread_descr
thread_self_stack(void)
900 char *sp
= CURRENT_STACK_FRAME
;
903 if (sp
>= __pthread_manager_thread_bos
&& sp
< __pthread_manager_thread_tos
)
904 return manager_thread
;
905 h
= __pthread_handles
+ 2;
907 while (h
->h_descr
== NULL
908 || ! (sp
<= (char *) h
->h_descr
->p_stackaddr
&& sp
>= h
->h_bottom
))
911 while (! (sp
<= (char *) h
->h_descr
&& sp
>= h
->h_bottom
))
919 /* Thread scheduling */
921 int __pthread_setschedparam(pthread_t thread
, int policy
,
922 const struct sched_param
*param
)
924 pthread_handle handle
= thread_handle(thread
);
927 __pthread_lock(&handle
->h_lock
, NULL
);
928 if (__builtin_expect (invalid_handle(handle
, thread
), 0)) {
929 __pthread_unlock(&handle
->h_lock
);
932 th
= handle
->h_descr
;
933 if (__builtin_expect (__sched_setscheduler(th
->p_pid
, policy
, param
) == -1,
935 __pthread_unlock(&handle
->h_lock
);
938 th
->p_priority
= policy
== SCHED_OTHER
? 0 : param
->sched_priority
;
939 __pthread_unlock(&handle
->h_lock
);
940 if (__pthread_manager_request
>= 0)
941 __pthread_manager_adjust_prio(th
->p_priority
);
944 strong_alias (__pthread_setschedparam
, pthread_setschedparam
);
946 int __pthread_getschedparam(pthread_t thread
, int *policy
,
947 struct sched_param
*param
)
949 pthread_handle handle
= thread_handle(thread
);
952 __pthread_lock(&handle
->h_lock
, NULL
);
953 if (__builtin_expect (invalid_handle(handle
, thread
), 0)) {
954 __pthread_unlock(&handle
->h_lock
);
957 pid
= handle
->h_descr
->p_pid
;
958 __pthread_unlock(&handle
->h_lock
);
959 pol
= __sched_getscheduler(pid
);
960 if (__builtin_expect (pol
, 0) == -1) return errno
;
961 if (__sched_getparam(pid
, param
) == -1) return errno
;
965 strong_alias (__pthread_getschedparam
, pthread_getschedparam
);
967 int __pthread_yield (void)
969 /* For now this is equivalent with the POSIX call. */
970 return sched_yield ();
972 weak_alias (__pthread_yield
, pthread_yield
)
974 /* Process-wide exit() request */
976 static void pthread_onexit_process(int retcode
, void *arg
)
978 if (__builtin_expect (__pthread_manager_request
, 0) >= 0) {
979 struct pthread_request request
;
980 pthread_descr self
= thread_self();
982 request
.req_thread
= self
;
983 request
.req_kind
= REQ_PROCESS_EXIT
;
984 request
.req_args
.exit
.code
= retcode
;
985 TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request
,
986 (char *) &request
, sizeof(request
)));
988 /* Main thread should accumulate times for thread manager and its
989 children, so that timings for main thread account for all threads. */
990 if (self
== __pthread_main_thread
)
993 waitpid(manager_thread
->p_pid
, NULL
, __WCLONE
);
995 waitpid(__pthread_manager_thread
.p_pid
, NULL
, __WCLONE
);
997 /* Since all threads have been asynchronously terminated
998 (possibly holding locks), free cannot be used any more. */
999 /*free (__pthread_manager_thread_bos);*/
1000 __pthread_manager_thread_bos
= __pthread_manager_thread_tos
= NULL
;
1005 #ifndef HAVE_Z_NODELETE
1006 static int __pthread_atexit_retcode
;
1008 static void pthread_atexit_process(void *arg
, int retcode
)
1010 pthread_onexit_process (retcode
?: __pthread_atexit_retcode
, arg
);
1013 static void pthread_atexit_retcode(void *arg
, int retcode
)
1015 __pthread_atexit_retcode
= retcode
;
1019 /* The handler for the RESTART signal just records the signal received
1020 in the thread descriptor, and optionally performs a siglongjmp
1021 (for pthread_cond_timedwait). */
1023 static void pthread_handle_sigrestart(int sig
)
1025 pthread_descr self
= thread_self();
1026 THREAD_SETMEM(self
, p_signal
, sig
);
1027 if (THREAD_GETMEM(self
, p_signal_jmp
) != NULL
)
1028 siglongjmp(*THREAD_GETMEM(self
, p_signal_jmp
), 1);
/* The handler for the CANCEL signal checks for cancellation
   (in asynchronous mode), for process-wide exit and exec requests.
   For the thread manager thread, redirect the signal to
   __pthread_manager_sighandler. */

static void pthread_handle_sigcancel(int sig)
{
  pthread_descr self = thread_self();
  sigjmp_buf * jmpbuf;

  if (self == manager_thread)
    {
#ifdef THREAD_SELF
      /* A new thread might get a cancel signal before it is fully
         initialized, so that the thread register might still point to the
         manager thread.  Double check that this is really the manager
         thread.  */
      pthread_descr real_self = thread_self_stack();
      if (real_self == manager_thread)
        {
          __pthread_manager_sighandler(sig);
          return;
        }
      /* Oops, thread_self() isn't working yet.. */
      self = real_self;
# ifdef INIT_THREAD_SELF
      INIT_THREAD_SELF(self, self->p_nr);
# endif
#else
      __pthread_manager_sighandler(sig);
      return;
#endif
    }
  if (__builtin_expect (__pthread_exit_requested, 0)) {
    /* Main thread should accumulate times for thread manager and its
       children, so that timings for main thread account for all threads. */
    if (self == __pthread_main_thread) {
#ifdef USE_TLS
      waitpid(manager_thread->p_pid, NULL, __WCLONE);
#else
      waitpid(__pthread_manager_thread.p_pid, NULL, __WCLONE);
#endif
    }
    _exit(__pthread_exit_code);
  }
  if (__builtin_expect (THREAD_GETMEM(self, p_canceled), 0)
      && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
    if (THREAD_GETMEM(self, p_canceltype) == PTHREAD_CANCEL_ASYNCHRONOUS)
      /* Asynchronous cancellation: terminate this thread right here.  */
      __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
    /* Deferred cancellation inside a cancellable wait: clear the saved
       context before jumping back so it cannot be reused.  */
    jmpbuf = THREAD_GETMEM(self, p_cancel_jmp);
    if (jmpbuf != NULL) {
      THREAD_SETMEM(self, p_cancel_jmp, NULL);
      siglongjmp(*jmpbuf, 1);
    }
  }
}
/* Handler for the DEBUG signal.
   The debugging strategy is as follows:
   On reception of a REQ_DEBUG request (sent by new threads created to
   the thread manager under debugging mode), the thread manager throws
   __pthread_sig_debug to itself. The debugger (if active) intercepts
   this signal, takes into account new threads and continue execution
   of the thread manager by propagating the signal because it doesn't
   know what it is specifically done for. In the current implementation,
   the thread manager simply discards it. */

static void pthread_handle_sigdebug(int sig)
{
  /* Intentionally empty: the signal exists only to be observed by an
     attached debugger; the manager itself just discards it.  */
  (void) sig;
}
/* Reset the state of the thread machinery after a fork().
   Close the pipe used for requests and set the main thread to the forked
   thread.
   Notice that we can't free the stack segments, as the forked thread
   may hold pointers into them. */

void __pthread_reset_main_thread(void)
{
  pthread_descr self = thread_self();

  if (__pthread_manager_request != -1) {
    /* Free the thread manager stack */
    free(__pthread_manager_thread_bos);
    __pthread_manager_thread_bos = __pthread_manager_thread_tos = NULL;
    /* Close the two ends of the pipe */
    __libc_close(__pthread_manager_request);
    __libc_close(__pthread_manager_reader);
    /* Mark the manager as gone so this block is not re-entered.  */
    __pthread_manager_request = __pthread_manager_reader = -1;
  }

  /* Update the pid of the main thread */
  THREAD_SETMEM(self, p_pid, __getpid());
  /* Make the forked thread the main thread */
  __pthread_main_thread = self;
  /* The live-thread ring now contains only this thread.  */
  THREAD_SETMEM(self, p_nextlive, self);
  THREAD_SETMEM(self, p_prevlive, self);
#if !(USE_TLS && HAVE___THREAD)
  /* Now this thread modifies the global variables. */
  THREAD_SETMEM(self, p_errnop, &_errno);
  THREAD_SETMEM(self, p_h_errnop, &_h_errno);
  THREAD_SETMEM(self, p_resp, &_res);
#endif

#ifndef FLOATING_STACKS
  /* This is to undo the setrlimit call in __pthread_init_max_stacksize.
     XXX This can be wrong if the user set the limit during the run. */
  {
    struct rlimit limit;
    if (getrlimit (RLIMIT_STACK, &limit) == 0
        && limit.rlim_cur != limit.rlim_max)
      {
        limit.rlim_cur = limit.rlim_max;
        setrlimit(RLIMIT_STACK, &limit);
      }
  }
#endif
}
1151 /* Process-wide exec() request */
1153 void __pthread_kill_other_threads_np(void)
1155 struct sigaction sa
;
1156 /* Terminate all other threads and thread manager */
1157 pthread_onexit_process(0, NULL
);
1158 /* Make current thread the main thread in case the calling thread
1159 changes its mind, does not exec(), and creates new threads instead. */
1160 __pthread_reset_main_thread();
1162 /* Reset the signal handlers behaviour for the signals the
1163 implementation uses since this would be passed to the new
1165 sigemptyset(&sa
.sa_mask
);
1167 sa
.sa_handler
= SIG_DFL
;
1168 __libc_sigaction(__pthread_sig_restart
, &sa
, NULL
);
1169 __libc_sigaction(__pthread_sig_cancel
, &sa
, NULL
);
1170 if (__pthread_sig_debug
> 0)
1171 __libc_sigaction(__pthread_sig_debug
, &sa
, NULL
);
1173 weak_alias (__pthread_kill_other_threads_np
, pthread_kill_other_threads_np
)
1175 /* Concurrency symbol level. */
1176 static int current_level
;
1178 int __pthread_setconcurrency(int level
)
1180 /* We don't do anything unless we have found a useful interpretation. */
1181 current_level
= level
;
1184 weak_alias (__pthread_setconcurrency
, pthread_setconcurrency
)
1186 int __pthread_getconcurrency(void)
1188 return current_level
;
1190 weak_alias (__pthread_getconcurrency
, pthread_getconcurrency
)
/* Primitives for controlling thread execution */

/* Block the calling thread until it receives the restart signal.
   SELF must be the caller's own descriptor.  */
void __pthread_wait_for_restart_signal(pthread_descr self)
{
  sigset_t mask;

  sigprocmask(SIG_SETMASK, NULL, &mask); /* Get current signal mask */
  sigdelset(&mask, __pthread_sig_restart); /* Unblock the restart signal */
  THREAD_SETMEM(self, p_signal, 0);
  do {
    __pthread_sigsuspend(&mask);        /* Wait for signal.  Must not be a
                                           cancellation point. */
    /* Loop: any other signal delivered during the suspend wakes us
       spuriously; p_signal is set by pthread_handle_sigrestart.  */
  } while (THREAD_GETMEM(self, p_signal) !=__pthread_sig_restart);

  READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */
}
#if !__ASSUME_REALTIME_SIGNALS
/* The _old variants are for 2.0 and early 2.1 kernels which don't have RT
   signals.
   On these kernels, we use SIGUSR1 and SIGUSR2 for restart and cancellation.
   Since the restart signal does not queue, we use an atomic counter to create
   queuing semantics. This is needed to resolve a rare race condition in
   pthread_cond_timedwait_relative. */

/* Resume TH: bump its resume count, and send the (non-queuing) restart
   signal only when the counter shows TH is actually suspended (-1 -> 0). */
void __pthread_restart_old(pthread_descr th)
{
  if (atomic_increment(&th->p_resume_count) == -1)
    kill(th->p_pid, __pthread_sig_restart);
}
/* Suspend the calling thread: decrement the resume counter and wait for
   the restart signal unless a restart was already pending (counter > 0). */
void __pthread_suspend_old(pthread_descr self)
{
  if (atomic_decrement(&self->p_resume_count) <= 0)
    __pthread_wait_for_restart_signal(self);
}
/* Suspend the calling thread until restarted or until ABSTIME passes
   (pre-RT-signal kernels).  Returns 1 if a restart was consumed, 0 if
   the caller must resolve a possible race with a concurrent restart
   (see the long comment below).  */
int
__pthread_timedsuspend_old(pthread_descr self, const struct timespec *abstime)
{
  sigset_t unblock, initial_mask;
  int was_signalled = 0;
  sigjmp_buf jmpbuf;

  if (atomic_decrement(&self->p_resume_count) == 0) {
    /* Set up a longjmp handler for the restart signal, unblock
       the signal and sleep. */

    if (sigsetjmp(jmpbuf, 1) == 0) {
      THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
      THREAD_SETMEM(self, p_signal, 0);
      /* Unblock the restart signal */
      sigemptyset(&unblock);
      sigaddset(&unblock, __pthread_sig_restart);
      sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);

      while (1) {
        struct timeval now;
        struct timespec reltime;

        /* Compute a time offset relative to now. */
        __gettimeofday (&now, NULL);
        reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
        reltime.tv_sec = abstime->tv_sec - now.tv_sec;
        if (reltime.tv_nsec < 0) {
          reltime.tv_nsec += 1000000000;
          reltime.tv_sec -= 1;
        }

        /* Sleep for the required duration. If woken by a signal,
           resume waiting as required by Single Unix Specification. */
        if (reltime.tv_sec < 0 || __libc_nanosleep(&reltime, NULL) == 0)
          break;
      }

      /* Block the restart signal again */
      sigprocmask(SIG_SETMASK, &initial_mask, NULL);
      was_signalled = 0;
    } else {
      /* We arrive here via siglongjmp from the restart handler.  */
      was_signalled = 1;
    }
    THREAD_SETMEM(self, p_signal_jmp, NULL);
  }

  /* Now was_signalled is true if we exited the above code
     due to the delivery of a restart signal.  In that case,
     we know we have been dequeued and resumed and that the
     resume count is balanced.  Otherwise, there are some
     cases to consider. First, try to bump up the resume count
     back to zero. If it goes to 1, it means restart() was
     invoked on this thread. The signal must be consumed
     and the count bumped down and everything is cool. We
     can return a 1 to the caller.
     Otherwise, no restart was delivered yet, so a potential
     race exists; we return a 0 to the caller which must deal
     with this race in an appropriate way; for example by
     atomically removing the thread from consideration for a
     wakeup---if such a thing fails, it means a restart is
     being delivered. */

  if (!was_signalled) {
    if (atomic_increment(&self->p_resume_count) != -1) {
      __pthread_wait_for_restart_signal(self);
      atomic_decrement(&self->p_resume_count); /* should be zero now! */
      /* woke spontaneously and consumed restart signal */
      return 1;
    }
    /* woke spontaneously but did not consume restart---caller must resolve */
    return 0;
  }
  /* woken due to restart signal */
  return 1;
}
#endif /* __ASSUME_REALTIME_SIGNALS */
/* Resume TH on RT-signal kernels: the restart signal queues, so a plain
   kill suffices (no resume counter needed).  */
void __pthread_restart_new(pthread_descr th)
{
  /* The barrier is probably not needed, in which case it still documents
     our assumptions. The intent is to commit previous writes to shared
     memory so the woken thread will have a consistent view. Complementary
     read barriers are present in the suspend functions. */
  WRITE_MEMORY_BARRIER();
  kill(th->p_pid, __pthread_sig_restart);
}
1317 /* There is no __pthread_suspend_new because it would just
1318 be a wasteful wrapper for __pthread_wait_for_restart_signal */
/* Suspend the calling thread until restarted or until ABSTIME passes
   (RT-signal kernels).  Returns 1 iff the restart signal was consumed;
   0 means a wakeup race the caller must resolve (see comment below).  */
int
__pthread_timedsuspend_new(pthread_descr self, const struct timespec *abstime)
{
  sigset_t unblock, initial_mask;
  int was_signalled = 0;
  sigjmp_buf jmpbuf;

  if (sigsetjmp(jmpbuf, 1) == 0) {
    THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
    THREAD_SETMEM(self, p_signal, 0);
    /* Unblock the restart signal */
    sigemptyset(&unblock);
    sigaddset(&unblock, __pthread_sig_restart);
    sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);

    while (1) {
      struct timeval now;
      struct timespec reltime;

      /* Compute a time offset relative to now. */
      __gettimeofday (&now, NULL);
      reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
      reltime.tv_sec = abstime->tv_sec - now.tv_sec;
      if (reltime.tv_nsec < 0) {
        reltime.tv_nsec += 1000000000;
        reltime.tv_sec -= 1;
      }

      /* Sleep for the required duration. If woken by a signal,
         resume waiting as required by Single Unix Specification. */
      if (reltime.tv_sec < 0 || __libc_nanosleep(&reltime, NULL) == 0)
        break;
    }

    /* Block the restart signal again */
    sigprocmask(SIG_SETMASK, &initial_mask, NULL);
    was_signalled = 0;
  } else {
    /* We arrive here via siglongjmp from the restart handler.  */
    was_signalled = 1;
  }
  THREAD_SETMEM(self, p_signal_jmp, NULL);

  /* Now was_signalled is true if we exited the above code
     due to the delivery of a restart signal.  In that case,
     everything is cool. We have been removed from whatever
     we were waiting on by the other thread, and consumed its signal.

     Otherwise this thread woke up spontaneously, or due to a signal other
     than restart. This is an ambiguous case that must be resolved by
     the caller; the thread is still eligible for a restart wakeup
     so there is a race. */

  READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */
  return was_signalled;
}
1382 void __pthread_message(const char * fmt
, ...)
1386 sprintf(buffer
, "%05d : ", __getpid());
1387 va_start(args
, fmt
);
1388 vsnprintf(buffer
+ 8, sizeof(buffer
) - 8, fmt
, args
);
1390 TEMP_FAILURE_RETRY(__libc_write(2, buffer
, strlen(buffer
)));