2 /* Linuxthreads - a simple clone()-based implementation of Posix */
3 /* threads for Linux. */
4 /* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr) */
6 /* This program is free software; you can redistribute it and/or */
7 /* modify it under the terms of the GNU Library General Public License */
8 /* as published by the Free Software Foundation; either version 2 */
9 /* of the License, or (at your option) any later version. */
11 /* This program is distributed in the hope that it will be useful, */
12 /* but WITHOUT ANY WARRANTY; without even the implied warranty of */
13 /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
14 /* GNU Library General Public License for more details. */
16 /* Thread creation, initialization, and basic low-level routines */
26 #include <sys/resource.h>
28 #include <shlib-compat.h>
30 #include "internals.h"
36 #include <locale.h> /* for __uselocale */
40 #if __ASSUME_REALTIME_SIGNALS && !defined __SIGRTMIN
41 # error "This must not happen; new kernel assumed but old headers"
44 #if !(USE_TLS && HAVE___THREAD)
45 /* These variables are used by the setup code. */
49 /* We need the global/static resolver state here. */
53 extern struct __res_state _res
;
58 /* We need only a few variables. */
59 static pthread_descr manager_thread
;
63 /* Descriptor of the initial thread */
65 struct _pthread_descr_struct __pthread_initial_thread
= {
66 .p_header
.data
.self
= &__pthread_initial_thread
,
67 .p_nextlive
= &__pthread_initial_thread
,
68 .p_prevlive
= &__pthread_initial_thread
,
69 .p_tid
= PTHREAD_THREADS_MAX
,
70 .p_lock
= &__pthread_handles
[0].h_lock
,
71 .p_start_args
= PTHREAD_START_ARGS_INITIALIZER(NULL
),
72 #if !(USE_TLS && HAVE___THREAD)
74 .p_h_errnop
= &_h_errno
,
78 .p_resume_count
= __ATOMIC_INITIALIZER
,
79 .p_alloca_cutoff
= __MAX_ALLOCA_CUTOFF
82 /* Descriptor of the manager thread; none of this is used but the error
83 variables, the p_pid and p_priority fields,
84 and the address for identification. */
86 #define manager_thread (&__pthread_manager_thread)
87 struct _pthread_descr_struct __pthread_manager_thread
= {
88 .p_header
.data
.self
= &__pthread_manager_thread
,
89 .p_lock
= &__pthread_handles
[1].h_lock
,
90 .p_start_args
= PTHREAD_START_ARGS_INITIALIZER(__pthread_manager
),
91 #if !(USE_TLS && HAVE___THREAD)
92 .p_errnop
= &__pthread_manager_thread
.p_errno
,
95 .p_resume_count
= __ATOMIC_INITIALIZER
,
96 .p_alloca_cutoff
= PTHREAD_STACK_MIN
/ 4
100 /* Pointer to the main thread (the father of the thread manager thread) */
101 /* Originally, this is the initial thread, but this changes after fork() */
104 pthread_descr __pthread_main_thread
;
106 pthread_descr __pthread_main_thread
= &__pthread_initial_thread
;
109 /* Limit between the stack of the initial thread (above) and the
110 stacks of other threads (below). Aligned on a STACK_SIZE boundary. */
112 char *__pthread_initial_thread_bos
;
114 /* File descriptor for sending requests to the thread manager. */
115 /* Initially -1, meaning that the thread manager is not running. */
117 int __pthread_manager_request
= -1;
119 /* Other end of the pipe for sending requests to the thread manager. */
121 int __pthread_manager_reader
;
123 /* Limits of the thread manager stack */
125 char *__pthread_manager_thread_bos
;
126 char *__pthread_manager_thread_tos
;
128 /* For process-wide exit() */
130 int __pthread_exit_requested
;
131 int __pthread_exit_code
;
133 /* Maximum stack size. */
134 size_t __pthread_max_stacksize
;
/* Nonzero if the machine has more than one processor. */
137 int __pthread_smp_kernel
;
140 #if !__ASSUME_REALTIME_SIGNALS
141 /* Pointers that select new or old suspend/resume functions
142 based on availability of rt signals. */
144 void (*__pthread_restart
)(pthread_descr
) = __pthread_restart_old
;
145 void (*__pthread_suspend
)(pthread_descr
) = __pthread_suspend_old
;
146 int (*__pthread_timedsuspend
)(pthread_descr
, const struct timespec
*) = __pthread_timedsuspend_old
;
147 #endif /* __ASSUME_REALTIME_SIGNALS */
149 /* Communicate relevant LinuxThreads constants to gdb */
151 const int __pthread_threads_max
= PTHREAD_THREADS_MAX
;
152 const int __pthread_sizeof_handle
= sizeof(struct pthread_handle_struct
);
153 const int __pthread_offsetof_descr
= offsetof(struct pthread_handle_struct
,
155 const int __pthread_offsetof_pid
= offsetof(struct _pthread_descr_struct
,
157 const int __linuxthreads_pthread_sizeof_descr
158 = sizeof(struct _pthread_descr_struct
);
160 const int __linuxthreads_initial_report_events
;
162 const char __linuxthreads_version
[] = VERSION
;
164 /* Forward declarations */
166 static void pthread_onexit_process(int retcode
, void *arg
);
167 #ifndef HAVE_Z_NODELETE
168 static void pthread_atexit_process(void *arg
, int retcode
);
169 static void pthread_atexit_retcode(void *arg
, int retcode
);
171 static void pthread_handle_sigcancel(int sig
);
172 static void pthread_handle_sigrestart(int sig
);
173 static void pthread_handle_sigdebug(int sig
);
175 /* Signal numbers used for the communication.
176 In these variables we keep track of the used variables. If the
177 platform does not support any real-time signals we will define the
178 values to some unreasonable value which will signal failing of all
179 the functions below. */
181 static int current_rtmin
= -1;
182 static int current_rtmax
= -1;
183 int __pthread_sig_restart
= SIGUSR1
;
184 int __pthread_sig_cancel
= SIGUSR2
;
185 int __pthread_sig_debug
;
187 static int current_rtmin
;
188 static int current_rtmax
;
190 #if __SIGRTMAX - __SIGRTMIN >= 3
191 int __pthread_sig_restart
= __SIGRTMIN
;
192 int __pthread_sig_cancel
= __SIGRTMIN
+ 1;
193 int __pthread_sig_debug
= __SIGRTMIN
+ 2;
195 int __pthread_sig_restart
= SIGUSR1
;
196 int __pthread_sig_cancel
= SIGUSR2
;
197 int __pthread_sig_debug
;
200 static int rtsigs_initialized
;
202 #if !__ASSUME_REALTIME_SIGNALS
203 # include "testrtsig.h"
209 #if !__ASSUME_REALTIME_SIGNALS
210 if (__builtin_expect (!kernel_has_rtsig (), 0))
214 # if __SIGRTMAX - __SIGRTMIN >= 3
215 __pthread_sig_restart
= SIGUSR1
;
216 __pthread_sig_cancel
= SIGUSR2
;
217 __pthread_sig_debug
= 0;
221 #endif /* __ASSUME_REALTIME_SIGNALS */
223 #if __SIGRTMAX - __SIGRTMIN >= 3
224 current_rtmin
= __SIGRTMIN
+ 3;
225 # if !__ASSUME_REALTIME_SIGNALS
226 __pthread_restart
= __pthread_restart_new
;
227 __pthread_suspend
= __pthread_wait_for_restart_signal
;
228 __pthread_timedsuspend
= __pthread_timedsuspend_new
;
229 # endif /* __ASSUME_REALTIME_SIGNALS */
231 current_rtmin
= __SIGRTMIN
;
234 current_rtmax
= __SIGRTMAX
;
237 rtsigs_initialized
= 1;
241 /* Return number of available real-time signal with highest priority. */
243 __libc_current_sigrtmin (void)
246 if (__builtin_expect (!rtsigs_initialized
, 0))
249 return current_rtmin
;
252 /* Return number of available real-time signal with lowest priority. */
254 __libc_current_sigrtmax (void)
257 if (__builtin_expect (!rtsigs_initialized
, 0))
260 return current_rtmax
;
263 /* Allocate real-time signal with highest/lowest available
264 priority. Please note that we don't use a lock since we assume
265 this function to be called at program start. */
267 __libc_allocate_rtsig (int high
)
272 if (__builtin_expect (!rtsigs_initialized
, 0))
274 if (__builtin_expect (current_rtmin
== -1, 0)
275 || __builtin_expect (current_rtmin
> current_rtmax
, 0))
276 /* We don't have anymore signal available. */
279 return high
? current_rtmin
++ : current_rtmax
--;
284 /* Initialize the pthread library.
285 Initialization is split in two functions:
286 - a constructor function that blocks the __pthread_sig_restart signal
287 (must do this very early, since the program could capture the signal
288 mask with e.g. sigsetjmp before creating the first thread);
289 - a regular function called from pthread_create when needed. */
291 static void pthread_initialize(void) __attribute__((constructor
));
293 #ifndef HAVE_Z_NODELETE
294 extern void *__dso_handle
__attribute__ ((weak
));
298 #if defined USE_TLS && !defined SHARED
299 extern void __libc_setup_tls (size_t tcbsize
, size_t tcbalign
);
303 /* Do some minimal initialization which has to be done during the
304 startup of the C library. */
306 __pthread_initialize_minimal(void)
311 /* First of all init __pthread_handles[0] and [1] if needed. */
312 # if __LT_SPINLOCK_INIT != 0
313 __pthread_handles
[0].h_lock
= __LOCK_INITIALIZER
;
314 __pthread_handles
[1].h_lock
= __LOCK_INITIALIZER
;
317 /* Unlike in the dynamically linked case the dynamic linker has not
318 taken care of initializing the TLS data structures. */
319 __libc_setup_tls (TLS_TCB_SIZE
, TLS_TCB_ALIGN
);
324 /* The memory for the thread descriptor was allocated elsewhere as
325 part of the TLS allocation. We have to initialize the data
326 structure by hand. This initialization must mirror the struct
328 self
->p_nextlive
= self
->p_prevlive
= self
;
329 self
->p_tid
= PTHREAD_THREADS_MAX
;
330 self
->p_lock
= &__pthread_handles
[0].h_lock
;
331 # ifndef HAVE___THREAD
332 self
->p_errnop
= &_errno
;
333 self
->p_h_errnop
= &_h_errno
;
335 /* self->p_start_args need not be initialized, it's all zero. */
336 self
->p_userstack
= 1;
337 # if __LT_SPINLOCK_INIT != 0
338 self
->p_resume_count
= (struct pthread_atomic
) __ATOMIC_INITIALIZER
;
340 self
->p_alloca_cutoff
= __MAX_ALLOCA_CUTOFF
;
342 /* Another variable which points to the thread descriptor. */
343 __pthread_main_thread
= self
;
345 /* And fill in the pointer the the thread __pthread_handles array. */
346 __pthread_handles
[0].h_descr
= self
;
348 /* First of all init __pthread_handles[0] and [1]. */
349 # if __LT_SPINLOCK_INIT != 0
350 __pthread_handles
[0].h_lock
= __LOCK_INITIALIZER
;
351 __pthread_handles
[1].h_lock
= __LOCK_INITIALIZER
;
353 __pthread_handles
[0].h_descr
= &__pthread_initial_thread
;
354 __pthread_handles
[1].h_descr
= &__pthread_manager_thread
;
356 /* If we have special thread_self processing, initialize that for the
358 # ifdef INIT_THREAD_SELF
359 INIT_THREAD_SELF(&__pthread_initial_thread
, 0);
365 self
->p_cpuclock_offset
= GL(dl_cpuclock_offset
);
367 __pthread_initial_thread
.p_cpuclock_offset
= GL(dl_cpuclock_offset
);
371 #if !(USE_TLS && HAVE___THREAD)
372 /* Initialize thread-locale current locale to point to the global one.
373 With __thread support, the variable's initializer takes care of this. */
374 __uselocale (LC_GLOBAL_LOCALE
);
380 __pthread_init_max_stacksize(void)
385 getrlimit(RLIMIT_STACK
, &limit
);
386 #ifdef FLOATING_STACKS
387 if (limit
.rlim_cur
== RLIM_INFINITY
)
388 limit
.rlim_cur
= ARCH_STACK_MAX_SIZE
;
389 # ifdef NEED_SEPARATE_REGISTER_STACK
390 max_stack
= limit
.rlim_cur
/ 2;
392 max_stack
= limit
.rlim_cur
;
395 /* Play with the stack size limit to make sure that no stack ever grows
396 beyond STACK_SIZE minus one page (to act as a guard page). */
397 # ifdef NEED_SEPARATE_REGISTER_STACK
398 /* STACK_SIZE bytes hold both the main stack and register backing
399 store. The rlimit value applies to each individually. */
400 max_stack
= STACK_SIZE
/2 - __getpagesize ();
402 max_stack
= STACK_SIZE
- __getpagesize();
404 if (limit
.rlim_cur
> max_stack
) {
405 limit
.rlim_cur
= max_stack
;
406 setrlimit(RLIMIT_STACK
, &limit
);
409 __pthread_max_stacksize
= max_stack
;
410 if (max_stack
/ 4 < __MAX_ALLOCA_CUTOFF
)
413 pthread_descr self
= THREAD_SELF
;
414 self
->p_alloca_cutoff
= max_stack
/ 4;
416 __pthread_initial_thread
.p_alloca_cutoff
= max_stack
/ 4;
422 static void pthread_initialize(void)
427 /* If already done (e.g. by a constructor called earlier!), bail out */
428 if (__pthread_initial_thread_bos
!= NULL
) return;
429 #ifdef TEST_FOR_COMPARE_AND_SWAP
430 /* Test if compare-and-swap is available */
431 __pthread_has_cas
= compare_and_swap_is_available();
433 #ifdef FLOATING_STACKS
434 /* We don't need to know the bottom of the stack. Give the pointer some
435 value to signal that initialization happened. */
436 __pthread_initial_thread_bos
= (void *) -1l;
438 /* Determine stack size limits . */
439 __pthread_init_max_stacksize ();
440 # ifdef _STACK_GROWS_UP
441 /* The initial thread already has all the stack it needs */
442 __pthread_initial_thread_bos
= (char *)
443 ((long)CURRENT_STACK_FRAME
&~ (STACK_SIZE
- 1));
445 /* For the initial stack, reserve at least STACK_SIZE bytes of stack
446 below the current stack address, and align that on a
447 STACK_SIZE boundary. */
448 __pthread_initial_thread_bos
=
449 (char *)(((long)CURRENT_STACK_FRAME
- 2 * STACK_SIZE
) & ~(STACK_SIZE
- 1));
453 /* Update the descriptor for the initial thread. */
454 THREAD_SETMEM (((pthread_descr
) NULL
), p_pid
, __getpid());
455 # ifndef HAVE___THREAD
456 /* Likewise for the resolver state _res. */
457 THREAD_SETMEM (((pthread_descr
) NULL
), p_resp
, &_res
);
460 /* Update the descriptor for the initial thread. */
461 __pthread_initial_thread
.p_pid
= __getpid();
462 /* Likewise for the resolver state _res. */
463 __pthread_initial_thread
.p_resp
= &_res
;
466 /* Initialize real-time signals. */
469 /* Setup signal handlers for the initial thread.
470 Since signal handlers are shared between threads, these settings
471 will be inherited by all other threads. */
472 sa
.sa_handler
= pthread_handle_sigrestart
;
473 sigemptyset(&sa
.sa_mask
);
475 __libc_sigaction(__pthread_sig_restart
, &sa
, NULL
);
476 sa
.sa_handler
= pthread_handle_sigcancel
;
478 __libc_sigaction(__pthread_sig_cancel
, &sa
, NULL
);
479 if (__pthread_sig_debug
> 0) {
480 sa
.sa_handler
= pthread_handle_sigdebug
;
481 sigemptyset(&sa
.sa_mask
);
483 __libc_sigaction(__pthread_sig_debug
, &sa
, NULL
);
485 /* Initially, block __pthread_sig_restart. Will be unblocked on demand. */
487 sigaddset(&mask
, __pthread_sig_restart
);
488 sigprocmask(SIG_BLOCK
, &mask
, NULL
);
489 /* Register an exit function to kill all other threads. */
490 /* Do it early so that user-registered atexit functions are called
491 before pthread_*exit_process. */
492 #ifndef HAVE_Z_NODELETE
493 if (__builtin_expect (&__dso_handle
!= NULL
, 1))
494 __cxa_atexit ((void (*) (void *)) pthread_atexit_process
, NULL
,
498 __on_exit (pthread_onexit_process
, NULL
);
499 /* How many processors. */
500 __pthread_smp_kernel
= is_smp_system ();
/* Public entry point so other parts of the library can force early
   initialization; simply runs the constructor body.  */
void __pthread_initialize(void)
{
  pthread_initialize();
}
508 int __pthread_initialize_manager(void)
512 struct pthread_request request
;
516 #ifndef HAVE_Z_NODELETE
517 if (__builtin_expect (&__dso_handle
!= NULL
, 1))
518 __cxa_atexit ((void (*) (void *)) pthread_atexit_retcode
, NULL
,
522 if (__pthread_max_stacksize
== 0)
523 __pthread_init_max_stacksize ();
524 /* If basic initialization not done yet (e.g. we're called from a
525 constructor run before our constructor), do it now */
526 if (__pthread_initial_thread_bos
== NULL
) pthread_initialize();
527 /* Setup stack for thread manager */
528 __pthread_manager_thread_bos
= malloc(THREAD_MANAGER_STACK_SIZE
);
529 if (__pthread_manager_thread_bos
== NULL
) return -1;
530 __pthread_manager_thread_tos
=
531 __pthread_manager_thread_bos
+ THREAD_MANAGER_STACK_SIZE
;
532 /* Setup pipe to communicate with thread manager */
533 if (pipe(manager_pipe
) == -1) {
534 free(__pthread_manager_thread_bos
);
539 /* Allocate memory for the thread descriptor and the dtv. */
540 __pthread_handles
[1].h_descr
= manager_thread
= tcb
541 = _dl_allocate_tls (NULL
);
543 free(__pthread_manager_thread_bos
);
544 __libc_close(manager_pipe
[0]);
545 __libc_close(manager_pipe
[1]);
549 /* Initialize the descriptor. */
550 tcb
->p_header
.data
.tcb
= tcb
;
551 tcb
->p_header
.data
.self
= tcb
;
552 tcb
->p_lock
= &__pthread_handles
[1].h_lock
;
553 # ifndef HAVE___THREAD
554 tcb
->p_errnop
= &tcb
->p_errno
;
556 tcb
->p_start_args
= (struct pthread_start_args
) PTHREAD_START_ARGS_INITIALIZER(__pthread_manager
);
558 # if __LT_SPINLOCK_INIT != 0
559 self
->p_resume_count
= (struct pthread_atomic
) __ATOMIC_INITIALIZER
;
561 tcb
->p_alloca_cutoff
= PTHREAD_STACK_MIN
/ 4;
563 tcb
= &__pthread_manager_thread
;
566 __pthread_manager_request
= manager_pipe
[1]; /* writing end */
567 __pthread_manager_reader
= manager_pipe
[0]; /* reading end */
569 /* Start the thread manager */
572 if (__linuxthreads_initial_report_events
!= 0)
573 THREAD_SETMEM (((pthread_descr
) NULL
), p_report_events
,
574 __linuxthreads_initial_report_events
);
575 report_events
= THREAD_GETMEM (((pthread_descr
) NULL
), p_report_events
);
577 if (__linuxthreads_initial_report_events
!= 0)
578 __pthread_initial_thread
.p_report_events
579 = __linuxthreads_initial_report_events
;
580 report_events
= __pthread_initial_thread
.p_report_events
;
582 if (__builtin_expect (report_events
, 0))
584 /* It's a bit more complicated. We have to report the creation of
585 the manager thread. */
586 int idx
= __td_eventword (TD_CREATE
);
587 uint32_t mask
= __td_eventmask (TD_CREATE
);
591 event_bits
= THREAD_GETMEM_NC (((pthread_descr
) NULL
),
592 p_eventbuf
.eventmask
.event_bits
[idx
]);
594 event_bits
= __pthread_initial_thread
.p_eventbuf
.eventmask
.event_bits
[idx
];
597 if ((mask
& (__pthread_threads_events
.event_bits
[idx
] | event_bits
))
600 __pthread_lock(tcb
->p_lock
, NULL
);
602 #ifdef NEED_SEPARATE_REGISTER_STACK
603 pid
= __clone2(__pthread_manager_event
,
604 (void **) __pthread_manager_thread_bos
,
605 THREAD_MANAGER_STACK_SIZE
,
606 CLONE_VM
| CLONE_FS
| CLONE_FILES
| CLONE_SIGHAND
,
608 #elif _STACK_GROWS_UP
609 pid
= __clone(__pthread_manager_event
,
610 (void **) __pthread_manager_thread_bos
,
611 CLONE_VM
| CLONE_FS
| CLONE_FILES
| CLONE_SIGHAND
,
614 pid
= __clone(__pthread_manager_event
,
615 (void **) __pthread_manager_thread_tos
,
616 CLONE_VM
| CLONE_FS
| CLONE_FILES
| CLONE_SIGHAND
,
622 /* Now fill in the information about the new thread in
623 the newly created thread's data structure. We cannot let
624 the new thread do this since we don't know whether it was
625 already scheduled when we send the event. */
626 tcb
->p_eventbuf
.eventdata
= tcb
;
627 tcb
->p_eventbuf
.eventnum
= TD_CREATE
;
628 __pthread_last_event
= tcb
;
629 tcb
->p_tid
= 2* PTHREAD_THREADS_MAX
+ 1;
632 /* Now call the function which signals the event. */
633 __linuxthreads_create_event ();
636 /* Now restart the thread. */
637 __pthread_unlock(tcb
->p_lock
);
641 if (__builtin_expect (pid
, 0) == 0)
643 #ifdef NEED_SEPARATE_REGISTER_STACK
644 pid
= __clone2(__pthread_manager
, (void **) __pthread_manager_thread_bos
,
645 THREAD_MANAGER_STACK_SIZE
,
646 CLONE_VM
| CLONE_FS
| CLONE_FILES
| CLONE_SIGHAND
, tcb
);
647 #elif _STACK_GROWS_UP
648 pid
= __clone(__pthread_manager
, (void **) __pthread_manager_thread_bos
,
649 CLONE_VM
| CLONE_FS
| CLONE_FILES
| CLONE_SIGHAND
, tcb
);
651 pid
= __clone(__pthread_manager
, (void **) __pthread_manager_thread_tos
,
652 CLONE_VM
| CLONE_FS
| CLONE_FILES
| CLONE_SIGHAND
, tcb
);
655 if (__builtin_expect (pid
, 0) == -1) {
656 free(__pthread_manager_thread_bos
);
657 __libc_close(manager_pipe
[0]);
658 __libc_close(manager_pipe
[1]);
661 tcb
->p_tid
= 2* PTHREAD_THREADS_MAX
+ 1;
663 /* Make gdb aware of new thread manager */
664 if (__builtin_expect (__pthread_threads_debug
, 0) && __pthread_sig_debug
> 0)
666 raise(__pthread_sig_debug
);
667 /* We suspend ourself and gdb will wake us up when it is
668 ready to handle us. */
669 __pthread_wait_for_restart_signal(thread_self());
671 /* Synchronize debugging of the thread manager */
672 request
.req_kind
= REQ_DEBUG
;
673 TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request
,
674 (char *) &request
, sizeof(request
)));
678 /* Thread creation */
680 int __pthread_create_2_1(pthread_t
*thread
, const pthread_attr_t
*attr
,
681 void * (*start_routine
)(void *), void *arg
)
683 pthread_descr self
= thread_self();
684 struct pthread_request request
;
686 if (__builtin_expect (__pthread_manager_request
, 0) < 0) {
687 if (__pthread_initialize_manager() < 0) return EAGAIN
;
689 request
.req_thread
= self
;
690 request
.req_kind
= REQ_CREATE
;
691 request
.req_args
.create
.attr
= attr
;
692 request
.req_args
.create
.fn
= start_routine
;
693 request
.req_args
.create
.arg
= arg
;
694 sigprocmask(SIG_SETMASK
, (const sigset_t
*) NULL
,
695 &request
.req_args
.create
.mask
);
696 TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request
,
697 (char *) &request
, sizeof(request
)));
699 retval
= THREAD_GETMEM(self
, p_retcode
);
700 if (__builtin_expect (retval
, 0) == 0)
701 *thread
= (pthread_t
) THREAD_GETMEM(self
, p_retval
);
705 versioned_symbol (libpthread
, __pthread_create_2_1
, pthread_create
, GLIBC_2_1
);
707 #if SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_1)
709 int __pthread_create_2_0(pthread_t
*thread
, const pthread_attr_t
*attr
,
710 void * (*start_routine
)(void *), void *arg
)
712 /* The ATTR attribute is not really of type `pthread_attr_t *'. It has
713 the old size and access to the new members might crash the program.
714 We convert the struct now. */
715 pthread_attr_t new_attr
;
719 size_t ps
= __getpagesize ();
721 memcpy (&new_attr
, attr
,
722 (size_t) &(((pthread_attr_t
*)NULL
)->__guardsize
));
723 new_attr
.__guardsize
= ps
;
724 new_attr
.__stackaddr_set
= 0;
725 new_attr
.__stackaddr
= NULL
;
726 new_attr
.__stacksize
= STACK_SIZE
- ps
;
729 return __pthread_create_2_1 (thread
, attr
, start_routine
, arg
);
731 compat_symbol (libpthread
, __pthread_create_2_0
, pthread_create
, GLIBC_2_0
);
734 /* Simple operations on thread identifiers */
736 pthread_t
pthread_self(void)
738 pthread_descr self
= thread_self();
739 return THREAD_GETMEM(self
, p_tid
);
/* Compare two thread identifiers; nonzero iff they denote the same
   thread.  Identifiers are plain integral handles here, so this is a
   simple comparison.  */
int pthread_equal(pthread_t thread1, pthread_t thread2)
{
  return thread1 == thread2;
}
747 /* Helper function for thread_self in the case of user-provided stacks */
751 pthread_descr
__pthread_find_self(void)
753 char * sp
= CURRENT_STACK_FRAME
;
756 /* __pthread_handles[0] is the initial thread, __pthread_handles[1] is
757 the manager threads handled specially in thread_self(), so start at 2 */
758 h
= __pthread_handles
+ 2;
759 while (! (sp
<= (char *) h
->h_descr
&& sp
>= h
->h_bottom
)) h
++;
765 static pthread_descr
thread_self_stack(void)
767 char *sp
= CURRENT_STACK_FRAME
;
770 if (sp
>= __pthread_manager_thread_bos
&& sp
< __pthread_manager_thread_tos
)
771 return manager_thread
;
772 h
= __pthread_handles
+ 2;
774 while (h
->h_descr
== NULL
775 || ! (sp
<= (char *) h
->h_descr
->p_stackaddr
&& sp
>= h
->h_bottom
))
778 while (! (sp
<= (char *) h
->h_descr
&& sp
>= h
->h_bottom
))
786 /* Thread scheduling */
788 int pthread_setschedparam(pthread_t thread
, int policy
,
789 const struct sched_param
*param
)
791 pthread_handle handle
= thread_handle(thread
);
794 __pthread_lock(&handle
->h_lock
, NULL
);
795 if (__builtin_expect (invalid_handle(handle
, thread
), 0)) {
796 __pthread_unlock(&handle
->h_lock
);
799 th
= handle
->h_descr
;
800 if (__builtin_expect (__sched_setscheduler(th
->p_pid
, policy
, param
) == -1,
802 __pthread_unlock(&handle
->h_lock
);
805 th
->p_priority
= policy
== SCHED_OTHER
? 0 : param
->sched_priority
;
806 __pthread_unlock(&handle
->h_lock
);
807 if (__pthread_manager_request
>= 0)
808 __pthread_manager_adjust_prio(th
->p_priority
);
812 int pthread_getschedparam(pthread_t thread
, int *policy
,
813 struct sched_param
*param
)
815 pthread_handle handle
= thread_handle(thread
);
818 __pthread_lock(&handle
->h_lock
, NULL
);
819 if (__builtin_expect (invalid_handle(handle
, thread
), 0)) {
820 __pthread_unlock(&handle
->h_lock
);
823 pid
= handle
->h_descr
->p_pid
;
824 __pthread_unlock(&handle
->h_lock
);
825 pol
= __sched_getscheduler(pid
);
826 if (__builtin_expect (pol
, 0) == -1) return errno
;
827 if (__sched_getparam(pid
, param
) == -1) return errno
;
/* Yield the processor to another thread.  */
int __pthread_yield (void)
{
  /* For now this is equivalent with the POSIX call.  */
  return sched_yield ();
}
837 weak_alias (__pthread_yield
, pthread_yield
)
839 /* Process-wide exit() request */
841 static void pthread_onexit_process(int retcode
, void *arg
)
843 if (__builtin_expect (__pthread_manager_request
, 0) >= 0) {
844 struct pthread_request request
;
845 pthread_descr self
= thread_self();
847 request
.req_thread
= self
;
848 request
.req_kind
= REQ_PROCESS_EXIT
;
849 request
.req_args
.exit
.code
= retcode
;
850 TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request
,
851 (char *) &request
, sizeof(request
)));
853 /* Main thread should accumulate times for thread manager and its
854 children, so that timings for main thread account for all threads. */
855 if (self
== __pthread_main_thread
)
858 waitpid(manager_thread
->p_pid
, NULL
, __WCLONE
);
860 waitpid(__pthread_manager_thread
.p_pid
, NULL
, __WCLONE
);
862 /* Since all threads have been asynchronously terminated
863 (possibly holding locks), free cannot be used any more. */
864 /*free (__pthread_manager_thread_bos);*/
865 __pthread_manager_thread_bos
= __pthread_manager_thread_tos
= NULL
;
#ifndef HAVE_Z_NODELETE
/* Exit code recorded by pthread_atexit_retcode for later use by
   pthread_atexit_process (the two are registered as separate atexit
   handlers when the linker lacks -z nodelete support).  */
static int __pthread_atexit_retcode;

/* atexit handler: perform the process-wide thread shutdown, preferring
   the explicit retcode but falling back to the recorded one.  */
static void pthread_atexit_process(void *arg, int retcode)
{
  pthread_onexit_process (retcode ?: __pthread_atexit_retcode, arg);
}

/* atexit handler: just remember the exit code for the handler above.  */
static void pthread_atexit_retcode(void *arg, int retcode)
{
  __pthread_atexit_retcode = retcode;
}
#endif
884 /* The handler for the RESTART signal just records the signal received
885 in the thread descriptor, and optionally performs a siglongjmp
886 (for pthread_cond_timedwait). */
888 static void pthread_handle_sigrestart(int sig
)
890 pthread_descr self
= thread_self();
891 THREAD_SETMEM(self
, p_signal
, sig
);
892 if (THREAD_GETMEM(self
, p_signal_jmp
) != NULL
)
893 siglongjmp(*THREAD_GETMEM(self
, p_signal_jmp
), 1);
896 /* The handler for the CANCEL signal checks for cancellation
897 (in asynchronous mode), for process-wide exit and exec requests.
898 For the thread manager thread, redirect the signal to
899 __pthread_manager_sighandler. */
901 static void pthread_handle_sigcancel(int sig
)
903 pthread_descr self
= thread_self();
906 if (self
== manager_thread
)
909 /* A new thread might get a cancel signal before it is fully
910 initialized, so that the thread register might still point to the
911 manager thread. Double check that this is really the manager
913 pthread_descr real_self
= thread_self_stack();
914 if (real_self
== manager_thread
)
916 __pthread_manager_sighandler(sig
);
919 /* Oops, thread_self() isn't working yet.. */
921 # ifdef INIT_THREAD_SELF
922 INIT_THREAD_SELF(self
, self
->p_nr
);
925 __pthread_manager_sighandler(sig
);
929 if (__builtin_expect (__pthread_exit_requested
, 0)) {
930 /* Main thread should accumulate times for thread manager and its
931 children, so that timings for main thread account for all threads. */
932 if (self
== __pthread_main_thread
) {
934 waitpid(manager_thread
->p_pid
, NULL
, __WCLONE
);
936 waitpid(__pthread_manager_thread
.p_pid
, NULL
, __WCLONE
);
939 _exit(__pthread_exit_code
);
941 if (__builtin_expect (THREAD_GETMEM(self
, p_canceled
), 0)
942 && THREAD_GETMEM(self
, p_cancelstate
) == PTHREAD_CANCEL_ENABLE
) {
943 if (THREAD_GETMEM(self
, p_canceltype
) == PTHREAD_CANCEL_ASYNCHRONOUS
)
944 __pthread_do_exit(PTHREAD_CANCELED
, CURRENT_STACK_FRAME
);
945 jmpbuf
= THREAD_GETMEM(self
, p_cancel_jmp
);
946 if (jmpbuf
!= NULL
) {
947 THREAD_SETMEM(self
, p_cancel_jmp
, NULL
);
948 siglongjmp(*jmpbuf
, 1);
953 /* Handler for the DEBUG signal.
954 The debugging strategy is as follows:
955 On reception of a REQ_DEBUG request (sent by new threads created to
956 the thread manager under debugging mode), the thread manager throws
957 __pthread_sig_debug to itself. The debugger (if active) intercepts
958 this signal, takes into account new threads and continue execution
959 of the thread manager by propagating the signal because it doesn't
960 know what it is specifically done for. In the current implementation,
961 the thread manager simply discards it. */
static void pthread_handle_sigdebug(int sig)
{
  /* Nothing to do: the signal exists only so the debugger can intercept
     it; the thread manager simply discards it.  */
}
968 /* Reset the state of the thread machinery after a fork().
969 Close the pipe used for requests and set the main thread to the forked
971 Notice that we can't free the stack segments, as the forked thread
972 may hold pointers into them. */
974 void __pthread_reset_main_thread(void)
976 pthread_descr self
= thread_self();
978 if (__pthread_manager_request
!= -1) {
979 /* Free the thread manager stack */
980 free(__pthread_manager_thread_bos
);
981 __pthread_manager_thread_bos
= __pthread_manager_thread_tos
= NULL
;
982 /* Close the two ends of the pipe */
983 __libc_close(__pthread_manager_request
);
984 __libc_close(__pthread_manager_reader
);
985 __pthread_manager_request
= __pthread_manager_reader
= -1;
988 /* Update the pid of the main thread */
989 THREAD_SETMEM(self
, p_pid
, __getpid());
990 /* Make the forked thread the main thread */
991 __pthread_main_thread
= self
;
992 THREAD_SETMEM(self
, p_nextlive
, self
);
993 THREAD_SETMEM(self
, p_prevlive
, self
);
994 #if !(USE_TLS && HAVE___THREAD)
995 /* Now this thread modifies the global variables. */
996 THREAD_SETMEM(self
, p_errnop
, &_errno
);
997 THREAD_SETMEM(self
, p_h_errnop
, &_h_errno
);
998 THREAD_SETMEM(self
, p_resp
, &_res
);
1001 #ifndef FLOATING_STACKS
1002 /* This is to undo the setrlimit call in __pthread_init_max_stacksize.
1003 XXX This can be wrong if the user set the limit during the run. */
1005 struct rlimit limit
;
1006 if (getrlimit (RLIMIT_STACK
, &limit
) == 0
1007 && limit
.rlim_cur
!= limit
.rlim_max
)
1009 limit
.rlim_cur
= limit
.rlim_max
;
1010 setrlimit(RLIMIT_STACK
, &limit
);
1016 /* Process-wide exec() request */
1018 void __pthread_kill_other_threads_np(void)
1020 struct sigaction sa
;
1021 /* Terminate all other threads and thread manager */
1022 pthread_onexit_process(0, NULL
);
1023 /* Make current thread the main thread in case the calling thread
1024 changes its mind, does not exec(), and creates new threads instead. */
1025 __pthread_reset_main_thread();
1027 /* Reset the signal handlers behaviour for the signals the
1028 implementation uses since this would be passed to the new
1030 sigemptyset(&sa
.sa_mask
);
1032 sa
.sa_handler
= SIG_DFL
;
1033 __libc_sigaction(__pthread_sig_restart
, &sa
, NULL
);
1034 __libc_sigaction(__pthread_sig_cancel
, &sa
, NULL
);
1035 if (__pthread_sig_debug
> 0)
1036 __libc_sigaction(__pthread_sig_debug
, &sa
, NULL
);
1038 weak_alias (__pthread_kill_other_threads_np
, pthread_kill_other_threads_np
)
1040 /* Concurrency symbol level. */
1041 static int current_level
;
1043 int __pthread_setconcurrency(int level
)
1045 /* We don't do anything unless we have found a useful interpretation. */
1046 current_level
= level
;
1049 weak_alias (__pthread_setconcurrency
, pthread_setconcurrency
)
1051 int __pthread_getconcurrency(void)
1053 return current_level
;
1055 weak_alias (__pthread_getconcurrency
, pthread_getconcurrency
)
1057 /* Primitives for controlling thread execution */
1059 void __pthread_wait_for_restart_signal(pthread_descr self
)
1063 sigprocmask(SIG_SETMASK
, NULL
, &mask
); /* Get current signal mask */
1064 sigdelset(&mask
, __pthread_sig_restart
); /* Unblock the restart signal */
1065 THREAD_SETMEM(self
, p_signal
, 0);
1067 sigsuspend(&mask
); /* Wait for signal */
1068 } while (THREAD_GETMEM(self
, p_signal
) !=__pthread_sig_restart
);
1070 READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */
1073 #if !__ASSUME_REALTIME_SIGNALS
1074 /* The _old variants are for 2.0 and early 2.1 kernels which don't have RT
1076 On these kernels, we use SIGUSR1 and SIGUSR2 for restart and cancellation.
1077 Since the restart signal does not queue, we use an atomic counter to create
1078 queuing semantics. This is needed to resolve a rare race condition in
1079 pthread_cond_timedwait_relative. */
1081 void __pthread_restart_old(pthread_descr th
)
1083 if (atomic_increment(&th
->p_resume_count
) == -1)
1084 kill(th
->p_pid
, __pthread_sig_restart
);
1087 void __pthread_suspend_old(pthread_descr self
)
1089 if (atomic_decrement(&self
->p_resume_count
) <= 0)
1090 __pthread_wait_for_restart_signal(self
);
1094 __pthread_timedsuspend_old(pthread_descr self
, const struct timespec
*abstime
)
1096 sigset_t unblock
, initial_mask
;
1097 int was_signalled
= 0;
1100 if (atomic_decrement(&self
->p_resume_count
) == 0) {
1101 /* Set up a longjmp handler for the restart signal, unblock
1102 the signal and sleep. */
1104 if (sigsetjmp(jmpbuf
, 1) == 0) {
1105 THREAD_SETMEM(self
, p_signal_jmp
, &jmpbuf
);
1106 THREAD_SETMEM(self
, p_signal
, 0);
1107 /* Unblock the restart signal */
1108 sigemptyset(&unblock
);
1109 sigaddset(&unblock
, __pthread_sig_restart
);
1110 sigprocmask(SIG_UNBLOCK
, &unblock
, &initial_mask
);
1114 struct timespec reltime
;
1116 /* Compute a time offset relative to now. */
1117 __gettimeofday (&now
, NULL
);
1118 reltime
.tv_nsec
= abstime
->tv_nsec
- now
.tv_usec
* 1000;
1119 reltime
.tv_sec
= abstime
->tv_sec
- now
.tv_sec
;
1120 if (reltime
.tv_nsec
< 0) {
1121 reltime
.tv_nsec
+= 1000000000;
1122 reltime
.tv_sec
-= 1;
1125 /* Sleep for the required duration. If woken by a signal,
1126 resume waiting as required by Single Unix Specification. */
1127 if (reltime
.tv_sec
< 0 || __libc_nanosleep(&reltime
, NULL
) == 0)
1131 /* Block the restart signal again */
1132 sigprocmask(SIG_SETMASK
, &initial_mask
, NULL
);
1137 THREAD_SETMEM(self
, p_signal_jmp
, NULL
);
1140 /* Now was_signalled is true if we exited the above code
1141 due to the delivery of a restart signal. In that case,
1142 we know we have been dequeued and resumed and that the
1143 resume count is balanced. Otherwise, there are some
1144 cases to consider. First, try to bump up the resume count
1145 back to zero. If it goes to 1, it means restart() was
1146 invoked on this thread. The signal must be consumed
1147 and the count bumped down and everything is cool. We
1148 can return a 1 to the caller.
1149 Otherwise, no restart was delivered yet, so a potential
1150 race exists; we return a 0 to the caller which must deal
1151 with this race in an appropriate way; for example by
1152 atomically removing the thread from consideration for a
1153 wakeup---if such a thing fails, it means a restart is
1156 if (!was_signalled
) {
1157 if (atomic_increment(&self
->p_resume_count
) != -1) {
1158 __pthread_wait_for_restart_signal(self
);
1159 atomic_decrement(&self
->p_resume_count
); /* should be zero now! */
1160 /* woke spontaneously and consumed restart signal */
1163 /* woke spontaneously but did not consume restart---caller must resolve */
1166 /* woken due to restart signal */
1169 #endif /* __ASSUME_REALTIME_SIGNALS */
1171 void __pthread_restart_new(pthread_descr th
)
1173 /* The barrier is proabably not needed, in which case it still documents
1174 our assumptions. The intent is to commit previous writes to shared
1175 memory so the woken thread will have a consistent view. Complementary
1176 read barriers are present to the suspend functions. */
1177 WRITE_MEMORY_BARRIER();
1178 kill(th
->p_pid
, __pthread_sig_restart
);
1181 /* There is no __pthread_suspend_new because it would just
1182 be a wasteful wrapper for __pthread_wait_for_restart_signal */
1185 __pthread_timedsuspend_new(pthread_descr self
, const struct timespec
*abstime
)
1187 sigset_t unblock
, initial_mask
;
1188 int was_signalled
= 0;
1191 if (sigsetjmp(jmpbuf
, 1) == 0) {
1192 THREAD_SETMEM(self
, p_signal_jmp
, &jmpbuf
);
1193 THREAD_SETMEM(self
, p_signal
, 0);
1194 /* Unblock the restart signal */
1195 sigemptyset(&unblock
);
1196 sigaddset(&unblock
, __pthread_sig_restart
);
1197 sigprocmask(SIG_UNBLOCK
, &unblock
, &initial_mask
);
1201 struct timespec reltime
;
1203 /* Compute a time offset relative to now. */
1204 __gettimeofday (&now
, NULL
);
1205 reltime
.tv_nsec
= abstime
->tv_nsec
- now
.tv_usec
* 1000;
1206 reltime
.tv_sec
= abstime
->tv_sec
- now
.tv_sec
;
1207 if (reltime
.tv_nsec
< 0) {
1208 reltime
.tv_nsec
+= 1000000000;
1209 reltime
.tv_sec
-= 1;
1212 /* Sleep for the required duration. If woken by a signal,
1213 resume waiting as required by Single Unix Specification. */
1214 if (reltime
.tv_sec
< 0 || __libc_nanosleep(&reltime
, NULL
) == 0)
1218 /* Block the restart signal again */
1219 sigprocmask(SIG_SETMASK
, &initial_mask
, NULL
);
1224 THREAD_SETMEM(self
, p_signal_jmp
, NULL
);
1226 /* Now was_signalled is true if we exited the above code
1227 due to the delivery of a restart signal. In that case,
1228 everything is cool. We have been removed from whatever
1229 we were waiting on by the other thread, and consumed its signal.
1231 Otherwise we this thread woke up spontaneously, or due to a signal other
1232 than restart. This is an ambiguous case that must be resolved by
1233 the caller; the thread is still eligible for a restart wakeup
1234 so there is a race. */
1236 READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */
1237 return was_signalled
;
/* Debug helper: format FMT/... prefixed with the (5-digit, zero-padded)
   process id and emit it to stderr in a single write(2) call so messages
   from concurrent threads are not interleaved mid-line.  */
void __pthread_message(const char * fmt, ...)
{
  char buffer[1024];
  va_list args;

  sprintf(buffer, "%05d : ", __getpid());   /* prefix occupies bytes 0..7 */
  va_start(args, fmt);
  vsnprintf(buffer + 8, sizeof(buffer) - 8, fmt, args);
  va_end(args);  /* required to balance va_start (C standard) */
  TEMP_FAILURE_RETRY(__libc_write(2, buffer, strlen(buffer)));
}
/* We need a hook to force the cancelation wrappers and file locking
   to be linked in when static libpthread is used.  Taking the address
   of the provider symbols creates an undefined reference the static
   linker must satisfy, pulling in the corresponding objects.  */
extern const int __pthread_provide_wrappers;
static const int *const __pthread_require_wrappers =
  &__pthread_provide_wrappers;
extern const int __pthread_provide_lockfile;
static const int *const __pthread_require_lockfile =
  &__pthread_provide_lockfile;