/* [glibc.git] linuxthreads/pthread.c
   blob e2042bdb74292acdc2dee53cc9be912cd9a76741 */
2 /* Linuxthreads - a simple clone()-based implementation of Posix */
3 /* threads for Linux. */
4 /* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr) */
5 /* */
6 /* This program is free software; you can redistribute it and/or */
7 /* modify it under the terms of the GNU Library General Public License */
8 /* as published by the Free Software Foundation; either version 2 */
9 /* of the License, or (at your option) any later version. */
10 /* */
11 /* This program is distributed in the hope that it will be useful, */
12 /* but WITHOUT ANY WARRANTY; without even the implied warranty of */
13 /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
14 /* GNU Library General Public License for more details. */
16 /* Thread creation, initialization, and basic low-level routines */
18 #include <errno.h>
19 #include <stddef.h>
20 #include <stdio.h>
21 #include <stdlib.h>
22 #include <string.h>
23 #include <unistd.h>
24 #include <fcntl.h>
25 #include <sys/wait.h>
26 #include <sys/resource.h>
27 #include <sys/time.h>
28 #include <shlib-compat.h>
29 #include "pthread.h"
30 #include "internals.h"
31 #include "spinlock.h"
32 #include "restart.h"
33 #include "smp.h"
34 #include <ldsodefs.h>
35 #include <tls.h>
36 #include <locale.h> /* for __uselocale */
37 #include <version.h>
39 /* Sanity check. */
40 #if __ASSUME_REALTIME_SIGNALS && !defined __SIGRTMIN
41 # error "This must not happen; new kernel assumed but old headers"
42 #endif
44 #if !(USE_TLS && HAVE___THREAD)
45 /* These variables are used by the setup code. */
46 extern int _errno;
47 extern int _h_errno;
49 /* We need the global/static resolver state here. */
50 # include <resolv.h>
51 # undef _res
53 extern struct __res_state _res;
54 #endif
56 #ifdef USE_TLS
58 /* We need only a few variables. */
59 static pthread_descr manager_thread;
61 #else
63 /* Descriptor of the initial thread */
65 struct _pthread_descr_struct __pthread_initial_thread = {
66 .p_header.data.self = &__pthread_initial_thread,
67 .p_nextlive = &__pthread_initial_thread,
68 .p_prevlive = &__pthread_initial_thread,
69 .p_tid = PTHREAD_THREADS_MAX,
70 .p_lock = &__pthread_handles[0].h_lock,
71 .p_start_args = PTHREAD_START_ARGS_INITIALIZER(NULL),
72 #if !(USE_TLS && HAVE___THREAD)
73 .p_errnop = &_errno,
74 .p_h_errnop = &_h_errno,
75 .p_resp = &_res,
76 #endif
77 .p_userstack = 1,
78 .p_resume_count = __ATOMIC_INITIALIZER,
79 .p_alloca_cutoff = __MAX_ALLOCA_CUTOFF
82 /* Descriptor of the manager thread; none of this is used but the error
83 variables, the p_pid and p_priority fields,
84 and the address for identification. */
86 #define manager_thread (&__pthread_manager_thread)
87 struct _pthread_descr_struct __pthread_manager_thread = {
88 .p_header.data.self = &__pthread_manager_thread,
89 .p_lock = &__pthread_handles[1].h_lock,
90 .p_start_args = PTHREAD_START_ARGS_INITIALIZER(__pthread_manager),
91 #if !(USE_TLS && HAVE___THREAD)
92 .p_errnop = &__pthread_manager_thread.p_errno,
93 #endif
94 .p_nr = 1,
95 .p_resume_count = __ATOMIC_INITIALIZER,
96 .p_alloca_cutoff = PTHREAD_STACK_MIN / 4
98 #endif
100 /* Pointer to the main thread (the father of the thread manager thread) */
101 /* Originally, this is the initial thread, but this changes after fork() */
103 #ifdef USE_TLS
104 pthread_descr __pthread_main_thread;
105 #else
106 pthread_descr __pthread_main_thread = &__pthread_initial_thread;
107 #endif
109 /* Limit between the stack of the initial thread (above) and the
110 stacks of other threads (below). Aligned on a STACK_SIZE boundary. */
112 char *__pthread_initial_thread_bos;
114 /* File descriptor for sending requests to the thread manager. */
115 /* Initially -1, meaning that the thread manager is not running. */
117 int __pthread_manager_request = -1;
119 /* Other end of the pipe for sending requests to the thread manager. */
121 int __pthread_manager_reader;
123 /* Limits of the thread manager stack */
125 char *__pthread_manager_thread_bos;
126 char *__pthread_manager_thread_tos;
128 /* For process-wide exit() */
130 int __pthread_exit_requested;
131 int __pthread_exit_code;
133 /* Maximum stack size. */
134 size_t __pthread_max_stacksize;
/* Nonzero if the machine has more than one processor.  */
137 int __pthread_smp_kernel;
140 #if !__ASSUME_REALTIME_SIGNALS
141 /* Pointers that select new or old suspend/resume functions
142 based on availability of rt signals. */
144 void (*__pthread_restart)(pthread_descr) = __pthread_restart_old;
145 void (*__pthread_suspend)(pthread_descr) = __pthread_suspend_old;
146 int (*__pthread_timedsuspend)(pthread_descr, const struct timespec *) = __pthread_timedsuspend_old;
147 #endif /* __ASSUME_REALTIME_SIGNALS */
149 /* Communicate relevant LinuxThreads constants to gdb */
151 const int __pthread_threads_max = PTHREAD_THREADS_MAX;
152 const int __pthread_sizeof_handle = sizeof(struct pthread_handle_struct);
153 const int __pthread_offsetof_descr = offsetof(struct pthread_handle_struct,
154 h_descr);
155 const int __pthread_offsetof_pid = offsetof(struct _pthread_descr_struct,
156 p_pid);
157 const int __linuxthreads_pthread_sizeof_descr
158 = sizeof(struct _pthread_descr_struct);
160 const int __linuxthreads_initial_report_events;
162 const char __linuxthreads_version[] = VERSION;
164 /* Forward declarations */
166 static void pthread_onexit_process(int retcode, void *arg);
167 #ifndef HAVE_Z_NODELETE
168 static void pthread_atexit_process(void *arg, int retcode);
169 static void pthread_atexit_retcode(void *arg, int retcode);
170 #endif
171 static void pthread_handle_sigcancel(int sig);
172 static void pthread_handle_sigrestart(int sig);
173 static void pthread_handle_sigdebug(int sig);
175 /* Signal numbers used for the communication.
176 In these variables we keep track of the used variables. If the
177 platform does not support any real-time signals we will define the
178 values to some unreasonable value which will signal failing of all
179 the functions below. */
180 #ifndef __SIGRTMIN
181 static int current_rtmin = -1;
182 static int current_rtmax = -1;
183 int __pthread_sig_restart = SIGUSR1;
184 int __pthread_sig_cancel = SIGUSR2;
185 int __pthread_sig_debug;
186 #else
187 static int current_rtmin;
188 static int current_rtmax;
190 #if __SIGRTMAX - __SIGRTMIN >= 3
191 int __pthread_sig_restart = __SIGRTMIN;
192 int __pthread_sig_cancel = __SIGRTMIN + 1;
193 int __pthread_sig_debug = __SIGRTMIN + 2;
194 #else
195 int __pthread_sig_restart = SIGUSR1;
196 int __pthread_sig_cancel = SIGUSR2;
197 int __pthread_sig_debug;
198 #endif
200 static int rtsigs_initialized;
202 #if !__ASSUME_REALTIME_SIGNALS
203 # include "testrtsig.h"
204 #endif
206 static void
207 init_rtsigs (void)
209 #if !__ASSUME_REALTIME_SIGNALS
210 if (__builtin_expect (!kernel_has_rtsig (), 0))
212 current_rtmin = -1;
213 current_rtmax = -1;
214 # if __SIGRTMAX - __SIGRTMIN >= 3
215 __pthread_sig_restart = SIGUSR1;
216 __pthread_sig_cancel = SIGUSR2;
217 __pthread_sig_debug = 0;
218 # endif
220 else
221 #endif /* __ASSUME_REALTIME_SIGNALS */
223 #if __SIGRTMAX - __SIGRTMIN >= 3
224 current_rtmin = __SIGRTMIN + 3;
225 # if !__ASSUME_REALTIME_SIGNALS
226 __pthread_restart = __pthread_restart_new;
227 __pthread_suspend = __pthread_wait_for_restart_signal;
228 __pthread_timedsuspend = __pthread_timedsuspend_new;
229 # endif /* __ASSUME_REALTIME_SIGNALS */
230 #else
231 current_rtmin = __SIGRTMIN;
232 #endif
234 current_rtmax = __SIGRTMAX;
237 rtsigs_initialized = 1;
239 #endif
241 /* Return number of available real-time signal with highest priority. */
243 __libc_current_sigrtmin (void)
245 #ifdef __SIGRTMIN
246 if (__builtin_expect (!rtsigs_initialized, 0))
247 init_rtsigs ();
248 #endif
249 return current_rtmin;
252 /* Return number of available real-time signal with lowest priority. */
254 __libc_current_sigrtmax (void)
256 #ifdef __SIGRTMIN
257 if (__builtin_expect (!rtsigs_initialized, 0))
258 init_rtsigs ();
259 #endif
260 return current_rtmax;
/* Allocate real-time signal with highest/lowest available
   priority.  Please note that we don't use a lock since we assume
   this function to be called at program start.  Returns -1 when no
   real-time signal is available.  */
int
__libc_allocate_rtsig (int high)
{
#ifndef __SIGRTMIN
  return -1;
#else
  if (__builtin_expect (!rtsigs_initialized, 0))
    init_rtsigs ();
  if (__builtin_expect (current_rtmin == -1, 0)
      || __builtin_expect (current_rtmin > current_rtmax, 0))
    /* We don't have any more signals available.  */
    return -1;

  return high ? current_rtmin++ : current_rtmax--;
#endif
}
284 /* Initialize the pthread library.
285 Initialization is split in two functions:
286 - a constructor function that blocks the __pthread_sig_restart signal
287 (must do this very early, since the program could capture the signal
288 mask with e.g. sigsetjmp before creating the first thread);
289 - a regular function called from pthread_create when needed. */
291 static void pthread_initialize(void) __attribute__((constructor));
293 #ifndef HAVE_Z_NODELETE
294 extern void *__dso_handle __attribute__ ((weak));
295 #endif
298 #if defined USE_TLS && !defined SHARED
299 extern void __libc_setup_tls (size_t tcbsize, size_t tcbalign);
300 #endif
303 /* Do some minimal initialization which has to be done during the
304 startup of the C library. */
305 void
306 __pthread_initialize_minimal(void)
308 #ifdef USE_TLS
309 pthread_descr self;
311 /* First of all init __pthread_handles[0] and [1] if needed. */
312 # if __LT_SPINLOCK_INIT != 0
313 __pthread_handles[0].h_lock = __LOCK_INITIALIZER;
314 __pthread_handles[1].h_lock = __LOCK_INITIALIZER;
315 # endif
316 # ifndef SHARED
317 /* Unlike in the dynamically linked case the dynamic linker has not
318 taken care of initializing the TLS data structures. */
319 __libc_setup_tls (TLS_TCB_SIZE, TLS_TCB_ALIGN);
320 # endif
322 self = THREAD_SELF;
324 /* The memory for the thread descriptor was allocated elsewhere as
325 part of the TLS allocation. We have to initialize the data
326 structure by hand. This initialization must mirror the struct
327 definition above. */
328 self->p_nextlive = self->p_prevlive = self;
329 self->p_tid = PTHREAD_THREADS_MAX;
330 self->p_lock = &__pthread_handles[0].h_lock;
331 # ifndef HAVE___THREAD
332 self->p_errnop = &_errno;
333 self->p_h_errnop = &_h_errno;
334 # endif
335 /* self->p_start_args need not be initialized, it's all zero. */
336 self->p_userstack = 1;
337 # if __LT_SPINLOCK_INIT != 0
338 self->p_resume_count = (struct pthread_atomic) __ATOMIC_INITIALIZER;
339 # endif
340 self->p_alloca_cutoff = __MAX_ALLOCA_CUTOFF;
342 /* Another variable which points to the thread descriptor. */
343 __pthread_main_thread = self;
345 /* And fill in the pointer the the thread __pthread_handles array. */
346 __pthread_handles[0].h_descr = self;
347 #else
348 /* First of all init __pthread_handles[0] and [1]. */
349 # if __LT_SPINLOCK_INIT != 0
350 __pthread_handles[0].h_lock = __LOCK_INITIALIZER;
351 __pthread_handles[1].h_lock = __LOCK_INITIALIZER;
352 # endif
353 __pthread_handles[0].h_descr = &__pthread_initial_thread;
354 __pthread_handles[1].h_descr = &__pthread_manager_thread;
356 /* If we have special thread_self processing, initialize that for the
357 main thread now. */
358 # ifdef INIT_THREAD_SELF
359 INIT_THREAD_SELF(&__pthread_initial_thread, 0);
360 # endif
361 #endif
363 #if HP_TIMING_AVAIL
364 # ifdef USE_TLS
365 self->p_cpuclock_offset = GL(dl_cpuclock_offset);
366 # else
367 __pthread_initial_thread.p_cpuclock_offset = GL(dl_cpuclock_offset);
368 # endif
369 #endif
371 #if !(USE_TLS && HAVE___THREAD)
372 /* Initialize thread-locale current locale to point to the global one.
373 With __thread support, the variable's initializer takes care of this. */
374 __uselocale (LC_GLOBAL_LOCALE);
375 #endif
379 void
380 __pthread_init_max_stacksize(void)
382 struct rlimit limit;
383 size_t max_stack;
385 getrlimit(RLIMIT_STACK, &limit);
386 #ifdef FLOATING_STACKS
387 if (limit.rlim_cur == RLIM_INFINITY)
388 limit.rlim_cur = ARCH_STACK_MAX_SIZE;
389 # ifdef NEED_SEPARATE_REGISTER_STACK
390 max_stack = limit.rlim_cur / 2;
391 # else
392 max_stack = limit.rlim_cur;
393 # endif
394 #else
395 /* Play with the stack size limit to make sure that no stack ever grows
396 beyond STACK_SIZE minus one page (to act as a guard page). */
397 # ifdef NEED_SEPARATE_REGISTER_STACK
398 /* STACK_SIZE bytes hold both the main stack and register backing
399 store. The rlimit value applies to each individually. */
400 max_stack = STACK_SIZE/2 - __getpagesize ();
401 # else
402 max_stack = STACK_SIZE - __getpagesize();
403 # endif
404 if (limit.rlim_cur > max_stack) {
405 limit.rlim_cur = max_stack;
406 setrlimit(RLIMIT_STACK, &limit);
408 #endif
409 __pthread_max_stacksize = max_stack;
410 if (max_stack / 4 < __MAX_ALLOCA_CUTOFF)
412 #ifdef USE_TLS
413 pthread_descr self = THREAD_SELF;
414 self->p_alloca_cutoff = max_stack / 4;
415 #else
416 __pthread_initial_thread.p_alloca_cutoff = max_stack / 4;
417 #endif
422 static void pthread_initialize(void)
424 struct sigaction sa;
425 sigset_t mask;
427 /* If already done (e.g. by a constructor called earlier!), bail out */
428 if (__pthread_initial_thread_bos != NULL) return;
429 #ifdef TEST_FOR_COMPARE_AND_SWAP
430 /* Test if compare-and-swap is available */
431 __pthread_has_cas = compare_and_swap_is_available();
432 #endif
433 #ifdef FLOATING_STACKS
434 /* We don't need to know the bottom of the stack. Give the pointer some
435 value to signal that initialization happened. */
436 __pthread_initial_thread_bos = (void *) -1l;
437 #else
438 /* Determine stack size limits . */
439 __pthread_init_max_stacksize ();
440 # ifdef _STACK_GROWS_UP
441 /* The initial thread already has all the stack it needs */
442 __pthread_initial_thread_bos = (char *)
443 ((long)CURRENT_STACK_FRAME &~ (STACK_SIZE - 1));
444 # else
445 /* For the initial stack, reserve at least STACK_SIZE bytes of stack
446 below the current stack address, and align that on a
447 STACK_SIZE boundary. */
448 __pthread_initial_thread_bos =
449 (char *)(((long)CURRENT_STACK_FRAME - 2 * STACK_SIZE) & ~(STACK_SIZE - 1));
450 # endif
451 #endif
452 #ifdef USE_TLS
453 /* Update the descriptor for the initial thread. */
454 THREAD_SETMEM (((pthread_descr) NULL), p_pid, __getpid());
455 # ifndef HAVE___THREAD
456 /* Likewise for the resolver state _res. */
457 THREAD_SETMEM (((pthread_descr) NULL), p_resp, &_res);
458 # endif
459 #else
460 /* Update the descriptor for the initial thread. */
461 __pthread_initial_thread.p_pid = __getpid();
462 /* Likewise for the resolver state _res. */
463 __pthread_initial_thread.p_resp = &_res;
464 #endif
465 #ifdef __SIGRTMIN
466 /* Initialize real-time signals. */
467 init_rtsigs ();
468 #endif
469 /* Setup signal handlers for the initial thread.
470 Since signal handlers are shared between threads, these settings
471 will be inherited by all other threads. */
472 sa.sa_handler = pthread_handle_sigrestart;
473 sigemptyset(&sa.sa_mask);
474 sa.sa_flags = 0;
475 __libc_sigaction(__pthread_sig_restart, &sa, NULL);
476 sa.sa_handler = pthread_handle_sigcancel;
477 // sa.sa_flags = 0;
478 __libc_sigaction(__pthread_sig_cancel, &sa, NULL);
479 if (__pthread_sig_debug > 0) {
480 sa.sa_handler = pthread_handle_sigdebug;
481 sigemptyset(&sa.sa_mask);
482 // sa.sa_flags = 0;
483 __libc_sigaction(__pthread_sig_debug, &sa, NULL);
485 /* Initially, block __pthread_sig_restart. Will be unblocked on demand. */
486 sigemptyset(&mask);
487 sigaddset(&mask, __pthread_sig_restart);
488 sigprocmask(SIG_BLOCK, &mask, NULL);
489 /* Register an exit function to kill all other threads. */
490 /* Do it early so that user-registered atexit functions are called
491 before pthread_*exit_process. */
492 #ifndef HAVE_Z_NODELETE
493 if (__builtin_expect (&__dso_handle != NULL, 1))
494 __cxa_atexit ((void (*) (void *)) pthread_atexit_process, NULL,
495 __dso_handle);
496 else
497 #endif
498 __on_exit (pthread_onexit_process, NULL);
499 /* How many processors. */
500 __pthread_smp_kernel = is_smp_system ();
/* Public entry point wrapping the internal one-time initializer.  */
void __pthread_initialize(void)
{
  pthread_initialize();
}
508 int __pthread_initialize_manager(void)
510 int manager_pipe[2];
511 int pid;
512 struct pthread_request request;
513 int report_events;
514 pthread_descr tcb;
516 #ifndef HAVE_Z_NODELETE
517 if (__builtin_expect (&__dso_handle != NULL, 1))
518 __cxa_atexit ((void (*) (void *)) pthread_atexit_retcode, NULL,
519 __dso_handle);
520 #endif
522 if (__pthread_max_stacksize == 0)
523 __pthread_init_max_stacksize ();
524 /* If basic initialization not done yet (e.g. we're called from a
525 constructor run before our constructor), do it now */
526 if (__pthread_initial_thread_bos == NULL) pthread_initialize();
527 /* Setup stack for thread manager */
528 __pthread_manager_thread_bos = malloc(THREAD_MANAGER_STACK_SIZE);
529 if (__pthread_manager_thread_bos == NULL) return -1;
530 __pthread_manager_thread_tos =
531 __pthread_manager_thread_bos + THREAD_MANAGER_STACK_SIZE;
532 /* Setup pipe to communicate with thread manager */
533 if (pipe(manager_pipe) == -1) {
534 free(__pthread_manager_thread_bos);
535 return -1;
538 #ifdef USE_TLS
539 /* Allocate memory for the thread descriptor and the dtv. */
540 __pthread_handles[1].h_descr = manager_thread = tcb
541 = _dl_allocate_tls (NULL);
542 if (tcb == NULL) {
543 free(__pthread_manager_thread_bos);
544 __libc_close(manager_pipe[0]);
545 __libc_close(manager_pipe[1]);
546 return -1;
549 /* Initialize the descriptor. */
550 tcb->p_header.data.tcb = tcb;
551 tcb->p_header.data.self = tcb;
552 tcb->p_lock = &__pthread_handles[1].h_lock;
553 # ifndef HAVE___THREAD
554 tcb->p_errnop = &tcb->p_errno;
555 # endif
556 tcb->p_start_args = (struct pthread_start_args) PTHREAD_START_ARGS_INITIALIZER(__pthread_manager);
557 tcb->p_nr = 1;
558 # if __LT_SPINLOCK_INIT != 0
559 self->p_resume_count = (struct pthread_atomic) __ATOMIC_INITIALIZER;
560 # endif
561 tcb->p_alloca_cutoff = PTHREAD_STACK_MIN / 4;
562 #else
563 tcb = &__pthread_manager_thread;
564 #endif
566 __pthread_manager_request = manager_pipe[1]; /* writing end */
567 __pthread_manager_reader = manager_pipe[0]; /* reading end */
569 /* Start the thread manager */
570 pid = 0;
571 #ifdef USE_TLS
572 if (__linuxthreads_initial_report_events != 0)
573 THREAD_SETMEM (((pthread_descr) NULL), p_report_events,
574 __linuxthreads_initial_report_events);
575 report_events = THREAD_GETMEM (((pthread_descr) NULL), p_report_events);
576 #else
577 if (__linuxthreads_initial_report_events != 0)
578 __pthread_initial_thread.p_report_events
579 = __linuxthreads_initial_report_events;
580 report_events = __pthread_initial_thread.p_report_events;
581 #endif
582 if (__builtin_expect (report_events, 0))
584 /* It's a bit more complicated. We have to report the creation of
585 the manager thread. */
586 int idx = __td_eventword (TD_CREATE);
587 uint32_t mask = __td_eventmask (TD_CREATE);
588 uint32_t event_bits;
590 #ifdef USE_TLS
591 event_bits = THREAD_GETMEM_NC (((pthread_descr) NULL),
592 p_eventbuf.eventmask.event_bits[idx]);
593 #else
594 event_bits = __pthread_initial_thread.p_eventbuf.eventmask.event_bits[idx];
595 #endif
597 if ((mask & (__pthread_threads_events.event_bits[idx] | event_bits))
598 != 0)
600 __pthread_lock(tcb->p_lock, NULL);
602 #ifdef NEED_SEPARATE_REGISTER_STACK
603 pid = __clone2(__pthread_manager_event,
604 (void **) __pthread_manager_thread_bos,
605 THREAD_MANAGER_STACK_SIZE,
606 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
607 tcb);
608 #elif _STACK_GROWS_UP
609 pid = __clone(__pthread_manager_event,
610 (void **) __pthread_manager_thread_bos,
611 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
612 tcb);
613 #else
614 pid = __clone(__pthread_manager_event,
615 (void **) __pthread_manager_thread_tos,
616 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
617 tcb);
618 #endif
620 if (pid != -1)
622 /* Now fill in the information about the new thread in
623 the newly created thread's data structure. We cannot let
624 the new thread do this since we don't know whether it was
625 already scheduled when we send the event. */
626 tcb->p_eventbuf.eventdata = tcb;
627 tcb->p_eventbuf.eventnum = TD_CREATE;
628 __pthread_last_event = tcb;
629 tcb->p_tid = 2* PTHREAD_THREADS_MAX + 1;
630 tcb->p_pid = pid;
632 /* Now call the function which signals the event. */
633 __linuxthreads_create_event ();
636 /* Now restart the thread. */
637 __pthread_unlock(tcb->p_lock);
641 if (__builtin_expect (pid, 0) == 0)
643 #ifdef NEED_SEPARATE_REGISTER_STACK
644 pid = __clone2(__pthread_manager, (void **) __pthread_manager_thread_bos,
645 THREAD_MANAGER_STACK_SIZE,
646 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND, tcb);
647 #elif _STACK_GROWS_UP
648 pid = __clone(__pthread_manager, (void **) __pthread_manager_thread_bos,
649 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND, tcb);
650 #else
651 pid = __clone(__pthread_manager, (void **) __pthread_manager_thread_tos,
652 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND, tcb);
653 #endif
655 if (__builtin_expect (pid, 0) == -1) {
656 free(__pthread_manager_thread_bos);
657 __libc_close(manager_pipe[0]);
658 __libc_close(manager_pipe[1]);
659 return -1;
661 tcb->p_tid = 2* PTHREAD_THREADS_MAX + 1;
662 tcb->p_pid = pid;
663 /* Make gdb aware of new thread manager */
664 if (__builtin_expect (__pthread_threads_debug, 0) && __pthread_sig_debug > 0)
666 raise(__pthread_sig_debug);
667 /* We suspend ourself and gdb will wake us up when it is
668 ready to handle us. */
669 __pthread_wait_for_restart_signal(thread_self());
671 /* Synchronize debugging of the thread manager */
672 request.req_kind = REQ_DEBUG;
673 TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
674 (char *) &request, sizeof(request)));
675 return 0;
678 /* Thread creation */
680 int __pthread_create_2_1(pthread_t *thread, const pthread_attr_t *attr,
681 void * (*start_routine)(void *), void *arg)
683 pthread_descr self = thread_self();
684 struct pthread_request request;
685 int retval;
686 if (__builtin_expect (__pthread_manager_request, 0) < 0) {
687 if (__pthread_initialize_manager() < 0) return EAGAIN;
689 request.req_thread = self;
690 request.req_kind = REQ_CREATE;
691 request.req_args.create.attr = attr;
692 request.req_args.create.fn = start_routine;
693 request.req_args.create.arg = arg;
694 sigprocmask(SIG_SETMASK, (const sigset_t *) NULL,
695 &request.req_args.create.mask);
696 TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
697 (char *) &request, sizeof(request)));
698 suspend(self);
699 retval = THREAD_GETMEM(self, p_retcode);
700 if (__builtin_expect (retval, 0) == 0)
701 *thread = (pthread_t) THREAD_GETMEM(self, p_retval);
702 return retval;
705 versioned_symbol (libpthread, __pthread_create_2_1, pthread_create, GLIBC_2_1);
707 #if SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_1)
709 int __pthread_create_2_0(pthread_t *thread, const pthread_attr_t *attr,
710 void * (*start_routine)(void *), void *arg)
712 /* The ATTR attribute is not really of type `pthread_attr_t *'. It has
713 the old size and access to the new members might crash the program.
714 We convert the struct now. */
715 pthread_attr_t new_attr;
717 if (attr != NULL)
719 size_t ps = __getpagesize ();
721 memcpy (&new_attr, attr,
722 (size_t) &(((pthread_attr_t*)NULL)->__guardsize));
723 new_attr.__guardsize = ps;
724 new_attr.__stackaddr_set = 0;
725 new_attr.__stackaddr = NULL;
726 new_attr.__stacksize = STACK_SIZE - ps;
727 attr = &new_attr;
729 return __pthread_create_2_1 (thread, attr, start_routine, arg);
731 compat_symbol (libpthread, __pthread_create_2_0, pthread_create, GLIBC_2_0);
732 #endif
734 /* Simple operations on thread identifiers */
736 pthread_t pthread_self(void)
738 pthread_descr self = thread_self();
739 return THREAD_GETMEM(self, p_tid);
/* Two thread IDs are equal iff the integer identifiers compare equal.  */
int pthread_equal(pthread_t thread1, pthread_t thread2)
{
  return thread1 == thread2;
}
747 /* Helper function for thread_self in the case of user-provided stacks */
749 #ifndef THREAD_SELF
751 pthread_descr __pthread_find_self(void)
753 char * sp = CURRENT_STACK_FRAME;
754 pthread_handle h;
756 /* __pthread_handles[0] is the initial thread, __pthread_handles[1] is
757 the manager threads handled specially in thread_self(), so start at 2 */
758 h = __pthread_handles + 2;
759 while (! (sp <= (char *) h->h_descr && sp >= h->h_bottom)) h++;
760 return h->h_descr;
763 #else
765 static pthread_descr thread_self_stack(void)
767 char *sp = CURRENT_STACK_FRAME;
768 pthread_handle h;
770 if (sp >= __pthread_manager_thread_bos && sp < __pthread_manager_thread_tos)
771 return manager_thread;
772 h = __pthread_handles + 2;
773 # ifdef USE_TLS
774 while (h->h_descr == NULL
775 || ! (sp <= (char *) h->h_descr->p_stackaddr && sp >= h->h_bottom))
776 h++;
777 # else
778 while (! (sp <= (char *) h->h_descr && sp >= h->h_bottom))
779 h++;
780 # endif
781 return h->h_descr;
784 #endif
786 /* Thread scheduling */
788 int pthread_setschedparam(pthread_t thread, int policy,
789 const struct sched_param *param)
791 pthread_handle handle = thread_handle(thread);
792 pthread_descr th;
794 __pthread_lock(&handle->h_lock, NULL);
795 if (__builtin_expect (invalid_handle(handle, thread), 0)) {
796 __pthread_unlock(&handle->h_lock);
797 return ESRCH;
799 th = handle->h_descr;
800 if (__builtin_expect (__sched_setscheduler(th->p_pid, policy, param) == -1,
801 0)) {
802 __pthread_unlock(&handle->h_lock);
803 return errno;
805 th->p_priority = policy == SCHED_OTHER ? 0 : param->sched_priority;
806 __pthread_unlock(&handle->h_lock);
807 if (__pthread_manager_request >= 0)
808 __pthread_manager_adjust_prio(th->p_priority);
809 return 0;
812 int pthread_getschedparam(pthread_t thread, int *policy,
813 struct sched_param *param)
815 pthread_handle handle = thread_handle(thread);
816 int pid, pol;
818 __pthread_lock(&handle->h_lock, NULL);
819 if (__builtin_expect (invalid_handle(handle, thread), 0)) {
820 __pthread_unlock(&handle->h_lock);
821 return ESRCH;
823 pid = handle->h_descr->p_pid;
824 __pthread_unlock(&handle->h_lock);
825 pol = __sched_getscheduler(pid);
826 if (__builtin_expect (pol, 0) == -1) return errno;
827 if (__sched_getparam(pid, param) == -1) return errno;
828 *policy = pol;
829 return 0;
int __pthread_yield (void)
{
  /* For now this is equivalent with the POSIX call.    */
  return sched_yield ();
}
837 weak_alias (__pthread_yield, pthread_yield)
839 /* Process-wide exit() request */
841 static void pthread_onexit_process(int retcode, void *arg)
843 if (__builtin_expect (__pthread_manager_request, 0) >= 0) {
844 struct pthread_request request;
845 pthread_descr self = thread_self();
847 request.req_thread = self;
848 request.req_kind = REQ_PROCESS_EXIT;
849 request.req_args.exit.code = retcode;
850 TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
851 (char *) &request, sizeof(request)));
852 suspend(self);
853 /* Main thread should accumulate times for thread manager and its
854 children, so that timings for main thread account for all threads. */
855 if (self == __pthread_main_thread)
857 #ifdef USE_TLS
858 waitpid(manager_thread->p_pid, NULL, __WCLONE);
859 #else
860 waitpid(__pthread_manager_thread.p_pid, NULL, __WCLONE);
861 #endif
862 /* Since all threads have been asynchronously terminated
863 (possibly holding locks), free cannot be used any more. */
864 /*free (__pthread_manager_thread_bos);*/
865 __pthread_manager_thread_bos = __pthread_manager_thread_tos = NULL;
#ifndef HAVE_Z_NODELETE
/* Exit code remembered by pthread_atexit_retcode for the later
   pthread_atexit_process call (the __cxa_atexit argument order differs
   from on_exit's).  */
static int __pthread_atexit_retcode;

static void pthread_atexit_process(void *arg, int retcode)
{
  pthread_onexit_process (retcode ?: __pthread_atexit_retcode, arg);
}

static void pthread_atexit_retcode(void *arg, int retcode)
{
  __pthread_atexit_retcode = retcode;
}
#endif
884 /* The handler for the RESTART signal just records the signal received
885 in the thread descriptor, and optionally performs a siglongjmp
886 (for pthread_cond_timedwait). */
888 static void pthread_handle_sigrestart(int sig)
890 pthread_descr self = thread_self();
891 THREAD_SETMEM(self, p_signal, sig);
892 if (THREAD_GETMEM(self, p_signal_jmp) != NULL)
893 siglongjmp(*THREAD_GETMEM(self, p_signal_jmp), 1);
896 /* The handler for the CANCEL signal checks for cancellation
897 (in asynchronous mode), for process-wide exit and exec requests.
898 For the thread manager thread, redirect the signal to
899 __pthread_manager_sighandler. */
901 static void pthread_handle_sigcancel(int sig)
903 pthread_descr self = thread_self();
904 sigjmp_buf * jmpbuf;
906 if (self == manager_thread)
908 #ifdef THREAD_SELF
909 /* A new thread might get a cancel signal before it is fully
910 initialized, so that the thread register might still point to the
911 manager thread. Double check that this is really the manager
912 thread. */
913 pthread_descr real_self = thread_self_stack();
914 if (real_self == manager_thread)
916 __pthread_manager_sighandler(sig);
917 return;
919 /* Oops, thread_self() isn't working yet.. */
920 self = real_self;
921 # ifdef INIT_THREAD_SELF
922 INIT_THREAD_SELF(self, self->p_nr);
923 # endif
924 #else
925 __pthread_manager_sighandler(sig);
926 return;
927 #endif
929 if (__builtin_expect (__pthread_exit_requested, 0)) {
930 /* Main thread should accumulate times for thread manager and its
931 children, so that timings for main thread account for all threads. */
932 if (self == __pthread_main_thread) {
933 #ifdef USE_TLS
934 waitpid(manager_thread->p_pid, NULL, __WCLONE);
935 #else
936 waitpid(__pthread_manager_thread.p_pid, NULL, __WCLONE);
937 #endif
939 _exit(__pthread_exit_code);
941 if (__builtin_expect (THREAD_GETMEM(self, p_canceled), 0)
942 && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
943 if (THREAD_GETMEM(self, p_canceltype) == PTHREAD_CANCEL_ASYNCHRONOUS)
944 __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
945 jmpbuf = THREAD_GETMEM(self, p_cancel_jmp);
946 if (jmpbuf != NULL) {
947 THREAD_SETMEM(self, p_cancel_jmp, NULL);
948 siglongjmp(*jmpbuf, 1);
/* Handler for the DEBUG signal.
   The debugging strategy is as follows:
   On reception of a REQ_DEBUG request (sent by new threads created to
   the thread manager under debugging mode), the thread manager throws
   __pthread_sig_debug to itself. The debugger (if active) intercepts
   this signal, takes into account new threads and continues execution
   of the thread manager by propagating the signal because it doesn't
   know what it is specifically done for. In the current implementation,
   the thread manager simply discards it. */

static void pthread_handle_sigdebug(int sig)
{
  /* Nothing */
}
968 /* Reset the state of the thread machinery after a fork().
969 Close the pipe used for requests and set the main thread to the forked
970 thread.
971 Notice that we can't free the stack segments, as the forked thread
972 may hold pointers into them. */
974 void __pthread_reset_main_thread(void)
976 pthread_descr self = thread_self();
978 if (__pthread_manager_request != -1) {
979 /* Free the thread manager stack */
980 free(__pthread_manager_thread_bos);
981 __pthread_manager_thread_bos = __pthread_manager_thread_tos = NULL;
982 /* Close the two ends of the pipe */
983 __libc_close(__pthread_manager_request);
984 __libc_close(__pthread_manager_reader);
/* -1 marks "no manager running", matching the guard above. */
985 __pthread_manager_request = __pthread_manager_reader = -1;
988 /* Update the pid of the main thread */
989 THREAD_SETMEM(self, p_pid, __getpid());
990 /* Make the forked thread the main thread */
991 __pthread_main_thread = self;
/* Only the forking thread survives fork(), so the circular list of
   live threads collapses to this single thread. */
992 THREAD_SETMEM(self, p_nextlive, self);
993 THREAD_SETMEM(self, p_prevlive, self);
994 #if !(USE_TLS && HAVE___THREAD)
995 /* Now this thread modifies the global variables. */
996 THREAD_SETMEM(self, p_errnop, &_errno);
997 THREAD_SETMEM(self, p_h_errnop, &_h_errno);
998 THREAD_SETMEM(self, p_resp, &_res);
999 #endif
1001 #ifndef FLOATING_STACKS
1002 /* This is to undo the setrlimit call in __pthread_init_max_stacksize.
1003 XXX This can be wrong if the user set the limit during the run. */
1005 struct rlimit limit;
1006 if (getrlimit (RLIMIT_STACK, &limit) == 0
1007 && limit.rlim_cur != limit.rlim_max)
/* Restore the soft stack limit to the hard limit. */
1009 limit.rlim_cur = limit.rlim_max;
1010 setrlimit(RLIMIT_STACK, &limit);
1013 #endif
1016 /* Process-wide exec() request */
/* Intended to be called just before exec(): terminate every other
   thread and the manager so the new program image starts single
   threaded, then restore default dispositions for the signals this
   implementation uses. */
1018 void __pthread_kill_other_threads_np(void)
1020 struct sigaction sa;
1021 /* Terminate all other threads and thread manager */
1022 pthread_onexit_process(0, NULL);
1023 /* Make current thread the main thread in case the calling thread
1024 changes its mind, does not exec(), and creates new threads instead. */
1025 __pthread_reset_main_thread();
1027 /* Reset the signal handlers behaviour for the signals the
1028 implementation uses since this would be passed to the new
1029 process. */
1030 sigemptyset(&sa.sa_mask);
1031 sa.sa_flags = 0;
1032 sa.sa_handler = SIG_DFL;
1033 __libc_sigaction(__pthread_sig_restart, &sa, NULL);
1034 __libc_sigaction(__pthread_sig_cancel, &sa, NULL);
/* The debug signal may be unused (value <= 0), e.g. when no debugger
   support is configured -- only reset it when it is a real signal. */
1035 if (__pthread_sig_debug > 0)
1036 __libc_sigaction(__pthread_sig_debug, &sa, NULL);
1038 weak_alias (__pthread_kill_other_threads_np, pthread_kill_other_threads_np)
/* Concurrency symbol level.  Recorded but otherwise unused: no useful
   interpretation of the hint has been found for this implementation. */
static int current_level;

/* Record the caller's desired concurrency LEVEL.  Always succeeds and
   returns 0, as permitted by POSIX for implementations that treat the
   value as a pure hint. */
int __pthread_setconcurrency(int level)
{
  current_level = level;
  return 0;
}
1049 weak_alias (__pthread_setconcurrency, pthread_setconcurrency)
1051 int __pthread_getconcurrency(void)
1053 return current_level;
1055 weak_alias (__pthread_getconcurrency, pthread_getconcurrency)
1057 /* Primitives for controlling thread execution */
/* Block the calling thread until its restart signal arrives.  The loop
   guards against wakeups caused by other signals: p_signal is cleared
   here and presumably set to the restart signal number by the restart
   signal handler (not visible in this extract -- confirm), so we keep
   calling sigsuspend() until that value appears. */
1059 void __pthread_wait_for_restart_signal(pthread_descr self)
1061 sigset_t mask;
1063 sigprocmask(SIG_SETMASK, NULL, &mask); /* Get current signal mask */
1064 sigdelset(&mask, __pthread_sig_restart); /* Unblock the restart signal */
1065 THREAD_SETMEM(self, p_signal, 0);
1066 do {
1067 sigsuspend(&mask); /* Wait for signal */
1068 } while (THREAD_GETMEM(self, p_signal) !=__pthread_sig_restart);
1070 READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */
1073 #if !__ASSUME_REALTIME_SIGNALS
1074 /* The _old variants are for 2.0 and early 2.1 kernels which don't have RT
1075 signals.
1076 On these kernels, we use SIGUSR1 and SIGUSR2 for restart and cancellation.
1077 Since the restart signal does not queue, we use an atomic counter to create
1078 queuing semantics. This is needed to resolve a rare race condition in
1079 pthread_cond_timedwait_relative. */
/* Resume TH.  Only send the actual signal when the resume count was -1,
   i.e. TH is (or is about to be) blocked in a suspend; otherwise the
   increment just banks the wakeup for a future suspend.  NOTE(review):
   this reading assumes atomic_increment returns the previous value --
   confirm against the atomic primitives' definitions. */
1081 void __pthread_restart_old(pthread_descr th)
1083 if (atomic_increment(&th->p_resume_count) == -1)
1084 kill(th->p_pid, __pthread_sig_restart);
/* Suspend the calling thread until __pthread_restart_old is invoked on
   it.  If a wakeup was already banked in p_resume_count, consume it and
   return immediately instead of blocking. */
1087 void __pthread_suspend_old(pthread_descr self)
1089 if (atomic_decrement(&self->p_resume_count) <= 0)
1090 __pthread_wait_for_restart_signal(self);
/* Timed suspend for pre-RT-signal kernels: wait until this thread is
   restarted or until the absolute deadline ABSTIME passes.  Returns 1
   when a restart was consumed, 0 when the caller must resolve the
   resulting wakeup race itself (see the long comment below).
   NOTE(review): the return-type line (`int`) is not visible in this
   extract of the definition. */
1094 __pthread_timedsuspend_old(pthread_descr self, const struct timespec *abstime)
1096 sigset_t unblock, initial_mask;
1097 int was_signalled = 0;
1098 sigjmp_buf jmpbuf;
/* Zero here apparently means no wakeup was banked and the count is now
   -1 (suspended) -- confirm atomic_decrement's return convention. */
1100 if (atomic_decrement(&self->p_resume_count) == 0) {
1101 /* Set up a longjmp handler for the restart signal, unblock
1102 the signal and sleep. */
1104 if (sigsetjmp(jmpbuf, 1) == 0) {
1105 THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
1106 THREAD_SETMEM(self, p_signal, 0);
1107 /* Unblock the restart signal */
1108 sigemptyset(&unblock);
1109 sigaddset(&unblock, __pthread_sig_restart);
/* From here until the mask is restored, a restart signal presumably
   siglongjmps through p_signal_jmp into the else branch below (the
   handler is not visible in this extract -- confirm). */
1110 sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);
1112 while (1) {
1113 struct timeval now;
1114 struct timespec reltime;
1116 /* Compute a time offset relative to now. */
1117 __gettimeofday (&now, NULL);
1118 reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
1119 reltime.tv_sec = abstime->tv_sec - now.tv_sec;
1120 if (reltime.tv_nsec < 0) {
1121 reltime.tv_nsec += 1000000000;
1122 reltime.tv_sec -= 1;
1125 /* Sleep for the required duration. If woken by a signal,
1126 resume waiting as required by Single Unix Specification. */
1127 if (reltime.tv_sec < 0 || __libc_nanosleep(&reltime, NULL) == 0)
1128 break;
1131 /* Block the restart signal again */
1132 sigprocmask(SIG_SETMASK, &initial_mask, NULL);
1133 was_signalled = 0;
1134 } else {
1135 was_signalled = 1;
1137 THREAD_SETMEM(self, p_signal_jmp, NULL);
1140 /* Now was_signalled is true if we exited the above code
1141 due to the delivery of a restart signal. In that case,
1142 we know we have been dequeued and resumed and that the
1143 resume count is balanced. Otherwise, there are some
1144 cases to consider. First, try to bump up the resume count
1145 back to zero. If it goes to 1, it means restart() was
1146 invoked on this thread. The signal must be consumed
1147 and the count bumped down and everything is cool. We
1148 can return a 1 to the caller.
1149 Otherwise, no restart was delivered yet, so a potential
1150 race exists; we return a 0 to the caller which must deal
1151 with this race in an appropriate way; for example by
1152 atomically removing the thread from consideration for a
1153 wakeup---if such a thing fails, it means a restart is
1154 being delivered. */
1156 if (!was_signalled) {
1157 if (atomic_increment(&self->p_resume_count) != -1) {
/* A restart raced with our timeout: absorb the pending signal so the
   count and the signal state stay balanced. */
1158 __pthread_wait_for_restart_signal(self);
1159 atomic_decrement(&self->p_resume_count); /* should be zero now! */
1160 /* woke spontaneously and consumed restart signal */
1161 return 1;
1163 /* woke spontaneously but did not consume restart---caller must resolve */
1164 return 0;
1166 /* woken due to restart signal */
1167 return 1;
1169 #endif /* __ASSUME_REALTIME_SIGNALS */
/* Resume TH on kernels with RT signals: just deliver the restart
   signal; no resume counter is needed here (contrast with
   __pthread_restart_old above). */
1171 void __pthread_restart_new(pthread_descr th)
1173 /* The barrier is probably not needed, in which case it still documents
1174 our assumptions. The intent is to commit previous writes to shared
1175 memory so the woken thread will have a consistent view. Complementary
1176 read barriers are present in the suspend functions. */
1177 WRITE_MEMORY_BARRIER();
1178 kill(th->p_pid, __pthread_sig_restart);
1181 /* There is no __pthread_suspend_new because it would just
1182 be a wasteful wrapper for __pthread_wait_for_restart_signal */
/* Timed suspend for kernels with RT signals: wait until this thread is
   restarted or until the absolute deadline ABSTIME passes.  Returns 1
   iff the wait ended because of the restart signal; 0 means the caller
   must resolve the wakeup race itself (see comment below).
   NOTE(review): the return-type line (`int`) is not visible in this
   extract of the definition. */
1185 __pthread_timedsuspend_new(pthread_descr self, const struct timespec *abstime)
1187 sigset_t unblock, initial_mask;
1188 int was_signalled = 0;
1189 sigjmp_buf jmpbuf;
1191 if (sigsetjmp(jmpbuf, 1) == 0) {
1192 THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
1193 THREAD_SETMEM(self, p_signal, 0);
1194 /* Unblock the restart signal */
1195 sigemptyset(&unblock);
1196 sigaddset(&unblock, __pthread_sig_restart);
/* From here until the mask is restored, a restart signal presumably
   siglongjmps through p_signal_jmp into the else branch below (the
   handler is not visible in this extract -- confirm). */
1197 sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);
1199 while (1) {
1200 struct timeval now;
1201 struct timespec reltime;
1203 /* Compute a time offset relative to now. */
1204 __gettimeofday (&now, NULL);
1205 reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
1206 reltime.tv_sec = abstime->tv_sec - now.tv_sec;
1207 if (reltime.tv_nsec < 0) {
1208 reltime.tv_nsec += 1000000000;
1209 reltime.tv_sec -= 1;
1212 /* Sleep for the required duration. If woken by a signal,
1213 resume waiting as required by Single Unix Specification. */
1214 if (reltime.tv_sec < 0 || __libc_nanosleep(&reltime, NULL) == 0)
1215 break;
1218 /* Block the restart signal again */
1219 sigprocmask(SIG_SETMASK, &initial_mask, NULL);
1220 was_signalled = 0;
1221 } else {
1222 was_signalled = 1;
1224 THREAD_SETMEM(self, p_signal_jmp, NULL);
1226 /* Now was_signalled is true if we exited the above code
1227 due to the delivery of a restart signal. In that case,
1228 everything is cool. We have been removed from whatever
1229 we were waiting on by the other thread, and consumed its signal.
1231 Otherwise this thread woke up spontaneously, or due to a signal other
1232 than restart. This is an ambiguous case that must be resolved by
1233 the caller; the thread is still eligible for a restart wakeup
1234 so there is a race. */
1236 READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */
1237 return was_signalled;
/* Debugging aid */

#ifdef DEBUG
#include <stdarg.h>

/* Write a printf-style debug message, prefixed with the process id,
   to stderr (fd 2).  Output is truncated to fit a 1024-byte buffer.
   Best-effort: write errors are ignored (EINTR is retried). */
void __pthread_message(const char * fmt, ...)
{
  char buffer[1024];
  va_list args;
  int len;
  /* Compute the actual prefix length instead of hard-coding 8:
     "%05d : " is only 8 characters while the pid fits in 5 digits,
     which kernels with a larger pid_max do not guarantee.  The old
     code wrote the payload at buffer + 8 with size - 8, corrupting
     the prefix for 6-digit (or longer) pids. */
  len = snprintf(buffer, sizeof(buffer), "%05d : ", __getpid());
  if (len < 0 || len >= (int) sizeof(buffer))
    return;
  va_start(args, fmt);
  vsnprintf(buffer + len, sizeof(buffer) - len, fmt, args);
  va_end(args);
  TEMP_FAILURE_RETRY(__libc_write(2, buffer, strlen(buffer)));
}
#endif
1260 #ifndef SHARED
1261 /* We need a hook to force the cancelation wrappers and file locking
1262 to be linked in when static libpthread is used. */
/* Taking the addresses below creates references that drag the objects
   defining __pthread_provide_wrappers / __pthread_provide_lockfile
   into a static link even though nothing calls into them directly. */
1263 extern const int __pthread_provide_wrappers;
1264 static const int *const __pthread_require_wrappers =
1265 &__pthread_provide_wrappers;
1266 extern const int __pthread_provide_lockfile;
1267 static const int *const __pthread_require_lockfile =
1268 &__pthread_provide_lockfile;
1269 #endif