2 /* Linuxthreads - a simple clone()-based implementation of Posix */
3 /* threads for Linux. */
4 /* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr) */
6 /* This program is free software; you can redistribute it and/or */
7 /* modify it under the terms of the GNU Library General Public License */
8 /* as published by the Free Software Foundation; either version 2 */
9 /* of the License, or (at your option) any later version. */
11 /* This program is distributed in the hope that it will be useful, */
12 /* but WITHOUT ANY WARRANTY; without even the implied warranty of */
13 /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
14 /* GNU Library General Public License for more details. */
16 /* Thread creation, initialization, and basic low-level routines */
26 #include <sys/resource.h>
27 #include <sys/sysctl.h>
28 #include <shlib-compat.h>
30 #include "internals.h"
35 #include <locale.h> /* for __uselocale */
39 #if __ASSUME_REALTIME_SIGNALS && !defined __SIGRTMIN
40 # error "This must not happen; new kernel assumed but old headers"
43 #if !(USE_TLS && HAVE___THREAD)
44 /* These variables are used by the setup code. */
48 /* We need the global/static resolver state here. */
52 extern struct __res_state _res
;
57 /* We need only a few variables. */
58 static pthread_descr manager_thread
;
62 /* Descriptor of the initial thread */
64 struct _pthread_descr_struct __pthread_initial_thread
= {
67 .self
= &__pthread_initial_thread
/* pthread_descr self */
70 &__pthread_initial_thread
, /* pthread_descr p_nextlive */
71 &__pthread_initial_thread
, /* pthread_descr p_prevlive */
72 NULL
, /* pthread_descr p_nextwaiting */
73 NULL
, /* pthread_descr p_nextlock */
74 PTHREAD_THREADS_MAX
, /* pthread_t p_tid */
76 0, /* int p_priority */
77 &__pthread_handles
[0].h_lock
, /* struct _pthread_fastlock * p_lock */
79 NULL
, /* sigjmp_buf * p_signal_buf */
80 NULL
, /* sigjmp_buf * p_cancel_buf */
81 0, /* char p_terminated */
82 0, /* char p_detached */
83 0, /* char p_exited */
84 NULL
, /* void * p_retval */
86 NULL
, /* pthread_descr p_joining */
87 NULL
, /* struct _pthread_cleanup_buffer * p_cleanup */
88 0, /* char p_cancelstate */
89 0, /* char p_canceltype */
90 0, /* char p_canceled */
91 NULL
, /* char * p_in_sighandler */
92 0, /* char p_sigwaiting */
93 PTHREAD_START_ARGS_INITIALIZER(NULL
),
94 /* struct pthread_start_args p_start_args */
95 {NULL
}, /* void ** p_specific[PTHREAD_KEY_1STLEVEL_SIZE] */
96 {NULL
}, /* void * p_libc_specific[_LIBC_TSD_KEY_N] */
97 &_errno
, /* int *p_errnop */
99 &_h_errno
, /* int *p_h_errnop */
100 0, /* int p_h_errno */
101 &_res
, /* struct __res_state *p_resp */
102 {}, /* struct __res_state p_res */
103 1, /* int p_userstack */
104 NULL
, /* void * p_guardaddr */
105 0, /* size_t p_guardsize */
106 0, /* Always index 0 */
107 0, /* int p_report_events */
108 {{{0, }}, 0, NULL
}, /* td_eventbuf_t p_eventbuf */
109 __ATOMIC_INITIALIZER
, /* struct pthread_atomic p_resume_count */
110 0, /* char p_woken_by_cancel */
111 0, /* char p_condvar_avail */
112 0, /* char p_sem_avail */
113 NULL
, /* struct pthread_extricate_if *p_extricate */
114 NULL
, /* pthread_readlock_info *p_readlock_list; */
115 NULL
, /* pthread_readlock_info *p_readlock_free; */
116 0 /* int p_untracked_readlock_count; */
119 /* Descriptor of the manager thread; none of this is used but the error
120 variables, the p_pid and p_priority fields,
121 and the address for identification. */
123 #define manager_thread (&__pthread_manager_thread)
124 struct _pthread_descr_struct __pthread_manager_thread
= {
127 .self
= &__pthread_manager_thread
/* pthread_descr self */
130 NULL
, /* pthread_descr p_nextlive */
131 NULL
, /* pthread_descr p_prevlive */
132 NULL
, /* pthread_descr p_nextwaiting */
133 NULL
, /* pthread_descr p_nextlock */
136 0, /* int p_priority */
137 &__pthread_handles
[1].h_lock
, /* struct _pthread_fastlock * p_lock */
138 0, /* int p_signal */
139 NULL
, /* sigjmp_buf * p_signal_buf */
140 NULL
, /* sigjmp_buf * p_cancel_buf */
141 0, /* char p_terminated */
142 0, /* char p_detached */
143 0, /* char p_exited */
144 NULL
, /* void * p_retval */
145 0, /* int p_retval */
146 NULL
, /* pthread_descr p_joining */
147 NULL
, /* struct _pthread_cleanup_buffer * p_cleanup */
148 0, /* char p_cancelstate */
149 0, /* char p_canceltype */
150 0, /* char p_canceled */
151 NULL
, /* char * p_in_sighandler */
152 0, /* char p_sigwaiting */
153 PTHREAD_START_ARGS_INITIALIZER(__pthread_manager
),
154 /* struct pthread_start_args p_start_args */
155 {NULL
}, /* void ** p_specific[PTHREAD_KEY_1STLEVEL_SIZE] */
156 {NULL
}, /* void * p_libc_specific[_LIBC_TSD_KEY_N] */
157 &__pthread_manager_thread
.p_errno
, /* int *p_errnop */
159 NULL
, /* int *p_h_errnop */
160 0, /* int p_h_errno */
161 NULL
, /* struct __res_state *p_resp */
162 {}, /* struct __res_state p_res */
163 0, /* int p_userstack */
164 NULL
, /* void * p_guardaddr */
165 0, /* size_t p_guardsize */
166 1, /* Always index 1 */
167 0, /* int p_report_events */
168 {{{0, }}, 0, NULL
}, /* td_eventbuf_t p_eventbuf */
169 __ATOMIC_INITIALIZER
, /* struct pthread_atomic p_resume_count */
170 0, /* char p_woken_by_cancel */
171 0, /* char p_condvar_avail */
172 0, /* char p_sem_avail */
173 NULL
, /* struct pthread_extricate_if *p_extricate */
174 NULL
, /* pthread_readlock_info *p_readlock_list; */
175 NULL
, /* pthread_readlock_info *p_readlock_free; */
176 0 /* int p_untracked_readlock_count; */
180 /* Pointer to the main thread (the father of the thread manager thread) */
181 /* Originally, this is the initial thread, but this changes after fork() */
184 pthread_descr __pthread_main_thread
;
186 pthread_descr __pthread_main_thread
= &__pthread_initial_thread
;
189 /* Limit between the stack of the initial thread (above) and the
190 stacks of other threads (below). Aligned on a STACK_SIZE boundary. */
192 char *__pthread_initial_thread_bos
;
194 /* File descriptor for sending requests to the thread manager. */
195 /* Initially -1, meaning that the thread manager is not running. */
197 int __pthread_manager_request
= -1;
199 /* Other end of the pipe for sending requests to the thread manager. */
201 int __pthread_manager_reader
;
203 /* Limits of the thread manager stack */
205 char *__pthread_manager_thread_bos
;
206 char *__pthread_manager_thread_tos
;
208 /* For process-wide exit() */
210 int __pthread_exit_requested
;
211 int __pthread_exit_code
;
213 /* Maximum stack size. */
214 size_t __pthread_max_stacksize
;
216 /* Nonzero if the machine has more than one processor. */
217 int __pthread_smp_kernel
;
220 #if !__ASSUME_REALTIME_SIGNALS
221 /* Pointers that select new or old suspend/resume functions
222 based on availability of rt signals. */
224 void (*__pthread_restart
)(pthread_descr
) = __pthread_restart_old
;
225 void (*__pthread_suspend
)(pthread_descr
) = __pthread_suspend_old
;
226 int (*__pthread_timedsuspend
)(pthread_descr
, const struct timespec
*) = __pthread_timedsuspend_old
;
227 #endif /* __ASSUME_REALTIME_SIGNALS */
229 /* Communicate relevant LinuxThreads constants to gdb */
231 const int __pthread_threads_max
= PTHREAD_THREADS_MAX
;
232 const int __pthread_sizeof_handle
= sizeof(struct pthread_handle_struct
);
233 const int __pthread_offsetof_descr
= offsetof(struct pthread_handle_struct
,
235 const int __pthread_offsetof_pid
= offsetof(struct _pthread_descr_struct
,
237 const int __linuxthreads_pthread_sizeof_descr
238 = sizeof(struct _pthread_descr_struct
);
240 const int __linuxthreads_initial_report_events
;
242 const char __linuxthreads_version
[] = VERSION
;
244 /* Forward declarations */
246 static void pthread_onexit_process(int retcode
, void *arg
);
247 #ifndef HAVE_Z_NODELETE
248 static void pthread_atexit_process(void *arg
, int retcode
);
249 static void pthread_atexit_retcode(void *arg
, int retcode
);
251 static void pthread_handle_sigcancel(int sig
);
252 static void pthread_handle_sigrestart(int sig
);
253 static void pthread_handle_sigdebug(int sig
);
255 /* Signal numbers used for the communication.
256 In these variables we keep track of the used variables. If the
257 platform does not support any real-time signals we will define the
258 values to some unreasonable value which will signal failing of all
259 the functions below. */
261 static int current_rtmin
= -1;
262 static int current_rtmax
= -1;
263 int __pthread_sig_restart
= SIGUSR1
;
264 int __pthread_sig_cancel
= SIGUSR2
;
265 int __pthread_sig_debug
;
267 static int current_rtmin
;
268 static int current_rtmax
;
270 #if __SIGRTMAX - __SIGRTMIN >= 3
271 int __pthread_sig_restart
= __SIGRTMIN
;
272 int __pthread_sig_cancel
= __SIGRTMIN
+ 1;
273 int __pthread_sig_debug
= __SIGRTMIN
+ 2;
275 int __pthread_sig_restart
= SIGUSR1
;
276 int __pthread_sig_cancel
= SIGUSR2
;
277 int __pthread_sig_debug
;
280 static int rtsigs_initialized
;
282 #if !__ASSUME_REALTIME_SIGNALS
283 # include "testrtsig.h"
289 #if !__ASSUME_REALTIME_SIGNALS
290 if (__builtin_expect (!kernel_has_rtsig (), 0))
294 # if __SIGRTMAX - __SIGRTMIN >= 3
295 __pthread_sig_restart
= SIGUSR1
;
296 __pthread_sig_cancel
= SIGUSR2
;
297 __pthread_sig_debug
= 0;
301 #endif /* __ASSUME_REALTIME_SIGNALS */
303 #if __SIGRTMAX - __SIGRTMIN >= 3
304 current_rtmin
= __SIGRTMIN
+ 3;
305 # if !__ASSUME_REALTIME_SIGNALS
306 __pthread_restart
= __pthread_restart_new
;
307 __pthread_suspend
= __pthread_wait_for_restart_signal
;
308 __pthread_timedsuspend
= __pthread_timedsuspend_new
;
309 # endif /* __ASSUME_REALTIME_SIGNALS */
311 current_rtmin
= __SIGRTMIN
;
314 current_rtmax
= __SIGRTMAX
;
317 rtsigs_initialized
= 1;
321 /* Return number of available real-time signal with highest priority. */
323 __libc_current_sigrtmin (void)
326 if (__builtin_expect (!rtsigs_initialized
, 0))
329 return current_rtmin
;
332 /* Return number of available real-time signal with lowest priority. */
334 __libc_current_sigrtmax (void)
337 if (__builtin_expect (!rtsigs_initialized
, 0))
340 return current_rtmax
;
343 /* Allocate real-time signal with highest/lowest available
344 priority. Please note that we don't use a lock since we assume
345 this function to be called at program start. */
347 __libc_allocate_rtsig (int high
)
352 if (__builtin_expect (!rtsigs_initialized
, 0))
354 if (__builtin_expect (current_rtmin
== -1, 0)
355 || __builtin_expect (current_rtmin
> current_rtmax
, 0))
356 /* We don't have anymore signal available. */
359 return high
? current_rtmin
++ : current_rtmax
--;
363 /* The function we use to get the kernel revision. */
364 extern int __sysctl (int *name
, int nlen
, void *oldval
, size_t *oldlenp
,
365 void *newval
, size_t newlen
);
367 /* Test whether the machine has more than one processor. This is not the
368 best test but good enough. More complicated tests would require `malloc'
369 which is not available at that time. */
373 static const int sysctl_args
[] = { CTL_KERN
, KERN_VERSION
};
375 size_t reslen
= sizeof (buf
);
377 /* Try reading the number using `sysctl' first. */
378 if (__sysctl ((int *) sysctl_args
,
379 sizeof (sysctl_args
) / sizeof (sysctl_args
[0]),
380 buf
, &reslen
, NULL
, 0) < 0)
382 /* This was not successful. Now try reading the /proc filesystem. */
383 int fd
= __open ("/proc/sys/kernel/version", O_RDONLY
);
384 if (__builtin_expect (fd
, 0) == -1
385 || (reslen
= __read (fd
, buf
, sizeof (buf
))) <= 0)
386 /* This also didn't work. We give up and say it's a UP machine. */
392 return strstr (buf
, "SMP") != NULL
;
396 /* Initialize the pthread library.
397 Initialization is split in two functions:
398 - a constructor function that blocks the __pthread_sig_restart signal
399 (must do this very early, since the program could capture the signal
400 mask with e.g. sigsetjmp before creating the first thread);
401 - a regular function called from pthread_create when needed. */
403 static void pthread_initialize(void) __attribute__((constructor
));
405 #ifndef HAVE_Z_NODELETE
406 extern void *__dso_handle
__attribute__ ((weak
));
410 #if defined USE_TLS && !defined SHARED
411 extern void __libc_setup_tls (size_t tcbsize
, size_t tcbalign
);
415 /* Do some minimal initialization which has to be done during the
416 startup of the C library. */
418 __pthread_initialize_minimal(void)
424 /* Unlike in the dynamically linked case the dynamic linker has not
425 taken care of initializing the TLS data structures. */
426 __libc_setup_tls (TLS_TCB_SIZE
, TLS_TCB_ALIGN
);
431 /* The memory for the thread descriptor was allocated elsewhere as
432 part of the TLS allocation. We have to initialize the data
433 structure by hand. This initialization must mirror the struct
435 self
->p_nextlive
= self
->p_prevlive
= self
;
436 self
->p_tid
= PTHREAD_THREADS_MAX
;
437 self
->p_lock
= &__pthread_handles
[0].h_lock
;
438 # ifndef HAVE___THREAD
439 self
->p_errnop
= &_errno
;
440 self
->p_h_errnop
= &_h_errno
;
442 /* self->p_start_args need not be initialized, it's all zero. */
443 self
->p_userstack
= 1;
444 # if __LT_SPINLOCK_INIT != 0
445 self
->p_resume_count
= (struct pthread_atomic
) __ATOMIC_INITIALIZER
;
448 /* Another variable which points to the thread descriptor. */
449 __pthread_main_thread
= self
;
451 /* And fill in the pointer the the thread __pthread_handles array. */
452 __pthread_handles
[0].h_descr
= self
;
454 /* If we have special thread_self processing, initialize that for the
456 # ifdef INIT_THREAD_SELF
457 INIT_THREAD_SELF(&__pthread_initial_thread
, 0);
463 self
->p_cpuclock_offset
= GL(dl_cpuclock_offset
);
465 __pthread_initial_thread
.p_cpuclock_offset
= GL(dl_cpuclock_offset
);
469 #if !(USE_TLS && HAVE___THREAD) && defined SHARED
470 /* Initialize thread-locale current locale to point to the global one.
471 With __thread support, the variable's initializer takes care of this. */
472 __uselocale (LC_GLOBAL_LOCALE
);
478 __pthread_init_max_stacksize(void)
483 getrlimit(RLIMIT_STACK
, &limit
);
484 #ifdef FLOATING_STACKS
485 if (limit
.rlim_cur
== RLIM_INFINITY
)
486 limit
.rlim_cur
= ARCH_STACK_MAX_SIZE
;
487 # ifdef NEED_SEPARATE_REGISTER_STACK
488 max_stack
= limit
.rlim_cur
/ 2;
490 max_stack
= limit
.rlim_cur
;
493 /* Play with the stack size limit to make sure that no stack ever grows
494 beyond STACK_SIZE minus one page (to act as a guard page). */
495 # ifdef NEED_SEPARATE_REGISTER_STACK
496 /* STACK_SIZE bytes hold both the main stack and register backing
497 store. The rlimit value applies to each individually. */
498 max_stack
= STACK_SIZE
/2 - __getpagesize ();
500 max_stack
= STACK_SIZE
- __getpagesize();
502 if (limit
.rlim_cur
> max_stack
) {
503 limit
.rlim_cur
= max_stack
;
504 setrlimit(RLIMIT_STACK
, &limit
);
507 __pthread_max_stacksize
= max_stack
;
511 static void pthread_initialize(void)
516 /* If already done (e.g. by a constructor called earlier!), bail out */
517 if (__pthread_initial_thread_bos
!= NULL
) return;
518 #ifdef TEST_FOR_COMPARE_AND_SWAP
519 /* Test if compare-and-swap is available */
520 __pthread_has_cas
= compare_and_swap_is_available();
522 #ifdef FLOATING_STACKS
523 /* We don't need to know the bottom of the stack. Give the pointer some
524 value to signal that initialization happened. */
525 __pthread_initial_thread_bos
= (void *) -1l;
527 /* Determine stack size limits . */
528 __pthread_init_max_stacksize ();
529 # ifdef _STACK_GROWS_UP
530 /* The initial thread already has all the stack it needs */
531 __pthread_initial_thread_bos
= (char *)
532 ((long)CURRENT_STACK_FRAME
&~ (STACK_SIZE
- 1));
534 /* For the initial stack, reserve at least STACK_SIZE bytes of stack
535 below the current stack address, and align that on a
536 STACK_SIZE boundary. */
537 __pthread_initial_thread_bos
=
538 (char *)(((long)CURRENT_STACK_FRAME
- 2 * STACK_SIZE
) & ~(STACK_SIZE
- 1));
542 /* Update the descriptor for the initial thread. */
543 THREAD_SETMEM (((pthread_descr
) NULL
), p_pid
, __getpid());
544 # ifndef HAVE___THREAD
545 /* Likewise for the resolver state _res. */
546 THREAD_SETMEM (((pthread_descr
) NULL
), p_resp
, &_res
);
549 /* Update the descriptor for the initial thread. */
550 __pthread_initial_thread
.p_pid
= __getpid();
551 /* Likewise for the resolver state _res. */
552 __pthread_initial_thread
.p_resp
= &_res
;
555 /* Initialize real-time signals. */
558 /* Setup signal handlers for the initial thread.
559 Since signal handlers are shared between threads, these settings
560 will be inherited by all other threads. */
561 sa
.sa_handler
= pthread_handle_sigrestart
;
562 sigemptyset(&sa
.sa_mask
);
564 __libc_sigaction(__pthread_sig_restart
, &sa
, NULL
);
565 sa
.sa_handler
= pthread_handle_sigcancel
;
567 __libc_sigaction(__pthread_sig_cancel
, &sa
, NULL
);
568 if (__pthread_sig_debug
> 0) {
569 sa
.sa_handler
= pthread_handle_sigdebug
;
570 sigemptyset(&sa
.sa_mask
);
572 __libc_sigaction(__pthread_sig_debug
, &sa
, NULL
);
574 /* Initially, block __pthread_sig_restart. Will be unblocked on demand. */
576 sigaddset(&mask
, __pthread_sig_restart
);
577 sigprocmask(SIG_BLOCK
, &mask
, NULL
);
578 /* Register an exit function to kill all other threads. */
579 /* Do it early so that user-registered atexit functions are called
580 before pthread_*exit_process. */
581 #ifndef HAVE_Z_NODELETE
582 if (__builtin_expect (&__dso_handle
!= NULL
, 1))
583 __cxa_atexit ((void (*) (void *)) pthread_atexit_process
, NULL
,
587 __on_exit (pthread_onexit_process
, NULL
);
588 /* How many processors. */
589 __pthread_smp_kernel
= is_smp_system ();
/* Public entry point for explicit initialization of the thread library.
   Simply forwards to the internal constructor-run initializer, which is
   idempotent.  */
void __pthread_initialize(void)
{
  pthread_initialize();
}
597 int __pthread_initialize_manager(void)
601 struct pthread_request request
;
605 #ifndef HAVE_Z_NODELETE
606 if (__builtin_expect (&__dso_handle
!= NULL
, 1))
607 __cxa_atexit ((void (*) (void *)) pthread_atexit_retcode
, NULL
,
611 if (__pthread_max_stacksize
== 0)
612 __pthread_init_max_stacksize ();
613 /* If basic initialization not done yet (e.g. we're called from a
614 constructor run before our constructor), do it now */
615 if (__pthread_initial_thread_bos
== NULL
) pthread_initialize();
616 /* Setup stack for thread manager */
617 __pthread_manager_thread_bos
= malloc(THREAD_MANAGER_STACK_SIZE
);
618 if (__pthread_manager_thread_bos
== NULL
) return -1;
619 __pthread_manager_thread_tos
=
620 __pthread_manager_thread_bos
+ THREAD_MANAGER_STACK_SIZE
;
621 /* Setup pipe to communicate with thread manager */
622 if (pipe(manager_pipe
) == -1) {
623 free(__pthread_manager_thread_bos
);
628 /* Allocate memory for the thread descriptor and the dtv. */
629 __pthread_handles
[1].h_descr
= manager_thread
= tcb
630 = _dl_allocate_tls (NULL
);
632 free(__pthread_manager_thread_bos
);
633 __libc_close(manager_pipe
[0]);
634 __libc_close(manager_pipe
[1]);
638 /* Initialize the descriptor. */
639 tcb
->p_header
.data
.tcb
= tcb
;
640 tcb
->p_header
.data
.self
= tcb
;
641 tcb
->p_lock
= &__pthread_handles
[1].h_lock
;
642 # ifndef HAVE___THREAD
643 tcb
->p_errnop
= &tcb
->p_errno
;
645 tcb
->p_start_args
= (struct pthread_start_args
) PTHREAD_START_ARGS_INITIALIZER(__pthread_manager
);
647 # if __LT_SPINLOCK_INIT != 0
648 self
->p_resume_count
= (struct pthread_atomic
) __ATOMIC_INITIALIZER
;
651 tcb
= &__pthread_manager_thread
;
654 __pthread_manager_request
= manager_pipe
[1]; /* writing end */
655 __pthread_manager_reader
= manager_pipe
[0]; /* reading end */
657 /* Start the thread manager */
660 if (__linuxthreads_initial_report_events
!= 0)
661 THREAD_SETMEM (((pthread_descr
) NULL
), p_report_events
,
662 __linuxthreads_initial_report_events
);
663 report_events
= THREAD_GETMEM (((pthread_descr
) NULL
), p_report_events
);
665 if (__linuxthreads_initial_report_events
!= 0)
666 __pthread_initial_thread
.p_report_events
667 = __linuxthreads_initial_report_events
;
668 report_events
= __pthread_initial_thread
.p_report_events
;
670 if (__builtin_expect (report_events
, 0))
672 /* It's a bit more complicated. We have to report the creation of
673 the manager thread. */
674 int idx
= __td_eventword (TD_CREATE
);
675 uint32_t mask
= __td_eventmask (TD_CREATE
);
679 event_bits
= THREAD_GETMEM_NC (((pthread_descr
) NULL
),
680 p_eventbuf
.eventmask
.event_bits
[idx
]);
682 event_bits
= __pthread_initial_thread
.p_eventbuf
.eventmask
.event_bits
[idx
];
685 if ((mask
& (__pthread_threads_events
.event_bits
[idx
] | event_bits
))
688 __pthread_lock(tcb
->p_lock
, NULL
);
690 #ifdef NEED_SEPARATE_REGISTER_STACK
691 pid
= __clone2(__pthread_manager_event
,
692 (void **) __pthread_manager_thread_bos
,
693 THREAD_MANAGER_STACK_SIZE
,
694 CLONE_VM
| CLONE_FS
| CLONE_FILES
| CLONE_SIGHAND
,
696 #elif _STACK_GROWS_UP
697 pid
= __clone(__pthread_manager_event
,
698 (void **) __pthread_manager_thread_bos
,
699 CLONE_VM
| CLONE_FS
| CLONE_FILES
| CLONE_SIGHAND
,
702 pid
= __clone(__pthread_manager_event
,
703 (void **) __pthread_manager_thread_tos
,
704 CLONE_VM
| CLONE_FS
| CLONE_FILES
| CLONE_SIGHAND
,
710 /* Now fill in the information about the new thread in
711 the newly created thread's data structure. We cannot let
712 the new thread do this since we don't know whether it was
713 already scheduled when we send the event. */
714 tcb
->p_eventbuf
.eventdata
= tcb
;
715 tcb
->p_eventbuf
.eventnum
= TD_CREATE
;
716 __pthread_last_event
= tcb
;
717 tcb
->p_tid
= 2* PTHREAD_THREADS_MAX
+ 1;
720 /* Now call the function which signals the event. */
721 __linuxthreads_create_event ();
724 /* Now restart the thread. */
725 __pthread_unlock(tcb
->p_lock
);
729 if (__builtin_expect (pid
, 0) == 0)
731 #ifdef NEED_SEPARATE_REGISTER_STACK
732 pid
= __clone2(__pthread_manager
, (void **) __pthread_manager_thread_bos
,
733 THREAD_MANAGER_STACK_SIZE
,
734 CLONE_VM
| CLONE_FS
| CLONE_FILES
| CLONE_SIGHAND
, tcb
);
735 #elif _STACK_GROWS_UP
736 pid
= __clone(__pthread_manager
, (void **) __pthread_manager_thread_bos
,
737 CLONE_VM
| CLONE_FS
| CLONE_FILES
| CLONE_SIGHAND
, tcb
);
739 pid
= __clone(__pthread_manager
, (void **) __pthread_manager_thread_tos
,
740 CLONE_VM
| CLONE_FS
| CLONE_FILES
| CLONE_SIGHAND
, tcb
);
743 if (__builtin_expect (pid
, 0) == -1) {
744 free(__pthread_manager_thread_bos
);
745 __libc_close(manager_pipe
[0]);
746 __libc_close(manager_pipe
[1]);
749 tcb
->p_tid
= 2* PTHREAD_THREADS_MAX
+ 1;
751 /* Make gdb aware of new thread manager */
752 if (__builtin_expect (__pthread_threads_debug
, 0) && __pthread_sig_debug
> 0)
754 raise(__pthread_sig_debug
);
755 /* We suspend ourself and gdb will wake us up when it is
756 ready to handle us. */
757 __pthread_wait_for_restart_signal(thread_self());
759 /* Synchronize debugging of the thread manager */
760 request
.req_kind
= REQ_DEBUG
;
761 TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request
,
762 (char *) &request
, sizeof(request
)));
766 /* Thread creation */
768 int __pthread_create_2_1(pthread_t
*thread
, const pthread_attr_t
*attr
,
769 void * (*start_routine
)(void *), void *arg
)
771 pthread_descr self
= thread_self();
772 struct pthread_request request
;
774 if (__builtin_expect (__pthread_manager_request
, 0) < 0) {
775 if (__pthread_initialize_manager() < 0) return EAGAIN
;
777 request
.req_thread
= self
;
778 request
.req_kind
= REQ_CREATE
;
779 request
.req_args
.create
.attr
= attr
;
780 request
.req_args
.create
.fn
= start_routine
;
781 request
.req_args
.create
.arg
= arg
;
782 sigprocmask(SIG_SETMASK
, (const sigset_t
*) NULL
,
783 &request
.req_args
.create
.mask
);
784 TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request
,
785 (char *) &request
, sizeof(request
)));
787 retval
= THREAD_GETMEM(self
, p_retcode
);
788 if (__builtin_expect (retval
, 0) == 0)
789 *thread
= (pthread_t
) THREAD_GETMEM(self
, p_retval
);
793 versioned_symbol (libpthread
, __pthread_create_2_1
, pthread_create
, GLIBC_2_1
);
795 #if SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_1)
797 int __pthread_create_2_0(pthread_t
*thread
, const pthread_attr_t
*attr
,
798 void * (*start_routine
)(void *), void *arg
)
800 /* The ATTR attribute is not really of type `pthread_attr_t *'. It has
801 the old size and access to the new members might crash the program.
802 We convert the struct now. */
803 pthread_attr_t new_attr
;
807 size_t ps
= __getpagesize ();
809 memcpy (&new_attr
, attr
,
810 (size_t) &(((pthread_attr_t
*)NULL
)->__guardsize
));
811 new_attr
.__guardsize
= ps
;
812 new_attr
.__stackaddr_set
= 0;
813 new_attr
.__stackaddr
= NULL
;
814 new_attr
.__stacksize
= STACK_SIZE
- ps
;
817 return __pthread_create_2_1 (thread
, attr
, start_routine
, arg
);
819 compat_symbol (libpthread
, __pthread_create_2_0
, pthread_create
, GLIBC_2_0
);
822 /* Simple operations on thread identifiers */
824 pthread_t
pthread_self(void)
826 pthread_descr self
= thread_self();
827 return THREAD_GETMEM(self
, p_tid
);
/* Compare two thread identifiers; nonzero iff they denote the same
   thread.  */
int pthread_equal(pthread_t thread1, pthread_t thread2)
{
  return thread1 == thread2;
}
835 /* Helper function for thread_self in the case of user-provided stacks */
839 pthread_descr
__pthread_find_self(void)
841 char * sp
= CURRENT_STACK_FRAME
;
844 /* __pthread_handles[0] is the initial thread, __pthread_handles[1] is
845 the manager threads handled specially in thread_self(), so start at 2 */
846 h
= __pthread_handles
+ 2;
847 while (! (sp
<= (char *) h
->h_descr
&& sp
>= h
->h_bottom
)) h
++;
853 static pthread_descr
thread_self_stack(void)
855 char *sp
= CURRENT_STACK_FRAME
;
858 if (sp
>= __pthread_manager_thread_bos
&& sp
< __pthread_manager_thread_tos
)
859 return manager_thread
;
860 h
= __pthread_handles
+ 2;
862 while (h
->h_descr
== NULL
863 || ! (sp
<= (char *) h
->h_descr
->p_stackaddr
&& sp
>= h
->h_bottom
))
866 while (! (sp
<= (char *) h
->h_descr
&& sp
>= h
->h_bottom
))
874 /* Thread scheduling */
876 int pthread_setschedparam(pthread_t thread
, int policy
,
877 const struct sched_param
*param
)
879 pthread_handle handle
= thread_handle(thread
);
882 __pthread_lock(&handle
->h_lock
, NULL
);
883 if (__builtin_expect (invalid_handle(handle
, thread
), 0)) {
884 __pthread_unlock(&handle
->h_lock
);
887 th
= handle
->h_descr
;
888 if (__builtin_expect (__sched_setscheduler(th
->p_pid
, policy
, param
) == -1,
890 __pthread_unlock(&handle
->h_lock
);
893 th
->p_priority
= policy
== SCHED_OTHER
? 0 : param
->sched_priority
;
894 __pthread_unlock(&handle
->h_lock
);
895 if (__pthread_manager_request
>= 0)
896 __pthread_manager_adjust_prio(th
->p_priority
);
900 int pthread_getschedparam(pthread_t thread
, int *policy
,
901 struct sched_param
*param
)
903 pthread_handle handle
= thread_handle(thread
);
906 __pthread_lock(&handle
->h_lock
, NULL
);
907 if (__builtin_expect (invalid_handle(handle
, thread
), 0)) {
908 __pthread_unlock(&handle
->h_lock
);
911 pid
= handle
->h_descr
->p_pid
;
912 __pthread_unlock(&handle
->h_lock
);
913 pol
= __sched_getscheduler(pid
);
914 if (__builtin_expect (pol
, 0) == -1) return errno
;
915 if (__sched_getparam(pid
, param
) == -1) return errno
;
920 int __pthread_yield (void)
922 /* For now this is equivalent with the POSIX call. */
923 return sched_yield ();
925 weak_alias (__pthread_yield
, pthread_yield
)
927 /* Process-wide exit() request */
929 static void pthread_onexit_process(int retcode
, void *arg
)
931 if (__builtin_expect (__pthread_manager_request
, 0) >= 0) {
932 struct pthread_request request
;
933 pthread_descr self
= thread_self();
935 request
.req_thread
= self
;
936 request
.req_kind
= REQ_PROCESS_EXIT
;
937 request
.req_args
.exit
.code
= retcode
;
938 TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request
,
939 (char *) &request
, sizeof(request
)));
941 /* Main thread should accumulate times for thread manager and its
942 children, so that timings for main thread account for all threads. */
943 if (self
== __pthread_main_thread
)
946 waitpid(manager_thread
->p_pid
, NULL
, __WCLONE
);
948 waitpid(__pthread_manager_thread
.p_pid
, NULL
, __WCLONE
);
950 /* Since all threads have been asynchronously terminated
951 (possibly holding locks), free cannot be used any more. */
952 /*free (__pthread_manager_thread_bos);*/
953 __pthread_manager_thread_bos
= __pthread_manager_thread_tos
= NULL
;
#ifndef HAVE_Z_NODELETE
/* Exit code recorded by pthread_atexit_retcode for later use by
   pthread_atexit_process.  */
static int __pthread_atexit_retcode;

/* atexit-style handler: terminate all other threads.  If RETCODE is 0,
   fall back to the code recorded by pthread_atexit_retcode.  */
static void pthread_atexit_process(void *arg, int retcode)
{
  pthread_onexit_process (retcode ?: __pthread_atexit_retcode, arg);
}

/* Record the process return code for the handler above.  */
static void pthread_atexit_retcode(void *arg, int retcode)
{
  __pthread_atexit_retcode = retcode;
}
#endif
972 /* The handler for the RESTART signal just records the signal received
973 in the thread descriptor, and optionally performs a siglongjmp
974 (for pthread_cond_timedwait). */
976 static void pthread_handle_sigrestart(int sig
)
978 pthread_descr self
= thread_self();
979 THREAD_SETMEM(self
, p_signal
, sig
);
980 if (THREAD_GETMEM(self
, p_signal_jmp
) != NULL
)
981 siglongjmp(*THREAD_GETMEM(self
, p_signal_jmp
), 1);
984 /* The handler for the CANCEL signal checks for cancellation
985 (in asynchronous mode), for process-wide exit and exec requests.
986 For the thread manager thread, redirect the signal to
987 __pthread_manager_sighandler. */
989 static void pthread_handle_sigcancel(int sig
)
991 pthread_descr self
= thread_self();
994 if (self
== manager_thread
)
997 /* A new thread might get a cancel signal before it is fully
998 initialized, so that the thread register might still point to the
999 manager thread. Double check that this is really the manager
1001 pthread_descr real_self
= thread_self_stack();
1002 if (real_self
== manager_thread
)
1004 __pthread_manager_sighandler(sig
);
1007 /* Oops, thread_self() isn't working yet.. */
1009 # ifdef INIT_THREAD_SELF
1010 INIT_THREAD_SELF(self
, self
->p_nr
);
1013 __pthread_manager_sighandler(sig
);
1017 if (__builtin_expect (__pthread_exit_requested
, 0)) {
1018 /* Main thread should accumulate times for thread manager and its
1019 children, so that timings for main thread account for all threads. */
1020 if (self
== __pthread_main_thread
) {
1022 waitpid(manager_thread
->p_pid
, NULL
, __WCLONE
);
1024 waitpid(__pthread_manager_thread
.p_pid
, NULL
, __WCLONE
);
1027 _exit(__pthread_exit_code
);
1029 if (__builtin_expect (THREAD_GETMEM(self
, p_canceled
), 0)
1030 && THREAD_GETMEM(self
, p_cancelstate
) == PTHREAD_CANCEL_ENABLE
) {
1031 if (THREAD_GETMEM(self
, p_canceltype
) == PTHREAD_CANCEL_ASYNCHRONOUS
)
1032 __pthread_do_exit(PTHREAD_CANCELED
, CURRENT_STACK_FRAME
);
1033 jmpbuf
= THREAD_GETMEM(self
, p_cancel_jmp
);
1034 if (jmpbuf
!= NULL
) {
1035 THREAD_SETMEM(self
, p_cancel_jmp
, NULL
);
1036 siglongjmp(*jmpbuf
, 1);
1041 /* Handler for the DEBUG signal.
1042 The debugging strategy is as follows:
1043 On reception of a REQ_DEBUG request (sent by new threads created to
1044 the thread manager under debugging mode), the thread manager throws
1045 __pthread_sig_debug to itself. The debugger (if active) intercepts
1046 this signal, takes into account new threads and continue execution
1047 of the thread manager by propagating the signal because it doesn't
1048 know what it is specifically done for. In the current implementation,
1049 the thread manager simply discards it. */
/* Handler for the DEBUG signal: the signal exists only so gdb can
   intercept it, so the thread manager simply discards it.  */
static void pthread_handle_sigdebug(int sig)
{
  /* Intentionally empty.  */
}
/* Reset the state of the thread machinery after a fork().
   Close the pipe used for requests and set the main thread to the forked
   thread.
   Notice that we can't free the stack segments, as the forked thread
   may hold pointers into them. */

void __pthread_reset_main_thread(void)
{
  pthread_descr self = thread_self();
  struct rlimit limit;

  if (__pthread_manager_request != -1) {
    /* Free the thread manager stack */
    free(__pthread_manager_thread_bos);
    __pthread_manager_thread_bos = __pthread_manager_thread_tos = NULL;
    /* Close the two ends of the pipe */
    __libc_close(__pthread_manager_request);
    __libc_close(__pthread_manager_reader);
    /* Mark the manager as gone so a later pthread_create restarts it.  */
    __pthread_manager_request = __pthread_manager_reader = -1;
  }

  /* Update the pid of the main thread */
  THREAD_SETMEM(self, p_pid, __getpid());
  /* Make the forked thread the main thread */
  __pthread_main_thread = self;
  /* Collapse the live-thread ring to just this thread.  */
  THREAD_SETMEM(self, p_nextlive, self);
  THREAD_SETMEM(self, p_prevlive, self);
#if !(USE_TLS && HAVE___THREAD)
  /* Now this thread modifies the global variables.  */
  THREAD_SETMEM(self, p_errnop, &_errno);
  THREAD_SETMEM(self, p_h_errnop, &_h_errno);
  THREAD_SETMEM(self, p_resp, &_res);
#endif

  /* Raise the soft stack limit to the hard limit: the single remaining
     thread may again use the full main stack.  */
  if (getrlimit (RLIMIT_STACK, &limit) == 0
      && limit.rlim_cur != limit.rlim_max) {
    limit.rlim_cur = limit.rlim_max;
    setrlimit(RLIMIT_STACK, &limit);
  }
}
1097 /* Process-wide exec() request */
1099 void __pthread_kill_other_threads_np(void)
1101 struct sigaction sa
;
1102 /* Terminate all other threads and thread manager */
1103 pthread_onexit_process(0, NULL
);
1104 /* Make current thread the main thread in case the calling thread
1105 changes its mind, does not exec(), and creates new threads instead. */
1106 __pthread_reset_main_thread();
1108 /* Reset the signal handlers behaviour for the signals the
1109 implementation uses since this would be passed to the new
1111 sigemptyset(&sa
.sa_mask
);
1113 sa
.sa_handler
= SIG_DFL
;
1114 __libc_sigaction(__pthread_sig_restart
, &sa
, NULL
);
1115 __libc_sigaction(__pthread_sig_cancel
, &sa
, NULL
);
1116 if (__pthread_sig_debug
> 0)
1117 __libc_sigaction(__pthread_sig_debug
, &sa
, NULL
);
1119 weak_alias (__pthread_kill_other_threads_np
, pthread_kill_other_threads_np
)
/* Concurrency symbol level. */
/* Stored only so pthread_getconcurrency can report it back; this 1:1
   implementation takes no scheduling hint from it.  */
static int current_level;

int __pthread_setconcurrency(int level)
{
  /* We don't do anything unless we have found a useful interpretation. */
  current_level = level;
  return 0;
}
weak_alias (__pthread_setconcurrency, pthread_setconcurrency)
/* Return the level last stored by pthread_setconcurrency (0 initially).  */
int __pthread_getconcurrency(void)
{
  return current_level;
}
weak_alias (__pthread_getconcurrency, pthread_getconcurrency)
/* Primitives for controlling thread execution */

/* Block the calling thread until the restart signal is delivered.
   The handler records the received signal number in p_signal; we loop
   in sigsuspend until that slot shows the restart signal, so spurious
   wakeups by other signals do not end the wait.  */
void __pthread_wait_for_restart_signal(pthread_descr self)
{
  sigset_t mask;

  sigprocmask(SIG_SETMASK, NULL, &mask); /* Get current signal mask */
  sigdelset(&mask, __pthread_sig_restart); /* Unblock the restart signal */
  THREAD_SETMEM(self, p_signal, 0);
  do {
    sigsuspend(&mask);                   /* Wait for signal */
  } while (THREAD_GETMEM(self, p_signal) !=__pthread_sig_restart);

  READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */
}
#if !__ASSUME_REALTIME_SIGNALS
/* The _old variants are for 2.0 and early 2.1 kernels which don't have RT
   signals.
   On these kernels, we use SIGUSR1 and SIGUSR2 for restart and cancellation.
   Since the restart signal does not queue, we use an atomic counter to create
   queuing semantics. This is needed to resolve a rare race condition in
   pthread_cond_timedwait_relative. */

/* Resume TH.  Bump its resume count; the signal is only sent when the
   count was -1 (i.e. the thread is actually parked in suspend), which
   gives the non-queuing signal counter-based queuing semantics.  */
void __pthread_restart_old(pthread_descr th)
{
  if (atomic_increment(&th->p_resume_count) == -1)
    kill(th->p_pid, __pthread_sig_restart);
}
/* Suspend the calling thread.  Decrement the resume count first: if a
   restart was already posted (count was positive) we return at once,
   otherwise wait for the restart signal.  */
void __pthread_suspend_old(pthread_descr self)
{
  if (atomic_decrement(&self->p_resume_count) <= 0)
    __pthread_wait_for_restart_signal(self);
}
/* Suspend the calling thread until restarted or until the absolute time
   ABSTIME passes.  Returns 1 if woken (or guaranteed to be woken) by a
   restart, 0 on timeout with no restart consumed — in the latter case
   the caller must resolve the resulting wakeup race itself.  */
int
__pthread_timedsuspend_old(pthread_descr self, const struct timespec *abstime)
{
  sigset_t unblock, initial_mask;
  int was_signalled = 0;
  sigjmp_buf jmpbuf;

  if (atomic_decrement(&self->p_resume_count) == 0) {
    /* Set up a longjmp handler for the restart signal, unblock
       the signal and sleep. */

    if (sigsetjmp(jmpbuf, 1) == 0) {
      /* The restart-signal handler siglongjmps here via p_signal_jmp.  */
      THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
      THREAD_SETMEM(self, p_signal, 0);
      /* Unblock the restart signal */
      sigemptyset(&unblock);
      sigaddset(&unblock, __pthread_sig_restart);
      sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);

      while (1) {
	struct timeval now;
	struct timespec reltime;

	/* Compute a time offset relative to now.  */
	__gettimeofday (&now, NULL);
	reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
	reltime.tv_sec = abstime->tv_sec - now.tv_sec;
	if (reltime.tv_nsec < 0) {
	  /* Borrow one second to normalize the nanosecond field.  */
	  reltime.tv_nsec += 1000000000;
	  reltime.tv_sec -= 1;
	}

	/* Sleep for the required duration. If woken by a signal,
	   resume waiting as required by Single Unix Specification.  */
	if (reltime.tv_sec < 0 || __libc_nanosleep(&reltime, NULL) == 0)
	  break;
      }

      /* Block the restart signal again */
      sigprocmask(SIG_SETMASK, &initial_mask, NULL);
      was_signalled = 0;
    } else {
      /* Arrived here through siglongjmp: the restart was delivered.  */
      was_signalled = 1;
    }
    THREAD_SETMEM(self, p_signal_jmp, NULL);
  }

  /* Now was_signalled is true if we exited the above code
     due to the delivery of a restart signal. In that case,
     we know we have been dequeued and resumed and that the
     resume count is balanced. Otherwise, there are some
     cases to consider. First, try to bump up the resume count
     back to zero. If it goes to 1, it means restart() was
     invoked on this thread. The signal must be consumed
     and the count bumped down and everything is cool. We
     can return a 1 to the caller.
     Otherwise, no restart was delivered yet, so a potential
     race exists; we return a 0 to the caller which must deal
     with this race in an appropriate way; for example by
     atomically removing the thread from consideration for a
     wakeup---if such a thing fails, it means a restart is
     being delivered.  */

  if (!was_signalled) {
    if (atomic_increment(&self->p_resume_count) != -1) {
      __pthread_wait_for_restart_signal(self);
      atomic_decrement(&self->p_resume_count); /* should be zero now! */
      /* woke spontaneously and consumed restart signal */
      return 1;
    }
    /* woke spontaneously but did not consume restart---caller must resolve */
    return 0;
  }
  /* woken due to restart signal */
  return 1;
}
#endif /* __ASSUME_REALTIME_SIGNALS */
/* Resume TH by sending it the restart signal (RT-signal kernels, where
   the signal queues and no resume counter is needed).  */
void __pthread_restart_new(pthread_descr th)
{
  /* The barrier is probably not needed, in which case it still documents
     our assumptions. The intent is to commit previous writes to shared
     memory so the woken thread will have a consistent view. Complementary
     read barriers are present in the suspend functions. */
  WRITE_MEMORY_BARRIER();
  kill(th->p_pid, __pthread_sig_restart);
}
/* There is no __pthread_suspend_new because it would just
   be a wasteful wrapper for __pthread_wait_for_restart_signal */

/* Suspend the calling thread until restarted or until the absolute time
   ABSTIME passes (RT-signal kernels).  Returns 1 if woken by a restart,
   0 on timeout/spurious wakeup — the ambiguous case the caller must
   resolve, as described below.  */
int
__pthread_timedsuspend_new(pthread_descr self, const struct timespec *abstime)
{
  sigset_t unblock, initial_mask;
  int was_signalled = 0;
  sigjmp_buf jmpbuf;

  if (sigsetjmp(jmpbuf, 1) == 0) {
    /* The restart-signal handler siglongjmps here via p_signal_jmp.  */
    THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
    THREAD_SETMEM(self, p_signal, 0);
    /* Unblock the restart signal */
    sigemptyset(&unblock);
    sigaddset(&unblock, __pthread_sig_restart);
    sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);

    while (1) {
      struct timeval now;
      struct timespec reltime;

      /* Compute a time offset relative to now.  */
      __gettimeofday (&now, NULL);
      reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
      reltime.tv_sec = abstime->tv_sec - now.tv_sec;
      if (reltime.tv_nsec < 0) {
	/* Borrow one second to normalize the nanosecond field.  */
	reltime.tv_nsec += 1000000000;
	reltime.tv_sec -= 1;
      }

      /* Sleep for the required duration. If woken by a signal,
	 resume waiting as required by Single Unix Specification.  */
      if (reltime.tv_sec < 0 || __libc_nanosleep(&reltime, NULL) == 0)
	break;
    }

    /* Block the restart signal again */
    sigprocmask(SIG_SETMASK, &initial_mask, NULL);
    was_signalled = 0;
  } else {
    /* Arrived here through siglongjmp: the restart was delivered.  */
    was_signalled = 1;
  }
  THREAD_SETMEM(self, p_signal_jmp, NULL);

  /* Now was_signalled is true if we exited the above code
     due to the delivery of a restart signal. In that case,
     everything is cool. We have been removed from whatever
     we were waiting on by the other thread, and consumed its signal.

     Otherwise this thread woke up spontaneously, or due to a signal other
     than restart. This is an ambiguous case that must be resolved by
     the caller; the thread is still eligible for a restart wakeup
     so there is a race. */

  READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */
  return was_signalled;
}
/* Debug helper: write a printf-style message to stderr, prefixed with
   the zero-padded pid.  Uses __libc_write directly so it is safe from
   within the thread machinery.  */
void __pthread_message(char * fmt, ...)
{
  char buffer[1024];
  va_list args;
  /* The "%05d : " prefix is exactly 8 characters; the format output is
     appended starting at buffer + 8 below.  */
  sprintf(buffer, "%05d : ", __getpid());
  va_start(args, fmt);
  vsnprintf(buffer + 8, sizeof(buffer) - 8, fmt, args);
  va_end(args);
  /* fd 2 == stderr; retry if interrupted by a signal.  */
  TEMP_FAILURE_RETRY(__libc_write(2, buffer, strlen(buffer)));
}
/* We need a hook to force the cancelation wrappers and file locking
   to be linked in when static libpthread is used. */
/* Taking the address of these externs creates undefined references the
   static linker must satisfy, pulling in the objects that define them.  */
extern const int __pthread_provide_wrappers;
static const int *const __pthread_require_wrappers =
  &__pthread_provide_wrappers;
extern const int __pthread_provide_lockfile;
static const int *const __pthread_require_lockfile =
  &__pthread_provide_lockfile;