(main): Rewrite initializers to avoid warnings.
[glibc.git] / linuxthreads / manager.c
blob149cc938b9220fa3e915710d28adbeae6135b3c8
1 /* Linuxthreads - a simple clone()-based implementation of Posix */
2 /* threads for Linux. */
3 /* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr) */
4 /* */
5 /* This program is free software; you can redistribute it and/or */
6 /* modify it under the terms of the GNU Library General Public License */
7 /* as published by the Free Software Foundation; either version 2 */
8 /* of the License, or (at your option) any later version. */
9 /* */
10 /* This program is distributed in the hope that it will be useful, */
11 /* but WITHOUT ANY WARRANTY; without even the implied warranty of */
12 /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
13 /* GNU Library General Public License for more details. */
15 /* The "thread manager" thread: manages creation and termination of threads */
17 #include <errno.h>
18 #include <sched.h>
19 #include <stddef.h>
20 #include <stdio.h>
21 #include <stdlib.h>
22 #include <string.h>
23 #include <unistd.h>
24 #include <sys/poll.h> /* for poll */
25 #include <sys/mman.h> /* for mmap */
26 #include <sys/param.h>
27 #include <sys/time.h>
28 #include <sys/wait.h> /* for waitpid macros */
30 #include "pthread.h"
31 #include "internals.h"
32 #include "spinlock.h"
33 #include "restart.h"
34 #include "semaphore.h"
/* Array of active threads. Entry 0 is reserved for the initial thread,
   entry 1 for the manager thread; all remaining entries start as NULLs. */
struct pthread_handle_struct __pthread_handles[PTHREAD_THREADS_MAX] =
{ { LOCK_INITIALIZER, &__pthread_initial_thread, 0},
  { LOCK_INITIALIZER, &__pthread_manager_thread, 0}, /* All NULLs */ };
/* For debugging purposes put the maximum number of threads in a variable.
   (Exported so the debugger can read the compile-time limit at runtime.) */
const int __linuxthreads_pthread_threads_max = PTHREAD_THREADS_MAX;
/* Indicate whether at least one thread has a user-defined stack (if 1),
   or if all threads have stacks supplied by LinuxThreads (if 0). */
int __pthread_nonstandard_stacks;

/* Number of active entries in __pthread_handles (used by gdb).
   Starts at 2: the initial thread and the manager thread. */
volatile int __pthread_handles_num = 2;

/* Whether to use debugger additional actions for thread creation
   (set to 1 by gdb) */
volatile int __pthread_threads_debug;

/* Globally enabled events. */
volatile td_thr_events_t __pthread_threads_events;

/* Pointer to thread descriptor with last event. */
volatile pthread_descr __pthread_last_event;
61 /* Mapping from stack segment to thread descriptor. */
62 /* Stack segment numbers are also indices into the __pthread_handles array. */
63 /* Stack segment number 0 is reserved for the initial thread. */
65 static inline pthread_descr thread_segment(int seg)
67 return (pthread_descr)(THREAD_STACK_START_ADDRESS - (seg - 1) * STACK_SIZE)
68 - 1;
/* Flag set in signal handler to record child termination */

static volatile int terminated_children = 0;

/* Flag set when the initial thread is blocked on pthread_exit waiting
   for all other threads to terminate */

static int main_thread_exiting = 0;

/* Counter used to generate unique thread identifier.
   Thread identifier is pthread_threads_counter + segment. */

static pthread_t pthread_threads_counter = 0;
/* Forward declarations */

static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
                                 void * (*start_routine)(void *), void *arg,
                                 sigset_t *mask, int father_pid,
                                 int report_events,
                                 td_thr_events_t *event_maskp);
static void pthread_handle_free(pthread_t th_id);
static void pthread_handle_exit(pthread_descr issuing_thread, int exitcode);
static void pthread_reap_children(void);
static void pthread_kill_all_threads(int sig, int main_thread_also);
97 /* The server thread managing requests for thread creation and termination */
99 int __pthread_manager(void *arg)
101 int reqfd = (int) (long int) arg;
102 struct pollfd ufd;
103 sigset_t mask;
104 int n;
105 struct pthread_request request;
107 /* If we have special thread_self processing, initialize it. */
108 #ifdef INIT_THREAD_SELF
109 INIT_THREAD_SELF(&__pthread_manager_thread, 1);
110 #endif
111 /* Set the error variable. */
112 __pthread_manager_thread.p_errnop = &__pthread_manager_thread.p_errno;
113 __pthread_manager_thread.p_h_errnop = &__pthread_manager_thread.p_h_errno;
114 /* Block all signals except __pthread_sig_cancel and SIGTRAP */
115 sigfillset(&mask);
116 sigdelset(&mask, __pthread_sig_cancel); /* for thread termination */
117 sigdelset(&mask, SIGTRAP); /* for debugging purposes */
118 if (__pthread_threads_debug && __pthread_sig_debug > 0)
119 sigdelset(&mask, __pthread_sig_debug);
120 sigprocmask(SIG_SETMASK, &mask, NULL);
121 /* Raise our priority to match that of main thread */
122 __pthread_manager_adjust_prio(__pthread_main_thread->p_priority);
123 /* Synchronize debugging of the thread manager */
124 n = __libc_read(reqfd, (char *)&request, sizeof(request));
125 ASSERT(n == sizeof(request) && request.req_kind == REQ_DEBUG);
126 ufd.fd = reqfd;
127 ufd.events = POLLIN;
128 /* Enter server loop */
129 while(1) {
130 n = __poll(&ufd, 1, 2000);
132 /* Check for termination of the main thread */
133 if (getppid() == 1) {
134 pthread_kill_all_threads(SIGKILL, 0);
135 _exit(0);
137 /* Check for dead children */
138 if (terminated_children) {
139 terminated_children = 0;
140 pthread_reap_children();
142 /* Read and execute request */
143 if (n == 1 && (ufd.revents & POLLIN)) {
144 n = __libc_read(reqfd, (char *)&request, sizeof(request));
145 ASSERT(n == sizeof(request));
146 switch(request.req_kind) {
147 case REQ_CREATE:
148 request.req_thread->p_retcode =
149 pthread_handle_create((pthread_t *) &request.req_thread->p_retval,
150 request.req_args.create.attr,
151 request.req_args.create.fn,
152 request.req_args.create.arg,
153 &request.req_args.create.mask,
154 request.req_thread->p_pid,
155 request.req_thread->p_report_events,
156 &request.req_thread->p_eventbuf.eventmask);
157 restart(request.req_thread);
158 break;
159 case REQ_FREE:
160 pthread_handle_free(request.req_args.free.thread_id);
161 break;
162 case REQ_PROCESS_EXIT:
163 pthread_handle_exit(request.req_thread,
164 request.req_args.exit.code);
165 /* NOTREACHED */
166 break;
167 case REQ_MAIN_THREAD_EXIT:
168 main_thread_exiting = 1;
169 /* Reap children in case all other threads died and the signal handler
170 went off before we set main_thread_exiting to 1, and therefore did
171 not do REQ_KICK. */
172 pthread_reap_children();
174 if (__pthread_main_thread->p_nextlive == __pthread_main_thread) {
175 restart(__pthread_main_thread);
176 /* The main thread will now call exit() which will trigger an
177 __on_exit handler, which in turn will send REQ_PROCESS_EXIT
178 to the thread manager. In case you are wondering how the
179 manager terminates from its loop here. */
181 break;
182 case REQ_POST:
183 __new_sem_post(request.req_args.post);
184 break;
185 case REQ_DEBUG:
186 /* Make gdb aware of new thread and gdb will restart the
187 new thread when it is ready to handle the new thread. */
188 if (__pthread_threads_debug && __pthread_sig_debug > 0)
189 raise(__pthread_sig_debug);
190 break;
191 case REQ_KICK:
192 /* This is just a prod to get the manager to reap some
193 threads right away, avoiding a potential delay at shutdown. */
194 break;
200 int __pthread_manager_event(void *arg)
202 /* If we have special thread_self processing, initialize it. */
203 #ifdef INIT_THREAD_SELF
204 INIT_THREAD_SELF(&__pthread_manager_thread, 1);
205 #endif
207 /* Get the lock the manager will free once all is correctly set up. */
208 __pthread_lock (THREAD_GETMEM((&__pthread_manager_thread), p_lock), NULL);
209 /* Free it immediately. */
210 __pthread_unlock (THREAD_GETMEM((&__pthread_manager_thread), p_lock));
212 return __pthread_manager(arg);
215 /* Process creation */
217 static int pthread_start_thread(void *arg)
219 pthread_descr self = (pthread_descr) arg;
220 struct pthread_request request;
221 void * outcome;
222 /* Initialize special thread_self processing, if any. */
223 #ifdef INIT_THREAD_SELF
224 INIT_THREAD_SELF(self, self->p_nr);
225 #endif
226 /* Make sure our pid field is initialized, just in case we get there
227 before our father has initialized it. */
228 THREAD_SETMEM(self, p_pid, __getpid());
229 /* Initial signal mask is that of the creating thread. (Otherwise,
230 we'd just inherit the mask of the thread manager.) */
231 sigprocmask(SIG_SETMASK, &self->p_start_args.mask, NULL);
232 /* Set the scheduling policy and priority for the new thread, if needed */
233 if (THREAD_GETMEM(self, p_start_args.schedpolicy) >= 0)
234 /* Explicit scheduling attributes were provided: apply them */
235 __sched_setscheduler(THREAD_GETMEM(self, p_pid),
236 THREAD_GETMEM(self, p_start_args.schedpolicy),
237 &self->p_start_args.schedparam);
238 else if (__pthread_manager_thread.p_priority > 0)
239 /* Default scheduling required, but thread manager runs in realtime
240 scheduling: switch new thread to SCHED_OTHER policy */
242 struct sched_param default_params;
243 default_params.sched_priority = 0;
244 __sched_setscheduler(THREAD_GETMEM(self, p_pid),
245 SCHED_OTHER, &default_params);
247 /* Make gdb aware of new thread */
248 if (__pthread_threads_debug && __pthread_sig_debug > 0) {
249 request.req_thread = self;
250 request.req_kind = REQ_DEBUG;
251 __libc_write(__pthread_manager_request,
252 (char *) &request, sizeof(request));
253 suspend(self);
255 /* Run the thread code */
256 outcome = self->p_start_args.start_routine(THREAD_GETMEM(self,
257 p_start_args.arg));
258 /* Exit with the given return value */
259 pthread_exit(outcome);
260 return 0;
263 static int pthread_start_thread_event(void *arg)
265 pthread_descr self = (pthread_descr) arg;
267 #ifdef INIT_THREAD_SELF
268 INIT_THREAD_SELF(self, self->p_nr);
269 #endif
270 /* Make sure our pid field is initialized, just in case we get there
271 before our father has initialized it. */
272 THREAD_SETMEM(self, p_pid, __getpid());
273 /* Get the lock the manager will free once all is correctly set up. */
274 __pthread_lock (THREAD_GETMEM(self, p_lock), NULL);
275 /* Free it immediately. */
276 __pthread_unlock (THREAD_GETMEM(self, p_lock));
278 /* Continue with the real function. */
279 return pthread_start_thread (arg);
282 static int pthread_allocate_stack(const pthread_attr_t *attr,
283 pthread_descr default_new_thread,
284 int pagesize,
285 pthread_descr * out_new_thread,
286 char ** out_new_thread_bottom,
287 char ** out_guardaddr,
288 size_t * out_guardsize)
290 pthread_descr new_thread;
291 char * new_thread_bottom;
292 char * guardaddr;
293 size_t stacksize, guardsize;
295 if (attr != NULL && attr->__stackaddr_set)
297 /* The user provided a stack. */
298 new_thread =
299 (pthread_descr) ((long)(attr->__stackaddr) & -sizeof(void *)) - 1;
300 new_thread_bottom = (char *) attr->__stackaddr - attr->__stacksize;
301 guardaddr = NULL;
302 guardsize = 0;
303 __pthread_nonstandard_stacks = 1;
305 else
307 stacksize = STACK_SIZE - pagesize;
308 if (attr != NULL)
309 stacksize = MIN (stacksize, roundup(attr->__stacksize, pagesize));
310 /* Allocate space for stack and thread descriptor at default address */
311 new_thread = default_new_thread;
312 new_thread_bottom = (char *) (new_thread + 1) - stacksize;
313 if (mmap((caddr_t)((char *)(new_thread + 1) - INITIAL_STACK_SIZE),
314 INITIAL_STACK_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC,
315 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_GROWSDOWN,
316 -1, 0) == MAP_FAILED)
317 /* Bad luck, this segment is already mapped. */
318 return -1;
319 /* We manage to get a stack. Now see whether we need a guard
320 and allocate it if necessary. Notice that the default
321 attributes (stack_size = STACK_SIZE - pagesize) do not need
322 a guard page, since the RLIMIT_STACK soft limit prevents stacks
323 from running into one another. */
324 if (stacksize == STACK_SIZE - pagesize)
326 /* We don't need a guard page. */
327 guardaddr = NULL;
328 guardsize = 0;
330 else
332 /* Put a bad page at the bottom of the stack */
333 guardsize = attr->__guardsize;
334 guardaddr = (void *)new_thread_bottom - guardsize;
335 if (mmap ((caddr_t) guardaddr, guardsize, 0, MAP_FIXED, -1, 0)
336 == MAP_FAILED)
338 /* We don't make this an error. */
339 guardaddr = NULL;
340 guardsize = 0;
344 /* Clear the thread data structure. */
345 memset (new_thread, '\0', sizeof (*new_thread));
346 *out_new_thread = new_thread;
347 *out_new_thread_bottom = new_thread_bottom;
348 *out_guardaddr = guardaddr;
349 *out_guardsize = guardsize;
350 return 0;
353 static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
354 void * (*start_routine)(void *), void *arg,
355 sigset_t * mask, int father_pid,
356 int report_events,
357 td_thr_events_t *event_maskp)
359 size_t sseg;
360 int pid;
361 pthread_descr new_thread;
362 char * new_thread_bottom;
363 pthread_t new_thread_id;
364 char *guardaddr = NULL;
365 size_t guardsize = 0;
366 int pagesize = __getpagesize();
368 /* First check whether we have to change the policy and if yes, whether
369 we can do this. Normally this should be done by examining the
370 return value of the __sched_setscheduler call in pthread_start_thread
371 but this is hard to implement. FIXME */
372 if (attr != NULL && attr->__schedpolicy != SCHED_OTHER && geteuid () != 0)
373 return EPERM;
374 /* Find a free segment for the thread, and allocate a stack if needed */
375 for (sseg = 2; ; sseg++)
377 if (sseg >= PTHREAD_THREADS_MAX)
378 return EAGAIN;
379 if (__pthread_handles[sseg].h_descr != NULL)
380 continue;
381 if (pthread_allocate_stack(attr, thread_segment(sseg), pagesize,
382 &new_thread, &new_thread_bottom,
383 &guardaddr, &guardsize) == 0)
384 break;
386 __pthread_handles_num++;
387 /* Allocate new thread identifier */
388 pthread_threads_counter += PTHREAD_THREADS_MAX;
389 new_thread_id = sseg + pthread_threads_counter;
390 /* Initialize the thread descriptor. Elements which have to be
391 initialized to zero already have this value. */
392 new_thread->p_tid = new_thread_id;
393 new_thread->p_lock = &(__pthread_handles[sseg].h_lock);
394 new_thread->p_cancelstate = PTHREAD_CANCEL_ENABLE;
395 new_thread->p_canceltype = PTHREAD_CANCEL_DEFERRED;
396 new_thread->p_errnop = &new_thread->p_errno;
397 new_thread->p_h_errnop = &new_thread->p_h_errno;
398 new_thread->p_res._sock = -1;
399 new_thread->p_resp = &new_thread->p_res;
400 new_thread->p_guardaddr = guardaddr;
401 new_thread->p_guardsize = guardsize;
402 new_thread->p_header.data.self = new_thread;
403 new_thread->p_nr = sseg;
404 /* Initialize the thread handle */
405 __pthread_init_lock(&__pthread_handles[sseg].h_lock);
406 __pthread_handles[sseg].h_descr = new_thread;
407 __pthread_handles[sseg].h_bottom = new_thread_bottom;
408 /* Determine scheduling parameters for the thread */
409 new_thread->p_start_args.schedpolicy = -1;
410 if (attr != NULL) {
411 new_thread->p_detached = attr->__detachstate;
412 new_thread->p_userstack = attr->__stackaddr_set;
414 switch(attr->__inheritsched) {
415 case PTHREAD_EXPLICIT_SCHED:
416 new_thread->p_start_args.schedpolicy = attr->__schedpolicy;
417 memcpy (&new_thread->p_start_args.schedparam, &attr->__schedparam,
418 sizeof (struct sched_param));
419 break;
420 case PTHREAD_INHERIT_SCHED:
421 new_thread->p_start_args.schedpolicy = __sched_getscheduler(father_pid);
422 __sched_getparam(father_pid, &new_thread->p_start_args.schedparam);
423 break;
425 new_thread->p_priority =
426 new_thread->p_start_args.schedparam.sched_priority;
428 /* Finish setting up arguments to pthread_start_thread */
429 new_thread->p_start_args.start_routine = start_routine;
430 new_thread->p_start_args.arg = arg;
431 new_thread->p_start_args.mask = *mask;
432 /* Make the new thread ID available already now. If any of the later
433 functions fail we return an error value and the caller must not use
434 the stored thread ID. */
435 *thread = new_thread_id;
436 /* Raise priority of thread manager if needed */
437 __pthread_manager_adjust_prio(new_thread->p_priority);
438 /* Do the cloning. We have to use two different functions depending
439 on whether we are debugging or not. */
440 pid = 0; /* Note that the thread never can have PID zero. */
441 if (report_events)
443 /* See whether the TD_CREATE event bit is set in any of the
444 masks. */
445 int idx = __td_eventword (TD_CREATE);
446 uint32_t mask = __td_eventmask (TD_CREATE);
448 if ((mask & (__pthread_threads_events.event_bits[idx]
449 | event_maskp->event_bits[idx])) != 0)
451 /* Lock the mutex the child will use now so that it will stop. */
452 __pthread_lock(new_thread->p_lock, NULL);
454 /* We have to report this event. */
455 pid = __clone(pthread_start_thread_event, (void **) new_thread,
456 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
457 __pthread_sig_cancel, new_thread);
458 if (pid != -1)
460 /* Now fill in the information about the new thread in
461 the newly created thread's data structure. We cannot let
462 the new thread do this since we don't know whether it was
463 already scheduled when we send the event. */
464 new_thread->p_eventbuf.eventdata = new_thread;
465 new_thread->p_eventbuf.eventnum = TD_CREATE;
466 __pthread_last_event = new_thread;
468 /* We have to set the PID here since the callback function
469 in the debug library will need it and we cannot guarantee
470 the child got scheduled before the debugger. */
471 new_thread->p_pid = pid;
473 /* Now call the function which signals the event. */
474 __linuxthreads_create_event ();
476 /* Now restart the thread. */
477 __pthread_unlock(new_thread->p_lock);
481 if (pid == 0)
482 pid = __clone(pthread_start_thread, (void **) new_thread,
483 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
484 __pthread_sig_cancel, new_thread);
485 /* Check if cloning succeeded */
486 if (pid == -1) {
487 /* Free the stack if we allocated it */
488 if (attr == NULL || !attr->__stackaddr_set)
490 if (new_thread->p_guardsize != 0)
491 munmap(new_thread->p_guardaddr, new_thread->p_guardsize);
492 munmap((caddr_t)((char *)(new_thread+1) - INITIAL_STACK_SIZE),
493 INITIAL_STACK_SIZE);
495 __pthread_handles[sseg].h_descr = NULL;
496 __pthread_handles[sseg].h_bottom = NULL;
497 __pthread_handles_num--;
498 return errno;
500 /* Insert new thread in doubly linked list of active threads */
501 new_thread->p_prevlive = __pthread_main_thread;
502 new_thread->p_nextlive = __pthread_main_thread->p_nextlive;
503 __pthread_main_thread->p_nextlive->p_prevlive = new_thread;
504 __pthread_main_thread->p_nextlive = new_thread;
505 /* Set pid field of the new thread, in case we get there before the
506 child starts. */
507 new_thread->p_pid = pid;
508 return 0;
512 /* Try to free the resources of a thread when requested by pthread_join
513 or pthread_detach on a terminated thread. */
515 static void pthread_free(pthread_descr th)
517 pthread_handle handle;
518 pthread_readlock_info *iter, *next;
520 ASSERT(th->p_exited);
521 /* Make the handle invalid */
522 handle = thread_handle(th->p_tid);
523 __pthread_lock(&handle->h_lock, NULL);
524 handle->h_descr = NULL;
525 handle->h_bottom = (char *)(-1L);
526 __pthread_unlock(&handle->h_lock);
527 #ifdef FREE_THREAD
528 FREE_THREAD(th, th->p_nr);
529 #endif
530 /* One fewer threads in __pthread_handles */
531 __pthread_handles_num--;
533 /* Destroy read lock list, and list of free read lock structures.
534 If the former is not empty, it means the thread exited while
535 holding read locks! */
537 for (iter = th->p_readlock_list; iter != NULL; iter = next)
539 next = iter->pr_next;
540 free(iter);
543 for (iter = th->p_readlock_free; iter != NULL; iter = next)
545 next = iter->pr_next;
546 free(iter);
549 /* If initial thread, nothing to free */
550 if (th == &__pthread_initial_thread) return;
551 if (!th->p_userstack)
553 /* Free the stack and thread descriptor area */
554 if (th->p_guardsize != 0)
555 munmap(th->p_guardaddr, th->p_guardsize);
556 munmap((caddr_t) ((char *)(th+1) - STACK_SIZE), STACK_SIZE);
560 /* Handle threads that have exited */
562 static void pthread_exited(pid_t pid)
564 pthread_descr th;
565 int detached;
566 /* Find thread with that pid */
567 for (th = __pthread_main_thread->p_nextlive;
568 th != __pthread_main_thread;
569 th = th->p_nextlive) {
570 if (th->p_pid == pid) {
571 /* Remove thread from list of active threads */
572 th->p_nextlive->p_prevlive = th->p_prevlive;
573 th->p_prevlive->p_nextlive = th->p_nextlive;
574 /* Mark thread as exited, and if detached, free its resources */
575 __pthread_lock(th->p_lock, NULL);
576 th->p_exited = 1;
577 /* If we have to signal this event do it now. */
578 if (th->p_report_events)
580 /* See whether TD_DEATH is in any of the mask. */
581 int idx = __td_eventword (TD_REAP);
582 uint32_t mask = __td_eventmask (TD_REAP);
584 if ((mask & (__pthread_threads_events.event_bits[idx]
585 | th->p_eventbuf.eventmask.event_bits[idx])) != 0)
587 /* Yep, we have to signal the death. */
588 th->p_eventbuf.eventnum = TD_DEATH;
589 th->p_eventbuf.eventdata = th;
590 __pthread_last_event = th;
592 /* Now call the function to signal the event. */
593 __linuxthreads_reap_event();
596 detached = th->p_detached;
597 __pthread_unlock(th->p_lock);
598 if (detached)
599 pthread_free(th);
600 break;
603 /* If all threads have exited and the main thread is pending on a
604 pthread_exit, wake up the main thread and terminate ourselves. */
605 if (main_thread_exiting &&
606 __pthread_main_thread->p_nextlive == __pthread_main_thread) {
607 restart(__pthread_main_thread);
608 /* Same logic as REQ_MAIN_THREAD_EXIT. */
/* Reap every terminated clone child with waitpid and process each exit.
   If a thread died from a signal, propagate that signal to all threads
   (including the main thread) and terminate the process. */
static void pthread_reap_children(void)
{
  pid_t pid;
  int status;

  while ((pid = __libc_waitpid(-1, &status, WNOHANG | __WCLONE)) > 0) {
    pthread_exited(pid);
    if (WIFSIGNALED(status)) {
      /* If a thread died due to a signal, send the same signal to
         all other threads, including the main thread. */
      pthread_kill_all_threads(WTERMSIG(status), 1);
      _exit(0);
    }
  }
}
628 /* Try to free the resources of a thread when requested by pthread_join
629 or pthread_detach on a terminated thread. */
631 static void pthread_handle_free(pthread_t th_id)
633 pthread_handle handle = thread_handle(th_id);
634 pthread_descr th;
636 __pthread_lock(&handle->h_lock, NULL);
637 if (invalid_handle(handle, th_id)) {
638 /* pthread_reap_children has deallocated the thread already,
639 nothing needs to be done */
640 __pthread_unlock(&handle->h_lock);
641 return;
643 th = handle->h_descr;
644 if (th->p_exited) {
645 __pthread_unlock(&handle->h_lock);
646 pthread_free(th);
647 } else {
648 /* The Unix process of the thread is still running.
649 Mark the thread as detached so that the thread manager will
650 deallocate its resources when the Unix process exits. */
651 th->p_detached = 1;
652 __pthread_unlock(&handle->h_lock);
656 /* Send a signal to all running threads */
658 static void pthread_kill_all_threads(int sig, int main_thread_also)
660 pthread_descr th;
661 for (th = __pthread_main_thread->p_nextlive;
662 th != __pthread_main_thread;
663 th = th->p_nextlive) {
664 kill(th->p_pid, sig);
666 if (main_thread_also) {
667 kill(__pthread_main_thread->p_pid, sig);
671 /* Process-wide exit() */
673 static void pthread_handle_exit(pthread_descr issuing_thread, int exitcode)
675 pthread_descr th;
676 __pthread_exit_requested = 1;
677 __pthread_exit_code = exitcode;
678 /* Send the CANCEL signal to all running threads, including the main
679 thread, but excluding the thread from which the exit request originated
680 (that thread must complete the exit, e.g. calling atexit functions
681 and flushing stdio buffers). */
682 for (th = issuing_thread->p_nextlive;
683 th != issuing_thread;
684 th = th->p_nextlive) {
685 kill(th->p_pid, __pthread_sig_cancel);
687 /* Now, wait for all these threads, so that they don't become zombies
688 and their times are properly added to the thread manager's times. */
689 for (th = issuing_thread->p_nextlive;
690 th != issuing_thread;
691 th = th->p_nextlive) {
692 waitpid(th->p_pid, NULL, __WCLONE);
694 restart(issuing_thread);
695 _exit(0);
698 /* Handler for __pthread_sig_cancel in thread manager thread */
700 void __pthread_manager_sighandler(int sig)
702 int kick_manager = terminated_children == 0 && main_thread_exiting;
703 terminated_children = 1;
705 /* If the main thread is terminating, kick the thread manager loop
706 each time some threads terminate. This eliminates a two second
707 shutdown delay caused by the thread manager sleeping in the
708 call to __poll(). Instead, the thread manager is kicked into
709 action, reaps the outstanding threads and resumes the main thread
710 so that it can complete the shutdown. */
712 if (kick_manager) {
713 struct pthread_request request;
714 request.req_thread = 0;
715 request.req_kind = REQ_KICK;
716 __libc_write(__pthread_manager_request, (char *) &request, sizeof(request));
720 /* Adjust priority of thread manager so that it always run at a priority
721 higher than all threads */
723 void __pthread_manager_adjust_prio(int thread_prio)
725 struct sched_param param;
727 if (thread_prio <= __pthread_manager_thread.p_priority) return;
728 param.sched_priority =
729 thread_prio < __sched_get_priority_max(SCHED_FIFO)
730 ? thread_prio + 1 : thread_prio;
731 __sched_setscheduler(__pthread_manager_thread.p_pid, SCHED_FIFO, &param);
732 __pthread_manager_thread.p_priority = thread_prio;