/* Linuxthreads - a simple clone()-based implementation of Posix */
/* threads for Linux. */
/* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr) */

/* This program is free software; you can redistribute it and/or */
/* modify it under the terms of the GNU Library General Public License */
/* as published by the Free Software Foundation; either version 2 */
/* of the License, or (at your option) any later version. */

/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the */
/* GNU Library General Public License for more details. */
/* The "thread manager" thread: manages creation and termination of threads */

#include <errno.h>
#include <sched.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/poll.h>		/* for poll */
#include <sys/mman.h>		/* for mmap */
#include <sys/param.h>
#include <sys/time.h>
#include <sys/wait.h>		/* for waitpid macros */
#include <locale.h>		/* for __uselocale */

#include "pthread.h"
#include "internals.h"
#include "spinlock.h"
#include "restart.h"
#include "semaphore.h"
/* For debugging purposes put the maximum number of threads in a variable. */

const int __linuxthreads_pthread_threads_max = PTHREAD_THREADS_MAX;
/* Indicate whether at least one thread has a user-defined stack (if 1),
   or if all threads have stacks supplied by LinuxThreads (if 0). */

int __pthread_nonstandard_stacks;
/* Number of active entries in __pthread_handles (used by gdb) */

volatile int __pthread_handles_num = 2;
/* Whether to use debugger additional actions for thread creation
   (set to 1 by gdb) */

volatile int __pthread_threads_debug;
/* Globally enabled events. */

volatile td_thr_events_t __pthread_threads_events;
/* Pointer to thread descriptor with last event. */

volatile pthread_descr __pthread_last_event;
static pthread_descr manager_thread;
/* Mapping from stack segment to thread descriptor. */
/* Stack segment numbers are also indices into the __pthread_handles array. */
/* Stack segment number 0 is reserved for the initial thread. */

#if FLOATING_STACKS
# define thread_segment(seq) NULL
#else
static inline pthread_descr thread_segment(int seg)
{
  return (pthread_descr)(THREAD_STACK_START_ADDRESS - (seg - 1) * STACK_SIZE)
         - 1;
}
#endif
/* Flag set in signal handler to record child termination */

static volatile int terminated_children;
/* Flag set when the initial thread is blocked on pthread_exit waiting
   for all other threads to terminate */

static int main_thread_exiting;
/* Counter used to generate unique thread identifier.
   Thread identifier is pthread_threads_counter + segment. */

static pthread_t pthread_threads_counter;
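
/* Illustrative sketch (not part of the original file): thread IDs combine
   a generation counter with a handle slot.  Each creation bumps the
   counter by PTHREAD_THREADS_MAX, so a thread in segment sseg gets

       tid = pthread_threads_counter + sseg;

   and the slot can be recovered as tid % PTHREAD_THREADS_MAX, which is
   what the thread_handle() lookup relies on.  A stale pthread_t whose
   slot was reused thus maps to a descriptor with a different p_tid and
   is rejected as a nonexisting handle instead of silently aliasing the
   newer thread. */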
/* Forward declarations */

static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
                                 void * (*start_routine)(void *), void *arg,
                                 sigset_t *mask, int father_pid,
                                 int report_events,
                                 td_thr_events_t *event_maskp);
static void pthread_handle_free(pthread_t th_id);
static void pthread_handle_exit(pthread_descr issuing_thread, int exitcode)
     __attribute__ ((noreturn));
static void pthread_reap_children(void);
static void pthread_kill_all_threads(int sig, int main_thread_also);
static void pthread_for_each_thread(void *arg,
                                    void (*fn)(void *, pthread_descr));
/* The server thread managing requests for thread creation and termination */

int
__attribute__ ((noreturn))
__pthread_manager(void *arg)
{
  pthread_descr self = manager_thread = arg;
  int reqfd = __pthread_manager_reader;
  struct pollfd ufd;
  sigset_t manager_mask;
  int n;
  struct pthread_request request;
  /* If we have special thread_self processing, initialize it. */
#ifdef INIT_THREAD_SELF
  INIT_THREAD_SELF(self, 1);
#endif
#if !(USE_TLS && HAVE___THREAD)
  /* Set the error variable. */
  self->p_errnop = &self->p_errno;
  self->p_h_errnop = &self->p_h_errno;
#endif
  /* Block all signals except __pthread_sig_cancel and SIGTRAP */
  sigfillset(&manager_mask);
  sigdelset(&manager_mask, __pthread_sig_cancel); /* for thread termination */
  sigdelset(&manager_mask, SIGTRAP);              /* for debugging purposes */
  if (__pthread_threads_debug && __pthread_sig_debug > 0)
    sigdelset(&manager_mask, __pthread_sig_debug);
  sigprocmask(SIG_SETMASK, &manager_mask, NULL);
  /* Raise our priority to match that of main thread */
  __pthread_manager_adjust_prio(__pthread_main_thread->p_priority);
  /* Synchronize debugging of the thread manager */
  n = TEMP_FAILURE_RETRY(__libc_read(reqfd, (char *)&request,
                                     sizeof(request)));
  ASSERT(n == sizeof(request) && request.req_kind == REQ_DEBUG);
  ufd.fd = reqfd;
  ufd.events = POLLIN;
  /* Enter server loop */
  while (1) {
    n = __poll(&ufd, 1, 2000);
    /* Check for termination of the main thread */
    if (getppid() == 1) {
      pthread_kill_all_threads(SIGKILL, 0);
      _exit(0);
    }
    /* Check for dead children */
    if (terminated_children) {
      terminated_children = 0;
      pthread_reap_children();
    }
    /* Read and execute request */
    if (n == 1 && (ufd.revents & POLLIN)) {
      n = TEMP_FAILURE_RETRY(__libc_read(reqfd, (char *)&request,
                                         sizeof(request)));
      if (n < 0) {
        char d[64];
        write(STDERR_FILENO, d, snprintf(d, sizeof(d), "*** read err %m\n"));
      } else if (n != sizeof(request)) {
        write(STDERR_FILENO, "*** short read in manager\n", 26);
      }
      switch(request.req_kind) {
      case REQ_CREATE:
        request.req_thread->p_retcode =
          pthread_handle_create((pthread_t *) &request.req_thread->p_retval,
                                request.req_args.create.attr,
                                request.req_args.create.fn,
                                request.req_args.create.arg,
                                &request.req_args.create.mask,
                                request.req_thread->p_pid,
                                request.req_thread->p_report_events,
                                &request.req_thread->p_eventbuf.eventmask);
        restart(request.req_thread);
        break;
      case REQ_FREE:
        pthread_handle_free(request.req_args.free.thread_id);
        break;
      case REQ_PROCESS_EXIT:
        pthread_handle_exit(request.req_thread,
                            request.req_args.exit.code);
        /* NOTREACHED */
        break;
      case REQ_MAIN_THREAD_EXIT:
        main_thread_exiting = 1;
        /* Reap children in case all other threads died and the signal handler
           went off before we set main_thread_exiting to 1, and therefore did
           not do REQ_KICK. */
        pthread_reap_children();

        if (__pthread_main_thread->p_nextlive == __pthread_main_thread) {
          restart(__pthread_main_thread);
          /* The main thread will now call exit() which will trigger an
             __on_exit handler, which in turn will send REQ_PROCESS_EXIT
             to the thread manager.  In case you are wondering how the
             manager terminates from its loop here. */
        }
        break;
      case REQ_POST:
        __new_sem_post(request.req_args.post);
        break;
      case REQ_DEBUG:
        /* Make gdb aware of the new thread; gdb will restart the
           new thread when it is ready to handle it. */
        if (__pthread_threads_debug && __pthread_sig_debug > 0)
          raise(__pthread_sig_debug);
        break;
      case REQ_KICK:
        /* This is just a prod to get the manager to reap some
           threads right away, avoiding a potential delay at shutdown. */
        break;
      case REQ_FOR_EACH_THREAD:
        pthread_for_each_thread(request.req_args.for_each.arg,
                                request.req_args.for_each.fn);
        restart(request.req_thread);
        break;
      }
    }
  }
}
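
/* Protocol sketch (illustrative, not part of the original file): user
   threads talk to the manager by writing a fixed-size struct
   pthread_request down the request pipe; for synchronous requests such
   as REQ_CREATE the requester then suspends until the manager calls
   restart() on it, as the loop above does after pthread_handle_create:

     struct pthread_request req;
     req.req_thread = self;          /* descriptor of the requesting thread */
     req.req_kind = REQ_CREATE;      /* or REQ_FREE, REQ_POST, REQ_KICK, ... */
     TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
                                     (char *) &req, sizeof(req)));
     suspend(self);                  /* woken by the manager's restart() */
*/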
int __pthread_manager_event(void *arg)
{
  pthread_descr self = arg;
  /* If we have special thread_self processing, initialize it. */
#ifdef INIT_THREAD_SELF
  INIT_THREAD_SELF(self, 1);
#endif

  /* Get the lock the manager will free once all is correctly set up. */
  __pthread_lock (THREAD_GETMEM(self, p_lock), NULL);
  /* Free it immediately. */
  __pthread_unlock (THREAD_GETMEM(self, p_lock));

  return __pthread_manager(arg);
}
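
/* The p_lock handshake above, spelled out: when an event is to be
   reported, the manager takes new_thread->p_lock before cloning (see
   pthread_handle_create below); the child's first action is to take and
   release that same lock.  The child therefore cannot run user code
   until the manager has finished filling in the descriptor and
   reporting the TD_CREATE event to the debugger. */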
/* Process creation */

static int
__attribute__ ((noreturn))
pthread_start_thread(void *arg)
{
  pthread_descr self = (pthread_descr) arg;
  struct pthread_request request;
  void * outcome;
#if HP_TIMING_AVAIL
  hp_timing_t tmpclock;
#endif
  /* Initialize special thread_self processing, if any. */
#ifdef INIT_THREAD_SELF
  INIT_THREAD_SELF(self, self->p_nr);
#endif
#if HP_TIMING_AVAIL
  HP_TIMING_NOW (tmpclock);
  THREAD_SETMEM (self, p_cpuclock_offset, tmpclock);
#endif
  /* Make sure our pid field is initialized, just in case we get there
     before our father has initialized it. */
  THREAD_SETMEM(self, p_pid, __getpid());
  /* Initial signal mask is that of the creating thread.  (Otherwise,
     we'd just inherit the mask of the thread manager.) */
  sigprocmask(SIG_SETMASK, &self->p_start_args.mask, NULL);
  /* Set the scheduling policy and priority for the new thread, if needed */
  if (THREAD_GETMEM(self, p_start_args.schedpolicy) >= 0)
    /* Explicit scheduling attributes were provided: apply them */
    __sched_setscheduler(THREAD_GETMEM(self, p_pid),
                         THREAD_GETMEM(self, p_start_args.schedpolicy),
                         &self->p_start_args.schedparam);
  else if (manager_thread->p_priority > 0)
    /* Default scheduling required, but thread manager runs in realtime
       scheduling: switch new thread to SCHED_OTHER policy */
    {
      struct sched_param default_params;
      default_params.sched_priority = 0;
      __sched_setscheduler(THREAD_GETMEM(self, p_pid),
                           SCHED_OTHER, &default_params);
    }
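
  /* Caller-side illustration (standard pthreads API, not code from this
     file): the explicit branch above runs when the creator set up its
     attributes along these lines:

       pthread_attr_t attr;
       struct sched_param sp = { .sched_priority = 10 };
       pthread_attr_init(&attr);
       pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
       pthread_attr_setschedpolicy(&attr, SCHED_FIFO);
       pthread_attr_setschedparam(&attr, &sp);

     With PTHREAD_INHERIT_SCHED the creating thread's policy is copied
     instead (see pthread_handle_create). */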
#if !(USE_TLS && HAVE___THREAD) && defined SHARED
  /* Initialize thread-local current locale to point to the global one.
     With __thread support, the variable's initializer takes care of this. */
  __uselocale (LC_GLOBAL_LOCALE);
#endif
  /* Make gdb aware of new thread */
  if (__pthread_threads_debug && __pthread_sig_debug > 0) {
    request.req_thread = self;
    request.req_kind = REQ_DEBUG;
    TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
                                    (char *) &request, sizeof(request)));
    suspend(self);
  }
  /* Run the thread code */
  outcome = self->p_start_args.start_routine(THREAD_GETMEM(self,
                                                           p_start_args.arg));
  /* Exit with the given return value */
  __pthread_do_exit(outcome, CURRENT_STACK_FRAME);
}
static int
__attribute__ ((noreturn))
pthread_start_thread_event(void *arg)
{
  pthread_descr self = (pthread_descr) arg;

#ifdef INIT_THREAD_SELF
  INIT_THREAD_SELF(self, self->p_nr);
#endif
  /* Make sure our pid field is initialized, just in case we get there
     before our father has initialized it. */
  THREAD_SETMEM(self, p_pid, __getpid());
  /* Get the lock the manager will free once all is correctly set up. */
  __pthread_lock (THREAD_GETMEM(self, p_lock), NULL);
  /* Free it immediately. */
  __pthread_unlock (THREAD_GETMEM(self, p_lock));

  /* Continue with the real function. */
  pthread_start_thread (arg);
}
#if defined USE_TLS && !FLOATING_STACKS
# error "TLS can only work with floating stacks"
#endif

#define page_roundup(v,p) ((((size_t) (v)) + (p) - 1) & ~((p) - 1))
static int pthread_allocate_stack(const pthread_attr_t *attr,
                                  pthread_descr default_new_thread,
                                  int pagesize,
                                  char ** out_new_thread,
                                  char ** out_new_thread_bottom,
                                  char ** out_guardaddr,
                                  size_t * out_guardsize,
                                  size_t * out_stacksize)
{
  pthread_descr new_thread;
  char * new_thread_bottom;
  char * guardaddr;
  size_t stacksize, guardsize;
#ifdef USE_TLS
  /* TLS cannot work with fixed thread descriptor addresses. */
  assert (default_new_thread == NULL);
#endif
  if (attr != NULL && attr->__stackaddr_set)
    {
#ifdef _STACK_GROWS_UP
      /* The user provided a stack. */
# ifdef USE_TLS
      /* This value is not needed. */
      new_thread = (pthread_descr) attr->__stackaddr;
      new_thread_bottom = (char *) new_thread;
# else
      new_thread = (pthread_descr) attr->__stackaddr;
      new_thread_bottom = (char *) (new_thread + 1);
# endif
      guardaddr = attr->__stackaddr + attr->__stacksize;
#else
      /* The user provided a stack.  For now we interpret the supplied
         address as 1 + the highest addr. in the stack segment.  If a
         separate register stack is needed, we place it at the low end
         of the segment, relying on the associated stacksize to
         determine the low end of the segment.  This differs from many
         (but not all) other pthreads implementations.  The intent is
         that on machines with a single stack growing toward higher
         addresses, stackaddr would be the lowest address in the stack
         segment, so that it is consistently close to the initial sp
         value. */
# ifdef USE_TLS
      new_thread = (pthread_descr) attr->__stackaddr;
# else
      new_thread =
        (pthread_descr) ((long)(attr->__stackaddr) & -sizeof(void *)) - 1;
# endif
      new_thread_bottom = (char *) attr->__stackaddr - attr->__stacksize;
      guardaddr = new_thread_bottom;
#endif /* _STACK_GROWS_UP */
      guardsize = 0;
      __pthread_nonstandard_stacks = 1;
      /* Clear the thread data structure. */
      memset (new_thread, '\0', sizeof (*new_thread));
      stacksize = attr->__stacksize;
    }
  else
    {
#ifdef NEED_SEPARATE_REGISTER_STACK
      const size_t granularity = 2 * pagesize;
      /* Try to make stacksize/2 a multiple of pagesize */
#else
      const size_t granularity = pagesize;
#endif
      void *map_addr;
      /* Allocate space for stack and thread descriptor at default address */
#if FLOATING_STACKS
      if (attr != NULL)
        {
          guardsize = page_roundup (attr->__guardsize, granularity);
          stacksize = __pthread_max_stacksize - guardsize;
          stacksize = MIN (stacksize,
                           page_roundup (attr->__stacksize, granularity));
        }
      else
        {
          guardsize = granularity;
          stacksize = __pthread_max_stacksize - guardsize;
        }
      map_addr = mmap(NULL, stacksize + guardsize,
                      PROT_READ | PROT_WRITE | PROT_EXEC,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (map_addr == MAP_FAILED)
        /* No more memory available. */
        return -1;
# ifdef NEED_SEPARATE_REGISTER_STACK
      guardaddr = map_addr + stacksize / 2;
      if (guardsize > 0)
        mprotect (guardaddr, guardsize, PROT_NONE);

      new_thread_bottom = (char *) map_addr;
#  ifdef USE_TLS
      new_thread = ((pthread_descr) (new_thread_bottom + stacksize
                                     + guardsize));
#  else
      new_thread = ((pthread_descr) (new_thread_bottom + stacksize
                                     + guardsize)) - 1;
#  endif
440 guardaddr
= map_addr
;
442 mprotect (guardaddr
, guardsize
, PROT_NONE
);
444 new_thread_bottom
= (char *) map_addr
+ guardsize
;
446 new_thread
= ((pthread_descr
) (new_thread_bottom
+ stacksize
));
448 new_thread
= ((pthread_descr
) (new_thread_bottom
+ stacksize
)) - 1;
# elif _STACK_GROWS_UP
      guardaddr = map_addr + stacksize;
      if (guardsize > 0)
        mprotect (guardaddr, guardsize, PROT_NONE);

      new_thread = (pthread_descr) map_addr;
#  ifdef USE_TLS
      new_thread_bottom = (char *) new_thread;
#  else
      new_thread_bottom = (char *) (new_thread + 1);
#  endif
# else
#  error You must define a stack direction
# endif /* Stack direction */
#else /* !FLOATING_STACKS */
      void *res_addr;

      if (attr != NULL)
        {
          guardsize = page_roundup (attr->__guardsize, granularity);
          stacksize = STACK_SIZE - guardsize;
          stacksize = MIN (stacksize,
                           page_roundup (attr->__stacksize, granularity));
        }
      else
        {
          guardsize = granularity;
          stacksize = STACK_SIZE - granularity;
        }
# ifdef NEED_SEPARATE_REGISTER_STACK
      new_thread = default_new_thread;
      new_thread_bottom = (char *) (new_thread + 1) - stacksize - guardsize;
      /* Includes guard area, unlike the normal case.  Use the bottom
         end of the segment as backing store for the register stack.
         Needed on IA64.  In this case, we also map the entire stack at
         once.  According to David Mosberger, that's cheaper.  It also
         avoids the risk of intermittent failures due to other mappings
         in the same region.  The cost is that we might be able to map
         slightly fewer stacks. */
      /* First the main stack: */
      map_addr = (caddr_t)((char *)(new_thread + 1) - stacksize / 2);
      res_addr = mmap(map_addr, stacksize / 2,
                      PROT_READ | PROT_WRITE | PROT_EXEC,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (res_addr != map_addr)
        {
          /* Bad luck, this segment is already mapped. */
          if (res_addr != MAP_FAILED)
            munmap(res_addr, stacksize / 2);
          return -1;
        }
      /* Then the register stack: */
      map_addr = (caddr_t)new_thread_bottom;
      res_addr = mmap(map_addr, stacksize/2,
                      PROT_READ | PROT_WRITE | PROT_EXEC,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (res_addr != map_addr)
        {
          if (res_addr != MAP_FAILED)
            munmap(res_addr, stacksize / 2);
          munmap((caddr_t)((char *)(new_thread + 1) - stacksize/2),
                 stacksize/2);
          return -1;
        }
      guardaddr = new_thread_bottom + stacksize/2;
      /* We leave the guard area in the middle unmapped. */
# else /* !NEED_SEPARATE_REGISTER_STACK */
# ifdef _STACK_GROWS_DOWN
      new_thread = default_new_thread;
      new_thread_bottom = (char *) (new_thread + 1) - stacksize;
      map_addr = new_thread_bottom - guardsize;
      res_addr = mmap(map_addr, stacksize + guardsize,
                      PROT_READ | PROT_WRITE | PROT_EXEC,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (res_addr != map_addr)
        {
          /* Bad luck, this segment is already mapped. */
          if (res_addr != MAP_FAILED)
            munmap (res_addr, stacksize + guardsize);
          return -1;
        }
      /* We managed to get a stack.  Protect the guard area pages if
         necessary. */
      guardaddr = map_addr;
      if (guardsize > 0)
        mprotect (guardaddr, guardsize, PROT_NONE);
# else
      /* The thread description goes at the bottom of this area, and
         the stack starts directly above it. */
      new_thread = (pthread_descr)((unsigned long)default_new_thread
                                   &~ (STACK_SIZE - 1));
      map_addr = mmap(new_thread, stacksize + guardsize,
                      PROT_READ | PROT_WRITE | PROT_EXEC,
                      MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (map_addr == MAP_FAILED)
        return -1;

      new_thread_bottom = map_addr + sizeof(*new_thread);
      guardaddr = map_addr + stacksize;
      if (guardsize > 0)
        mprotect (guardaddr, guardsize, PROT_NONE);

# endif /* stack direction */
# endif /* !NEED_SEPARATE_REGISTER_STACK */
#endif /* !FLOATING_STACKS */
    }
  *out_new_thread = (char *) new_thread;
  *out_new_thread_bottom = new_thread_bottom;
  *out_guardaddr = guardaddr;
  *out_guardsize = guardsize;
#ifdef NEED_SEPARATE_REGISTER_STACK
  *out_stacksize = stacksize / 2;
#else
  *out_stacksize = stacksize;
#endif
  return 0;
}
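
/* Illustrative layout for the common non-floating, downward-growing case
   (addresses increase to the right; a sketch, not authoritative):

     map_addr          new_thread_bottom             new_thread  new_thread+1
        | guard pages  |  stack (grows downward) ...  | descriptor |

   The mmap covers stacksize + guardsize bytes starting at
   new_thread_bottom - guardsize; the guard sits at the low end, where a
   growing stack would run into it, and the pthread_descr occupies the
   top of the segment. */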
static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
                                 void * (*start_routine)(void *), void *arg,
                                 sigset_t * mask, int father_pid,
                                 int report_events,
                                 td_thr_events_t *event_maskp)
{
  int sseg;
  int pid;
  pthread_descr new_thread;
  char *stack_addr;
  char * new_thread_bottom;
  pthread_t new_thread_id;
  char *guardaddr = NULL;
  size_t guardsize = 0, stksize = 0;
  int pagesize = __getpagesize();
#ifdef USE_TLS
  new_thread = _dl_allocate_tls (NULL);
  if (new_thread == NULL)
    return EAGAIN;
#else
  /* Prevent warnings. */
  new_thread = NULL;
#endif
  /* First check whether we have to change the policy and if yes, whether
     we can do this.  Normally this should be done by examining the
     return value of the __sched_setscheduler call in pthread_start_thread
     but this is hard to implement.  FIXME */
  if (attr != NULL && attr->__schedpolicy != SCHED_OTHER && geteuid () != 0)
    return EPERM;
  /* Find a free segment for the thread, and allocate a stack if needed */
  for (sseg = 2; ; sseg++)
    {
      if (sseg >= PTHREAD_THREADS_MAX)
        {
#ifdef USE_TLS
          _dl_deallocate_tls (new_thread, true);
#endif
          return EAGAIN;
        }
      if (__pthread_handles[sseg].h_descr != NULL)
        continue;
      if (pthread_allocate_stack(attr, thread_segment(sseg),
                                 pagesize, &stack_addr, &new_thread_bottom,
                                 &guardaddr, &guardsize, &stksize) == 0)
        {
#ifdef USE_TLS
          new_thread->p_stackaddr = stack_addr;
#else
          new_thread = (pthread_descr) stack_addr;
#endif
          break;
        }
    }
  __pthread_handles_num++;
  /* Allocate new thread identifier */
  pthread_threads_counter += PTHREAD_THREADS_MAX;
  new_thread_id = sseg + pthread_threads_counter;
  /* Initialize the thread descriptor.  Elements which have to be
     initialized to zero already have this value. */
  new_thread->p_header.data.tcb = new_thread;
  new_thread->p_header.data.self = new_thread;
  new_thread->p_tid = new_thread_id;
  new_thread->p_lock = &(__pthread_handles[sseg].h_lock);
  new_thread->p_cancelstate = PTHREAD_CANCEL_ENABLE;
  new_thread->p_canceltype = PTHREAD_CANCEL_DEFERRED;
#if !(USE_TLS && HAVE___THREAD)
  new_thread->p_errnop = &new_thread->p_errno;
  new_thread->p_h_errnop = &new_thread->p_h_errno;
  new_thread->p_resp = &new_thread->p_res;
#endif
  new_thread->p_guardaddr = guardaddr;
  new_thread->p_guardsize = guardsize;
  new_thread->p_nr = sseg;
  new_thread->p_inheritsched = attr ? attr->__inheritsched : 0;
  new_thread->p_alloca_cutoff = stksize / 4 > __MAX_ALLOCA_CUTOFF
                                ? __MAX_ALLOCA_CUTOFF : stksize / 4;
  /* Initialize the thread handle */
  __pthread_init_lock(&__pthread_handles[sseg].h_lock);
  __pthread_handles[sseg].h_descr = new_thread;
  __pthread_handles[sseg].h_bottom = new_thread_bottom;
  /* Determine scheduling parameters for the thread */
  new_thread->p_start_args.schedpolicy = -1;
  if (attr != NULL) {
    new_thread->p_detached = attr->__detachstate;
    new_thread->p_userstack = attr->__stackaddr_set;
    switch(attr->__inheritsched) {
    case PTHREAD_EXPLICIT_SCHED:
      new_thread->p_start_args.schedpolicy = attr->__schedpolicy;
      memcpy (&new_thread->p_start_args.schedparam, &attr->__schedparam,
              sizeof (struct sched_param));
      break;
    case PTHREAD_INHERIT_SCHED:
      new_thread->p_start_args.schedpolicy = __sched_getscheduler(father_pid);
      __sched_getparam(father_pid, &new_thread->p_start_args.schedparam);
      break;
    }
    new_thread->p_priority =
      new_thread->p_start_args.schedparam.sched_priority;
  }
  /* Finish setting up arguments to pthread_start_thread */
  new_thread->p_start_args.start_routine = start_routine;
  new_thread->p_start_args.arg = arg;
  new_thread->p_start_args.mask = *mask;
  /* Make the new thread ID available already now.  If any of the later
     functions fail we return an error value and the caller must not use
     the stored thread ID. */
  *thread = new_thread_id;
  /* Raise priority of thread manager if needed */
  __pthread_manager_adjust_prio(new_thread->p_priority);
  /* Do the cloning.  We have to use two different functions depending
     on whether we are debugging or not. */
  pid = 0;     /* Note that the thread never can have PID zero. */
  if (report_events)
    {
      /* See whether the TD_CREATE event bit is set in any of the
         masks. */
      int idx = __td_eventword (TD_CREATE);
      uint32_t mask = __td_eventmask (TD_CREATE);

      if ((mask & (__pthread_threads_events.event_bits[idx]
                   | event_maskp->event_bits[idx])) != 0)
        {
          /* Lock the mutex the child will use now so that it will stop. */
          __pthread_lock(new_thread->p_lock, NULL);
          /* We have to report this event. */
#ifdef NEED_SEPARATE_REGISTER_STACK
          /* Perhaps this version should be used on all platforms.  But
             this requires that __clone2 be uniformly supported
             everywhere.

             And there is some argument for changing the __clone2
             interface to pass sp and bsp instead, making it more IA64
             specific, but allowing stacks to grow outward from each
             other, to get less paging and fewer mmaps. */
          pid = __clone2(pthread_start_thread_event,
                         (void **)new_thread_bottom,
                         (char *)new_thread - new_thread_bottom,
                         CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
                         __pthread_sig_cancel, new_thread);
#elif _STACK_GROWS_UP
          pid = __clone(pthread_start_thread_event,
                        (void **) new_thread_bottom,
                        CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
                        __pthread_sig_cancel, new_thread);
#else
          pid = __clone(pthread_start_thread_event, (void **) new_thread,
                        CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
                        __pthread_sig_cancel, new_thread);
#endif
          if (pid != -1)
            {
              /* Now fill in the information about the new thread in
                 the newly created thread's data structure.  We cannot let
                 the new thread do this since we don't know whether it was
                 already scheduled when we send the event. */
              new_thread->p_eventbuf.eventdata = new_thread;
              new_thread->p_eventbuf.eventnum = TD_CREATE;
              __pthread_last_event = new_thread;

              /* We have to set the PID here since the callback function
                 in the debug library will need it and we cannot guarantee
                 the child got scheduled before the debugger. */
              new_thread->p_pid = pid;

              /* Now call the function which signals the event. */
              __linuxthreads_create_event ();

              /* Now restart the thread. */
              __pthread_unlock(new_thread->p_lock);
            }
        }
    }
  if (pid == 0)
    {
#ifdef NEED_SEPARATE_REGISTER_STACK
      pid = __clone2(pthread_start_thread,
                     (void **)new_thread_bottom,
                     (char *)stack_addr - new_thread_bottom,
                     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
                     __pthread_sig_cancel, new_thread);
#elif _STACK_GROWS_UP
      pid = __clone(pthread_start_thread, (void *) new_thread_bottom,
                    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
                    __pthread_sig_cancel, new_thread);
#else
      pid = __clone(pthread_start_thread, stack_addr,
                    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
                    __pthread_sig_cancel, new_thread);
#endif /* !NEED_SEPARATE_REGISTER_STACK */
    }
  /* Check if cloning succeeded */
  if (pid == -1) {
    /* Free the stack if we allocated it */
    if (attr == NULL || !attr->__stackaddr_set)
      {
#ifdef NEED_SEPARATE_REGISTER_STACK
        size_t stacksize = ((char *)(new_thread->p_guardaddr)
                            - new_thread_bottom);
        munmap((caddr_t)new_thread_bottom,
               2 * stacksize + new_thread->p_guardsize);
#elif _STACK_GROWS_UP
# ifdef USE_TLS
        size_t stacksize = guardaddr - stack_addr;
        munmap(stack_addr, stacksize + guardsize);
# else
        size_t stacksize = guardaddr - (char *)new_thread;
        munmap(new_thread, stacksize + guardsize);
# endif
#else
# ifdef USE_TLS
        size_t stacksize = stack_addr - new_thread_bottom;
# else
        size_t stacksize = (char *)(new_thread+1) - new_thread_bottom;
# endif
        munmap(new_thread_bottom - guardsize, guardsize + stacksize);
#endif
      }
#ifdef USE_TLS
    _dl_deallocate_tls (new_thread, true);
#endif
    __pthread_handles[sseg].h_descr = NULL;
    __pthread_handles[sseg].h_bottom = NULL;
    __pthread_handles_num--;
    return errno;
  }
  /* Insert new thread in doubly linked list of active threads */
  new_thread->p_prevlive = __pthread_main_thread;
  new_thread->p_nextlive = __pthread_main_thread->p_nextlive;
  __pthread_main_thread->p_nextlive->p_prevlive = new_thread;
  __pthread_main_thread->p_nextlive = new_thread;
  /* Set pid field of the new thread, in case we get there before the
     child starts. */
  new_thread->p_pid = pid;
  return 0;
}
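
/* Note on the clone flags used above (sketch): CLONE_VM shares the
   address space, CLONE_FS the cwd/umask, CLONE_FILES the descriptor
   table and CLONE_SIGHAND the signal handlers, which together give the
   child thread-like semantics.  The low byte of the flags word is the
   signal the child delivers to its parent on termination; LinuxThreads
   passes __pthread_sig_cancel there instead of SIGCHLD, which is why
   pthread_reap_children() must wait with the __WCLONE flag. */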
/* Try to free the resources of a thread when requested by pthread_join
   or pthread_detach on a terminated thread. */

static void pthread_free(pthread_descr th)
{
  pthread_handle handle;
  pthread_readlock_info *iter, *next;

  ASSERT(th->p_exited);
  /* Make the handle invalid */
  handle = thread_handle(th->p_tid);
  __pthread_lock(&handle->h_lock, NULL);
  handle->h_descr = NULL;
  handle->h_bottom = (char *)(-1L);
  __pthread_unlock(&handle->h_lock);
#ifdef FREE_THREAD
  FREE_THREAD(th, th->p_nr);
#endif
  /* One fewer threads in __pthread_handles */
  __pthread_handles_num--;
  /* Destroy read lock list, and list of free read lock structures.
     If the former is not empty, it means the thread exited while
     holding read locks! */

  for (iter = th->p_readlock_list; iter != NULL; iter = next)
    {
      next = iter->pr_next;
      free(iter);
    }

  for (iter = th->p_readlock_free; iter != NULL; iter = next)
    {
      next = iter->pr_next;
      free(iter);
    }
  /* If initial thread, nothing to free */
  if (!th->p_userstack)
    {
      size_t guardsize = th->p_guardsize;
      /* Free the stack and thread descriptor area */
      char *guardaddr = th->p_guardaddr;
#ifdef _STACK_GROWS_UP
# ifdef USE_TLS
      size_t stacksize = guardaddr - th->p_stackaddr;
# else
      size_t stacksize = guardaddr - (char *)th;
# endif
      guardaddr = (char *)th;
#else
      /* Guardaddr is always set, even if guardsize is 0.  This allows
         us to compute everything else. */
# ifdef USE_TLS
      size_t stacksize = th->p_stackaddr - guardaddr - guardsize;
# else
      size_t stacksize = (char *)(th+1) - guardaddr - guardsize;
# endif
# ifdef NEED_SEPARATE_REGISTER_STACK
      /* Take account of the register stack, which is below guardaddr. */
      guardaddr -= stacksize;
      stacksize *= 2;
# endif
#endif
      /* Unmap the stack. */
      munmap(guardaddr, stacksize + guardsize);

#ifdef USE_TLS
      _dl_deallocate_tls (th, true);
#endif
    }
}
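
/* Why this munmap matches the allocation (sketch): in the default
   downward-growing case pthread_allocate_stack mapped

     mmap(new_thread_bottom - guardsize, stacksize + guardsize, ...)

   and that base address was recorded in p_guardaddr, so the
   munmap(guardaddr, stacksize + guardsize) above releases exactly the
   region that was mapped; on IA64 the register-stack adjustment doubles
   stacksize to cover both stacks. */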
/* Handle threads that have exited */

static void pthread_exited(pid_t pid)
{
  pthread_descr th;
  int detached;

  /* Find thread with that pid */
  for (th = __pthread_main_thread->p_nextlive;
       th != __pthread_main_thread;
       th = th->p_nextlive) {
    if (th->p_pid == pid) {
      /* Remove thread from list of active threads */
      th->p_nextlive->p_prevlive = th->p_prevlive;
      th->p_prevlive->p_nextlive = th->p_nextlive;
      /* Mark thread as exited, and if detached, free its resources */
      __pthread_lock(th->p_lock, NULL);
      th->p_exited = 1;
      /* If we have to signal this event do it now. */
      if (th->p_report_events)
        {
          /* See whether TD_REAP is in any of the masks. */
          int idx = __td_eventword (TD_REAP);
          uint32_t mask = __td_eventmask (TD_REAP);

          if ((mask & (__pthread_threads_events.event_bits[idx]
                       | th->p_eventbuf.eventmask.event_bits[idx])) != 0)
            {
              /* Yep, we have to signal the reapage. */
              th->p_eventbuf.eventnum = TD_REAP;
              th->p_eventbuf.eventdata = th;
              __pthread_last_event = th;

              /* Now call the function to signal the event. */
              __linuxthreads_reap_event();
            }
        }
      detached = th->p_detached;
      __pthread_unlock(th->p_lock);
      if (detached)
        pthread_free(th);
      break;
    }
  }
  /* If all threads have exited and the main thread is pending on a
     pthread_exit, wake up the main thread and terminate ourselves. */
  if (main_thread_exiting &&
      __pthread_main_thread->p_nextlive == __pthread_main_thread) {
    restart(__pthread_main_thread);
    /* Same logic as REQ_MAIN_THREAD_EXIT. */
  }
}
static void pthread_reap_children(void)
{
  pid_t pid;
  int status;

  while ((pid = __libc_waitpid(-1, &status, WNOHANG | __WCLONE)) > 0) {
    pthread_exited(pid);
    if (WIFSIGNALED(status)) {
      /* If a thread died due to a signal, send the same signal to
         all other threads, including the main thread. */
      pthread_kill_all_threads(WTERMSIG(status), 1);
      _exit(0);
    }
  }
}
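
/* Usage note (illustrative): __WCLONE is required because these
   children were cloned with __pthread_sig_cancel rather than SIGCHLD as
   their termination signal, and a plain waitpid() only reports
   SIGCHLD-terminating children.  WNOHANG keeps the manager from
   blocking here, since this runs from the manager's poll loop. */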
/* Try to free the resources of a thread when requested by pthread_join
   or pthread_detach on a terminated thread. */

static void pthread_handle_free(pthread_t th_id)
{
  pthread_handle handle = thread_handle(th_id);
  pthread_descr th;

  __pthread_lock(&handle->h_lock, NULL);
  if (nonexisting_handle(handle, th_id)) {
    /* pthread_reap_children has deallocated the thread already,
       nothing needs to be done */
    __pthread_unlock(&handle->h_lock);
    return;
  }
  th = handle->h_descr;
  if (th->p_exited) {
    __pthread_unlock(&handle->h_lock);
    pthread_free(th);
  } else {
    /* The Unix process of the thread is still running.
       Mark the thread as detached so that the thread manager will
       deallocate its resources when the Unix process exits. */
    th->p_detached = 1;
    __pthread_unlock(&handle->h_lock);
  }
}
/* Send a signal to all running threads */

static void pthread_kill_all_threads(int sig, int main_thread_also)
{
  pthread_descr th;
  for (th = __pthread_main_thread->p_nextlive;
       th != __pthread_main_thread;
       th = th->p_nextlive) {
    kill(th->p_pid, sig);
  }
  if (main_thread_also) {
    kill(__pthread_main_thread->p_pid, sig);
  }
}
static void pthread_for_each_thread(void *arg,
                                    void (*fn)(void *, pthread_descr))
{
  pthread_descr th;

  for (th = __pthread_main_thread->p_nextlive;
       th != __pthread_main_thread;
       th = th->p_nextlive) {
    fn(arg, th);
  }

  fn(arg, __pthread_main_thread);
}
/* Process-wide exit() */

static void pthread_handle_exit(pthread_descr issuing_thread, int exitcode)
{
  pthread_descr th;

  __pthread_exit_requested = 1;
  __pthread_exit_code = exitcode;
  /* A forced asynchronous cancellation follows.  Make sure we won't
     get stuck later in the main thread with a system lock being held
     by one of the cancelled threads.  Ideally one would use the same
     code as in pthread_atfork(), but we can't distinguish system and
     user handlers there. */
  __flockfilelist();
  /* Send the CANCEL signal to all running threads, including the main
     thread, but excluding the thread from which the exit request originated
     (that thread must complete the exit, e.g. calling atexit functions
     and flushing stdio buffers). */
  for (th = issuing_thread->p_nextlive;
       th != issuing_thread;
       th = th->p_nextlive) {
    kill(th->p_pid, __pthread_sig_cancel);
  }
  /* Now, wait for all these threads, so that they don't become zombies
     and their times are properly added to the thread manager's times. */
  for (th = issuing_thread->p_nextlive;
       th != issuing_thread;
       th = th->p_nextlive) {
    waitpid(th->p_pid, NULL, __WCLONE);
  }
  __fresetlockfiles();
  restart(issuing_thread);
  _exit(0);
}
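
/* Ordering sketch for a process-wide exit(): the issuing thread is left
   running so it can execute atexit handlers and flush stdio buffers;
   every other thread is cancelled with __pthread_sig_cancel, then
   reaped with waitpid(..., __WCLONE) so that no zombies remain and
   their CPU times are credited to the manager; only then is the issuing
   thread restarted, and the manager itself exits. */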
/* Handler for __pthread_sig_cancel in thread manager thread */

void __pthread_manager_sighandler(int sig)
{
  int kick_manager = terminated_children == 0 && main_thread_exiting;
  terminated_children = 1;
  /* If the main thread is terminating, kick the thread manager loop
     each time some threads terminate.  This eliminates a two second
     shutdown delay caused by the thread manager sleeping in the
     call to __poll().  Instead, the thread manager is kicked into
     action, reaps the outstanding threads and resumes the main thread
     so that it can complete the shutdown. */

  if (kick_manager) {
    struct pthread_request request;
    request.req_thread = 0;
    request.req_kind = REQ_KICK;
    TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
                                    (char *) &request, sizeof(request)));
  }
}
/* Adjust priority of thread manager so that it always runs at a priority
   higher than all threads */

void __pthread_manager_adjust_prio(int thread_prio)
{
  struct sched_param param;

  if (thread_prio <= manager_thread->p_priority) return;
  param.sched_priority =
    thread_prio < __sched_get_priority_max(SCHED_FIFO)
    ? thread_prio + 1 : thread_prio;
  __sched_setscheduler(manager_thread->p_pid, SCHED_FIFO, &param);
  manager_thread->p_priority = thread_prio;
}
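
/* Worked example (illustrative): with SCHED_FIFO priorities 1..99, a
   thread at priority 10 gets the manager scheduled at 11, so the
   manager can always preempt the threads it serves; a thread already at
   the maximum leaves the manager at 99.  Note that p_priority records
   the client priority (10 here), so later requests at the same or lower
   priority return early without a system call. */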