/* Linuxthreads - a simple clone()-based implementation of Posix */
/* threads for Linux. */
/* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr) */

/* This program is free software; you can redistribute it and/or */
/* modify it under the terms of the GNU Library General Public License */
/* as published by the Free Software Foundation; either version 2 */
/* of the License, or (at your option) any later version. */

/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU Library General Public License for more details. */

/* The "thread manager" thread: manages creation and termination of threads */
#include <sys/poll.h>   /* for poll */
#include <sys/mman.h>   /* for mmap */
#include <sys/param.h>
#include <sys/wait.h>   /* for waitpid macros */
#include <locale.h>     /* for __uselocale */
#include <resolv.h>     /* for __resp */

#include "internals.h"
#include "semaphore.h"
/* For debugging purposes put the maximum number of threads in a variable. */
const int __linuxthreads_pthread_threads_max = PTHREAD_THREADS_MAX;
/* Indicate whether at least one thread has a user-defined stack (if 1),
   or if all threads have stacks supplied by LinuxThreads (if 0). */
int __pthread_nonstandard_stacks;
/* Number of active entries in __pthread_handles (used by gdb) */
volatile int __pthread_handles_num = 2;
/* Whether to use debugger additional actions for thread creation
   (set to 1 by gdb) */
volatile int __pthread_threads_debug;
/* Globally enabled events. */
volatile td_thr_events_t __pthread_threads_events;
/* Pointer to thread descriptor with last event. */
volatile pthread_descr __pthread_last_event;
static pthread_descr manager_thread;
/* Mapping from stack segment to thread descriptor. */
/* Stack segment numbers are also indices into the __pthread_handles array. */
/* Stack segment number 0 is reserved for the initial thread. */
#if FLOATING_STACKS
# define thread_segment(seq) NULL
#else
static inline pthread_descr thread_segment(int seg)
{
# ifdef _STACK_GROWS_UP
  return (pthread_descr)(THREAD_STACK_START_ADDRESS + (seg - 1) * STACK_SIZE)
         + 1;
# else
  return (pthread_descr)(THREAD_STACK_START_ADDRESS - (seg - 1) * STACK_SIZE)
         - 1;
# endif
}
#endif
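
/* Illustrative example (assumed values, not from this file): with
   STACK_SIZE == 2 MB and downward-growing stacks, thread_segment(2)
   is one STACK_SIZE slot below THREAD_STACK_START_ADDRESS, minus one
   descriptor. Each segment thus owns a fixed STACK_SIZE slice of the
   address space, so segment numbers and descriptor addresses map to
   each other deterministically. */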
/* Flag set in signal handler to record child termination */

static volatile int terminated_children;
/* Flag set when the initial thread is blocked on pthread_exit waiting
   for all other threads to terminate */

static int main_thread_exiting;
/* Counter used to generate unique thread identifiers.
   A thread identifier is pthread_threads_counter + segment. */

static pthread_t pthread_threads_counter;
/* Forward declarations */

static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
                                 void * (*start_routine)(void *), void *arg,
                                 sigset_t *mask, int father_pid,
                                 int report_events,
                                 td_thr_events_t *event_maskp);
static void pthread_handle_free(pthread_t th_id);
static void pthread_handle_exit(pthread_descr issuing_thread, int exitcode)
     __attribute__ ((noreturn));
static void pthread_reap_children(void);
static void pthread_kill_all_threads(int sig, int main_thread_also);
static void pthread_for_each_thread(void *arg,
                                    void (*fn)(void *, pthread_descr));
/* The server thread managing requests for thread creation and termination */

int
__attribute__ ((noreturn))
__pthread_manager(void *arg)
{
  pthread_descr self = manager_thread = arg;
  int reqfd = __pthread_manager_reader;
  struct pollfd ufd;
  sigset_t manager_mask;
  int n;
  struct pthread_request request;
  /* If we have special thread_self processing, initialize it. */
#ifdef INIT_THREAD_SELF
  INIT_THREAD_SELF(self, 1);
#endif
#if !(USE_TLS && HAVE___THREAD)
  /* Set the error variables. */
  self->p_errnop = &self->p_errno;
  self->p_h_errnop = &self->p_h_errno;
#endif
  /* Block all signals except __pthread_sig_cancel and SIGTRAP */
  sigfillset(&manager_mask);
  sigdelset(&manager_mask, __pthread_sig_cancel); /* for thread termination */
  sigdelset(&manager_mask, SIGTRAP);              /* for debugging purposes */
  if (__pthread_threads_debug && __pthread_sig_debug > 0)
    sigdelset(&manager_mask, __pthread_sig_debug);
  sigprocmask(SIG_SETMASK, &manager_mask, NULL);
  /* Raise our priority to match that of main thread */
  __pthread_manager_adjust_prio(__pthread_main_thread->p_priority);
  /* Synchronize debugging of the thread manager */
  n = TEMP_FAILURE_RETRY(__libc_read(reqfd, (char *)&request,
                                     sizeof(request)));
  ASSERT(n == sizeof(request) && request.req_kind == REQ_DEBUG);
  ufd.fd = reqfd;
  ufd.events = POLLIN;
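  /* Editorial summary (inferred from the code below): the server loop
     multiplexes three event sources: the request pipe, polled with a
     2-second timeout; orphan detection (getppid() == 1 means the initial
     thread is gone and the manager has been reparented to init); and the
     terminated_children flag set asynchronously by
     __pthread_manager_sighandler. */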
  /* Enter server loop */
  while(1) {
    n = __poll(&ufd, 1, 2000);
    /* Check for termination of the main thread */
    if (getppid() == 1) {
      pthread_kill_all_threads(SIGKILL, 0);
      _exit(0);
    }
    /* Check for dead children */
    if (terminated_children) {
      terminated_children = 0;
      pthread_reap_children();
    }
    /* Read and execute request */
    if (n == 1 && (ufd.revents & POLLIN)) {
      n = TEMP_FAILURE_RETRY(__libc_read(reqfd, (char *)&request,
                                         sizeof(request)));
#ifdef DEBUG
      if (n < 0) {
        char d[64];
        write(STDERR_FILENO, d, snprintf(d, sizeof(d), "*** read err %m\n"));
      } else if (n != sizeof(request)) {
        write(STDERR_FILENO, "*** short read in manager\n", 26);
      }
#endif
      switch(request.req_kind) {
      case REQ_CREATE:
        request.req_thread->p_retcode =
          pthread_handle_create((pthread_t *) &request.req_thread->p_retval,
                                request.req_args.create.attr,
                                request.req_args.create.fn,
                                request.req_args.create.arg,
                                &request.req_args.create.mask,
                                request.req_thread->p_pid,
                                request.req_thread->p_report_events,
                                &request.req_thread->p_eventbuf.eventmask);
        restart(request.req_thread);
        break;
      case REQ_FREE:
        pthread_handle_free(request.req_args.free.thread_id);
        break;
      case REQ_PROCESS_EXIT:
        pthread_handle_exit(request.req_thread,
                            request.req_args.exit.code);
        /* Not reached: pthread_handle_exit does not return. */
        break;
      case REQ_MAIN_THREAD_EXIT:
        main_thread_exiting = 1;
        /* Reap children in case all other threads died and the signal
           handler went off before we set main_thread_exiting to 1, and
           therefore did not do REQ_KICK. */
        pthread_reap_children();

        if (__pthread_main_thread->p_nextlive == __pthread_main_thread) {
          restart(__pthread_main_thread);
          /* The main thread will now call exit() which will trigger an
             __on_exit handler, which in turn will send REQ_PROCESS_EXIT
             to the thread manager. That is how the manager eventually
             terminates from this loop. */
        }
        break;
      case REQ_POST:
        __new_sem_post(request.req_args.post);
        break;
      case REQ_DEBUG:
        /* Make gdb aware of the new thread; gdb will restart the new
           thread when it is ready to handle it. */
        if (__pthread_threads_debug && __pthread_sig_debug > 0)
          raise(__pthread_sig_debug);
        break;
      case REQ_KICK:
        /* This is just a prod to get the manager to reap some
           threads right away, avoiding a potential delay at shutdown. */
        break;
      case REQ_FOR_EACH_THREAD:
        pthread_for_each_thread(request.req_args.for_each.arg,
                                request.req_args.for_each.fn);
        restart(request.req_thread);
        break;
      }
    }
  }
}
int __pthread_manager_event(void *arg)
{
  pthread_descr self = arg;

  /* If we have special thread_self processing, initialize it. */
#ifdef INIT_THREAD_SELF
  INIT_THREAD_SELF(self, 1);
#endif

  /* Get the lock the manager will free once all is correctly set up. */
  __pthread_lock (THREAD_GETMEM(self, p_lock), NULL);
  /* Free it immediately. */
  __pthread_unlock (THREAD_GETMEM(self, p_lock));

  return __pthread_manager(arg);
}
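
/* Editorial note: the lock/unlock pair above is a handshake, not mutual
   exclusion. The creating side acquires p_lock before cloning and releases
   it only once the descriptor is fully initialized, so blocking on the lock
   here guarantees setup has completed before the manager runs. The same
   idiom appears in pthread_start_thread_event below. */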
/* Process creation */

static int
__attribute__ ((noreturn))
pthread_start_thread(void *arg)
{
  pthread_descr self = (pthread_descr) arg;
  struct pthread_request request;
  void * outcome;
#if HP_TIMING_AVAIL
  hp_timing_t tmpclock;
#endif
  /* Initialize special thread_self processing, if any. */
#ifdef INIT_THREAD_SELF
  INIT_THREAD_SELF(self, self->p_nr);
#endif
#if HP_TIMING_AVAIL
  HP_TIMING_NOW (tmpclock);
  THREAD_SETMEM (self, p_cpuclock_offset, tmpclock);
#endif
  /* Make sure our pid field is initialized, just in case we get here
     before our father has initialized it. */
  THREAD_SETMEM(self, p_pid, __getpid());
  /* Initial signal mask is that of the creating thread. (Otherwise,
     we'd just inherit the mask of the thread manager.) */
  sigprocmask(SIG_SETMASK, &self->p_start_args.mask, NULL);
  /* Set the scheduling policy and priority for the new thread, if needed */
  if (THREAD_GETMEM(self, p_start_args.schedpolicy) >= 0)
    /* Explicit scheduling attributes were provided: apply them */
    __sched_setscheduler(THREAD_GETMEM(self, p_pid),
                         THREAD_GETMEM(self, p_start_args.schedpolicy),
                         &self->p_start_args.schedparam);
  else if (manager_thread->p_priority > 0)
    /* Default scheduling required, but thread manager runs in realtime
       scheduling: switch new thread to SCHED_OTHER policy */
    {
      struct sched_param default_params;
      default_params.sched_priority = 0;
      __sched_setscheduler(THREAD_GETMEM(self, p_pid),
                           SCHED_OTHER, &default_params);
    }
#if !(USE_TLS && HAVE___THREAD)
  /* Initialize the thread-local current locale to point to the global one.
     With __thread support, the variable's initializer takes care of this. */
  __uselocale (LC_GLOBAL_LOCALE);

  /* Initialize __resp. */
  __resp = &self->p_res;
#endif
  /* Make gdb aware of new thread */
  if (__pthread_threads_debug && __pthread_sig_debug > 0) {
    request.req_thread = self;
    request.req_kind = REQ_DEBUG;
    TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
                                    (char *) &request, sizeof(request)));
    suspend(self);
  }
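  /* Editorial note: suspend(self) parks the new thread until the
     manager, having handled this REQ_DEBUG request and raised
     __pthread_sig_debug for gdb, restarts it, so the thread does not
     enter user code while the debugger is still attaching. */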
  /* Run the thread code */
  outcome = self->p_start_args.start_routine(THREAD_GETMEM(self,
                                                           p_start_args.arg));
  /* Exit with the given return value */
  __pthread_do_exit(outcome, CURRENT_STACK_FRAME);
}
static int
__attribute__ ((noreturn))
pthread_start_thread_event(void *arg)
{
  pthread_descr self = (pthread_descr) arg;

#ifdef INIT_THREAD_SELF
  INIT_THREAD_SELF(self, self->p_nr);
#endif
  /* Make sure our pid field is initialized, just in case we get here
     before our father has initialized it. */
  THREAD_SETMEM(self, p_pid, __getpid());
  /* Get the lock the manager will free once all is correctly set up. */
  __pthread_lock (THREAD_GETMEM(self, p_lock), NULL);
  /* Free it immediately. */
  __pthread_unlock (THREAD_GETMEM(self, p_lock));

  /* Continue with the real function. */
  pthread_start_thread (arg);
}
#if defined USE_TLS && !FLOATING_STACKS
# error "TLS can only work with floating stacks"
#endif
static int pthread_allocate_stack(const pthread_attr_t *attr,
                                  pthread_descr default_new_thread,
                                  int pagesize,
                                  char ** out_new_thread,
                                  char ** out_new_thread_bottom,
                                  char ** out_guardaddr,
                                  size_t * out_guardsize,
                                  size_t * out_stacksize)
{
  pthread_descr new_thread;
  char * new_thread_bottom;
  char * guardaddr;
  size_t stacksize, guardsize;
#ifdef USE_TLS
  /* TLS cannot work with fixed thread descriptor addresses. */
  assert (default_new_thread == NULL);
#endif
  if (attr != NULL && attr->__stackaddr_set)
    {
#ifdef _STACK_GROWS_UP
      /* The user provided a stack. */
# ifdef USE_TLS
      /* This value is not needed. */
      new_thread = (pthread_descr) attr->__stackaddr;
      new_thread_bottom = (char *) new_thread;
# else
      new_thread = (pthread_descr) attr->__stackaddr;
      new_thread_bottom = (char *) (new_thread + 1);
# endif
      guardaddr = attr->__stackaddr + attr->__stacksize;
      guardsize = 0;
#else
      /* The user provided a stack. For now we interpret the supplied
         address as 1 + the highest addr. in the stack segment. If a
         separate register stack is needed, we place it at the low end
         of the segment, relying on the associated stacksize to
         determine the low end of the segment. This differs from many
         (but not all) other pthreads implementations. The intent is
         that on machines with a single stack growing toward higher
         addresses, stackaddr would be the lowest address in the stack
         segment, so that it is consistently close to the initial sp
         value. */
# ifdef USE_TLS
      new_thread = (pthread_descr) attr->__stackaddr;
# else
      new_thread =
        (pthread_descr) ((long)(attr->__stackaddr) & -sizeof(void *)) - 1;
# endif
      new_thread_bottom = (char *) attr->__stackaddr - attr->__stacksize;
      guardaddr = new_thread_bottom;
      guardsize = 0;
#endif
#ifndef THREAD_SELF
      __pthread_nonstandard_stacks = 1;
#endif
#ifndef USE_TLS
      /* Clear the thread data structure. */
      memset (new_thread, '\0', sizeof (*new_thread));
#endif
      stacksize = attr->__stacksize;
    }
  else
    {
#ifdef NEED_SEPARATE_REGISTER_STACK
      const size_t granularity = 2 * pagesize;
      /* Try to make stacksize/2 a multiple of pagesize */
#else
      const size_t granularity = pagesize;
#endif
      void *map_addr;
      /* Allocate space for stack and thread descriptor at default address */
#if FLOATING_STACKS
      if (attr != NULL)
        {
          guardsize = page_roundup (attr->__guardsize, granularity);
          stacksize = __pthread_max_stacksize - guardsize;
          stacksize = MIN (stacksize,
                           page_roundup (attr->__stacksize, granularity));
        }
      else
        {
          guardsize = granularity;
          stacksize = __pthread_max_stacksize - guardsize;
        }
      map_addr = mmap(NULL, stacksize + guardsize,
                      PROT_READ | PROT_WRITE | PROT_EXEC,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (map_addr == MAP_FAILED)
        /* No more memory available. */
        return -1;
# ifdef NEED_SEPARATE_REGISTER_STACK
      guardaddr = map_addr + stacksize / 2;
      if (guardsize > 0)
        mprotect (guardaddr, guardsize, PROT_NONE);

      new_thread_bottom = (char *) map_addr;
#  ifdef USE_TLS
      new_thread = ((pthread_descr) (new_thread_bottom + stacksize
                                     + guardsize));
#  else
      new_thread = ((pthread_descr) (new_thread_bottom + stacksize
                                     + guardsize)) - 1;
#  endif
# elif _STACK_GROWS_DOWN
      guardaddr = map_addr;
      if (guardsize > 0)
        mprotect (guardaddr, guardsize, PROT_NONE);

      new_thread_bottom = (char *) map_addr + guardsize;
#  ifdef USE_TLS
      new_thread = ((pthread_descr) (new_thread_bottom + stacksize));
#  else
      new_thread = ((pthread_descr) (new_thread_bottom + stacksize)) - 1;
#  endif
# elif _STACK_GROWS_UP
      guardaddr = map_addr + stacksize;
      if (guardsize > 0)
        mprotect (guardaddr, guardsize, PROT_NONE);

      new_thread = (pthread_descr) map_addr;
#  ifdef USE_TLS
      new_thread_bottom = (char *) new_thread;
#  else
      new_thread_bottom = (char *) (new_thread + 1);
#  endif
# else
#  error You must define a stack direction
# endif /* Stack direction */
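
      /* Editorial sketch (downward-growing case) of the mapping laid
         out above, low addresses on the left:

             guard (PROT_NONE) | usable stack ... | pthread_descr

         The descriptor lives at the top of the mapping and the stack
         grows down toward the guard, so an overflow hits PROT_NONE
         pages and faults instead of corrupting a neighbouring mapping. */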
#else /* !FLOATING_STACKS */
# if defined NEED_SEPARATE_REGISTER_STACK || defined _STACK_GROWS_DOWN
      caddr_t res_addr;  /* result of the fixed-address mmap probes below */
# endif

      if (attr != NULL)
        {
          guardsize = page_roundup (attr->__guardsize, granularity);
          stacksize = STACK_SIZE - guardsize;
          stacksize = MIN (stacksize,
                           page_roundup (attr->__stacksize, granularity));
        }
      else
        {
          guardsize = granularity;
          stacksize = STACK_SIZE - granularity;
        }
# ifdef NEED_SEPARATE_REGISTER_STACK
      new_thread = default_new_thread;
      new_thread_bottom = (char *) (new_thread + 1) - stacksize - guardsize;

      /* Includes guard area, unlike the normal case. Use the bottom
         end of the segment as backing store for the register stack.
         Needed on IA64. In this case, we also map the entire stack at
         once. According to David Mosberger, that's cheaper. It also
         avoids the risk of intermittent failures due to other mappings
         in the same region. The cost is that we might be able to map
         slightly fewer stacks. */

      /* First the main stack: */
      map_addr = (caddr_t)((char *)(new_thread + 1) - stacksize / 2);
      res_addr = mmap(map_addr, stacksize / 2,
                      PROT_READ | PROT_WRITE | PROT_EXEC,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (res_addr != map_addr)
        {
          /* Bad luck, this segment is already mapped. */
          if (res_addr != MAP_FAILED)
            munmap(res_addr, stacksize / 2);
          return -1;
        }
      /* Then the register stack: */
      map_addr = (caddr_t)new_thread_bottom;
      res_addr = mmap(map_addr, stacksize/2,
                      PROT_READ | PROT_WRITE | PROT_EXEC,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (res_addr != map_addr)
        {
          if (res_addr != MAP_FAILED)
            munmap(res_addr, stacksize / 2);
          munmap((caddr_t)((char *)(new_thread + 1) - stacksize/2),
                 stacksize/2);
          return -1;
        }

      guardaddr = new_thread_bottom + stacksize/2;
      /* We leave the guard area in the middle unmapped. */
# else /* !NEED_SEPARATE_REGISTER_STACK */
#  ifdef _STACK_GROWS_DOWN
      new_thread = default_new_thread;
      new_thread_bottom = (char *) (new_thread + 1) - stacksize;
      map_addr = new_thread_bottom - guardsize;
      res_addr = mmap(map_addr, stacksize + guardsize,
                      PROT_READ | PROT_WRITE | PROT_EXEC,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (res_addr != map_addr)
        {
          /* Bad luck, this segment is already mapped. */
          if (res_addr != MAP_FAILED)
            munmap (res_addr, stacksize + guardsize);
          return -1;
        }
      /* We managed to get a stack. Protect the guard area pages if
         necessary. */
      guardaddr = map_addr;
      if (guardsize > 0)
        mprotect (guardaddr, guardsize, PROT_NONE);
#  else /* _STACK_GROWS_UP */
      /* The thread description goes at the bottom of this area, and
         the stack starts directly above it. */
      new_thread = (pthread_descr)
        ((unsigned long)default_new_thread &~ (STACK_SIZE - 1));
      map_addr = mmap(new_thread, stacksize + guardsize,
                      PROT_READ | PROT_WRITE | PROT_EXEC,
                      MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (map_addr == MAP_FAILED)
        return -1;

      new_thread_bottom = map_addr + sizeof(*new_thread);
      guardaddr = map_addr + stacksize;
      if (guardsize > 0)
        mprotect (guardaddr, guardsize, PROT_NONE);
#  endif /* stack direction */
# endif /* !NEED_SEPARATE_REGISTER_STACK */
#endif /* !FLOATING_STACKS */
  *out_new_thread = (char *) new_thread;
  *out_new_thread_bottom = new_thread_bottom;
  *out_guardaddr = guardaddr;
  *out_guardsize = guardsize;
#ifdef NEED_SEPARATE_REGISTER_STACK
  *out_stacksize = stacksize / 2;
#else
  *out_stacksize = stacksize;
#endif
  return 0;
}
static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
                                 void * (*start_routine)(void *), void *arg,
                                 sigset_t * mask, int father_pid,
                                 int report_events,
                                 td_thr_events_t *event_maskp)
{
  size_t sseg;
  int pid;
  pthread_descr new_thread;
  char *stack_addr;
  char * new_thread_bottom;
  pthread_t new_thread_id;
  char *guardaddr = NULL;
  size_t guardsize = 0, stksize = 0;
  int pagesize = __getpagesize();
#ifdef USE_TLS
  new_thread = _dl_allocate_tls (NULL);
  if (new_thread == NULL)
    return EAGAIN;
# if TLS_DTV_AT_TP
  /* pthread_descr is below TP. */
  new_thread = (pthread_descr) ((char *) new_thread - TLS_PRE_TCB_SIZE);
# endif
#else
  /* Prevent warnings. */
  new_thread = NULL;
#endif
607 /* Prevent warnings. */
611 /* First check whether we have to change the policy and if yes, whether
612 we can do this. Normally this should be done by examining the
613 return value of the __sched_setscheduler call in pthread_start_thread
614 but this is hard to implement. FIXME */
615 if (attr
!= NULL
&& attr
->__schedpolicy
!= SCHED_OTHER
&& geteuid () != 0)
  /* Find a free segment for the thread, and allocate a stack if needed */
  for (sseg = 2; ; sseg++)
    {
      if (sseg >= PTHREAD_THREADS_MAX)
        {
#ifdef USE_TLS
# if TLS_DTV_AT_TP
          new_thread = (pthread_descr) ((char *) new_thread + TLS_PRE_TCB_SIZE);
# endif
          _dl_deallocate_tls (new_thread, true);
#endif
          return EAGAIN;
        }
      if (__pthread_handles[sseg].h_descr != NULL)
        continue;

      if (pthread_allocate_stack(attr, thread_segment(sseg),
                                 pagesize, &stack_addr, &new_thread_bottom,
                                 &guardaddr, &guardsize, &stksize) == 0)
        {
#ifdef USE_TLS
          new_thread->p_stackaddr = stack_addr;
#else
          new_thread = (pthread_descr) stack_addr;
#endif
          break;
        }
    }
  __pthread_handles_num++;
  /* Allocate new thread identifier */
  pthread_threads_counter += PTHREAD_THREADS_MAX;
  new_thread_id = sseg + pthread_threads_counter;
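  /* Illustrative example (assuming PTHREAD_THREADS_MAX == 1024, the
     historical LinuxThreads default): the first created thread lands in
     segment 2 with id 1024 + 2 = 1026; if segment 2 is reused by, say,
     the fifth create, its id is 5 * 1024 + 2 = 5122. Ids for the same
     segment differ by multiples of PTHREAD_THREADS_MAX, and
     id % PTHREAD_THREADS_MAX recovers the segment, so ids stay unique
     until the counter wraps. */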
  /* Initialize the thread descriptor. Elements which have to be
     initialized to zero already have this value. */
#if !defined USE_TLS || !TLS_DTV_AT_TP
  new_thread->p_header.data.tcb = new_thread;
  new_thread->p_header.data.self = new_thread;
#endif
#if TLS_MULTIPLE_THREADS_IN_TCB || !defined USE_TLS || !TLS_DTV_AT_TP
  new_thread->p_multiple_threads = 1;
#endif
  new_thread->p_tid = new_thread_id;
  new_thread->p_lock = &(__pthread_handles[sseg].h_lock);
  new_thread->p_cancelstate = PTHREAD_CANCEL_ENABLE;
  new_thread->p_canceltype = PTHREAD_CANCEL_DEFERRED;
#if !(USE_TLS && HAVE___THREAD)
  new_thread->p_errnop = &new_thread->p_errno;
  new_thread->p_h_errnop = &new_thread->p_h_errno;
  new_thread->p_resp = &new_thread->p_res;
#endif
  new_thread->p_guardaddr = guardaddr;
  new_thread->p_guardsize = guardsize;
  new_thread->p_nr = sseg;
  new_thread->p_inheritsched = attr ? attr->__inheritsched : 0;
  /* Cap alloca() usage at a quarter of the stack, bounded above by
     __MAX_ALLOCA_CUTOFF. */
  new_thread->p_alloca_cutoff = stksize / 4 > __MAX_ALLOCA_CUTOFF
                                ? __MAX_ALLOCA_CUTOFF : stksize / 4;
  /* Initialize the thread handle */
  __pthread_init_lock(&__pthread_handles[sseg].h_lock);
  __pthread_handles[sseg].h_descr = new_thread;
  __pthread_handles[sseg].h_bottom = new_thread_bottom;
  /* Determine scheduling parameters for the thread */
  new_thread->p_start_args.schedpolicy = -1;
  if (attr != NULL) {
    new_thread->p_detached = attr->__detachstate;
    new_thread->p_userstack = attr->__stackaddr_set;
    switch(attr->__inheritsched) {
    case PTHREAD_EXPLICIT_SCHED:
      new_thread->p_start_args.schedpolicy = attr->__schedpolicy;
      memcpy (&new_thread->p_start_args.schedparam, &attr->__schedparam,
              sizeof (struct sched_param));
      break;
    case PTHREAD_INHERIT_SCHED:
      new_thread->p_start_args.schedpolicy = __sched_getscheduler(father_pid);
      __sched_getparam(father_pid, &new_thread->p_start_args.schedparam);
      break;
    }
    new_thread->p_priority =
      new_thread->p_start_args.schedparam.sched_priority;
  }
  /* Finish setting up arguments to pthread_start_thread */
  new_thread->p_start_args.start_routine = start_routine;
  new_thread->p_start_args.arg = arg;
  new_thread->p_start_args.mask = *mask;
  /* Make the new thread ID available already now. If any of the later
     functions fail we return an error value and the caller must not use
     the stored thread ID. */
  *thread = new_thread_id;
  /* Raise priority of thread manager if needed */
  __pthread_manager_adjust_prio(new_thread->p_priority);
  /* Do the cloning. We have to use two different functions depending
     on whether we are debugging or not. */
  pid = 0;     /* Note that the thread never can have PID zero. */
  if (report_events)
    {
      /* See whether the TD_CREATE event bit is set in any of the
         masks. */
      int idx = __td_eventword (TD_CREATE);
      uint32_t mask = __td_eventmask (TD_CREATE);

      if ((mask & (__pthread_threads_events.event_bits[idx]
                   | event_maskp->event_bits[idx])) != 0)
        {
          /* Lock the mutex the child will use now so that it will stop. */
          __pthread_lock(new_thread->p_lock, NULL);
          /* We have to report this event. */
#ifdef NEED_SEPARATE_REGISTER_STACK
          /* Perhaps this version should be used on all platforms. But
             this requires that __clone2 be uniformly supported
             everywhere.

             And there is some argument for changing the __clone2
             interface to pass sp and bsp instead, making it more IA64
             specific, but allowing stacks to grow outward from each
             other, to get less paging and fewer mmaps. */
          pid = __clone2(pthread_start_thread_event,
                         (void **)new_thread_bottom,
                         (char *)stack_addr - new_thread_bottom,
                         CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
                         __pthread_sig_cancel, new_thread);
#elif _STACK_GROWS_UP
          pid = __clone(pthread_start_thread_event, (void *) new_thread_bottom,
                        CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
                        __pthread_sig_cancel, new_thread);
#else
          pid = __clone(pthread_start_thread_event, stack_addr,
                        CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
                        __pthread_sig_cancel, new_thread);
#endif
          if (pid != -1)
            {
              /* Now fill in the information about the new thread in
                 the newly created thread's data structure. We cannot let
                 the new thread do this since we don't know whether it was
                 already scheduled when we send the event. */
              new_thread->p_eventbuf.eventdata = new_thread;
              new_thread->p_eventbuf.eventnum = TD_CREATE;
              __pthread_last_event = new_thread;

              /* We have to set the PID here since the callback function
                 in the debug library will need it and we cannot guarantee
                 the child got scheduled before the debugger. */
              new_thread->p_pid = pid;

              /* Now call the function which signals the event. */
              __linuxthreads_create_event ();

              /* Now restart the thread. */
              __pthread_unlock(new_thread->p_lock);
            }
        }
    }
  if (pid == 0)
    {
#ifdef NEED_SEPARATE_REGISTER_STACK
      pid = __clone2(pthread_start_thread,
                     (void **)new_thread_bottom,
                     (char *)stack_addr - new_thread_bottom,
                     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
                     __pthread_sig_cancel, new_thread);
#elif _STACK_GROWS_UP
      pid = __clone(pthread_start_thread, (void *) new_thread_bottom,
                    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
                    __pthread_sig_cancel, new_thread);
#else
      pid = __clone(pthread_start_thread, stack_addr,
                    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
                    __pthread_sig_cancel, new_thread);
#endif /* !NEED_SEPARATE_REGISTER_STACK */
    }
  /* Check if cloning succeeded */
  if (pid == -1) {
    /* Free the stack if we allocated it */
    if (attr == NULL || !attr->__stackaddr_set)
      {
#ifdef NEED_SEPARATE_REGISTER_STACK
        size_t stacksize = ((char *)(new_thread->p_guardaddr)
                            - new_thread_bottom);
        munmap((caddr_t)new_thread_bottom,
               2 * stacksize + new_thread->p_guardsize);
#elif _STACK_GROWS_UP
# ifdef USE_TLS
        size_t stacksize = guardaddr - stack_addr;
        munmap(stack_addr, stacksize + guardsize);
# else
        size_t stacksize = guardaddr - (char *)new_thread;
        munmap(new_thread, stacksize + guardsize);
# endif
#else
# ifdef USE_TLS
        size_t stacksize = stack_addr - new_thread_bottom;
# else
        size_t stacksize = (char *)(new_thread+1) - new_thread_bottom;
# endif
        munmap(new_thread_bottom - guardsize, guardsize + stacksize);
#endif
      }
#ifdef USE_TLS
# if TLS_DTV_AT_TP
    new_thread = (pthread_descr) ((char *) new_thread + TLS_PRE_TCB_SIZE);
# endif
    _dl_deallocate_tls (new_thread, true);
#endif
    __pthread_handles[sseg].h_descr = NULL;
    __pthread_handles[sseg].h_bottom = NULL;
    __pthread_handles_num--;
    return errno;
  }
  /* Insert new thread in doubly linked list of active threads */
  new_thread->p_prevlive = __pthread_main_thread;
  new_thread->p_nextlive = __pthread_main_thread->p_nextlive;
  __pthread_main_thread->p_nextlive->p_prevlive = new_thread;
  __pthread_main_thread->p_nextlive = new_thread;
  /* Set pid field of the new thread, in case we get there before the
     child starts. */
  new_thread->p_pid = pid;
  return 0;
}
/* Try to free the resources of a thread when requested by pthread_join
   or pthread_detach on a terminated thread. */

static void pthread_free(pthread_descr th)
{
  pthread_handle handle;
  pthread_readlock_info *iter, *next;

  ASSERT(th->p_exited);
  /* Make the handle invalid */
  handle = thread_handle(th->p_tid);
  __pthread_lock(&handle->h_lock, NULL);
  handle->h_descr = NULL;
  handle->h_bottom = (char *)(-1L);
  __pthread_unlock(&handle->h_lock);
#ifdef FREE_THREAD
  FREE_THREAD(th, th->p_nr);
#endif
  /* One fewer thread in __pthread_handles */
  __pthread_handles_num--;

  /* Destroy read lock list, and list of free read lock structures.
     If the former is not empty, it means the thread exited while
     holding read locks! */

  for (iter = th->p_readlock_list; iter != NULL; iter = next)
    {
      next = iter->pr_next;
      free(iter);
    }

  for (iter = th->p_readlock_free; iter != NULL; iter = next)
    {
      next = iter->pr_next;
      free(iter);
    }
  /* If initial thread, nothing to free */
  if (!th->p_userstack)
    {
      size_t guardsize = th->p_guardsize;
      /* Free the stack and thread descriptor area */
      char *guardaddr = th->p_guardaddr;
#ifdef _STACK_GROWS_UP
# ifdef USE_TLS
      size_t stacksize = guardaddr - th->p_stackaddr;
# else
      size_t stacksize = guardaddr - (char *)th;
# endif
      guardaddr = (char *)th;
#else
      /* Guardaddr is always set, even if guardsize is 0. This allows
         us to compute everything else. */
# ifdef USE_TLS
      size_t stacksize = th->p_stackaddr - guardaddr - guardsize;
# else
      size_t stacksize = (char *)(th+1) - guardaddr - guardsize;
# endif
# ifdef NEED_SEPARATE_REGISTER_STACK
      /* Take account of the register stack, which is below guardaddr. */
      guardaddr -= stacksize;
      stacksize *= 2;
# endif
#endif
      /* Unmap the stack. */
      munmap(guardaddr, stacksize + guardsize);
    }
#ifdef USE_TLS
# if TLS_DTV_AT_TP
  th = (pthread_descr) ((char *) th + TLS_PRE_TCB_SIZE);
# endif
  _dl_deallocate_tls (th, true);
#endif
}
/* Handle threads that have exited */

static void pthread_exited(pid_t pid)
{
  pthread_descr th;
  int detached;
923 for (th
= __pthread_main_thread
->p_nextlive
;
924 th
!= __pthread_main_thread
;
925 th
= th
->p_nextlive
) {
926 if (th
->p_pid
== pid
) {
927 /* Remove thread from list of active threads */
928 th
->p_nextlive
->p_prevlive
= th
->p_prevlive
;
929 th
->p_prevlive
->p_nextlive
= th
->p_nextlive
;
930 /* Mark thread as exited, and if detached, free its resources */
931 __pthread_lock(th
->p_lock
, NULL
);
      /* If we have to signal this event do it now. */
      if (th->p_report_events)
        {
          /* See whether TD_REAP is in any of the masks. */
          int idx = __td_eventword (TD_REAP);
          uint32_t mask = __td_eventmask (TD_REAP);

          if ((mask & (__pthread_threads_events.event_bits[idx]
                       | th->p_eventbuf.eventmask.event_bits[idx])) != 0)
            {
              /* Yep, we have to signal the reapage. */
              th->p_eventbuf.eventnum = TD_REAP;
              th->p_eventbuf.eventdata = th;
              __pthread_last_event = th;

              /* Now call the function to signal the event. */
              __linuxthreads_reap_event();
            }
        }
      detached = th->p_detached;
      __pthread_unlock(th->p_lock);
      if (detached)
        pthread_free(th);
      break;
    }
  }
  /* If all threads have exited and the main thread is pending on a
     pthread_exit, wake up the main thread and terminate ourselves. */
  if (main_thread_exiting &&
      __pthread_main_thread->p_nextlive == __pthread_main_thread) {
    restart(__pthread_main_thread);
    /* Same logic as REQ_MAIN_THREAD_EXIT. */
  }
}
static void pthread_reap_children(void)
{
  pid_t pid;
  int status;

  while ((pid = __libc_waitpid(-1, &status, WNOHANG | __WCLONE)) > 0) {
    pthread_exited(pid);
    if (WIFSIGNALED(status)) {
      /* If a thread died due to a signal, send the same signal to
         all other threads, including the main thread. */
      pthread_kill_all_threads(WTERMSIG(status), 1);
      _exit(0);
    }
  }
}
/* Try to free the resources of a thread when requested by pthread_join
   or pthread_detach on a terminated thread. */

static void pthread_handle_free(pthread_t th_id)
{
  pthread_handle handle = thread_handle(th_id);
  pthread_descr th;
  __pthread_lock(&handle->h_lock, NULL);
  if (nonexisting_handle(handle, th_id)) {
    /* pthread_reap_children has deallocated the thread already,
       nothing needs to be done */
    __pthread_unlock(&handle->h_lock);
    return;
  }
  th = handle->h_descr;
  if (th->p_exited) {
    __pthread_unlock(&handle->h_lock);
    pthread_free(th);
  } else {
    /* The Unix process of the thread is still running.
       Mark the thread as detached so that the thread manager will
       deallocate its resources when the Unix process exits. */
    th->p_detached = 1;
    __pthread_unlock(&handle->h_lock);
  }
}
/* Send a signal to all running threads */

static void pthread_kill_all_threads(int sig, int main_thread_also)
{
  pthread_descr th;
  for (th = __pthread_main_thread->p_nextlive;
       th != __pthread_main_thread;
       th = th->p_nextlive) {
    kill(th->p_pid, sig);
  }
  if (main_thread_also) {
    kill(__pthread_main_thread->p_pid, sig);
  }
}
static void pthread_for_each_thread(void *arg,
    void (*fn)(void *, pthread_descr))
{
  pthread_descr th;

  for (th = __pthread_main_thread->p_nextlive;
       th != __pthread_main_thread;
       th = th->p_nextlive) {
    fn(arg, th);
  }

  fn(arg, __pthread_main_thread);
}
/* Process-wide exit() */

static void pthread_handle_exit(pthread_descr issuing_thread, int exitcode)
{
  pthread_descr th;
  __pthread_exit_requested = 1;
  __pthread_exit_code = exitcode;
  /* A forced asynchronous cancellation follows. Make sure we won't
     get stuck later in the main thread with a system lock being held
     by one of the cancelled threads. Ideally one would use the same
     code as in pthread_atfork(), but we can't distinguish system and
     user handlers there. */
  __flockfilelist();
  /* Send the CANCEL signal to all running threads, including the main
     thread, but excluding the thread from which the exit request originated
     (that thread must complete the exit, e.g. calling atexit functions
     and flushing stdio buffers). */
  for (th = issuing_thread->p_nextlive;
       th != issuing_thread;
       th = th->p_nextlive) {
    kill(th->p_pid, __pthread_sig_cancel);
  }
  /* Now, wait for all these threads, so that they don't become zombies
     and their times are properly added to the thread manager's times. */
  for (th = issuing_thread->p_nextlive;
       th != issuing_thread;
       th = th->p_nextlive) {
    waitpid(th->p_pid, NULL, __WCLONE);
  }
  __fresetlockfiles();
  restart(issuing_thread);
  _exit(0);
}
/* Handler for __pthread_sig_cancel in thread manager thread */

void __pthread_manager_sighandler(int sig)
{
  int kick_manager = terminated_children == 0 && main_thread_exiting;
  terminated_children = 1;

  /* If the main thread is terminating, kick the thread manager loop
     each time some threads terminate. This eliminates a two second
     shutdown delay caused by the thread manager sleeping in the
     call to __poll(). Instead, the thread manager is kicked into
     action, reaps the outstanding threads and resumes the main thread
     so that it can complete the shutdown. */

  if (kick_manager) {
    struct pthread_request request;
    request.req_thread = 0;
    request.req_kind = REQ_KICK;
    TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
                                    (char *) &request, sizeof(request)));
  }
}
/* Adjust priority of thread manager so that it always runs at a priority
   higher than all threads */

void __pthread_manager_adjust_prio(int thread_prio)
{
  struct sched_param param;

  if (thread_prio <= manager_thread->p_priority) return;
  param.sched_priority =
    thread_prio < __sched_get_priority_max(SCHED_FIFO)
    ? thread_prio + 1 : thread_prio;
  __sched_setscheduler(manager_thread->p_pid, SCHED_FIFO, &param);
  manager_thread->p_priority = thread_prio;
}
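
/* Worked example (illustrative): if a thread is created with SCHED_FIFO
   priority 50 and the manager currently runs below that, the manager is
   moved to SCHED_FIFO priority 51 (or to 50 when 50 is already
   __sched_get_priority_max(SCHED_FIFO)), so manager requests such as
   REQ_CREATE and REQ_FREE are never starved by the realtime threads
   they service. */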