/*
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * Copyright (C) 2003 Daniel M. Eischen <deischen@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/types.h>
#include <sys/queue.h>

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

#include "libc_private.h"
#include "thr_private.h"
/* Enable to trace thread-list operations to stdout. */
/* #define DEBUG_THREAD_LIST */
#ifdef DEBUG_THREAD_LIST
#define DBG_MSG		stdout_debug
#else
/* Compiled out when tracing is disabled. */
#define DBG_MSG(x...)
#endif
45 /* List of all threads */
46 struct thread_head _thread_list
= TAILQ_HEAD_INITIALIZER(_thread_list
);
48 /* List of threads needing GC */
49 struct thread_head _thread_gc_list
= TAILQ_HEAD_INITIALIZER(_thread_gc_list
);
51 /* Number of active threads */
52 int _thread_active_threads
= 1;
54 /* Garbage thread count. */
57 umtx_t _thr_list_lock
;
60 * Define a high water mark for the maximum number of threads that
61 * will be cached. Once this level is reached, any extra threads
64 #define MAX_CACHED_THREADS 100
67 * We've got to keep track of everything that is allocated, not only
68 * to have a speedy free list, but also so they can be deallocated
71 static TAILQ_HEAD(, __pthread_s
) free_threadq
;
72 static umtx_t free_thread_lock
;
73 static umtx_t tcb_lock
;
74 static int free_thread_count
= 0;
75 static int inited
= 0;
76 static u_int64_t next_uniqueid
= 1;
78 LIST_HEAD(thread_hash_head
, __pthread_s
);
79 #define HASH_QUEUES 128
80 static struct thread_hash_head thr_hashtable
[HASH_QUEUES
];
81 #define THREAD_HASH(thrd) (((unsigned long)thrd >> 12) % HASH_QUEUES)
83 static void thr_destroy(pthread_t curthread
, pthread_t thread
);
91 _thr_umtx_init(&_thr_list_lock
);
92 TAILQ_INIT(&_thread_list
);
93 TAILQ_INIT(&free_threadq
);
94 _thr_umtx_init(&free_thread_lock
);
95 _thr_umtx_init(&tcb_lock
);
97 for (i
= 0; i
< HASH_QUEUES
; ++i
)
98 LIST_INIT(&thr_hashtable
[i
]);
104 _thr_gc(pthread_t curthread
)
106 pthread_t td
, td_next
;
107 TAILQ_HEAD(, __pthread_s
) worklist
;
109 TAILQ_INIT(&worklist
);
110 THREAD_LIST_LOCK(curthread
);
112 /* Check the threads waiting for GC. */
113 for (td
= TAILQ_FIRST(&_thread_gc_list
); td
!= NULL
; td
= td_next
) {
114 td_next
= TAILQ_NEXT(td
, gcle
);
115 if (td
->terminated
== 0) {
116 /* make sure we are not still in userland */
119 _thr_stack_free(&td
->attr
);
120 if (((td
->tlflags
& TLFLAGS_DETACHED
) != 0) &&
121 (td
->refcount
== 0)) {
122 THR_GCLIST_REMOVE(td
);
124 * The thread has detached and is no longer
125 * referenced. It is safe to remove all
126 * remnants of the thread.
129 TAILQ_INSERT_HEAD(&worklist
, td
, gcle
);
132 THREAD_LIST_UNLOCK(curthread
);
134 while ((td
= TAILQ_FIRST(&worklist
)) != NULL
) {
135 TAILQ_REMOVE(&worklist
, td
, gcle
);
137 * XXX we don't free initial thread, because there might
138 * have some code referencing initial thread.
140 if (td
== _thr_initial
) {
141 DBG_MSG("Initial thread won't be freed\n");
145 _thr_free(curthread
, td
);
150 _thr_alloc(pthread_t curthread
)
152 pthread_t thread
= NULL
;
155 if (curthread
!= NULL
) {
158 if (free_thread_count
> 0) {
159 THR_LOCK_ACQUIRE(curthread
, &free_thread_lock
);
160 if ((thread
= TAILQ_FIRST(&free_threadq
)) != NULL
) {
161 TAILQ_REMOVE(&free_threadq
, thread
, tle
);
164 THR_LOCK_RELEASE(curthread
, &free_thread_lock
);
167 if (thread
== NULL
) {
168 thread
= __malloc(sizeof(struct __pthread_s
));
172 if (curthread
!= NULL
) {
173 THR_LOCK_ACQUIRE(curthread
, &tcb_lock
);
174 tcb
= _tcb_ctor(thread
, 0 /* not initial tls */);
175 THR_LOCK_RELEASE(curthread
, &tcb_lock
);
177 tcb
= _tcb_ctor(thread
, 1 /* initial tls */);
180 memset(thread
, 0, sizeof(*thread
));
183 thr_destroy(curthread
, thread
);
190 _thr_free(pthread_t curthread
, pthread_t thread
)
192 DBG_MSG("Freeing thread %p\n", thread
);
194 __free(thread
->name
);
198 * Always free tcb, as we only know it is part of RTLD TLS
199 * block, but don't know its detail and can not assume how
200 * it works, so better to avoid caching it here.
202 if (curthread
!= NULL
) {
203 THR_LOCK_ACQUIRE(curthread
, &tcb_lock
);
204 _tcb_dtor(thread
->tcb
);
205 THR_LOCK_RELEASE(curthread
, &tcb_lock
);
207 _tcb_dtor(thread
->tcb
);
210 if ((curthread
== NULL
) || (free_thread_count
>= MAX_CACHED_THREADS
)) {
211 thr_destroy(curthread
, thread
);
214 * Add the thread to the free thread list, this also avoids
215 * pthread id is reused too quickly, may help some buggy apps.
217 THR_LOCK_ACQUIRE(curthread
, &free_thread_lock
);
218 TAILQ_INSERT_TAIL(&free_threadq
, thread
, tle
);
220 THR_LOCK_RELEASE(curthread
, &free_thread_lock
);
225 thr_destroy(pthread_t curthread __unused
, pthread_t thread
)
231 * Add an active thread:
233 * o Assign the thread a unique id (which GDB uses to track
235 * o Add the thread to the list of all threads and increment
236 * number of active threads.
239 _thr_link(pthread_t curthread
, pthread_t thread
)
241 THREAD_LIST_LOCK(curthread
);
243 * Initialize the unique id (which GDB uses to track
244 * threads), add the thread to the list of all threads,
247 thread
->uniqueid
= next_uniqueid
++;
248 THR_LIST_ADD(thread
);
249 _thread_active_threads
++;
250 THREAD_LIST_UNLOCK(curthread
);
254 * Remove an active thread.
257 _thr_unlink(pthread_t curthread
, pthread_t thread
)
259 THREAD_LIST_LOCK(curthread
);
260 THR_LIST_REMOVE(thread
);
261 _thread_active_threads
--;
262 THREAD_LIST_UNLOCK(curthread
);
266 _thr_hash_add(pthread_t thread
)
268 struct thread_hash_head
*head
;
270 head
= &thr_hashtable
[THREAD_HASH(thread
)];
271 LIST_INSERT_HEAD(head
, thread
, hle
);
275 _thr_hash_remove(pthread_t thread
)
277 LIST_REMOVE(thread
, hle
);
281 _thr_hash_find(pthread_t thread
)
284 struct thread_hash_head
*head
;
286 head
= &thr_hashtable
[THREAD_HASH(thread
)];
287 LIST_FOREACH(td
, head
, hle
) {
295 * Find a thread in the linked list of active threads and add a reference
296 * to it. Threads with positive reference counts will not be deallocated
297 * until all references are released.
300 _thr_ref_add(pthread_t curthread
, pthread_t thread
,
306 /* Invalid thread: */
309 THREAD_LIST_LOCK(curthread
);
310 if ((ret
= _thr_find_thread(curthread
, thread
, include_dead
)) == 0) {
313 THREAD_LIST_UNLOCK(curthread
);
315 /* Return zero if the thread exists: */
320 _thr_ref_delete(pthread_t curthread
, pthread_t thread
)
322 THREAD_LIST_LOCK(curthread
);
323 _thr_ref_delete_unlocked(curthread
, thread
);
324 THREAD_LIST_UNLOCK(curthread
);
328 _thr_ref_delete_unlocked(pthread_t curthread __unused
, pthread_t thread
)
330 if (thread
!= NULL
) {
332 if ((thread
->refcount
== 0) && thread
->state
== PS_DEAD
&&
333 (thread
->tlflags
& TLFLAGS_DETACHED
) != 0)
334 THR_GCLIST_ADD(thread
);
339 _thr_find_thread(pthread_t curthread __unused
, pthread_t thread
,
347 pthread
= _thr_hash_find(thread
);
349 if (include_dead
== 0 && pthread
->state
== PS_DEAD
) {
354 /* Return zero if the thread exists: */
355 return ((pthread
!= NULL
) ? 0 : ESRCH
);