2 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
3 * Copyright (C) 2003 Daniel M. Eischen <deischen@freebsd.org>
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice unmodified, this list of conditions, and the following
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 * $DragonFly: src/lib/libthread_xu/thread/thr_list.c,v 1.7 2006/04/06 13:03:09 davidxu Exp $
30 #include <sys/cdefs.h>
31 #include <sys/types.h>
32 #include <sys/queue.h>
38 #include "thr_private.h"
39 #include "libc_private.h"
/* #define DEBUG_THREAD_LIST */
#ifdef DEBUG_THREAD_LIST
#define DBG_MSG		stdout_debug
#else
/* Debugging disabled: DBG_MSG() calls compile to nothing. */
#define DBG_MSG(x...)
#endif
48 /* List of all threads */
49 struct thread_head _thread_list
= TAILQ_HEAD_INITIALIZER(_thread_list
);
51 /* List of threads needing GC */
52 struct thread_head _thread_gc_list
= TAILQ_HEAD_INITIALIZER(_thread_gc_list
);
54 /* Number of active threads */
55 int _thread_active_threads
= 1;
57 /* Garbage thread count. */
60 umtx_t _thr_list_lock
;
63 * Define a high water mark for the maximum number of threads that
64 * will be cached. Once this level is reached, any extra threads
67 #define MAX_CACHED_THREADS 100
70 * We've got to keep track of everything that is allocated, not only
71 * to have a speedy free list, but also so they can be deallocated
74 static TAILQ_HEAD(, pthread
) free_threadq
;
75 static umtx_t free_thread_lock
;
76 static umtx_t tcb_lock
;
77 static int free_thread_count
= 0;
78 static int inited
= 0;
79 static u_int64_t next_uniqueid
= 1;
81 LIST_HEAD(thread_hash_head
, pthread
);
82 #define HASH_QUEUES 128
83 static struct thread_hash_head thr_hashtable
[HASH_QUEUES
];
84 #define THREAD_HASH(thrd) (((unsigned long)thrd >> 12) % HASH_QUEUES)
86 static void thr_destroy(struct pthread
*curthread
, struct pthread
*thread
);
94 _thr_umtx_init(&_thr_list_lock
);
95 TAILQ_INIT(&_thread_list
);
96 TAILQ_INIT(&free_threadq
);
97 _thr_umtx_init(&free_thread_lock
);
98 _thr_umtx_init(&tcb_lock
);
100 for (i
= 0; i
< HASH_QUEUES
; ++i
)
101 LIST_INIT(&thr_hashtable
[i
]);
107 _thr_gc(struct pthread
*curthread
)
109 struct pthread
*td
, *td_next
;
110 TAILQ_HEAD(, pthread
) worklist
;
112 TAILQ_INIT(&worklist
);
113 THREAD_LIST_LOCK(curthread
);
115 /* Check the threads waiting for GC. */
116 for (td
= TAILQ_FIRST(&_thread_gc_list
); td
!= NULL
; td
= td_next
) {
117 td_next
= TAILQ_NEXT(td
, gcle
);
118 if (td
->terminated
== 0) {
119 /* make sure we are not still in userland */
122 _thr_stack_free(&td
->attr
);
123 if (((td
->tlflags
& TLFLAGS_DETACHED
) != 0) &&
124 (td
->refcount
== 0)) {
125 THR_GCLIST_REMOVE(td
);
127 * The thread has detached and is no longer
128 * referenced. It is safe to remove all
129 * remnants of the thread.
132 TAILQ_INSERT_HEAD(&worklist
, td
, gcle
);
135 THREAD_LIST_UNLOCK(curthread
);
137 while ((td
= TAILQ_FIRST(&worklist
)) != NULL
) {
138 TAILQ_REMOVE(&worklist
, td
, gcle
);
140 * XXX we don't free initial thread, because there might
141 * have some code referencing initial thread.
143 if (td
== _thr_initial
) {
144 DBG_MSG("Initial thread won't be freed\n");
148 _thr_free(curthread
, td
);
153 _thr_alloc(struct pthread
*curthread
)
155 struct pthread
*thread
= NULL
;
158 if (curthread
!= NULL
) {
161 if (free_thread_count
> 0) {
162 THR_LOCK_ACQUIRE(curthread
, &free_thread_lock
);
163 if ((thread
= TAILQ_FIRST(&free_threadq
)) != NULL
) {
164 TAILQ_REMOVE(&free_threadq
, thread
, tle
);
167 THR_LOCK_RELEASE(curthread
, &free_thread_lock
);
170 if (thread
== NULL
) {
171 thread
= malloc(sizeof(struct pthread
));
175 if (curthread
!= NULL
) {
176 THR_LOCK_ACQUIRE(curthread
, &tcb_lock
);
177 tcb
= _tcb_ctor(thread
, 0 /* not initial tls */);
178 THR_LOCK_RELEASE(curthread
, &tcb_lock
);
180 tcb
= _tcb_ctor(thread
, 1 /* initial tls */);
183 memset(thread
, 0, sizeof(*thread
));
186 thr_destroy(curthread
, thread
);
193 _thr_free(struct pthread
*curthread
, struct pthread
*thread
)
195 DBG_MSG("Freeing thread %p\n", thread
);
201 * Always free tcb, as we only know it is part of RTLD TLS
202 * block, but don't know its detail and can not assume how
203 * it works, so better to avoid caching it here.
205 if (curthread
!= NULL
) {
206 THR_LOCK_ACQUIRE(curthread
, &tcb_lock
);
207 _tcb_dtor(thread
->tcb
);
208 THR_LOCK_RELEASE(curthread
, &tcb_lock
);
210 _tcb_dtor(thread
->tcb
);
213 if ((curthread
== NULL
) || (free_thread_count
>= MAX_CACHED_THREADS
)) {
214 thr_destroy(curthread
, thread
);
217 * Add the thread to the free thread list, this also avoids
218 * pthread id is reused too quickly, may help some buggy apps.
220 THR_LOCK_ACQUIRE(curthread
, &free_thread_lock
);
221 TAILQ_INSERT_TAIL(&free_threadq
, thread
, tle
);
223 THR_LOCK_RELEASE(curthread
, &free_thread_lock
);
228 thr_destroy(struct pthread
*curthread __unused
, struct pthread
*thread
)
234 * Add an active thread:
236 * o Assign the thread a unique id (which GDB uses to track
238 * o Add the thread to the list of all threads and increment
239 * number of active threads.
242 _thr_link(struct pthread
*curthread
, struct pthread
*thread
)
244 THREAD_LIST_LOCK(curthread
);
246 * Initialize the unique id (which GDB uses to track
247 * threads), add the thread to the list of all threads,
250 thread
->uniqueid
= next_uniqueid
++;
251 THR_LIST_ADD(thread
);
252 _thread_active_threads
++;
253 THREAD_LIST_UNLOCK(curthread
);
257 * Remove an active thread.
260 _thr_unlink(struct pthread
*curthread
, struct pthread
*thread
)
262 THREAD_LIST_LOCK(curthread
);
263 THR_LIST_REMOVE(thread
);
264 _thread_active_threads
--;
265 THREAD_LIST_UNLOCK(curthread
);
269 _thr_hash_add(struct pthread
*thread
)
271 struct thread_hash_head
*head
;
273 head
= &thr_hashtable
[THREAD_HASH(thread
)];
274 LIST_INSERT_HEAD(head
, thread
, hle
);
278 _thr_hash_remove(struct pthread
*thread
)
280 LIST_REMOVE(thread
, hle
);
284 _thr_hash_find(struct pthread
*thread
)
287 struct thread_hash_head
*head
;
289 head
= &thr_hashtable
[THREAD_HASH(thread
)];
290 LIST_FOREACH(td
, head
, hle
) {
298 * Find a thread in the linked list of active threads and add a reference
299 * to it. Threads with positive reference counts will not be deallocated
300 * until all references are released.
303 _thr_ref_add(struct pthread
*curthread
, struct pthread
*thread
,
309 /* Invalid thread: */
312 THREAD_LIST_LOCK(curthread
);
313 if ((ret
= _thr_find_thread(curthread
, thread
, include_dead
)) == 0) {
316 THREAD_LIST_UNLOCK(curthread
);
318 /* Return zero if the thread exists: */
/*
 * Drop a reference taken with _thr_ref_add(), taking the thread-list
 * lock around the update.
 */
void
_thr_ref_delete(struct pthread *curthread, struct pthread *thread)
{
	THREAD_LIST_LOCK(curthread);
	_thr_ref_delete_unlocked(curthread, thread);
	THREAD_LIST_UNLOCK(curthread);
}
331 _thr_ref_delete_unlocked(struct pthread
*curthread __unused
,
332 struct pthread
*thread
)
334 if (thread
!= NULL
) {
336 if ((thread
->refcount
== 0) && thread
->state
== PS_DEAD
&&
337 (thread
->tlflags
& TLFLAGS_DETACHED
) != 0)
338 THR_GCLIST_ADD(thread
);
343 _thr_find_thread(struct pthread
*curthread __unused
, struct pthread
*thread
,
346 struct pthread
*pthread
;
349 /* Invalid thread: */
352 pthread
= _thr_hash_find(thread
);
354 if (include_dead
== 0 && pthread
->state
== PS_DEAD
) {
359 /* Return zero if the thread exists: */
360 return ((pthread
!= NULL
) ? 0 : ESRCH
);