/*
 * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1998 by Fergus Henderson.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/*
 * Support code for LinuxThreads, the clone()-based kernel
 * thread package for Linux which is included in libc6.
 *
 * This code relies on implementation details of LinuxThreads,
 * i.e. on properties not guaranteed by the pthreads standard:
 *
 * - the function GC_linux_thread_top_of_stack(void) relies on the
 *   way LinuxThreads lays out thread stacks in the address space.
 *
 * Note that there is a lot of code duplication between linux_threads.c
 * and irix_threads.c; any changes made here may need to be reflected
 * there as well.
 */
# if defined(LINUX_THREADS)

# include <pthread.h>
# include <time.h>
# include <errno.h>
# include <unistd.h>
# include <signal.h>
# include <sched.h>
# include <sys/mman.h>
# include <sys/time.h>
# include <semaphore.h>

#undef pthread_sigmask
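/* The #undef above is presumably needed because the public GC header	*/
/* redirects pthread_sigmask to GC_pthread_sigmask for client code;	*/
/* this file must be able to call the real pthread_sigmask.		*/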
void GC_print_sig_mask()
{
    sigset_t blocked;
    int i;

    if (pthread_sigmask(SIG_BLOCK, NULL, &blocked) != 0)
    	ABORT("pthread_sigmask");
    GC_printf0("Blocked: ");
    for (i = 1; i <= MAXSIG; i++) {
        if (sigismember(&blocked, i)) { GC_printf1("%ld ", (long) i); }
    }
    GC_printf0("\n");
}
/* We use the allocation lock to protect thread-related data structures. */

/* The set of all known threads.  We intercept thread creation and	*/
/* joins.  We never actually create detached threads.  We allocate all	*/
/* new thread stacks ourselves.  These allow us to maintain this	*/
/* data structure.							*/
/* Protected by GC_thr_lock.						*/
/* Some of this should be declared volatile, but that's inconsistent	*/
/* with some library routine declarations.				*/
typedef struct GC_Thread_Rep {
    struct GC_Thread_Rep * next;  /* More recently allocated threads	*/
				  /* with a given pthread id come	*/
				  /* first.  (All but the first are	*/
				  /* guaranteed to be dead, but we may	*/
				  /* not yet have registered the join.)	*/
    pthread_t id;
    word flags;
#	define FINISHED 1	/* Thread has exited.			*/
#	define DETACHED 2	/* Thread is intended to be detached.	*/
#	define MAIN_THREAD 4	/* True for the original thread only.	*/
    int signal;			/* Set by GC_restart_handler.		*/
    ptr_t stack_ptr;		/* Valid only when stopped.		*/
    ptr_t stack_end;
    void * status;		/* The value returned from the thread.	*/
				/* Used only to avoid premature		*/
				/* reclamation of any data it might	*/
				/* reference.				*/
} * GC_thread;

GC_thread GC_lookup_thread(pthread_t id);
/*
 * The only way to suspend threads given the pthread interface is to send
 * signals.  We can't use SIGSTOP directly, because we need to get the
 * thread to save its stack pointer in the GC thread table before
 * suspending.  So we have to reserve a signal of our own for this.
 * This means we have to intercept client calls to change the signal mask.
 * The linuxthreads package already uses SIGUSR1 and SIGUSR2,
 * so we need to use something else.  I chose SIGPWR.
 * (Perhaps SIGUNUSED would be a better choice.)
 */
#define SIG_SUSPEND SIGPWR

#define SIG_RESTART SIGXCPU
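/*
 * An informal sketch of the stop/start handshake implemented below
 * (a summary of this file's own logic, not additional code):
 *
 *   stopping thread (holds alloc lock)    each target thread
 *   ---------------------------------    -------------------------------
 *   pthread_kill(t, SIG_SUSPEND)  --->   GC_suspend_handler:
 *                                          record stack_ptr / stack_end
 *   sem_wait(&GC_suspend_ack_sem) <---     sem_post(&GC_suspend_ack_sem)
 *                                          sigsuspend(), SIG_RESTART open
 *   ... collect ...
 *   pthread_kill(t, SIG_RESTART)  --->   GC_restart_handler:
 *                                          me->signal = SIG_RESTART
 *                                          sigsuspend() returns
 */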
sem_t GC_suspend_ack_sem;
/*
 * GC_linux_thread_top_of_stack() relies on implementation details of
 * LinuxThreads, namely that thread stacks are allocated on 2M boundaries
 * and grow to no more than 2M.
 * To make sure that we're using LinuxThreads and not some other thread
 * package, we generate a dummy reference to `__pthread_initial_thread_bos',
 * which is a symbol defined in LinuxThreads, but (hopefully) not in other
 * thread packages.
 */

#if 0
/* Note: on Caldera OpenLinux, this symbol is `local' in		*/
/* libpthread.so (but not in libpthread.a).  We don't really need	*/
/* it, so we just comment it out.					*/
extern char * __pthread_initial_thread_bos;
char **dummy_var_to_force_linux_threads = &__pthread_initial_thread_bos;
#endif
#define LINUX_THREADS_STACK_SIZE  (2 * 1024 * 1024)
static inline ptr_t GC_linux_thread_top_of_stack(void)
{
  char *sp = GC_approx_sp();
  ptr_t tos = (ptr_t)(((unsigned long)sp | (LINUX_THREADS_STACK_SIZE - 1)) + 1);
# ifdef DEBUG_THREADS
    GC_printf1("SP = %lx\n", (unsigned long)sp);
    GC_printf1("TOS = %lx\n", (unsigned long)tos);
# endif
  return tos;
}
void GC_suspend_handler(int sig)
{
    int dummy;
    pthread_t my_thread = pthread_self();
    GC_thread me;
    sigset_t mask;

    if (sig != SIG_SUSPEND) ABORT("Bad signal in suspend_handler");

#   ifdef DEBUG_THREADS
    GC_printf1("Suspending 0x%x\n", my_thread);
#   endif

    me = GC_lookup_thread(my_thread);
    /* The lookup here is safe, since I'm doing this on behalf	*/
    /* of a thread which holds the allocation lock in order	*/
    /* to stop the world.  Thus concurrent modification of the	*/
    /* data structure is impossible.				*/
    me -> stack_ptr = (ptr_t)(&dummy);
    me -> stack_end = GC_linux_thread_top_of_stack();

    /* Tell the thread that wants to stop the world that this	*/
    /* thread has been stopped.  Note that sem_post() is	*/
    /* the only async-signal-safe primitive in LinuxThreads.	*/
    sem_post(&GC_suspend_ack_sem);

    /* Wait until that thread tells us to restart by sending	*/
    /* this thread a SIG_RESTART signal.			*/
    /* SIG_RESTART should be masked at this point.  Thus there	*/
    /* is no race.						*/
    if (sigfillset(&mask) != 0) ABORT("sigfillset() failed");
    if (sigdelset(&mask, SIG_RESTART) != 0) ABORT("sigdelset() failed");
    do {
        me -> signal = 0;
        sigsuspend(&mask);		/* Wait for signal */
    } while (me -> signal != SIG_RESTART);

#   ifdef DEBUG_THREADS
    GC_printf1("Continuing 0x%x\n", my_thread);
#   endif
}
void GC_restart_handler(int sig)
{
    GC_thread me;

    if (sig != SIG_RESTART) ABORT("Bad signal in restart_handler");

    /* Let the GC_suspend_handler() know that we got a SIG_RESTART.	*/
    /* The lookup here is safe, since I'm doing this on behalf		*/
    /* of a thread which holds the allocation lock in order		*/
    /* to stop the world.  Thus concurrent modification of the		*/
    /* data structure is impossible.					*/
    me = GC_lookup_thread(pthread_self());
    me -> signal = SIG_RESTART;

    /*
    ** Note: even if we didn't do anything useful here,
    ** it would still be necessary to have a signal handler,
    ** rather than ignoring the signals; otherwise
    ** the signals would not be delivered at all, and
    ** would thus not interrupt the sigsuspend() above.
    */

#   ifdef DEBUG_THREADS
    GC_printf1("In GC_restart_handler for 0x%x\n", pthread_self());
#   endif
}
GC_bool GC_thr_initialized = FALSE;

# define THREAD_TABLE_SZ 128	/* Must be power of 2 */
volatile GC_thread GC_threads[THREAD_TABLE_SZ];
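/* Bucket layout example (illustrative): if two registrations of the	*/
/* same id hash to bucket h, the newest is first on the chain:		*/
/*   GC_threads[h] -> {id, live} -> {id, dead, join unregistered} -> 0	*/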
/* Add a thread to GC_threads.  We assume it wasn't already there.	*/
/* Caller holds allocation lock.					*/
GC_thread GC_new_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    GC_thread result;
    static struct GC_Thread_Rep first_thread;
    static GC_bool first_thread_used = FALSE;

    if (!first_thread_used) {
    	result = &first_thread;
    	first_thread_used = TRUE;
    	/* Don't acquire allocation lock, since we may already hold it. */
    } else {
        result = (struct GC_Thread_Rep *)
        	 GC_generic_malloc_inner(sizeof(struct GC_Thread_Rep), NORMAL);
    }
    if (result == 0) return(0);
    result -> id = id;
    result -> next = GC_threads[hv];
    GC_threads[hv] = result;
    /* result -> flags = 0; */
    return(result);
}
/* Delete a thread from GC_threads.  We assume it is there.	*/
/* (The code intentionally traps if it wasn't.)			*/
/* Caller holds allocation lock.				*/
void GC_delete_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (!pthread_equal(p -> id, id)) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
}
/* If a thread has been joined, but we have not yet		*/
/* been notified, then there may be more than one thread	*/
/* in the table with the same pthread id.			*/
/* This is OK, but we need a way to delete a specific one.	*/
void GC_delete_gc_thread(pthread_t id, GC_thread gc_id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (p != gc_id) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
}
/* Return a GC_thread corresponding to a given thread_t.	*/
/* Returns 0 if it's not there.					*/
/* Caller holds allocation lock or otherwise inhibits		*/
/* updates.							*/
/* If there is more than one thread with the given id we	*/
/* return the most recent one.					*/
GC_thread GC_lookup_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];

    while (p != 0 && !pthread_equal(p -> id, id)) p = p -> next;
    return(p);
}
/* Caller holds allocation lock.	*/
void GC_stop_world()
{
    pthread_t my_thread = pthread_self();
    register int i;
    register GC_thread p;
    register int n_live_threads = 0;
    register int result;

    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> id != my_thread) {
            if (p -> flags & FINISHED) continue;
            n_live_threads++;
#	    ifdef DEBUG_THREADS
            GC_printf1("Sending suspend signal to 0x%x\n", p -> id);
#	    endif
            result = pthread_kill(p -> id, SIG_SUSPEND);
            switch(result) {
                case ESRCH:
                    /* Not really there anymore.  Possible? */
                    n_live_threads--;
                    break;
                case 0:
                    break;
                default:
                    ABORT("pthread_kill failed");
            }
        }
      }
    }
    for (i = 0; i < n_live_threads; i++) {
        sem_wait(&GC_suspend_ack_sem);
    }
#   ifdef DEBUG_THREADS
    GC_printf1("World stopped 0x%x\n", pthread_self());
#   endif
}
/* Caller holds allocation lock.	*/
void GC_start_world()
{
    pthread_t my_thread = pthread_self();
    register int i;
    register GC_thread p;
    register int n_live_threads = 0;
    register int result;

#   ifdef DEBUG_THREADS
    GC_printf0("World starting\n");
#   endif

    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> id != my_thread) {
            if (p -> flags & FINISHED) continue;
            n_live_threads++;
#	    ifdef DEBUG_THREADS
            GC_printf1("Sending restart signal to 0x%x\n", p -> id);
#	    endif
            result = pthread_kill(p -> id, SIG_RESTART);
            switch(result) {
                case ESRCH:
                    /* Not really there anymore.  Possible? */
                    n_live_threads--;
                    break;
                case 0:
                    break;
                default:
                    ABORT("pthread_kill failed");
            }
        }
      }
    }
#   ifdef DEBUG_THREADS
    GC_printf0("World started\n");
#   endif
}
/* We hold allocation lock.  We assume the world is stopped.	*/
void GC_push_all_stacks()
{
    register int i;
    register GC_thread p;
    register ptr_t sp = GC_approx_sp();
    register ptr_t lo, hi;
    pthread_t me = pthread_self();

    if (!GC_thr_initialized) GC_thr_init();
#   ifdef DEBUG_THREADS
    GC_printf1("Pushing stacks from thread 0x%lx\n", (unsigned long) me);
#   endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> flags & FINISHED) continue;
        if (pthread_equal(p -> id, me)) {
            lo = GC_approx_sp();
        } else {
            lo = p -> stack_ptr;
        }
        if ((p -> flags & MAIN_THREAD) == 0) {
            if (pthread_equal(p -> id, me)) {
                hi = GC_linux_thread_top_of_stack();
            } else {
                hi = p -> stack_end;
            }
        } else {
            /* The original stack. */
            hi = GC_stackbottom;
        }
#	ifdef DEBUG_THREADS
        GC_printf3("Stack for thread 0x%lx = [%lx,%lx)\n",
        	   (unsigned long) p -> id,
        	   (unsigned long) lo, (unsigned long) hi);
#	endif
        GC_push_all_stack(lo, hi);
      }
    }
}
/* We hold the allocation lock.	*/
void GC_thr_init()
{
    GC_thread t;
    struct sigaction act;

    GC_thr_initialized = TRUE;

    if (sem_init(&GC_suspend_ack_sem, 0, 0) != 0)
    	ABORT("sem_init failed");

    act.sa_flags = SA_RESTART;
    if (sigfillset(&act.sa_mask) != 0) {
    	ABORT("sigfillset() failed");
    }
    /* SIG_RESTART is unmasked by the handler when necessary.	*/
    act.sa_handler = GC_suspend_handler;
    if (sigaction(SIG_SUSPEND, &act, NULL) != 0) {
    	ABORT("Cannot set SIG_SUSPEND handler");
    }

    act.sa_handler = GC_restart_handler;
    if (sigaction(SIG_RESTART, &act, NULL) != 0) {
    	ABORT("Cannot set SIG_RESTART handler");
    }

    /* Add the initial thread, so we can stop it.	*/
    t = GC_new_thread(pthread_self());
    t -> stack_ptr = (ptr_t)(&t);
    t -> flags = DETACHED | MAIN_THREAD;
}
int GC_pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
{
    sigset_t fudged_set;

    if (set != NULL && (how == SIG_BLOCK || how == SIG_SETMASK)) {
        fudged_set = *set;
        sigdelset(&fudged_set, SIG_SUSPEND);
        set = &fudged_set;
    }
    return(pthread_sigmask(how, set, oset));
}
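/* Illustrative (hypothetical) client call showing why the wrapper	*/
/* matters: without it, blocking "all" signals would also block	*/
/* SIG_SUSPEND, making the calling thread unstoppable by the collector. */
#if 0
static void client_sigmask_example(void)	/* hypothetical */
{
    sigset_t set;

    sigfillset(&set);
    pthread_sigmask(SIG_BLOCK, &set, NULL);	/* -> GC_pthread_sigmask */
}
#endif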
struct start_info {
    void *(*start_routine)(void *);
    void *arg;
};
void GC_thread_exit_proc(void *dummy)
{
    GC_thread me;

    LOCK();
    me = GC_lookup_thread(pthread_self());
    if (me -> flags & DETACHED) {
    	GC_delete_thread(pthread_self());
    } else {
    	me -> flags |= FINISHED;
    }
    UNLOCK();
}
int GC_pthread_join(pthread_t thread, void **retval)
{
    int result;
    GC_thread thread_gc_id;

    LOCK();
    thread_gc_id = GC_lookup_thread(thread);
    /* This is guaranteed to be the intended one, since the thread id	*/
    /* can't have been recycled by pthreads.				*/
    UNLOCK();
    result = pthread_join(thread, retval);
    LOCK();
    /* Here the pthread thread id may have been recycled. */
    GC_delete_gc_thread(thread, thread_gc_id);
    UNLOCK();
    return(result);
}
void * GC_start_routine(void * arg)
{
    struct start_info * si = arg;
    void * result;
    GC_thread me;

    LOCK();
    me = GC_lookup_thread(pthread_self());
    UNLOCK();
    pthread_cleanup_push(GC_thread_exit_proc, 0);
#   ifdef DEBUG_THREADS
        GC_printf1("Starting thread 0x%x\n", pthread_self());
        GC_printf1("pid = %ld\n", (long) getpid());
        GC_printf1("sp = 0x%lx\n", (long) &arg);
#   endif
    result = (*(si -> start_routine))(si -> arg);
#   ifdef DEBUG_THREADS
        GC_printf1("Finishing thread 0x%x\n", pthread_self());
#   endif
    me -> status = result;
    me -> flags |= FINISHED;
    pthread_cleanup_pop(1);
	/* This involves acquiring the lock, ensuring that we can't exit */
	/* while a collection that thinks we're alive is trying to stop  */
	/* us.								  */
    return(result);
}
int
GC_pthread_create(pthread_t *new_thread,
		  const pthread_attr_t *attr,
		  void *(*start_routine)(void *), void *arg)
{
    int result;
    GC_thread t;
    pthread_t my_new_thread;
    pthread_attr_t new_attr;
    int detachstate;
    word my_flags = 0;
    struct start_info * si = GC_malloc(sizeof(struct start_info));

    if (0 == si) return(ENOMEM);
    si -> start_routine = start_routine;
    si -> arg = arg;
    LOCK();
    if (!GC_thr_initialized) GC_thr_init();
    if (NULL == attr) {
        (void) pthread_attr_init(&new_attr);
    } else {
        new_attr = *attr;
    }
    pthread_attr_getdetachstate(&new_attr, &detachstate);
    if (PTHREAD_CREATE_DETACHED == detachstate) my_flags |= DETACHED;
    result = pthread_create(&my_new_thread, &new_attr, GC_start_routine, si);
    /* No GC can start until the thread is registered, since we hold	*/
    /* the allocation lock.						*/
    if (0 == result) {
        t = GC_new_thread(my_new_thread);
        t -> flags = my_flags;
        if (0 != new_thread) *new_thread = my_new_thread;
    }
    UNLOCK();
    /* pthread_attr_destroy(&new_attr); */
    return(result);
}
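/* Illustrative (hypothetical) client usage, assuming the public GC	*/
/* header redirects pthread_create/pthread_join to the GC_ wrappers:	*/
/* the new thread then enters via GC_start_routine and is registered	*/
/* before it can run client code.					*/
#if 0
static void client_create_example(void)	/* hypothetical */
{
    pthread_t th;

    if (pthread_create(&th, NULL, client_start, client_arg) != 0)
    	ABORT("pthread_create failed");	/* -> GC_pthread_create */
    pthread_join(th, NULL);		/* -> GC_pthread_join */
}
#endif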
GC_bool GC_collecting = 0;	/* A hint that we're in the collector and */
				/* holding the allocation lock for an	  */
				/* extended period.			  */
/* Reasonably fast spin locks.  Basically the same implementation	*/
/* as STL alloc.h.  This isn't really the right way to do this,		*/
/* but until the POSIX scheduling mess gets straightened out ...	*/

volatile unsigned int GC_allocate_lock = 0;
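/* GC_test_and_set is supplied by the GC's architecture headers.  As	*/
/* an illustration only (not used here, and assuming i486 GCC), it	*/
/* amounts to an atomic exchange that returns the previous lock value:	*/
#if 0
static inline int GC_test_and_set_sketch(volatile unsigned int *addr)
{
    int oldval;

    /* xchg with a memory operand always locks the bus on x86,	*/
    /* so no explicit lock prefix is needed.			*/
    __asm__ __volatile__("xchgl %0, %1"
    		: "=r" (oldval), "+m" (*addr)
    		: "0" (1)
    		: "memory");
    return oldval;
}
#endif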
void GC_lock()
{
#   define low_spin_max 30	/* spin cycles if we suspect uniprocessor */
#   define high_spin_max 1000	/* spin cycles for multiprocessor	  */
    static unsigned spin_max = low_spin_max;
    unsigned my_spin_max;
    static unsigned last_spins = 0;
    unsigned my_last_spins;
    volatile unsigned junk;
#   define PAUSE junk *= junk; junk *= junk; junk *= junk; junk *= junk
    int i;
    if (!GC_test_and_set(&GC_allocate_lock)) {
        return;
    }
    junk = 0;
    my_spin_max = spin_max;
    my_last_spins = last_spins;
    for (i = 0; i < my_spin_max; i++) {
        if (GC_collecting) goto yield;
        if (i < my_last_spins/2 || GC_allocate_lock) {
            PAUSE;
            continue;
        }
        if (!GC_test_and_set(&GC_allocate_lock)) {
            /*
             * Got it.  Spinning worked.  Thus we're probably not being
             * scheduled against the other process with which we were
             * contending.  Thus it makes sense to spin longer the next time.
             */
            last_spins = i;
            spin_max = high_spin_max;
            return;
        }
    }
    /* We are probably being scheduled against the other process.  Sleep. */
    spin_max = low_spin_max;
yield:
    for (i = 0;; ++i) {
        if (!GC_test_and_set(&GC_allocate_lock)) {
            return;
        }
#       define SLEEP_THRESHOLD 12
		/* nanosleep(<= 2ms) just spins under Linux.  We	*/
		/* want to be careful to avoid that behavior.		*/
        if (i < SLEEP_THRESHOLD) {
            sched_yield();
        } else {
            struct timespec ts;

            if (i > 26) i = 26;
		/* Don't wait for more than about 60 msecs, even	*/
		/* under extreme contention.				*/
            ts.tv_sec = 0;
            ts.tv_nsec = 1 << i;
            nanosleep(&ts, 0);
        }
    }
}
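/* (For reference: capping i at 26 makes the longest sleep 1 << 26 ns,	*/
/* i.e. roughly 67 msecs, which is the "about 60 msecs" figure above.)	*/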
# endif /* LINUX_THREADS */