/*
 * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1998 by Fergus Henderson.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/*
 * Support code for LinuxThreads, the clone()-based kernel
 * thread package for Linux which is included in libc6.
 *
 * This code relies on implementation details of LinuxThreads,
 * i.e. properties not guaranteed by the pthread standard:
 *
 *	- the function GC_linux_thread_top_of_stack(void)
 *	  relies on the way LinuxThreads lays out thread stacks
 *	  in the address space.
 *
 * Note that there is a lot of code duplication between linux_threads.c
 * and irix_threads.c; any changes made here may need to be reflected
 * there too.
 */
/* #define DEBUG_THREADS 1 */

/* ANSI C requires that a compilation unit contains something */
# include "gc_priv.h"

# if defined(LINUX_THREADS)

# include <pthread.h>
# include <signal.h>
# include <time.h>
# include <errno.h>
# include <unistd.h>
# include <sched.h>
# include <sys/mman.h>
# include <sys/time.h>
# include <semaphore.h>

#undef pthread_create
#undef pthread_sigmask
#undef pthread_join
#ifndef MAXSIG
#   define MAXSIG 32	/* Assumed bound on signal numbers; adjust if	*/
			/* the platform defines more.			*/
#endif

void GC_print_sig_mask()
{
    sigset_t blocked;
    int i;

    if (pthread_sigmask(SIG_BLOCK, NULL, &blocked) != 0)
    	ABORT("pthread_sigmask");
    GC_printf0("Blocked: ");
    for (i = 1; i <= MAXSIG; i++) {
        if (sigismember(&blocked, i)) { GC_printf1("%ld ", (long) i); }
    }
    GC_printf0("\n");
}
/* We use the allocation lock to protect thread-related data structures. */

/* The set of all known threads.  We intercept thread creation and	*/
/* joins.  We never actually create detached threads.  We allocate all	*/
/* new thread stacks ourselves.  These allow us to maintain this	*/
/* data structure.							*/
/* Protected by GC_thr_lock.						*/
/* Some of this should be declared volatile, but that's inconsistent	*/
/* with some library routine declarations.				*/
typedef struct GC_Thread_Rep {
    struct GC_Thread_Rep * next;  /* More recently allocated threads	*/
				  /* with a given pthread id come	*/
				  /* first.  (All but the first are	*/
				  /* guaranteed to be dead, but we may	*/
				  /* not yet have registered the join.)	*/
    pthread_t id;
    word flags;
#	define FINISHED 1	/* Thread has exited.			*/
#	define DETACHED 2	/* Thread is intended to be detached.	*/
#	define MAIN_THREAD 4	/* True for the original thread only.	*/

    int signal;			/* Set by the restart handler.		*/
    ptr_t stack_ptr;		/* Valid only when stopped.		*/
    ptr_t stack_end;		/* Top of the stack, as computed by	*/
				/* GC_linux_thread_top_of_stack().	*/
    void * status;		/* The value returned from the thread.	*/
				/* Used only to avoid premature		*/
				/* reclamation of any data it might	*/
				/* reference.				*/
} * GC_thread;

GC_thread GC_lookup_thread(pthread_t id);
/*
 * The only way to suspend threads given the pthread interface is to send
 * signals.  We can't use SIGSTOP directly, because we need to get the
 * thread to save its stack pointer in the GC thread table before
 * suspending.  So we have to reserve a signal of our own for this.
 * This means we have to intercept client calls to change the signal mask.
 * The LinuxThreads package already uses SIGUSR1 and SIGUSR2,
 * so we need to use something else.  I chose SIGPWR.
 * (Perhaps SIGUNUSED would be a better choice.)
 */
#define SIG_SUSPEND SIGPWR

#define SIG_RESTART SIGXCPU
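
/*
 * The resulting stop-the-world handshake, as implemented by the routines
 * below (a reading aid only; all names are the ones used in this file):
 *
 *   stopping thread			each target thread
 *   ---------------			------------------
 *   pthread_kill(t, SIG_SUSPEND) --->	GC_suspend_handler:
 *					  record stack_ptr, stack_end
 *   sem_wait(&GC_suspend_ack_sem) <--	  sem_post(&GC_suspend_ack_sem)
 *   ... mark from saved stacks ...	  sigsuspend() until SIG_RESTART
 *   pthread_kill(t, SIG_RESTART) --->	GC_restart_handler: sets me->signal
 */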
sem_t GC_suspend_ack_sem;
/*
GC_linux_thread_top_of_stack() relies on implementation details of
LinuxThreads, namely that thread stacks are allocated on 2M boundaries
and grow to no more than 2M.
To make sure that we're using LinuxThreads and not some other thread
package, we generate a dummy reference to `pthread_kill_other_threads_np'
(was `__pthread_initial_thread_bos' but that disappeared),
which is a symbol defined in LinuxThreads, but (hopefully) not in other
thread packages.
*/
void (*dummy_var_to_force_linux_threads)() = pthread_kill_other_threads_np;
#define LINUX_THREADS_STACK_SIZE  (2 * 1024 * 1024)
static inline ptr_t GC_linux_thread_top_of_stack(void)
{
  char *sp = GC_approx_sp();
  ptr_t tos = (ptr_t)(((unsigned long)sp | (LINUX_THREADS_STACK_SIZE - 1)) + 1);
#ifdef DEBUG_THREADS
  GC_printf1("SP = %lx\n", (unsigned long)sp);
  GC_printf1("TOS = %lx\n", (unsigned long)tos);
#endif
  return tos;
}
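
/* A worked instance of the computation above (illustrative addresses,	*/
/* not real ones): with the 2M quantum, LINUX_THREADS_STACK_SIZE - 1	*/
/* == 0x1fffff.  For sp == 0x40a3f12c:					*/
/*	(0x40a3f12c | 0x1fffff) + 1 == 0x40bfffff + 1 == 0x40c00000	*/
/* i.e. the next 2M boundary above sp, which LinuxThreads treats as	*/
/* the top of the current thread's stack.				*/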
void GC_suspend_handler(int sig)
{
    int dummy;
    pthread_t my_thread = pthread_self();
    GC_thread me;
    sigset_t mask;

    if (sig != SIG_SUSPEND) ABORT("Bad signal in suspend_handler");

#ifdef DEBUG_THREADS
    GC_printf1("Suspending 0x%x\n", my_thread);
#endif

    me = GC_lookup_thread(my_thread);
    /* The lookup here is safe, since I'm doing this on behalf	*/
    /* of a thread which holds the allocation lock in order	*/
    /* to stop the world.  Thus concurrent modification of the	*/
    /* data structure is impossible.				*/
    me -> stack_ptr = (ptr_t)(&dummy);
    me -> stack_end = GC_linux_thread_top_of_stack();

    /* Tell the thread that wants to stop the world that this	*/
    /* thread has been stopped.  Note that sem_post() is	*/
    /* the only async-signal-safe primitive in LinuxThreads.	*/
    sem_post(&GC_suspend_ack_sem);

    /* Wait until that thread tells us to restart by sending	*/
    /* this thread a SIG_RESTART signal.			*/
    /* SIG_RESTART should be masked at this point.  Thus there	*/
    /* is no race.						*/
    if (sigfillset(&mask) != 0) ABORT("sigfillset() failed");
    if (sigdelset(&mask, SIG_RESTART) != 0) ABORT("sigdelset() failed");

    if (sigdelset(&mask, SIGINT) != 0) ABORT("sigdelset() failed");
    if (sigdelset(&mask, SIGQUIT) != 0) ABORT("sigdelset() failed");
    if (sigdelset(&mask, SIGTERM) != 0) ABORT("sigdelset() failed");

    do {
	me->signal = 0;
	sigsuspend(&mask);	/* Wait for signal */
    } while (me->signal != SIG_RESTART);
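
    /* Note the race this construction avoids: if SIG_RESTART were	*/
    /* unmasked before the test of me->signal, the restart signal	*/
    /* could arrive between the test and the sigsuspend() and be	*/
    /* consumed without waking us.  sigsuspend() installs the new	*/
    /* mask and waits atomically, so no such window exists.		*/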
187 GC_printf1("Continuing 0x%x\n", my_thread
);
void GC_restart_handler(int sig)
{
    GC_thread me;

    if (sig != SIG_RESTART) ABORT("Bad signal in restart_handler");

    /* Let the GC_suspend_handler() know that we got a SIG_RESTART.	*/
    /* The lookup here is safe, since I'm doing this on behalf		*/
    /* of a thread which holds the allocation lock in order		*/
    /* to stop the world.  Thus concurrent modification of the		*/
    /* data structure is impossible.					*/
    me = GC_lookup_thread(pthread_self());
    me->signal = SIG_RESTART;

    /*
    ** Note: even if we didn't do anything useful here,
    ** it would still be necessary to have a signal handler,
    ** rather than ignoring the signals; otherwise
    ** the signals would not be delivered at all, and
    ** would thus not interrupt the sigsuspend() above.
    */

#ifdef DEBUG_THREADS
    GC_printf1("In GC_restart_handler for 0x%x\n", pthread_self());
#endif
}
GC_bool GC_thr_initialized = FALSE;

# define THREAD_TABLE_SZ 128	/* Must be power of 2 */
volatile GC_thread GC_threads[THREAD_TABLE_SZ];
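
/* Because THREAD_TABLE_SZ is a power of 2, the hash computed below,	*/
/*	hv = ((word)id) % THREAD_TABLE_SZ;				*/
/* is equivalent to masking with (THREAD_TABLE_SZ - 1).  Both forms	*/
/* rely on pthread_t being an integral type under LinuxThreads.	*/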
/* Add a thread to GC_threads.  We assume it wasn't already there.	*/
/* Caller holds allocation lock.					*/
GC_thread GC_new_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    GC_thread result;
    static struct GC_Thread_Rep first_thread;
    static GC_bool first_thread_used = FALSE;

    if (!first_thread_used) {
    	result = &first_thread;
    	first_thread_used = TRUE;
    	/* Don't acquire allocation lock, since we may already hold it. */
    } else {
        result = (struct GC_Thread_Rep *)
        	 GC_generic_malloc_inner(sizeof(struct GC_Thread_Rep), NORMAL);
    }
    if (result == 0) return(0);
    result -> id = id;
    result -> next = GC_threads[hv];
    GC_threads[hv] = result;
    /* result -> flags = 0; */
    return(result);
}
/* Delete a thread from GC_threads.  We assume it is there.	*/
/* (The code intentionally traps if it wasn't.)			*/
/* Caller holds allocation lock.				*/
void GC_delete_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (!pthread_equal(p -> id, id)) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
}
/* If a thread has been joined, but we have not yet		*/
/* been notified, then there may be more than one thread	*/
/* in the table with the same pthread id.			*/
/* This is OK, but we need a way to delete a specific one.	*/
void GC_delete_gc_thread(pthread_t id, GC_thread gc_id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (p != gc_id) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
}
/* Return a GC_thread corresponding to a given thread_t.	*/
/* Returns 0 if it's not there.					*/
/* Caller holds allocation lock or otherwise inhibits		*/
/* updates.							*/
/* If there is more than one thread with the given id we	*/
/* return the most recent one.					*/
GC_thread GC_lookup_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];

    while (p != 0 && !pthread_equal(p -> id, id)) p = p -> next;
    return(p);
}
/* Caller holds allocation lock.	*/
void GC_stop_world()
{
    pthread_t my_thread = pthread_self();
    register int i;
    register GC_thread p;
    register int n_live_threads = 0;
    register int result;

    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> id != my_thread) {
            if (p -> flags & FINISHED) continue;
            n_live_threads++;
#ifdef DEBUG_THREADS
            GC_printf1("Sending suspend signal to 0x%x\n", p -> id);
#endif
            result = pthread_kill(p -> id, SIG_SUSPEND);
            switch(result) {
                case ESRCH:
                    /* Not really there anymore.  Possible? */
                    n_live_threads--;
                    break;
                case 0:
                    break;
                default:
                    ABORT("pthread_kill failed");
            }
        }
      }
    }
    for (i = 0; i < n_live_threads; i++) {
    	sem_wait(&GC_suspend_ack_sem);
    }
#ifdef DEBUG_THREADS
    GC_printf1("World stopped 0x%x\n", pthread_self());
#endif
}
/* Caller holds allocation lock.	*/
void GC_start_world()
{
    pthread_t my_thread = pthread_self();
    register int i;
    register GC_thread p;
    register int n_live_threads = 0;
    register int result;

#ifdef DEBUG_THREADS
    GC_printf0("World starting\n");
#endif

    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> id != my_thread) {
            if (p -> flags & FINISHED) continue;
            n_live_threads++;
#ifdef DEBUG_THREADS
            GC_printf1("Sending restart signal to 0x%x\n", p -> id);
#endif
            result = pthread_kill(p -> id, SIG_RESTART);
            switch(result) {
                case ESRCH:
                    /* Not really there anymore.  Possible? */
                    n_live_threads--;
                    break;
                case 0:
                    break;
                default:
                    ABORT("pthread_kill failed");
            }
        }
      }
    }
#ifdef DEBUG_THREADS
    GC_printf0("World started\n");
#endif
}
/* We hold allocation lock.  We assume the world is stopped.	*/
void GC_push_all_stacks()
{
    register int i;
    register GC_thread p;
    register ptr_t sp = GC_approx_sp();
    register ptr_t lo, hi;
    pthread_t me = pthread_self();

    if (!GC_thr_initialized) GC_thr_init();
#ifdef DEBUG_THREADS
    GC_printf1("Pushing stacks from thread 0x%lx\n", (unsigned long) me);
#endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> flags & FINISHED) continue;
        if (pthread_equal(p -> id, me)) {
            lo = GC_approx_sp();
        } else {
            lo = p -> stack_ptr;
        }
        if ((p -> flags & MAIN_THREAD) == 0) {
            if (pthread_equal(p -> id, me)) {
                hi = GC_linux_thread_top_of_stack();
            } else {
                hi = p -> stack_end;
            }
        } else {
            /* The original stack. */
            hi = GC_stackbottom;
        }
#ifdef DEBUG_THREADS
        GC_printf3("Stack for thread 0x%lx = [%lx,%lx)\n",
                   (unsigned long) p -> id,
                   (unsigned long) lo, (unsigned long) hi);
#endif
        GC_push_all_stack(lo, hi);
      }
    }
}
/* We hold the allocation lock.	*/
void GC_thr_init()
{
    GC_thread t;
    struct sigaction act;

    if (GC_thr_initialized) return;
    GC_thr_initialized = TRUE;

    if (sem_init(&GC_suspend_ack_sem, 0, 0) != 0)
    	ABORT("sem_init failed");

    act.sa_flags = SA_RESTART;
    if (sigfillset(&act.sa_mask) != 0) {
    	ABORT("sigfillset() failed");
    }
    if (sigdelset(&act.sa_mask, SIGINT) != 0) {
    	ABORT("sigdelset() failed");
    }
    if (sigdelset(&act.sa_mask, SIGQUIT) != 0) {
    	ABORT("sigdelset() failed");
    }
    if (sigdelset(&act.sa_mask, SIGTERM) != 0) {
    	ABORT("sigdelset() failed");
    }

    /* SIG_RESTART is unmasked by the handler when necessary.	*/
    act.sa_handler = GC_suspend_handler;
    if (sigaction(SIG_SUSPEND, &act, NULL) != 0) {
    	ABORT("Cannot set SIG_SUSPEND handler");
    }

    act.sa_handler = GC_restart_handler;
    if (sigaction(SIG_RESTART, &act, NULL) != 0) {
    	ABORT("Cannot set SIG_RESTART handler");
    }

    /* Add the initial thread, so we can stop it.	*/
    t = GC_new_thread(pthread_self());
    t -> stack_ptr = 0;
    t -> flags = DETACHED | MAIN_THREAD;
}
int GC_pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
{
    sigset_t fudged_set;

    if (set != NULL && (how == SIG_BLOCK || how == SIG_SETMASK)) {
        fudged_set = *set;
        sigdelset(&fudged_set, SIG_SUSPEND);
        set = &fudged_set;
    }
    return(pthread_sigmask(how, set, oset));
}
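
/* An illustrative (hypothetical) client fragment: even if the client	*/
/* tries to block every signal, routing the call through the wrapper	*/
/* above leaves SIG_SUSPEND deliverable, so the collector can still	*/
/* stop this thread.  Compiled only if GC_SIGMASK_EXAMPLE is defined,	*/
/* which is not a real configuration macro of this package.		*/
#ifdef GC_SIGMASK_EXAMPLE
  static void GC_sigmask_example(void)
  {
      sigset_t all;

      if (sigfillset(&all) != 0) ABORT("sigfillset() failed");
      /* Under gc.h's macro redirection this call becomes		*/
      /* GC_pthread_sigmask, which deletes SIG_SUSPEND from the set	*/
      /* before installing it.						*/
      if (GC_pthread_sigmask(SIG_BLOCK, &all, NULL) != 0)
          ABORT("pthread_sigmask");
  }
#endif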
struct start_info {
    void *(*start_routine)(void *);
    void *arg;
    word flags;
    sem_t registered;		/* 1 ==> in our thread table, but	*/
				/* parent hasn't yet noticed.		*/
};
void GC_thread_exit_proc(void *arg)
{
    GC_thread me;
    struct start_info * si = arg;

    LOCK();
    me = GC_lookup_thread(pthread_self());
    if (me -> flags & DETACHED) {
    	GC_delete_thread(pthread_self());
    } else {
	me -> flags |= FINISHED;
    }
    UNLOCK();
}
int GC_pthread_join(pthread_t thread, void **retval)
{
    int result;
    GC_thread thread_gc_id;

    LOCK();
    thread_gc_id = GC_lookup_thread(thread);
    /* This is guaranteed to be the intended one, since the thread id	*/
    /* can't have been recycled by pthreads.				*/
    UNLOCK();
    result = pthread_join(thread, retval);
    LOCK();
    /* Here the pthread thread id may have been recycled.	*/
    GC_delete_gc_thread(thread, thread_gc_id);
    UNLOCK();
    return(result);
}
void * GC_start_routine(void * arg)
{
    struct start_info * si = arg;
    void * result;
    GC_thread me;
    pthread_t my_pthread;
    void *(*start)(void *);
    void *start_arg;

    my_pthread = pthread_self();
    LOCK();
    me = GC_new_thread(my_pthread);
    me -> flags = si -> flags;
    UNLOCK();
    start = si -> start_routine;
    start_arg = si -> arg;
    sem_post(&(si -> registered));
    pthread_cleanup_push(GC_thread_exit_proc, si);
#   ifdef DEBUG_THREADS
        GC_printf1("Starting thread 0x%lx\n", pthread_self());
        GC_printf1("pid = %ld\n", (long) getpid());
        GC_printf1("sp = 0x%lx\n", (long) &arg);
        GC_printf1("start_routine = 0x%lx\n", start);
#   endif
    result = (*start)(start_arg);
#   ifdef DEBUG_THREADS
        GC_printf1("Finishing thread 0x%x\n", pthread_self());
#   endif
    me -> status = result;
    me -> flags |= FINISHED;
    pthread_cleanup_pop(1);
	/* Cleanup acquires lock, ensuring that we can't exit		*/
	/* while a collection that thinks we're alive is trying to stop	*/
	/* us.								*/
    return(result);
}
int
GC_pthread_create(pthread_t *new_thread,
		  const pthread_attr_t *attr,
		  void *(*start_routine)(void *), void *arg)
{
    int result;
    pthread_t my_new_thread;
    word my_flags = 0;
    int detachstate;
    pthread_attr_t new_attr;
    struct start_info * si = GC_malloc(sizeof(struct start_info));
	/* This is otherwise saved only in an area mmapped by the thread */
	/* library, which isn't visible to the collector.		  */

    if (0 == si) return(ENOMEM);
    sem_init(&(si -> registered), 0, 0);
    si -> start_routine = start_routine;
    si -> arg = arg;
    LOCK();
    if (!GC_thr_initialized) GC_thr_init();
    UNLOCK();
    if (NULL == attr) {
        (void) pthread_attr_init(&new_attr);
    } else {
        new_attr = *attr;
    }
    pthread_attr_getdetachstate(&new_attr, &detachstate);
    if (PTHREAD_CREATE_DETACHED == detachstate) my_flags |= DETACHED;
    si -> flags = my_flags;
    result = pthread_create(new_thread, &new_attr, GC_start_routine, si);
    /* Wait until child has been added to the thread table.		*/
    /* This also ensures that we hold onto si until the child is done	*/
    /* with it.  Thus it doesn't matter whether it is otherwise		*/
    /* visible to the collector.					*/
    if (0 != sem_wait(&(si -> registered))) ABORT("sem_wait failed");
    sem_destroy(&(si -> registered));
    /* pthread_attr_destroy(&new_attr); */
    return(result);
}
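
/* A minimal (hypothetical) usage sketch: client code that includes	*/
/* gc.h is expected to reach the wrapper above through a macro		*/
/* redefinition of pthread_create; calling it directly, as here, is	*/
/* equivalent.  Compiled only if GC_CREATE_EXAMPLE is defined, which	*/
/* is not a real configuration macro of this package.			*/
#ifdef GC_CREATE_EXAMPLE
  static void * GC_example_start(void * arg)
  {
      return arg;	/* Thread body; its stack is scanned by the GC. */
  }

  static int GC_create_example(void)
  {
      pthread_t t;
      void * status;
      int code = GC_pthread_create(&t, NULL, GC_example_start, NULL);

      if (code != 0) return code;
      /* GC_pthread_join also unregisters the thread from GC_threads. */
      return GC_pthread_join(t, &status);
  }
#endif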
GC_bool GC_collecting = 0;	/* A hint that we're in the collector and */
				/* holding the allocation lock for an	  */
				/* extended period.			  */
/* Reasonably fast spin locks.  Basically the same implementation	*/
/* as STL alloc.h.  This isn't really the right way to do this,		*/
/* but until the POSIX scheduling mess gets straightened out ...	*/

volatile unsigned int GC_allocate_lock = 0;
void GC_lock()
{
#   define low_spin_max 30	/* spin cycles if we suspect uniprocessor */
#   define high_spin_max 1000	/* spin cycles for multiprocessor */
    static unsigned spin_max = low_spin_max;
    unsigned my_spin_max;
    static unsigned last_spins = 0;
    unsigned my_last_spins;
    volatile unsigned junk;
#   define PAUSE junk *= junk; junk *= junk; junk *= junk; junk *= junk
    int i;
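
    /* Shape of the locking protocol below (a reading aid only):	*/
    /* 1. Try GC_test_and_set once; return on success.			*/
    /* 2. Spin up to spin_max times, using PAUSE (multiplications on	*/
    /*    a volatile, which the compiler can't optimize away) as a	*/
    /*    delay, and retrying only when the lock looks free.		*/
    /* 3. If spinning won, raise spin_max for next time (we're		*/
    /*    probably on a multiprocessor); otherwise lower it and fall	*/
    /*    back to sched_yield(), then to sleeping once SLEEP_THRESHOLD	*/
    /*    yields have failed, since short sleeps just spin under Linux.	*/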
    if (!GC_test_and_set(&GC_allocate_lock)) {
        return;
    }
    my_spin_max = spin_max;
    my_last_spins = last_spins;
    for (i = 0; i < my_spin_max; i++) {
        if (GC_collecting) goto yield;
        if (i < my_last_spins/2 || GC_allocate_lock) {
            PAUSE;
            continue;
        }
        if (!GC_test_and_set(&GC_allocate_lock)) {
	    /*
	     * Spinning worked.  Thus we're probably not being scheduled
	     * against the other process with which we were contending.
	     * Thus it makes sense to spin longer the next time.
	     */
	    last_spins = i;
	    spin_max = high_spin_max;
            return;
        }
    }
    /* We are probably being scheduled against the other process.  Sleep. */
    spin_max = low_spin_max;
yield:
    for (i = 0;; ++i) {
        if (!GC_test_and_set(&GC_allocate_lock)) {
            return;
        }
#       define SLEEP_THRESHOLD 12
	/* nanosleep(<= 2ms) just spins under Linux.  We	*/
	/* want to be careful to avoid that behavior.		*/
        if (i < SLEEP_THRESHOLD) {
            sched_yield();
	} else {
	    struct timespec ts;

	    if (i > 26) i = 26;
			/* Don't wait for more than about 60msecs, even	*/
			/* under extreme contention.			*/
	    ts.tv_sec = 0;
	    ts.tv_nsec = 1 << i;
	    nanosleep(&ts, 0);
	}
    }
}
# endif /* LINUX_THREADS */