/*
 * Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/*
 * Support code for Irix (>=6.2) Pthreads.  This relies on properties
 * not guaranteed by the Pthread standard.  It may or may not be portable
 * to other implementations.
 *
 * This now also includes an initial attempt at thread support for
 * HP/UX.
 *
 * Note that there is a lot of code duplication between linux_threads.c
 * and hpux_irix_threads.c; any changes made here may need to be reflected
 * there too.
 */
# if defined(GC_IRIX_THREADS) || defined(IRIX_THREADS)

# include "private/gc_priv.h"
# include <semaphore.h>
# include <sys/mman.h>
# include <sys/time.h>

#undef pthread_sigmask
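/* The #undef matters because gc.h redirects client calls of           */
/* pthread_sigmask to GC_pthread_sigmask (defined below); removing     */
/* the macro here lets that wrapper reach the real library routine.    */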
void GC_print_sig_mask()
{
    sigset_t blocked;
    int i;

    if (pthread_sigmask(SIG_BLOCK, NULL, &blocked) != 0)
        ABORT("pthread_sigmask");
    GC_printf0("Blocked: ");
    for (i = 1; i <= MAXSIG; i++) {
        if (sigismember(&blocked, i)) { GC_printf1("%ld ", (long)i); }
    }
    GC_printf0("\n");
}
/* We use the allocation lock to protect thread-related data structures. */

/* The set of all known threads.  We intercept thread creation and     */
/* joins.  We never actually create detached threads.  We allocate all */
/* new thread stacks ourselves.  These allow us to maintain this       */
/* data structure.                                                     */
/* Protected by GC_thr_lock.                                           */
/* Some of this should be declared volatile, but that's inconsistent   */
/* with some library routine declarations.                             */
typedef struct GC_Thread_Rep {
    struct GC_Thread_Rep * next;  /* More recently allocated threads    */
                                  /* with a given pthread id come       */
                                  /* first.  (All but the first are     */
                                  /* guaranteed to be dead, but we may  */
                                  /* not yet have registered the join.) */
    pthread_t id;
    word stop;
#       define NOT_STOPPED 0
#       define PLEASE_STOP 1
#       define STOPPED 2
    word flags;
#       define FINISHED 1       /* Thread has exited.                  */
#       define DETACHED 2       /* Thread is intended to be detached.  */
#       define CLIENT_OWNS_STACK 4
                                /* Stack was supplied by client.       */
    ptr_t stack;
    ptr_t stack_ptr;            /* Valid only when stopped.            */
                                /* But must be within stack region at  */
                                /* all times.                          */
    size_t stack_size;          /* 0 for original thread.              */
    void * status;              /* Used only to avoid premature        */
                                /* reclamation of any data it might    */
                                /* reference.                          */
} * GC_thread;

GC_thread GC_lookup_thread(pthread_t id);
/*
 * The only way to suspend threads given the pthread interface is to send
 * signals.  Unfortunately, this means we have to reserve
 * a signal, and intercept client calls to change the signal mask.
 */
# define SIG_SUSPEND (SIGRTMIN + 6)
pthread_mutex_t GC_suspend_lock = PTHREAD_MUTEX_INITIALIZER;
                                /* Number of threads stopped so far    */
pthread_cond_t GC_suspend_ack_cv = PTHREAD_COND_INITIALIZER;
pthread_cond_t GC_continue_cv = PTHREAD_COND_INITIALIZER;
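/* The handshake implemented below: the stopping thread sets a         */
/* target's stop field to PLEASE_STOP and sends it SIG_SUSPEND.  The   */
/* target's handler records its stack pointer, sets stop to STOPPED,   */
/* signals GC_suspend_ack_cv, and then blocks on GC_continue_cv until  */
/* GC_start_world() broadcasts.                                        */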
void GC_suspend_handler(int sig)
{
    int dummy;
    GC_thread me;

    if (sig != SIG_SUSPEND) ABORT("Bad signal in suspend_handler");
    me = GC_lookup_thread(pthread_self());
    /* The lookup here is safe, since I'm doing this on behalf         */
    /* of a thread which holds the allocation lock in order            */
    /* to stop the world.  Thus concurrent modification of the         */
    /* data structure is impossible.                                   */
    if (PLEASE_STOP != me -> stop) {
        /* Misdirected signal.  */
        pthread_mutex_unlock(&GC_suspend_lock);
        return;
    }
    pthread_mutex_lock(&GC_suspend_lock);
    me -> stack_ptr = (ptr_t)(&dummy);
    me -> stop = STOPPED;
    pthread_cond_signal(&GC_suspend_ack_cv);
    pthread_cond_wait(&GC_continue_cv, &GC_suspend_lock);
    pthread_mutex_unlock(&GC_suspend_lock);
    /* GC_printf1("Continuing 0x%x\n", pthread_self()); */
}
GC_bool GC_thr_initialized = FALSE;

size_t GC_min_stack_sz;

size_t GC_page_sz;
# define N_FREE_LISTS 25
ptr_t GC_stack_free_lists[N_FREE_LISTS] = { 0 };
                /* GC_stack_free_lists[i] is free list for stacks of   */
                /* size GC_min_stack_sz*2**i.                          */
                /* Free lists are linked through first word.           */
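/* Worked example (assuming GC_min_stack_sz is 4K): a request for a    */
/* 20K stack searches 4K, 8K, 16K, 32K, stopping at index 3, and is    */
/* handed out -- and later freed -- as a 32K stack.                    */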
/* Return a stack of size at least *stack_size.  *stack_size is        */
/* replaced by the actual stack size.                                  */
/* Caller holds allocation lock.                                       */
ptr_t GC_stack_alloc(size_t * stack_size)
{
    register size_t requested_sz = *stack_size;
    register size_t search_sz = GC_min_stack_sz;
    register int index = 0;     /* = log2(search_sz/GC_min_stack_sz) */
    register ptr_t result;

    while (search_sz < requested_sz) {
        search_sz *= 2;
        index++;
    }
    if ((result = GC_stack_free_lists[index]) == 0
        && (result = GC_stack_free_lists[index+1]) != 0) {
        /* Try next size up. */
        search_sz *= 2; index++;
    }
    if (result != 0) {
        GC_stack_free_lists[index] = *(ptr_t *)result;
    } else {
        result = (ptr_t) GC_scratch_alloc(search_sz + 2*GC_page_sz);
        result = (ptr_t)(((word)result + GC_page_sz) & ~(GC_page_sz - 1));
        /* Protect hottest page to detect overflow. */
#       ifdef STACK_GROWS_UP
          /* mprotect(result + search_sz, GC_page_sz, PROT_NONE); */
#       else
          /* mprotect(result, GC_page_sz, PROT_NONE); */
          result += GC_page_sz;
#       endif
    }
    *stack_size = search_sz;
    return(result);
}
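/* Note on the fresh-allocation path above: search_sz + 2*GC_page_sz   */
/* bytes are grabbed because rounding the base up to a page boundary   */
/* consumes less than one page, and the remaining slack leaves room    */
/* for the (currently disabled) guard page at the hot end.             */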
/* Caller holds allocation lock.                                       */
void GC_stack_free(ptr_t stack, size_t size)
{
    register int index = 0;
    register size_t search_sz = GC_min_stack_sz;

    while (search_sz < size) {
        search_sz *= 2;
        index++;
    }
    if (search_sz != size) ABORT("Bad stack size");
    *(ptr_t *)stack = GC_stack_free_lists[index];
    GC_stack_free_lists[index] = stack;
}
# define THREAD_TABLE_SZ 128    /* Must be power of 2 */
volatile GC_thread GC_threads[THREAD_TABLE_SZ];

void GC_push_thread_structures GC_PROTO((void))
{
    GC_push_all((ptr_t)(GC_threads), (ptr_t)(GC_threads)+sizeof(GC_threads));
}
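/* GC_threads is a chained hash table: a thread lives in bucket        */
/* ((word)id) % THREAD_TABLE_SZ, linked through its next field with    */
/* the most recently added entry first.  Keeping THREAD_TABLE_SZ a     */
/* power of 2 keeps that modulus cheap.                                */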
/* Add a thread to GC_threads.  We assume it wasn't already there.     */
/* Caller holds allocation lock.                                       */
GC_thread GC_new_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    GC_thread result;
    static struct GC_Thread_Rep first_thread;
    static GC_bool first_thread_used = FALSE;

    if (!first_thread_used) {
        result = &first_thread;
        first_thread_used = TRUE;
        /* Don't acquire allocation lock, since we may already hold it. */
    } else {
        result = (struct GC_Thread_Rep *)
                 GC_INTERNAL_MALLOC(sizeof(struct GC_Thread_Rep), NORMAL);
    }
    if (result == 0) return(0);
    result -> id = id;
    result -> next = GC_threads[hv];
    GC_threads[hv] = result;
    /* result -> flags = 0; */
    /* result -> stop = 0; */
    return(result);
}
/* Delete a thread from GC_threads.  We assume it is there.            */
/* (The code intentionally traps if it wasn't.)                        */
/* Caller holds allocation lock.                                       */
void GC_delete_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (!pthread_equal(p -> id, id)) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
}
/* If a thread has been joined, but we have not yet                    */
/* been notified, then there may be more than one thread               */
/* in the table with the same pthread id.                              */
/* This is OK, but we need a way to delete a specific one.             */
void GC_delete_gc_thread(pthread_t id, GC_thread gc_id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (p != gc_id) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
}
/* Return a GC_thread corresponding to a given thread_t.               */
/* Returns 0 if it's not there.                                        */
/* Caller holds allocation lock or otherwise inhibits                  */
/* updates.                                                            */
/* If there is more than one thread with the given id we               */
/* return the most recent one.                                         */
GC_thread GC_lookup_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];

    while (p != 0 && !pthread_equal(p -> id, id)) p = p -> next;
    return(p);
}
/* Caller holds allocation lock.                                       */
void GC_stop_world()
{
    pthread_t my_thread = pthread_self();
    register int i;
    register GC_thread p;
    register int result;
    struct timespec timeout;

    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> id != my_thread) {
            if (p -> flags & FINISHED) {
                p -> stop = STOPPED;
                continue;
            }
            p -> stop = PLEASE_STOP;
            result = pthread_kill(p -> id, SIG_SUSPEND);
            /* GC_printf1("Sent signal to 0x%x\n", p -> id); */
            switch(result) {
                case ESRCH:
                    /* Not really there anymore.  Possible? */
                    p -> stop = STOPPED;
                    break;
                case 0:
                    break;
                default:
                    ABORT("pthread_kill failed");
            }
        }
      }
    }
    pthread_mutex_lock(&GC_suspend_lock);
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        while (p -> id != my_thread && p -> stop != STOPPED) {
            clock_gettime(CLOCK_REALTIME, &timeout);
            timeout.tv_nsec += 50000000;        /* 50 msecs */
            if (timeout.tv_nsec >= 1000000000) {
                timeout.tv_nsec -= 1000000000;
                ++timeout.tv_sec;
            }
            result = pthread_cond_timedwait(&GC_suspend_ack_cv,
                                            &GC_suspend_lock, &timeout);
            if (result == ETIMEDOUT) {
                /* Signal was lost or misdirected.  Try again.          */
                /* Duplicate signals should be benign.                  */
                result = pthread_kill(p -> id, SIG_SUSPEND);
            }
        }
      }
    }
    pthread_mutex_unlock(&GC_suspend_lock);
    /* GC_printf1("World stopped 0x%x\n", pthread_self()); */
}
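/* The 50 msec timeout in the second pass covers lost or misdirected   */
/* signals; resending is safe because a thread whose stop field is not */
/* PLEASE_STOP simply returns from the suspend handler.                */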
/* Caller holds allocation lock.                                       */
void GC_start_world()
{
    register GC_thread p;
    register int i;

    /* GC_printf0("World starting\n"); */
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        p -> stop = NOT_STOPPED;
      }
    }
    pthread_mutex_lock(&GC_suspend_lock);
    /* All other threads are at pthread_cond_wait in signal handler.   */
    /* Otherwise we couldn't have acquired the lock.                   */
    pthread_mutex_unlock(&GC_suspend_lock);
    pthread_cond_broadcast(&GC_continue_cv);
}
--> not really supported yet.
int GC_is_thread_stack(ptr_t addr)
{
    register int i;
    register GC_thread p;

    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> stack_size != 0) {
            if (p -> stack <= addr &&
                addr < p -> stack + p -> stack_size)
                return 1;
        }
      }
    }
    return 0;
}
/* We hold allocation lock.  Should do exactly the right thing if the  */
/* world is stopped.  Should not fail if it isn't.                     */
void GC_push_all_stacks()
{
    register int i;
    register GC_thread p;
    register ptr_t sp = GC_approx_sp();
    register ptr_t hot, cold;
    pthread_t me = pthread_self();

    if (!GC_thr_initialized) GC_thr_init();
    /* GC_printf1("Pushing stacks from thread 0x%x\n", me); */
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> flags & FINISHED) continue;
        if (pthread_equal(p -> id, me)) {
            hot = GC_approx_sp();
        } else {
            hot = p -> stack_ptr;
        }
        if (p -> stack_size != 0) {
#         ifdef STACK_GROWS_UP
            cold = p -> stack;
#         else
            cold = p -> stack + p -> stack_size;
#         endif
        } else {
            /* The original stack. */
            cold = GC_stackbottom;
        }
#       ifdef STACK_GROWS_UP
          GC_push_all_stack(cold, hot);
#       else
          GC_push_all_stack(hot, cold);
#       endif
      }
    }
}
/* We hold the allocation lock.                                        */
void GC_thr_init()
{
    GC_thread t;
    struct sigaction act;

    if (GC_thr_initialized) return;
    GC_thr_initialized = TRUE;
    GC_min_stack_sz = HBLKSIZE;
    GC_page_sz = sysconf(_SC_PAGESIZE);
    (void) sigaction(SIG_SUSPEND, 0, &act);
    if (act.sa_handler != SIG_DFL)
        ABORT("Previously installed SIG_SUSPEND handler");
    /* Install handler. */
    act.sa_handler = GC_suspend_handler;
    act.sa_flags = SA_RESTART;
    (void) sigemptyset(&act.sa_mask);
    if (0 != sigaction(SIG_SUSPEND, &act, 0))
        ABORT("Failed to install SIG_SUSPEND handler");
    /* Add the initial thread, so we can stop it. */
    t = GC_new_thread(pthread_self());
    t -> stack_size = 0;
    t -> stack_ptr = (ptr_t)(&t);
    t -> flags = DETACHED;
}
int GC_pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
{
    sigset_t fudged_set;

    if (set != NULL && (how == SIG_BLOCK || how == SIG_SETMASK)) {
        fudged_set = *set;
        sigdelset(&fudged_set, SIG_SUSPEND);
        set = &fudged_set;
    }
    return(pthread_sigmask(how, set, oset));
}
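/* For example, a client that does                                     */
/*     sigfillset(&set); pthread_sigmask(SIG_SETMASK, &set, 0);        */
/* (redirected here) ends up with every signal blocked except          */
/* SIG_SUSPEND, so the collector can still stop the thread.            */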
struct start_info {
    void *(*start_routine)(void *);
    void *arg;
    word flags;
    ptr_t stack;
    size_t stack_size;
    sem_t registered;           /* 1 ==> in our thread table, but      */
                                /* parent hasn't yet noticed.          */
};
void GC_thread_exit_proc(void *arg)
{
    GC_thread me;

    LOCK();
    me = GC_lookup_thread(pthread_self());
    if (me -> flags & DETACHED) {
        GC_delete_thread(pthread_self());
    } else {
        me -> flags |= FINISHED;
    }
    UNLOCK();
}
int GC_pthread_join(pthread_t thread, void **retval)
{
    int result;
    GC_thread thread_gc_id;

    LOCK();
    thread_gc_id = GC_lookup_thread(thread);
    /* This is guaranteed to be the intended one, since the thread id  */
    /* can't have been recycled by pthreads.                           */
    UNLOCK();
    result = pthread_join(thread, retval);
    /* Some versions of the Irix pthreads library can erroneously      */
    /* return EINTR when the call succeeds.                            */
    if (EINTR == result) result = 0;
    LOCK();
    /* Here the pthread thread id may have been recycled. */
    GC_delete_gc_thread(thread, thread_gc_id);
    UNLOCK();
    return result;
}
int GC_pthread_detach(pthread_t thread)
{
    int result;
    GC_thread thread_gc_id;

    LOCK();
    thread_gc_id = GC_lookup_thread(thread);
    UNLOCK();
    result = REAL_FUNC(pthread_detach)(thread);
    if (result == 0) {
        LOCK();
        thread_gc_id -> flags |= DETACHED;
        /* Here the pthread thread id may have been recycled. */
        if (thread_gc_id -> flags & FINISHED) {
            GC_delete_gc_thread(thread, thread_gc_id);
        }
        UNLOCK();
    }
    return result;
}
void * GC_start_routine(void * arg)
{
    struct start_info * si = arg;
    void * result;
    GC_thread me;
    pthread_t my_pthread;
    void *(*start)(void *);
    void *start_arg;

    my_pthread = pthread_self();
    /* If a GC occurs before the thread is registered, that GC will    */
    /* ignore this thread.  That's fine, since it will block trying to */
    /* acquire the allocation lock, and won't yet hold interesting     */
    /* pointers.                                                       */
    LOCK();
    /* We register the thread here instead of in the parent, so that   */
    /* we don't need to hold the allocation lock during pthread_create. */
    /* Holding the allocation lock there would make REDIRECT_MALLOC    */
    /* impossible.  It probably still doesn't work, but we're a little */
    /* closer ...                                                      */
    /* This unfortunately means that we have to be careful the parent  */
    /* doesn't try to do a pthread_join before we're registered.       */
    me = GC_new_thread(my_pthread);
    me -> flags = si -> flags;
    me -> stack = si -> stack;
    me -> stack_size = si -> stack_size;
    me -> stack_ptr = (ptr_t)si -> stack + si -> stack_size - sizeof(word);
    UNLOCK();
    start = si -> start_routine;
    start_arg = si -> arg;
    sem_post(&(si -> registered));
    pthread_cleanup_push(GC_thread_exit_proc, 0);
    result = (*start)(start_arg);
    me -> status = result;
    me -> flags |= FINISHED;
    pthread_cleanup_pop(1);
    /* This involves acquiring the lock, ensuring that we can't exit   */
    /* while a collection that thinks we're alive is trying to stop    */
    /* us.                                                             */
    return(result);
}
# define copy_attr(pa_ptr, source) *(pa_ptr) = *(source)
int
GC_pthread_create(pthread_t *new_thread,
                  const pthread_attr_t *attr,
                  void *(*start_routine)(void *), void *arg)
{
    int result;
    void * stack;
    size_t stacksize;
    pthread_attr_t new_attr;
    int detachstate;
    word my_flags = 0;
    struct start_info * si = GC_malloc(sizeof(struct start_info));
        /* This is otherwise saved only in an area mmapped by the thread */
        /* library, which isn't visible to the collector.                */

    if (0 == si) return(ENOMEM);
    if (0 != sem_init(&(si -> registered), 0, 0)) {
        ABORT("sem_init failed");
    }
    si -> start_routine = start_routine;
    si -> arg = arg;

    LOCK();
    if (!GC_thr_initialized) GC_thr_init();
    if (NULL == attr) {
        stack = 0;
        (void) pthread_attr_init(&new_attr);
    } else {
        copy_attr(&new_attr, attr);
        pthread_attr_getstackaddr(&new_attr, &stack);
    }
    pthread_attr_getstacksize(&new_attr, &stacksize);
    pthread_attr_getdetachstate(&new_attr, &detachstate);
    if (stacksize < GC_min_stack_sz) ABORT("Stack too small");
    if (0 == stack) {
        stack = (void *)GC_stack_alloc(&stacksize);
        if (0 == stack) {
            UNLOCK();
            return(ENOMEM);
        }
        pthread_attr_setstackaddr(&new_attr, stack);
    } else {
        my_flags |= CLIENT_OWNS_STACK;
    }
    if (PTHREAD_CREATE_DETACHED == detachstate) my_flags |= DETACHED;
    si -> flags = my_flags;
    si -> stack = stack;
    si -> stack_size = stacksize;
    result = pthread_create(new_thread, &new_attr, GC_start_routine, si);
    if (0 != result && !(my_flags & CLIENT_OWNS_STACK)) {
        /* Thread creation failed; reclaim the stack we allocated. */
        GC_stack_free(stack, stacksize);
    }
    UNLOCK();
    /* Wait until child has been added to the thread table.            */
    /* This also ensures that we hold onto si until the child is done  */
    /* with it.  Thus it doesn't matter whether it is otherwise        */
    /* visible to the collector.                                       */
    while (0 != sem_wait(&(si -> registered))) {
        if (errno != EINTR) {
            GC_printf1("Sem_wait: errno = %ld\n", (unsigned long) errno);
            ABORT("sem_wait failed");
        }
    }
    sem_destroy(&(si -> registered));
    pthread_attr_destroy(&new_attr);  /* Probably unnecessary under Irix */
    return(result);
}
VOLATILE GC_bool GC_collecting = 0;
                        /* A hint that we're in the collector and      */
                        /* holding the allocation lock for an          */
                        /* extended period.                            */

/* Reasonably fast spin locks.  Basically the same implementation      */
/* as STL alloc.h.                                                     */
#define SLEEP_THRESHOLD 3

unsigned long GC_allocate_lock = 0;
# define GC_TRY_LOCK() !GC_test_and_set(&GC_allocate_lock,1)
# define GC_LOCK_TAKEN GC_allocate_lock
void GC_lock()
{
#   define low_spin_max 30      /* spin cycles if we suspect uniprocessor */
#   define high_spin_max 1000   /* spin cycles for multiprocessor */
    static unsigned spin_max = low_spin_max;
    unsigned my_spin_max;
    static unsigned last_spins = 0;
    unsigned my_last_spins;
    volatile unsigned junk;
#   define PAUSE junk *= junk; junk *= junk; junk *= junk; junk *= junk
    int i;
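/* PAUSE burns a few cycles on arithmetic that touches only the        */
/* thread's own slot for junk (volatile keeps the stores from being    */
/* optimized away), so a spinning thread does not hammer the cache     */
/* line holding the lock word between polls.                           */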
    if (GC_TRY_LOCK()) {
        return;
    }
    junk = 0;
    my_spin_max = spin_max;
    my_last_spins = last_spins;
    for (i = 0; i < my_spin_max; i++) {
        if (GC_collecting) goto yield;
        if (i < my_last_spins/2 || GC_LOCK_TAKEN) {
            PAUSE;
            continue;
        }
        if (GC_TRY_LOCK()) {
            /*
             * Got it.
             * Spinning worked.  Thus we're probably not being scheduled
             * against the other process with which we were contending.
             * Thus it makes sense to spin longer the next time.
             */
            last_spins = i;
            spin_max = high_spin_max;
            return;
        }
    }
    /* We are probably being scheduled against the other process.  Sleep. */
    spin_max = low_spin_max;
yield:
    for (i = 0;; ++i) {
        if (GC_TRY_LOCK()) {
            return;
        }
        if (i < SLEEP_THRESHOLD) {
            sched_yield();
        } else {
            struct timespec ts;

            if (i > 26) i = 26;
                /* Don't wait for more than about 60 msecs, even       */
                /* under extreme contention.                           */
            ts.tv_sec = 0;
            ts.tv_nsec = 1 << i;
            nanosleep(&ts, 0);
        }
    }
}
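/* Sanity check on the cap above: with i limited to 26, the longest    */
/* single sleep is 1 << 26 ns, roughly 67 msecs, which is the "about   */
/* 60 msecs" promised in the comment.                                  */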
# else

int GC_no_Irix_threads;

# endif /* IRIX_THREADS */