/*
 * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/*
 * Support code for Irix (>=6.2) Pthreads.  This relies on properties
 * not guaranteed by the Pthread standard.  It may or may not be portable
 * to other implementations.
 *
 * Note that there is a lot of code duplication between linux_threads.c
 * and irix_threads.c; any changes made here may need to be reflected
 * there as well.
 */
# if defined(IRIX_THREADS)

# include <semaphore.h>
# include <sys/mman.h>
# include <sys/time.h>

#undef pthread_sigmask
void GC_print_sig_mask()
{
    sigset_t blocked;
    int i;

    if (pthread_sigmask(SIG_BLOCK, NULL, &blocked) != 0)
    	ABORT("pthread_sigmask");
    GC_printf0("Blocked: ");
    for (i = 1; i <= MAXSIG; i++) {
        if (sigismember(&blocked, i)) { GC_printf1("%ld ", (long) i); }
    }
    GC_printf0("\n");
}
/* We use the allocation lock to protect thread-related data structures. */
/* The set of all known threads.  We intercept thread creation and	*/
/* joins.  We never actually create detached threads.  We allocate all	*/
/* new thread stacks ourselves.  These allow us to maintain this	*/
/* data structure.							*/
/* Protected by GC_thr_lock.						*/
/* Some of this should be declared volatile, but that's inconsistent	*/
/* with some library routine declarations.				*/
typedef struct GC_Thread_Rep {
    struct GC_Thread_Rep * next;  /* More recently allocated threads	*/
				  /* with a given pthread id come 	*/
				  /* first.  (All but the first are 	*/
				  /* guaranteed to be dead, but we may	*/
				  /* not yet have registered the join.)	*/
    pthread_t id;
    word stop;
#	define NOT_STOPPED 0
#	define PLEASE_STOP 1
#	define STOPPED 2
    word flags;
#	define FINISHED 1	/* Thread has exited.			*/
#	define DETACHED 2	/* Thread is intended to be detached.	*/
#	define CLIENT_OWNS_STACK 4
				/* Stack was supplied by client.	*/
    ptr_t stack;
    ptr_t stack_ptr;		/* Valid only when stopped.		*/
				/* But must be within stack region at	*/
				/* all times.				*/
    size_t stack_size;		/* 0 for original thread.		*/
    void * status;		/* Used only to avoid premature 	*/
				/* reclamation of any data it might 	*/
				/* reference.				*/
} * GC_thread;
GC_thread GC_lookup_thread(pthread_t id);
/*
 * The only way to suspend threads given the pthread interface is to send
 * signals.  Unfortunately, this means we have to reserve
 * a signal, and intercept client calls to change the signal mask.
 */
# define SIG_SUSPEND (SIGRTMIN + 6)
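
/* Illustrative sketch (not part of the original source): because	*/
/* SIG_SUSPEND is reserved, a client that tries to block every signal	*/
/* must not be allowed to block it.  The GC_pthread_sigmask wrapper	*/
/* defined later in this file deletes SIG_SUSPEND from any mask the	*/
/* client installs; the caller below is a hypothetical example only.	*/
#if 0
static void client_blocks_all_signals(void)
{
    sigset_t all;

    sigfillset(&all);
    /* With pthread_sigmask redirected to GC_pthread_sigmask, the	*/
    /* wrapper quietly removes SIG_SUSPEND from the requested set,	*/
    /* so the collector can still stop this thread.			*/
    GC_pthread_sigmask(SIG_BLOCK, &all, NULL);
}
#endif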
pthread_mutex_t GC_suspend_lock = PTHREAD_MUTEX_INITIALIZER;
				/* Number of threads stopped so far	*/
pthread_cond_t GC_suspend_ack_cv = PTHREAD_COND_INITIALIZER;
pthread_cond_t GC_continue_cv = PTHREAD_COND_INITIALIZER;
void GC_suspend_handler(int sig)
{
    int dummy;
    GC_thread me;

    if (sig != SIG_SUSPEND) ABORT("Bad signal in suspend_handler");
    me = GC_lookup_thread(pthread_self());
    /* The lookup here is safe, since I'm doing this on behalf	*/
    /* of a thread which holds the allocation lock in order	*/
    /* to stop the world.  Thus concurrent modification of the	*/
    /* data structure is impossible.				*/
    if (PLEASE_STOP != me -> stop) {
	/* Misdirected signal.	*/
	pthread_mutex_unlock(&GC_suspend_lock);
	return;
    }
    pthread_mutex_lock(&GC_suspend_lock);
    me -> stack_ptr = (ptr_t)(&dummy);
    me -> stop = STOPPED;
    pthread_cond_signal(&GC_suspend_ack_cv);
    pthread_cond_wait(&GC_continue_cv, &GC_suspend_lock);
    pthread_mutex_unlock(&GC_suspend_lock);
    /* GC_printf1("Continuing 0x%x\n", pthread_self()); */
}
GC_bool GC_thr_initialized = FALSE;

size_t GC_min_stack_sz;
size_t GC_page_sz;
# define N_FREE_LISTS 25
ptr_t GC_stack_free_lists[N_FREE_LISTS] = { 0 };
		/* GC_stack_free_lists[i] is free list for stacks of 	*/
		/* size GC_min_stack_sz*2**i.				*/
		/* Free lists are linked through first word.		*/
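
/* Illustrative sketch (not part of the original source): since the	*/
/* list at index i holds stacks of size GC_min_stack_sz << i, the	*/
/* index for a request is the smallest i with GC_min_stack_sz << i	*/
/* >= sz.  This is exactly the search loop GC_stack_alloc and		*/
/* GC_stack_free use below; the helper name is hypothetical.		*/
#if 0
static int GC_stack_list_index(size_t sz)
{
    size_t search_sz = GC_min_stack_sz;
    int index = 0;

    while (search_sz < sz) {
        search_sz *= 2;
        index++;
    }
    return index;
}
#endif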
/* Return a stack of size at least *stack_size.  *stack_size is	*/
/* replaced by the actual stack size.					*/
/* Caller holds allocation lock.					*/
ptr_t GC_stack_alloc(size_t * stack_size)
{
    register size_t requested_sz = *stack_size;
    register size_t search_sz = GC_min_stack_sz;
    register int index = 0;	/* = log2(search_sz/GC_min_stack_sz) */
    register ptr_t result;

    while (search_sz < requested_sz) {
        search_sz *= 2;
        index++;
    }
    if ((result = GC_stack_free_lists[index]) == 0
        && (result = GC_stack_free_lists[index+1]) != 0) {
        /* Try next size up. */
        search_sz *= 2; index++;
    }
    if (result != 0) {
        GC_stack_free_lists[index] = *(ptr_t *)result;
    } else {
        result = (ptr_t) GC_scratch_alloc(search_sz + 2*GC_page_sz);
        result = (ptr_t)(((word)result + GC_page_sz) & ~(GC_page_sz - 1));
        /* Protect hottest page to detect overflow. */
        /* mprotect(result, GC_page_sz, PROT_NONE); */
        result += GC_page_sz;
    }
    *stack_size = search_sz;
    return(result);
}
/* Caller holds allocation lock.					*/
void GC_stack_free(ptr_t stack, size_t size)
{
    register int index = 0;
    register size_t search_sz = GC_min_stack_sz;

    while (search_sz < size) {
        search_sz *= 2;
        index++;
    }
    if (search_sz != size) ABORT("Bad stack size");
    *(ptr_t *)stack = GC_stack_free_lists[index];
    GC_stack_free_lists[index] = stack;
}
# define THREAD_TABLE_SZ 128	/* Must be power of 2	*/
volatile GC_thread GC_threads[THREAD_TABLE_SZ];
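
/* Illustrative note (not part of the original source): because	*/
/* THREAD_TABLE_SZ is a power of 2, the hash ((word)id) %		*/
/* THREAD_TABLE_SZ computed below compiles to a cheap mask of the	*/
/* low bits, i.e. ((word)id) & (THREAD_TABLE_SZ - 1).			*/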
/* Add a thread to GC_threads.  We assume it wasn't already there.	*/
/* Caller holds allocation lock.					*/
GC_thread GC_new_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    GC_thread result;
    static struct GC_Thread_Rep first_thread;
    static GC_bool first_thread_used = FALSE;

    if (!first_thread_used) {
    	result = &first_thread;
    	first_thread_used = TRUE;
    	/* Don't acquire allocation lock, since we may already hold it. */
    } else {
        result = (struct GC_Thread_Rep *)
        	 GC_generic_malloc_inner(sizeof(struct GC_Thread_Rep), NORMAL);
    }
    if (result == 0) return(0);
    result -> id = id;
    result -> next = GC_threads[hv];
    GC_threads[hv] = result;
    /* result -> flags = 0; */
    /* result -> stop = 0; */
    return(result);
}
/* Delete a thread from GC_threads.  We assume it is there.	*/
/* (The code intentionally traps if it wasn't.)			*/
/* Caller holds allocation lock.				*/
void GC_delete_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (!pthread_equal(p -> id, id)) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
}
/* If a thread has been joined, but we have not yet		*/
/* been notified, then there may be more than one thread 	*/
/* in the table with the same pthread id.			*/
/* This is OK, but we need a way to delete a specific one.	*/
void GC_delete_gc_thread(pthread_t id, GC_thread gc_id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (p != gc_id) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
}
/* Return a GC_thread corresponding to a given thread_t.	*/
/* Returns 0 if it's not there.					*/
/* Caller holds allocation lock or otherwise inhibits 		*/
/* updates.							*/
/* If there is more than one thread with the given id we 	*/
/* return the most recent one.					*/
GC_thread GC_lookup_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];

    while (p != 0 && !pthread_equal(p -> id, id)) p = p -> next;
    return(p);
}
/* Caller holds allocation lock.	*/
void GC_stop_world()
{
    pthread_t my_thread = pthread_self();
    register int i;
    register GC_thread p;
    register int result;
    struct timespec timeout;

    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> id != my_thread) {
            if (p -> flags & FINISHED) {
        	p -> stop = STOPPED;
        	continue;
            }
            p -> stop = PLEASE_STOP;
            result = pthread_kill(p -> id, SIG_SUSPEND);
            /* GC_printf1("Sent signal to 0x%x\n", p -> id); */
            switch(result) {
                case ESRCH:
                    /* Not really there anymore.  Possible? */
                    p -> stop = STOPPED;
                    break;
                case 0:
                    break;
                default:
                    ABORT("pthread_kill failed");
            }
        }
      }
    }
    pthread_mutex_lock(&GC_suspend_lock);
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        while (p -> id != my_thread && p -> stop != STOPPED) {
            clock_gettime(CLOCK_REALTIME, &timeout);
            timeout.tv_nsec += 50000000;	/* 50 msecs */
            if (timeout.tv_nsec >= 1000000000) {
                timeout.tv_nsec -= 1000000000;
                ++timeout.tv_sec;
            }
            result = pthread_cond_timedwait(&GC_suspend_ack_cv,
                                            &GC_suspend_lock,
                                            &timeout);
            if (result == ETIMEDOUT) {
                /* Signal was lost or misdirected.  Try again.	*/
                /* Duplicate signals should be benign.		*/
                result = pthread_kill(p -> id, SIG_SUSPEND);
            }
        }
      }
    }
    pthread_mutex_unlock(&GC_suspend_lock);
    /* GC_printf1("World stopped 0x%x\n", pthread_self()); */
}
/* Caller holds allocation lock.	*/
void GC_start_world()
{
    register int i;
    register GC_thread p;

    /* GC_printf0("World starting\n"); */
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        p -> stop = NOT_STOPPED;
      }
    }
    pthread_mutex_lock(&GC_suspend_lock);
    /* All other threads are at pthread_cond_wait in signal handler.	*/
    /* Otherwise we couldn't have acquired the lock.			*/
    pthread_mutex_unlock(&GC_suspend_lock);
    pthread_cond_broadcast(&GC_continue_cv);
}
--> not really supported yet.
int GC_is_thread_stack(ptr_t addr)
{
    register int i;
    register GC_thread p;

    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> stack_size != 0) {
            if (p -> stack <= addr &&
            	addr < p -> stack + p -> stack_size)
            	return 1;
        }
      }
    }
    return 0;
}
/* We hold allocation lock.  We assume the world is stopped.	*/
void GC_push_all_stacks()
{
    register int i;
    register GC_thread p;
    register ptr_t sp = GC_approx_sp();
    register ptr_t lo, hi;
    pthread_t me = pthread_self();

    if (!GC_thr_initialized) GC_thr_init();
    /* GC_printf1("Pushing stacks from thread 0x%x\n", me); */
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> flags & FINISHED) continue;
        if (pthread_equal(p -> id, me)) {
            lo = GC_approx_sp();
        } else {
            lo = p -> stack_ptr;
        }
        if (p -> stack_size != 0) {
            hi = p -> stack + p -> stack_size;
        } else {
            /* The original stack. */
            hi = GC_stackbottom;
        }
        GC_push_all_stack(lo, hi);
      }
    }
}
/* We hold the allocation lock.	*/
void GC_thr_init()
{
    GC_thread t;
    struct sigaction act;

    if (GC_thr_initialized) return;
    GC_thr_initialized = TRUE;
    GC_min_stack_sz = HBLKSIZE;
    GC_page_sz = sysconf(_SC_PAGESIZE);
    (void) sigaction(SIG_SUSPEND, 0, &act);
    if (act.sa_handler != SIG_DFL)
    	ABORT("Previously installed SIG_SUSPEND handler");
    /* Install handler.	*/
    act.sa_handler = GC_suspend_handler;
    act.sa_flags = SA_RESTART;
    (void) sigemptyset(&act.sa_mask);
    if (0 != sigaction(SIG_SUSPEND, &act, 0))
    	ABORT("Failed to install SIG_SUSPEND handler");
    /* Add the initial thread, so we can stop it.	*/
    t = GC_new_thread(pthread_self());
    t -> stack_ptr = (ptr_t)(&t);
    t -> flags = DETACHED;
}
int GC_pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
{
    sigset_t fudged_set;

    if (set != NULL && (how == SIG_BLOCK || how == SIG_SETMASK)) {
        fudged_set = *set;
        sigdelset(&fudged_set, SIG_SUSPEND);
        set = &fudged_set;
    }
    return(pthread_sigmask(how, set, oset));
}
struct start_info {
    void *(*start_routine)(void *);
    void *arg;
    word flags;
    ptr_t stack;
    size_t stack_size;
    sem_t registered;		/* 1 ==> in our thread table, but 	*/
				/* parent hasn't yet noticed.		*/
};
void GC_thread_exit_proc(void *arg)
{
    GC_thread me;

    LOCK();
    me = GC_lookup_thread(pthread_self());
    if (me -> flags & DETACHED) {
    	GC_delete_thread(pthread_self());
    } else {
    	me -> flags |= FINISHED;
    }
    UNLOCK();
}
int GC_pthread_join(pthread_t thread, void **retval)
{
    int result;
    GC_thread thread_gc_id;

    LOCK();
    thread_gc_id = GC_lookup_thread(thread);
    /* This is guaranteed to be the intended one, since the thread id	*/
    /* can't have been recycled by pthreads.				*/
    UNLOCK();
    result = pthread_join(thread, retval);
    /* Some versions of the Irix pthreads library can erroneously 	*/
    /* return EINTR when the call succeeds.				*/
    if (EINTR == result) result = 0;
    LOCK();
    /* Here the pthread thread id may have been recycled. */
    GC_delete_gc_thread(thread, thread_gc_id);
    UNLOCK();
    return result;
}
void * GC_start_routine(void * arg)
{
    struct start_info * si = arg;
    void * result;
    GC_thread me;
    pthread_t my_pthread;
    void *(*start)(void *);
    void *start_arg;

    my_pthread = pthread_self();
    /* If a GC occurs before the thread is registered, that GC will	*/
    /* ignore this thread.  That's fine, since it will block trying to	*/
    /* acquire the allocation lock, and won't yet hold interesting 	*/
    /* pointers.							*/
    LOCK();
    /* We register the thread here instead of in the parent, so that	*/
    /* we don't need to hold the allocation lock during pthread_create. */
    /* Holding the allocation lock there would make REDIRECT_MALLOC	*/
    /* impossible.  It probably still doesn't work, but we're a little	*/
    /* closer ...							*/
    /* This unfortunately means that we have to be careful the parent	*/
    /* doesn't try to do a pthread_join before we're registered.	*/
    me = GC_new_thread(my_pthread);
    me -> flags = si -> flags;
    me -> stack = si -> stack;
    me -> stack_size = si -> stack_size;
    me -> stack_ptr = (ptr_t)si -> stack + si -> stack_size - sizeof(word);
    UNLOCK();
    start = si -> start_routine;
    start_arg = si -> arg;
    sem_post(&(si -> registered));
    pthread_cleanup_push(GC_thread_exit_proc, 0);
    result = (*start)(start_arg);
    me -> status = result;
    me -> flags |= FINISHED;
    pthread_cleanup_pop(1);
	/* This involves acquiring the lock, ensuring that we can't exit */
	/* while a collection that thinks we're alive is trying to stop  */
	/* us.								  */
    return(result);
}
int
GC_pthread_create(pthread_t *new_thread,
		  const pthread_attr_t *attr,
		  void *(*start_routine)(void *), void *arg)
{
    int result;
    void * stack;
    size_t stacksize;
    pthread_attr_t new_attr;
    int detachstate;
    word my_flags = 0;
    struct start_info * si = GC_malloc(sizeof(struct start_info));
	/* This is otherwise saved only in an area mmapped by the thread */
	/* library, which isn't visible to the collector.		  */

    if (0 == si) return(ENOMEM);
    sem_init(&(si -> registered), 0, 0);
    si -> start_routine = start_routine;
    si -> arg = arg;
    LOCK();
    if (!GC_thr_initialized) GC_thr_init();
    if (NULL == attr) {
        (void) pthread_attr_init(&new_attr);
    } else {
        new_attr = *attr;
    }
    pthread_attr_getstackaddr(&new_attr, &stack);
    pthread_attr_getstacksize(&new_attr, &stacksize);
    pthread_attr_getdetachstate(&new_attr, &detachstate);
    if (stacksize < GC_min_stack_sz) ABORT("Stack too small");
    if (0 == stack) {
    	stack = (void *)GC_stack_alloc(&stacksize);
    	if (0 == stack) {
    	    UNLOCK();
    	    return(ENOMEM);
    	}
    	pthread_attr_setstackaddr(&new_attr, stack);
    } else {
    	my_flags |= CLIENT_OWNS_STACK;
    }
    if (PTHREAD_CREATE_DETACHED == detachstate) my_flags |= DETACHED;
    si -> flags = my_flags;
    si -> stack = stack;
    si -> stack_size = stacksize;
    result = pthread_create(new_thread, &new_attr, GC_start_routine, si);
    if (0 == new_thread && !(my_flags & CLIENT_OWNS_STACK)) {
    	GC_stack_free(stack, stacksize);
    }
    UNLOCK();
    /* Wait until child has been added to the thread table.		*/
    /* This also ensures that we hold onto si until the child is done	*/
    /* with it.  Thus it doesn't matter whether it is otherwise		*/
    /* visible to the collector.					*/
    if (0 != sem_wait(&(si -> registered))) ABORT("sem_wait failed");
    sem_destroy(&(si -> registered));
    /* pthread_attr_destroy(&new_attr); */
    return(result);
}
GC_bool GC_collecting = 0;	/* A hint that we're in the collector and  */
				/* holding the allocation lock for an	   */
				/* extended period.			   */
/* Reasonably fast spin locks.  Basically the same implementation	*/
/* as STL alloc.h.  This isn't really the right way to do this,		*/
/* but until the POSIX scheduling mess gets straightened out ...	*/
unsigned long GC_allocate_lock = 0;
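
/* Illustrative sketch (not part of the original source): the basic	*/
/* test-and-set protocol that GC_lock refines below.  GC_test_and_set	*/
/* atomically stores its second argument and returns the previous	*/
/* value, so a zero return means the lock was acquired.  The naive	*/
/* spin loop and plain-store release here are assumptions shown for	*/
/* illustration only.							*/
#if 0
static void naive_lock(void)
{
    while (GC_test_and_set(&GC_allocate_lock, 1)) {
	/* spin until the previous holder stores 0 */
    }
}

static void naive_unlock(void)
{
    GC_allocate_lock = 0;
}
#endif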
#define SLEEP_THRESHOLD 3

void GC_lock()
{
#   define low_spin_max 30	/* spin cycles if we suspect uniprocessor */
#   define high_spin_max 1000	/* spin cycles for multiprocessor */
    static unsigned spin_max = low_spin_max;
    unsigned my_spin_max;
    static unsigned last_spins = 0;
    unsigned my_last_spins;
    volatile unsigned junk;
#   define PAUSE junk *= junk; junk *= junk; junk *= junk; junk *= junk
    int i;

    if (!GC_test_and_set(&GC_allocate_lock, 1)) {
        return;
    }
    junk = 0;
    my_spin_max = spin_max;
    my_last_spins = last_spins;
    for (i = 0; i < my_spin_max; i++) {
        if (GC_collecting) goto yield;
        if (i < my_last_spins/2 || GC_allocate_lock) {
            PAUSE;
            continue;
        }
        if (!GC_test_and_set(&GC_allocate_lock, 1)) {
            /*
             * Got it!
             * Spinning worked.  Thus we're probably not being scheduled
             * against the other process with which we were contending.
             * Thus it makes sense to spin longer the next time.
             */
            last_spins = i;
            spin_max = high_spin_max;
            return;
        }
    }
    /* We are probably being scheduled against the other process.  Sleep. */
    spin_max = low_spin_max;
yield:
    for (i = 0;; ++i) {
        if (!GC_test_and_set(&GC_allocate_lock, 1)) {
            return;
        }
        if (i < SLEEP_THRESHOLD) {
            sched_yield();
        } else {
            struct timespec ts;

            if (i > 26) i = 26;
		/* Don't wait for more than about 60 msecs, even	*/
		/* under extreme contention.				*/
            ts.tv_sec = 0;
            ts.tv_nsec = 1 << i;
            nanosleep(&ts, 0);
        }
    }
}
# else /* !IRIX_THREADS */

  int GC_no_Irix_threads;

# endif /* IRIX_THREADS */