/*
 * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1998 by Fergus Henderson.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/*
 * Support code for LinuxThreads, the clone()-based kernel
 * thread package for Linux which is included in libc6.
 *
 * This code relies on implementation details of LinuxThreads,
 * i.e. properties not guaranteed by the Pthread standard:
 *
 *   - the function GC_linux_thread_top_of_stack(void)
 *     relies on the way LinuxThreads lays out thread stacks
 *     in the address space.
 *
 * Note that there is a lot of code duplication between linux_threads.c
 * and irix_threads.c; any changes made here may need to be reflected
 * there too.
 */
/* #define DEBUG_THREADS 1 */

/* ANSI C requires that a compilation unit contains something */
# include "gc_priv.h"

# if defined(LINUX_THREADS)

# include <pthread.h>
# include <sched.h>
# include <time.h>
# include <errno.h>
# include <unistd.h>
# include <sys/mman.h>
# include <sys/time.h>
# include <semaphore.h>
# include <signal.h>
#ifdef USE_LD_WRAP
#   define WRAP_FUNC(f) __wrap_##f
#   define REAL_FUNC(f) __real_##f
#else
#   define WRAP_FUNC(f) GC_##f
#   define REAL_FUNC(f) f
#   undef pthread_create
#   undef pthread_sigmask
#   undef pthread_join
#endif
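/*
 * Illustration (added for exposition, not part of the original sources):
 * with USE_LD_WRAP the collector expects to be linked using GNU ld's
 * symbol wrapping, e.g. something along the lines of
 *
 *     gcc client.o gc.a -lpthread \
 *         -Wl,--wrap,pthread_create -Wl,--wrap,pthread_sigmask \
 *         -Wl,--wrap,pthread_join
 *
 * so that client calls to pthread_create() resolve to the
 * __wrap_pthread_create() defined here, while __real_pthread_create()
 * still reaches the libc definition.  Without USE_LD_WRAP, clients are
 * instead expected to call the GC_ prefixed wrappers, typically via
 * macros such as "#define pthread_create GC_pthread_create" in a public
 * header, which is why those names are #undef'd above.  The exact link
 * line is only a sketch; consult the build machinery actually in use.
 */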
void GC_thr_init();

#if 0
void GC_print_sig_mask()
{
    sigset_t blocked;
    int i;

    if (pthread_sigmask(SIG_BLOCK, NULL, &blocked) != 0)
        ABORT("pthread_sigmask");
    GC_printf0("Blocked: ");
    for (i = 1; i <= MAXSIG; i++) {
        if (sigismember(&blocked, i)) { GC_printf1("%ld ", (long) i); }
    }
    GC_printf0("\n");
}
#endif
/* We use the allocation lock to protect thread-related data structures. */

/* The set of all known threads.  We intercept thread creation and     */
/* joins.  We never actually create detached threads.  We allocate all */
/* new thread stacks ourselves.  These allow us to maintain this       */
/* data structure.                                                     */
/* Protected by GC_thr_lock.                                            */
/* Some of this should be declared volatile, but that's inconsistent   */
/* with some library routine declarations.                              */
typedef struct GC_Thread_Rep {
    struct GC_Thread_Rep * next;  /* More recently allocated threads    */
                                  /* with a given pthread id come       */
                                  /* first.  (All but the first are     */
                                  /* guaranteed to be dead, but we may  */
                                  /* not yet have registered the join.) */
    pthread_t id;
    word flags;
#       define FINISHED 1       /* Thread has exited.                   */
#       define DETACHED 2       /* Thread is intended to be detached.   */
#       define MAIN_THREAD 4    /* True for the original thread only.   */

    ptr_t stack_end;            /* Cold end of the stack.               */
    ptr_t stack_ptr;            /* Valid only when stopped.             */
#   ifdef IA64
      ptr_t backing_store_end;
      ptr_t backing_store_ptr;
#   endif
    int signal;
    void * status;              /* The value returned from the thread.  */
                                /* Used only to avoid premature         */
                                /* reclamation of any data it might     */
                                /* reference.                           */
} * GC_thread;

GC_thread GC_lookup_thread(pthread_t id);
/*
 * The only way to suspend threads given the pthread interface is to send
 * signals.  We can't use SIGSTOP directly, because we need to get the
 * thread to save its stack pointer in the GC thread table before
 * suspending.  So we have to reserve a signal of our own for this.
 * This means we have to intercept client calls to change the signal mask.
 * The linuxthreads package already uses SIGUSR1 and SIGUSR2,
 * so we need to reuse something else.  I chose SIGPWR.
 * (Perhaps SIGUNUSED would be a better choice.)
 */
#define SIG_SUSPEND SIGPWR

#define SIG_RESTART SIGXCPU

sem_t GC_suspend_ack_sem;
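/*
 * Illustrative sketch (exposition only, not compiled): if a client blocked
 * "all" signals itself, SIG_SUSPEND would be blocked too, and GC_stop_world()
 * could hang waiting on GC_suspend_ack_sem.  That is why the pthread_sigmask
 * wrapper further down strips SIG_SUSPEND from any mask the client installs:
 */
#if 0
    /* Client code, as it might look without the collector: */
    sigset_t all;
    sigfillset(&all);
    pthread_sigmask(SIG_BLOCK, &all, NULL);  /* the wrapper quietly removes */
                                             /* SIG_SUSPEND from "all"      */
#endif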
/*
GC_linux_thread_top_of_stack() relies on implementation details of
LinuxThreads, namely that thread stacks are allocated on 2M boundaries
and grow to no more than 2M.
To make sure that we're using LinuxThreads and not some other thread
package, we generate a dummy reference to `pthread_kill_other_threads_np'
(was `__pthread_initial_thread_bos' but that disappeared),
which is a symbol defined in LinuxThreads, but (hopefully) not in other
thread packages.
*/
void (*dummy_var_to_force_linux_threads)() = pthread_kill_other_threads_np;

#define LINUX_THREADS_STACK_SIZE  (2 * 1024 * 1024)
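/*
 * Worked example (illustration only): LINUX_THREADS_STACK_SIZE - 1 is
 * 0x1FFFFF, so for a hypothetical sp of 0x40123456 inside a LinuxThreads
 * stack,
 *
 *     (0x40123456 | 0x1FFFFF) + 1 == 0x401FFFFF + 1 == 0x40200000,
 *
 * i.e. the next 2M boundary above sp, which is the top of that thread's
 * stack under the layout assumption described above.
 */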
static inline ptr_t GC_linux_thread_top_of_stack(void)
{
  char *sp = GC_approx_sp();
  ptr_t tos = (ptr_t)(((unsigned long)sp | (LINUX_THREADS_STACK_SIZE - 1)) + 1);
#if DEBUG_THREADS
  GC_printf1("SP = %lx\n", (unsigned long)sp);
  GC_printf1("TOS = %lx\n", (unsigned long)tos);
#endif
  return tos;
}

#if defined(SPARC) || defined(IA64)
  extern word GC_save_regs_in_stack();
#endif
void GC_suspend_handler(int sig)
{
    int dummy;
    pthread_t my_thread = pthread_self();
    GC_thread me;
    sigset_t all_sigs;
    sigset_t old_sigs;
    int i;
    sigset_t mask;

    if (sig != SIG_SUSPEND) ABORT("Bad signal in suspend_handler");

#if DEBUG_THREADS
    GC_printf1("Suspending 0x%x\n", my_thread);
#endif

    me = GC_lookup_thread(my_thread);
    /* The lookup here is safe, since I'm doing this on behalf  */
    /* of a thread which holds the allocation lock in order     */
    /* to stop the world.  Thus concurrent modification of the  */
    /* data structure is impossible.                            */
#   ifdef SPARC
        me -> stack_ptr = (ptr_t)GC_save_regs_in_stack();
#   else
        me -> stack_ptr = (ptr_t)(&dummy);
#   endif
#   ifdef IA64
        me -> backing_store_ptr = (ptr_t)GC_save_regs_in_stack();
#   endif

    /* Tell the thread that wants to stop the world that this   */
    /* thread has been stopped.  Note that sem_post() is        */
    /* the only async-signal-safe primitive in LinuxThreads.    */
    sem_post(&GC_suspend_ack_sem);

    /* Wait until that thread tells us to restart by sending    */
    /* this thread a SIG_RESTART signal.                        */
    /* SIG_RESTART should be masked at this point.  Thus there  */
    /* is no race.                                               */
    if (sigfillset(&mask) != 0) ABORT("sigfillset() failed");
    if (sigdelset(&mask, SIG_RESTART) != 0) ABORT("sigdelset() failed");
#   ifdef NO_SIGNALS
      if (sigdelset(&mask, SIGINT) != 0) ABORT("sigdelset() failed");
      if (sigdelset(&mask, SIGQUIT) != 0) ABORT("sigdelset() failed");
      if (sigdelset(&mask, SIGTERM) != 0) ABORT("sigdelset() failed");
#   endif
    do {
        me->signal = 0;
        sigsuspend(&mask);             /* Wait for signal */
    } while (me->signal != SIG_RESTART);

#if DEBUG_THREADS
    GC_printf1("Continuing 0x%x\n", my_thread);
#endif
}
void GC_restart_handler(int sig)
{
    GC_thread me;

    if (sig != SIG_RESTART) ABORT("Bad signal in restart_handler");

    /* Let the GC_suspend_handler() know that we got a SIG_RESTART. */
    /* The lookup here is safe, since I'm doing this on behalf      */
    /* of a thread which holds the allocation lock in order         */
    /* to stop the world.  Thus concurrent modification of the      */
    /* data structure is impossible.                                */
    me = GC_lookup_thread(pthread_self());
    me->signal = SIG_RESTART;

    /*
    ** Note: even if we didn't do anything useful here,
    ** it would still be necessary to have a signal handler,
    ** rather than ignoring the signals, otherwise
    ** the signals will not be delivered at all, and
    ** will thus not interrupt the sigsuspend() above.
    */

#if DEBUG_THREADS
    GC_printf1("In GC_restart_handler for 0x%x\n", pthread_self());
#endif
}
GC_bool GC_thr_initialized = FALSE;

# define THREAD_TABLE_SZ 128    /* Must be power of 2 */
volatile GC_thread GC_threads[THREAD_TABLE_SZ];
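/*
 * For illustration (the ids are made up): threads hash to a chain head
 * with ((word)id) % THREAD_TABLE_SZ, so two ids that differ by a multiple
 * of 128 share a bucket and are linked through the "next" field, most
 * recently registered first.  E.g. ids 0x403 and 0x483 would both land in
 * GC_threads[3].
 */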
/* Add a thread to GC_threads.  We assume it wasn't already there.     */
/* Caller holds allocation lock.                                       */
GC_thread GC_new_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    GC_thread result;
    static struct GC_Thread_Rep first_thread;
    static GC_bool first_thread_used = FALSE;

    if (!first_thread_used) {
        result = &first_thread;
        first_thread_used = TRUE;
        /* Don't acquire allocation lock, since we may already hold it. */
    } else {
        result = (struct GC_Thread_Rep *)
                 GC_generic_malloc_inner(sizeof(struct GC_Thread_Rep), NORMAL);
    }
    if (result == 0) return(0);
    result -> id = id;
    result -> next = GC_threads[hv];
    GC_threads[hv] = result;
    /* result -> flags = 0; */
    return(result);
}
/* Delete a thread from GC_threads.  We assume it is there.    */
/* (The code intentionally traps if it wasn't.)                 */
/* Caller holds allocation lock.                                */
void GC_delete_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (!pthread_equal(p -> id, id)) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
}
/* If a thread has been joined, but we have not yet             */
/* been notified, then there may be more than one thread        */
/* in the table with the same pthread id.                       */
/* This is OK, but we need a way to delete a specific one.      */
void GC_delete_gc_thread(pthread_t id, GC_thread gc_id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (p != gc_id) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
}
/* Return a GC_thread corresponding to a given thread_t.        */
/* Returns 0 if it's not there.                                  */
/* Caller holds allocation lock or otherwise inhibits            */
/* updates.                                                      */
/* If there is more than one thread with the given id we         */
/* return the most recent one.                                   */
GC_thread GC_lookup_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];

    while (p != 0 && !pthread_equal(p -> id, id)) p = p -> next;
    return(p);
}
/* Caller holds allocation lock.        */
void GC_stop_world()
{
    pthread_t my_thread = pthread_self();
    register int i;
    register GC_thread p;
    register int n_live_threads = 0;
    register int result;

    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> id != my_thread) {
            if (p -> flags & FINISHED) continue;
            n_live_threads++;
            #if DEBUG_THREADS
              GC_printf1("Sending suspend signal to 0x%x\n", p -> id);
            #endif
            result = pthread_kill(p -> id, SIG_SUSPEND);
            switch(result) {
                case ESRCH:
                    /* Not really there anymore.  Possible? */
                    n_live_threads--;
                    break;
                case 0:
                    break;
                default:
                    ABORT("pthread_kill failed");
            }
        }
      }
    }
    for (i = 0; i < n_live_threads; i++) {
        sem_wait(&GC_suspend_ack_sem);
    }
    #if DEBUG_THREADS
      GC_printf1("World stopped 0x%x\n", pthread_self());
    #endif
}
/* Caller holds allocation lock.        */
void GC_start_world()
{
    pthread_t my_thread = pthread_self();
    register int i;
    register GC_thread p;
    register int n_live_threads = 0;
    register int result;

#   if DEBUG_THREADS
      GC_printf0("World starting\n");
#   endif

    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> id != my_thread) {
            if (p -> flags & FINISHED) continue;
            n_live_threads++;
            #if DEBUG_THREADS
              GC_printf1("Sending restart signal to 0x%x\n", p -> id);
            #endif
            result = pthread_kill(p -> id, SIG_RESTART);
            switch(result) {
                case ESRCH:
                    /* Not really there anymore.  Possible? */
                    n_live_threads--;
                    break;
                case 0:
                    break;
                default:
                    ABORT("pthread_kill failed");
            }
        }
      }
    }
    #if DEBUG_THREADS
      GC_printf0("World started\n");
    #endif
}
# ifdef IA64
#   define IF_IA64(x) x
# else
#   define IF_IA64(x)
# endif
/* We hold allocation lock.  Should do exactly the right thing if the  */
/* world is stopped.  Should not fail if it isn't.                     */
void GC_push_all_stacks()
{
    int i;
    GC_thread p;
    ptr_t sp = GC_approx_sp();
    ptr_t lo, hi;
    /* On IA64, we also need to scan the register backing store. */
    IF_IA64(ptr_t bs_lo; ptr_t bs_hi;)
    pthread_t me = pthread_self();

    if (!GC_thr_initialized) GC_thr_init();
    #if DEBUG_THREADS
      GC_printf1("Pushing stacks from thread 0x%lx\n", (unsigned long) me);
    #endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> flags & FINISHED) continue;
        if (pthread_equal(p -> id, me)) {
#           ifdef SPARC
              lo = (ptr_t)GC_save_regs_in_stack();
#           else
              lo = GC_approx_sp();
#           endif
            IF_IA64(bs_hi = (ptr_t)GC_save_regs_in_stack();)
        } else {
            lo = p -> stack_ptr;
            IF_IA64(bs_hi = p -> backing_store_ptr;)
        }
        if ((p -> flags & MAIN_THREAD) == 0) {
            hi = p -> stack_end;
            IF_IA64(bs_lo = p -> backing_store_end);
        } else {
            /* The original stack. */
            hi = GC_stackbottom;
            IF_IA64(bs_lo = BACKING_STORE_BASE;)
        }
        #if DEBUG_THREADS
          GC_printf3("Stack for thread 0x%lx = [%lx,%lx)\n",
                     (unsigned long) p -> id,
                     (unsigned long) lo, (unsigned long) hi);
        #endif
        if (0 == lo) ABORT("GC_push_all_stacks: sp not set!\n");
        GC_push_all_stack(lo, hi);
#       ifdef IA64
          if (pthread_equal(p -> id, me)) {
              GC_push_all_eager(bs_lo, bs_hi);
          } else {
              GC_push_all_stack(bs_lo, bs_hi);
          }
#       endif
      }
    }
}
/* We hold the allocation lock. */
void GC_thr_init()
{
    int dummy;
    GC_thread t;
    struct sigaction act;

    if (GC_thr_initialized) return;
    GC_thr_initialized = TRUE;

    if (sem_init(&GC_suspend_ack_sem, 0, 0) != 0)
        ABORT("sem_init failed");

    act.sa_flags = SA_RESTART;
    if (sigfillset(&act.sa_mask) != 0) {
        ABORT("sigfillset() failed");
    }
#   ifdef NO_SIGNALS
      if (sigdelset(&act.sa_mask, SIGINT) != 0
          || sigdelset(&act.sa_mask, SIGQUIT) != 0
          || sigdelset(&act.sa_mask, SIGTERM) != 0) {
          ABORT("sigdelset() failed");
      }
#   endif

    /* SIG_RESTART is unmasked by the handler when necessary.  */
    act.sa_handler = GC_suspend_handler;
    if (sigaction(SIG_SUSPEND, &act, NULL) != 0) {
        ABORT("Cannot set SIG_SUSPEND handler");
    }

    act.sa_handler = GC_restart_handler;
    if (sigaction(SIG_RESTART, &act, NULL) != 0) {
        ABORT("Cannot set SIG_RESTART handler");
    }

    /* Add the initial thread, so we can stop it.       */
      t = GC_new_thread(pthread_self());
      t -> stack_ptr = (ptr_t)(&dummy);
      t -> flags = DETACHED | MAIN_THREAD;
}
int WRAP_FUNC(pthread_sigmask)(int how, const sigset_t *set, sigset_t *oset)
{
    sigset_t fudged_set;

    if (set != NULL && (how == SIG_BLOCK || how == SIG_SETMASK)) {
        fudged_set = *set;
        sigdelset(&fudged_set, SIG_SUSPEND);
        set = &fudged_set;
    }
    return(REAL_FUNC(pthread_sigmask)(how, set, oset));
}
struct start_info {
    void *(*start_routine)(void *);
    void *arg;
    word flags;
    sem_t registered;           /* 1 ==> in our thread table, but       */
                                /* parent hasn't yet noticed.           */
};
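/*
 * Sketch of the handshake on "registered" (exposition only; the real code
 * is in GC_start_routine and the pthread_create wrapper below):
 */
#if 0
    /* Parent, in WRAP_FUNC(pthread_create): */
    sem_init(&(si -> registered), 0, 0);
    result = REAL_FUNC(pthread_create)(new_thread, &new_attr,
                                       GC_start_routine, si);
    sem_wait(&(si -> registered));      /* child is now in GC_threads   */
    sem_destroy(&(si -> registered));

    /* Child, in GC_start_routine, after adding itself to GC_threads: */
    sem_post(&(si -> registered));
#endif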
void GC_thread_exit_proc(void *arg)
{
    GC_thread me;
    struct start_info * si = arg;

    LOCK();
    me = GC_lookup_thread(pthread_self());
    if (me -> flags & DETACHED) {
        GC_delete_thread(pthread_self());
    } else {
        me -> flags |= FINISHED;
    }
    if (GC_incremental && GC_collection_in_progress()) {
        int old_gc_no = GC_gc_no;

        /* Make sure that no part of our stack is still on the mark stack, */
        /* since it's about to be unmapped.                                 */
        while (GC_incremental && GC_collection_in_progress()
               && old_gc_no == GC_gc_no) {
            ENTER_GC();
            GC_collect_a_little_inner(1);
            EXIT_GC();
            UNLOCK();
            sched_yield();
            LOCK();
        }
    }
    UNLOCK();
}
int WRAP_FUNC(pthread_join)(pthread_t thread, void **retval)
{
    int result;
    GC_thread thread_gc_id;

    LOCK();
    thread_gc_id = GC_lookup_thread(thread);
    /* This is guaranteed to be the intended one, since the thread id  */
    /* can't have been recycled by pthreads.                           */
    UNLOCK();
    result = REAL_FUNC(pthread_join)(thread, retval);
    LOCK();
    /* Here the pthread thread id may have been recycled. */
    GC_delete_gc_thread(thread, thread_gc_id);
    UNLOCK();
    return result;
}
void * GC_start_routine(void * arg)
{
    int dummy;
    struct start_info * si = arg;
    void * result;
    GC_thread me;
    pthread_t my_pthread;
    void *(*start)(void *);
    void *start_arg;

    my_pthread = pthread_self();
#   ifdef DEBUG_THREADS
        GC_printf1("Starting thread 0x%lx\n", my_pthread);
        GC_printf1("pid = %ld\n", (long) getpid());
        GC_printf1("sp = 0x%lx\n", (long) &arg);
#   endif
    LOCK();
    me = GC_new_thread(my_pthread);
    me -> flags = si -> flags;
    me -> stack_ptr = 0;
    /* me -> stack_end = GC_linux_stack_base(); -- currently (11/99)     */
    /* doesn't work because the stack base in /proc/self/stat is the     */
    /* one for the main thread.  There is a strong argument that that's  */
    /* a kernel bug, but a pervasive one.                                */
#   ifdef STACK_GROWS_DOWN
      me -> stack_end = (ptr_t)(((word)(&dummy) + (GC_page_size - 1))
                                & ~(GC_page_size - 1));
      me -> stack_ptr = me -> stack_end - 0x10;
        /* Needs to be plausible, since an asynchronous stack mark      */
        /* should not crash.                                            */
#   else
      me -> stack_end = (ptr_t)((word)(&dummy) & ~(GC_page_size - 1));
      me -> stack_ptr = me -> stack_end + 0x10;
#   endif
    /* This is dubious, since we may be more than a page into the stack, */
    /* and hence skip some of it, though it's not clear that matters.    */
#   ifdef IA64
      me -> backing_store_end = (ptr_t)
                        (GC_save_regs_in_stack() & ~(GC_page_size - 1));
        /* This is also < 100% convincing.  We should also read this    */
        /* from /proc, but the hook to do so isn't there yet.           */
#   endif /* IA64 */
    UNLOCK();
    start = si -> start_routine;
#   ifdef DEBUG_THREADS
        GC_printf1("start_routine = 0x%lx\n", start);
#   endif
    start_arg = si -> arg;
    sem_post(&(si -> registered));
    pthread_cleanup_push(GC_thread_exit_proc, si);
    result = (*start)(start_arg);
#if DEBUG_THREADS
    GC_printf1("Finishing thread 0x%x\n", pthread_self());
#endif
    me -> status = result;
    me -> flags |= FINISHED;
    pthread_cleanup_pop(1);
        /* Cleanup acquires lock, ensuring that we can't exit           */
        /* while a collection that thinks we're alive is trying to stop */
        /* us.                                                           */
    return(result);
}
int
WRAP_FUNC(pthread_create)(pthread_t *new_thread,
                          const pthread_attr_t *attr,
                          void *(*start_routine)(void *), void *arg)
{
    int result;
    GC_thread t;
    pthread_t my_new_thread;
    void * stack;
    size_t stacksize;
    pthread_attr_t new_attr;
    int detachstate;
    word my_flags = 0;
    struct start_info * si = GC_malloc(sizeof(struct start_info));
        /* This is otherwise saved only in an area mmapped by the thread */
        /* library, which isn't visible to the collector.                */

    if (0 == si) return(ENOMEM);
    sem_init(&(si -> registered), 0, 0);
    si -> start_routine = start_routine;
    si -> arg = arg;
    LOCK();
    if (!GC_thr_initialized) GC_thr_init();
    if (NULL == attr) {
        stack = 0;
        (void) pthread_attr_init(&new_attr);
    } else {
        new_attr = *attr;
    }
    pthread_attr_getdetachstate(&new_attr, &detachstate);
    if (PTHREAD_CREATE_DETACHED == detachstate) my_flags |= DETACHED;
    si -> flags = my_flags;
    UNLOCK();
#   ifdef DEBUG_THREADS
        GC_printf1("About to start new thread from thread 0x%X\n",
                   pthread_self());
#   endif
    result = REAL_FUNC(pthread_create)(new_thread, &new_attr, GC_start_routine, si);
#   ifdef DEBUG_THREADS
        GC_printf1("Started thread 0x%X\n", *new_thread);
#   endif
    /* Wait until child has been added to the thread table.            */
    /* This also ensures that we hold onto si until the child is done  */
    /* with it.  Thus it doesn't matter whether it is otherwise        */
    /* visible to the collector.                                       */
    if (0 != sem_wait(&(si -> registered))) ABORT("sem_wait failed");
    sem_destroy(&(si -> registered));
    /* pthread_attr_destroy(&new_attr); */
    return(result);
}
#if defined(USE_SPIN_LOCK)

VOLATILE GC_bool GC_collecting = 0;
                        /* A hint that we're in the collector and       */
                        /* holding the allocation lock for an           */
                        /* extended period.                             */

/* Reasonably fast spin locks.  Basically the same implementation  */
/* as STL alloc.h.  This isn't really the right way to do this,    */
/* but until the POSIX scheduling mess gets straightened out ...   */

volatile unsigned int GC_allocate_lock = 0;
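/*
 * The spin lock below assumes a machine-specific GC_test_and_set() that
 * atomically sets its target and returns the previous value (0 when the
 * lock was free), with the matching release done elsewhere by clearing
 * GC_allocate_lock (GC_clear() in the port headers).  A minimal sketch of
 * the intended usage, under those assumptions:
 */
#if 0
    GC_lock();                          /* spins, then yields/sleeps    */
    /* ... allocate or collect ... */
    GC_clear(&GC_allocate_lock);        /* what UNLOCK() amounts to     */
#endif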
void GC_lock()
{
#   define low_spin_max 30      /* spin cycles if we suspect uniprocessor */
#   define high_spin_max 1000   /* spin cycles for multiprocessor */
    static unsigned spin_max = low_spin_max;
    unsigned my_spin_max;
    static unsigned last_spins = 0;
    unsigned my_last_spins;
    volatile unsigned junk;
#   define PAUSE junk *= junk; junk *= junk; junk *= junk; junk *= junk
    int i;

    if (!GC_test_and_set(&GC_allocate_lock)) {
        return;
    }
    junk = 0;
    my_spin_max = spin_max;
    my_last_spins = last_spins;
    for (i = 0; i < my_spin_max; i++) {
        if (GC_collecting) goto yield;
        if (i < my_last_spins/2 || GC_allocate_lock) {
            PAUSE;
            continue;
        }
        if (!GC_test_and_set(&GC_allocate_lock)) {
            /*
             * got it!
             * Spinning worked.  Thus we're probably not being scheduled
             * against the other process with which we were contending.
             * Thus it makes sense to spin longer the next time.
             */
            last_spins = i;
            spin_max = high_spin_max;
            return;
        }
    }
    /* We are probably being scheduled against the other process.  Sleep. */
    spin_max = low_spin_max;
yield:
    for (i = 0;; ++i) {
        if (!GC_test_and_set(&GC_allocate_lock)) {
            return;
        }
#       define SLEEP_THRESHOLD 12
                /* nanosleep(<= 2ms) just spins under Linux.  We        */
                /* want to be careful to avoid that behavior.           */
        if (i < SLEEP_THRESHOLD) {
            sched_yield();
        } else {
            struct timespec ts;

            if (i > 26) i = 26;
                /* Don't wait for more than about 60msecs, even         */
                /* under extreme contention.                            */
            ts.tv_sec = 0;
            ts.tv_nsec = 1 << i;
            nanosleep(&ts, 0);
        }
    }
}
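/*
 * Back-of-the-envelope check on the exponential backoff above (illustration
 * only): i is clamped to the range SLEEP_THRESHOLD..26, so the nanosleep()
 * interval grows from 1 << 12 ns (about 4 microseconds) up to 1 << 26 ns,
 * roughly 67 ms, which matches the "no more than about 60msecs" comment.
 */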
#endif /* known architecture */

# endif /* LINUX_THREADS */