boehm-gc/linux_threads.c
/*
 * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1998 by Fergus Henderson.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/*
 * Support code for LinuxThreads, the clone()-based kernel
 * thread package for Linux which is included in libc6.
 *
 * This code relies on implementation details of LinuxThreads
 * (i.e. properties not guaranteed by the Pthread standard):
 *
 *      - the function GC_linux_thread_top_of_stack(void)
 *        relies on the way LinuxThreads lays out thread stacks
 *        in the address space.
 *
 * Note that there is a lot of code duplication between linux_threads.c
 * and irix_threads.c; any changes made here may need to be reflected
 * there too.
 */
/* #define DEBUG_THREADS 1 */

/* ANSI C requires that a compilation unit contains something */

# include "gc_priv.h"

# if defined(LINUX_THREADS)

# include <pthread.h>
# include <sched.h>
# include <time.h>
# include <errno.h>
# include <unistd.h>
# include <sys/mman.h>
# include <sys/time.h>
# include <semaphore.h>
# include <signal.h>

#ifdef USE_LD_WRAP
#   define WRAP_FUNC(f) __wrap_##f
#   define REAL_FUNC(f) __real_##f
#else
#   define WRAP_FUNC(f) GC_##f
#   define REAL_FUNC(f) f
#   undef pthread_create
#   undef pthread_sigmask
#   undef pthread_join
#endif
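
/* With USE_LD_WRAP the wrappers below are meant to be installed via the */
/* GNU ld --wrap mechanism (i.e. linking with something along the lines  */
/* of -Wl,--wrap,pthread_create), so that client calls are redirected to */
/* __wrap_pthread_create and the real library entry is reached through   */
/* __real_pthread_create.  Otherwise the GC_ prefixed wrappers are used, */
/* and clients are expected to call those instead.                       */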
void GC_thr_init();

#if 0
void GC_print_sig_mask()
{
    sigset_t blocked;
    int i;

    if (pthread_sigmask(SIG_BLOCK, NULL, &blocked) != 0)
        ABORT("pthread_sigmask");
    GC_printf0("Blocked: ");
    for (i = 1; i <= MAXSIG; i++) {
        if (sigismember(&blocked, i)) { GC_printf1("%ld ", (long) i); }
    }
    GC_printf0("\n");
}
#endif
/* We use the allocation lock to protect thread-related data structures. */

/* The set of all known threads.  We intercept thread creation and      */
/* joins.  We never actually create detached threads.  We allocate all  */
/* new thread stacks ourselves.  These allow us to maintain this        */
/* data structure.                                                      */
/* Protected by GC_thr_lock.                                            */
/* Some of this should be declared volatile, but that's inconsistent    */
/* with some library routine declarations.                              */
typedef struct GC_Thread_Rep {
    struct GC_Thread_Rep * next;  /* More recently allocated threads    */
                                  /* with a given pthread id come       */
                                  /* first.  (All but the first are     */
                                  /* guaranteed to be dead, but we may  */
                                  /* not yet have registered the join.) */
    pthread_t id;
    word flags;
#   define FINISHED 1       /* Thread has exited.                  */
#   define DETACHED 2       /* Thread is intended to be detached.  */
#   define MAIN_THREAD 4    /* True for the original thread only.  */

    ptr_t stack_end;        /* Cold end of the stack.               */
    ptr_t stack_ptr;        /* Valid only when stopped.             */
#   ifdef IA64
      ptr_t backing_store_end;
      ptr_t backing_store_ptr;
#   endif
    int signal;
    void * status;          /* The value returned from the thread.  */
                            /* Used only to avoid premature         */
                            /* reclamation of any data it might     */
                            /* reference.                           */
} * GC_thread;
GC_thread GC_lookup_thread(pthread_t id);

/*
 * The only way to suspend threads given the pthread interface is to send
 * signals.  We can't use SIGSTOP directly, because we need to get the
 * thread to save its stack pointer in the GC thread table before
 * suspending.  So we have to reserve a signal of our own for this.
 * This means we have to intercept client calls to change the signal mask.
 * The linuxthreads package already uses SIGUSR1 and SIGUSR2,
 * so we need to reuse something else.  I chose SIGPWR.
 * (Perhaps SIGUNUSED would be a better choice.)
 */
#define SIG_SUSPEND SIGPWR

#define SIG_RESTART SIGXCPU

sem_t GC_suspend_ack_sem;
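
/* Each suspended thread posts GC_suspend_ack_sem from its SIG_SUSPEND  */
/* handler once its stack pointer has been recorded; GC_stop_world()    */
/* below waits on the semaphore once per thread it signalled.           */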
/*
GC_linux_thread_top_of_stack() relies on implementation details of
LinuxThreads, namely that thread stacks are allocated on 2M boundaries
and grow to no more than 2M.
To make sure that we're using LinuxThreads and not some other thread
package, we generate a dummy reference to `pthread_kill_other_threads_np'
(was `__pthread_initial_thread_bos' but that disappeared),
which is a symbol defined in LinuxThreads, but (hopefully) not in other
thread packages.
*/
void (*dummy_var_to_force_linux_threads)() = pthread_kill_other_threads_np;

#define LINUX_THREADS_STACK_SIZE  (2 * 1024 * 1024)
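
/* Given the 2M alignment and 2M size limit described above,            */
/* (sp | (LINUX_THREADS_STACK_SIZE - 1)) + 1 rounds the current stack   */
/* pointer up to the next 2M boundary, i.e. the high end of the region  */
/* containing this thread's stack.                                      */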
static inline ptr_t GC_linux_thread_top_of_stack(void)
{
    char *sp = GC_approx_sp();
    ptr_t tos = (ptr_t) (((unsigned long)sp | (LINUX_THREADS_STACK_SIZE - 1)) + 1);
#if DEBUG_THREADS
    GC_printf1("SP = %lx\n", (unsigned long)sp);
    GC_printf1("TOS = %lx\n", (unsigned long)tos);
#endif
    return tos;
}
#if defined(SPARC) || defined(IA64)
  extern word GC_save_regs_in_stack();
#endif
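
/* Stopping the world works roughly as follows: the thread doing the    */
/* collection (which holds the allocation lock) sends SIG_SUSPEND to    */
/* every other registered thread.  Each recipient records its stack     */
/* pointer in its GC_thread entry, posts GC_suspend_ack_sem, and blocks */
/* in sigsuspend() until it receives SIG_RESTART.                       */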
void GC_suspend_handler(int sig)
{
    int dummy;
    pthread_t my_thread = pthread_self();
    GC_thread me;
    sigset_t all_sigs;
    sigset_t old_sigs;
    int i;
    sigset_t mask;

    if (sig != SIG_SUSPEND) ABORT("Bad signal in suspend_handler");

#if DEBUG_THREADS
    GC_printf1("Suspending 0x%x\n", my_thread);
#endif

    me = GC_lookup_thread(my_thread);
    /* The lookup here is safe, since I'm doing this on behalf   */
    /* of a thread which holds the allocation lock in order      */
    /* to stop the world.  Thus concurrent modification of the   */
    /* data structure is impossible.                              */
#   ifdef SPARC
      me -> stack_ptr = (ptr_t)GC_save_regs_in_stack();
#   else
      me -> stack_ptr = (ptr_t)(&dummy);
#   endif
#   ifdef IA64
      me -> backing_store_ptr = (ptr_t)GC_save_regs_in_stack();
#   endif

    /* Tell the thread that wants to stop the world that this    */
    /* thread has been stopped.  Note that sem_post() is          */
    /* the only async-signal-safe primitive in LinuxThreads.      */
    sem_post(&GC_suspend_ack_sem);

    /* Wait until that thread tells us to restart by sending     */
    /* this thread a SIG_RESTART signal.                          */
    /* SIG_RESTART should be masked at this point.  Thus there    */
    /* is no race.                                                */
    if (sigfillset(&mask) != 0) ABORT("sigfillset() failed");
    if (sigdelset(&mask, SIG_RESTART) != 0) ABORT("sigdelset() failed");
#   ifdef NO_SIGNALS
      if (sigdelset(&mask, SIGINT) != 0) ABORT("sigdelset() failed");
      if (sigdelset(&mask, SIGQUIT) != 0) ABORT("sigdelset() failed");
      if (sigdelset(&mask, SIGTERM) != 0) ABORT("sigdelset() failed");
      if (sigdelset(&mask, SIGABRT) != 0) ABORT("sigdelset() failed");
#   endif
    do {
        me->signal = 0;
        sigsuspend(&mask);             /* Wait for signal */
    } while (me->signal != SIG_RESTART);

#if DEBUG_THREADS
    GC_printf1("Continuing 0x%x\n", my_thread);
#endif
}
void GC_restart_handler(int sig)
{
    GC_thread me;

    if (sig != SIG_RESTART) ABORT("Bad signal in restart_handler");

    /* Let the GC_suspend_handler() know that we got a SIG_RESTART. */
    /* The lookup here is safe, since I'm doing this on behalf      */
    /* of a thread which holds the allocation lock in order         */
    /* to stop the world.  Thus concurrent modification of the      */
    /* data structure is impossible.                                 */
    me = GC_lookup_thread(pthread_self());
    me->signal = SIG_RESTART;

    /*
    ** Note: even if we didn't do anything useful here,
    ** it would still be necessary to have a signal handler,
    ** rather than ignoring the signals, otherwise
    ** the signals will not be delivered at all, and
    ** will thus not interrupt the sigsuspend() above.
    */

#if DEBUG_THREADS
    GC_printf1("In GC_restart_handler for 0x%x\n", pthread_self());
#endif
}
GC_bool GC_thr_initialized = FALSE;

# define THREAD_TABLE_SZ 128    /* Must be power of 2 */
volatile GC_thread GC_threads[THREAD_TABLE_SZ];
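
/* GC_threads is a hash table indexed by pthread id; entries that hash  */
/* to the same bucket are chained through the next field, with the most */
/* recently registered thread first.                                    */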
/* Add a thread to GC_threads.  We assume it wasn't already there. */
/* Caller holds allocation lock.                                    */
GC_thread GC_new_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    GC_thread result;
    static struct GC_Thread_Rep first_thread;
    static GC_bool first_thread_used = FALSE;

    if (!first_thread_used) {
        result = &first_thread;
        first_thread_used = TRUE;
        /* Don't acquire allocation lock, since we may already hold it. */
    } else {
        result = (struct GC_Thread_Rep *)
                 GC_generic_malloc_inner(sizeof(struct GC_Thread_Rep), NORMAL);
    }
    if (result == 0) return(0);
    result -> id = id;
    result -> next = GC_threads[hv];
    GC_threads[hv] = result;
    /* result -> flags = 0; */
    return(result);
}
/* Delete a thread from GC_threads.  We assume it is there. */
/* (The code intentionally traps if it wasn't.)             */
/* Caller holds allocation lock.                            */
void GC_delete_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (!pthread_equal(p -> id, id)) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
}
/* If a thread has been joined, but we have not yet         */
/* been notified, then there may be more than one thread    */
/* in the table with the same pthread id.                   */
/* This is OK, but we need a way to delete a specific one.  */
void GC_delete_gc_thread(pthread_t id, GC_thread gc_id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (p != gc_id) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
}
/* Return a GC_thread corresponding to a given thread_t.   */
/* Returns 0 if it's not there.                             */
/* Caller holds allocation lock or otherwise inhibits       */
/* updates.                                                  */
/* If there is more than one thread with the given id we    */
/* return the most recent one.                               */
GC_thread GC_lookup_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];

    while (p != 0 && !pthread_equal(p -> id, id)) p = p -> next;
    return(p);
}
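
/* Threads whose FINISHED flag is set have already exited; the stop,    */
/* start and stack-pushing routines below skip them, since they no      */
/* longer run and their stacks need not be scanned.                     */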
/* Caller holds allocation lock. */
void GC_stop_world()
{
    pthread_t my_thread = pthread_self();
    register int i;
    register GC_thread p;
    register int n_live_threads = 0;
    register int result;

    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> id != my_thread) {
            if (p -> flags & FINISHED) continue;
            n_live_threads++;
#if DEBUG_THREADS
            GC_printf1("Sending suspend signal to 0x%x\n", p -> id);
#endif
            result = pthread_kill(p -> id, SIG_SUSPEND);
            switch(result) {
                case ESRCH:
                    /* Not really there anymore.  Possible? */
                    n_live_threads--;
                    break;
                case 0:
                    break;
                default:
                    ABORT("pthread_kill failed");
            }
        }
      }
    }
    for (i = 0; i < n_live_threads; i++) {
        sem_wait(&GC_suspend_ack_sem);
    }
#if DEBUG_THREADS
    GC_printf1("World stopped 0x%x\n", pthread_self());
#endif
}
/* Caller holds allocation lock. */
void GC_start_world()
{
    pthread_t my_thread = pthread_self();
    register int i;
    register GC_thread p;
    register int n_live_threads = 0;
    register int result;

#   if DEBUG_THREADS
      GC_printf0("World starting\n");
#   endif

    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> id != my_thread) {
            if (p -> flags & FINISHED) continue;
            n_live_threads++;
#if DEBUG_THREADS
            GC_printf1("Sending restart signal to 0x%x\n", p -> id);
#endif
            result = pthread_kill(p -> id, SIG_RESTART);
            switch(result) {
                case ESRCH:
                    /* Not really there anymore.  Possible? */
                    n_live_threads--;
                    break;
                case 0:
                    break;
                default:
                    ABORT("pthread_kill failed");
            }
        }
      }
    }
#if DEBUG_THREADS
    GC_printf0("World started\n");
#endif
}
# ifdef IA64
#   define IF_IA64(x) x
# else
#   define IF_IA64(x)
# endif
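
/* For the thread that is running the collection we use the live stack  */
/* pointer (and, on IA64, the live register backing store pointer); for */
/* every other thread we use the values recorded by GC_suspend_handler  */
/* while the world was being stopped.                                   */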
/* We hold allocation lock.  Should do exactly the right thing if the */
/* world is stopped.  Should not fail if it isn't.                    */
void GC_push_all_stacks()
{
    int i;
    GC_thread p;
    ptr_t sp = GC_approx_sp();
    ptr_t lo, hi;
    /* On IA64, we also need to scan the register backing store. */
    IF_IA64(ptr_t bs_lo; ptr_t bs_hi;)
    pthread_t me = pthread_self();

    if (!GC_thr_initialized) GC_thr_init();
#if DEBUG_THREADS
    GC_printf1("Pushing stacks from thread 0x%lx\n", (unsigned long) me);
#endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> flags & FINISHED) continue;
        if (pthread_equal(p -> id, me)) {
#           ifdef SPARC
              lo = (ptr_t)GC_save_regs_in_stack();
#           else
              lo = GC_approx_sp();
#           endif
            IF_IA64(bs_hi = (ptr_t)GC_save_regs_in_stack();)
        } else {
            lo = p -> stack_ptr;
            IF_IA64(bs_hi = p -> backing_store_ptr;)
        }
        if ((p -> flags & MAIN_THREAD) == 0) {
            hi = p -> stack_end;
            IF_IA64(bs_lo = p -> backing_store_end);
        } else {
            /* The original stack. */
            hi = GC_stackbottom;
            IF_IA64(bs_lo = BACKING_STORE_BASE;)
        }
#if DEBUG_THREADS
        GC_printf3("Stack for thread 0x%lx = [%lx,%lx)\n",
                   (unsigned long) p -> id,
                   (unsigned long) lo, (unsigned long) hi);
#endif
        if (0 == lo) ABORT("GC_push_all_stacks: sp not set!\n");
        GC_push_all_stack(lo, hi);
#       ifdef IA64
          if (pthread_equal(p -> id, me)) {
            GC_push_all_eager(bs_lo, bs_hi);
          } else {
            GC_push_all_stack(bs_lo, bs_hi);
          }
#       endif
      }
    }
}
/* We hold the allocation lock. */
void GC_thr_init()
{
    int dummy;
    GC_thread t;
    struct sigaction act;

    if (GC_thr_initialized) return;
    GC_thr_initialized = TRUE;

    if (sem_init(&GC_suspend_ack_sem, 0, 0) != 0)
        ABORT("sem_init failed");

    act.sa_flags = SA_RESTART;
    if (sigfillset(&act.sa_mask) != 0) {
        ABORT("sigfillset() failed");
    }
#   ifdef NO_SIGNALS
      if (sigdelset(&act.sa_mask, SIGINT) != 0
          || sigdelset(&act.sa_mask, SIGQUIT) != 0
          || sigdelset(&act.sa_mask, SIGTERM) != 0
          || sigdelset(&act.sa_mask, SIGABRT) != 0) {
        ABORT("sigdelset() failed");
      }
#   endif

    /* SIG_RESTART is unmasked by the handler when necessary. */
    act.sa_handler = GC_suspend_handler;
    if (sigaction(SIG_SUSPEND, &act, NULL) != 0) {
        ABORT("Cannot set SIG_SUSPEND handler");
    }

    act.sa_handler = GC_restart_handler;
    if (sigaction(SIG_RESTART, &act, NULL) != 0) {
        ABORT("Cannot set SIG_RESTART handler");
    }

    /* Add the initial thread, so we can stop it. */
    t = GC_new_thread(pthread_self());
    t -> stack_ptr = (ptr_t)(&dummy);
    t -> flags = DETACHED | MAIN_THREAD;
}
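
/* The pthread_sigmask wrapper silently removes SIG_SUSPEND from any    */
/* set the client tries to block, so a client can never mask the signal */
/* the collector relies on to stop the world.                           */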
int WRAP_FUNC(pthread_sigmask)(int how, const sigset_t *set, sigset_t *oset)
{
    sigset_t fudged_set;

    if (set != NULL && (how == SIG_BLOCK || how == SIG_SETMASK)) {
        fudged_set = *set;
        sigdelset(&fudged_set, SIG_SUSPEND);
        set = &fudged_set;
    }
    return(REAL_FUNC(pthread_sigmask)(how, set, oset));
}
struct start_info {
    void *(*start_routine)(void *);
    void *arg;
    word flags;
    sem_t registered;       /* 1 ==> in our thread table, but  */
                            /* parent hasn't yet noticed.      */
};
void GC_thread_exit_proc(void *arg)
{
    GC_thread me;
    struct start_info * si = arg;

    LOCK();
    me = GC_lookup_thread(pthread_self());
    if (me -> flags & DETACHED) {
        GC_delete_thread(pthread_self());
    } else {
        me -> flags |= FINISHED;
    }
    if (GC_incremental && GC_collection_in_progress()) {
        int old_gc_no = GC_gc_no;

        /* Make sure that no part of our stack is still on the mark stack, */
        /* since it's about to be unmapped.                                 */
        while (GC_incremental && GC_collection_in_progress()
               && old_gc_no == GC_gc_no) {
            ENTER_GC();
            GC_collect_a_little_inner(1);
            EXIT_GC();
            UNLOCK();
            sched_yield();
            LOCK();
        }
    }
    UNLOCK();
}
int WRAP_FUNC(pthread_join)(pthread_t thread, void **retval)
{
    int result;
    GC_thread thread_gc_id;

    LOCK();
    thread_gc_id = GC_lookup_thread(thread);
    /* This is guaranteed to be the intended one, since the thread id */
    /* can't have been recycled by pthreads.                          */
    UNLOCK();
    result = REAL_FUNC(pthread_join)(thread, retval);
    LOCK();
    /* Here the pthread thread id may have been recycled. */
    GC_delete_gc_thread(thread, thread_gc_id);
    UNLOCK();
    return result;
}
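
/* GC_start_routine runs in the newly created thread: it registers the  */
/* thread in GC_threads, posts si -> registered so that the creator     */
/* (blocked in the pthread_create wrapper below) knows registration is  */
/* complete, and only then calls the client's start routine.            */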
void * GC_start_routine(void * arg)
{
    int dummy;
    struct start_info * si = arg;
    void * result;
    GC_thread me;
    pthread_t my_pthread;
    void *(*start)(void *);
    void *start_arg;

    my_pthread = pthread_self();
#   ifdef DEBUG_THREADS
      GC_printf1("Starting thread 0x%lx\n", my_pthread);
      GC_printf1("pid = %ld\n", (long) getpid());
      GC_printf1("sp = 0x%lx\n", (long) &arg);
#   endif
    LOCK();
    me = GC_new_thread(my_pthread);
    me -> flags = si -> flags;
    me -> stack_ptr = 0;
    /* me -> stack_end = GC_linux_stack_base(); -- currently (11/99)     */
    /* doesn't work because the stack base in /proc/self/stat is the     */
    /* one for the main thread.  There is a strong argument that that's  */
    /* a kernel bug, but a pervasive one.                                 */
#   ifdef STACK_GROWS_DOWN
      me -> stack_end = (ptr_t)(((word)(&dummy) + (GC_page_size - 1))
                                & ~(GC_page_size - 1));
      me -> stack_ptr = me -> stack_end - 0x10;
      /* Needs to be plausible, since an asynchronous stack mark */
      /* should not crash.                                        */
#   else
      me -> stack_end = (ptr_t)((word)(&dummy) & ~(GC_page_size - 1));
      me -> stack_ptr = me -> stack_end + 0x10;
#   endif
    /* This is dubious, since we may be more than a page into the stack, */
    /* and hence skip some of it, though it's not clear that matters.    */
#   ifdef IA64
      me -> backing_store_end = (ptr_t)
                (GC_save_regs_in_stack() & ~(GC_page_size - 1));
      /* This is also < 100% convincing.  We should also read this */
      /* from /proc, but the hook to do so isn't there yet.        */
#   endif /* IA64 */
    UNLOCK();
    start = si -> start_routine;
#   ifdef DEBUG_THREADS
      GC_printf1("start_routine = 0x%lx\n", start);
#   endif
    start_arg = si -> arg;
    sem_post(&(si -> registered));
    pthread_cleanup_push(GC_thread_exit_proc, si);
    result = (*start)(start_arg);
#if DEBUG_THREADS
    GC_printf1("Finishing thread 0x%x\n", pthread_self());
#endif
    me -> status = result;
    me -> flags |= FINISHED;
    pthread_cleanup_pop(1);
    /* Cleanup acquires lock, ensuring that we can't exit            */
    /* while a collection that thinks we're alive is trying to stop  */
    /* us.                                                            */
    return(result);
}
int
WRAP_FUNC(pthread_create)(pthread_t *new_thread,
                          const pthread_attr_t *attr,
                          void *(*start_routine)(void *), void *arg)
{
    int result;
    GC_thread t;
    pthread_t my_new_thread;
    void * stack;
    size_t stacksize;
    pthread_attr_t new_attr;
    int detachstate;
    word my_flags = 0;
    struct start_info * si = GC_malloc(sizeof(struct start_info));
        /* This is otherwise saved only in an area mmapped by the thread */
        /* library, which isn't visible to the collector.                */

    if (0 == si) return(ENOMEM);
    sem_init(&(si -> registered), 0, 0);
    si -> start_routine = start_routine;
    si -> arg = arg;
    LOCK();
    if (!GC_thr_initialized) GC_thr_init();
    if (NULL == attr) {
        stack = 0;
        (void) pthread_attr_init(&new_attr);
    } else {
        new_attr = *attr;
    }
    pthread_attr_getdetachstate(&new_attr, &detachstate);
    if (PTHREAD_CREATE_DETACHED == detachstate) my_flags |= DETACHED;
    si -> flags = my_flags;
    UNLOCK();
#   ifdef DEBUG_THREADS
      GC_printf1("About to start new thread from thread 0x%X\n",
                 pthread_self());
#   endif
    result = REAL_FUNC(pthread_create)(new_thread, &new_attr, GC_start_routine, si);
#   ifdef DEBUG_THREADS
      GC_printf1("Started thread 0x%X\n", *new_thread);
#   endif
    /* Wait until child has been added to the thread table.           */
    /* This also ensures that we hold onto si until the child is done */
    /* with it.  Thus it doesn't matter whether it is otherwise       */
    /* visible to the collector.                                       */
    if (0 != sem_wait(&(si -> registered))) ABORT("sem_wait failed");
    sem_destroy(&(si -> registered));
    /* pthread_attr_destroy(&new_attr); */
    return(result);
}
#if defined(USE_SPIN_LOCK)

VOLATILE GC_bool GC_collecting = 0;
                        /* A hint that we're in the collector and  */
                        /* holding the allocation lock for an      */
                        /* extended period.                        */

/* Reasonably fast spin locks.  Basically the same implementation */
/* as STL alloc.h.  This isn't really the right way to do this,   */
/* but until the POSIX scheduling mess gets straightened out ...  */

volatile unsigned int GC_allocate_lock = 0;
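
/* GC_lock() first spins (adaptively: a successful spin raises the spin */
/* limit for next time), then falls back to sched_yield(), and finally  */
/* to nanosleep() with an exponentially growing delay capped at         */
/* 1 << 26 ns, i.e. roughly 67 msecs.                                   */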
void GC_lock()
{
#   define low_spin_max 30     /* spin cycles if we suspect uniprocessor */
#   define high_spin_max 1000  /* spin cycles for multiprocessor         */
    static unsigned spin_max = low_spin_max;
    unsigned my_spin_max;
    static unsigned last_spins = 0;
    unsigned my_last_spins;
    volatile unsigned junk;
#   define PAUSE junk *= junk; junk *= junk; junk *= junk; junk *= junk
    int i;

    if (!GC_test_and_set(&GC_allocate_lock)) {
        return;
    }
    junk = 0;
    my_spin_max = spin_max;
    my_last_spins = last_spins;
    for (i = 0; i < my_spin_max; i++) {
        if (GC_collecting) goto yield;
        if (i < my_last_spins/2 || GC_allocate_lock) {
            PAUSE;
            continue;
        }
        if (!GC_test_and_set(&GC_allocate_lock)) {
            /*
             * got it!
             * Spinning worked.  Thus we're probably not being scheduled
             * against the other process with which we were contending.
             * Thus it makes sense to spin longer the next time.
             */
            last_spins = i;
            spin_max = high_spin_max;
            return;
        }
    }
    /* We are probably being scheduled against the other process.  Sleep. */
    spin_max = low_spin_max;
yield:
    for (i = 0;; ++i) {
        if (!GC_test_and_set(&GC_allocate_lock)) {
            return;
        }
#       define SLEEP_THRESHOLD 12
            /* nanosleep(<= 2ms) just spins under Linux.  We */
            /* want to be careful to avoid that behavior.    */
        if (i < SLEEP_THRESHOLD) {
            sched_yield();
        } else {
            struct timespec ts;

            if (i > 26) i = 26;
            /* Don't wait for more than about 60msecs, even */
            /* under extreme contention.                    */
            ts.tv_sec = 0;
            ts.tv_nsec = 1 << i;
            nanosleep(&ts, 0);
        }
    }
}

#endif /* USE_SPIN_LOCK */

# endif /* LINUX_THREADS */