/*
 * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1998 by Fergus Henderson.  All rights reserved.
 * Copyright (c) 2000-2001 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/*
 * Support code for LinuxThreads, the clone()-based kernel
 * thread package for Linux which is included in libc6.
 *
 * This code relies on implementation details of LinuxThreads
 * (i.e. properties not guaranteed by the Pthread standard),
 * though this version now does less of that than the other Pthreads
 * support code.
 *
 * Note that there is a lot of code duplication between linux_threads.c
 * and thread support for some of the other Posix platforms; any changes
 * made here may need to be reflected there too.
 *
 * Linux_threads.c now also includes some code to support HPUX and
 * OSF1 (Compaq Tru64 Unix, really).  The OSF1 support is not yet
 * functional.  The OSF1 code is based on Eric Benson's
 * patch, though that was originally against hpux_irix_threads.  The code
 * here is completely untested.  With 0.0000001% probability, it might
 * actually work.
 *
 * Eric also suggested an alternate basis for a lock implementation in
 * his code:
 * + #elif defined(OSF1)
 * +    unsigned long GC_allocate_lock = 0;
 * +    msemaphore GC_allocate_semaphore;
 * + #  define GC_TRY_LOCK() \
 * +    ((msem_lock(&GC_allocate_semaphore, MSEM_IF_NOWAIT) == 0) \
 * +     ? (GC_allocate_lock = 1) \
 * +     : 0)
 * + #  define GC_LOCK_TAKEN GC_allocate_lock
 */
/* #define DEBUG_THREADS 1 */

/* ANSI C requires that a compilation unit contains something */

# if defined(GC_LINUX_THREADS) || defined(LINUX_THREADS) \
     || defined(GC_HPUX_THREADS) || defined(HPUX_THREADS) \
     || defined(GC_OSF1_THREADS) || defined(OSF1_THREADS)
# include "private/gc_priv.h"

# if defined(HPUX_THREADS) && !defined(USE_PTHREAD_SPECIFIC) \
     && !defined(USE_HPUX_TLS)
#   define USE_HPUX_TLS
# endif

# ifdef THREAD_LOCAL_ALLOC
#   if !defined(USE_PTHREAD_SPECIFIC) && !defined(USE_HPUX_TLS)
#     include "private/specific.h"
#   endif
#   if defined(USE_PTHREAD_SPECIFIC)
#     define GC_getspecific pthread_getspecific
#     define GC_setspecific pthread_setspecific
#     define GC_key_create pthread_key_create
      typedef pthread_key_t GC_key_t;
#   endif
#   if defined(USE_HPUX_TLS)
#     define GC_getspecific(x) (x)
#     define GC_setspecific(key, v) ((key) = (v), 0)
#     define GC_key_create(key, d) 0
      typedef void * GC_key_t;
#   endif
# endif
# include <stdlib.h>
# include <pthread.h>
# include <sched.h>
# include <time.h>
# include <errno.h>
# include <unistd.h>
# include <sys/mman.h>
# include <sys/time.h>
# include <semaphore.h>
# include <signal.h>
# include <sys/types.h>
# include <sys/stat.h>
# include <fcntl.h>

#ifndef __GNUC__
#   define __inline__
#endif
#ifdef GC_USE_LD_WRAP
#   define WRAP_FUNC(f) __wrap_##f
#   define REAL_FUNC(f) __real_##f
#else
#   define WRAP_FUNC(f) GC_##f
#   define REAL_FUNC(f) f
#   undef pthread_create
#   undef pthread_sigmask
#   undef pthread_join
#   undef pthread_detach
#endif

void GC_thr_init();
#if 0
void GC_print_sig_mask()
{
    sigset_t blocked;
    int i;

    if (pthread_sigmask(SIG_BLOCK, NULL, &blocked) != 0)
        ABORT("pthread_sigmask");
    GC_printf0("Blocked: ");
    for (i = 1; i <= MAXSIG; i++) {
        if (sigismember(&blocked, i)) { GC_printf1("%ld ", (long) i); }
    }
    GC_printf0("\n");
}
#endif
/* We use the allocation lock to protect thread-related data structures. */

/* The set of all known threads.  We intercept thread creation and      */
/* joins.                                                                */
/* Protected by allocation/GC lock.                                      */
/* Some of this should be declared volatile, but that's inconsistent    */
/* with some library routine declarations.                               */
typedef struct GC_Thread_Rep {
    struct GC_Thread_Rep * next;  /* More recently allocated threads      */
                                  /* with a given pthread id come         */
                                  /* first.  (All but the first are       */
                                  /* guaranteed to be dead, but we may    */
                                  /* not yet have registered the join.)   */
    pthread_t id;
    short flags;
#       define FINISHED 1       /* Thread has exited.                   */
#       define DETACHED 2       /* Thread is intended to be detached.   */
#       define MAIN_THREAD 4    /* True for the original thread only.   */
    short thread_blocked;       /* Protected by GC lock.                */
                                /* Treated as a boolean value.  If set,  */
                                /* thread will acquire GC lock before    */
                                /* doing any pointer manipulations, and  */
                                /* has set its sp value.  Thus it does   */
                                /* not need to be sent a signal to stop  */
                                /* it.                                   */
    ptr_t stack_end;            /* Cold end of the stack.               */
    ptr_t stack_ptr;            /* Valid only when stopped.             */
#   ifdef IA64
      ptr_t backing_store_end;
      ptr_t backing_store_ptr;
#   endif
    int signal;
    void * status;              /* The value returned from the thread.  */
                                /* Used only to avoid premature          */
                                /* reclamation of any data it might      */
                                /* reference.                            */
#   ifdef THREAD_LOCAL_ALLOC
#       if CPP_WORDSZ == 64 && defined(ALIGN_DOUBLE)
#           define GRANULARITY 16
#           define NFREELISTS 49
#       else
#           define GRANULARITY 8
#           define NFREELISTS 65
#       endif
        /* The ith free list corresponds to size i*GRANULARITY */
#       define INDEX_FROM_BYTES(n) ((ADD_SLOP(n) + GRANULARITY - 1)/GRANULARITY)
#       define BYTES_FROM_INDEX(i) ((i) * GRANULARITY - EXTRA_BYTES)
#       define SMALL_ENOUGH(bytes) (ADD_SLOP(bytes) <= \
                                    (NFREELISTS-1)*GRANULARITY)
        ptr_t ptrfree_freelists[NFREELISTS];
        ptr_t normal_freelists[NFREELISTS];
#       ifdef GC_GCJ_SUPPORT
          ptr_t gcj_freelists[NFREELISTS];
#       endif
        /* Free lists contain either a pointer or a small count          */
        /* reflecting the number of granules allocated at that size.     */
        /* 0 ==> thread-local allocation in use, free list empty.        */
        /* > 0, <= DIRECT_GRANULES ==> using global allocation; too few  */
        /*        objects of this size have been allocated by this       */
        /*        thread.                                                */
        /* >= HBLKSIZE ==> pointer to nonempty free list.                */
        /* > DIRECT_GRANULES, < HBLKSIZE ==> transition to local alloc,  */
        /*        equivalent to 0.                                       */
#       define DIRECT_GRANULES (HBLKSIZE/GRANULARITY)
        /* Don't use local free lists for up to this much allocation.    */
#   endif
} * GC_thread;
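
/* Editor's worked example of the encoding above, assuming              */
/* GRANULARITY == 8, ADD_SLOP the identity (EXTRA_BYTES == 0), and      */
/* HBLKSIZE == 4096, so DIRECT_GRANULES == 512: a 20 byte request has   */
/* INDEX_FROM_BYTES(20) == 3 and is served from the 24 byte free list,  */
/* and BYTES_FROM_INDEX(3) == 24 is the object size used when that      */
/* list is refilled.  A list entry of, say, 5 is below DIRECT_GRANULES  */
/* and is therefore a count, not a pointer; only values >= HBLKSIZE     */
/* are ever dereferenced as free-list heads.                            */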
GC_thread GC_lookup_thread(pthread_t id);

static GC_bool parallel_initialized = FALSE;

# if defined(__GNUC__)
    void GC_init_parallel() __attribute__ ((constructor));
# else
    void GC_init_parallel();
# endif

# if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)

/* We don't really support thread-local allocation with DBG_HDRS_ALL */

#ifdef USE_HPUX_TLS
  __thread
#endif
GC_key_t GC_thread_key;

static GC_bool keys_initialized;
/* Recover the contents of the freelist array fl into the global one gfl. */
/* Note that the indexing scheme differs, in that gfl has finer size      */
/* resolution, even if not all entries are used.                          */
/* We hold the allocator lock.                                            */
static void return_freelists(ptr_t *fl, ptr_t *gfl)
{
    int i;
    ptr_t q, *qptr;
    size_t nwords;

    for (i = 1; i < NFREELISTS; ++i) {
        nwords = i * (GRANULARITY/sizeof(word));
        qptr = fl + i;
        q = *qptr;
        if ((word)q < HBLKSIZE) continue;
        if (gfl[nwords] == 0) {
            gfl[nwords] = q;
        } else {
            /* Concatenate: */
            for (; (word)q >= HBLKSIZE; qptr = &(obj_link(q)), q = *qptr);
            GC_ASSERT(0 == q);
            *qptr = gfl[nwords];
            gfl[nwords] = fl[i];
        }
        /* Clear fl[i], since the thread structure may hang around.   */
        /* Do it in a way that is likely to trap if we access it.     */
        fl[i] = (ptr_t)HBLKSIZE;
    }
}
/* We statically allocate a single "size 0" object.  It is linked to  */
/* itself, and is thus repeatedly reused for all size 0 allocation    */
/* requests.  (Size 0 gcj allocation requests are incorrect, and      */
/* we arrange for those to fault asap.)                               */
static ptr_t size_zero_object = (ptr_t)(&size_zero_object);
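
/* Editor's note: because the object's link word points back at the   */
/* object itself, popping it off a free list leaves the list head     */
/* pointing at the same object, so size 0 requests keep returning     */
/* this one address at no space cost.  GC_init_thread_local below     */
/* instead stores (ptr_t)(-1) in gcj_freelists[0]; that value passes  */
/* the >= HBLKSIZE test and is dereferenced on first use, which is    */
/* what makes a size 0 gcj request fault immediately.                 */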
/* Each thread structure must be initialized.    */
/* This call must be made from the new thread.   */
/* Caller holds allocation lock.                 */
void GC_init_thread_local(GC_thread p)
{
    int i;

    if (!keys_initialized) {
        if (0 != GC_key_create(&GC_thread_key, 0)) {
            ABORT("Failed to create key for local allocator");
        }
        keys_initialized = TRUE;
    }
    if (0 != GC_setspecific(GC_thread_key, p)) {
        ABORT("Failed to set thread specific allocation pointers");
    }
    for (i = 1; i < NFREELISTS; ++i) {
        p -> ptrfree_freelists[i] = (ptr_t)1;
        p -> normal_freelists[i] = (ptr_t)1;
#       ifdef GC_GCJ_SUPPORT
          p -> gcj_freelists[i] = (ptr_t)1;
#       endif
    }
    /* Set up the size 0 free lists.    */
    p -> ptrfree_freelists[0] = (ptr_t)(&size_zero_object);
    p -> normal_freelists[0] = (ptr_t)(&size_zero_object);
#   ifdef GC_GCJ_SUPPORT
      p -> gcj_freelists[0] = (ptr_t)(-1);
#   endif
}
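
/* Editor's note: the (ptr_t)1 entries stored above are counts in the */
/* encoding documented with GC_Thread_Rep: each list starts out       */
/* meaning "allocate globally; little demand seen so far", and the    */
/* count is bumped on each request until it exceeds DIRECT_GRANULES,  */
/* at which point the list is actually populated and used directly.   */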
#ifdef GC_GCJ_SUPPORT
  extern ptr_t * GC_gcjobjfreelist;
#endif

/* We hold the allocator lock.  */
void GC_destroy_thread_local(GC_thread p)
{
    /* We currently only do this from the thread itself.       */
    GC_ASSERT(GC_getspecific(GC_thread_key) == (void *)p);
    return_freelists(p -> ptrfree_freelists, GC_aobjfreelist);
    return_freelists(p -> normal_freelists, GC_objfreelist);
#   ifdef GC_GCJ_SUPPORT
      return_freelists(p -> gcj_freelists, GC_gcjobjfreelist);
#   endif
}
extern GC_PTR GC_generic_malloc_many();

GC_PTR GC_local_malloc(size_t bytes)
{
    if (EXPECT(!SMALL_ENOUGH(bytes), 0)) {
        return(GC_malloc(bytes));
    } else {
        int index = INDEX_FROM_BYTES(bytes);
        ptr_t * my_fl;
        ptr_t my_entry;
        GC_key_t k = GC_thread_key;
        void * tsd;

#       if defined(REDIRECT_MALLOC) && !defined(USE_PTHREAD_SPECIFIC) \
           || !defined(__GNUC__)
            if (EXPECT(0 == k, 0)) {
                /* This can happen if we get called when the world is   */
                /* being initialized.  Whether we can actually complete */
                /* the initialization then is unclear.                  */
                GC_init_parallel();
                k = GC_thread_key;
            }
#       endif
        tsd = GC_getspecific(GC_thread_key);
#       ifdef GC_ASSERTIONS
          LOCK();
          GC_ASSERT(tsd == (void *)GC_lookup_thread(pthread_self()));
          UNLOCK();
#       endif
        my_fl = ((GC_thread)tsd) -> normal_freelists + index;
        my_entry = *my_fl;
        if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
            ptr_t next = obj_link(my_entry);
            GC_PTR result = (GC_PTR)my_entry;
            *my_fl = next;
            obj_link(my_entry) = 0;
            PREFETCH_FOR_WRITE(next);
            return result;
        } else if ((word)my_entry - 1 < DIRECT_GRANULES) {
            *my_fl = my_entry + index + 1;
            return GC_malloc(bytes);
        } else {
            GC_generic_malloc_many(BYTES_FROM_INDEX(index), NORMAL, my_fl);
            if (*my_fl == 0) return GC_oom_fn(bytes);
            return GC_local_malloc(bytes);
        }
    }
}
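
/* Editor's summary of the three cases above: (1) my_entry >= HBLKSIZE: */
/* it is a real pointer, so pop the first object off the local list;    */
/* (2) my_entry - 1 < DIRECT_GRANULES: it is still a small count, so    */
/* bump it by the request size in granules and defer to the global      */
/* GC_malloc; (3) otherwise demand is high enough that the local list   */
/* is refilled with GC_generic_malloc_many and the allocation retried.  */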
GC_PTR GC_local_malloc_atomic(size_t bytes)
{
    if (EXPECT(!SMALL_ENOUGH(bytes), 0)) {
        return(GC_malloc_atomic(bytes));
    } else {
        int index = INDEX_FROM_BYTES(bytes);
        ptr_t * my_fl = ((GC_thread)GC_getspecific(GC_thread_key))
                        -> ptrfree_freelists + index;
        ptr_t my_entry = *my_fl;
        if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
            GC_PTR result = (GC_PTR)my_entry;
            *my_fl = obj_link(my_entry);
            return result;
        } else if ((word)my_entry - 1 < DIRECT_GRANULES) {
            *my_fl = my_entry + index + 1;
            return GC_malloc_atomic(bytes);
        } else {
            GC_generic_malloc_many(BYTES_FROM_INDEX(index), PTRFREE, my_fl);
            /* *my_fl is updated while the collector is excluded;  */
            /* the free list is always visible to the collector as */
            /* such.                                               */
            if (*my_fl == 0) return GC_oom_fn(bytes);
            return GC_local_malloc_atomic(bytes);
        }
    }
}
#ifdef GC_GCJ_SUPPORT

#include "include/gc_gcj.h"

#ifdef GC_ASSERTIONS
  extern GC_bool GC_gcj_malloc_initialized;
#endif

extern int GC_gcj_kind;

GC_PTR GC_local_gcj_malloc(size_t bytes,
                           void * ptr_to_struct_containing_descr)
{
    GC_ASSERT(GC_gcj_malloc_initialized);
    if (EXPECT(!SMALL_ENOUGH(bytes), 0)) {
        return GC_gcj_malloc(bytes, ptr_to_struct_containing_descr);
    } else {
        int index = INDEX_FROM_BYTES(bytes);
        ptr_t * my_fl = ((GC_thread)GC_getspecific(GC_thread_key))
                        -> gcj_freelists + index;
        ptr_t my_entry = *my_fl;
        if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
            GC_PTR result = (GC_PTR)my_entry;
            GC_ASSERT(!GC_incremental);
            /* We assert that any concurrent marker will stop us.     */
            /* Thus it is impossible for a mark procedure to see the  */
            /* allocation of the next object, but to see this object  */
            /* still containing a free list pointer.  Otherwise the   */
            /* marker might find a random "mark descriptor".          */
            *(volatile ptr_t *)my_fl = obj_link(my_entry);
            /* We must update the freelist before we store the pointer. */
            /* Otherwise a GC at this point would see a corrupted       */
            /* free list.                                               */
            /* A memory barrier is probably never needed, since the     */
            /* action of stopping this thread will cause prior writes   */
            /* to complete.                                             */
            *(void * volatile *)result = ptr_to_struct_containing_descr;
            return result;
        } else if ((word)my_entry - 1 < DIRECT_GRANULES) {
            *my_fl = my_entry + index + 1;
            return GC_gcj_malloc(bytes, ptr_to_struct_containing_descr);
        } else {
            GC_generic_malloc_many(BYTES_FROM_INDEX(index), GC_gcj_kind, my_fl);
            if (*my_fl == 0) return GC_oom_fn(bytes);
            return GC_local_gcj_malloc(bytes, ptr_to_struct_containing_descr);
        }
    }
}

#endif /* GC_GCJ_SUPPORT */
# else  /* !THREAD_LOCAL_ALLOC && !DBG_HDRS_ALL */

#   define GC_destroy_thread_local(t)

# endif /* !THREAD_LOCAL_ALLOC */
/*
 * We use signals to stop threads during GC.
 *
 * Suspended threads wait in signal handler for SIG_THR_RESTART.
 * That's more portable than semaphores or condition variables.
 * (We do use sem_post from a signal handler, but that should be portable.)
 *
 * The thread suspension signal SIG_SUSPEND is now defined in gc_priv.h.
 * Note that we can't just stop a thread; we need it to save its stack
 * pointer(s) and acknowledge.
 */

#ifndef SIG_THR_RESTART
#  if defined(HPUX_THREADS) || defined(GC_OSF1_THREADS)
#    define SIG_THR_RESTART _SIGRTMIN + 5
#  else
#    define SIG_THR_RESTART SIGXCPU
#  endif
#endif
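
/* Editor's note: since the definition above is guarded by #ifndef,  */
/* a client can select a different restart signal at build time,     */
/* e.g. with -DSIG_THR_RESTART=SIGUSR2 (a hypothetical example).     */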
sem_t GC_suspend_ack_sem;

#if !defined(HPUX_THREADS) && !defined(GC_OSF1_THREADS)
/*
To make sure that we're using LinuxThreads and not some other thread
package, we generate a dummy reference to `pthread_kill_other_threads_np'
(was `__pthread_initial_thread_bos' but that disappeared),
which is a symbol defined in LinuxThreads, but (hopefully) not in other
thread packages.
*/
void (*dummy_var_to_force_linux_threads)() = pthread_kill_other_threads_np;
#endif /* !HPUX_THREADS */

#if defined(SPARC) || defined(IA64)
  extern word GC_save_regs_in_stack();
#endif
long GC_nprocs = 1;     /* Number of processors.  We may not have      */
                        /* access to all of them, but this is as good  */
                        /* a guess as any ...                          */

#ifdef PARALLEL_MARK

# ifndef MAX_MARKERS
#   define MAX_MARKERS 16
# endif

static ptr_t marker_sp[MAX_MARKERS] = {0};

void * GC_mark_thread(void * id)
{
  word my_mark_no = 0;

  marker_sp[(word)id] = GC_approx_sp();
  for (;; ++my_mark_no) {
    /* GC_mark_no is passed only to allow GC_help_marker to terminate   */
    /* promptly.  This would be important if it were called from the    */
    /* signal handler or from the GC lock acquisition code.  Under      */
    /* Linux, it's not safe to call it from a signal handler, since it  */
    /* uses mutexes and condition variables.  Since it is called only   */
    /* here, the argument is unnecessary.                               */
    if (my_mark_no < GC_mark_no || my_mark_no > GC_mark_no + 2) {
        /* Resynchronize if we get far off, e.g. because GC_mark_no     */
        /* wrapped.                                                     */
        my_mark_no = GC_mark_no;
    }
#   ifdef DEBUG_THREADS
      GC_printf1("Starting mark helper for mark number %ld\n", my_mark_no);
#   endif
    GC_help_marker(my_mark_no);
  }
}

extern long GC_markers;         /* Number of mark threads we would     */
                                /* like to have.  Includes the         */
                                /* initiating thread.                  */

pthread_t GC_mark_threads[MAX_MARKERS];

#define PTHREAD_CREATE REAL_FUNC(pthread_create)

static void start_mark_threads()
{
    unsigned i;
    pthread_attr_t attr;

    if (GC_markers > MAX_MARKERS) {
        WARN("Limiting number of mark threads\n", 0);
        GC_markers = MAX_MARKERS;
    }
    if (0 != pthread_attr_init(&attr)) ABORT("pthread_attr_init failed");

    if (0 != pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED))
        ABORT("pthread_attr_setdetachstate failed");
#   ifdef CONDPRINT
      if (GC_print_stats) {
        GC_printf1("Starting %ld marker threads\n", GC_markers - 1);
      }
#   endif
    for (i = 0; i < GC_markers - 1; ++i) {
      if (0 != PTHREAD_CREATE(GC_mark_threads + i, &attr,
                              GC_mark_thread, (void *)(word)i)) {
        WARN("Marker thread creation failed, errno = %ld.\n", errno);
      }
    }
}

#else  /* !PARALLEL_MARK */

static __inline__ void start_mark_threads()
{
}

#endif /* !PARALLEL_MARK */
void GC_suspend_handler(int sig)
{
    int dummy;
    pthread_t my_thread = pthread_self();
    GC_thread me;
    sigset_t all_sigs;
    sigset_t old_sigs;
    int i;
    sigset_t mask;
#   ifdef PARALLEL_MARK
      word my_mark_no = GC_mark_no;
      /* Marker can't proceed until we acknowledge.  Thus this is      */
      /* guaranteed to be the mark_no corresponding to our             */
      /* suspension, i.e. the marker can't have incremented it yet.    */
#   endif

    if (sig != SIG_SUSPEND) ABORT("Bad signal in suspend_handler");

#if DEBUG_THREADS
    GC_printf1("Suspending 0x%x\n", my_thread);
#endif

    me = GC_lookup_thread(my_thread);
    /* The lookup here is safe, since I'm doing this on behalf  */
    /* of a thread which holds the allocation lock in order     */
    /* to stop the world.  Thus concurrent modification of the  */
    /* data structure is impossible.                            */
#   ifdef SPARC
      me -> stack_ptr = (ptr_t)GC_save_regs_in_stack();
#   else
      me -> stack_ptr = (ptr_t)(&dummy);
#   endif
#   ifdef IA64
      me -> backing_store_ptr = (ptr_t)GC_save_regs_in_stack();
#   endif

    /* Tell the thread that wants to stop the world that this   */
    /* thread has been stopped.  Note that sem_post() is        */
    /* the only async-signal-safe primitive in LinuxThreads.    */
    sem_post(&GC_suspend_ack_sem);

    /* Wait until that thread tells us to restart by sending        */
    /* this thread a SIG_THR_RESTART signal.                        */
    /* SIG_THR_RESTART should be masked at this point.  Thus there  */
    /* is no race.                                                  */
    if (sigfillset(&mask) != 0) ABORT("sigfillset() failed");
    if (sigdelset(&mask, SIG_THR_RESTART) != 0) ABORT("sigdelset() failed");
#   ifdef NO_SIGNALS
      if (sigdelset(&mask, SIGINT) != 0) ABORT("sigdelset() failed");
      if (sigdelset(&mask, SIGQUIT) != 0) ABORT("sigdelset() failed");
      if (sigdelset(&mask, SIGTERM) != 0) ABORT("sigdelset() failed");
      if (sigdelset(&mask, SIGABRT) != 0) ABORT("sigdelset() failed");
#   endif
    do {
        me->signal = 0;
        sigsuspend(&mask);             /* Wait for signal */
    } while (me->signal != SIG_THR_RESTART);

#if DEBUG_THREADS
    GC_printf1("Continuing 0x%x\n", my_thread);
#endif
}
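
/* Editor's sketch of the handshake implemented by the two handlers    */
/* together with GC_stop_world()/GC_start_world() below:               */
/*   stopper: pthread_kill(t, SIG_SUSPEND) for each thread t, then     */
/*            sem_wait(&GC_suspend_ack_sem) once per signalled thread. */
/*   stoppee: (GC_suspend_handler) record stack_ptr, post              */
/*            GC_suspend_ack_sem, then sigsuspend() until me->signal   */
/*            becomes SIG_THR_RESTART.                                 */
/*   stopper: collect, then pthread_kill(t, SIG_THR_RESTART).          */
/*   stoppee: (GC_restart_handler) set me->signal and return,          */
/*            unblocking the sigsuspend().                             */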
void GC_restart_handler(int sig)
{
    GC_thread me;

    if (sig != SIG_THR_RESTART) ABORT("Bad signal in restart_handler");

    /* Let the GC_suspend_handler() know that we got a SIG_THR_RESTART. */
    /* The lookup here is safe, since I'm doing this on behalf          */
    /* of a thread which holds the allocation lock in order             */
    /* to stop the world.  Thus concurrent modification of the          */
    /* data structure is impossible.                                    */
    me = GC_lookup_thread(pthread_self());
    me->signal = SIG_THR_RESTART;

    /*
    ** Note: even if we didn't do anything useful here,
    ** it would still be necessary to have a signal handler,
    ** rather than ignoring the signals; otherwise
    ** the signals would not be delivered at all, and
    ** would thus not interrupt the sigsuspend() above.
    */

#if DEBUG_THREADS
    GC_printf1("In GC_restart_handler for 0x%x\n", pthread_self());
#endif
}

/* Defining INSTALL_LOOPING_SEGV_HANDLER causes SIGSEGV and SIGBUS to  */
/* result in an infinite loop in a signal handler.  This can be very   */
/* useful for debugging, since (as of RH7) gdb still seems to have     */
/* serious problems with threads.                                      */
#ifdef INSTALL_LOOPING_SEGV_HANDLER
void GC_looping_handler(int sig)
{
    GC_printf3("Signal %ld in thread %lx, pid %ld\n",
               sig, pthread_self(), getpid());
    for (;;);
}
#endif
GC_bool GC_thr_initialized = FALSE;

# define THREAD_TABLE_SZ 128    /* Must be power of 2 */
volatile GC_thread GC_threads[THREAD_TABLE_SZ];

void GC_push_thread_structures GC_PROTO((void))
{
    GC_push_all((ptr_t)(GC_threads), (ptr_t)(GC_threads)+sizeof(GC_threads));
}

#ifdef THREAD_LOCAL_ALLOC
/* We must explicitly mark ptrfree and gcj free lists, since the free  */
/* list links wouldn't otherwise be found.  We also set them in the    */
/* normal free lists, since that involves touching less memory than if */
/* we scanned them normally.                                           */
void GC_mark_thread_local_free_lists(void)
{
    int i, j;
    GC_thread p;
    ptr_t q;

    for (i = 0; i < THREAD_TABLE_SZ; ++i) {
      for (p = GC_threads[i]; 0 != p; p = p -> next) {
        for (j = 1; j < NFREELISTS; ++j) {
          q = p -> ptrfree_freelists[j];
          if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
          q = p -> normal_freelists[j];
          if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
#         ifdef GC_GCJ_SUPPORT
            q = p -> gcj_freelists[j];
            if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
#         endif /* GC_GCJ_SUPPORT */
        }
      }
    }
}
#endif /* THREAD_LOCAL_ALLOC */
/* Add a thread to GC_threads.  We assume it wasn't already there.  */
/* Caller holds allocation lock.                                    */
GC_thread GC_new_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    GC_thread result;
    static struct GC_Thread_Rep first_thread;
    static GC_bool first_thread_used = FALSE;

    if (!first_thread_used) {
        result = &first_thread;
        first_thread_used = TRUE;
    } else {
        result = (struct GC_Thread_Rep *)
                 GC_INTERNAL_MALLOC(sizeof(struct GC_Thread_Rep), NORMAL);
    }
    if (result == 0) return(0);
    result -> id = id;
    result -> next = GC_threads[hv];
    GC_threads[hv] = result;
    GC_ASSERT(result -> flags == 0 && result -> thread_blocked == 0);
    return(result);
}
/* Delete a thread from GC_threads.  We assume it is there.  */
/* (The code intentionally traps if it wasn't.)              */
/* Caller holds allocation lock.                             */
void GC_delete_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (!pthread_equal(p -> id, id)) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
    GC_INTERNAL_FREE(p);
}

/* If a thread has been joined, but we have not yet             */
/* been notified, then there may be more than one thread        */
/* in the table with the same pthread id.                       */
/* This is OK, but we need a way to delete a specific one.      */
void GC_delete_gc_thread(pthread_t id, GC_thread gc_id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (p != gc_id) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
    GC_INTERNAL_FREE(p);
}
/* Return a GC_thread corresponding to a given thread_t.   */
/* Returns 0 if it's not there.                            */
/* Caller holds allocation lock or otherwise inhibits      */
/* updates.                                                */
/* If there is more than one thread with the given id we   */
/* return the most recent one.                             */
GC_thread GC_lookup_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];

    while (p != 0 && !pthread_equal(p -> id, id)) p = p -> next;
    return(p);
}
/* There seems to be a very rare thread stopping problem.  To help us */
/* debug that, we save the id and pid of the stopping thread.         */
pthread_t GC_stopping_thread;
int GC_stopping_pid;

/* Caller holds allocation lock.  */
void GC_stop_world()
{
    pthread_t my_thread = pthread_self();
    register int i;
    register GC_thread p;
    register int n_live_threads = 0;
    register int result;

    GC_stopping_thread = my_thread;    /* debugging only.  */
    GC_stopping_pid = getpid();        /* debugging only.  */
    /* Make sure all free list construction has stopped before we start. */
    /* No new construction can start, since free list construction is    */
    /* required to acquire and release the GC lock before it starts,     */
    /* and we have the lock.                                             */
#   ifdef PARALLEL_MARK
      GC_acquire_mark_lock();
      GC_ASSERT(GC_fl_builder_count == 0);
      /* We should have previously waited for it to become zero. */
#   endif /* PARALLEL_MARK */
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> id != my_thread) {
            if (p -> flags & FINISHED) continue;
            if (p -> thread_blocked) /* Will wait */ continue;
            n_live_threads++;
#if DEBUG_THREADS
            GC_printf1("Sending suspend signal to 0x%x\n", p -> id);
#endif
            result = pthread_kill(p -> id, SIG_SUSPEND);
            switch(result) {
                case ESRCH:
                    /* Not really there anymore.  Possible? */
                    n_live_threads--;
                    break;
                case 0:
                    break;
                default:
                    ABORT("pthread_kill failed");
            }
        }
      }
    }
    for (i = 0; i < n_live_threads; i++) {
        if (0 != sem_wait(&GC_suspend_ack_sem))
            ABORT("sem_wait in handler failed");
    }
#   ifdef PARALLEL_MARK
      GC_release_mark_lock();
#   endif
#if DEBUG_THREADS
    GC_printf1("World stopped 0x%x\n", pthread_self());
#endif
    GC_stopping_thread = 0;  /* debugging only */
}
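
/* Editor's note: the ESRCH case above decrements n_live_threads so   */
/* that it counts only threads that will actually run the suspend     */
/* handler; the sem_wait loop then consumes exactly one               */
/* acknowledgement per suspended thread and cannot block forever on   */
/* a thread that no longer exists.                                    */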
/* Caller holds allocation lock, and has held it continuously since  */
/* the world stopped.                                                */
void GC_start_world()
{
    pthread_t my_thread = pthread_self();
    register int i;
    register GC_thread p;
    register int n_live_threads = 0;
    register int result;

#   if DEBUG_THREADS
      GC_printf0("World starting\n");
#   endif

    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> id != my_thread) {
            if (p -> flags & FINISHED) continue;
            if (p -> thread_blocked) continue;
            n_live_threads++;
#if DEBUG_THREADS
            GC_printf1("Sending restart signal to 0x%x\n", p -> id);
#endif
            result = pthread_kill(p -> id, SIG_THR_RESTART);
            switch(result) {
                case ESRCH:
                    /* Not really there anymore.  Possible? */
                    n_live_threads--;
                    break;
                case 0:
                    break;
                default:
                    ABORT("pthread_kill failed");
            }
        }
      }
    }
#if DEBUG_THREADS
    GC_printf0("World started\n");
#endif
    GC_stopping_thread = 0;  /* debugging only */
}
# ifdef IA64
#   define IF_IA64(x) x
# else
#   define IF_IA64(x)
# endif
/* We hold allocation lock.  Should do exactly the right thing if the  */
/* world is stopped.  Should not fail if it isn't.                     */
void GC_push_all_stacks()
{
    int i;
    GC_thread p;
    ptr_t sp = GC_approx_sp();
    ptr_t lo, hi;
    /* On IA64, we also need to scan the register backing store. */
    IF_IA64(ptr_t bs_lo; ptr_t bs_hi;)
    pthread_t me = pthread_self();

    if (!GC_thr_initialized) GC_thr_init();
#if DEBUG_THREADS
    GC_printf1("Pushing stacks from thread 0x%lx\n", (unsigned long) me);
#endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> flags & FINISHED) continue;
        if (pthread_equal(p -> id, me)) {
#           ifdef SPARC
              lo = (ptr_t)GC_save_regs_in_stack();
#           else
              lo = GC_approx_sp();
#           endif
            IF_IA64(bs_hi = (ptr_t)GC_save_regs_in_stack();)
        } else {
            lo = p -> stack_ptr;
            IF_IA64(bs_hi = p -> backing_store_ptr;)
        }
        if ((p -> flags & MAIN_THREAD) == 0) {
            hi = p -> stack_end;
            IF_IA64(bs_lo = p -> backing_store_end);
        } else {
            /* The original stack. */
            hi = GC_stackbottom;
            IF_IA64(bs_lo = BACKING_STORE_BASE;)
        }
#if DEBUG_THREADS
        GC_printf3("Stack for thread 0x%lx = [%lx,%lx)\n",
                   (unsigned long) p -> id,
                   (unsigned long) lo, (unsigned long) hi);
#endif
        if (0 == lo) ABORT("GC_push_all_stacks: sp not set!\n");
#       ifdef STACK_GROWS_UP
          /* We got them backwards! */
          GC_push_all_stack(hi, lo);
#       else
          GC_push_all_stack(lo, hi);
#       endif
#       ifdef IA64
          if (pthread_equal(p -> id, me)) {
            GC_push_all_eager(bs_lo, bs_hi);
          } else {
            GC_push_all_stack(bs_lo, bs_hi);
          }
#       endif
      }
    }
}
#ifdef USE_PROC_FOR_LIBRARIES
int GC_segment_is_thread_stack(ptr_t lo, ptr_t hi)
{
    int i;
    GC_thread p;

#   ifdef PARALLEL_MARK
      for (i = 0; i < GC_markers; ++i) {
        if (marker_sp[i] > lo && marker_sp[i] < hi) return 1;
      }
#   endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (0 != p -> stack_end) {
#         ifdef STACK_GROWS_UP
            if (p -> stack_end >= lo && p -> stack_end < hi) return 1;
#         else /* STACK_GROWS_DOWN */
            if (p -> stack_end > lo && p -> stack_end <= hi) return 1;
#         endif
        }
      }
    }
    return 0;
}
#endif /* USE_PROC_FOR_LIBRARIES */
#ifdef LINUX_THREADS
/* Return the number of processors, or <= 0 if it can't be determined. */
int GC_get_nprocs()
{
    /* Should be "return sysconf(_SC_NPROCESSORS_ONLN);" but that  */
    /* appears to be buggy in many cases.                          */
    /* We look for lines "cpu<n>" in /proc/stat.                   */
#   define STAT_BUF_SIZE 4096
#   if defined(GC_USE_LD_WRAP)
#       define STAT_READ __real_read
#   else
#       define STAT_READ read
#   endif
    char stat_buf[STAT_BUF_SIZE];
    int f;
    char c;
    word result = 1;
        /* Some old kernels only have a single "cpu nnnn ..."  */
        /* entry in /proc/stat.  We identify those as          */
        /* uniprocessors.                                      */
    size_t i, len = 0;

    f = open("/proc/stat", O_RDONLY);
    if (f < 0 || (len = STAT_READ(f, stat_buf, STAT_BUF_SIZE)) < 100) {
        WARN("Couldn't read /proc/stat\n", 0);
        return -1;
    }
    for (i = 0; i < len - 100; ++i) {
        if (stat_buf[i] == '\n' && stat_buf[i+1] == 'c'
            && stat_buf[i+2] == 'p' && stat_buf[i+3] == 'u') {
            int cpu_no = atoi(stat_buf + i + 4);
            if (cpu_no >= result) result = cpu_no + 1;
        }
    }
    return result;
}
#endif /* LINUX_THREADS */
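
/* For reference (editor's illustration), /proc/stat starts with an  */
/* aggregate "cpu ..." line followed by one "cpu<n> ..." line per    */
/* processor, e.g.:                                                  */
/*     cpu  6742 0 2581 119025                                       */
/*     cpu0 3520 0 1345 59512                                        */
/*     cpu1 3222 0 1236 59513                                        */
/* The scan above requires a preceding '\n', so the aggregate first  */
/* line never matches; atoi() of the text after each "cpu" then      */
/* yields the processor number, and result becomes the highest       */
/* number seen plus one (at least 1).                                */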
/* We hold the allocation lock.  */
void GC_thr_init()
{
    int dummy;
    GC_thread t;
    struct sigaction act;

    if (GC_thr_initialized) return;
    GC_thr_initialized = TRUE;

    if (sem_init(&GC_suspend_ack_sem, 0, 0) != 0)
        ABORT("sem_init failed");

    act.sa_flags = SA_RESTART;
    if (sigfillset(&act.sa_mask) != 0) {
        ABORT("sigfillset() failed");
    }
#   ifdef NO_SIGNALS
      if (sigdelset(&act.sa_mask, SIGINT) != 0
          || sigdelset(&act.sa_mask, SIGQUIT) != 0
          || sigdelset(&act.sa_mask, SIGABRT) != 0
          || sigdelset(&act.sa_mask, SIGTERM) != 0) {
        ABORT("sigdelset() failed");
      }
#   endif

    /* SIG_THR_RESTART is unmasked by the handler when necessary.  */
    act.sa_handler = GC_suspend_handler;
    if (sigaction(SIG_SUSPEND, &act, NULL) != 0) {
        ABORT("Cannot set SIG_SUSPEND handler");
    }

    act.sa_handler = GC_restart_handler;
    if (sigaction(SIG_THR_RESTART, &act, NULL) != 0) {
        ABORT("Cannot set SIG_THR_RESTART handler");
    }
#   ifdef INSTALL_LOOPING_SEGV_HANDLER
      act.sa_handler = GC_looping_handler;
      if (sigaction(SIGSEGV, &act, NULL) != 0
          || sigaction(SIGBUS, &act, NULL) != 0) {
          ABORT("Cannot set SIGSEGV or SIGBUS looping handler");
      }
#   endif  /* INSTALL_LOOPING_SEGV_HANDLER */

    /* Add the initial thread, so we can stop it.  */
    t = GC_new_thread(pthread_self());
    t -> stack_ptr = (ptr_t)(&dummy);
    t -> flags = DETACHED | MAIN_THREAD;

    /* Set GC_nprocs.  */
    {
        char * nprocs_string = GETENV("GC_NPROCS");
        GC_nprocs = -1;
        if (nprocs_string != NULL) GC_nprocs = atoi(nprocs_string);
    }
    if (GC_nprocs <= 0) {
#     if defined(HPUX_THREADS)
        GC_nprocs = pthread_num_processors_np();
#     endif
#     if defined(OSF1_THREADS)
        GC_nprocs = 1;
#     endif
#     ifdef LINUX_THREADS
        GC_nprocs = GC_get_nprocs();
#     endif
    }
    if (GC_nprocs <= 0) {
        WARN("GC_get_nprocs() returned %ld\n", GC_nprocs);
        GC_nprocs = 2;
#       ifdef PARALLEL_MARK
          GC_markers = 1;
#       endif
    } else {
#       ifdef PARALLEL_MARK
          GC_markers = GC_nprocs;
#       endif
    }
#   ifdef PARALLEL_MARK
#     ifdef CONDPRINT
        if (GC_print_stats) {
          GC_printf2("Number of processors = %ld, "
                     "number of marker threads = %ld\n",
                     GC_nprocs, GC_markers);
        }
#     endif
      if (GC_markers == 1) {
        GC_parallel = FALSE;
#       ifdef CONDPRINT
          if (GC_print_stats) {
            GC_printf0("Single marker thread, turning off parallel marking\n");
          }
#       endif
      } else {
        GC_parallel = TRUE;
      }
#   endif
}
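
/* Editor's note: the GC_NPROCS environment variable read above       */
/* overrides processor detection entirely; e.g. running a program as  */
/*     GC_NPROCS=1 ./a.out                                            */
/* forces the uniprocessor paths (and, with PARALLEL_MARK, a single   */
/* marker, which turns parallel marking off).                         */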
/* Perform all initializations, including those that    */
/* may require allocation.                              */
/* Called as constructor, without allocation lock.      */
/* Must be called before a second thread is created.    */
void GC_init_parallel()
{
    if (parallel_initialized) return;
    parallel_initialized = TRUE;
    /* GC_init() calls us back, so set flag first.  */
    if (!GC_is_initialized) GC_init();
    /* If we are using a parallel marker, start the helper threads.  */
#   ifdef PARALLEL_MARK
      if (GC_parallel) start_mark_threads();
#   endif
    /* Initialize thread local free lists if used.  */
#   if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
      LOCK();
      GC_init_thread_local(GC_lookup_thread(pthread_self()));
      UNLOCK();
#   endif
}
int WRAP_FUNC(pthread_sigmask)(int how, const sigset_t *set, sigset_t *oset)
{
    sigset_t fudged_set;

    if (set != NULL && (how == SIG_BLOCK || how == SIG_SETMASK)) {
        fudged_set = *set;
        sigdelset(&fudged_set, SIG_SUSPEND);
        set = &fudged_set;
    }
    return(REAL_FUNC(pthread_sigmask)(how, set, oset));
}
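
/* Editor's note on the wrapper above: if a client could block        */
/* SIG_SUSPEND, GC_stop_world() would wait forever on                 */
/* GC_suspend_ack_sem for that thread's acknowledgement.  Removing    */
/* the signal from any mask installed via SIG_BLOCK or SIG_SETMASK    */
/* rules that out without otherwise changing the client's request.    */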
/* Wrappers for functions that are likely to block for an appreciable  */
/* length of time.  Must be called in pairs, if at all.                */
/* Nothing much beyond the system call itself should be executed       */
/* between these.                                                      */

void GC_start_blocking(void) {
#   define SP_SLOP 128
    GC_thread me;
    LOCK();
    me = GC_lookup_thread(pthread_self());
    GC_ASSERT(!(me -> thread_blocked));
#   ifdef SPARC
      me -> stack_ptr = (ptr_t)GC_save_regs_in_stack();
#   else
      me -> stack_ptr = (ptr_t)GC_approx_sp();
#   endif
#   ifdef IA64
      me -> backing_store_ptr = (ptr_t)GC_save_regs_in_stack() + SP_SLOP;
#   endif
    /* Add some slop to the stack pointer, since the wrapped call may  */
    /* end up pushing more callee-save registers.                      */
#   ifdef STACK_GROWS_UP
      me -> stack_ptr += SP_SLOP;
#   else
      me -> stack_ptr -= SP_SLOP;
#   endif
    me -> thread_blocked = TRUE;
    UNLOCK();
}

void GC_end_blocking(void) {
    GC_thread me;
    LOCK();   /* This will block if the world is stopped.  */
    me = GC_lookup_thread(pthread_self());
    GC_ASSERT(me -> thread_blocked);
    me -> thread_blocked = FALSE;
    UNLOCK();
}
/* A wrapper for the standard C sleep function  */
int WRAP_FUNC(sleep) (unsigned int seconds)
{
    int result;

    GC_start_blocking();
    result = REAL_FUNC(sleep)(seconds);
    GC_end_blocking();
    return result;
}
struct start_info {
    void *(*start_routine)(void *);
    void *arg;
    word flags;
    sem_t registered;           /* 1 ==> in our thread table, but  */
                                /* parent hasn't yet noticed.      */
};

/* Called at thread exit.                             */
/* Never called for main thread.  That's OK, since it */
/* results in at most a tiny one-time leak.  And      */
/* linuxthreads doesn't reclaim the main thread's     */
/* resources or id anyway.                            */
void GC_thread_exit_proc(void *arg)
{
    GC_thread me;

    LOCK();
    me = GC_lookup_thread(pthread_self());
    GC_destroy_thread_local(me);
    if (me -> flags & DETACHED) {
        GC_delete_thread(pthread_self());
    } else {
        me -> flags |= FINISHED;
    }
#   if defined(THREAD_LOCAL_ALLOC) && !defined(USE_PTHREAD_SPECIFIC) \
       && !defined(USE_HPUX_TLS) && !defined(DBG_HDRS_ALL)
      GC_remove_specific(GC_thread_key);
#   endif
    if (GC_incremental && GC_collection_in_progress()) {
        int old_gc_no = GC_gc_no;

        /* Make sure that no part of our stack is still on the mark  */
        /* stack, since it's about to be unmapped.                   */
        while (GC_incremental && GC_collection_in_progress()
               && old_gc_no == GC_gc_no) {
            ENTER_GC();
            GC_collect_a_little_inner(1);
            EXIT_GC();
            UNLOCK();
            sched_yield();
            LOCK();
        }
    }
    UNLOCK();
}
int WRAP_FUNC(pthread_join)(pthread_t thread, void **retval)
{
    int result;
    GC_thread thread_gc_id;

    LOCK();
    thread_gc_id = GC_lookup_thread(thread);
    /* This is guaranteed to be the intended one, since the thread id  */
    /* can't have been recycled by pthreads.                           */
    UNLOCK();
    result = REAL_FUNC(pthread_join)(thread, retval);
    if (result == 0) {
        LOCK();
        /* Here the pthread thread id may have been recycled. */
        GC_delete_gc_thread(thread, thread_gc_id);
        UNLOCK();
    }
    return result;
}
int
WRAP_FUNC(pthread_detach)(pthread_t thread)
{
    int result;
    GC_thread thread_gc_id;

    LOCK();
    thread_gc_id = GC_lookup_thread(thread);
    UNLOCK();
    result = REAL_FUNC(pthread_detach)(thread);
    if (result == 0) {
      LOCK();
      thread_gc_id -> flags |= DETACHED;
      /* Here the pthread thread id may have been recycled. */
      if (thread_gc_id -> flags & FINISHED) {
        GC_delete_gc_thread(thread, thread_gc_id);
      }
      UNLOCK();
    }
    return result;
}
void * GC_start_routine(void * arg)
{
    int dummy;
    struct start_info * si = arg;
    void * result;
    GC_thread me;
    pthread_t my_pthread;
    void *(*start)(void *);
    void *start_arg;

    my_pthread = pthread_self();
#   ifdef DEBUG_THREADS
      GC_printf1("Starting thread 0x%lx\n", my_pthread);
      GC_printf1("pid = %ld\n", (long) getpid());
      GC_printf1("sp = 0x%lx\n", (long) &arg);
#   endif
    LOCK();
    me = GC_new_thread(my_pthread);
    me -> flags = si -> flags;
    me -> stack_ptr = 0;
    /* me -> stack_end = GC_linux_stack_base(); -- currently (11/99)     */
    /* doesn't work because the stack base in /proc/self/stat is the     */
    /* one for the main thread.  There is a strong argument that that's  */
    /* a kernel bug, but a pervasive one.                                */
#   ifdef STACK_GROWS_DOWN
      me -> stack_end = (ptr_t)(((word)(&dummy) + (GC_page_size - 1))
                                & ~(GC_page_size - 1));
      me -> stack_ptr = me -> stack_end - 0x10;
      /* Needs to be plausible, since an asynchronous stack mark  */
      /* should not crash.                                        */
#   else
      me -> stack_end = (ptr_t)((word)(&dummy) & ~(GC_page_size - 1));
      me -> stack_ptr = me -> stack_end + 0x10;
#   endif
    /* This is dubious, since we may be more than a page into the stack, */
    /* and hence skip some of it, though it's not clear that matters.    */
#   ifdef IA64
      me -> backing_store_end = (ptr_t)
                        (GC_save_regs_in_stack() & ~(GC_page_size - 1));
      /* This is also < 100% convincing.  We should also read this  */
      /* from /proc, but the hook to do so isn't there yet.         */
#   endif /* IA64 */
    UNLOCK();
    start = si -> start_routine;
#   ifdef DEBUG_THREADS
      GC_printf1("start_routine = 0x%lx\n", start);
#   endif
    start_arg = si -> arg;
    sem_post(&(si -> registered));      /* Last action on si.   */
                                        /* OK to deallocate.    */
    pthread_cleanup_push(GC_thread_exit_proc, 0);
#   if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
      LOCK();
      GC_init_thread_local(me);
      UNLOCK();
#   endif
    result = (*start)(start_arg);
#if DEBUG_THREADS
    GC_printf1("Finishing thread 0x%x\n", pthread_self());
#endif
    me -> status = result;
    me -> flags |= FINISHED;
    pthread_cleanup_pop(1);
    /* Cleanup acquires lock, ensuring that we can't exit          */
    /* while a collection that thinks we're alive is trying to     */
    /* stop us.                                                    */
    return(result);
}
int
WRAP_FUNC(pthread_create)(pthread_t *new_thread,
                          const pthread_attr_t *attr,
                          void *(*start_routine)(void *), void *arg)
{
    int result;
    GC_thread t;
    pthread_t my_new_thread;
    int detachstate;
    word my_flags = 0;
    struct start_info * si;
        /* This is otherwise saved only in an area mmapped by the thread */
        /* library, which isn't visible to the collector.                */

    LOCK();
    si = (struct start_info *)GC_INTERNAL_MALLOC(sizeof(struct start_info),
                                                 NORMAL);
    UNLOCK();
    if (!parallel_initialized) GC_init_parallel();
    if (0 == si) return(ENOMEM);
    sem_init(&(si -> registered), 0, 0);
    si -> start_routine = start_routine;
    si -> arg = arg;
    LOCK();
    if (!GC_thr_initialized) GC_thr_init();
    if (NULL == attr) {
        detachstate = PTHREAD_CREATE_JOINABLE;
    } else {
        pthread_attr_getdetachstate(attr, &detachstate);
    }
    if (PTHREAD_CREATE_DETACHED == detachstate) my_flags |= DETACHED;
    si -> flags = my_flags;
    UNLOCK();
#   ifdef DEBUG_THREADS
      GC_printf1("About to start new thread from thread 0x%X\n",
                 pthread_self());
#   endif
    result = REAL_FUNC(pthread_create)(new_thread, attr, GC_start_routine, si);
#   ifdef DEBUG_THREADS
      GC_printf1("Started thread 0x%X\n", *new_thread);
#   endif
    /* Wait until child has been added to the thread table.            */
    /* This also ensures that we hold onto si until the child is done  */
    /* with it.  Thus it doesn't matter whether it is otherwise        */
    /* visible to the collector.                                       */
    while (0 != sem_wait(&(si -> registered))) {
        if (EINTR != errno) ABORT("sem_wait failed");
    }
    sem_destroy(&(si -> registered));
    LOCK();
    GC_INTERNAL_FREE(si);
    UNLOCK();
    return(result);
}
#ifdef GENERIC_COMPARE_AND_SWAP
  pthread_mutex_t GC_compare_and_swap_lock = PTHREAD_MUTEX_INITIALIZER;

  GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                  GC_word old, GC_word new_val)
  {
    GC_bool result;
    pthread_mutex_lock(&GC_compare_and_swap_lock);
    if (*addr == old) {
      *addr = new_val;
      result = TRUE;
    } else {
      result = FALSE;
    }
    pthread_mutex_unlock(&GC_compare_and_swap_lock);
    return result;
  }

  GC_word GC_atomic_add(volatile GC_word *addr, GC_word how_much)
  {
    GC_word old;
    pthread_mutex_lock(&GC_compare_and_swap_lock);
    old = *addr;
    *addr = old + how_much;
    pthread_mutex_unlock(&GC_compare_and_swap_lock);
    return old;
  }
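
  /* Editor's note: under GENERIC_COMPARE_AND_SWAP the two primitives */
  /* above are atomic only with respect to each other, by serializing */
  /* all callers through GC_compare_and_swap_lock.  This is a         */
  /* portable fallback for targets without native atomic operations,  */
  /* not a lock-free implementation.                                  */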
#endif /* GENERIC_COMPARE_AND_SWAP */

/* Spend a few cycles in a way that can't introduce contention with  */
/* other threads.                                                    */
void GC_pause()
{
    int i;
    volatile word dummy = 0;

    for (i = 0; i < 10; ++i) {
#     ifdef __GNUC__
        __asm__ __volatile__ (" " : : : "memory");
#     else
        /* Something that's unlikely to be optimized away. */
        GC_noop(++dummy);
#     endif
    }
}

#define SPIN_MAX 1024   /* Maximum number of calls to GC_pause before  */
                        /* giving up.                                  */

VOLATILE GC_bool GC_collecting = 0;
                        /* A hint that we're in the collector and  */
                        /* holding the allocation lock for an      */
                        /* extended period.                        */
#if !defined(USE_SPIN_LOCK) || defined(PARALLEL_MARK)
/* If we don't want to use the below spinlock implementation, either  */
/* because we don't have a GC_test_and_set implementation, or because */
/* we don't want to risk sleeping, we can still try spinning on       */
/* pthread_mutex_trylock for a while.  This appears to be very        */
/* beneficial in many cases.                                          */
/* I suspect that under high contention this is nearly always better  */
/* than the spin lock.  But it's a bit slower on a uniprocessor.      */
/* Hence we still default to the spin lock.                           */
/* This is also used to acquire the mark lock for the parallel        */
/* marker.                                                            */

/* Here we use a strict exponential backoff scheme.  I don't know     */
/* whether that's better or worse than the above.  We eventually      */
/* yield by calling pthread_mutex_lock(); it never makes sense to     */
/* explicitly sleep.                                                  */

void GC_generic_lock(pthread_mutex_t * lock)
{
    unsigned pause_length = 1;
    unsigned i;

    if (0 == pthread_mutex_trylock(lock)) return;
    for (; pause_length <= SPIN_MAX; pause_length <<= 1) {
        for (i = 0; i < pause_length; ++i) {
            GC_pause();
        }
        switch(pthread_mutex_trylock(lock)) {
            case 0:
                return;
            case EBUSY:
                break;
            default:
                ABORT("Unexpected error from pthread_mutex_trylock");
        }
    }
    pthread_mutex_lock(lock);
}
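
/* Editor's arithmetic for the loop above: pause_length doubles       */
/* through 1, 2, 4, ..., 1024 (== SPIN_MAX), so at most 2047          */
/* GC_pause() calls, each of roughly ten no-op iterations, are spent  */
/* spinning before the thread finally blocks in pthread_mutex_lock(). */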
#endif /* !USE_SPIN_LOCK || PARALLEL_MARK */

#if defined(USE_SPIN_LOCK)

/* Reasonably fast spin locks.  Basically the same implementation  */
/* as STL alloc.h.  This isn't really the right way to do this,    */
/* but until the POSIX scheduling mess gets straightened out ...   */

volatile unsigned int GC_allocate_lock = 0;

void GC_lock()
{
#   define low_spin_max 30  /* spin cycles if we suspect uniprocessor  */
#   define high_spin_max SPIN_MAX /* spin cycles for multiprocessor    */
    static unsigned spin_max = low_spin_max;
    unsigned my_spin_max;
    static unsigned last_spins = 0;
    unsigned my_last_spins;
    int i;

    if (!GC_test_and_set(&GC_allocate_lock)) {
        return;
    }
    my_spin_max = spin_max;
    my_last_spins = last_spins;
    for (i = 0; i < my_spin_max; i++) {
        if (GC_collecting || GC_nprocs == 1) goto yield;
        if (i < my_last_spins/2 || GC_allocate_lock) {
            GC_pause();
            continue;
        }
        if (!GC_test_and_set(&GC_allocate_lock)) {
            /*
             * got it!
             * Spinning worked.  Thus we're probably not being scheduled
             * against the other process with which we were contending.
             * Thus it makes sense to spin longer the next time.
             */
            last_spins = i;
            spin_max = high_spin_max;
            return;
        }
    }
    /* We are probably being scheduled against the other process.  Sleep. */
    spin_max = low_spin_max;
yield:
    for (i = 0;; ++i) {
        if (!GC_test_and_set(&GC_allocate_lock)) {
            return;
        }
#       define SLEEP_THRESHOLD 12
            /* nanosleep(<= 2ms) just spins under Linux.  We  */
            /* want to be careful to avoid that behavior.     */
        if (i < SLEEP_THRESHOLD) {
            sched_yield();
        } else {
            struct timespec ts;

            if (i > 24) i = 24;
                /* Don't wait for more than about 15msecs, even  */
                /* under extreme contention.                     */
            ts.tv_sec = 0;
            ts.tv_nsec = 1 << i;
            nanosleep(&ts, 0);
        }
    }
}
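
/* Editor's arithmetic for the sleep above: the wait grows as 2^i  */
/* nanoseconds, capped at i == 24, i.e. 2^24 ns, about 16.8 ms,    */
/* which is the "about 15msecs" bound cited in the comment.        */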
#else  /* !USE_SPINLOCK */

void GC_lock()
{
    if (1 == GC_nprocs || GC_collecting) {
        pthread_mutex_lock(&GC_allocate_ml);
    } else {
        GC_generic_lock(&GC_allocate_ml);
    }
}

#endif /* !USE_SPINLOCK */
#if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)

#ifdef GC_ASSERTIONS
  pthread_t GC_mark_lock_holder = NO_THREAD;
#endif

#ifdef IA64
  /* Ugly workaround for a linux threads bug in the final versions    */
  /* of glibc2.1.  Pthread_mutex_trylock sets the mutex owner         */
  /* field even when it fails to acquire the mutex.  This causes      */
  /* pthread_cond_wait to die.  Remove for glibc2.2.                  */
  /* According to the man page, we should use                         */
  /* PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP, but that isn't actually */
  /* defined.                                                         */
  static pthread_mutex_t mark_mutex =
        {0, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, {0, 0}};
#else
  static pthread_mutex_t mark_mutex = PTHREAD_MUTEX_INITIALIZER;
#endif

static pthread_cond_t builder_cv = PTHREAD_COND_INITIALIZER;
void GC_acquire_mark_lock()
{
/*
    if (pthread_mutex_lock(&mark_mutex) != 0) {
        ABORT("pthread_mutex_lock failed");
    }
*/
    GC_generic_lock(&mark_mutex);
#   ifdef GC_ASSERTIONS
      GC_mark_lock_holder = pthread_self();
#   endif
}

void GC_release_mark_lock()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
#   ifdef GC_ASSERTIONS
      GC_mark_lock_holder = NO_THREAD;
#   endif
    if (pthread_mutex_unlock(&mark_mutex) != 0) {
        ABORT("pthread_mutex_unlock failed");
    }
}
/* Collector must wait for free-list builders for two reasons:          */
/* 1) Mark bits may still be getting examined without lock.             */
/* 2) Partial free lists referenced only by locals may not be scanned   */
/*    correctly, e.g. if they contain "pointer-free" objects, since the */
/*    free-list link may be ignored.                                    */
void GC_wait_builder()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
#   ifdef GC_ASSERTIONS
      GC_mark_lock_holder = NO_THREAD;
#   endif
    if (pthread_cond_wait(&builder_cv, &mark_mutex) != 0) {
        ABORT("pthread_cond_wait failed");
    }
    GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
#   ifdef GC_ASSERTIONS
      GC_mark_lock_holder = pthread_self();
#   endif
}

void GC_wait_for_reclaim()
{
    GC_acquire_mark_lock();
    while (GC_fl_builder_count > 0) {
        GC_wait_builder();
    }
    GC_release_mark_lock();
}

void GC_notify_all_builder()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
    if (pthread_cond_broadcast(&builder_cv) != 0) {
        ABORT("pthread_cond_broadcast failed");
    }
}

#endif /* PARALLEL_MARK || THREAD_LOCAL_ALLOC */
#ifdef PARALLEL_MARK

static pthread_cond_t mark_cv = PTHREAD_COND_INITIALIZER;

void GC_wait_marker()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
#   ifdef GC_ASSERTIONS
      GC_mark_lock_holder = NO_THREAD;
#   endif
    if (pthread_cond_wait(&mark_cv, &mark_mutex) != 0) {
        ABORT("pthread_cond_wait failed");
    }
    GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
#   ifdef GC_ASSERTIONS
      GC_mark_lock_holder = pthread_self();
#   endif
}

void GC_notify_all_marker()
{
    if (pthread_cond_broadcast(&mark_cv) != 0) {
        ABORT("pthread_cond_broadcast failed");
    }
}

#endif /* PARALLEL_MARK */

# endif /* LINUX_THREADS */