/*
 * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1998 by Fergus Henderson.  All rights reserved.
 * Copyright (c) 2000-2001 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/*
 * Support code for LinuxThreads, the clone()-based kernel
 * thread package for Linux which is included in libc6.
 *
 * This code relies on implementation details of LinuxThreads
 * (i.e. properties not guaranteed by the Pthread standard),
 * though this version now does less of that than the other Pthreads
 * support code.
 *
 * Note that there is a lot of code duplication between linux_threads.c
 * and thread support for some of the other Posix platforms; any changes
 * made here may need to be reflected there too.
 *
 * Linux_threads.c now also includes some code to support HPUX and
 * OSF1 (Compaq Tru64 Unix, really).  The OSF1 support is not yet
 * functional.  The OSF1 code is based on Eric Benson's
 * patch, though that was originally against hpux_irix_threads.  The code
 * here is completely untested.  With 0.0000001% probability, it might
 * actually work.
 *
 * Eric also suggested an alternate basis for a lock implementation in
 * his code:
 * + #elif defined(OSF1)
 * +    unsigned long GC_allocate_lock = 0;
 * +    msemaphore GC_allocate_semaphore;
 * + #  define GC_TRY_LOCK() \
 * +    ((msem_lock(&GC_allocate_semaphore, MSEM_IF_NOWAIT) == 0) \
 * +     ? (GC_allocate_lock = 1) \
 * +     : 0)
 * + #  define GC_LOCK_TAKEN GC_allocate_lock
 */
/* #define DEBUG_THREADS 1 */

/* ANSI C requires that a compilation unit contains something */

# include "gc.h"

# if defined(GC_PTHREADS) && !defined(GC_SOLARIS_THREADS) \
     && !defined(GC_IRIX_THREADS)

# include "private/gc_priv.h"

# if defined(GC_HPUX_THREADS) && !defined(USE_PTHREAD_SPECIFIC) \
     && !defined(USE_HPUX_TLS)
#   define USE_HPUX_TLS
# endif
# ifdef THREAD_LOCAL_ALLOC
#   if !defined(USE_PTHREAD_SPECIFIC) && !defined(USE_HPUX_TLS)
#     include "private/specific.h"
#   endif
#   if defined(USE_PTHREAD_SPECIFIC)
#     define GC_getspecific pthread_getspecific
#     define GC_setspecific pthread_setspecific
#     define GC_key_create pthread_key_create
      typedef pthread_key_t GC_key_t;
#   endif
#   if defined(USE_HPUX_TLS)
#     define GC_getspecific(x) (x)
#     define GC_setspecific(key, v) ((key) = (v), 0)
#     define GC_key_create(key, d) 0
      typedef void * GC_key_t;
#   endif
# endif
# include <stdlib.h>
# include <pthread.h>
# include <sched.h>
# include <time.h>
# include <errno.h>
# include <unistd.h>
# include <sys/mman.h>
# include <sys/time.h>
# include <semaphore.h>
# include <signal.h>
# include <sys/types.h>
# include <sys/stat.h>
# include <fcntl.h>

#ifndef __GNUC__
#   define __inline__
#endif
#ifdef GC_USE_LD_WRAP
#   define WRAP_FUNC(f) __wrap_##f
#   define REAL_FUNC(f) __real_##f
#else
#   define WRAP_FUNC(f) GC_##f
#   define REAL_FUNC(f) f
#   undef pthread_create
#   undef pthread_sigmask
#   undef pthread_join
#   undef pthread_detach
#endif
void GC_thr_init();

#if 0
void GC_print_sig_mask()
{
    sigset_t blocked;
    int i;

    if (pthread_sigmask(SIG_BLOCK, NULL, &blocked) != 0)
        ABORT("pthread_sigmask");
    GC_printf0("Blocked: ");
    for (i = 1; i <= MAXSIG; i++) {
        if (sigismember(&blocked, i)) { GC_printf1("%ld ", (long) i); }
    }
    GC_printf0("\n");
}
#endif
/* We use the allocation lock to protect thread-related data structures. */

/* The set of all known threads.  We intercept thread creation and     */
/* joins.                                                               */
/* Protected by allocation/GC lock.                                     */
/* Some of this should be declared volatile, but that's inconsistent   */
/* with some library routine declarations.                              */
typedef struct GC_Thread_Rep {
    struct GC_Thread_Rep * next;  /* More recently allocated threads    */
                                  /* with a given pthread id come       */
                                  /* first.  (All but the first are     */
                                  /* guaranteed to be dead, but we may  */
                                  /* not yet have registered the join.) */
    pthread_t id;
    short flags;
#       define FINISHED 1       /* Thread has exited.                   */
#       define DETACHED 2       /* Thread is intended to be detached.   */
#       define MAIN_THREAD 4    /* True for the original thread only.   */
    short thread_blocked;       /* Protected by GC lock.                */
                                /* Treated as a boolean value.  If set, */
                                /* thread will acquire GC lock before   */
                                /* doing any pointer manipulations, and */
                                /* has set its sp value.  Thus it does  */
                                /* not need to be sent a signal to stop */
                                /* it.                                  */
    ptr_t stack_end;            /* Cold end of the stack.               */
    ptr_t stack_ptr;            /* Valid only when stopped.             */
#   ifdef IA64
        ptr_t backing_store_end;
        ptr_t backing_store_ptr;
#   endif
    int signal;
    void * status;              /* The value returned from the thread.  */
                                /* Used only to avoid premature         */
                                /* reclamation of any data it might     */
                                /* reference.                           */
#   ifdef THREAD_LOCAL_ALLOC
#       if CPP_WORDSZ == 64 && defined(ALIGN_DOUBLE)
#           define GRANULARITY 16
#           define NFREELISTS 49
#       else
#           define GRANULARITY 8
#           define NFREELISTS 65
#       endif
        /* The ith free list corresponds to size i*GRANULARITY */
#       define INDEX_FROM_BYTES(n) ((ADD_SLOP(n) + GRANULARITY - 1)/GRANULARITY)
#       define BYTES_FROM_INDEX(i) ((i) * GRANULARITY - EXTRA_BYTES)
#       define SMALL_ENOUGH(bytes) (ADD_SLOP(bytes) <= \
                                    (NFREELISTS-1)*GRANULARITY)
        ptr_t ptrfree_freelists[NFREELISTS];
        ptr_t normal_freelists[NFREELISTS];
#       ifdef GC_GCJ_SUPPORT
          ptr_t gcj_freelists[NFREELISTS];
#       endif
        /* Free lists contain either a pointer or a small count        */
        /* reflecting the number of granules allocated at that         */
        /* size.                                                       */
        /* 0 ==> thread-local allocation in use, free list             */
        /*       empty.                                                */
        /* > 0, <= DIRECT_GRANULES ==> Using global allocation,        */
        /*       too few objects of this size have been                */
        /*       allocated by this thread.                             */
        /* >= HBLKSIZE  ==> pointer to nonempty free list.             */
        /* > DIRECT_GRANULES, < HBLKSIZE ==> transition to             */
        /*       local alloc, equivalent to 0.                         */
#       define DIRECT_GRANULES (HBLKSIZE/GRANULARITY)
                /* Don't use local free lists for up to this much      */
                /* allocation.                                         */
#   endif
} * GC_thread;
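
/* A minimal sketch (not part of the original source) of the free-list */
/* entry encoding documented above; GC_local_malloc below performs     */
/* exactly this case analysis inline.  The enum and function names     */
/* are illustrative only.                                              */
#if 0
  enum fl_state { FL_EMPTY, FL_COUNTING, FL_TRANSITION, FL_POINTER };

  static enum fl_state classify_fl_entry(ptr_t entry)
  {
      word v = (word)entry;

      if (v == 0) return FL_EMPTY;          /* local alloc in use, list empty */
      if (v <= DIRECT_GRANULES) return FL_COUNTING;
                                            /* global alloc, counting requests */
      if (v < HBLKSIZE) return FL_TRANSITION;   /* treated like empty  */
      return FL_POINTER;                    /* nonempty free list      */
  }
#endif
/* Worked example of the size-class macros, assuming GRANULARITY == 8  */
/* and EXTRA_BYTES == 0 (so ADD_SLOP is the identity): a 20-byte       */
/* request gives INDEX_FROM_BYTES(20) == (20+7)/8 == 3, and            */
/* BYTES_FROM_INDEX(3) == 24, i.e. the list of 24-byte objects.        */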
GC_thread GC_lookup_thread(pthread_t id);

static GC_bool parallel_initialized = FALSE;

void GC_init_parallel();

# if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)

/* We don't really support thread-local allocation with DBG_HDRS_ALL */

#ifdef USE_HPUX_TLS
  __thread
#endif
GC_key_t GC_thread_key;

static GC_bool keys_initialized;
/* Recover the contents of the freelist array fl into the global one gfl.*/
/* Note that the indexing scheme differs, in that gfl has finer size   */
/* resolution, even if not all entries are used.                       */
/* We hold the allocator lock.                                         */
static void return_freelists(ptr_t *fl, ptr_t *gfl)
{
    int i;
    ptr_t q, *qptr;
    size_t nwords;

    for (i = 1; i < NFREELISTS; ++i) {
        nwords = i * (GRANULARITY/sizeof(word));
        qptr = fl + i;
        q = *qptr;
        if ((word)q < HBLKSIZE) continue;
        if (gfl[nwords] == 0) {
            gfl[nwords] = q;
        } else {
            /* Concatenate: */
            for (; (word)q >= HBLKSIZE; qptr = &(obj_link(q)), q = *qptr);
            GC_ASSERT(0 == q);
            *qptr = gfl[nwords];
            gfl[nwords] = fl[i];
        }
        /* Clear fl[i], since the thread structure may hang around.    */
        /* Do it in a way that is likely to trap if we access it.      */
        fl[i] = (ptr_t)HBLKSIZE;
    }
}
/* We statically allocate a single "size 0" object.  It is linked to   */
/* itself, and is thus repeatedly reused for all size 0 allocation     */
/* requests.  (Size 0 gcj allocation requests are incorrect, and       */
/* we arrange for those to fault asap.)                                */
static ptr_t size_zero_object = (ptr_t)(&size_zero_object);
/* Each thread structure must be initialized.   */
/* This call must be made from the new thread.  */
/* Caller holds allocation lock.                */
void GC_init_thread_local(GC_thread p)
{
    int i;

    if (!keys_initialized) {
        if (0 != GC_key_create(&GC_thread_key, 0)) {
            ABORT("Failed to create key for local allocator");
        }
        keys_initialized = TRUE;
    }
    if (0 != GC_setspecific(GC_thread_key, p)) {
        ABORT("Failed to set thread specific allocation pointers");
    }
    for (i = 1; i < NFREELISTS; ++i) {
        p -> ptrfree_freelists[i] = (ptr_t)1;
        p -> normal_freelists[i] = (ptr_t)1;
#       ifdef GC_GCJ_SUPPORT
          p -> gcj_freelists[i] = (ptr_t)1;
#       endif
    }
    /* Set up the size 0 free lists.    */
    p -> ptrfree_freelists[0] = (ptr_t)(&size_zero_object);
    p -> normal_freelists[0] = (ptr_t)(&size_zero_object);
#   ifdef GC_GCJ_SUPPORT
        p -> gcj_freelists[0] = (ptr_t)(-1);
#   endif
}
#ifdef GC_GCJ_SUPPORT
  extern ptr_t * GC_gcjobjfreelist;
#endif

/* We hold the allocator lock.  */
void GC_destroy_thread_local(GC_thread p)
{
    /* We currently only do this from the thread itself.        */
    GC_ASSERT(GC_getspecific(GC_thread_key) == (void *)p);
    return_freelists(p -> ptrfree_freelists, GC_aobjfreelist);
    return_freelists(p -> normal_freelists, GC_objfreelist);
#   ifdef GC_GCJ_SUPPORT
        return_freelists(p -> gcj_freelists, GC_gcjobjfreelist);
#   endif
}
extern GC_PTR GC_generic_malloc_many();

GC_PTR GC_local_malloc(size_t bytes)
{
    if (EXPECT(!SMALL_ENOUGH(bytes), 0)) {
        return(GC_malloc(bytes));
    } else {
        int index = INDEX_FROM_BYTES(bytes);
        ptr_t * my_fl;
        ptr_t my_entry;
        GC_key_t k = GC_thread_key;
        void * tsd;

#       if defined(REDIRECT_MALLOC) && !defined(USE_PTHREAD_SPECIFIC) \
           || !defined(__GNUC__)
            if (EXPECT(0 == k, 0)) {
                /* This can happen if we get called when the world is   */
                /* being initialized.  Whether we can actually complete */
                /* the initialization then is unclear.                  */
                GC_init_parallel();
                k = GC_thread_key;
            }
#       endif
        tsd = GC_getspecific(GC_thread_key);
#       ifdef GC_ASSERTIONS
          LOCK();
          GC_ASSERT(tsd == (void *)GC_lookup_thread(pthread_self()));
          UNLOCK();
#       endif
        my_fl = ((GC_thread)tsd) -> normal_freelists + index;
        my_entry = *my_fl;
        if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
            ptr_t next = obj_link(my_entry);
            GC_PTR result = (GC_PTR)my_entry;
            *my_fl = next;
            obj_link(my_entry) = 0;
            PREFETCH_FOR_WRITE(next);
            return result;
        } else if ((word)my_entry - 1 < DIRECT_GRANULES) {
            *my_fl = my_entry + index + 1;
            return GC_malloc(bytes);
        } else {
            GC_generic_malloc_many(BYTES_FROM_INDEX(index), NORMAL, my_fl);
            if (*my_fl == 0) return GC_oom_fn(bytes);
            return GC_local_malloc(bytes);
        }
    }
}
GC_PTR GC_local_malloc_atomic(size_t bytes)
{
    if (EXPECT(!SMALL_ENOUGH(bytes), 0)) {
        return(GC_malloc_atomic(bytes));
    } else {
        int index = INDEX_FROM_BYTES(bytes);
        ptr_t * my_fl = ((GC_thread)GC_getspecific(GC_thread_key))
                        -> ptrfree_freelists + index;
        ptr_t my_entry = *my_fl;
        if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
            GC_PTR result = (GC_PTR)my_entry;
            *my_fl = obj_link(my_entry);
            return result;
        } else if ((word)my_entry - 1 < DIRECT_GRANULES) {
            *my_fl = my_entry + index + 1;
            return GC_malloc_atomic(bytes);
        } else {
            GC_generic_malloc_many(BYTES_FROM_INDEX(index), PTRFREE, my_fl);
            /* *my_fl is updated while the collector is excluded;       */
            /* the free list is always visible to the collector as      */
            /* such.                                                    */
            if (*my_fl == 0) return GC_oom_fn(bytes);
            return GC_local_malloc_atomic(bytes);
        }
    }
}
#ifdef GC_GCJ_SUPPORT

#include "include/gc_gcj.h"

#ifdef GC_ASSERTIONS
  extern GC_bool GC_gcj_malloc_initialized;
#endif

extern int GC_gcj_kind;

GC_PTR GC_local_gcj_malloc(size_t bytes,
                           void * ptr_to_struct_containing_descr)
{
    GC_ASSERT(GC_gcj_malloc_initialized);
    if (EXPECT(!SMALL_ENOUGH(bytes), 0)) {
        return GC_gcj_malloc(bytes, ptr_to_struct_containing_descr);
    } else {
        int index = INDEX_FROM_BYTES(bytes);
        ptr_t * my_fl = ((GC_thread)GC_getspecific(GC_thread_key))
                        -> gcj_freelists + index;
        ptr_t my_entry = *my_fl;
        if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
            GC_PTR result = (GC_PTR)my_entry;
            GC_ASSERT(!GC_incremental);
            /* We assert that any concurrent marker will stop us.       */
            /* Thus it is impossible for a mark procedure to see the    */
            /* allocation of the next object, but to see this object    */
            /* still containing a free list pointer.  Otherwise the     */
            /* marker might find a random "mark descriptor".            */
            *(volatile ptr_t *)my_fl = obj_link(my_entry);
            /* We must update the freelist before we store the pointer. */
            /* Otherwise a GC at this point would see a corrupted       */
            /* free list.                                               */
            /* A memory barrier is probably never needed, since the     */
            /* action of stopping this thread will cause prior writes   */
            /* to complete.                                             */
            *(void * volatile *)result = ptr_to_struct_containing_descr;
            return result;
        } else if ((word)my_entry - 1 < DIRECT_GRANULES) {
            *my_fl = my_entry + index + 1;
            return GC_gcj_malloc(bytes, ptr_to_struct_containing_descr);
        } else {
            GC_generic_malloc_many(BYTES_FROM_INDEX(index), GC_gcj_kind, my_fl);
            if (*my_fl == 0) return GC_oom_fn(bytes);
            return GC_local_gcj_malloc(bytes, ptr_to_struct_containing_descr);
        }
    }
}

#endif /* GC_GCJ_SUPPORT */

# else  /* !THREAD_LOCAL_ALLOC && !DBG_HDRS_ALL */

#   define GC_destroy_thread_local(t)

# endif /* !THREAD_LOCAL_ALLOC */
/*
 * We use signals to stop threads during GC.
 *
 * Suspended threads wait in signal handler for SIG_THR_RESTART.
 * That's more portable than semaphores or condition variables.
 * (We do use sem_post from a signal handler, but that should be portable.)
 *
 * The thread suspension signal SIG_SUSPEND is now defined in gc_priv.h.
 * Note that we can't just stop a thread; we need it to save its stack
 * pointer(s) and acknowledge.
 */

#ifndef SIG_THR_RESTART
#  if defined(GC_HPUX_THREADS) || defined(GC_OSF1_THREADS)
#    define SIG_THR_RESTART _SIGRTMIN + 5
#  else
#    define SIG_THR_RESTART SIGXCPU
#  endif
#endif
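
/* Since the definition above is guarded by #ifndef, a client build    */
/* can substitute its own restart signal.  A hypothetical example      */
/* (any signal not otherwise used by the application or by the thread  */
/* library itself would do):                                           */
/*                                                                     */
/*      cc -DSIG_THR_RESTART=SIGXFSZ -c linux_threads.c                */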
sem_t GC_suspend_ack_sem;

#if 0
/*
To make sure that we're using LinuxThreads and not some other thread
package, we generate a dummy reference to `pthread_kill_other_threads_np'
(was `__pthread_initial_thread_bos' but that disappeared),
which is a symbol defined in LinuxThreads, but (hopefully) not in other
thread packages.

We no longer do this, since this code is now portable enough that it might
actually work for something else.
*/
void (*dummy_var_to_force_linux_threads)() = pthread_kill_other_threads_np;
#endif /* 0 */

#if defined(SPARC) || defined(IA64)
  extern word GC_save_regs_in_stack();
#endif

long GC_nprocs = 1;     /* Number of processors.  We may not have      */
                        /* access to all of them, but this is as good  */
                        /* a guess as any ...                          */
#ifdef PARALLEL_MARK

# ifndef MAX_MARKERS
#   define MAX_MARKERS 16
# endif

static ptr_t marker_sp[MAX_MARKERS] = {0};

void * GC_mark_thread(void * id)
{
  word my_mark_no = 0;

  marker_sp[(word)id] = GC_approx_sp();
  for (;; ++my_mark_no) {
    /* GC_mark_no is passed only to allow GC_help_marker to terminate   */
    /* promptly.  This is important if it were called from the signal   */
    /* handler or from the GC lock acquisition code.  Under Linux, it's */
    /* not safe to call it from a signal handler, since it uses mutexes */
    /* and condition variables.  Since it is called only here, the      */
    /* argument is unnecessary.                                         */
    if (my_mark_no < GC_mark_no || my_mark_no > GC_mark_no + 2) {
        /* resynchronize if we get far off, e.g. because GC_mark_no     */
        /* wrapped.                                                     */
        my_mark_no = GC_mark_no;
    }
#   ifdef DEBUG_THREADS
        GC_printf1("Starting mark helper for mark number %ld\n", my_mark_no);
#   endif
    GC_help_marker(my_mark_no);
  }
}

extern long GC_markers;         /* Number of mark threads we would      */
                                /* like to have.  Includes the          */
                                /* initiating thread.                   */

pthread_t GC_mark_threads[MAX_MARKERS];
#define PTHREAD_CREATE REAL_FUNC(pthread_create)

static void start_mark_threads()
{
    unsigned i;
    pthread_attr_t attr;

    if (GC_markers > MAX_MARKERS) {
        WARN("Limiting number of mark threads\n", 0);
        GC_markers = MAX_MARKERS;
    }
    if (0 != pthread_attr_init(&attr)) ABORT("pthread_attr_init failed");

    if (0 != pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED))
        ABORT("pthread_attr_setdetachstate failed");

#   ifdef HPUX
      /* Default stack size is usually too small: fix it. */
      /* Otherwise marker threads or GC may run out of    */
      /* space.                                           */
#     define MIN_STACK_SIZE (8*HBLKSIZE*sizeof(word))
      {
        size_t old_size;
        int code;

        if (pthread_attr_getstacksize(&attr, &old_size) != 0)
          ABORT("pthread_attr_getstacksize failed\n");
        if (old_size < MIN_STACK_SIZE) {
          if (pthread_attr_setstacksize(&attr, MIN_STACK_SIZE) != 0)
            ABORT("pthread_attr_setstacksize failed\n");
        }
      }
#   endif /* HPUX */
#   ifdef CONDPRINT
      if (GC_print_stats) {
        GC_printf1("Starting %ld marker threads\n", GC_markers - 1);
      }
#   endif
    for (i = 0; i < GC_markers - 1; ++i) {
      if (0 != PTHREAD_CREATE(GC_mark_threads + i, &attr,
                              GC_mark_thread, (void *)(word)i)) {
        WARN("Marker thread creation failed, errno = %ld.\n", errno);
      }
    }
}

#else  /* !PARALLEL_MARK */

static __inline__ void start_mark_threads()
{
}

#endif /* !PARALLEL_MARK */
void GC_suspend_handler(int sig)
{
    int dummy;
    pthread_t my_thread = pthread_self();
    GC_thread me;
    sigset_t all_sigs;
    sigset_t old_sigs;
    int i;
    sigset_t mask;
#   ifdef PARALLEL_MARK
        word my_mark_no = GC_mark_no;
        /* Marker can't proceed until we acknowledge.  Thus this is     */
        /* guaranteed to be the mark_no corresponding to our            */
        /* suspension, i.e. the marker can't have incremented it yet.   */
#   endif

    if (sig != SIG_SUSPEND) ABORT("Bad signal in suspend_handler");

#if DEBUG_THREADS
    GC_printf1("Suspending 0x%x\n", my_thread);
#endif

    me = GC_lookup_thread(my_thread);
    /* The lookup here is safe, since I'm doing this on behalf  */
    /* of a thread which holds the allocation lock in order     */
    /* to stop the world.  Thus concurrent modification of the  */
    /* data structure is impossible.                            */
#   ifdef SPARC
        me -> stack_ptr = (ptr_t)GC_save_regs_in_stack();
#   else
        me -> stack_ptr = (ptr_t)(&dummy);
#   endif
#   ifdef IA64
        me -> backing_store_ptr = (ptr_t)GC_save_regs_in_stack();
#   endif

    /* Tell the thread that wants to stop the world that this   */
    /* thread has been stopped.  Note that sem_post() is        */
    /* the only async-signal-safe primitive in LinuxThreads.    */
    sem_post(&GC_suspend_ack_sem);

    /* Wait until that thread tells us to restart by sending        */
    /* this thread a SIG_THR_RESTART signal.                        */
    /* SIG_THR_RESTART should be masked at this point.  Thus there  */
    /* is no race.                                                  */
    if (sigfillset(&mask) != 0) ABORT("sigfillset() failed");
    if (sigdelset(&mask, SIG_THR_RESTART) != 0) ABORT("sigdelset() failed");
#   ifdef NO_SIGNALS
      if (sigdelset(&mask, SIGINT) != 0) ABORT("sigdelset() failed");
      if (sigdelset(&mask, SIGQUIT) != 0) ABORT("sigdelset() failed");
      if (sigdelset(&mask, SIGTERM) != 0) ABORT("sigdelset() failed");
      if (sigdelset(&mask, SIGABRT) != 0) ABORT("sigdelset() failed");
#   endif
    do {
        me->signal = 0;
        sigsuspend(&mask);             /* Wait for signal */
    } while (me->signal != SIG_THR_RESTART);

#if DEBUG_THREADS
    GC_printf1("Continuing 0x%x\n", my_thread);
#endif
}
void GC_restart_handler(int sig)
{
    GC_thread me;

    if (sig != SIG_THR_RESTART) ABORT("Bad signal in restart_handler");

    /* Let the GC_suspend_handler() know that we got a SIG_THR_RESTART. */
    /* The lookup here is safe, since I'm doing this on behalf  */
    /* of a thread which holds the allocation lock in order     */
    /* to stop the world.  Thus concurrent modification of the  */
    /* data structure is impossible.                            */
    me = GC_lookup_thread(pthread_self());
    me->signal = SIG_THR_RESTART;

    /*
    ** Note: even if we didn't do anything useful here,
    ** it would still be necessary to have a signal handler,
    ** rather than ignoring the signals, otherwise
    ** the signals will not be delivered at all, and
    ** will thus not interrupt the sigsuspend() above.
    */

#if DEBUG_THREADS
    GC_printf1("In GC_restart_handler for 0x%x\n", pthread_self());
#endif
}

/* Defining INSTALL_LOOPING_SEGV_HANDLER causes SIGSEGV and SIGBUS to  */
/* result in an infinite loop in a signal handler.  This can be very   */
/* useful for debugging, since (as of RH7) gdb still seems to have     */
/* serious problems with threads.                                      */
#ifdef INSTALL_LOOPING_SEGV_HANDLER
void GC_looping_handler(int sig)
{
    GC_printf3("Signal %ld in thread %lx, pid %ld\n",
               sig, pthread_self(), getpid());
    for (;;);
}
#endif
GC_bool GC_thr_initialized = FALSE;

# define THREAD_TABLE_SZ 128    /* Must be power of 2   */
volatile GC_thread GC_threads[THREAD_TABLE_SZ];
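
/* Illustrative note (not from the original source): the power-of-2    */
/* requirement makes the "% THREAD_TABLE_SZ" hash below cheap, since   */
/* for an unsigned value x, x % 128 == (x & 127).                      */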
void GC_push_thread_structures GC_PROTO((void))
{
    GC_push_all((ptr_t)(GC_threads), (ptr_t)(GC_threads)+sizeof(GC_threads));
}

#ifdef THREAD_LOCAL_ALLOC
/* We must explicitly mark ptrfree and gcj free lists, since the free   */
/* list links wouldn't otherwise be found.  We also set them in the     */
/* normal free lists, since that involves touching less memory than if  */
/* we scanned them normally.                                            */
void GC_mark_thread_local_free_lists(void)
{
    int i, j;
    GC_thread p;
    ptr_t q;

    for (i = 0; i < THREAD_TABLE_SZ; ++i) {
      for (p = GC_threads[i]; 0 != p; p = p -> next) {
        for (j = 1; j < NFREELISTS; ++j) {
          q = p -> ptrfree_freelists[j];
          if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
          q = p -> normal_freelists[j];
          if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
#         ifdef GC_GCJ_SUPPORT
            q = p -> gcj_freelists[j];
            if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
#         endif /* GC_GCJ_SUPPORT */
        }
      }
    }
}
#endif /* THREAD_LOCAL_ALLOC */
/* Add a thread to GC_threads.  We assume it wasn't already there.      */
/* Caller holds allocation lock.                                        */
GC_thread GC_new_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    GC_thread result;
    static struct GC_Thread_Rep first_thread;
    static GC_bool first_thread_used = FALSE;

    if (!first_thread_used) {
        result = &first_thread;
        first_thread_used = TRUE;
    } else {
        result = (struct GC_Thread_Rep *)
                 GC_INTERNAL_MALLOC(sizeof(struct GC_Thread_Rep), NORMAL);
    }
    if (result == 0) return(0);
    result -> id = id;
    result -> next = GC_threads[hv];
    GC_threads[hv] = result;
    GC_ASSERT(result -> flags == 0 && result -> thread_blocked == 0);
    return(result);
}
/* Delete a thread from GC_threads.  We assume it is there.     */
/* (The code intentionally traps if it wasn't.)                 */
/* Caller holds allocation lock.                                */
void GC_delete_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (!pthread_equal(p -> id, id)) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
    GC_INTERNAL_FREE(p);
}

/* If a thread has been joined, but we have not yet             */
/* been notified, then there may be more than one thread        */
/* in the table with the same pthread id.                       */
/* This is OK, but we need a way to delete a specific one.      */
void GC_delete_gc_thread(pthread_t id, GC_thread gc_id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (p != gc_id) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
    GC_INTERNAL_FREE(p);
}
/* Return a GC_thread corresponding to a given thread_t.        */
/* Returns 0 if it's not there.                                 */
/* Caller holds allocation lock or otherwise inhibits           */
/* updates.                                                     */
/* If there is more than one thread with the given id we        */
/* return the most recent one.                                  */
GC_thread GC_lookup_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];

    while (p != 0 && !pthread_equal(p -> id, id)) p = p -> next;
    return(p);
}
/* There seems to be a very rare thread stopping problem.  To help us   */
/* debug that, we save the ids of the stopping thread.                  */
pthread_t GC_stopping_thread;
int GC_stopping_pid;

/* Caller holds allocation lock.        */
void GC_stop_world()
{
    pthread_t my_thread = pthread_self();
    register int i;
    register GC_thread p;
    register int n_live_threads = 0;
    register int result;

    GC_stopping_thread = my_thread;    /* debugging only.      */
    GC_stopping_pid = getpid();        /* debugging only.      */
    /* Make sure all free list construction has stopped before we start. */
    /* No new construction can start, since free list construction is    */
    /* required to acquire and release the GC lock before it starts,     */
    /* and we have the lock.                                             */
#   ifdef PARALLEL_MARK
      GC_acquire_mark_lock();
      GC_ASSERT(GC_fl_builder_count == 0);
      /* We should have previously waited for it to become zero. */
#   endif /* PARALLEL_MARK */
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> id != my_thread) {
            if (p -> flags & FINISHED) continue;
            if (p -> thread_blocked) /* Will wait */ continue;
            n_live_threads++;
#           if DEBUG_THREADS
              GC_printf1("Sending suspend signal to 0x%x\n", p -> id);
#           endif
            result = pthread_kill(p -> id, SIG_SUSPEND);
            switch(result) {
                case ESRCH:
                    /* Not really there anymore.  Possible? */
                    n_live_threads--;
                    break;
                case 0:
                    break;
                default:
                    ABORT("pthread_kill failed");
            }
        }
      }
    }
    for (i = 0; i < n_live_threads; i++) {
        if (0 != sem_wait(&GC_suspend_ack_sem))
            ABORT("sem_wait in handler failed");
    }
#   ifdef PARALLEL_MARK
      GC_release_mark_lock();
#   endif
#   if DEBUG_THREADS
      GC_printf1("World stopped 0x%x\n", pthread_self());
#   endif
    GC_stopping_thread = 0;  /* debugging only */
}
/* Caller holds allocation lock, and has held it continuously since     */
/* the world stopped.                                                   */
void GC_start_world()
{
    pthread_t my_thread = pthread_self();
    register int i;
    register GC_thread p;
    register int n_live_threads = 0;
    register int result;

#   if DEBUG_THREADS
      GC_printf0("World starting\n");
#   endif

    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> id != my_thread) {
            if (p -> flags & FINISHED) continue;
            if (p -> thread_blocked) continue;
            n_live_threads++;
#           if DEBUG_THREADS
              GC_printf1("Sending restart signal to 0x%x\n", p -> id);
#           endif
            result = pthread_kill(p -> id, SIG_THR_RESTART);
            switch(result) {
                case ESRCH:
                    /* Not really there anymore.  Possible? */
                    n_live_threads--;
                    break;
                case 0:
                    break;
                default:
                    ABORT("pthread_kill failed");
            }
        }
      }
    }
#   if DEBUG_THREADS
      GC_printf0("World started\n");
#   endif
    GC_stopping_thread = 0;  /* debugging only */
}
# ifdef IA64
#   define IF_IA64(x) x
# else
#   define IF_IA64(x)
# endif
/* We hold allocation lock.  Should do exactly the right thing if the   */
/* world is stopped.  Should not fail if it isn't.                      */
void GC_push_all_stacks()
{
    int i;
    GC_thread p;
    ptr_t sp = GC_approx_sp();
    ptr_t lo, hi;
    /* On IA64, we also need to scan the register backing store. */
    IF_IA64(ptr_t bs_lo; ptr_t bs_hi;)
    pthread_t me = pthread_self();

    if (!GC_thr_initialized) GC_thr_init();
#   if DEBUG_THREADS
      GC_printf1("Pushing stacks from thread 0x%lx\n", (unsigned long) me);
#   endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> flags & FINISHED) continue;
        if (pthread_equal(p -> id, me)) {
#           ifdef SPARC
                lo = (ptr_t)GC_save_regs_in_stack();
#           else
                lo = GC_approx_sp();
#           endif
            IF_IA64(bs_hi = (ptr_t)GC_save_regs_in_stack();)
        } else {
            lo = p -> stack_ptr;
            IF_IA64(bs_hi = p -> backing_store_ptr;)
        }
        if ((p -> flags & MAIN_THREAD) == 0) {
            hi = p -> stack_end;
            IF_IA64(bs_lo = p -> backing_store_end);
        } else {
            /* The original stack. */
            hi = GC_stackbottom;
            IF_IA64(bs_lo = BACKING_STORE_BASE;)
        }
#       if DEBUG_THREADS
          GC_printf3("Stack for thread 0x%lx = [%lx,%lx)\n",
                     (unsigned long) p -> id,
                     (unsigned long) lo, (unsigned long) hi);
#       endif
        if (0 == lo) ABORT("GC_push_all_stacks: sp not set!\n");
#       ifdef STACK_GROWS_UP
          /* We got them backwards! */
          GC_push_all_stack(hi, lo);
#       else
          GC_push_all_stack(lo, hi);
#       endif
#       ifdef IA64
          if (pthread_equal(p -> id, me)) {
            GC_push_all_eager(bs_lo, bs_hi);
          } else {
            GC_push_all_stack(bs_lo, bs_hi);
          }
#       endif
      }
    }
}
#ifdef USE_PROC_FOR_LIBRARIES
int GC_segment_is_thread_stack(ptr_t lo, ptr_t hi)
{
    int i;
    GC_thread p;

#   ifdef PARALLEL_MARK
      for (i = 0; i < GC_markers; ++i) {
        if (marker_sp[i] > lo && marker_sp[i] < hi) return 1;
      }
#   endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (0 != p -> stack_end) {
#         ifdef STACK_GROWS_UP
            if (p -> stack_end >= lo && p -> stack_end < hi) return 1;
#         else /* STACK_GROWS_DOWN */
            if (p -> stack_end > lo && p -> stack_end <= hi) return 1;
#         endif
        }
      }
    }
    return 0;
}
#endif /* USE_PROC_FOR_LIBRARIES */
#ifdef GC_LINUX_THREADS
/* Return the number of processors, or <= 0 if it can't be determined. */
int GC_get_nprocs()
{
    /* Should be "return sysconf(_SC_NPROCESSORS_ONLN);" but that       */
    /* appears to be buggy in many cases.                               */
    /* We look for lines "cpu<n>" in /proc/stat.                        */
#   define STAT_BUF_SIZE 4096
#   if defined(GC_USE_LD_WRAP)
#       define STAT_READ __real_read
#   else
#       define STAT_READ read
#   endif
    char stat_buf[STAT_BUF_SIZE];
    int f;
    char c;
    word result = 1;
        /* Some old kernels only have a single "cpu nnnn ..."   */
        /* entry in /proc/stat.  We identify those as           */
        /* uniprocessors.                                       */
    size_t i, len = 0;

    f = open("/proc/stat", O_RDONLY);
    if (f < 0 || (len = STAT_READ(f, stat_buf, STAT_BUF_SIZE)) < 100) {
        WARN("Couldn't read /proc/stat\n", 0);
        return -1;
    }
    for (i = 0; i < len - 100; ++i) {
        if (stat_buf[i] == '\n' && stat_buf[i+1] == 'c'
            && stat_buf[i+2] == 'p' && stat_buf[i+3] == 'u') {
            int cpu_no = atoi(stat_buf + i + 4);
            if (cpu_no >= result) result = cpu_no + 1;
        }
    }
    close(f);   /* Avoid leaking the descriptor. */
    return result;
}
#endif /* GC_LINUX_THREADS */
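
/* For illustration (values hypothetical): on a typical 2-CPU Linux    */
/* box /proc/stat begins with lines like                               */
/*                                                                     */
/*      cpu  3357 0 4313 1362393                                       */
/*      cpu0 1580 0 2226 681002                                        */
/*      cpu1 1777 0 2087 681391                                        */
/*                                                                     */
/* so the scan above sees "cpu0" and "cpu1" and returns 2, while an    */
/* old kernel with only the aggregate "cpu" line is reported as a      */
/* uniprocessor.                                                       */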
/* We hold the allocation lock. */
void GC_thr_init()
{
    int dummy;
    GC_thread t;
    struct sigaction act;

    if (GC_thr_initialized) return;
    GC_thr_initialized = TRUE;

    if (sem_init(&GC_suspend_ack_sem, 0, 0) != 0)
        ABORT("sem_init failed");

    act.sa_flags = SA_RESTART;
    if (sigfillset(&act.sa_mask) != 0) {
        ABORT("sigfillset() failed");
    }
#   ifdef NO_SIGNALS
      if (sigdelset(&act.sa_mask, SIGINT) != 0
          || sigdelset(&act.sa_mask, SIGQUIT) != 0
          || sigdelset(&act.sa_mask, SIGABRT) != 0
          || sigdelset(&act.sa_mask, SIGTERM) != 0) {
        ABORT("sigdelset() failed");
      }
#   endif

    /* SIG_THR_RESTART is unmasked by the handler when necessary.      */
    act.sa_handler = GC_suspend_handler;
    if (sigaction(SIG_SUSPEND, &act, NULL) != 0) {
        ABORT("Cannot set SIG_SUSPEND handler");
    }

    act.sa_handler = GC_restart_handler;
    if (sigaction(SIG_THR_RESTART, &act, NULL) != 0) {
        ABORT("Cannot set SIG_THR_RESTART handler");
    }
#   ifdef INSTALL_LOOPING_SEGV_HANDLER
        act.sa_handler = GC_looping_handler;
        if (sigaction(SIGSEGV, &act, NULL) != 0
            || sigaction(SIGBUS, &act, NULL) != 0) {
            ABORT("Cannot set SIGSEGV or SIGBUS looping handler");
        }
#   endif /* INSTALL_LOOPING_SEGV_HANDLER */

    /* Add the initial thread, so we can stop it.       */
      t = GC_new_thread(pthread_self());
      t -> stack_ptr = (ptr_t)(&dummy);
      t -> flags = DETACHED | MAIN_THREAD;

    /* Set GC_nprocs.  */
      {
        char * nprocs_string = GETENV("GC_NPROCS");
        GC_nprocs = -1;
        if (nprocs_string != NULL) GC_nprocs = atoi(nprocs_string);
      }
      if (GC_nprocs <= 0) {
#       if defined(GC_HPUX_THREADS)
          GC_nprocs = pthread_num_processors_np();
#       endif
#       if defined(GC_OSF1_THREADS) || defined(GC_FREEBSD_THREADS)
          GC_nprocs = 1;
#       endif
#       if defined(GC_LINUX_THREADS)
          GC_nprocs = GC_get_nprocs();
#       endif
      }
      if (GC_nprocs <= 0) {
        WARN("GC_get_nprocs() returned %ld\n", GC_nprocs);
        GC_nprocs = 2;
#       ifdef PARALLEL_MARK
          GC_markers = 1;
#       endif
      } else {
#       ifdef PARALLEL_MARK
          GC_markers = GC_nprocs;
#       endif
      }
#   ifdef PARALLEL_MARK
#     ifdef CONDPRINT
        if (GC_print_stats) {
          GC_printf2("Number of processors = %ld, "
                     "number of marker threads = %ld\n", GC_nprocs, GC_markers);
        }
#     endif
      if (GC_markers == 1) {
        GC_parallel = FALSE;
#       ifdef CONDPRINT
          if (GC_print_stats) {
            GC_printf0("Single marker thread, turning off parallel marking\n");
          }
#       endif
      } else {
        GC_parallel = TRUE;
      }
#   endif
}
/* Perform all initializations, including those that    */
/* may require allocation.                              */
/* Called as constructor without allocation lock.       */
/* Must be called before a second thread is created.    */
void GC_init_parallel()
{
    if (parallel_initialized) return;
    parallel_initialized = TRUE;
        /* GC_init() calls us back, so set flag first.  */
    if (!GC_is_initialized) GC_init();
    /* If we are using a parallel marker, start the helper threads.  */
#     ifdef PARALLEL_MARK
        if (GC_parallel) start_mark_threads();
#     endif
    /* Initialize thread local free lists if used.      */
#   if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
      LOCK();
      GC_init_thread_local(GC_lookup_thread(pthread_self()));
      UNLOCK();
#   endif
}
int WRAP_FUNC(pthread_sigmask)(int how, const sigset_t *set, sigset_t *oset)
{
    sigset_t fudged_set;

    if (set != NULL && (how == SIG_BLOCK || how == SIG_SETMASK)) {
        fudged_set = *set;
        sigdelset(&fudged_set, SIG_SUSPEND);
        set = &fudged_set;
    }
    return(REAL_FUNC(pthread_sigmask)(how, set, oset));
}

/* Wrappers for functions that are likely to block for an appreciable  */
/* length of time.  Must be called in pairs, if at all.                 */
/* Nothing much beyond the system call itself should be executed        */
/* between these.                                                       */
void GC_start_blocking(void) {
#   define SP_SLOP 128
    GC_thread me;
    LOCK();
    me = GC_lookup_thread(pthread_self());
    GC_ASSERT(!(me -> thread_blocked));
#   ifdef SPARC
        me -> stack_ptr = (ptr_t)GC_save_regs_in_stack();
#   else
        me -> stack_ptr = (ptr_t)GC_approx_sp();
#   endif
#   ifdef IA64
        me -> backing_store_ptr = (ptr_t)GC_save_regs_in_stack() + SP_SLOP;
#   endif
    /* Add some slop to the stack pointer, since the wrapped call may   */
    /* end up pushing more callee-save registers.                       */
#   ifdef STACK_GROWS_UP
        me -> stack_ptr += SP_SLOP;
#   else
        me -> stack_ptr -= SP_SLOP;
#   endif
    me -> thread_blocked = TRUE;
    UNLOCK();
}

void GC_end_blocking(void) {
    GC_thread me;
    LOCK();   /* This will block if the world is stopped.       */
    me = GC_lookup_thread(pthread_self());
    GC_ASSERT(me -> thread_blocked);
    me -> thread_blocked = FALSE;
    UNLOCK();
}
/* A wrapper for the standard C sleep function  */
int WRAP_FUNC(sleep) (unsigned int seconds)
{
    int result;

    GC_start_blocking();
    result = REAL_FUNC(sleep)(seconds);
    GC_end_blocking();
    return result;
}
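
/* A hypothetical wrapper of the same shape (not part of this file)    */
/* for some other blocking call, here read(): it simply brackets the   */
/* system call with GC_start_blocking()/GC_end_blocking() so that a    */
/* stop-the-world collection does not need to signal this thread.      */
#if 0
ssize_t GC_blocking_read(int fd, void *buf, size_t nbytes)
{
    ssize_t result;

    GC_start_blocking();        /* records sp, marks thread as blocked  */
    result = read(fd, buf, nbytes);
    GC_end_blocking();          /* blocks here if the world is stopped  */
    return result;
}
#endif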
struct start_info {
    void *(*start_routine)(void *);
    void *arg;
    word flags;
    sem_t registered;           /* 1 ==> in our thread table, but       */
                                /* parent hasn't yet noticed.           */
};
/* Called at thread exit.                               */
/* Never called for main thread.  That's OK, since it   */
/* results in at most a tiny one-time leak.  And        */
/* linuxthreads doesn't reclaim the main thread's       */
/* resources or id anyway.                              */
void GC_thread_exit_proc(void *arg)
{
    GC_thread me;

    LOCK();
    me = GC_lookup_thread(pthread_self());
    GC_destroy_thread_local(me);
    if (me -> flags & DETACHED) {
        GC_delete_thread(pthread_self());
    } else {
        me -> flags |= FINISHED;
    }
#   if defined(THREAD_LOCAL_ALLOC) && !defined(USE_PTHREAD_SPECIFIC) \
       && !defined(USE_HPUX_TLS) && !defined(DBG_HDRS_ALL)
      GC_remove_specific(GC_thread_key);
#   endif
    if (GC_incremental && GC_collection_in_progress()) {
        int old_gc_no = GC_gc_no;

        /* Make sure that no part of our stack is still on the mark stack, */
        /* since it's about to be unmapped.                                 */
        while (GC_incremental && GC_collection_in_progress()
               && old_gc_no == GC_gc_no) {
            ENTER_GC();
            GC_collect_a_little_inner(1);
            EXIT_GC();
            UNLOCK();
            sched_yield();
            LOCK();
        }
    }
    UNLOCK();
}
int WRAP_FUNC(pthread_join)(pthread_t thread, void **retval)
{
    int result;
    GC_thread thread_gc_id;

    LOCK();
    thread_gc_id = GC_lookup_thread(thread);
    /* This is guaranteed to be the intended one, since the thread id   */
    /* can't have been recycled by pthreads.                            */
    UNLOCK();
    result = REAL_FUNC(pthread_join)(thread, retval);
    if (result == 0) {
        LOCK();
        /* Here the pthread thread id may have been recycled. */
        GC_delete_gc_thread(thread, thread_gc_id);
        UNLOCK();
    }
    return result;
}
int
WRAP_FUNC(pthread_detach)(pthread_t thread)
{
    int result;
    GC_thread thread_gc_id;

    LOCK();
    thread_gc_id = GC_lookup_thread(thread);
    UNLOCK();
    result = REAL_FUNC(pthread_detach)(thread);
    if (result == 0) {
      LOCK();
      thread_gc_id -> flags |= DETACHED;
      /* Here the pthread thread id may have been recycled. */
      if (thread_gc_id -> flags & FINISHED) {
        GC_delete_gc_thread(thread, thread_gc_id);
      }
      UNLOCK();
    }
    return result;
}
void * GC_start_routine(void * arg)
{
    int dummy;
    struct start_info * si = arg;
    void * result;
    GC_thread me;
    pthread_t my_pthread;
    void *(*start)(void *);
    void *start_arg;

    my_pthread = pthread_self();
#   ifdef DEBUG_THREADS
        GC_printf1("Starting thread 0x%lx\n", my_pthread);
        GC_printf1("pid = %ld\n", (long) getpid());
        GC_printf1("sp = 0x%lx\n", (long) &arg);
#   endif
    LOCK();
    me = GC_new_thread(my_pthread);
    me -> flags = si -> flags;
    me -> stack_ptr = 0;
    /* me -> stack_end = GC_linux_stack_base(); -- currently (11/99)     */
    /* doesn't work because the stack base in /proc/self/stat is the     */
    /* one for the main thread.  There is a strong argument that that's  */
    /* a kernel bug, but a pervasive one.                                */
#   ifdef STACK_GROWS_DOWN
      me -> stack_end = (ptr_t)(((word)(&dummy) + (GC_page_size - 1))
                                & ~(GC_page_size - 1));
      me -> stack_ptr = me -> stack_end - 0x10;
        /* Needs to be plausible, since an asynchronous stack mark      */
        /* should not crash.                                            */
#   else
      me -> stack_end = (ptr_t)((word)(&dummy) & ~(GC_page_size - 1));
      me -> stack_ptr = me -> stack_end + 0x10;
#   endif
    /* This is dubious, since we may be more than a page into the stack, */
    /* and hence skip some of it, though it's not clear that matters.    */
#   ifdef IA64
      me -> backing_store_end = (ptr_t)
                        (GC_save_regs_in_stack() & ~(GC_page_size - 1));
      /* This is also < 100% convincing.  We should also read this      */
      /* from /proc, but the hook to do so isn't there yet.             */
#   endif /* IA64 */
    UNLOCK();
    start = si -> start_routine;
#   ifdef DEBUG_THREADS
        GC_printf1("start_routine = 0x%lx\n", start);
#   endif
    start_arg = si -> arg;
    sem_post(&(si -> registered));      /* Last action on si.   */
                                        /* OK to deallocate.    */
    pthread_cleanup_push(GC_thread_exit_proc, 0);
#   if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
        LOCK();
        GC_init_thread_local(me);
        UNLOCK();
#   endif
    result = (*start)(start_arg);
#   if DEBUG_THREADS
        GC_printf1("Finishing thread 0x%x\n", pthread_self());
#   endif
    me -> status = result;
    me -> flags |= FINISHED;
    pthread_cleanup_pop(1);
    /* Cleanup acquires lock, ensuring that we can't exit               */
    /* while a collection that thinks we're alive is trying to stop     */
    /* us.                                                              */
    return(result);
}
int
WRAP_FUNC(pthread_create)(pthread_t *new_thread,
                          const pthread_attr_t *attr,
                          void *(*start_routine)(void *), void *arg)
{
    int result;
    GC_thread t;
    pthread_t my_new_thread;
    int detachstate;
    word my_flags = 0;
    struct start_info * si;
        /* This is otherwise saved only in an area mmapped by the thread */
        /* library, which isn't visible to the collector.                */

    LOCK();
    si = (struct start_info *)GC_INTERNAL_MALLOC(sizeof(struct start_info),
                                                 NORMAL);
    UNLOCK();
    if (!parallel_initialized) GC_init_parallel();
    if (0 == si) return(ENOMEM);
    sem_init(&(si -> registered), 0, 0);
    si -> start_routine = start_routine;
    si -> arg = arg;
    LOCK();
    if (!GC_thr_initialized) GC_thr_init();
    if (NULL == attr) {
        detachstate = PTHREAD_CREATE_JOINABLE;
    } else {
        pthread_attr_getdetachstate(attr, &detachstate);
    }
    if (PTHREAD_CREATE_DETACHED == detachstate) my_flags |= DETACHED;
    si -> flags = my_flags;
    UNLOCK();
#   ifdef DEBUG_THREADS
        GC_printf1("About to start new thread from thread 0x%X\n",
                   pthread_self());
#   endif
    result = REAL_FUNC(pthread_create)(new_thread, attr, GC_start_routine, si);
#   ifdef DEBUG_THREADS
        GC_printf1("Started thread 0x%X\n", *new_thread);
#   endif
    /* Wait until child has been added to the thread table.             */
    /* This also ensures that we hold onto si until the child is done   */
    /* with it.  Thus it doesn't matter whether it is otherwise         */
    /* visible to the collector.                                        */
    while (0 != sem_wait(&(si -> registered))) {
        if (EINTR != errno) ABORT("sem_wait failed");
    }
    sem_destroy(&(si -> registered));
    LOCK();
    GC_INTERNAL_FREE(si);
    UNLOCK();
    return(result);
}
#ifdef GENERIC_COMPARE_AND_SWAP
  pthread_mutex_t GC_compare_and_swap_lock = PTHREAD_MUTEX_INITIALIZER;

  GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                  GC_word old, GC_word new_val)
  {
    GC_bool result;

    pthread_mutex_lock(&GC_compare_and_swap_lock);
    if (*addr == old) {
      *addr = new_val;
      result = TRUE;
    } else {
      result = FALSE;
    }
    pthread_mutex_unlock(&GC_compare_and_swap_lock);
    return result;
  }

  GC_word GC_atomic_add(volatile GC_word *addr, GC_word how_much)
  {
    GC_word old;

    pthread_mutex_lock(&GC_compare_and_swap_lock);
    old = *addr;
    *addr = old + how_much;
    pthread_mutex_unlock(&GC_compare_and_swap_lock);
    return old;
  }

#endif /* GENERIC_COMPARE_AND_SWAP */

/* Spend a few cycles in a way that can't introduce contention with    */
/* other threads.                                                      */
void GC_pause()
{
    int i;
    volatile word dummy = 0;

    for (i = 0; i < 10; ++i) {
#     ifdef __GNUC__
        __asm__ __volatile__ (" " : : : "memory");
#     else
        /* Something that's unlikely to be optimized away. */
        GC_noop(++dummy);
#     endif
    }
}

#define SPIN_MAX 1024   /* Maximum number of calls to GC_pause before  */
                        /* giving up.                                  */

VOLATILE GC_bool GC_collecting = 0;
                        /* A hint that we're in the collector and      */
                        /* holding the allocation lock for an          */
                        /* extended period.                            */
#if !defined(USE_SPIN_LOCK) || defined(PARALLEL_MARK)
/* If we don't want to use the below spinlock implementation, either   */
/* because we don't have a GC_test_and_set implementation, or because  */
/* we don't want to risk sleeping, we can still try spinning on        */
/* pthread_mutex_trylock for a while.  This appears to be very         */
/* beneficial in many cases.                                           */
/* I suspect that under high contention this is nearly always better   */
/* than the spin lock.  But it's a bit slower on a uniprocessor.       */
/* Hence we still default to the spin lock.                            */
/* This is also used to acquire the mark lock for the parallel         */
/* marker.                                                             */

/* Here we use a strict exponential backoff scheme.  I don't know      */
/* whether that's better or worse than the above.  We eventually       */
/* yield by calling pthread_mutex_lock(); it never makes sense to      */
/* explicitly sleep.                                                   */

void GC_generic_lock(pthread_mutex_t * lock)
{
    unsigned pause_length = 1;
    unsigned i;

    if (0 == pthread_mutex_trylock(lock)) return;
    for (; pause_length <= SPIN_MAX; pause_length <<= 1) {
        for (i = 0; i < pause_length; ++i) {
            GC_pause();
        }
        switch(pthread_mutex_trylock(lock)) {
            case 0:
                return;
            case EBUSY:
                break;
            default:
                ABORT("Unexpected error from pthread_mutex_trylock");
        }
    }
    pthread_mutex_lock(lock);
}

#endif /* !USE_SPIN_LOCK || PARALLEL_MARK */
#if defined(USE_SPIN_LOCK)

/* Reasonably fast spin locks.  Basically the same implementation  */
/* as STL alloc.h.  This isn't really the right way to do this,    */
/* but until the POSIX scheduling mess gets straightened out ...   */

volatile unsigned int GC_allocate_lock = 0;

void GC_lock()
{
#   define low_spin_max 30  /* spin cycles if we suspect uniprocessor  */
#   define high_spin_max SPIN_MAX /* spin cycles for multiprocessor    */
    static unsigned spin_max = low_spin_max;
    unsigned my_spin_max;
    static unsigned last_spins = 0;
    unsigned my_last_spins;
    int i;

    if (!GC_test_and_set(&GC_allocate_lock)) {
        return;
    }
    my_spin_max = spin_max;
    my_last_spins = last_spins;
    for (i = 0; i < my_spin_max; i++) {
        if (GC_collecting || GC_nprocs == 1) goto yield;
        if (i < my_last_spins/2 || GC_allocate_lock) {
            GC_pause();
            continue;
        }
        if (!GC_test_and_set(&GC_allocate_lock)) {
            /*
             * got it!
             * Spinning worked.  Thus we're probably not being scheduled
             * against the other process with which we were contending.
             * Thus it makes sense to spin longer the next time.
             */
            last_spins = i;
            spin_max = high_spin_max;
            return;
        }
    }
    /* We are probably being scheduled against the other process.  Sleep. */
    spin_max = low_spin_max;
yield:
    for (i = 0;; ++i) {
        if (!GC_test_and_set(&GC_allocate_lock)) {
            return;
        }
#       define SLEEP_THRESHOLD 12
                /* nanosleep(<= 2ms) just spins under Linux.  We        */
                /* want to be careful to avoid that behavior.           */
        if (i < SLEEP_THRESHOLD) {
            sched_yield();
        } else {
            struct timespec ts;

            if (i > 24) i = 24;
                        /* Don't wait for more than about 15 msecs,     */
                        /* even under extreme contention.               */
            ts.tv_sec = 0;
            ts.tv_nsec = 1 << i;
            nanosleep(&ts, 0);
        }
    }
}

#else  /* !USE_SPINLOCK */

void GC_lock()
{
    if (1 == GC_nprocs || GC_collecting) {
        pthread_mutex_lock(&GC_allocate_ml);
    } else {
        GC_generic_lock(&GC_allocate_ml);
    }
}

#endif /* !USE_SPINLOCK */
#if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)

#ifdef GC_ASSERTIONS
  pthread_t GC_mark_lock_holder = NO_THREAD;
#endif

#if 0
  /* Ugly workaround for a linux threads bug in the final versions      */
  /* of glibc2.1.  Pthread_mutex_trylock sets the mutex owner           */
  /* field even when it fails to acquire the mutex.  This causes        */
  /* pthread_cond_wait to die.  Remove for glibc2.2.                    */
  /* According to the man page, we should use                           */
  /* PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP, but that isn't actually   */
  /* defined.                                                           */
  static pthread_mutex_t mark_mutex =
        {0, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, {0, 0}};
#else
  static pthread_mutex_t mark_mutex = PTHREAD_MUTEX_INITIALIZER;
#endif

static pthread_cond_t builder_cv = PTHREAD_COND_INITIALIZER;

void GC_acquire_mark_lock()
{
/*
    if (pthread_mutex_lock(&mark_mutex) != 0) {
        ABORT("pthread_mutex_lock failed");
    }
*/
    GC_generic_lock(&mark_mutex);
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = pthread_self();
#   endif
}

void GC_release_mark_lock()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = NO_THREAD;
#   endif
    if (pthread_mutex_unlock(&mark_mutex) != 0) {
        ABORT("pthread_mutex_unlock failed");
    }
}

/* Collector must wait for free-list builders for 2 reasons:           */
/* 1) Mark bits may still be getting examined without lock.            */
/* 2) Partial free lists referenced only by locals may not be scanned  */
/*    correctly, e.g. if they contain "pointer-free" objects, since    */
/*    the free-list link may be ignored.                               */
void GC_wait_builder()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = NO_THREAD;
#   endif
    if (pthread_cond_wait(&builder_cv, &mark_mutex) != 0) {
        ABORT("pthread_cond_wait failed");
    }
    GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = pthread_self();
#   endif
}

void GC_wait_for_reclaim()
{
    GC_acquire_mark_lock();
    while (GC_fl_builder_count > 0) {
        GC_wait_builder();
    }
    GC_release_mark_lock();
}

void GC_notify_all_builder()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
    if (pthread_cond_broadcast(&builder_cv) != 0) {
        ABORT("pthread_cond_broadcast failed");
    }
}

#endif /* PARALLEL_MARK || THREAD_LOCAL_ALLOC */
#ifdef PARALLEL_MARK

static pthread_cond_t mark_cv = PTHREAD_COND_INITIALIZER;

void GC_wait_marker()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = NO_THREAD;
#   endif
    if (pthread_cond_wait(&mark_cv, &mark_mutex) != 0) {
        ABORT("pthread_cond_wait failed");
    }
    GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = pthread_self();
#   endif
}

void GC_notify_all_marker()
{
    if (pthread_cond_broadcast(&mark_cv) != 0) {
        ABORT("pthread_cond_broadcast failed");
    }
}

#endif /* PARALLEL_MARK */

# endif /* GC_LINUX_THREADS and friends */