Add support for kaOS as cross build target system.
[official-gcc.git] / boehm-gc / linux_threads.c
/*
 * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1998 by Fergus Henderson.  All rights reserved.
 * Copyright (c) 2000-2001 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/*
 * Support code for LinuxThreads, the clone()-based kernel
 * thread package for Linux which is included in libc6.
 *
 * This code relies on implementation details of LinuxThreads
 * (i.e. properties not guaranteed by the Pthread standard),
 * though this version now does less of that than the other Pthreads
 * support code.
 *
 * Note that there is a lot of code duplication between linux_threads.c
 * and thread support for some of the other Posix platforms; any changes
 * made here may need to be reflected there too.
 */
/*
 * Linux_threads.c now also includes some code to support HPUX and
 * OSF1 (Compaq Tru64 Unix, really).  The OSF1 support is not yet
 * functional.  The OSF1 code is based on Eric Benson's
 * patch, though that was originally against hpux_irix_threads.  The code
 * here is completely untested.  With 0.0000001% probability, it might
 * actually work.
 *
 * Eric also suggested an alternate basis for a lock implementation in
 * his code:
 * + #elif defined(OSF1)
 * +    unsigned long GC_allocate_lock = 0;
 * +    msemaphore GC_allocate_semaphore;
 * + #  define GC_TRY_LOCK() \
 * +    ((msem_lock(&GC_allocate_semaphore, MSEM_IF_NOWAIT) == 0) \
 * +     ? (GC_allocate_lock = 1) \
 * +     : 0)
 * + #  define GC_LOCK_TAKEN GC_allocate_lock
 */

/* #define DEBUG_THREADS 1 */
/* ANSI C requires that a compilation unit contains something */

# include "gc.h"

# if defined(GC_PTHREADS) && !defined(GC_SOLARIS_THREADS) \
     && !defined(GC_IRIX_THREADS)

# include "private/gc_priv.h"

# if defined(GC_HPUX_THREADS) && !defined(USE_PTHREAD_SPECIFIC) \
     && !defined(USE_HPUX_TLS)
#   define USE_HPUX_TLS
# endif

# ifdef THREAD_LOCAL_ALLOC
#   if !defined(USE_PTHREAD_SPECIFIC) && !defined(USE_HPUX_TLS)
#     include "private/specific.h"
#   endif
#   if defined(USE_PTHREAD_SPECIFIC)
#     define GC_getspecific pthread_getspecific
#     define GC_setspecific pthread_setspecific
#     define GC_key_create pthread_key_create
      typedef pthread_key_t GC_key_t;
#   endif
#   if defined(USE_HPUX_TLS)
#     define GC_getspecific(x) (x)
#     define GC_setspecific(key, v) ((key) = (v), 0)
#     define GC_key_create(key, d) 0
      typedef void * GC_key_t;
#   endif
# endif
# include <stdlib.h>
# include <pthread.h>
# include <sched.h>
# include <time.h>
# include <errno.h>
# include <unistd.h>
# include <sys/mman.h>
# include <sys/time.h>
# include <semaphore.h>
# include <signal.h>
# include <sys/types.h>
# include <sys/stat.h>
# include <fcntl.h>

#ifndef __GNUC__
#   define __inline__
#endif

#ifdef GC_USE_LD_WRAP
#   define WRAP_FUNC(f) __wrap_##f
#   define REAL_FUNC(f) __real_##f
#else
#   define WRAP_FUNC(f) GC_##f
#   define REAL_FUNC(f) f
#   undef pthread_create
#   undef pthread_sigmask
#   undef pthread_join
#   undef pthread_detach
#endif
void GC_thr_init();

#if 0
void GC_print_sig_mask()
{
    sigset_t blocked;
    int i;

    if (pthread_sigmask(SIG_BLOCK, NULL, &blocked) != 0)
        ABORT("pthread_sigmask");
    GC_printf0("Blocked: ");
    for (i = 1; i <= MAXSIG; i++) {
        if (sigismember(&blocked, i)) { GC_printf1("%ld ",(long) i); }
    }
    GC_printf0("\n");
}
#endif
/* We use the allocation lock to protect thread-related data structures. */

/* The set of all known threads.  We intercept thread creation and     */
/* joins.                                                               */
/* Protected by allocation/GC lock.                                     */
/* Some of this should be declared volatile, but that's inconsistent   */
/* with some library routine declarations.                              */
typedef struct GC_Thread_Rep {
    struct GC_Thread_Rep * next;  /* More recently allocated threads    */
                                  /* with a given pthread id come       */
                                  /* first.  (All but the first are     */
                                  /* guaranteed to be dead, but we may  */
                                  /* not yet have registered the join.) */
    pthread_t id;
    short flags;
#       define FINISHED 1       /* Thread has exited.                   */
#       define DETACHED 2       /* Thread is intended to be detached.   */
#       define MAIN_THREAD 4    /* True for the original thread only.   */
    short thread_blocked;       /* Protected by GC lock.                */
                                /* Treated as a boolean value.  If set, */
                                /* thread will acquire GC lock before   */
                                /* doing any pointer manipulations, and */
                                /* has set its sp value.  Thus it does  */
                                /* not need to be sent a signal to stop */
                                /* it.                                  */
    ptr_t stack_end;            /* Cold end of the stack.               */
    ptr_t stack_ptr;            /* Valid only when stopped.             */
#   ifdef IA64
        ptr_t backing_store_end;
        ptr_t backing_store_ptr;
#   endif
    int signal;
    void * status;              /* The value returned from the thread.  */
                                /* Used only to avoid premature         */
                                /* reclamation of any data it might     */
                                /* reference.                           */
#   ifdef THREAD_LOCAL_ALLOC
#       if CPP_WORDSZ == 64 && defined(ALIGN_DOUBLE)
#           define GRANULARITY 16
#           define NFREELISTS 49
#       else
#           define GRANULARITY 8
#           define NFREELISTS 65
#       endif
        /* The ith free list corresponds to size i*GRANULARITY */
#       define INDEX_FROM_BYTES(n) ((ADD_SLOP(n) + GRANULARITY - 1)/GRANULARITY)
#       define BYTES_FROM_INDEX(i) ((i) * GRANULARITY - EXTRA_BYTES)
#       define SMALL_ENOUGH(bytes) (ADD_SLOP(bytes) <= \
                                    (NFREELISTS-1)*GRANULARITY)
        ptr_t ptrfree_freelists[NFREELISTS];
        ptr_t normal_freelists[NFREELISTS];
#       ifdef GC_GCJ_SUPPORT
          ptr_t gcj_freelists[NFREELISTS];
#       endif
                /* Free lists contain either a pointer or a small count */
                /* reflecting the number of granules allocated at that  */
                /* size.                                                */
                /* 0 ==> thread-local allocation in use, free list      */
                /*       empty.                                         */
                /* > 0, <= DIRECT_GRANULES ==> Using global allocation, */
                /*       too few objects of this size have been         */
                /*       allocated by this thread.                      */
                /* >= HBLKSIZE  => pointer to nonempty free list.       */
                /* > DIRECT_GRANULES, < HBLKSIZE ==> transition to      */
                /*    local alloc, equivalent to 0.                     */
#       define DIRECT_GRANULES (HBLKSIZE/GRANULARITY)
                /* Don't use local free lists for up to this much       */
                /* allocation.                                          */
#   endif
} * GC_thread;
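/* Purely illustrative sketch of the free-list entry encoding above;   */
/* fl_state and fl_classify are hypothetical names that appear nowhere */
/* else in this file, so the block is disabled like the other examples */
/* in this file.                                                        */
#if 0
enum fl_state { FL_EMPTY, FL_COUNTING, FL_TRANSITION, FL_LIST };

static enum fl_state fl_classify(ptr_t entry)
{
    word v = (word)entry;
    if (v == 0) return FL_EMPTY;                  /* local alloc, empty list   */
    if (v <= DIRECT_GRANULES) return FL_COUNTING; /* still using global alloc  */
    if (v < HBLKSIZE) return FL_TRANSITION;       /* switching to local; ~0    */
    return FL_LIST;                               /* pointer to nonempty list  */
}
#endif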
GC_thread GC_lookup_thread(pthread_t id);

static GC_bool parallel_initialized = FALSE;

void GC_init_parallel();

# if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)

/* We don't really support thread-local allocation with DBG_HDRS_ALL */

#ifdef USE_HPUX_TLS
  __thread
#endif
GC_key_t GC_thread_key;

static GC_bool keys_initialized;

/* Recover the contents of the freelist array fl into the global one gfl. */
/* Note that the indexing scheme differs, in that gfl has finer size      */
/* resolution, even if not all entries are used.                          */
/* We hold the allocator lock.                                             */
static void return_freelists(ptr_t *fl, ptr_t *gfl)
{
    int i;
    ptr_t q, *qptr;
    size_t nwords;

    for (i = 1; i < NFREELISTS; ++i) {
        nwords = i * (GRANULARITY/sizeof(word));
        qptr = fl + i;
        q = *qptr;
        if ((word)q >= HBLKSIZE) {
          if (gfl[nwords] == 0) {
            gfl[nwords] = q;
          } else {
            /* Concatenate: */
            for (; (word)q >= HBLKSIZE; qptr = &(obj_link(q)), q = *qptr);
            GC_ASSERT(0 == q);
            *qptr = gfl[nwords];
            gfl[nwords] = fl[i];
          }
        }
        /* Clear fl[i], since the thread structure may hang around.    */
        /* Do it in a way that is likely to trap if we access it.      */
        fl[i] = (ptr_t)HBLKSIZE;
    }
}
/* We statically allocate a single "size 0" object.  It is linked to   */
/* itself, and is thus repeatedly reused for all size 0 allocation     */
/* requests.  (Size 0 gcj allocation requests are incorrect, and       */
/* we arrange for those to fault asap.)                                */
static ptr_t size_zero_object = (ptr_t)(&size_zero_object);

/* Each thread structure must be initialized.   */
/* This call must be made from the new thread.  */
/* Caller holds allocation lock.                */
void GC_init_thread_local(GC_thread p)
{
    int i;

    if (!keys_initialized) {
        if (0 != GC_key_create(&GC_thread_key, 0)) {
            ABORT("Failed to create key for local allocator");
        }
        keys_initialized = TRUE;
    }
    if (0 != GC_setspecific(GC_thread_key, p)) {
        ABORT("Failed to set thread specific allocation pointers");
    }
    for (i = 1; i < NFREELISTS; ++i) {
        p -> ptrfree_freelists[i] = (ptr_t)1;
        p -> normal_freelists[i] = (ptr_t)1;
#       ifdef GC_GCJ_SUPPORT
          p -> gcj_freelists[i] = (ptr_t)1;
#       endif
    }
    /* Set up the size 0 free lists.    */
    p -> ptrfree_freelists[0] = (ptr_t)(&size_zero_object);
    p -> normal_freelists[0] = (ptr_t)(&size_zero_object);
#   ifdef GC_GCJ_SUPPORT
        p -> gcj_freelists[0] = (ptr_t)(-1);
#   endif
}
#ifdef GC_GCJ_SUPPORT
  extern ptr_t * GC_gcjobjfreelist;
#endif

/* We hold the allocator lock.  */
void GC_destroy_thread_local(GC_thread p)
{
    /* We currently only do this from the thread itself.       */
        GC_ASSERT(GC_getspecific(GC_thread_key) == (void *)p);
    return_freelists(p -> ptrfree_freelists, GC_aobjfreelist);
    return_freelists(p -> normal_freelists, GC_objfreelist);
#   ifdef GC_GCJ_SUPPORT
        return_freelists(p -> gcj_freelists, GC_gcjobjfreelist);
#   endif
}
extern GC_PTR GC_generic_malloc_many();

GC_PTR GC_local_malloc(size_t bytes)
{
    if (EXPECT(!SMALL_ENOUGH(bytes),0)) {
        return(GC_malloc(bytes));
    } else {
        int index = INDEX_FROM_BYTES(bytes);
        ptr_t * my_fl;
        ptr_t my_entry;
        GC_key_t k = GC_thread_key;
        void * tsd;

#       if defined(REDIRECT_MALLOC) && !defined(USE_PTHREAD_SPECIFIC) \
           || !defined(__GNUC__)
            if (EXPECT(0 == k, 0)) {
                /* This can happen if we get called when the world is  */
                /* being initialized.  Whether we can actually complete */
                /* the initialization then is unclear.                 */
                GC_init_parallel();
                k = GC_thread_key;
            }
#       endif
        tsd = GC_getspecific(GC_thread_key);
#       ifdef GC_ASSERTIONS
          LOCK();
          GC_ASSERT(tsd == (void *)GC_lookup_thread(pthread_self()));
          UNLOCK();
#       endif
        my_fl = ((GC_thread)tsd) -> normal_freelists + index;
        my_entry = *my_fl;
        if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
            ptr_t next = obj_link(my_entry);
            GC_PTR result = (GC_PTR)my_entry;
            *my_fl = next;
            obj_link(my_entry) = 0;
            PREFETCH_FOR_WRITE(next);
            return result;
        } else if ((word)my_entry - 1 < DIRECT_GRANULES) {
            *my_fl = my_entry + index + 1;
            return GC_malloc(bytes);
        } else {
            GC_generic_malloc_many(BYTES_FROM_INDEX(index), NORMAL, my_fl);
            if (*my_fl == 0) return GC_oom_fn(bytes);
            return GC_local_malloc(bytes);
        }
    }
}
GC_PTR GC_local_malloc_atomic(size_t bytes)
{
    if (EXPECT(!SMALL_ENOUGH(bytes), 0)) {
        return(GC_malloc_atomic(bytes));
    } else {
        int index = INDEX_FROM_BYTES(bytes);
        ptr_t * my_fl = ((GC_thread)GC_getspecific(GC_thread_key))
                        -> ptrfree_freelists + index;
        ptr_t my_entry = *my_fl;
        if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
            GC_PTR result = (GC_PTR)my_entry;
            *my_fl = obj_link(my_entry);
            return result;
        } else if ((word)my_entry - 1 < DIRECT_GRANULES) {
            *my_fl = my_entry + index + 1;
            return GC_malloc_atomic(bytes);
        } else {
            GC_generic_malloc_many(BYTES_FROM_INDEX(index), PTRFREE, my_fl);
            /* *my_fl is updated while the collector is excluded;      */
            /* the free list is always visible to the collector as     */
            /* such.                                                   */
            if (*my_fl == 0) return GC_oom_fn(bytes);
            return GC_local_malloc_atomic(bytes);
        }
    }
}
#ifdef GC_GCJ_SUPPORT

#include "include/gc_gcj.h"

#ifdef GC_ASSERTIONS
  extern GC_bool GC_gcj_malloc_initialized;
#endif

extern int GC_gcj_kind;

GC_PTR GC_local_gcj_malloc(size_t bytes,
                           void * ptr_to_struct_containing_descr)
{
    GC_ASSERT(GC_gcj_malloc_initialized);
    if (EXPECT(!SMALL_ENOUGH(bytes), 0)) {
        return GC_gcj_malloc(bytes, ptr_to_struct_containing_descr);
    } else {
        int index = INDEX_FROM_BYTES(bytes);
        ptr_t * my_fl = ((GC_thread)GC_getspecific(GC_thread_key))
                        -> gcj_freelists + index;
        ptr_t my_entry = *my_fl;
        if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
            GC_PTR result = (GC_PTR)my_entry;
            GC_ASSERT(!GC_incremental);
            /* We assert that any concurrent marker will stop us.      */
            /* Thus it is impossible for a mark procedure to see the   */
            /* allocation of the next object, but to see this object   */
            /* still containing a free list pointer.  Otherwise the    */
            /* marker might find a random "mark descriptor".           */
            *(volatile ptr_t *)my_fl = obj_link(my_entry);
            /* We must update the freelist before we store the pointer. */
            /* Otherwise a GC at this point would see a corrupted      */
            /* free list.                                              */
            /* A memory barrier is probably never needed, since the    */
            /* action of stopping this thread will cause prior writes  */
            /* to complete.                                            */
            GC_ASSERT(((void * volatile *)result)[1] == 0);
            *(void * volatile *)result = ptr_to_struct_containing_descr;
            return result;
        } else if ((word)my_entry - 1 < DIRECT_GRANULES) {
            *my_fl = my_entry + index + 1;
            return GC_gcj_malloc(bytes, ptr_to_struct_containing_descr);
        } else {
            GC_generic_malloc_many(BYTES_FROM_INDEX(index), GC_gcj_kind, my_fl);
            if (*my_fl == 0) return GC_oom_fn(bytes);
            return GC_local_gcj_malloc(bytes, ptr_to_struct_containing_descr);
        }
    }
}

#endif /* GC_GCJ_SUPPORT */

# else  /* !THREAD_LOCAL_ALLOC && !DBG_HDRS_ALL */

#   define GC_destroy_thread_local(t)

# endif /* !THREAD_LOCAL_ALLOC */
/*
 * We use signals to stop threads during GC.
 *
 * Suspended threads wait in signal handler for SIG_THR_RESTART.
 * That's more portable than semaphores or condition variables.
 * (We do use sem_post from a signal handler, but that should be portable.)
 *
 * The thread suspension signal SIG_SUSPEND is now defined in gc_priv.h.
 * Note that we can't just stop a thread; we need it to save its stack
 * pointer(s) and acknowledge.
 */
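/* In outline, the stop-the-world handshake implemented below is:      */
/*   1. GC_stop_world() sends SIG_SUSPEND to every registered thread   */
/*      via pthread_kill().                                            */
/*   2. Each GC_suspend_handler() records its stack pointer, posts     */
/*      GC_suspend_ack_sem, and blocks in sigsuspend() with only       */
/*      SIG_THR_RESTART unmasked.                                      */
/*   3. GC_stop_world() does one sem_wait() per signalled thread.      */
/*   4. GC_start_world() later sends SIG_THR_RESTART, whose handler    */
/*      sets me->signal and thereby releases the sigsuspend() loop.    */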
#ifndef SIG_THR_RESTART
#  if defined(GC_HPUX_THREADS) || defined(GC_OSF1_THREADS)
#    define SIG_THR_RESTART _SIGRTMIN + 5
#  else
#    define SIG_THR_RESTART SIGXCPU
#  endif
#endif

sem_t GC_suspend_ack_sem;
#if 0
/*
To make sure that we're using LinuxThreads and not some other thread
package, we generate a dummy reference to `pthread_kill_other_threads_np'
(was `__pthread_initial_thread_bos' but that disappeared),
which is a symbol defined in LinuxThreads, but (hopefully) not in other
thread packages.

We no longer do this, since this code is now portable enough that it might
actually work for something else.
*/
void (*dummy_var_to_force_linux_threads)() = pthread_kill_other_threads_np;
#endif /* 0 */

#if defined(SPARC) || defined(IA64)
  extern word GC_save_regs_in_stack();
#endif

long GC_nprocs = 1;     /* Number of processors.  We may not have      */
                        /* access to all of them, but this is as good  */
                        /* a guess as any ...                          */
#ifdef PARALLEL_MARK

# ifndef MAX_MARKERS
#   define MAX_MARKERS 16
# endif

static ptr_t marker_sp[MAX_MARKERS] = {0};

void * GC_mark_thread(void * id)
{
  word my_mark_no = 0;

  marker_sp[(word)id] = GC_approx_sp();
  for (;; ++my_mark_no) {
    /* GC_mark_no is passed only to allow GC_help_marker to terminate  */
    /* promptly.  This is important if it were called from the signal  */
    /* handler or from the GC lock acquisition code.  Under Linux, it's */
    /* not safe to call it from a signal handler, since it uses mutexes */
    /* and condition variables.  Since it is called only here, the     */
    /* argument is unnecessary.                                        */
    if (my_mark_no < GC_mark_no || my_mark_no > GC_mark_no + 2) {
        /* resynchronize if we get far off, e.g. because GC_mark_no    */
        /* wrapped.                                                    */
        my_mark_no = GC_mark_no;
    }
#   ifdef DEBUG_THREADS
        GC_printf1("Starting mark helper for mark number %ld\n", my_mark_no);
#   endif
    GC_help_marker(my_mark_no);
  }
}
extern long GC_markers;         /* Number of mark threads we would     */
                                /* like to have.  Includes the         */
                                /* initiating thread.                  */

pthread_t GC_mark_threads[MAX_MARKERS];

#define PTHREAD_CREATE REAL_FUNC(pthread_create)

static void start_mark_threads()
{
    unsigned i;
    pthread_attr_t attr;

    if (GC_markers > MAX_MARKERS) {
        WARN("Limiting number of mark threads\n", 0);
        GC_markers = MAX_MARKERS;
    }
    if (0 != pthread_attr_init(&attr)) ABORT("pthread_attr_init failed");

    if (0 != pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED))
        ABORT("pthread_attr_setdetachstate failed");

#   ifdef HPUX
      /* Default stack size is usually too small: fix it. */
      /* Otherwise marker threads or GC may run out of    */
      /* space.                                           */
#     define MIN_STACK_SIZE (8*HBLKSIZE*sizeof(word))
      {
        size_t old_size;
        int code;

        if (pthread_attr_getstacksize(&attr, &old_size) != 0)
          ABORT("pthread_attr_getstacksize failed\n");
        if (old_size < MIN_STACK_SIZE) {
          if (pthread_attr_setstacksize(&attr, MIN_STACK_SIZE) != 0)
            ABORT("pthread_attr_setstacksize failed\n");
        }
      }
#   endif /* HPUX */
#   ifdef CONDPRINT
      if (GC_print_stats) {
        GC_printf1("Starting %ld marker threads\n", GC_markers - 1);
      }
#   endif
    for (i = 0; i < GC_markers - 1; ++i) {
      if (0 != PTHREAD_CREATE(GC_mark_threads + i, &attr,
                              GC_mark_thread, (void *)(word)i)) {
        WARN("Marker thread creation failed, errno = %ld.\n", errno);
      }
    }
}
#else  /* !PARALLEL_MARK */

static __inline__ void start_mark_threads()
{
}

#endif /* !PARALLEL_MARK */
void GC_suspend_handler(int sig)
{
    int dummy;
    pthread_t my_thread = pthread_self();
    GC_thread me;
    sigset_t all_sigs;
    sigset_t old_sigs;
    int i;
    sigset_t mask;
#   ifdef PARALLEL_MARK
        word my_mark_no = GC_mark_no;
        /* Marker can't proceed until we acknowledge.  Thus this is    */
        /* guaranteed to be the mark_no corresponding to our           */
        /* suspension, i.e. the marker can't have incremented it yet.  */
#   endif

    if (sig != SIG_SUSPEND) ABORT("Bad signal in suspend_handler");

#if DEBUG_THREADS
    GC_printf1("Suspending 0x%x\n", my_thread);
#endif

    me = GC_lookup_thread(my_thread);
    /* The lookup here is safe, since I'm doing this on behalf */
    /* of a thread which holds the allocation lock in order    */
    /* to stop the world.  Thus concurrent modification of the */
    /* data structure is impossible.                           */
#   ifdef SPARC
        me -> stack_ptr = (ptr_t)GC_save_regs_in_stack();
#   else
        me -> stack_ptr = (ptr_t)(&dummy);
#   endif
#   ifdef IA64
        me -> backing_store_ptr = (ptr_t)GC_save_regs_in_stack();
#   endif

    /* Tell the thread that wants to stop the world that this  */
    /* thread has been stopped.  Note that sem_post() is       */
    /* the only async-signal-safe primitive in LinuxThreads.   */
    sem_post(&GC_suspend_ack_sem);

    /* Wait until that thread tells us to restart by sending   */
    /* this thread a SIG_THR_RESTART signal.                   */
    /* SIG_THR_RESTART should be masked at this point.  Thus there */
    /* is no race.                                              */
    if (sigfillset(&mask) != 0) ABORT("sigfillset() failed");
    if (sigdelset(&mask, SIG_THR_RESTART) != 0) ABORT("sigdelset() failed");
#   ifdef NO_SIGNALS
      if (sigdelset(&mask, SIGINT) != 0) ABORT("sigdelset() failed");
      if (sigdelset(&mask, SIGQUIT) != 0) ABORT("sigdelset() failed");
      if (sigdelset(&mask, SIGTERM) != 0) ABORT("sigdelset() failed");
      if (sigdelset(&mask, SIGABRT) != 0) ABORT("sigdelset() failed");
#   endif
    do {
            me->signal = 0;
            sigsuspend(&mask);             /* Wait for signal */
    } while (me->signal != SIG_THR_RESTART);

#if DEBUG_THREADS
    GC_printf1("Continuing 0x%x\n", my_thread);
#endif
}
void GC_restart_handler(int sig)
{
    GC_thread me;

    if (sig != SIG_THR_RESTART) ABORT("Bad signal in restart_handler");

    /* Let the GC_suspend_handler() know that we got a SIG_THR_RESTART. */
    /* The lookup here is safe, since I'm doing this on behalf */
    /* of a thread which holds the allocation lock in order    */
    /* to stop the world.  Thus concurrent modification of the */
    /* data structure is impossible.                           */
    me = GC_lookup_thread(pthread_self());
    me->signal = SIG_THR_RESTART;

    /*
    ** Note: even if we didn't do anything useful here,
    ** it would still be necessary to have a signal handler,
    ** rather than ignoring the signals, otherwise
    ** the signals will not be delivered at all, and
    ** will thus not interrupt the sigsuspend() above.
    */

#if DEBUG_THREADS
    GC_printf1("In GC_restart_handler for 0x%x\n", pthread_self());
#endif
}
/* Defining INSTALL_LOOPING_SEGV_HANDLER causes SIGSEGV and SIGBUS to  */
/* result in an infinite loop in a signal handler.  This can be very   */
/* useful for debugging, since (as of RH7) gdb still seems to have     */
/* serious problems with threads.                                      */
#ifdef INSTALL_LOOPING_SEGV_HANDLER
void GC_looping_handler(int sig)
{
    GC_printf3("Signal %ld in thread %lx, pid %ld\n",
               sig, pthread_self(), getpid());
    for (;;);
}
#endif

GC_bool GC_thr_initialized = FALSE;

# define THREAD_TABLE_SZ 128    /* Must be power of 2   */
volatile GC_thread GC_threads[THREAD_TABLE_SZ];

void GC_push_thread_structures GC_PROTO((void))
{
    GC_push_all((ptr_t)(GC_threads), (ptr_t)(GC_threads)+sizeof(GC_threads));
}
#ifdef THREAD_LOCAL_ALLOC
/* We must explicitly mark ptrfree and gcj free lists, since the free  */
/* list links wouldn't otherwise be found.  We also set them in the    */
/* normal free lists, since that involves touching less memory than if */
/* we scanned them normally.                                           */
void GC_mark_thread_local_free_lists(void)
{
    int i, j;
    GC_thread p;
    ptr_t q;

    for (i = 0; i < THREAD_TABLE_SZ; ++i) {
      for (p = GC_threads[i]; 0 != p; p = p -> next) {
        for (j = 1; j < NFREELISTS; ++j) {
          q = p -> ptrfree_freelists[j];
          if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
          q = p -> normal_freelists[j];
          if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
#         ifdef GC_GCJ_SUPPORT
            q = p -> gcj_freelists[j];
            if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
#         endif /* GC_GCJ_SUPPORT */
        }
      }
    }
}
#endif /* THREAD_LOCAL_ALLOC */
/* Add a thread to GC_threads.  We assume it wasn't already there.     */
/* Caller holds allocation lock.                                       */
GC_thread GC_new_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    GC_thread result;
    static struct GC_Thread_Rep first_thread;
    static GC_bool first_thread_used = FALSE;

    if (!first_thread_used) {
        result = &first_thread;
        first_thread_used = TRUE;
    } else {
        result = (struct GC_Thread_Rep *)
                 GC_INTERNAL_MALLOC(sizeof(struct GC_Thread_Rep), NORMAL);
    }
    if (result == 0) return(0);
    result -> id = id;
    result -> next = GC_threads[hv];
    GC_threads[hv] = result;
    GC_ASSERT(result -> flags == 0 && result -> thread_blocked == 0);
    return(result);
}
/* Delete a thread from GC_threads.  We assume it is there.    */
/* (The code intentionally traps if it wasn't.)                */
/* Caller holds allocation lock.                               */
void GC_delete_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (!pthread_equal(p -> id, id)) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
    GC_INTERNAL_FREE(p);
}
/* If a thread has been joined, but we have not yet            */
/* been notified, then there may be more than one thread       */
/* in the table with the same pthread id.                      */
/* This is OK, but we need a way to delete a specific one.     */
void GC_delete_gc_thread(pthread_t id, GC_thread gc_id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (p != gc_id) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
    GC_INTERNAL_FREE(p);
}
/* Return a GC_thread corresponding to a given thread_t.       */
/* Returns 0 if it's not there.                                */
/* Caller holds allocation lock or otherwise inhibits          */
/* updates.                                                    */
/* If there is more than one thread with the given id we       */
/* return the most recent one.                                 */
GC_thread GC_lookup_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];

    while (p != 0 && !pthread_equal(p -> id, id)) p = p -> next;
    return(p);
}
/* There seems to be a very rare thread stopping problem.  To help us  */
/* debug that, we save the id of the stopping thread.                  */
pthread_t GC_stopping_thread;
int GC_stopping_pid;

/* Caller holds allocation lock.        */
void GC_stop_world()
{
    pthread_t my_thread = pthread_self();
    register int i;
    register GC_thread p;
    register int n_live_threads = 0;
    register int result;

    GC_stopping_thread = my_thread;    /* debugging only.      */
    GC_stopping_pid = getpid();        /* debugging only.      */
    /* Make sure all free list construction has stopped before we start. */
    /* No new construction can start, since free list construction is  */
    /* required to acquire and release the GC lock before it starts,   */
    /* and we have the lock.                                           */
#   ifdef PARALLEL_MARK
      GC_acquire_mark_lock();
      GC_ASSERT(GC_fl_builder_count == 0);
      /* We should have previously waited for it to become zero. */
#   endif /* PARALLEL_MARK */
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> id != my_thread) {
            if (p -> flags & FINISHED) continue;
            if (p -> thread_blocked) /* Will wait */ continue;
            n_live_threads++;
#           if DEBUG_THREADS
              GC_printf1("Sending suspend signal to 0x%x\n", p -> id);
#           endif
            result = pthread_kill(p -> id, SIG_SUSPEND);
            switch(result) {
                case ESRCH:
                    /* Not really there anymore.  Possible? */
                    n_live_threads--;
                    break;
                case 0:
                    break;
                default:
                    ABORT("pthread_kill failed");
            }
        }
      }
    }
    for (i = 0; i < n_live_threads; i++) {
        if (0 != sem_wait(&GC_suspend_ack_sem))
            ABORT("sem_wait in handler failed");
    }
#   ifdef PARALLEL_MARK
      GC_release_mark_lock();
#   endif
#   if DEBUG_THREADS
      GC_printf1("World stopped 0x%x\n", pthread_self());
#   endif
    GC_stopping_thread = 0;  /* debugging only */
}
/* Caller holds allocation lock, and has held it continuously since    */
/* the world stopped.                                                  */
void GC_start_world()
{
    pthread_t my_thread = pthread_self();
    register int i;
    register GC_thread p;
    register int n_live_threads = 0;
    register int result;

#   if DEBUG_THREADS
      GC_printf0("World starting\n");
#   endif

    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> id != my_thread) {
            if (p -> flags & FINISHED) continue;
            if (p -> thread_blocked) continue;
            n_live_threads++;
#           if DEBUG_THREADS
              GC_printf1("Sending restart signal to 0x%x\n", p -> id);
#           endif
            result = pthread_kill(p -> id, SIG_THR_RESTART);
            switch(result) {
                case ESRCH:
                    /* Not really there anymore.  Possible? */
                    n_live_threads--;
                    break;
                case 0:
                    break;
                default:
                    ABORT("pthread_kill failed");
            }
        }
      }
    }
#   if DEBUG_THREADS
      GC_printf0("World started\n");
#   endif
    GC_stopping_thread = 0;  /* debugging only */
}
# ifdef IA64
#   define IF_IA64(x) x
# else
#   define IF_IA64(x)
# endif
/* We hold allocation lock.  Should do exactly the right thing if the  */
/* world is stopped.  Should not fail if it isn't.                     */
void GC_push_all_stacks()
{
    int i;
    GC_thread p;
    ptr_t sp = GC_approx_sp();
    ptr_t lo, hi;
    /* On IA64, we also need to scan the register backing store. */
    IF_IA64(ptr_t bs_lo; ptr_t bs_hi;)
    pthread_t me = pthread_self();

    if (!GC_thr_initialized) GC_thr_init();
#   if DEBUG_THREADS
      GC_printf1("Pushing stacks from thread 0x%lx\n", (unsigned long) me);
#   endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> flags & FINISHED) continue;
        if (pthread_equal(p -> id, me)) {
#           ifdef SPARC
                lo = (ptr_t)GC_save_regs_in_stack();
#           else
                lo = GC_approx_sp();
#           endif
            IF_IA64(bs_hi = (ptr_t)GC_save_regs_in_stack();)
        } else {
            lo = p -> stack_ptr;
            IF_IA64(bs_hi = p -> backing_store_ptr;)
        }
        if ((p -> flags & MAIN_THREAD) == 0) {
            hi = p -> stack_end;
            IF_IA64(bs_lo = p -> backing_store_end);
        } else {
            /* The original stack. */
            hi = GC_stackbottom;
            IF_IA64(bs_lo = BACKING_STORE_BASE;)
        }
#       if DEBUG_THREADS
            GC_printf3("Stack for thread 0x%lx = [%lx,%lx)\n",
                       (unsigned long) p -> id,
                       (unsigned long) lo, (unsigned long) hi);
#       endif
        if (0 == lo) ABORT("GC_push_all_stacks: sp not set!\n");
#       ifdef STACK_GROWS_UP
          /* We got them backwards! */
          GC_push_all_stack(hi, lo);
#       else
          GC_push_all_stack(lo, hi);
#       endif
#       ifdef IA64
          if (pthread_equal(p -> id, me)) {
            GC_push_all_eager(bs_lo, bs_hi);
          } else {
            GC_push_all_stack(bs_lo, bs_hi);
          }
#       endif
      }
    }
}
#ifdef USE_PROC_FOR_LIBRARIES
int GC_segment_is_thread_stack(ptr_t lo, ptr_t hi)
{
    int i;
    GC_thread p;

#   ifdef PARALLEL_MARK
      for (i = 0; i < GC_markers; ++i) {
        if (marker_sp[i] > lo && marker_sp[i] < hi) return 1;
      }
#   endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (0 != p -> stack_end) {
#         ifdef STACK_GROWS_UP
            if (p -> stack_end >= lo && p -> stack_end < hi) return 1;
#         else /* STACK_GROWS_DOWN */
            if (p -> stack_end > lo && p -> stack_end <= hi) return 1;
#         endif
        }
      }
    }
    return 0;
}
#endif /* USE_PROC_FOR_LIBRARIES */
#ifdef GC_LINUX_THREADS
/* Return the number of processors, or <= 0 if it can't be determined. */
int GC_get_nprocs()
{
    /* Should be "return sysconf(_SC_NPROCESSORS_ONLN);" but that      */
    /* appears to be buggy in many cases.                              */
    /* We look for lines "cpu<n>" in /proc/stat.                       */
#   define STAT_BUF_SIZE 4096
#   if defined(GC_USE_LD_WRAP)
#       define STAT_READ __real_read
#   else
#       define STAT_READ read
#   endif
    char stat_buf[STAT_BUF_SIZE];
    int f;
    char c;
    word result = 1;
        /* Some old kernels only have a single "cpu nnnn ..."  */
        /* entry in /proc/stat.  We identify those as          */
        /* uniprocessors.                                      */
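        /* For reference: /proc/stat starts with an aggregate  */
        /* "cpu ..." line, followed on SMP kernels by one      */
        /* "cpu0 ...", "cpu1 ..." line per processor; the scan */
        /* below keys off those "cpu<n>" lines.                */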
    size_t i, len = 0;

    f = open("/proc/stat", O_RDONLY);
    if (f < 0 || (len = STAT_READ(f, stat_buf, STAT_BUF_SIZE)) < 100) {
        WARN("Couldn't read /proc/stat\n", 0);
        return -1;
    }
    close(f);
    for (i = 0; i < len - 100; ++i) {
        if (stat_buf[i] == '\n' && stat_buf[i+1] == 'c'
            && stat_buf[i+2] == 'p' && stat_buf[i+3] == 'u') {
            int cpu_no = atoi(stat_buf + i + 4);
            if (cpu_no >= result) result = cpu_no + 1;
        }
    }
    return result;
}
#endif /* GC_LINUX_THREADS */
/* We hold the allocation lock. */
void GC_thr_init()
{
    int dummy;
    GC_thread t;
    struct sigaction act;

    if (GC_thr_initialized) return;
    GC_thr_initialized = TRUE;

    if (sem_init(&GC_suspend_ack_sem, 0, 0) != 0)
        ABORT("sem_init failed");

    act.sa_flags = SA_RESTART;
    if (sigfillset(&act.sa_mask) != 0) {
        ABORT("sigfillset() failed");
    }
#   ifdef NO_SIGNALS
      if (sigdelset(&act.sa_mask, SIGINT) != 0
          || sigdelset(&act.sa_mask, SIGQUIT) != 0
          || sigdelset(&act.sa_mask, SIGABRT) != 0
          || sigdelset(&act.sa_mask, SIGTERM) != 0) {
        ABORT("sigdelset() failed");
      }
#   endif

    /* SIG_THR_RESTART is unmasked by the handler when necessary.      */
    act.sa_handler = GC_suspend_handler;
    if (sigaction(SIG_SUSPEND, &act, NULL) != 0) {
        ABORT("Cannot set SIG_SUSPEND handler");
    }

    act.sa_handler = GC_restart_handler;
    if (sigaction(SIG_THR_RESTART, &act, NULL) != 0) {
        ABORT("Cannot set SIG_THR_RESTART handler");
    }
#   ifdef INSTALL_LOOPING_SEGV_HANDLER
        act.sa_handler = GC_looping_handler;
        if (sigaction(SIGSEGV, &act, NULL) != 0
            || sigaction(SIGBUS, &act, NULL) != 0) {
            ABORT("Cannot set SIGSEGV or SIGBUS looping handler");
        }
#   endif /* INSTALL_LOOPING_SEGV_HANDLER */

    /* Add the initial thread, so we can stop it.       */
      t = GC_new_thread(pthread_self());
      t -> stack_ptr = (ptr_t)(&dummy);
      t -> flags = DETACHED | MAIN_THREAD;

    /* Set GC_nprocs.  */
      {
        char * nprocs_string = GETENV("GC_NPROCS");
        GC_nprocs = -1;
        if (nprocs_string != NULL) GC_nprocs = atoi(nprocs_string);
      }
    if (GC_nprocs <= 0) {
#     if defined(GC_HPUX_THREADS)
        GC_nprocs = pthread_num_processors_np();
#     endif
#     if defined(GC_OSF1_THREADS) || defined(GC_FREEBSD_THREADS)
        GC_nprocs = 1;
#     endif
#     if defined(GC_LINUX_THREADS)
        GC_nprocs = GC_get_nprocs();
#     endif
    }
    if (GC_nprocs <= 0) {
        WARN("GC_get_nprocs() returned %ld\n", GC_nprocs);
        GC_nprocs = 2;
#       ifdef PARALLEL_MARK
          GC_markers = 1;
#       endif
    } else {
#       ifdef PARALLEL_MARK
          GC_markers = GC_nprocs;
#       endif
    }
#   ifdef PARALLEL_MARK
#     ifdef CONDPRINT
        if (GC_print_stats) {
          GC_printf2("Number of processors = %ld, "
                     "number of marker threads = %ld\n", GC_nprocs, GC_markers);
        }
#     endif
      if (GC_markers == 1) {
        GC_parallel = FALSE;
#       ifdef CONDPRINT
          if (GC_print_stats) {
            GC_printf0("Single marker thread, turning off parallel marking\n");
          }
#       endif
      } else {
        GC_parallel = TRUE;
      }
#   endif
}
/* Perform all initializations, including those that   */
/* may require allocation.                              */
/* Called as constructor without allocation lock.       */
/* Must be called before a second thread is created.    */
void GC_init_parallel()
{
    if (parallel_initialized) return;
    parallel_initialized = TRUE;
        /* GC_init() calls us back, so set flag first.  */
    if (!GC_is_initialized) GC_init();
    /* If we are using a parallel marker, start the helper threads.  */
#     ifdef PARALLEL_MARK
        if (GC_parallel) start_mark_threads();
#     endif
    /* Initialize thread local free lists if used.      */
#   if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
      LOCK();
      GC_init_thread_local(GC_lookup_thread(pthread_self()));
      UNLOCK();
#   endif
}
int WRAP_FUNC(pthread_sigmask)(int how, const sigset_t *set, sigset_t *oset)
{
    sigset_t fudged_set;

    if (set != NULL && (how == SIG_BLOCK || how == SIG_SETMASK)) {
        fudged_set = *set;
        sigdelset(&fudged_set, SIG_SUSPEND);
        set = &fudged_set;
    }
    return(REAL_FUNC(pthread_sigmask)(how, set, oset));
}
/* Wrappers for functions that are likely to block for an appreciable  */
/* length of time.  Must be called in pairs, if at all.                */
/* Nothing much beyond the system call itself should be executed       */
/* between these.                                                      */

void GC_start_blocking(void) {
#   define SP_SLOP 128
    GC_thread me;
    LOCK();
    me = GC_lookup_thread(pthread_self());
    GC_ASSERT(!(me -> thread_blocked));
#   ifdef SPARC
        me -> stack_ptr = (ptr_t)GC_save_regs_in_stack();
#   else
        me -> stack_ptr = (ptr_t)GC_approx_sp();
#   endif
#   ifdef IA64
        me -> backing_store_ptr = (ptr_t)GC_save_regs_in_stack() + SP_SLOP;
#   endif
    /* Add some slop to the stack pointer, since the wrapped call may  */
    /* end up pushing more callee-save registers.                      */
#   ifdef STACK_GROWS_UP
        me -> stack_ptr += SP_SLOP;
#   else
        me -> stack_ptr -= SP_SLOP;
#   endif
    me -> thread_blocked = TRUE;
    UNLOCK();
}
void GC_end_blocking(void) {
    GC_thread me;
    LOCK();   /* This will block if the world is stopped. */
    me = GC_lookup_thread(pthread_self());
    GC_ASSERT(me -> thread_blocked);
    me -> thread_blocked = FALSE;
    UNLOCK();
}
/* A wrapper for the standard C sleep function  */
int WRAP_FUNC(sleep) (unsigned int seconds)
{
    int result;

    GC_start_blocking();
    result = REAL_FUNC(sleep)(seconds);
    GC_end_blocking();
    return result;
}
struct start_info {
    void *(*start_routine)(void *);
    void *arg;
    word flags;
    sem_t registered;           /* 1 ==> in our thread table, but      */
                                /* parent hasn't yet noticed.          */
};
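/* The registration handshake: the child posts "registered" from       */
/* GC_start_routine() once it has added itself to GC_threads, and the  */
/* parent's pthread_create() wrapper waits on it before freeing the    */
/* start_info, so si stays live exactly as long as the child needs it. */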
/* Called at thread exit.                               */
/* Never called for main thread.  That's OK, since it   */
/* results in at most a tiny one-time leak.  And        */
/* linuxthreads doesn't reclaim the main thread's       */
/* resources or id anyway.                              */
void GC_thread_exit_proc(void *arg)
{
    GC_thread me;

    LOCK();
    me = GC_lookup_thread(pthread_self());
    GC_destroy_thread_local(me);
    if (me -> flags & DETACHED) {
        GC_delete_thread(pthread_self());
    } else {
        me -> flags |= FINISHED;
    }
#   if defined(THREAD_LOCAL_ALLOC) && !defined(USE_PTHREAD_SPECIFIC) \
       && !defined(USE_HPUX_TLS) && !defined(DBG_HDRS_ALL)
      GC_remove_specific(GC_thread_key);
#   endif
    if (GC_incremental && GC_collection_in_progress()) {
        int old_gc_no = GC_gc_no;

        /* Make sure that no part of our stack is still on the mark stack, */
        /* since it's about to be unmapped.                                */
        while (GC_incremental && GC_collection_in_progress()
               && old_gc_no == GC_gc_no) {
            ENTER_GC();
            GC_collect_a_little_inner(1);
            EXIT_GC();
            UNLOCK();
            sched_yield();
            LOCK();
        }
    }
    UNLOCK();
}
int WRAP_FUNC(pthread_join)(pthread_t thread, void **retval)
{
    int result;
    GC_thread thread_gc_id;

    LOCK();
    thread_gc_id = GC_lookup_thread(thread);
    /* This is guaranteed to be the intended one, since the thread id  */
    /* can't have been recycled by pthreads.                           */
    UNLOCK();
    result = REAL_FUNC(pthread_join)(thread, retval);
# if defined (GC_FREEBSD_THREADS)
    /* On FreeBSD, the wrapped pthread_join() sometimes returns (what
       appears to be) a spurious EINTR which caused the test and real code
       to gratuitously fail.  Having looked at system pthread library source
       code, I see how this return code may be generated.  In one path of
       code, pthread_join() just returns the errno setting of the thread
       being joined.  This does not match the POSIX specification or the
       local man pages, thus I have taken the liberty to catch this one
       spurious return value properly conditionalized on GC_FREEBSD_THREADS. */
    if (result == EINTR) result = 0;
# endif
    if (result == 0) {
        LOCK();
        /* Here the pthread thread id may have been recycled. */
        GC_delete_gc_thread(thread, thread_gc_id);
        UNLOCK();
    }
    return result;
}
int
WRAP_FUNC(pthread_detach)(pthread_t thread)
{
    int result;
    GC_thread thread_gc_id;

    LOCK();
    thread_gc_id = GC_lookup_thread(thread);
    UNLOCK();
    result = REAL_FUNC(pthread_detach)(thread);
    if (result == 0) {
      LOCK();
      thread_gc_id -> flags |= DETACHED;
      /* Here the pthread thread id may have been recycled. */
      if (thread_gc_id -> flags & FINISHED) {
        GC_delete_gc_thread(thread, thread_gc_id);
      }
      UNLOCK();
    }
    return result;
}
void * GC_start_routine(void * arg)
{
    int dummy;
    struct start_info * si = arg;
    void * result;
    GC_thread me;
    pthread_t my_pthread;
    void *(*start)(void *);
    void *start_arg;

    my_pthread = pthread_self();
#   ifdef DEBUG_THREADS
        GC_printf1("Starting thread 0x%lx\n", my_pthread);
        GC_printf1("pid = %ld\n", (long) getpid());
        GC_printf1("sp = 0x%lx\n", (long) &arg);
#   endif
    LOCK();
    me = GC_new_thread(my_pthread);
    me -> flags = si -> flags;
    me -> stack_ptr = 0;
    /* me -> stack_end = GC_linux_stack_base(); -- currently (11/99)   */
    /* doesn't work because the stack base in /proc/self/stat is the   */
    /* one for the main thread.  There is a strong argument that that's */
    /* a kernel bug, but a pervasive one.                              */
#   ifdef STACK_GROWS_DOWN
      me -> stack_end = (ptr_t)(((word)(&dummy) + (GC_page_size - 1))
                                & ~(GC_page_size - 1));
      me -> stack_ptr = me -> stack_end - 0x10;
        /* Needs to be plausible, since an asynchronous stack mark     */
        /* should not crash.                                           */
#   else
      me -> stack_end = (ptr_t)((word)(&dummy) & ~(GC_page_size - 1));
      me -> stack_ptr = me -> stack_end + 0x10;
#   endif
    /* This is dubious, since we may be more than a page into the stack, */
    /* and hence skip some of it, though it's not clear that matters.    */
#   ifdef IA64
      me -> backing_store_end = (ptr_t)
                        (GC_save_regs_in_stack() & ~(GC_page_size - 1));
      /* This is also < 100% convincing.  We should also read this     */
      /* from /proc, but the hook to do so isn't there yet.            */
#   endif /* IA64 */
    UNLOCK();
    start = si -> start_routine;
#   ifdef DEBUG_THREADS
        GC_printf1("start_routine = 0x%lx\n", start);
#   endif
    start_arg = si -> arg;
#   ifdef DEBUG_THREADS
        GC_printf1("sem_post from 0x%lx\n", my_pthread);
#   endif
    sem_post(&(si -> registered));      /* Last action on si.   */
                                        /* OK to deallocate.    */
    pthread_cleanup_push(GC_thread_exit_proc, 0);
#   if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
        LOCK();
        GC_init_thread_local(me);
        UNLOCK();
#   endif
    result = (*start)(start_arg);
#if DEBUG_THREADS
        GC_printf1("Finishing thread 0x%x\n", pthread_self());
#endif
    me -> status = result;
    me -> flags |= FINISHED;
    pthread_cleanup_pop(1);
    /* Cleanup acquires lock, ensuring that we can't exit while        */
    /* a collection that thinks we're alive is trying to stop us.      */
    return(result);
}
int
WRAP_FUNC(pthread_create)(pthread_t *new_thread,
                          const pthread_attr_t *attr,
                          void *(*start_routine)(void *), void *arg)
{
    int result;
    GC_thread t;
    pthread_t my_new_thread;
    int detachstate;
    word my_flags = 0;
    struct start_info * si;
        /* This is otherwise saved only in an area mmapped by the thread */
        /* library, which isn't visible to the collector.                */

    LOCK();
    si = (struct start_info *)GC_INTERNAL_MALLOC(sizeof(struct start_info), NORMAL);
    UNLOCK();
    if (!parallel_initialized) GC_init_parallel();
    if (0 == si) return(ENOMEM);
    sem_init(&(si -> registered), 0, 0);
    si -> start_routine = start_routine;
    si -> arg = arg;
    LOCK();
    if (!GC_thr_initialized) GC_thr_init();
    if (NULL == attr) {
        detachstate = PTHREAD_CREATE_JOINABLE;
    } else {
        pthread_attr_getdetachstate(attr, &detachstate);
    }
    if (PTHREAD_CREATE_DETACHED == detachstate) my_flags |= DETACHED;
    si -> flags = my_flags;
    UNLOCK();
#   ifdef DEBUG_THREADS
        GC_printf1("About to start new thread from thread 0x%X\n",
                   pthread_self());
#   endif
    result = REAL_FUNC(pthread_create)(new_thread, attr, GC_start_routine, si);
#   ifdef DEBUG_THREADS
        GC_printf1("Started thread 0x%X\n", *new_thread);
#   endif
    /* Wait until child has been added to the thread table.            */
    /* This also ensures that we hold onto si until the child is done  */
    /* with it.  Thus it doesn't matter whether it is otherwise        */
    /* visible to the collector.                                       */
    while (0 != sem_wait(&(si -> registered))) {
        if (EINTR != errno) ABORT("sem_wait failed");
    }
#   ifdef DEBUG_THREADS
        GC_printf1("sem_wait complete from thread 0x%X\n",
                   pthread_self());
#   endif
    sem_destroy(&(si -> registered));
    LOCK();
    GC_INTERNAL_FREE(si);
    UNLOCK();
    return(result);
}
#ifdef GENERIC_COMPARE_AND_SWAP
  pthread_mutex_t GC_compare_and_swap_lock = PTHREAD_MUTEX_INITIALIZER;

  GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                  GC_word old, GC_word new_val)
  {
    GC_bool result;
    pthread_mutex_lock(&GC_compare_and_swap_lock);
    if (*addr == old) {
      *addr = new_val;
      result = TRUE;
    } else {
      result = FALSE;
    }
    pthread_mutex_unlock(&GC_compare_and_swap_lock);
    return result;
  }

  GC_word GC_atomic_add(volatile GC_word *addr, GC_word how_much)
  {
    GC_word old;
    pthread_mutex_lock(&GC_compare_and_swap_lock);
    old = *addr;
    *addr = old + how_much;
    pthread_mutex_unlock(&GC_compare_and_swap_lock);
    return old;
  }
#endif /* GENERIC_COMPARE_AND_SWAP */
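/* Purely illustrative usage sketch of the GENERIC_COMPARE_AND_SWAP    */
/* fallbacks above, which emulate one-word CAS and fetch-and-add under */
/* GC_compare_and_swap_lock; "counter" and "example_increment" are     */
/* hypothetical names, hence the disabled block.                       */
#if 0
static volatile GC_word counter = 0;

void example_increment(void)
{
    /* Atomically adds 1 and returns the old value. */
    (void)GC_atomic_add(&counter, 1);
    /* Try to flip 1 -> 0; returns FALSE if another thread got there first. */
    if (GC_compare_and_exchange(&counter, 1, 0)) {
        /* we performed the 1 -> 0 transition */
    }
}
#endif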
/* Spend a few cycles in a way that can't introduce contention with    */
/* other threads.                                                      */
void GC_pause()
{
    int i;
    volatile word dummy = 0;

    for (i = 0; i < 10; ++i) {
#     ifdef __GNUC__
        __asm__ __volatile__ (" " : : : "memory");
#     else
        /* Something that's unlikely to be optimized away. */
        GC_noop(++dummy);
#     endif
    }
}

#define SPIN_MAX 1024   /* Maximum number of calls to GC_pause before  */
                        /* giving up.                                  */

VOLATILE GC_bool GC_collecting = 0;
                        /* A hint that we're in the collector and      */
                        /* holding the allocation lock for an          */
                        /* extended period.                            */
#if !defined(USE_SPIN_LOCK) || defined(PARALLEL_MARK)
/* If we don't want to use the below spinlock implementation, either   */
/* because we don't have a GC_test_and_set implementation, or because  */
/* we don't want to risk sleeping, we can still try spinning on        */
/* pthread_mutex_trylock for a while.  This appears to be very         */
/* beneficial in many cases.                                           */
/* I suspect that under high contention this is nearly always better   */
/* than the spin lock.  But it's a bit slower on a uniprocessor.       */
/* Hence we still default to the spin lock.                            */
/* This is also used to acquire the mark lock for the parallel         */
/* marker.                                                             */

/* Here we use a strict exponential backoff scheme.  I don't know      */
/* whether that's better or worse than the above.  We eventually       */
/* yield by calling pthread_mutex_lock(); it never makes sense to      */
/* explicitly sleep.                                                   */
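/* Concretely, the loop below retries pthread_mutex_trylock() after    */
/* pause_length = 1, 2, 4, ... calls to GC_pause(), doubling each      */
/* round up to SPIN_MAX (1024), before finally blocking in             */
/* pthread_mutex_lock().                                               */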
void GC_generic_lock(pthread_mutex_t * lock)
{
    unsigned pause_length = 1;
    unsigned i;

    if (0 == pthread_mutex_trylock(lock)) return;
    for (; pause_length <= SPIN_MAX; pause_length <<= 1) {
        for (i = 0; i < pause_length; ++i) {
            GC_pause();
        }
        switch(pthread_mutex_trylock(lock)) {
            case 0:
                return;
            case EBUSY:
                break;
            default:
                ABORT("Unexpected error from pthread_mutex_trylock");
        }
    }
    pthread_mutex_lock(lock);
}

#endif /* !USE_SPIN_LOCK || PARALLEL_MARK */
#if defined(USE_SPIN_LOCK)

/* Reasonably fast spin locks.  Basically the same implementation  */
/* as STL alloc.h.  This isn't really the right way to do this,    */
/* but until the POSIX scheduling mess gets straightened out ...   */

volatile unsigned int GC_allocate_lock = 0;

void GC_lock()
{
#   define low_spin_max 30  /* spin cycles if we suspect uniprocessor */
#   define high_spin_max SPIN_MAX /* spin cycles for multiprocessor */
    static unsigned spin_max = low_spin_max;
    unsigned my_spin_max;
    static unsigned last_spins = 0;
    unsigned my_last_spins;
    int i;

    if (!GC_test_and_set(&GC_allocate_lock)) {
        return;
    }
    my_spin_max = spin_max;
    my_last_spins = last_spins;
    for (i = 0; i < my_spin_max; i++) {
        if (GC_collecting || GC_nprocs == 1) goto yield;
        if (i < my_last_spins/2 || GC_allocate_lock) {
            GC_pause();
            continue;
        }
        if (!GC_test_and_set(&GC_allocate_lock)) {
            /*
             * got it!
             * Spinning worked.  Thus we're probably not being scheduled
             * against the other process with which we were contending.
             * Thus it makes sense to spin longer the next time.
             */
            last_spins = i;
            spin_max = high_spin_max;
            return;
        }
    }
    /* We are probably being scheduled against the other process.  Sleep. */
    spin_max = low_spin_max;
yield:
    for (i = 0;; ++i) {
        if (!GC_test_and_set(&GC_allocate_lock)) {
            return;
        }
#       define SLEEP_THRESHOLD 12
            /* nanosleep(<= 2ms) just spins under Linux.  We    */
            /* want to be careful to avoid that behavior.       */
        if (i < SLEEP_THRESHOLD) {
            sched_yield();
        } else {
            struct timespec ts;

            if (i > 24) i = 24;
                /* Don't wait for more than about 15msecs, even */
                /* under extreme contention.                    */
            ts.tv_sec = 0;
            ts.tv_nsec = 1 << i;
            nanosleep(&ts, 0);
        }
    }
}
#else  /* !USE_SPINLOCK */

void GC_lock()
{
    if (1 == GC_nprocs || GC_collecting) {
        pthread_mutex_lock(&GC_allocate_ml);
    } else {
        GC_generic_lock(&GC_allocate_ml);
    }
}

#endif /* !USE_SPINLOCK */
#if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)

#ifdef GC_ASSERTIONS
  pthread_t GC_mark_lock_holder = NO_THREAD;
#endif

#if 0
  /* Ugly workaround for a linux threads bug in the final versions     */
  /* of glibc2.1.  Pthread_mutex_trylock sets the mutex owner          */
  /* field even when it fails to acquire the mutex.  This causes       */
  /* pthread_cond_wait to die.  Remove for glibc2.2.                   */
  /* According to the man page, we should use                          */
  /* PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP, but that isn't actually  */
  /* defined.                                                          */
  static pthread_mutex_t mark_mutex =
        {0, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, {0, 0}};
#else
  static pthread_mutex_t mark_mutex = PTHREAD_MUTEX_INITIALIZER;
#endif

static pthread_cond_t builder_cv = PTHREAD_COND_INITIALIZER;
void GC_acquire_mark_lock()
{
/*
    if (pthread_mutex_lock(&mark_mutex) != 0) {
        ABORT("pthread_mutex_lock failed");
    }
*/
    GC_generic_lock(&mark_mutex);
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = pthread_self();
#   endif
}

void GC_release_mark_lock()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = NO_THREAD;
#   endif
    if (pthread_mutex_unlock(&mark_mutex) != 0) {
        ABORT("pthread_mutex_unlock failed");
    }
}
/* Collector must wait for free-list builders for 2 reasons:           */
/* 1) Mark bits may still be getting examined without lock.            */
/* 2) Partial free lists referenced only by locals may not be scanned  */
/*    correctly, e.g. if they contain "pointer-free" objects, since the */
/*    free-list link may be ignored.                                   */
void GC_wait_builder()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = NO_THREAD;
#   endif
    if (pthread_cond_wait(&builder_cv, &mark_mutex) != 0) {
        ABORT("pthread_cond_wait failed");
    }
    GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = pthread_self();
#   endif
}

void GC_wait_for_reclaim()
{
    GC_acquire_mark_lock();
    while (GC_fl_builder_count > 0) {
        GC_wait_builder();
    }
    GC_release_mark_lock();
}

void GC_notify_all_builder()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
    if (pthread_cond_broadcast(&builder_cv) != 0) {
        ABORT("pthread_cond_broadcast failed");
    }
}

#endif /* PARALLEL_MARK || THREAD_LOCAL_ALLOC */
#ifdef PARALLEL_MARK

static pthread_cond_t mark_cv = PTHREAD_COND_INITIALIZER;

void GC_wait_marker()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = NO_THREAD;
#   endif
    if (pthread_cond_wait(&mark_cv, &mark_mutex) != 0) {
        ABORT("pthread_cond_wait failed");
    }
    GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = pthread_self();
#   endif
}

void GC_notify_all_marker()
{
    if (pthread_cond_broadcast(&mark_cv) != 0) {
        ABORT("pthread_cond_broadcast failed");
    }
}

#endif /* PARALLEL_MARK */

# endif /* GC_LINUX_THREADS and friends */