/*
 * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/*
 * Support code for Solaris threads.  Provides functionality we wish Sun
 * had provided.  Relies on some information we probably shouldn't rely on.
 */
/* Boehm, September 14, 1994 4:44 pm PDT */
# if defined(SOLARIS_THREADS)

# include "gc_priv.h"
# include "solaris_threads.h"
# include <thread.h>
# include <synch.h>
# include <signal.h>
# include <fcntl.h>
# include <sys/types.h>
# include <sys/mman.h>
# include <sys/time.h>
# include <sys/resource.h>
# include <sys/stat.h>
# include <sys/syscall.h>
# include <sys/procfs.h>
# include <sys/lwp.h>
# include <sys/reg.h>
# define _CLASSIC_XOPEN_TYPES
# include <unistd.h>
# include <errno.h>
/*
 * This is the default size of the LWP arrays.  If there are more LWPs
 * than this when a stop-the-world GC happens, set_max_lwps will be
 * called to cope.
 * This must be higher than the number of LWPs at startup time.
 * The threads library creates a thread early on, so the min. is 3.
 */
# define DEFAULT_MAX_LWPS 4

#undef thr_join
#undef thr_create
#undef thr_suspend
#undef thr_continue
cond_t GC_prom_join_cv;     /* Broadcast when any thread terminates. */
cond_t GC_create_cv;        /* Signalled when a new undetached       */
                            /* thread starts.                        */

#ifdef MMAP_STACKS
static int GC_zfd;
#endif /* MMAP_STACKS */
/* We use the allocation lock to protect thread-related data structures. */

/* We stop the world using /proc primitives.  This makes some minimal   */
/* assumptions about the threads implementation.  We don't play by the  */
/* rules, since the rules make this impossible (as of Solaris 2.3).     */
/* Also note that as of Solaris 2.3 the various thread and lwp          */
/* suspension primitives fail to stop threads by the time the request   */
/* completes.                                                           */

static sigset_t old_mask;
/* Sleep for n milliseconds, n < 1000. */
void GC_msec_sleep(int n)
{
    struct timespec ts;

    ts.tv_sec = 0;
    ts.tv_nsec = 1000000*n;
    if (syscall(SYS_nanosleep, &ts, 0) < 0) {
        ABORT("nanosleep failed");
    }
}
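/* Note: the raw syscall() interface, rather than the libc wrapper, is  */
/* used for nanosleep and friends throughout this file.  Presumably     */
/* this is to bypass the thread library's wrapped versions, which       */
/* cannot be relied on while other LWPs are being stopped.              */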
/* Turn off preemption; gross but effective.          */
/* Caller has allocation lock.                        */
/* Actually this is not needed under Solaris 2.3 and  */
/* 2.4, but hopefully that'll change.                 */
void preempt_off()
{
    sigset_t set;

    (void)sigfillset(&set);
    sigdelset(&set, SIGABRT);
    syscall(SYS_sigprocmask, SIG_SETMASK, &set, &old_mask);
}

void preempt_on()
{
    syscall(SYS_sigprocmask, SIG_SETMASK, &old_mask, NULL);
}
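/*
 * Illustrative sketch (not compiled in): the intended bracketing of the
 * two routines above, exactly as used by GC_stop_world()/GC_start_world()
 * later in this file.
 */
#if 0
    preempt_off();          /* mask signals so we aren't preempted     */
    stop_all_lwps();        /* suspend every other LWP via /proc       */
    /* ... collect with the world stopped ... */
    restart_all_lwps();     /* resume the suspended LWPs               */
    preempt_on();           /* restore the caller's signal mask        */
#endif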
int GC_main_proc_fd = -1;

struct lwp_cache_entry {
    lwpid_t lc_id;
    int lc_descr;   /* /proc file descriptor. */
} GC_lwp_cache_default[DEFAULT_MAX_LWPS];

static int max_lwps = DEFAULT_MAX_LWPS;
static struct lwp_cache_entry *GC_lwp_cache = GC_lwp_cache_default;

static prgregset_t GC_lwp_registers_default[DEFAULT_MAX_LWPS];
static prgregset_t *GC_lwp_registers = GC_lwp_registers_default;
/* Return a file descriptor for the /proc entry corresponding  */
/* to the given lwp.  The file descriptor may be stale if the  */
/* lwp exited and a new one was forked.                        */
static int open_lwp(lwpid_t id)
{
    int result;
    static int next_victim = 0;
    register int i;

    for (i = 0; i < max_lwps; i++) {
        if (GC_lwp_cache[i].lc_id == id) return(GC_lwp_cache[i].lc_descr);
    }
    result = syscall(SYS_ioctl, GC_main_proc_fd, PIOCOPENLWP, &id);
    /*
     * If PIOCOPENLWP fails, try closing fds in the cache until it succeeds.
     */
    if (result < 0 && errno == EMFILE) {
        for (i = 0; i < max_lwps; i++) {
            if (GC_lwp_cache[i].lc_id != 0) {
                (void)syscall(SYS_close, GC_lwp_cache[i].lc_descr);
                GC_lwp_cache[i].lc_id = 0;  /* Entry no longer holds a valid fd. */
                result = syscall(SYS_ioctl, GC_main_proc_fd, PIOCOPENLWP, &id);
                if (result >= 0 || (result < 0 && errno != EMFILE))
                    break;
            }
        }
    }
    if (result < 0) {
        if (errno == EMFILE) {
            ABORT("Too many open files");
        }
        return(-1) /* exited? */;
    }
    if (GC_lwp_cache[next_victim].lc_id != 0)
        (void)syscall(SYS_close, GC_lwp_cache[next_victim].lc_descr);
    GC_lwp_cache[next_victim].lc_id = id;
    GC_lwp_cache[next_victim].lc_descr = result;
    if (++next_victim >= max_lwps)
        next_victim = 0;
    return(result);
}
static void uncache_lwp(lwpid_t id)
{
    register int i;

    for (i = 0; i < max_lwps; i++) {
        if (GC_lwp_cache[i].lc_id == id) {
            (void)syscall(SYS_close, GC_lwp_cache[i].lc_descr);
            GC_lwp_cache[i].lc_id = 0;
            break;
        }
    }
}
/* Sequence of current lwp ids. */
static lwpid_t GC_current_ids_default[DEFAULT_MAX_LWPS + 1];
static lwpid_t *GC_current_ids = GC_current_ids_default;

/* Temporary used below (can be big if large number of LWPs). */
static lwpid_t last_ids_default[DEFAULT_MAX_LWPS + 1];
static lwpid_t *last_ids = last_ids_default;

#define ROUNDUP(n) WORDS_TO_BYTES(ROUNDED_UP_WORDS(n))
static void set_max_lwps(GC_word n)
{
    char *mem;
    char *oldmem;
    int required_bytes = ROUNDUP(n * sizeof(struct lwp_cache_entry))
        + ROUNDUP(n * sizeof(prgregset_t))
        + ROUNDUP((n + 1) * sizeof(lwpid_t))
        + ROUNDUP((n + 1) * sizeof(lwpid_t));

    GC_expand_hp_inner(divHBLKSZ((word)required_bytes));
    oldmem = mem = GC_scratch_alloc(required_bytes);
    if (0 == mem) ABORT("No space for lwp data structures");

    /*
     * We can either flush the old lwp cache or copy it over.  Do the latter.
     */
    memcpy(mem, GC_lwp_cache, max_lwps * sizeof(struct lwp_cache_entry));
    GC_lwp_cache = (struct lwp_cache_entry*)mem;
    mem += ROUNDUP(n * sizeof(struct lwp_cache_entry));

    BZERO(GC_lwp_registers, max_lwps * sizeof(GC_lwp_registers[0]));
    GC_lwp_registers = (prgregset_t *)mem;
    mem += ROUNDUP(n * sizeof(prgregset_t));

    GC_current_ids = (lwpid_t *)mem;
    mem += ROUNDUP((n + 1) * sizeof(lwpid_t));

    last_ids = (lwpid_t *)mem;
    mem += ROUNDUP((n + 1) * sizeof(lwpid_t));

    if (mem > oldmem + required_bytes)
        ABORT("set_max_lwps buffer overflow");

    max_lwps = n;
}
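/*
 * Layout sketch (illustrative only): the single scratch block allocated
 * above is carved into four consecutive sub-arrays, each padded by
 * ROUNDUP to a whole number of words:
 *
 *     [ lwp cache | lwp registers | current ids | last ids ]
 *       n entries    n entries       n+1 ids      n+1 ids
 */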
/* Stop all lwps in process.  Assumes preemption is off.   */
/* Caller has allocation lock (and any other locks he may  */
/* need).                                                  */
static void stop_all_lwps()
{
    int lwp_fd;
    char buf[30];
    prstatus_t status;
    register int i;
    GC_bool changed;
    lwpid_t me = _lwp_self();

    if (GC_main_proc_fd == -1) {
        sprintf(buf, "/proc/%d", getpid());
        GC_main_proc_fd = syscall(SYS_open, buf, O_RDONLY);
        if (GC_main_proc_fd < 0) {
            if (errno == EMFILE)
                ABORT("/proc open failed: too many open files");
            GC_printf1("/proc open failed: errno %d", errno);
            abort();
        }
    }
    BZERO(GC_lwp_registers, sizeof (prgregset_t) * max_lwps);
    for (i = 0; i < max_lwps; i++)
        last_ids[i] = 0;
    for (;;) {
        if (syscall(SYS_ioctl, GC_main_proc_fd, PIOCSTATUS, &status) < 0)
            ABORT("Main PIOCSTATUS failed");
        if (status.pr_nlwp < 1)
            ABORT("Invalid number of lwps returned by PIOCSTATUS");
        if (status.pr_nlwp >= max_lwps) {
            set_max_lwps(status.pr_nlwp*2 + 10);
            /*
             * The data in the old GC_current_ids and
             * GC_lwp_registers has been trashed.  Cleaning out last_ids
             * will make sure every LWP gets re-examined.
             */
            for (i = 0; i < max_lwps; i++)
                last_ids[i] = 0;
            continue;
        }
        if (syscall(SYS_ioctl, GC_main_proc_fd, PIOCLWPIDS, GC_current_ids) < 0)
            ABORT("PIOCLWPIDS failed");
        changed = FALSE;
        for (i = 0; GC_current_ids[i] != 0 && i < max_lwps; i++) {
            if (GC_current_ids[i] != last_ids[i]) {
                changed = TRUE;
                if (GC_current_ids[i] != me) {
                    /* PIOCSTOP doesn't work without a writable  */
                    /* descriptor.  And that makes the process   */
                    /* undebuggable.                             */
                    if (_lwp_suspend(GC_current_ids[i]) < 0) {
                        /* Could happen if the lwp exited. */
                        uncache_lwp(GC_current_ids[i]);
                        GC_current_ids[i] = me; /* ignore */
                    }
                }
            }
        }
        /*
         * In the unlikely event something does a fork between the
         * PIOCSTATUS and the PIOCLWPIDS.
         */
        if (i >= max_lwps)
            continue;
        /* All lwps in GC_current_ids != me have been suspended.  Note  */
        /* that _lwp_suspend is idempotent.                             */
        for (i = 0; GC_current_ids[i] != 0; i++) {
            if (GC_current_ids[i] != last_ids[i]) {
                if (GC_current_ids[i] != me) {
                    lwp_fd = open_lwp(GC_current_ids[i]);
                    if (lwp_fd == -1) {
                        GC_current_ids[i] = me;
                        continue;
                    }
                    /* LWP should be stopped.  Empirically it sometimes  */
                    /* isn't, and more frequently the PR_STOPPED flag    */
                    /* is not set.  Wait for PR_STOPPED.                 */
                    if (syscall(SYS_ioctl, lwp_fd,
                                PIOCSTATUS, &status) < 0) {
                        /* Possible if the descriptor was stale, or  */
                        /* we encountered the 2.3 _lwp_suspend bug.  */
                        uncache_lwp(GC_current_ids[i]);
                        GC_current_ids[i] = me; /* handle next time. */
                    } else {
                        while (!(status.pr_flags & PR_STOPPED)) {
                            GC_msec_sleep(1);
                            if (syscall(SYS_ioctl, lwp_fd,
                                        PIOCSTATUS, &status) < 0) {
                                ABORT("Repeated PIOCSTATUS failed");
                            }
                            if (status.pr_flags & PR_STOPPED) break;

                            GC_msec_sleep(20);
                            if (syscall(SYS_ioctl, lwp_fd,
                                        PIOCSTATUS, &status) < 0) {
                                ABORT("Repeated PIOCSTATUS failed");
                            }
                        }
                        if (status.pr_who != GC_current_ids[i]) {
                            /* Can happen if thread was on death row. */
                            uncache_lwp(GC_current_ids[i]);
                            GC_current_ids[i] = me; /* handle next time. */
                            continue;
                        }
                        /* Save registers where collector can  */
                        /* find them.                          */
                        BCOPY(status.pr_reg, GC_lwp_registers[i],
                              sizeof (prgregset_t));
                    }
                }
            }
        }
        if (!changed) break;
        for (i = 0; i < max_lwps; i++) last_ids[i] = GC_current_ids[i];
    }
}
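/*
 * Note on the loop above: suspending LWPs can race with the creation of
 * new ones, so we iterate until a pass finds no id that differs from
 * last_ids, i.e. until the set of stopped LWPs reaches a fixed point.
 * Since _lwp_suspend is idempotent, re-suspending an already-stopped
 * LWP on a later pass is harmless.
 */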
/* Restart all lwps in process.  Assumes preemption is off. */
static void restart_all_lwps()
{
    int lwp_fd;
    register int i;
    GC_bool changed;
    lwpid_t me = _lwp_self();
#   define PARANOID

    for (i = 0; GC_current_ids[i] != 0; i++) {
#       ifdef PARANOID
          if (GC_current_ids[i] != me) {
            int lwp_fd = open_lwp(GC_current_ids[i]);
            prstatus_t status;

            if (lwp_fd < 0) ABORT("open_lwp failed");
            if (syscall(SYS_ioctl, lwp_fd,
                        PIOCSTATUS, &status) < 0) {
                ABORT("PIOCSTATUS failed in restart_all_lwps");
            }
            if (memcmp(status.pr_reg, GC_lwp_registers[i],
                       sizeof (prgregset_t)) != 0) {
                int j;

                for (j = 0; j < NGREG; j++) {
                    GC_printf3("%i: %x -> %x\n", j,
                               GC_lwp_registers[i][j],
                               status.pr_reg[j]);
                }
                ABORT("Register contents changed");
            }
            if (!(status.pr_flags & PR_STOPPED)) {
                ABORT("lwp no longer stopped");
            }
#ifdef SPARC
            {
                gwindows_t windows;

                if (syscall(SYS_ioctl, lwp_fd,
                            PIOCGWIN, &windows) < 0) {
                    ABORT("PIOCGWIN failed in restart_all_lwps");
                }
                if (windows.wbcnt > 0) ABORT("unsaved register windows");
            }
#endif
          }
#       endif /* PARANOID */
        if (GC_current_ids[i] == me) continue;
        if (_lwp_continue(GC_current_ids[i]) < 0) {
            ABORT("Failed to restart lwp");
        }
    }
    if (i >= max_lwps) ABORT("Too many lwps");
}
GC_bool GC_multithreaded = 0;

void GC_stop_world()
{
    preempt_off();
    if (GC_multithreaded)
        stop_all_lwps();
}

void GC_start_world()
{
    if (GC_multithreaded)
        restart_all_lwps();
    preempt_on();
}
void GC_thr_init(void);

GC_bool GC_thr_initialized = FALSE;

size_t GC_min_stack_sz;

size_t GC_page_sz;

/*
 * stack_head is stored at the top of free stacks.
 */
struct stack_head {
    struct stack_head *next;
    ptr_t base;
    thread_t owner;
};

# define N_FREE_LISTS 25
struct stack_head *GC_stack_free_lists[N_FREE_LISTS] = { 0 };
        /* GC_stack_free_lists[i] is free list for stacks of  */
        /* size GC_min_stack_sz*2**i.                         */
        /* Free lists are linked through stack_head stored    */
        /* at top of stack.                                   */
/* Return a stack of size at least *stack_size.  *stack_size is  */
/* replaced by the actual stack size.                            */
/* Caller holds allocation lock.                                 */
ptr_t GC_stack_alloc(size_t * stack_size)
{
    register size_t requested_sz = *stack_size;
    register size_t search_sz = GC_min_stack_sz;
    register int index = 0;     /* = log2(search_sz/GC_min_stack_sz) */
    register ptr_t base;
    register struct stack_head *result;

    while (search_sz < requested_sz) {
        search_sz *= 2;
        index++;
    }
    if ((result = GC_stack_free_lists[index]) == 0
        && (result = GC_stack_free_lists[index+1]) != 0) {
        /* Try next size up. */
        search_sz *= 2; index++;
    }
    if (result != 0) {
        base = GC_stack_free_lists[index]->base;
        GC_stack_free_lists[index] = GC_stack_free_lists[index]->next;
    } else {
#ifdef MMAP_STACKS
        base = (ptr_t)mmap(0, search_sz + GC_page_sz,
                           PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_NORESERVE,
                           GC_zfd, 0);
        if (base == (ptr_t)-1) {
            *stack_size = 0;
            return NULL;
        }

        mprotect(base, GC_page_sz, PROT_NONE);
        /* Should this use divHBLKSZ(search_sz + GC_page_sz) ? -- cf */
        GC_is_fresh((struct hblk *)base, divHBLKSZ(search_sz));
        base += GC_page_sz;

#else
        base = (ptr_t) GC_scratch_alloc(search_sz + 2*GC_page_sz);
        if (base == NULL) {
            *stack_size = 0;
            return NULL;
        }

        base = (ptr_t)(((word)base + GC_page_sz) & ~(GC_page_sz - 1));
        /* Protect hottest page to detect overflow. */
#       ifdef SOLARIS23_MPROTECT_BUG_FIXED
            mprotect(base, GC_page_sz, PROT_NONE);
#       endif
        GC_is_fresh((struct hblk *)base, divHBLKSZ(search_sz));

        base += GC_page_sz;
#endif
    }
    *stack_size = search_sz;
    return(base);
}
/* Caller holds allocation lock. */
void GC_stack_free(ptr_t stack, size_t size)
{
    register int index = 0;
    register size_t search_sz = GC_min_stack_sz;
    register struct stack_head *head;

#ifdef MMAP_STACKS
    /* Zero pointers. */
    mmap(stack, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_NORESERVE|MAP_FIXED,
         GC_zfd, 0);
#endif
    while (search_sz < size) {
        search_sz *= 2;
        index++;
    }
    if (search_sz != size) ABORT("Bad stack size");

    head = (struct stack_head *)(stack + search_sz - sizeof(struct stack_head));
    head->next = GC_stack_free_lists[index];
    head->base = stack;
    GC_stack_free_lists[index] = head;
}
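/*
 * Illustrative usage sketch (not compiled in); the caller and request
 * size are hypothetical.  The allocation lock must be held around both
 * calls, and the size passed to GC_stack_free must be the rounded-up
 * size that GC_stack_alloc returned.
 */
#if 0
    size_t sz = 64 * 1024;              /* hypothetical request          */
    ptr_t stack = GC_stack_alloc(&sz);  /* sz updated to the actual size */

    if (stack != 0) {
        /* ... run a thread on [stack, stack + sz) ... */
        GC_stack_free(stack, sz);       /* back onto its free list       */
    }
#endif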
void GC_my_stack_limits();

/* Notify virtual dirty bit implementation that known empty parts of  */
/* stacks do not contain useful data.                                 */
/* Caller holds allocation lock.                                      */
void GC_old_stacks_are_fresh()
{
/* No point in doing this for MMAP stacks - and pointers are zero'd out */
/* by the mmap in GC_stack_free.                                        */
#ifndef MMAP_STACKS
    register int i;
    register struct stack_head *s;
    register ptr_t p;
    register size_t sz;
    register struct hblk * h;
    int dummy;

    for (i = 0, sz = GC_min_stack_sz; i < N_FREE_LISTS;
         i++, sz *= 2) {
        for (s = GC_stack_free_lists[i]; s != 0; s = s->next) {
            p = s->base;
            h = (struct hblk *)(((word)p + HBLKSIZE-1) & ~(HBLKSIZE-1));
            if ((ptr_t)h == p) {
                GC_is_fresh((struct hblk *)p, divHBLKSZ(sz));
            } else {
                GC_is_fresh((struct hblk *)p, divHBLKSZ(sz) - 1);
                BZERO(p, (ptr_t)h - p);
            }
        }
    }
#endif /* MMAP_STACKS */
    GC_my_stack_limits();
}
/* The set of all known threads.  We intercept thread creation and      */
/* joins.  We never actually create detached threads.  We allocate all  */
/* new thread stacks ourselves.  These allow us to maintain this        */
/* data structure.                                                      */

# define THREAD_TABLE_SZ 128    /* Must be power of 2. */
volatile GC_thread GC_threads[THREAD_TABLE_SZ];
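/*
 * Illustrative sketch (not compiled in): every operation on the table
 * hashes a thread id to a bucket and walks its chain.  THREAD_TABLE_SZ
 * being a power of 2 lets the modulus reduce to a cheap mask:
 *
 *     int hv = ((word)id) % THREAD_TABLE_SZ;
 *           == (int)((word)id & (THREAD_TABLE_SZ - 1));
 */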
/* Add a thread to GC_threads.  We assume it wasn't already there.  */
/* Caller holds allocation lock.                                    */
GC_thread GC_new_thread(thread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    GC_thread result;
    static struct GC_Thread_Rep first_thread;
    static GC_bool first_thread_used = FALSE;

    if (!first_thread_used) {
        result = &first_thread;
        first_thread_used = TRUE;
        /* Don't acquire allocation lock, since we may already hold it. */
    } else {
        result = (struct GC_Thread_Rep *)
            GC_generic_malloc_inner(sizeof(struct GC_Thread_Rep), NORMAL);
    }
    if (result == 0) return(0);
    result -> id = id;
    result -> next = GC_threads[hv];
    GC_threads[hv] = result;
    /* result -> finished = 0; */
    (void) cond_init(&(result->join_cv), USYNC_THREAD, 0);
    return(result);
}
/* Delete a thread from GC_threads.  We assume it is there.  */
/* (The code intentionally traps if it wasn't.)              */
/* Caller holds allocation lock.                             */
void GC_delete_thread(thread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (p -> id != id) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
}
/* Return the GC_thread corresponding to a given thread_t.  */
/* Returns 0 if it's not there.                             */
/* Caller holds allocation lock.                            */
GC_thread GC_lookup_thread(thread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];

    while (p != 0 && p -> id != id) p = p -> next;
    return(p);
}
# define MAX_ORIG_STACK_SIZE (8 * 1024 * 1024)

word GC_get_orig_stack_size() {
    struct rlimit rl;
    static int warned = 0;
    int result;

    if (getrlimit(RLIMIT_STACK, &rl) != 0) ABORT("getrlimit failed");
    result = (word)rl.rlim_cur & ~(HBLKSIZE-1);
    if (result > MAX_ORIG_STACK_SIZE) {
        if (!warned) {
            WARN("Large stack limit (%ld): only scanning 8 MB", result);
            warned = 1;
        }
        result = MAX_ORIG_STACK_SIZE;
    }
    return result;
}
/* Notify dirty bit implementation of unused parts of my stack.  */
/* Caller holds allocation lock.                                 */
void GC_my_stack_limits()
{
    int dummy;
    register ptr_t hottest = (ptr_t)((word)(&dummy) & ~(HBLKSIZE-1));
    register GC_thread me = GC_lookup_thread(thr_self());
    register size_t stack_size = me -> stack_size;
    register ptr_t stack;

    if (stack_size == 0) {
        /* Original thread. */
        /* Empirically, what should be the stack page with lowest  */
        /* address is actually inaccessible.                       */
        stack_size = GC_get_orig_stack_size() - GC_page_sz;
        stack = GC_stackbottom - stack_size + GC_page_sz;
    } else {
        stack = me -> stack;
    }
    if (stack > hottest || stack + stack_size < hottest) {
        ABORT("sp out of bounds");
    }
    GC_is_fresh((struct hblk *)stack, divHBLKSZ(hottest - stack));
}
/* We hold allocation lock.  We assume the world is stopped. */
void GC_push_all_stacks()
{
    register int i;
    register GC_thread p;
    register ptr_t sp = GC_approx_sp();
    register ptr_t bottom, top;
    struct rlimit rl;

#   define PUSH(bottom,top) \
      if (GC_dirty_maintained) { \
        GC_push_dirty((bottom), (top), GC_page_was_ever_dirty, \
                      GC_push_all_stack); \
      } else { \
        GC_push_all_stack((bottom), (top)); \
      }
    GC_push_all_stack((ptr_t)GC_lwp_registers,
                      (ptr_t)GC_lwp_registers
                      + max_lwps * sizeof(GC_lwp_registers[0]));
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
        for (p = GC_threads[i]; p != 0; p = p -> next) {
            if (p -> stack_size != 0) {
                bottom = p -> stack;
                top = p -> stack + p -> stack_size;
            } else {
                /* The original stack. */
                bottom = GC_stackbottom - GC_get_orig_stack_size() + GC_page_sz;
                top = GC_stackbottom;
            }
            if ((word)sp > (word)bottom && (word)sp < (word)top) bottom = sp;
            PUSH(bottom, top);
        }
    }
}
int GC_is_thread_stack(ptr_t addr)
{
    register int i;
    register GC_thread p;
    register ptr_t bottom, top;
    struct rlimit rl;

    for (i = 0; i < THREAD_TABLE_SZ; i++) {
        for (p = GC_threads[i]; p != 0; p = p -> next) {
            if (p -> stack_size != 0) {
                if (p -> stack <= addr &&
                    addr < p -> stack + p -> stack_size)
                    return 1;
            }
        }
    }
    return 0;
}
/* The only thread that ever really performs a thr_join. */
void * GC_thr_daemon(void * dummy)
{
    void *status;
    thread_t departed;
    register GC_thread t;
    register int i;
    register int result;

    for (;;) {
      start:
        result = thr_join((thread_t)0, &departed, &status);
        LOCK();
        if (result != 0) {
            /* No more threads; wait for create. */
            for (i = 0; i < THREAD_TABLE_SZ; i++) {
                for (t = GC_threads[i]; t != 0; t = t -> next) {
                    if (!(t -> flags & (DETACHED | FINISHED))) {
                        UNLOCK();
                        goto start; /* Thread started just before we  */
                                    /* acquired the lock.             */
                    }
                }
            }
            cond_wait(&GC_create_cv, &GC_allocate_ml);
            UNLOCK();
        } else {
            t = GC_lookup_thread(departed);
            GC_multithreaded--;
            if (!(t -> flags & CLIENT_OWNS_STACK)) {
                GC_stack_free(t -> stack, t -> stack_size);
            }
            if (t -> flags & DETACHED) {
                GC_delete_thread(departed);
            } else {
                t -> status = status;
                t -> flags |= FINISHED;
                cond_signal(&(t -> join_cv));
                cond_broadcast(&GC_prom_join_cv);
            }
            UNLOCK();
        }
    }
}
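/*
 * Protocol note: the daemon blocks in thr_join() on behalf of everyone.
 * When an undetached thread finishes, its status is recorded and both
 * its private join_cv (for a targeted GC_thr_join) and the global
 * GC_prom_join_cv (for promiscuous wait_for == 0 joins) are woken.
 * cond_wait releases GC_allocate_ml while sleeping, which is why
 * GC_thr_create can safely signal GC_create_cv under that same lock.
 */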
/* We hold the allocation lock, or caller ensures that 2 instances  */
/* cannot be invoked concurrently.                                  */
void GC_thr_init(void)
{
    GC_thread t;
    thread_t tid;

    if (GC_thr_initialized)
        return;
    GC_thr_initialized = TRUE;
    GC_min_stack_sz = ((thr_min_stack() + 32*1024 + HBLKSIZE-1)
                       & ~(HBLKSIZE - 1));
    GC_page_sz = sysconf(_SC_PAGESIZE);
#ifdef MMAP_STACKS
    GC_zfd = open("/dev/zero", O_RDONLY);
    if (GC_zfd == -1)
        ABORT("Can't open /dev/zero");
#endif /* MMAP_STACKS */
    cond_init(&GC_prom_join_cv, USYNC_THREAD, 0);
    cond_init(&GC_create_cv, USYNC_THREAD, 0);
    /* Add the initial thread, so we can stop it. */
    t = GC_new_thread(thr_self());
    t -> stack_size = 0;
    t -> flags = DETACHED | CLIENT_OWNS_STACK;
    if (thr_create(0 /* stack */, 0 /* stack_size */, GC_thr_daemon,
                   0 /* arg */, THR_DETACHED | THR_DAEMON,
                   &tid /* thread_id */) != 0) {
        ABORT("Can't fork daemon");
    }
    thr_setprio(tid, 126);
}
/* We acquire the allocation lock to prevent races with     */
/* stopping/starting world.                                 */
/* This is no more correct than the underlying Solaris 2.X  */
/* implementation.  Under 2.3 THIS IS BROKEN.               */
int GC_thr_suspend(thread_t target_thread)
{
    GC_thread t;
    int result;

    LOCK();
    result = thr_suspend(target_thread);
    if (result == 0) {
        t = GC_lookup_thread(target_thread);
        if (t == 0) ABORT("thread unknown to GC");
        t -> flags |= SUSPENDED;
    }
    UNLOCK();
    return(result);
}

int GC_thr_continue(thread_t target_thread)
{
    GC_thread t;
    int result;

    LOCK();
    result = thr_continue(target_thread);
    if (result == 0) {
        t = GC_lookup_thread(target_thread);
        if (t == 0) ABORT("thread unknown to GC");
        t -> flags &= ~SUSPENDED;
    }
    UNLOCK();
    return(result);
}
int GC_thr_join(thread_t wait_for, thread_t *departed, void **status)
{
    register GC_thread t;
    int result = 0;

    LOCK();
    if (wait_for == 0) {
        register int i;
        register GC_bool thread_exists;

        for (;;) {
            thread_exists = FALSE;
            for (i = 0; i < THREAD_TABLE_SZ; i++) {
                for (t = GC_threads[i]; t != 0; t = t -> next) {
                    if (!(t -> flags & DETACHED)) {
                        if (t -> flags & FINISHED) {
                            goto found;
                        }
                        thread_exists = TRUE;
                    }
                }
            }
            if (!thread_exists) {
                result = ESRCH;
                goto out;
            }
            cond_wait(&GC_prom_join_cv, &GC_allocate_ml);
        }
    } else {
        t = GC_lookup_thread(wait_for);
        if (t == 0 || t -> flags & DETACHED) {
            result = ESRCH;
            goto out;
        }
        if (wait_for == thr_self()) {
            result = EDEADLK;
            goto out;
        }
        while (!(t -> flags & FINISHED)) {
            cond_wait(&(t -> join_cv), &GC_allocate_ml);
        }
    }
  found:
    if (status) *status = t -> status;
    if (departed) *departed = t -> id;
    cond_destroy(&(t -> join_cv));
    GC_delete_thread(t -> id);
  out:
    UNLOCK();
    return(result);
}
int GC_thr_create(void *stack_base, size_t stack_size,
                  void *(*start_routine)(void *), void *arg, long flags,
                  thread_t *new_thread)
{
    int result;
    GC_thread t;
    thread_t my_new_thread;
    word my_flags = 0;
    void * stack = stack_base;

    LOCK();
    if (!GC_thr_initialized) {
        GC_thr_init();
    }
    GC_multithreaded++;
    if (stack == 0) {
        if (stack_size == 0) stack_size = GC_min_stack_sz;
        stack = (void *)GC_stack_alloc(&stack_size);
        if (stack == 0) {
            GC_multithreaded--;
            UNLOCK();
            return(ENOMEM);
        }
    } else {
        my_flags |= CLIENT_OWNS_STACK;
    }
    if (flags & THR_DETACHED) my_flags |= DETACHED;
    if (flags & THR_SUSPENDED) my_flags |= SUSPENDED;
    result = thr_create(stack, stack_size, start_routine,
                        arg, flags & ~THR_DETACHED, &my_new_thread);
    if (result == 0) {
        t = GC_new_thread(my_new_thread);
        t -> flags = my_flags;
        if (!(my_flags & DETACHED)) cond_init(&(t -> join_cv), USYNC_THREAD, 0);
        t -> stack = stack;
        t -> stack_size = stack_size;
        if (new_thread != 0) *new_thread = my_new_thread;
        cond_signal(&GC_create_cv);
    } else {
        GC_multithreaded--;
        if (!(my_flags & CLIENT_OWNS_STACK)) {
            GC_stack_free(stack, stack_size);
        }
    }
    UNLOCK();
    return(result);
}
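/*
 * Illustrative usage sketch (not compiled in); my_start and my_arg are
 * hypothetical.  Client code normally reaches these wrappers through the
 * thr_create/thr_join macros in solaris_threads.h rather than calling
 * them directly.
 */
#if 0
    thread_t tid;
    void *status;

    if (GC_thr_create(0 /* stack */, 0 /* size */, my_start, my_arg,
                      0 /* flags */, &tid) != 0)
        ABORT("GC_thr_create failed");
    GC_thr_join(tid, 0, &status);   /* wait for that one thread */
#endif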
# else /* SOLARIS_THREADS */

#ifndef LINT
    int GC_no_sunOS_threads;
#endif

# endif /* SOLARIS_THREADS */