/* Source: official-gcc.git, gcc-4_6/libgo/runtime/go-go.c
   (blob 3d8e9e629084eeb68666a5f4d30716058b349e65).  */
/* go-go.c -- the go function.

   Copyright 2009 The Go Authors.  All rights reserved.
   Use of this source code is governed by a BSD-style
   license that can be found in the LICENSE file.  */
7 #include <errno.h>
8 #include <limits.h>
9 #include <signal.h>
10 #include <stdint.h>
11 #include <stdlib.h>
12 #include <pthread.h>
13 #include <semaphore.h>
15 #include "config.h"
16 #include "go-assert.h"
17 #include "go-panic.h"
18 #include "go-alloc.h"
19 #include "runtime.h"
20 #include "malloc.h"
#ifdef USING_SPLIT_STACK
/* FIXME: This is not declared anywhere.  */
extern void *__splitstack_find (void *, void *, size_t *, void **, void **,
				void **);
#endif

/* We stop the threads by sending them the signal GO_SIG_STOP and we
   start them by sending them the signal GO_SIG_START.  */

#define GO_SIG_START (SIGRTMIN + 1)
#define GO_SIG_STOP (SIGRTMIN + 2)

/* Some systems do not define SA_RESTART; treat it as a no-op flag there.  */
#ifndef SA_RESTART
#define SA_RESTART 0
#endif
/* A doubly linked list of the threads we have started.  */

struct __go_thread_id
{
  /* Links to the neighboring entries in the list.  */
  struct __go_thread_id *prev;
  struct __go_thread_id *next;
  /* True if the thread ID has not yet been filled in.  */
  _Bool tentative;
  /* Thread ID.  */
  pthread_t id;
  /* Thread's M structure.  */
  struct M *m;
  /* If the thread ID has not been filled in, the function we are
     running.  */
  void (*pfn) (void *);
  /* If the thread ID has not been filled in, the argument to the
     function.  */
  void *arg;
};
59 static struct __go_thread_id *__go_all_thread_ids;
61 /* A lock to control access to ALL_THREAD_IDS. */
63 static pthread_mutex_t __go_thread_ids_lock = PTHREAD_MUTEX_INITIALIZER;
65 /* A semaphore used to wait until all the threads have stopped. */
67 static sem_t __go_thread_ready_sem;
69 /* A signal set used to wait until garbage collection is complete. */
71 static sigset_t __go_thread_wait_sigset;
73 /* Remove the current thread from the list of threads. */
75 static void
76 remove_current_thread (void)
78 struct __go_thread_id *list_entry;
79 MCache *mcache;
80 int i;
82 list_entry = m->list_entry;
83 mcache = m->mcache;
85 i = pthread_mutex_lock (&__go_thread_ids_lock);
86 __go_assert (i == 0);
88 if (list_entry->prev != NULL)
89 list_entry->prev->next = list_entry->next;
90 else
91 __go_all_thread_ids = list_entry->next;
92 if (list_entry->next != NULL)
93 list_entry->next->prev = list_entry->prev;
95 /* This will look runtime_mheap as needed. */
96 runtime_MCache_ReleaseAll (mcache);
98 /* This should never deadlock--there shouldn't be any code that
99 holds the runtime_mheap lock when locking __go_thread_ids_lock.
100 We don't want to do this after releasing __go_thread_ids_lock
101 because it will mean that the garbage collector might run, and
102 the garbage collector does not try to lock runtime_mheap in all
103 cases since it knows it is running single-threaded. */
104 runtime_lock (&runtime_mheap);
105 mstats.heap_alloc += mcache->local_alloc;
106 mstats.heap_objects += mcache->local_objects;
107 __builtin_memset (mcache, 0, sizeof (struct MCache));
108 runtime_FixAlloc_Free (&runtime_mheap.cachealloc, mcache);
109 runtime_unlock (&runtime_mheap);
111 /* As soon as we release this look, a GC could run. Since this
112 thread is no longer on the list, the GC will not find our M
113 structure, so it could get freed at any time. That means that
114 any code from here to thread exit must not assume that m is
115 valid. */
116 m = NULL;
118 i = pthread_mutex_unlock (&__go_thread_ids_lock);
119 __go_assert (i == 0);
121 free (list_entry);
124 /* Start the thread. */
126 static void *
127 start_go_thread (void *thread_arg)
129 struct M *newm = (struct M *) thread_arg;
130 void (*pfn) (void *);
131 void *arg;
132 struct __go_thread_id *list_entry;
133 int i;
135 #ifdef __rtems__
136 __wrap_rtems_task_variable_add ((void **) &m);
137 __wrap_rtems_task_variable_add ((void **) &__go_panic_defer);
138 #endif
140 m = newm;
142 list_entry = newm->list_entry;
144 pfn = list_entry->pfn;
145 arg = list_entry->arg;
147 #ifndef USING_SPLIT_STACK
148 /* If we don't support split stack, record the current stack as the
149 top of the stack. There shouldn't be anything relevant to the
150 garbage collector above this point. */
151 m->gc_sp = (void *) &arg;
152 #endif
154 /* Finish up the entry on the thread list. */
156 i = pthread_mutex_lock (&__go_thread_ids_lock);
157 __go_assert (i == 0);
159 list_entry->id = pthread_self ();
160 list_entry->pfn = NULL;
161 list_entry->arg = NULL;
162 list_entry->tentative = 0;
164 i = pthread_mutex_unlock (&__go_thread_ids_lock);
165 __go_assert (i == 0);
167 (*pfn) (arg);
169 remove_current_thread ();
171 return NULL;
174 /* The runtime.Goexit function. */
176 void Goexit (void) asm ("libgo_runtime.runtime.Goexit");
178 void
179 Goexit (void)
181 remove_current_thread ();
182 pthread_exit (NULL);
183 abort ();
186 /* Implement the go statement. */
188 void
189 __go_go (void (*pfn) (void*), void *arg)
191 int i;
192 pthread_attr_t attr;
193 struct M *newm;
194 struct __go_thread_id *list_entry;
195 pthread_t tid;
197 i = pthread_attr_init (&attr);
198 __go_assert (i == 0);
199 i = pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
200 __go_assert (i == 0);
202 #ifdef LINKER_SUPPORTS_SPLIT_STACK
203 /* The linker knows how to handle calls between code which uses
204 -fsplit-stack and code which does not. That means that we can
205 run with a smaller stack and rely on the -fsplit-stack support to
206 save us. The GNU/Linux glibc library won't let us have a very
207 small stack, but we make it as small as we can. */
208 #ifndef PTHREAD_STACK_MIN
209 #define PTHREAD_STACK_MIN 8192
210 #endif
211 i = pthread_attr_setstacksize (&attr, PTHREAD_STACK_MIN);
212 __go_assert (i == 0);
213 #endif
215 newm = __go_alloc (sizeof (M));
217 list_entry = malloc (sizeof (struct __go_thread_id));
218 list_entry->prev = NULL;
219 list_entry->next = NULL;
220 list_entry->tentative = 1;
221 list_entry->m = newm;
222 list_entry->pfn = pfn;
223 list_entry->arg = arg;
225 newm->list_entry = list_entry;
227 newm->mcache = runtime_allocmcache ();
229 /* Add the thread to the list of all threads, marked as tentative
230 since it is not yet ready to go. */
231 i = pthread_mutex_lock (&__go_thread_ids_lock);
232 __go_assert (i == 0);
234 if (__go_all_thread_ids != NULL)
235 __go_all_thread_ids->prev = list_entry;
236 list_entry->next = __go_all_thread_ids;
237 __go_all_thread_ids = list_entry;
239 i = pthread_mutex_unlock (&__go_thread_ids_lock);
240 __go_assert (i == 0);
242 /* Start the thread. */
243 i = pthread_create (&tid, &attr, start_go_thread, newm);
244 __go_assert (i == 0);
246 i = pthread_attr_destroy (&attr);
247 __go_assert (i == 0);
/* This is the signal handler for GO_SIG_START.  The garbage collector
   will send this signal to a thread when it wants the thread to
   start.  We don't have to actually do anything here, but we need a
   signal handler since ignoring the signal will mean that the
   sigsuspend will never see it.  */

static void
gc_start_handler (int sig __attribute__ ((unused)))
{
}
261 /* Tell the garbage collector that we are ready, and wait for the
262 garbage collector to tell us that it is done. This may be called
263 by a signal handler, so it is restricted to using functions which
264 are async cancel safe. */
266 static void
267 stop_for_gc (void)
269 int i;
271 /* Tell the garbage collector about our stack. */
272 #ifdef USING_SPLIT_STACK
273 m->gc_sp = __splitstack_find (NULL, NULL, &m->gc_len,
274 &m->gc_next_segment, &m->gc_next_sp,
275 &m->gc_initial_sp);
276 #else
278 uintptr_t top = (uintptr_t) m->gc_sp;
279 uintptr_t bottom = (uintptr_t) &top;
280 if (top < bottom)
282 m->gc_next_sp = m->gc_sp;
283 m->gc_len = bottom - top;
285 else
287 m->gc_next_sp = (void *) bottom;
288 m->gc_len = top - bottom;
291 #endif
293 /* FIXME: Perhaps we should just move __go_panic_defer into M. */
294 m->gc_panic_defer = __go_panic_defer;
296 /* Tell the garbage collector that we are ready by posting to the
297 semaphore. */
298 i = sem_post (&__go_thread_ready_sem);
299 __go_assert (i == 0);
301 /* Wait for the garbage collector to tell us to continue. */
302 sigsuspend (&__go_thread_wait_sigset);
305 /* This is the signal handler for GO_SIG_STOP. The garbage collector
306 will send this signal to a thread when it wants the thread to
307 stop. */
309 static void
310 gc_stop_handler (int sig __attribute__ ((unused)))
312 struct M *pm = m;
314 if (__sync_bool_compare_and_swap (&pm->holds_finlock, 1, 1))
316 /* We can't interrupt the thread while it holds the finalizer
317 lock. Otherwise we can get into a deadlock when mark calls
318 runtime_walkfintab. */
319 __sync_bool_compare_and_swap (&pm->gcing_for_finlock, 0, 1);
320 return;
323 if (__sync_bool_compare_and_swap (&pm->mallocing, 1, 1))
325 /* m->mallocing was already non-zero. We can't interrupt the
326 thread while it is running an malloc. Instead, tell it to
327 call back to us when done. */
328 __sync_bool_compare_and_swap (&pm->gcing, 0, 1);
329 return;
332 if (__sync_bool_compare_and_swap (&pm->nomemprof, 1, 1))
334 /* Similarly, we can't interrupt the thread while it is building
335 profiling information. Otherwise we can get into a deadlock
336 when sweepspan calls MProf_Free. */
337 __sync_bool_compare_and_swap (&pm->gcing_for_prof, 0, 1);
338 return;
341 stop_for_gc ();
/* This is called by malloc when it gets a signal during the malloc
   call itself.  Returns R unchanged; the pass-through return value
   keeps the call from being tail-call optimized.  */

int
__go_run_goroutine_gc (int r)
{
  /* Force callee-saved registers to be saved on the stack.  This is
     not needed if we are invoked from the signal handler, but it is
     needed if we are called directly, since otherwise we might miss
     something that a function somewhere up the call stack is holding
     in a register.  */
  __builtin_unwind_init ();

  stop_for_gc ();

  /* This avoids tail recursion, to make sure that the saved registers
     are on the stack.  */
  return r;
}
364 /* Stop all the other threads for garbage collection. */
366 void
367 runtime_stoptheworld (void)
369 int i;
370 pthread_t me;
371 int c;
372 struct __go_thread_id *p;
374 i = pthread_mutex_lock (&__go_thread_ids_lock);
375 __go_assert (i == 0);
377 me = pthread_self ();
378 c = 0;
379 p = __go_all_thread_ids;
380 while (p != NULL)
382 if (p->tentative || pthread_equal (me, p->id))
383 p = p->next;
384 else
386 i = pthread_kill (p->id, GO_SIG_STOP);
387 if (i == 0)
389 ++c;
390 p = p->next;
392 else if (i == ESRCH)
394 struct __go_thread_id *next;
396 /* This thread died somehow. Remove it from the
397 list. */
398 next = p->next;
399 if (p->prev != NULL)
400 p->prev->next = next;
401 else
402 __go_all_thread_ids = next;
403 if (next != NULL)
404 next->prev = p->prev;
405 free (p);
406 p = next;
408 else
409 abort ();
413 /* Wait for each thread to receive the signal and post to the
414 semaphore. If a thread receives the signal but contrives to die
415 before it posts to the semaphore, then we will hang forever
416 here. */
418 while (c > 0)
420 i = sem_wait (&__go_thread_ready_sem);
421 if (i < 0 && errno == EINTR)
422 continue;
423 __go_assert (i == 0);
424 --c;
427 /* The gc_panic_defer field should now be set for all M's except the
428 one in this thread. Set this one now. */
429 m->gc_panic_defer = __go_panic_defer;
431 /* Leave with __go_thread_ids_lock held. */
434 /* Scan all the stacks for garbage collection. This should be called
435 with __go_thread_ids_lock held. */
437 void
438 __go_scanstacks (void (*scan) (byte *, int64))
440 pthread_t me;
441 struct __go_thread_id *p;
443 /* Make sure all the registers for this thread are on the stack. */
444 __builtin_unwind_init ();
446 me = pthread_self ();
447 for (p = __go_all_thread_ids; p != NULL; p = p->next)
449 if (p->tentative)
451 /* The goroutine function and argument can be allocated on
452 the heap, so we have to scan them for a thread that has
453 not yet started. */
454 scan ((void *) &p->pfn, sizeof (void *));
455 scan ((void *) &p->arg, sizeof (void *));
456 scan ((void *) &p->m, sizeof (void *));
457 continue;
460 #ifdef USING_SPLIT_STACK
462 void *sp;
463 size_t len;
464 void *next_segment;
465 void *next_sp;
466 void *initial_sp;
468 if (pthread_equal (me, p->id))
470 next_segment = NULL;
471 next_sp = NULL;
472 initial_sp = NULL;
473 sp = __splitstack_find (NULL, NULL, &len, &next_segment,
474 &next_sp, &initial_sp);
476 else
478 sp = p->m->gc_sp;
479 len = p->m->gc_len;
480 next_segment = p->m->gc_next_segment;
481 next_sp = p->m->gc_next_sp;
482 initial_sp = p->m->gc_initial_sp;
485 while (sp != NULL)
487 scan (sp, len);
488 sp = __splitstack_find (next_segment, next_sp, &len,
489 &next_segment, &next_sp, &initial_sp);
492 #else /* !defined(USING_SPLIT_STACK) */
494 if (pthread_equal (me, p->id))
496 uintptr_t top = (uintptr_t) m->gc_sp;
497 uintptr_t bottom = (uintptr_t) &top;
498 if (top < bottom)
499 scan (m->gc_sp, bottom - top);
500 else
501 scan ((void *) bottom, top - bottom);
503 else
505 scan (p->m->gc_next_sp, p->m->gc_len);
508 #endif /* !defined(USING_SPLIT_STACK) */
510 /* Also scan the M structure while we're at it. */
512 scan ((void *) &p->m, sizeof (void *));
516 /* Release all the memory caches. This is called with
517 __go_thread_ids_lock held. */
519 void
520 __go_stealcache (void)
522 struct __go_thread_id *p;
524 for (p = __go_all_thread_ids; p != NULL; p = p->next)
525 runtime_MCache_ReleaseAll (p->m->mcache);
528 /* Gather memory cache statistics. This is called with
529 __go_thread_ids_lock held. */
531 void
532 __go_cachestats (void)
534 struct __go_thread_id *p;
536 for (p = __go_all_thread_ids; p != NULL; p = p->next)
538 MCache *c;
540 c = p->m->mcache;
541 mstats.heap_alloc += c->local_alloc;
542 c->local_alloc = 0;
543 mstats.heap_objects += c->local_objects;
544 c->local_objects = 0;
548 /* Start the other threads after garbage collection. */
550 void
551 runtime_starttheworld (void)
553 int i;
554 pthread_t me;
555 struct __go_thread_id *p;
557 /* Here __go_thread_ids_lock should be held. */
559 me = pthread_self ();
560 p = __go_all_thread_ids;
561 while (p != NULL)
563 if (p->tentative || pthread_equal (me, p->id))
564 p = p->next;
565 else
567 i = pthread_kill (p->id, GO_SIG_START);
568 if (i == 0)
569 p = p->next;
570 else
571 abort ();
575 i = pthread_mutex_unlock (&__go_thread_ids_lock);
576 __go_assert (i == 0);
579 /* Initialize the interaction between goroutines and the garbage
580 collector. */
582 void
583 __go_gc_goroutine_init (void *sp __attribute__ ((unused)))
585 struct __go_thread_id *list_entry;
586 int i;
587 sigset_t sset;
588 struct sigaction act;
590 /* Add the initial thread to the list of all threads. */
592 list_entry = malloc (sizeof (struct __go_thread_id));
593 list_entry->prev = NULL;
594 list_entry->next = NULL;
595 list_entry->tentative = 0;
596 list_entry->id = pthread_self ();
597 list_entry->m = m;
598 list_entry->pfn = NULL;
599 list_entry->arg = NULL;
600 __go_all_thread_ids = list_entry;
602 /* Initialize the semaphore which signals when threads are ready for
603 GC. */
605 i = sem_init (&__go_thread_ready_sem, 0, 0);
606 __go_assert (i == 0);
608 /* Fetch the current signal mask. */
610 i = sigemptyset (&sset);
611 __go_assert (i == 0);
612 i = sigprocmask (SIG_BLOCK, NULL, &sset);
613 __go_assert (i == 0);
615 /* Make sure that GO_SIG_START is not blocked and GO_SIG_STOP is
616 blocked, and save that set for use with later calls to sigsuspend
617 while waiting for GC to complete. */
619 i = sigdelset (&sset, GO_SIG_START);
620 __go_assert (i == 0);
621 i = sigaddset (&sset, GO_SIG_STOP);
622 __go_assert (i == 0);
623 __go_thread_wait_sigset = sset;
625 /* Block SIG_SET_START and unblock SIG_SET_STOP, and use that for
626 the process signal mask. */
628 i = sigaddset (&sset, GO_SIG_START);
629 __go_assert (i == 0);
630 i = sigdelset (&sset, GO_SIG_STOP);
631 __go_assert (i == 0);
632 i = sigprocmask (SIG_SETMASK, &sset, NULL);
633 __go_assert (i == 0);
635 /* Install the signal handlers. */
636 memset (&act, 0, sizeof act);
637 i = sigemptyset (&act.sa_mask);
638 __go_assert (i == 0);
640 act.sa_handler = gc_start_handler;
641 act.sa_flags = SA_RESTART;
642 i = sigaction (GO_SIG_START, &act, NULL);
643 __go_assert (i == 0);
645 /* We could consider using an alternate signal stack for this. The
646 function does not use much stack space, so it may be OK. */
647 act.sa_handler = gc_stop_handler;
648 i = sigaction (GO_SIG_STOP, &act, NULL);
649 __go_assert (i == 0);
651 #ifndef USING_SPLIT_STACK
652 /* If we don't support split stack, record the current stack as the
653 top of the stack. */
654 m->gc_sp = sp;
655 #endif