Fix PR47707
[official-gcc.git] / libgo / runtime / go-go.c
blob 8c2de2877cf0936862dab99b33e1895112d0b094
/* go-go.c -- the go function.

   Copyright 2009 The Go Authors. All rights reserved.
   Use of this source code is governed by a BSD-style
   license that can be found in the LICENSE file.  */
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include <semaphore.h>

#include "config.h"
#include "go-assert.h"
#include "go-panic.h"
#include "go-alloc.h"
#include "runtime.h"
#include "malloc.h"
#ifdef USING_SPLIT_STACK

/* FIXME: This is not declared anywhere.  */
extern void *__splitstack_find (void *, void *, size_t *, void **, void **,
                                void **);

#endif
/* We stop the threads by sending them the signal GO_SIG_STOP and we
   start them by sending them the signal GO_SIG_START.  */

#define GO_SIG_START (SIGRTMIN + 1)
#define GO_SIG_STOP (SIGRTMIN + 2)
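/* Note: these are POSIX real-time signals, chosen because the low
   classic signal numbers all carry conventional meanings, and because
   glibc adjusts SIGRTMIN past the signals it reserves for its own
   (NPTL) use, so SIGRTMIN + 1 and SIGRTMIN + 2 should be free for the
   runtime to claim.  */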
#ifndef SA_RESTART
#define SA_RESTART 0
#endif
/* A doubly linked list of the threads we have started.  */

struct __go_thread_id
{
  /* Links.  */
  struct __go_thread_id *prev;
  struct __go_thread_id *next;
  /* True if the thread ID has not yet been filled in.  */
  _Bool tentative;
  /* Thread ID.  */
  pthread_t id;
  /* Thread's M structure.  */
  struct M *m;
  /* If the thread ID has not been filled in, the function we are
     running.  */
  void (*pfn) (void *);
  /* If the thread ID has not been filled in, the argument to the
     function.  */
  void *arg;
};
static struct __go_thread_id *__go_all_thread_ids;

/* A lock to control access to ALL_THREAD_IDS.  */

static pthread_mutex_t __go_thread_ids_lock = PTHREAD_MUTEX_INITIALIZER;

/* A semaphore used to wait until all the threads have stopped.  */

static sem_t __go_thread_ready_sem;

/* A signal set used to wait until garbage collection is complete.  */

static sigset_t __go_thread_wait_sigset;
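/* Overview of the stop-the-world protocol implemented below: the
   garbage collector takes __go_thread_ids_lock, sends GO_SIG_STOP to
   every other thread on the list, and counts the acknowledgements
   that each thread posts to __go_thread_ready_sem from its signal
   handler.  Each stopped thread then blocks in sigsuspend using
   __go_thread_wait_sigset until the collector sends GO_SIG_START.  */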
/* Remove the current thread from the list of threads.  */

static void
remove_current_thread (void)
{
  struct __go_thread_id *list_entry;
  MCache *mcache;
  int i;

  list_entry = m->list_entry;
  mcache = m->mcache;

  i = pthread_mutex_lock (&__go_thread_ids_lock);
  __go_assert (i == 0);

  if (list_entry->prev != NULL)
    list_entry->prev->next = list_entry->next;
  else
    __go_all_thread_ids = list_entry->next;
  if (list_entry->next != NULL)
    list_entry->next->prev = list_entry->prev;

  runtime_MCache_ReleaseAll (mcache);

  /* As soon as we release this lock, a GC could run.  Since this
     thread is no longer on the list, the GC will not find our M
     structure, so it could get freed at any time.  That means that
     any code from here to thread exit must not assume that m is
     valid.  */
  m = NULL;

  i = pthread_mutex_unlock (&__go_thread_ids_lock);
  __go_assert (i == 0);
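  /* From here on we go through the local mcache copy saved above: m
     itself has been cleared, and per the comment above the M
     structure must not be assumed to still be valid.  */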
  runtime_lock (&runtime_mheap);
  mstats.heap_alloc += mcache->local_alloc;
  mstats.heap_objects += mcache->local_objects;
  __builtin_memset (mcache, 0, sizeof (struct MCache));
  runtime_FixAlloc_Free (&runtime_mheap.cachealloc, mcache);
  runtime_unlock (&runtime_mheap);

  free (list_entry);
}
/* Start the thread.  */

static void *
start_go_thread (void *thread_arg)
{
  struct M *newm = (struct M *) thread_arg;
  void (*pfn) (void *);
  void *arg;
  struct __go_thread_id *list_entry;
  int i;

#ifdef __rtems__
  __wrap_rtems_task_variable_add ((void **) &m);
  __wrap_rtems_task_variable_add ((void **) &__go_panic_defer);
#endif

  m = newm;

  list_entry = newm->list_entry;

  pfn = list_entry->pfn;
  arg = list_entry->arg;

#ifndef USING_SPLIT_STACK
  /* If we don't support split stack, record the current stack as the
     top of the stack.  There shouldn't be anything relevant to the
     garbage collector above this point.  */
  m->gc_sp = (void *) &arg;
#endif

  /* Finish up the entry on the thread list.  */

  i = pthread_mutex_lock (&__go_thread_ids_lock);
  __go_assert (i == 0);

  list_entry->id = pthread_self ();
  list_entry->pfn = NULL;
  list_entry->arg = NULL;
  list_entry->tentative = 0;

  i = pthread_mutex_unlock (&__go_thread_ids_lock);
  __go_assert (i == 0);

  (*pfn) (arg);

  remove_current_thread ();

  return NULL;
}
/* The runtime.Goexit function.  */

void Goexit (void) asm ("libgo_runtime.runtime.Goexit");

void
Goexit (void)
{
  remove_current_thread ();
  pthread_exit (NULL);
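  /* pthread_exit does not return; abort is just a backstop in case it
     somehow does.  */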
  abort ();
}
/* Implement the go statement.  */

void
__go_go (void (*pfn) (void*), void *arg)
{
  int i;
  pthread_attr_t attr;
  struct M *newm;
  struct __go_thread_id *list_entry;
  pthread_t tid;

  i = pthread_attr_init (&attr);
  __go_assert (i == 0);
  i = pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
  __go_assert (i == 0);

#ifdef LINKER_SUPPORTS_SPLIT_STACK
  /* The linker knows how to handle calls between code which uses
     -fsplit-stack and code which does not.  That means that we can
     run with a smaller stack and rely on the -fsplit-stack support to
     save us.  The GNU/Linux glibc library won't let us have a very
     small stack, but we make it as small as we can.  */
#ifndef PTHREAD_STACK_MIN
#define PTHREAD_STACK_MIN 8192
#endif
  i = pthread_attr_setstacksize (&attr, PTHREAD_STACK_MIN);
  __go_assert (i == 0);
#endif

  newm = __go_alloc (sizeof (M));

  list_entry = malloc (sizeof (struct __go_thread_id));
  list_entry->prev = NULL;
  list_entry->next = NULL;
  list_entry->tentative = 1;
  list_entry->m = newm;
  list_entry->pfn = pfn;
  list_entry->arg = arg;

  newm->list_entry = list_entry;

  newm->mcache = runtime_allocmcache ();

  /* Add the thread to the list of all threads, marked as tentative
     since it is not yet ready to go.  */
  i = pthread_mutex_lock (&__go_thread_ids_lock);
  __go_assert (i == 0);

  if (__go_all_thread_ids != NULL)
    __go_all_thread_ids->prev = list_entry;
  list_entry->next = __go_all_thread_ids;
  __go_all_thread_ids = list_entry;

  i = pthread_mutex_unlock (&__go_thread_ids_lock);
  __go_assert (i == 0);

  /* Start the thread.  */
  i = pthread_create (&tid, &attr, start_go_thread, newm);
  __go_assert (i == 0);

  i = pthread_attr_destroy (&attr);
  __go_assert (i == 0);
}
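/* For illustration only (the exact lowering is the compiler's
   business, and the names below are made up): a statement such as
   `go f(x)` is compiled into roughly

       struct __go_f_args { T x; };
       struct __go_f_args *a = __go_alloc (sizeof *a);
       a->x = x;
       __go_go (__go_f_thunk, a);

   where __go_f_thunk casts its void* argument back to
   struct __go_f_args and calls f with the unpacked values.  */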
/* This is the signal handler for GO_SIG_START.  The garbage collector
   will send this signal to a thread when it wants the thread to
   start.  We don't have to actually do anything here, but we need a
   signal handler since ignoring the signal will mean that the
   sigsuspend will never see it.  */

static void
gc_start_handler (int sig __attribute__ ((unused)))
{
}
/* Tell the garbage collector that we are ready, and wait for the
   garbage collector to tell us that it is done.  This may be called
   by a signal handler, so it is restricted to using functions which
   are async cancel safe.  */

static void
stop_for_gc (void)
{
  int i;

  /* Tell the garbage collector about our stack.  */
#ifdef USING_SPLIT_STACK
  m->gc_sp = __splitstack_find (NULL, NULL, &m->gc_len,
                                &m->gc_next_segment, &m->gc_next_sp,
                                &m->gc_initial_sp);
#else
  {
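    /* Without split stacks we only know the stack address recorded
       when the thread started (gc_sp) and the address of a current
       local (bottom); comparing the two tells us which way the stack
       grows, and therefore which address is the start of the region
       and how long it is.  */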
    uintptr_t top = (uintptr_t) m->gc_sp;
    uintptr_t bottom = (uintptr_t) &top;
    if (top < bottom)
      {
        m->gc_next_sp = m->gc_sp;
        m->gc_len = bottom - top;
      }
    else
      {
        m->gc_next_sp = (void *) bottom;
        m->gc_len = top - bottom;
      }
  }
#endif
  /* FIXME: Perhaps we should just move __go_panic_defer into M.  */
  m->gc_panic_defer = __go_panic_defer;

  /* Tell the garbage collector that we are ready by posting to the
     semaphore.  */
  i = sem_post (&__go_thread_ready_sem);
  __go_assert (i == 0);

  /* Wait for the garbage collector to tell us to continue.  */
  sigsuspend (&__go_thread_wait_sigset);
}
/* This is the signal handler for GO_SIG_STOP.  The garbage collector
   will send this signal to a thread when it wants the thread to
   stop.  */
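/* Note on the idiom below: __sync_bool_compare_and_swap (&X, 1, 1)
   succeeds, changing nothing, exactly when X is already 1, so it acts
   as an atomic read of flag X with a full memory barrier.  Likewise
   the (&X, 0, 1) form atomically sets X only if it was 0.  */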
static void
gc_stop_handler (int sig __attribute__ ((unused)))
{
  struct M *pm = m;

  if (__sync_bool_compare_and_swap (&pm->holds_finlock, 1, 1))
    {
      /* We can't interrupt the thread while it holds the finalizer
         lock.  Otherwise we can get into a deadlock when mark calls
         runtime_walkfintab.  */
      __sync_bool_compare_and_swap (&pm->gcing_for_finlock, 0, 1);
      return;
    }

  if (__sync_bool_compare_and_swap (&pm->mallocing, 1, 1))
    {
      /* m->mallocing was already non-zero.  We can't interrupt the
         thread while it is running malloc.  Instead, tell it to
         call back to us when done.  */
      __sync_bool_compare_and_swap (&pm->gcing, 0, 1);
      return;
    }

  if (__sync_bool_compare_and_swap (&pm->nomemprof, 1, 1))
    {
      /* Similarly, we can't interrupt the thread while it is building
         profiling information.  Otherwise we can get into a deadlock
         when sweepspan calls MProf_Free.  */
      __sync_bool_compare_and_swap (&pm->gcing_for_prof, 0, 1);
      return;
    }

  stop_for_gc ();
}
/* This is called by malloc when it gets a signal during the malloc
   call itself.  */

int
__go_run_goroutine_gc (int r)
{
  /* Force callee-saved registers to be saved on the stack.  This is
     not needed if we are invoked from the signal handler, but it is
     needed if we are called directly, since otherwise we might miss
     something that a function somewhere up the call stack is holding
     in a register.  */
  __builtin_unwind_init ();

  stop_for_gc ();

  /* This avoids tail recursion, to make sure that the saved registers
     are on the stack.  */
  return r;
}
/* Stop all the other threads for garbage collection.  */

void
runtime_stoptheworld (void)
{
  int i;
  pthread_t me;
  int c;
  struct __go_thread_id *p;

  i = pthread_mutex_lock (&__go_thread_ids_lock);
  __go_assert (i == 0);

  me = pthread_self ();
  c = 0;
  p = __go_all_thread_ids;
  while (p != NULL)
    {
      if (p->tentative || pthread_equal (me, p->id))
        p = p->next;
      else
        {
          i = pthread_kill (p->id, GO_SIG_STOP);
          if (i == 0)
            {
              ++c;
              p = p->next;
            }
          else if (i == ESRCH)
            {
              struct __go_thread_id *next;

              /* This thread died somehow.  Remove it from the
                 list.  */
              next = p->next;
              if (p->prev != NULL)
                p->prev->next = next;
              else
                __go_all_thread_ids = next;
              if (next != NULL)
                next->prev = p->prev;
              free (p);
              p = next;
            }
          else
            abort ();
        }
    }

  /* Wait for each thread to receive the signal and post to the
     semaphore.  If a thread receives the signal but contrives to die
     before it posts to the semaphore, then we will hang forever
     here.  */

  while (c > 0)
    {
      i = sem_wait (&__go_thread_ready_sem);
      if (i < 0 && errno == EINTR)
        continue;
      __go_assert (i == 0);
      --c;
    }

  /* The gc_panic_defer field should now be set for all M's except the
     one in this thread.  Set this one now.  */
  m->gc_panic_defer = __go_panic_defer;

  /* Leave with __go_thread_ids_lock held.  */
}
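/* runtime_stoptheworld returns with __go_thread_ids_lock held; the
   matching unlock is at the end of runtime_starttheworld below.
   __go_scanstacks, __go_stealcache, and __go_cachestats all run
   between the two, under that lock.  */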
/* Scan all the stacks for garbage collection.  This should be called
   with __go_thread_ids_lock held.  */

void
__go_scanstacks (void (*scan) (byte *, int64))
{
  pthread_t me;
  struct __go_thread_id *p;

  /* Make sure all the registers for this thread are on the stack.  */
  __builtin_unwind_init ();

  me = pthread_self ();
  for (p = __go_all_thread_ids; p != NULL; p = p->next)
    {
      if (p->tentative)
        {
          /* The goroutine function and argument can be allocated on
             the heap, so we have to scan them for a thread that has
             not yet started.  */
          scan ((void *) &p->pfn, sizeof (void *));
          scan ((void *) &p->arg, sizeof (void *));
          scan ((void *) &p->m, sizeof (void *));
          continue;
        }

#ifdef USING_SPLIT_STACK

      {
        void *sp;
        size_t len;
        void *next_segment;
        void *next_sp;
        void *initial_sp;

        if (pthread_equal (me, p->id))
          {
            next_segment = NULL;
            next_sp = NULL;
            initial_sp = NULL;
            sp = __splitstack_find (NULL, NULL, &len, &next_segment,
                                    &next_sp, &initial_sp);
          }
        else
          {
            sp = p->m->gc_sp;
            len = p->m->gc_len;
            next_segment = p->m->gc_next_segment;
            next_sp = p->m->gc_next_sp;
            initial_sp = p->m->gc_initial_sp;
          }

        while (sp != NULL)
          {
            scan (sp, len);
            sp = __splitstack_find (next_segment, next_sp, &len,
                                    &next_segment, &next_sp, &initial_sp);
          }
      }

#else /* !defined(USING_SPLIT_STACK) */

      if (pthread_equal (me, p->id))
        {
          uintptr_t top = (uintptr_t) m->gc_sp;
          uintptr_t bottom = (uintptr_t) &top;
          if (top < bottom)
            scan (m->gc_sp, bottom - top);
          else
            scan ((void *) bottom, top - bottom);
        }
      else
        scan (p->m->gc_next_sp, p->m->gc_len);

#endif /* !defined(USING_SPLIT_STACK) */

      /* Also scan the M structure while we're at it.  */

      scan ((void *) &p->m, sizeof (void *));
    }
}
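/* The scan callback is supplied by the collector; it treats each
   pointer-aligned word in the range it is handed as a potential
   pointer, which is why raw stack memory and the M structure can be
   passed to it directly.  */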
/* Release all the memory caches.  This is called with
   __go_thread_ids_lock held.  */

void
__go_stealcache (void)
{
  struct __go_thread_id *p;

  for (p = __go_all_thread_ids; p != NULL; p = p->next)
    runtime_MCache_ReleaseAll (p->m->mcache);
}
/* Gather memory cache statistics.  This is called with
   __go_thread_ids_lock held.  */

void
__go_cachestats (void)
{
  struct __go_thread_id *p;

  for (p = __go_all_thread_ids; p != NULL; p = p->next)
    {
      MCache *c;

      c = p->m->mcache;
      mstats.heap_alloc += c->local_alloc;
      c->local_alloc = 0;
      mstats.heap_objects += c->local_objects;
      c->local_objects = 0;
    }
}
/* Start the other threads after garbage collection.  */

void
runtime_starttheworld (void)
{
  int i;
  pthread_t me;
  struct __go_thread_id *p;

  /* Here __go_thread_ids_lock should be held.  */

  me = pthread_self ();
  p = __go_all_thread_ids;
  while (p != NULL)
    {
      if (p->tentative || pthread_equal (me, p->id))
        p = p->next;
      else
        {
          i = pthread_kill (p->id, GO_SIG_START);
          if (i == 0)
            p = p->next;
          else
            abort ();
        }
    }

  i = pthread_mutex_unlock (&__go_thread_ids_lock);
  __go_assert (i == 0);
}
/* Initialize the interaction between goroutines and the garbage
   collector.  */

void
__go_gc_goroutine_init (void *sp __attribute__ ((unused)))
{
  struct __go_thread_id *list_entry;
  int i;
  sigset_t sset;
  struct sigaction act;

  /* Add the initial thread to the list of all threads.  */

  list_entry = malloc (sizeof (struct __go_thread_id));
  list_entry->prev = NULL;
  list_entry->next = NULL;
  list_entry->tentative = 0;
  list_entry->id = pthread_self ();
  list_entry->m = m;
  list_entry->pfn = NULL;
  list_entry->arg = NULL;
  __go_all_thread_ids = list_entry;

  /* Initialize the semaphore which signals when threads are ready for
     GC.  */

  i = sem_init (&__go_thread_ready_sem, 0, 0);
  __go_assert (i == 0);

  /* Fetch the current signal mask.  */

  i = sigemptyset (&sset);
  __go_assert (i == 0);
  i = sigprocmask (SIG_BLOCK, NULL, &sset);
  __go_assert (i == 0);

  /* Make sure that GO_SIG_START is not blocked and GO_SIG_STOP is
     blocked, and save that set for use with later calls to sigsuspend
     while waiting for GC to complete.  */

  i = sigdelset (&sset, GO_SIG_START);
  __go_assert (i == 0);
  i = sigaddset (&sset, GO_SIG_STOP);
  __go_assert (i == 0);
  __go_thread_wait_sigset = sset;

  /* Block GO_SIG_START and unblock GO_SIG_STOP, and use that for
     the process signal mask.  */

  i = sigaddset (&sset, GO_SIG_START);
  __go_assert (i == 0);
  i = sigdelset (&sset, GO_SIG_STOP);
  __go_assert (i == 0);
  i = sigprocmask (SIG_SETMASK, &sset, NULL);
  __go_assert (i == 0);

  /* Install the signal handlers.  */
  memset (&act, 0, sizeof act);
  i = sigemptyset (&act.sa_mask);
  __go_assert (i == 0);

  act.sa_handler = gc_start_handler;
  act.sa_flags = SA_RESTART;
  i = sigaction (GO_SIG_START, &act, NULL);
  __go_assert (i == 0);

  /* We could consider using an alternate signal stack for this.  The
     function does not use much stack space, so it may be OK.  */
  act.sa_handler = gc_stop_handler;
  i = sigaction (GO_SIG_STOP, &act, NULL);
  __go_assert (i == 0);

#ifndef USING_SPLIT_STACK
  /* If we don't support split stack, record the current stack as the
     top of the stack.  */
  m->gc_sp = sp;
#endif
}
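/* __go_gc_goroutine_init is expected to be called once at program
   startup, before any go statement runs, with SP pointing into the
   initial thread's stack; in libgo the call is made from the main
   startup code (see go-main.c).  */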