/* go-go.c -- the go function.

   Copyright 2009 The Go Authors. All rights reserved.
   Use of this source code is governed by a BSD-style
   license that can be found in the LICENSE file.  */
13 #include <semaphore.h>
16 #include "go-assert.h"
22 #ifdef USING_SPLIT_STACK
23 /* FIXME: This is not declared anywhere. */
24 extern void *__splitstack_find (void *, void *, size_t *, void **, void **,
28 /* We stop the threads by sending them the signal GO_SIG_STOP and we
29 start them by sending them the signal GO_SIG_START. */
31 #define GO_SIG_START (SIGRTMIN + 1)
32 #define GO_SIG_STOP (SIGRTMIN + 2)
38 /* A doubly linked list of the threads we have started. */
43 struct __go_thread_id
*prev
;
44 struct __go_thread_id
*next
;
45 /* True if the thread ID has not yet been filled in. */
49 /* Thread's M structure. */
51 /* If the thread ID has not been filled in, the function we are
54 /* If the thread ID has not been filled in, the argument to the
59 static struct __go_thread_id
*__go_all_thread_ids
;
61 /* A lock to control access to ALL_THREAD_IDS. */
63 static pthread_mutex_t __go_thread_ids_lock
= PTHREAD_MUTEX_INITIALIZER
;
65 /* A semaphore used to wait until all the threads have stopped. */
67 static sem_t __go_thread_ready_sem
;
69 /* A signal set used to wait until garbage collection is complete. */
71 static sigset_t __go_thread_wait_sigset
;
73 /* Remove the current thread from the list of threads. */
76 remove_current_thread (void)
78 struct __go_thread_id
*list_entry
;
82 list_entry
= m
->list_entry
;
85 i
= pthread_mutex_lock (&__go_thread_ids_lock
);
88 if (list_entry
->prev
!= NULL
)
89 list_entry
->prev
->next
= list_entry
->next
;
91 __go_all_thread_ids
= list_entry
->next
;
92 if (list_entry
->next
!= NULL
)
93 list_entry
->next
->prev
= list_entry
->prev
;
95 /* This will look runtime_mheap as needed. */
96 runtime_MCache_ReleaseAll (mcache
);
98 /* This should never deadlock--there shouldn't be any code that
99 holds the runtime_mheap lock when locking __go_thread_ids_lock.
100 We don't want to do this after releasing __go_thread_ids_lock
101 because it will mean that the garbage collector might run, and
102 the garbage collector does not try to lock runtime_mheap in all
103 cases since it knows it is running single-threaded. */
104 runtime_lock (&runtime_mheap
);
105 mstats
.heap_alloc
+= mcache
->local_alloc
;
106 mstats
.heap_objects
+= mcache
->local_objects
;
107 __builtin_memset (mcache
, 0, sizeof (struct MCache
));
108 runtime_FixAlloc_Free (&runtime_mheap
.cachealloc
, mcache
);
109 runtime_unlock (&runtime_mheap
);
111 /* As soon as we release this look, a GC could run. Since this
112 thread is no longer on the list, the GC will not find our M
113 structure, so it could get freed at any time. That means that
114 any code from here to thread exit must not assume that m is
118 i
= pthread_mutex_unlock (&__go_thread_ids_lock
);
119 __go_assert (i
== 0);
124 /* Start the thread. */
127 start_go_thread (void *thread_arg
)
129 struct M
*newm
= (struct M
*) thread_arg
;
130 void (*pfn
) (void *);
132 struct __go_thread_id
*list_entry
;
136 __wrap_rtems_task_variable_add ((void **) &m
);
137 __wrap_rtems_task_variable_add ((void **) &__go_panic_defer
);
142 list_entry
= newm
->list_entry
;
144 pfn
= list_entry
->pfn
;
145 arg
= list_entry
->arg
;
147 #ifndef USING_SPLIT_STACK
148 /* If we don't support split stack, record the current stack as the
149 top of the stack. There shouldn't be anything relevant to the
150 garbage collector above this point. */
151 m
->gc_sp
= (void *) &arg
;
154 /* Finish up the entry on the thread list. */
156 i
= pthread_mutex_lock (&__go_thread_ids_lock
);
157 __go_assert (i
== 0);
159 list_entry
->id
= pthread_self ();
160 list_entry
->pfn
= NULL
;
161 list_entry
->arg
= NULL
;
162 list_entry
->tentative
= 0;
164 i
= pthread_mutex_unlock (&__go_thread_ids_lock
);
165 __go_assert (i
== 0);
169 remove_current_thread ();
174 /* The runtime.Goexit function. */
176 void Goexit (void) asm ("libgo_runtime.runtime.Goexit");
181 remove_current_thread ();
186 /* Implement the go statement. */
189 __go_go (void (*pfn
) (void*), void *arg
)
194 struct __go_thread_id
*list_entry
;
197 i
= pthread_attr_init (&attr
);
198 __go_assert (i
== 0);
199 i
= pthread_attr_setdetachstate (&attr
, PTHREAD_CREATE_DETACHED
);
200 __go_assert (i
== 0);
202 #ifdef LINKER_SUPPORTS_SPLIT_STACK
203 /* The linker knows how to handle calls between code which uses
204 -fsplit-stack and code which does not. That means that we can
205 run with a smaller stack and rely on the -fsplit-stack support to
206 save us. The GNU/Linux glibc library won't let us have a very
207 small stack, but we make it as small as we can. */
208 #ifndef PTHREAD_STACK_MIN
209 #define PTHREAD_STACK_MIN 8192
211 i
= pthread_attr_setstacksize (&attr
, PTHREAD_STACK_MIN
);
212 __go_assert (i
== 0);
215 newm
= __go_alloc (sizeof (M
));
217 list_entry
= malloc (sizeof (struct __go_thread_id
));
218 list_entry
->prev
= NULL
;
219 list_entry
->next
= NULL
;
220 list_entry
->tentative
= 1;
221 list_entry
->m
= newm
;
222 list_entry
->pfn
= pfn
;
223 list_entry
->arg
= arg
;
225 newm
->list_entry
= list_entry
;
227 newm
->mcache
= runtime_allocmcache ();
229 /* Add the thread to the list of all threads, marked as tentative
230 since it is not yet ready to go. */
231 i
= pthread_mutex_lock (&__go_thread_ids_lock
);
232 __go_assert (i
== 0);
234 if (__go_all_thread_ids
!= NULL
)
235 __go_all_thread_ids
->prev
= list_entry
;
236 list_entry
->next
= __go_all_thread_ids
;
237 __go_all_thread_ids
= list_entry
;
239 i
= pthread_mutex_unlock (&__go_thread_ids_lock
);
240 __go_assert (i
== 0);
242 /* Start the thread. */
243 i
= pthread_create (&tid
, &attr
, start_go_thread
, newm
);
244 __go_assert (i
== 0);
246 i
= pthread_attr_destroy (&attr
);
247 __go_assert (i
== 0);
/* This is the signal handler for GO_SIG_START.  The garbage collector
   will send this signal to a thread when it wants the thread to
   start.  We don't have to actually do anything here, but we need a
   signal handler since ignoring the signal will mean that the
   sigsuspend will never see it.  */

static void
gc_start_handler (int sig __attribute__ ((unused)))
{
}
261 /* Tell the garbage collector that we are ready, and wait for the
262 garbage collector to tell us that it is done. This may be called
263 by a signal handler, so it is restricted to using functions which
264 are async cancel safe. */
271 /* Tell the garbage collector about our stack. */
272 #ifdef USING_SPLIT_STACK
273 m
->gc_sp
= __splitstack_find (NULL
, NULL
, &m
->gc_len
,
274 &m
->gc_next_segment
, &m
->gc_next_sp
,
278 uintptr_t top
= (uintptr_t) m
->gc_sp
;
279 uintptr_t bottom
= (uintptr_t) &top
;
282 m
->gc_next_sp
= m
->gc_sp
;
283 m
->gc_len
= bottom
- top
;
287 m
->gc_next_sp
= (void *) bottom
;
288 m
->gc_len
= top
- bottom
;
293 /* FIXME: Perhaps we should just move __go_panic_defer into M. */
294 m
->gc_panic_defer
= __go_panic_defer
;
296 /* Tell the garbage collector that we are ready by posting to the
298 i
= sem_post (&__go_thread_ready_sem
);
299 __go_assert (i
== 0);
301 /* Wait for the garbage collector to tell us to continue. */
302 sigsuspend (&__go_thread_wait_sigset
);
305 /* This is the signal handler for GO_SIG_STOP. The garbage collector
306 will send this signal to a thread when it wants the thread to
310 gc_stop_handler (int sig
__attribute__ ((unused
)))
314 if (__sync_bool_compare_and_swap (&pm
->holds_finlock
, 1, 1))
316 /* We can't interrupt the thread while it holds the finalizer
317 lock. Otherwise we can get into a deadlock when mark calls
318 runtime_walkfintab. */
319 __sync_bool_compare_and_swap (&pm
->gcing_for_finlock
, 0, 1);
323 if (__sync_bool_compare_and_swap (&pm
->mallocing
, 1, 1))
325 /* m->mallocing was already non-zero. We can't interrupt the
326 thread while it is running an malloc. Instead, tell it to
327 call back to us when done. */
328 __sync_bool_compare_and_swap (&pm
->gcing
, 0, 1);
332 if (__sync_bool_compare_and_swap (&pm
->nomemprof
, 1, 1))
334 /* Similarly, we can't interrupt the thread while it is building
335 profiling information. Otherwise we can get into a deadlock
336 when sweepspan calls MProf_Free. */
337 __sync_bool_compare_and_swap (&pm
->gcing_for_prof
, 0, 1);
/* This is called by malloc when it gets a signal during the malloc
   call itself, once it is safe to stop for garbage collection.  */

int
__go_run_goroutine_gc (int r)
{
  /* Force callee-saved registers to be saved on the stack.  This is
     not needed if we are invoked from the signal handler, but it is
     needed if we are called directly, since otherwise we might miss
     something that a function somewhere up the call stack is holding
     in a register.  */
  __builtin_unwind_init ();

  stop_for_gc ();

  /* This avoids tail recursion, to make sure that the saved registers
     are on the stack.  */
  return r;
}
364 /* Stop all the other threads for garbage collection. */
367 runtime_stoptheworld (void)
372 struct __go_thread_id
*p
;
374 i
= pthread_mutex_lock (&__go_thread_ids_lock
);
375 __go_assert (i
== 0);
377 me
= pthread_self ();
379 p
= __go_all_thread_ids
;
382 if (p
->tentative
|| pthread_equal (me
, p
->id
))
386 i
= pthread_kill (p
->id
, GO_SIG_STOP
);
394 struct __go_thread_id
*next
;
396 /* This thread died somehow. Remove it from the
400 p
->prev
->next
= next
;
402 __go_all_thread_ids
= next
;
404 next
->prev
= p
->prev
;
413 /* Wait for each thread to receive the signal and post to the
414 semaphore. If a thread receives the signal but contrives to die
415 before it posts to the semaphore, then we will hang forever
420 i
= sem_wait (&__go_thread_ready_sem
);
421 if (i
< 0 && errno
== EINTR
)
423 __go_assert (i
== 0);
427 /* The gc_panic_defer field should now be set for all M's except the
428 one in this thread. Set this one now. */
429 m
->gc_panic_defer
= __go_panic_defer
;
431 /* Leave with __go_thread_ids_lock held. */
434 /* Scan all the stacks for garbage collection. This should be called
435 with __go_thread_ids_lock held. */
438 __go_scanstacks (void (*scan
) (byte
*, int64
))
441 struct __go_thread_id
*p
;
443 /* Make sure all the registers for this thread are on the stack. */
444 __builtin_unwind_init ();
446 me
= pthread_self ();
447 for (p
= __go_all_thread_ids
; p
!= NULL
; p
= p
->next
)
451 /* The goroutine function and argument can be allocated on
452 the heap, so we have to scan them for a thread that has
454 scan ((void *) &p
->pfn
, sizeof (void *));
455 scan ((void *) &p
->arg
, sizeof (void *));
456 scan ((void *) &p
->m
, sizeof (void *));
460 #ifdef USING_SPLIT_STACK
468 if (pthread_equal (me
, p
->id
))
473 sp
= __splitstack_find (NULL
, NULL
, &len
, &next_segment
,
474 &next_sp
, &initial_sp
);
480 next_segment
= p
->m
->gc_next_segment
;
481 next_sp
= p
->m
->gc_next_sp
;
482 initial_sp
= p
->m
->gc_initial_sp
;
488 sp
= __splitstack_find (next_segment
, next_sp
, &len
,
489 &next_segment
, &next_sp
, &initial_sp
);
492 #else /* !defined(USING_SPLIT_STACK) */
494 if (pthread_equal (me
, p
->id
))
496 uintptr_t top
= (uintptr_t) m
->gc_sp
;
497 uintptr_t bottom
= (uintptr_t) &top
;
499 scan (m
->gc_sp
, bottom
- top
);
501 scan ((void *) bottom
, top
- bottom
);
505 scan (p
->m
->gc_next_sp
, p
->m
->gc_len
);
508 #endif /* !defined(USING_SPLIT_STACK) */
510 /* Also scan the M structure while we're at it. */
512 scan ((void *) &p
->m
, sizeof (void *));
516 /* Release all the memory caches. This is called with
517 __go_thread_ids_lock held. */
520 __go_stealcache (void)
522 struct __go_thread_id
*p
;
524 for (p
= __go_all_thread_ids
; p
!= NULL
; p
= p
->next
)
525 runtime_MCache_ReleaseAll (p
->m
->mcache
);
528 /* Gather memory cache statistics. This is called with
529 __go_thread_ids_lock held. */
532 __go_cachestats (void)
534 struct __go_thread_id
*p
;
536 for (p
= __go_all_thread_ids
; p
!= NULL
; p
= p
->next
)
541 mstats
.heap_alloc
+= c
->local_alloc
;
543 mstats
.heap_objects
+= c
->local_objects
;
544 c
->local_objects
= 0;
548 /* Start the other threads after garbage collection. */
551 runtime_starttheworld (void)
555 struct __go_thread_id
*p
;
557 /* Here __go_thread_ids_lock should be held. */
559 me
= pthread_self ();
560 p
= __go_all_thread_ids
;
563 if (p
->tentative
|| pthread_equal (me
, p
->id
))
567 i
= pthread_kill (p
->id
, GO_SIG_START
);
575 i
= pthread_mutex_unlock (&__go_thread_ids_lock
);
576 __go_assert (i
== 0);
579 /* Initialize the interaction between goroutines and the garbage
583 __go_gc_goroutine_init (void *sp
__attribute__ ((unused
)))
585 struct __go_thread_id
*list_entry
;
588 struct sigaction act
;
590 /* Add the initial thread to the list of all threads. */
592 list_entry
= malloc (sizeof (struct __go_thread_id
));
593 list_entry
->prev
= NULL
;
594 list_entry
->next
= NULL
;
595 list_entry
->tentative
= 0;
596 list_entry
->id
= pthread_self ();
598 list_entry
->pfn
= NULL
;
599 list_entry
->arg
= NULL
;
600 __go_all_thread_ids
= list_entry
;
602 /* Initialize the semaphore which signals when threads are ready for
605 i
= sem_init (&__go_thread_ready_sem
, 0, 0);
606 __go_assert (i
== 0);
608 /* Fetch the current signal mask. */
610 i
= sigemptyset (&sset
);
611 __go_assert (i
== 0);
612 i
= sigprocmask (SIG_BLOCK
, NULL
, &sset
);
613 __go_assert (i
== 0);
615 /* Make sure that GO_SIG_START is not blocked and GO_SIG_STOP is
616 blocked, and save that set for use with later calls to sigsuspend
617 while waiting for GC to complete. */
619 i
= sigdelset (&sset
, GO_SIG_START
);
620 __go_assert (i
== 0);
621 i
= sigaddset (&sset
, GO_SIG_STOP
);
622 __go_assert (i
== 0);
623 __go_thread_wait_sigset
= sset
;
625 /* Block SIG_SET_START and unblock SIG_SET_STOP, and use that for
626 the process signal mask. */
628 i
= sigaddset (&sset
, GO_SIG_START
);
629 __go_assert (i
== 0);
630 i
= sigdelset (&sset
, GO_SIG_STOP
);
631 __go_assert (i
== 0);
632 i
= sigprocmask (SIG_SETMASK
, &sset
, NULL
);
633 __go_assert (i
== 0);
635 /* Install the signal handlers. */
636 memset (&act
, 0, sizeof act
);
637 i
= sigemptyset (&act
.sa_mask
);
638 __go_assert (i
== 0);
640 act
.sa_handler
= gc_start_handler
;
641 act
.sa_flags
= SA_RESTART
;
642 i
= sigaction (GO_SIG_START
, &act
, NULL
);
643 __go_assert (i
== 0);
645 /* We could consider using an alternate signal stack for this. The
646 function does not use much stack space, so it may be OK. */
647 act
.sa_handler
= gc_stop_handler
;
648 i
= sigaction (GO_SIG_STOP
, &act
, NULL
);
649 __go_assert (i
== 0);
651 #ifndef USING_SPLIT_STACK
652 /* If we don't support split stack, record the current stack as the