Fix numerous typos in comments
[official-gcc.git] / libgo / runtime / proc.c
blob 06a9c2ad6b84f1cb429d513313b1931522ce6fc1
1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 #include <errno.h>
6 #include <limits.h>
7 #include <signal.h>
8 #include <stdlib.h>
9 #include <pthread.h>
10 #include <unistd.h>
12 #include "config.h"
14 #ifdef HAVE_DL_ITERATE_PHDR
15 #include <link.h>
16 #endif
18 #include "runtime.h"
19 #include "arch.h"
20 #include "defs.h"
21 #include "malloc.h"
22 #include "go-type.h"
24 #ifdef USING_SPLIT_STACK
26 /* FIXME: These are not declared anywhere. */
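/* These hooks are provided by libgcc's split-stack support
   (generic-morestack.c): they save, restore, create, and reset the
   split-stack context associated with a goroutine's stack.  */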
28 extern void __splitstack_getcontext(void *context[10]);
30 extern void __splitstack_setcontext(void *context[10]);
32 extern void *__splitstack_makecontext(size_t, void *context[10], size_t *);
34 extern void * __splitstack_resetcontext(void *context[10], size_t *);
36 extern void *__splitstack_find(void *, void *, size_t *, void **, void **,
37 void **);
39 extern void __splitstack_block_signals (int *, int *);
41 extern void __splitstack_block_signals_context (void *context[10], int *,
42 int *);
44 #endif
46 #ifndef PTHREAD_STACK_MIN
47 # define PTHREAD_STACK_MIN 8192
48 #endif
50 #if defined(USING_SPLIT_STACK) && defined(LINKER_SUPPORTS_SPLIT_STACK)
51 # define StackMin PTHREAD_STACK_MIN
52 #else
53 # define StackMin ((sizeof(char *) < 8) ? 2 * 1024 * 1024 : 4 * 1024 * 1024)
54 #endif
56 uintptr runtime_stacks_sys;
58 static void gtraceback(G*);
60 #ifdef __rtems__
61 #define __thread
62 #endif
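// g points to the current goroutine; it lives in thread-local storage
// so that each OS thread sees its own value.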
64 static __thread G *g;
66 #ifndef SETCONTEXT_CLOBBERS_TLS
68 static inline void
69 initcontext(void)
73 static inline void
74 fixcontext(ucontext_t *c __attribute__ ((unused)))
78 #else
80 # if defined(__x86_64__) && defined(__sun__)
82 // x86_64 Solaris 10 and 11 have a bug: setcontext switches the %fs
83 // register to that of the thread which called getcontext. The effect
84 // is that the address of all __thread variables changes. This bug
85 // also affects pthread_self() and pthread_getspecific. We work
86 // around it by clobbering the context field directly to keep %fs the
87 // same.
89 static __thread greg_t fs;
91 static inline void
92 initcontext(void)
94 ucontext_t c;
96 getcontext(&c);
97 fs = c.uc_mcontext.gregs[REG_FSBASE];
100 static inline void
101 fixcontext(ucontext_t* c)
103 c->uc_mcontext.gregs[REG_FSBASE] = fs;
106 # elif defined(__NetBSD__)
108 // NetBSD has a bug: setcontext clobbers tlsbase; we need to save
109 // and restore it ourselves.
111 static __thread __greg_t tlsbase;
113 static inline void
114 initcontext(void)
116 ucontext_t c;
118 getcontext(&c);
119 tlsbase = c.uc_mcontext._mc_tlsbase;
122 static inline void
123 fixcontext(ucontext_t* c)
125 c->uc_mcontext._mc_tlsbase = tlsbase;
128 # elif defined(__sparc__)
130 static inline void
131 initcontext(void)
135 static inline void
136 fixcontext(ucontext_t *c)
138 /* ??? Using
139 register unsigned long thread __asm__("%g7");
140 c->uc_mcontext.gregs[REG_G7] = thread;
141 results in
142 error: variable ‘thread’ might be clobbered by \
143 ‘longjmp’ or ‘vfork’ [-Werror=clobbered]
144 which ought to be false, as %g7 is a fixed register. */
146 if (sizeof (c->uc_mcontext.gregs[REG_G7]) == 8)
147 asm ("stx %%g7, %0" : "=m"(c->uc_mcontext.gregs[REG_G7]));
148 else
149 asm ("st %%g7, %0" : "=m"(c->uc_mcontext.gregs[REG_G7]));
152 # else
154 # error unknown case for SETCONTEXT_CLOBBERS_TLS
156 # endif
158 #endif
160 // ucontext_arg returns a properly aligned ucontext_t value. On some
161 // systems a ucontext_t value must be aligned to a 16-byte boundary.
162 // The g structure that has fields of type ucontext_t is defined in
163 // Go, and Go has no simple way to align a field to such a boundary.
164 // So we make the field larger in runtime2.go and pick an appropriate
165 // offset within the field here.
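// For example, if __alignof__(ucontext_t) is 16 and the field starts at
// an address ending in 0x8, the mask computation below rounds it up to
// the next address ending in 0x0.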
166 static ucontext_t*
167 ucontext_arg(void** go_ucontext)
169 uintptr_t p = (uintptr_t)go_ucontext;
170 size_t align = __alignof__(ucontext_t);
171 if(align > 16) {
172 // We only ensured space for up to a 16-byte alignment
173 // in libgo/go/runtime/runtime2.go.
174 runtime_throw("required alignment of ucontext_t too large");
176 p = (p + align - 1) &~ (uintptr_t)(align - 1);
177 return (ucontext_t*)p;
180 // We can not always refer to the TLS variables directly. The
181 // compiler will call tls_get_addr to get the address of the variable,
182 // and it may hold it in a register across a call to schedule. When
183 // we get back from the call we may be running in a different thread,
184 // in which case the register now points to the TLS variable for a
185 // different thread. We use non-inlinable functions to avoid this
186 // when necessary.
188 G* runtime_g(void) __attribute__ ((noinline, no_split_stack));
191 runtime_g(void)
193 return g;
196 M* runtime_m(void) __attribute__ ((noinline, no_split_stack));
199 runtime_m(void)
201 if(g == nil)
202 return nil;
203 return g->m;
206 // Set g.
207 void
208 runtime_setg(G* gp)
210 g = gp;
213 // Start a new thread.
214 static void
215 runtime_newosproc(M *mp)
217 pthread_attr_t attr;
218 sigset_t clear, old;
219 pthread_t tid;
220 int tries;
221 int ret;
223 if(pthread_attr_init(&attr) != 0)
224 runtime_throw("pthread_attr_init");
225 if(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) != 0)
226 runtime_throw("pthread_attr_setdetachstate");
228 // Block signals during pthread_create so that the new thread
229 // starts with signals disabled. It will enable them in minit.
230 sigfillset(&clear);
232 #ifdef SIGTRAP
233 // Blocking SIGTRAP reportedly breaks gdb on Alpha GNU/Linux.
234 sigdelset(&clear, SIGTRAP);
235 #endif
237 sigemptyset(&old);
238 pthread_sigmask(SIG_BLOCK, &clear, &old);
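// pthread_create can fail transiently with EAGAIN when the system is
// short on resources, so retry a bounded number of times with an
// increasing sleep between attempts.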
240 for (tries = 0; tries < 20; tries++) {
241 ret = pthread_create(&tid, &attr, runtime_mstart, mp);
242 if (ret != EAGAIN) {
243 break;
245 runtime_usleep((tries + 1) * 1000); // Milliseconds.
248 pthread_sigmask(SIG_SETMASK, &old, nil);
250 if (ret != 0) {
251 runtime_printf("pthread_create failed: %d\n", ret);
252 runtime_throw("pthread_create");
256 // First function run by a new goroutine. This replaces gogocall.
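// kickoff is installed by makeGContext as the entry point of each new
// goroutine context, so it runs on the goroutine's own stack.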
257 static void
258 kickoff(void)
260 void (*fn)(void*);
261 void *param;
263 if(g->traceback != nil)
264 gtraceback(g);
266 fn = (void (*)(void*))(g->entry);
267 param = g->param;
268 g->entry = nil;
269 g->param = nil;
270 fn(param);
271 runtime_goexit1();
274 // Switch context to a different goroutine. This is like longjmp.
275 void runtime_gogo(G*) __attribute__ ((noinline));
276 void
277 runtime_gogo(G* newg)
279 #ifdef USING_SPLIT_STACK
280 __splitstack_setcontext(&newg->stackcontext[0]);
281 #endif
282 g = newg;
283 newg->fromgogo = true;
284 fixcontext(ucontext_arg(&newg->context[0]));
285 setcontext(ucontext_arg(&newg->context[0]));
286 runtime_throw("gogo setcontext returned");
289 // Save context and call fn passing g as a parameter. This is like
290 // setjmp. Because getcontext always returns 0, unlike setjmp, we use
291 // g->fromgogo as a flag. It will be true if we got here via
292 // setcontext. g == nil the first time this is called in a new m.
293 void runtime_mcall(void (*)(G*)) __attribute__ ((noinline));
294 void
295 runtime_mcall(void (*pfn)(G*))
297 M *mp;
298 G *gp;
299 #ifndef USING_SPLIT_STACK
300 void *afterregs;
301 #endif
303 // Ensure that all registers are on the stack for the garbage
304 // collector.
305 __builtin_unwind_init();
307 gp = g;
308 mp = gp->m;
309 if(gp == mp->g0)
310 runtime_throw("runtime: mcall called on m->g0 stack");
312 if(gp != nil) {
314 #ifdef USING_SPLIT_STACK
315 __splitstack_getcontext(&g->stackcontext[0]);
316 #else
317 // We have to point to an address on the stack that is
318 // below the saved registers.
319 gp->gcnextsp = &afterregs;
320 #endif
321 gp->fromgogo = false;
322 getcontext(ucontext_arg(&gp->context[0]));
324 // When we return from getcontext, we may be running
325 // in a new thread. That means that g may have
326 // changed. It is a global variable, so we will
327 // reload it, but the address of g may be cached in
328 // our local stack frame, and that address may be
329 // wrong. Call the function to reload the value for
330 // this thread.
331 gp = runtime_g();
332 mp = gp->m;
334 if(gp->traceback != nil)
335 gtraceback(gp);
337 if (gp == nil || !gp->fromgogo) {
338 #ifdef USING_SPLIT_STACK
339 __splitstack_setcontext(&mp->g0->stackcontext[0]);
340 #endif
341 mp->g0->entry = (byte*)pfn;
342 mp->g0->param = gp;
344 // It's OK to set g directly here because this case
345 // can not occur if we got here via a setcontext to
346 // the getcontext call just above.
347 g = mp->g0;
349 fixcontext(ucontext_arg(&mp->g0->context[0]));
350 setcontext(ucontext_arg(&mp->g0->context[0]));
351 runtime_throw("runtime: mcall function returned");
355 // Goroutine scheduler
356 // The scheduler's job is to distribute ready-to-run goroutines over worker threads.
358 // The main concepts are:
359 // G - goroutine.
360 // M - worker thread, or machine.
361 // P - processor, a resource that is required to execute Go code.
362 // M must have an associated P to execute Go code; however, it can be
363 // blocked or in a syscall without an associated P.
365 // Design doc at http://golang.org/s/go11sched.
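// Most of the scheduler itself (schedule, execute, startm, procresize,
// and friends) is implemented in Go; the extern declarations below bind
// this C code to those Go functions through their assembler names.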
367 enum
369 // Number of goroutine ids to grab from runtime_sched->goidgen to local per-P cache at once.
370 // 16 seems to provide enough amortization, but other than that it's a mostly arbitrary number.
371 GoidCacheBatch = 16,
374 extern Sched* runtime_getsched() __asm__ (GOSYM_PREFIX "runtime.getsched");
375 extern bool* runtime_getCgoHasExtraM()
376 __asm__ (GOSYM_PREFIX "runtime.getCgoHasExtraM");
377 extern P** runtime_getAllP()
378 __asm__ (GOSYM_PREFIX "runtime.getAllP");
379 extern G* allocg(void)
380 __asm__ (GOSYM_PREFIX "runtime.allocg");
381 extern bool needaddgcproc(void)
382 __asm__ (GOSYM_PREFIX "runtime.needaddgcproc");
383 extern void startm(P*, bool)
384 __asm__(GOSYM_PREFIX "runtime.startm");
385 extern void newm(void(*)(void), P*)
386 __asm__(GOSYM_PREFIX "runtime.newm");
388 Sched* runtime_sched;
389 M runtime_m0;
390 G runtime_g0; // idle goroutine for m0
391 G* runtime_lastg;
392 P** runtime_allp;
393 int8* runtime_goos;
394 int32 runtime_ncpu;
395 bool runtime_precisestack;
397 bool runtime_isarchive;
399 void* runtime_mstart(void*);
400 static void exitsyscall0(G*);
401 static void park0(G*);
402 static void goexit0(G*);
403 static bool exitsyscallfast(void);
405 extern void setncpu(int32)
406 __asm__(GOSYM_PREFIX "runtime.setncpu");
407 extern void setpagesize(uintptr_t)
408 __asm__(GOSYM_PREFIX "runtime.setpagesize");
409 extern void allgadd(G*)
410 __asm__(GOSYM_PREFIX "runtime.allgadd");
411 extern void mcommoninit(M*)
412 __asm__(GOSYM_PREFIX "runtime.mcommoninit");
413 extern void stopm(void)
414 __asm__(GOSYM_PREFIX "runtime.stopm");
415 extern void handoffp(P*)
416 __asm__(GOSYM_PREFIX "runtime.handoffp");
417 extern void wakep(void)
418 __asm__(GOSYM_PREFIX "runtime.wakep");
419 extern void stoplockedm(void)
420 __asm__(GOSYM_PREFIX "runtime.stoplockedm");
421 extern void schedule(void)
422 __asm__(GOSYM_PREFIX "runtime.schedule");
423 extern void execute(G*, bool)
424 __asm__(GOSYM_PREFIX "runtime.execute");
425 extern void gfput(P*, G*)
426 __asm__(GOSYM_PREFIX "runtime.gfput");
427 extern G* gfget(P*)
428 __asm__(GOSYM_PREFIX "runtime.gfget");
429 extern void procresize(int32)
430 __asm__(GOSYM_PREFIX "runtime.procresize");
431 extern void acquirep(P*)
432 __asm__(GOSYM_PREFIX "runtime.acquirep");
433 extern P* releasep(void)
434 __asm__(GOSYM_PREFIX "runtime.releasep");
435 extern void incidlelocked(int32)
436 __asm__(GOSYM_PREFIX "runtime.incidlelocked");
437 extern void checkdead(void)
438 __asm__(GOSYM_PREFIX "runtime.checkdead");
439 extern void sysmon(void)
440 __asm__(GOSYM_PREFIX "runtime.sysmon");
441 extern void mput(M*)
442 __asm__(GOSYM_PREFIX "runtime.mput");
443 extern M* mget(void)
444 __asm__(GOSYM_PREFIX "runtime.mget");
445 extern void globrunqput(G*)
446 __asm__(GOSYM_PREFIX "runtime.globrunqput");
447 extern P* pidleget(void)
448 __asm__(GOSYM_PREFIX "runtime.pidleget");
449 extern bool runqempty(P*)
450 __asm__(GOSYM_PREFIX "runtime.runqempty");
451 extern void runqput(P*, G*, bool)
452 __asm__(GOSYM_PREFIX "runtime.runqput");
454 bool runtime_isstarted;
456 // The bootstrap sequence is:
458 // call osinit
459 // call schedinit
460 // make & queue new G
461 // call runtime_mstart
463 // The new G calls runtime_main.
464 void
465 runtime_schedinit(void)
467 M *m;
468 int32 n, procs;
469 String s;
470 const byte *p;
471 Eface i;
473 setncpu(runtime_ncpu);
474 setpagesize(getpagesize());
475 runtime_sched = runtime_getsched();
477 m = &runtime_m0;
478 g = &runtime_g0;
479 m->g0 = g;
480 m->curg = g;
481 g->m = m;
483 initcontext();
485 runtime_sched->maxmcount = 10000;
486 runtime_precisestack = 0;
488 // runtime_symtabinit();
489 runtime_mallocinit();
490 mcommoninit(m);
491 runtime_alginit(); // maps must not be used before this call
493 // Initialize the itable value for newErrorCString,
494 // so that the next time it gets called, possibly
495 // in a fault during a garbage collection, it will not
496 // need to allocate memory.
497 runtime_newErrorCString(0, &i);
499 // Initialize the cached gotraceback value, since
500 // gotraceback calls getenv, which mallocs on Plan 9.
501 runtime_gotraceback(nil);
503 runtime_goargs();
504 runtime_goenvs();
505 runtime_parsedebugvars();
507 runtime_sched->lastpoll = runtime_nanotime();
508 procs = 1;
509 s = runtime_getenv("GOMAXPROCS");
510 p = s.str;
511 if(p != nil && (n = runtime_atoi(p, s.len)) > 0) {
512 if(n > _MaxGomaxprocs)
513 n = _MaxGomaxprocs;
514 procs = n;
516 runtime_allp = runtime_getAllP();
517 procresize(procs);
519 // Can not enable GC until all roots are registered.
520 // mstats()->enablegc = 1;
523 extern void main_init(void) __asm__ (GOSYM_PREFIX "__go_init_main");
524 extern void main_main(void) __asm__ (GOSYM_PREFIX "main.main");
526 // Used to determine the field alignment.
528 struct field_align
530 char c;
531 Hchan *p;
534 static void
535 initDone(void *arg __attribute__ ((unused))) {
536 runtime_unlockOSThread();
539 // The main goroutine.
540 // Note: C frames in general are not copyable during stack growth, for two reasons:
541 // 1) We don't know where in a frame to find pointers to other stack locations.
542 // 2) There's no guarantee that globals or heap values do not point into the frame.
544 // The C frame for runtime.main is copyable, because:
545 // 1) There are no pointers to other stack locations in the frame
546 // (d.fn points at a global, d.link is nil, d.argp is -1).
547 // 2) The only pointer into this frame is from the defer chain,
548 // which is explicitly handled during stack copying.
549 void
550 runtime_main(void* dummy __attribute__((unused)))
552 Defer d;
553 _Bool frame;
555 newm(sysmon, nil);
557 // Lock the main goroutine onto this, the main OS thread,
558 // during initialization. Most programs won't care, but a few
559 // do require certain calls to be made by the main thread.
560 // Those can arrange for main.main to run in the main thread
561 // by calling runtime.LockOSThread during initialization
562 // to preserve the lock.
563 runtime_lockOSThread();
565 // Defer unlock so that runtime.Goexit during init does the unlock too.
566 d.pfn = (uintptr)(void*)initDone;
567 d.link = g->_defer;
568 d.arg = (void*)-1;
569 d._panic = g->_panic;
570 d.retaddr = 0;
571 d.makefunccanrecover = 0;
572 d.frame = &frame;
573 d.special = true;
574 g->_defer = &d;
576 if(g->m != &runtime_m0)
577 runtime_throw("runtime_main not on m0");
578 __go_go(runtime_MHeap_Scavenger, nil);
580 makeMainInitDone();
582 _cgo_notify_runtime_init_done();
584 main_init();
586 closeMainInitDone();
588 if(g->_defer != &d || (void*)d.pfn != initDone)
589 runtime_throw("runtime: bad defer entry after init");
590 g->_defer = d.link;
591 runtime_unlockOSThread();
593 // For gccgo we have to wait until after main is initialized
594 // to enable GC, because initializing main registers the GC
595 // roots.
596 mstats()->enablegc = 1;
598 if(runtime_isarchive) {
599 // This is not a complete program, but is instead a
600 // library built using -buildmode=c-archive or
601 // c-shared. Now that we are initialized, there is
602 // nothing further to do.
603 return;
606 main_main();
608 // Make racy client program work: if panicking on
609 // another goroutine at the same time as main returns,
610 // let the other goroutine finish printing the panic trace.
611 // Once it does, it will exit. See issue 3934.
612 if(runtime_panicking())
613 runtime_park(nil, nil, "panicwait");
615 runtime_exit(0);
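// runtime_exit should not return; if it somehow does, crash.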
616 for(;;)
617 *(int32*)0 = 0;
620 void getTraceback(G*, G*) __asm__(GOSYM_PREFIX "runtime.getTraceback");
622 // getTraceback stores a traceback of gp in the g's traceback field
623 // and then returns to me. We expect that gp's traceback is not nil.
624 // It works by saving me's current context, and checking gp's traceback field.
625 // If gp's traceback field is not nil, it starts running gp.
626 // In places where we call getcontext, we check the traceback field.
627 // If it is not nil, we collect a traceback, and then return to the
628 // goroutine stored in the traceback field, which is me.
629 void getTraceback(G* me, G* gp)
631 #ifdef USING_SPLIT_STACK
632 __splitstack_getcontext(&me->stackcontext[0]);
633 #endif
634 getcontext(ucontext_arg(&me->context[0]));
636 if (gp->traceback != nil) {
637 runtime_gogo(gp);
641 // Do a stack trace of gp, and then restore the context to
642 // gp->traceback.
644 static void
645 gtraceback(G* gp)
647 Traceback* traceback;
649 traceback = gp->traceback;
650 gp->traceback = nil;
651 if(gp->m != nil)
652 runtime_throw("gtraceback: m is not nil");
653 gp->m = traceback->gp->m;
654 traceback->c = runtime_callers(1, traceback->locbuf,
655 sizeof traceback->locbuf / sizeof traceback->locbuf[0], false);
656 gp->m = nil;
657 runtime_gogo(traceback->gp);
660 // Called to start an M.
661 void*
662 runtime_mstart(void* mp)
664 M *m;
665 G *gp;
667 m = (M*)mp;
668 g = m->g0;
669 g->m = m;
670 gp = g;
672 initcontext();
674 gp->entry = nil;
675 gp->param = nil;
677 // Record top of stack for use by mcall.
678 // Once we call schedule we're never coming back,
679 // so other calls can reuse this stack space.
680 #ifdef USING_SPLIT_STACK
681 __splitstack_getcontext(&g->stackcontext[0]);
682 #else
683 gp->gcinitialsp = &mp;
684 // Setting gcstacksize to 0 is a marker meaning that gcinitialsp
685 // is the top of the stack, not the bottom.
686 gp->gcstacksize = 0;
687 gp->gcnextsp = &mp;
688 #endif
689 getcontext(ucontext_arg(&gp->context[0]));
691 if(gp->traceback != nil)
692 gtraceback(gp);
694 if(gp->entry != nil) {
695 // Got here from mcall.
696 void (*pfn)(G*) = (void (*)(G*))gp->entry;
697 G* gp1 = (G*)gp->param;
698 gp->entry = nil;
699 gp->param = nil;
700 pfn(gp1);
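// The mcall function pfn must never return; force a crash if it does.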
701 *(int*)0x21 = 0x21;
703 runtime_minit();
705 #ifdef USING_SPLIT_STACK
707 int dont_block_signals = 0;
708 __splitstack_block_signals(&dont_block_signals, nil);
710 #endif
712 // Install signal handlers; after minit so that minit can
713 // prepare the thread to be able to handle the signals.
714 if(m == &runtime_m0) {
715 if(runtime_iscgo) {
716 bool* cgoHasExtraM = runtime_getCgoHasExtraM();
717 if(!*cgoHasExtraM) {
718 *cgoHasExtraM = true;
719 runtime_newextram();
722 runtime_initsig(false);
725 if(m->mstartfn)
726 ((void (*)(void))m->mstartfn)();
728 if(m->helpgc) {
729 m->helpgc = 0;
730 stopm();
731 } else if(m != &runtime_m0) {
732 acquirep((P*)m->nextp);
733 m->nextp = 0;
735 schedule();
737 // TODO(brainman): This point is never reached, because scheduler
738 // does not release os threads at the moment. But once this path
739 // is enabled, we must remove our seh here.
741 return nil;
744 typedef struct CgoThreadStart CgoThreadStart;
745 struct CgoThreadStart
747 M *m;
748 G *g;
749 uintptr *tls;
750 void (*fn)(void);
753 M* runtime_allocm(P*, bool, byte**, uintptr*)
754 __asm__(GOSYM_PREFIX "runtime.allocm");
756 // Allocate a new m unassociated with any thread.
757 // Can use p for allocation context if needed.
759 runtime_allocm(P *p, bool allocatestack, byte** ret_g0_stack, uintptr* ret_g0_stacksize)
761 M *mp;
763 g->m->locks++; // disable GC because it can be called from sysmon
764 if(g->m->p == 0)
765 acquirep(p); // temporarily borrow p for mallocs in this function
766 #if 0
767 if(mtype == nil) {
768 Eface e;
769 runtime_gc_m_ptr(&e);
770 mtype = ((const PtrType*)e.__type_descriptor)->__element_type;
772 #endif
774 mp = runtime_mal(sizeof *mp);
775 mcommoninit(mp);
776 mp->g0 = runtime_malg(allocatestack, false, ret_g0_stack, ret_g0_stacksize);
777 mp->g0->m = mp;
779 if(p == (P*)g->m->p)
780 releasep();
781 g->m->locks--;
783 return mp;
786 void setGContext(void) __asm__ (GOSYM_PREFIX "runtime.setGContext");
788 // setGContext sets up a new goroutine context for the current g.
789 void
790 setGContext()
792 int val;
793 G *gp;
795 initcontext();
796 gp = g;
797 gp->entry = nil;
798 gp->param = nil;
799 #ifdef USING_SPLIT_STACK
800 __splitstack_getcontext(&gp->stackcontext[0]);
801 val = 0;
802 __splitstack_block_signals(&val, nil);
803 #else
804 gp->gcinitialsp = &val;
805 gp->gcstack = nil;
806 gp->gcstacksize = 0;
807 gp->gcnextsp = &val;
808 #endif
809 getcontext(ucontext_arg(&gp->context[0]));
811 if(gp->entry != nil) {
812 // Got here from mcall.
813 void (*pfn)(G*) = (void (*)(G*))gp->entry;
814 G* gp1 = (G*)gp->param;
815 gp->entry = nil;
816 gp->param = nil;
817 pfn(gp1);
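// As in runtime_mstart, the mcall function must never return; crash if it does.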
818 *(int*)0x22 = 0x22;
822 void makeGContext(G*, byte*, uintptr)
823 __asm__(GOSYM_PREFIX "runtime.makeGContext");
825 // makeGContext makes a new context for a g.
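// Switching to the resulting context starts execution in kickoff on the
// stack [sp, sp+spsize).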
826 void
827 makeGContext(G* gp, byte* sp, uintptr spsize) {
828 ucontext_t *uc;
830 uc = ucontext_arg(&gp->context[0]);
831 getcontext(uc);
832 uc->uc_stack.ss_sp = sp;
833 uc->uc_stack.ss_size = (size_t)spsize;
834 makecontext(uc, kickoff, 0);
837 // Create a new m. It will start off with a call to fn, or else the scheduler.
838 void
839 newm(void(*fn)(void), P *p)
841 M *mp;
843 mp = runtime_allocm(p, false, nil, nil);
844 mp->nextp = (uintptr)p;
845 mp->mstartfn = (uintptr)(void*)fn;
847 runtime_newosproc(mp);
850 static void
851 mspinning(void)
853 g->m->spinning = true;
856 // Schedules some M to run the p (creates an M if necessary).
857 // If p==nil, tries to get an idle P; if there are no idle P's, does nothing.
858 void
859 startm(P *p, bool spinning)
861 M *mp;
862 void (*fn)(void);
864 runtime_lock(&runtime_sched->lock);
865 if(p == nil) {
866 p = pidleget();
867 if(p == nil) {
868 runtime_unlock(&runtime_sched->lock);
869 if(spinning)
870 runtime_xadd(&runtime_sched->nmspinning, -1);
871 return;
874 mp = mget();
875 runtime_unlock(&runtime_sched->lock);
876 if(mp == nil) {
877 fn = nil;
878 if(spinning)
879 fn = mspinning;
880 newm(fn, p);
881 return;
883 if(mp->spinning)
884 runtime_throw("startm: m is spinning");
885 if(mp->nextp)
886 runtime_throw("startm: m has p");
887 if(spinning && !runqempty(p)) {
888 runtime_throw("startm: p has runnable gs");
890 mp->spinning = spinning;
891 mp->nextp = (uintptr)p;
892 runtime_notewakeup(&mp->park);
895 // Puts the current goroutine into a waiting state and calls unlockf.
896 // If unlockf returns false, the goroutine is resumed.
897 void
898 runtime_park(bool(*unlockf)(G*, void*), void *lock, const char *reason)
900 if(g->atomicstatus != _Grunning)
901 runtime_throw("bad g status");
902 g->m->waitlock = lock;
903 g->m->waitunlockf = unlockf;
904 g->waitreason = runtime_gostringnocopy((const byte*)reason);
905 runtime_mcall(park0);
908 void gopark(FuncVal *, void *, String, byte, int)
909 __asm__ ("runtime.gopark");
911 void
912 gopark(FuncVal *unlockf, void *lock, String reason,
913 byte traceEv __attribute__ ((unused)),
914 int traceskip __attribute__ ((unused)))
916 if(g->atomicstatus != _Grunning)
917 runtime_throw("bad g status");
918 g->m->waitlock = lock;
919 g->m->waitunlockf = unlockf == nil ? nil : (void*)unlockf->fn;
920 g->waitreason = reason;
921 runtime_mcall(park0);
924 static bool
925 parkunlock(G *gp, void *lock)
927 USED(gp);
928 runtime_unlock(lock);
929 return true;
932 // Puts the current goroutine into a waiting state and unlocks the lock.
933 // The goroutine can be made runnable again by calling runtime_ready(gp).
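// park0 marks the goroutine _Gwaiting before calling the unlock
// function, so a wakeup performed after the lock is released cannot be
// lost.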
934 void
935 runtime_parkunlock(Lock *lock, const char *reason)
937 runtime_park(parkunlock, lock, reason);
940 void goparkunlock(Lock *, String, byte, int)
941 __asm__ (GOSYM_PREFIX "runtime.goparkunlock");
943 void
944 goparkunlock(Lock *lock, String reason, byte traceEv __attribute__ ((unused)),
945 int traceskip __attribute__ ((unused)))
947 if(g->atomicstatus != _Grunning)
948 runtime_throw("bad g status");
949 g->m->waitlock = lock;
950 g->m->waitunlockf = parkunlock;
951 g->waitreason = reason;
952 runtime_mcall(park0);
955 // runtime_park continuation on g0.
956 static void
957 park0(G *gp)
959 M *m;
960 bool ok;
962 m = g->m;
963 gp->atomicstatus = _Gwaiting;
964 gp->m = nil;
965 m->curg = nil;
966 if(m->waitunlockf) {
967 ok = ((bool (*)(G*, void*))m->waitunlockf)(gp, m->waitlock);
968 m->waitunlockf = nil;
969 m->waitlock = nil;
970 if(!ok) {
971 gp->atomicstatus = _Grunnable;
972 execute(gp, true); // Schedule it back, never returns.
975 if(m->lockedg) {
976 stoplockedm();
977 execute(gp, true); // Never returns.
979 schedule();
982 // Scheduler yield.
983 void
984 runtime_gosched(void)
986 if(g->atomicstatus != _Grunning)
987 runtime_throw("bad g status");
988 runtime_mcall(runtime_gosched0);
991 // runtime_gosched continuation on g0.
992 void
993 runtime_gosched0(G *gp)
995 M *m;
997 m = g->m;
998 gp->atomicstatus = _Grunnable;
999 gp->m = nil;
1000 m->curg = nil;
1001 runtime_lock(&runtime_sched->lock);
1002 globrunqput(gp);
1003 runtime_unlock(&runtime_sched->lock);
1004 if(m->lockedg) {
1005 stoplockedm();
1006 execute(gp, true); // Never returns.
1008 schedule();
1011 // Finishes execution of the current goroutine.
1012 // Need to mark it as nosplit, because it runs with sp > stackbase (as runtime_lessstack).
1013 // Since it does not return, it does not matter. But if it is preempted
1014 // at the split stack check, GC will complain about inconsistent sp.
1015 void runtime_goexit1(void) __attribute__ ((noinline));
1016 void
1017 runtime_goexit1(void)
1019 if(g->atomicstatus != _Grunning)
1020 runtime_throw("bad g status");
1021 runtime_mcall(goexit0);
1024 // runtime_goexit1 continuation on g0.
1025 static void
1026 goexit0(G *gp)
1028 M *m;
1030 m = g->m;
1031 gp->atomicstatus = _Gdead;
1032 gp->entry = nil;
1033 gp->m = nil;
1034 gp->lockedm = nil;
1035 gp->paniconfault = 0;
1036 gp->_defer = nil; // should be true already but just in case.
1037 gp->_panic = nil; // non-nil for Goexit during panic. points at stack-allocated data.
1038 gp->writebuf.__values = nil;
1039 gp->writebuf.__count = 0;
1040 gp->writebuf.__capacity = 0;
1041 gp->waitreason = runtime_gostringnocopy(nil);
1042 gp->param = nil;
1043 m->curg->m = nil;
1044 m->curg = nil;
1045 m->lockedg = nil;
1046 if(m->locked & ~_LockExternal) {
1047 runtime_printf("invalid m->locked = %d\n", m->locked);
1048 runtime_throw("internal lockOSThread error");
1050 m->locked = 0;
1051 gfput((P*)m->p, gp);
1052 schedule();
1055 // The goroutine g is about to enter a system call.
1056 // Record that it's not using the cpu anymore.
1057 // This is called only from the go syscall library and cgocall,
1058 // not from the low-level system calls used by the runtime.
1060 // Entersyscall cannot split the stack: the runtime_gosave must
1061 // make g->sched refer to the caller's stack segment, because
1062 // entersyscall is going to return immediately after.
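// Note that entersyscall does not hand its P off immediately: it only
// marks the P _Psyscall, so sysmon can retake the P if the syscall runs
// long. entersyscallblock, below, releases the P right away.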
1064 void runtime_entersyscall(int32) __attribute__ ((no_split_stack));
1065 static void doentersyscall(uintptr, uintptr)
1066 __attribute__ ((no_split_stack, noinline));
1068 void
1069 runtime_entersyscall(int32 dummy __attribute__ ((unused)))
1071 // Save the registers in the g structure so that any pointers
1072 // held in registers will be seen by the garbage collector.
1073 getcontext(ucontext_arg(&g->gcregs[0]));
1075 // Do the work in a separate function, so that this function
1076 // doesn't save any registers on its own stack. If this
1077 // function does save any registers, we might store the wrong
1078 // value in the call to getcontext.
1080 // FIXME: This assumes that we do not need to save any
1081 // callee-saved registers to access the TLS variable g. We
1082 // don't want to put the ucontext_t on the stack because it is
1083 // large and we can not split the stack here.
1084 doentersyscall((uintptr)runtime_getcallerpc(&dummy),
1085 (uintptr)runtime_getcallersp(&dummy));
1088 static void
1089 doentersyscall(uintptr pc, uintptr sp)
1091 // Disable preemption because during this function g is in _Gsyscall status,
1092 // but can have an inconsistent g->sched; do not let the GC observe it.
1093 g->m->locks++;
1095 // Leave SP around for GC and traceback.
1096 #ifdef USING_SPLIT_STACK
1098 size_t gcstacksize;
1099 g->gcstack = __splitstack_find(nil, nil, &gcstacksize,
1100 &g->gcnextsegment, &g->gcnextsp,
1101 &g->gcinitialsp);
1102 g->gcstacksize = (uintptr)gcstacksize;
1104 #else
1106 void *v;
1108 g->gcnextsp = (byte *) &v;
1110 #endif
1112 g->syscallsp = sp;
1113 g->syscallpc = pc;
1115 g->atomicstatus = _Gsyscall;
1117 if(runtime_atomicload(&runtime_sched->sysmonwait)) { // TODO: fast atomic
1118 runtime_lock(&runtime_sched->lock);
1119 if(runtime_atomicload(&runtime_sched->sysmonwait)) {
1120 runtime_atomicstore(&runtime_sched->sysmonwait, 0);
1121 runtime_notewakeup(&runtime_sched->sysmonnote);
1123 runtime_unlock(&runtime_sched->lock);
1126 g->m->mcache = nil;
1127 ((P*)(g->m->p))->m = 0;
1128 runtime_atomicstore(&((P*)g->m->p)->status, _Psyscall);
1129 if(runtime_atomicload(&runtime_sched->gcwaiting)) {
1130 runtime_lock(&runtime_sched->lock);
1131 if (runtime_sched->stopwait > 0 && runtime_cas(&((P*)g->m->p)->status, _Psyscall, _Pgcstop)) {
1132 if(--runtime_sched->stopwait == 0)
1133 runtime_notewakeup(&runtime_sched->stopnote);
1135 runtime_unlock(&runtime_sched->lock);
1138 g->m->locks--;
1141 // The same as runtime_entersyscall(), but with a hint that the syscall is blocking.
1142 void
1143 runtime_entersyscallblock(int32 dummy __attribute__ ((unused)))
1145 P *p;
1147 g->m->locks++; // see comment in entersyscall
1149 // Leave SP around for GC and traceback.
1150 #ifdef USING_SPLIT_STACK
1152 size_t gcstacksize;
1153 g->gcstack = __splitstack_find(nil, nil, &gcstacksize,
1154 &g->gcnextsegment, &g->gcnextsp,
1155 &g->gcinitialsp);
1156 g->gcstacksize = (uintptr)gcstacksize;
1158 #else
1159 g->gcnextsp = (byte *) &p;
1160 #endif
1162 // Save the registers in the g structure so that any pointers
1163 // held in registers will be seen by the garbage collector.
1164 getcontext(ucontext_arg(&g->gcregs[0]));
1166 g->syscallpc = (uintptr)runtime_getcallerpc(&dummy);
1167 g->syscallsp = (uintptr)runtime_getcallersp(&dummy);
1169 g->atomicstatus = _Gsyscall;
1171 p = releasep();
1172 handoffp(p);
1173 if(g->isbackground) // do not consider blocked scavenger for deadlock detection
1174 incidlelocked(1);
1176 g->m->locks--;
1179 // The goroutine g exited its system call.
1180 // Arrange for it to run on a cpu again.
1181 // This is called only from the go syscall library, not
1182 // from the low-level system calls used by the runtime.
1183 void
1184 runtime_exitsyscall(int32 dummy __attribute__ ((unused)))
1186 G *gp;
1188 gp = g;
1189 gp->m->locks++; // see comment in entersyscall
1191 if(gp->isbackground) // do not consider blocked scavenger for deadlock detection
1192 incidlelocked(-1);
1194 gp->waitsince = 0;
1195 if(exitsyscallfast()) {
1196 // There's a cpu for us, so we can run.
1197 ((P*)gp->m->p)->syscalltick++;
1198 gp->atomicstatus = _Grunning;
1199 // Garbage collector isn't running (since we are),
1200 // so okay to clear gcstack and gcsp.
1201 #ifdef USING_SPLIT_STACK
1202 gp->gcstack = nil;
1203 #endif
1204 gp->gcnextsp = nil;
1205 runtime_memclr(&gp->gcregs[0], sizeof gp->gcregs);
1206 gp->syscallsp = 0;
1207 gp->m->locks--;
1208 return;
1211 gp->m->locks--;
1213 // Call the scheduler.
1214 runtime_mcall(exitsyscall0);
1216 // Scheduler returned, so we're allowed to run now.
1217 // Delete the gcstack information that we left for
1218 // the garbage collector during the system call.
1219 // Must wait until now because until gosched returns
1220 // we don't know for sure that the garbage collector
1221 // is not running.
1222 #ifdef USING_SPLIT_STACK
1223 gp->gcstack = nil;
1224 #endif
1225 gp->gcnextsp = nil;
1226 runtime_memclr(&gp->gcregs[0], sizeof gp->gcregs);
1228 gp->syscallsp = 0;
1230 // Note that this gp->m might be different than the earlier
1231 // gp->m after returning from runtime_mcall.
1232 ((P*)gp->m->p)->syscalltick++;
1235 static bool
1236 exitsyscallfast(void)
1238 G *gp;
1239 P *p;
1241 gp = g;
1243 // Freezetheworld sets stopwait but does not retake P's.
1244 if(runtime_sched->stopwait) {
1245 gp->m->p = 0;
1246 return false;
1249 // Try to re-acquire the last P.
1250 if(gp->m->p && ((P*)gp->m->p)->status == _Psyscall && runtime_cas(&((P*)gp->m->p)->status, _Psyscall, _Prunning)) {
1251 // There's a cpu for us, so we can run.
1252 gp->m->mcache = ((P*)gp->m->p)->mcache;
1253 ((P*)gp->m->p)->m = (uintptr)gp->m;
1254 return true;
1256 // Try to get any other idle P.
1257 gp->m->p = 0;
1258 if(runtime_sched->pidle) {
1259 runtime_lock(&runtime_sched->lock);
1260 p = pidleget();
1261 if(p && runtime_atomicload(&runtime_sched->sysmonwait)) {
1262 runtime_atomicstore(&runtime_sched->sysmonwait, 0);
1263 runtime_notewakeup(&runtime_sched->sysmonnote);
1265 runtime_unlock(&runtime_sched->lock);
1266 if(p) {
1267 acquirep(p);
1268 return true;
1271 return false;
1274 // runtime_exitsyscall slow path on g0.
1275 // Failed to acquire P, enqueue gp as runnable.
1276 static void
1277 exitsyscall0(G *gp)
1279 M *m;
1280 P *p;
1282 m = g->m;
1283 gp->atomicstatus = _Grunnable;
1284 gp->m = nil;
1285 m->curg = nil;
1286 runtime_lock(&runtime_sched->lock);
1287 p = pidleget();
1288 if(p == nil)
1289 globrunqput(gp);
1290 else if(runtime_atomicload(&runtime_sched->sysmonwait)) {
1291 runtime_atomicstore(&runtime_sched->sysmonwait, 0);
1292 runtime_notewakeup(&runtime_sched->sysmonnote);
1294 runtime_unlock(&runtime_sched->lock);
1295 if(p) {
1296 acquirep(p);
1297 execute(gp, false); // Never returns.
1299 if(m->lockedg) {
1300 // Wait until another thread schedules gp and so m again.
1301 stoplockedm();
1302 execute(gp, false); // Never returns.
1304 stopm();
1305 schedule(); // Never returns.
1308 void syscall_entersyscall(void)
1309 __asm__(GOSYM_PREFIX "syscall.Entersyscall");
1311 void syscall_entersyscall(void) __attribute__ ((no_split_stack));
1313 void
1314 syscall_entersyscall()
1316 runtime_entersyscall(0);
1319 void syscall_exitsyscall(void)
1320 __asm__(GOSYM_PREFIX "syscall.Exitsyscall");
1322 void syscall_exitsyscall(void) __attribute__ ((no_split_stack));
1324 void
1325 syscall_exitsyscall()
1327 runtime_exitsyscall(0);
1330 // Allocate a new g, with a stack big enough for stacksize bytes.
1332 runtime_malg(bool allocatestack, bool signalstack, byte** ret_stack, uintptr* ret_stacksize)
1334 uintptr stacksize;
1335 G *newg;
1336 byte* unused_stack;
1337 uintptr unused_stacksize;
1338 #if USING_SPLIT_STACK
1339 int dont_block_signals = 0;
1340 size_t ss_stacksize;
1341 #endif
1343 if (ret_stack == nil) {
1344 ret_stack = &unused_stack;
1346 if (ret_stacksize == nil) {
1347 ret_stacksize = &unused_stacksize;
1349 newg = allocg();
1350 if(allocatestack) {
1351 stacksize = StackMin;
1352 if(signalstack) {
1353 stacksize = 32 * 1024; // OS X wants >= 8K, GNU/Linux >= 2K
1354 #ifdef SIGSTKSZ
1355 if(stacksize < SIGSTKSZ)
1356 stacksize = SIGSTKSZ;
1357 #endif
1360 #if USING_SPLIT_STACK
1361 *ret_stack = __splitstack_makecontext(stacksize,
1362 &newg->stackcontext[0],
1363 &ss_stacksize);
1364 *ret_stacksize = (uintptr)ss_stacksize;
1365 __splitstack_block_signals_context(&newg->stackcontext[0],
1366 &dont_block_signals, nil);
1367 #else
1368 // In 64-bit mode, the maximum Go allocation space is
1369 // 128G. Our stack size is 4M, which only permits 32K
1370 // goroutines. In order to not limit ourselves,
1371 // allocate the stacks out of separate memory. In
1372 // 32-bit mode, the Go allocation space is all of
1373 // memory anyhow.
1374 if(sizeof(void*) == 8) {
1375 void *p = runtime_SysAlloc(stacksize, &mstats()->other_sys);
1376 if(p == nil)
1377 runtime_throw("runtime: cannot allocate memory for goroutine stack");
1378 *ret_stack = (byte*)p;
1379 } else {
1380 *ret_stack = runtime_mallocgc(stacksize, 0, FlagNoProfiling|FlagNoGC);
1381 runtime_xadd(&runtime_stacks_sys, stacksize);
1383 *ret_stacksize = (uintptr)stacksize;
1384 newg->gcinitialsp = *ret_stack;
1385 newg->gcstacksize = (uintptr)stacksize;
1386 #endif
1388 return newg;
1392 __go_go(void (*fn)(void*), void* arg)
1394 byte *sp;
1395 size_t spsize;
1396 G *newg;
1397 P *p;
1399 //runtime_printf("newproc1 %p %p narg=%d nret=%d\n", fn->fn, argp, narg, nret);
1400 if(fn == nil) {
1401 g->m->throwing = -1; // do not dump full stacks
1402 runtime_throw("go of nil func value");
1404 g->m->locks++; // disable preemption because it can be holding p in a local var
1406 p = (P*)g->m->p;
1407 if((newg = gfget(p)) != nil) {
1408 #ifdef USING_SPLIT_STACK
1409 int dont_block_signals = 0;
1411 sp = __splitstack_resetcontext(&newg->stackcontext[0],
1412 &spsize);
1413 __splitstack_block_signals_context(&newg->stackcontext[0],
1414 &dont_block_signals, nil);
1415 #else
1416 sp = newg->gcinitialsp;
1417 spsize = newg->gcstacksize;
1418 if(spsize == 0)
1419 runtime_throw("bad spsize in __go_go");
1420 newg->gcnextsp = sp;
1421 #endif
1422 newg->traceback = nil;
1423 } else {
1424 uintptr malsize;
1426 newg = runtime_malg(true, false, &sp, &malsize);
1427 spsize = (size_t)malsize;
1428 newg->atomicstatus = _Gdead;
1429 allgadd(newg);
1432 newg->entry = (byte*)fn;
1433 newg->param = arg;
1434 newg->gopc = (uintptr)__builtin_return_address(0);
1435 newg->atomicstatus = _Grunnable;
1436 if(p->goidcache == p->goidcacheend) {
1437 p->goidcache = runtime_xadd64(&runtime_sched->goidgen, GoidCacheBatch);
1438 p->goidcacheend = p->goidcache + GoidCacheBatch;
1440 newg->goid = p->goidcache++;
1442 makeGContext(newg, sp, (uintptr)spsize);
1444 runqput(p, newg, true);
1446 if(runtime_atomicload(&runtime_sched->npidle) != 0 && runtime_atomicload(&runtime_sched->nmspinning) == 0 && fn != runtime_main) // TODO: fast atomic
1447 wakep();
1448 g->m->locks--;
1449 return newg;
1452 void
1453 runtime_Breakpoint(void)
1455 runtime_breakpoint();
1458 void runtime_Gosched (void) __asm__ (GOSYM_PREFIX "runtime.Gosched");
1460 void
1461 runtime_Gosched(void)
1463 runtime_gosched();
1466 static struct {
1467 uint32 lock;
1468 int32 hz;
1469 } prof;
1471 static void System(void) {}
1472 static void GC(void) {}
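// System and GC are never called; their addresses serve as placeholder
// PCs that label profiling samples for which a traceback could not be
// collected, attributing them to the garbage collector or to system
// code (see runtime_sigprof below).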
1474 // Called if we receive a SIGPROF signal.
1475 void
1476 runtime_sigprof()
1478 M *mp = g->m;
1479 int32 n, i;
1480 bool traceback;
1481 uintptr pcbuf[TracebackMaxFrames];
1482 Location locbuf[TracebackMaxFrames];
1483 Slice stk;
1485 if(prof.hz == 0)
1486 return;
1488 if(mp == nil)
1489 return;
1491 // Profiling runs concurrently with GC, so it must not allocate.
1492 mp->mallocing++;
1494 traceback = true;
1496 if(mp->mcache == nil)
1497 traceback = false;
1499 n = 0;
1501 if(runtime_atomicload(&runtime_in_callers) > 0) {
1502 // If SIGPROF arrived while already fetching runtime
1503 // callers we can have trouble on older systems
1504 // because the unwind library calls dl_iterate_phdr
1505 // which was not recursive in the past.
1506 traceback = false;
1509 if(traceback) {
1510 n = runtime_callers(0, locbuf, nelem(locbuf), false);
1511 for(i = 0; i < n; i++)
1512 pcbuf[i] = locbuf[i].pc;
1514 if(!traceback || n <= 0) {
1515 n = 2;
1516 pcbuf[0] = (uintptr)runtime_getcallerpc(&n);
1517 if(mp->gcing || mp->helpgc)
1518 pcbuf[1] = (uintptr)GC;
1519 else
1520 pcbuf[1] = (uintptr)System;
1523 if (prof.hz != 0) {
1524 stk.__values = &pcbuf[0];
1525 stk.__count = n;
1526 stk.__capacity = n;
1528 // Simple cas-lock to coordinate with setcpuprofilerate.
1529 while (!runtime_cas(&prof.lock, 0, 1)) {
1530 runtime_osyield();
1532 if (prof.hz != 0) {
1533 runtime_cpuprofAdd(stk);
1535 runtime_atomicstore(&prof.lock, 0);
1538 mp->mallocing--;
1541 // Arrange to call fn with a traceback hz times a second.
1542 void
1543 runtime_setcpuprofilerate_m(int32 hz)
1545 // Force sane arguments.
1546 if(hz < 0)
1547 hz = 0;
1549 // Disable preemption, otherwise we can be rescheduled to another thread
1550 // that has profiling enabled.
1551 g->m->locks++;
1553 // Stop profiler on this thread so that it is safe to lock prof.
1554 // If a profiling signal came in while we had prof locked,
1555 // it would deadlock.
1556 runtime_resetcpuprofiler(0);
1558 while (!runtime_cas(&prof.lock, 0, 1)) {
1559 runtime_osyield();
1561 prof.hz = hz;
1562 runtime_atomicstore(&prof.lock, 0);
1564 runtime_lock(&runtime_sched->lock);
1565 runtime_sched->profilehz = hz;
1566 runtime_unlock(&runtime_sched->lock);
1568 if(hz != 0)
1569 runtime_resetcpuprofiler(hz);
1571 g->m->locks--;
1574 // Return whether we are waiting for a GC. This gc toolchain uses
1575 // preemption instead.
1576 bool
1577 runtime_gcwaiting(void)
1579 return runtime_sched->gcwaiting;
1582 // os_beforeExit is called from os.Exit(0).
1583 //go:linkname os_beforeExit os.runtime_beforeExit
1585 extern void os_beforeExit() __asm__ (GOSYM_PREFIX "os.runtime_beforeExit");
1587 void
1588 os_beforeExit()
1592 intgo NumCPU(void) __asm__ (GOSYM_PREFIX "runtime.NumCPU");
1594 intgo
1595 NumCPU()
1597 return (intgo)(runtime_ncpu);