// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#ifdef HAVE_DL_ITERATE_PHDR
#ifdef USING_SPLIT_STACK

/* FIXME: These are not declared anywhere.  */

extern void __splitstack_getcontext(void *context[10]);

extern void __splitstack_setcontext(void *context[10]);

extern void *__splitstack_makecontext(size_t, void *context[10], size_t *);

extern void *__splitstack_resetcontext(void *context[10], size_t *);

extern void *__splitstack_find(void *, void *, size_t *, void **, void **,
			       void **);

extern void __splitstack_block_signals (int *, int *);

extern void __splitstack_block_signals_context (void *context[10], int *,
						int *);

#endif
#ifndef PTHREAD_STACK_MIN
# define PTHREAD_STACK_MIN 8192
#endif

#if defined(USING_SPLIT_STACK) && defined(LINKER_SUPPORTS_SPLIT_STACK)
# define StackMin PTHREAD_STACK_MIN
#else
# define StackMin ((sizeof(char *) < 8) ? 2 * 1024 * 1024 : 4 * 1024 * 1024)
#endif
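
// With the fallback definition above, 32-bit targets (pointer size
// under 8 bytes) get a 2M initial goroutine stack and 64-bit targets
// get 4M: without working split stacks the initial allocation has to
// cover the whole stack, so it is sized generously up front.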
uintptr runtime_stacks_sys;

static void gtraceback(G*);
#ifndef SETCONTEXT_CLOBBERS_TLS

static inline void
fixcontext(ucontext_t *c __attribute__ ((unused)))
{
}

#else

# if defined(__x86_64__) && defined(__sun__)
// x86_64 Solaris 10 and 11 have a bug: setcontext switches the %fs
// register to that of the thread which called getcontext.  The effect
// is that the address of all __thread variables changes.  This bug
// also affects pthread_self() and pthread_getspecific.  We work
// around it by clobbering the context field directly to keep %fs the
// same.
static __thread greg_t fs;

static inline void
initcontext(void)
{
	ucontext_t c;

	getcontext(&c);
	fs = c.uc_mcontext.gregs[REG_FSBASE];
}
static inline void
fixcontext(ucontext_t* c)
{
	c->uc_mcontext.gregs[REG_FSBASE] = fs;
}
# elif defined(__NetBSD__)

// NetBSD has a bug: setcontext clobbers tlsbase, we need to save
// and restore it ourselves.

static __thread __greg_t tlsbase;
static inline void
initcontext(void)
{
	ucontext_t c;

	getcontext(&c);
	tlsbase = c.uc_mcontext._mc_tlsbase;
}
static inline void
fixcontext(ucontext_t* c)
{
	c->uc_mcontext._mc_tlsbase = tlsbase;
}

# elif defined(__sparc__)
static inline void
fixcontext(ucontext_t *c)
{
	/* Using
	     register unsigned long thread __asm__("%g7");
	     c->uc_mcontext.gregs[REG_G7] = thread;
	   results in
	     error: variable ‘thread’ might be clobbered by ‘longjmp’
	     or ‘vfork’ [-Werror=clobbered]
	   which ought to be false, as %g7 is a fixed register.  */

	if (sizeof (c->uc_mcontext.gregs[REG_G7]) == 8)
		asm ("stx %%g7, %0" : "=m"(c->uc_mcontext.gregs[REG_G7]));
	else
		asm ("st %%g7, %0" : "=m"(c->uc_mcontext.gregs[REG_G7]));
}
# else

#  error unknown case for SETCONTEXT_CLOBBERS_TLS

# endif

#endif
// ucontext_arg returns a properly aligned ucontext_t value.  On some
// systems a ucontext_t value must be aligned to a 16-byte boundary.
// The g structure that has fields of type ucontext_t is defined in
// Go, and Go has no simple way to align a field to such a boundary.
// So we make the field larger in runtime2.go and pick an appropriate
// offset within the field here.
static ucontext_t*
ucontext_arg(void** go_ucontext)
{
	uintptr_t p = (uintptr_t)go_ucontext;
	size_t align = __alignof__(ucontext_t);
	if(align > 16) {
		// We only ensured space for up to a 16 byte alignment
		// in libgo/go/runtime/runtime2.go.
		runtime_throw("required alignment of ucontext_t too large");
	}
	p = (p + align - 1) &~ (uintptr_t)(align - 1);
	return (ucontext_t*)p;
}
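
// Worked example of the rounding above: with align == 16 and
// go_ucontext == 0x1004, p becomes (0x1004 + 15) & ~15 == 0x1010,
// the first 16-byte aligned address inside the oversized field.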
// We can not always refer to the TLS variables directly.  The
// compiler will call tls_get_addr to get the address of the variable,
// and it may hold it in a register across a call to schedule.  When
// we get back from the call we may be running in a different thread,
// in which case the register now points to the TLS variable for a
// different thread.  We use non-inlinable functions to avoid this
// when necessary.

G* runtime_g(void) __attribute__ ((noinline, no_split_stack));

M* runtime_m(void) __attribute__ ((noinline, no_split_stack));
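
// A sketch of the hazard these wrappers prevent (hypothetical code,
// not part of the runtime):
//
//	G **cached = &g;          // address resolved via __tls_get_addr
//	runtime_mcall(schedule);  // may resume on a different thread
//	use(*cached);             // still the old thread's TLS slot
//
// Calling the noinline runtime_g() after a possible thread switch
// forces the TLS address to be recomputed instead.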
// Start a new thread.
static void
runtime_newosproc(M *mp)
{
	pthread_attr_t attr;
	sigset_t clear, old;
	pthread_t tid;
	int tries;
	int ret;

	if(pthread_attr_init(&attr) != 0)
		runtime_throw("pthread_attr_init");
	if(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) != 0)
		runtime_throw("pthread_attr_setdetachstate");

	// Block signals during pthread_create so that the new thread
	// starts with signals disabled.  It will enable them in minit.
	sigfillset(&clear);

#ifdef SIGTRAP
	// Blocking SIGTRAP reportedly breaks gdb on Alpha GNU/Linux.
	sigdelset(&clear, SIGTRAP);
#endif

	sigemptyset(&old);
	pthread_sigmask(SIG_BLOCK, &clear, &old);

	for (tries = 0; tries < 20; tries++) {
		ret = pthread_create(&tid, &attr, runtime_mstart, mp);
		if (ret != EAGAIN)
			break;
		runtime_usleep((tries + 1) * 1000); // Milliseconds.
	}
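
	// The loop above backs off 1ms, 2ms, ..., 20ms between EAGAIN
	// retries, about 210ms in total, before giving up and throwing
	// below.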

	pthread_sigmask(SIG_SETMASK, &old, nil);

	if (ret != 0) {
		runtime_printf("pthread_create failed: %d\n", ret);
		runtime_throw("pthread_create");
	}
}
// First function run by a new goroutine.  This replaces gogocall.
static void
kickoff(void)
{
	void (*fn)(void*);
	void *param;

	if(g->traceback != nil)
		gtraceback(g);

	fn = (void (*)(void*))(g->entry);
	param = g->param;
	g->entry = nil;
	g->param = nil;
	fn(param);
	runtime_goexit1();
}
// Switch context to a different goroutine.  This is like longjmp.
void runtime_gogo(G*) __attribute__ ((noinline));
void
runtime_gogo(G* newg)
{
#ifdef USING_SPLIT_STACK
	__splitstack_setcontext(&newg->stackcontext[0]);
#endif
	g = newg;
	newg->fromgogo = true;
	fixcontext(ucontext_arg(&newg->context[0]));
	setcontext(ucontext_arg(&newg->context[0]));
	runtime_throw("gogo setcontext returned");
}
// Save context and call fn passing g as a parameter.  This is like
// setjmp.  Because getcontext always returns 0, unlike setjmp, we use
// g->fromgogo as a code.  It will be true if we got here via
// setcontext.  g == nil the first time this is called in a new m.
void runtime_mcall(void (*)(G*)) __attribute__ ((noinline));
void
runtime_mcall(void (*pfn)(G*))
{
	M *mp;
	G *gp;
#ifndef USING_SPLIT_STACK
	void *afterregs;
#endif

	// Ensure that all registers are on the stack for the garbage
	// collector.
	__builtin_unwind_init();

	gp = g;
	mp = gp->m;
	if(gp == mp->g0)
		runtime_throw("runtime: mcall called on m->g0 stack");

	if(gp != nil) {
#ifdef USING_SPLIT_STACK
		__splitstack_getcontext(&g->stackcontext[0]);
#else
		// We have to point to an address on the stack that is
		// below the saved registers.
		gp->gcnextsp = &afterregs;
#endif
		gp->fromgogo = false;
		getcontext(ucontext_arg(&gp->context[0]));

		// When we return from getcontext, we may be running
		// in a new thread.  That means that g may have
		// changed.  It is a global variable so we will
		// reload it, but the address of g may be cached in
		// our local stack frame, and that address may be
		// wrong.  Call the function to reload the value for
		// this thread.
		gp = runtime_g();
		mp = gp->m;

		if(gp->traceback != nil)
			gtraceback(gp);
	}
	if (gp == nil || !gp->fromgogo) {
#ifdef USING_SPLIT_STACK
		__splitstack_setcontext(&mp->g0->stackcontext[0]);
#endif
		mp->g0->entry = (byte*)pfn;
		mp->g0->param = gp;

		// It's OK to set g directly here because this case
		// can not occur if we got here via a setcontext to
		// the getcontext call just above.
		g = mp->g0;

		fixcontext(ucontext_arg(&mp->g0->context[0]));
		setcontext(ucontext_arg(&mp->g0->context[0]));
		runtime_throw("runtime: mcall function returned");
	}
}
// Goroutine scheduler
// The scheduler's job is to distribute ready-to-run goroutines over worker threads.
//
// The main concepts are:
// G - goroutine.
// M - worker thread, or machine.
// P - processor, a resource that is required to execute Go code.
//     M must have an associated P to execute Go code, however it can be
//     blocked or in a syscall w/o an associated P.
//
// Design doc at http://golang.org/s/go11sched.
// Number of goroutine ids to grab from runtime_sched->goidgen to local per-P cache at once.
// 16 seems to provide enough amortization, but other than that it's a mostly arbitrary number.
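//
// A P then hands out ids locally until the batch is exhausted, as in
// __go_go below:
//
//	if(p->goidcache == p->goidcacheend) {
//		p->goidcache = runtime_xadd64(&runtime_sched->goidgen, GoidCacheBatch);
//		p->goidcacheend = p->goidcache + GoidCacheBatch;
//	}
//	newg->goid = p->goidcache++;
//
// so the shared goidgen counter is touched once per 16 goroutines.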
extern Sched* runtime_getsched() __asm__ (GOSYM_PREFIX "runtime.getsched");
extern bool* runtime_getCgoHasExtraM()
  __asm__ (GOSYM_PREFIX "runtime.getCgoHasExtraM");
extern P** runtime_getAllP()
  __asm__ (GOSYM_PREFIX "runtime.getAllP");
extern G* allocg(void)
  __asm__ (GOSYM_PREFIX "runtime.allocg");
extern bool needaddgcproc(void)
  __asm__ (GOSYM_PREFIX "runtime.needaddgcproc");
extern void startm(P*, bool)
  __asm__(GOSYM_PREFIX "runtime.startm");
extern void newm(void(*)(void), P*)
  __asm__(GOSYM_PREFIX "runtime.newm");
Sched*	runtime_sched;

G	runtime_g0;	// idle goroutine for m0

bool	runtime_precisestack;

bool	runtime_isarchive;
void* runtime_mstart(void*);
static void exitsyscall0(G*);
static void park0(G*);
static void goexit0(G*);
static bool exitsyscallfast(void);
extern void setncpu(int32)
  __asm__(GOSYM_PREFIX "runtime.setncpu");
extern void setpagesize(uintptr_t)
  __asm__(GOSYM_PREFIX "runtime.setpagesize");
extern void allgadd(G*)
  __asm__(GOSYM_PREFIX "runtime.allgadd");
extern void mcommoninit(M*)
  __asm__(GOSYM_PREFIX "runtime.mcommoninit");
extern void stopm(void)
  __asm__(GOSYM_PREFIX "runtime.stopm");
extern void handoffp(P*)
  __asm__(GOSYM_PREFIX "runtime.handoffp");
extern void wakep(void)
  __asm__(GOSYM_PREFIX "runtime.wakep");
extern void stoplockedm(void)
  __asm__(GOSYM_PREFIX "runtime.stoplockedm");
extern void schedule(void)
  __asm__(GOSYM_PREFIX "runtime.schedule");
extern void execute(G*, bool)
  __asm__(GOSYM_PREFIX "runtime.execute");
extern void gfput(P*, G*)
  __asm__(GOSYM_PREFIX "runtime.gfput");
extern G* gfget(P*)
  __asm__(GOSYM_PREFIX "runtime.gfget");
extern void procresize(int32)
  __asm__(GOSYM_PREFIX "runtime.procresize");
extern void acquirep(P*)
  __asm__(GOSYM_PREFIX "runtime.acquirep");
extern P* releasep(void)
  __asm__(GOSYM_PREFIX "runtime.releasep");
extern void incidlelocked(int32)
  __asm__(GOSYM_PREFIX "runtime.incidlelocked");
extern void checkdead(void)
  __asm__(GOSYM_PREFIX "runtime.checkdead");
extern void sysmon(void)
  __asm__(GOSYM_PREFIX "runtime.sysmon");
extern void mput(M*)
  __asm__(GOSYM_PREFIX "runtime.mput");
extern M* mget(void)
  __asm__(GOSYM_PREFIX "runtime.mget");
extern void globrunqput(G*)
  __asm__(GOSYM_PREFIX "runtime.globrunqput");
extern P* pidleget(void)
  __asm__(GOSYM_PREFIX "runtime.pidleget");
extern bool runqempty(P*)
  __asm__(GOSYM_PREFIX "runtime.runqempty");
extern void runqput(P*, G*, bool)
  __asm__(GOSYM_PREFIX "runtime.runqput");
bool runtime_isstarted;
// The bootstrap sequence is:
//
//	call osinit
//	call schedinit
//	make & queue new G
//	call runtime_mstart
//
// The new G calls runtime_main.
void
runtime_schedinit(void)
{
	int32 n, procs;
	String s;
	const byte *p;
	Eface i;

	setncpu(runtime_ncpu);
	setpagesize(getpagesize());
	runtime_sched = runtime_getsched();

	runtime_sched->maxmcount = 10000;
	runtime_precisestack = 0;

	// runtime_symtabinit();
	runtime_mallocinit();

	runtime_alginit(); // maps must not be used before this call

	// Initialize the itable value for newErrorCString,
	// so that the next time it gets called, possibly
	// in a fault during a garbage collection, it will not
	// need to allocate memory.
	runtime_newErrorCString(0, &i);

	// Initialize the cached gotraceback value, since
	// gotraceback calls getenv, which mallocs on Plan 9.
	runtime_gotraceback(nil);

	runtime_parsedebugvars();

	runtime_sched->lastpoll = runtime_nanotime();

	procs = 1;
	s = runtime_getenv("GOMAXPROCS");
	p = s.str;
	if(p != nil && (n = runtime_atoi(p, s.len)) > 0) {
		if(n > _MaxGomaxprocs)
			n = _MaxGomaxprocs;
		procs = n;
	}
	runtime_allp = runtime_getAllP();
	procresize(procs);

	// Can not enable GC until all roots are registered.
	// mstats()->enablegc = 1;
}
extern void main_init(void) __asm__ (GOSYM_PREFIX "__go_init_main");
extern void main_main(void) __asm__ (GOSYM_PREFIX "main.main");
// Used to determine the field alignment.

static void
initDone(void *arg __attribute__ ((unused))) {
	runtime_unlockOSThread();
}
// The main goroutine.
// Note: C frames in general are not copyable during stack growth, for two reasons:
//   1) We don't know where in a frame to find pointers to other stack locations.
//   2) There's no guarantee that globals or heap values do not point into the frame.
//
// The C frame for runtime.main is copyable, because:
//   1) There are no pointers to other stack locations in the frame
//      (d.fn points at a global, d.link is nil, d.argp is -1).
//   2) The only pointer into this frame is from the defer chain,
//      which is explicitly handled during stack copying.
static void
runtime_main(void* dummy __attribute__((unused)))
{
	Defer d;

	// Lock the main goroutine onto this, the main OS thread,
	// during initialization.  Most programs won't care, but a few
	// do require certain calls to be made by the main thread.
	// Those can arrange for main.main to run in the main thread
	// by calling runtime.LockOSThread during initialization
	// to preserve the lock.
	runtime_lockOSThread();

	// Defer unlock so that runtime.Goexit during init does the unlock too.
	d.pfn = (uintptr)(void*)initDone;
	d.link = g->_defer;
	d._panic = g->_panic;
	d.makefunccanrecover = 0;
	g->_defer = &d;

	if(g->m != &runtime_m0)
		runtime_throw("runtime_main not on m0");
	__go_go(runtime_MHeap_Scavenger, nil);

	_cgo_notify_runtime_init_done();

	main_init();

	if(g->_defer != &d || (void*)d.pfn != initDone)
		runtime_throw("runtime: bad defer entry after init");
	g->_defer = d.link;
	runtime_unlockOSThread();

	// For gccgo we have to wait until after main is initialized
	// to enable GC, because initializing main registers the GC
	// roots.
	mstats()->enablegc = 1;

	if(runtime_isarchive) {
		// This is not a complete program, but is instead a
		// library built using -buildmode=c-archive or
		// c-shared.  Now that we are initialized, there is
		// nothing further to do.
		return;
	}

	main_main();

	// Make racy client program work: if panicking on
	// another goroutine at the same time as main returns,
	// let the other goroutine finish printing the panic trace.
	// Once it does, it will exit.  See issue 3934.
	if(runtime_panicking())
		runtime_park(nil, nil, "panicwait");

	runtime_exit(0);
}
void getTraceback(G*, G*) __asm__(GOSYM_PREFIX "runtime.getTraceback");
// getTraceback stores a traceback of gp in the g's traceback field
// and then returns to me.  We expect that gp's traceback is not nil.
// It works by saving me's current context, and checking gp's traceback field.
// If gp's traceback field is not nil, it starts running gp.
// In places where we call getcontext, we check the traceback field.
// If it is not nil, we collect a traceback, and then return to the
// goroutine stored in the traceback field, which is me.
void getTraceback(G* me, G* gp)
{
#ifdef USING_SPLIT_STACK
	__splitstack_getcontext(&me->stackcontext[0]);
#endif
	getcontext(ucontext_arg(&me->context[0]));

	if (gp->traceback != nil) {
		runtime_gogo(gp);
	}
}
// Do a stack trace of gp, and then restore the context to
// gp->traceback->gp.
static void
gtraceback(G* gp)
{
	Traceback* traceback;

	traceback = gp->traceback;
	gp->traceback = nil;
	if(gp->m != nil)
		runtime_throw("gtraceback: m is not nil");
	gp->m = traceback->gp->m;
	traceback->c = runtime_callers(1, traceback->locbuf,
		sizeof traceback->locbuf / sizeof traceback->locbuf[0], false);
	gp->m = nil;
	runtime_gogo(traceback->gp);
}
// Called to start an M.
void*
runtime_mstart(void* mp)
{
	M *m;
	G *gp;

	m = (M*)mp;
	g = m->g0;
	g->m = m;
	gp = g;

	initcontext();

	// Record top of stack for use by mcall.
	// Once we call schedule we're never coming back,
	// so other calls can reuse this stack space.
#ifdef USING_SPLIT_STACK
	__splitstack_getcontext(&g->stackcontext[0]);
#else
	gp->gcinitialsp = &mp;
	// Setting gcstacksize to 0 is a marker meaning that gcinitialsp
	// is the top of the stack, not the bottom.
	gp->gcstacksize = 0;
#endif
	getcontext(ucontext_arg(&gp->context[0]));

	if(gp->traceback != nil)
		gtraceback(gp);

	if(gp->entry != nil) {
		// Got here from mcall.
		void (*pfn)(G*) = (void (*)(G*))gp->entry;
		G* gp1 = (G*)gp->param;
		gp->entry = nil;
		gp->param = nil;
		pfn(gp1);
	}

	runtime_minit();

#ifdef USING_SPLIT_STACK
	{
		int dont_block_signals = 0;
		__splitstack_block_signals(&dont_block_signals, nil);
	}
#endif

	// Install signal handlers; after minit so that minit can
	// prepare the thread to be able to handle the signals.
	if(m == &runtime_m0) {
		if(runtime_iscgo) {
			bool* cgoHasExtraM = runtime_getCgoHasExtraM();
			if(!*cgoHasExtraM) {
				*cgoHasExtraM = true;
				runtime_newextram();
			}
		}
		runtime_initsig(false);
	}

	if(m->mstartfn)
		((void (*)(void))m->mstartfn)();

	if(m->helpgc) {
		m->helpgc = 0;
		stopm();
	} else if(m != &runtime_m0) {
		acquirep((P*)m->nextp);
		m->nextp = 0;
	}
	schedule();

	// TODO(brainman): This point is never reached, because scheduler
	// does not release os threads at the moment.  But once this path
	// is enabled, we must remove our seh here.

	return nil;
}
typedef struct CgoThreadStart CgoThreadStart;
struct CgoThreadStart
{
	M *m;
	G *g;
	uintptr *tls;
	void (*fn)(void);
};
M* runtime_allocm(P*, bool, byte**, uintptr*)
	__asm__(GOSYM_PREFIX "runtime.allocm");
// Allocate a new m unassociated with any thread.
// Can use p for allocation context if needed.
M*
runtime_allocm(P *p, bool allocatestack, byte** ret_g0_stack, uintptr* ret_g0_stacksize)
{
	M *mp;
	static const Type *mtype;	// The Go type M

	g->m->locks++;	// disable GC because it can be called from sysmon
	acquirep(p);	// temporarily borrow p for mallocs in this function

	if(mtype == nil) {
		Eface e;
		runtime_gc_m_ptr(&e);
		mtype = ((const PtrType*)e.__type_descriptor)->__element_type;
	}

	mp = runtime_mal(sizeof *mp);
	mcommoninit(mp);
	mp->g0 = runtime_malg(allocatestack, false, ret_g0_stack, ret_g0_stacksize);
	mp->g0->m = mp;

	return mp;
}
void setGContext(void) __asm__ (GOSYM_PREFIX "runtime.setGContext");
// setGContext sets up a new goroutine context for the current g.
void
setGContext()
{
	int val;
	G *gp;

	gp = g;
#ifdef USING_SPLIT_STACK
	__splitstack_getcontext(&gp->stackcontext[0]);
	val = 0;
	__splitstack_block_signals(&val, nil);
#else
	gp->gcinitialsp = &val;
	gp->gcstacksize = 0;
#endif
	getcontext(ucontext_arg(&gp->context[0]));

	if(gp->entry != nil) {
		// Got here from mcall.
		void (*pfn)(G*) = (void (*)(G*))gp->entry;
		G* gp1 = (G*)gp->param;
		gp->entry = nil;
		gp->param = nil;
		pfn(gp1);
	}
}
void makeGContext(G*, byte*, uintptr)
	__asm__(GOSYM_PREFIX "runtime.makeGContext");
// makeGContext makes a new context for a g.
void
makeGContext(G* gp, byte* sp, uintptr spsize) {
	ucontext_t *uc;

	uc = ucontext_arg(&gp->context[0]);
	getcontext(uc);
	uc->uc_stack.ss_sp = sp;
	uc->uc_stack.ss_size = (size_t)spsize;
	makecontext(uc, kickoff, 0);
}
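
// __go_go (below) pairs this with a fresh stack from runtime_malg:
//
//	newg = runtime_malg(true, false, &sp, &malsize);
//	spsize = (size_t)malsize;
//	...
//	makeGContext(newg, sp, (uintptr)spsize);
//
// after which the first runtime_gogo(newg) enters kickoff on the new
// stack.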
// Create a new m.  It will start off with a call to fn, or else the scheduler.
void
newm(void(*fn)(void), P *p)
{
	M *mp;

	mp = runtime_allocm(p, false, nil, nil);
	mp->nextp = (uintptr)p;
	mp->mstartfn = (uintptr)(void*)fn;

	runtime_newosproc(mp);
}
static void
mspinning(void)
{
	// startm's caller incremented nmspinning.  Set the new M's spinning.
	g->m->spinning = true;
}
// Schedules some M to run the p (creates an M if necessary).
// If p==nil, tries to get an idle P, if no idle P's does nothing.
void
startm(P *p, bool spinning)
{
	M *mp;

	runtime_lock(&runtime_sched->lock);
	if(p == nil) {
		p = pidleget();
		if(p == nil) {
			runtime_unlock(&runtime_sched->lock);
			if(spinning)
				runtime_xadd(&runtime_sched->nmspinning, -1);
			return;
		}
	}
	mp = mget();
	runtime_unlock(&runtime_sched->lock);
	if(mp == nil) {
		void (*fn)(void);

		fn = nil;
		if(spinning)
			fn = mspinning;
		newm(fn, p);
		return;
	}
	if(mp->spinning)
		runtime_throw("startm: m is spinning");
	if(mp->nextp)
		runtime_throw("startm: m has p");
	if(spinning && !runqempty(p)) {
		runtime_throw("startm: p has runnable gs");
	}
	mp->spinning = spinning;
	mp->nextp = (uintptr)p;
	runtime_notewakeup(&mp->park);
}
// Puts the current goroutine into a waiting state and calls unlockf.
// If unlockf returns false, the goroutine is resumed.
void
runtime_park(bool(*unlockf)(G*, void*), void *lock, const char *reason)
{
	if(g->atomicstatus != _Grunning)
		runtime_throw("bad g status");
	g->m->waitlock = lock;
	g->m->waitunlockf = unlockf;
	g->waitreason = runtime_gostringnocopy((const byte*)reason);
	runtime_mcall(park0);
}
void gopark(FuncVal*, void *, String, byte, int)
	__asm__ (GOSYM_PREFIX "runtime.gopark");
void
gopark(FuncVal *unlockf, void *lock, String reason,
       byte traceEv __attribute__ ((unused)),
       int traceskip __attribute__ ((unused)))
{
	if(g->atomicstatus != _Grunning)
		runtime_throw("bad g status");
	g->m->waitlock = lock;
	g->m->waitunlockf = unlockf == nil ? nil : (void*)unlockf->fn;
	g->waitreason = reason;
	runtime_mcall(park0);
}
static bool
parkunlock(G *gp, void *lock)
{
	USED(gp);
	runtime_unlock(lock);
	return true;
}
// Puts the current goroutine into a waiting state and unlocks the lock.
// The goroutine can be made runnable again by calling runtime_ready(gp).
void
runtime_parkunlock(Lock *lock, const char *reason)
{
	runtime_park(parkunlock, lock, reason);
}
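
// Typical use (hypothetical caller, for illustration only): take a
// lock, decide to sleep, and let park0 run the unlock on the g0 stack
// only after this g's context has been saved, so a concurrent
// runtime_ready cannot race with the state save:
//
//	runtime_lock(&waitq->lock);
//	if(mustwait)
//		runtime_parkunlock(&waitq->lock, "semacquire");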
void goparkunlock(Lock*, String, byte, int)
	__asm__ (GOSYM_PREFIX "runtime.goparkunlock");
void
goparkunlock(Lock *lock, String reason, byte traceEv __attribute__ ((unused)),
	     int traceskip __attribute__ ((unused)))
{
	if(g->atomicstatus != _Grunning)
		runtime_throw("bad g status");
	g->m->waitlock = lock;
	g->m->waitunlockf = parkunlock;
	g->waitreason = reason;
	runtime_mcall(park0);
}
// runtime_park continuation on g0.
static void
park0(G *gp)
{
	M *m;
	bool ok;

	m = g->m;
	gp->atomicstatus = _Gwaiting;
	gp->m = nil;
	m->curg = nil;
	if(m->waitunlockf) {
		ok = ((bool (*)(G*, void*))m->waitunlockf)(gp, m->waitlock);
		m->waitunlockf = nil;
		m->waitlock = nil;
		if(!ok) {
			gp->atomicstatus = _Grunnable;
			execute(gp, true);  // Schedule it back, never returns.
		}
	}
	if(m->lockedg) {
		stoplockedm();
		execute(gp, true);  // Never returns.
	}
	schedule();
}
void
runtime_gosched(void)
{
	if(g->atomicstatus != _Grunning)
		runtime_throw("bad g status");
	runtime_mcall(runtime_gosched0);
}
// runtime_gosched continuation on g0.
void
runtime_gosched0(G *gp)
{
	M *m;

	m = g->m;
	gp->atomicstatus = _Grunnable;
	gp->m = nil;
	m->curg = nil;
	runtime_lock(&runtime_sched->lock);
	globrunqput(gp);
	runtime_unlock(&runtime_sched->lock);
	if(m->lockedg) {
		stoplockedm();
		execute(gp, true);  // Never returns.
	}
	schedule();
}
// Finishes execution of the current goroutine.
// Need to mark it as nosplit, because it runs with sp > stackbase (as runtime_lessstack).
// Since it does not return it does not matter.  But if it is preempted
// at the split stack check, GC will complain about inconsistent sp.
void runtime_goexit1(void) __attribute__ ((noinline));
void
runtime_goexit1(void)
{
	if(g->atomicstatus != _Grunning)
		runtime_throw("bad g status");
	runtime_mcall(goexit0);
}
// runtime_goexit1 continuation on g0.
static void
goexit0(G *gp)
{
	M *m;

	m = g->m;
	gp->atomicstatus = _Gdead;
	gp->entry = nil;
	gp->m = nil;
	gp->paniconfault = 0;
	gp->_defer = nil;	// should be true already but just in case.
	gp->_panic = nil;	// non-nil for Goexit during panic.  points at stack-allocated data.
	gp->writebuf.__values = nil;
	gp->writebuf.__count = 0;
	gp->writebuf.__capacity = 0;
	gp->waitreason = runtime_gostringnocopy(nil);
	m->curg = nil;
	m->lockedg = nil;
	if(m->locked & ~_LockExternal) {
		runtime_printf("invalid m->locked = %d\n", m->locked);
		runtime_throw("internal lockOSThread error");
	}
	m->locked = 0;
	gfput((P*)m->p, gp);
	schedule();
}
// The goroutine g is about to enter a system call.
// Record that it's not using the cpu anymore.
// This is called only from the go syscall library and cgocall,
// not from the low-level system calls used by the runtime.
//
// Entersyscall cannot split the stack: the runtime_gosave must
// make g->sched refer to the caller's stack segment, because
// entersyscall is going to return immediately after.

void runtime_entersyscall(int32) __attribute__ ((no_split_stack));
static void doentersyscall(uintptr, uintptr)
	__attribute__ ((no_split_stack, noinline));
void
runtime_entersyscall(int32 dummy __attribute__ ((unused)))
{
	// Save the registers in the g structure so that any pointers
	// held in registers will be seen by the garbage collector.
	getcontext(ucontext_arg(&g->gcregs[0]));

	// Do the work in a separate function, so that this function
	// doesn't save any registers on its own stack.  If this
	// function does save any registers, we might store the wrong
	// value in the call to getcontext.
	//
	// FIXME: This assumes that we do not need to save any
	// callee-saved registers to access the TLS variable g.  We
	// don't want to put the ucontext_t on the stack because it is
	// large and we can not split the stack here.
	doentersyscall((uintptr)runtime_getcallerpc(&dummy),
		       (uintptr)runtime_getcallersp(&dummy));
}
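
// Concretely: if runtime_entersyscall's own prologue spilled a
// callee-saved register to its stack frame before the getcontext call
// above, the saved context would record the spilled-over register
// contents rather than the caller's live pointer, and the GC scan of
// g->gcregs could miss that pointer.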
static void
doentersyscall(uintptr pc, uintptr sp)
{
	// Disable preemption because during this function g is in _Gsyscall status,
	// but can have inconsistent g->sched, do not let GC observe it.
	g->m->locks++;

	// Leave SP around for GC and traceback.
#ifdef USING_SPLIT_STACK
	{
		size_t gcstacksize;

		g->gcstack = __splitstack_find(nil, nil, &gcstacksize,
					       &g->gcnextsegment, &g->gcnextsp,
					       &g->gcinitialsp);
		g->gcstacksize = (uintptr)gcstacksize;
	}
#else
	{
		void *v;

		g->gcnextsp = (byte*) &v;
	}
#endif

	g->syscallsp = sp;
	g->syscallpc = pc;

	g->atomicstatus = _Gsyscall;

	if(runtime_atomicload(&runtime_sched->sysmonwait)) {  // TODO: fast atomic
		runtime_lock(&runtime_sched->lock);
		if(runtime_atomicload(&runtime_sched->sysmonwait)) {
			runtime_atomicstore(&runtime_sched->sysmonwait, 0);
			runtime_notewakeup(&runtime_sched->sysmonnote);
		}
		runtime_unlock(&runtime_sched->lock);
	}

	((P*)(g->m->p))->m = 0;
	runtime_atomicstore(&((P*)g->m->p)->status, _Psyscall);
	if(runtime_atomicload(&runtime_sched->gcwaiting)) {
		runtime_lock(&runtime_sched->lock);
		if (runtime_sched->stopwait > 0 && runtime_cas(&((P*)g->m->p)->status, _Psyscall, _Pgcstop)) {
			if(--runtime_sched->stopwait == 0)
				runtime_notewakeup(&runtime_sched->stopnote);
		}
		runtime_unlock(&runtime_sched->lock);
	}

	g->m->locks--;
}
// The same as runtime_entersyscall(), but with a hint that the syscall is blocking.
void
runtime_entersyscallblock(int32 dummy __attribute__ ((unused)))
{
	P *p;

	g->m->locks++;	// see comment in entersyscall

	// Leave SP around for GC and traceback.
#ifdef USING_SPLIT_STACK
	{
		size_t gcstacksize;

		g->gcstack = __splitstack_find(nil, nil, &gcstacksize,
					       &g->gcnextsegment, &g->gcnextsp,
					       &g->gcinitialsp);
		g->gcstacksize = (uintptr)gcstacksize;
	}
#else
	g->gcnextsp = (byte *) &p;
#endif

	// Save the registers in the g structure so that any pointers
	// held in registers will be seen by the garbage collector.
	getcontext(ucontext_arg(&g->gcregs[0]));

	g->syscallpc = (uintptr)runtime_getcallerpc(&dummy);
	g->syscallsp = (uintptr)runtime_getcallersp(&dummy);

	g->atomicstatus = _Gsyscall;

	p = releasep();
	handoffp(p);
	if(g->isbackground)  // do not consider blocked scavenger for deadlock detection
		incidlelocked(1);

	g->m->locks--;
}
// The goroutine g exited its system call.
// Arrange for it to run on a cpu again.
// This is called only from the go syscall library, not
// from the low-level system calls used by the runtime.
void
runtime_exitsyscall(int32 dummy __attribute__ ((unused)))
{
	G *gp;

	gp = g;
	gp->m->locks++;	// see comment in entersyscall

	if(gp->isbackground)  // do not consider blocked scavenger for deadlock detection
		incidlelocked(-1);

	if(exitsyscallfast()) {
		// There's a cpu for us, so we can run.
		((P*)gp->m->p)->syscalltick++;
		gp->atomicstatus = _Grunning;
		// Garbage collector isn't running (since we are),
		// so okay to clear gcstack and gcsp.
#ifdef USING_SPLIT_STACK
		gp->gcstack = nil;
#endif
		gp->gcnextsp = nil;
		runtime_memclr(&gp->gcregs[0], sizeof gp->gcregs);
		gp->syscallsp = 0;
		gp->m->locks--;
		return;
	}

	gp->m->locks--;

	// Call the scheduler.
	runtime_mcall(exitsyscall0);

	// Scheduler returned, so we're allowed to run now.
	// Delete the gcstack information that we left for
	// the garbage collector during the system call.
	// Must wait until now because until gosched returns
	// we don't know for sure that the garbage collector
	// is not running.
#ifdef USING_SPLIT_STACK
	gp->gcstack = nil;
#endif
	gp->gcnextsp = nil;
	runtime_memclr(&gp->gcregs[0], sizeof gp->gcregs);

	gp->syscallsp = 0;

	// Note that this gp->m might be different than the earlier
	// gp->m after returning from runtime_mcall.
	((P*)gp->m->p)->syscalltick++;
}
static bool
exitsyscallfast(void)
{
	G *gp;
	P *p;

	gp = g;

	// Freezetheworld sets stopwait but does not retake P's.
	if(runtime_sched->stopwait) {
		gp->m->p = 0;
		return false;
	}

	// Try to re-acquire the last P.
	if(gp->m->p && ((P*)gp->m->p)->status == _Psyscall && runtime_cas(&((P*)gp->m->p)->status, _Psyscall, _Prunning)) {
		// There's a cpu for us, so we can run.
		gp->m->mcache = ((P*)gp->m->p)->mcache;
		((P*)gp->m->p)->m = (uintptr)gp->m;
		return true;
	}

	// Try to get any other idle P.
	gp->m->p = 0;
	if(runtime_sched->pidle) {
		runtime_lock(&runtime_sched->lock);
		p = pidleget();
		if(p && runtime_atomicload(&runtime_sched->sysmonwait)) {
			runtime_atomicstore(&runtime_sched->sysmonwait, 0);
			runtime_notewakeup(&runtime_sched->sysmonnote);
		}
		runtime_unlock(&runtime_sched->lock);
		if(p) {
			acquirep(p);
			return true;
		}
	}
	return false;
}
// runtime_exitsyscall slow path on g0.
// Failed to acquire P, enqueue gp as runnable.
static void
exitsyscall0(G *gp)
{
	M *m;
	P *p;

	m = g->m;
	gp->atomicstatus = _Grunnable;
	gp->m = nil;
	m->curg = nil;
	runtime_lock(&runtime_sched->lock);
	p = pidleget();
	if(p == nil)
		globrunqput(gp);
	else if(runtime_atomicload(&runtime_sched->sysmonwait)) {
		runtime_atomicstore(&runtime_sched->sysmonwait, 0);
		runtime_notewakeup(&runtime_sched->sysmonnote);
	}
	runtime_unlock(&runtime_sched->lock);
	if(p) {
		acquirep(p);
		execute(gp, false);  // Never returns.
	}
	if(m->lockedg) {
		// Wait until another thread schedules gp and so m again.
		stoplockedm();
		execute(gp, false);  // Never returns.
	}
	stopm();
	schedule();  // Never returns.
}
void syscall_entersyscall(void)
  __asm__(GOSYM_PREFIX "syscall.Entersyscall");

void syscall_entersyscall(void) __attribute__ ((no_split_stack));

void
syscall_entersyscall()
{
	runtime_entersyscall(0);
}

void syscall_exitsyscall(void)
  __asm__(GOSYM_PREFIX "syscall.Exitsyscall");

void syscall_exitsyscall(void) __attribute__ ((no_split_stack));

void
syscall_exitsyscall()
{
	runtime_exitsyscall(0);
}
// Allocate a new g, with a stack big enough for stacksize bytes.
G*
runtime_malg(bool allocatestack, bool signalstack, byte** ret_stack, uintptr* ret_stacksize)
{
	G *newg;
	byte* unused_stack;
	uintptr unused_stacksize;
	uintptr stacksize;
#if USING_SPLIT_STACK
	int dont_block_signals = 0;
	size_t ss_stacksize;
#endif

	if (ret_stack == nil) {
		ret_stack = &unused_stack;
	}
	if (ret_stacksize == nil) {
		ret_stacksize = &unused_stacksize;
	}
	newg = allocg();
	if(allocatestack) {
		stacksize = StackMin;
		if(signalstack) {
			stacksize = 32 * 1024;	// OS X wants >= 8K, GNU/Linux >= 2K
#ifdef SIGSTKSZ
			if(stacksize < SIGSTKSZ)
				stacksize = SIGSTKSZ;
#endif
		}

#if USING_SPLIT_STACK
		*ret_stack = __splitstack_makecontext(stacksize,
						      &newg->stackcontext[0],
						      &ss_stacksize);
		*ret_stacksize = (uintptr)ss_stacksize;
		__splitstack_block_signals_context(&newg->stackcontext[0],
						   &dont_block_signals, nil);
#else
		// In 64-bit mode, the maximum Go allocation space is
		// 128G.  Our stack size is 4M, which only permits 32K
		// goroutines.  In order to not limit ourselves,
		// allocate the stacks out of separate memory.  In
		// 32-bit mode, the Go allocation space is all of
		// memory anyhow.
		if(sizeof(void*) == 8) {
			void *p = runtime_SysAlloc(stacksize, &mstats()->other_sys);
			if(p == nil)
				runtime_throw("runtime: cannot allocate memory for goroutine stack");
			*ret_stack = (byte*)p;
		} else {
			*ret_stack = runtime_mallocgc(stacksize, 0, FlagNoProfiling|FlagNoGC);
			runtime_xadd(&runtime_stacks_sys, stacksize);
		}
		*ret_stacksize = (uintptr)stacksize;
		newg->gcinitialsp = *ret_stack;
		newg->gcstacksize = (uintptr)stacksize;
#endif
	}
	return newg;
}
G*
__go_go(void (*fn)(void*), void* arg)
{
	byte *sp;
	size_t spsize;
	G *newg;
	P *p;

//runtime_printf("newproc1 %p %p narg=%d nret=%d\n", fn->fn, argp, narg, nret);
	if(fn == nil) {
		g->m->throwing = -1;	// do not dump full stacks
		runtime_throw("go of nil func value");
	}
	g->m->locks++;	// disable preemption because it can be holding p in a local var

	p = (P*)g->m->p;
	if((newg = gfget(p)) != nil) {
#ifdef USING_SPLIT_STACK
		int dont_block_signals = 0;

		sp = __splitstack_resetcontext(&newg->stackcontext[0],
					       &spsize);
		__splitstack_block_signals_context(&newg->stackcontext[0],
						   &dont_block_signals, nil);
#else
		sp = newg->gcinitialsp;
		spsize = newg->gcstacksize;
		if(spsize == 0)
			runtime_throw("bad spsize in __go_go");
		newg->gcnextsp = sp;
#endif
		newg->traceback = nil;
	} else {
		uintptr malsize;

		newg = runtime_malg(true, false, &sp, &malsize);
		spsize = (size_t)malsize;
		newg->atomicstatus = _Gdead;
		allgadd(newg);
	}

	newg->entry = (byte*)fn;
	newg->param = arg;
	newg->gopc = (uintptr)__builtin_return_address(0);
	newg->atomicstatus = _Grunnable;
	if(p->goidcache == p->goidcacheend) {
		p->goidcache = runtime_xadd64(&runtime_sched->goidgen, GoidCacheBatch);
		p->goidcacheend = p->goidcache + GoidCacheBatch;
	}
	newg->goid = p->goidcache++;

	makeGContext(newg, sp, (uintptr)spsize);

	runqput(p, newg, true);

	if(runtime_atomicload(&runtime_sched->npidle) != 0 && runtime_atomicload(&runtime_sched->nmspinning) == 0 && fn != runtime_main)  // TODO: fast atomic
		wakep();
	g->m->locks--;
	return newg;
}
void
runtime_Breakpoint(void)
{
	runtime_breakpoint();
}
void runtime_Gosched (void) __asm__ (GOSYM_PREFIX "runtime.Gosched");

void
runtime_Gosched(void)
{
	runtime_gosched();
}
static void System(void) {}
static void GC(void) {}

// Called if we receive a SIGPROF signal.
void
runtime_sigprof()
{
	M *mp = g->m;
	int32 n, i;
	bool traceback;
	uintptr pcbuf[TracebackMaxFrames];
	Location locbuf[TracebackMaxFrames];
	Slice stk;

	// Profiling runs concurrently with GC, so it must not allocate.
	mp->mallocing++;

	traceback = true;
	if(mp->mcache == nil)
		traceback = false;

	if(runtime_atomicload(&runtime_in_callers) > 0) {
		// If SIGPROF arrived while already fetching runtime
		// callers we can have trouble on older systems
		// because the unwind library calls dl_iterate_phdr
		// which was not recursive in the past.
		traceback = false;
	}

	n = 0;
	if(traceback) {
		n = runtime_callers(0, locbuf, nelem(locbuf), false);
		for(i = 0; i < n; i++)
			pcbuf[i] = locbuf[i].pc;
	}
	if(!traceback || n <= 0) {
		n = 2;
		pcbuf[0] = (uintptr)runtime_getcallerpc(&n);
		if(mp->gcing || mp->helpgc)
			pcbuf[1] = (uintptr)GC;
		else
			pcbuf[1] = (uintptr)System;
	}

	if(prof.hz != 0) {
		stk.__values = &pcbuf[0];
		stk.__count = n;
		stk.__capacity = n;

		// Simple cas-lock to coordinate with setcpuprofilerate.
		while (!runtime_cas(&prof.lock, 0, 1)) {
			runtime_osyield();
		}
		if(prof.hz != 0)
			runtime_cpuprofAdd(stk);
		runtime_atomicstore(&prof.lock, 0);
	}

	mp->mallocing--;
}
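
// setcpuprofilerate below uses the same prof.lock protocol: spin with
// runtime_cas(&prof.lock, 0, 1) to acquire, update the profiler
// state, then runtime_atomicstore(&prof.lock, 0) to release.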
// Arrange to call fn with a traceback hz times a second.
void
runtime_setcpuprofilerate_m(int32 hz)
{
	// Force sane arguments.
	if(hz < 0)
		hz = 0;

	// Disable preemption, otherwise we can be rescheduled to another thread
	// that has profiling enabled.
	g->m->locks++;

	// Stop profiler on this thread so that it is safe to lock prof.
	// if a profiling signal came in while we had prof locked,
	// it would deadlock.
	runtime_resetcpuprofiler(0);

	while (!runtime_cas(&prof.lock, 0, 1)) {
		runtime_osyield();
	}
	prof.hz = hz;
	runtime_atomicstore(&prof.lock, 0);

	runtime_lock(&runtime_sched->lock);
	runtime_sched->profilehz = hz;
	runtime_unlock(&runtime_sched->lock);

	if(hz != 0)
		runtime_resetcpuprofiler(hz);

	g->m->locks--;
}
// Return whether we are waiting for a GC.  This gc toolchain uses
// preemption instead.
bool
runtime_gcwaiting(void)
{
	return runtime_sched->gcwaiting;
}
// os_beforeExit is called from os.Exit(0).
//go:linkname os_beforeExit os.runtime_beforeExit

extern void os_beforeExit() __asm__ (GOSYM_PREFIX "os.runtime_beforeExit");
intgo NumCPU(void) __asm__ (GOSYM_PREFIX "runtime.NumCPU");

intgo
NumCPU()
{
	return (intgo)(runtime_ncpu);
}