compiler, runtime: call gcWriteBarrier instead of writebarrierptr
[official-gcc.git] / libgo / go / runtime / proc.go
blob4fc45dd4f64c59d31ac3c7765260832c79cdbf08
1 // Copyright 2014 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 package runtime
7 import (
8 "runtime/internal/atomic"
9 "runtime/internal/sys"
10 "unsafe"
13 // Functions called by C code.
14 //go:linkname main runtime.main
15 //go:linkname goparkunlock runtime.goparkunlock
16 //go:linkname newextram runtime.newextram
17 //go:linkname acquirep runtime.acquirep
18 //go:linkname releasep runtime.releasep
19 //go:linkname incidlelocked runtime.incidlelocked
20 //go:linkname schedinit runtime.schedinit
21 //go:linkname ready runtime.ready
22 //go:linkname gcprocs runtime.gcprocs
23 //go:linkname stopm runtime.stopm
24 //go:linkname handoffp runtime.handoffp
25 //go:linkname wakep runtime.wakep
26 //go:linkname stoplockedm runtime.stoplockedm
27 //go:linkname schedule runtime.schedule
28 //go:linkname execute runtime.execute
29 //go:linkname goexit1 runtime.goexit1
30 //go:linkname reentersyscall runtime.reentersyscall
31 //go:linkname reentersyscallblock runtime.reentersyscallblock
32 //go:linkname exitsyscall runtime.exitsyscall
33 //go:linkname gfget runtime.gfget
34 //go:linkname helpgc runtime.helpgc
35 //go:linkname kickoff runtime.kickoff
36 //go:linkname mstart1 runtime.mstart1
37 //go:linkname mexit runtime.mexit
38 //go:linkname globrunqput runtime.globrunqput
39 //go:linkname pidleget runtime.pidleget
41 // Exported for test (see runtime/testdata/testprogcgo/dropm_stub.go).
42 //go:linkname getm runtime.getm
44 // Function called by misc/cgo/test.
45 //go:linkname lockedOSThread runtime.lockedOSThread
47 // C functions for thread and context management.
48 func newosproc(*m)
50 //go:noescape
51 func malg(bool, bool, *unsafe.Pointer, *uintptr) *g
53 //go:noescape
54 func resetNewG(*g, *unsafe.Pointer, *uintptr)
55 func gogo(*g)
56 func setGContext()
57 func makeGContext(*g, unsafe.Pointer, uintptr)
58 func getTraceback(me, gp *g)
59 func gtraceback(*g)
60 func _cgo_notify_runtime_init_done()
61 func alreadyInCallers() bool
62 func stackfree(*g)
64 // Functions created by the compiler.
65 //extern __go_init_main
66 func main_init()
68 //extern main.main
69 func main_main()
71 var buildVersion = sys.TheVersion
73 // Goroutine scheduler
74 // The scheduler's job is to distribute ready-to-run goroutines over worker threads.
76 // The main concepts are:
77 // G - goroutine.
78 // M - worker thread, or machine.
79 // P - processor, a resource that is required to execute Go code.
80 // M must have an associated P to execute Go code, however it can be
81 // blocked or in a syscall w/o an associated P.
83 // Design doc at https://golang.org/s/go11sched.
85 // Worker thread parking/unparking.
86 // We need to balance between keeping enough running worker threads to utilize
87 // available hardware parallelism and parking excessive running worker threads
88 // to conserve CPU resources and power. This is not simple for two reasons:
89 // (1) scheduler state is intentionally distributed (in particular, per-P work
90 // queues), so it is not possible to compute global predicates on fast paths;
91 // (2) for optimal thread management we would need to know the future (don't park
92 // a worker thread when a new goroutine will be readied in near future).
94 // Three rejected approaches that would work badly:
95 // 1. Centralize all scheduler state (would inhibit scalability).
96 // 2. Direct goroutine handoff. That is, when we ready a new goroutine and there
97 // is a spare P, unpark a thread and handoff it the thread and the goroutine.
98 // This would lead to thread state thrashing, as the thread that readied the
99 // goroutine can be out of work the very next moment, we will need to park it.
100 // Also, it would destroy locality of computation as we want to preserve
101 // dependent goroutines on the same thread; and introduce additional latency.
102 // 3. Unpark an additional thread whenever we ready a goroutine and there is an
103 // idle P, but don't do handoff. This would lead to excessive thread parking/
104 // unparking as the additional threads will instantly park without discovering
105 // any work to do.
107 // The current approach:
108 // We unpark an additional thread when we ready a goroutine if (1) there is an
109 // idle P and there are no "spinning" worker threads. A worker thread is considered
110 // spinning if it is out of local work and did not find work in global run queue/
111 // netpoller; the spinning state is denoted in m.spinning and in sched.nmspinning.
112 // Threads unparked this way are also considered spinning; we don't do goroutine
113 // handoff so such threads are out of work initially. Spinning threads do some
114 // spinning looking for work in per-P run queues before parking. If a spinning
115 // thread finds work it takes itself out of the spinning state and proceeds to
116 // execution. If it does not find work it takes itself out of the spinning state
117 // and then parks.
118 // If there is at least one spinning thread (sched.nmspinning>1), we don't unpark
119 // new threads when readying goroutines. To compensate for that, if the last spinning
120 // thread finds work and stops spinning, it must unpark a new spinning thread.
121 // This approach smooths out unjustified spikes of thread unparking,
122 // but at the same time guarantees eventual maximal CPU parallelism utilization.
124 // The main implementation complication is that we need to be very careful during
125 // spinning->non-spinning thread transition. This transition can race with submission
126 // of a new goroutine, and either one part or another needs to unpark another worker
127 // thread. If they both fail to do that, we can end up with semi-persistent CPU
128 // underutilization. The general pattern for goroutine readying is: submit a goroutine
129 // to local work queue, #StoreLoad-style memory barrier, check sched.nmspinning.
130 // The general pattern for spinning->non-spinning transition is: decrement nmspinning,
131 // #StoreLoad-style memory barrier, check all per-P work queues for new work.
132 // Note that all this complexity does not apply to global run queue as we are not
133 // sloppy about thread unparking when submitting to global queue. Also see comments
134 // for nmspinning manipulation.
136 var (
137 m0 m
138 g0 g
141 // main_init_done is a signal used by cgocallbackg that initialization
142 // has been completed. It is made before _cgo_notify_runtime_init_done,
143 // so all cgo calls can rely on it existing. When main_init is complete,
144 // it is closed, meaning cgocallbackg can reliably receive from it.
145 var main_init_done chan bool
147 // mainStarted indicates that the main M has started.
148 var mainStarted bool
150 // runtimeInitTime is the nanotime() at which the runtime started.
151 var runtimeInitTime int64
153 // Value to use for signal mask for newly created M's.
154 var initSigmask sigset
156 // The main goroutine.
157 func main() {
158 g := getg()
160 // Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
161 // Using decimal instead of binary GB and MB because
162 // they look nicer in the stack overflow failure message.
163 if sys.PtrSize == 8 {
164 maxstacksize = 1000000000
165 } else {
166 maxstacksize = 250000000
169 // Allow newproc to start new Ms.
170 mainStarted = true
172 systemstack(func() {
173 newm(sysmon, nil)
176 // Lock the main goroutine onto this, the main OS thread,
177 // during initialization. Most programs won't care, but a few
178 // do require certain calls to be made by the main thread.
179 // Those can arrange for main.main to run in the main thread
180 // by calling runtime.LockOSThread during initialization
181 // to preserve the lock.
182 lockOSThread()
184 if g.m != &m0 {
185 throw("runtime.main not on m0")
188 // Defer unlock so that runtime.Goexit during init does the unlock too.
189 needUnlock := true
190 defer func() {
191 if needUnlock {
192 unlockOSThread()
196 // Record when the world started. Must be after runtime_init
197 // because nanotime on some platforms depends on startNano.
198 runtimeInitTime = nanotime()
200 main_init_done = make(chan bool)
201 if iscgo {
202 // Start the template thread in case we enter Go from
203 // a C-created thread and need to create a new thread.
204 startTemplateThread()
205 _cgo_notify_runtime_init_done()
208 fn := main_init // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
209 fn()
210 createGcRootsIndex()
211 close(main_init_done)
213 needUnlock = false
214 unlockOSThread()
216 // For gccgo we have to wait until after main is initialized
217 // to enable GC, because initializing main registers the GC roots.
218 gcenable()
220 if isarchive || islibrary {
221 // A program compiled with -buildmode=c-archive or c-shared
222 // has a main, but it is not executed.
223 return
225 fn = main_main // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
226 fn()
227 if raceenabled {
228 racefini()
231 // Make racy client program work: if panicking on
232 // another goroutine at the same time as main returns,
233 // let the other goroutine finish printing the panic trace.
234 // Once it does, it will exit. See issues 3934 and 20018.
235 if atomic.Load(&runningPanicDefers) != 0 {
236 // Running deferred functions should not take long.
237 for c := 0; c < 1000; c++ {
238 if atomic.Load(&runningPanicDefers) == 0 {
239 break
241 Gosched()
244 if atomic.Load(&panicking) != 0 {
245 gopark(nil, nil, "panicwait", traceEvGoStop, 1)
248 exit(0)
249 for {
250 var x *int32
251 *x = 0
255 // os_beforeExit is called from os.Exit(0).
256 //go:linkname os_beforeExit os.runtime_beforeExit
257 func os_beforeExit() {
258 if raceenabled {
259 racefini()
263 // start forcegc helper goroutine
264 func init() {
265 expectSystemGoroutine()
266 go forcegchelper()
269 func forcegchelper() {
270 setSystemGoroutine()
272 forcegc.g = getg()
273 for {
274 lock(&forcegc.lock)
275 if forcegc.idle != 0 {
276 throw("forcegc: phase error")
278 atomic.Store(&forcegc.idle, 1)
279 goparkunlock(&forcegc.lock, "force gc (idle)", traceEvGoBlock, 1)
280 // this goroutine is explicitly resumed by sysmon
281 if debug.gctrace > 0 {
282 println("GC forced")
284 // Time-triggered, fully concurrent.
285 gcStart(gcBackgroundMode, gcTrigger{kind: gcTriggerTime, now: nanotime()})
289 //go:nosplit
291 // Gosched yields the processor, allowing other goroutines to run. It does not
292 // suspend the current goroutine, so execution resumes automatically.
293 func Gosched() {
294 mcall(gosched_m)
297 // goschedguarded yields the processor like gosched, but also checks
298 // for forbidden states and opts out of the yield in those cases.
299 //go:nosplit
300 func goschedguarded() {
301 mcall(goschedguarded_m)
304 // Puts the current goroutine into a waiting state and calls unlockf.
305 // If unlockf returns false, the goroutine is resumed.
306 // unlockf must not access this G's stack, as it may be moved between
307 // the call to gopark and the call to unlockf.
308 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason string, traceEv byte, traceskip int) {
309 mp := acquirem()
310 gp := mp.curg
311 status := readgstatus(gp)
312 if status != _Grunning && status != _Gscanrunning {
313 throw("gopark: bad g status")
315 mp.waitlock = lock
316 mp.waitunlockf = *(*unsafe.Pointer)(unsafe.Pointer(&unlockf))
317 gp.waitreason = reason
318 mp.waittraceev = traceEv
319 mp.waittraceskip = traceskip
320 releasem(mp)
321 // can't do anything that might move the G between Ms here.
322 mcall(park_m)
325 // Puts the current goroutine into a waiting state and unlocks the lock.
326 // The goroutine can be made runnable again by calling goready(gp).
327 func goparkunlock(lock *mutex, reason string, traceEv byte, traceskip int) {
328 gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip)
331 func goready(gp *g, traceskip int) {
332 systemstack(func() {
333 ready(gp, traceskip, true)
337 //go:nosplit
338 func acquireSudog() *sudog {
339 // Delicate dance: the semaphore implementation calls
340 // acquireSudog, acquireSudog calls new(sudog),
341 // new calls malloc, malloc can call the garbage collector,
342 // and the garbage collector calls the semaphore implementation
343 // in stopTheWorld.
344 // Break the cycle by doing acquirem/releasem around new(sudog).
345 // The acquirem/releasem increments m.locks during new(sudog),
346 // which keeps the garbage collector from being invoked.
347 mp := acquirem()
348 pp := mp.p.ptr()
349 if len(pp.sudogcache) == 0 {
350 lock(&sched.sudoglock)
351 // First, try to grab a batch from central cache.
352 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
353 s := sched.sudogcache
354 sched.sudogcache = s.next
355 s.next = nil
356 pp.sudogcache = append(pp.sudogcache, s)
358 unlock(&sched.sudoglock)
359 // If the central cache is empty, allocate a new one.
360 if len(pp.sudogcache) == 0 {
361 pp.sudogcache = append(pp.sudogcache, new(sudog))
364 n := len(pp.sudogcache)
365 s := pp.sudogcache[n-1]
366 pp.sudogcache[n-1] = nil
367 pp.sudogcache = pp.sudogcache[:n-1]
368 if s.elem != nil {
369 throw("acquireSudog: found s.elem != nil in cache")
371 releasem(mp)
372 return s
375 //go:nosplit
376 func releaseSudog(s *sudog) {
377 if s.elem != nil {
378 throw("runtime: sudog with non-nil elem")
380 if s.isSelect {
381 throw("runtime: sudog with non-false isSelect")
383 if s.next != nil {
384 throw("runtime: sudog with non-nil next")
386 if s.prev != nil {
387 throw("runtime: sudog with non-nil prev")
389 if s.waitlink != nil {
390 throw("runtime: sudog with non-nil waitlink")
392 if s.c != nil {
393 throw("runtime: sudog with non-nil c")
395 gp := getg()
396 if gp.param != nil {
397 throw("runtime: releaseSudog with non-nil gp.param")
399 mp := acquirem() // avoid rescheduling to another P
400 pp := mp.p.ptr()
401 if len(pp.sudogcache) == cap(pp.sudogcache) {
402 // Transfer half of local cache to the central cache.
403 var first, last *sudog
404 for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
405 n := len(pp.sudogcache)
406 p := pp.sudogcache[n-1]
407 pp.sudogcache[n-1] = nil
408 pp.sudogcache = pp.sudogcache[:n-1]
409 if first == nil {
410 first = p
411 } else {
412 last.next = p
414 last = p
416 lock(&sched.sudoglock)
417 last.next = sched.sudogcache
418 sched.sudogcache = first
419 unlock(&sched.sudoglock)
421 pp.sudogcache = append(pp.sudogcache, s)
422 releasem(mp)
425 // funcPC returns the entry PC of the function f.
426 // It assumes that f is a func value. Otherwise the behavior is undefined.
427 // CAREFUL: In programs with plugins, funcPC can return different values
428 // for the same function (because there are actually multiple copies of
429 // the same function in the address space). To be safe, don't use the
430 // results of this function in any == expression. It is only safe to
431 // use the result as an address at which to start executing code.
433 // For gccgo note that this differs from the gc implementation; the gc
434 // implementation adds sys.PtrSize to the address of the interface
435 // value, but GCC's alias analysis decides that that can not be a
436 // reference to the second field of the interface, and in some cases
437 // it drops the initialization of the second field as a dead store.
438 //go:nosplit
439 func funcPC(f interface{}) uintptr {
440 i := (*iface)(unsafe.Pointer(&f))
441 return **(**uintptr)(i.data)
444 func lockedOSThread() bool {
445 gp := getg()
446 return gp.lockedm != 0 && gp.m.lockedg != 0
449 var (
450 allgs []*g
451 allglock mutex
454 func allgadd(gp *g) {
455 if readgstatus(gp) == _Gidle {
456 throw("allgadd: bad status Gidle")
459 lock(&allglock)
460 allgs = append(allgs, gp)
461 allglen = uintptr(len(allgs))
462 unlock(&allglock)
465 const (
466 // Number of goroutine ids to grab from sched.goidgen to local per-P cache at once.
467 // 16 seems to provide enough amortization, but other than that it's mostly arbitrary number.
468 _GoidCacheBatch = 16
471 // The bootstrap sequence is:
473 // call osinit
474 // call schedinit
475 // make & queue new G
476 // call runtime·mstart
478 // The new G calls runtime·main.
479 func schedinit() {
480 _m_ := &m0
481 _g_ := &g0
482 _m_.g0 = _g_
483 _m_.curg = _g_
484 _g_.m = _m_
485 setg(_g_)
487 sched.maxmcount = 10000
489 mallocinit()
490 mcommoninit(_g_.m)
491 alginit() // maps must not be used before this call
493 msigsave(_g_.m)
494 initSigmask = _g_.m.sigmask
496 goargs()
497 goenvs()
498 parsedebugvars()
499 gcinit()
501 sched.lastpoll = uint64(nanotime())
502 procs := ncpu
503 if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
504 procs = n
506 if procresize(procs) != nil {
507 throw("unknown runnable goroutine during bootstrap")
510 // For cgocheck > 1, we turn on the write barrier at all times
511 // and check all pointer writes. We can't do this until after
512 // procresize because the write barrier needs a P.
513 if debug.cgocheck > 1 {
514 writeBarrier.cgo = true
515 writeBarrier.enabled = true
516 for _, p := range allp {
517 p.wbBuf.reset()
521 if buildVersion == "" {
522 // Condition should never trigger. This code just serves
523 // to ensure runtime·buildVersion is kept in the resulting binary.
524 buildVersion = "unknown"
528 func dumpgstatus(gp *g) {
529 _g_ := getg()
530 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
531 print("runtime: g: g=", _g_, ", goid=", _g_.goid, ", g->atomicstatus=", readgstatus(_g_), "\n")
534 func checkmcount() {
535 // sched lock is held
536 if mcount() > sched.maxmcount {
537 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
538 throw("thread exhaustion")
542 func mcommoninit(mp *m) {
543 _g_ := getg()
545 // g0 stack won't make sense for user (and is not necessary unwindable).
546 if _g_ != _g_.m.g0 {
547 callers(1, mp.createstack[:])
550 lock(&sched.lock)
551 if sched.mnext+1 < sched.mnext {
552 throw("runtime: thread ID overflow")
554 mp.id = sched.mnext
555 sched.mnext++
556 checkmcount()
558 mp.fastrand[0] = 1597334677 * uint32(mp.id)
559 mp.fastrand[1] = uint32(cputicks())
560 if mp.fastrand[0]|mp.fastrand[1] == 0 {
561 mp.fastrand[1] = 1
564 mpreinit(mp)
566 // Add to allm so garbage collector doesn't free g->m
567 // when it is just in a register or thread-local storage.
568 mp.alllink = allm
570 // NumCgoCall() iterates over allm w/o schedlock,
571 // so we need to publish it safely.
572 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
573 unlock(&sched.lock)
576 // Mark gp ready to run.
577 func ready(gp *g, traceskip int, next bool) {
578 if trace.enabled {
579 traceGoUnpark(gp, traceskip)
582 status := readgstatus(gp)
584 // Mark runnable.
585 _g_ := getg()
586 _g_.m.locks++ // disable preemption because it can be holding p in a local var
587 if status&^_Gscan != _Gwaiting {
588 dumpgstatus(gp)
589 throw("bad g->status in ready")
592 // status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
593 casgstatus(gp, _Gwaiting, _Grunnable)
594 runqput(_g_.m.p.ptr(), gp, next)
595 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
596 wakep()
598 _g_.m.locks--
601 func gcprocs() int32 {
602 // Figure out how many CPUs to use during GC.
603 // Limited by gomaxprocs, number of actual CPUs, and MaxGcproc.
604 lock(&sched.lock)
605 n := gomaxprocs
606 if n > ncpu {
607 n = ncpu
609 if n > _MaxGcproc {
610 n = _MaxGcproc
612 if n > sched.nmidle+1 { // one M is currently running
613 n = sched.nmidle + 1
615 unlock(&sched.lock)
616 return n
619 func needaddgcproc() bool {
620 lock(&sched.lock)
621 n := gomaxprocs
622 if n > ncpu {
623 n = ncpu
625 if n > _MaxGcproc {
626 n = _MaxGcproc
628 n -= sched.nmidle + 1 // one M is currently running
629 unlock(&sched.lock)
630 return n > 0
633 func helpgc(nproc int32) {
634 _g_ := getg()
635 lock(&sched.lock)
636 pos := 0
637 for n := int32(1); n < nproc; n++ { // one M is currently running
638 if allp[pos].mcache == _g_.m.mcache {
639 pos++
641 mp := mget()
642 if mp == nil {
643 throw("gcprocs inconsistency")
645 mp.helpgc = n
646 mp.p.set(allp[pos])
647 mp.mcache = allp[pos].mcache
648 pos++
649 notewakeup(&mp.park)
651 unlock(&sched.lock)
654 // freezeStopWait is a large value that freezetheworld sets
655 // sched.stopwait to in order to request that all Gs permanently stop.
656 const freezeStopWait = 0x7fffffff
658 // freezing is set to non-zero if the runtime is trying to freeze the
659 // world.
660 var freezing uint32
662 // Similar to stopTheWorld but best-effort and can be called several times.
663 // There is no reverse operation, used during crashing.
664 // This function must not lock any mutexes.
665 func freezetheworld() {
666 atomic.Store(&freezing, 1)
667 // stopwait and preemption requests can be lost
668 // due to races with concurrently executing threads,
669 // so try several times
670 for i := 0; i < 5; i++ {
671 // this should tell the scheduler to not start any new goroutines
672 sched.stopwait = freezeStopWait
673 atomic.Store(&sched.gcwaiting, 1)
674 // this should stop running goroutines
675 if !preemptall() {
676 break // no running goroutines
678 usleep(1000)
680 // to be sure
681 usleep(1000)
682 preemptall()
683 usleep(1000)
686 func isscanstatus(status uint32) bool {
687 if status == _Gscan {
688 throw("isscanstatus: Bad status Gscan")
690 return status&_Gscan == _Gscan
693 // All reads and writes of g's status go through readgstatus, casgstatus
694 // castogscanstatus, casfrom_Gscanstatus.
695 //go:nosplit
696 func readgstatus(gp *g) uint32 {
697 return atomic.Load(&gp.atomicstatus)
700 // Ownership of gcscanvalid:
702 // If gp is running (meaning status == _Grunning or _Grunning|_Gscan),
703 // then gp owns gp.gcscanvalid, and other goroutines must not modify it.
705 // Otherwise, a second goroutine can lock the scan state by setting _Gscan
706 // in the status bit and then modify gcscanvalid, and then unlock the scan state.
708 // Note that the first condition implies an exception to the second:
709 // if a second goroutine changes gp's status to _Grunning|_Gscan,
710 // that second goroutine still does not have the right to modify gcscanvalid.
712 // The Gscanstatuses are acting like locks and this releases them.
713 // If it proves to be a performance hit we should be able to make these
714 // simple atomic stores but for now we are going to throw if
715 // we see an inconsistent state.
716 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
717 success := false
719 // Check that transition is valid.
720 switch oldval {
721 default:
722 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
723 dumpgstatus(gp)
724 throw("casfrom_Gscanstatus:top gp->status is not in scan state")
725 case _Gscanrunnable,
726 _Gscanwaiting,
727 _Gscanrunning,
728 _Gscansyscall:
729 if newval == oldval&^_Gscan {
730 success = atomic.Cas(&gp.atomicstatus, oldval, newval)
733 if !success {
734 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
735 dumpgstatus(gp)
736 throw("casfrom_Gscanstatus: gp->status is not in scan state")
740 // This will return false if the gp is not in the expected status and the cas fails.
741 // This acts like a lock acquire while the casfromgstatus acts like a lock release.
742 func castogscanstatus(gp *g, oldval, newval uint32) bool {
743 switch oldval {
744 case _Grunnable,
745 _Grunning,
746 _Gwaiting,
747 _Gsyscall:
748 if newval == oldval|_Gscan {
749 return atomic.Cas(&gp.atomicstatus, oldval, newval)
752 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
753 throw("castogscanstatus")
754 panic("not reached")
757 // If asked to move to or from a Gscanstatus this will throw. Use the castogscanstatus
758 // and casfrom_Gscanstatus instead.
759 // casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that
760 // put it in the Gscan state is finished.
761 //go:nosplit
762 func casgstatus(gp *g, oldval, newval uint32) {
763 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
764 systemstack(func() {
765 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
766 throw("casgstatus: bad incoming values")
770 if oldval == _Grunning && gp.gcscanvalid {
771 // If oldvall == _Grunning, then the actual status must be
772 // _Grunning or _Grunning|_Gscan; either way,
773 // we own gp.gcscanvalid, so it's safe to read.
774 // gp.gcscanvalid must not be true when we are running.
775 systemstack(func() {
776 print("runtime: casgstatus ", hex(oldval), "->", hex(newval), " gp.status=", hex(gp.atomicstatus), " gp.gcscanvalid=true\n")
777 throw("casgstatus")
781 // See http://golang.org/cl/21503 for justification of the yield delay.
782 const yieldDelay = 5 * 1000
783 var nextYield int64
785 // loop if gp->atomicstatus is in a scan state giving
786 // GC time to finish and change the state to oldval.
787 for i := 0; !atomic.Cas(&gp.atomicstatus, oldval, newval); i++ {
788 if oldval == _Gwaiting && gp.atomicstatus == _Grunnable {
789 systemstack(func() {
790 throw("casgstatus: waiting for Gwaiting but is Grunnable")
793 // Help GC if needed.
794 // if gp.preemptscan && !gp.gcworkdone && (oldval == _Grunning || oldval == _Gsyscall) {
795 // gp.preemptscan = false
796 // systemstack(func() {
797 // gcphasework(gp)
798 // })
799 // }
800 // But meanwhile just yield.
801 if i == 0 {
802 nextYield = nanotime() + yieldDelay
804 if nanotime() < nextYield {
805 for x := 0; x < 10 && gp.atomicstatus != oldval; x++ {
806 procyield(1)
808 } else {
809 osyield()
810 nextYield = nanotime() + yieldDelay/2
813 if newval == _Grunning {
814 gp.gcscanvalid = false
818 // scang blocks until gp's stack has been scanned.
819 // It might be scanned by scang or it might be scanned by the goroutine itself.
820 // Either way, the stack scan has completed when scang returns.
821 func scang(gp *g, gcw *gcWork) {
822 // Invariant; we (the caller, markroot for a specific goroutine) own gp.gcscandone.
823 // Nothing is racing with us now, but gcscandone might be set to true left over
824 // from an earlier round of stack scanning (we scan twice per GC).
825 // We use gcscandone to record whether the scan has been done during this round.
827 gp.gcscandone = false
829 // See http://golang.org/cl/21503 for justification of the yield delay.
830 const yieldDelay = 10 * 1000
831 var nextYield int64
833 // Endeavor to get gcscandone set to true,
834 // either by doing the stack scan ourselves or by coercing gp to scan itself.
835 // gp.gcscandone can transition from false to true when we're not looking
836 // (if we asked for preemption), so any time we lock the status using
837 // castogscanstatus we have to double-check that the scan is still not done.
838 loop:
839 for i := 0; !gp.gcscandone; i++ {
840 switch s := readgstatus(gp); s {
841 default:
842 dumpgstatus(gp)
843 throw("stopg: invalid status")
845 case _Gdead:
846 // No stack.
847 gp.gcscandone = true
848 break loop
850 case _Gcopystack:
851 // Stack being switched. Go around again.
853 case _Grunnable, _Gsyscall, _Gwaiting:
854 // Claim goroutine by setting scan bit.
855 // Racing with execution or readying of gp.
856 // The scan bit keeps them from running
857 // the goroutine until we're done.
858 if castogscanstatus(gp, s, s|_Gscan) {
859 if gp.scanningself {
860 // Don't try to scan the stack
861 // if the goroutine is going to do
862 // it itself.
863 restartg(gp)
864 break
866 if !gp.gcscandone {
867 scanstack(gp, gcw)
868 gp.gcscandone = true
870 restartg(gp)
871 break loop
874 case _Gscanwaiting:
875 // newstack is doing a scan for us right now. Wait.
877 case _Gscanrunning:
878 // checkPreempt is scanning. Wait.
880 case _Grunning:
881 // Goroutine running. Try to preempt execution so it can scan itself.
882 // The preemption handler (in newstack) does the actual scan.
884 // Optimization: if there is already a pending preemption request
885 // (from the previous loop iteration), don't bother with the atomics.
886 if gp.preemptscan && gp.preempt {
887 break
890 // Ask for preemption and self scan.
891 if castogscanstatus(gp, _Grunning, _Gscanrunning) {
892 if !gp.gcscandone {
893 gp.preemptscan = true
894 gp.preempt = true
896 casfrom_Gscanstatus(gp, _Gscanrunning, _Grunning)
900 if i == 0 {
901 nextYield = nanotime() + yieldDelay
903 if nanotime() < nextYield {
904 procyield(10)
905 } else {
906 osyield()
907 nextYield = nanotime() + yieldDelay/2
911 gp.preemptscan = false // cancel scan request if no longer needed
914 // The GC requests that this routine be moved from a scanmumble state to a mumble state.
915 func restartg(gp *g) {
916 s := readgstatus(gp)
917 switch s {
918 default:
919 dumpgstatus(gp)
920 throw("restartg: unexpected status")
922 case _Gdead:
923 // ok
925 case _Gscanrunnable,
926 _Gscanwaiting,
927 _Gscansyscall:
928 casfrom_Gscanstatus(gp, s, s&^_Gscan)
932 // stopTheWorld stops all P's from executing goroutines, interrupting
933 // all goroutines at GC safe points and records reason as the reason
934 // for the stop. On return, only the current goroutine's P is running.
935 // stopTheWorld must not be called from a system stack and the caller
936 // must not hold worldsema. The caller must call startTheWorld when
937 // other P's should resume execution.
939 // stopTheWorld is safe for multiple goroutines to call at the
940 // same time. Each will execute its own stop, and the stops will
941 // be serialized.
943 // This is also used by routines that do stack dumps. If the system is
944 // in panic or being exited, this may not reliably stop all
945 // goroutines.
946 func stopTheWorld(reason string) {
947 semacquire(&worldsema)
948 getg().m.preemptoff = reason
949 systemstack(stopTheWorldWithSema)
952 // startTheWorld undoes the effects of stopTheWorld.
953 func startTheWorld() {
954 systemstack(func() { startTheWorldWithSema(false) })
955 // worldsema must be held over startTheWorldWithSema to ensure
956 // gomaxprocs cannot change while worldsema is held.
957 semrelease(&worldsema)
958 getg().m.preemptoff = ""
961 // Holding worldsema grants an M the right to try to stop the world
962 // and prevents gomaxprocs from changing concurrently.
963 var worldsema uint32 = 1
965 // stopTheWorldWithSema is the core implementation of stopTheWorld.
966 // The caller is responsible for acquiring worldsema and disabling
967 // preemption first and then should stopTheWorldWithSema on the system
968 // stack:
970 // semacquire(&worldsema, 0)
971 // m.preemptoff = "reason"
972 // systemstack(stopTheWorldWithSema)
974 // When finished, the caller must either call startTheWorld or undo
975 // these three operations separately:
977 // m.preemptoff = ""
978 // systemstack(startTheWorldWithSema)
979 // semrelease(&worldsema)
981 // It is allowed to acquire worldsema once and then execute multiple
982 // startTheWorldWithSema/stopTheWorldWithSema pairs.
983 // Other P's are able to execute between successive calls to
984 // startTheWorldWithSema and stopTheWorldWithSema.
985 // Holding worldsema causes any other goroutines invoking
986 // stopTheWorld to block.
987 func stopTheWorldWithSema() {
988 _g_ := getg()
990 // If we hold a lock, then we won't be able to stop another M
991 // that is blocked trying to acquire the lock.
992 if _g_.m.locks > 0 {
993 throw("stopTheWorld: holding locks")
996 lock(&sched.lock)
997 sched.stopwait = gomaxprocs
998 atomic.Store(&sched.gcwaiting, 1)
999 preemptall()
1000 // stop current P
1001 _g_.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic.
1002 sched.stopwait--
1003 // try to retake all P's in Psyscall status
1004 for _, p := range allp {
1005 s := p.status
1006 if s == _Psyscall && atomic.Cas(&p.status, s, _Pgcstop) {
1007 if trace.enabled {
1008 traceGoSysBlock(p)
1009 traceProcStop(p)
1011 p.syscalltick++
1012 sched.stopwait--
1015 // stop idle P's
1016 for {
1017 p := pidleget()
1018 if p == nil {
1019 break
1021 p.status = _Pgcstop
1022 sched.stopwait--
1024 wait := sched.stopwait > 0
1025 unlock(&sched.lock)
1027 // wait for remaining P's to stop voluntarily
1028 if wait {
1029 for {
1030 // wait for 100us, then try to re-preempt in case of any races
1031 if notetsleep(&sched.stopnote, 100*1000) {
1032 noteclear(&sched.stopnote)
1033 break
1035 preemptall()
1039 // sanity checks
1040 bad := ""
1041 if sched.stopwait != 0 {
1042 bad = "stopTheWorld: not stopped (stopwait != 0)"
1043 } else {
1044 for _, p := range allp {
1045 if p.status != _Pgcstop {
1046 bad = "stopTheWorld: not stopped (status != _Pgcstop)"
1050 if atomic.Load(&freezing) != 0 {
1051 // Some other thread is panicking. This can cause the
1052 // sanity checks above to fail if the panic happens in
1053 // the signal handler on a stopped thread. Either way,
1054 // we should halt this thread.
1055 lock(&deadlock)
1056 lock(&deadlock)
1058 if bad != "" {
1059 throw(bad)
1063 func mhelpgc() {
1064 _g_ := getg()
1065 _g_.m.helpgc = -1
1068 func startTheWorldWithSema(emitTraceEvent bool) int64 {
1069 _g_ := getg()
1071 _g_.m.locks++ // disable preemption because it can be holding p in a local var
1072 if netpollinited() {
1073 gp := netpoll(false) // non-blocking
1074 injectglist(gp)
1076 add := needaddgcproc()
1077 lock(&sched.lock)
1079 procs := gomaxprocs
1080 if newprocs != 0 {
1081 procs = newprocs
1082 newprocs = 0
1084 p1 := procresize(procs)
1085 sched.gcwaiting = 0
1086 if sched.sysmonwait != 0 {
1087 sched.sysmonwait = 0
1088 notewakeup(&sched.sysmonnote)
1090 unlock(&sched.lock)
1092 for p1 != nil {
1093 p := p1
1094 p1 = p1.link.ptr()
1095 if p.m != 0 {
1096 mp := p.m.ptr()
1097 p.m = 0
1098 if mp.nextp != 0 {
1099 throw("startTheWorld: inconsistent mp->nextp")
1101 mp.nextp.set(p)
1102 notewakeup(&mp.park)
1103 } else {
1104 // Start M to run P. Do not start another M below.
1105 newm(nil, p)
1106 add = false
1110 // Capture start-the-world time before doing clean-up tasks.
1111 startTime := nanotime()
1112 if emitTraceEvent {
1113 traceGCSTWDone()
1116 // Wakeup an additional proc in case we have excessive runnable goroutines
1117 // in local queues or in the global queue. If we don't, the proc will park itself.
1118 // If we have lots of excessive work, resetspinning will unpark additional procs as necessary.
1119 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
1120 wakep()
1123 if add {
1124 // If GC could have used another helper proc, start one now,
1125 // in the hope that it will be available next time.
1126 // It would have been even better to start it before the collection,
1127 // but doing so requires allocating memory, so it's tricky to
1128 // coordinate. This lazy approach works out in practice:
1129 // we don't mind if the first couple gc rounds don't have quite
1130 // the maximum number of procs.
1131 newm(mhelpgc, nil)
1133 _g_.m.locks--
1135 return startTime
1138 // First function run by a new goroutine.
1139 // This is passed to makecontext.
1140 func kickoff() {
1141 gp := getg()
1143 if gp.traceback != 0 {
1144 gtraceback(gp)
1147 fv := gp.entry
1148 param := gp.param
1150 // When running on the g0 stack we can wind up here without a p,
1151 // for example from mcall(exitsyscall0) in exitsyscall, in
1152 // which case we can not run a write barrier.
1153 // It is also possible for us to get here from the systemstack
1154 // call in wbBufFlush, at which point the write barrier buffer
1155 // is full and we can not run a write barrier.
1156 // Setting gp.entry = nil or gp.param = nil will try to run a
1157 // write barrier, so if we are on the g0 stack due to mcall
1158 // (systemstack calls mcall) then clear the field using uintptr.
1159 // This is OK when gp.param is gp.m.curg, as curg will be kept
1160 // alive elsewhere, and gp.entry always points into g, or
1161 // to a statically allocated value, or (in the case of mcall)
1162 // to the stack.
1163 if gp == gp.m.g0 && gp.param == unsafe.Pointer(gp.m.curg) {
1164 *(*uintptr)(unsafe.Pointer(&gp.entry)) = 0
1165 *(*uintptr)(unsafe.Pointer(&gp.param)) = 0
1166 } else if gp.m.p == 0 {
1167 throw("no p in kickoff")
1168 } else {
1169 gp.entry = nil
1170 gp.param = nil
1173 fv(param)
1174 goexit1()
1177 func mstart1() {
1178 _g_ := getg()
1180 if _g_ != _g_.m.g0 {
1181 throw("bad runtime·mstart")
1184 asminit()
1186 // Install signal handlers; after minit so that minit can
1187 // prepare the thread to be able to handle the signals.
1188 // For gccgo minit was called by C code.
1189 if _g_.m == &m0 {
1190 mstartm0()
1193 if fn := _g_.m.mstartfn; fn != nil {
1194 fn()
1197 if _g_.m.helpgc != 0 {
1198 _g_.m.helpgc = 0
1199 stopm()
1200 } else if _g_.m != &m0 {
1201 acquirep(_g_.m.nextp.ptr())
1202 _g_.m.nextp = 0
1204 schedule()
1207 // mstartm0 implements part of mstart1 that only runs on the m0.
1209 // Write barriers are allowed here because we know the GC can't be
1210 // running yet, so they'll be no-ops.
1212 //go:yeswritebarrierrec
1213 func mstartm0() {
1214 // Create an extra M for callbacks on threads not created by Go.
1215 if iscgo && !cgoHasExtraM {
1216 cgoHasExtraM = true
1217 newextram()
1219 initsig(false)
1222 // mexit tears down and exits the current thread.
1224 // Don't call this directly to exit the thread, since it must run at
1225 // the top of the thread stack. Instead, use gogo(&_g_.m.g0.sched) to
1226 // unwind the stack to the point that exits the thread.
1228 // It is entered with m.p != nil, so write barriers are allowed. It
1229 // will release the P before exiting.
1231 //go:yeswritebarrierrec
1232 func mexit(osStack bool) {
1233 g := getg()
1234 m := g.m
1236 if m == &m0 {
1237 // This is the main thread. Just wedge it.
1239 // On Linux, exiting the main thread puts the process
1240 // into a non-waitable zombie state. On Plan 9,
1241 // exiting the main thread unblocks wait even though
1242 // other threads are still running. On Solaris we can
1243 // neither exitThread nor return from mstart. Other
1244 // bad things probably happen on other platforms.
1246 // We could try to clean up this M more before wedging
1247 // it, but that complicates signal handling.
1248 handoffp(releasep())
1249 lock(&sched.lock)
1250 sched.nmfreed++
1251 checkdead()
1252 unlock(&sched.lock)
1253 notesleep(&m.park)
1254 throw("locked m0 woke up")
1257 sigblock()
1258 unminit()
1260 // Free the gsignal stack.
1261 if m.gsignal != nil {
1262 stackfree(m.gsignal)
1265 // Remove m from allm.
1266 lock(&sched.lock)
1267 for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
1268 if *pprev == m {
1269 *pprev = m.alllink
1270 goto found
1273 throw("m not found in allm")
1274 found:
1275 if !osStack {
1276 // Delay reaping m until it's done with the stack.
1278 // If this is using an OS stack, the OS will free it
1279 // so there's no need for reaping.
1280 atomic.Store(&m.freeWait, 1)
1281 // Put m on the free list, though it will not be reaped until
1282 // freeWait is 0. Note that the free list must not be linked
1283 // through alllink because some functions walk allm without
1284 // locking, so may be using alllink.
1285 m.freelink = sched.freem
1286 sched.freem = m
1288 unlock(&sched.lock)
1290 // Release the P.
1291 handoffp(releasep())
1292 // After this point we must not have write barriers.
1294 // Invoke the deadlock detector. This must happen after
1295 // handoffp because it may have started a new M to take our
1296 // P's work.
1297 lock(&sched.lock)
1298 sched.nmfreed++
1299 checkdead()
1300 unlock(&sched.lock)
1302 if osStack {
1303 // Return from mstart and let the system thread
1304 // library free the g0 stack and terminate the thread.
1305 return
1308 // mstart is the thread's entry point, so there's nothing to
1309 // return to. Exit the thread directly. exitThread will clear
1310 // m.freeWait when it's done with the stack and the m can be
1311 // reaped.
1312 exitThread(&m.freeWait)
1315 // forEachP calls fn(p) for every P p when p reaches a GC safe point.
1316 // If a P is currently executing code, this will bring the P to a GC
1317 // safe point and execute fn on that P. If the P is not executing code
1318 // (it is idle or in a syscall), this will call fn(p) directly while
1319 // preventing the P from exiting its state. This does not ensure that
1320 // fn will run on every CPU executing Go code, but it acts as a global
1321 // memory barrier. GC uses this as a "ragged barrier."
1323 // The caller must hold worldsema.
1325 //go:systemstack
1326 func forEachP(fn func(*p)) {
1327 mp := acquirem()
1328 _p_ := getg().m.p.ptr()
1330 lock(&sched.lock)
1331 if sched.safePointWait != 0 {
1332 throw("forEachP: sched.safePointWait != 0")
1334 sched.safePointWait = gomaxprocs - 1
1335 sched.safePointFn = fn
1337 // Ask all Ps to run the safe point function.
1338 for _, p := range allp {
1339 if p != _p_ {
1340 atomic.Store(&p.runSafePointFn, 1)
1343 preemptall()
1345 // Any P entering _Pidle or _Psyscall from now on will observe
1346 // p.runSafePointFn == 1 and will call runSafePointFn when
1347 // changing its status to _Pidle/_Psyscall.
1349 // Run safe point function for all idle Ps. sched.pidle will
1350 // not change because we hold sched.lock.
1351 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
1352 if atomic.Cas(&p.runSafePointFn, 1, 0) {
1353 fn(p)
1354 sched.safePointWait--
1358 wait := sched.safePointWait > 0
1359 unlock(&sched.lock)
1361 // Run fn for the current P.
1362 fn(_p_)
1364 // Force Ps currently in _Psyscall into _Pidle and hand them
1365 // off to induce safe point function execution.
1366 for _, p := range allp {
1367 s := p.status
1368 if s == _Psyscall && p.runSafePointFn == 1 && atomic.Cas(&p.status, s, _Pidle) {
1369 if trace.enabled {
1370 traceGoSysBlock(p)
1371 traceProcStop(p)
1373 p.syscalltick++
1374 handoffp(p)
1378 // Wait for remaining Ps to run fn.
1379 if wait {
1380 for {
1381 // Wait for 100us, then try to re-preempt in
1382 // case of any races.
1384 // Requires system stack.
1385 if notetsleep(&sched.safePointNote, 100*1000) {
1386 noteclear(&sched.safePointNote)
1387 break
1389 preemptall()
1392 if sched.safePointWait != 0 {
1393 throw("forEachP: not done")
1395 for _, p := range allp {
1396 if p.runSafePointFn != 0 {
1397 throw("forEachP: P did not run fn")
1401 lock(&sched.lock)
1402 sched.safePointFn = nil
1403 unlock(&sched.lock)
1404 releasem(mp)
1407 // runSafePointFn runs the safe point function, if any, for this P.
1408 // This should be called like
1410 // if getg().m.p.runSafePointFn != 0 {
1411 // runSafePointFn()
1412 // }
1414 // runSafePointFn must be checked on any transition in to _Pidle or
1415 // _Psyscall to avoid a race where forEachP sees that the P is running
1416 // just before the P goes into _Pidle/_Psyscall and neither forEachP
1417 // nor the P run the safe-point function.
1418 func runSafePointFn() {
1419 p := getg().m.p.ptr()
1420 // Resolve the race between forEachP running the safe-point
1421 // function on this P's behalf and this P running the
1422 // safe-point function directly.
1423 if !atomic.Cas(&p.runSafePointFn, 1, 0) {
1424 return
1426 sched.safePointFn(p)
1427 lock(&sched.lock)
1428 sched.safePointWait--
1429 if sched.safePointWait == 0 {
1430 notewakeup(&sched.safePointNote)
1432 unlock(&sched.lock)
1435 // Allocate a new m unassociated with any thread.
1436 // Can use p for allocation context if needed.
1437 // fn is recorded as the new m's m.mstartfn.
1439 // This function is allowed to have write barriers even if the caller
1440 // isn't because it borrows _p_.
1442 //go:yeswritebarrierrec
1443 func allocm(_p_ *p, fn func(), allocatestack bool) (mp *m, g0Stack unsafe.Pointer, g0StackSize uintptr) {
1444 _g_ := getg()
1445 _g_.m.locks++ // disable GC because it can be called from sysmon
1446 if _g_.m.p == 0 {
1447 acquirep(_p_) // temporarily borrow p for mallocs in this function
1450 // Release the free M list. We need to do this somewhere and
1451 // this may free up a stack we can use.
1452 if sched.freem != nil {
1453 lock(&sched.lock)
1454 var newList *m
1455 for freem := sched.freem; freem != nil; {
1456 if freem.freeWait != 0 {
1457 next := freem.freelink
1458 freem.freelink = newList
1459 newList = freem
1460 freem = next
1461 continue
1463 stackfree(freem.g0)
1464 freem = freem.freelink
1466 sched.freem = newList
1467 unlock(&sched.lock)
1470 mp = new(m)
1471 mp.mstartfn = fn
1472 mcommoninit(mp)
1474 mp.g0 = malg(allocatestack, false, &g0Stack, &g0StackSize)
1475 mp.g0.m = mp
1477 if _p_ == _g_.m.p.ptr() {
1478 releasep()
1480 _g_.m.locks--
1482 return mp, g0Stack, g0StackSize
1485 // needm is called when a cgo callback happens on a
1486 // thread without an m (a thread not created by Go).
1487 // In this case, needm is expected to find an m to use
1488 // and return with m, g initialized correctly.
1489 // Since m and g are not set now (likely nil, but see below)
1490 // needm is limited in what routines it can call. In particular
1491 // it can only call nosplit functions (textflag 7) and cannot
1492 // do any scheduling that requires an m.
1494 // In order to avoid needing heavy lifting here, we adopt
1495 // the following strategy: there is a stack of available m's
1496 // that can be stolen. Using compare-and-swap
1497 // to pop from the stack has ABA races, so we simulate
1498 // a lock by doing an exchange (via casp) to steal the stack
1499 // head and replace the top pointer with MLOCKED (1).
1500 // This serves as a simple spin lock that we can use even
1501 // without an m. The thread that locks the stack in this way
1502 // unlocks the stack by storing a valid stack head pointer.
1504 // In order to make sure that there is always an m structure
1505 // available to be stolen, we maintain the invariant that there
1506 // is always one more than needed. At the beginning of the
1507 // program (if cgo is in use) the list is seeded with a single m.
1508 // If needm finds that it has taken the last m off the list, its job
1509 // is - once it has installed its own m so that it can do things like
1510 // allocate memory - to create a spare m and put it on the list.
1512 // Each of these extra m's also has a g0 and a curg that are
1513 // pressed into service as the scheduling stack and current
1514 // goroutine for the duration of the cgo callback.
1516 // When the callback is done with the m, it calls dropm to
1517 // put the m back on the list.
1518 //go:nosplit
1519 func needm(x byte) {
1520 if iscgo && !cgoHasExtraM {
1521 // Can happen if C/C++ code calls Go from a global ctor.
1522 // Can not throw, because scheduler is not initialized yet.
1523 write(2, unsafe.Pointer(&earlycgocallback[0]), int32(len(earlycgocallback)))
1524 exit(1)
1527 // Lock extra list, take head, unlock popped list.
1528 // nilokay=false is safe here because of the invariant above,
1529 // that the extra list always contains or will soon contain
1530 // at least one m.
1531 mp := lockextra(false)
1533 // Set needextram when we've just emptied the list,
1534 // so that the eventual call into cgocallbackg will
1535 // allocate a new m for the extra list. We delay the
1536 // allocation until then so that it can be done
1537 // after exitsyscall makes sure it is okay to be
1538 // running at all (that is, there's no garbage collection
1539 // running right now).
1540 mp.needextram = mp.schedlink == 0
1541 extraMCount--
1542 unlockextra(mp.schedlink.ptr())
1544 // Save and block signals before installing g.
1545 // Once g is installed, any incoming signals will try to execute,
1546 // but we won't have the sigaltstack settings and other data
1547 // set up appropriately until the end of minit, which will
1548 // unblock the signals. This is the same dance as when
1549 // starting a new m to run Go code via newosproc.
1550 msigsave(mp)
1551 sigblock()
1553 // Install g (= m->curg).
1554 setg(mp.curg)
1556 // Initialize this thread to use the m.
1557 asminit()
1558 minit()
1560 setGContext()
1562 // mp.curg is now a real goroutine.
1563 casgstatus(mp.curg, _Gdead, _Gsyscall)
1564 atomic.Xadd(&sched.ngsys, -1)
1567 var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n")
1569 // newextram allocates m's and puts them on the extra list.
1570 // It is called with a working local m, so that it can do things
1571 // like call schedlock and allocate.
1572 func newextram() {
1573 c := atomic.Xchg(&extraMWaiters, 0)
1574 if c > 0 {
1575 for i := uint32(0); i < c; i++ {
1576 oneNewExtraM()
1578 } else {
1579 // Make sure there is at least one extra M.
1580 mp := lockextra(true)
1581 unlockextra(mp)
1582 if mp == nil {
1583 oneNewExtraM()
1588 // oneNewExtraM allocates an m and puts it on the extra list.
1589 func oneNewExtraM() {
1590 // Create extra goroutine locked to extra m.
1591 // The goroutine is the context in which the cgo callback will run.
1592 // The sched.pc will never be returned to, but setting it to
1593 // goexit makes clear to the traceback routines where
1594 // the goroutine stack ends.
1595 mp, g0SP, g0SPSize := allocm(nil, nil, true)
1596 gp := malg(true, false, nil, nil)
1597 gp.gcscanvalid = true
1598 gp.gcscandone = true
1599 // malg returns status as _Gidle. Change to _Gdead before
1600 // adding to allg where GC can see it. We use _Gdead to hide
1601 // this from tracebacks and stack scans since it isn't a
1602 // "real" goroutine until needm grabs it.
1603 casgstatus(gp, _Gidle, _Gdead)
1604 gp.m = mp
1605 mp.curg = gp
1606 mp.lockedInt++
1607 mp.lockedg.set(gp)
1608 gp.lockedm.set(mp)
1609 gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1))
1610 // put on allg for garbage collector
1611 allgadd(gp)
1613 // The context for gp will be set up in needm.
1614 // Here we need to set the context for g0.
1615 makeGContext(mp.g0, g0SP, g0SPSize)
1617 // gp is now on the allg list, but we don't want it to be
1618 // counted by gcount. It would be more "proper" to increment
1619 // sched.ngfree, but that requires locking. Incrementing ngsys
1620 // has the same effect.
1621 atomic.Xadd(&sched.ngsys, +1)
1623 // Add m to the extra list.
1624 mnext := lockextra(true)
1625 mp.schedlink.set(mnext)
1626 extraMCount++
1627 unlockextra(mp)
1630 // dropm is called when a cgo callback has called needm but is now
1631 // done with the callback and returning back into the non-Go thread.
1632 // It puts the current m back onto the extra list.
1634 // The main expense here is the call to signalstack to release the
1635 // m's signal stack, and then the call to needm on the next callback
1636 // from this thread. It is tempting to try to save the m for next time,
1637 // which would eliminate both these costs, but there might not be
1638 // a next time: the current thread (which Go does not control) might exit.
1639 // If we saved the m for that thread, there would be an m leak each time
1640 // such a thread exited. Instead, we acquire and release an m on each
1641 // call. These should typically not be scheduling operations, just a few
1642 // atomics, so the cost should be small.
1644 // TODO(rsc): An alternative would be to allocate a dummy pthread per-thread
1645 // variable using pthread_key_create. Unlike the pthread keys we already use
1646 // on OS X, this dummy key would never be read by Go code. It would exist
1647 // only so that we could register at thread-exit-time destructor.
1648 // That destructor would put the m back onto the extra list.
1649 // This is purely a performance optimization. The current version,
1650 // in which dropm happens on each cgo call, is still correct too.
1651 // We may have to keep the current version on systems with cgo
1652 // but without pthreads, like Windows.
1654 // CgocallBackDone calls this after releasing p, so no write barriers.
1655 //go:nowritebarrierrec
1656 func dropm() {
1657 // Clear m and g, and return m to the extra list.
1658 // After the call to setg we can only call nosplit functions
1659 // with no pointer manipulation.
1660 mp := getg().m
1662 // Return mp.curg to dead state.
1663 casgstatus(mp.curg, _Gsyscall, _Gdead)
1664 atomic.Xadd(&sched.ngsys, +1)
1666 // Block signals before unminit.
1667 // Unminit unregisters the signal handling stack (but needs g on some systems).
1668 // Setg(nil) clears g, which is the signal handler's cue not to run Go handlers.
1669 // It's important not to try to handle a signal between those two steps.
1670 sigmask := mp.sigmask
1671 sigblock()
1672 unminit()
1674 // gccgo sets the stack to Gdead here, because the splitstack
1675 // context is not initialized.
1676 atomic.Store(&mp.curg.atomicstatus, _Gdead)
1677 mp.curg.gcstack = 0
1678 mp.curg.gcnextsp = 0
1680 mnext := lockextra(true)
1681 extraMCount++
1682 mp.schedlink.set(mnext)
1684 setg(nil)
1686 // Commit the release of mp.
1687 unlockextra(mp)
1689 msigrestore(sigmask)
1692 // A helper function for EnsureDropM.
1693 func getm() uintptr {
1694 return uintptr(unsafe.Pointer(getg().m))
1697 var extram uintptr
1698 var extraMCount uint32 // Protected by lockextra
1699 var extraMWaiters uint32
1701 // lockextra locks the extra list and returns the list head.
1702 // The caller must unlock the list by storing a new list head
1703 // to extram. If nilokay is true, then lockextra will
1704 // return a nil list head if that's what it finds. If nilokay is false,
1705 // lockextra will keep waiting until the list head is no longer nil.
1706 //go:nosplit
1707 //go:nowritebarrierrec
1708 func lockextra(nilokay bool) *m {
1709 const locked = 1
1711 incr := false
1712 for {
1713 old := atomic.Loaduintptr(&extram)
1714 if old == locked {
1715 yield := osyield
1716 yield()
1717 continue
1719 if old == 0 && !nilokay {
1720 if !incr {
1721 // Add 1 to the number of threads
1722 // waiting for an M.
1723 // This is cleared by newextram.
1724 atomic.Xadd(&extraMWaiters, 1)
1725 incr = true
1727 usleep(1)
1728 continue
1730 if atomic.Casuintptr(&extram, old, locked) {
1731 return (*m)(unsafe.Pointer(old))
1733 yield := osyield
1734 yield()
1735 continue
1739 //go:nosplit
1740 //go:nowritebarrierrec
1741 func unlockextra(mp *m) {
1742 atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp)))
1745 // execLock serializes exec and clone to avoid bugs or unspecified behaviour
1746 // around exec'ing while creating/destroying threads. See issue #19546.
1747 var execLock rwmutex
1749 // newmHandoff contains a list of m structures that need new OS threads.
1750 // This is used by newm in situations where newm itself can't safely
1751 // start an OS thread.
1752 var newmHandoff struct {
1753 lock mutex
1755 // newm points to a list of M structures that need new OS
1756 // threads. The list is linked through m.schedlink.
1757 newm muintptr
1759 // waiting indicates that wake needs to be notified when an m
1760 // is put on the list.
1761 waiting bool
1762 wake note
1764 // haveTemplateThread indicates that the templateThread has
1765 // been started. This is not protected by lock. Use cas to set
1766 // to 1.
1767 haveTemplateThread uint32
1770 // Create a new m. It will start off with a call to fn, or else the scheduler.
1771 // fn needs to be static and not a heap allocated closure.
1772 // May run with m.p==nil, so write barriers are not allowed.
1773 //go:nowritebarrierrec
1774 func newm(fn func(), _p_ *p) {
1775 mp, _, _ := allocm(_p_, fn, false)
1776 mp.nextp.set(_p_)
1777 mp.sigmask = initSigmask
1778 if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
1779 // We're on a locked M or a thread that may have been
1780 // started by C. The kernel state of this thread may
1781 // be strange (the user may have locked it for that
1782 // purpose). We don't want to clone that into another
1783 // thread. Instead, ask a known-good thread to create
1784 // the thread for us.
1786 // This is disabled on Plan 9. See golang.org/issue/22227.
1788 // TODO: This may be unnecessary on Windows, which
1789 // doesn't model thread creation off fork.
1790 lock(&newmHandoff.lock)
1791 if newmHandoff.haveTemplateThread == 0 {
1792 throw("on a locked thread with no template thread")
1794 mp.schedlink = newmHandoff.newm
1795 newmHandoff.newm.set(mp)
1796 if newmHandoff.waiting {
1797 newmHandoff.waiting = false
1798 notewakeup(&newmHandoff.wake)
1800 unlock(&newmHandoff.lock)
1801 return
1803 newm1(mp)
1806 func newm1(mp *m) {
1807 execLock.rlock() // Prevent process clone.
1808 newosproc(mp)
1809 execLock.runlock()
1812 // startTemplateThread starts the template thread if it is not already
1813 // running.
1815 // The calling thread must itself be in a known-good state.
1816 func startTemplateThread() {
1817 if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
1818 return
1820 newm(templateThread, nil)
1823 // tmeplateThread is a thread in a known-good state that exists solely
1824 // to start new threads in known-good states when the calling thread
1825 // may not be a a good state.
1827 // Many programs never need this, so templateThread is started lazily
1828 // when we first enter a state that might lead to running on a thread
1829 // in an unknown state.
1831 // templateThread runs on an M without a P, so it must not have write
1832 // barriers.
1834 //go:nowritebarrierrec
1835 func templateThread() {
1836 lock(&sched.lock)
1837 sched.nmsys++
1838 checkdead()
1839 unlock(&sched.lock)
1841 for {
1842 lock(&newmHandoff.lock)
1843 for newmHandoff.newm != 0 {
1844 newm := newmHandoff.newm.ptr()
1845 newmHandoff.newm = 0
1846 unlock(&newmHandoff.lock)
1847 for newm != nil {
1848 next := newm.schedlink.ptr()
1849 newm.schedlink = 0
1850 newm1(newm)
1851 newm = next
1853 lock(&newmHandoff.lock)
1855 newmHandoff.waiting = true
1856 noteclear(&newmHandoff.wake)
1857 unlock(&newmHandoff.lock)
1858 notesleep(&newmHandoff.wake)
1862 // Stops execution of the current m until new work is available.
1863 // Returns with acquired P.
1864 func stopm() {
1865 _g_ := getg()
1867 if _g_.m.locks != 0 {
1868 throw("stopm holding locks")
1870 if _g_.m.p != 0 {
1871 throw("stopm holding p")
1873 if _g_.m.spinning {
1874 throw("stopm spinning")
1877 retry:
1878 lock(&sched.lock)
1879 mput(_g_.m)
1880 unlock(&sched.lock)
1881 notesleep(&_g_.m.park)
1882 noteclear(&_g_.m.park)
1883 if _g_.m.helpgc != 0 {
1884 // helpgc() set _g_.m.p and _g_.m.mcache, so we have a P.
1885 gchelper()
1886 // Undo the effects of helpgc().
1887 _g_.m.helpgc = 0
1888 _g_.m.mcache = nil
1889 _g_.m.p = 0
1890 goto retry
1892 acquirep(_g_.m.nextp.ptr())
1893 _g_.m.nextp = 0
1896 func mspinning() {
1897 // startm's caller incremented nmspinning. Set the new M's spinning.
1898 getg().m.spinning = true
1901 // Schedules some M to run the p (creates an M if necessary).
1902 // If p==nil, tries to get an idle P, if no idle P's does nothing.
1903 // May run with m.p==nil, so write barriers are not allowed.
1904 // If spinning is set, the caller has incremented nmspinning and startm will
1905 // either decrement nmspinning or set m.spinning in the newly started M.
1906 //go:nowritebarrierrec
1907 func startm(_p_ *p, spinning bool) {
1908 lock(&sched.lock)
1909 if _p_ == nil {
1910 _p_ = pidleget()
1911 if _p_ == nil {
1912 unlock(&sched.lock)
1913 if spinning {
1914 // The caller incremented nmspinning, but there are no idle Ps,
1915 // so it's okay to just undo the increment and give up.
1916 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
1917 throw("startm: negative nmspinning")
1920 return
1923 mp := mget()
1924 unlock(&sched.lock)
1925 if mp == nil {
1926 var fn func()
1927 if spinning {
1928 // The caller incremented nmspinning, so set m.spinning in the new M.
1929 fn = mspinning
1931 newm(fn, _p_)
1932 return
1934 if mp.spinning {
1935 throw("startm: m is spinning")
1937 if mp.nextp != 0 {
1938 throw("startm: m has p")
1940 if spinning && !runqempty(_p_) {
1941 throw("startm: p has runnable gs")
1943 // The caller incremented nmspinning, so set m.spinning in the new M.
1944 mp.spinning = spinning
1945 mp.nextp.set(_p_)
1946 notewakeup(&mp.park)
1949 // Hands off P from syscall or locked M.
1950 // Always runs without a P, so write barriers are not allowed.
1951 //go:nowritebarrierrec
1952 func handoffp(_p_ *p) {
1953 // handoffp must start an M in any situation where
1954 // findrunnable would return a G to run on _p_.
1956 // if it has local work, start it straight away
1957 if !runqempty(_p_) || sched.runqsize != 0 {
1958 startm(_p_, false)
1959 return
1961 // if it has GC work, start it straight away
1962 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) {
1963 startm(_p_, false)
1964 return
1966 // no local work, check that there are no spinning/idle M's,
1967 // otherwise our help is not required
1968 if atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) == 0 && atomic.Cas(&sched.nmspinning, 0, 1) { // TODO: fast atomic
1969 startm(_p_, true)
1970 return
1972 lock(&sched.lock)
1973 if sched.gcwaiting != 0 {
1974 _p_.status = _Pgcstop
1975 sched.stopwait--
1976 if sched.stopwait == 0 {
1977 notewakeup(&sched.stopnote)
1979 unlock(&sched.lock)
1980 return
1982 if _p_.runSafePointFn != 0 && atomic.Cas(&_p_.runSafePointFn, 1, 0) {
1983 sched.safePointFn(_p_)
1984 sched.safePointWait--
1985 if sched.safePointWait == 0 {
1986 notewakeup(&sched.safePointNote)
1989 if sched.runqsize != 0 {
1990 unlock(&sched.lock)
1991 startm(_p_, false)
1992 return
1994 // If this is the last running P and nobody is polling network,
1995 // need to wakeup another M to poll network.
1996 if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 {
1997 unlock(&sched.lock)
1998 startm(_p_, false)
1999 return
2001 pidleput(_p_)
2002 unlock(&sched.lock)
2005 // Tries to add one more P to execute G's.
2006 // Called when a G is made runnable (newproc, ready).
2007 func wakep() {
2008 // be conservative about spinning threads
2009 if !atomic.Cas(&sched.nmspinning, 0, 1) {
2010 return
2012 startm(nil, true)
2015 // Stops execution of the current m that is locked to a g until the g is runnable again.
2016 // Returns with acquired P.
2017 func stoplockedm() {
2018 _g_ := getg()
2020 if _g_.m.lockedg == 0 || _g_.m.lockedg.ptr().lockedm.ptr() != _g_.m {
2021 throw("stoplockedm: inconsistent locking")
2023 if _g_.m.p != 0 {
2024 // Schedule another M to run this p.
2025 _p_ := releasep()
2026 handoffp(_p_)
2028 incidlelocked(1)
2029 // Wait until another thread schedules lockedg again.
2030 notesleep(&_g_.m.park)
2031 noteclear(&_g_.m.park)
2032 status := readgstatus(_g_.m.lockedg.ptr())
2033 if status&^_Gscan != _Grunnable {
2034 print("runtime:stoplockedm: g is not Grunnable or Gscanrunnable\n")
2035 dumpgstatus(_g_)
2036 throw("stoplockedm: not runnable")
2038 acquirep(_g_.m.nextp.ptr())
2039 _g_.m.nextp = 0
2042 // Schedules the locked m to run the locked gp.
2043 // May run during STW, so write barriers are not allowed.
2044 //go:nowritebarrierrec
2045 func startlockedm(gp *g) {
2046 _g_ := getg()
2048 mp := gp.lockedm.ptr()
2049 if mp == _g_.m {
2050 throw("startlockedm: locked to me")
2052 if mp.nextp != 0 {
2053 throw("startlockedm: m has p")
2055 // directly handoff current P to the locked m
2056 incidlelocked(-1)
2057 _p_ := releasep()
2058 mp.nextp.set(_p_)
2059 notewakeup(&mp.park)
2060 stopm()
2063 // Stops the current m for stopTheWorld.
2064 // Returns when the world is restarted.
2065 func gcstopm() {
2066 _g_ := getg()
2068 if sched.gcwaiting == 0 {
2069 throw("gcstopm: not waiting for gc")
2071 if _g_.m.spinning {
2072 _g_.m.spinning = false
2073 // OK to just drop nmspinning here,
2074 // startTheWorld will unpark threads as necessary.
2075 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
2076 throw("gcstopm: negative nmspinning")
2079 _p_ := releasep()
2080 lock(&sched.lock)
2081 _p_.status = _Pgcstop
2082 sched.stopwait--
2083 if sched.stopwait == 0 {
2084 notewakeup(&sched.stopnote)
2086 unlock(&sched.lock)
2087 stopm()
2090 // Schedules gp to run on the current M.
2091 // If inheritTime is true, gp inherits the remaining time in the
2092 // current time slice. Otherwise, it starts a new time slice.
2093 // Never returns.
2095 // Write barriers are allowed because this is called immediately after
2096 // acquiring a P in several places.
2098 //go:yeswritebarrierrec
2099 func execute(gp *g, inheritTime bool) {
2100 _g_ := getg()
2102 casgstatus(gp, _Grunnable, _Grunning)
2103 gp.waitsince = 0
2104 gp.preempt = false
2105 if !inheritTime {
2106 _g_.m.p.ptr().schedtick++
2108 _g_.m.curg = gp
2109 gp.m = _g_.m
2111 // Check whether the profiler needs to be turned on or off.
2112 hz := sched.profilehz
2113 if _g_.m.profilehz != hz {
2114 setThreadCPUProfiler(hz)
2117 if trace.enabled {
2118 // GoSysExit has to happen when we have a P, but before GoStart.
2119 // So we emit it here.
2120 if gp.syscallsp != 0 && gp.sysblocktraced {
2121 traceGoSysExit(gp.sysexitticks)
2123 traceGoStart()
2126 gogo(gp)
2129 // Finds a runnable goroutine to execute.
2130 // Tries to steal from other P's, get g from global queue, poll network.
2131 func findrunnable() (gp *g, inheritTime bool) {
2132 _g_ := getg()
2134 // The conditions here and in handoffp must agree: if
2135 // findrunnable would return a G to run, handoffp must start
2136 // an M.
2138 top:
2139 _p_ := _g_.m.p.ptr()
2140 if sched.gcwaiting != 0 {
2141 gcstopm()
2142 goto top
2144 if _p_.runSafePointFn != 0 {
2145 runSafePointFn()
2147 if fingwait && fingwake {
2148 if gp := wakefing(); gp != nil {
2149 ready(gp, 0, true)
2152 if *cgo_yield != nil {
2153 asmcgocall(*cgo_yield, nil)
2156 // local runq
2157 if gp, inheritTime := runqget(_p_); gp != nil {
2158 return gp, inheritTime
2161 // global runq
2162 if sched.runqsize != 0 {
2163 lock(&sched.lock)
2164 gp := globrunqget(_p_, 0)
2165 unlock(&sched.lock)
2166 if gp != nil {
2167 return gp, false
2171 // Poll network.
2172 // This netpoll is only an optimization before we resort to stealing.
2173 // We can safely skip it if there are no waiters or a thread is blocked
2174 // in netpoll already. If there is any kind of logical race with that
2175 // blocked thread (e.g. it has already returned from netpoll, but does
2176 // not set lastpoll yet), this thread will do blocking netpoll below
2177 // anyway.
2178 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Load64(&sched.lastpoll) != 0 {
2179 if gp := netpoll(false); gp != nil { // non-blocking
2180 // netpoll returns list of goroutines linked by schedlink.
2181 injectglist(gp.schedlink.ptr())
2182 casgstatus(gp, _Gwaiting, _Grunnable)
2183 if trace.enabled {
2184 traceGoUnpark(gp, 0)
2186 return gp, false
2190 // Steal work from other P's.
2191 procs := uint32(gomaxprocs)
2192 if atomic.Load(&sched.npidle) == procs-1 {
2193 // Either GOMAXPROCS=1 or everybody, except for us, is idle already.
2194 // New work can appear from returning syscall/cgocall, network or timers.
2195 // Neither of that submits to local run queues, so no point in stealing.
2196 goto stop
2198 // If number of spinning M's >= number of busy P's, block.
2199 // This is necessary to prevent excessive CPU consumption
2200 // when GOMAXPROCS>>1 but the program parallelism is low.
2201 if !_g_.m.spinning && 2*atomic.Load(&sched.nmspinning) >= procs-atomic.Load(&sched.npidle) {
2202 goto stop
2204 if !_g_.m.spinning {
2205 _g_.m.spinning = true
2206 atomic.Xadd(&sched.nmspinning, 1)
2208 for i := 0; i < 4; i++ {
2209 for enum := stealOrder.start(fastrand()); !enum.done(); enum.next() {
2210 if sched.gcwaiting != 0 {
2211 goto top
2213 stealRunNextG := i > 2 // first look for ready queues with more than 1 g
2214 if gp := runqsteal(_p_, allp[enum.position()], stealRunNextG); gp != nil {
2215 return gp, false
2220 stop:
2222 // We have nothing to do. If we're in the GC mark phase, can
2223 // safely scan and blacken objects, and have work to do, run
2224 // idle-time marking rather than give up the P.
2225 if gcBlackenEnabled != 0 && _p_.gcBgMarkWorker != 0 && gcMarkWorkAvailable(_p_) {
2226 _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
2227 gp := _p_.gcBgMarkWorker.ptr()
2228 casgstatus(gp, _Gwaiting, _Grunnable)
2229 if trace.enabled {
2230 traceGoUnpark(gp, 0)
2232 return gp, false
2235 // Before we drop our P, make a snapshot of the allp slice,
2236 // which can change underfoot once we no longer block
2237 // safe-points. We don't need to snapshot the contents because
2238 // everything up to cap(allp) is immutable.
2239 allpSnapshot := allp
2241 // return P and block
2242 lock(&sched.lock)
2243 if sched.gcwaiting != 0 || _p_.runSafePointFn != 0 {
2244 unlock(&sched.lock)
2245 goto top
2247 if sched.runqsize != 0 {
2248 gp := globrunqget(_p_, 0)
2249 unlock(&sched.lock)
2250 return gp, false
2252 if releasep() != _p_ {
2253 throw("findrunnable: wrong p")
2255 pidleput(_p_)
2256 unlock(&sched.lock)
2258 // Delicate dance: thread transitions from spinning to non-spinning state,
2259 // potentially concurrently with submission of new goroutines. We must
2260 // drop nmspinning first and then check all per-P queues again (with
2261 // #StoreLoad memory barrier in between). If we do it the other way around,
2262 // another thread can submit a goroutine after we've checked all run queues
2263 // but before we drop nmspinning; as the result nobody will unpark a thread
2264 // to run the goroutine.
2265 // If we discover new work below, we need to restore m.spinning as a signal
2266 // for resetspinning to unpark a new worker thread (because there can be more
2267 // than one starving goroutine). However, if after discovering new work
2268 // we also observe no idle Ps, it is OK to just park the current thread:
2269 // the system is fully loaded so no spinning threads are required.
2270 // Also see "Worker thread parking/unparking" comment at the top of the file.
2271 wasSpinning := _g_.m.spinning
2272 if _g_.m.spinning {
2273 _g_.m.spinning = false
2274 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
2275 throw("findrunnable: negative nmspinning")
2279 // check all runqueues once again
2280 for _, _p_ := range allpSnapshot {
2281 if !runqempty(_p_) {
2282 lock(&sched.lock)
2283 _p_ = pidleget()
2284 unlock(&sched.lock)
2285 if _p_ != nil {
2286 acquirep(_p_)
2287 if wasSpinning {
2288 _g_.m.spinning = true
2289 atomic.Xadd(&sched.nmspinning, 1)
2291 goto top
2293 break
2297 // Check for idle-priority GC work again.
2298 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(nil) {
2299 lock(&sched.lock)
2300 _p_ = pidleget()
2301 if _p_ != nil && _p_.gcBgMarkWorker == 0 {
2302 pidleput(_p_)
2303 _p_ = nil
2305 unlock(&sched.lock)
2306 if _p_ != nil {
2307 acquirep(_p_)
2308 if wasSpinning {
2309 _g_.m.spinning = true
2310 atomic.Xadd(&sched.nmspinning, 1)
2312 // Go back to idle GC check.
2313 goto stop
2317 // poll network
2318 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Xchg64(&sched.lastpoll, 0) != 0 {
2319 if _g_.m.p != 0 {
2320 throw("findrunnable: netpoll with p")
2322 if _g_.m.spinning {
2323 throw("findrunnable: netpoll with spinning")
2325 gp := netpoll(true) // block until new work is available
2326 atomic.Store64(&sched.lastpoll, uint64(nanotime()))
2327 if gp != nil {
2328 lock(&sched.lock)
2329 _p_ = pidleget()
2330 unlock(&sched.lock)
2331 if _p_ != nil {
2332 acquirep(_p_)
2333 injectglist(gp.schedlink.ptr())
2334 casgstatus(gp, _Gwaiting, _Grunnable)
2335 if trace.enabled {
2336 traceGoUnpark(gp, 0)
2338 return gp, false
2340 injectglist(gp)
2343 stopm()
2344 goto top
2347 // pollWork returns true if there is non-background work this P could
2348 // be doing. This is a fairly lightweight check to be used for
2349 // background work loops, like idle GC. It checks a subset of the
2350 // conditions checked by the actual scheduler.
2351 func pollWork() bool {
2352 if sched.runqsize != 0 {
2353 return true
2355 p := getg().m.p.ptr()
2356 if !runqempty(p) {
2357 return true
2359 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && sched.lastpoll != 0 {
2360 if gp := netpoll(false); gp != nil {
2361 injectglist(gp)
2362 return true
2365 return false
2368 func resetspinning() {
2369 _g_ := getg()
2370 if !_g_.m.spinning {
2371 throw("resetspinning: not a spinning m")
2373 _g_.m.spinning = false
2374 nmspinning := atomic.Xadd(&sched.nmspinning, -1)
2375 if int32(nmspinning) < 0 {
2376 throw("findrunnable: negative nmspinning")
2378 // M wakeup policy is deliberately somewhat conservative, so check if we
2379 // need to wakeup another P here. See "Worker thread parking/unparking"
2380 // comment at the top of the file for details.
2381 if nmspinning == 0 && atomic.Load(&sched.npidle) > 0 {
2382 wakep()
2386 // Injects the list of runnable G's into the scheduler.
2387 // Can run concurrently with GC.
2388 func injectglist(glist *g) {
2389 if glist == nil {
2390 return
2392 if trace.enabled {
2393 for gp := glist; gp != nil; gp = gp.schedlink.ptr() {
2394 traceGoUnpark(gp, 0)
2397 lock(&sched.lock)
2398 var n int
2399 for n = 0; glist != nil; n++ {
2400 gp := glist
2401 glist = gp.schedlink.ptr()
2402 casgstatus(gp, _Gwaiting, _Grunnable)
2403 globrunqput(gp)
2405 unlock(&sched.lock)
2406 for ; n != 0 && sched.npidle != 0; n-- {
2407 startm(nil, false)
2411 // One round of scheduler: find a runnable goroutine and execute it.
2412 // Never returns.
2413 func schedule() {
2414 _g_ := getg()
2416 if _g_.m.locks != 0 {
2417 throw("schedule: holding locks")
2420 if _g_.m.lockedg != 0 {
2421 stoplockedm()
2422 execute(_g_.m.lockedg.ptr(), false) // Never returns.
2425 // We should not schedule away from a g that is executing a cgo call,
2426 // since the cgo call is using the m's g0 stack.
2427 if _g_.m.incgo {
2428 throw("schedule: in cgo")
2431 top:
2432 if sched.gcwaiting != 0 {
2433 gcstopm()
2434 goto top
2436 if _g_.m.p.ptr().runSafePointFn != 0 {
2437 runSafePointFn()
2440 var gp *g
2441 var inheritTime bool
2442 if trace.enabled || trace.shutdown {
2443 gp = traceReader()
2444 if gp != nil {
2445 casgstatus(gp, _Gwaiting, _Grunnable)
2446 traceGoUnpark(gp, 0)
2449 if gp == nil && gcBlackenEnabled != 0 {
2450 gp = gcController.findRunnableGCWorker(_g_.m.p.ptr())
2452 if gp == nil {
2453 // Check the global runnable queue once in a while to ensure fairness.
2454 // Otherwise two goroutines can completely occupy the local runqueue
2455 // by constantly respawning each other.
2456 if _g_.m.p.ptr().schedtick%61 == 0 && sched.runqsize > 0 {
2457 lock(&sched.lock)
2458 gp = globrunqget(_g_.m.p.ptr(), 1)
2459 unlock(&sched.lock)
2462 if gp == nil {
2463 gp, inheritTime = runqget(_g_.m.p.ptr())
2464 if gp != nil && _g_.m.spinning {
2465 throw("schedule: spinning with local work")
2468 // Because gccgo does not implement preemption as a stack check,
2469 // we need to check for preemption here for fairness.
2470 // Otherwise goroutines on the local queue may starve
2471 // goroutines on the global queue.
2472 // Since we preempt by storing the goroutine on the global
2473 // queue, this is the only place we need to check preempt.
2474 // This does not call checkPreempt because gp is not running.
2475 if gp != nil && gp.preempt {
2476 gp.preempt = false
2477 lock(&sched.lock)
2478 globrunqput(gp)
2479 unlock(&sched.lock)
2480 goto top
2483 if gp == nil {
2484 gp, inheritTime = findrunnable() // blocks until work is available
2487 // This thread is going to run a goroutine and is not spinning anymore,
2488 // so if it was marked as spinning we need to reset it now and potentially
2489 // start a new spinning M.
2490 if _g_.m.spinning {
2491 resetspinning()
2494 if gp.lockedm != 0 {
2495 // Hands off own p to the locked m,
2496 // then blocks waiting for a new p.
2497 startlockedm(gp)
2498 goto top
2501 execute(gp, inheritTime)
2504 // dropg removes the association between m and the current goroutine m->curg (gp for short).
2505 // Typically a caller sets gp's status away from Grunning and then
2506 // immediately calls dropg to finish the job. The caller is also responsible
2507 // for arranging that gp will be restarted using ready at an
2508 // appropriate time. After calling dropg and arranging for gp to be
2509 // readied later, the caller can do other work but eventually should
2510 // call schedule to restart the scheduling of goroutines on this m.
2511 func dropg() {
2512 _g_ := getg()
2514 setMNoWB(&_g_.m.curg.m, nil)
2515 setGNoWB(&_g_.m.curg, nil)
2518 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
2519 unlock((*mutex)(lock))
2520 return true
2523 // park continuation on g0.
2524 func park_m(gp *g) {
2525 _g_ := getg()
2527 if trace.enabled {
2528 traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip)
2531 casgstatus(gp, _Grunning, _Gwaiting)
2532 dropg()
2534 if _g_.m.waitunlockf != nil {
2535 fn := *(*func(*g, unsafe.Pointer) bool)(unsafe.Pointer(&_g_.m.waitunlockf))
2536 ok := fn(gp, _g_.m.waitlock)
2537 _g_.m.waitunlockf = nil
2538 _g_.m.waitlock = nil
2539 if !ok {
2540 if trace.enabled {
2541 traceGoUnpark(gp, 2)
2543 casgstatus(gp, _Gwaiting, _Grunnable)
2544 execute(gp, true) // Schedule it back, never returns.
2547 schedule()
2550 func goschedImpl(gp *g) {
2551 status := readgstatus(gp)
2552 if status&^_Gscan != _Grunning {
2553 dumpgstatus(gp)
2554 throw("bad g status")
2556 casgstatus(gp, _Grunning, _Grunnable)
2557 dropg()
2558 lock(&sched.lock)
2559 globrunqput(gp)
2560 unlock(&sched.lock)
2562 schedule()
2565 // Gosched continuation on g0.
2566 func gosched_m(gp *g) {
2567 if trace.enabled {
2568 traceGoSched()
2570 goschedImpl(gp)
2573 // goschedguarded is a forbidden-states-avoided version of gosched_m
2574 func goschedguarded_m(gp *g) {
2576 if gp.m.locks != 0 || gp.m.mallocing != 0 || gp.m.preemptoff != "" || gp.m.p.ptr().status != _Prunning {
2577 gogo(gp) // never return
2580 if trace.enabled {
2581 traceGoSched()
2583 goschedImpl(gp)
2586 func gopreempt_m(gp *g) {
2587 if trace.enabled {
2588 traceGoPreempt()
2590 goschedImpl(gp)
2593 // Finishes execution of the current goroutine.
2594 func goexit1() {
2595 if trace.enabled {
2596 traceGoEnd()
2598 mcall(goexit0)
2601 // goexit continuation on g0.
2602 func goexit0(gp *g) {
2603 _g_ := getg()
2605 casgstatus(gp, _Grunning, _Gdead)
2606 if isSystemGoroutine(gp) {
2607 atomic.Xadd(&sched.ngsys, -1)
2608 gp.isSystemGoroutine = false
2610 gp.m = nil
2611 locked := gp.lockedm != 0
2612 gp.lockedm = 0
2613 _g_.m.lockedg = 0
2614 gp.entry = nil
2615 gp.paniconfault = false
2616 gp._defer = nil // should be true already but just in case.
2617 gp._panic = nil // non-nil for Goexit during panic. points at stack-allocated data.
2618 gp.writebuf = nil
2619 gp.waitreason = ""
2620 gp.param = nil
2621 gp.labels = nil
2622 gp.timer = nil
2624 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
2625 // Flush assist credit to the global pool. This gives
2626 // better information to pacing if the application is
2627 // rapidly creating an exiting goroutines.
2628 scanCredit := int64(gcController.assistWorkPerByte * float64(gp.gcAssistBytes))
2629 atomic.Xaddint64(&gcController.bgScanCredit, scanCredit)
2630 gp.gcAssistBytes = 0
2633 // Note that gp's stack scan is now "valid" because it has no
2634 // stack.
2635 gp.gcscanvalid = true
2636 dropg()
2638 if _g_.m.lockedInt != 0 {
2639 print("invalid m->lockedInt = ", _g_.m.lockedInt, "\n")
2640 throw("internal lockOSThread error")
2642 _g_.m.lockedExt = 0
2643 gfput(_g_.m.p.ptr(), gp)
2644 if locked {
2645 // The goroutine may have locked this thread because
2646 // it put it in an unusual kernel state. Kill it
2647 // rather than returning it to the thread pool.
2649 // Return to mstart, which will release the P and exit
2650 // the thread.
2651 if GOOS != "plan9" { // See golang.org/issue/22227.
2652 _g_.m.exiting = true
2653 gogo(_g_.m.g0)
2656 schedule()
2659 // The goroutine g is about to enter a system call.
2660 // Record that it's not using the cpu anymore.
2661 // This is called only from the go syscall library and cgocall,
2662 // not from the low-level system calls used by the runtime.
2664 // The entersyscall function is written in C, so that it can save the
2665 // current register context so that the GC will see them.
2666 // It calls reentersyscall.
2668 // Syscall tracing:
2669 // At the start of a syscall we emit traceGoSysCall to capture the stack trace.
2670 // If the syscall does not block, that is it, we do not emit any other events.
2671 // If the syscall blocks (that is, P is retaken), retaker emits traceGoSysBlock;
2672 // when syscall returns we emit traceGoSysExit and when the goroutine starts running
2673 // (potentially instantly, if exitsyscallfast returns true) we emit traceGoStart.
2674 // To ensure that traceGoSysExit is emitted strictly after traceGoSysBlock,
2675 // we remember current value of syscalltick in m (_g_.m.syscalltick = _g_.m.p.ptr().syscalltick),
2676 // whoever emits traceGoSysBlock increments p.syscalltick afterwards;
2677 // and we wait for the increment before emitting traceGoSysExit.
2678 // Note that the increment is done even if tracing is not enabled,
2679 // because tracing can be enabled in the middle of syscall. We don't want the wait to hang.
2681 //go:nosplit
2682 //go:noinline
2683 func reentersyscall(pc, sp uintptr) {
2684 _g_ := getg()
2686 // Disable preemption because during this function g is in Gsyscall status,
2687 // but can have inconsistent g->sched, do not let GC observe it.
2688 _g_.m.locks++
2690 _g_.syscallsp = sp
2691 _g_.syscallpc = pc
2692 casgstatus(_g_, _Grunning, _Gsyscall)
2694 if trace.enabled {
2695 systemstack(traceGoSysCall)
2698 if atomic.Load(&sched.sysmonwait) != 0 {
2699 systemstack(entersyscall_sysmon)
2702 if _g_.m.p.ptr().runSafePointFn != 0 {
2703 // runSafePointFn may stack split if run on this stack
2704 systemstack(runSafePointFn)
2707 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick
2708 _g_.sysblocktraced = true
2709 _g_.m.mcache = nil
2710 _g_.m.p.ptr().m = 0
2711 atomic.Store(&_g_.m.p.ptr().status, _Psyscall)
2712 if sched.gcwaiting != 0 {
2713 systemstack(entersyscall_gcwait)
2716 _g_.m.locks--
2719 func entersyscall_sysmon() {
2720 lock(&sched.lock)
2721 if atomic.Load(&sched.sysmonwait) != 0 {
2722 atomic.Store(&sched.sysmonwait, 0)
2723 notewakeup(&sched.sysmonnote)
2725 unlock(&sched.lock)
2728 func entersyscall_gcwait() {
2729 _g_ := getg()
2730 _p_ := _g_.m.p.ptr()
2732 lock(&sched.lock)
2733 if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) {
2734 if trace.enabled {
2735 traceGoSysBlock(_p_)
2736 traceProcStop(_p_)
2738 _p_.syscalltick++
2739 if sched.stopwait--; sched.stopwait == 0 {
2740 notewakeup(&sched.stopnote)
2743 unlock(&sched.lock)
2746 // The same as reentersyscall(), but with a hint that the syscall is blocking.
2747 //go:nosplit
2748 func reentersyscallblock(pc, sp uintptr) {
2749 _g_ := getg()
2751 _g_.m.locks++ // see comment in entersyscall
2752 _g_.throwsplit = true
2753 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick
2754 _g_.sysblocktraced = true
2755 _g_.m.p.ptr().syscalltick++
2757 // Leave SP around for GC and traceback.
2758 _g_.syscallsp = sp
2759 _g_.syscallpc = pc
2760 casgstatus(_g_, _Grunning, _Gsyscall)
2761 systemstack(entersyscallblock_handoff)
2763 _g_.m.locks--
2766 func entersyscallblock_handoff() {
2767 if trace.enabled {
2768 traceGoSysCall()
2769 traceGoSysBlock(getg().m.p.ptr())
2771 handoffp(releasep())
2774 // The goroutine g exited its system call.
2775 // Arrange for it to run on a cpu again.
2776 // This is called only from the go syscall library, not
2777 // from the low-level system calls used by the runtime.
2779 // Write barriers are not allowed because our P may have been stolen.
2781 //go:nosplit
2782 //go:nowritebarrierrec
2783 func exitsyscall() {
2784 _g_ := getg()
2786 _g_.m.locks++ // see comment in entersyscall
2788 _g_.waitsince = 0
2789 oldp := _g_.m.p.ptr()
2790 if exitsyscallfast() {
2791 if _g_.m.mcache == nil {
2792 systemstack(func() {
2793 throw("lost mcache")
2796 if trace.enabled {
2797 if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
2798 systemstack(traceGoStart)
2801 // There's a cpu for us, so we can run.
2802 _g_.m.p.ptr().syscalltick++
2803 // We need to cas the status and scan before resuming...
2804 casgstatus(_g_, _Gsyscall, _Grunning)
2806 exitsyscallclear(_g_)
2807 _g_.m.locks--
2808 _g_.throwsplit = false
2810 // Check preemption, since unlike gc we don't check on
2811 // every call.
2812 if getg().preempt {
2813 checkPreempt()
2816 return
2819 _g_.sysexitticks = 0
2820 if trace.enabled {
2821 // Wait till traceGoSysBlock event is emitted.
2822 // This ensures consistency of the trace (the goroutine is started after it is blocked).
2823 for oldp != nil && oldp.syscalltick == _g_.m.syscalltick {
2824 osyield()
2826 // We can't trace syscall exit right now because we don't have a P.
2827 // Tracing code can invoke write barriers that cannot run without a P.
2828 // So instead we remember the syscall exit time and emit the event
2829 // in execute when we have a P.
2830 _g_.sysexitticks = cputicks()
2833 _g_.m.locks--
2835 // Call the scheduler.
2836 mcall(exitsyscall0)
2838 if _g_.m.mcache == nil {
2839 systemstack(func() {
2840 throw("lost mcache")
2844 // Scheduler returned, so we're allowed to run now.
2845 // Delete the syscallsp information that we left for
2846 // the garbage collector during the system call.
2847 // Must wait until now because until gosched returns
2848 // we don't know for sure that the garbage collector
2849 // is not running.
2850 exitsyscallclear(_g_)
2852 _g_.m.p.ptr().syscalltick++
2853 _g_.throwsplit = false
2856 //go:nosplit
2857 func exitsyscallfast() bool {
2858 _g_ := getg()
2860 // Freezetheworld sets stopwait but does not retake P's.
2861 if sched.stopwait == freezeStopWait {
2862 _g_.m.mcache = nil
2863 _g_.m.p = 0
2864 return false
2867 // Try to re-acquire the last P.
2868 if _g_.m.p != 0 && _g_.m.p.ptr().status == _Psyscall && atomic.Cas(&_g_.m.p.ptr().status, _Psyscall, _Prunning) {
2869 // There's a cpu for us, so we can run.
2870 exitsyscallfast_reacquired()
2871 return true
2874 // Try to get any other idle P.
2875 oldp := _g_.m.p.ptr()
2876 _g_.m.mcache = nil
2877 _g_.m.p = 0
2878 if sched.pidle != 0 {
2879 var ok bool
2880 systemstack(func() {
2881 ok = exitsyscallfast_pidle()
2882 if ok && trace.enabled {
2883 if oldp != nil {
2884 // Wait till traceGoSysBlock event is emitted.
2885 // This ensures consistency of the trace (the goroutine is started after it is blocked).
2886 for oldp.syscalltick == _g_.m.syscalltick {
2887 osyield()
2890 traceGoSysExit(0)
2893 if ok {
2894 return true
2897 return false
2900 // exitsyscallfast_reacquired is the exitsyscall path on which this G
2901 // has successfully reacquired the P it was running on before the
2902 // syscall.
2904 // This function is allowed to have write barriers because exitsyscall
2905 // has acquired a P at this point.
2907 //go:yeswritebarrierrec
2908 //go:nosplit
2909 func exitsyscallfast_reacquired() {
2910 _g_ := getg()
2911 _g_.m.mcache = _g_.m.p.ptr().mcache
2912 _g_.m.p.ptr().m.set(_g_.m)
2913 if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
2914 if trace.enabled {
2915 // The p was retaken and then enter into syscall again (since _g_.m.syscalltick has changed).
2916 // traceGoSysBlock for this syscall was already emitted,
2917 // but here we effectively retake the p from the new syscall running on the same p.
2918 systemstack(func() {
2919 // Denote blocking of the new syscall.
2920 traceGoSysBlock(_g_.m.p.ptr())
2921 // Denote completion of the current syscall.
2922 traceGoSysExit(0)
2925 _g_.m.p.ptr().syscalltick++
2929 func exitsyscallfast_pidle() bool {
2930 lock(&sched.lock)
2931 _p_ := pidleget()
2932 if _p_ != nil && atomic.Load(&sched.sysmonwait) != 0 {
2933 atomic.Store(&sched.sysmonwait, 0)
2934 notewakeup(&sched.sysmonnote)
2936 unlock(&sched.lock)
2937 if _p_ != nil {
2938 acquirep(_p_)
2939 return true
2941 return false
2944 // exitsyscall slow path on g0.
2945 // Failed to acquire P, enqueue gp as runnable.
2947 //go:nowritebarrierrec
2948 func exitsyscall0(gp *g) {
2949 _g_ := getg()
2951 casgstatus(gp, _Gsyscall, _Grunnable)
2952 dropg()
2953 lock(&sched.lock)
2954 _p_ := pidleget()
2955 if _p_ == nil {
2956 globrunqput(gp)
2957 } else if atomic.Load(&sched.sysmonwait) != 0 {
2958 atomic.Store(&sched.sysmonwait, 0)
2959 notewakeup(&sched.sysmonnote)
2961 unlock(&sched.lock)
2962 if _p_ != nil {
2963 acquirep(_p_)
2964 execute(gp, false) // Never returns.
2966 if _g_.m.lockedg != 0 {
2967 // Wait until another thread schedules gp and so m again.
2968 stoplockedm()
2969 execute(gp, false) // Never returns.
2971 stopm()
2972 schedule() // Never returns.
2975 // exitsyscallclear clears GC-related information that we only track
2976 // during a syscall.
2977 func exitsyscallclear(gp *g) {
2978 // Garbage collector isn't running (since we are), so okay to
2979 // clear syscallsp.
2980 gp.syscallsp = 0
2982 gp.gcstack = 0
2983 gp.gcnextsp = 0
2984 memclrNoHeapPointers(unsafe.Pointer(&gp.gcregs), unsafe.Sizeof(gp.gcregs))
2987 // Code generated by cgo, and some library code, calls syscall.Entersyscall
2988 // and syscall.Exitsyscall.
2990 //go:linkname syscall_entersyscall syscall.Entersyscall
2991 //go:nosplit
2992 func syscall_entersyscall() {
2993 entersyscall()
2996 //go:linkname syscall_exitsyscall syscall.Exitsyscall
2997 //go:nosplit
2998 func syscall_exitsyscall() {
2999 exitsyscall()
3002 func beforefork() {
3003 gp := getg().m.curg
3005 // Block signals during a fork, so that the child does not run
3006 // a signal handler before exec if a signal is sent to the process
3007 // group. See issue #18600.
3008 gp.m.locks++
3009 msigsave(gp.m)
3010 sigblock()
3013 // Called from syscall package before fork.
3014 //go:linkname syscall_runtime_BeforeFork syscall.runtime_BeforeFork
3015 //go:nosplit
3016 func syscall_runtime_BeforeFork() {
3017 systemstack(beforefork)
3020 func afterfork() {
3021 gp := getg().m.curg
3023 msigrestore(gp.m.sigmask)
3025 gp.m.locks--
3028 // Called from syscall package after fork in parent.
3029 //go:linkname syscall_runtime_AfterFork syscall.runtime_AfterFork
3030 //go:nosplit
3031 func syscall_runtime_AfterFork() {
3032 systemstack(afterfork)
3035 // inForkedChild is true while manipulating signals in the child process.
3036 // This is used to avoid calling libc functions in case we are using vfork.
3037 var inForkedChild bool
3039 // Called from syscall package after fork in child.
3040 // It resets non-sigignored signals to the default handler, and
3041 // restores the signal mask in preparation for the exec.
3043 // Because this might be called during a vfork, and therefore may be
3044 // temporarily sharing address space with the parent process, this must
3045 // not change any global variables or calling into C code that may do so.
3047 //go:linkname syscall_runtime_AfterForkInChild syscall.runtime_AfterForkInChild
3048 //go:nosplit
3049 //go:nowritebarrierrec
3050 func syscall_runtime_AfterForkInChild() {
3051 // It's OK to change the global variable inForkedChild here
3052 // because we are going to change it back. There is no race here,
3053 // because if we are sharing address space with the parent process,
3054 // then the parent process can not be running concurrently.
3055 inForkedChild = true
3057 clearSignalHandlers()
3059 // When we are the child we are the only thread running,
3060 // so we know that nothing else has changed gp.m.sigmask.
3061 msigrestore(getg().m.sigmask)
3063 inForkedChild = false
3066 // Called from syscall package before Exec.
3067 //go:linkname syscall_runtime_BeforeExec syscall.runtime_BeforeExec
3068 func syscall_runtime_BeforeExec() {
3069 // Prevent thread creation during exec.
3070 execLock.lock()
3073 // Called from syscall package after Exec.
3074 //go:linkname syscall_runtime_AfterExec syscall.runtime_AfterExec
3075 func syscall_runtime_AfterExec() {
3076 execLock.unlock()
3079 // Create a new g running fn passing arg as the single argument.
3080 // Put it on the queue of g's waiting to run.
3081 // The compiler turns a go statement into a call to this.
3082 //go:linkname newproc __go_go
3083 func newproc(fn uintptr, arg unsafe.Pointer) *g {
3084 _g_ := getg()
3086 if fn == 0 {
3087 _g_.m.throwing = -1 // do not dump full stacks
3088 throw("go of nil func value")
3090 _g_.m.locks++ // disable preemption because it can be holding p in a local var
3092 _p_ := _g_.m.p.ptr()
3093 newg := gfget(_p_)
3094 var (
3095 sp unsafe.Pointer
3096 spsize uintptr
3098 if newg == nil {
3099 newg = malg(true, false, &sp, &spsize)
3100 casgstatus(newg, _Gidle, _Gdead)
3101 allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack.
3102 } else {
3103 resetNewG(newg, &sp, &spsize)
3105 newg.traceback = 0
3107 if readgstatus(newg) != _Gdead {
3108 throw("newproc1: new g is not Gdead")
3111 // Store the C function pointer into entryfn, take the address
3112 // of entryfn, convert it to a Go function value, and store
3113 // that in entry.
3114 newg.entryfn = fn
3115 var entry func(unsafe.Pointer)
3116 *(*unsafe.Pointer)(unsafe.Pointer(&entry)) = unsafe.Pointer(&newg.entryfn)
3117 newg.entry = entry
3119 newg.param = arg
3120 newg.gopc = getcallerpc()
3121 newg.startpc = fn
3122 if _g_.m.curg != nil {
3123 newg.labels = _g_.m.curg.labels
3125 if isSystemGoroutine(newg) {
3126 atomic.Xadd(&sched.ngsys, +1)
3128 newg.gcscanvalid = false
3129 casgstatus(newg, _Gdead, _Grunnable)
3131 if _p_.goidcache == _p_.goidcacheend {
3132 // Sched.goidgen is the last allocated id,
3133 // this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
3134 // At startup sched.goidgen=0, so main goroutine receives goid=1.
3135 _p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch)
3136 _p_.goidcache -= _GoidCacheBatch - 1
3137 _p_.goidcacheend = _p_.goidcache + _GoidCacheBatch
3139 newg.goid = int64(_p_.goidcache)
3140 _p_.goidcache++
3141 if trace.enabled {
3142 traceGoCreate(newg, newg.startpc)
3145 makeGContext(newg, sp, spsize)
3147 runqput(_p_, newg, true)
3149 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 && mainStarted {
3150 wakep()
3152 _g_.m.locks--
3153 return newg
3156 // expectedSystemGoroutines counts the number of goroutines expected
3157 // to mark themselves as system goroutines. After they mark themselves
3158 // by calling setSystemGoroutine, this is decremented. NumGoroutines
3159 // uses this to wait for all system goroutines to mark themselves
3160 // before it counts them.
3161 var expectedSystemGoroutines uint32
3163 // expectSystemGoroutine is called when starting a goroutine that will
3164 // call setSystemGoroutine. It increments expectedSystemGoroutines.
3165 func expectSystemGoroutine() {
3166 atomic.Xadd(&expectedSystemGoroutines, +1)
3169 // waitForSystemGoroutines waits for all currently expected system
3170 // goroutines to register themselves.
3171 func waitForSystemGoroutines() {
3172 for atomic.Load(&expectedSystemGoroutines) > 0 {
3173 Gosched()
3174 osyield()
3178 // setSystemGoroutine marks this goroutine as a "system goroutine".
3179 // In the gc toolchain this is done by comparing startpc to a list of
3180 // saved special PCs. In gccgo that approach does not work as startpc
3181 // is often a thunk that invokes the real function with arguments,
3182 // so the thunk address never matches the saved special PCs. Instead,
3183 // since there are only a limited number of "system goroutines",
3184 // we force each one to mark itself as special.
3185 func setSystemGoroutine() {
3186 getg().isSystemGoroutine = true
3187 atomic.Xadd(&sched.ngsys, +1)
3188 atomic.Xadd(&expectedSystemGoroutines, -1)
3191 // Put on gfree list.
3192 // If local list is too long, transfer a batch to the global list.
3193 func gfput(_p_ *p, gp *g) {
3194 if readgstatus(gp) != _Gdead {
3195 throw("gfput: bad status (not Gdead)")
3198 gp.schedlink.set(_p_.gfree)
3199 _p_.gfree = gp
3200 _p_.gfreecnt++
3201 if _p_.gfreecnt >= 64 {
3202 lock(&sched.gflock)
3203 for _p_.gfreecnt >= 32 {
3204 _p_.gfreecnt--
3205 gp = _p_.gfree
3206 _p_.gfree = gp.schedlink.ptr()
3207 gp.schedlink.set(sched.gfree)
3208 sched.gfree = gp
3209 sched.ngfree++
3211 unlock(&sched.gflock)
3215 // Get from gfree list.
3216 // If local list is empty, grab a batch from global list.
3217 func gfget(_p_ *p) *g {
3218 retry:
3219 gp := _p_.gfree
3220 if gp == nil && sched.gfree != nil {
3221 lock(&sched.gflock)
3222 for _p_.gfreecnt < 32 {
3223 if sched.gfree != nil {
3224 gp = sched.gfree
3225 sched.gfree = gp.schedlink.ptr()
3226 } else {
3227 break
3229 _p_.gfreecnt++
3230 sched.ngfree--
3231 gp.schedlink.set(_p_.gfree)
3232 _p_.gfree = gp
3234 unlock(&sched.gflock)
3235 goto retry
3237 if gp != nil {
3238 _p_.gfree = gp.schedlink.ptr()
3239 _p_.gfreecnt--
3241 return gp
3244 // Purge all cached G's from gfree list to the global list.
3245 func gfpurge(_p_ *p) {
3246 lock(&sched.gflock)
3247 for _p_.gfreecnt != 0 {
3248 _p_.gfreecnt--
3249 gp := _p_.gfree
3250 _p_.gfree = gp.schedlink.ptr()
3251 gp.schedlink.set(sched.gfree)
3252 sched.gfree = gp
3253 sched.ngfree++
3255 unlock(&sched.gflock)
3258 // Breakpoint executes a breakpoint trap.
3259 func Breakpoint() {
3260 breakpoint()
3263 // dolockOSThread is called by LockOSThread and lockOSThread below
3264 // after they modify m.locked. Do not allow preemption during this call,
3265 // or else the m might be different in this function than in the caller.
3266 //go:nosplit
3267 func dolockOSThread() {
3268 _g_ := getg()
3269 _g_.m.lockedg.set(_g_)
3270 _g_.lockedm.set(_g_.m)
3273 //go:nosplit
3275 // LockOSThread wires the calling goroutine to its current operating system thread.
3276 // The calling goroutine will always execute in that thread,
3277 // and no other goroutine will execute in it,
3278 // until the calling goroutine has made as many calls to
3279 // UnlockOSThread as to LockOSThread.
3280 // If the calling goroutine exits without unlocking the thread,
3281 // the thread will be terminated.
3283 // A goroutine should call LockOSThread before calling OS services or
3284 // non-Go library functions that depend on per-thread state.
3285 func LockOSThread() {
3286 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
3287 // If we need to start a new thread from the locked
3288 // thread, we need the template thread. Start it now
3289 // while we're in a known-good state.
3290 startTemplateThread()
3292 _g_ := getg()
3293 _g_.m.lockedExt++
3294 if _g_.m.lockedExt == 0 {
3295 _g_.m.lockedExt--
3296 panic("LockOSThread nesting overflow")
3298 dolockOSThread()
3301 //go:nosplit
3302 func lockOSThread() {
3303 getg().m.lockedInt++
3304 dolockOSThread()
3307 // dounlockOSThread is called by UnlockOSThread and unlockOSThread below
3308 // after they update m->locked. Do not allow preemption during this call,
3309 // or else the m might be in different in this function than in the caller.
3310 //go:nosplit
3311 func dounlockOSThread() {
3312 _g_ := getg()
3313 if _g_.m.lockedInt != 0 || _g_.m.lockedExt != 0 {
3314 return
3316 _g_.m.lockedg = 0
3317 _g_.lockedm = 0
3320 //go:nosplit
3322 // UnlockOSThread undoes an earlier call to LockOSThread.
3323 // If this drops the number of active LockOSThread calls on the
3324 // calling goroutine to zero, it unwires the calling goroutine from
3325 // its fixed operating system thread.
3326 // If there are no active LockOSThread calls, this is a no-op.
3328 // Before calling UnlockOSThread, the caller must ensure that the OS
3329 // thread is suitable for running other goroutines. If the caller made
3330 // any permanent changes to the state of the thread that would affect
3331 // other goroutines, it should not call this function and thus leave
3332 // the goroutine locked to the OS thread until the goroutine (and
3333 // hence the thread) exits.
3334 func UnlockOSThread() {
3335 _g_ := getg()
3336 if _g_.m.lockedExt == 0 {
3337 return
3339 _g_.m.lockedExt--
3340 dounlockOSThread()
3343 //go:nosplit
3344 func unlockOSThread() {
3345 _g_ := getg()
3346 if _g_.m.lockedInt == 0 {
3347 systemstack(badunlockosthread)
3349 _g_.m.lockedInt--
3350 dounlockOSThread()
3353 func badunlockosthread() {
3354 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
3357 func gcount() int32 {
3358 n := int32(allglen) - sched.ngfree - int32(atomic.Load(&sched.ngsys))
3359 for _, _p_ := range allp {
3360 n -= _p_.gfreecnt
3363 // All these variables can be changed concurrently, so the result can be inconsistent.
3364 // But at least the current goroutine is running.
3365 if n < 1 {
3366 n = 1
3368 return n
3371 func mcount() int32 {
3372 return int32(sched.mnext - sched.nmfreed)
3375 var prof struct {
3376 signalLock uint32
3377 hz int32
3380 func _System() { _System() }
3381 func _ExternalCode() { _ExternalCode() }
3382 func _LostExternalCode() { _LostExternalCode() }
3383 func _GC() { _GC() }
3384 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
3386 // Counts SIGPROFs received while in atomic64 critical section, on mips{,le}
3387 var lostAtomic64Count uint64
3389 var _SystemPC = funcPC(_System)
3390 var _ExternalCodePC = funcPC(_ExternalCode)
3391 var _LostExternalCodePC = funcPC(_LostExternalCode)
3392 var _GCPC = funcPC(_GC)
3393 var _LostSIGPROFDuringAtomic64PC = funcPC(_LostSIGPROFDuringAtomic64)
3395 // Called if we receive a SIGPROF signal.
3396 // Called by the signal handler, may run during STW.
3397 //go:nowritebarrierrec
3398 func sigprof(pc uintptr, gp *g, mp *m) {
3399 if prof.hz == 0 {
3400 return
3403 // Profiling runs concurrently with GC, so it must not allocate.
3404 // Set a trap in case the code does allocate.
3405 // Note that on windows, one thread takes profiles of all the
3406 // other threads, so mp is usually not getg().m.
3407 // In fact mp may not even be stopped.
3408 // See golang.org/issue/17165.
3409 getg().m.mallocing++
3411 traceback := true
3413 // If SIGPROF arrived while already fetching runtime callers
3414 // we can have trouble on older systems because the unwind
3415 // library calls dl_iterate_phdr which was not reentrant in
3416 // the past. alreadyInCallers checks for that.
3417 if gp == nil || alreadyInCallers() {
3418 traceback = false
3421 var stk [maxCPUProfStack]uintptr
3422 n := 0
3423 if traceback {
3424 var stklocs [maxCPUProfStack]location
3425 n = callers(0, stklocs[:])
3427 // Issue 26595: the stack trace we've just collected is going
3428 // to include frames that we don't want to report in the CPU
3429 // profile, including signal handler frames. Here is what we
3430 // might typically see at the point of "callers" above for a
3431 // signal delivered to the application routine "interesting"
3432 // called by "main".
3434 // 0: runtime.sigprof
3435 // 1: runtime.sighandler
3436 // 2: runtime.sigtrampgo
3437 // 3: runtime.sigtramp
3438 // 4: <signal handler called>
3439 // 5: main.interesting_routine
3440 // 6: main.main
3442 // To ensure a sane profile, walk through the frames in
3443 // "stklocs" until we find the "runtime.sigtramp" frame, then
3444 // report only those frames below the frame one down from
3445 // that. If for some reason "runtime.sigtramp" is not present,
3446 // don't make any changes.
3447 framesToDiscard := 0
3448 for i := 0; i < n; i++ {
3449 if stklocs[i].function == "runtime.sigtramp" && i+2 < n {
3450 framesToDiscard = i + 2
3451 n -= framesToDiscard
3452 break
3455 for i := 0; i < n; i++ {
3456 stk[i] = stklocs[i+framesToDiscard].pc
3460 if n <= 0 {
3461 // Normal traceback is impossible or has failed.
3462 // Account it against abstract "System" or "GC".
3463 n = 2
3464 stk[0] = pc
3465 if mp.preemptoff != "" || mp.helpgc != 0 {
3466 stk[1] = _GCPC + sys.PCQuantum
3467 } else {
3468 stk[1] = _SystemPC + sys.PCQuantum
3472 if prof.hz != 0 {
3473 if (GOARCH == "mips" || GOARCH == "mipsle") && lostAtomic64Count > 0 {
3474 cpuprof.addLostAtomic64(lostAtomic64Count)
3475 lostAtomic64Count = 0
3477 cpuprof.add(gp, stk[:n])
3479 getg().m.mallocing--
3482 // Use global arrays rather than using up lots of stack space in the
3483 // signal handler. This is safe since while we are executing a SIGPROF
3484 // signal other SIGPROF signals are blocked.
3485 var nonprofGoStklocs [maxCPUProfStack]location
3486 var nonprofGoStk [maxCPUProfStack]uintptr
3488 // sigprofNonGo is called if we receive a SIGPROF signal on a non-Go thread,
3489 // and the signal handler collected a stack trace in sigprofCallers.
3490 // When this is called, sigprofCallersUse will be non-zero.
3491 // g is nil, and what we can do is very limited.
3492 //go:nosplit
3493 //go:nowritebarrierrec
3494 func sigprofNonGo(pc uintptr) {
3495 if prof.hz != 0 {
3496 n := callers(0, nonprofGoStklocs[:])
3498 for i := 0; i < n; i++ {
3499 nonprofGoStk[i] = nonprofGoStklocs[i].pc
3502 if n <= 0 {
3503 n = 2
3504 nonprofGoStk[0] = pc
3505 nonprofGoStk[1] = _ExternalCodePC + sys.PCQuantum
3508 cpuprof.addNonGo(nonprofGoStk[:n])
3512 // sigprofNonGoPC is called when a profiling signal arrived on a
3513 // non-Go thread and we have a single PC value, not a stack trace.
3514 // g is nil, and what we can do is very limited.
3515 //go:nosplit
3516 //go:nowritebarrierrec
3517 func sigprofNonGoPC(pc uintptr) {
3518 if prof.hz != 0 {
3519 stk := []uintptr{
3521 _ExternalCodePC + sys.PCQuantum,
3523 cpuprof.addNonGo(stk)
3527 // setcpuprofilerate sets the CPU profiling rate to hz times per second.
3528 // If hz <= 0, setcpuprofilerate turns off CPU profiling.
3529 func setcpuprofilerate(hz int32) {
3530 // Force sane arguments.
3531 if hz < 0 {
3532 hz = 0
3535 // Disable preemption, otherwise we can be rescheduled to another thread
3536 // that has profiling enabled.
3537 _g_ := getg()
3538 _g_.m.locks++
3540 // Stop profiler on this thread so that it is safe to lock prof.
3541 // if a profiling signal came in while we had prof locked,
3542 // it would deadlock.
3543 setThreadCPUProfiler(0)
3545 for !atomic.Cas(&prof.signalLock, 0, 1) {
3546 osyield()
3548 if prof.hz != hz {
3549 setProcessCPUProfiler(hz)
3550 prof.hz = hz
3552 atomic.Store(&prof.signalLock, 0)
3554 lock(&sched.lock)
3555 sched.profilehz = hz
3556 unlock(&sched.lock)
3558 if hz != 0 {
3559 setThreadCPUProfiler(hz)
3562 _g_.m.locks--
3565 // Change number of processors. The world is stopped, sched is locked.
3566 // gcworkbufs are not being modified by either the GC or
3567 // the write barrier code.
3568 // Returns list of Ps with local work, they need to be scheduled by the caller.
3569 func procresize(nprocs int32) *p {
3570 old := gomaxprocs
3571 if old < 0 || nprocs <= 0 {
3572 throw("procresize: invalid arg")
3574 if trace.enabled {
3575 traceGomaxprocs(nprocs)
3578 // update statistics
3579 now := nanotime()
3580 if sched.procresizetime != 0 {
3581 sched.totaltime += int64(old) * (now - sched.procresizetime)
3583 sched.procresizetime = now
3585 // Grow allp if necessary.
3586 if nprocs > int32(len(allp)) {
3587 // Synchronize with retake, which could be running
3588 // concurrently since it doesn't run on a P.
3589 lock(&allpLock)
3590 if nprocs <= int32(cap(allp)) {
3591 allp = allp[:nprocs]
3592 } else {
3593 nallp := make([]*p, nprocs)
3594 // Copy everything up to allp's cap so we
3595 // never lose old allocated Ps.
3596 copy(nallp, allp[:cap(allp)])
3597 allp = nallp
3599 unlock(&allpLock)
3602 // initialize new P's
3603 for i := int32(0); i < nprocs; i++ {
3604 pp := allp[i]
3605 if pp == nil {
3606 pp = new(p)
3607 pp.id = i
3608 pp.status = _Pgcstop
3609 pp.sudogcache = pp.sudogbuf[:0]
3610 pp.deferpool = pp.deferpoolbuf[:0]
3611 pp.wbBuf.reset()
3612 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
3614 if pp.mcache == nil {
3615 if old == 0 && i == 0 {
3616 if getg().m.mcache == nil {
3617 throw("missing mcache?")
3619 pp.mcache = getg().m.mcache // bootstrap
3620 } else {
3621 pp.mcache = allocmcache()
3626 // free unused P's
3627 for i := nprocs; i < old; i++ {
3628 p := allp[i]
3629 if trace.enabled && p == getg().m.p.ptr() {
3630 // moving to p[0], pretend that we were descheduled
3631 // and then scheduled again to keep the trace sane.
3632 traceGoSched()
3633 traceProcStop(p)
3635 // move all runnable goroutines to the global queue
3636 for p.runqhead != p.runqtail {
3637 // pop from tail of local queue
3638 p.runqtail--
3639 gp := p.runq[p.runqtail%uint32(len(p.runq))].ptr()
3640 // push onto head of global queue
3641 globrunqputhead(gp)
3643 if p.runnext != 0 {
3644 globrunqputhead(p.runnext.ptr())
3645 p.runnext = 0
3647 // if there's a background worker, make it runnable and put
3648 // it on the global queue so it can clean itself up
3649 if gp := p.gcBgMarkWorker.ptr(); gp != nil {
3650 casgstatus(gp, _Gwaiting, _Grunnable)
3651 if trace.enabled {
3652 traceGoUnpark(gp, 0)
3654 globrunqput(gp)
3655 // This assignment doesn't race because the
3656 // world is stopped.
3657 p.gcBgMarkWorker.set(nil)
3659 // Flush p's write barrier buffer.
3660 if gcphase != _GCoff {
3661 wbBufFlush1(p)
3662 p.gcw.dispose()
3664 for i := range p.sudogbuf {
3665 p.sudogbuf[i] = nil
3667 p.sudogcache = p.sudogbuf[:0]
3668 for i := range p.deferpoolbuf {
3669 p.deferpoolbuf[i] = nil
3671 p.deferpool = p.deferpoolbuf[:0]
3672 freemcache(p.mcache)
3673 p.mcache = nil
3674 gfpurge(p)
3675 traceProcFree(p)
3676 p.gcAssistTime = 0
3677 p.status = _Pdead
3678 // can't free P itself because it can be referenced by an M in syscall
3681 // Trim allp.
3682 if int32(len(allp)) != nprocs {
3683 lock(&allpLock)
3684 allp = allp[:nprocs]
3685 unlock(&allpLock)
3688 _g_ := getg()
3689 if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs {
3690 // continue to use the current P
3691 _g_.m.p.ptr().status = _Prunning
3692 } else {
3693 // release the current P and acquire allp[0]
3694 if _g_.m.p != 0 {
3695 _g_.m.p.ptr().m = 0
3697 _g_.m.p = 0
3698 _g_.m.mcache = nil
3699 p := allp[0]
3700 p.m = 0
3701 p.status = _Pidle
3702 acquirep(p)
3703 if trace.enabled {
3704 traceGoStart()
3707 var runnablePs *p
3708 for i := nprocs - 1; i >= 0; i-- {
3709 p := allp[i]
3710 if _g_.m.p.ptr() == p {
3711 continue
3713 p.status = _Pidle
3714 if runqempty(p) {
3715 pidleput(p)
3716 } else {
3717 p.m.set(mget())
3718 p.link.set(runnablePs)
3719 runnablePs = p
3722 stealOrder.reset(uint32(nprocs))
3723 var int32p *int32 = &gomaxprocs // make compiler check that gomaxprocs is an int32
3724 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
3725 return runnablePs
3728 // Associate p and the current m.
3730 // This function is allowed to have write barriers even if the caller
3731 // isn't because it immediately acquires _p_.
3733 //go:yeswritebarrierrec
3734 func acquirep(_p_ *p) {
3735 // Do the part that isn't allowed to have write barriers.
3736 acquirep1(_p_)
3738 // have p; write barriers now allowed
3739 _g_ := getg()
3740 _g_.m.mcache = _p_.mcache
3742 if trace.enabled {
3743 traceProcStart()
3747 // acquirep1 is the first step of acquirep, which actually acquires
3748 // _p_. This is broken out so we can disallow write barriers for this
3749 // part, since we don't yet have a P.
3751 //go:nowritebarrierrec
3752 func acquirep1(_p_ *p) {
3753 _g_ := getg()
3755 if _g_.m.p != 0 || _g_.m.mcache != nil {
3756 throw("acquirep: already in go")
3758 if _p_.m != 0 || _p_.status != _Pidle {
3759 id := int64(0)
3760 if _p_.m != 0 {
3761 id = _p_.m.ptr().id
3763 print("acquirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n")
3764 throw("acquirep: invalid p state")
3766 _g_.m.p.set(_p_)
3767 _p_.m.set(_g_.m)
3768 _p_.status = _Prunning
3771 // Disassociate p and the current m.
3772 func releasep() *p {
3773 _g_ := getg()
3775 if _g_.m.p == 0 || _g_.m.mcache == nil {
3776 throw("releasep: invalid arg")
3778 _p_ := _g_.m.p.ptr()
3779 if _p_.m.ptr() != _g_.m || _p_.mcache != _g_.m.mcache || _p_.status != _Prunning {
3780 print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", _p_.m, " m->mcache=", _g_.m.mcache, " p->mcache=", _p_.mcache, " p->status=", _p_.status, "\n")
3781 throw("releasep: invalid p state")
3783 if trace.enabled {
3784 traceProcStop(_g_.m.p.ptr())
3786 _g_.m.p = 0
3787 _g_.m.mcache = nil
3788 _p_.m = 0
3789 _p_.status = _Pidle
3790 return _p_
3793 func incidlelocked(v int32) {
3794 lock(&sched.lock)
3795 sched.nmidlelocked += v
3796 if v > 0 {
3797 checkdead()
3799 unlock(&sched.lock)
3802 // Check for deadlock situation.
3803 // The check is based on number of running M's, if 0 -> deadlock.
3804 // sched.lock must be held.
3805 func checkdead() {
3806 // For -buildmode=c-shared or -buildmode=c-archive it's OK if
3807 // there are no running goroutines. The calling program is
3808 // assumed to be running.
3809 if islibrary || isarchive {
3810 return
3813 // If we are dying because of a signal caught on an already idle thread,
3814 // freezetheworld will cause all running threads to block.
3815 // And runtime will essentially enter into deadlock state,
3816 // except that there is a thread that will call exit soon.
3817 if panicking > 0 {
3818 return
3821 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
3822 if run > 0 {
3823 return
3825 if run < 0 {
3826 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
3827 throw("checkdead: inconsistent counts")
3830 grunning := 0
3831 lock(&allglock)
3832 for i := 0; i < len(allgs); i++ {
3833 gp := allgs[i]
3834 if isSystemGoroutine(gp) {
3835 continue
3837 s := readgstatus(gp)
3838 switch s &^ _Gscan {
3839 case _Gwaiting:
3840 grunning++
3841 case _Grunnable,
3842 _Grunning,
3843 _Gsyscall:
3844 unlock(&allglock)
3845 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
3846 throw("checkdead: runnable g")
3849 unlock(&allglock)
3850 if grunning == 0 { // possible if main goroutine calls runtime·Goexit()
3851 throw("no goroutines (main called runtime.Goexit) - deadlock!")
3854 // Maybe jump time forward for playground.
3855 gp := timejump()
3856 if gp != nil {
3857 casgstatus(gp, _Gwaiting, _Grunnable)
3858 globrunqput(gp)
3859 _p_ := pidleget()
3860 if _p_ == nil {
3861 throw("checkdead: no p for timer")
3863 mp := mget()
3864 if mp == nil {
3865 // There should always be a free M since
3866 // nothing is running.
3867 throw("checkdead: no m for timer")
3869 mp.nextp.set(_p_)
3870 notewakeup(&mp.park)
3871 return
3874 getg().m.throwing = -1 // do not dump full stacks
3875 throw("all goroutines are asleep - deadlock!")
3878 // forcegcperiod is the maximum time in nanoseconds between garbage
3879 // collections. If we go this long without a garbage collection, one
3880 // is forced to run.
3882 // This is a variable for testing purposes. It normally doesn't change.
3883 var forcegcperiod int64 = 2 * 60 * 1e9
3885 // Always runs without a P, so write barriers are not allowed.
3887 //go:nowritebarrierrec
3888 func sysmon() {
3889 lock(&sched.lock)
3890 sched.nmsys++
3891 checkdead()
3892 unlock(&sched.lock)
3894 // If a heap span goes unused for 5 minutes after a garbage collection,
3895 // we hand it back to the operating system.
3896 scavengelimit := int64(5 * 60 * 1e9)
3898 if debug.scavenge > 0 {
3899 // Scavenge-a-lot for testing.
3900 forcegcperiod = 10 * 1e6
3901 scavengelimit = 20 * 1e6
3904 lastscavenge := nanotime()
3905 nscavenge := 0
3907 lasttrace := int64(0)
3908 idle := 0 // how many cycles in succession we had not wokeup somebody
3909 delay := uint32(0)
3910 for {
3911 if idle == 0 { // start with 20us sleep...
3912 delay = 20
3913 } else if idle > 50 { // start doubling the sleep after 1ms...
3914 delay *= 2
3916 if delay > 10*1000 { // up to 10ms
3917 delay = 10 * 1000
3919 usleep(delay)
3920 if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) {
3921 lock(&sched.lock)
3922 if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) {
3923 atomic.Store(&sched.sysmonwait, 1)
3924 unlock(&sched.lock)
3925 // Make wake-up period small enough
3926 // for the sampling to be correct.
3927 maxsleep := forcegcperiod / 2
3928 if scavengelimit < forcegcperiod {
3929 maxsleep = scavengelimit / 2
3931 shouldRelax := true
3932 if osRelaxMinNS > 0 {
3933 next := timeSleepUntil()
3934 now := nanotime()
3935 if next-now < osRelaxMinNS {
3936 shouldRelax = false
3939 if shouldRelax {
3940 osRelax(true)
3942 notetsleep(&sched.sysmonnote, maxsleep)
3943 if shouldRelax {
3944 osRelax(false)
3946 lock(&sched.lock)
3947 atomic.Store(&sched.sysmonwait, 0)
3948 noteclear(&sched.sysmonnote)
3949 idle = 0
3950 delay = 20
3952 unlock(&sched.lock)
3954 // trigger libc interceptors if needed
3955 if *cgo_yield != nil {
3956 asmcgocall(*cgo_yield, nil)
3958 // poll network if not polled for more than 10ms
3959 lastpoll := int64(atomic.Load64(&sched.lastpoll))
3960 now := nanotime()
3961 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
3962 atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now))
3963 gp := netpoll(false) // non-blocking - returns list of goroutines
3964 if gp != nil {
3965 // Need to decrement number of idle locked M's
3966 // (pretending that one more is running) before injectglist.
3967 // Otherwise it can lead to the following situation:
3968 // injectglist grabs all P's but before it starts M's to run the P's,
3969 // another M returns from syscall, finishes running its G,
3970 // observes that there is no work to do and no other running M's
3971 // and reports deadlock.
3972 incidlelocked(-1)
3973 injectglist(gp)
3974 incidlelocked(1)
3977 // retake P's blocked in syscalls
3978 // and preempt long running G's
3979 if retake(now) != 0 {
3980 idle = 0
3981 } else {
3982 idle++
3984 // check if we need to force a GC
3985 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && atomic.Load(&forcegc.idle) != 0 {
3986 lock(&forcegc.lock)
3987 forcegc.idle = 0
3988 forcegc.g.schedlink = 0
3989 injectglist(forcegc.g)
3990 unlock(&forcegc.lock)
3992 // scavenge heap once in a while
3993 if lastscavenge+scavengelimit/2 < now {
3994 mheap_.scavenge(int32(nscavenge), uint64(now), uint64(scavengelimit))
3995 lastscavenge = now
3996 nscavenge++
3998 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
3999 lasttrace = now
4000 schedtrace(debug.scheddetail > 0)
4005 type sysmontick struct {
4006 schedtick uint32
4007 schedwhen int64
4008 syscalltick uint32
4009 syscallwhen int64
4012 // forcePreemptNS is the time slice given to a G before it is
4013 // preempted.
4014 const forcePreemptNS = 10 * 1000 * 1000 // 10ms
4016 func retake(now int64) uint32 {
4017 n := 0
4018 // Prevent allp slice changes. This lock will be completely
4019 // uncontended unless we're already stopping the world.
4020 lock(&allpLock)
4021 // We can't use a range loop over allp because we may
4022 // temporarily drop the allpLock. Hence, we need to re-fetch
4023 // allp each time around the loop.
4024 for i := 0; i < len(allp); i++ {
4025 _p_ := allp[i]
4026 if _p_ == nil {
4027 // This can happen if procresize has grown
4028 // allp but not yet created new Ps.
4029 continue
4031 pd := &_p_.sysmontick
4032 s := _p_.status
4033 if s == _Psyscall {
4034 // Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us).
4035 t := int64(_p_.syscalltick)
4036 if int64(pd.syscalltick) != t {
4037 pd.syscalltick = uint32(t)
4038 pd.syscallwhen = now
4039 continue
4041 // On the one hand we don't want to retake Ps if there is no other work to do,
4042 // but on the other hand we want to retake them eventually
4043 // because they can prevent the sysmon thread from deep sleep.
4044 if runqempty(_p_) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now {
4045 continue
4047 // Drop allpLock so we can take sched.lock.
4048 unlock(&allpLock)
4049 // Need to decrement number of idle locked M's
4050 // (pretending that one more is running) before the CAS.
4051 // Otherwise the M from which we retake can exit the syscall,
4052 // increment nmidle and report deadlock.
4053 incidlelocked(-1)
4054 if atomic.Cas(&_p_.status, s, _Pidle) {
4055 if trace.enabled {
4056 traceGoSysBlock(_p_)
4057 traceProcStop(_p_)
4060 _p_.syscalltick++
4061 handoffp(_p_)
4063 incidlelocked(1)
4064 lock(&allpLock)
4065 } else if s == _Prunning {
4066 // Preempt G if it's running for too long.
4067 t := int64(_p_.schedtick)
4068 if int64(pd.schedtick) != t {
4069 pd.schedtick = uint32(t)
4070 pd.schedwhen = now
4071 continue
4073 if pd.schedwhen+forcePreemptNS > now {
4074 continue
4076 preemptone(_p_)
4079 unlock(&allpLock)
4080 return uint32(n)
4083 // Tell all goroutines that they have been preempted and they should stop.
4084 // This function is purely best-effort. It can fail to inform a goroutine if a
4085 // processor just started running it.
4086 // No locks need to be held.
4087 // Returns true if preemption request was issued to at least one goroutine.
4088 func preemptall() bool {
4089 res := false
4090 for _, _p_ := range allp {
4091 if _p_.status != _Prunning {
4092 continue
4094 if preemptone(_p_) {
4095 res = true
4098 return res
4101 // Tell the goroutine running on processor P to stop.
4102 // This function is purely best-effort. It can incorrectly fail to inform the
4103 // goroutine. It can send inform the wrong goroutine. Even if it informs the
4104 // correct goroutine, that goroutine might ignore the request if it is
4105 // simultaneously executing newstack.
4106 // No lock needs to be held.
4107 // Returns true if preemption request was issued.
4108 // The actual preemption will happen at some point in the future
4109 // and will be indicated by the gp->status no longer being
4110 // Grunning
4111 func preemptone(_p_ *p) bool {
4112 mp := _p_.m.ptr()
4113 if mp == nil || mp == getg().m {
4114 return false
4116 gp := mp.curg
4117 if gp == nil || gp == mp.g0 {
4118 return false
4121 gp.preempt = true
4123 // At this point the gc implementation sets gp.stackguard0 to
4124 // a value that causes the goroutine to suspend itself.
4125 // gccgo has no support for this, and it's hard to support.
4126 // The split stack code reads a value from its TCB.
4127 // We have no way to set a value in the TCB of a different thread.
4128 // And, of course, not all systems support split stack anyhow.
4129 // Checking the field in the g is expensive, since it requires
4130 // loading the g from TLS. The best mechanism is likely to be
4131 // setting a global variable and figuring out a way to efficiently
4132 // check that global variable.
4134 // For now we check gp.preempt in schedule, mallocgc, selectgo,
4135 // and a few other places, which is at least better than doing
4136 // nothing at all.
4138 return true
4141 var starttime int64
4143 func schedtrace(detailed bool) {
4144 now := nanotime()
4145 if starttime == 0 {
4146 starttime = now
4149 lock(&sched.lock)
4150 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle, " threads=", mcount(), " spinningthreads=", sched.nmspinning, " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
4151 if detailed {
4152 print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n")
4154 // We must be careful while reading data from P's, M's and G's.
4155 // Even if we hold schedlock, most data can be changed concurrently.
4156 // E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil.
4157 for i, _p_ := range allp {
4158 mp := _p_.m.ptr()
4159 h := atomic.Load(&_p_.runqhead)
4160 t := atomic.Load(&_p_.runqtail)
4161 if detailed {
4162 id := int64(-1)
4163 if mp != nil {
4164 id = mp.id
4166 print(" P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gfreecnt, "\n")
4167 } else {
4168 // In non-detailed mode format lengths of per-P run queues as:
4169 // [len1 len2 len3 len4]
4170 print(" ")
4171 if i == 0 {
4172 print("[")
4174 print(t - h)
4175 if i == len(allp)-1 {
4176 print("]\n")
4181 if !detailed {
4182 unlock(&sched.lock)
4183 return
4186 for mp := allm; mp != nil; mp = mp.alllink {
4187 _p_ := mp.p.ptr()
4188 gp := mp.curg
4189 lockedg := mp.lockedg.ptr()
4190 id1 := int32(-1)
4191 if _p_ != nil {
4192 id1 = _p_.id
4194 id2 := int64(-1)
4195 if gp != nil {
4196 id2 = gp.goid
4198 id3 := int64(-1)
4199 if lockedg != nil {
4200 id3 = lockedg.goid
4202 print(" M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, ""+" locks=", mp.locks, " dying=", mp.dying, " helpgc=", mp.helpgc, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=", id3, "\n")
4205 lock(&allglock)
4206 for gi := 0; gi < len(allgs); gi++ {
4207 gp := allgs[gi]
4208 mp := gp.m
4209 lockedm := gp.lockedm.ptr()
4210 id1 := int64(-1)
4211 if mp != nil {
4212 id1 = mp.id
4214 id2 := int64(-1)
4215 if lockedm != nil {
4216 id2 = lockedm.id
4218 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason, ") m=", id1, " lockedm=", id2, "\n")
4220 unlock(&allglock)
4221 unlock(&sched.lock)
4224 // Put mp on midle list.
4225 // Sched must be locked.
4226 // May run during STW, so write barriers are not allowed.
4227 //go:nowritebarrierrec
4228 func mput(mp *m) {
4229 mp.schedlink = sched.midle
4230 sched.midle.set(mp)
4231 sched.nmidle++
4232 checkdead()
4235 // Try to get an m from midle list.
4236 // Sched must be locked.
4237 // May run during STW, so write barriers are not allowed.
4238 //go:nowritebarrierrec
4239 func mget() *m {
4240 mp := sched.midle.ptr()
4241 if mp != nil {
4242 sched.midle = mp.schedlink
4243 sched.nmidle--
4245 return mp
4248 // Put gp on the global runnable queue.
4249 // Sched must be locked.
4250 // May run during STW, so write barriers are not allowed.
4251 //go:nowritebarrierrec
4252 func globrunqput(gp *g) {
4253 gp.schedlink = 0
4254 if sched.runqtail != 0 {
4255 sched.runqtail.ptr().schedlink.set(gp)
4256 } else {
4257 sched.runqhead.set(gp)
4259 sched.runqtail.set(gp)
4260 sched.runqsize++
4263 // Put gp at the head of the global runnable queue.
4264 // Sched must be locked.
4265 // May run during STW, so write barriers are not allowed.
4266 //go:nowritebarrierrec
4267 func globrunqputhead(gp *g) {
4268 gp.schedlink = sched.runqhead
4269 sched.runqhead.set(gp)
4270 if sched.runqtail == 0 {
4271 sched.runqtail.set(gp)
4273 sched.runqsize++
4276 // Put a batch of runnable goroutines on the global runnable queue.
4277 // Sched must be locked.
4278 func globrunqputbatch(ghead *g, gtail *g, n int32) {
4279 gtail.schedlink = 0
4280 if sched.runqtail != 0 {
4281 sched.runqtail.ptr().schedlink.set(ghead)
4282 } else {
4283 sched.runqhead.set(ghead)
4285 sched.runqtail.set(gtail)
4286 sched.runqsize += n
4289 // Try get a batch of G's from the global runnable queue.
4290 // Sched must be locked.
4291 func globrunqget(_p_ *p, max int32) *g {
4292 if sched.runqsize == 0 {
4293 return nil
4296 n := sched.runqsize/gomaxprocs + 1
4297 if n > sched.runqsize {
4298 n = sched.runqsize
4300 if max > 0 && n > max {
4301 n = max
4303 if n > int32(len(_p_.runq))/2 {
4304 n = int32(len(_p_.runq)) / 2
4307 sched.runqsize -= n
4308 if sched.runqsize == 0 {
4309 sched.runqtail = 0
4312 gp := sched.runqhead.ptr()
4313 sched.runqhead = gp.schedlink
4315 for ; n > 0; n-- {
4316 gp1 := sched.runqhead.ptr()
4317 sched.runqhead = gp1.schedlink
4318 runqput(_p_, gp1, false)
4320 return gp
4323 // Put p to on _Pidle list.
4324 // Sched must be locked.
4325 // May run during STW, so write barriers are not allowed.
4326 //go:nowritebarrierrec
4327 func pidleput(_p_ *p) {
4328 if !runqempty(_p_) {
4329 throw("pidleput: P has non-empty run queue")
4331 _p_.link = sched.pidle
4332 sched.pidle.set(_p_)
4333 atomic.Xadd(&sched.npidle, 1) // TODO: fast atomic
4336 // Try get a p from _Pidle list.
4337 // Sched must be locked.
4338 // May run during STW, so write barriers are not allowed.
4339 //go:nowritebarrierrec
4340 func pidleget() *p {
4341 _p_ := sched.pidle.ptr()
4342 if _p_ != nil {
4343 sched.pidle = _p_.link
4344 atomic.Xadd(&sched.npidle, -1) // TODO: fast atomic
4346 return _p_
4349 // runqempty returns true if _p_ has no Gs on its local run queue.
4350 // It never returns true spuriously.
4351 func runqempty(_p_ *p) bool {
4352 // Defend against a race where 1) _p_ has G1 in runqnext but runqhead == runqtail,
4353 // 2) runqput on _p_ kicks G1 to the runq, 3) runqget on _p_ empties runqnext.
4354 // Simply observing that runqhead == runqtail and then observing that runqnext == nil
4355 // does not mean the queue is empty.
4356 for {
4357 head := atomic.Load(&_p_.runqhead)
4358 tail := atomic.Load(&_p_.runqtail)
4359 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&_p_.runnext)))
4360 if tail == atomic.Load(&_p_.runqtail) {
4361 return head == tail && runnext == 0
4366 // To shake out latent assumptions about scheduling order,
4367 // we introduce some randomness into scheduling decisions
4368 // when running with the race detector.
4369 // The need for this was made obvious by changing the
4370 // (deterministic) scheduling order in Go 1.5 and breaking
4371 // many poorly-written tests.
4372 // With the randomness here, as long as the tests pass
4373 // consistently with -race, they shouldn't have latent scheduling
4374 // assumptions.
4375 const randomizeScheduler = raceenabled
4377 // runqput tries to put g on the local runnable queue.
4378 // If next if false, runqput adds g to the tail of the runnable queue.
4379 // If next is true, runqput puts g in the _p_.runnext slot.
4380 // If the run queue is full, runnext puts g on the global queue.
4381 // Executed only by the owner P.
4382 func runqput(_p_ *p, gp *g, next bool) {
4383 if randomizeScheduler && next && fastrand()%2 == 0 {
4384 next = false
4387 if next {
4388 retryNext:
4389 oldnext := _p_.runnext
4390 if !_p_.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
4391 goto retryNext
4393 if oldnext == 0 {
4394 return
4396 // Kick the old runnext out to the regular run queue.
4397 gp = oldnext.ptr()
4400 retry:
4401 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with consumers
4402 t := _p_.runqtail
4403 if t-h < uint32(len(_p_.runq)) {
4404 _p_.runq[t%uint32(len(_p_.runq))].set(gp)
4405 atomic.Store(&_p_.runqtail, t+1) // store-release, makes the item available for consumption
4406 return
4408 if runqputslow(_p_, gp, h, t) {
4409 return
4411 // the queue is not full, now the put above must succeed
4412 goto retry
4415 // Put g and a batch of work from local runnable queue on global queue.
4416 // Executed only by the owner P.
4417 func runqputslow(_p_ *p, gp *g, h, t uint32) bool {
4418 var batch [len(_p_.runq)/2 + 1]*g
4420 // First, grab a batch from local queue.
4421 n := t - h
4422 n = n / 2
4423 if n != uint32(len(_p_.runq)/2) {
4424 throw("runqputslow: queue is not full")
4426 for i := uint32(0); i < n; i++ {
4427 batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr()
4429 if !atomic.Cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume
4430 return false
4432 batch[n] = gp
4434 if randomizeScheduler {
4435 for i := uint32(1); i <= n; i++ {
4436 j := fastrandn(i + 1)
4437 batch[i], batch[j] = batch[j], batch[i]
4441 // Link the goroutines.
4442 for i := uint32(0); i < n; i++ {
4443 batch[i].schedlink.set(batch[i+1])
4446 // Now put the batch on global queue.
4447 lock(&sched.lock)
4448 globrunqputbatch(batch[0], batch[n], int32(n+1))
4449 unlock(&sched.lock)
4450 return true
4453 // Get g from local runnable queue.
4454 // If inheritTime is true, gp should inherit the remaining time in the
4455 // current time slice. Otherwise, it should start a new time slice.
4456 // Executed only by the owner P.
4457 func runqget(_p_ *p) (gp *g, inheritTime bool) {
4458 // If there's a runnext, it's the next G to run.
4459 for {
4460 next := _p_.runnext
4461 if next == 0 {
4462 break
4464 if _p_.runnext.cas(next, 0) {
4465 return next.ptr(), true
4469 for {
4470 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with other consumers
4471 t := _p_.runqtail
4472 if t == h {
4473 return nil, false
4475 gp := _p_.runq[h%uint32(len(_p_.runq))].ptr()
4476 if atomic.Cas(&_p_.runqhead, h, h+1) { // cas-release, commits consume
4477 return gp, false
4482 // Grabs a batch of goroutines from _p_'s runnable queue into batch.
4483 // Batch is a ring buffer starting at batchHead.
4484 // Returns number of grabbed goroutines.
4485 // Can be executed by any P.
4486 func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
4487 for {
4488 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with other consumers
4489 t := atomic.Load(&_p_.runqtail) // load-acquire, synchronize with the producer
4490 n := t - h
4491 n = n - n/2
4492 if n == 0 {
4493 if stealRunNextG {
4494 // Try to steal from _p_.runnext.
4495 if next := _p_.runnext; next != 0 {
4496 if _p_.status == _Prunning {
4497 // Sleep to ensure that _p_ isn't about to run the g
4498 // we are about to steal.
4499 // The important use case here is when the g running
4500 // on _p_ ready()s another g and then almost
4501 // immediately blocks. Instead of stealing runnext
4502 // in this window, back off to give _p_ a chance to
4503 // schedule runnext. This will avoid thrashing gs
4504 // between different Ps.
4505 // A sync chan send/recv takes ~50ns as of time of
4506 // writing, so 3us gives ~50x overshoot.
4507 if GOOS != "windows" {
4508 usleep(3)
4509 } else {
4510 // On windows system timer granularity is
4511 // 1-15ms, which is way too much for this
4512 // optimization. So just yield.
4513 osyield()
4516 if !_p_.runnext.cas(next, 0) {
4517 continue
4519 batch[batchHead%uint32(len(batch))] = next
4520 return 1
4523 return 0
4525 if n > uint32(len(_p_.runq)/2) { // read inconsistent h and t
4526 continue
4528 for i := uint32(0); i < n; i++ {
4529 g := _p_.runq[(h+i)%uint32(len(_p_.runq))]
4530 batch[(batchHead+i)%uint32(len(batch))] = g
4532 if atomic.Cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume
4533 return n
4538 // Steal half of elements from local runnable queue of p2
4539 // and put onto local runnable queue of p.
4540 // Returns one of the stolen elements (or nil if failed).
4541 func runqsteal(_p_, p2 *p, stealRunNextG bool) *g {
4542 t := _p_.runqtail
4543 n := runqgrab(p2, &_p_.runq, t, stealRunNextG)
4544 if n == 0 {
4545 return nil
4548 gp := _p_.runq[(t+n)%uint32(len(_p_.runq))].ptr()
4549 if n == 0 {
4550 return gp
4552 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with consumers
4553 if t-h+n >= uint32(len(_p_.runq)) {
4554 throw("runqsteal: runq overflow")
4556 atomic.Store(&_p_.runqtail, t+n) // store-release, makes the item available for consumption
4557 return gp
4560 //go:linkname setMaxThreads runtime_debug.setMaxThreads
4561 func setMaxThreads(in int) (out int) {
4562 lock(&sched.lock)
4563 out = int(sched.maxmcount)
4564 if in > 0x7fffffff { // MaxInt32
4565 sched.maxmcount = 0x7fffffff
4566 } else {
4567 sched.maxmcount = int32(in)
4569 checkmcount()
4570 unlock(&sched.lock)
4571 return
4574 //go:nosplit
4575 func procPin() int {
4576 _g_ := getg()
4577 mp := _g_.m
4579 mp.locks++
4580 return int(mp.p.ptr().id)
4583 //go:nosplit
4584 func procUnpin() {
4585 _g_ := getg()
4586 _g_.m.locks--
4589 //go:linkname sync_runtime_procPin sync.runtime_procPin
4590 //go:nosplit
4591 func sync_runtime_procPin() int {
4592 return procPin()
4595 //go:linkname sync_runtime_procUnpin sync.runtime_procUnpin
4596 //go:nosplit
4597 func sync_runtime_procUnpin() {
4598 procUnpin()
4601 //go:linkname sync_atomic_runtime_procPin sync_atomic.runtime_procPin
4602 //go:nosplit
4603 func sync_atomic_runtime_procPin() int {
4604 return procPin()
4607 //go:linkname sync_atomic_runtime_procUnpin sync_atomic.runtime_procUnpin
4608 //go:nosplit
4609 func sync_atomic_runtime_procUnpin() {
4610 procUnpin()
4613 // Active spinning for sync.Mutex.
4614 //go:linkname sync_runtime_canSpin sync.runtime_canSpin
4615 //go:nosplit
4616 func sync_runtime_canSpin(i int) bool {
4617 // sync.Mutex is cooperative, so we are conservative with spinning.
4618 // Spin only few times and only if running on a multicore machine and
4619 // GOMAXPROCS>1 and there is at least one other running P and local runq is empty.
4620 // As opposed to runtime mutex we don't do passive spinning here,
4621 // because there can be work on global runq on on other Ps.
4622 if i >= active_spin || ncpu <= 1 || gomaxprocs <= int32(sched.npidle+sched.nmspinning)+1 {
4623 return false
4625 if p := getg().m.p.ptr(); !runqempty(p) {
4626 return false
4628 return true
4631 //go:linkname sync_runtime_doSpin sync.runtime_doSpin
4632 //go:nosplit
4633 func sync_runtime_doSpin() {
4634 procyield(active_spin_cnt)
4637 var stealOrder randomOrder
4639 // randomOrder/randomEnum are helper types for randomized work stealing.
4640 // They allow to enumerate all Ps in different pseudo-random orders without repetitions.
4641 // The algorithm is based on the fact that if we have X such that X and GOMAXPROCS
4642 // are coprime, then a sequences of (i + X) % GOMAXPROCS gives the required enumeration.
4643 type randomOrder struct {
4644 count uint32
4645 coprimes []uint32
4648 type randomEnum struct {
4649 i uint32
4650 count uint32
4651 pos uint32
4652 inc uint32
4655 func (ord *randomOrder) reset(count uint32) {
4656 ord.count = count
4657 ord.coprimes = ord.coprimes[:0]
4658 for i := uint32(1); i <= count; i++ {
4659 if gcd(i, count) == 1 {
4660 ord.coprimes = append(ord.coprimes, i)
4665 func (ord *randomOrder) start(i uint32) randomEnum {
4666 return randomEnum{
4667 count: ord.count,
4668 pos: i % ord.count,
4669 inc: ord.coprimes[i%uint32(len(ord.coprimes))],
4673 func (enum *randomEnum) done() bool {
4674 return enum.i == enum.count
4677 func (enum *randomEnum) next() {
4678 enum.i++
4679 enum.pos = (enum.pos + enum.inc) % enum.count
4682 func (enum *randomEnum) position() uint32 {
4683 return enum.pos
4686 func gcd(a, b uint32) uint32 {
4687 for b != 0 {
4688 a, b = b, a%b
4690 return a