[official-gcc.git] / libgo / go / runtime / proc.go
1 // Copyright 2014 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 package runtime
7 import (
8 "internal/cpu"
9 "runtime/internal/atomic"
10 "runtime/internal/sys"
11 "unsafe"
14 // Functions called by C code.
15 //go:linkname main runtime.main
16 //go:linkname goparkunlock runtime.goparkunlock
17 //go:linkname newextram runtime.newextram
18 //go:linkname acquirep runtime.acquirep
19 //go:linkname releasep runtime.releasep
20 //go:linkname incidlelocked runtime.incidlelocked
21 //go:linkname schedinit runtime.schedinit
22 //go:linkname ready runtime.ready
23 //go:linkname gcprocs runtime.gcprocs
24 //go:linkname stopm runtime.stopm
25 //go:linkname handoffp runtime.handoffp
26 //go:linkname wakep runtime.wakep
27 //go:linkname stoplockedm runtime.stoplockedm
28 //go:linkname schedule runtime.schedule
29 //go:linkname execute runtime.execute
30 //go:linkname goexit1 runtime.goexit1
31 //go:linkname reentersyscall runtime.reentersyscall
32 //go:linkname reentersyscallblock runtime.reentersyscallblock
33 //go:linkname exitsyscall runtime.exitsyscall
34 //go:linkname gfget runtime.gfget
35 //go:linkname helpgc runtime.helpgc
36 //go:linkname kickoff runtime.kickoff
37 //go:linkname mstart1 runtime.mstart1
38 //go:linkname mexit runtime.mexit
39 //go:linkname globrunqput runtime.globrunqput
40 //go:linkname pidleget runtime.pidleget
42 // Exported for test (see runtime/testdata/testprogcgo/dropm_stub.go).
43 //go:linkname getm runtime.getm
45 // Function called by misc/cgo/test.
46 //go:linkname lockedOSThread runtime.lockedOSThread
48 // C functions for thread and context management.
49 func newosproc(*m)
51 //go:noescape
52 func malg(bool, bool, *unsafe.Pointer, *uintptr) *g
54 //go:noescape
55 func resetNewG(*g, *unsafe.Pointer, *uintptr)
56 func gogo(*g)
57 func setGContext()
58 func makeGContext(*g, unsafe.Pointer, uintptr)
59 func getTraceback(me, gp *g)
60 func gtraceback(*g)
61 func _cgo_notify_runtime_init_done()
62 func alreadyInCallers() bool
63 func stackfree(*g)
65 // Functions created by the compiler.
66 //extern __go_init_main
67 func main_init()
69 //extern main.main
70 func main_main()
72 var buildVersion = sys.TheVersion
74 // Goroutine scheduler
75 // The scheduler's job is to distribute ready-to-run goroutines over worker threads.
77 // The main concepts are:
78 // G - goroutine.
79 // M - worker thread, or machine.
80 // P - processor, a resource that is required to execute Go code.
81 // M must have an associated P to execute Go code; however, it can be
82 // blocked or in a syscall without an associated P.
84 // Design doc at https://golang.org/s/go11sched.
86 // Worker thread parking/unparking.
87 // We need to balance between keeping enough running worker threads to utilize
88 // available hardware parallelism and parking excessive running worker threads
89 // to conserve CPU resources and power. This is not simple for two reasons:
90 // (1) scheduler state is intentionally distributed (in particular, per-P work
91 // queues), so it is not possible to compute global predicates on fast paths;
92 // (2) for optimal thread management we would need to know the future (don't park
93 // a worker thread when a new goroutine will be readied in near future).
95 // Three rejected approaches that would work badly:
96 // 1. Centralize all scheduler state (would inhibit scalability).
97 // 2. Direct goroutine handoff. That is, when we ready a new goroutine and there
98 // is a spare P, unpark a thread and hand it the P and the goroutine.
99 // This would lead to thread state thrashing, as the thread that readied the
100 // goroutine can be out of work the very next moment, and we would then need to park it.
101 // Also, it would destroy locality of computation, since we want to keep
102 // dependent goroutines on the same thread; and it would introduce additional latency.
103 // 3. Unpark an additional thread whenever we ready a goroutine and there is an
104 // idle P, but don't do handoff. This would lead to excessive thread parking/
105 // unparking as the additional threads will instantly park without discovering
106 // any work to do.
108 // The current approach:
109 // We unpark an additional thread when we ready a goroutine if (1) there is an
110 // idle P and (2) there are no "spinning" worker threads. A worker thread is considered
111 // spinning if it is out of local work and did not find work in global run queue/
112 // netpoller; the spinning state is denoted in m.spinning and in sched.nmspinning.
113 // Threads unparked this way are also considered spinning; we don't do goroutine
114 // handoff so such threads are out of work initially. Spinning threads do some
115 // spinning looking for work in per-P run queues before parking. If a spinning
116 // thread finds work it takes itself out of the spinning state and proceeds to
117 // execution. If it does not find work it takes itself out of the spinning state
118 // and then parks.
119 // If there is at least one spinning thread (sched.nmspinning>0), we don't unpark
120 // new threads when readying goroutines. To compensate for that, if the last spinning
121 // thread finds work and stops spinning, it must unpark a new spinning thread.
122 // This approach smooths out unjustified spikes of thread unparking,
123 // but at the same time guarantees eventual maximal CPU parallelism utilization.
125 // The main implementation complication is that we need to be very careful during
126 // spinning->non-spinning thread transition. This transition can race with submission
127 // of a new goroutine, and either one part or another needs to unpark another worker
128 // thread. If they both fail to do that, we can end up with semi-persistent CPU
129 // underutilization. The general pattern for goroutine readying is: submit a goroutine
130 // to local work queue, #StoreLoad-style memory barrier, check sched.nmspinning.
131 // The general pattern for spinning->non-spinning transition is: decrement nmspinning,
132 // #StoreLoad-style memory barrier, check all per-P work queues for new work.
133 // Note that all this complexity does not apply to global run queue as we are not
134 // sloppy about thread unparking when submitting to global queue. Also see comments
135 // for nmspinning manipulation.
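// A minimal sketch of the two patterns above (illustrative only, not the
// actual scheduler code; see ready and findrunnable for the real versions):
//
//	// Goroutine readying:
//	runqput(pp, gp, next)                 // submit to the local work queue
//	// #StoreLoad-style barrier, then check for idle Ps and spinning Ms:
//	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
//		wakep()
//	}
//
//	// Spinning->non-spinning transition:
//	atomic.Xadd(&sched.nmspinning, -1)
//	// #StoreLoad-style barrier, then re-check all per-P run queues;
//	// if work is found, become spinning again (or run it) instead of parking.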
137 var (
138 m0 m
139 g0 g
142 // main_init_done is a signal used by cgocallbackg that initialization
143 // has been completed. It is made before _cgo_notify_runtime_init_done,
144 // so all cgo calls can rely on it existing. When main_init is complete,
145 // it is closed, meaning cgocallbackg can reliably receive from it.
146 var main_init_done chan bool
148 // mainStarted indicates that the main M has started.
149 var mainStarted bool
151 // runtimeInitTime is the nanotime() at which the runtime started.
152 var runtimeInitTime int64
154 // Value to use for signal mask for newly created M's.
155 var initSigmask sigset
157 // The main goroutine.
158 func main() {
159 g := getg()
161 // Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
162 // Using decimal instead of binary GB and MB because
163 // they look nicer in the stack overflow failure message.
164 if sys.PtrSize == 8 {
165 maxstacksize = 1000000000
166 } else {
167 maxstacksize = 250000000
170 // Allow newproc to start new Ms.
171 mainStarted = true
173 if GOARCH != "wasm" { // no threads on wasm yet, so no sysmon
174 systemstack(func() {
175 newm(sysmon, nil)
179 // Lock the main goroutine onto this, the main OS thread,
180 // during initialization. Most programs won't care, but a few
181 // do require certain calls to be made by the main thread.
182 // Those can arrange for main.main to run in the main thread
183 // by calling runtime.LockOSThread during initialization
184 // to preserve the lock.
185 lockOSThread()
187 if g.m != &m0 {
188 throw("runtime.main not on m0")
191 // Defer unlock so that runtime.Goexit during init does the unlock too.
192 needUnlock := true
193 defer func() {
194 if needUnlock {
195 unlockOSThread()
199 // Record when the world started. Must be after runtime_init
200 // because nanotime on some platforms depends on startNano.
201 runtimeInitTime = nanotime()
203 main_init_done = make(chan bool)
204 if iscgo {
205 // Start the template thread in case we enter Go from
206 // a C-created thread and need to create a new thread.
207 startTemplateThread()
208 _cgo_notify_runtime_init_done()
211 fn := main_init // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
212 fn()
213 createGcRootsIndex()
214 close(main_init_done)
216 needUnlock = false
217 unlockOSThread()
219 // For gccgo we have to wait until after main is initialized
220 // to enable GC, because initializing main registers the GC roots.
221 gcenable()
223 if isarchive || islibrary {
224 // A program compiled with -buildmode=c-archive or c-shared
225 // has a main, but it is not executed.
226 return
228 fn = main_main // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
229 fn()
230 if raceenabled {
231 racefini()
234 // Make racy client program work: if panicking on
235 // another goroutine at the same time as main returns,
236 // let the other goroutine finish printing the panic trace.
237 // Once it does, it will exit. See issues 3934 and 20018.
238 if atomic.Load(&runningPanicDefers) != 0 {
239 // Running deferred functions should not take long.
240 for c := 0; c < 1000; c++ {
241 if atomic.Load(&runningPanicDefers) == 0 {
242 break
244 Gosched()
247 if atomic.Load(&panicking) != 0 {
248 gopark(nil, nil, waitReasonPanicWait, traceEvGoStop, 1)
251 exit(0)
252 for {
253 var x *int32
254 *x = 0
258 // os_beforeExit is called from os.Exit(0).
259 //go:linkname os_beforeExit os.runtime_beforeExit
260 func os_beforeExit() {
261 if raceenabled {
262 racefini()
266 // start forcegc helper goroutine
267 func init() {
268 expectSystemGoroutine()
269 go forcegchelper()
272 func forcegchelper() {
273 setSystemGoroutine()
275 forcegc.g = getg()
276 for {
277 lock(&forcegc.lock)
278 if forcegc.idle != 0 {
279 throw("forcegc: phase error")
281 atomic.Store(&forcegc.idle, 1)
282 goparkunlock(&forcegc.lock, waitReasonForceGGIdle, traceEvGoBlock, 1)
283 // this goroutine is explicitly resumed by sysmon
284 if debug.gctrace > 0 {
285 println("GC forced")
287 // Time-triggered, fully concurrent.
288 gcStart(gcBackgroundMode, gcTrigger{kind: gcTriggerTime, now: nanotime()})
292 //go:nosplit
294 // Gosched yields the processor, allowing other goroutines to run. It does not
295 // suspend the current goroutine, so execution resumes automatically.
296 func Gosched() {
297 checkTimeouts()
298 mcall(gosched_m)
301 // goschedguarded yields the processor like gosched, but also checks
302 // for forbidden states and opts out of the yield in those cases.
303 //go:nosplit
304 func goschedguarded() {
305 mcall(goschedguarded_m)
308 // Puts the current goroutine into a waiting state and calls unlockf.
309 // If unlockf returns false, the goroutine is resumed.
310 // unlockf must not access this G's stack, as it may be moved between
311 // the call to gopark and the call to unlockf.
312 // Reason explains why the goroutine has been parked.
313 // It is displayed in stack traces and heap dumps.
314 // Reasons should be unique and descriptive.
315 // Do not re-use reasons, add new ones.
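// An illustrative pairing (following the pattern used by forcegchelper
// above): the sleeper parks under a lock, and some other goroutine later
// calls goready once the condition holds. waitReasonSomething stands in
// for one of the real waitReason constants.
//
//	lock(&l)
//	sleeper = getg()        // record the g somewhere the waker can find it
//	goparkunlock(&l, waitReasonSomething, traceEvGoBlock, 1)
//
//	// ...and in the waker, once the condition is satisfied:
//	goready(sleeper, 1)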
316 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceEv byte, traceskip int) {
317 if reason != waitReasonSleep {
318 checkTimeouts() // timeouts may expire while two goroutines keep the scheduler busy
320 mp := acquirem()
321 gp := mp.curg
322 status := readgstatus(gp)
323 if status != _Grunning && status != _Gscanrunning {
324 throw("gopark: bad g status")
326 mp.waitlock = lock
327 mp.waitunlockf = *(*unsafe.Pointer)(unsafe.Pointer(&unlockf))
328 gp.waitreason = reason
329 mp.waittraceev = traceEv
330 mp.waittraceskip = traceskip
331 releasem(mp)
332 // can't do anything that might move the G between Ms here.
333 mcall(park_m)
336 // Puts the current goroutine into a waiting state and unlocks the lock.
337 // The goroutine can be made runnable again by calling goready(gp).
338 func goparkunlock(lock *mutex, reason waitReason, traceEv byte, traceskip int) {
339 gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip)
342 func goready(gp *g, traceskip int) {
343 systemstack(func() {
344 ready(gp, traceskip, true)
348 //go:nosplit
349 func acquireSudog() *sudog {
350 // Delicate dance: the semaphore implementation calls
351 // acquireSudog, acquireSudog calls new(sudog),
352 // new calls malloc, malloc can call the garbage collector,
353 // and the garbage collector calls the semaphore implementation
354 // in stopTheWorld.
355 // Break the cycle by doing acquirem/releasem around new(sudog).
356 // The acquirem/releasem increments m.locks during new(sudog),
357 // which keeps the garbage collector from being invoked.
358 mp := acquirem()
359 pp := mp.p.ptr()
360 if len(pp.sudogcache) == 0 {
361 lock(&sched.sudoglock)
362 // First, try to grab a batch from central cache.
363 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
364 s := sched.sudogcache
365 sched.sudogcache = s.next
366 s.next = nil
367 pp.sudogcache = append(pp.sudogcache, s)
369 unlock(&sched.sudoglock)
370 // If the central cache is empty, allocate a new one.
371 if len(pp.sudogcache) == 0 {
372 pp.sudogcache = append(pp.sudogcache, new(sudog))
375 n := len(pp.sudogcache)
376 s := pp.sudogcache[n-1]
377 pp.sudogcache[n-1] = nil
378 pp.sudogcache = pp.sudogcache[:n-1]
379 if s.elem != nil {
380 throw("acquireSudog: found s.elem != nil in cache")
382 releasem(mp)
383 return s
386 //go:nosplit
387 func releaseSudog(s *sudog) {
388 if s.elem != nil {
389 throw("runtime: sudog with non-nil elem")
391 if s.isSelect {
392 throw("runtime: sudog with non-false isSelect")
394 if s.next != nil {
395 throw("runtime: sudog with non-nil next")
397 if s.prev != nil {
398 throw("runtime: sudog with non-nil prev")
400 if s.waitlink != nil {
401 throw("runtime: sudog with non-nil waitlink")
403 if s.c != nil {
404 throw("runtime: sudog with non-nil c")
406 gp := getg()
407 if gp.param != nil {
408 throw("runtime: releaseSudog with non-nil gp.param")
410 mp := acquirem() // avoid rescheduling to another P
411 pp := mp.p.ptr()
412 if len(pp.sudogcache) == cap(pp.sudogcache) {
413 // Transfer half of local cache to the central cache.
414 var first, last *sudog
415 for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
416 n := len(pp.sudogcache)
417 p := pp.sudogcache[n-1]
418 pp.sudogcache[n-1] = nil
419 pp.sudogcache = pp.sudogcache[:n-1]
420 if first == nil {
421 first = p
422 } else {
423 last.next = p
425 last = p
427 lock(&sched.sudoglock)
428 last.next = sched.sudogcache
429 sched.sudogcache = first
430 unlock(&sched.sudoglock)
432 pp.sudogcache = append(pp.sudogcache, s)
433 releasem(mp)
436 // funcPC returns the entry PC of the function f.
437 // It assumes that f is a func value. Otherwise the behavior is undefined.
438 // CAREFUL: In programs with plugins, funcPC can return different values
439 // for the same function (because there are actually multiple copies of
440 // the same function in the address space). To be safe, don't use the
441 // results of this function in any == expression. It is only safe to
442 // use the result as an address at which to start executing code.
444 // For gccgo note that this differs from the gc implementation; the gc
445 // implementation adds sys.PtrSize to the address of the interface
446 // value, but GCC's alias analysis decides that that can not be a
447 // reference to the second field of the interface, and in some cases
448 // it drops the initialization of the second field as a dead store.
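// For example (illustrative), it is fine to use the result purely as a code
// address:
//
//	pc := funcPC(gosched_m) // ok: an address at which to start executing
//
// but comparing two funcPC results with == may misbehave in programs that
// load plugins, since each copy of the function has its own PC.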
449 //go:nosplit
450 func funcPC(f interface{}) uintptr {
451 i := (*iface)(unsafe.Pointer(&f))
452 return **(**uintptr)(i.data)
455 func lockedOSThread() bool {
456 gp := getg()
457 return gp.lockedm != 0 && gp.m.lockedg != 0
460 var (
461 allgs []*g
462 allglock mutex
465 func allgadd(gp *g) {
466 if readgstatus(gp) == _Gidle {
467 throw("allgadd: bad status Gidle")
470 lock(&allglock)
471 allgs = append(allgs, gp)
472 allglen = uintptr(len(allgs))
473 unlock(&allglock)
476 const (
477 // Number of goroutine ids to grab from sched.goidgen to local per-P cache at once.
478 // 16 seems to provide enough amortization, but other than that it's a mostly arbitrary number.
479 _GoidCacheBatch = 16
482 // cpuinit extracts the environment variable GODEBUGCPU from the environment on
483 // Linux and Darwin if the GOEXPERIMENT debugcpu was set and calls internal/cpu.Initialize.
484 func cpuinit() {
485 const prefix = "GODEBUGCPU="
486 var env string
488 if haveexperiment("debugcpu") && (GOOS == "linux" || GOOS == "darwin") {
489 cpu.DebugOptions = true
491 // Similar to goenv_unix but extracts the environment value for
492 // GODEBUGCPU directly.
493 // TODO(moehrmann): remove when general goenvs() can be called before cpuinit()
494 n := int32(0)
495 for argv_index(argv, argc+1+n) != nil {
499 for i := int32(0); i < n; i++ {
500 p := argv_index(argv, argc+1+i)
501 s := *(*string)(unsafe.Pointer(&stringStruct{unsafe.Pointer(p), findnull(p)}))
503 if hasprefix(s, prefix) {
504 env = gostring(p)[len(prefix):]
505 break
510 cpu.Initialize(env)
513 // The bootstrap sequence is:
515 // call osinit
516 // call schedinit
517 // make & queue new G
518 // call runtime·mstart
520 // The new G calls runtime·main.
521 func schedinit() {
522 _m_ := &m0
523 _g_ := &g0
524 _m_.g0 = _g_
525 _m_.curg = _g_
526 _g_.m = _m_
527 setg(_g_)
529 sched.maxmcount = 10000
531 mallocinit()
532 mcommoninit(_g_.m)
533 cpuinit() // must run before alginit
534 alginit() // maps must not be used before this call
536 msigsave(_g_.m)
537 initSigmask = _g_.m.sigmask
539 goargs()
540 goenvs()
541 parsedebugvars()
542 gcinit()
544 sched.lastpoll = uint64(nanotime())
545 procs := ncpu
546 if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
547 procs = n
549 if procresize(procs) != nil {
550 throw("unknown runnable goroutine during bootstrap")
553 // For cgocheck > 1, we turn on the write barrier at all times
554 // and check all pointer writes. We can't do this until after
555 // procresize because the write barrier needs a P.
556 if debug.cgocheck > 1 {
557 writeBarrier.cgo = true
558 writeBarrier.enabled = true
559 for _, p := range allp {
560 p.wbBuf.reset()
564 if buildVersion == "" {
565 // Condition should never trigger. This code just serves
566 // to ensure runtime·buildVersion is kept in the resulting binary.
567 buildVersion = "unknown"
571 func dumpgstatus(gp *g) {
572 _g_ := getg()
573 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
574 print("runtime: g: g=", _g_, ", goid=", _g_.goid, ", g->atomicstatus=", readgstatus(_g_), "\n")
577 func checkmcount() {
578 // sched lock is held
579 if mcount() > sched.maxmcount {
580 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
581 throw("thread exhaustion")
585 func mcommoninit(mp *m) {
586 _g_ := getg()
588 // g0 stack won't make sense for user (and is not necessarily unwindable).
589 if _g_ != _g_.m.g0 {
590 callers(1, mp.createstack[:])
593 lock(&sched.lock)
594 if sched.mnext+1 < sched.mnext {
595 throw("runtime: thread ID overflow")
597 mp.id = sched.mnext
598 sched.mnext++
599 checkmcount()
601 mp.fastrand[0] = 1597334677 * uint32(mp.id)
602 mp.fastrand[1] = uint32(cputicks())
603 if mp.fastrand[0]|mp.fastrand[1] == 0 {
604 mp.fastrand[1] = 1
607 mpreinit(mp)
609 // Add to allm so garbage collector doesn't free g->m
610 // when it is just in a register or thread-local storage.
611 mp.alllink = allm
613 // NumCgoCall() iterates over allm w/o schedlock,
614 // so we need to publish it safely.
615 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
616 unlock(&sched.lock)
619 // Mark gp ready to run.
620 func ready(gp *g, traceskip int, next bool) {
621 if trace.enabled {
622 traceGoUnpark(gp, traceskip)
625 status := readgstatus(gp)
627 // Mark runnable.
628 _g_ := getg()
629 _g_.m.locks++ // disable preemption because it can be holding p in a local var
630 if status&^_Gscan != _Gwaiting {
631 dumpgstatus(gp)
632 throw("bad g->status in ready")
635 // status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
636 casgstatus(gp, _Gwaiting, _Grunnable)
637 runqput(_g_.m.p.ptr(), gp, next)
638 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
639 wakep()
641 _g_.m.locks--
644 func gcprocs() int32 {
645 // Figure out how many CPUs to use during GC.
646 // Limited by gomaxprocs, number of actual CPUs, and MaxGcproc.
647 lock(&sched.lock)
648 n := gomaxprocs
649 if n > ncpu {
650 n = ncpu
652 if n > _MaxGcproc {
653 n = _MaxGcproc
655 if n > sched.nmidle+1 { // one M is currently running
656 n = sched.nmidle + 1
658 unlock(&sched.lock)
659 return n
662 func needaddgcproc() bool {
663 lock(&sched.lock)
664 n := gomaxprocs
665 if n > ncpu {
666 n = ncpu
668 if n > _MaxGcproc {
669 n = _MaxGcproc
671 n -= sched.nmidle + 1 // one M is currently running
672 unlock(&sched.lock)
673 return n > 0
676 func helpgc(nproc int32) {
677 _g_ := getg()
678 lock(&sched.lock)
679 pos := 0
680 for n := int32(1); n < nproc; n++ { // one M is currently running
681 if allp[pos].mcache == _g_.m.mcache {
682 pos++
684 mp := mget()
685 if mp == nil {
686 throw("gcprocs inconsistency")
688 mp.helpgc = n
689 mp.p.set(allp[pos])
690 mp.mcache = allp[pos].mcache
691 pos++
692 notewakeup(&mp.park)
694 unlock(&sched.lock)
697 // freezeStopWait is a large value that freezetheworld sets
698 // sched.stopwait to in order to request that all Gs permanently stop.
699 const freezeStopWait = 0x7fffffff
701 // freezing is set to non-zero if the runtime is trying to freeze the
702 // world.
703 var freezing uint32
705 // Similar to stopTheWorld but best-effort and can be called several times.
706 // There is no reverse operation; it is used during crashing.
707 // This function must not lock any mutexes.
708 func freezetheworld() {
709 atomic.Store(&freezing, 1)
710 // stopwait and preemption requests can be lost
711 // due to races with concurrently executing threads,
712 // so try several times
713 for i := 0; i < 5; i++ {
714 // this should tell the scheduler to not start any new goroutines
715 sched.stopwait = freezeStopWait
716 atomic.Store(&sched.gcwaiting, 1)
717 // this should stop running goroutines
718 if !preemptall() {
719 break // no running goroutines
721 usleep(1000)
723 // to be sure
724 usleep(1000)
725 preemptall()
726 usleep(1000)
729 func isscanstatus(status uint32) bool {
730 if status == _Gscan {
731 throw("isscanstatus: Bad status Gscan")
733 return status&_Gscan == _Gscan
736 // All reads and writes of g's status go through readgstatus, casgstatus,
737 // castogscanstatus, and casfrom_Gscanstatus.
738 //go:nosplit
739 func readgstatus(gp *g) uint32 {
740 return atomic.Load(&gp.atomicstatus)
743 // Ownership of gcscanvalid:
745 // If gp is running (meaning status == _Grunning or _Grunning|_Gscan),
746 // then gp owns gp.gcscanvalid, and other goroutines must not modify it.
748 // Otherwise, a second goroutine can lock the scan state by setting _Gscan
749 // in the status bit and then modify gcscanvalid, and then unlock the scan state.
751 // Note that the first condition implies an exception to the second:
752 // if a second goroutine changes gp's status to _Grunning|_Gscan,
753 // that second goroutine still does not have the right to modify gcscanvalid.
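// A sketch of that lock/unlock protocol (illustrative; scang below
// implements the real version with more cases):
//
//	if castogscanstatus(gp, s, s|_Gscan) { // "lock": set the _Gscan bit
//		// safe to modify gp.gcscanvalid (and scan the stack) here
//		casfrom_Gscanstatus(gp, s|_Gscan, s) // "unlock": clear the bit
//	}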
755 // The Gscanstatuses are acting like locks and this releases them.
756 // If it proves to be a performance hit we should be able to make these
757 // simple atomic stores but for now we are going to throw if
758 // we see an inconsistent state.
759 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
760 success := false
762 // Check that transition is valid.
763 switch oldval {
764 default:
765 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
766 dumpgstatus(gp)
767 throw("casfrom_Gscanstatus:top gp->status is not in scan state")
768 case _Gscanrunnable,
769 _Gscanwaiting,
770 _Gscanrunning,
771 _Gscansyscall:
772 if newval == oldval&^_Gscan {
773 success = atomic.Cas(&gp.atomicstatus, oldval, newval)
776 if !success {
777 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
778 dumpgstatus(gp)
779 throw("casfrom_Gscanstatus: gp->status is not in scan state")
783 // This will return false if the gp is not in the expected status and the cas fails.
784 // This acts like a lock acquire while the casfromgstatus acts like a lock release.
785 func castogscanstatus(gp *g, oldval, newval uint32) bool {
786 switch oldval {
787 case _Grunnable,
788 _Grunning,
789 _Gwaiting,
790 _Gsyscall:
791 if newval == oldval|_Gscan {
792 return atomic.Cas(&gp.atomicstatus, oldval, newval)
795 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
796 throw("castogscanstatus")
797 panic("not reached")
800 // If asked to move to or from a Gscanstatus this will throw. Use the castogscanstatus
801 // and casfrom_Gscanstatus instead.
802 // casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that
803 // put it in the Gscan state is finished.
804 //go:nosplit
805 func casgstatus(gp *g, oldval, newval uint32) {
806 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
807 systemstack(func() {
808 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
809 throw("casgstatus: bad incoming values")
813 if oldval == _Grunning && gp.gcscanvalid {
814 // If oldval == _Grunning, then the actual status must be
815 // _Grunning or _Grunning|_Gscan; either way,
816 // we own gp.gcscanvalid, so it's safe to read.
817 // gp.gcscanvalid must not be true when we are running.
818 systemstack(func() {
819 print("runtime: casgstatus ", hex(oldval), "->", hex(newval), " gp.status=", hex(gp.atomicstatus), " gp.gcscanvalid=true\n")
820 throw("casgstatus")
824 // See https://golang.org/cl/21503 for justification of the yield delay.
825 const yieldDelay = 5 * 1000
826 var nextYield int64
828 // loop if gp->atomicstatus is in a scan state giving
829 // GC time to finish and change the state to oldval.
830 for i := 0; !atomic.Cas(&gp.atomicstatus, oldval, newval); i++ {
831 if oldval == _Gwaiting && gp.atomicstatus == _Grunnable {
832 throw("casgstatus: waiting for Gwaiting but is Grunnable")
834 // Help GC if needed.
835 // if gp.preemptscan && !gp.gcworkdone && (oldval == _Grunning || oldval == _Gsyscall) {
836 // gp.preemptscan = false
837 // systemstack(func() {
838 // gcphasework(gp)
839 // })
840 // }
841 // But meanwhile just yield.
842 if i == 0 {
843 nextYield = nanotime() + yieldDelay
845 if nanotime() < nextYield {
846 for x := 0; x < 10 && gp.atomicstatus != oldval; x++ {
847 procyield(1)
849 } else {
850 osyield()
851 nextYield = nanotime() + yieldDelay/2
854 if newval == _Grunning {
855 gp.gcscanvalid = false
859 // scang blocks until gp's stack has been scanned.
860 // It might be scanned by scang or it might be scanned by the goroutine itself.
861 // Either way, the stack scan has completed when scang returns.
862 func scang(gp *g, gcw *gcWork) {
863 // Invariant: we (the caller, markroot for a specific goroutine) own gp.gcscandone.
864 // Nothing is racing with us now, but gcscandone might be set to true left over
865 // from an earlier round of stack scanning (we scan twice per GC).
866 // We use gcscandone to record whether the scan has been done during this round.
868 gp.gcscandone = false
870 // See https://golang.org/cl/21503 for justification of the yield delay.
871 const yieldDelay = 10 * 1000
872 var nextYield int64
874 // Endeavor to get gcscandone set to true,
875 // either by doing the stack scan ourselves or by coercing gp to scan itself.
876 // gp.gcscandone can transition from false to true when we're not looking
877 // (if we asked for preemption), so any time we lock the status using
878 // castogscanstatus we have to double-check that the scan is still not done.
879 loop:
880 for i := 0; !gp.gcscandone; i++ {
881 switch s := readgstatus(gp); s {
882 default:
883 dumpgstatus(gp)
884 throw("stopg: invalid status")
886 case _Gdead:
887 // No stack.
888 gp.gcscandone = true
889 break loop
891 case _Gcopystack:
892 // Stack being switched. Go around again.
894 case _Grunnable, _Gsyscall, _Gwaiting:
895 // Claim goroutine by setting scan bit.
896 // Racing with execution or readying of gp.
897 // The scan bit keeps them from running
898 // the goroutine until we're done.
899 if castogscanstatus(gp, s, s|_Gscan) {
900 if gp.scanningself {
901 // Don't try to scan the stack
902 // if the goroutine is going to do
903 // it itself.
904 restartg(gp)
905 break
907 if !gp.gcscandone {
908 scanstack(gp, gcw)
909 gp.gcscandone = true
911 restartg(gp)
912 break loop
915 case _Gscanwaiting:
916 // newstack is doing a scan for us right now. Wait.
918 case _Gscanrunning:
919 // checkPreempt is scanning. Wait.
921 case _Grunning:
922 // Goroutine running. Try to preempt execution so it can scan itself.
923 // The preemption handler (in newstack) does the actual scan.
925 // Optimization: if there is already a pending preemption request
926 // (from the previous loop iteration), don't bother with the atomics.
927 if gp.preemptscan && gp.preempt {
928 break
931 // Ask for preemption and self scan.
932 if castogscanstatus(gp, _Grunning, _Gscanrunning) {
933 if !gp.gcscandone {
934 gp.preemptscan = true
935 gp.preempt = true
937 casfrom_Gscanstatus(gp, _Gscanrunning, _Grunning)
941 if i == 0 {
942 nextYield = nanotime() + yieldDelay
944 if nanotime() < nextYield {
945 procyield(10)
946 } else {
947 osyield()
948 nextYield = nanotime() + yieldDelay/2
952 gp.preemptscan = false // cancel scan request if no longer needed
955 // The GC requests that this routine be moved from a scanmumble state to a mumble state.
956 func restartg(gp *g) {
957 s := readgstatus(gp)
958 switch s {
959 default:
960 dumpgstatus(gp)
961 throw("restartg: unexpected status")
963 case _Gdead:
964 // ok
966 case _Gscanrunnable,
967 _Gscanwaiting,
968 _Gscansyscall:
969 casfrom_Gscanstatus(gp, s, s&^_Gscan)
973 // stopTheWorld stops all P's from executing goroutines, interrupting
974 // all goroutines at GC safe points and records reason as the reason
975 // for the stop. On return, only the current goroutine's P is running.
976 // stopTheWorld must not be called from a system stack and the caller
977 // must not hold worldsema. The caller must call startTheWorld when
978 // other P's should resume execution.
980 // stopTheWorld is safe for multiple goroutines to call at the
981 // same time. Each will execute its own stop, and the stops will
982 // be serialized.
984 // This is also used by routines that do stack dumps. If the system is
985 // in panic or being exited, this may not reliably stop all
986 // goroutines.
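// The usual call pattern (illustrative) is simply:
//
//	stopTheWorld("reason")
//	// ... operate on the stopped world ...
//	startTheWorld()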
987 func stopTheWorld(reason string) {
988 semacquire(&worldsema)
989 getg().m.preemptoff = reason
990 systemstack(stopTheWorldWithSema)
993 // startTheWorld undoes the effects of stopTheWorld.
994 func startTheWorld() {
995 systemstack(func() { startTheWorldWithSema(false) })
996 // worldsema must be held over startTheWorldWithSema to ensure
997 // gomaxprocs cannot change while worldsema is held.
998 semrelease(&worldsema)
999 getg().m.preemptoff = ""
1002 // Holding worldsema grants an M the right to try to stop the world
1003 // and prevents gomaxprocs from changing concurrently.
1004 var worldsema uint32 = 1
1006 // stopTheWorldWithSema is the core implementation of stopTheWorld.
1007 // The caller is responsible for acquiring worldsema and disabling
1008 // preemption first and then should call stopTheWorldWithSema on the system
1009 // stack:
1011 // semacquire(&worldsema, 0)
1012 // m.preemptoff = "reason"
1013 // systemstack(stopTheWorldWithSema)
1015 // When finished, the caller must either call startTheWorld or undo
1016 // these three operations separately:
1018 // m.preemptoff = ""
1019 // systemstack(startTheWorldWithSema)
1020 // semrelease(&worldsema)
1022 // It is allowed to acquire worldsema once and then execute multiple
1023 // startTheWorldWithSema/stopTheWorldWithSema pairs.
1024 // Other P's are able to execute between successive calls to
1025 // startTheWorldWithSema and stopTheWorldWithSema.
1026 // Holding worldsema causes any other goroutines invoking
1027 // stopTheWorld to block.
1028 func stopTheWorldWithSema() {
1029 _g_ := getg()
1031 // If we hold a lock, then we won't be able to stop another M
1032 // that is blocked trying to acquire the lock.
1033 if _g_.m.locks > 0 {
1034 throw("stopTheWorld: holding locks")
1037 lock(&sched.lock)
1038 sched.stopwait = gomaxprocs
1039 atomic.Store(&sched.gcwaiting, 1)
1040 preemptall()
1041 // stop current P
1042 _g_.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic.
1043 sched.stopwait--
1044 // try to retake all P's in Psyscall status
1045 for _, p := range allp {
1046 s := p.status
1047 if s == _Psyscall && atomic.Cas(&p.status, s, _Pgcstop) {
1048 if trace.enabled {
1049 traceGoSysBlock(p)
1050 traceProcStop(p)
1052 p.syscalltick++
1053 sched.stopwait--
1056 // stop idle P's
1057 for {
1058 p := pidleget()
1059 if p == nil {
1060 break
1062 p.status = _Pgcstop
1063 sched.stopwait--
1065 wait := sched.stopwait > 0
1066 unlock(&sched.lock)
1068 // wait for remaining P's to stop voluntarily
1069 if wait {
1070 for {
1071 // wait for 100us, then try to re-preempt in case of any races
1072 if notetsleep(&sched.stopnote, 100*1000) {
1073 noteclear(&sched.stopnote)
1074 break
1076 preemptall()
1080 // sanity checks
1081 bad := ""
1082 if sched.stopwait != 0 {
1083 bad = "stopTheWorld: not stopped (stopwait != 0)"
1084 } else {
1085 for _, p := range allp {
1086 if p.status != _Pgcstop {
1087 bad = "stopTheWorld: not stopped (status != _Pgcstop)"
1091 if atomic.Load(&freezing) != 0 {
1092 // Some other thread is panicking. This can cause the
1093 // sanity checks above to fail if the panic happens in
1094 // the signal handler on a stopped thread. Either way,
1095 // we should halt this thread.
1096 lock(&deadlock)
1097 lock(&deadlock)
1099 if bad != "" {
1100 throw(bad)
1104 func mhelpgc() {
1105 _g_ := getg()
1106 _g_.m.helpgc = -1
1109 func startTheWorldWithSema(emitTraceEvent bool) int64 {
1110 _g_ := getg()
1112 _g_.m.locks++ // disable preemption because it can be holding p in a local var
1113 if netpollinited() {
1114 gp := netpoll(false) // non-blocking
1115 injectglist(gp)
1117 add := needaddgcproc()
1118 lock(&sched.lock)
1120 procs := gomaxprocs
1121 if newprocs != 0 {
1122 procs = newprocs
1123 newprocs = 0
1125 p1 := procresize(procs)
1126 sched.gcwaiting = 0
1127 if sched.sysmonwait != 0 {
1128 sched.sysmonwait = 0
1129 notewakeup(&sched.sysmonnote)
1131 unlock(&sched.lock)
1133 for p1 != nil {
1134 p := p1
1135 p1 = p1.link.ptr()
1136 if p.m != 0 {
1137 mp := p.m.ptr()
1138 p.m = 0
1139 if mp.nextp != 0 {
1140 throw("startTheWorld: inconsistent mp->nextp")
1142 mp.nextp.set(p)
1143 notewakeup(&mp.park)
1144 } else {
1145 // Start M to run P. Do not start another M below.
1146 newm(nil, p)
1147 add = false
1151 // Capture start-the-world time before doing clean-up tasks.
1152 startTime := nanotime()
1153 if emitTraceEvent {
1154 traceGCSTWDone()
1157 // Wakeup an additional proc in case we have excessive runnable goroutines
1158 // in local queues or in the global queue. If we don't, the proc will park itself.
1159 // If we have lots of excessive work, resetspinning will unpark additional procs as necessary.
1160 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
1161 wakep()
1164 if add {
1165 // If GC could have used another helper proc, start one now,
1166 // in the hope that it will be available next time.
1167 // It would have been even better to start it before the collection,
1168 // but doing so requires allocating memory, so it's tricky to
1169 // coordinate. This lazy approach works out in practice:
1170 // we don't mind if the first couple gc rounds don't have quite
1171 // the maximum number of procs.
1172 newm(mhelpgc, nil)
1174 _g_.m.locks--
1176 return startTime
1179 // First function run by a new goroutine.
1180 // This is passed to makecontext.
1181 func kickoff() {
1182 gp := getg()
1184 if gp.traceback != 0 {
1185 gtraceback(gp)
1188 fv := gp.entry
1189 param := gp.param
1191 // When running on the g0 stack we can wind up here without a p,
1192 // for example from mcall(exitsyscall0) in exitsyscall, in
1193 // which case we can not run a write barrier.
1194 // It is also possible for us to get here from the systemstack
1195 // call in wbBufFlush, at which point the write barrier buffer
1196 // is full and we can not run a write barrier.
1197 // Setting gp.entry = nil or gp.param = nil will try to run a
1198 // write barrier, so if we are on the g0 stack due to mcall
1199 // (systemstack calls mcall) then clear the field using uintptr.
1200 // This is OK when gp.param is gp.m.curg, as curg will be kept
1201 // alive elsewhere, and gp.entry always points into g, or
1202 // to a statically allocated value, or (in the case of mcall)
1203 // to the stack.
1204 if gp == gp.m.g0 && gp.param == unsafe.Pointer(gp.m.curg) {
1205 *(*uintptr)(unsafe.Pointer(&gp.entry)) = 0
1206 *(*uintptr)(unsafe.Pointer(&gp.param)) = 0
1207 } else if gp.m.p == 0 {
1208 throw("no p in kickoff")
1209 } else {
1210 gp.entry = nil
1211 gp.param = nil
1214 fv(param)
1215 goexit1()
1218 func mstart1() {
1219 _g_ := getg()
1221 if _g_ != _g_.m.g0 {
1222 throw("bad runtime·mstart")
1225 asminit()
1227 // Install signal handlers; after minit so that minit can
1228 // prepare the thread to be able to handle the signals.
1229 // For gccgo minit was called by C code.
1230 if _g_.m == &m0 {
1231 mstartm0()
1234 if fn := _g_.m.mstartfn; fn != nil {
1235 fn()
1238 if _g_.m.helpgc != 0 {
1239 _g_.m.helpgc = 0
1240 stopm()
1241 } else if _g_.m != &m0 {
1242 acquirep(_g_.m.nextp.ptr())
1243 _g_.m.nextp = 0
1245 schedule()
1248 // mstartm0 implements part of mstart1 that only runs on the m0.
1250 // Write barriers are allowed here because we know the GC can't be
1251 // running yet, so they'll be no-ops.
1253 //go:yeswritebarrierrec
1254 func mstartm0() {
1255 // Create an extra M for callbacks on threads not created by Go.
1256 // An extra M is also needed on Windows for callbacks created by
1257 // syscall.NewCallback. See issue #6751 for details.
1258 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1259 cgoHasExtraM = true
1260 newextram()
1262 initsig(false)
1265 // mexit tears down and exits the current thread.
1267 // Don't call this directly to exit the thread, since it must run at
1268 // the top of the thread stack. Instead, use gogo(&_g_.m.g0.sched) to
1269 // unwind the stack to the point that exits the thread.
1271 // It is entered with m.p != nil, so write barriers are allowed. It
1272 // will release the P before exiting.
1274 //go:yeswritebarrierrec
1275 func mexit(osStack bool) {
1276 g := getg()
1277 m := g.m
1279 if m == &m0 {
1280 // This is the main thread. Just wedge it.
1282 // On Linux, exiting the main thread puts the process
1283 // into a non-waitable zombie state. On Plan 9,
1284 // exiting the main thread unblocks wait even though
1285 // other threads are still running. On Solaris we can
1286 // neither exitThread nor return from mstart. Other
1287 // bad things probably happen on other platforms.
1289 // We could try to clean up this M more before wedging
1290 // it, but that complicates signal handling.
1291 handoffp(releasep())
1292 lock(&sched.lock)
1293 sched.nmfreed++
1294 checkdead()
1295 unlock(&sched.lock)
1296 notesleep(&m.park)
1297 throw("locked m0 woke up")
1300 sigblock()
1301 unminit()
1303 // Free the gsignal stack.
1304 if m.gsignal != nil {
1305 stackfree(m.gsignal)
1308 // Remove m from allm.
1309 lock(&sched.lock)
1310 for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
1311 if *pprev == m {
1312 *pprev = m.alllink
1313 goto found
1316 throw("m not found in allm")
1317 found:
1318 if !osStack {
1319 // Delay reaping m until it's done with the stack.
1321 // If this is using an OS stack, the OS will free it
1322 // so there's no need for reaping.
1323 atomic.Store(&m.freeWait, 1)
1324 // Put m on the free list, though it will not be reaped until
1325 // freeWait is 0. Note that the free list must not be linked
1326 // through alllink because some functions walk allm without
1327 // locking, so may be using alllink.
1328 m.freelink = sched.freem
1329 sched.freem = m
1331 unlock(&sched.lock)
1333 // Release the P.
1334 handoffp(releasep())
1335 // After this point we must not have write barriers.
1337 // Invoke the deadlock detector. This must happen after
1338 // handoffp because it may have started a new M to take our
1339 // P's work.
1340 lock(&sched.lock)
1341 sched.nmfreed++
1342 checkdead()
1343 unlock(&sched.lock)
1345 if osStack {
1346 // Return from mstart and let the system thread
1347 // library free the g0 stack and terminate the thread.
1348 return
1351 // mstart is the thread's entry point, so there's nothing to
1352 // return to. Exit the thread directly. exitThread will clear
1353 // m.freeWait when it's done with the stack and the m can be
1354 // reaped.
1355 exitThread(&m.freeWait)
1358 // forEachP calls fn(p) for every P p when p reaches a GC safe point.
1359 // If a P is currently executing code, this will bring the P to a GC
1360 // safe point and execute fn on that P. If the P is not executing code
1361 // (it is idle or in a syscall), this will call fn(p) directly while
1362 // preventing the P from exiting its state. This does not ensure that
1363 // fn will run on every CPU executing Go code, but it acts as a global
1364 // memory barrier. GC uses this as a "ragged barrier."
1366 // The caller must hold worldsema.
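// An illustrative call, with the body standing in for whatever per-P state
// needs to be flushed at the safe point:
//
//	forEachP(func(_p_ *p) {
//		// flush or reset per-P caches here
//	})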
1368 //go:systemstack
1369 func forEachP(fn func(*p)) {
1370 mp := acquirem()
1371 _p_ := getg().m.p.ptr()
1373 lock(&sched.lock)
1374 if sched.safePointWait != 0 {
1375 throw("forEachP: sched.safePointWait != 0")
1377 sched.safePointWait = gomaxprocs - 1
1378 sched.safePointFn = fn
1380 // Ask all Ps to run the safe point function.
1381 for _, p := range allp {
1382 if p != _p_ {
1383 atomic.Store(&p.runSafePointFn, 1)
1386 preemptall()
1388 // Any P entering _Pidle or _Psyscall from now on will observe
1389 // p.runSafePointFn == 1 and will call runSafePointFn when
1390 // changing its status to _Pidle/_Psyscall.
1392 // Run safe point function for all idle Ps. sched.pidle will
1393 // not change because we hold sched.lock.
1394 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
1395 if atomic.Cas(&p.runSafePointFn, 1, 0) {
1396 fn(p)
1397 sched.safePointWait--
1401 wait := sched.safePointWait > 0
1402 unlock(&sched.lock)
1404 // Run fn for the current P.
1405 fn(_p_)
1407 // Force Ps currently in _Psyscall into _Pidle and hand them
1408 // off to induce safe point function execution.
1409 for _, p := range allp {
1410 s := p.status
1411 if s == _Psyscall && p.runSafePointFn == 1 && atomic.Cas(&p.status, s, _Pidle) {
1412 if trace.enabled {
1413 traceGoSysBlock(p)
1414 traceProcStop(p)
1416 p.syscalltick++
1417 handoffp(p)
1421 // Wait for remaining Ps to run fn.
1422 if wait {
1423 for {
1424 // Wait for 100us, then try to re-preempt in
1425 // case of any races.
1427 // Requires system stack.
1428 if notetsleep(&sched.safePointNote, 100*1000) {
1429 noteclear(&sched.safePointNote)
1430 break
1432 preemptall()
1435 if sched.safePointWait != 0 {
1436 throw("forEachP: not done")
1438 for _, p := range allp {
1439 if p.runSafePointFn != 0 {
1440 throw("forEachP: P did not run fn")
1444 lock(&sched.lock)
1445 sched.safePointFn = nil
1446 unlock(&sched.lock)
1447 releasem(mp)
1450 // runSafePointFn runs the safe point function, if any, for this P.
1451 // This should be called like
1453 // if getg().m.p.runSafePointFn != 0 {
1454 // runSafePointFn()
1455 // }
1457 // runSafePointFn must be checked on any transition in to _Pidle or
1458 // _Psyscall to avoid a race where forEachP sees that the P is running
1459 // just before the P goes into _Pidle/_Psyscall and neither forEachP
1460 // nor the P run the safe-point function.
1461 func runSafePointFn() {
1462 p := getg().m.p.ptr()
1463 // Resolve the race between forEachP running the safe-point
1464 // function on this P's behalf and this P running the
1465 // safe-point function directly.
1466 if !atomic.Cas(&p.runSafePointFn, 1, 0) {
1467 return
1469 sched.safePointFn(p)
1470 lock(&sched.lock)
1471 sched.safePointWait--
1472 if sched.safePointWait == 0 {
1473 notewakeup(&sched.safePointNote)
1475 unlock(&sched.lock)
1478 // Allocate a new m unassociated with any thread.
1479 // Can use p for allocation context if needed.
1480 // fn is recorded as the new m's m.mstartfn.
1482 // This function is allowed to have write barriers even if the caller
1483 // isn't because it borrows _p_.
1485 //go:yeswritebarrierrec
1486 func allocm(_p_ *p, fn func(), allocatestack bool) (mp *m, g0Stack unsafe.Pointer, g0StackSize uintptr) {
1487 _g_ := getg()
1488 _g_.m.locks++ // disable GC because it can be called from sysmon
1489 if _g_.m.p == 0 {
1490 acquirep(_p_) // temporarily borrow p for mallocs in this function
1493 // Release the free M list. We need to do this somewhere and
1494 // this may free up a stack we can use.
1495 if sched.freem != nil {
1496 lock(&sched.lock)
1497 var newList *m
1498 for freem := sched.freem; freem != nil; {
1499 if freem.freeWait != 0 {
1500 next := freem.freelink
1501 freem.freelink = newList
1502 newList = freem
1503 freem = next
1504 continue
1506 stackfree(freem.g0)
1507 freem = freem.freelink
1509 sched.freem = newList
1510 unlock(&sched.lock)
1513 mp = new(m)
1514 mp.mstartfn = fn
1515 mcommoninit(mp)
1517 mp.g0 = malg(allocatestack, false, &g0Stack, &g0StackSize)
1518 mp.g0.m = mp
1520 if _p_ == _g_.m.p.ptr() {
1521 releasep()
1523 _g_.m.locks--
1525 return mp, g0Stack, g0StackSize
1528 // needm is called when a cgo callback happens on a
1529 // thread without an m (a thread not created by Go).
1530 // In this case, needm is expected to find an m to use
1531 // and return with m, g initialized correctly.
1532 // Since m and g are not set now (likely nil, but see below)
1533 // needm is limited in what routines it can call. In particular
1534 // it can only call nosplit functions (textflag 7) and cannot
1535 // do any scheduling that requires an m.
1537 // In order to avoid needing heavy lifting here, we adopt
1538 // the following strategy: there is a stack of available m's
1539 // that can be stolen. Using compare-and-swap
1540 // to pop from the stack has ABA races, so we simulate
1541 // a lock by doing an exchange (via casp) to steal the stack
1542 // head and replace the top pointer with MLOCKED (1).
1543 // This serves as a simple spin lock that we can use even
1544 // without an m. The thread that locks the stack in this way
1545 // unlocks the stack by storing a valid stack head pointer.
1547 // In order to make sure that there is always an m structure
1548 // available to be stolen, we maintain the invariant that there
1549 // is always one more than needed. At the beginning of the
1550 // program (if cgo is in use) the list is seeded with a single m.
1551 // If needm finds that it has taken the last m off the list, its job
1552 // is - once it has installed its own m so that it can do things like
1553 // allocate memory - to create a spare m and put it on the list.
1555 // Each of these extra m's also has a g0 and a curg that are
1556 // pressed into service as the scheduling stack and current
1557 // goroutine for the duration of the cgo callback.
1559 // When the callback is done with the m, it calls dropm to
1560 // put the m back on the list.
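// A sketch of that lock-by-exchange (illustrative; lockextra and
// unlockextra below are the real implementation, and newHead here is just
// a placeholder for the updated list head):
//
//	for {
//		old := atomic.Loaduintptr(&extram)
//		if old == locked {      // someone else holds the "lock"
//			osyield()
//			continue
//		}
//		if atomic.Casuintptr(&extram, old, locked) {
//			// we now own the list head (old); push or pop, then
//			// unlock by publishing the new head:
//			atomic.Storeuintptr(&extram, newHead)
//			break
//		}
//	}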
1561 //go:nosplit
1562 func needm(x byte) {
1563 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1564 // Can happen if C/C++ code calls Go from a global ctor.
1565 // Can also happen on Windows if a global ctor uses a
1566 // callback created by syscall.NewCallback. See issue #6751
1567 // for details.
1569 // Can not throw, because scheduler is not initialized yet.
1570 write(2, unsafe.Pointer(&earlycgocallback[0]), int32(len(earlycgocallback)))
1571 exit(1)
1574 // Lock extra list, take head, unlock popped list.
1575 // nilokay=false is safe here because of the invariant above,
1576 // that the extra list always contains or will soon contain
1577 // at least one m.
1578 mp := lockextra(false)
1580 // Set needextram when we've just emptied the list,
1581 // so that the eventual call into cgocallbackg will
1582 // allocate a new m for the extra list. We delay the
1583 // allocation until then so that it can be done
1584 // after exitsyscall makes sure it is okay to be
1585 // running at all (that is, there's no garbage collection
1586 // running right now).
1587 mp.needextram = mp.schedlink == 0
1588 extraMCount--
1589 unlockextra(mp.schedlink.ptr())
1591 // Save and block signals before installing g.
1592 // Once g is installed, any incoming signals will try to execute,
1593 // but we won't have the sigaltstack settings and other data
1594 // set up appropriately until the end of minit, which will
1595 // unblock the signals. This is the same dance as when
1596 // starting a new m to run Go code via newosproc.
1597 msigsave(mp)
1598 sigblock()
1600 // Install g (= m->curg).
1601 setg(mp.curg)
1603 // Initialize this thread to use the m.
1604 asminit()
1605 minit()
1607 setGContext()
1609 // mp.curg is now a real goroutine.
1610 casgstatus(mp.curg, _Gdead, _Gsyscall)
1611 atomic.Xadd(&sched.ngsys, -1)
1614 var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n")
1616 // newextram allocates m's and puts them on the extra list.
1617 // It is called with a working local m, so that it can do things
1618 // like call schedlock and allocate.
1619 func newextram() {
1620 c := atomic.Xchg(&extraMWaiters, 0)
1621 if c > 0 {
1622 for i := uint32(0); i < c; i++ {
1623 oneNewExtraM()
1625 } else {
1626 // Make sure there is at least one extra M.
1627 mp := lockextra(true)
1628 unlockextra(mp)
1629 if mp == nil {
1630 oneNewExtraM()
1635 // oneNewExtraM allocates an m and puts it on the extra list.
1636 func oneNewExtraM() {
1637 // Create extra goroutine locked to extra m.
1638 // The goroutine is the context in which the cgo callback will run.
1639 // The sched.pc will never be returned to, but setting it to
1640 // goexit makes clear to the traceback routines where
1641 // the goroutine stack ends.
1642 mp, g0SP, g0SPSize := allocm(nil, nil, true)
1643 gp := malg(true, false, nil, nil)
1644 gp.gcscanvalid = true
1645 gp.gcscandone = true
1646 // malg returns status as _Gidle. Change to _Gdead before
1647 // adding to allg where GC can see it. We use _Gdead to hide
1648 // this from tracebacks and stack scans since it isn't a
1649 // "real" goroutine until needm grabs it.
1650 casgstatus(gp, _Gidle, _Gdead)
1651 gp.m = mp
1652 mp.curg = gp
1653 mp.lockedInt++
1654 mp.lockedg.set(gp)
1655 gp.lockedm.set(mp)
1656 gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1))
1657 // put on allg for garbage collector
1658 allgadd(gp)
1660 // The context for gp will be set up in needm.
1661 // Here we need to set the context for g0.
1662 makeGContext(mp.g0, g0SP, g0SPSize)
1664 // gp is now on the allg list, but we don't want it to be
1665 // counted by gcount. It would be more "proper" to increment
1666 // sched.ngfree, but that requires locking. Incrementing ngsys
1667 // has the same effect.
1668 atomic.Xadd(&sched.ngsys, +1)
1670 // Add m to the extra list.
1671 mnext := lockextra(true)
1672 mp.schedlink.set(mnext)
1673 extraMCount++
1674 unlockextra(mp)
1677 // dropm is called when a cgo callback has called needm but is now
1678 // done with the callback and returning back into the non-Go thread.
1679 // It puts the current m back onto the extra list.
1681 // The main expense here is the call to signalstack to release the
1682 // m's signal stack, and then the call to needm on the next callback
1683 // from this thread. It is tempting to try to save the m for next time,
1684 // which would eliminate both these costs, but there might not be
1685 // a next time: the current thread (which Go does not control) might exit.
1686 // If we saved the m for that thread, there would be an m leak each time
1687 // such a thread exited. Instead, we acquire and release an m on each
1688 // call. These should typically not be scheduling operations, just a few
1689 // atomics, so the cost should be small.
1691 // TODO(rsc): An alternative would be to allocate a dummy pthread per-thread
1692 // variable using pthread_key_create. Unlike the pthread keys we already use
1693 // on OS X, this dummy key would never be read by Go code. It would exist
1694 // only so that we could register a thread-exit-time destructor.
1695 // That destructor would put the m back onto the extra list.
1696 // This is purely a performance optimization. The current version,
1697 // in which dropm happens on each cgo call, is still correct too.
1698 // We may have to keep the current version on systems with cgo
1699 // but without pthreads, like Windows.
1701 // CgocallBackDone calls this after releasing p, so no write barriers.
1702 //go:nowritebarrierrec
1703 func dropm() {
1704 // Clear m and g, and return m to the extra list.
1705 // After the call to setg we can only call nosplit functions
1706 // with no pointer manipulation.
1707 mp := getg().m
1709 // Return mp.curg to dead state.
1710 casgstatus(mp.curg, _Gsyscall, _Gdead)
1711 atomic.Xadd(&sched.ngsys, +1)
1713 // Block signals before unminit.
1714 // Unminit unregisters the signal handling stack (but needs g on some systems).
1715 // Setg(nil) clears g, which is the signal handler's cue not to run Go handlers.
1716 // It's important not to try to handle a signal between those two steps.
1717 sigmask := mp.sigmask
1718 sigblock()
1719 unminit()
1721 // gccgo sets the stack to Gdead here, because the splitstack
1722 // context is not initialized.
1723 atomic.Store(&mp.curg.atomicstatus, _Gdead)
1724 mp.curg.gcstack = 0
1725 mp.curg.gcnextsp = 0
1727 mnext := lockextra(true)
1728 extraMCount++
1729 mp.schedlink.set(mnext)
1731 setg(nil)
1733 // Commit the release of mp.
1734 unlockextra(mp)
1736 msigrestore(sigmask)
1739 // A helper function for EnsureDropM.
1740 func getm() uintptr {
1741 return uintptr(unsafe.Pointer(getg().m))
1744 var extram uintptr
1745 var extraMCount uint32 // Protected by lockextra
1746 var extraMWaiters uint32
1748 // lockextra locks the extra list and returns the list head.
1749 // The caller must unlock the list by storing a new list head
1750 // to extram. If nilokay is true, then lockextra will
1751 // return a nil list head if that's what it finds. If nilokay is false,
1752 // lockextra will keep waiting until the list head is no longer nil.
1753 //go:nosplit
1754 //go:nowritebarrierrec
1755 func lockextra(nilokay bool) *m {
1756 const locked = 1
1758 incr := false
1759 for {
1760 old := atomic.Loaduintptr(&extram)
1761 if old == locked {
1762 yield := osyield
1763 yield()
1764 continue
1766 if old == 0 && !nilokay {
1767 if !incr {
1768 // Add 1 to the number of threads
1769 // waiting for an M.
1770 // This is cleared by newextram.
1771 atomic.Xadd(&extraMWaiters, 1)
1772 incr = true
1774 usleep(1)
1775 continue
1777 if atomic.Casuintptr(&extram, old, locked) {
1778 return (*m)(unsafe.Pointer(old))
1780 yield := osyield
1781 yield()
1782 continue
1786 //go:nosplit
1787 //go:nowritebarrierrec
1788 func unlockextra(mp *m) {
1789 atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp)))
1792 // execLock serializes exec and clone to avoid bugs or unspecified behaviour
1793 // around exec'ing while creating/destroying threads. See issue #19546.
1794 var execLock rwmutex
1796 // newmHandoff contains a list of m structures that need new OS threads.
1797 // This is used by newm in situations where newm itself can't safely
1798 // start an OS thread.
1799 var newmHandoff struct {
1800 lock mutex
1802 // newm points to a list of M structures that need new OS
1803 // threads. The list is linked through m.schedlink.
1804 newm muintptr
1806 // waiting indicates that wake needs to be notified when an m
1807 // is put on the list.
1808 waiting bool
1809 wake note
1811 // haveTemplateThread indicates that the templateThread has
1812 // been started. This is not protected by lock. Use cas to set
1813 // to 1.
1814 haveTemplateThread uint32
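// The protocol for newmHandoff, as used by newm and templateThread below:
// a producer that cannot start a thread itself links its m onto newm under
// lock and, if waiting is set, clears it and wakes the template thread; the
// template thread drains the list, calls newm1 for each m, then sets waiting
// and sleeps on wake until more work arrives.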
1817 // Create a new m. It will start off with a call to fn, or else the scheduler.
1818 // fn needs to be static and not a heap allocated closure.
1819 // May run with m.p==nil, so write barriers are not allowed.
1820 //go:nowritebarrierrec
1821 func newm(fn func(), _p_ *p) {
1822 mp, _, _ := allocm(_p_, fn, false)
1823 mp.nextp.set(_p_)
1824 mp.sigmask = initSigmask
1825 if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
1826 // We're on a locked M or a thread that may have been
1827 // started by C. The kernel state of this thread may
1828 // be strange (the user may have locked it for that
1829 // purpose). We don't want to clone that into another
1830 // thread. Instead, ask a known-good thread to create
1831 // the thread for us.
1833 // This is disabled on Plan 9. See golang.org/issue/22227.
1835 // TODO: This may be unnecessary on Windows, which
1836 // doesn't model thread creation off fork.
1837 lock(&newmHandoff.lock)
1838 if newmHandoff.haveTemplateThread == 0 {
1839 throw("on a locked thread with no template thread")
1841 mp.schedlink = newmHandoff.newm
1842 newmHandoff.newm.set(mp)
1843 if newmHandoff.waiting {
1844 newmHandoff.waiting = false
1845 notewakeup(&newmHandoff.wake)
1847 unlock(&newmHandoff.lock)
1848 return
1850 newm1(mp)
1853 func newm1(mp *m) {
1854 execLock.rlock() // Prevent process clone.
1855 newosproc(mp)
1856 execLock.runlock()
1859 // startTemplateThread starts the template thread if it is not already
1860 // running.
1862 // The calling thread must itself be in a known-good state.
1863 func startTemplateThread() {
1864 if GOARCH == "wasm" { // no threads on wasm yet
1865 return
1867 if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
1868 return
1870 newm(templateThread, nil)
1873 // templateThread is a thread in a known-good state that exists solely
1874 // to start new threads in known-good states when the calling thread
1875 // may not be in a good state.
1877 // Many programs never need this, so templateThread is started lazily
1878 // when we first enter a state that might lead to running on a thread
1879 // in an unknown state.
1881 // templateThread runs on an M without a P, so it must not have write
1882 // barriers.
1884 //go:nowritebarrierrec
1885 func templateThread() {
1886 lock(&sched.lock)
1887 sched.nmsys++
1888 checkdead()
1889 unlock(&sched.lock)
1891 for {
1892 lock(&newmHandoff.lock)
1893 for newmHandoff.newm != 0 {
1894 newm := newmHandoff.newm.ptr()
1895 newmHandoff.newm = 0
1896 unlock(&newmHandoff.lock)
1897 for newm != nil {
1898 next := newm.schedlink.ptr()
1899 newm.schedlink = 0
1900 newm1(newm)
1901 newm = next
1903 lock(&newmHandoff.lock)
1905 newmHandoff.waiting = true
1906 noteclear(&newmHandoff.wake)
1907 unlock(&newmHandoff.lock)
1908 notesleep(&newmHandoff.wake)
1912 // Stops execution of the current m until new work is available.
1913 // Returns with acquired P.
1914 func stopm() {
1915 _g_ := getg()
1917 if _g_.m.locks != 0 {
1918 throw("stopm holding locks")
1920 if _g_.m.p != 0 {
1921 throw("stopm holding p")
1923 if _g_.m.spinning {
1924 throw("stopm spinning")
1927 retry:
1928 lock(&sched.lock)
1929 mput(_g_.m)
1930 unlock(&sched.lock)
1931 notesleep(&_g_.m.park)
1932 noteclear(&_g_.m.park)
1933 if _g_.m.helpgc != 0 {
1934 // helpgc() set _g_.m.p and _g_.m.mcache, so we have a P.
1935 gchelper()
1936 // Undo the effects of helpgc().
1937 _g_.m.helpgc = 0
1938 _g_.m.mcache = nil
1939 _g_.m.p = 0
1940 goto retry
1942 acquirep(_g_.m.nextp.ptr())
1943 _g_.m.nextp = 0
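// stopm above parks this M on its m.park note. It is woken later by startm or
// startlockedm via notewakeup(&mp.park), by which point m.nextp has been set
// to the P this M should acquire on the way out.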
1946 func mspinning() {
1947 // startm's caller incremented nmspinning. Set the new M's spinning.
1948 getg().m.spinning = true
1951 // Schedules some M to run the p (creates an M if necessary).
1952 // If p==nil, tries to get an idle P; if there are no idle P's, does nothing.
1953 // May run with m.p==nil, so write barriers are not allowed.
1954 // If spinning is set, the caller has incremented nmspinning and startm will
1955 // either decrement nmspinning or set m.spinning in the newly started M.
1956 //go:nowritebarrierrec
1957 func startm(_p_ *p, spinning bool) {
1958 lock(&sched.lock)
1959 if _p_ == nil {
1960 _p_ = pidleget()
1961 if _p_ == nil {
1962 unlock(&sched.lock)
1963 if spinning {
1964 // The caller incremented nmspinning, but there are no idle Ps,
1965 // so it's okay to just undo the increment and give up.
1966 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
1967 throw("startm: negative nmspinning")
1970 return
1973 mp := mget()
1974 unlock(&sched.lock)
1975 if mp == nil {
1976 var fn func()
1977 if spinning {
1978 // The caller incremented nmspinning, so set m.spinning in the new M.
1979 fn = mspinning
1981 newm(fn, _p_)
1982 return
1984 if mp.spinning {
1985 throw("startm: m is spinning")
1987 if mp.nextp != 0 {
1988 throw("startm: m has p")
1990 if spinning && !runqempty(_p_) {
1991 throw("startm: p has runnable gs")
1993 // The caller incremented nmspinning, so set m.spinning in the new M.
1994 mp.spinning = spinning
1995 mp.nextp.set(_p_)
1996 notewakeup(&mp.park)
1999 // Hands off P from syscall or locked M.
2000 // Always runs without a P, so write barriers are not allowed.
2001 //go:nowritebarrierrec
2002 func handoffp(_p_ *p) {
2003 // handoffp must start an M in any situation where
2004 // findrunnable would return a G to run on _p_.
2006 // if it has local work, start it straight away
2007 if !runqempty(_p_) || sched.runqsize != 0 {
2008 startm(_p_, false)
2009 return
2011 // if it has GC work, start it straight away
2012 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) {
2013 startm(_p_, false)
2014 return
2016 // no local work, check that there are no spinning/idle M's,
2017 // otherwise our help is not required
2018 if atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) == 0 && atomic.Cas(&sched.nmspinning, 0, 1) { // TODO: fast atomic
2019 startm(_p_, true)
2020 return
2022 lock(&sched.lock)
2023 if sched.gcwaiting != 0 {
2024 _p_.status = _Pgcstop
2025 sched.stopwait--
2026 if sched.stopwait == 0 {
2027 notewakeup(&sched.stopnote)
2029 unlock(&sched.lock)
2030 return
2032 if _p_.runSafePointFn != 0 && atomic.Cas(&_p_.runSafePointFn, 1, 0) {
2033 sched.safePointFn(_p_)
2034 sched.safePointWait--
2035 if sched.safePointWait == 0 {
2036 notewakeup(&sched.safePointNote)
2039 if sched.runqsize != 0 {
2040 unlock(&sched.lock)
2041 startm(_p_, false)
2042 return
2044 // If this is the last running P and nobody is polling network,
2045 // need to wakeup another M to poll network.
2046 if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 {
2047 unlock(&sched.lock)
2048 startm(_p_, false)
2049 return
2051 pidleput(_p_)
2052 unlock(&sched.lock)
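// To summarize the decision ladder in handoffp above: start an M immediately
// if the P has local, global, or GC mark work; start a spinning M if there are
// no spinning or idle Ms; hand the P to the stop-the-world or safe-point
// machinery if one is pending; start an M for the last running P when nobody
// is polling the network; otherwise put the P on the idle list.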
2055 // Tries to add one more P to execute G's.
2056 // Called when a G is made runnable (newproc, ready).
2057 func wakep() {
2058 // be conservative about spinning threads
2059 if !atomic.Cas(&sched.nmspinning, 0, 1) {
2060 return
2062 startm(nil, true)
2065 // Stops execution of the current m that is locked to a g until the g is runnable again.
2066 // Returns with acquired P.
2067 func stoplockedm() {
2068 _g_ := getg()
2070 if _g_.m.lockedg == 0 || _g_.m.lockedg.ptr().lockedm.ptr() != _g_.m {
2071 throw("stoplockedm: inconsistent locking")
2073 if _g_.m.p != 0 {
2074 // Schedule another M to run this p.
2075 _p_ := releasep()
2076 handoffp(_p_)
2078 incidlelocked(1)
2079 // Wait until another thread schedules lockedg again.
2080 notesleep(&_g_.m.park)
2081 noteclear(&_g_.m.park)
2082 status := readgstatus(_g_.m.lockedg.ptr())
2083 if status&^_Gscan != _Grunnable {
2084 print("runtime:stoplockedm: g is not Grunnable or Gscanrunnable\n")
2085 dumpgstatus(_g_)
2086 throw("stoplockedm: not runnable")
2088 acquirep(_g_.m.nextp.ptr())
2089 _g_.m.nextp = 0
2092 // Schedules the locked m to run the locked gp.
2093 // May run during STW, so write barriers are not allowed.
2094 //go:nowritebarrierrec
2095 func startlockedm(gp *g) {
2096 _g_ := getg()
2098 mp := gp.lockedm.ptr()
2099 if mp == _g_.m {
2100 throw("startlockedm: locked to me")
2102 if mp.nextp != 0 {
2103 throw("startlockedm: m has p")
2105 // directly handoff current P to the locked m
2106 incidlelocked(-1)
2107 _p_ := releasep()
2108 mp.nextp.set(_p_)
2109 notewakeup(&mp.park)
2110 stopm()
2113 // Stops the current m for stopTheWorld.
2114 // Returns when the world is restarted.
2115 func gcstopm() {
2116 _g_ := getg()
2118 if sched.gcwaiting == 0 {
2119 throw("gcstopm: not waiting for gc")
2121 if _g_.m.spinning {
2122 _g_.m.spinning = false
2123 // OK to just drop nmspinning here,
2124 // startTheWorld will unpark threads as necessary.
2125 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
2126 throw("gcstopm: negative nmspinning")
2129 _p_ := releasep()
2130 lock(&sched.lock)
2131 _p_.status = _Pgcstop
2132 sched.stopwait--
2133 if sched.stopwait == 0 {
2134 notewakeup(&sched.stopnote)
2136 unlock(&sched.lock)
2137 stopm()
2140 // Schedules gp to run on the current M.
2141 // If inheritTime is true, gp inherits the remaining time in the
2142 // current time slice. Otherwise, it starts a new time slice.
2143 // Never returns.
2145 // Write barriers are allowed because this is called immediately after
2146 // acquiring a P in several places.
2148 //go:yeswritebarrierrec
2149 func execute(gp *g, inheritTime bool) {
2150 _g_ := getg()
2152 casgstatus(gp, _Grunnable, _Grunning)
2153 gp.waitsince = 0
2154 gp.preempt = false
2155 if !inheritTime {
2156 _g_.m.p.ptr().schedtick++
2158 _g_.m.curg = gp
2159 gp.m = _g_.m
2161 // Check whether the profiler needs to be turned on or off.
2162 hz := sched.profilehz
2163 if _g_.m.profilehz != hz {
2164 setThreadCPUProfiler(hz)
2167 if trace.enabled {
2168 // GoSysExit has to happen when we have a P, but before GoStart.
2169 // So we emit it here.
2170 if gp.syscallsp != 0 && gp.sysblocktraced {
2171 traceGoSysExit(gp.sysexitticks)
2173 traceGoStart()
2176 gogo(gp)
2179 // Finds a runnable goroutine to execute.
2180 // Tries to steal from other P's, get g from global queue, poll network.
2181 func findrunnable() (gp *g, inheritTime bool) {
2182 _g_ := getg()
2184 // The conditions here and in handoffp must agree: if
2185 // findrunnable would return a G to run, handoffp must start
2186 // an M.
2188 top:
2189 _p_ := _g_.m.p.ptr()
2190 if sched.gcwaiting != 0 {
2191 gcstopm()
2192 goto top
2194 if _p_.runSafePointFn != 0 {
2195 runSafePointFn()
2197 if fingwait && fingwake {
2198 if gp := wakefing(); gp != nil {
2199 ready(gp, 0, true)
2202 if *cgo_yield != nil {
2203 asmcgocall(*cgo_yield, nil)
2206 // local runq
2207 if gp, inheritTime := runqget(_p_); gp != nil {
2208 return gp, inheritTime
2211 // global runq
2212 if sched.runqsize != 0 {
2213 lock(&sched.lock)
2214 gp := globrunqget(_p_, 0)
2215 unlock(&sched.lock)
2216 if gp != nil {
2217 return gp, false
2221 // Poll network.
2222 // This netpoll is only an optimization before we resort to stealing.
2223 // We can safely skip it if there are no waiters or a thread is blocked
2224 // in netpoll already. If there is any kind of logical race with that
2225 // blocked thread (e.g. it has already returned from netpoll, but does
2226 // not set lastpoll yet), this thread will do blocking netpoll below
2227 // anyway.
2228 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Load64(&sched.lastpoll) != 0 {
2229 if gp := netpoll(false); gp != nil { // non-blocking
2230 // netpoll returns list of goroutines linked by schedlink.
2231 injectglist(gp.schedlink.ptr())
2232 casgstatus(gp, _Gwaiting, _Grunnable)
2233 if trace.enabled {
2234 traceGoUnpark(gp, 0)
2236 return gp, false
2240 // Steal work from other P's.
2241 procs := uint32(gomaxprocs)
2242 if atomic.Load(&sched.npidle) == procs-1 {
2243 // Either GOMAXPROCS=1 or everybody, except for us, is idle already.
2244 // New work can appear from returning syscall/cgocall, network or timers.
2245 // None of those submit to local run queues, so there is no point in stealing.
2246 goto stop
2248 // If number of spinning M's >= number of busy P's, block.
2249 // This is necessary to prevent excessive CPU consumption
2250 // when GOMAXPROCS>>1 but the program parallelism is low.
2251 if !_g_.m.spinning && 2*atomic.Load(&sched.nmspinning) >= procs-atomic.Load(&sched.npidle) {
2252 goto stop
2254 if !_g_.m.spinning {
2255 _g_.m.spinning = true
2256 atomic.Xadd(&sched.nmspinning, 1)
2258 for i := 0; i < 4; i++ {
2259 for enum := stealOrder.start(fastrand()); !enum.done(); enum.next() {
2260 if sched.gcwaiting != 0 {
2261 goto top
2263 stealRunNextG := i > 2 // first look for ready queues with more than 1 g
2264 if gp := runqsteal(_p_, allp[enum.position()], stealRunNextG); gp != nil {
2265 return gp, false
2270 stop:
2272 // We have nothing to do. If we're in the GC mark phase, can
2273 // safely scan and blacken objects, and have work to do, run
2274 // idle-time marking rather than give up the P.
2275 if gcBlackenEnabled != 0 && _p_.gcBgMarkWorker != 0 && gcMarkWorkAvailable(_p_) {
2276 _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
2277 gp := _p_.gcBgMarkWorker.ptr()
2278 casgstatus(gp, _Gwaiting, _Grunnable)
2279 if trace.enabled {
2280 traceGoUnpark(gp, 0)
2282 return gp, false
2285 // wasm only:
2286 // Check if a goroutine is waiting for a callback from the WebAssembly host.
2287 // If yes, pause execution until a callback is triggered.
2288 if pauseSchedulerUntilCallback() {
2289 // A callback was triggered and caused at least one goroutine to wake up.
2290 goto top
2293 // Before we drop our P, make a snapshot of the allp slice,
2294 // which can change underfoot once we no longer block
2295 // safe-points. We don't need to snapshot the contents because
2296 // everything up to cap(allp) is immutable.
2297 allpSnapshot := allp
2299 // return P and block
2300 lock(&sched.lock)
2301 if sched.gcwaiting != 0 || _p_.runSafePointFn != 0 {
2302 unlock(&sched.lock)
2303 goto top
2305 if sched.runqsize != 0 {
2306 gp := globrunqget(_p_, 0)
2307 unlock(&sched.lock)
2308 return gp, false
2310 if releasep() != _p_ {
2311 throw("findrunnable: wrong p")
2313 pidleput(_p_)
2314 unlock(&sched.lock)
2316 // Delicate dance: thread transitions from spinning to non-spinning state,
2317 // potentially concurrently with submission of new goroutines. We must
2318 // drop nmspinning first and then check all per-P queues again (with
2319 // #StoreLoad memory barrier in between). If we do it the other way around,
2320 // another thread can submit a goroutine after we've checked all run queues
2321 // but before we drop nmspinning; as the result nobody will unpark a thread
2322 // to run the goroutine.
2323 // If we discover new work below, we need to restore m.spinning as a signal
2324 // for resetspinning to unpark a new worker thread (because there can be more
2325 // than one starving goroutine). However, if after discovering new work
2326 // we also observe no idle Ps, it is OK to just park the current thread:
2327 // the system is fully loaded so no spinning threads are required.
2328 // Also see "Worker thread parking/unparking" comment at the top of the file.
2329 wasSpinning := _g_.m.spinning
2330 if _g_.m.spinning {
2331 _g_.m.spinning = false
2332 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
2333 throw("findrunnable: negative nmspinning")
2337 // check all runqueues once again
2338 for _, _p_ := range allpSnapshot {
2339 if !runqempty(_p_) {
2340 lock(&sched.lock)
2341 _p_ = pidleget()
2342 unlock(&sched.lock)
2343 if _p_ != nil {
2344 acquirep(_p_)
2345 if wasSpinning {
2346 _g_.m.spinning = true
2347 atomic.Xadd(&sched.nmspinning, 1)
2349 goto top
2351 break
2355 // Check for idle-priority GC work again.
2356 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(nil) {
2357 lock(&sched.lock)
2358 _p_ = pidleget()
2359 if _p_ != nil && _p_.gcBgMarkWorker == 0 {
2360 pidleput(_p_)
2361 _p_ = nil
2363 unlock(&sched.lock)
2364 if _p_ != nil {
2365 acquirep(_p_)
2366 if wasSpinning {
2367 _g_.m.spinning = true
2368 atomic.Xadd(&sched.nmspinning, 1)
2370 // Go back to idle GC check.
2371 goto stop
2375 // poll network
2376 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Xchg64(&sched.lastpoll, 0) != 0 {
2377 if _g_.m.p != 0 {
2378 throw("findrunnable: netpoll with p")
2380 if _g_.m.spinning {
2381 throw("findrunnable: netpoll with spinning")
2383 gp := netpoll(true) // block until new work is available
2384 atomic.Store64(&sched.lastpoll, uint64(nanotime()))
2385 if gp != nil {
2386 lock(&sched.lock)
2387 _p_ = pidleget()
2388 unlock(&sched.lock)
2389 if _p_ != nil {
2390 acquirep(_p_)
2391 injectglist(gp.schedlink.ptr())
2392 casgstatus(gp, _Gwaiting, _Grunnable)
2393 if trace.enabled {
2394 traceGoUnpark(gp, 0)
2396 return gp, false
2398 injectglist(gp)
2401 stopm()
2402 goto top
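// For reference, the search order in findrunnable above is: local run queue,
// global run queue, non-blocking netpoll, stealing from other Ps, idle-priority
// GC mark work, then (after releasing the P) one more pass over all run queues,
// a second idle-GC check, and a blocking netpoll before parking the M in stopm.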
2405 // pollWork returns true if there is non-background work this P could
2406 // be doing. This is a fairly lightweight check to be used for
2407 // background work loops, like idle GC. It checks a subset of the
2408 // conditions checked by the actual scheduler.
2409 func pollWork() bool {
2410 if sched.runqsize != 0 {
2411 return true
2413 p := getg().m.p.ptr()
2414 if !runqempty(p) {
2415 return true
2417 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && sched.lastpoll != 0 {
2418 if gp := netpoll(false); gp != nil {
2419 injectglist(gp)
2420 return true
2423 return false
2426 func resetspinning() {
2427 _g_ := getg()
2428 if !_g_.m.spinning {
2429 throw("resetspinning: not a spinning m")
2431 _g_.m.spinning = false
2432 nmspinning := atomic.Xadd(&sched.nmspinning, -1)
2433 if int32(nmspinning) < 0 {
2434 throw("findrunnable: negative nmspinning")
2436 // M wakeup policy is deliberately somewhat conservative, so check if we
2437 // need to wakeup another P here. See "Worker thread parking/unparking"
2438 // comment at the top of the file for details.
2439 if nmspinning == 0 && atomic.Load(&sched.npidle) > 0 {
2440 wakep()
2444 // Injects the list of runnable G's into the scheduler.
2445 // Can run concurrently with GC.
2446 func injectglist(glist *g) {
2447 if glist == nil {
2448 return
2450 if trace.enabled {
2451 for gp := glist; gp != nil; gp = gp.schedlink.ptr() {
2452 traceGoUnpark(gp, 0)
2455 lock(&sched.lock)
2456 var n int
2457 for n = 0; glist != nil; n++ {
2458 gp := glist
2459 glist = gp.schedlink.ptr()
2460 casgstatus(gp, _Gwaiting, _Grunnable)
2461 globrunqput(gp)
2463 unlock(&sched.lock)
2464 for ; n != 0 && sched.npidle != 0; n-- {
2465 startm(nil, false)
2469 // One round of scheduler: find a runnable goroutine and execute it.
2470 // Never returns.
2471 func schedule() {
2472 _g_ := getg()
2474 if _g_.m.locks != 0 {
2475 throw("schedule: holding locks")
2478 if _g_.m.lockedg != 0 {
2479 stoplockedm()
2480 execute(_g_.m.lockedg.ptr(), false) // Never returns.
2483 // We should not schedule away from a g that is executing a cgo call,
2484 // since the cgo call is using the m's g0 stack.
2485 if _g_.m.incgo {
2486 throw("schedule: in cgo")
2489 top:
2490 if sched.gcwaiting != 0 {
2491 gcstopm()
2492 goto top
2494 if _g_.m.p.ptr().runSafePointFn != 0 {
2495 runSafePointFn()
2498 var gp *g
2499 var inheritTime bool
2500 if trace.enabled || trace.shutdown {
2501 gp = traceReader()
2502 if gp != nil {
2503 casgstatus(gp, _Gwaiting, _Grunnable)
2504 traceGoUnpark(gp, 0)
2507 if gp == nil && gcBlackenEnabled != 0 {
2508 gp = gcController.findRunnableGCWorker(_g_.m.p.ptr())
2510 if gp == nil {
2511 // Check the global runnable queue once in a while to ensure fairness.
2512 // Otherwise two goroutines can completely occupy the local runqueue
2513 // by constantly respawning each other.
2514 if _g_.m.p.ptr().schedtick%61 == 0 && sched.runqsize > 0 {
2515 lock(&sched.lock)
2516 gp = globrunqget(_g_.m.p.ptr(), 1)
2517 unlock(&sched.lock)
2520 if gp == nil {
2521 gp, inheritTime = runqget(_g_.m.p.ptr())
2522 if gp != nil && _g_.m.spinning {
2523 throw("schedule: spinning with local work")
2526 // Because gccgo does not implement preemption as a stack check,
2527 // we need to check for preemption here for fairness.
2528 // Otherwise goroutines on the local queue may starve
2529 // goroutines on the global queue.
2530 // Since we preempt by storing the goroutine on the global
2531 // queue, this is the only place we need to check preempt.
2532 // This does not call checkPreempt because gp is not running.
2533 if gp != nil && gp.preempt {
2534 gp.preempt = false
2535 lock(&sched.lock)
2536 globrunqput(gp)
2537 unlock(&sched.lock)
2538 goto top
2541 if gp == nil {
2542 gp, inheritTime = findrunnable() // blocks until work is available
2545 // This thread is going to run a goroutine and is not spinning anymore,
2546 // so if it was marked as spinning we need to reset it now and potentially
2547 // start a new spinning M.
2548 if _g_.m.spinning {
2549 resetspinning()
2552 if gp.lockedm != 0 {
2553 // Hands off own p to the locked m,
2554 // then blocks waiting for a new p.
2555 startlockedm(gp)
2556 goto top
2559 execute(gp, inheritTime)
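// In short, one round of schedule above picks work in this order: the trace
// reader (when tracing), a GC mark worker (when marking), the global run queue
// on every 61st schedtick for fairness, the local run queue, and finally
// findrunnable, which blocks until some work appears.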
2562 // dropg removes the association between m and the current goroutine m->curg (gp for short).
2563 // Typically a caller sets gp's status away from Grunning and then
2564 // immediately calls dropg to finish the job. The caller is also responsible
2565 // for arranging that gp will be restarted using ready at an
2566 // appropriate time. After calling dropg and arranging for gp to be
2567 // readied later, the caller can do other work but eventually should
2568 // call schedule to restart the scheduling of goroutines on this m.
2569 func dropg() {
2570 _g_ := getg()
2572 setMNoWB(&_g_.m.curg.m, nil)
2573 setGNoWB(&_g_.m.curg, nil)
2576 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
2577 unlock((*mutex)(lock))
2578 return true
2581 // park continuation on g0.
2582 func park_m(gp *g) {
2583 _g_ := getg()
2585 if trace.enabled {
2586 traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip)
2589 casgstatus(gp, _Grunning, _Gwaiting)
2590 dropg()
2592 if _g_.m.waitunlockf != nil {
2593 fn := *(*func(*g, unsafe.Pointer) bool)(unsafe.Pointer(&_g_.m.waitunlockf))
2594 ok := fn(gp, _g_.m.waitlock)
2595 _g_.m.waitunlockf = nil
2596 _g_.m.waitlock = nil
2597 if !ok {
2598 if trace.enabled {
2599 traceGoUnpark(gp, 2)
2601 casgstatus(gp, _Gwaiting, _Grunnable)
2602 execute(gp, true) // Schedule it back, never returns.
2605 schedule()
2608 func goschedImpl(gp *g) {
2609 status := readgstatus(gp)
2610 if status&^_Gscan != _Grunning {
2611 dumpgstatus(gp)
2612 throw("bad g status")
2614 casgstatus(gp, _Grunning, _Grunnable)
2615 dropg()
2616 lock(&sched.lock)
2617 globrunqput(gp)
2618 unlock(&sched.lock)
2620 schedule()
2623 // Gosched continuation on g0.
2624 func gosched_m(gp *g) {
2625 if trace.enabled {
2626 traceGoSched()
2628 goschedImpl(gp)
2631 // goschedguarded is a forbidden-states-avoided version of gosched_m
2632 func goschedguarded_m(gp *g) {
2634 if gp.m.locks != 0 || gp.m.mallocing != 0 || gp.m.preemptoff != "" || gp.m.p.ptr().status != _Prunning {
2635 gogo(gp) // never return
2638 if trace.enabled {
2639 traceGoSched()
2641 goschedImpl(gp)
2644 func gopreempt_m(gp *g) {
2645 if trace.enabled {
2646 traceGoPreempt()
2648 goschedImpl(gp)
2651 // Finishes execution of the current goroutine.
2652 func goexit1() {
2653 if trace.enabled {
2654 traceGoEnd()
2656 mcall(goexit0)
2659 // goexit continuation on g0.
2660 func goexit0(gp *g) {
2661 _g_ := getg()
2663 casgstatus(gp, _Grunning, _Gdead)
2664 if isSystemGoroutine(gp) {
2665 atomic.Xadd(&sched.ngsys, -1)
2666 gp.isSystemGoroutine = false
2668 gp.m = nil
2669 locked := gp.lockedm != 0
2670 gp.lockedm = 0
2671 _g_.m.lockedg = 0
2672 gp.entry = nil
2673 gp.paniconfault = false
2674 gp._defer = nil // should be true already but just in case.
2675 gp._panic = nil // non-nil for Goexit during panic. points at stack-allocated data.
2676 gp.writebuf = nil
2677 gp.waitreason = 0
2678 gp.param = nil
2679 gp.labels = nil
2680 gp.timer = nil
2682 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
2683 // Flush assist credit to the global pool. This gives
2684 // better information to pacing if the application is
2685 // rapidly creating and exiting goroutines.
2686 scanCredit := int64(gcController.assistWorkPerByte * float64(gp.gcAssistBytes))
2687 atomic.Xaddint64(&gcController.bgScanCredit, scanCredit)
2688 gp.gcAssistBytes = 0
2691 // Note that gp's stack scan is now "valid" because it has no
2692 // stack.
2693 gp.gcscanvalid = true
2694 dropg()
2696 if GOARCH == "wasm" { // no threads yet on wasm
2697 gfput(_g_.m.p.ptr(), gp)
2698 schedule() // never returns
2701 if _g_.m.lockedInt != 0 {
2702 print("invalid m->lockedInt = ", _g_.m.lockedInt, "\n")
2703 throw("internal lockOSThread error")
2705 _g_.m.lockedExt = 0
2706 gfput(_g_.m.p.ptr(), gp)
2707 if locked {
2708 // The goroutine may have locked this thread because
2709 // it put it in an unusual kernel state. Kill it
2710 // rather than returning it to the thread pool.
2712 // Return to mstart, which will release the P and exit
2713 // the thread.
2714 if GOOS != "plan9" { // See golang.org/issue/22227.
2715 _g_.m.exiting = true
2716 gogo(_g_.m.g0)
2719 schedule()
2722 // The goroutine g is about to enter a system call.
2723 // Record that it's not using the cpu anymore.
2724 // This is called only from the go syscall library and cgocall,
2725 // not from the low-level system calls used by the runtime.
2727 // The entersyscall function is written in C, so that it can save the
2728 // current register context so that the GC will see them.
2729 // It calls reentersyscall.
2731 // Syscall tracing:
2732 // At the start of a syscall we emit traceGoSysCall to capture the stack trace.
2733 // If the syscall does not block, that is it, we do not emit any other events.
2734 // If the syscall blocks (that is, P is retaken), retaker emits traceGoSysBlock;
2735 // when syscall returns we emit traceGoSysExit and when the goroutine starts running
2736 // (potentially instantly, if exitsyscallfast returns true) we emit traceGoStart.
2737 // To ensure that traceGoSysExit is emitted strictly after traceGoSysBlock,
2738 // we remember current value of syscalltick in m (_g_.m.syscalltick = _g_.m.p.ptr().syscalltick),
2739 // whoever emits traceGoSysBlock increments p.syscalltick afterwards;
2740 // and we wait for the increment before emitting traceGoSysExit.
2741 // Note that the increment is done even if tracing is not enabled,
2742 // because tracing can be enabled in the middle of syscall. We don't want the wait to hang.
2744 //go:nosplit
2745 //go:noinline
2746 func reentersyscall(pc, sp uintptr) {
2747 _g_ := getg()
2749 // Disable preemption because during this function g is in Gsyscall status,
2750 // but can have an inconsistent g->sched; do not let the GC observe it.
2751 _g_.m.locks++
2753 _g_.syscallsp = sp
2754 _g_.syscallpc = pc
2755 casgstatus(_g_, _Grunning, _Gsyscall)
2757 if trace.enabled {
2758 systemstack(traceGoSysCall)
2761 if atomic.Load(&sched.sysmonwait) != 0 {
2762 systemstack(entersyscall_sysmon)
2765 if _g_.m.p.ptr().runSafePointFn != 0 {
2766 // runSafePointFn may stack split if run on this stack
2767 systemstack(runSafePointFn)
2770 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick
2771 _g_.sysblocktraced = true
2772 _g_.m.mcache = nil
2773 _g_.m.p.ptr().m = 0
2774 atomic.Store(&_g_.m.p.ptr().status, _Psyscall)
2775 if sched.gcwaiting != 0 {
2776 systemstack(entersyscall_gcwait)
2779 _g_.m.locks--
2782 func entersyscall_sysmon() {
2783 lock(&sched.lock)
2784 if atomic.Load(&sched.sysmonwait) != 0 {
2785 atomic.Store(&sched.sysmonwait, 0)
2786 notewakeup(&sched.sysmonnote)
2788 unlock(&sched.lock)
2791 func entersyscall_gcwait() {
2792 _g_ := getg()
2793 _p_ := _g_.m.p.ptr()
2795 lock(&sched.lock)
2796 if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) {
2797 if trace.enabled {
2798 traceGoSysBlock(_p_)
2799 traceProcStop(_p_)
2801 _p_.syscalltick++
2802 if sched.stopwait--; sched.stopwait == 0 {
2803 notewakeup(&sched.stopnote)
2806 unlock(&sched.lock)
2809 func reentersyscallblock(pc, sp uintptr) {
2810 _g_ := getg()
2812 _g_.m.locks++ // see comment in entersyscall
2813 _g_.throwsplit = true
2814 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick
2815 _g_.sysblocktraced = true
2816 _g_.m.p.ptr().syscalltick++
2818 // Leave SP around for GC and traceback.
2819 _g_.syscallsp = sp
2820 _g_.syscallpc = pc
2821 casgstatus(_g_, _Grunning, _Gsyscall)
2822 systemstack(entersyscallblock_handoff)
2824 _g_.m.locks--
2827 func entersyscallblock_handoff() {
2828 if trace.enabled {
2829 traceGoSysCall()
2830 traceGoSysBlock(getg().m.p.ptr())
2832 handoffp(releasep())
2835 // The goroutine g exited its system call.
2836 // Arrange for it to run on a cpu again.
2837 // This is called only from the go syscall library, not
2838 // from the low-level system calls used by the runtime.
2840 // Write barriers are not allowed because our P may have been stolen.
2842 //go:nosplit
2843 //go:nowritebarrierrec
2844 func exitsyscall() {
2845 _g_ := getg()
2847 _g_.m.locks++ // see comment in entersyscall
2849 _g_.waitsince = 0
2850 oldp := _g_.m.p.ptr()
2851 if exitsyscallfast() {
2852 if _g_.m.mcache == nil {
2853 throw("lost mcache")
2855 if trace.enabled {
2856 if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
2857 systemstack(traceGoStart)
2860 // There's a cpu for us, so we can run.
2861 _g_.m.p.ptr().syscalltick++
2862 // We need to cas the status and scan before resuming...
2863 casgstatus(_g_, _Gsyscall, _Grunning)
2865 exitsyscallclear(_g_)
2866 _g_.m.locks--
2867 _g_.throwsplit = false
2869 // Check preemption, since unlike gc we don't check on
2870 // every call.
2871 if getg().preempt {
2872 checkPreempt()
2875 return
2878 _g_.sysexitticks = 0
2879 if trace.enabled {
2880 // Wait till traceGoSysBlock event is emitted.
2881 // This ensures consistency of the trace (the goroutine is started after it is blocked).
2882 for oldp != nil && oldp.syscalltick == _g_.m.syscalltick {
2883 osyield()
2885 // We can't trace syscall exit right now because we don't have a P.
2886 // Tracing code can invoke write barriers that cannot run without a P.
2887 // So instead we remember the syscall exit time and emit the event
2888 // in execute when we have a P.
2889 _g_.sysexitticks = cputicks()
2892 _g_.m.locks--
2894 // Call the scheduler.
2895 mcall(exitsyscall0)
2897 if _g_.m.mcache == nil {
2898 throw("lost mcache")
2901 // Scheduler returned, so we're allowed to run now.
2902 // Delete the syscallsp information that we left for
2903 // the garbage collector during the system call.
2904 // Must wait until now because until gosched returns
2905 // we don't know for sure that the garbage collector
2906 // is not running.
2907 exitsyscallclear(_g_)
2909 _g_.m.p.ptr().syscalltick++
2910 _g_.throwsplit = false
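// exitsyscall above thus has a fast path and a slow path: exitsyscallfast
// either reacquires the P this goroutine held before the syscall or grabs an
// idle P, so the goroutine keeps running on the current M; otherwise
// exitsyscall0 runs on g0, marks the goroutine runnable, and either executes
// it on a freshly acquired idle P or queues it globally and parks this M.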
2913 //go:nosplit
2914 func exitsyscallfast() bool {
2915 _g_ := getg()
2917 // Freezetheworld sets stopwait but does not retake P's.
2918 if sched.stopwait == freezeStopWait {
2919 _g_.m.mcache = nil
2920 _g_.m.p = 0
2921 return false
2924 // Try to re-acquire the last P.
2925 if _g_.m.p != 0 && _g_.m.p.ptr().status == _Psyscall && atomic.Cas(&_g_.m.p.ptr().status, _Psyscall, _Prunning) {
2926 // There's a cpu for us, so we can run.
2927 exitsyscallfast_reacquired()
2928 return true
2931 // Try to get any other idle P.
2932 oldp := _g_.m.p.ptr()
2933 _g_.m.mcache = nil
2934 _g_.m.p = 0
2935 if sched.pidle != 0 {
2936 var ok bool
2937 systemstack(func() {
2938 ok = exitsyscallfast_pidle()
2939 if ok && trace.enabled {
2940 if oldp != nil {
2941 // Wait till traceGoSysBlock event is emitted.
2942 // This ensures consistency of the trace (the goroutine is started after it is blocked).
2943 for oldp.syscalltick == _g_.m.syscalltick {
2944 osyield()
2947 traceGoSysExit(0)
2950 if ok {
2951 return true
2954 return false
2957 // exitsyscallfast_reacquired is the exitsyscall path on which this G
2958 // has successfully reacquired the P it was running on before the
2959 // syscall.
2961 // This function is allowed to have write barriers because exitsyscall
2962 // has acquired a P at this point.
2964 //go:yeswritebarrierrec
2965 //go:nosplit
2966 func exitsyscallfast_reacquired() {
2967 _g_ := getg()
2968 _g_.m.mcache = _g_.m.p.ptr().mcache
2969 _g_.m.p.ptr().m.set(_g_.m)
2970 if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
2971 if trace.enabled {
2972 // The p was retaken and then entered a syscall again (since _g_.m.syscalltick has changed).
2973 // traceGoSysBlock for this syscall was already emitted,
2974 // but here we effectively retake the p from the new syscall running on the same p.
2975 systemstack(func() {
2976 // Denote blocking of the new syscall.
2977 traceGoSysBlock(_g_.m.p.ptr())
2978 // Denote completion of the current syscall.
2979 traceGoSysExit(0)
2982 _g_.m.p.ptr().syscalltick++
2986 func exitsyscallfast_pidle() bool {
2987 lock(&sched.lock)
2988 _p_ := pidleget()
2989 if _p_ != nil && atomic.Load(&sched.sysmonwait) != 0 {
2990 atomic.Store(&sched.sysmonwait, 0)
2991 notewakeup(&sched.sysmonnote)
2993 unlock(&sched.lock)
2994 if _p_ != nil {
2995 acquirep(_p_)
2996 return true
2998 return false
3001 // exitsyscall slow path on g0.
3002 // Failed to acquire P, enqueue gp as runnable.
3004 //go:nowritebarrierrec
3005 func exitsyscall0(gp *g) {
3006 _g_ := getg()
3008 casgstatus(gp, _Gsyscall, _Grunnable)
3009 dropg()
3010 lock(&sched.lock)
3011 _p_ := pidleget()
3012 if _p_ == nil {
3013 globrunqput(gp)
3014 } else if atomic.Load(&sched.sysmonwait) != 0 {
3015 atomic.Store(&sched.sysmonwait, 0)
3016 notewakeup(&sched.sysmonnote)
3018 unlock(&sched.lock)
3019 if _p_ != nil {
3020 acquirep(_p_)
3021 execute(gp, false) // Never returns.
3023 if _g_.m.lockedg != 0 {
3024 // Wait until another thread schedules gp and so m again.
3025 stoplockedm()
3026 execute(gp, false) // Never returns.
3028 stopm()
3029 schedule() // Never returns.
3032 // exitsyscallclear clears GC-related information that we only track
3033 // during a syscall.
3034 func exitsyscallclear(gp *g) {
3035 // Garbage collector isn't running (since we are), so okay to
3036 // clear syscallsp.
3037 gp.syscallsp = 0
3039 gp.gcstack = 0
3040 gp.gcnextsp = 0
3041 memclrNoHeapPointers(unsafe.Pointer(&gp.gcregs), unsafe.Sizeof(gp.gcregs))
3044 // Code generated by cgo, and some library code, calls syscall.Entersyscall
3045 // and syscall.Exitsyscall.
3047 //go:linkname syscall_entersyscall syscall.Entersyscall
3048 //go:nosplit
3049 func syscall_entersyscall() {
3050 entersyscall()
3053 //go:linkname syscall_exitsyscall syscall.Exitsyscall
3054 //go:nosplit
3055 func syscall_exitsyscall() {
3056 exitsyscall()
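// As an illustrative sketch only, cgo-generated stubs and the library code
// mentioned above bracket a potentially blocking C or system call like this
// (c_blocking_call is a hypothetical placeholder):
//
//	syscall.Entersyscall() // reentersyscall: mark g/P as in a syscall so the P may be retaken
//	c_blocking_call()
//	syscall.Exitsyscall()  // exitsyscall: reacquire a P before running Go code again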
3059 func beforefork() {
3060 gp := getg().m.curg
3062 // Block signals during a fork, so that the child does not run
3063 // a signal handler before exec if a signal is sent to the process
3064 // group. See issue #18600.
3065 gp.m.locks++
3066 msigsave(gp.m)
3067 sigblock()
3070 // Called from syscall package before fork.
3071 //go:linkname syscall_runtime_BeforeFork syscall.runtime_BeforeFork
3072 //go:nosplit
3073 func syscall_runtime_BeforeFork() {
3074 systemstack(beforefork)
3077 func afterfork() {
3078 gp := getg().m.curg
3080 msigrestore(gp.m.sigmask)
3082 gp.m.locks--
3085 // Called from syscall package after fork in parent.
3086 //go:linkname syscall_runtime_AfterFork syscall.runtime_AfterFork
3087 //go:nosplit
3088 func syscall_runtime_AfterFork() {
3089 systemstack(afterfork)
3092 // inForkedChild is true while manipulating signals in the child process.
3093 // This is used to avoid calling libc functions in case we are using vfork.
3094 var inForkedChild bool
3096 // Called from syscall package after fork in child.
3097 // It resets non-sigignored signals to the default handler, and
3098 // restores the signal mask in preparation for the exec.
3100 // Because this might be called during a vfork, and therefore may be
3101 // temporarily sharing address space with the parent process, this must
3102 // not change any global variables or call into C code that may do so.
3104 //go:linkname syscall_runtime_AfterForkInChild syscall.runtime_AfterForkInChild
3105 //go:nosplit
3106 //go:nowritebarrierrec
3107 func syscall_runtime_AfterForkInChild() {
3108 // It's OK to change the global variable inForkedChild here
3109 // because we are going to change it back. There is no race here,
3110 // because if we are sharing address space with the parent process,
3111 // then the parent process can not be running concurrently.
3112 inForkedChild = true
3114 clearSignalHandlers()
3116 // When we are the child we are the only thread running,
3117 // so we know that nothing else has changed gp.m.sigmask.
3118 msigrestore(getg().m.sigmask)
3120 inForkedChild = false
3123 // Called from syscall package before Exec.
3124 //go:linkname syscall_runtime_BeforeExec syscall.runtime_BeforeExec
3125 func syscall_runtime_BeforeExec() {
3126 // Prevent thread creation during exec.
3127 execLock.lock()
3130 // Called from syscall package after Exec.
3131 //go:linkname syscall_runtime_AfterExec syscall.runtime_AfterExec
3132 func syscall_runtime_AfterExec() {
3133 execLock.unlock()
3136 // Create a new g running fn passing arg as the single argument.
3137 // Put it on the queue of g's waiting to run.
3138 // The compiler turns a go statement into a call to this.
3139 //go:linkname newproc __go_go
3140 func newproc(fn uintptr, arg unsafe.Pointer) *g {
3141 _g_ := getg()
3143 if fn == 0 {
3144 _g_.m.throwing = -1 // do not dump full stacks
3145 throw("go of nil func value")
3147 _g_.m.locks++ // disable preemption because it can be holding p in a local var
3149 _p_ := _g_.m.p.ptr()
3150 newg := gfget(_p_)
3151 var (
3152 sp unsafe.Pointer
3153 spsize uintptr
3155 if newg == nil {
3156 newg = malg(true, false, &sp, &spsize)
3157 casgstatus(newg, _Gidle, _Gdead)
3158 allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack.
3159 } else {
3160 resetNewG(newg, &sp, &spsize)
3162 newg.traceback = 0
3164 if readgstatus(newg) != _Gdead {
3165 throw("newproc1: new g is not Gdead")
3168 // Store the C function pointer into entryfn, take the address
3169 // of entryfn, convert it to a Go function value, and store
3170 // that in entry.
3171 newg.entryfn = fn
3172 var entry func(unsafe.Pointer)
3173 *(*unsafe.Pointer)(unsafe.Pointer(&entry)) = unsafe.Pointer(&newg.entryfn)
3174 newg.entry = entry
3176 newg.param = arg
3177 newg.gopc = getcallerpc()
3178 newg.startpc = fn
3179 if _g_.m.curg != nil {
3180 newg.labels = _g_.m.curg.labels
3182 if isSystemGoroutine(newg) {
3183 atomic.Xadd(&sched.ngsys, +1)
3185 newg.gcscanvalid = false
3186 casgstatus(newg, _Gdead, _Grunnable)
3188 if _p_.goidcache == _p_.goidcacheend {
3189 // Sched.goidgen is the last allocated id,
3190 // this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
3191 // At startup sched.goidgen=0, so main goroutine receives goid=1.
3192 _p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch)
3193 _p_.goidcache -= _GoidCacheBatch - 1
3194 _p_.goidcacheend = _p_.goidcache + _GoidCacheBatch
3196 newg.goid = int64(_p_.goidcache)
3197 _p_.goidcache++
3198 if trace.enabled {
3199 traceGoCreate(newg, newg.startpc)
3202 makeGContext(newg, sp, spsize)
3204 runqput(_p_, newg, true)
3206 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 && mainStarted {
3207 wakep()
3209 _g_.m.locks--
3210 return newg
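// For reference, a statement such as
//
//	go fn(arg)
//
// is turned by the gccgo compiler into a call to this newproc (linknamed
// __go_go) with the function and its single argument; the details of how the
// argument is packaged are compiler-internal and not shown here.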
3213 // expectedSystemGoroutines counts the number of goroutines expected
3214 // to mark themselves as system goroutines. After they mark themselves
3215 // by calling setSystemGoroutine, this is decremented. NumGoroutines
3216 // uses this to wait for all system goroutines to mark themselves
3217 // before it counts them.
3218 var expectedSystemGoroutines uint32
3220 // expectSystemGoroutine is called when starting a goroutine that will
3221 // call setSystemGoroutine. It increments expectedSystemGoroutines.
3222 func expectSystemGoroutine() {
3223 atomic.Xadd(&expectedSystemGoroutines, +1)
3226 // waitForSystemGoroutines waits for all currently expected system
3227 // goroutines to register themselves.
3228 func waitForSystemGoroutines() {
3229 for atomic.Load(&expectedSystemGoroutines) > 0 {
3230 Gosched()
3231 osyield()
3235 // setSystemGoroutine marks this goroutine as a "system goroutine".
3236 // In the gc toolchain this is done by comparing startpc to a list of
3237 // saved special PCs. In gccgo that approach does not work as startpc
3238 // is often a thunk that invokes the real function with arguments,
3239 // so the thunk address never matches the saved special PCs. Instead,
3240 // since there are only a limited number of "system goroutines",
3241 // we force each one to mark itself as special.
3242 func setSystemGoroutine() {
3243 getg().isSystemGoroutine = true
3244 atomic.Xadd(&sched.ngsys, +1)
3245 atomic.Xadd(&expectedSystemGoroutines, -1)
3248 // saveAncestors copies previous ancestors of the given caller g and
3249 // includes info for the current caller into a new set of tracebacks for
3250 // a g being created.
3251 func saveAncestors(callergp *g) *[]ancestorInfo {
3252 // Copy all prior info, except for the root goroutine (goid 0).
3253 if debug.tracebackancestors <= 0 || callergp.goid == 0 {
3254 return nil
3256 var callerAncestors []ancestorInfo
3257 if callergp.ancestors != nil {
3258 callerAncestors = *callergp.ancestors
3260 n := int32(len(callerAncestors)) + 1
3261 if n > debug.tracebackancestors {
3262 n = debug.tracebackancestors
3264 ancestors := make([]ancestorInfo, n)
3265 copy(ancestors[1:], callerAncestors)
3267 var pcs [_TracebackMaxFrames]uintptr
3268 // FIXME: This should get a traceback of callergp.
3269 // npcs := gcallers(callergp, 0, pcs[:])
3270 npcs := 0
3271 ipcs := make([]uintptr, npcs)
3272 copy(ipcs, pcs[:])
3273 ancestors[0] = ancestorInfo{
3274 pcs: ipcs,
3275 goid: callergp.goid,
3276 gopc: callergp.gopc,
3279 ancestorsp := new([]ancestorInfo)
3280 *ancestorsp = ancestors
3281 return ancestorsp
3284 // Put on gfree list.
3285 // If local list is too long, transfer a batch to the global list.
3286 func gfput(_p_ *p, gp *g) {
3287 if readgstatus(gp) != _Gdead {
3288 throw("gfput: bad status (not Gdead)")
3291 gp.schedlink.set(_p_.gfree)
3292 _p_.gfree = gp
3293 _p_.gfreecnt++
3294 if _p_.gfreecnt >= 64 {
3295 lock(&sched.gflock)
3296 for _p_.gfreecnt >= 32 {
3297 _p_.gfreecnt--
3298 gp = _p_.gfree
3299 _p_.gfree = gp.schedlink.ptr()
3300 gp.schedlink.set(sched.gfree)
3301 sched.gfree = gp
3302 sched.ngfree++
3304 unlock(&sched.gflock)
3308 // Get from gfree list.
3309 // If local list is empty, grab a batch from global list.
3310 func gfget(_p_ *p) *g {
3311 retry:
3312 gp := _p_.gfree
3313 if gp == nil && sched.gfree != nil {
3314 lock(&sched.gflock)
3315 for _p_.gfreecnt < 32 {
3316 if sched.gfree != nil {
3317 gp = sched.gfree
3318 sched.gfree = gp.schedlink.ptr()
3319 } else {
3320 break
3322 _p_.gfreecnt++
3323 sched.ngfree--
3324 gp.schedlink.set(_p_.gfree)
3325 _p_.gfree = gp
3327 unlock(&sched.gflock)
3328 goto retry
3330 if gp != nil {
3331 _p_.gfree = gp.schedlink.ptr()
3332 _p_.gfreecnt--
3334 return gp
3337 // Purge all cached G's from gfree list to the global list.
3338 func gfpurge(_p_ *p) {
3339 lock(&sched.gflock)
3340 for _p_.gfreecnt != 0 {
3341 _p_.gfreecnt--
3342 gp := _p_.gfree
3343 _p_.gfree = gp.schedlink.ptr()
3344 gp.schedlink.set(sched.gfree)
3345 sched.gfree = gp
3346 sched.ngfree++
3348 unlock(&sched.gflock)
3351 // Breakpoint executes a breakpoint trap.
3352 func Breakpoint() {
3353 breakpoint()
3356 // dolockOSThread is called by LockOSThread and lockOSThread below
3357 // after they modify m.locked. Do not allow preemption during this call,
3358 // or else the m might be different in this function than in the caller.
3359 //go:nosplit
3360 func dolockOSThread() {
3361 if GOARCH == "wasm" {
3362 return // no threads on wasm yet
3364 _g_ := getg()
3365 _g_.m.lockedg.set(_g_)
3366 _g_.lockedm.set(_g_.m)
3369 //go:nosplit
3371 // LockOSThread wires the calling goroutine to its current operating system thread.
3372 // The calling goroutine will always execute in that thread,
3373 // and no other goroutine will execute in it,
3374 // until the calling goroutine has made as many calls to
3375 // UnlockOSThread as to LockOSThread.
3376 // If the calling goroutine exits without unlocking the thread,
3377 // the thread will be terminated.
3379 // All init functions are run on the startup thread. Calling LockOSThread
3380 // from an init function will cause the main function to be invoked on
3381 // that thread.
3383 // A goroutine should call LockOSThread before calling OS services or
3384 // non-Go library functions that depend on per-thread state.
3385 func LockOSThread() {
3386 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
3387 // If we need to start a new thread from the locked
3388 // thread, we need the template thread. Start it now
3389 // while we're in a known-good state.
3390 startTemplateThread()
3392 _g_ := getg()
3393 _g_.m.lockedExt++
3394 if _g_.m.lockedExt == 0 {
3395 _g_.m.lockedExt--
3396 panic("LockOSThread nesting overflow")
3398 dolockOSThread()
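// A typical (illustrative) use of LockOSThread pins a goroutine to one OS
// thread around code that depends on per-thread state, for example:
//
//	runtime.LockOSThread()
//	defer runtime.UnlockOSThread()
//	// ... call OS services or non-Go libraries that rely on thread-local state ...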
3401 //go:nosplit
3402 func lockOSThread() {
3403 getg().m.lockedInt++
3404 dolockOSThread()
3407 // dounlockOSThread is called by UnlockOSThread and unlockOSThread below
3408 // after they update m->locked. Do not allow preemption during this call,
3409 // or else the m might be different in this function than in the caller.
3410 //go:nosplit
3411 func dounlockOSThread() {
3412 if GOARCH == "wasm" {
3413 return // no threads on wasm yet
3415 _g_ := getg()
3416 if _g_.m.lockedInt != 0 || _g_.m.lockedExt != 0 {
3417 return
3419 _g_.m.lockedg = 0
3420 _g_.lockedm = 0
3423 //go:nosplit
3425 // UnlockOSThread undoes an earlier call to LockOSThread.
3426 // If this drops the number of active LockOSThread calls on the
3427 // calling goroutine to zero, it unwires the calling goroutine from
3428 // its fixed operating system thread.
3429 // If there are no active LockOSThread calls, this is a no-op.
3431 // Before calling UnlockOSThread, the caller must ensure that the OS
3432 // thread is suitable for running other goroutines. If the caller made
3433 // any permanent changes to the state of the thread that would affect
3434 // other goroutines, it should not call this function and thus leave
3435 // the goroutine locked to the OS thread until the goroutine (and
3436 // hence the thread) exits.
3437 func UnlockOSThread() {
3438 _g_ := getg()
3439 if _g_.m.lockedExt == 0 {
3440 return
3442 _g_.m.lockedExt--
3443 dounlockOSThread()
3446 //go:nosplit
3447 func unlockOSThread() {
3448 _g_ := getg()
3449 if _g_.m.lockedInt == 0 {
3450 systemstack(badunlockosthread)
3452 _g_.m.lockedInt--
3453 dounlockOSThread()
3456 func badunlockosthread() {
3457 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
3460 func gcount() int32 {
3461 n := int32(allglen) - sched.ngfree - int32(atomic.Load(&sched.ngsys))
3462 for _, _p_ := range allp {
3463 n -= _p_.gfreecnt
3466 // All these variables can be changed concurrently, so the result can be inconsistent.
3467 // But at least the current goroutine is running.
3468 if n < 1 {
3469 n = 1
3471 return n
3474 func mcount() int32 {
3475 return int32(sched.mnext - sched.nmfreed)
3478 var prof struct {
3479 signalLock uint32
3480 hz int32
3483 func _System() { _System() }
3484 func _ExternalCode() { _ExternalCode() }
3485 func _LostExternalCode() { _LostExternalCode() }
3486 func _GC() { _GC() }
3487 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
3488 func _VDSO() { _VDSO() }
3490 // Counts SIGPROFs received while in atomic64 critical section, on mips{,le}
3491 var lostAtomic64Count uint64
3493 var _SystemPC = funcPC(_System)
3494 var _ExternalCodePC = funcPC(_ExternalCode)
3495 var _LostExternalCodePC = funcPC(_LostExternalCode)
3496 var _GCPC = funcPC(_GC)
3497 var _LostSIGPROFDuringAtomic64PC = funcPC(_LostSIGPROFDuringAtomic64)
3499 // Called if we receive a SIGPROF signal.
3500 // Called by the signal handler, may run during STW.
3501 //go:nowritebarrierrec
3502 func sigprof(pc uintptr, gp *g, mp *m) {
3503 if prof.hz == 0 {
3504 return
3507 // Profiling runs concurrently with GC, so it must not allocate.
3508 // Set a trap in case the code does allocate.
3509 // Note that on windows, one thread takes profiles of all the
3510 // other threads, so mp is usually not getg().m.
3511 // In fact mp may not even be stopped.
3512 // See golang.org/issue/17165.
3513 getg().m.mallocing++
3515 traceback := true
3517 // If SIGPROF arrived while already fetching runtime callers
3518 // we can have trouble on older systems because the unwind
3519 // library calls dl_iterate_phdr which was not reentrant in
3520 // the past. alreadyInCallers checks for that.
3521 if gp == nil || alreadyInCallers() {
3522 traceback = false
3525 var stk [maxCPUProfStack]uintptr
3526 n := 0
3527 if traceback {
3528 var stklocs [maxCPUProfStack]location
3529 n = callers(0, stklocs[:])
3531 // Issue 26595: the stack trace we've just collected is going
3532 // to include frames that we don't want to report in the CPU
3533 // profile, including signal handler frames. Here is what we
3534 // might typically see at the point of "callers" above for a
3535 // signal delivered to the application routine "interesting"
3536 // called by "main".
3538 // 0: runtime.sigprof
3539 // 1: runtime.sighandler
3540 // 2: runtime.sigtrampgo
3541 // 3: runtime.sigtramp
3542 // 4: <signal handler called>
3543 // 5: main.interesting_routine
3544 // 6: main.main
3546 // To ensure a sane profile, walk through the frames in
3547 // "stklocs" until we find the "runtime.sigtramp" frame, then
3548 // report only those frames below the frame one down from
3549 // that. If for some reason "runtime.sigtramp" is not present,
3550 // don't make any changes.
3551 framesToDiscard := 0
3552 for i := 0; i < n; i++ {
3553 if stklocs[i].function == "runtime.sigtramp" && i+2 < n {
3554 framesToDiscard = i + 2
3555 n -= framesToDiscard
3556 break
3559 for i := 0; i < n; i++ {
3560 stk[i] = stklocs[i+framesToDiscard].pc
3564 if n <= 0 {
3565 // Normal traceback is impossible or has failed.
3566 // Account it against abstract "System" or "GC".
3567 n = 2
3568 stk[0] = pc
3569 if mp.preemptoff != "" || mp.helpgc != 0 {
3570 stk[1] = _GCPC + sys.PCQuantum
3571 } else {
3572 stk[1] = _SystemPC + sys.PCQuantum
3576 if prof.hz != 0 {
3577 if (GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm") && lostAtomic64Count > 0 {
3578 cpuprof.addLostAtomic64(lostAtomic64Count)
3579 lostAtomic64Count = 0
3581 cpuprof.add(gp, stk[:n])
3583 getg().m.mallocing--
3586 // Use global arrays rather than using up lots of stack space in the
3587 // signal handler. This is safe since while we are executing a SIGPROF
3588 // signal other SIGPROF signals are blocked.
3589 var nonprofGoStklocs [maxCPUProfStack]location
3590 var nonprofGoStk [maxCPUProfStack]uintptr
3592 // sigprofNonGo is called if we receive a SIGPROF signal on a non-Go thread,
3593 // and the signal handler collected a stack trace in sigprofCallers.
3594 // When this is called, sigprofCallersUse will be non-zero.
3595 // g is nil, and what we can do is very limited.
3596 //go:nosplit
3597 //go:nowritebarrierrec
3598 func sigprofNonGo(pc uintptr) {
3599 if prof.hz != 0 {
3600 n := callers(0, nonprofGoStklocs[:])
3602 for i := 0; i < n; i++ {
3603 nonprofGoStk[i] = nonprofGoStklocs[i].pc
3606 if n <= 0 {
3607 n = 2
3608 nonprofGoStk[0] = pc
3609 nonprofGoStk[1] = _ExternalCodePC + sys.PCQuantum
3612 cpuprof.addNonGo(nonprofGoStk[:n])
3616 // sigprofNonGoPC is called when a profiling signal arrived on a
3617 // non-Go thread and we have a single PC value, not a stack trace.
3618 // g is nil, and what we can do is very limited.
3619 //go:nosplit
3620 //go:nowritebarrierrec
3621 func sigprofNonGoPC(pc uintptr) {
3622 if prof.hz != 0 {
3623 stk := []uintptr{
3625 _ExternalCodePC + sys.PCQuantum,
3627 cpuprof.addNonGo(stk)
3631 // setcpuprofilerate sets the CPU profiling rate to hz times per second.
3632 // If hz <= 0, setcpuprofilerate turns off CPU profiling.
3633 func setcpuprofilerate(hz int32) {
3634 // Force sane arguments.
3635 if hz < 0 {
3636 hz = 0
3639 // Disable preemption, otherwise we can be rescheduled to another thread
3640 // that has profiling enabled.
3641 _g_ := getg()
3642 _g_.m.locks++
3644 // Stop profiler on this thread so that it is safe to lock prof.
3645 // If a profiling signal came in while we had prof locked,
3646 // it would deadlock.
3647 setThreadCPUProfiler(0)
3649 for !atomic.Cas(&prof.signalLock, 0, 1) {
3650 osyield()
3652 if prof.hz != hz {
3653 setProcessCPUProfiler(hz)
3654 prof.hz = hz
3656 atomic.Store(&prof.signalLock, 0)
3658 lock(&sched.lock)
3659 sched.profilehz = hz
3660 unlock(&sched.lock)
3662 if hz != 0 {
3663 setThreadCPUProfiler(hz)
3666 _g_.m.locks--
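// setcpuprofilerate is not called by user code directly; it is typically
// reached through the runtime's CPU-profiling entry points
// (runtime.SetCPUProfileRate, and through it runtime/pprof.StartCPUProfile),
// which is how the SIGPROF-driven profiler handled by sigprof above gets
// switched on and off.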
3669 // Change number of processors. The world is stopped, sched is locked.
3670 // gcworkbufs are not being modified by either the GC or
3671 // the write barrier code.
3672 // Returns list of Ps with local work, they need to be scheduled by the caller.
3673 func procresize(nprocs int32) *p {
3674 old := gomaxprocs
3675 if old < 0 || nprocs <= 0 {
3676 throw("procresize: invalid arg")
3678 if trace.enabled {
3679 traceGomaxprocs(nprocs)
3682 // update statistics
3683 now := nanotime()
3684 if sched.procresizetime != 0 {
3685 sched.totaltime += int64(old) * (now - sched.procresizetime)
3687 sched.procresizetime = now
3689 // Grow allp if necessary.
3690 if nprocs > int32(len(allp)) {
3691 // Synchronize with retake, which could be running
3692 // concurrently since it doesn't run on a P.
3693 lock(&allpLock)
3694 if nprocs <= int32(cap(allp)) {
3695 allp = allp[:nprocs]
3696 } else {
3697 nallp := make([]*p, nprocs)
3698 // Copy everything up to allp's cap so we
3699 // never lose old allocated Ps.
3700 copy(nallp, allp[:cap(allp)])
3701 allp = nallp
3703 unlock(&allpLock)
3706 // initialize new P's
3707 for i := int32(0); i < nprocs; i++ {
3708 pp := allp[i]
3709 if pp == nil {
3710 pp = new(p)
3711 pp.id = i
3712 pp.status = _Pgcstop
3713 pp.sudogcache = pp.sudogbuf[:0]
3714 pp.deferpool = pp.deferpoolbuf[:0]
3715 pp.wbBuf.reset()
3716 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
3718 if pp.mcache == nil {
3719 if old == 0 && i == 0 {
3720 if getg().m.mcache == nil {
3721 throw("missing mcache?")
3723 pp.mcache = getg().m.mcache // bootstrap
3724 } else {
3725 pp.mcache = allocmcache()
3730 // free unused P's
3731 for i := nprocs; i < old; i++ {
3732 p := allp[i]
3733 if trace.enabled && p == getg().m.p.ptr() {
3734 // moving to p[0], pretend that we were descheduled
3735 // and then scheduled again to keep the trace sane.
3736 traceGoSched()
3737 traceProcStop(p)
3739 // move all runnable goroutines to the global queue
3740 for p.runqhead != p.runqtail {
3741 // pop from tail of local queue
3742 p.runqtail--
3743 gp := p.runq[p.runqtail%uint32(len(p.runq))].ptr()
3744 // push onto head of global queue
3745 globrunqputhead(gp)
3747 if p.runnext != 0 {
3748 globrunqputhead(p.runnext.ptr())
3749 p.runnext = 0
3751 // if there's a background worker, make it runnable and put
3752 // it on the global queue so it can clean itself up
3753 if gp := p.gcBgMarkWorker.ptr(); gp != nil {
3754 casgstatus(gp, _Gwaiting, _Grunnable)
3755 if trace.enabled {
3756 traceGoUnpark(gp, 0)
3758 globrunqput(gp)
3759 // This assignment doesn't race because the
3760 // world is stopped.
3761 p.gcBgMarkWorker.set(nil)
3763 // Flush p's write barrier buffer.
3764 if gcphase != _GCoff {
3765 wbBufFlush1(p)
3766 p.gcw.dispose()
3768 for i := range p.sudogbuf {
3769 p.sudogbuf[i] = nil
3771 p.sudogcache = p.sudogbuf[:0]
3772 for i := range p.deferpoolbuf {
3773 p.deferpoolbuf[i] = nil
3775 p.deferpool = p.deferpoolbuf[:0]
3776 freemcache(p.mcache)
3777 p.mcache = nil
3778 gfpurge(p)
3779 traceProcFree(p)
3780 p.gcAssistTime = 0
3781 p.status = _Pdead
3782 // can't free P itself because it can be referenced by an M in syscall
3785 // Trim allp.
3786 if int32(len(allp)) != nprocs {
3787 lock(&allpLock)
3788 allp = allp[:nprocs]
3789 unlock(&allpLock)
3792 _g_ := getg()
3793 if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs {
3794 // continue to use the current P
3795 _g_.m.p.ptr().status = _Prunning
3796 } else {
3797 // release the current P and acquire allp[0]
3798 if _g_.m.p != 0 {
3799 _g_.m.p.ptr().m = 0
3801 _g_.m.p = 0
3802 _g_.m.mcache = nil
3803 p := allp[0]
3804 p.m = 0
3805 p.status = _Pidle
3806 acquirep(p)
3807 if trace.enabled {
3808 traceGoStart()
3811 var runnablePs *p
3812 for i := nprocs - 1; i >= 0; i-- {
3813 p := allp[i]
3814 if _g_.m.p.ptr() == p {
3815 continue
3817 p.status = _Pidle
3818 if runqempty(p) {
3819 pidleput(p)
3820 } else {
3821 p.m.set(mget())
3822 p.link.set(runnablePs)
3823 runnablePs = p
3826 stealOrder.reset(uint32(nprocs))
3827 var int32p *int32 = &gomaxprocs // make compiler check that gomaxprocs is an int32
3828 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
3829 return runnablePs
3832 // Associate p and the current m.
3834 // This function is allowed to have write barriers even if the caller
3835 // isn't because it immediately acquires _p_.
3837 //go:yeswritebarrierrec
3838 func acquirep(_p_ *p) {
3839 // Do the part that isn't allowed to have write barriers.
3840 acquirep1(_p_)
3842 // have p; write barriers now allowed
3843 _g_ := getg()
3844 _g_.m.mcache = _p_.mcache
3846 if trace.enabled {
3847 traceProcStart()
3851 // acquirep1 is the first step of acquirep, which actually acquires
3852 // _p_. This is broken out so we can disallow write barriers for this
3853 // part, since we don't yet have a P.
3855 //go:nowritebarrierrec
3856 func acquirep1(_p_ *p) {
3857 _g_ := getg()
3859 if _g_.m.p != 0 || _g_.m.mcache != nil {
3860 throw("acquirep: already in go")
3862 if _p_.m != 0 || _p_.status != _Pidle {
3863 id := int64(0)
3864 if _p_.m != 0 {
3865 id = _p_.m.ptr().id
3867 print("acquirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n")
3868 throw("acquirep: invalid p state")
3870 _g_.m.p.set(_p_)
3871 _p_.m.set(_g_.m)
3872 _p_.status = _Prunning
3875 // Disassociate p and the current m.
3876 func releasep() *p {
3877 _g_ := getg()
3879 if _g_.m.p == 0 || _g_.m.mcache == nil {
3880 throw("releasep: invalid arg")
3882 _p_ := _g_.m.p.ptr()
3883 if _p_.m.ptr() != _g_.m || _p_.mcache != _g_.m.mcache || _p_.status != _Prunning {
3884 print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", _p_.m, " m->mcache=", _g_.m.mcache, " p->mcache=", _p_.mcache, " p->status=", _p_.status, "\n")
3885 throw("releasep: invalid p state")
3887 if trace.enabled {
3888 traceProcStop(_g_.m.p.ptr())
3890 _g_.m.p = 0
3891 _g_.m.mcache = nil
3892 _p_.m = 0
3893 _p_.status = _Pidle
3894 return _p_
3897 func incidlelocked(v int32) {
3898 lock(&sched.lock)
3899 sched.nmidlelocked += v
3900 if v > 0 {
3901 checkdead()
3903 unlock(&sched.lock)
3906 // Check for deadlock situation.
3907 // The check is based on the number of running M's; if it is 0, we have a deadlock.
3908 // sched.lock must be held.
3909 func checkdead() {
3910 // For -buildmode=c-shared or -buildmode=c-archive it's OK if
3911 // there are no running goroutines. The calling program is
3912 // assumed to be running.
3913 if islibrary || isarchive {
3914 return
3917 // If we are dying because of a signal caught on an already idle thread,
3918 // freezetheworld will cause all running threads to block.
3919 // The runtime will then essentially be in a deadlock state,
3920 // except that there is a thread that will call exit soon.
3921 if panicking > 0 {
3922 return
3925 // If we are not running under cgo but we have an extra M, then account
3926 // for it. (It is possible to have an extra M on Windows without cgo to
3927 // accommodate callbacks created by syscall.NewCallback. See issue #6751
3928 // for details.)
3929 var run0 int32
3930 if !iscgo && cgoHasExtraM {
3931 run0 = 1
3934 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
3935 if run > run0 {
3936 return
3938 if run < 0 {
3939 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
3940 throw("checkdead: inconsistent counts")
3943 grunning := 0
3944 lock(&allglock)
3945 for i := 0; i < len(allgs); i++ {
3946 gp := allgs[i]
3947 if isSystemGoroutine(gp) {
3948 continue
3950 s := readgstatus(gp)
3951 switch s &^ _Gscan {
3952 case _Gwaiting:
3953 grunning++
3954 case _Grunnable,
3955 _Grunning,
3956 _Gsyscall:
3957 unlock(&allglock)
3958 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
3959 throw("checkdead: runnable g")
3962 unlock(&allglock)
3963 if grunning == 0 { // possible if main goroutine calls runtime·Goexit()
3964 throw("no goroutines (main called runtime.Goexit) - deadlock!")
3967 // Maybe jump time forward for playground.
3968 gp := timejump()
3969 if gp != nil {
3970 casgstatus(gp, _Gwaiting, _Grunnable)
3971 globrunqput(gp)
3972 _p_ := pidleget()
3973 if _p_ == nil {
3974 throw("checkdead: no p for timer")
3976 mp := mget()
3977 if mp == nil {
3978 // There should always be a free M since
3979 // nothing is running.
3980 throw("checkdead: no m for timer")
3982 mp.nextp.set(_p_)
3983 notewakeup(&mp.park)
3984 return
3987 getg().m.throwing = -1 // do not dump full stacks
3988 throw("all goroutines are asleep - deadlock!")
3991 // forcegcperiod is the maximum time in nanoseconds between garbage
3992 // collections. If we go this long without a garbage collection, one
3993 // is forced to run.
3995 // This is a variable for testing purposes. It normally doesn't change.
3996 var forcegcperiod int64 = 2 * 60 * 1e9
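// For reference: 2 * 60 * 1e9 ns is 120 s, so with default settings sysmon
// forces a collection at most every two minutes if no GC has run otherwise.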
3998 // Always runs without a P, so write barriers are not allowed.
4000 //go:nowritebarrierrec
4001 func sysmon() {
4002 lock(&sched.lock)
4003 sched.nmsys++
4004 checkdead()
4005 unlock(&sched.lock)
4007 // If a heap span goes unused for 5 minutes after a garbage collection,
4008 // we hand it back to the operating system.
4009 scavengelimit := int64(5 * 60 * 1e9)
4011 if debug.scavenge > 0 {
4012 // Scavenge-a-lot for testing.
4013 forcegcperiod = 10 * 1e6
4014 scavengelimit = 20 * 1e6
4017 lastscavenge := nanotime()
4018 nscavenge := 0
4020 lasttrace := int64(0)
4021 idle := 0 // how many cycles in succession we have not woken anybody up
4022 delay := uint32(0)
4023 for {
4024 if idle == 0 { // start with 20us sleep...
4025 delay = 20
4026 } else if idle > 50 { // start doubling the sleep after 1ms...
4027 delay *= 2
4029 if delay > 10*1000 { // up to 10ms
4030 delay = 10 * 1000
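// Illustrative timing (using the constants above): roughly the first 50
// consecutive idle cycles sleep 20us each (~1ms total); beyond that the
// delay doubles every cycle and is capped at 10ms after about nine doublings.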
4032 usleep(delay)
4033 if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) {
4034 lock(&sched.lock)
4035 if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) {
4036 atomic.Store(&sched.sysmonwait, 1)
4037 unlock(&sched.lock)
4038 // Make wake-up period small enough
4039 // for the sampling to be correct.
4040 maxsleep := forcegcperiod / 2
4041 if scavengelimit < forcegcperiod {
4042 maxsleep = scavengelimit / 2
4044 shouldRelax := true
4045 if osRelaxMinNS > 0 {
4046 next := timeSleepUntil()
4047 now := nanotime()
4048 if next-now < osRelaxMinNS {
4049 shouldRelax = false
4052 if shouldRelax {
4053 osRelax(true)
4055 notetsleep(&sched.sysmonnote, maxsleep)
4056 if shouldRelax {
4057 osRelax(false)
4059 lock(&sched.lock)
4060 atomic.Store(&sched.sysmonwait, 0)
4061 noteclear(&sched.sysmonnote)
4062 idle = 0
4063 delay = 20
4065 unlock(&sched.lock)
4067 // trigger libc interceptors if needed
4068 if *cgo_yield != nil {
4069 asmcgocall(*cgo_yield, nil)
4071 // poll network if not polled for more than 10ms
4072 lastpoll := int64(atomic.Load64(&sched.lastpoll))
4073 now := nanotime()
4074 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
4075 atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now))
4076 gp := netpoll(false) // non-blocking - returns list of goroutines
4077 if gp != nil {
4078 // Need to decrement number of idle locked M's
4079 // (pretending that one more is running) before injectglist.
4080 // Otherwise it can lead to the following situation:
4081 // injectglist grabs all P's but before it starts M's to run the P's,
4082 // another M returns from syscall, finishes running its G,
4083 // observes that there is no work to do and no other running M's
4084 // and reports deadlock.
4085 incidlelocked(-1)
4086 injectglist(gp)
4087 incidlelocked(1)
4090 // retake P's blocked in syscalls
4091 // and preempt long running G's
4092 if retake(now) != 0 {
4093 idle = 0
4094 } else {
4095 idle++
4097 // check if we need to force a GC
4098 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && atomic.Load(&forcegc.idle) != 0 {
4099 lock(&forcegc.lock)
4100 forcegc.idle = 0
4101 forcegc.g.schedlink = 0
4102 injectglist(forcegc.g)
4103 unlock(&forcegc.lock)
4105 // scavenge heap once in a while
4106 if lastscavenge+scavengelimit/2 < now {
4107 mheap_.scavenge(int32(nscavenge), uint64(now), uint64(scavengelimit))
4108 lastscavenge = now
4109 nscavenge++
4111 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
4112 lasttrace = now
4113 schedtrace(debug.scheddetail > 0)
4118 type sysmontick struct {
4119 schedtick uint32
4120 schedwhen int64
4121 syscalltick uint32
4122 syscallwhen int64
4125 // forcePreemptNS is the time slice given to a G before it is
4126 // preempted.
4127 const forcePreemptNS = 10 * 1000 * 1000 // 10ms
4129 func retake(now int64) uint32 {
4130 n := 0
4131 // Prevent allp slice changes. This lock will be completely
4132 // uncontended unless we're already stopping the world.
4133 lock(&allpLock)
4134 // We can't use a range loop over allp because we may
4135 // temporarily drop the allpLock. Hence, we need to re-fetch
4136 // allp each time around the loop.
4137 for i := 0; i < len(allp); i++ {
4138 _p_ := allp[i]
4139 if _p_ == nil {
4140 // This can happen if procresize has grown
4141 // allp but not yet created new Ps.
4142 continue
4144 pd := &_p_.sysmontick
4145 s := _p_.status
4146 if s == _Psyscall {
4147 // Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us).
4148 t := int64(_p_.syscalltick)
4149 if int64(pd.syscalltick) != t {
4150 pd.syscalltick = uint32(t)
4151 pd.syscallwhen = now
4152 continue
4154 // On the one hand we don't want to retake Ps if there is no other work to do,
4155 // but on the other hand we want to retake them eventually
4156 // because they can prevent the sysmon thread from going into a deep sleep.
4157 if runqempty(_p_) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now {
4158 continue
4160 // Drop allpLock so we can take sched.lock.
4161 unlock(&allpLock)
4162 // Need to decrement number of idle locked M's
4163 // (pretending that one more is running) before the CAS.
4164 // Otherwise the M from which we retake can exit the syscall,
4165 // increment nmidle and report deadlock.
4166 incidlelocked(-1)
4167 if atomic.Cas(&_p_.status, s, _Pidle) {
4168 if trace.enabled {
4169 traceGoSysBlock(_p_)
4170 traceProcStop(_p_)
4172 n++
4173 _p_.syscalltick++
4174 handoffp(_p_)
4176 incidlelocked(1)
4177 lock(&allpLock)
4178 } else if s == _Prunning {
4179 // Preempt G if it's running for too long.
4180 t := int64(_p_.schedtick)
4181 if int64(pd.schedtick) != t {
4182 pd.schedtick = uint32(t)
4183 pd.schedwhen = now
4184 continue
4186 if pd.schedwhen+forcePreemptNS > now {
4187 continue
4189 preemptone(_p_)
4192 unlock(&allpLock)
4193 return uint32(n)
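// In summary (as implemented above): a P sitting in _Psyscall is retaken
// once it has been observed there for at least one full sysmon tick and
// either it has local work, there are no spinning Ms or idle Ps to absorb
// that work, or the syscall has lasted 10ms; a P in _Prunning has its G
// preempted once the current time slice exceeds forcePreemptNS (10ms).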
4196 // Tell all goroutines that they have been preempted and they should stop.
4197 // This function is purely best-effort. It can fail to inform a goroutine if a
4198 // processor just started running it.
4199 // No locks need to be held.
4200 // Returns true if preemption request was issued to at least one goroutine.
4201 func preemptall() bool {
4202 res := false
4203 for _, _p_ := range allp {
4204 if _p_.status != _Prunning {
4205 continue
4207 if preemptone(_p_) {
4208 res = true
4211 return res
4214 // Tell the goroutine running on processor P to stop.
4215 // This function is purely best-effort. It can incorrectly fail to inform the
4216 // goroutine. It can inform the wrong goroutine. Even if it informs the
4217 // correct goroutine, that goroutine might ignore the request if it is
4218 // simultaneously executing newstack.
4219 // No lock needs to be held.
4220 // Returns true if preemption request was issued.
4221 // The actual preemption will happen at some point in the future
4222 // and will be indicated by gp->status no longer being
4223 // _Grunning.
4224 func preemptone(_p_ *p) bool {
4225 mp := _p_.m.ptr()
4226 if mp == nil || mp == getg().m {
4227 return false
4229 gp := mp.curg
4230 if gp == nil || gp == mp.g0 {
4231 return false
4234 gp.preempt = true
4236 // At this point the gc implementation sets gp.stackguard0 to
4237 // a value that causes the goroutine to suspend itself.
4238 // gccgo has no support for this, and it's hard to support.
4239 // The split stack code reads a value from its TCB.
4240 // We have no way to set a value in the TCB of a different thread.
4241 // And, of course, not all systems support split stack anyhow.
4242 // Checking the field in the g is expensive, since it requires
4243 // loading the g from TLS. The best mechanism is likely to be
4244 // setting a global variable and figuring out a way to efficiently
4245 // check that global variable.
4247 // For now we check gp.preempt in schedule, mallocgc, selectgo,
4248 // and a few other places, which is at least better than doing
4249 // nothing at all.
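//
// An illustrative sketch of such a cooperative check site (not a quote of
// the real call sites in schedule, mallocgc, or selectgo):
//
//	if getg().preempt {
//		Gosched() // cooperatively yield at a known-safe point
//	}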
4251 return true
4254 var starttime int64
4256 func schedtrace(detailed bool) {
4257 now := nanotime()
4258 if starttime == 0 {
4259 starttime = now
4262 lock(&sched.lock)
4263 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle, " threads=", mcount(), " spinningthreads=", sched.nmspinning, " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
4264 if detailed {
4265 print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n")
4267 // We must be careful while reading data from P's, M's and G's.
4268 // Even if we hold schedlock, most data can be changed concurrently.
4269 // E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil.
4270 for i, _p_ := range allp {
4271 mp := _p_.m.ptr()
4272 h := atomic.Load(&_p_.runqhead)
4273 t := atomic.Load(&_p_.runqtail)
4274 if detailed {
4275 id := int64(-1)
4276 if mp != nil {
4277 id = mp.id
4279 print(" P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gfreecnt, "\n")
4280 } else {
4281 // In non-detailed mode, format the lengths of the per-P run queues as:
4282 // [len1 len2 len3 len4]
4283 print(" ")
4284 if i == 0 {
4285 print("[")
4287 print(t - h)
4288 if i == len(allp)-1 {
4289 print("]\n")
4294 if !detailed {
4295 unlock(&sched.lock)
4296 return
4299 for mp := allm; mp != nil; mp = mp.alllink {
4300 _p_ := mp.p.ptr()
4301 gp := mp.curg
4302 lockedg := mp.lockedg.ptr()
4303 id1 := int32(-1)
4304 if _p_ != nil {
4305 id1 = _p_.id
4307 id2 := int64(-1)
4308 if gp != nil {
4309 id2 = gp.goid
4311 id3 := int64(-1)
4312 if lockedg != nil {
4313 id3 = lockedg.goid
4315 print(" M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, ""+" locks=", mp.locks, " dying=", mp.dying, " helpgc=", mp.helpgc, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=", id3, "\n")
4318 lock(&allglock)
4319 for gi := 0; gi < len(allgs); gi++ {
4320 gp := allgs[gi]
4321 mp := gp.m
4322 lockedm := gp.lockedm.ptr()
4323 id1 := int64(-1)
4324 if mp != nil {
4325 id1 = mp.id
4327 id2 := int64(-1)
4328 if lockedm != nil {
4329 id2 = lockedm.id
4331 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=", id1, " lockedm=", id2, "\n")
4333 unlock(&allglock)
4334 unlock(&sched.lock)
4337 // Put mp on midle list.
4338 // Sched must be locked.
4339 // May run during STW, so write barriers are not allowed.
4340 //go:nowritebarrierrec
4341 func mput(mp *m) {
4342 mp.schedlink = sched.midle
4343 sched.midle.set(mp)
4344 sched.nmidle++
4345 checkdead()
4348 // Try to get an m from midle list.
4349 // Sched must be locked.
4350 // May run during STW, so write barriers are not allowed.
4351 //go:nowritebarrierrec
4352 func mget() *m {
4353 mp := sched.midle.ptr()
4354 if mp != nil {
4355 sched.midle = mp.schedlink
4356 sched.nmidle--
4358 return mp
4361 // Put gp on the global runnable queue.
4362 // Sched must be locked.
4363 // May run during STW, so write barriers are not allowed.
4364 //go:nowritebarrierrec
4365 func globrunqput(gp *g) {
4366 gp.schedlink = 0
4367 if sched.runqtail != 0 {
4368 sched.runqtail.ptr().schedlink.set(gp)
4369 } else {
4370 sched.runqhead.set(gp)
4372 sched.runqtail.set(gp)
4373 sched.runqsize++
4376 // Put gp at the head of the global runnable queue.
4377 // Sched must be locked.
4378 // May run during STW, so write barriers are not allowed.
4379 //go:nowritebarrierrec
4380 func globrunqputhead(gp *g) {
4381 gp.schedlink = sched.runqhead
4382 sched.runqhead.set(gp)
4383 if sched.runqtail == 0 {
4384 sched.runqtail.set(gp)
4386 sched.runqsize++
4389 // Put a batch of runnable goroutines on the global runnable queue.
4390 // Sched must be locked.
4391 func globrunqputbatch(ghead *g, gtail *g, n int32) {
4392 gtail.schedlink = 0
4393 if sched.runqtail != 0 {
4394 sched.runqtail.ptr().schedlink.set(ghead)
4395 } else {
4396 sched.runqhead.set(ghead)
4398 sched.runqtail.set(gtail)
4399 sched.runqsize += n
4402 // Try to get a batch of G's from the global runnable queue.
4403 // Sched must be locked.
4404 func globrunqget(_p_ *p, max int32) *g {
4405 if sched.runqsize == 0 {
4406 return nil
4409 n := sched.runqsize/gomaxprocs + 1
4410 if n > sched.runqsize {
4411 n = sched.runqsize
4413 if max > 0 && n > max {
4414 n = max
4416 if n > int32(len(_p_.runq))/2 {
4417 n = int32(len(_p_.runq)) / 2
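// Worked example (hypothetical numbers): with runqsize == 10 and
// gomaxprocs == 4, the fair share is 10/4+1 == 3; it is then clamped by
// max and by half of the local run queue so that one P cannot drain the
// global queue on its own.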
4420 sched.runqsize -= n
4421 if sched.runqsize == 0 {
4422 sched.runqtail = 0
4425 gp := sched.runqhead.ptr()
4426 sched.runqhead = gp.schedlink
4427 n--
4428 for ; n > 0; n-- {
4429 gp1 := sched.runqhead.ptr()
4430 sched.runqhead = gp1.schedlink
4431 runqput(_p_, gp1, false)
4433 return gp
4436 // Put p on the _Pidle list.
4437 // Sched must be locked.
4438 // May run during STW, so write barriers are not allowed.
4439 //go:nowritebarrierrec
4440 func pidleput(_p_ *p) {
4441 if !runqempty(_p_) {
4442 throw("pidleput: P has non-empty run queue")
4444 _p_.link = sched.pidle
4445 sched.pidle.set(_p_)
4446 atomic.Xadd(&sched.npidle, 1) // TODO: fast atomic
4449 // Try to get a p from the _Pidle list.
4450 // Sched must be locked.
4451 // May run during STW, so write barriers are not allowed.
4452 //go:nowritebarrierrec
4453 func pidleget() *p {
4454 _p_ := sched.pidle.ptr()
4455 if _p_ != nil {
4456 sched.pidle = _p_.link
4457 atomic.Xadd(&sched.npidle, -1) // TODO: fast atomic
4459 return _p_
4462 // runqempty returns true if _p_ has no Gs on its local run queue.
4463 // It never returns true spuriously.
4464 func runqempty(_p_ *p) bool {
4465 // Defend against a race where 1) _p_ has G1 in runqnext but runqhead == runqtail,
4466 // 2) runqput on _p_ kicks G1 to the runq, 3) runqget on _p_ empties runqnext.
4467 // Simply observing that runqhead == runqtail and then observing that runqnext == nil
4468 // does not mean the queue is empty.
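// Concretely (hypothetical interleaving): the observer loads
// runqhead == runqtail, then the owner's runqput moves G1 from runnext
// into runq (advancing runqtail) and a runqget leaves runnext == 0.
// Reading runnext now would wrongly report "empty" even though G1 is
// queued, so the loop below re-reads runqtail and retries if it moved.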
4469 for {
4470 head := atomic.Load(&_p_.runqhead)
4471 tail := atomic.Load(&_p_.runqtail)
4472 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&_p_.runnext)))
4473 if tail == atomic.Load(&_p_.runqtail) {
4474 return head == tail && runnext == 0
4479 // To shake out latent assumptions about scheduling order,
4480 // we introduce some randomness into scheduling decisions
4481 // when running with the race detector.
4482 // The need for this was made obvious by changing the
4483 // (deterministic) scheduling order in Go 1.5 and breaking
4484 // many poorly-written tests.
4485 // With the randomness here, as long as the tests pass
4486 // consistently with -race, they shouldn't have latent scheduling
4487 // assumptions.
4488 const randomizeScheduler = raceenabled
4490 // runqput tries to put g on the local runnable queue.
4491 // If next is false, runqput adds g to the tail of the runnable queue.
4492 // If next is true, runqput puts g in the _p_.runnext slot.
4493 // If the run queue is full, runqput puts g on the global queue.
4494 // Executed only by the owner P.
4495 func runqput(_p_ *p, gp *g, next bool) {
4496 if randomizeScheduler && next && fastrand()%2 == 0 {
4497 next = false
4500 if next {
4501 retryNext:
4502 oldnext := _p_.runnext
4503 if !_p_.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
4504 goto retryNext
4506 if oldnext == 0 {
4507 return
4509 // Kick the old runnext out to the regular run queue.
4510 gp = oldnext.ptr()
4513 retry:
4514 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with consumers
4515 t := _p_.runqtail
4516 if t-h < uint32(len(_p_.runq)) {
4517 _p_.runq[t%uint32(len(_p_.runq))].set(gp)
4518 atomic.Store(&_p_.runqtail, t+1) // store-release, makes the item available for consumption
4519 return
4521 if runqputslow(_p_, gp, h, t) {
4522 return
4524 // the queue is not full, so the put above must succeed on retry
4525 goto retry
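//
// The fast path above is a single-producer ring buffer: only the owner P
// writes runqtail, so it can read runqhead with a load-acquire, fill the
// free slot, and publish it with a store-release of runqtail+1, while
// stealing consumers advance runqhead by CAS. A hedged mini-trace with a
// hypothetical 4-slot queue: h=0,t=0 -> put G1 (t=1) -> put G2 (t=2) ->
// a thief CASes h from 0 to 1 and takes G1; the owner never blocks.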
4528 // Put g and a batch of work from local runnable queue on global queue.
4529 // Executed only by the owner P.
4530 func runqputslow(_p_ *p, gp *g, h, t uint32) bool {
4531 var batch [len(_p_.runq)/2 + 1]*g
4533 // First, grab a batch from local queue.
4534 n := t - h
4535 n = n / 2
4536 if n != uint32(len(_p_.runq)/2) {
4537 throw("runqputslow: queue is not full")
4539 for i := uint32(0); i < n; i++ {
4540 batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr()
4542 if !atomic.Cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume
4543 return false
4545 batch[n] = gp
4547 if randomizeScheduler {
4548 for i := uint32(1); i <= n; i++ {
4549 j := fastrandn(i + 1)
4550 batch[i], batch[j] = batch[j], batch[i]
4554 // Link the goroutines.
4555 for i := uint32(0); i < n; i++ {
4556 batch[i].schedlink.set(batch[i+1])
4559 // Now put the batch on global queue.
4560 lock(&sched.lock)
4561 globrunqputbatch(batch[0], batch[n], int32(n+1))
4562 unlock(&sched.lock)
4563 return true
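// With the 256-entry per-P run queue used here, this moves 128 goroutines
// plus the incoming g to the global queue in one batch, so an overflowing
// producer takes the global lock roughly once per 129 goroutines.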
4566 // Get g from local runnable queue.
4567 // If inheritTime is true, gp should inherit the remaining time in the
4568 // current time slice. Otherwise, it should start a new time slice.
4569 // Executed only by the owner P.
4570 func runqget(_p_ *p) (gp *g, inheritTime bool) {
4571 // If there's a runnext, it's the next G to run.
4572 for {
4573 next := _p_.runnext
4574 if next == 0 {
4575 break
4577 if _p_.runnext.cas(next, 0) {
4578 return next.ptr(), true
4582 for {
4583 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with other consumers
4584 t := _p_.runqtail
4585 if t == h {
4586 return nil, false
4588 gp := _p_.runq[h%uint32(len(_p_.runq))].ptr()
4589 if atomic.Cas(&_p_.runqhead, h, h+1) { // cas-release, commits consume
4590 return gp, false
4595 // Grabs a batch of goroutines from _p_'s runnable queue into batch.
4596 // Batch is a ring buffer starting at batchHead.
4597 // Returns number of grabbed goroutines.
4598 // Can be executed by any P.
4599 func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
4600 for {
4601 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with other consumers
4602 t := atomic.Load(&_p_.runqtail) // load-acquire, synchronize with the producer
4603 n := t - h
4604 n = n - n/2
4605 if n == 0 {
4606 if stealRunNextG {
4607 // Try to steal from _p_.runnext.
4608 if next := _p_.runnext; next != 0 {
4609 if _p_.status == _Prunning {
4610 // Sleep to ensure that _p_ isn't about to run the g
4611 // we are about to steal.
4612 // The important use case here is when the g running
4613 // on _p_ ready()s another g and then almost
4614 // immediately blocks. Instead of stealing runnext
4615 // in this window, back off to give _p_ a chance to
4616 // schedule runnext. This will avoid thrashing gs
4617 // between different Ps.
4618 // A sync chan send/recv takes ~50ns as of time of
4619 // writing, so 3us gives ~50x overshoot.
4620 if GOOS != "windows" {
4621 usleep(3)
4622 } else {
4623 // On Windows, system timer granularity is
4624 // 1-15ms, which is far too coarse for this
4625 // optimization. So just yield.
4626 osyield()
4629 if !_p_.runnext.cas(next, 0) {
4630 continue
4632 batch[batchHead%uint32(len(batch))] = next
4633 return 1
4636 return 0
4638 if n > uint32(len(_p_.runq)/2) { // read inconsistent h and t
4639 continue
4641 for i := uint32(0); i < n; i++ {
4642 g := _p_.runq[(h+i)%uint32(len(_p_.runq))]
4643 batch[(batchHead+i)%uint32(len(batch))] = g
4645 if atomic.Cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume
4646 return n
4651 // Steal half of the elements from the local runnable queue of p2
4652 // and put them onto the local runnable queue of _p_.
4653 // Returns one of the stolen elements (or nil if it failed).
4654 func runqsteal(_p_, p2 *p, stealRunNextG bool) *g {
4655 t := _p_.runqtail
4656 n := runqgrab(p2, &_p_.runq, t, stealRunNextG)
4657 if n == 0 {
4658 return nil
4660 n--
4661 gp := _p_.runq[(t+n)%uint32(len(_p_.runq))].ptr()
4662 if n == 0 {
4663 return gp
4665 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with consumers
4666 if t-h+n >= uint32(len(_p_.runq)) {
4667 throw("runqsteal: runq overflow")
4669 atomic.Store(&_p_.runqtail, t+n) // store-release, makes the item available for consumption
4670 return gp
4673 //go:linkname setMaxThreads runtime..z2fdebug.setMaxThreads
4674 func setMaxThreads(in int) (out int) {
4675 lock(&sched.lock)
4676 out = int(sched.maxmcount)
4677 if in > 0x7fffffff { // MaxInt32
4678 sched.maxmcount = 0x7fffffff
4679 } else {
4680 sched.maxmcount = int32(in)
4682 checkmcount()
4683 unlock(&sched.lock)
4684 return
4687 func haveexperiment(name string) bool {
4688 // The gofrontend does not support experiments.
4689 return false
4692 //go:nosplit
4693 func procPin() int {
4694 _g_ := getg()
4695 mp := _g_.m
4697 mp.locks++
4698 return int(mp.p.ptr().id)
4701 //go:nosplit
4702 func procUnpin() {
4703 _g_ := getg()
4704 _g_.m.locks--
4707 //go:linkname sync_runtime_procPin sync.runtime_procPin
4708 //go:nosplit
4709 func sync_runtime_procPin() int {
4710 return procPin()
4713 //go:linkname sync_runtime_procUnpin sync.runtime_procUnpin
4714 //go:nosplit
4715 func sync_runtime_procUnpin() {
4716 procUnpin()
4719 //go:linkname sync_atomic_runtime_procPin sync..z2fatomic.runtime_procPin
4720 //go:nosplit
4721 func sync_atomic_runtime_procPin() int {
4722 return procPin()
4725 //go:linkname sync_atomic_runtime_procUnpin sync..z2fatomic.runtime_procUnpin
4726 //go:nosplit
4727 func sync_atomic_runtime_procUnpin() {
4728 procUnpin()
4731 // Active spinning for sync.Mutex.
4732 //go:linkname sync_runtime_canSpin sync.runtime_canSpin
4733 //go:nosplit
4734 func sync_runtime_canSpin(i int) bool {
4735 // sync.Mutex is cooperative, so we are conservative with spinning.
4736 // Spin only a few times and only if running on a multicore machine and
4737 // GOMAXPROCS>1 and there is at least one other running P and local runq is empty.
4738 // As opposed to runtime mutexes, we don't do passive spinning here,
4739 // because there can be work on global runq or on other Ps.
4740 if i >= active_spin || ncpu <= 1 || gomaxprocs <= int32(sched.npidle+sched.nmspinning)+1 {
4741 return false
4743 if p := getg().m.p.ptr(); !runqempty(p) {
4744 return false
4746 return true
4749 //go:linkname sync_runtime_doSpin sync.runtime_doSpin
4750 //go:nosplit
4751 func sync_runtime_doSpin() {
4752 procyield(active_spin_cnt)
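//
// Schematically, sync.Mutex consumes these two hooks roughly as follows
// (a simplified sketch, not a verbatim quote of sync/mutex.go):
//
//	iter := 0
//	for mutexIsLockedButNotStarving() && runtime_canSpin(iter) { // schematic condition
//		runtime_doSpin() // short busy-wait via procyield
//		iter++
//	}
//	// otherwise fall back to parking on a semaphore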
4755 var stealOrder randomOrder
4757 // randomOrder/randomEnum are helper types for randomized work stealing.
4758 // They allow enumerating all Ps in different pseudo-random orders without repetitions.
4759 // The algorithm is based on the fact that if we have X such that X and GOMAXPROCS
4760 // are coprime, then the sequence (i + X) % GOMAXPROCS gives the required enumeration.
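// For example, with count == 6 the coprimes are {1, 5}; starting at
// pos == 2 with inc == 5 visits 2, 1, 0, 5, 4, 3 -- every P exactly once
// before the enumeration reports done.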
4761 type randomOrder struct {
4762 count uint32
4763 coprimes []uint32
4766 type randomEnum struct {
4767 i uint32
4768 count uint32
4769 pos uint32
4770 inc uint32
4773 func (ord *randomOrder) reset(count uint32) {
4774 ord.count = count
4775 ord.coprimes = ord.coprimes[:0]
4776 for i := uint32(1); i <= count; i++ {
4777 if gcd(i, count) == 1 {
4778 ord.coprimes = append(ord.coprimes, i)
4783 func (ord *randomOrder) start(i uint32) randomEnum {
4784 return randomEnum{
4785 count: ord.count,
4786 pos: i % ord.count,
4787 inc: ord.coprimes[i%uint32(len(ord.coprimes))],
4791 func (enum *randomEnum) done() bool {
4792 return enum.i == enum.count
4795 func (enum *randomEnum) next() {
4796 enum.i++
4797 enum.pos = (enum.pos + enum.inc) % enum.count
4800 func (enum *randomEnum) position() uint32 {
4801 return enum.pos
4804 func gcd(a, b uint32) uint32 {
4805 for b != 0 {
4806 a, b = b, a%b
4808 return a
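//
// A hedged sketch of how the scheduler's work-stealing loop is expected to
// drive this type (illustrative only; myP stands in for the caller's P and
// this is not a quote of findrunnable):
//
//	for enum := stealOrder.start(fastrand()); !enum.done(); enum.next() {
//		if gp := runqsteal(myP, allp[enum.position()], true); gp != nil {
//			break // found work to run
//		}
//	}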